[AF_IUCV]: Add lock when updating accept_q
author Ursula Braun <braunu@de.ibm.com>
Sun, 15 Jul 2007 02:04:25 +0000 (19:04 -0700)
committer David S. Miller <davem@davemloft.net>
Sun, 15 Jul 2007 02:04:25 +0000 (19:04 -0700)
The accept_q of an af_iucv socket can be corrupted if entries
are added and removed concurrently (a connect request arriving
from one client while an accept call is being processed for
another client).
Solution: add locking when updating accept_q.

Signed-off-by: Ursula Braun <braunu@de.ibm.com>
Acked-by: Frank Pavlic <fpavlic@de.ibm.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
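
For illustration, here is a minimal userspace sketch of the race this
patch closes. It is an analogue, not the kernel code: plain C with
pthreads, and all names (queue, queue_lock, enqueue, unlink_conn,
worker) are hypothetical. Two threads add and remove entries on one
circular doubly linked list, the same structure list_head implements,
and queue_lock stands in for the new accept_q_lock. Comment out the
mutex calls and the prev/next pointer updates from the two threads can
interleave, corrupting the list exactly as described above.

/* Userspace analogue of the accept_q race (hypothetical names,
 * not kernel code): queue_lock plays the role of accept_q_lock.
 * Build with: cc -O2 -o race race.c -lpthread */
#include <pthread.h>
#include <stdio.h>

struct conn {
	struct conn *prev, *next;	/* circular doubly linked, like list_head */
};

static struct conn queue = { &queue, &queue };	/* empty list points at itself */
static pthread_mutex_t queue_lock = PTHREAD_MUTEX_INITIALIZER;

/* Tail insert -- the connect-request side (cf. iucv_accept_enqueue). */
static void enqueue(struct conn *c)
{
	pthread_mutex_lock(&queue_lock);
	c->prev = queue.prev;
	c->next = &queue;
	queue.prev->next = c;
	queue.prev = c;
	pthread_mutex_unlock(&queue_lock);
}

/* Unlink and reinitialize -- the accept side (cf. iucv_accept_unlink). */
static void unlink_conn(struct conn *c)
{
	pthread_mutex_lock(&queue_lock);
	c->prev->next = c->next;
	c->next->prev = c->prev;
	c->prev = c->next = c;	/* like list_del_init() */
	pthread_mutex_unlock(&queue_lock);
}

/* Each thread repeatedly links and unlinks its own node; only the
 * shared lock keeps the four pointer writes from interleaving. */
static void *worker(void *arg)
{
	struct conn *c = arg;
	for (int i = 0; i < 100000; i++) {
		enqueue(c);
		unlink_conn(c);
	}
	return NULL;
}

int main(void)
{
	pthread_t t1, t2;
	struct conn c1, c2;

	pthread_create(&t1, NULL, worker, &c1);
	pthread_create(&t2, NULL, worker, &c2);
	pthread_join(t1, NULL);
	pthread_join(t2, NULL);
	/* With the lock this always prints 1; without it the list
	 * typically ends up corrupted (or the program crashes). */
	printf("queue empty: %d\n", queue.next == &queue && queue.prev == &queue);
	return 0;
}

The patch itself takes the lock with spin_lock_irqsave(), the variant
that is safe regardless of the context (process, softirq, or
interrupt) from which the enqueue and unlink paths are entered.
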
include/net/iucv/af_iucv.h
net/iucv/af_iucv.c

diff --git a/include/net/iucv/af_iucv.h b/include/net/iucv/af_iucv.h
index f9bd11be1891539e807120d8e0bb8e0e3cc634d2..b6c468cd7f5ba9fb61f2627fd15a54cd135206af 100644
--- a/include/net/iucv/af_iucv.h
+++ b/include/net/iucv/af_iucv.h
@@ -60,6 +60,7 @@ struct iucv_sock {
        char                    dst_user_id[8];
        char                    dst_name[8];
        struct list_head        accept_q;
+       spinlock_t              accept_q_lock;
        struct sock             *parent;
        struct iucv_path        *path;
        struct sk_buff_head     send_skb_q;
diff --git a/net/iucv/af_iucv.c b/net/iucv/af_iucv.c
index d9e9ddb8eac5e68a22365e7e1cc1519a8649df04..53ae14c35f70865eb4a224e9b51425d8add8d19a 100644
--- a/net/iucv/af_iucv.c
+++ b/net/iucv/af_iucv.c
@@ -219,6 +219,7 @@ static struct sock *iucv_sock_alloc(struct socket *sock, int proto, gfp_t prio)
 
        sock_init_data(sock, sk);
        INIT_LIST_HEAD(&iucv_sk(sk)->accept_q);
+       spin_lock_init(&iucv_sk(sk)->accept_q_lock);
        skb_queue_head_init(&iucv_sk(sk)->send_skb_q);
        skb_queue_head_init(&iucv_sk(sk)->backlog_skb_q);
        iucv_sk(sk)->send_tag = 0;
@@ -274,15 +275,25 @@ void iucv_sock_unlink(struct iucv_sock_list *l, struct sock *sk)
 
 void iucv_accept_enqueue(struct sock *parent, struct sock *sk)
 {
+       unsigned long flags;
+       struct iucv_sock *par = iucv_sk(parent);
+
        sock_hold(sk);
-       list_add_tail(&iucv_sk(sk)->accept_q, &iucv_sk(parent)->accept_q);
+       spin_lock_irqsave(&par->accept_q_lock, flags);
+       list_add_tail(&iucv_sk(sk)->accept_q, &par->accept_q);
+       spin_unlock_irqrestore(&par->accept_q_lock, flags);
        iucv_sk(sk)->parent = parent;
        parent->sk_ack_backlog++;
 }
 
 void iucv_accept_unlink(struct sock *sk)
 {
+       unsigned long flags;
+       struct iucv_sock *par = iucv_sk(iucv_sk(sk)->parent);
+
+       spin_lock_irqsave(&par->accept_q_lock, flags);
        list_del_init(&iucv_sk(sk)->accept_q);
+       spin_unlock_irqrestore(&par->accept_q_lock, flags);
        iucv_sk(sk)->parent->sk_ack_backlog--;
        iucv_sk(sk)->parent = NULL;
        sock_put(sk);
@@ -298,8 +309,8 @@ struct sock *iucv_accept_dequeue(struct sock *parent, struct socket *newsock)
                lock_sock(sk);
 
                if (sk->sk_state == IUCV_CLOSED) {
-                       release_sock(sk);
                        iucv_accept_unlink(sk);
+                       release_sock(sk);
                        continue;
                }
 
@@ -879,6 +890,7 @@ static int iucv_callback_connreq(struct iucv_path *path,
        /* Find out if this path belongs to af_iucv. */
        read_lock(&iucv_sk_list.lock);
        iucv = NULL;
+       sk = NULL;
        sk_for_each(sk, node, &iucv_sk_list.head)
                if (sk->sk_state == IUCV_LISTEN &&
                    !memcmp(&iucv_sk(sk)->src_name, src_name, 8)) {