[AF_IUCV]: Add lock when updating accept_q
The accept_q of an af_iucv socket can become corrupted if entries are added
to and deleted from the queue at the same time (a connect request from one
client arriving while an accept call is being processed for another client).
Solution: add locking when updating accept_q.

Signed-off-by: Ursula Braun <[email protected]>
Acked-by: Frank Pavlic <[email protected]>
Signed-off-by: David S. Miller <[email protected]>
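To make the race concrete, here is a minimal userspace analogue (a hypothetical sketch, not part of the patch and not kernel code): two threads concurrently add and remove entries on one circular doubly linked list, the same operation pattern as iucv_accept_enqueue/iucv_accept_unlink. The pthread mutex stands in for the new accept_q_lock; the patch itself uses spin_lock_irqsave(), presumably because the queue is also updated from IUCV callback context. The names node, queue, enqueue, dequeue and worker below are illustrative only.

/*
 * Illustrative userspace analogue of the accept_q race (hypothetical
 * example, not the kernel code): two threads concurrently add and remove
 * entries on one circular doubly linked list.  The pthread mutex stands
 * in for accept_q_lock; remove the lock/unlock calls and the list
 * pointers can be corrupted.  Build with: cc -pthread race_demo.c
 */
#include <pthread.h>
#include <stdio.h>

struct node {
	struct node *next, *prev;
};

static struct node queue = { &queue, &queue };	/* empty list head */
static pthread_mutex_t queue_lock = PTHREAD_MUTEX_INITIALIZER;

static void enqueue(struct node *n)		/* like list_add_tail() */
{
	pthread_mutex_lock(&queue_lock);
	n->prev = queue.prev;
	n->next = &queue;
	queue.prev->next = n;
	queue.prev = n;
	pthread_mutex_unlock(&queue_lock);
}

static void dequeue(struct node *n)		/* like list_del_init() */
{
	pthread_mutex_lock(&queue_lock);
	n->prev->next = n->next;
	n->next->prev = n->prev;
	n->next = n->prev = n;
	pthread_mutex_unlock(&queue_lock);
}

static void *worker(void *arg)
{
	struct node n;
	int i;

	(void)arg;
	for (i = 0; i < 100000; i++) {
		enqueue(&n);
		dequeue(&n);
	}
	return NULL;
}

int main(void)
{
	pthread_t a, b;

	pthread_create(&a, NULL, worker, NULL);
	pthread_create(&b, NULL, worker, NULL);
	pthread_join(a, NULL);
	pthread_join(b, NULL);
	printf("queue empty again: %s\n",
	       (queue.next == &queue && queue.prev == &queue) ? "yes" : "no");
	return 0;
}

With the mutex held around every list update the final check prints "yes"; without it, concurrent updates can corrupt the head's next/prev pointers within a few iterations, which is the failure mode the commit message describes for concurrent connect and accept processing.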
Ursula Braun authored and davem330 committed Jul 15, 2007
1 parent 13fdc9a commit febca28
Showing 2 changed files with 15 additions and 2 deletions.
1 change: 1 addition & 0 deletions include/net/iucv/af_iucv.h
@@ -60,6 +60,7 @@ struct iucv_sock {
 	char			dst_user_id[8];
 	char			dst_name[8];
 	struct list_head	accept_q;
+	spinlock_t		accept_q_lock;
 	struct sock		*parent;
 	struct iucv_path	*path;
 	struct sk_buff_head	send_skb_q;
16 changes: 14 additions & 2 deletions net/iucv/af_iucv.c
@@ -219,6 +219,7 @@ static struct sock *iucv_sock_alloc(struct socket *sock, int proto, gfp_t prio)
 
 	sock_init_data(sock, sk);
 	INIT_LIST_HEAD(&iucv_sk(sk)->accept_q);
+	spin_lock_init(&iucv_sk(sk)->accept_q_lock);
 	skb_queue_head_init(&iucv_sk(sk)->send_skb_q);
 	skb_queue_head_init(&iucv_sk(sk)->backlog_skb_q);
 	iucv_sk(sk)->send_tag = 0;
@@ -274,15 +275,25 @@ void iucv_sock_unlink(struct iucv_sock_list *l, struct sock *sk)
 
 void iucv_accept_enqueue(struct sock *parent, struct sock *sk)
 {
+	unsigned long flags;
+	struct iucv_sock *par = iucv_sk(parent);
+
 	sock_hold(sk);
-	list_add_tail(&iucv_sk(sk)->accept_q, &iucv_sk(parent)->accept_q);
+	spin_lock_irqsave(&par->accept_q_lock, flags);
+	list_add_tail(&iucv_sk(sk)->accept_q, &par->accept_q);
+	spin_unlock_irqrestore(&par->accept_q_lock, flags);
 	iucv_sk(sk)->parent = parent;
 	parent->sk_ack_backlog++;
 }
 
 void iucv_accept_unlink(struct sock *sk)
 {
+	unsigned long flags;
+	struct iucv_sock *par = iucv_sk(iucv_sk(sk)->parent);
+
+	spin_lock_irqsave(&par->accept_q_lock, flags);
 	list_del_init(&iucv_sk(sk)->accept_q);
+	spin_unlock_irqrestore(&par->accept_q_lock, flags);
 	iucv_sk(sk)->parent->sk_ack_backlog--;
 	iucv_sk(sk)->parent = NULL;
 	sock_put(sk);
@@ -298,8 +309,8 @@ struct sock *iucv_accept_dequeue(struct sock *parent, struct socket *newsock)
 		lock_sock(sk);
 
 		if (sk->sk_state == IUCV_CLOSED) {
-			release_sock(sk);
 			iucv_accept_unlink(sk);
+			release_sock(sk);
 			continue;
 		}
 
@@ -879,6 +890,7 @@ static int iucv_callback_connreq(struct iucv_path *path,
 	/* Find out if this path belongs to af_iucv. */
 	read_lock(&iucv_sk_list.lock);
 	iucv = NULL;
+	sk = NULL;
 	sk_for_each(sk, node, &iucv_sk_list.head)
 		if (sk->sk_state == IUCV_LISTEN &&
 		    !memcmp(&iucv_sk(sk)->src_name, src_name, 8)) {
