Skip to content

Commit

Permalink
net/af_iucv: don't track individual TX skbs for TRANS_HIPER sockets
Browse files Browse the repository at this point in the history
Stop maintaining the skb_send_q list for TRANS_HIPER sockets.

Not only is it extra overhead, but keeping around a list of skb clones
means that we later also have to match the ->sk_txnotify() calls
against these clones and free them accordingly.
The current matching logic (comparing the skbs' shinfo location) is
frustratingly fragile, and breaks if the skb's head is mangled in any
sort of way while passing from dev_queue_xmit() to the device's
HW queue.

Also adjust the interface for ->sk_txnotify(), to make clear that we
don't actually care about any skb internals.

Signed-off-by: Julian Wiedmann <[email protected]>
Acked-by: Willem de Bruijn <[email protected]>
Signed-off-by: Jakub Kicinski <[email protected]>
  • Loading branch information
julianwiedmann authored and kuba-moo committed Jan 29, 2021
1 parent ef6af7b commit 80bc97a
Show file tree
Hide file tree
Showing 3 changed files with 26 additions and 62 deletions.
6 changes: 4 additions & 2 deletions drivers/s390/net/qeth_core_main.c
Original file line number Diff line number Diff line change
Expand Up @@ -1409,10 +1409,12 @@ static void qeth_notify_skbs(struct qeth_qdio_out_q *q,
struct sk_buff *skb;

skb_queue_walk(&buf->skb_list, skb) {
struct sock *sk = skb->sk;

QETH_CARD_TEXT_(q->card, 5, "skbn%d", notification);
QETH_CARD_TEXT_(q->card, 5, "%lx", (long) skb);
if (skb->sk && skb->sk->sk_family == PF_IUCV)
iucv_sk(skb->sk)->sk_txnotify(skb, notification);
if (sk && sk->sk_family == PF_IUCV)
iucv_sk(sk)->sk_txnotify(sk, notification);
}
}

Expand Down
2 changes: 1 addition & 1 deletion include/net/iucv/af_iucv.h
Original file line number Diff line number Diff line change
Expand Up @@ -133,7 +133,7 @@ struct iucv_sock {
atomic_t msg_recv;
atomic_t pendings;
int transport;
void (*sk_txnotify)(struct sk_buff *skb,
void (*sk_txnotify)(struct sock *sk,
enum iucv_tx_notify n);
};

Expand Down
80 changes: 21 additions & 59 deletions net/iucv/af_iucv.c
Original file line number Diff line number Diff line change
Expand Up @@ -89,7 +89,7 @@ static struct sock *iucv_accept_dequeue(struct sock *parent,
static void iucv_sock_kill(struct sock *sk);
static void iucv_sock_close(struct sock *sk);

static void afiucv_hs_callback_txnotify(struct sk_buff *, enum iucv_tx_notify);
static void afiucv_hs_callback_txnotify(struct sock *sk, enum iucv_tx_notify);

/* Call Back functions */
static void iucv_callback_rx(struct iucv_path *, struct iucv_message *);
Expand Down Expand Up @@ -211,7 +211,6 @@ static int afiucv_hs_send(struct iucv_message *imsg, struct sock *sock,
{
struct iucv_sock *iucv = iucv_sk(sock);
struct af_iucv_trans_hdr *phs_hdr;
struct sk_buff *nskb;
int err, confirm_recv = 0;

phs_hdr = skb_push(skb, sizeof(*phs_hdr));
Expand Down Expand Up @@ -261,20 +260,10 @@ static int afiucv_hs_send(struct iucv_message *imsg, struct sock *sock,
}
skb->protocol = cpu_to_be16(ETH_P_AF_IUCV);

__skb_header_release(skb);
nskb = skb_clone(skb, GFP_ATOMIC);
if (!nskb) {
err = -ENOMEM;
goto err_free;
}

skb_queue_tail(&iucv->send_skb_q, nskb);
atomic_inc(&iucv->skbs_in_xmit);
err = dev_queue_xmit(skb);
if (net_xmit_eval(err)) {
atomic_dec(&iucv->skbs_in_xmit);
skb_unlink(nskb, &iucv->send_skb_q);
kfree_skb(nskb);
} else {
atomic_sub(confirm_recv, &iucv->msg_recv);
WARN_ON(atomic_read(&iucv->msg_recv) < 0);
Expand Down Expand Up @@ -2146,67 +2135,40 @@ static int afiucv_hs_rcv(struct sk_buff *skb, struct net_device *dev,
* afiucv_hs_callback_txnotify() - handle send notifications from HiperSockets
* transport
**/
static void afiucv_hs_callback_txnotify(struct sk_buff *skb,
enum iucv_tx_notify n)
static void afiucv_hs_callback_txnotify(struct sock *sk, enum iucv_tx_notify n)
{
struct iucv_sock *iucv = iucv_sk(skb->sk);
struct sock *sk = skb->sk;
struct sk_buff_head *list;
struct sk_buff *list_skb;
struct sk_buff *nskb;
unsigned long flags;
struct iucv_sock *iucv = iucv_sk(sk);

if (sock_flag(sk, SOCK_ZAPPED))
return;

list = &iucv->send_skb_q;
spin_lock_irqsave(&list->lock, flags);
skb_queue_walk_safe(list, list_skb, nskb) {
if (skb_shinfo(list_skb) == skb_shinfo(skb)) {
switch (n) {
case TX_NOTIFY_OK:
atomic_dec(&iucv->skbs_in_xmit);
__skb_unlink(list_skb, list);
kfree_skb(list_skb);
iucv_sock_wake_msglim(sk);
break;
case TX_NOTIFY_PENDING:
atomic_inc(&iucv->pendings);
break;
case TX_NOTIFY_DELAYED_OK:
atomic_dec(&iucv->skbs_in_xmit);
__skb_unlink(list_skb, list);
atomic_dec(&iucv->pendings);
if (atomic_read(&iucv->pendings) <= 0)
iucv_sock_wake_msglim(sk);
kfree_skb(list_skb);
break;
case TX_NOTIFY_UNREACHABLE:
case TX_NOTIFY_DELAYED_UNREACHABLE:
case TX_NOTIFY_TPQFULL: /* not yet used */
case TX_NOTIFY_GENERALERROR:
case TX_NOTIFY_DELAYED_GENERALERROR:
atomic_dec(&iucv->skbs_in_xmit);
__skb_unlink(list_skb, list);
kfree_skb(list_skb);
if (sk->sk_state == IUCV_CONNECTED) {
sk->sk_state = IUCV_DISCONN;
sk->sk_state_change(sk);
}
break;
}
break;
switch (n) {
case TX_NOTIFY_OK:
atomic_dec(&iucv->skbs_in_xmit);
iucv_sock_wake_msglim(sk);
break;
case TX_NOTIFY_PENDING:
atomic_inc(&iucv->pendings);
break;
case TX_NOTIFY_DELAYED_OK:
atomic_dec(&iucv->skbs_in_xmit);
if (atomic_dec_return(&iucv->pendings) <= 0)
iucv_sock_wake_msglim(sk);
break;
default:
atomic_dec(&iucv->skbs_in_xmit);
if (sk->sk_state == IUCV_CONNECTED) {
sk->sk_state = IUCV_DISCONN;
sk->sk_state_change(sk);
}
}
spin_unlock_irqrestore(&list->lock, flags);

if (sk->sk_state == IUCV_CLOSING) {
if (atomic_read(&iucv->skbs_in_xmit) == 0) {
sk->sk_state = IUCV_CLOSED;
sk->sk_state_change(sk);
}
}

}

/*
Expand Down

0 comments on commit 80bc97a

Please sign in to comment.