Skip to content

Commit

Permalink
rxrpc: Unlock new call in rxrpc_new_incoming_call() rather than the caller
Browse files Browse the repository at this point in the history

Move the unlock and the ping transmission for a new incoming call into
rxrpc_new_incoming_call() rather than doing it in the caller.  This makes
it clearer to see what's going on.

Suggested-by: Peter Zijlstra <[email protected]>
Signed-off-by: David Howells <[email protected]>
Acked-by: Peter Zijlstra (Intel) <[email protected]>
cc: Ingo Molnar <[email protected]>
cc: Will Deacon <[email protected]>
cc: Davidlohr Bueso <[email protected]>
  • Loading branch information
dhowells committed Dec 20, 2019
1 parent 615f22f commit f33121c
Show file tree
Hide file tree
Showing 2 changed files with 28 additions and 26 deletions.
36 changes: 28 additions & 8 deletions net/rxrpc/call_accept.c
Original file line number Diff line number Diff line change
Expand Up @@ -239,6 +239,22 @@ void rxrpc_discard_prealloc(struct rxrpc_sock *rx)
kfree(b);
}

/*
* Ping the other end to fill our RTT cache and to retrieve the rwind
* and MTU parameters.
*/
static void rxrpc_send_ping(struct rxrpc_call *call, struct sk_buff *skb)
{
struct rxrpc_skb_priv *sp = rxrpc_skb(skb);
ktime_t now = skb->tstamp;

if (call->peer->rtt_usage < 3 ||
ktime_before(ktime_add_ms(call->peer->rtt_last_req, 1000), now))
rxrpc_propose_ACK(call, RXRPC_ACK_PING, sp->hdr.serial,
true, true,
rxrpc_propose_ack_ping_for_params);
}

/*
* Allocate a new incoming call from the prealloc pool, along with a connection
* and a peer as necessary.
Expand Down Expand Up @@ -346,9 +362,7 @@ struct rxrpc_call *rxrpc_new_incoming_call(struct rxrpc_local *local,
sp->hdr.seq, RX_INVALID_OPERATION, ESHUTDOWN);
skb->mark = RXRPC_SKB_MARK_REJECT_ABORT;
skb->priority = RX_INVALID_OPERATION;
_leave(" = NULL [close]");
call = NULL;
goto out;
goto no_call;
}

/* The peer, connection and call may all have sprung into existence due
Expand All @@ -361,9 +375,7 @@ struct rxrpc_call *rxrpc_new_incoming_call(struct rxrpc_local *local,
call = rxrpc_alloc_incoming_call(rx, local, peer, conn, skb);
if (!call) {
skb->mark = RXRPC_SKB_MARK_REJECT_BUSY;
_leave(" = NULL [busy]");
call = NULL;
goto out;
goto no_call;
}

trace_rxrpc_receive(call, rxrpc_receive_incoming,
Expand Down Expand Up @@ -432,10 +444,18 @@ struct rxrpc_call *rxrpc_new_incoming_call(struct rxrpc_local *local,
*/
rxrpc_put_call(call, rxrpc_call_put);

_leave(" = %p{%d}", call, call->debug_id);
out:
spin_unlock(&rx->incoming_lock);

rxrpc_send_ping(call, skb);
mutex_unlock(&call->user_mutex);

_leave(" = %p{%d}", call, call->debug_id);
return call;

no_call:
spin_unlock(&rx->incoming_lock);
_leave(" = NULL [%u]", skb->mark);
return NULL;
}

/*
Expand Down
18 changes: 0 additions & 18 deletions net/rxrpc/input.c
Original file line number Diff line number Diff line change
Expand Up @@ -192,22 +192,6 @@ static void rxrpc_congestion_management(struct rxrpc_call *call,
goto out_no_clear_ca;
}

/*
 * Ping the other end to fill our RTT cache and to retrieve the rwind
 * and MTU parameters.
 */
static void rxrpc_send_ping(struct rxrpc_call *call, struct sk_buff *skb)
{
	struct rxrpc_skb_priv *sp = rxrpc_skb(skb);
	/* Use the packet's receive timestamp as "now" for staleness checks. */
	ktime_t now = skb->tstamp;

	/* Propose a PING ACK if rtt_usage is below 3 (presumably fewer than
	 * three cached RTT samples — verify against struct rxrpc_peer) or if
	 * the last RTT request was sent more than 1000ms before this packet.
	 */
	if (call->peer->rtt_usage < 3 ||
	    ktime_before(ktime_add_ms(call->peer->rtt_last_req, 1000), now))
		rxrpc_propose_ACK(call, RXRPC_ACK_PING, sp->hdr.serial,
				  true, true,
				  rxrpc_propose_ack_ping_for_params);
}

/*
* Apply a hard ACK by advancing the Tx window.
*/
Expand Down Expand Up @@ -1396,8 +1380,6 @@ int rxrpc_input_packet(struct sock *udp_sk, struct sk_buff *skb)
call = rxrpc_new_incoming_call(local, rx, skb);
if (!call)
goto reject_packet;
rxrpc_send_ping(call, skb);
mutex_unlock(&call->user_mutex);
}

/* Process a call packet; this either discards or passes on the ref
Expand Down

0 comments on commit f33121c

Please sign in to comment.