Skip to content

Commit

Permalink
Merge tag 'rxrpc-rewrite-20160930' of git://git.kernel.org/pub/scm/linux/kernel/git/dhowells/linux-fs

Browse files Browse the repository at this point in the history

David Howells says:

====================
rxrpc: More fixes and adjustments

This set of patches contains some more fixes and adjustments:

 (1) Actually display the retransmission indication previously added to the
     tx_data trace.

 (2) Switch to Congestion Avoidance mode properly at cwnd==ssthresh rather
     than relying on detection during an overshoot and correction.

 (3) Reduce ssthresh to the peer's declared receive window.

 (4) The offset field in rxrpc_skb_priv can be dispensed with and the error
     field is no longer used.  Get rid of them.

 (5) Keep the call timeouts as ktimes rather than jiffies to make it easier
     to deal with RTT-based timeout values in future.  Rounding to jiffies
     is still necessary when the system timer is set.

 (6) Fix the call timer handling to avoid retriggering of expired timeout
     actions.
====================

Signed-off-by: David S. Miller <[email protected]>
  • Loading branch information
davem330 committed Oct 3, 2016
2 parents 32986b5 + 405dea1 commit 7667d44
Show file tree
Hide file tree
Showing 12 changed files with 125 additions and 105 deletions.
28 changes: 15 additions & 13 deletions include/trace/events/rxrpc.h
Original file line number Diff line number Diff line change
Expand Up @@ -280,11 +280,12 @@ TRACE_EVENT(rxrpc_tx_data,
__entry->lose = lose;
),

TP_printk("c=%p DATA %08x q=%08x fl=%02x%s",
TP_printk("c=%p DATA %08x q=%08x fl=%02x%s%s",
__entry->call,
__entry->serial,
__entry->seq,
__entry->flags,
__entry->retrans ? " *RETRANS*" : "",
__entry->lose ? " *LOSE*" : "")
);

Expand Down Expand Up @@ -452,17 +453,18 @@ TRACE_EVENT(rxrpc_rtt_rx,

TRACE_EVENT(rxrpc_timer,
TP_PROTO(struct rxrpc_call *call, enum rxrpc_timer_trace why,
unsigned long now),
ktime_t now, unsigned long now_j),

TP_ARGS(call, why, now),
TP_ARGS(call, why, now, now_j),

TP_STRUCT__entry(
__field(struct rxrpc_call *, call )
__field(enum rxrpc_timer_trace, why )
__field(unsigned long, now )
__field(unsigned long, expire_at )
__field(unsigned long, ack_at )
__field(unsigned long, resend_at )
__field_struct(ktime_t, now )
__field_struct(ktime_t, expire_at )
__field_struct(ktime_t, ack_at )
__field_struct(ktime_t, resend_at )
__field(unsigned long, now_j )
__field(unsigned long, timer )
),

Expand All @@ -473,17 +475,17 @@ TRACE_EVENT(rxrpc_timer,
__entry->expire_at = call->expire_at;
__entry->ack_at = call->ack_at;
__entry->resend_at = call->resend_at;
__entry->now_j = now_j;
__entry->timer = call->timer.expires;
),

TP_printk("c=%p %s now=%lx x=%ld a=%ld r=%ld t=%ld",
TP_printk("c=%p %s x=%lld a=%lld r=%lld t=%ld",
__entry->call,
rxrpc_timer_traces[__entry->why],
__entry->now,
__entry->expire_at - __entry->now,
__entry->ack_at - __entry->now,
__entry->resend_at - __entry->now,
__entry->timer - __entry->now)
ktime_to_ns(ktime_sub(__entry->expire_at, __entry->now)),
ktime_to_ns(ktime_sub(__entry->ack_at, __entry->now)),
ktime_to_ns(ktime_sub(__entry->resend_at, __entry->now)),
__entry->timer - __entry->now_j)
);

TRACE_EVENT(rxrpc_rx_lose,
Expand Down
10 changes: 4 additions & 6 deletions net/rxrpc/ar-internal.h
Original file line number Diff line number Diff line change
Expand Up @@ -144,9 +144,7 @@ struct rxrpc_skb_priv {
u8 nr_jumbo; /* Number of jumbo subpackets */
};
union {
unsigned int offset; /* offset into buffer of next read */
int remain; /* amount of space remaining for next write */
u32 error; /* network error code */
};

struct rxrpc_host_header hdr; /* RxRPC packet header from this packet */
Expand Down Expand Up @@ -466,9 +464,9 @@ struct rxrpc_call {
struct rxrpc_connection *conn; /* connection carrying call */
struct rxrpc_peer *peer; /* Peer record for remote address */
struct rxrpc_sock __rcu *socket; /* socket responsible */
unsigned long ack_at; /* When deferred ACK needs to happen */
unsigned long resend_at; /* When next resend needs to happen */
unsigned long expire_at; /* When the call times out */
ktime_t ack_at; /* When deferred ACK needs to happen */
ktime_t resend_at; /* When next resend needs to happen */
ktime_t expire_at; /* When the call times out */
struct timer_list timer; /* Combined event timer */
struct work_struct processor; /* Event processor */
rxrpc_notify_rx_t notify_rx; /* kernel service Rx notification function */
Expand Down Expand Up @@ -807,7 +805,7 @@ int rxrpc_reject_call(struct rxrpc_sock *);
/*
* call_event.c
*/
void rxrpc_set_timer(struct rxrpc_call *, enum rxrpc_timer_trace);
void rxrpc_set_timer(struct rxrpc_call *, enum rxrpc_timer_trace, ktime_t);
void rxrpc_propose_ACK(struct rxrpc_call *, u8, u16, u32, bool, bool,
enum rxrpc_propose_ack_trace);
void rxrpc_process_call(struct work_struct *);
Expand Down
87 changes: 51 additions & 36 deletions net/rxrpc/call_event.c
Original file line number Diff line number Diff line change
Expand Up @@ -24,29 +24,53 @@
/*
* Set the timer
*/
void rxrpc_set_timer(struct rxrpc_call *call, enum rxrpc_timer_trace why)
void rxrpc_set_timer(struct rxrpc_call *call, enum rxrpc_timer_trace why,
ktime_t now)
{
unsigned long t, now = jiffies;
unsigned long t_j, now_j = jiffies;
ktime_t t;
bool queue = false;

read_lock_bh(&call->state_lock);

if (call->state < RXRPC_CALL_COMPLETE) {
t = call->expire_at;
if (time_before_eq(t, now))
if (!ktime_after(t, now))
goto out;

if (time_after(call->resend_at, now) &&
time_before(call->resend_at, t))
if (!ktime_after(call->resend_at, now)) {
call->resend_at = call->expire_at;
if (!test_and_set_bit(RXRPC_CALL_EV_RESEND, &call->events))
queue = true;
} else if (ktime_before(call->resend_at, t)) {
t = call->resend_at;
}

if (time_after(call->ack_at, now) &&
time_before(call->ack_at, t))
if (!ktime_after(call->ack_at, now)) {
call->ack_at = call->expire_at;
if (!test_and_set_bit(RXRPC_CALL_EV_ACK, &call->events))
queue = true;
} else if (ktime_before(call->ack_at, t)) {
t = call->ack_at;
}

if (call->timer.expires != t || !timer_pending(&call->timer)) {
mod_timer(&call->timer, t);
trace_rxrpc_timer(call, why, now);
t_j = nsecs_to_jiffies(ktime_to_ns(ktime_sub(t, now)));
t_j += jiffies;

/* We have to make sure that the calculated jiffies value falls
* at or after the nsec value, or we may loop ceaselessly
* because the timer times out, but we haven't reached the nsec
* timeout yet.
*/
t_j++;

if (call->timer.expires != t_j || !timer_pending(&call->timer)) {
mod_timer(&call->timer, t_j);
trace_rxrpc_timer(call, why, now, now_j);
}

if (queue)
rxrpc_queue_call(call);
}

out:
Expand All @@ -62,7 +86,8 @@ static void __rxrpc_propose_ACK(struct rxrpc_call *call, u8 ack_reason,
enum rxrpc_propose_ack_trace why)
{
enum rxrpc_propose_ack_outcome outcome = rxrpc_propose_ack_use;
unsigned long now, ack_at, expiry = rxrpc_soft_ack_delay;
unsigned int expiry = rxrpc_soft_ack_delay;
ktime_t now, ack_at;
s8 prior = rxrpc_ack_priority[ack_reason];

/* Update DELAY, IDLE, REQUESTED and PING_RESPONSE ACK serial
Expand Down Expand Up @@ -111,7 +136,6 @@ static void __rxrpc_propose_ACK(struct rxrpc_call *call, u8 ack_reason,
break;
}

now = jiffies;
if (test_bit(RXRPC_CALL_EV_ACK, &call->events)) {
_debug("already scheduled");
} else if (immediate || expiry == 0) {
Expand All @@ -120,11 +144,11 @@ static void __rxrpc_propose_ACK(struct rxrpc_call *call, u8 ack_reason,
background)
rxrpc_queue_call(call);
} else {
ack_at = now + expiry;
_debug("deferred ACK %ld < %ld", expiry, call->ack_at - now);
if (time_before(ack_at, call->ack_at)) {
now = ktime_get_real();
ack_at = ktime_add_ms(now, expiry);
if (ktime_before(ack_at, call->ack_at)) {
call->ack_at = ack_at;
rxrpc_set_timer(call, rxrpc_timer_set_for_ack);
rxrpc_set_timer(call, rxrpc_timer_set_for_ack, now);
}
}

Expand Down Expand Up @@ -157,12 +181,12 @@ static void rxrpc_congestion_timeout(struct rxrpc_call *call)
/*
* Perform retransmission of NAK'd and unack'd packets.
*/
static void rxrpc_resend(struct rxrpc_call *call)
static void rxrpc_resend(struct rxrpc_call *call, ktime_t now)
{
struct rxrpc_skb_priv *sp;
struct sk_buff *skb;
rxrpc_seq_t cursor, seq, top;
ktime_t now = ktime_get_real(), max_age, oldest, resend_at, ack_ts;
ktime_t max_age, oldest, ack_ts;
int ix;
u8 annotation, anno_type, retrans = 0, unacked = 0;

Expand Down Expand Up @@ -212,14 +236,7 @@ static void rxrpc_resend(struct rxrpc_call *call)
ktime_to_ns(ktime_sub(skb->tstamp, max_age)));
}

resend_at = ktime_add_ms(oldest, rxrpc_resend_timeout);
call->resend_at = jiffies +
nsecs_to_jiffies(ktime_to_ns(ktime_sub(resend_at, now))) +
1; /* We have to make sure that the calculated jiffies value
* falls at or after the nsec value, or we shall loop
* ceaselessly because the timer times out, but we haven't
* reached the nsec timeout yet.
*/
call->resend_at = ktime_add_ms(oldest, rxrpc_resend_timeout);

if (unacked)
rxrpc_congestion_timeout(call);
Expand All @@ -229,7 +246,7 @@ static void rxrpc_resend(struct rxrpc_call *call)
* retransmitting data.
*/
if (!retrans) {
rxrpc_set_timer(call, rxrpc_timer_set_for_resend);
rxrpc_set_timer(call, rxrpc_timer_set_for_resend, now);
spin_unlock_bh(&call->lock);
ack_ts = ktime_sub(now, call->acks_latest_ts);
if (ktime_to_ns(ack_ts) < call->peer->rtt)
Expand Down Expand Up @@ -301,7 +318,7 @@ void rxrpc_process_call(struct work_struct *work)
{
struct rxrpc_call *call =
container_of(work, struct rxrpc_call, processor);
unsigned long now;
ktime_t now;

rxrpc_see_call(call);

Expand All @@ -320,29 +337,27 @@ void rxrpc_process_call(struct work_struct *work)
goto out_put;
}

now = jiffies;
if (time_after_eq(now, call->expire_at)) {
now = ktime_get_real();
if (ktime_before(call->expire_at, now)) {
rxrpc_abort_call("EXP", call, 0, RX_CALL_TIMEOUT, ETIME);
set_bit(RXRPC_CALL_EV_ABORT, &call->events);
goto recheck_state;
}

if (test_and_clear_bit(RXRPC_CALL_EV_ACK, &call->events) ||
time_after_eq(now, call->ack_at)) {
if (test_and_clear_bit(RXRPC_CALL_EV_ACK, &call->events)) {
call->ack_at = call->expire_at;
if (call->ackr_reason) {
rxrpc_send_call_packet(call, RXRPC_PACKET_TYPE_ACK);
goto recheck_state;
}
}

if (test_and_clear_bit(RXRPC_CALL_EV_RESEND, &call->events) ||
time_after_eq(now, call->resend_at)) {
rxrpc_resend(call);
if (test_and_clear_bit(RXRPC_CALL_EV_RESEND, &call->events)) {
rxrpc_resend(call, now);
goto recheck_state;
}

rxrpc_set_timer(call, rxrpc_timer_set_for_resend);
rxrpc_set_timer(call, rxrpc_timer_set_for_resend, now);

/* other events may have been raised since we started checking */
if (call->events && call->state < RXRPC_CALL_COMPLETE) {
Expand Down
19 changes: 6 additions & 13 deletions net/rxrpc/call_object.c
Original file line number Diff line number Diff line change
Expand Up @@ -19,11 +19,6 @@
#include <net/af_rxrpc.h>
#include "ar-internal.h"

/*
* Maximum lifetime of a call (in jiffies).
*/
unsigned int rxrpc_max_call_lifetime = 60 * HZ;

const char *const rxrpc_call_states[NR__RXRPC_CALL_STATES] = {
[RXRPC_CALL_UNINITIALISED] = "Uninit ",
[RXRPC_CALL_CLIENT_AWAIT_CONN] = "ClWtConn",
Expand Down Expand Up @@ -76,10 +71,8 @@ static void rxrpc_call_timer_expired(unsigned long _call)

_enter("%d", call->debug_id);

if (call->state < RXRPC_CALL_COMPLETE) {
trace_rxrpc_timer(call, rxrpc_timer_expired, jiffies);
rxrpc_queue_call(call);
}
if (call->state < RXRPC_CALL_COMPLETE)
rxrpc_set_timer(call, rxrpc_timer_expired, ktime_get_real());
}

/*
Expand Down Expand Up @@ -207,14 +200,14 @@ static struct rxrpc_call *rxrpc_alloc_client_call(struct sockaddr_rxrpc *srx,
*/
static void rxrpc_start_call_timer(struct rxrpc_call *call)
{
unsigned long expire_at;
ktime_t now = ktime_get_real(), expire_at;

expire_at = jiffies + rxrpc_max_call_lifetime;
expire_at = ktime_add_ms(now, rxrpc_max_call_lifetime);
call->expire_at = expire_at;
call->ack_at = expire_at;
call->resend_at = expire_at;
call->timer.expires = expire_at + 1;
rxrpc_set_timer(call, rxrpc_timer_begin);
call->timer.expires = jiffies + LONG_MAX / 2;
rxrpc_set_timer(call, rxrpc_timer_begin, now);
}

/*
Expand Down
3 changes: 2 additions & 1 deletion net/rxrpc/conn_event.c
Original file line number Diff line number Diff line change
Expand Up @@ -276,7 +276,8 @@ static int rxrpc_process_event(struct rxrpc_connection *conn,
return 0;

case RXRPC_PACKET_TYPE_ABORT:
if (skb_copy_bits(skb, sp->offset, &wtmp, sizeof(wtmp)) < 0)
if (skb_copy_bits(skb, sizeof(struct rxrpc_wire_header),
&wtmp, sizeof(wtmp)) < 0)
return -EPROTO;
abort_code = ntohl(wtmp);
_proto("Rx ABORT %%%u { ac=%d }", sp->hdr.serial, abort_code);
Expand Down
Loading

0 comments on commit 7667d44

Please sign in to comment.