Skip to content

Commit

Permalink
[TCP]: Abstract out all write queue operations.
Browse files Browse the repository at this point in the history
This allows the write queue implementation to be changed,
for example, to one which allows fast interval searching.

Signed-off-by: David S. Miller <[email protected]>
  • Loading branch information
David S. Miller committed Apr 26, 2007
1 parent 02ea492 commit fe067e8
Show file tree
Hide file tree
Showing 7 changed files with 221 additions and 117 deletions.
21 changes: 0 additions & 21 deletions include/net/sock.h
Original file line number Diff line number Diff line change
Expand Up @@ -710,15 +710,6 @@ static inline void sk_stream_mem_reclaim(struct sock *sk)
__sk_stream_mem_reclaim(sk);
}

/* Free every skb still sitting on sk's write queue, then return the
 * socket's forward-allocated memory to the pool via
 * sk_stream_mem_reclaim().
 */
static inline void sk_stream_writequeue_purge(struct sock *sk)
{
	struct sk_buff *skb;

	for (skb = __skb_dequeue(&sk->sk_write_queue); skb != NULL;
	     skb = __skb_dequeue(&sk->sk_write_queue))
		sk_stream_free_skb(sk, skb);
	sk_stream_mem_reclaim(sk);
}

static inline int sk_stream_rmem_schedule(struct sock *sk, struct sk_buff *skb)
{
return (int)skb->truesize <= sk->sk_forward_alloc ||
Expand Down Expand Up @@ -1256,18 +1247,6 @@ static inline struct page *sk_stream_alloc_page(struct sock *sk)
return page;
}

/* Walk the retransmit portion of the write queue: every skb already
 * handed to the transmit path, i.e. from the queue head up to (but not
 * including) sk_send_head.  Stops early on an empty queue because the
 * head sentinel is also checked.  Not safe against unlinking the
 * current skb.
 */
#define sk_stream_for_retrans_queue(skb, sk)				\
	for (skb = (sk)->sk_write_queue.next;				\
	     (skb != (sk)->sk_send_head) &&				\
	     (skb != (struct sk_buff *)&(sk)->sk_write_queue);		\
	     skb = skb->next)

/* As above, but resume from a caller-initialized @skb instead of the
 * queue head.  (Imported from STCP for fast SACK processing.)
 */
#define sk_stream_for_retrans_queue_from(skb, sk)			\
	for (; (skb != (sk)->sk_send_head) &&				\
	     (skb != (struct sk_buff *)&(sk)->sk_write_queue);		\
	     skb = skb->next)

/*
* Default write policy as shown to user space via poll/select/SIGIO
*/
Expand Down
114 changes: 114 additions & 0 deletions include/net/tcp.h
Original file line number Diff line number Diff line change
Expand Up @@ -1162,6 +1162,120 @@ static inline void tcp_put_md5sig_pool(void)
put_cpu();
}

/* write queue abstraction */
/* Throw away the entire write queue and give the socket's
 * forward-allocated memory back afterwards.
 */
static inline void tcp_write_queue_purge(struct sock *sk)
{
	struct sk_buff *skb;

	for (;;) {
		skb = __skb_dequeue(&sk->sk_write_queue);
		if (skb == NULL)
			break;
		sk_stream_free_skb(sk, skb);
	}
	sk_stream_mem_reclaim(sk);
}

/* First skb on the write queue, or NULL when the queue is empty. */
static inline struct sk_buff *tcp_write_queue_head(struct sock *sk)
{
	struct sk_buff *head = sk->sk_write_queue.next;

	/* An empty list's ->next points back at its own head sentinel. */
	return head == (struct sk_buff *)&sk->sk_write_queue ? NULL : head;
}

/* Last skb on the write queue, or NULL when the queue is empty. */
static inline struct sk_buff *tcp_write_queue_tail(struct sock *sk)
{
	struct sk_buff *tail = sk->sk_write_queue.prev;

	/* An empty list's ->prev points back at its own head sentinel. */
	return tail == (struct sk_buff *)&sk->sk_write_queue ? NULL : tail;
}

/* The skb following @skb on sk's write queue.
 * NOTE(review): no end-of-list check — if @skb is the last real element
 * the result is the queue's head sentinel, not a valid skb.  Callers
 * are expected to guard with tcp_skb_is_last() or equivalent.
 */
static inline struct sk_buff *tcp_write_queue_next(struct sock *sk, struct sk_buff *skb)
{
	return skb->next;
}

/* Walk every skb on sk's write queue, sent and unsent alike,
 * terminating at the head sentinel.  Not safe against unlinking the
 * current skb.
 */
#define tcp_for_write_queue(skb, sk)					\
	for (skb = (sk)->sk_write_queue.next;				\
	     (skb != (struct sk_buff *)&(sk)->sk_write_queue);		\
	     skb = skb->next)

/* As above, but resume from a caller-initialized @skb instead of the
 * queue head.
 */
#define tcp_for_write_queue_from(skb, sk)				\
	for (; (skb != (struct sk_buff *)&(sk)->sk_write_queue);\
	     skb = skb->next)

/* Next skb to be transmitted, or NULL when everything queued has
 * already been passed to the transmit path.
 */
static inline struct sk_buff *tcp_send_head(struct sock *sk)
{
	return sk->sk_send_head;
}

/* Move the send head one skb forward past @skb; a NULL send head means
 * the transmit path has consumed the whole queue.
 */
static inline void tcp_advance_send_head(struct sock *sk, struct sk_buff *skb)
{
	struct sk_buff *next = skb->next;

	/* Hitting the head sentinel means we walked off the end. */
	sk->sk_send_head = (next == (struct sk_buff *)&sk->sk_write_queue) ?
			   NULL : next;
}

/* After @skb_unlinked has been removed from the write queue, clear the
 * send head if it was pointing at that skb, so it never dangles.
 */
static inline void tcp_check_send_head(struct sock *sk, struct sk_buff *skb_unlinked)
{
	if (sk->sk_send_head != skb_unlinked)
		return;
	sk->sk_send_head = NULL;
}

/* Reset the send head to "nothing pending"; used when the write queue
 * is being discarded wholesale (e.g. tcp_disconnect()).
 */
static inline void tcp_init_send_head(struct sock *sk)
{
	sk->sk_send_head = NULL;
}

/* Link @skb at the tail of the write queue without touching
 * sk_send_head; the non-underscore variant also maintains the send head.
 */
static inline void __tcp_add_write_queue_tail(struct sock *sk, struct sk_buff *skb)
{
	__skb_queue_tail(&sk->sk_write_queue, skb);
}

/* Queue @skb for transmission at the tail of the write queue. */
static inline void tcp_add_write_queue_tail(struct sock *sk, struct sk_buff *skb)
{
	__tcp_add_write_queue_tail(sk, skb);

	/* A NULL send head means everything queued so far has already
	 * been handed to the transmit path, so sending must resume at
	 * this new skb.
	 */
	if (!sk->sk_send_head)
		sk->sk_send_head = skb;
}

/* Link @skb at the head of the write queue; sk_send_head is left
 * untouched, so the caller must fix it up if needed.
 */
static inline void __tcp_add_write_queue_head(struct sock *sk, struct sk_buff *skb)
{
	__skb_queue_head(&sk->sk_write_queue, skb);
}

/* Insert buff after skb on the write queue of sk.
 * NOTE(review): assumes this kernel's __skb_append() signature is
 * (old, new, list) — verify against skbuff.h if the list helpers change.
 */
static inline void tcp_insert_write_queue_after(struct sk_buff *skb,
						struct sk_buff *buff,
						struct sock *sk)
{
	__skb_append(skb, buff, &sk->sk_write_queue);
}

/* Insert @new between skb->prev and @skb, i.e. immediately before @skb,
 * on the write queue of sk.  sk_send_head is not adjusted.
 */
static inline void tcp_insert_write_queue_before(struct sk_buff *new,
						 struct sk_buff *skb,
						 struct sock *sk)
{
	__skb_insert(new, skb->prev, skb, &sk->sk_write_queue);
}

/* Remove @skb from sk's write queue.  Does NOT fix up sk_send_head;
 * callers must follow up with tcp_check_send_head() when the unlinked
 * skb could have been the next one to send.
 */
static inline void tcp_unlink_write_queue(struct sk_buff *skb, struct sock *sk)
{
	__skb_unlink(skb, &sk->sk_write_queue);
}

/* Non-zero iff @skb is the last real element on sk's write queue
 * (its ->next points back at the queue's head sentinel).
 */
static inline int tcp_skb_is_last(const struct sock *sk,
				  const struct sk_buff *skb)
{
	return skb->next == (struct sk_buff *)&sk->sk_write_queue;
}

static inline int tcp_write_queue_empty(struct sock *sk)
{
return skb_queue_empty(&sk->sk_write_queue);
}

/* /proc */
enum tcp_seq_states {
TCP_SEQ_STATE_LISTENING,
Expand Down
32 changes: 16 additions & 16 deletions net/ipv4/tcp.c
Original file line number Diff line number Diff line change
Expand Up @@ -470,10 +470,8 @@ static inline void skb_entail(struct sock *sk, struct tcp_sock *tp,
tcb->flags = TCPCB_FLAG_ACK;
tcb->sacked = 0;
skb_header_release(skb);
__skb_queue_tail(&sk->sk_write_queue, skb);
tcp_add_write_queue_tail(sk, skb);
sk_charge_skb(sk, skb);
if (!sk->sk_send_head)
sk->sk_send_head = skb;
if (tp->nonagle & TCP_NAGLE_PUSH)
tp->nonagle &= ~TCP_NAGLE_PUSH;
}
Expand All @@ -491,8 +489,8 @@ static inline void tcp_mark_urg(struct tcp_sock *tp, int flags,
static inline void tcp_push(struct sock *sk, struct tcp_sock *tp, int flags,
int mss_now, int nonagle)
{
if (sk->sk_send_head) {
struct sk_buff *skb = sk->sk_write_queue.prev;
if (tcp_send_head(sk)) {
struct sk_buff *skb = tcp_write_queue_tail(sk);
if (!(flags & MSG_MORE) || forced_push(tp))
tcp_mark_push(tp, skb);
tcp_mark_urg(tp, flags, skb);
Expand Down Expand Up @@ -526,13 +524,13 @@ static ssize_t do_tcp_sendpages(struct sock *sk, struct page **pages, int poffse
goto do_error;

while (psize > 0) {
struct sk_buff *skb = sk->sk_write_queue.prev;
struct sk_buff *skb = tcp_write_queue_tail(sk);
struct page *page = pages[poffset / PAGE_SIZE];
int copy, i, can_coalesce;
int offset = poffset % PAGE_SIZE;
int size = min_t(size_t, psize, PAGE_SIZE - offset);

if (!sk->sk_send_head || (copy = size_goal - skb->len) <= 0) {
if (!tcp_send_head(sk) || (copy = size_goal - skb->len) <= 0) {
new_segment:
if (!sk_stream_memory_free(sk))
goto wait_for_sndbuf;
Expand Down Expand Up @@ -589,7 +587,7 @@ static ssize_t do_tcp_sendpages(struct sock *sk, struct page **pages, int poffse
if (forced_push(tp)) {
tcp_mark_push(tp, skb);
__tcp_push_pending_frames(sk, tp, mss_now, TCP_NAGLE_PUSH);
} else if (skb == sk->sk_send_head)
} else if (skb == tcp_send_head(sk))
tcp_push_one(sk, mss_now);
continue;

Expand Down Expand Up @@ -704,9 +702,9 @@ int tcp_sendmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
while (seglen > 0) {
int copy;

skb = sk->sk_write_queue.prev;
skb = tcp_write_queue_tail(sk);

if (!sk->sk_send_head ||
if (!tcp_send_head(sk) ||
(copy = size_goal - skb->len) <= 0) {

new_segment:
Expand Down Expand Up @@ -833,7 +831,7 @@ int tcp_sendmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
if (forced_push(tp)) {
tcp_mark_push(tp, skb);
__tcp_push_pending_frames(sk, tp, mss_now, TCP_NAGLE_PUSH);
} else if (skb == sk->sk_send_head)
} else if (skb == tcp_send_head(sk))
tcp_push_one(sk, mss_now);
continue;

Expand All @@ -860,9 +858,11 @@ int tcp_sendmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,

do_fault:
if (!skb->len) {
if (sk->sk_send_head == skb)
sk->sk_send_head = NULL;
__skb_unlink(skb, &sk->sk_write_queue);
tcp_unlink_write_queue(skb, sk);
/* It is the one place in all of TCP, except connection
* reset, where we can be unlinking the send_head.
*/
tcp_check_send_head(sk, skb);
sk_stream_free_skb(sk, skb);
}

Expand Down Expand Up @@ -1732,7 +1732,7 @@ int tcp_disconnect(struct sock *sk, int flags)

tcp_clear_xmit_timers(sk);
__skb_queue_purge(&sk->sk_receive_queue);
sk_stream_writequeue_purge(sk);
tcp_write_queue_purge(sk);
__skb_queue_purge(&tp->out_of_order_queue);
#ifdef CONFIG_NET_DMA
__skb_queue_purge(&sk->sk_async_wait_queue);
Expand All @@ -1758,7 +1758,7 @@ int tcp_disconnect(struct sock *sk, int flags)
tcp_set_ca_state(sk, TCP_CA_Open);
tcp_clear_retrans(tp);
inet_csk_delack_init(sk);
sk->sk_send_head = NULL;
tcp_init_send_head(sk);
tp->rx_opt.saw_tstamp = 0;
tcp_sack_reset(&tp->rx_opt);
__sk_dst_reset(sk);
Expand Down
Loading

0 comments on commit fe067e8

Please sign in to comment.