net: rename NET_{ADD|INC}_STATS_BH()
Rename NET_INC_STATS_BH() to __NET_INC_STATS()
and NET_ADD_STATS_BH() to __NET_ADD_STATS()

Signed-off-by: Eric Dumazet <[email protected]>
Signed-off-by: David S. Miller <[email protected]>
Eric Dumazet authored and davem330 committed Apr 28, 2016
1 parent b15084e commit 02a1d6e
Showing 25 changed files with 153 additions and 149 deletions.
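The rename is mechanical: call sites keep their arguments and their BH-context semantics, and only the macro name changes. As a minimal illustration, the tcp_listendrop() call site from the include/net/tcp.h hunk below changes from

    /* before this commit */
    NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_LISTENDROPS);

to

    /* after this commit */
    __NET_INC_STATS(sock_net(sk), LINUX_MIB_LISTENDROPS);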
4 changes: 2 additions & 2 deletions include/net/ip.h
@@ -193,9 +193,9 @@ void ip_send_unicast_reply(struct sock *sk, struct sk_buff *skb,
 #define IP_UPD_PO_STATS(net, field, val) SNMP_UPD_PO_STATS64((net)->mib.ip_statistics, field, val)
 #define __IP_UPD_PO_STATS(net, field, val) SNMP_UPD_PO_STATS64_BH((net)->mib.ip_statistics, field, val)
 #define NET_INC_STATS(net, field) SNMP_INC_STATS((net)->mib.net_statistics, field)
-#define NET_INC_STATS_BH(net, field) SNMP_INC_STATS_BH((net)->mib.net_statistics, field)
+#define __NET_INC_STATS(net, field) SNMP_INC_STATS_BH((net)->mib.net_statistics, field)
 #define NET_ADD_STATS(net, field, adnd) SNMP_ADD_STATS((net)->mib.net_statistics, field, adnd)
-#define NET_ADD_STATS_BH(net, field, adnd) SNMP_ADD_STATS_BH((net)->mib.net_statistics, field, adnd)
+#define __NET_ADD_STATS(net, field, adnd) SNMP_ADD_STATS_BH((net)->mib.net_statistics, field, adnd)
 
 u64 snmp_get_cpu_field(void __percpu *mib, int cpu, int offct);
 unsigned long snmp_fold_field(void __percpu *mib, int offt);
4 changes: 2 additions & 2 deletions include/net/tcp.h
@@ -1743,7 +1743,7 @@ static inline __u32 cookie_init_sequence(const struct tcp_request_sock_ops *ops,
 __u16 *mss)
 {
 tcp_synq_overflow(sk);
-NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_SYNCOOKIESSENT);
+__NET_INC_STATS(sock_net(sk), LINUX_MIB_SYNCOOKIESSENT);
 return ops->cookie_init_seq(skb, mss);
 }
 #else
@@ -1852,7 +1852,7 @@ static inline void tcp_segs_in(struct tcp_sock *tp, const struct sk_buff *skb)
 static inline void tcp_listendrop(const struct sock *sk)
 {
 atomic_inc(&((struct sock *)sk)->sk_drops);
-NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_LISTENDROPS);
+__NET_INC_STATS(sock_net(sk), LINUX_MIB_LISTENDROPS);
 }
 
 #endif /* _TCP_H */
4 changes: 2 additions & 2 deletions net/core/dev.c
@@ -4982,8 +4982,8 @@ bool sk_busy_loop(struct sock *sk, int nonblock)
 netpoll_poll_unlock(have);
 }
 if (rc > 0)
-NET_ADD_STATS_BH(sock_net(sk),
-LINUX_MIB_BUSYPOLLRXPACKETS, rc);
+__NET_ADD_STATS(sock_net(sk),
+LINUX_MIB_BUSYPOLLRXPACKETS, rc);
 local_bh_enable();
 
 if (rc == LL_FLUSH_FAILED)
10 changes: 5 additions & 5 deletions net/dccp/ipv4.c
@@ -205,7 +205,7 @@ void dccp_req_err(struct sock *sk, u64 seq)
 * socket here.
 */
 if (!between48(seq, dccp_rsk(req)->dreq_iss, dccp_rsk(req)->dreq_gss)) {
-NET_INC_STATS_BH(net, LINUX_MIB_OUTOFWINDOWICMPS);
+__NET_INC_STATS(net, LINUX_MIB_OUTOFWINDOWICMPS);
 } else {
 /*
 * Still in RESPOND, just remove it silently.
@@ -273,15 +273,15 @@ static void dccp_v4_err(struct sk_buff *skb, u32 info)
 * servers this needs to be solved differently.
 */
 if (sock_owned_by_user(sk))
-NET_INC_STATS_BH(net, LINUX_MIB_LOCKDROPPEDICMPS);
+__NET_INC_STATS(net, LINUX_MIB_LOCKDROPPEDICMPS);
 
 if (sk->sk_state == DCCP_CLOSED)
 goto out;
 
 dp = dccp_sk(sk);
 if ((1 << sk->sk_state) & ~(DCCPF_REQUESTING | DCCPF_LISTEN) &&
 !between48(seq, dp->dccps_awl, dp->dccps_awh)) {
-NET_INC_STATS_BH(net, LINUX_MIB_OUTOFWINDOWICMPS);
+__NET_INC_STATS(net, LINUX_MIB_OUTOFWINDOWICMPS);
 goto out;
 }
 
@@ -431,11 +431,11 @@ struct sock *dccp_v4_request_recv_sock(const struct sock *sk,
 return newsk;
 
 exit_overflow:
-NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_LISTENOVERFLOWS);
+__NET_INC_STATS(sock_net(sk), LINUX_MIB_LISTENOVERFLOWS);
 exit_nonewsk:
 dst_release(dst);
 exit:
-NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_LISTENDROPS);
+__NET_INC_STATS(sock_net(sk), LINUX_MIB_LISTENDROPS);
 return NULL;
 put_and_exit:
 inet_csk_prepare_forced_close(newsk);
8 changes: 4 additions & 4 deletions net/dccp/ipv6.c
@@ -106,15 +106,15 @@ static void dccp_v6_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
 
 bh_lock_sock(sk);
 if (sock_owned_by_user(sk))
-NET_INC_STATS_BH(net, LINUX_MIB_LOCKDROPPEDICMPS);
+__NET_INC_STATS(net, LINUX_MIB_LOCKDROPPEDICMPS);
 
 if (sk->sk_state == DCCP_CLOSED)
 goto out;
 
 dp = dccp_sk(sk);
 if ((1 << sk->sk_state) & ~(DCCPF_REQUESTING | DCCPF_LISTEN) &&
 !between48(seq, dp->dccps_awl, dp->dccps_awh)) {
-NET_INC_STATS_BH(net, LINUX_MIB_OUTOFWINDOWICMPS);
+__NET_INC_STATS(net, LINUX_MIB_OUTOFWINDOWICMPS);
 goto out;
 }
 
@@ -527,11 +527,11 @@ static struct sock *dccp_v6_request_recv_sock(const struct sock *sk,
 return newsk;
 
 out_overflow:
-NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_LISTENOVERFLOWS);
+__NET_INC_STATS(sock_net(sk), LINUX_MIB_LISTENOVERFLOWS);
 out_nonewsk:
 dst_release(dst);
 out:
-NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_LISTENDROPS);
+__NET_INC_STATS(sock_net(sk), LINUX_MIB_LISTENDROPS);
 return NULL;
 }
 
4 changes: 2 additions & 2 deletions net/dccp/timer.c
@@ -179,7 +179,7 @@ static void dccp_delack_timer(unsigned long data)
 if (sock_owned_by_user(sk)) {
 /* Try again later. */
 icsk->icsk_ack.blocked = 1;
-NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_DELAYEDACKLOCKED);
+__NET_INC_STATS(sock_net(sk), LINUX_MIB_DELAYEDACKLOCKED);
 sk_reset_timer(sk, &icsk->icsk_delack_timer,
 jiffies + TCP_DELACK_MIN);
 goto out;
@@ -209,7 +209,7 @@ static void dccp_delack_timer(unsigned long data)
 icsk->icsk_ack.ato = TCP_ATO_MIN;
 }
 dccp_send_ack(sk);
-NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_DELAYEDACKS);
+__NET_INC_STATS(sock_net(sk), LINUX_MIB_DELAYEDACKS);
 }
 out:
 bh_unlock_sock(sk);
2 changes: 1 addition & 1 deletion net/ipv4/arp.c
@@ -436,7 +436,7 @@ static int arp_filter(__be32 sip, __be32 tip, struct net_device *dev)
 if (IS_ERR(rt))
 return 1;
 if (rt->dst.dev != dev) {
-NET_INC_STATS_BH(net, LINUX_MIB_ARPFILTER);
+__NET_INC_STATS(net, LINUX_MIB_ARPFILTER);
 flag = 1;
 }
 ip_rt_put(rt);
2 changes: 1 addition & 1 deletion net/ipv4/inet_hashtables.c
@@ -360,7 +360,7 @@ static int __inet_check_established(struct inet_timewait_death_row *death_row,
 __sk_nulls_add_node_rcu(sk, &head->chain);
 if (tw) {
 sk_nulls_del_node_init_rcu((struct sock *)tw);
-NET_INC_STATS_BH(net, LINUX_MIB_TIMEWAITRECYCLED);
+__NET_INC_STATS(net, LINUX_MIB_TIMEWAITRECYCLED);
 }
 spin_unlock(lock);
 sock_prot_inuse_add(sock_net(sk), sk->sk_prot, 1);
4 changes: 2 additions & 2 deletions net/ipv4/inet_timewait_sock.c
@@ -147,9 +147,9 @@ static void tw_timer_handler(unsigned long data)
 struct inet_timewait_sock *tw = (struct inet_timewait_sock *)data;
 
 if (tw->tw_kill)
-NET_INC_STATS_BH(twsk_net(tw), LINUX_MIB_TIMEWAITKILLED);
+__NET_INC_STATS(twsk_net(tw), LINUX_MIB_TIMEWAITKILLED);
 else
-NET_INC_STATS_BH(twsk_net(tw), LINUX_MIB_TIMEWAITED);
+__NET_INC_STATS(twsk_net(tw), LINUX_MIB_TIMEWAITED);
 inet_twsk_kill(tw);
 }
 
2 changes: 1 addition & 1 deletion net/ipv4/ip_input.c
@@ -337,7 +337,7 @@ static int ip_rcv_finish(struct net *net, struct sock *sk, struct sk_buff *skb)
 iph->tos, skb->dev);
 if (unlikely(err)) {
 if (err == -EXDEV)
-NET_INC_STATS_BH(net, LINUX_MIB_IPRPFILTER);
+__NET_INC_STATS(net, LINUX_MIB_IPRPFILTER);
 goto drop;
 }
 }
4 changes: 2 additions & 2 deletions net/ipv4/syncookies.c
@@ -312,11 +312,11 @@ struct sock *cookie_v4_check(struct sock *sk, struct sk_buff *skb)
 
 mss = __cookie_v4_check(ip_hdr(skb), th, cookie);
 if (mss == 0) {
-NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_SYNCOOKIESFAILED);
+__NET_INC_STATS(sock_net(sk), LINUX_MIB_SYNCOOKIESFAILED);
 goto out;
 }
 
-NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_SYNCOOKIESRECV);
+__NET_INC_STATS(sock_net(sk), LINUX_MIB_SYNCOOKIESRECV);
 
 /* check for timestamp cookie support */
 memset(&tcp_opt, 0, sizeof(tcp_opt));
4 changes: 2 additions & 2 deletions net/ipv4/tcp.c
@@ -2148,7 +2148,7 @@ void tcp_close(struct sock *sk, long timeout)
 if (tp->linger2 < 0) {
 tcp_set_state(sk, TCP_CLOSE);
 tcp_send_active_reset(sk, GFP_ATOMIC);
-NET_INC_STATS_BH(sock_net(sk),
+__NET_INC_STATS(sock_net(sk),
 LINUX_MIB_TCPABORTONLINGER);
 } else {
 const int tmo = tcp_fin_time(sk);
@@ -2167,7 +2167,7 @@ void tcp_close(struct sock *sk, long timeout)
 if (tcp_check_oom(sk, 0)) {
 tcp_set_state(sk, TCP_CLOSE);
 tcp_send_active_reset(sk, GFP_ATOMIC);
-NET_INC_STATS_BH(sock_net(sk),
+__NET_INC_STATS(sock_net(sk),
 LINUX_MIB_TCPABORTONMEMORY);
 }
 }
20 changes: 10 additions & 10 deletions net/ipv4/tcp_cdg.c
@@ -155,11 +155,11 @@ static void tcp_cdg_hystart_update(struct sock *sk)
 
 ca->last_ack = now_us;
 if (after(now_us, ca->round_start + base_owd)) {
-NET_INC_STATS_BH(sock_net(sk),
-LINUX_MIB_TCPHYSTARTTRAINDETECT);
-NET_ADD_STATS_BH(sock_net(sk),
-LINUX_MIB_TCPHYSTARTTRAINCWND,
-tp->snd_cwnd);
+__NET_INC_STATS(sock_net(sk),
+LINUX_MIB_TCPHYSTARTTRAINDETECT);
+__NET_ADD_STATS(sock_net(sk),
+LINUX_MIB_TCPHYSTARTTRAINCWND,
+tp->snd_cwnd);
 tp->snd_ssthresh = tp->snd_cwnd;
 return;
 }
@@ -174,11 +174,11 @@ static void tcp_cdg_hystart_update(struct sock *sk)
 125U);
 
 if (ca->rtt.min > thresh) {
-NET_INC_STATS_BH(sock_net(sk),
-LINUX_MIB_TCPHYSTARTDELAYDETECT);
-NET_ADD_STATS_BH(sock_net(sk),
-LINUX_MIB_TCPHYSTARTDELAYCWND,
-tp->snd_cwnd);
+__NET_INC_STATS(sock_net(sk),
+LINUX_MIB_TCPHYSTARTDELAYDETECT);
+__NET_ADD_STATS(sock_net(sk),
+LINUX_MIB_TCPHYSTARTDELAYCWND,
+tp->snd_cwnd);
 tp->snd_ssthresh = tp->snd_cwnd;
 }
 }
20 changes: 10 additions & 10 deletions net/ipv4/tcp_cubic.c
@@ -402,11 +402,11 @@ static void hystart_update(struct sock *sk, u32 delay)
 ca->last_ack = now;
 if ((s32)(now - ca->round_start) > ca->delay_min >> 4) {
 ca->found |= HYSTART_ACK_TRAIN;
-NET_INC_STATS_BH(sock_net(sk),
-LINUX_MIB_TCPHYSTARTTRAINDETECT);
-NET_ADD_STATS_BH(sock_net(sk),
-LINUX_MIB_TCPHYSTARTTRAINCWND,
-tp->snd_cwnd);
+__NET_INC_STATS(sock_net(sk),
+LINUX_MIB_TCPHYSTARTTRAINDETECT);
+__NET_ADD_STATS(sock_net(sk),
+LINUX_MIB_TCPHYSTARTTRAINCWND,
+tp->snd_cwnd);
 tp->snd_ssthresh = tp->snd_cwnd;
 }
 }
@@ -423,11 +423,11 @@ static void hystart_update(struct sock *sk, u32 delay)
 if (ca->curr_rtt > ca->delay_min +
 HYSTART_DELAY_THRESH(ca->delay_min >> 3)) {
 ca->found |= HYSTART_DELAY;
-NET_INC_STATS_BH(sock_net(sk),
-LINUX_MIB_TCPHYSTARTDELAYDETECT);
-NET_ADD_STATS_BH(sock_net(sk),
-LINUX_MIB_TCPHYSTARTDELAYCWND,
-tp->snd_cwnd);
+__NET_INC_STATS(sock_net(sk),
+LINUX_MIB_TCPHYSTARTDELAYDETECT);
+__NET_ADD_STATS(sock_net(sk),
+LINUX_MIB_TCPHYSTARTDELAYCWND,
+tp->snd_cwnd);
 tp->snd_ssthresh = tp->snd_cwnd;
 }
 }
14 changes: 7 additions & 7 deletions net/ipv4/tcp_fastopen.c
@@ -256,8 +256,8 @@ static bool tcp_fastopen_queue_check(struct sock *sk)
 req1 = fastopenq->rskq_rst_head;
 if (!req1 || time_after(req1->rsk_timer.expires, jiffies)) {
 spin_unlock(&fastopenq->lock);
-NET_INC_STATS_BH(sock_net(sk),
-LINUX_MIB_TCPFASTOPENLISTENOVERFLOW);
+__NET_INC_STATS(sock_net(sk),
+LINUX_MIB_TCPFASTOPENLISTENOVERFLOW);
 return false;
 }
 fastopenq->rskq_rst_head = req1->dl_next;
@@ -282,7 +282,7 @@ struct sock *tcp_try_fastopen(struct sock *sk, struct sk_buff *skb,
 struct sock *child;
 
 if (foc->len == 0) /* Client requests a cookie */
-NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPFASTOPENCOOKIEREQD);
+__NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPFASTOPENCOOKIEREQD);
 
 if (!((sysctl_tcp_fastopen & TFO_SERVER_ENABLE) &&
 (syn_data || foc->len >= 0) &&
@@ -311,13 +311,13 @@ struct sock *tcp_try_fastopen(struct sock *sk, struct sk_buff *skb,
 child = tcp_fastopen_create_child(sk, skb, dst, req);
 if (child) {
 foc->len = -1;
-NET_INC_STATS_BH(sock_net(sk),
-LINUX_MIB_TCPFASTOPENPASSIVE);
+__NET_INC_STATS(sock_net(sk),
+LINUX_MIB_TCPFASTOPENPASSIVE);
 return child;
 }
-NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPFASTOPENPASSIVEFAIL);
+__NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPFASTOPENPASSIVEFAIL);
 } else if (foc->len > 0) /* Client presents an invalid cookie */
-NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPFASTOPENPASSIVEFAIL);
+__NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPFASTOPENPASSIVEFAIL);
 
 valid_foc.exp = foc->exp;
 *foc = valid_foc;
