net: sched: Merge Qdisc::bstats and Qdisc::cpu_bstats data types
The only factor differentiating the per-CPU bstats data type (struct
gnet_stats_basic_cpu) from the packed non-per-CPU one (struct
gnet_stats_basic_packed) was a u64_stats sync point inside the former.
The two data types are now equivalent: earlier commits added a u64_stats
sync point to the latter.

Combine both data types into "struct gnet_stats_basic_sync". This
eliminates redundancy and simplifies the bstats read/write APIs.

Use u64_stats_t for the bstats "packets" and "bytes" fields. On 64-bit
architectures, u64_stats sync points do not use sequence counter
protection.

Signed-off-by: Ahmed S. Darwish <[email protected]>
Signed-off-by: Sebastian Andrzej Siewior <[email protected]>
Signed-off-by: David S. Miller <[email protected]>
a-darwish authored and davem330 committed Oct 18, 2021
1 parent f56940d commit 50dc9a8
Showing 30 changed files with 155 additions and 160 deletions.
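For context on the u64_stats_t choice: a minimal reader-side sketch of the merged type (not part of this patch; bstats_snapshot is a hypothetical helper name). On 64-bit kernels the fetch/retry pair compiles away and the snapshot is two plain loads, while 32-bit kernels still get a consistent bytes/packets pair via the seqcount in syncp.

static void bstats_snapshot(const struct gnet_stats_basic_sync *b,
                            u64 *bytes, u64 *packets)
{
        unsigned int start;

        do {
                /* No-op on 64-bit; seqcount read section on 32-bit. */
                start = u64_stats_fetch_begin(&b->syncp);
                *bytes = u64_stats_read(&b->bytes);
                *packets = u64_stats_read(&b->packets);
        } while (u64_stats_fetch_retry(&b->syncp, start));
}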
drivers/net/ethernet/netronome/nfp/abm/qdisc.c (1 addition, 1 deletion)
@@ -458,7 +458,7 @@ nfp_abm_qdisc_graft(struct nfp_abm_link *alink, u32 handle, u32 child_handle,
 static void
 nfp_abm_stats_calculate(struct nfp_alink_stats *new,
                         struct nfp_alink_stats *old,
-                        struct gnet_stats_basic_packed *bstats,
+                        struct gnet_stats_basic_sync *bstats,
                         struct gnet_stats_queue *qstats)
 {
         _bstats_update(bstats, new->tx_bytes - old->tx_bytes,
include/net/act_api.h (5 additions, 5 deletions)
@@ -30,13 +30,13 @@ struct tc_action {
         atomic_t tcfa_bindcnt;
         int tcfa_action;
         struct tcf_t tcfa_tm;
-        struct gnet_stats_basic_packed tcfa_bstats;
-        struct gnet_stats_basic_packed tcfa_bstats_hw;
+        struct gnet_stats_basic_sync tcfa_bstats;
+        struct gnet_stats_basic_sync tcfa_bstats_hw;
         struct gnet_stats_queue tcfa_qstats;
         struct net_rate_estimator __rcu *tcfa_rate_est;
         spinlock_t tcfa_lock;
-        struct gnet_stats_basic_cpu __percpu *cpu_bstats;
-        struct gnet_stats_basic_cpu __percpu *cpu_bstats_hw;
+        struct gnet_stats_basic_sync __percpu *cpu_bstats;
+        struct gnet_stats_basic_sync __percpu *cpu_bstats_hw;
         struct gnet_stats_queue __percpu *cpu_qstats;
         struct tc_cookie __rcu *act_cookie;
         struct tcf_chain __rcu *goto_chain;
@@ -206,7 +206,7 @@ static inline void tcf_action_update_bstats(struct tc_action *a,
                                             struct sk_buff *skb)
 {
         if (likely(a->cpu_bstats)) {
-                bstats_cpu_update(this_cpu_ptr(a->cpu_bstats), skb);
+                bstats_update(this_cpu_ptr(a->cpu_bstats), skb);
                 return;
         }
         spin_lock(&a->tcfa_lock);
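A sketch of typical setup code (assumed, not taken from this patch; alloc_sync_bstats is a hypothetical helper name) for the per-CPU counters used by the fast path above. Each per-CPU slot carries its own syncp, so every slot is initialized with gnet_stats_basic_sync_init() before bstats_update() may touch it.

static struct gnet_stats_basic_sync __percpu *alloc_sync_bstats(void)
{
        struct gnet_stats_basic_sync __percpu *cpu_bstats;
        int cpu;

        cpu_bstats = alloc_percpu(struct gnet_stats_basic_sync);
        if (!cpu_bstats)
                return NULL;

        /* Initialize the u64_stats sync point of every per-CPU slot. */
        for_each_possible_cpu(cpu)
                gnet_stats_basic_sync_init(per_cpu_ptr(cpu_bstats, cpu));

        return cpu_bstats;
}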
include/net/gen_stats.h (23 additions, 21 deletions)
@@ -7,15 +7,17 @@
 #include <linux/rtnetlink.h>
 #include <linux/pkt_sched.h>
 
-/* Note: this used to be in include/uapi/linux/gen_stats.h */
-struct gnet_stats_basic_packed {
-        __u64 bytes;
-        __u64 packets;
-        struct u64_stats_sync syncp;
-};
-
-struct gnet_stats_basic_cpu {
-        struct gnet_stats_basic_packed bstats;
+/* Throughput stats.
+ * Must be initialized beforehand with gnet_stats_basic_sync_init().
+ *
+ * If no reads can ever occur parallel to writes (e.g. stack-allocated
+ * bstats), then the internal stat values can be written to and read
+ * from directly. Otherwise, use _bstats_set/update() for writes and
+ * gnet_stats_add_basic() for reads.
+ */
+struct gnet_stats_basic_sync {
+        u64_stats_t bytes;
+        u64_stats_t packets;
         struct u64_stats_sync syncp;
 } __aligned(2 * sizeof(u64));
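Following the comment above, a hedged usage sketch (snapshot_total_bytes is a hypothetical helper, not part of this header): the per-CPU or plain counters are folded into a stack-allocated gnet_stats_basic_sync via gnet_stats_add_basic(), and the stack copy can then be read directly because nothing else writes to it.

static u64 snapshot_total_bytes(const seqcount_t *running,
                                struct gnet_stats_basic_sync __percpu *cpu_bstats,
                                struct gnet_stats_basic_sync *bstats)
{
        struct gnet_stats_basic_sync sum;

        gnet_stats_basic_sync_init(&sum);
        /* Adds either the per-CPU counters or the plain ones into 'sum'. */
        gnet_stats_add_basic(running, &sum, cpu_bstats, bstats);

        /* 'sum' is stack-local, so its fields may be read back directly. */
        return u64_stats_read(&sum.bytes);
}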

@@ -35,7 +37,7 @@ struct gnet_dump {
         struct tc_stats tc_stats;
 };
 
-void gnet_stats_basic_packed_init(struct gnet_stats_basic_packed *b);
+void gnet_stats_basic_sync_init(struct gnet_stats_basic_sync *b);
 int gnet_stats_start_copy(struct sk_buff *skb, int type, spinlock_t *lock,
                           struct gnet_dump *d, int padattr);
 
@@ -46,16 +48,16 @@ int gnet_stats_start_copy_compat(struct sk_buff *skb, int type,
 
 int gnet_stats_copy_basic(const seqcount_t *running,
                           struct gnet_dump *d,
-                          struct gnet_stats_basic_cpu __percpu *cpu,
-                          struct gnet_stats_basic_packed *b);
+                          struct gnet_stats_basic_sync __percpu *cpu,
+                          struct gnet_stats_basic_sync *b);
 void gnet_stats_add_basic(const seqcount_t *running,
-                          struct gnet_stats_basic_packed *bstats,
-                          struct gnet_stats_basic_cpu __percpu *cpu,
-                          struct gnet_stats_basic_packed *b);
+                          struct gnet_stats_basic_sync *bstats,
+                          struct gnet_stats_basic_sync __percpu *cpu,
+                          struct gnet_stats_basic_sync *b);
 int gnet_stats_copy_basic_hw(const seqcount_t *running,
                              struct gnet_dump *d,
-                             struct gnet_stats_basic_cpu __percpu *cpu,
-                             struct gnet_stats_basic_packed *b);
+                             struct gnet_stats_basic_sync __percpu *cpu,
+                             struct gnet_stats_basic_sync *b);
 int gnet_stats_copy_rate_est(struct gnet_dump *d,
                              struct net_rate_estimator __rcu **ptr);
 int gnet_stats_copy_queue(struct gnet_dump *d,
@@ -68,14 +70,14 @@ int gnet_stats_copy_app(struct gnet_dump *d, void *st, int len);
 
 int gnet_stats_finish_copy(struct gnet_dump *d);
 
-int gen_new_estimator(struct gnet_stats_basic_packed *bstats,
-                      struct gnet_stats_basic_cpu __percpu *cpu_bstats,
+int gen_new_estimator(struct gnet_stats_basic_sync *bstats,
+                      struct gnet_stats_basic_sync __percpu *cpu_bstats,
                       struct net_rate_estimator __rcu **rate_est,
                       spinlock_t *lock,
                       seqcount_t *running, struct nlattr *opt);
 void gen_kill_estimator(struct net_rate_estimator __rcu **ptr);
-int gen_replace_estimator(struct gnet_stats_basic_packed *bstats,
-                          struct gnet_stats_basic_cpu __percpu *cpu_bstats,
+int gen_replace_estimator(struct gnet_stats_basic_sync *bstats,
+                          struct gnet_stats_basic_sync __percpu *cpu_bstats,
                           struct net_rate_estimator __rcu **ptr,
                           spinlock_t *lock,
                           seqcount_t *running, struct nlattr *opt);
include/net/netfilter/xt_rateest.h (1 addition, 1 deletion)
@@ -6,7 +6,7 @@
 
 struct xt_rateest {
         /* keep lock and bstats on same cache line to speedup xt_rateest_tg() */
-        struct gnet_stats_basic_packed bstats;
+        struct gnet_stats_basic_sync bstats;
         spinlock_t lock;
 
 
include/net/pkt_cls.h (2 additions, 2 deletions)
@@ -765,7 +765,7 @@ struct tc_cookie {
 };
 
 struct tc_qopt_offload_stats {
-        struct gnet_stats_basic_packed *bstats;
+        struct gnet_stats_basic_sync *bstats;
         struct gnet_stats_queue *qstats;
 };
 
@@ -885,7 +885,7 @@ struct tc_gred_qopt_offload_params {
 };
 
 struct tc_gred_qopt_offload_stats {
-        struct gnet_stats_basic_packed bstats[MAX_DPs];
+        struct gnet_stats_basic_sync bstats[MAX_DPs];
         struct gnet_stats_queue qstats[MAX_DPs];
         struct red_stats *xstats[MAX_DPs];
 };
include/net/sch_generic.h (9 additions, 25 deletions)
@@ -97,7 +97,7 @@ struct Qdisc {
         struct netdev_queue *dev_queue;
 
         struct net_rate_estimator __rcu *rate_est;
-        struct gnet_stats_basic_cpu __percpu *cpu_bstats;
+        struct gnet_stats_basic_sync __percpu *cpu_bstats;
         struct gnet_stats_queue __percpu *cpu_qstats;
         int pad;
         refcount_t refcnt;
@@ -107,7 +107,7 @@
          */
         struct sk_buff_head gso_skb ____cacheline_aligned_in_smp;
         struct qdisc_skb_head q;
-        struct gnet_stats_basic_packed bstats;
+        struct gnet_stats_basic_sync bstats;
         seqcount_t running;
         struct gnet_stats_queue qstats;
         unsigned long state;
@@ -849,43 +849,27 @@ static inline int qdisc_enqueue(struct sk_buff *skb, struct Qdisc *sch,
         return sch->enqueue(skb, sch, to_free);
 }
 
-static inline void _bstats_update(struct gnet_stats_basic_packed *bstats,
+static inline void _bstats_update(struct gnet_stats_basic_sync *bstats,
                                   __u64 bytes, __u32 packets)
 {
         u64_stats_update_begin(&bstats->syncp);
-        bstats->bytes += bytes;
-        bstats->packets += packets;
+        u64_stats_add(&bstats->bytes, bytes);
+        u64_stats_add(&bstats->packets, packets);
         u64_stats_update_end(&bstats->syncp);
 }
 
-static inline void bstats_update(struct gnet_stats_basic_packed *bstats,
+static inline void bstats_update(struct gnet_stats_basic_sync *bstats,
                                  const struct sk_buff *skb)
 {
         _bstats_update(bstats,
                        qdisc_pkt_len(skb),
                        skb_is_gso(skb) ? skb_shinfo(skb)->gso_segs : 1);
 }
 
-static inline void _bstats_cpu_update(struct gnet_stats_basic_cpu *bstats,
-                                      __u64 bytes, __u32 packets)
-{
-        u64_stats_update_begin(&bstats->syncp);
-        _bstats_update(&bstats->bstats, bytes, packets);
-        u64_stats_update_end(&bstats->syncp);
-}
-
-static inline void bstats_cpu_update(struct gnet_stats_basic_cpu *bstats,
-                                     const struct sk_buff *skb)
-{
-        u64_stats_update_begin(&bstats->syncp);
-        bstats_update(&bstats->bstats, skb);
-        u64_stats_update_end(&bstats->syncp);
-}
-
 static inline void qdisc_bstats_cpu_update(struct Qdisc *sch,
                                            const struct sk_buff *skb)
 {
-        bstats_cpu_update(this_cpu_ptr(sch->cpu_bstats), skb);
+        bstats_update(this_cpu_ptr(sch->cpu_bstats), skb);
 }
 
 static inline void qdisc_bstats_update(struct Qdisc *sch,
@@ -1317,15 +1301,15 @@ void psched_ppscfg_precompute(struct psched_pktrate *r, u64 pktrate64);
 struct mini_Qdisc {
         struct tcf_proto *filter_list;
         struct tcf_block *block;
-        struct gnet_stats_basic_cpu __percpu *cpu_bstats;
+        struct gnet_stats_basic_sync __percpu *cpu_bstats;
         struct gnet_stats_queue __percpu *cpu_qstats;
         struct rcu_head rcu;
 };
 
 static inline void mini_qdisc_bstats_cpu_update(struct mini_Qdisc *miniq,
                                                 const struct sk_buff *skb)
 {
-        bstats_cpu_update(this_cpu_ptr(miniq->cpu_bstats), skb);
+        bstats_update(this_cpu_ptr(miniq->cpu_bstats), skb);
 }
 
 static inline void mini_qdisc_qstats_cpu_drop(struct mini_Qdisc *miniq)
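With the per-CPU variants gone, both accounting paths funnel into the same bstats_update() helper. A rough sketch (account_skb_stats is a hypothetical function; the usual caller-side synchronization is assumed) of how an accounting site picks between them:

static void account_skb_stats(struct Qdisc *sch, const struct sk_buff *skb)
{
        /* Lockless (TCQ_F_CPUSTATS) qdiscs bump their per-CPU counters;
         * classic qdiscs update sch->bstats under the qdisc lock, exactly
         * as before this change.
         */
        if (qdisc_is_percpu_stats(sch))
                qdisc_bstats_cpu_update(sch, skb);
        else
                qdisc_bstats_update(sch, skb);
}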
net/core/gen_estimator.c (20 additions, 16 deletions)
@@ -40,10 +40,10 @@
  */
 
 struct net_rate_estimator {
-        struct gnet_stats_basic_packed *bstats;
+        struct gnet_stats_basic_sync *bstats;
         spinlock_t *stats_lock;
         seqcount_t *running;
-        struct gnet_stats_basic_cpu __percpu *cpu_bstats;
+        struct gnet_stats_basic_sync __percpu *cpu_bstats;
         u8 ewma_log;
         u8 intvl_log; /* period : (250ms << intvl_log) */
 
@@ -60,9 +60,9 @@ struct net_rate_estimator {
 };
 
 static void est_fetch_counters(struct net_rate_estimator *e,
-                               struct gnet_stats_basic_packed *b)
+                               struct gnet_stats_basic_sync *b)
 {
-        gnet_stats_basic_packed_init(b);
+        gnet_stats_basic_sync_init(b);
         if (e->stats_lock)
                 spin_lock(e->stats_lock);
 
@@ -76,23 +76,27 @@ static void est_fetch_counters(struct net_rate_estimator *e,
 static void est_timer(struct timer_list *t)
 {
         struct net_rate_estimator *est = from_timer(est, t, timer);
-        struct gnet_stats_basic_packed b;
+        struct gnet_stats_basic_sync b;
+        u64 b_bytes, b_packets;
         u64 rate, brate;
 
         est_fetch_counters(est, &b);
-        brate = (b.bytes - est->last_bytes) << (10 - est->intvl_log);
+        b_bytes = u64_stats_read(&b.bytes);
+        b_packets = u64_stats_read(&b.packets);
+
+        brate = (b_bytes - est->last_bytes) << (10 - est->intvl_log);
         brate = (brate >> est->ewma_log) - (est->avbps >> est->ewma_log);
 
-        rate = (b.packets - est->last_packets) << (10 - est->intvl_log);
+        rate = (b_packets - est->last_packets) << (10 - est->intvl_log);
         rate = (rate >> est->ewma_log) - (est->avpps >> est->ewma_log);
 
         write_seqcount_begin(&est->seq);
         est->avbps += brate;
         est->avpps += rate;
         write_seqcount_end(&est->seq);
 
-        est->last_bytes = b.bytes;
-        est->last_packets = b.packets;
+        est->last_bytes = b_bytes;
+        est->last_packets = b_packets;
 
         est->next_jiffies += ((HZ/4) << est->intvl_log);
 
@@ -121,16 +125,16 @@ static void est_timer(struct timer_list *t)
  * Returns 0 on success or a negative error code.
  *
  */
-int gen_new_estimator(struct gnet_stats_basic_packed *bstats,
-                      struct gnet_stats_basic_cpu __percpu *cpu_bstats,
+int gen_new_estimator(struct gnet_stats_basic_sync *bstats,
+                      struct gnet_stats_basic_sync __percpu *cpu_bstats,
                       struct net_rate_estimator __rcu **rate_est,
                       spinlock_t *lock,
                       seqcount_t *running,
                       struct nlattr *opt)
 {
         struct gnet_estimator *parm = nla_data(opt);
         struct net_rate_estimator *old, *est;
-        struct gnet_stats_basic_packed b;
+        struct gnet_stats_basic_sync b;
         int intvl_log;
 
         if (nla_len(opt) < sizeof(*parm))
@@ -164,8 +168,8 @@ int gen_new_estimator(struct gnet_stats_basic_packed *bstats,
         est_fetch_counters(est, &b);
         if (lock)
                 local_bh_enable();
-        est->last_bytes = b.bytes;
-        est->last_packets = b.packets;
+        est->last_bytes = u64_stats_read(&b.bytes);
+        est->last_packets = u64_stats_read(&b.packets);
 
         if (lock)
                 spin_lock_bh(lock);
@@ -222,8 +226,8 @@ EXPORT_SYMBOL(gen_kill_estimator);
  *
  * Returns 0 on success or a negative error code.
  */
-int gen_replace_estimator(struct gnet_stats_basic_packed *bstats,
-                          struct gnet_stats_basic_cpu __percpu *cpu_bstats,
+int gen_replace_estimator(struct gnet_stats_basic_sync *bstats,
+                          struct gnet_stats_basic_sync __percpu *cpu_bstats,
                           struct net_rate_estimator __rcu **rate_est,
                           spinlock_t *lock,
                           seqcount_t *running, struct nlattr *opt)
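For reference, est_timer() above is a fixed-point exponentially weighted moving average; this patch only changes how the byte/packet snapshots are read (u64_stats_read()), not the averaging itself. Ignoring truncation from the shifts, each timer tick roughly computes:

        avbps += (brate - avbps) >> ewma_log
        avpps += (rate  - avpps) >> ewma_log

where brate and rate are the byte and packet deltas since the previous tick, scaled by << (10 - intvl_log) so the fixed-point unit stays the same across the configurable interval lengths.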
[Diff for the remaining 23 changed files not shown.]