net/mlx5e: Introduce wrapper for xdp_buff
Preparation for implementing HW metadata kfuncs. No functional change.

Cc: Tariq Toukan <[email protected]>
Cc: Saeed Mahameed <[email protected]>
Cc: John Fastabend <[email protected]>
Cc: David Ahern <[email protected]>
Cc: Martin KaFai Lau <[email protected]>
Cc: Jakub Kicinski <[email protected]>
Cc: Willem de Bruijn <[email protected]>
Cc: Jesper Dangaard Brouer <[email protected]>
Cc: Anatoly Burakov <[email protected]>
Cc: Alexander Lobakin <[email protected]>
Cc: Magnus Karlsson <[email protected]>
Cc: Maryam Tahhan <[email protected]>
Cc: [email protected]
Cc: [email protected]
Signed-off-by: Toke Høiland-Jørgensen <[email protected]>
Signed-off-by: Stanislav Fomichev <[email protected]>
Reviewed-by: Tariq Toukan <[email protected]>
Link: https://lore.kernel.org/r/[email protected]
Signed-off-by: Martin KaFai Lau <[email protected]>
tohojo authored and Martin KaFai Lau committed Jan 23, 2023
1 parent 94ecc5c commit 384a13c
Showing 4 changed files with 57 additions and 43 deletions.
3 changes: 2 additions & 1 deletion drivers/net/ethernet/mellanox/mlx5/core/en/xdp.c
@@ -158,8 +158,9 @@ mlx5e_xmit_xdp_buff(struct mlx5e_xdpsq *sq, struct mlx5e_rq *rq,
 
 /* returns true if packet was consumed by xdp */
 bool mlx5e_xdp_handle(struct mlx5e_rq *rq, struct page *page,
-		      struct bpf_prog *prog, struct xdp_buff *xdp)
+		      struct bpf_prog *prog, struct mlx5e_xdp_buff *mxbuf)
 {
+	struct xdp_buff *xdp = &mxbuf->xdp;
 	u32 act;
 	int err;
 
6 changes: 5 additions & 1 deletion drivers/net/ethernet/mellanox/mlx5/core/en/xdp.h
@@ -44,10 +44,14 @@
 	(MLX5E_XDP_INLINE_WQE_MAX_DS_CNT * MLX5_SEND_WQE_DS - \
 	 sizeof(struct mlx5_wqe_inline_seg))
 
+struct mlx5e_xdp_buff {
+	struct xdp_buff xdp;
+};
+
 struct mlx5e_xsk_param;
 int mlx5e_xdp_max_mtu(struct mlx5e_params *params, struct mlx5e_xsk_param *xsk);
 bool mlx5e_xdp_handle(struct mlx5e_rq *rq, struct page *page,
-		      struct bpf_prog *prog, struct xdp_buff *xdp);
+		      struct bpf_prog *prog, struct mlx5e_xdp_buff *mlctx);
 void mlx5e_xdp_mpwqe_complete(struct mlx5e_xdpsq *sq);
 bool mlx5e_poll_xdpsq_cq(struct mlx5e_cq *cq);
 void mlx5e_free_xdpsq_descs(struct mlx5e_xdpsq *sq);
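Note on the new wrapper (commentary, not part of the diff): struct mlx5e_xdp_buff exists so that later patches in the HW metadata kfuncs series can attach driver-private, per-packet state next to the xdp_buff without changing every RX helper signature again. A rough sketch of how such an extension could look; the cqe and rq fields below are illustrative assumptions, not something this commit adds:

/* Illustrative sketch only -- this commit adds just the xdp member.
 * The extra fields are hypothetical; the xdp_buff must stay the first
 * member so pointers to it and to the wrapper convert back and forth.
 */
struct mlx5e_xdp_buff {
	struct xdp_buff xdp;		/* added by this commit; must remain first */
	struct mlx5_cqe64 *cqe;		/* hypothetical: CQE for timestamp/hash hints */
	struct mlx5e_rq *rq;		/* hypothetical: RQ context for kfunc callbacks */
};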
33 changes: 21 additions & 12 deletions drivers/net/ethernet/mellanox/mlx5/core/en/xsk/rx.c
@@ -8,6 +8,14 @@
 
 /* RX data path */
 
+static struct mlx5e_xdp_buff *xsk_buff_to_mxbuf(struct xdp_buff *xdp)
+{
+	/* mlx5e_xdp_buff shares its layout with xdp_buff_xsk
+	 * and private mlx5e_xdp_buff fields fall into xdp_buff_xsk->cb
+	 */
+	return (struct mlx5e_xdp_buff *)xdp;
+}
+
 int mlx5e_xsk_alloc_rx_mpwqe(struct mlx5e_rq *rq, u16 ix)
 {
 	struct mlx5e_mpw_info *wi = mlx5e_get_mpw_info(rq, ix);
@@ -22,6 +30,7 @@ int mlx5e_xsk_alloc_rx_mpwqe(struct mlx5e_rq *rq, u16 ix)
 		goto err;
 
 	BUILD_BUG_ON(sizeof(wi->alloc_units[0]) != sizeof(wi->alloc_units[0].xsk));
+	XSK_CHECK_PRIV_TYPE(struct mlx5e_xdp_buff);
 	batch = xsk_buff_alloc_batch(rq->xsk_pool, (struct xdp_buff **)wi->alloc_units,
				     rq->mpwqe.pages_per_wqe);
 
@@ -233,7 +242,7 @@ struct sk_buff *mlx5e_xsk_skb_from_cqe_mpwrq_linear(struct mlx5e_rq *rq,
						    u32 head_offset,
						    u32 page_idx)
 {
-	struct xdp_buff *xdp = wi->alloc_units[page_idx].xsk;
+	struct mlx5e_xdp_buff *mxbuf = xsk_buff_to_mxbuf(wi->alloc_units[page_idx].xsk);
 	struct bpf_prog *prog;
 
 	/* Check packet size. Note LRO doesn't use linear SKB */
@@ -249,9 +258,9 @@ struct sk_buff *mlx5e_xsk_skb_from_cqe_mpwrq_linear(struct mlx5e_rq *rq,
 	 */
 	WARN_ON_ONCE(head_offset);
 
-	xsk_buff_set_size(xdp, cqe_bcnt);
-	xsk_buff_dma_sync_for_cpu(xdp, rq->xsk_pool);
-	net_prefetch(xdp->data);
+	xsk_buff_set_size(&mxbuf->xdp, cqe_bcnt);
+	xsk_buff_dma_sync_for_cpu(&mxbuf->xdp, rq->xsk_pool);
+	net_prefetch(mxbuf->xdp.data);
 
 	/* Possible flows:
 	 * - XDP_REDIRECT to XSKMAP:
@@ -269,7 +278,7 @@ struct sk_buff *mlx5e_xsk_skb_from_cqe_mpwrq_linear(struct mlx5e_rq *rq,
 	 */
 
 	prog = rcu_dereference(rq->xdp_prog);
-	if (likely(prog && mlx5e_xdp_handle(rq, NULL, prog, xdp))) {
+	if (likely(prog && mlx5e_xdp_handle(rq, NULL, prog, mxbuf))) {
 		if (likely(__test_and_clear_bit(MLX5E_RQ_FLAG_XDP_XMIT, rq->flags)))
 			__set_bit(page_idx, wi->xdp_xmit_bitmap); /* non-atomic */
 		return NULL; /* page/packet was consumed by XDP */
@@ -278,14 +287,14 @@ struct sk_buff *mlx5e_xsk_skb_from_cqe_mpwrq_linear(struct mlx5e_rq *rq,
 	/* XDP_PASS: copy the data from the UMEM to a new SKB and reuse the
 	 * frame. On SKB allocation failure, NULL is returned.
 	 */
-	return mlx5e_xsk_construct_skb(rq, xdp);
+	return mlx5e_xsk_construct_skb(rq, &mxbuf->xdp);
 }
 
 struct sk_buff *mlx5e_xsk_skb_from_cqe_linear(struct mlx5e_rq *rq,
					      struct mlx5e_wqe_frag_info *wi,
					      u32 cqe_bcnt)
 {
-	struct xdp_buff *xdp = wi->au->xsk;
+	struct mlx5e_xdp_buff *mxbuf = xsk_buff_to_mxbuf(wi->au->xsk);
 	struct bpf_prog *prog;
 
 	/* wi->offset is not used in this function, because xdp->data and the
@@ -295,17 +304,17 @@ struct sk_buff *mlx5e_xsk_skb_from_cqe_linear(struct mlx5e_rq *rq,
 	 */
 	WARN_ON_ONCE(wi->offset);
 
-	xsk_buff_set_size(xdp, cqe_bcnt);
-	xsk_buff_dma_sync_for_cpu(xdp, rq->xsk_pool);
-	net_prefetch(xdp->data);
+	xsk_buff_set_size(&mxbuf->xdp, cqe_bcnt);
+	xsk_buff_dma_sync_for_cpu(&mxbuf->xdp, rq->xsk_pool);
+	net_prefetch(mxbuf->xdp.data);
 
 	prog = rcu_dereference(rq->xdp_prog);
-	if (likely(prog && mlx5e_xdp_handle(rq, NULL, prog, xdp)))
+	if (likely(prog && mlx5e_xdp_handle(rq, NULL, prog, mxbuf)))
 		return NULL; /* page/packet was consumed by XDP */
 
 	/* XDP_PASS: copy the data from the UMEM to a new SKB. The frame reuse
 	 * will be handled by mlx5e_free_rx_wqe.
 	 * On SKB allocation failure, NULL is returned.
 	 */
-	return mlx5e_xsk_construct_skb(rq, xdp);
+	return mlx5e_xsk_construct_skb(rq, &mxbuf->xdp);
 }
58 changes: 29 additions & 29 deletions drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
@@ -1575,11 +1575,11 @@ struct sk_buff *mlx5e_build_linear_skb(struct mlx5e_rq *rq, void *va,
 	return skb;
 }
 
-static void mlx5e_fill_xdp_buff(struct mlx5e_rq *rq, void *va, u16 headroom,
-				u32 len, struct xdp_buff *xdp)
+static void mlx5e_fill_mxbuf(struct mlx5e_rq *rq, void *va, u16 headroom,
+			     u32 len, struct mlx5e_xdp_buff *mxbuf)
 {
-	xdp_init_buff(xdp, rq->buff.frame0_sz, &rq->xdp_rxq);
-	xdp_prepare_buff(xdp, va, headroom, len, true);
+	xdp_init_buff(&mxbuf->xdp, rq->buff.frame0_sz, &rq->xdp_rxq);
+	xdp_prepare_buff(&mxbuf->xdp, va, headroom, len, true);
 }
 
 static struct sk_buff *
@@ -1606,16 +1606,16 @@ mlx5e_skb_from_cqe_linear(struct mlx5e_rq *rq, struct mlx5e_wqe_frag_info *wi,
 
 	prog = rcu_dereference(rq->xdp_prog);
 	if (prog) {
-		struct xdp_buff xdp;
+		struct mlx5e_xdp_buff mxbuf;
 
 		net_prefetchw(va); /* xdp_frame data area */
-		mlx5e_fill_xdp_buff(rq, va, rx_headroom, cqe_bcnt, &xdp);
-		if (mlx5e_xdp_handle(rq, au->page, prog, &xdp))
+		mlx5e_fill_mxbuf(rq, va, rx_headroom, cqe_bcnt, &mxbuf);
+		if (mlx5e_xdp_handle(rq, au->page, prog, &mxbuf))
 			return NULL; /* page/packet was consumed by XDP */
 
-		rx_headroom = xdp.data - xdp.data_hard_start;
-		metasize = xdp.data - xdp.data_meta;
-		cqe_bcnt = xdp.data_end - xdp.data;
+		rx_headroom = mxbuf.xdp.data - mxbuf.xdp.data_hard_start;
+		metasize = mxbuf.xdp.data - mxbuf.xdp.data_meta;
+		cqe_bcnt = mxbuf.xdp.data_end - mxbuf.xdp.data;
 	}
 	frag_size = MLX5_SKB_FRAG_SZ(rx_headroom + cqe_bcnt);
 	skb = mlx5e_build_linear_skb(rq, va, frag_size, rx_headroom, cqe_bcnt, metasize);
@@ -1637,9 +1637,9 @@ mlx5e_skb_from_cqe_nonlinear(struct mlx5e_rq *rq, struct mlx5e_wqe_frag_info *wi
 	union mlx5e_alloc_unit *au = wi->au;
 	u16 rx_headroom = rq->buff.headroom;
 	struct skb_shared_info *sinfo;
+	struct mlx5e_xdp_buff mxbuf;
 	u32 frag_consumed_bytes;
 	struct bpf_prog *prog;
-	struct xdp_buff xdp;
 	struct sk_buff *skb;
 	dma_addr_t addr;
 	u32 truesize;
@@ -1654,8 +1654,8 @@ mlx5e_skb_from_cqe_nonlinear(struct mlx5e_rq *rq, struct mlx5e_wqe_frag_info *wi
 	net_prefetchw(va); /* xdp_frame data area */
 	net_prefetch(va + rx_headroom);
 
-	mlx5e_fill_xdp_buff(rq, va, rx_headroom, frag_consumed_bytes, &xdp);
-	sinfo = xdp_get_shared_info_from_buff(&xdp);
+	mlx5e_fill_mxbuf(rq, va, rx_headroom, frag_consumed_bytes, &mxbuf);
+	sinfo = xdp_get_shared_info_from_buff(&mxbuf.xdp);
 	truesize = 0;
 
 	cqe_bcnt -= frag_consumed_bytes;
@@ -1673,13 +1673,13 @@ mlx5e_skb_from_cqe_nonlinear(struct mlx5e_rq *rq, struct mlx5e_wqe_frag_info *wi
 		dma_sync_single_for_cpu(rq->pdev, addr + wi->offset,
					frag_consumed_bytes, rq->buff.map_dir);
 
-		if (!xdp_buff_has_frags(&xdp)) {
+		if (!xdp_buff_has_frags(&mxbuf.xdp)) {
 			/* Init on the first fragment to avoid cold cache access
 			 * when possible.
 			 */
 			sinfo->nr_frags = 0;
 			sinfo->xdp_frags_size = 0;
-			xdp_buff_set_frags_flag(&xdp);
+			xdp_buff_set_frags_flag(&mxbuf.xdp);
 		}
 
 		frag = &sinfo->frags[sinfo->nr_frags++];
@@ -1688,7 +1688,7 @@ mlx5e_skb_from_cqe_nonlinear(struct mlx5e_rq *rq, struct mlx5e_wqe_frag_info *wi
 		skb_frag_size_set(frag, frag_consumed_bytes);
 
 		if (page_is_pfmemalloc(au->page))
-			xdp_buff_set_frag_pfmemalloc(&xdp);
+			xdp_buff_set_frag_pfmemalloc(&mxbuf.xdp);
 
 		sinfo->xdp_frags_size += frag_consumed_bytes;
 		truesize += frag_info->frag_stride;
@@ -1701,7 +1701,7 @@ mlx5e_skb_from_cqe_nonlinear(struct mlx5e_rq *rq, struct mlx5e_wqe_frag_info *wi
 	au = head_wi->au;
 
 	prog = rcu_dereference(rq->xdp_prog);
-	if (prog && mlx5e_xdp_handle(rq, au->page, prog, &xdp)) {
+	if (prog && mlx5e_xdp_handle(rq, au->page, prog, &mxbuf)) {
 		if (test_bit(MLX5E_RQ_FLAG_XDP_XMIT, rq->flags)) {
 			int i;
 
@@ -1711,22 +1711,22 @@ mlx5e_skb_from_cqe_nonlinear(struct mlx5e_rq *rq, struct mlx5e_wqe_frag_info *wi
 		return NULL; /* page/packet was consumed by XDP */
 	}
 
-	skb = mlx5e_build_linear_skb(rq, xdp.data_hard_start, rq->buff.frame0_sz,
-				     xdp.data - xdp.data_hard_start,
-				     xdp.data_end - xdp.data,
-				     xdp.data - xdp.data_meta);
+	skb = mlx5e_build_linear_skb(rq, mxbuf.xdp.data_hard_start, rq->buff.frame0_sz,
+				     mxbuf.xdp.data - mxbuf.xdp.data_hard_start,
+				     mxbuf.xdp.data_end - mxbuf.xdp.data,
+				     mxbuf.xdp.data - mxbuf.xdp.data_meta);
 	if (unlikely(!skb))
 		return NULL;
 
 	page_ref_inc(au->page);
 
-	if (unlikely(xdp_buff_has_frags(&xdp))) {
+	if (unlikely(xdp_buff_has_frags(&mxbuf.xdp))) {
 		int i;
 
 		/* sinfo->nr_frags is reset by build_skb, calculate again. */
 		xdp_update_skb_shared_info(skb, wi - head_wi - 1,
					   sinfo->xdp_frags_size, truesize,
-					   xdp_buff_is_frag_pfmemalloc(&xdp));
+					   xdp_buff_is_frag_pfmemalloc(&mxbuf.xdp));
 
 		for (i = 0; i < sinfo->nr_frags; i++) {
 			skb_frag_t *frag = &sinfo->frags[i];
@@ -2007,19 +2007,19 @@ mlx5e_skb_from_cqe_mpwrq_linear(struct mlx5e_rq *rq, struct mlx5e_mpw_info *wi,
 
 	prog = rcu_dereference(rq->xdp_prog);
 	if (prog) {
-		struct xdp_buff xdp;
+		struct mlx5e_xdp_buff mxbuf;
 
 		net_prefetchw(va); /* xdp_frame data area */
-		mlx5e_fill_xdp_buff(rq, va, rx_headroom, cqe_bcnt, &xdp);
-		if (mlx5e_xdp_handle(rq, au->page, prog, &xdp)) {
+		mlx5e_fill_mxbuf(rq, va, rx_headroom, cqe_bcnt, &mxbuf);
+		if (mlx5e_xdp_handle(rq, au->page, prog, &mxbuf)) {
 			if (__test_and_clear_bit(MLX5E_RQ_FLAG_XDP_XMIT, rq->flags))
 				__set_bit(page_idx, wi->xdp_xmit_bitmap); /* non-atomic */
 			return NULL; /* page/packet was consumed by XDP */
 		}
 
-		rx_headroom = xdp.data - xdp.data_hard_start;
-		metasize = xdp.data - xdp.data_meta;
-		cqe_bcnt = xdp.data_end - xdp.data;
+		rx_headroom = mxbuf.xdp.data - mxbuf.xdp.data_hard_start;
+		metasize = mxbuf.xdp.data - mxbuf.xdp.data_meta;
+		cqe_bcnt = mxbuf.xdp.data_end - mxbuf.xdp.data;
 	}
 	frag_size = MLX5_SKB_FRAG_SZ(rx_headroom + cqe_bcnt);
 	skb = mlx5e_build_linear_skb(rq, va, frag_size, rx_headroom, cqe_bcnt, metasize);
