net: mana: Move NAPI from EQ to CQ
The existing code has NAPI threads polling on the EQ directly. To prepare
for EQ sharing among vPorts, move NAPI from the EQ to the CQ so that one EQ
can serve multiple CQs from different vPorts.

The "arm bit" is only set when CQ processing is completed to reduce
the number of EQ entries, which in turn reduce the number of interrupts
on EQ.

Signed-off-by: Haiyang Zhang <[email protected]>
Signed-off-by: David S. Miller <[email protected]>
haiyangz authored and davem330 committed Aug 25, 2021
1 parent 807d103 commit e1b5683
Showing 5 changed files with 74 additions and 103 deletions.
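The CQ-side NAPI handler lives in the fifth changed file, whose diff did not load on this page. As a rough illustration of the scheme the commit message describes (each CQ gets its own NAPI context, and the arm bit is set only once that CQ's poll completes under budget), the following minimal sketch is assembled from the struct mana_cq fields and the mana_gd_ring_cq() signature introduced by this commit. It is hypothetical, not the driver's verbatim code; mana_poll_cq_sketch() and mana_process_cq() are illustrative names.

/* Hypothetical sketch, not the driver's verbatim code: a per-CQ NAPI
 * poll that defers the arm bit. The CQ doorbell is rung with
 * SET_ARM_BIT only when NAPI finishes under budget, so fewer EQ
 * entries (and thus fewer EQ interrupts) are generated.
 */
static int mana_poll_cq_sketch(struct napi_struct *napi, int budget)
{
	struct mana_cq *cq = container_of(napi, struct mana_cq, napi);
	u8 arm_bit = 0;

	cq->work_done = 0;
	cq->budget = budget;

	/* Stand-in for the driver's RX/TX CQ processing; assumed to
	 * consume CQEs and advance cq->work_done.
	 */
	mana_process_cq(cq);

	if (cq->work_done < cq->budget &&
	    napi_complete_done(&cq->napi, cq->work_done))
		arm_bit = SET_ARM_BIT;	/* re-arm only when fully drained */

	mana_gd_ring_cq(cq->gdma_cq, arm_bit);

	return min(cq->work_done, budget);
}

The arm/no-arm decision here mirrors the logic this commit removes from the EQ path in gdma_main.c below; the decision simply moves with NAPI from the EQ to the CQ.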
9 changes: 1 addition & 8 deletions drivers/net/ethernet/microsoft/mana/gdma.h
@@ -239,10 +239,8 @@ struct gdma_event {
 
 struct gdma_queue;
 
-#define CQE_POLLING_BUFFER 512
 struct mana_eq {
 	struct gdma_queue *eq;
-	struct gdma_comp cqe_poll[CQE_POLLING_BUFFER];
 };
 
 typedef void gdma_eq_callback(void *context, struct gdma_queue *q,
@@ -291,11 +289,6 @@ struct gdma_queue {
 			unsigned int msix_index;
 
 			u32 log2_throttle_limit;
-
-			/* NAPI data */
-			struct napi_struct napi;
-			int work_done;
-			int budget;
 		} eq;
 
 		struct {
@@ -406,7 +399,7 @@ void mana_gd_destroy_queue(struct gdma_context *gc, struct gdma_queue *queue);
 
 int mana_gd_poll_cq(struct gdma_queue *cq, struct gdma_comp *comp, int num_cqe);
 
-void mana_gd_arm_cq(struct gdma_queue *cq);
+void mana_gd_ring_cq(struct gdma_queue *cq, u8 arm_bit);
 
 struct gdma_wqe {
 	u32 reserved	:24;
55 changes: 4 additions & 51 deletions drivers/net/ethernet/microsoft/mana/gdma_main.c
@@ -267,7 +267,7 @@ void mana_gd_wq_ring_doorbell(struct gdma_context *gc, struct gdma_queue *queue)
 			      queue->id, queue->head * GDMA_WQE_BU_SIZE, 1);
 }
 
-void mana_gd_arm_cq(struct gdma_queue *cq)
+void mana_gd_ring_cq(struct gdma_queue *cq, u8 arm_bit)
 {
 	struct gdma_context *gc = cq->gdma_dev->gdma_context;
 
@@ -276,7 +276,7 @@ void mana_gd_arm_cq(struct gdma_queue *cq)
 	u32 head = cq->head % (num_cqe << GDMA_CQE_OWNER_BITS);
 
 	mana_gd_ring_doorbell(gc, cq->gdma_dev->doorbell, cq->type, cq->id,
-			      head, SET_ARM_BIT);
+			      head, arm_bit);
 }
 
 static void mana_gd_process_eqe(struct gdma_queue *eq)
@@ -339,7 +339,6 @@ static void mana_gd_process_eq_events(void *arg)
 	struct gdma_queue *eq = arg;
 	struct gdma_context *gc;
 	struct gdma_eqe *eqe;
-	unsigned int arm_bit;
 	u32 head, num_eqe;
 	int i;
 
@@ -370,48 +369,16 @@
 		eq->head++;
 	}
 
-	/* Always rearm the EQ for HWC. For MANA, rearm it when NAPI is done. */
-	if (mana_gd_is_hwc(eq->gdma_dev)) {
-		arm_bit = SET_ARM_BIT;
-	} else if (eq->eq.work_done < eq->eq.budget &&
-		   napi_complete_done(&eq->eq.napi, eq->eq.work_done)) {
-		arm_bit = SET_ARM_BIT;
-	} else {
-		arm_bit = 0;
-	}
-
 	head = eq->head % (num_eqe << GDMA_EQE_OWNER_BITS);
 
 	mana_gd_ring_doorbell(gc, eq->gdma_dev->doorbell, eq->type, eq->id,
-			      head, arm_bit);
-}
-
-static int mana_poll(struct napi_struct *napi, int budget)
-{
-	struct gdma_queue *eq = container_of(napi, struct gdma_queue, eq.napi);
-
-	eq->eq.work_done = 0;
-	eq->eq.budget = budget;
-
-	mana_gd_process_eq_events(eq);
-
-	return min(eq->eq.work_done, budget);
-}
-
-static void mana_gd_schedule_napi(void *arg)
-{
-	struct gdma_queue *eq = arg;
-	struct napi_struct *napi;
-
-	napi = &eq->eq.napi;
-	napi_schedule_irqoff(napi);
+			      head, SET_ARM_BIT);
 }
 
 static int mana_gd_register_irq(struct gdma_queue *queue,
 				const struct gdma_queue_spec *spec)
 {
 	struct gdma_dev *gd = queue->gdma_dev;
-	bool is_mana = mana_gd_is_mana(gd);
 	struct gdma_irq_context *gic;
 	struct gdma_context *gc;
 	struct gdma_resource *r;
@@ -442,20 +409,11 @@ static int mana_gd_register_irq(struct gdma_queue *queue,
 
 	gic = &gc->irq_contexts[msi_index];
 
-	if (is_mana) {
-		netif_napi_add(spec->eq.ndev, &queue->eq.napi, mana_poll,
-			       NAPI_POLL_WEIGHT);
-		napi_enable(&queue->eq.napi);
-	}
-
 	WARN_ON(gic->handler || gic->arg);
 
 	gic->arg = queue;
 
-	if (is_mana)
-		gic->handler = mana_gd_schedule_napi;
-	else
-		gic->handler = mana_gd_process_eq_events;
+	gic->handler = mana_gd_process_eq_events;
 
 	return 0;
 }
@@ -549,11 +507,6 @@ static void mana_gd_destroy_eq(struct gdma_context *gc, bool flush_evenets,
 
 	mana_gd_deregiser_irq(queue);
 
-	if (mana_gd_is_mana(queue->gdma_dev)) {
-		napi_disable(&queue->eq.napi);
-		netif_napi_del(&queue->eq.napi);
-	}
-
 	if (queue->eq.disable_needed)
 		mana_gd_disable_queue(queue);
 }
2 changes: 1 addition & 1 deletion drivers/net/ethernet/microsoft/mana/hw_channel.c
@@ -304,7 +304,7 @@ static void mana_hwc_comp_event(void *ctx, struct gdma_queue *q_self)
 				   &comp_data);
 	}
 
-	mana_gd_arm_cq(q_self);
+	mana_gd_ring_cq(q_self, SET_ARM_BIT);
 }
 
 static void mana_hwc_destroy_cq(struct gdma_context *gc, struct hwc_cq *hwc_cq)
11 changes: 9 additions & 2 deletions drivers/net/ethernet/microsoft/mana/mana.h
@@ -225,6 +225,8 @@ struct mana_tx_comp_oob {
 
 struct mana_rxq;
 
+#define CQE_POLLING_BUFFER 512
+
 struct mana_cq {
 	struct gdma_queue *gdma_cq;
 
@@ -244,8 +246,13 @@ struct mana_cq {
 	 */
 	struct mana_txq *txq;
 
-	/* Pointer to a buffer which the CQ handler can copy the CQE's into. */
-	struct gdma_comp *gdma_comp_buf;
+	/* Buffer which the CQ handler can copy the CQE's into. */
+	struct gdma_comp gdma_comp_buf[CQE_POLLING_BUFFER];
+
+	/* NAPI data */
+	struct napi_struct napi;
+	int work_done;
+	int budget;
 };
 
 #define GDMA_MAX_RQE_SGES 15
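Note the change above from a pointer (previously aimed at the cqe_poll array that this commit removes from struct mana_eq in gdma.h) to a per-CQ array: each CQ is now polled from its own NAPI context rather than serially from one EQ thread, so each needs private CQE storage. Below is a minimal sketch of how a handler might drain that buffer with mana_gd_poll_cq(), declared in gdma.h above; mana_drain_cq_sketch() and mana_handle_one_completion() are hypothetical names.

/* Hypothetical sketch: drain up to CQE_POLLING_BUFFER completions
 * into the CQ's own buffer. With per-CQ storage, multiple CQs on a
 * shared EQ can be polled independently without clobbering each
 * other's CQEs.
 */
static void mana_drain_cq_sketch(struct mana_cq *cq)
{
	struct gdma_comp *comp = cq->gdma_comp_buf;
	int comp_read, i;

	comp_read = mana_gd_poll_cq(cq->gdma_cq, comp, CQE_POLLING_BUFFER);

	for (i = 0; i < comp_read; i++)
		mana_handle_one_completion(cq, &comp[i]); /* illustrative */
}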
[The diff for the remaining changed file did not load on this page.]
