Skip to content

Commit

Permalink
liquidio: Napi rx/tx traffic
Browse files Browse the repository at this point in the history
This patch adds TX buffer handling to NAPI along with RX
traffic. It also introduces separate spinlocks for IQ posting
and buffer reclaim so that the TX path and the TX interrupt
do not compete against each other.

Signed-off-by: Derek Chickles <[email protected]>
Signed-off-by: Satanand Burla <[email protected]>
Signed-off-by: Felix Manlunas <[email protected]>
Signed-off-by: Raghu Vatsavayi <[email protected]>
Signed-off-by: Raghu Vatsavayi <[email protected]>
Signed-off-by: David S. Miller <[email protected]>
  • Loading branch information
Raghu Vatsavayi authored and davem330 committed Jun 25, 2016
1 parent 63245f2 commit 9a96bde
Show file tree
Hide file tree
Showing 6 changed files with 177 additions and 105 deletions.
3 changes: 1 addition & 2 deletions drivers/net/ethernet/cavium/liquidio/cn66xx_device.c
Original file line number Diff line number Diff line change
Expand Up @@ -496,8 +496,7 @@ u32 lio_cn6xxx_bar1_idx_read(struct octeon_device *oct, u32 idx)
}

u32
lio_cn6xxx_update_read_index(struct octeon_device *oct __attribute__((unused)),
struct octeon_instr_queue *iq)
lio_cn6xxx_update_read_index(struct octeon_instr_queue *iq)
{
u32 new_idx = readl(iq->inst_cnt_reg);

Expand Down
3 changes: 1 addition & 2 deletions drivers/net/ethernet/cavium/liquidio/cn66xx_device.h
Original file line number Diff line number Diff line change
Expand Up @@ -91,8 +91,7 @@ void lio_cn6xxx_bar1_idx_setup(struct octeon_device *oct, u64 core_addr,
void lio_cn6xxx_bar1_idx_write(struct octeon_device *oct, u32 idx, u32 mask);
u32 lio_cn6xxx_bar1_idx_read(struct octeon_device *oct, u32 idx);
u32
lio_cn6xxx_update_read_index(struct octeon_device *oct __attribute__((unused)),
struct octeon_instr_queue *iq);
lio_cn6xxx_update_read_index(struct octeon_instr_queue *iq);
void lio_cn6xxx_enable_interrupt(void *chip);
void lio_cn6xxx_disable_interrupt(void *chip);
void cn6xxx_get_pcie_qlmport(struct octeon_device *oct);
Expand Down
150 changes: 93 additions & 57 deletions drivers/net/ethernet/cavium/liquidio/lio_main.c
Original file line number Diff line number Diff line change
Expand Up @@ -365,7 +365,7 @@ static int wait_for_pending_requests(struct octeon_device *oct)
[OCTEON_ORDERED_SC_LIST].pending_req_count);
if (pcount)
schedule_timeout_uninterruptible(HZ / 10);
else
else
break;
}

Expand Down Expand Up @@ -409,7 +409,7 @@ static inline void pcierror_quiesce_device(struct octeon_device *oct)
iq->octeon_read_index = iq->host_write_index;
iq->stats.instr_processed +=
atomic_read(&iq->instr_pending);
lio_process_iq_request_list(oct, iq);
lio_process_iq_request_list(oct, iq, 0);
spin_unlock_bh(&iq->lock);
}
}
Expand Down Expand Up @@ -959,6 +959,36 @@ static inline void update_link_status(struct net_device *netdev,
}
}

/**
 * \brief Wake a stopped TX queue once its instruction queue has room.
 * @param oct    octeon device
 * @param iq_num index of the instruction queue that was just serviced
 *
 * Runs in interrupt (NAPI poll) context. Called after octeon_flush_iq()
 * has reclaimed completed TX buffers, so the queue may have free slots.
 */
static void update_txq_status(struct octeon_device *oct, int iq_num)
{
	struct net_device *netdev;
	struct lio *lio;
	struct octeon_instr_queue *iq = oct->instr_queue[iq_num];

	netdev = oct->props[iq->ifidx].netdev;

	/* This is needed because the first IQ does not have
	 * a netdev associated with it.
	 */
	if (!netdev)
		return;

	lio = GET_LIO(netdev);
	if (netif_is_multiqueue(netdev)) {
		if (__netif_subqueue_stopped(netdev, iq->q_index) &&
		    lio->linfo.link.s.link_up &&
		    (!octnet_iq_is_full(oct, iq_num))) {
			netif_wake_subqueue(netdev, iq->q_index);
		} else {
			/* NOTE(review): this branch checks lio->txq rather
			 * than iq_num — presumably the interface's default
			 * TX queue; confirm this asymmetry is intentional.
			 */
			if (!octnet_iq_is_full(oct, lio->txq))
				wake_q(netdev, lio->txq);
		}
	}
}

/**
* \brief Droq packet processor scheduler
* @param oct octeon device
Expand Down Expand Up @@ -1246,6 +1276,7 @@ static void liquidio_destroy_nic_device(struct octeon_device *oct, int ifidx)
{
struct net_device *netdev = oct->props[ifidx].netdev;
struct lio *lio;
struct napi_struct *napi, *n;

if (!netdev) {
dev_err(&oct->pci_dev->dev, "%s No netdevice ptr for index %d\n",
Expand All @@ -1262,6 +1293,13 @@ static void liquidio_destroy_nic_device(struct octeon_device *oct, int ifidx)
if (atomic_read(&lio->ifstate) & LIO_IFSTATE_RUNNING)
txqs_stop(netdev);

if (oct->props[lio->ifidx].napi_enabled == 1) {
list_for_each_entry_safe(napi, n, &netdev->napi_list, dev_list)
napi_disable(napi);

oct->props[lio->ifidx].napi_enabled = 0;
}

if (atomic_read(&lio->ifstate) & LIO_IFSTATE_REGISTERED)
unregister_netdev(netdev);

Expand Down Expand Up @@ -1989,39 +2027,6 @@ static void liquidio_napi_drv_callback(void *arg)
}
}

/**
 * \brief Main NAPI poll function (RX only)
 * @param droq octeon output queue
 * @param budget maximum number of items to process
 * @return number of packets processed, or 0 after completing NAPI
 * when the poll command reports an error.
 */
static int liquidio_napi_do_rx(struct octeon_droq *droq, int budget)
{
int work_done;
struct lio *lio = GET_LIO(droq->napi.dev);
struct octeon_device *oct = lio->oct_dev;

work_done = octeon_process_droq_poll_cmd(oct, droq->q_no,
POLL_EVENT_PROCESS_PKTS,
budget);
/* A negative count signals a receive error on this queue. */
if (work_done < 0) {
netif_info(lio, rx_err, lio->netdev,
"Receive work_done < 0, rxq:%d\n", droq->q_no);
goto octnet_napi_finish;
}

/* Processing more than budget violates the NAPI contract; log loudly. */
if (work_done > budget)
dev_err(&oct->pci_dev->dev, ">>>> %s work_done: %d budget: %d\n",
__func__, work_done, budget);

return work_done;

octnet_napi_finish:
/* Error path: finish this NAPI round and re-enable queue interrupts. */
napi_complete(&droq->napi);
octeon_process_droq_poll_cmd(oct, droq->q_no, POLL_EVENT_ENABLE_INTR,
0);
return 0;
}

/**
* \brief Entry point for NAPI polling
* @param napi NAPI structure
Expand All @@ -2031,19 +2036,41 @@ static int liquidio_napi_poll(struct napi_struct *napi, int budget)
{
struct octeon_droq *droq;
int work_done;
int tx_done = 0, iq_no;
struct octeon_instr_queue *iq;
struct octeon_device *oct;

droq = container_of(napi, struct octeon_droq, napi);
oct = droq->oct_dev;
iq_no = droq->q_no;
/* Handle Droq descriptors */
work_done = octeon_process_droq_poll_cmd(oct, droq->q_no,
POLL_EVENT_PROCESS_PKTS,
budget);

work_done = liquidio_napi_do_rx(droq, budget);
/* Flush the instruction queue */
iq = oct->instr_queue[iq_no];
if (iq) {
/* Process iq buffers with in the budget limits */
tx_done = octeon_flush_iq(oct, iq, 1, budget);
/* Update iq read-index rather than waiting for next interrupt.
* Return back if tx_done is false.
*/
update_txq_status(oct, iq_no);
/*tx_done = (iq->flush_index == iq->octeon_read_index);*/
} else {
dev_err(&oct->pci_dev->dev, "%s: iq (%d) num invalid\n",
__func__, iq_no);
}

if (work_done < budget) {
if ((work_done < budget) && (tx_done)) {
napi_complete(napi);
octeon_process_droq_poll_cmd(droq->oct_dev, droq->q_no,
POLL_EVENT_ENABLE_INTR, 0);
return 0;
}

return work_done;
return (!tx_done) ? (budget) : (work_done);
}

/**
Expand Down Expand Up @@ -2177,6 +2204,14 @@ static inline void setup_tx_poll_fn(struct net_device *netdev)
&lio->txq_status_wq.wk.work, msecs_to_jiffies(1));
}

/* Tear down the periodic TX-queue status task created by
 * setup_tx_poll_fn(): stop the delayed work synchronously first so it
 * cannot requeue itself, then destroy its workqueue.
 */
static inline void cleanup_tx_poll_fn(struct net_device *netdev)
{
	struct lio *priv = GET_LIO(netdev);

	cancel_delayed_work_sync(&priv->txq_status_wq.wk.work);
	destroy_workqueue(priv->txq_status_wq.wq);
}

/**
* \brief Net device open for LiquidIO
* @param netdev network device
Expand All @@ -2187,17 +2222,22 @@ static int liquidio_open(struct net_device *netdev)
struct octeon_device *oct = lio->oct_dev;
struct napi_struct *napi, *n;

list_for_each_entry_safe(napi, n, &netdev->napi_list, dev_list)
napi_enable(napi);
if (oct->props[lio->ifidx].napi_enabled == 0) {
list_for_each_entry_safe(napi, n, &netdev->napi_list, dev_list)
napi_enable(napi);

oct->props[lio->ifidx].napi_enabled = 1;
}

oct_ptp_open(netdev);

ifstate_set(lio, LIO_IFSTATE_RUNNING);

setup_tx_poll_fn(netdev);

start_txq(netdev);

netif_info(lio, ifup, lio->netdev, "Interface Open, ready for traffic\n");
try_module_get(THIS_MODULE);

/* tell Octeon to start forwarding packets to host */
send_rx_ctrl_cmd(lio, 1);
Expand All @@ -2217,39 +2257,35 @@ static int liquidio_open(struct net_device *netdev)
*/
static int liquidio_stop(struct net_device *netdev)
{
struct napi_struct *napi, *n;
struct lio *lio = GET_LIO(netdev);
struct octeon_device *oct = lio->oct_dev;

netif_info(lio, ifdown, lio->netdev, "Stopping interface!\n");
ifstate_reset(lio, LIO_IFSTATE_RUNNING);

netif_tx_disable(netdev);

/* Inform that netif carrier is down */
netif_carrier_off(netdev);
lio->intf_open = 0;
lio->linfo.link.s.link_up = 0;
lio->link_changes++;

netif_carrier_off(netdev);
/* Pause for a moment and wait for Octeon to flush out (to the wire) any
* egress packets that are in-flight.
*/
set_current_state(TASK_INTERRUPTIBLE);
schedule_timeout(msecs_to_jiffies(100));

/* tell Octeon to stop forwarding packets to host */
/* Now it should be safe to tell Octeon that nic interface is down. */
send_rx_ctrl_cmd(lio, 0);

cancel_delayed_work_sync(&lio->txq_status_wq.wk.work);
destroy_workqueue(lio->txq_status_wq.wq);
cleanup_tx_poll_fn(netdev);

if (lio->ptp_clock) {
ptp_clock_unregister(lio->ptp_clock);
lio->ptp_clock = NULL;
}

ifstate_reset(lio, LIO_IFSTATE_RUNNING);

/* This is a hack that allows DHCP to continue working. */
set_bit(__LINK_STATE_START, &lio->netdev->state);

list_for_each_entry_safe(napi, n, &netdev->napi_list, dev_list)
napi_disable(napi);

txqs_stop(netdev);

dev_info(&oct->pci_dev->dev, "%s interface is stopped\n", netdev->name);
module_put(THIS_MODULE);

Expand Down
4 changes: 2 additions & 2 deletions drivers/net/ethernet/cavium/liquidio/octeon_device.h
Original file line number Diff line number Diff line change
Expand Up @@ -204,8 +204,7 @@ struct octeon_fn_list {
void (*bar1_idx_setup)(struct octeon_device *, u64, u32, int);
void (*bar1_idx_write)(struct octeon_device *, u32, u32);
u32 (*bar1_idx_read)(struct octeon_device *, u32);
u32 (*update_iq_read_idx)(struct octeon_device *,
struct octeon_instr_queue *);
u32 (*update_iq_read_idx)(struct octeon_instr_queue *);

void (*enable_oq_pkt_time_intr)(struct octeon_device *, u32);
void (*disable_oq_pkt_time_intr)(struct octeon_device *, u32);
Expand Down Expand Up @@ -267,6 +266,7 @@ struct octdev_props {
/* Each interface in the Octeon device has a network
* device pointer (used for OS specific calls).
*/
int napi_enabled;
int gmxport;
struct net_device *netdev;
};
Expand Down
12 changes: 10 additions & 2 deletions drivers/net/ethernet/cavium/liquidio/octeon_iq.h
Original file line number Diff line number Diff line change
Expand Up @@ -80,6 +80,12 @@ struct octeon_instr_queue {
/** A spinlock to protect access to the input ring. */
spinlock_t lock;

/** A spinlock to protect while posting on the ring. */
spinlock_t post_lock;

/** A spinlock to serialize flush/reclaim operations on this input queue. */
spinlock_t iq_flush_running_lock;

/** Flag that indicates if the queue uses 64 byte commands. */
u32 iqcmd_64B:1;

Expand Down Expand Up @@ -339,7 +345,7 @@ octeon_register_reqtype_free_fn(struct octeon_device *oct, int reqtype,

int
lio_process_iq_request_list(struct octeon_device *oct,
struct octeon_instr_queue *iq);
struct octeon_instr_queue *iq, u32 napi_budget);

int octeon_send_command(struct octeon_device *oct, u32 iq_no,
u32 force_db, void *cmd, void *buf,
Expand All @@ -357,5 +363,7 @@ int octeon_send_soft_command(struct octeon_device *oct,
int octeon_setup_iq(struct octeon_device *oct, int ifidx,
int q_index, union oct_txpciq iq_no, u32 num_descs,
void *app_ctx);

int
octeon_flush_iq(struct octeon_device *oct, struct octeon_instr_queue *iq,
u32 pending_thresh, u32 napi_budget);
#endif /* __OCTEON_IQ_H__ */
Loading

0 comments on commit 9a96bde

Please sign in to comment.