svcrdma: Remove unused Read completion handlers
Clean up:

The generic RDMA R/W API conversion of svc_rdma_recvfrom replaced
the Register, Read, and Invalidate completion handlers. Remove the
old ones, which are no longer used.

These handlers shared some helper code with svc_rdma_wc_send. Fold
the wc_common helper back into the one remaining completion handler.

Signed-off-by: Chuck Lever <[email protected]>
Signed-off-by: J. Bruce Fields <[email protected]>
chucklever authored and J. Bruce Fields committed Jul 12, 2017
1 parent 71641d9 commit c84dc90
Showing 2 changed files with 10 additions and 87 deletions.
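For orientation, the folded-up handler that results from this patch looks roughly like the sketch below. It is reconstructed from the added lines in the diff that follows, not quoted verbatim from the tree: the sc_sq_avail credit accounting, the send-wait wakeup, and the flush-aware error reporting that previously lived in svc_rdma_send_wc_common are inlined into the one remaining Send completion handler.

void svc_rdma_wc_send(struct ib_cq *cq, struct ib_wc *wc)
{
	struct svcxprt_rdma *xprt = cq->cq_context;
	struct ib_cqe *cqe = wc->wr_cqe;
	struct svc_rdma_op_ctxt *ctxt;

	/* SQ credit accounting, folded in from the removed helper */
	atomic_inc(&xprt->sc_sq_avail);
	wake_up(&xprt->sc_send_wait);

	/* Release the Send context and its DMA mappings */
	ctxt = container_of(cqe, struct svc_rdma_op_ctxt, cqe);
	svc_rdma_unmap_dma(ctxt);
	svc_rdma_put_context(ctxt, 1);

	/* Error reporting, also folded in; flush errors are not logged */
	if (unlikely(wc->status != IB_WC_SUCCESS)) {
		set_bit(XPT_CLOSE, &xprt->sc_xprt.xpt_flags);
		if (wc->status != IB_WC_WR_FLUSH_ERR)
			pr_err("svcrdma: Send: %s (%u/0x%x)\n",
			       ib_wc_status_msg(wc->status),
			       wc->status, wc->vendor_err);
	}

	svc_xprt_put(&xprt->sc_xprt);
}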
4 changes: 1 addition & 3 deletions include/linux/sunrpc/svc_rdma.h
@@ -77,17 +77,15 @@ extern atomic_t rdma_stat_sq_prod;
*/
struct svc_rdma_op_ctxt {
struct list_head list;
struct svc_rdma_op_ctxt *read_hdr;
struct svc_rdma_fastreg_mr *frmr;
int hdr_count;
struct xdr_buf arg;
struct ib_cqe cqe;
u32 byte_len;
struct svcxprt_rdma *xprt;
unsigned long flags;
enum dma_data_direction direction;
int count;
unsigned int mapped_sges;
int hdr_count;
struct ib_send_wr send_wr;
struct ib_sge sge[1 + RPCRDMA_MAX_INLINE_THRESH / PAGE_SIZE];
struct page *pages[RPCSVC_MAXPAGES];
93 changes: 9 additions & 84 deletions net/sunrpc/xprtrdma/svc_rdma_transport.c
@@ -346,111 +346,36 @@ static void svc_rdma_wc_receive(struct ib_cq *cq, struct ib_wc *wc)
svc_xprt_put(&xprt->sc_xprt);
}

static void svc_rdma_send_wc_common(struct svcxprt_rdma *xprt,
struct ib_wc *wc,
const char *opname)
{
if (wc->status != IB_WC_SUCCESS)
goto err;

out:
atomic_inc(&xprt->sc_sq_avail);
wake_up(&xprt->sc_send_wait);
return;

err:
set_bit(XPT_CLOSE, &xprt->sc_xprt.xpt_flags);
if (wc->status != IB_WC_WR_FLUSH_ERR)
pr_err("svcrdma: %s: %s (%u/0x%x)\n",
opname, ib_wc_status_msg(wc->status),
wc->status, wc->vendor_err);
goto out;
}

static void svc_rdma_send_wc_common_put(struct ib_cq *cq, struct ib_wc *wc,
const char *opname)
{
struct svcxprt_rdma *xprt = cq->cq_context;

svc_rdma_send_wc_common(xprt, wc, opname);
svc_xprt_put(&xprt->sc_xprt);
}

/**
* svc_rdma_wc_send - Invoked by RDMA provider for each polled Send WC
* @cq: completion queue
* @wc: completed WR
*
*/
void svc_rdma_wc_send(struct ib_cq *cq, struct ib_wc *wc)
{
struct ib_cqe *cqe = wc->wr_cqe;
struct svc_rdma_op_ctxt *ctxt;

svc_rdma_send_wc_common_put(cq, wc, "send");

ctxt = container_of(cqe, struct svc_rdma_op_ctxt, cqe);
svc_rdma_unmap_dma(ctxt);
svc_rdma_put_context(ctxt, 1);
}

/**
* svc_rdma_wc_reg - Invoked by RDMA provider for each polled FASTREG WC
* @cq: completion queue
* @wc: completed WR
*
*/
void svc_rdma_wc_reg(struct ib_cq *cq, struct ib_wc *wc)
{
svc_rdma_send_wc_common_put(cq, wc, "fastreg");
}

/**
* svc_rdma_wc_read - Invoked by RDMA provider for each polled Read WC
* @cq: completion queue
* @wc: completed WR
*
*/
void svc_rdma_wc_read(struct ib_cq *cq, struct ib_wc *wc)
{
struct svcxprt_rdma *xprt = cq->cq_context;
struct ib_cqe *cqe = wc->wr_cqe;
struct svc_rdma_op_ctxt *ctxt;

svc_rdma_send_wc_common(xprt, wc, "read");
atomic_inc(&xprt->sc_sq_avail);
wake_up(&xprt->sc_send_wait);

ctxt = container_of(cqe, struct svc_rdma_op_ctxt, cqe);
svc_rdma_unmap_dma(ctxt);
svc_rdma_put_frmr(xprt, ctxt->frmr);

if (test_bit(RDMACTXT_F_LAST_CTXT, &ctxt->flags)) {
struct svc_rdma_op_ctxt *read_hdr;

read_hdr = ctxt->read_hdr;
spin_lock(&xprt->sc_rq_dto_lock);
list_add_tail(&read_hdr->list,
&xprt->sc_read_complete_q);
spin_unlock(&xprt->sc_rq_dto_lock);
svc_rdma_put_context(ctxt, 1);

set_bit(XPT_DATA, &xprt->sc_xprt.xpt_flags);
svc_xprt_enqueue(&xprt->sc_xprt);
if (unlikely(wc->status != IB_WC_SUCCESS)) {
set_bit(XPT_CLOSE, &xprt->sc_xprt.xpt_flags);
if (wc->status != IB_WC_WR_FLUSH_ERR)
pr_err("svcrdma: Send: %s (%u/0x%x)\n",
ib_wc_status_msg(wc->status),
wc->status, wc->vendor_err);
}

svc_rdma_put_context(ctxt, 0);
svc_xprt_put(&xprt->sc_xprt);
}

/**
* svc_rdma_wc_inv - Invoked by RDMA provider for each polled LOCAL_INV WC
* @cq: completion queue
* @wc: completed WR
*
*/
void svc_rdma_wc_inv(struct ib_cq *cq, struct ib_wc *wc)
{
svc_rdma_send_wc_common_put(cq, wc, "localInv");
}

static struct svcxprt_rdma *rdma_create_xprt(struct svc_serv *serv,
int listener)
{
