RDMA/rxe: Add common rxe_prepare_res()
It is redundant to prepare resources for Read and Atomic
requests with separate functions. Replace them with a common
rxe_prepare_res() that takes the resource type as a parameter.
In addition, the common rxe_prepare_res() can also be used by
the new Flush and Atomic Write requests in the future.

Link: https://lore.kernel.org/r/[email protected]
Signed-off-by: Xiao Yang <[email protected]>
Reviewed-by: Bob Pearson <[email protected]>
Signed-off-by: Leon Romanovsky <[email protected]>
yangx-jy authored and rleon committed Jul 18, 2022
1 parent 37da51e commit 882736f
1 changed file: drivers/infiniband/sw/rxe/rxe_resp.c (71 changes: 32 additions, 39 deletions)
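Before the diff, a self-contained toy model of the consolidation the patch performs (illustrative only: the toy_* names below are simplified stand-ins invented for this sketch, not the driver's real types). One prepare routine owns the bookkeeping shared by every resource type, a switch fills in the per-type state, and a future Flush or Atomic Write case would only need one more label.

#include <stdio.h>

/* Simplified stand-ins for the rxe resource types (not the kernel's definitions). */
enum toy_res_type { TOY_READ, TOY_ATOMIC /* future: TOY_FLUSH, TOY_ATOMIC_WRITE */ };

struct toy_res {
	enum toy_res_type type;
	int replay;
	unsigned int first_psn;
	unsigned int last_psn;
};

/* One helper replaces two near-identical ones: the fields common to all
 * types are set unconditionally, the per-type fields in the switch.
 */
static void toy_prepare_res(struct toy_res *res, enum toy_res_type type,
			    unsigned int psn, unsigned int pkts)
{
	res->type = type;
	res->replay = 0;

	switch (type) {
	case TOY_READ:		/* a read reply may span several packets */
		res->first_psn = psn;
		res->last_psn = psn + pkts - 1;
		break;
	case TOY_ATOMIC:	/* an atomic reply is always a single packet */
		res->first_psn = psn;
		res->last_psn = psn;
		break;
	}
}

int main(void)
{
	struct toy_res r;

	toy_prepare_res(&r, TOY_READ, 100, 8);
	printf("read   PSNs %u..%u\n", r.first_psn, r.last_psn);

	toy_prepare_res(&r, TOY_ATOMIC, 200, 1);
	printf("atomic PSNs %u..%u\n", r.first_psn, r.last_psn);
	return 0;
}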
@@ -553,27 +553,48 @@ static enum resp_states write_data_in(struct rxe_qp *qp,
 	return rc;
 }
 
-/* Guarantee atomicity of atomic operations at the machine level. */
-static DEFINE_SPINLOCK(atomic_ops_lock);
-
-static struct resp_res *rxe_prepare_atomic_res(struct rxe_qp *qp,
-					       struct rxe_pkt_info *pkt)
+static struct resp_res *rxe_prepare_res(struct rxe_qp *qp,
+					struct rxe_pkt_info *pkt,
+					int type)
 {
 	struct resp_res *res;
+	u32 pkts;
 
 	res = &qp->resp.resources[qp->resp.res_head];
 	rxe_advance_resp_resource(qp);
 	free_rd_atomic_resource(qp, res);
 
-	res->type = RXE_ATOMIC_MASK;
-	res->first_psn = pkt->psn;
-	res->last_psn = pkt->psn;
-	res->cur_psn = pkt->psn;
+	res->type = type;
 	res->replay = 0;
 
+	switch (type) {
+	case RXE_READ_MASK:
+		res->read.va = qp->resp.va + qp->resp.offset;
+		res->read.va_org = qp->resp.va + qp->resp.offset;
+		res->read.resid = qp->resp.resid;
+		res->read.length = qp->resp.resid;
+		res->read.rkey = qp->resp.rkey;
+
+		pkts = max_t(u32, (reth_len(pkt) + qp->mtu - 1)/qp->mtu, 1);
+		res->first_psn = pkt->psn;
+		res->cur_psn = pkt->psn;
+		res->last_psn = (pkt->psn + pkts - 1) & BTH_PSN_MASK;
+
+		res->state = rdatm_res_state_new;
+		break;
+	case RXE_ATOMIC_MASK:
+		res->first_psn = pkt->psn;
+		res->last_psn = pkt->psn;
+		res->cur_psn = pkt->psn;
+		break;
+	}
+
 	return res;
 }
 
+/* Guarantee atomicity of atomic operations at the machine level. */
+static DEFINE_SPINLOCK(atomic_ops_lock);
+
 static enum resp_states rxe_atomic_reply(struct rxe_qp *qp,
 					 struct rxe_pkt_info *pkt)
 {
@@ -584,7 +605,7 @@ static enum resp_states rxe_atomic_reply(struct rxe_qp *qp,
 	u64 value;
 
 	if (!res) {
-		res = rxe_prepare_atomic_res(qp, pkt);
+		res = rxe_prepare_res(qp, pkt, RXE_ATOMIC_MASK);
 		qp->resp.res = res;
 	}
 
@@ -680,34 +701,6 @@ static struct sk_buff *prepare_ack_packet(struct rxe_qp *qp,
 	return skb;
 }
 
-static struct resp_res *rxe_prepare_read_res(struct rxe_qp *qp,
-					     struct rxe_pkt_info *pkt)
-{
-	struct resp_res *res;
-	u32 pkts;
-
-	res = &qp->resp.resources[qp->resp.res_head];
-	rxe_advance_resp_resource(qp);
-	free_rd_atomic_resource(qp, res);
-
-	res->type = RXE_READ_MASK;
-	res->replay = 0;
-	res->read.va = qp->resp.va + qp->resp.offset;
-	res->read.va_org = qp->resp.va + qp->resp.offset;
-	res->read.resid = qp->resp.resid;
-	res->read.length = qp->resp.resid;
-	res->read.rkey = qp->resp.rkey;
-
-	pkts = max_t(u32, (reth_len(pkt) + qp->mtu - 1)/qp->mtu, 1);
-	res->first_psn = pkt->psn;
-	res->cur_psn = pkt->psn;
-	res->last_psn = (pkt->psn + pkts - 1) & BTH_PSN_MASK;
-
-	res->state = rdatm_res_state_new;
-
-	return res;
-}
-
 /**
  * rxe_recheck_mr - revalidate MR from rkey and get a reference
  * @qp: the qp
@@ -778,7 +771,7 @@ static enum resp_states read_reply(struct rxe_qp *qp,
 	struct rxe_mr *mr;
 
 	if (!res) {
-		res = rxe_prepare_read_res(qp, req_pkt);
+		res = rxe_prepare_res(qp, req_pkt, RXE_READ_MASK);
 		qp->resp.res = res;
 	}
 