RDMA/hns: Refactor the code of creating srq
Move the code for creating a user srq and a kernel srq into two
independent functions, remove some unused code, and simplify the
rest.

Link: https://lore.kernel.org/r/[email protected]
Signed-off-by: Lijun Ou <[email protected]>
Signed-off-by: Jason Gunthorpe <[email protected]>
oulijun authored and jgunthorpe committed Jul 25, 2019
1 parent 4f8f0d5 commit 2a2f188
drivers/infiniband/hw/hns/hns_roce_srq.c: 183 additions & 127 deletions
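
For orientation before reading the diff: the refactored hns_roce_create_srq() now only validates sizes and then dispatches to one of the two new helpers, each of which cleans up after itself on failure. A condensed sketch of that control flow, paraphrased from the diff below (setup steps elided; this is not the verbatim source):

/* Condensed sketch of the refactored flow; not the verbatim source. */
int hns_roce_create_srq(struct ib_srq *ib_srq,
			struct ib_srq_init_attr *srq_init_attr,
			struct ib_udata *udata)
{
	struct hns_roce_srq *srq = to_hr_srq(ib_srq);
	int srq_buf_size = 0;	/* computed during the elided setup */
	int ret;

	/* ... check the wqe/sge counts and fill in the srq fields ... */

	if (udata)
		ret = create_user_srq(srq, udata, srq_buf_size);
	else
		ret = create_kernel_srq(srq, srq_buf_size);
	if (ret)
		return ret;	/* each helper unwinds its own state */

	/*
	 * ... reserve the SRQN and write the SRQ context; failures past
	 * this point dispatch to destroy_user_srq()/destroy_kernel_srq()
	 * before returning ...
	 */
	return 0;
}

The error handling mirrors this split: destroy_user_srq() and destroy_kernel_srq() release, in reverse order, exactly what their create counterparts acquired.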
@@ -175,6 +175,91 @@ static void hns_roce_srq_free(struct hns_roce_dev *hr_dev,
hns_roce_bitmap_free(&srq_table->bitmap, srq->srqn, BITMAP_NO_RR);
}

static int create_user_srq(struct hns_roce_srq *srq, struct ib_udata *udata,
int srq_buf_size)
{
struct hns_roce_dev *hr_dev = to_hr_dev(srq->ibsrq.device);
struct hns_roce_ib_create_srq ucmd;
u32 page_shift;
u32 npages;
int ret;

if (ib_copy_from_udata(&ucmd, udata, sizeof(ucmd)))
return -EFAULT;

srq->umem = ib_umem_get(udata, ucmd.buf_addr, srq_buf_size, 0, 0);
if (IS_ERR(srq->umem))
return PTR_ERR(srq->umem);

if (hr_dev->caps.srqwqe_buf_pg_sz) {
npages = (ib_umem_page_count(srq->umem) +
(1 << hr_dev->caps.srqwqe_buf_pg_sz) - 1) /
(1 << hr_dev->caps.srqwqe_buf_pg_sz);
page_shift = PAGE_SHIFT + hr_dev->caps.srqwqe_buf_pg_sz;
ret = hns_roce_mtt_init(hr_dev, npages, page_shift, &srq->mtt);
} else
ret = hns_roce_mtt_init(hr_dev, ib_umem_page_count(srq->umem),
PAGE_SHIFT, &srq->mtt);
if (ret)
goto err_user_buf;

ret = hns_roce_ib_umem_write_mtt(hr_dev, &srq->mtt, srq->umem);
if (ret)
goto err_user_srq_mtt;

/* config index queue BA */
srq->idx_que.umem = ib_umem_get(udata, ucmd.que_addr,
srq->idx_que.buf_size, 0, 0);
if (IS_ERR(srq->idx_que.umem)) {
dev_err(hr_dev->dev, "ib_umem_get error for index queue\n");
ret = PTR_ERR(srq->idx_que.umem);
goto err_user_srq_mtt;
}

if (hr_dev->caps.idx_buf_pg_sz) {
npages = (ib_umem_page_count(srq->idx_que.umem) +
(1 << hr_dev->caps.idx_buf_pg_sz) - 1) /
(1 << hr_dev->caps.idx_buf_pg_sz);
page_shift = PAGE_SHIFT + hr_dev->caps.idx_buf_pg_sz;
ret = hns_roce_mtt_init(hr_dev, npages, page_shift,
&srq->idx_que.mtt);
} else {
ret = hns_roce_mtt_init(hr_dev,
ib_umem_page_count(srq->idx_que.umem),
PAGE_SHIFT,
&srq->idx_que.mtt);
}

if (ret) {
dev_err(hr_dev->dev, "hns_roce_mtt_init error for idx que\n");
goto err_user_idx_mtt;
}

ret = hns_roce_ib_umem_write_mtt(hr_dev, &srq->idx_que.mtt,
srq->idx_que.umem);
if (ret) {
dev_err(hr_dev->dev,
"hns_roce_ib_umem_write_mtt error for idx que\n");
goto err_user_idx_buf;
}

return 0;

err_user_idx_buf:
hns_roce_mtt_cleanup(hr_dev, &srq->idx_que.mtt);

err_user_idx_mtt:
ib_umem_release(srq->idx_que.umem);

err_user_srq_mtt:
hns_roce_mtt_cleanup(hr_dev, &srq->mtt);

err_user_buf:
ib_umem_release(srq->umem);

return ret;
}

static int hns_roce_create_idx_que(struct ib_pd *pd, struct hns_roce_srq *srq,
u32 page_shift)
{
@@ -196,6 +281,93 @@ static int hns_roce_create_idx_que(struct ib_pd *pd, struct hns_roce_srq *srq,
return 0;
}

static int create_kernel_srq(struct hns_roce_srq *srq, int srq_buf_size)
{
struct hns_roce_dev *hr_dev = to_hr_dev(srq->ibsrq.device);
u32 page_shift = PAGE_SHIFT + hr_dev->caps.srqwqe_buf_pg_sz;
int ret;

if (hns_roce_buf_alloc(hr_dev, srq_buf_size, (1 << page_shift) * 2,
&srq->buf, page_shift))
return -ENOMEM;

srq->head = 0;
srq->tail = srq->max - 1;

ret = hns_roce_mtt_init(hr_dev, srq->buf.npages, srq->buf.page_shift,
&srq->mtt);
if (ret)
goto err_kernel_buf;

ret = hns_roce_buf_write_mtt(hr_dev, &srq->mtt, &srq->buf);
if (ret)
goto err_kernel_srq_mtt;

page_shift = PAGE_SHIFT + hr_dev->caps.idx_buf_pg_sz;
ret = hns_roce_create_idx_que(srq->ibsrq.pd, srq, page_shift);
if (ret) {
dev_err(hr_dev->dev, "Create idx queue fail(%d)!\n", ret);
goto err_kernel_srq_mtt;
}

/* Init mtt table for idx_que */
ret = hns_roce_mtt_init(hr_dev, srq->idx_que.idx_buf.npages,
srq->idx_que.idx_buf.page_shift,
&srq->idx_que.mtt);
if (ret)
goto err_kernel_create_idx;

/* Write buffer address into the mtt table */
ret = hns_roce_buf_write_mtt(hr_dev, &srq->idx_que.mtt,
&srq->idx_que.idx_buf);
if (ret)
goto err_kernel_idx_buf;

srq->wrid = kvmalloc_array(srq->max, sizeof(u64), GFP_KERNEL);
if (!srq->wrid) {
ret = -ENOMEM;
goto err_kernel_idx_buf;
}

return 0;

err_kernel_idx_buf:
hns_roce_mtt_cleanup(hr_dev, &srq->idx_que.mtt);

err_kernel_create_idx:
hns_roce_buf_free(hr_dev, srq->idx_que.buf_size,
&srq->idx_que.idx_buf);
kfree(srq->idx_que.bitmap);

err_kernel_srq_mtt:
hns_roce_mtt_cleanup(hr_dev, &srq->mtt);

err_kernel_buf:
hns_roce_buf_free(hr_dev, srq_buf_size, &srq->buf);

return ret;
}

static void destroy_user_srq(struct hns_roce_dev *hr_dev,
struct hns_roce_srq *srq)
{
hns_roce_mtt_cleanup(hr_dev, &srq->idx_que.mtt);
ib_umem_release(srq->idx_que.umem);
hns_roce_mtt_cleanup(hr_dev, &srq->mtt);
ib_umem_release(srq->umem);
}

static void destroy_kernel_srq(struct hns_roce_dev *hr_dev,
struct hns_roce_srq *srq, int srq_buf_size)
{
kvfree(srq->wrid);
hns_roce_mtt_cleanup(hr_dev, &srq->idx_que.mtt);
hns_roce_buf_free(hr_dev, srq->idx_que.buf_size, &srq->idx_que.idx_buf);
kfree(srq->idx_que.bitmap);
hns_roce_mtt_cleanup(hr_dev, &srq->mtt);
hns_roce_buf_free(hr_dev, srq_buf_size, &srq->buf);
}

int hns_roce_create_srq(struct ib_srq *ib_srq,
struct ib_srq_init_attr *srq_init_attr,
struct ib_udata *udata)
@@ -205,9 +377,7 @@ int hns_roce_create_srq(struct ib_srq *ib_srq,
struct hns_roce_srq *srq = to_hr_srq(ib_srq);
int srq_desc_size;
int srq_buf_size;
u32 page_shift;
int ret = 0;
u32 npages;
u32 cqn;

/* Check the actual SRQ wqe and SRQ sge num */
@@ -233,115 +403,16 @@ int hns_roce_create_srq(struct ib_srq *ib_srq,
srq->idx_que.mtt.mtt_type = MTT_TYPE_IDX;

if (udata) {
struct hns_roce_ib_create_srq ucmd;

if (ib_copy_from_udata(&ucmd, udata, sizeof(ucmd)))
return -EFAULT;

srq->umem =
ib_umem_get(udata, ucmd.buf_addr, srq_buf_size, 0, 0);
if (IS_ERR(srq->umem))
return PTR_ERR(srq->umem);

if (hr_dev->caps.srqwqe_buf_pg_sz) {
npages = (ib_umem_page_count(srq->umem) +
(1 << hr_dev->caps.srqwqe_buf_pg_sz) - 1) /
(1 << hr_dev->caps.srqwqe_buf_pg_sz);
page_shift = PAGE_SHIFT + hr_dev->caps.srqwqe_buf_pg_sz;
ret = hns_roce_mtt_init(hr_dev, npages,
page_shift,
&srq->mtt);
} else
ret = hns_roce_mtt_init(hr_dev,
ib_umem_page_count(srq->umem),
PAGE_SHIFT, &srq->mtt);
if (ret)
goto err_buf;

ret = hns_roce_ib_umem_write_mtt(hr_dev, &srq->mtt, srq->umem);
if (ret)
goto err_srq_mtt;

/* config index queue BA */
srq->idx_que.umem = ib_umem_get(udata, ucmd.que_addr,
srq->idx_que.buf_size, 0, 0);
if (IS_ERR(srq->idx_que.umem)) {
dev_err(hr_dev->dev,
"ib_umem_get error for index queue\n");
ret = PTR_ERR(srq->idx_que.umem);
goto err_srq_mtt;
}

if (hr_dev->caps.idx_buf_pg_sz) {
npages = (ib_umem_page_count(srq->idx_que.umem) +
(1 << hr_dev->caps.idx_buf_pg_sz) - 1) /
(1 << hr_dev->caps.idx_buf_pg_sz);
page_shift = PAGE_SHIFT + hr_dev->caps.idx_buf_pg_sz;
ret = hns_roce_mtt_init(hr_dev, npages,
page_shift, &srq->idx_que.mtt);
} else {
ret = hns_roce_mtt_init(
hr_dev, ib_umem_page_count(srq->idx_que.umem),
PAGE_SHIFT, &srq->idx_que.mtt);
}

if (ret) {
dev_err(hr_dev->dev,
"hns_roce_mtt_init error for idx que\n");
goto err_idx_mtt;
}

ret = hns_roce_ib_umem_write_mtt(hr_dev, &srq->idx_que.mtt,
srq->idx_que.umem);
ret = create_user_srq(srq, udata, srq_buf_size);
if (ret) {
dev_err(hr_dev->dev,
"hns_roce_ib_umem_write_mtt error for idx que\n");
goto err_idx_buf;
dev_err(hr_dev->dev, "Create user srq failed\n");
goto err_srq;
}
} else {
page_shift = PAGE_SHIFT + hr_dev->caps.srqwqe_buf_pg_sz;
if (hns_roce_buf_alloc(hr_dev, srq_buf_size,
(1 << page_shift) * 2, &srq->buf,
page_shift))
return -ENOMEM;

srq->head = 0;
srq->tail = srq->max - 1;

ret = hns_roce_mtt_init(hr_dev, srq->buf.npages,
srq->buf.page_shift, &srq->mtt);
if (ret)
goto err_buf;

ret = hns_roce_buf_write_mtt(hr_dev, &srq->mtt, &srq->buf);
if (ret)
goto err_srq_mtt;

page_shift = PAGE_SHIFT + hr_dev->caps.idx_buf_pg_sz;
ret = hns_roce_create_idx_que(ib_srq->pd, srq, page_shift);
ret = create_kernel_srq(srq, srq_buf_size);
if (ret) {
dev_err(hr_dev->dev, "Create idx queue fail(%d)!\n",
ret);
goto err_srq_mtt;
}

/* Init mtt table for idx_que */
ret = hns_roce_mtt_init(hr_dev, srq->idx_que.idx_buf.npages,
srq->idx_que.idx_buf.page_shift,
&srq->idx_que.mtt);
if (ret)
goto err_create_idx;

/* Write buffer address into the mtt table */
ret = hns_roce_buf_write_mtt(hr_dev, &srq->idx_que.mtt,
&srq->idx_que.idx_buf);
if (ret)
goto err_idx_buf;

srq->wrid = kvmalloc_array(srq->max, sizeof(u64), GFP_KERNEL);
if (!srq->wrid) {
ret = -ENOMEM;
goto err_idx_buf;
dev_err(hr_dev->dev, "Create kernel srq failed\n");
goto err_srq;
}
}

@@ -373,27 +444,12 @@ int hns_roce_create_srq(struct ib_srq *ib_srq,
hns_roce_srq_free(hr_dev, srq);

err_wrid:
kvfree(srq->wrid);

err_idx_buf:
hns_roce_mtt_cleanup(hr_dev, &srq->idx_que.mtt);

err_idx_mtt:
ib_umem_release(srq->idx_que.umem);

err_create_idx:
hns_roce_buf_free(hr_dev, srq->idx_que.buf_size,
&srq->idx_que.idx_buf);
bitmap_free(srq->idx_que.bitmap);

err_srq_mtt:
hns_roce_mtt_cleanup(hr_dev, &srq->mtt);

err_buf:
ib_umem_release(srq->umem);
if (!udata)
hns_roce_buf_free(hr_dev, srq_buf_size, &srq->buf);
if (udata)
destroy_user_srq(hr_dev, srq);
else
destroy_kernel_srq(hr_dev, srq, srq_buf_size);

err_srq:
return ret;
}
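
One detail of the new create_user_srq() worth noting: when the device reports a nonzero srqwqe_buf_pg_sz (likewise idx_buf_pg_sz for the index queue), the umem's count of PAGE_SIZE pages is rounded up to whole hardware pages of 2^(PAGE_SHIFT + pg_sz) bytes before hns_roce_mtt_init() is called. The rounding is a plain ceiling division; a standalone, compilable sketch of the same arithmetic (hypothetical names, not kernel code):

#include <assert.h>

/*
 * Round a count of base (PAGE_SIZE) pages up to whole hardware pages,
 * where one hardware page spans (1 << pg_sz_shift) base pages. This
 * mirrors the npages computation in create_user_srq(); the names here
 * are illustrative only.
 */
static unsigned int hw_npages(unsigned int base_pages,
			      unsigned int pg_sz_shift)
{
	unsigned int per_hw = 1U << pg_sz_shift;

	return (base_pages + per_hw - 1) / per_hw;
}

int main(void)
{
	assert(hw_npages(9, 2) == 3);	/* 9 base pages, 4 per hw page */
	assert(hw_npages(8, 2) == 2);	/* exact multiple: no rounding up */
	assert(hw_npages(5, 0) == 5);	/* shift 0: counts pass through */
	return 0;
}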

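More generally, both create helpers follow the kernel's usual goto-unwind idiom: each error label undoes only the acquisitions that succeeded before the failure, in reverse order, and the matching destroy helper performs the same releases for a fully constructed object. A minimal generic illustration of that pairing (hypothetical names, not from this driver):

#include <stdlib.h>

struct thing {
	void *a;
	void *b;
};

/* Acquire a then b; on failure, release in reverse order of acquisition. */
static int create_thing(struct thing *t)
{
	t->a = malloc(64);
	if (!t->a)
		return -1;

	t->b = malloc(64);
	if (!t->b)
		goto err_a;	/* undo only what already succeeded */

	return 0;

err_a:
	free(t->a);
	return -1;
}

/* Full teardown mirrors create_thing() in reverse, like destroy_*_srq(). */
static void destroy_thing(struct thing *t)
{
	free(t->b);
	free(t->a);
}

int main(void)
{
	struct thing t;

	if (create_thing(&t))
		return 1;
	destroy_thing(&t);
	return 0;
}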