RDMA/bnxt_re: Use core helpers to get aligned DMA address
Call the core helpers to retrieve the HW-aligned address to use for the
MR, within a supported bnxt_re page size.

Remove checking the umem->hugetlb flag as it is no longer required. The
new DMA block iterator will return the 2M-aligned address if the MR is
backed by 2M huge pages.

Acked-by: Selvin Xavier <[email protected]>
Signed-off-by: Shiraz Saleem <[email protected]>
Signed-off-by: Jason Gunthorpe <[email protected]>
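
For context, a minimal sketch of the core-helper pattern the diff below
adopts. The names (umem, virt_addr, pbl_tbl_orig, and the BNXT_RE_* page-size
flags) are taken from the diff itself; error handling is trimmed and the
fragment is illustrative rather than drop-in:

	unsigned long page_size;
	unsigned int page_shift;
	struct ib_block_iter biter;
	u64 *pbl_tbl = pbl_tbl_orig;

	/* Pick the largest HW-supported page size (4K or 2M here) that also
	 * fits the physical layout and IOVA alignment of the umem. */
	page_size = ib_umem_find_best_pgsz(umem,
					   BNXT_RE_PAGE_SIZE_4K |
					   BNXT_RE_PAGE_SIZE_2M,
					   virt_addr);
	page_shift = __ffs(page_size);

	/* Walk the umem in page_size-aligned DMA blocks. Each returned
	 * address is already aligned to page_size, so the old manual
	 * masking of the first page is unnecessary. */
	rdma_for_each_block(umem->sg_head.sgl, &biter, umem->nmap, page_size)
		*pbl_tbl++ = rdma_block_iter_dma_address(&biter);

If the MR is backed by 2M huge pages and the IOVA is 2M-aligned, each
iteration yields one 2M-aligned block address; otherwise the helper falls
back to 4K pages.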
shirazsaleem authored and jgunthorpe committed May 6, 2019
1 parent eb52c03 commit d855825
Showing 1 changed file with 10 additions and 17 deletions.
drivers/infiniband/hw/bnxt_re/ib_verbs.c
@@ -3507,17 +3507,12 @@ static int fill_umem_pbl_tbl(struct ib_umem *umem, u64 *pbl_tbl_orig,
 			     int page_shift)
 {
 	u64 *pbl_tbl = pbl_tbl_orig;
-	u64 paddr;
-	u64 page_mask = (1ULL << page_shift) - 1;
-	struct sg_dma_page_iter sg_iter;
+	u64 page_size = BIT_ULL(page_shift);
+	struct ib_block_iter biter;
+
+	rdma_for_each_block(umem->sg_head.sgl, &biter, umem->nmap, page_size)
+		*pbl_tbl++ = rdma_block_iter_dma_address(&biter);
 
-	for_each_sg_dma_page (umem->sg_head.sgl, &sg_iter, umem->nmap, 0) {
-		paddr = sg_page_iter_dma_address(&sg_iter);
-		if (pbl_tbl == pbl_tbl_orig)
-			*pbl_tbl++ = paddr & ~page_mask;
-		else if ((paddr & page_mask) == 0)
-			*pbl_tbl++ = paddr;
-	}
 	return pbl_tbl - pbl_tbl_orig;
 }

@@ -3579,25 +3574,23 @@ struct ib_mr *bnxt_re_reg_user_mr(struct ib_pd *ib_pd, u64 start, u64 length,
 		goto free_umem;
 	}
 
-	page_shift = PAGE_SHIFT;
+	page_shift = __ffs(ib_umem_find_best_pgsz(umem,
+				BNXT_RE_PAGE_SIZE_4K | BNXT_RE_PAGE_SIZE_2M,
+				virt_addr));
 
 	if (!bnxt_re_page_size_ok(page_shift)) {
 		dev_err(rdev_to_dev(rdev), "umem page size unsupported!");
 		rc = -EFAULT;
 		goto fail;
 	}
 
-	if (!umem->hugetlb && length > BNXT_RE_MAX_MR_SIZE_LOW) {
+	if (page_shift == BNXT_RE_PAGE_SHIFT_4K &&
+	    length > BNXT_RE_MAX_MR_SIZE_LOW) {
 		dev_err(rdev_to_dev(rdev), "Requested MR Sz:%llu Max sup:%llu",
 			length, (u64)BNXT_RE_MAX_MR_SIZE_LOW);
 		rc = -EINVAL;
 		goto fail;
 	}
-	if (umem->hugetlb && length > BNXT_RE_PAGE_SIZE_2M) {
-		page_shift = BNXT_RE_PAGE_SHIFT_2M;
-		dev_warn(rdev_to_dev(rdev), "umem hugetlb set page_size %x",
-			 1 << page_shift);
-	}
 
 	/* Map umem buf ptrs to the PBL */
 	umem_pgs = fill_umem_pbl_tbl(umem, pbl_tbl, page_shift);
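
As a hedged worked example of the page_shift computation in the hunk above
(the arithmetic follows from the helpers' contracts; the 2M-backing
assumption is ours, not stated in the diff):

	/* Assuming the umem is backed by 2M huge pages and virt_addr is
	 * 2M-aligned; otherwise the helper falls back to 4K:
	 *
	 *   ib_umem_find_best_pgsz(umem, BNXT_RE_PAGE_SIZE_4K |
	 *                          BNXT_RE_PAGE_SIZE_2M, virt_addr) == 0x200000
	 *   __ffs(0x200000) == 21  ->  page_shift == BNXT_RE_PAGE_SHIFT_2M
	 *   __ffs(0x1000)   == 12  ->  page_shift == BNXT_RE_PAGE_SHIFT_4K
	 */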
