RDMA/verbs: Add a DMA iterator to return aligned contiguous memory blocks

This helper iterates over a DMA-mapped SGL and returns contiguous memory
blocks aligned to a HW supported page size.

Suggested-by: Jason Gunthorpe <[email protected]>
Signed-off-by: Shiraz Saleem <[email protected]>
Signed-off-by: Jason Gunthorpe <[email protected]>
shirazsaleem authored and jgunthorpe committed May 6, 2019
1 parent 4a35339 commit a808273
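
As an illustration of what the new iterator is for, here is a minimal, hedged usage sketch: roughly how a driver registering an MR might collect one aligned DMA address per HW-supported page. Nothing below is part of this commit; the function name, the page_list buffer, and the ib_umem fields used (sg_head.sgl and nmap, as the structure looked in this kernel era) are assumptions for illustration only.

#include <rdma/ib_umem.h>
#include <rdma/ib_verbs.h>

/* Illustrative sketch only; not part of commit a808273. */
static unsigned int example_fill_page_list(struct ib_umem *umem,
                                           unsigned long pgsz,
                                           dma_addr_t *page_list)
{
        struct ib_block_iter biter;
        unsigned int nr = 0;

        /* One iteration per pgsz-aligned block of the DMA-mapped SGL */
        rdma_for_each_block(umem->sg_head.sgl, &biter, umem->nmap, pgsz)
                page_list[nr++] = rdma_block_iter_dma_address(&biter);

        return nr;
}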
Showing 2 changed files with 81 additions and 0 deletions.
drivers/infiniband/core/verbs.c (34 additions, 0 deletions)
@@ -2710,3 +2710,37 @@ int rdma_init_netdev(struct ib_device *device, u8 port_num,
                                             netdev, params.param);
}
EXPORT_SYMBOL(rdma_init_netdev);

void __rdma_block_iter_start(struct ib_block_iter *biter,
                             struct scatterlist *sglist, unsigned int nents,
                             unsigned long pgsz)
{
        memset(biter, 0, sizeof(struct ib_block_iter));
        biter->__sg = sglist;
        biter->__sg_nents = nents;

        /* Driver provides best block size to use */
        biter->__pg_bit = __fls(pgsz);
}
EXPORT_SYMBOL(__rdma_block_iter_start);

bool __rdma_block_iter_next(struct ib_block_iter *biter)
{
        unsigned int block_offset;

        if (!biter->__sg_nents || !biter->__sg)
                return false;

        biter->__dma_addr = sg_dma_address(biter->__sg) + biter->__sg_advance;
        block_offset = biter->__dma_addr & (BIT_ULL(biter->__pg_bit) - 1);
        biter->__sg_advance += BIT_ULL(biter->__pg_bit) - block_offset;

        if (biter->__sg_advance >= sg_dma_len(biter->__sg)) {
                biter->__sg_advance = 0;
                biter->__sg = sg_next(biter->__sg);
                biter->__sg_nents--;
        }

        return true;
}
EXPORT_SYMBOL(__rdma_block_iter_next);
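
To make the offset/advance arithmetic in __rdma_block_iter_next() and the alignment mask used by rdma_block_iter_dma_address() concrete, here is a small userspace sketch. The numbers are made up for illustration (a 4 KiB HW page size, so __fls(4096) == 12, and a deliberately unaligned DMA address); no kernel APIs are involved.

#include <stdint.h>
#include <stdio.h>

int main(void)
{
        const unsigned int pg_bit = 12;          /* 4 KiB HW page: __fls(4096) == 12 */
        const uint64_t dma_addr = 0x10000100ULL; /* unaligned start within an SG entry */

        /* Offset of the address inside its 4 KiB block */
        uint64_t block_offset = dma_addr & ((1ULL << pg_bit) - 1);   /* 0x100 */
        /* Bytes to advance so the next block starts on a page boundary */
        uint64_t advance = (1ULL << pg_bit) - block_offset;          /* 0xf00 */
        /* What rdma_block_iter_dma_address() reports for this block */
        uint64_t aligned = dma_addr & ~((1ULL << pg_bit) - 1);       /* 0x10000000 */

        printf("offset=%#llx advance=%#llx aligned=%#llx\n",
               (unsigned long long)block_offset,
               (unsigned long long)advance,
               (unsigned long long)aligned);
        return 0;
}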
include/rdma/ib_verbs.h (47 additions, 0 deletions)
@@ -2726,6 +2726,21 @@ struct ib_client {
        u8 no_kverbs_req:1;
};

/*
 * IB block DMA iterator
 *
 * Iterates the DMA-mapped SGL in contiguous memory blocks aligned
 * to a HW supported page size.
 */
struct ib_block_iter {
        /* internal states */
        struct scatterlist *__sg;       /* sg holding the current aligned block */
        dma_addr_t __dma_addr;          /* unaligned DMA address of this block */
        unsigned int __sg_nents;        /* number of SG entries */
        unsigned int __sg_advance;      /* number of bytes to advance in sg in next step */
        unsigned int __pg_bit;          /* alignment of current block */
};

struct ib_device *_ib_alloc_device(size_t size);
#define ib_alloc_device(drv_struct, member) \
        container_of(_ib_alloc_device(sizeof(struct drv_struct) + \
@@ -2746,6 +2761,38 @@ void ib_unregister_device_queued(struct ib_device *ib_dev);
int ib_register_client (struct ib_client *client);
void ib_unregister_client(struct ib_client *client);

void __rdma_block_iter_start(struct ib_block_iter *biter,
                             struct scatterlist *sglist,
                             unsigned int nents,
                             unsigned long pgsz);
bool __rdma_block_iter_next(struct ib_block_iter *biter);

/**
 * rdma_block_iter_dma_address - get the aligned dma address of the current
 * block held by the block iterator.
 * @biter: block iterator holding the memory block
 */
static inline dma_addr_t
rdma_block_iter_dma_address(struct ib_block_iter *biter)
{
        return biter->__dma_addr & ~(BIT_ULL(biter->__pg_bit) - 1);
}

/**
 * rdma_for_each_block - iterate over contiguous memory blocks of the sg list
 * @sglist: sglist to iterate over
 * @biter: block iterator holding the memory block
 * @nents: maximum number of sg entries to iterate over
 * @pgsz: best HW supported page size to use
 *
 * Callers may use rdma_block_iter_dma_address() to get each
 * block's aligned DMA address.
 */
#define rdma_for_each_block(sglist, biter, nents, pgsz)         \
        for (__rdma_block_iter_start(biter, sglist, nents,      \
                                     pgsz);                     \
             __rdma_block_iter_next(biter);)

/**
 * ib_get_client_data - Get IB client context
 * @device:Device to get context for
