RDMA/core: Remove FMR device ops
After removing FMR support from all the RDMA ULPs and providers, there
is no need to keep the FMR operations for IB devices.

Link: https://lore.kernel.org/r/[email protected]
Signed-off-by: Max Gurtovoy <[email protected]>
Signed-off-by: Jason Gunthorpe <[email protected]>
Max Gurtovoy authored and jgunthorpe committed Jun 2, 2020
1 parent 22c9cc2 commit 3a57815
Showing 4 changed files with 0 additions and 113 deletions.
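
For background only (not part of this diff): before this commit, the ULPs that used FMR pools were converted to fast registration work requests (FRWR), so kernel memory registration now goes through ib_alloc_mr(), ib_map_mr_sg() and an IB_WR_REG_MR work request. A minimal sketch of that replacement path follows; the helper name, QP, access flags, page size and error handling are illustrative assumptions, not code from this commit.

#include <rdma/ib_verbs.h>

/*
 * Illustrative FRWR registration path (hypothetical helper, not from
 * this commit): allocate a registration MR, map a scatterlist into it,
 * and post a fast-registration work request on the send queue.
 */
static int example_frwr_register(struct ib_pd *pd, struct ib_qp *qp,
				 struct scatterlist *sg, int sg_nents)
{
	struct ib_reg_wr reg_wr = {};
	const struct ib_send_wr *bad_wr;
	struct ib_mr *mr;
	int n;

	/* Allocate a registration MR instead of drawing from an FMR pool. */
	mr = ib_alloc_mr(pd, IB_MR_TYPE_MEM_REG, sg_nents);
	if (IS_ERR(mr))
		return PTR_ERR(mr);

	/* Build the MR's page list from the scatterlist. */
	n = ib_map_mr_sg(mr, sg, sg_nents, NULL, PAGE_SIZE);
	if (n < sg_nents) {
		ib_dereg_mr(mr);
		return n < 0 ? n : -EINVAL;
	}

	/* Register the mapping with a fast-registration work request. */
	reg_wr.wr.opcode = IB_WR_REG_MR;
	reg_wr.mr = mr;
	reg_wr.key = mr->rkey;
	reg_wr.access = IB_ACCESS_LOCAL_WRITE | IB_ACCESS_REMOTE_READ;

	return ib_post_send(qp, &reg_wr.wr, &bad_wr);
}
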
2 changes: 0 additions & 2 deletions Documentation/infiniband/core_locking.rst
@@ -22,7 +22,6 @@ Sleeping and interrupt context
- post_recv
- poll_cq
- req_notify_cq
- map_phys_fmr

which may not sleep and must be callable from any context.

@@ -36,7 +35,6 @@ Sleeping and interrupt context
- ib_post_send
- ib_post_recv
- ib_req_notify_cq
- ib_map_phys_fmr

are therefore safe to call from any context.

4 changes: 0 additions & 4 deletions drivers/infiniband/core/device.c
@@ -2571,7 +2571,6 @@ void ib_set_device_ops(struct ib_device *dev, const struct ib_device_ops *ops)
SET_DEVICE_OP(dev_ops, add_gid);
SET_DEVICE_OP(dev_ops, advise_mr);
SET_DEVICE_OP(dev_ops, alloc_dm);
SET_DEVICE_OP(dev_ops, alloc_fmr);
SET_DEVICE_OP(dev_ops, alloc_hw_stats);
SET_DEVICE_OP(dev_ops, alloc_mr);
SET_DEVICE_OP(dev_ops, alloc_mr_integrity);
@@ -2598,7 +2597,6 @@ void ib_set_device_ops(struct ib_device *dev, const struct ib_device_ops *ops)
SET_DEVICE_OP(dev_ops, create_wq);
SET_DEVICE_OP(dev_ops, dealloc_dm);
SET_DEVICE_OP(dev_ops, dealloc_driver);
SET_DEVICE_OP(dev_ops, dealloc_fmr);
SET_DEVICE_OP(dev_ops, dealloc_mw);
SET_DEVICE_OP(dev_ops, dealloc_pd);
SET_DEVICE_OP(dev_ops, dealloc_ucontext);
@@ -2642,7 +2640,6 @@ void ib_set_device_ops(struct ib_device *dev, const struct ib_device_ops *ops)
SET_DEVICE_OP(dev_ops, iw_rem_ref);
SET_DEVICE_OP(dev_ops, map_mr_sg);
SET_DEVICE_OP(dev_ops, map_mr_sg_pi);
SET_DEVICE_OP(dev_ops, map_phys_fmr);
SET_DEVICE_OP(dev_ops, mmap);
SET_DEVICE_OP(dev_ops, mmap_free);
SET_DEVICE_OP(dev_ops, modify_ah);
@@ -2676,7 +2673,6 @@ void ib_set_device_ops(struct ib_device *dev, const struct ib_device_ops *ops)
SET_DEVICE_OP(dev_ops, resize_cq);
SET_DEVICE_OP(dev_ops, set_vf_guid);
SET_DEVICE_OP(dev_ops, set_vf_link_state);
SET_DEVICE_OP(dev_ops, unmap_fmr);

SET_OBJ_SIZE(dev_ops, ib_ah);
SET_OBJ_SIZE(dev_ops, ib_cq);
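
For reference, each SET_DEVICE_OP() line above copies one provider callback into the per-device ops table, so deleting the FMR entries means those callbacks are simply never wired up (the struct fields themselves disappear in the ib_verbs.h hunk further down). The snippet below is a paraphrased sketch of what one such line amounts to, assuming the usual copy-only-if-provider-supplies-it-and-not-already-set pattern; see drivers/infiniband/core/device.c for the authoritative macro.

#include <rdma/ib_verbs.h>

/*
 * Sketch of the effect of a single SET_DEVICE_OP(dev_ops, name) line:
 * copy the provider's callback only if the provider supplies one and
 * the field is not already set.  dealloc_mw is used here purely as an
 * example of an op that remains after this commit.
 */
static void example_copy_one_op(struct ib_device_ops *dev_ops,
				const struct ib_device_ops *ops)
{
	if (ops->dealloc_mw && !dev_ops->dealloc_mw)
		dev_ops->dealloc_mw = ops->dealloc_mw;
}
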
48 changes: 0 additions & 48 deletions drivers/infiniband/core/verbs.c
@@ -2212,54 +2212,6 @@ struct ib_mr *ib_alloc_mr_integrity(struct ib_pd *pd,
}
EXPORT_SYMBOL(ib_alloc_mr_integrity);

/* "Fast" memory regions */

struct ib_fmr *ib_alloc_fmr(struct ib_pd *pd,
int mr_access_flags,
struct ib_fmr_attr *fmr_attr)
{
struct ib_fmr *fmr;

if (!pd->device->ops.alloc_fmr)
return ERR_PTR(-EOPNOTSUPP);

fmr = pd->device->ops.alloc_fmr(pd, mr_access_flags, fmr_attr);
if (!IS_ERR(fmr)) {
fmr->device = pd->device;
fmr->pd = pd;
atomic_inc(&pd->usecnt);
}

return fmr;
}
EXPORT_SYMBOL(ib_alloc_fmr);

int ib_unmap_fmr(struct list_head *fmr_list)
{
struct ib_fmr *fmr;

if (list_empty(fmr_list))
return 0;

fmr = list_entry(fmr_list->next, struct ib_fmr, list);
return fmr->device->ops.unmap_fmr(fmr_list);
}
EXPORT_SYMBOL(ib_unmap_fmr);

int ib_dealloc_fmr(struct ib_fmr *fmr)
{
struct ib_pd *pd;
int ret;

pd = fmr->pd;
ret = fmr->device->ops.dealloc_fmr(fmr);
if (!ret)
atomic_dec(&pd->usecnt);

return ret;
}
EXPORT_SYMBOL(ib_dealloc_fmr);

/* Multicast groups */

static bool is_valid_mcast_lid(struct ib_qp *qp, u16 lid)
59 changes: 0 additions & 59 deletions include/rdma/ib_verbs.h
@@ -1475,12 +1475,6 @@ enum ib_mr_rereg_flags {
IB_MR_REREG_SUPPORTED = ((IB_MR_REREG_ACCESS << 1) - 1)
};

struct ib_fmr_attr {
int max_pages;
int max_maps;
u8 page_shift;
};

struct ib_umem;

enum rdma_remove_reason {
@@ -1855,14 +1849,6 @@ struct ib_mw {
enum ib_mw_type type;
};

struct ib_fmr {
struct ib_device *device;
struct ib_pd *pd;
struct list_head list;
u32 lkey;
u32 rkey;
};

/* Supported steering options */
enum ib_flow_attr_type {
/* steering according to rule specifications */
@@ -2505,12 +2491,6 @@ struct ib_device_ops {
struct ib_mw *(*alloc_mw)(struct ib_pd *pd, enum ib_mw_type type,
struct ib_udata *udata);
int (*dealloc_mw)(struct ib_mw *mw);
struct ib_fmr *(*alloc_fmr)(struct ib_pd *pd, int mr_access_flags,
struct ib_fmr_attr *fmr_attr);
int (*map_phys_fmr)(struct ib_fmr *fmr, u64 *page_list, int list_len,
u64 iova);
int (*unmap_fmr)(struct list_head *fmr_list);
int (*dealloc_fmr)(struct ib_fmr *fmr);
int (*attach_mcast)(struct ib_qp *qp, union ib_gid *gid, u16 lid);
int (*detach_mcast)(struct ib_qp *qp, union ib_gid *gid, u16 lid);
struct ib_xrcd *(*alloc_xrcd)(struct ib_device *device,
@@ -4319,45 +4299,6 @@ static inline u32 ib_inc_rkey(u32 rkey)
return ((rkey + 1) & mask) | (rkey & ~mask);
}

/**
* ib_alloc_fmr - Allocates a unmapped fast memory region.
* @pd: The protection domain associated with the unmapped region.
* @mr_access_flags: Specifies the memory access rights.
* @fmr_attr: Attributes of the unmapped region.
*
* A fast memory region must be mapped before it can be used as part of
* a work request.
*/
struct ib_fmr *ib_alloc_fmr(struct ib_pd *pd,
int mr_access_flags,
struct ib_fmr_attr *fmr_attr);

/**
* ib_map_phys_fmr - Maps a list of physical pages to a fast memory region.
* @fmr: The fast memory region to associate with the pages.
* @page_list: An array of physical pages to map to the fast memory region.
* @list_len: The number of pages in page_list.
* @iova: The I/O virtual address to use with the mapped region.
*/
static inline int ib_map_phys_fmr(struct ib_fmr *fmr,
u64 *page_list, int list_len,
u64 iova)
{
return fmr->device->ops.map_phys_fmr(fmr, page_list, list_len, iova);
}

/**
* ib_unmap_fmr - Removes the mapping from a list of fast memory regions.
* @fmr_list: A linked list of fast memory regions to unmap.
*/
int ib_unmap_fmr(struct list_head *fmr_list);

/**
* ib_dealloc_fmr - Deallocates a fast memory region.
* @fmr: The fast memory region to deallocate.
*/
int ib_dealloc_fmr(struct ib_fmr *fmr);

/**
* ib_attach_mcast - Attaches the specified QP to a multicast group.
* @qp: QP to attach to the multicast group. The QP must be type