Merge tag 'nfsd-5.0-1' of git://linux-nfs.org/~bfields/linux
Pull nfsd fixes from Bruce Fields:
 "Two small nfsd bugfixes for 5.0, for an RDMA bug and a file clone bug"

* tag 'nfsd-5.0-1' of git://linux-nfs.org/~bfields/linux:
  svcrdma: Remove max_sge check at connect time
  nfsd: Fix error return values for nfsd4_clone_file_range()
torvalds committed Feb 7, 2019
2 parents 8b5cdbe + e248aa7 commit ee6c073
Showing 3 changed files with 106 additions and 14 deletions.
6 changes: 4 additions & 2 deletions fs/nfsd/vfs.c
@@ -557,9 +557,11 @@ __be32 nfsd4_clone_file_range(struct file *src, u64 src_pos, struct file *dst,
 	loff_t cloned;
 
 	cloned = vfs_clone_file_range(src, src_pos, dst, dst_pos, count, 0);
+	if (cloned < 0)
+		return nfserrno(cloned);
 	if (count && cloned != count)
-		cloned = -EINVAL;
-	return nfserrno(cloned < 0 ? cloned : 0);
+		return nfserrno(-EINVAL);
+	return 0;
 }
 
 ssize_t nfsd_copy_file_range(struct file *src, u64 src_pos, struct file *dst,
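
The fix above preserves the errno coming back from vfs_clone_file_range(): in the old code a negative return value also failed the "count && cloned != count" test and was overwritten with -EINVAL before reaching nfserrno(), so the client saw NFS4ERR_INVAL instead of the real error. Below is a minimal, hypothetical user-space sketch of the two return paths; plain errnos stand in for the __be32 status codes that nfserrno() actually produces.

/* Hypothetical sketch, not kernel code: models the old and new return
 * logic of nfsd4_clone_file_range() with plain errnos.
 */
#include <errno.h>
#include <stdio.h>

static long old_logic(long cloned, unsigned long count)
{
	if (count && cloned != count)
		cloned = -EINVAL;	/* clobbers a real error from the VFS */
	return cloned < 0 ? cloned : 0;
}

static long new_logic(long cloned, unsigned long count)
{
	if (cloned < 0)
		return cloned;		/* report the real error */
	if (count && cloned != count)
		return -EINVAL;		/* short clone */
	return 0;
}

int main(void)
{
	/* Suppose vfs_clone_file_range() fails a 4096-byte request with -EXDEV. */
	printf("old: %ld, new: %ld\n",
	       old_logic(-EXDEV, 4096), new_logic(-EXDEV, 4096));
	return 0;
}

With the old logic the -EXDEV is reported as -EINVAL; with the new logic it is passed through unchanged.
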
105 changes: 99 additions & 6 deletions net/sunrpc/xprtrdma/svc_rdma_sendto.c
@@ -537,6 +537,99 @@ void svc_rdma_sync_reply_hdr(struct svcxprt_rdma *rdma,
 				      DMA_TO_DEVICE);
 }
 
+/* If the xdr_buf has more elements than the device can
+ * transmit in a single RDMA Send, then the reply will
+ * have to be copied into a bounce buffer.
+ */
+static bool svc_rdma_pull_up_needed(struct svcxprt_rdma *rdma,
+				    struct xdr_buf *xdr,
+				    __be32 *wr_lst)
+{
+	int elements;
+
+	/* xdr->head */
+	elements = 1;
+
+	/* xdr->pages */
+	if (!wr_lst) {
+		unsigned int remaining;
+		unsigned long pageoff;
+
+		pageoff = xdr->page_base & ~PAGE_MASK;
+		remaining = xdr->page_len;
+		while (remaining) {
+			++elements;
+			remaining -= min_t(u32, PAGE_SIZE - pageoff,
+					   remaining);
+			pageoff = 0;
+		}
+	}
+
+	/* xdr->tail */
+	if (xdr->tail[0].iov_len)
+		++elements;
+
+	/* assume 1 SGE is needed for the transport header */
+	return elements >= rdma->sc_max_send_sges;
+}
+
+/* The device is not capable of sending the reply directly.
+ * Assemble the elements of @xdr into the transport header
+ * buffer.
+ */
+static int svc_rdma_pull_up_reply_msg(struct svcxprt_rdma *rdma,
+				      struct svc_rdma_send_ctxt *ctxt,
+				      struct xdr_buf *xdr, __be32 *wr_lst)
+{
+	unsigned char *dst, *tailbase;
+	unsigned int taillen;
+
+	dst = ctxt->sc_xprt_buf;
+	dst += ctxt->sc_sges[0].length;
+
+	memcpy(dst, xdr->head[0].iov_base, xdr->head[0].iov_len);
+	dst += xdr->head[0].iov_len;
+
+	tailbase = xdr->tail[0].iov_base;
+	taillen = xdr->tail[0].iov_len;
+	if (wr_lst) {
+		u32 xdrpad;
+
+		xdrpad = xdr_padsize(xdr->page_len);
+		if (taillen && xdrpad) {
+			tailbase += xdrpad;
+			taillen -= xdrpad;
+		}
+	} else {
+		unsigned int len, remaining;
+		unsigned long pageoff;
+		struct page **ppages;
+
+		ppages = xdr->pages + (xdr->page_base >> PAGE_SHIFT);
+		pageoff = xdr->page_base & ~PAGE_MASK;
+		remaining = xdr->page_len;
+		while (remaining) {
+			len = min_t(u32, PAGE_SIZE - pageoff, remaining);
+
+			memcpy(dst, page_address(*ppages), len);
+			remaining -= len;
+			dst += len;
+			pageoff = 0;
+		}
+	}
+
+	if (taillen)
+		memcpy(dst, tailbase, taillen);
+
+	ctxt->sc_sges[0].length += xdr->len;
+	ib_dma_sync_single_for_device(rdma->sc_pd->device,
+				      ctxt->sc_sges[0].addr,
+				      ctxt->sc_sges[0].length,
+				      DMA_TO_DEVICE);
+
+	return 0;
+}
+
/* svc_rdma_map_reply_msg - Map the buffer holding RPC message
* @rdma: controlling transport
* @ctxt: send_ctxt for the Send WR
@@ -559,8 +652,10 @@ int svc_rdma_map_reply_msg(struct svcxprt_rdma *rdma,
 	u32 xdr_pad;
 	int ret;
 
-	if (++ctxt->sc_cur_sge_no >= rdma->sc_max_send_sges)
-		return -EIO;
+	if (svc_rdma_pull_up_needed(rdma, xdr, wr_lst))
+		return svc_rdma_pull_up_reply_msg(rdma, ctxt, xdr, wr_lst);
+
+	++ctxt->sc_cur_sge_no;
 	ret = svc_rdma_dma_map_buf(rdma, ctxt,
 				   xdr->head[0].iov_base,
 				   xdr->head[0].iov_len);
@@ -591,8 +686,7 @@ int svc_rdma_map_reply_msg(struct svcxprt_rdma *rdma,
 	while (remaining) {
 		len = min_t(u32, PAGE_SIZE - page_off, remaining);
 
-		if (++ctxt->sc_cur_sge_no >= rdma->sc_max_send_sges)
-			return -EIO;
+		++ctxt->sc_cur_sge_no;
 		ret = svc_rdma_dma_map_page(rdma, ctxt, *ppages++,
 					    page_off, len);
 		if (ret < 0)
@@ -606,8 +700,7 @@ int svc_rdma_map_reply_msg(struct svcxprt_rdma *rdma,
 	len = xdr->tail[0].iov_len;
 tail:
 	if (len) {
-		if (++ctxt->sc_cur_sge_no >= rdma->sc_max_send_sges)
-			return -EIO;
+		++ctxt->sc_cur_sge_no;
 		ret = svc_rdma_dma_map_buf(rdma, ctxt, base, len);
 		if (ret < 0)
 			return ret;
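
Taken together, these hunks replace the hard -EIO failure with a fallback: svc_rdma_map_reply_msg() now asks svc_rdma_pull_up_needed() whether the reply's xdr_buf would need more SGEs than the device provides, and if so copies the whole message into the transport header buffer via svc_rdma_pull_up_reply_msg() instead of DMA-mapping each element. A rough, hypothetical user-space illustration of the SGE counting follows; the page counts and device limit are made-up values, and 4 KiB pages with a page-aligned page_base are assumed.

/* Hypothetical sketch, not kernel code: counts the SGEs a reply would
 * need (head, plus one per page of page-list data when there is no
 * Write list, plus the tail) and decides whether pull-up is required,
 * mirroring svc_rdma_pull_up_needed(). One SGE is assumed to be
 * reserved for the transport header, hence the ">=" comparison.
 */
#include <stdbool.h>
#include <stdio.h>

#define PAGE_SIZE 4096u

static bool pull_up_needed(unsigned int page_len, unsigned int tail_len,
			   unsigned int max_send_sges)
{
	unsigned int elements = 1;	/* xdr->head */

	elements += (page_len + PAGE_SIZE - 1) / PAGE_SIZE;	/* xdr->pages */
	if (tail_len)
		elements++;		/* xdr->tail */
	return elements >= max_send_sges;
}

int main(void)
{
	/* Assumed reply: 5 pages of data plus a tail, device limit of 6 SGEs:
	 * 1 + 5 + 1 = 7 >= 6, so the reply is pulled up. */
	printf("pull up? %s\n",
	       pull_up_needed(5 * PAGE_SIZE, 4, 6) ? "yes" : "no");
	return 0;
}
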
9 changes: 3 additions & 6 deletions net/sunrpc/xprtrdma/svc_rdma_transport.c
@@ -419,12 +419,9 @@ static struct svc_xprt *svc_rdma_accept(struct svc_xprt *xprt)
 	/* Transport header, head iovec, tail iovec */
 	newxprt->sc_max_send_sges = 3;
 	/* Add one SGE per page list entry */
-	newxprt->sc_max_send_sges += svcrdma_max_req_size / PAGE_SIZE;
-	if (newxprt->sc_max_send_sges > dev->attrs.max_send_sge) {
-		pr_err("svcrdma: too few Send SGEs available (%d needed)\n",
-		       newxprt->sc_max_send_sges);
-		goto errout;
-	}
+	newxprt->sc_max_send_sges += (svcrdma_max_req_size / PAGE_SIZE) + 1;
+	if (newxprt->sc_max_send_sges > dev->attrs.max_send_sge)
+		newxprt->sc_max_send_sges = dev->attrs.max_send_sge;
 	newxprt->sc_max_req_size = svcrdma_max_req_size;
 	newxprt->sc_max_requests = svcrdma_max_requests;
 	newxprt->sc_max_bc_requests = svcrdma_max_bc_requests;
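
Instead of rejecting the connection when the device advertises fewer Send SGEs than the worst case requires, the accept path now sizes the SGE budget for a payload that may straddle one extra page and clamps it to the device limit, relying on the pull-up path added in svc_rdma_sendto.c for oversized replies. Below is a small, hypothetical worked example of the arithmetic; the 16 KiB svcrdma_max_req_size and the 6-SGE device limit are assumptions for illustration, not the module's actual defaults.

/* Hypothetical sketch, not kernel code: the new sc_max_send_sges
 * computation with made-up numbers.
 */
#include <stdio.h>

int main(void)
{
	unsigned int page_size = 4096;
	unsigned int max_req_size = 16384;	/* assumed svcrdma_max_req_size */
	unsigned int dev_max_send_sge = 6;	/* assumed device attribute */

	/* Transport header, head iovec, tail iovec ... */
	unsigned int max_send_sges = 3;
	/* ... plus one SGE per page list entry, plus one more (presumably
	 * to cover a payload that does not start on a page boundary). */
	max_send_sges += max_req_size / page_size + 1;	/* 3 + 4 + 1 = 8 */

	/* Clamp to the device limit instead of failing the accept;
	 * oversized replies will take the pull-up path. */
	if (max_send_sges > dev_max_send_sge)
		max_send_sges = dev_max_send_sge;

	printf("sc_max_send_sges = %u\n", max_send_sges);	/* prints 6 */
	return 0;
}
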
