Skip to content

Commit

Permalink
io_uring/kbuf: recycle freed mapped buffer ring entries
Browse files Browse the repository at this point in the history
Right now we stash any potentially mmap'ed provided ring buffer range
for freeing at release time, regardless of when they get unregistered.
Since we're keeping track of these ranges anyway, keep track of their
registration state as well, and use that to recycle ranges when
appropriate rather than always allocating new ones.

The lookup is a basic scan of entries, checking for the best matching
free entry.

Fixes: c392cbe ("io_uring/kbuf: defer release of mapped buffer rings")
Signed-off-by: Jens Axboe <[email protected]>
  • Loading branch information
axboe committed Nov 28, 2023
1 parent c392cbe commit b10b73c
Showing 1 changed file with 66 additions and 11 deletions.
77 changes: 66 additions & 11 deletions io_uring/kbuf.c
Original file line number Diff line number Diff line change
Expand Up @@ -36,6 +36,8 @@ struct io_provide_buf {
/*
 * One deferred-free record for a mapped provided buffer ring region.
 * Entries live on ctx->io_buf_list and are only released at ring
 * ->release() time; an unregistered region is marked !inuse so a later
 * registration can recycle it instead of allocating a new one.
 */
struct io_buf_free {
struct hlist_node list;		/* link in ctx->io_buf_list */
void *mem;			/* the mapped ring memory region */
size_t size;			/* allocated size of 'mem', used for reuse matching */
int inuse;			/* non-zero while a registered ring uses 'mem' */
};

static inline struct io_buffer_list *io_buffer_get_list(struct io_ring_ctx *ctx,
Expand Down Expand Up @@ -216,6 +218,24 @@ static __cold int io_init_bl_list(struct io_ring_ctx *ctx)
return 0;
}

/*
 * Mark the given mapped range as free for reuse
 */
static void io_kbuf_mark_free(struct io_ring_ctx *ctx, struct io_buffer_list *bl)
{
	struct io_buf_free *entry;

	hlist_for_each_entry(entry, &ctx->io_buf_list, list) {
		if (entry->mem != bl->buf_ring)
			continue;
		/* Region stays on the list for release-time freeing, but may
		 * now be handed out again by io_lookup_buf_free_entry().
		 */
		entry->inuse = 0;
		return;
	}

	/* every mmap'ed ring must have a list entry - this is a bug */
	WARN_ON_ONCE(1);
}

static int __io_remove_buffers(struct io_ring_ctx *ctx,
struct io_buffer_list *bl, unsigned nbufs)
{
Expand All @@ -232,6 +252,7 @@ static int __io_remove_buffers(struct io_ring_ctx *ctx,
* io_kbuf_list_free() will free the page(s) at
* ->release() time.
*/
io_kbuf_mark_free(ctx, bl);
bl->buf_ring = NULL;
bl->is_mmap = 0;
} else if (bl->buf_nr_pages) {
Expand Down Expand Up @@ -539,6 +560,34 @@ static int io_pin_pbuf_ring(struct io_uring_buf_reg *reg,
return -EINVAL;
}

/*
 * See if we have a suitable region that we can reuse, rather than allocate
 * both a new io_buf_free and mem region again. We leave it on the list as
 * even a reused entry will need freeing at ring release.
 */
static struct io_buf_free *io_lookup_buf_free_entry(struct io_ring_ctx *ctx,
						    size_t ring_size)
{
	struct io_buf_free *entry, *best_match = NULL;
	size_t best_slack = 0;

	/* Best-fit scan: pick the free region wasting the least space. */
	hlist_for_each_entry(entry, &ctx->io_buf_list, list) {
		size_t slack;

		if (entry->inuse)
			continue;
		if (entry->size < ring_size)
			continue;
		slack = entry->size - ring_size;
		if (best_match && slack >= best_slack)
			continue;
		best_match = entry;
		/* exact fit - no point scanning further */
		if (!slack)
			break;
		best_slack = slack;
	}

	return best_match;
}

static int io_alloc_pbuf_ring(struct io_ring_ctx *ctx,
struct io_uring_buf_reg *reg,
struct io_buffer_list *bl)
Expand All @@ -548,20 +597,26 @@ static int io_alloc_pbuf_ring(struct io_ring_ctx *ctx,
void *ptr;

ring_size = reg->ring_entries * sizeof(struct io_uring_buf_ring);
ptr = io_mem_alloc(ring_size);
if (!ptr)
return -ENOMEM;

/* Allocate and store deferred free entry */
ibf = kmalloc(sizeof(*ibf), GFP_KERNEL_ACCOUNT);
/* Reuse existing entry, if we can */
ibf = io_lookup_buf_free_entry(ctx, ring_size);
if (!ibf) {
io_mem_free(ptr);
return -ENOMEM;
}
ibf->mem = ptr;
hlist_add_head(&ibf->list, &ctx->io_buf_list);
ptr = io_mem_alloc(ring_size);
if (!ptr)
return -ENOMEM;

bl->buf_ring = ptr;
/* Allocate and store deferred free entry */
ibf = kmalloc(sizeof(*ibf), GFP_KERNEL_ACCOUNT);
if (!ibf) {
io_mem_free(ptr);
return -ENOMEM;
}
ibf->mem = ptr;
ibf->size = ring_size;
hlist_add_head(&ibf->list, &ctx->io_buf_list);
}
ibf->inuse = 1;
bl->buf_ring = ibf->mem;
bl->is_mapped = 1;
bl->is_mmap = 1;
return 0;
Expand Down

0 comments on commit b10b73c

Please sign in to comment.