io_uring: revert "io_uring: optimize submit_and_wait API"
There are cases where it isn't safe to block for submission, even if
the caller asked to wait for events as well. Revert the previous
optimization that did so.

This reverts two commits:

bf7ec93
c576666

Fixes: c576666 ("io_uring: optimize submit_and_wait API")
Signed-off-by: Jens Axboe <[email protected]>
axboe committed Oct 24, 2019
1 parent 7d194c2 commit bc808bc
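
The revert removes the fast path where the final SQE of a batch could be issued blocking when the caller was about to wait for completions anyway. A minimal userspace sketch of that submit-and-wait pattern follows; it is illustrative only and not part of the commit. The raw syscall wrapper and the ring_fd/to_submit/min_complete parameters are assumed to come from a ring that was already set up with io_uring_setup() and whose SQEs have been filled in by the caller.

#include <linux/io_uring.h>	/* IORING_ENTER_GETEVENTS */
#include <sys/syscall.h>	/* __NR_io_uring_enter */
#include <unistd.h>		/* syscall() */

/*
 * Submit 'to_submit' prepared SQEs and wait for at least 'min_complete'
 * completions in a single io_uring_enter() call. Before this revert, the
 * kernel could issue the last SQE of such a batch with blocking allowed,
 * since the caller was going to wait anyway; after it, every SQE on this
 * path is issued non-blocking and punted to async context if it would block.
 */
static int submit_and_wait(int ring_fd, unsigned int to_submit,
			   unsigned int min_complete)
{
	return syscall(__NR_io_uring_enter, ring_fd, to_submit, min_complete,
		       IORING_ENTER_GETEVENTS, NULL, 0);
}
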
63 changes: 17 additions & 46 deletions fs/io_uring.c
@@ -2292,11 +2292,11 @@ static int io_req_set_file(struct io_ring_ctx *ctx, const struct sqe_submit *s,
}

static int __io_queue_sqe(struct io_ring_ctx *ctx, struct io_kiocb *req,
struct sqe_submit *s, bool force_nonblock)
struct sqe_submit *s)
{
int ret;

ret = __io_submit_sqe(ctx, req, s, force_nonblock);
ret = __io_submit_sqe(ctx, req, s, true);

/*
* We async punt it if the file wasn't marked NOWAIT, or if the file
@@ -2343,7 +2343,7 @@ static int __io_queue_sqe(struct io_ring_ctx *ctx, struct io_kiocb *req,
}

static int io_queue_sqe(struct io_ring_ctx *ctx, struct io_kiocb *req,
struct sqe_submit *s, bool force_nonblock)
struct sqe_submit *s)
{
int ret;

@@ -2356,18 +2356,17 @@ static int io_queue_sqe(struct io_ring_ctx *ctx, struct io_kiocb *req,
return 0;
}

return __io_queue_sqe(ctx, req, s, force_nonblock);
return __io_queue_sqe(ctx, req, s);
}

static int io_queue_link_head(struct io_ring_ctx *ctx, struct io_kiocb *req,
struct sqe_submit *s, struct io_kiocb *shadow,
bool force_nonblock)
struct sqe_submit *s, struct io_kiocb *shadow)
{
int ret;
int need_submit = false;

if (!shadow)
return io_queue_sqe(ctx, req, s, force_nonblock);
return io_queue_sqe(ctx, req, s);

/*
* Mark the first IO in link list as DRAIN, let all the following
@@ -2396,16 +2395,15 @@ static int io_queue_link_head(struct io_ring_ctx *ctx, struct io_kiocb *req,
spin_unlock_irq(&ctx->completion_lock);

if (need_submit)
return __io_queue_sqe(ctx, req, s, force_nonblock);
return __io_queue_sqe(ctx, req, s);

return 0;
}

#define SQE_VALID_FLAGS (IOSQE_FIXED_FILE|IOSQE_IO_DRAIN|IOSQE_IO_LINK)

static void io_submit_sqe(struct io_ring_ctx *ctx, struct sqe_submit *s,
struct io_submit_state *state, struct io_kiocb **link,
bool force_nonblock)
struct io_submit_state *state, struct io_kiocb **link)
{
struct io_uring_sqe *sqe_copy;
struct io_kiocb *req;
@@ -2458,7 +2456,7 @@ static void io_submit_sqe(struct io_ring_ctx *ctx, struct sqe_submit *s,
INIT_LIST_HEAD(&req->link_list);
*link = req;
} else {
io_queue_sqe(ctx, req, s, force_nonblock);
io_queue_sqe(ctx, req, s);
}
}

@@ -2562,8 +2560,7 @@ static int io_submit_sqes(struct io_ring_ctx *ctx, struct sqe_submit *sqes,
* that's the end of the chain. Submit the previous link.
*/
if (!prev_was_link && link) {
io_queue_link_head(ctx, link, &link->submit, shadow_req,
true);
io_queue_link_head(ctx, link, &link->submit, shadow_req);
link = NULL;
shadow_req = NULL;
}
@@ -2588,13 +2585,13 @@ static int io_submit_sqes(struct io_ring_ctx *ctx, struct sqe_submit *sqes,
sqes[i].has_user = has_user;
sqes[i].needs_lock = true;
sqes[i].needs_fixed_file = true;
io_submit_sqe(ctx, &sqes[i], statep, &link, true);
io_submit_sqe(ctx, &sqes[i], statep, &link);
submitted++;
}
}

if (link)
io_queue_link_head(ctx, link, &link->submit, shadow_req, true);
io_queue_link_head(ctx, link, &link->submit, shadow_req);
if (statep)
io_submit_state_end(&state);

@@ -2726,8 +2723,7 @@ static int io_sq_thread(void *data)
return 0;
}

static int io_ring_submit(struct io_ring_ctx *ctx, unsigned int to_submit,
bool block_for_last)
static int io_ring_submit(struct io_ring_ctx *ctx, unsigned int to_submit)
{
struct io_submit_state state, *statep = NULL;
struct io_kiocb *link = NULL;
@@ -2741,7 +2737,6 @@ static int io_ring_submit(struct io_ring_ctx *ctx, unsigned int to_submit,
}

for (i = 0; i < to_submit; i++) {
bool force_nonblock = true;
struct sqe_submit s;

if (!io_get_sqring(ctx, &s))
@@ -2752,8 +2747,7 @@ static int io_ring_submit(struct io_ring_ctx *ctx, unsigned int to_submit,
* that's the end of the chain. Submit the previous link.
*/
if (!prev_was_link && link) {
io_queue_link_head(ctx, link, &link->submit, shadow_req,
force_nonblock);
io_queue_link_head(ctx, link, &link->submit, shadow_req);
link = NULL;
shadow_req = NULL;
}
@@ -2775,24 +2769,12 @@ static int io_ring_submit(struct io_ring_ctx *ctx, unsigned int to_submit,
s.needs_lock = false;
s.needs_fixed_file = false;
submit++;

/*
* The caller will block for events after submit, submit the
* last IO non-blocking. This is either the only IO it's
* submitting, or it already submitted the previous ones. This
* improves performance by avoiding an async punt that we don't
* need to do.
*/
if (block_for_last && submit == to_submit)
force_nonblock = false;

io_submit_sqe(ctx, &s, statep, &link, force_nonblock);
io_submit_sqe(ctx, &s, statep, &link);
}
io_commit_sqring(ctx);

if (link)
io_queue_link_head(ctx, link, &link->submit, shadow_req,
!block_for_last);
io_queue_link_head(ctx, link, &link->submit, shadow_req);
if (statep)
io_submit_state_end(statep);

@@ -3636,21 +3618,10 @@ SYSCALL_DEFINE6(io_uring_enter, unsigned int, fd, u32, to_submit,
wake_up(&ctx->sqo_wait);
submitted = to_submit;
} else if (to_submit) {
bool block_for_last = false;

to_submit = min(to_submit, ctx->sq_entries);

/*
* Allow last submission to block in a series, IFF the caller
* asked to wait for events and we don't currently have
* enough. This potentially avoids an async punt.
*/
if (to_submit == min_complete &&
io_cqring_events(ctx->rings) < min_complete)
block_for_last = true;

mutex_lock(&ctx->uring_lock);
submitted = io_ring_submit(ctx, to_submit, block_for_last);
submitted = io_ring_submit(ctx, to_submit);
mutex_unlock(&ctx->uring_lock);
}
if (flags & IORING_ENTER_GETEVENTS) {
