epoll: simplify ep_poll_safewake() for CONFIG_DEBUG_LOCK_ALLOC
Currently, ep_poll_safewake() in the CONFIG_DEBUG_LOCK_ALLOC case uses
ep_call_nested() in order to pass the correct subclass argument to
spin_lock_irqsave_nested().  However, ep_call_nested() adds unnecessary
checks for epoll depth and loops that are already verified when doing
EPOLL_CTL_ADD.  This mirrors a conversion that was done for
!CONFIG_DEBUG_LOCK_ALLOC in: commit 37b5e52 ("epoll: remove
ep_call_nested() from ep_eventpoll_poll()")

Link: http://lkml.kernel.org/r/[email protected]
Signed-off-by: Jason Baron <[email protected]>
Reviewed-by: Roman Penyaev <[email protected]>
Cc: Davidlohr Bueso <[email protected]>
Cc: Al Viro <[email protected]>
Cc: Eric Wong <[email protected]>
Signed-off-by: Andrew Morton <[email protected]>
Signed-off-by: Linus Torvalds <[email protected]>
almostivan authored and torvalds committed Dec 5, 2019
1 parent cd28b11 commit f6520c5
Showing 1 changed file with 13 additions and 23 deletions.
fs/eventpoll.c
@@ -551,28 +551,23 @@ static int ep_call_nested(struct nested_calls *ncalls,
  */
 #ifdef CONFIG_DEBUG_LOCK_ALLOC
 
-static struct nested_calls poll_safewake_ncalls;
-
-static int ep_poll_wakeup_proc(void *priv, void *cookie, int call_nests)
-{
-	unsigned long flags;
-	wait_queue_head_t *wqueue = (wait_queue_head_t *)cookie;
-
-	spin_lock_irqsave_nested(&wqueue->lock, flags, call_nests + 1);
-	wake_up_locked_poll(wqueue, EPOLLIN);
-	spin_unlock_irqrestore(&wqueue->lock, flags);
-
-	return 0;
-}
+static DEFINE_PER_CPU(int, wakeup_nest);
 
 static void ep_poll_safewake(wait_queue_head_t *wq)
 {
-	int this_cpu = get_cpu();
-
-	ep_call_nested(&poll_safewake_ncalls,
-		       ep_poll_wakeup_proc, NULL, wq, (void *) (long) this_cpu);
-
-	put_cpu();
+	unsigned long flags;
+	int subclass;
+
+	local_irq_save(flags);
+	preempt_disable();
+	subclass = __this_cpu_read(wakeup_nest);
+	spin_lock_nested(&wq->lock, subclass + 1);
+	__this_cpu_inc(wakeup_nest);
+	wake_up_locked_poll(wq, POLLIN);
+	__this_cpu_dec(wakeup_nest);
+	spin_unlock(&wq->lock);
+	local_irq_restore(flags);
+	preempt_enable();
 }
 
 #else
@@ -2370,11 +2365,6 @@ static int __init eventpoll_init(void)
 	 */
 	ep_nested_calls_init(&poll_loop_ncalls);
 
-#ifdef CONFIG_DEBUG_LOCK_ALLOC
-	/* Initialize the structure used to perform safe poll wait head wake ups */
-	ep_nested_calls_init(&poll_safewake_ncalls);
-#endif
-
 	/*
 	 * We can have many thousands of epitems, so prevent this from
 	 * using an extra cache line on 64-bit (and smaller) CPUs
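For illustration only (not part of the commit): a minimal userspace sketch of the nesting-counter pattern the new ep_poll_safewake() uses. A C11 _Thread_local int stands in for the kernel's DEFINE_PER_CPU(int, wakeup_nest), and a printf marks where spin_lock_nested(&wq->lock, subclass + 1) would run; safewake() and depth_left are hypothetical names for this sketch.

/*
 * Userspace analogue: each recursive "wakeup" reads the nesting
 * counter to pick a lock subclass, bumps it across the critical
 * section, and restores it on the way out -- the same shape as
 * the new ep_poll_safewake().  Distinct subclasses per nesting
 * level are what keep lockdep from flagging the nested lock.
 */
#include <stdio.h>

static _Thread_local int wakeup_nest;

static void safewake(int depth_left)
{
	int subclass = wakeup_nest;

	printf("depth %d: lock with subclass %d\n", wakeup_nest, subclass + 1);
	wakeup_nest++;
	if (depth_left > 0)
		safewake(depth_left - 1);	/* an epoll fd waking a nested epoll fd */
	wakeup_nest--;
	printf("depth %d: unlock\n", wakeup_nest);
}

int main(void)
{
	safewake(2);	/* three nested wakeups: subclasses 1, 2, 3 */
	return 0;
}

In the real kernel code, preempt_disable() keeps the __this_cpu accesses and the lock acquisition on one CPU, and local_irq_save() is needed because wait-queue wakeups can also come from interrupt context; the userspace sketch needs neither.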
