kthread: Convert worker lock to raw spinlock
In order to enable the queuing of kthread work items from hardirq context
even when PREEMPT_RT_FULL is enabled, convert the worker spin_lock to a
raw_spin_lock.

This is acceptable only because the work performed under the lock is
well-bounded and minimal.

Reported-by: Steffen Trumtrar <[email protected]>
Reported-by: Tim Sander <[email protected]>
Signed-off-by: Julia Cartwright <[email protected]>
Signed-off-by: Sebastian Andrzej Siewior <[email protected]>
Signed-off-by: Thomas Gleixner <[email protected]>
Tested-by: Steffen Trumtrar <[email protected]>
Reviewed-by: Petr Mladek <[email protected]>
Cc: Guenter Roeck <[email protected]>
Link: https://lkml.kernel.org/r/[email protected]
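
The usage pattern this change enables is queuing kthread work directly from a
hardirq handler on PREEMPT_RT_FULL. A minimal sketch of that pattern follows;
the demo_* names are hypothetical and are not part of this commit:

	#include <linux/err.h>
	#include <linux/interrupt.h>
	#include <linux/kthread.h>

	static struct kthread_worker *demo_worker;
	static struct kthread_work demo_work;

	/* Deferred processing; runs later in the worker thread, in preemptible context. */
	static void demo_work_fn(struct kthread_work *work)
	{
		/* ... */
	}

	/*
	 * Hardirq handler: with worker->lock now a raw_spinlock_t, this
	 * queuing is valid even on PREEMPT_RT_FULL.
	 */
	static irqreturn_t demo_irq_handler(int irq, void *dev_id)
	{
		kthread_queue_work(demo_worker, &demo_work);
		return IRQ_HANDLED;
	}

	static int demo_setup(void)
	{
		demo_worker = kthread_create_worker(0, "demo-worker");
		if (IS_ERR(demo_worker))
			return PTR_ERR(demo_worker);
		kthread_init_work(&demo_work, demo_work_fn);
		/* request_irq(irq, demo_irq_handler, 0, "demo", NULL) would follow. */
		return 0;
	}

kthread_queue_work() takes worker->lock with interrupts disabled for only a
short list manipulation, which is why the raw lock conversion is acceptable as
described above.
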
Julia Cartwright authored and KAGA-KOKO committed Feb 28, 2019
1 parent c89d92e commit fe99a4f
Showing 2 changed files with 23 additions and 23 deletions.
include/linux/kthread.h (4 changes: 2 additions & 2 deletions)

@@ -85,7 +85,7 @@ enum {
 
 struct kthread_worker {
 	unsigned int flags;
-	spinlock_t lock;
+	raw_spinlock_t lock;
 	struct list_head work_list;
 	struct list_head delayed_work_list;
 	struct task_struct *task;
@@ -106,7 +106,7 @@ struct kthread_delayed_work {
 };
 
 #define KTHREAD_WORKER_INIT(worker)	{				\
-	.lock = __SPIN_LOCK_UNLOCKED((worker).lock),			\
+	.lock = __RAW_SPIN_LOCK_UNLOCKED((worker).lock),		\
 	.work_list = LIST_HEAD_INIT((worker).work_list),		\
 	.delayed_work_list = LIST_HEAD_INIT((worker).delayed_work_list),\
 	}
kernel/kthread.c (42 changes: 21 additions & 21 deletions)

@@ -599,7 +599,7 @@ void __kthread_init_worker(struct kthread_worker *worker,
 				struct lock_class_key *key)
 {
 	memset(worker, 0, sizeof(struct kthread_worker));
-	spin_lock_init(&worker->lock);
+	raw_spin_lock_init(&worker->lock);
 	lockdep_set_class_and_name(&worker->lock, key, name);
 	INIT_LIST_HEAD(&worker->work_list);
 	INIT_LIST_HEAD(&worker->delayed_work_list);
@@ -641,21 +641,21 @@ int kthread_worker_fn(void *worker_ptr)
 
 	if (kthread_should_stop()) {
 		__set_current_state(TASK_RUNNING);
-		spin_lock_irq(&worker->lock);
+		raw_spin_lock_irq(&worker->lock);
 		worker->task = NULL;
-		spin_unlock_irq(&worker->lock);
+		raw_spin_unlock_irq(&worker->lock);
 		return 0;
 	}
 
 	work = NULL;
-	spin_lock_irq(&worker->lock);
+	raw_spin_lock_irq(&worker->lock);
 	if (!list_empty(&worker->work_list)) {
 		work = list_first_entry(&worker->work_list,
 					struct kthread_work, node);
 		list_del_init(&work->node);
 	}
 	worker->current_work = work;
-	spin_unlock_irq(&worker->lock);
+	raw_spin_unlock_irq(&worker->lock);
 
 	if (work) {
 		__set_current_state(TASK_RUNNING);
@@ -812,12 +812,12 @@ bool kthread_queue_work(struct kthread_worker *worker,
 	bool ret = false;
 	unsigned long flags;
 
-	spin_lock_irqsave(&worker->lock, flags);
+	raw_spin_lock_irqsave(&worker->lock, flags);
 	if (!queuing_blocked(worker, work)) {
 		kthread_insert_work(worker, work, &worker->work_list);
 		ret = true;
 	}
-	spin_unlock_irqrestore(&worker->lock, flags);
+	raw_spin_unlock_irqrestore(&worker->lock, flags);
 	return ret;
 }
 EXPORT_SYMBOL_GPL(kthread_queue_work);
@@ -843,7 +843,7 @@ void kthread_delayed_work_timer_fn(struct timer_list *t)
 	if (WARN_ON_ONCE(!worker))
 		return;
 
-	spin_lock(&worker->lock);
+	raw_spin_lock(&worker->lock);
 	/* Work must not be used with >1 worker, see kthread_queue_work(). */
 	WARN_ON_ONCE(work->worker != worker);
 
@@ -852,7 +852,7 @@ void kthread_delayed_work_timer_fn(struct timer_list *t)
 	list_del_init(&work->node);
 	kthread_insert_work(worker, work, &worker->work_list);
 
-	spin_unlock(&worker->lock);
+	raw_spin_unlock(&worker->lock);
 }
 EXPORT_SYMBOL(kthread_delayed_work_timer_fn);
 
@@ -908,14 +908,14 @@ bool kthread_queue_delayed_work(struct kthread_worker *worker,
 	unsigned long flags;
 	bool ret = false;
 
-	spin_lock_irqsave(&worker->lock, flags);
+	raw_spin_lock_irqsave(&worker->lock, flags);
 
 	if (!queuing_blocked(worker, work)) {
 		__kthread_queue_delayed_work(worker, dwork, delay);
 		ret = true;
 	}
 
-	spin_unlock_irqrestore(&worker->lock, flags);
+	raw_spin_unlock_irqrestore(&worker->lock, flags);
 	return ret;
 }
 EXPORT_SYMBOL_GPL(kthread_queue_delayed_work);
@@ -951,7 +951,7 @@ void kthread_flush_work(struct kthread_work *work)
 	if (!worker)
 		return;
 
-	spin_lock_irq(&worker->lock);
+	raw_spin_lock_irq(&worker->lock);
 	/* Work must not be used with >1 worker, see kthread_queue_work(). */
 	WARN_ON_ONCE(work->worker != worker);
 
@@ -963,7 +963,7 @@ void kthread_flush_work(struct kthread_work *work)
 	else
 		noop = true;
 
-	spin_unlock_irq(&worker->lock);
+	raw_spin_unlock_irq(&worker->lock);
 
 	if (!noop)
 		wait_for_completion(&fwork.done);
@@ -996,9 +996,9 @@ static bool __kthread_cancel_work(struct kthread_work *work, bool is_dwork,
 	 * any queuing is blocked by setting the canceling counter.
 	 */
 	work->canceling++;
-	spin_unlock_irqrestore(&worker->lock, *flags);
+	raw_spin_unlock_irqrestore(&worker->lock, *flags);
 	del_timer_sync(&dwork->timer);
-	spin_lock_irqsave(&worker->lock, *flags);
+	raw_spin_lock_irqsave(&worker->lock, *flags);
 	work->canceling--;
 }
 
@@ -1045,7 +1045,7 @@ bool kthread_mod_delayed_work(struct kthread_worker *worker,
 	unsigned long flags;
 	int ret = false;
 
-	spin_lock_irqsave(&worker->lock, flags);
+	raw_spin_lock_irqsave(&worker->lock, flags);
 
 	/* Do not bother with canceling when never queued. */
 	if (!work->worker)
@@ -1062,7 +1062,7 @@ bool kthread_mod_delayed_work(struct kthread_worker *worker,
 fast_queue:
 	__kthread_queue_delayed_work(worker, dwork, delay);
 out:
-	spin_unlock_irqrestore(&worker->lock, flags);
+	raw_spin_unlock_irqrestore(&worker->lock, flags);
 	return ret;
 }
 EXPORT_SYMBOL_GPL(kthread_mod_delayed_work);
@@ -1076,7 +1076,7 @@ static bool __kthread_cancel_work_sync(struct kthread_work *work, bool is_dwork)
 	if (!worker)
 		goto out;
 
-	spin_lock_irqsave(&worker->lock, flags);
+	raw_spin_lock_irqsave(&worker->lock, flags);
 	/* Work must not be used with >1 worker, see kthread_queue_work(). */
 	WARN_ON_ONCE(work->worker != worker);
 
@@ -1090,13 +1090,13 @@ static bool __kthread_cancel_work_sync(struct kthread_work *work, bool is_dwork)
 	 * In the meantime, block any queuing by setting the canceling counter.
 	 */
 	work->canceling++;
-	spin_unlock_irqrestore(&worker->lock, flags);
+	raw_spin_unlock_irqrestore(&worker->lock, flags);
 	kthread_flush_work(work);
-	spin_lock_irqsave(&worker->lock, flags);
+	raw_spin_lock_irqsave(&worker->lock, flags);
 	work->canceling--;
 
 out_fast:
-	spin_unlock_irqrestore(&worker->lock, flags);
+	raw_spin_unlock_irqrestore(&worker->lock, flags);
 out:
 	return ret;
 }
