Skip to content

Commit

Permalink
kernel/sched: Remove "cooperative scheduling only" special cases
Browse files Browse the repository at this point in the history
The scheduler has historically had an API where an application can
inform the kernel that it will never create a thread that can be
preempted, and the kernel and architecture layer would use that as an
optimization hint to eliminate some code paths.

Those optimizations have dwindled to almost nothing at this point, and
they're now objectively a smaller impact than the special casing that
was required to handle the idle thread (which, obviously, must always
be preemptible).

Fix this by eliminating the idea of "cooperative only" and ensuring
that there will always be at least one preemptible priority with value
>=0.  CONFIG_NUM_PREEMPT_PRIORITIES now specifies the number of
user-accessible priorities other than the idle thread.

The only remaining workaround is that some older architectures (and
also SPARC) use the CONFIG_PREEMPT_ENABLED=n state as a hint to skip
thread switching on interrupt exit.  So detect exactly those platforms
and implement a minimal workaround in the idle loop (basically "just
call swap()") instead, with a big explanation.

Note that this also fixes a bug in one of the philosophers samples,
where it would ask for 6 cooperative priorities but then use values -7
through -2.  It was assuming the kernel would magically create a
cooperative priority for its idle thread, which wasn't correct even
before.

Fixes zephyrproject-rtos#34584

Signed-off-by: Andy Ross <[email protected]>
  • Loading branch information
Andy Ross authored and nashif committed May 25, 2021
1 parent a7c732d commit 851d14a
Show file tree
Hide file tree
Showing 7 changed files with 39 additions and 73 deletions.
38 changes: 7 additions & 31 deletions include/kernel.h
Original file line number Diff line number Diff line change
Expand Up @@ -36,43 +36,19 @@ extern "C" {
* @}
*/

#if defined(CONFIG_COOP_ENABLED) && defined(CONFIG_PREEMPT_ENABLED)
#define _NUM_COOP_PRIO (CONFIG_NUM_COOP_PRIORITIES)
#define _NUM_PREEMPT_PRIO (CONFIG_NUM_PREEMPT_PRIORITIES + 1)
#elif defined(CONFIG_COOP_ENABLED)
#define _NUM_COOP_PRIO (CONFIG_NUM_COOP_PRIORITIES + 1)
#define _NUM_PREEMPT_PRIO (0)
#elif defined(CONFIG_PREEMPT_ENABLED)
#define _NUM_COOP_PRIO (0)
#define _NUM_PREEMPT_PRIO (CONFIG_NUM_PREEMPT_PRIORITIES + 1)
#else
#error "invalid configuration"
#endif

#define K_PRIO_COOP(x) (-(_NUM_COOP_PRIO - (x)))
#define K_PRIO_PREEMPT(x) (x)

#define K_ANY NULL
#define K_END NULL

#if defined(CONFIG_COOP_ENABLED) && defined(CONFIG_PREEMPT_ENABLED)
#define K_HIGHEST_THREAD_PRIO (-CONFIG_NUM_COOP_PRIORITIES)
#elif defined(CONFIG_COOP_ENABLED)
#define K_HIGHEST_THREAD_PRIO (-CONFIG_NUM_COOP_PRIORITIES - 1)
#elif defined(CONFIG_PREEMPT_ENABLED)
#define K_HIGHEST_THREAD_PRIO 0
#else
#error "invalid configuration"
#if CONFIG_NUM_COOP_PRIORITIES + CONFIG_NUM_PREEMPT_PRIORITIES == 0
#error Zero available thread priorities defined!
#endif

#ifdef CONFIG_PREEMPT_ENABLED
#define K_LOWEST_THREAD_PRIO CONFIG_NUM_PREEMPT_PRIORITIES
#else
#define K_LOWEST_THREAD_PRIO -1
#endif
#define K_PRIO_COOP(x) (-(CONFIG_NUM_COOP_PRIORITIES - (x)))
#define K_PRIO_PREEMPT(x) (x)

#define K_HIGHEST_THREAD_PRIO (-CONFIG_NUM_COOP_PRIORITIES)
#define K_LOWEST_THREAD_PRIO CONFIG_NUM_PREEMPT_PRIORITIES
#define K_IDLE_PRIO K_LOWEST_THREAD_PRIO

#define K_HIGHEST_APPLICATION_THREAD_PRIO (K_HIGHEST_THREAD_PRIO)
#define K_LOWEST_APPLICATION_THREAD_PRIO (K_LOWEST_THREAD_PRIO - 1)

Expand Down Expand Up @@ -2553,7 +2529,7 @@ struct k_mutex {
.wait_q = Z_WAIT_Q_INIT(&obj.wait_q), \
.owner = NULL, \
.lock_count = 0, \
.owner_orig_prio = K_LOWEST_THREAD_PRIO, \
.owner_orig_prio = K_LOWEST_APPLICATION_THREAD_PRIO, \
}

/**
Expand Down
6 changes: 5 additions & 1 deletion kernel/Kconfig
Original file line number Diff line number Diff line change
Expand Up @@ -90,7 +90,11 @@ config PREEMPT_ENABLED

config PRIORITY_CEILING
int "Priority inheritance ceiling"
default 0
default -127
help
This defines the minimum priority value (i.e. the logically
highest priority) that a thread will acquire as part of
k_mutex priority inheritance.

config NUM_METAIRQ_PRIORITIES
int "Number of very-high priority 'preemptor' threads"
Expand Down
24 changes: 17 additions & 7 deletions kernel/idle.c
Original file line number Diff line number Diff line change
Expand Up @@ -13,6 +13,7 @@
#include <stdbool.h>
#include <logging/log.h>
#include <ksched.h>
#include <kswap.h>

LOG_MODULE_DECLARE(os, CONFIG_KERNEL_LOG_LEVEL);

Expand Down Expand Up @@ -66,6 +67,8 @@ void idle(void *unused1, void *unused2, void *unused3)
ARG_UNUSED(unused2);
ARG_UNUSED(unused3);

__ASSERT_NO_MSG(_current->base.prio >= 0);

while (true) {
/* SMP systems without a working IPI can't
* actually enter an idle state, because they
Expand Down Expand Up @@ -95,14 +98,21 @@ void idle(void *unused1, void *unused2, void *unused3)
k_cpu_idle();
}

/* It is possible to (pathologically) configure the
* idle thread to have a non-preemptible priority.
* You might think this is an API bug, but we actually
* have a test that exercises this. Handle the edge
* case when that happens.
#if !defined(CONFIG_PREEMPT_ENABLED)
# if !defined(CONFIG_USE_SWITCH) || defined(CONFIG_SPARC)
/* A legacy mess: the idle thread is by definition
* preemptible as far as the modern scheduler is
* concerned, but older platforms use
* CONFIG_PREEMPT_ENABLED=n as an optimization hint
* that interrupt exit always returns to the
* interrupted context. So in that setup we need to
* explicitly yield in the idle thread otherwise
* nothing else will run once it starts.
*/
if (K_IDLE_PRIO < 0) {
k_yield();
if (_kernel.ready_q.cache != _current) {
z_swap_unlocked();
}
# endif
#endif
}
}
5 changes: 0 additions & 5 deletions kernel/include/ksched.h
Original file line number Diff line number Diff line change
Expand Up @@ -250,27 +250,22 @@ static inline void _ready_one_thread(_wait_q_t *wq)

/* Increment the current thread's scheduler-lock nesting level.
 *
 * NOTE(review): base.sched_locked is decremented on lock (and
 * incremented on unlock), i.e. the nesting count runs *downward* —
 * presumably so a single threshold comparison elsewhere can combine
 * it with priority; confirm against kernel_structs.h.  The != 1U
 * assertion appears to catch wrap-around past the maximum nesting
 * depth — TODO confirm.  Must not be called from ISR context.
 * Compiles to a no-op when CONFIG_PREEMPT_ENABLED=n.
 */
static inline void z_sched_lock(void)
{
#ifdef CONFIG_PREEMPT_ENABLED
/* Scheduler locking is meaningless without preemption. */
__ASSERT(!arch_is_in_isr(), "");
__ASSERT(_current->base.sched_locked != 1U, "");

--_current->base.sched_locked;

/* Keep the compiler from reordering code across the lock point. */
compiler_barrier();

#endif
}

/* Decrement the current thread's scheduler-lock nesting level
 * WITHOUT triggering a reschedule afterwards (contrast with
 * k_sched_unlock(), which does reschedule).
 *
 * NOTE(review): the count runs downward, so unlock is the ++ here;
 * the != 0U assertion rejects an unlock without a matching lock.
 * Must not be called from ISR context.  Compiles to a no-op when
 * CONFIG_PREEMPT_ENABLED=n.
 */
static ALWAYS_INLINE void z_sched_unlock_no_reschedule(void)
{
#ifdef CONFIG_PREEMPT_ENABLED
__ASSERT(!arch_is_in_isr(), "");
__ASSERT(_current->base.sched_locked != 0U, "");

/* Barrier before the release so prior writes are not sunk below it. */
compiler_barrier();

++_current->base.sched_locked;
#endif
}

static ALWAYS_INLINE bool z_is_thread_timeout_expired(struct k_thread *thread)
Expand Down
2 changes: 1 addition & 1 deletion kernel/init.c
Original file line number Diff line number Diff line change
Expand Up @@ -259,7 +259,7 @@ static void init_idle_thread(int i)

z_setup_new_thread(thread, stack,
CONFIG_IDLE_STACK_SIZE, idle, &_kernel.cpus[i],
NULL, NULL, K_LOWEST_THREAD_PRIO, K_ESSENTIAL,
NULL, NULL, K_IDLE_PRIO, K_ESSENTIAL,
tname);
z_mark_thread_as_started(thread);

Expand Down
35 changes: 8 additions & 27 deletions kernel/sched.c
Original file line number Diff line number Diff line change
Expand Up @@ -53,12 +53,8 @@ static void end_thread(struct k_thread *thread);

/* Return nonzero if @a thread is preemptible.
 *
 * Compares the thread's combined preemption field against
 * _PREEMPT_THRESHOLD; see the field's explanation in
 * kernel_struct.h for how priority and the scheduler-lock count
 * are folded into base.preempt.  When the kernel is built without
 * preemptible priorities (CONFIG_PREEMPT_ENABLED=n), no thread is
 * ever considered preemptible.
 */
static inline int is_preempt(struct k_thread *thread)
{
#ifdef CONFIG_PREEMPT_ENABLED
/* explanation in kernel_struct.h */
return thread->base.preempt <= _PREEMPT_THRESHOLD;
#else
return 0;
#endif
}

static inline int is_metairq(struct k_thread *thread)
Expand Down Expand Up @@ -154,15 +150,6 @@ static ALWAYS_INLINE bool should_preempt(struct k_thread *thread,
return true;
}

/* The idle threads can look "cooperative" if there are no
* preemptible priorities (this is sort of an API glitch).
* They must always be preemptible.
*/
if (!IS_ENABLED(CONFIG_PREEMPT_ENABLED) &&
z_is_idle_thread_object(_current)) {
return true;
}

return false;
}

Expand Down Expand Up @@ -845,7 +832,6 @@ void k_sched_lock(void)

void k_sched_unlock(void)
{
#ifdef CONFIG_PREEMPT_ENABLED
LOCKED(&sched_spinlock) {
__ASSERT(_current->base.sched_locked != 0U, "");
__ASSERT(!arch_is_in_isr(), "");
Expand All @@ -860,7 +846,6 @@ void k_sched_unlock(void)
SYS_PORT_TRACING_FUNC(k_thread, sched_unlock);

z_reschedule_unlocked();
#endif
}

struct k_thread *z_swap_next_thread(void)
Expand Down Expand Up @@ -1201,20 +1186,16 @@ void z_impl_k_yield(void)

SYS_PORT_TRACING_FUNC(k_thread, yield);

if (!z_is_idle_thread_object(_current)) {
k_spinlock_key_t key = k_spin_lock(&sched_spinlock);
k_spinlock_key_t key = k_spin_lock(&sched_spinlock);

if (!IS_ENABLED(CONFIG_SMP) ||
z_is_thread_queued(_current)) {
dequeue_thread(&_kernel.ready_q.runq,
_current);
}
queue_thread(&_kernel.ready_q.runq, _current);
update_cache(1);
z_swap(&sched_spinlock, key);
} else {
z_swap_unlocked();
if (!IS_ENABLED(CONFIG_SMP) ||
z_is_thread_queued(_current)) {
dequeue_thread(&_kernel.ready_q.runq,
_current);
}
queue_thread(&_kernel.ready_q.runq, _current);
update_cache(1);
z_swap(&sched_spinlock, key);
}

#ifdef CONFIG_USERSPACE
Expand Down
2 changes: 1 addition & 1 deletion samples/philosophers/sample.yaml
Original file line number Diff line number Diff line change
Expand Up @@ -38,4 +38,4 @@ tests:
sample.kernel.philosopher.coop_only:
extra_configs:
- CONFIG_NUM_PREEMPT_PRIORITIES=0
- CONFIG_NUM_COOP_PRIORITIES=6
- CONFIG_NUM_COOP_PRIORITIES=7

0 comments on commit 851d14a

Please sign in to comment.