sched: Basic tracking of matching tasks
Introduce task_struct::core_cookie as an opaque identifier for core
scheduling. When enabled, core scheduling will only allow matching
tasks to be on the core, where idle matches everything.

When task_struct::core_cookie is set (and core scheduling is enabled)
these tasks are indexed in a second RB-tree, first on cookie value
then on scheduling function, such that matching task selection always
finds the most eligible match.

NOTE: *shudder* at the overhead...

NOTE: *sigh*, a 3rd copy of the scheduling function; the alternative
is per class tracking of cookies and that just duplicates a lot of
stuff for no raisin (the 2nd copy lives in the rt-mutex PI code).

[Joel: folded fixes]
Signed-off-by: Peter Zijlstra (Intel) <[email protected]>
Signed-off-by: Joel Fernandes (Google) <[email protected]>
Signed-off-by: Peter Zijlstra (Intel) <[email protected]>
Tested-by: Don Hiatt <[email protected]>
Tested-by: Hongyu Ning <[email protected]>
Tested-by: Vincent Guittot <[email protected]>
Link: https://lkml.kernel.org/r/[email protected]
Peter Zijlstra committed May 12, 2021
1 parent 21f56ff commit 8a311c7
Showing 4 changed files with 210 additions and 51 deletions.
8 changes: 7 additions & 1 deletion include/linux/sched.h
@@ -700,10 +700,16 @@ struct task_struct {
const struct sched_class *sched_class;
struct sched_entity se;
struct sched_rt_entity rt;
+struct sched_dl_entity dl;
+
+#ifdef CONFIG_SCHED_CORE
+struct rb_node core_node;
+unsigned long core_cookie;
+#endif
+
#ifdef CONFIG_CGROUP_SCHED
struct task_group *sched_task_group;
#endif
-struct sched_dl_entity dl;

#ifdef CONFIG_UCLAMP_TASK
/*
152 changes: 148 additions & 4 deletions kernel/sched/core.c
@@ -88,6 +88,133 @@ __read_mostly int scheduler_running;

DEFINE_STATIC_KEY_FALSE(__sched_core_enabled);

/* kernel prio, less is more */
static inline int __task_prio(struct task_struct *p)
{
if (p->sched_class == &stop_sched_class) /* trumps deadline */
return -2;

if (rt_prio(p->prio)) /* includes deadline */
return p->prio; /* [-1, 99] */

if (p->sched_class == &idle_sched_class)
return MAX_RT_PRIO + NICE_WIDTH; /* 140 */

return MAX_RT_PRIO + MAX_NICE; /* 120, squash fair */
}
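
/*
 * Editor's note (illustration, not part of the original commit): the
 * mapping above folds every class onto a single scale on which a smaller
 * number means a more important task:
 *
 *   stop      -> -2
 *   deadline  -> -1       (its ->prio, caught by rt_prio())
 *   RT        -> 0..99    (its ->prio)
 *   fair/CFS  -> ~120     (all fair tasks squashed onto one value)
 *   idle      -> 140
 *
 * prio_less() below only needs extra tie-breaking inside the deadline and
 * fair buckets; everything else is already decided by this scale.
 */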

/*
* l(a,b)
* le(a,b) := !l(b,a)
* g(a,b) := l(b,a)
* ge(a,b) := !l(a,b)
*/

/* real prio, less is less */
static inline bool prio_less(struct task_struct *a, struct task_struct *b)
{

int pa = __task_prio(a), pb = __task_prio(b);

if (-pa < -pb)
return true;

if (-pb < -pa)
return false;

if (pa == -1) /* dl_prio() doesn't work because of stop_class above */
return !dl_time_before(a->dl.deadline, b->dl.deadline);

if (pa == MAX_RT_PRIO + MAX_NICE) { /* fair */
u64 vruntime = b->se.vruntime;

/*
* Normalize the vruntime if tasks are in different cpus.
*/
if (task_cpu(a) != task_cpu(b)) {
vruntime -= task_cfs_rq(b)->min_vruntime;
vruntime += task_cfs_rq(a)->min_vruntime;
}

return !((s64)(a->se.vruntime - vruntime) <= 0);
}

return false;
}
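
/*
 * Editor's note (hypothetical worked example, not part of the commit):
 * take two fair tasks on different CPUs. a has se.vruntime = 1000 on a
 * cfs_rq with min_vruntime = 800; b has se.vruntime = 5000 on a cfs_rq
 * with min_vruntime = 4900. b is rebased onto a's runqueue as
 * 5000 - 4900 + 800 = 900. Since 1000 - 900 > 0, prio_less(a, b) is
 * true: a has run further past its own queue's minimum and is therefore
 * the less eligible of the two.
 */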

static inline bool __sched_core_less(struct task_struct *a, struct task_struct *b)
{
if (a->core_cookie < b->core_cookie)
return true;

if (a->core_cookie > b->core_cookie)
return false;

/* flip prio, so high prio is leftmost */
if (prio_less(b, a))
return true;

return false;
}
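
/*
 * Editor's note (illustration, not part of the commit): the core_tree is
 * therefore ordered cookie-major, priority-minor, with the priority test
 * flipped so the best task sorts leftmost. For example (cookie 1, RT
 * prio 10), (cookie 1, fair) and (cookie 2, fair) end up left to right
 * in exactly that order, which is what lets sched_core_find() below
 * return the most eligible match for a cookie by taking the leftmost
 * node with that key.
 */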

#define __node_2_sc(node) rb_entry((node), struct task_struct, core_node)

static inline bool rb_sched_core_less(struct rb_node *a, const struct rb_node *b)
{
return __sched_core_less(__node_2_sc(a), __node_2_sc(b));
}

static inline int rb_sched_core_cmp(const void *key, const struct rb_node *node)
{
const struct task_struct *p = __node_2_sc(node);
unsigned long cookie = (unsigned long)key;

if (cookie < p->core_cookie)
return -1;

if (cookie > p->core_cookie)
return 1;

return 0;
}

static void sched_core_enqueue(struct rq *rq, struct task_struct *p)
{
rq->core->core_task_seq++;

if (!p->core_cookie)
return;

rb_add(&p->core_node, &rq->core_tree, rb_sched_core_less);
}

static void sched_core_dequeue(struct rq *rq, struct task_struct *p)
{
rq->core->core_task_seq++;

if (!p->core_cookie)
return;

rb_erase(&p->core_node, &rq->core_tree);
}
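
/*
 * Editor's note (hedged observation, not part of the commit):
 * core_task_seq is bumped on every enqueue/dequeue, even for zero-cookie
 * tasks that never enter the tree. In this patch the counter is only
 * maintained, never read; presumably later patches in the series use it
 * to detect that the core-wide runnable picture has changed.
 */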

/*
* Find left-most (aka, highest priority) task matching @cookie.
*/
static struct task_struct *sched_core_find(struct rq *rq, unsigned long cookie)
{
struct rb_node *node;

node = rb_find_first((void *)cookie, &rq->core_tree, rb_sched_core_cmp);
/*
* The idle task always matches any cookie!
*/
if (!node)
return idle_sched_class.pick_task(rq);

return __node_2_sc(node);
}

/*
* Magic required such that:
*
@@ -147,23 +274,29 @@ static void __sched_core_flip(bool enabled)
cpus_read_unlock();
}

-static void __sched_core_enable(void)
+static void sched_core_assert_empty(void)
{
-// XXX verify there are no cookie tasks (yet)
+int cpu;

+for_each_possible_cpu(cpu)
+WARN_ON_ONCE(!RB_EMPTY_ROOT(&cpu_rq(cpu)->core_tree));
+}
+
+static void __sched_core_enable(void)
+{
static_branch_enable(&__sched_core_enabled);
/*
* Ensure all previous instances of raw_spin_rq_*lock() have finished
* and future ones will observe !sched_core_disabled().
*/
synchronize_rcu();
__sched_core_flip(true);
+sched_core_assert_empty();
}

static void __sched_core_disable(void)
{
-// XXX verify there are no cookie tasks (left)
-
+sched_core_assert_empty();
__sched_core_flip(false);
static_branch_disable(&__sched_core_enabled);
}
@@ -205,6 +338,11 @@ void sched_core_put(void)
schedule_work(&_work);
}

#else /* !CONFIG_SCHED_CORE */

static inline void sched_core_enqueue(struct rq *rq, struct task_struct *p) { }
static inline void sched_core_dequeue(struct rq *rq, struct task_struct *p) { }

#endif /* CONFIG_SCHED_CORE */

/*
@@ -1797,10 +1935,16 @@ static inline void enqueue_task(struct rq *rq, struct task_struct *p, int flags)

uclamp_rq_inc(rq, p);
p->sched_class->enqueue_task(rq, p, flags);

if (sched_core_enabled(rq))
sched_core_enqueue(rq, p);
}

static inline void dequeue_task(struct rq *rq, struct task_struct *p, int flags)
{
if (sched_core_enabled(rq))
sched_core_dequeue(rq, p);

if (!(flags & DEQUEUE_NOCLOCK))
update_rq_clock(rq);

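
To see the resulting core_tree ordering outside of the kernel, here is a minimal, self-contained userspace sketch (an editor's addition, not part of the commit; the struct, comparator and sample values are invented for illustration). It models the same sort key as __sched_core_less(): cookie ascending, then priority, with the higher-priority task first.

#include <stdio.h>
#include <stdlib.h>

/* Toy stand-in for a task; only the fields relevant to the ordering. */
struct toy_task {
        unsigned long cookie;
        int prio;               /* smaller number == higher priority, as in __task_prio() */
        const char *name;
};

/* Mirrors __sched_core_less(): cookie-major, then flipped priority. */
static int toy_cmp(const void *pa, const void *pb)
{
        const struct toy_task *a = pa, *b = pb;

        if (a->cookie != b->cookie)
                return a->cookie < b->cookie ? -1 : 1;

        return a->prio - b->prio;       /* higher prio (smaller value) sorts first */
}

int main(void)
{
        struct toy_task tasks[] = {
                { 2, 120, "fair-B" },
                { 1, 120, "fair-A" },
                { 1,  10, "rt-A"   },
        };
        size_t i, n = sizeof(tasks) / sizeof(tasks[0]);

        qsort(tasks, n, sizeof(tasks[0]), toy_cmp);

        /* Prints: rt-A, fair-A, fair-B; leftmost of each cookie is most eligible. */
        for (i = 0; i < n; i++)
                printf("%s (cookie=%lu, prio=%d)\n",
                       tasks[i].name, tasks[i].cookie, tasks[i].prio);

        return 0;
}

The leftmost element of each cookie run is the most eligible task for that cookie, which is exactly the property sched_core_find() relies on when it takes the leftmost matching node.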
46 changes: 0 additions & 46 deletions kernel/sched/fair.c
@@ -268,33 +268,11 @@ const struct sched_class fair_sched_class;
*/

#ifdef CONFIG_FAIR_GROUP_SCHED
static inline struct task_struct *task_of(struct sched_entity *se)
{
SCHED_WARN_ON(!entity_is_task(se));
return container_of(se, struct task_struct, se);
}

/* Walk up scheduling entities hierarchy */
#define for_each_sched_entity(se) \
for (; se; se = se->parent)

static inline struct cfs_rq *task_cfs_rq(struct task_struct *p)
{
return p->se.cfs_rq;
}

/* runqueue on which this entity is (to be) queued */
static inline struct cfs_rq *cfs_rq_of(struct sched_entity *se)
{
return se->cfs_rq;
}

/* runqueue "owned" by this group */
static inline struct cfs_rq *group_cfs_rq(struct sched_entity *grp)
{
return grp->my_q;
}

static inline void cfs_rq_tg_path(struct cfs_rq *cfs_rq, char *path, int len)
{
if (!path)
@@ -455,33 +433,9 @@ find_matching_se(struct sched_entity **se, struct sched_entity **pse)

#else /* !CONFIG_FAIR_GROUP_SCHED */

static inline struct task_struct *task_of(struct sched_entity *se)
{
return container_of(se, struct task_struct, se);
}

#define for_each_sched_entity(se) \
for (; se; se = NULL)

static inline struct cfs_rq *task_cfs_rq(struct task_struct *p)
{
return &task_rq(p)->cfs;
}

static inline struct cfs_rq *cfs_rq_of(struct sched_entity *se)
{
struct task_struct *p = task_of(se);
struct rq *rq = task_rq(p);

return &rq->cfs;
}

/* runqueue "owned" by this group */
static inline struct cfs_rq *group_cfs_rq(struct sched_entity *grp)
{
return NULL;
}

static inline void cfs_rq_tg_path(struct cfs_rq *cfs_rq, char *path, int len)
{
if (path)
55 changes: 55 additions & 0 deletions kernel/sched/sched.h
@@ -1080,6 +1080,10 @@ struct rq {
/* per rq */
struct rq *core;
unsigned int core_enabled;
struct rb_root core_tree;

/* shared state */
unsigned int core_task_seq;
#endif
};

@@ -1243,6 +1247,57 @@ DECLARE_PER_CPU_SHARED_ALIGNED(struct rq, runqueues);
#define cpu_curr(cpu) (cpu_rq(cpu)->curr)
#define raw_rq() raw_cpu_ptr(&runqueues)

#ifdef CONFIG_FAIR_GROUP_SCHED
static inline struct task_struct *task_of(struct sched_entity *se)
{
SCHED_WARN_ON(!entity_is_task(se));
return container_of(se, struct task_struct, se);
}

static inline struct cfs_rq *task_cfs_rq(struct task_struct *p)
{
return p->se.cfs_rq;
}

/* runqueue on which this entity is (to be) queued */
static inline struct cfs_rq *cfs_rq_of(struct sched_entity *se)
{
return se->cfs_rq;
}

/* runqueue "owned" by this group */
static inline struct cfs_rq *group_cfs_rq(struct sched_entity *grp)
{
return grp->my_q;
}

#else

static inline struct task_struct *task_of(struct sched_entity *se)
{
return container_of(se, struct task_struct, se);
}

static inline struct cfs_rq *task_cfs_rq(struct task_struct *p)
{
return &task_rq(p)->cfs;
}

static inline struct cfs_rq *cfs_rq_of(struct sched_entity *se)
{
struct task_struct *p = task_of(se);
struct rq *rq = task_rq(p);

return &rq->cfs;
}

/* runqueue "owned" by this group */
static inline struct cfs_rq *group_cfs_rq(struct sched_entity *grp)
{
return NULL;
}
#endif

extern void update_rq_clock(struct rq *rq);

static inline u64 __rq_clock_broken(struct rq *rq)
