sched: Allow for positional tg_tree walks
Extend walk_tg_tree to accept a positional argument

static int walk_tg_tree_from(struct task_group *from,
			     tg_visitor down, tg_visitor up, void *data)

Existing semantics are preserved; the caller must hold rcu_lock() or a
sufficient analogue.

Signed-off-by: Paul Turner <[email protected]>
Reviewed-by: Hidetoshi Seto <[email protected]>
Signed-off-by: Peter Zijlstra <[email protected]>
Link: http://lkml.kernel.org/r/[email protected]
Signed-off-by: Ingo Molnar <[email protected]>
paulturner authored and Ingo Molnar committed Aug 14, 2011
1 parent 671fd9d commit 8277434
Showing 1 changed file with 37 additions and 13 deletions.
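
As a minimal sketch of the intended call pattern (the visitor tg_count_one and
its caller are hypothetical, not part of this commit), a subtree walk now
looks like this:

	/* Hypothetical @down visitor: count the groups in a subtree. */
	static int tg_count_one(struct task_group *tg, void *data)
	{
		(*(int *)data)++;
		return 0;	/* zero continues the walk */
	}

	static int count_tg_subtree(struct task_group *from)
	{
		int count = 0;

		/* The new entry point no longer takes rcu_read_lock() itself. */
		rcu_read_lock();
		walk_tg_tree_from(from, tg_count_one, tg_nop, &count);
		rcu_read_unlock();

		return count;
	}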
kernel/sched.c: 50 changes (37 additions, 13 deletions)

@@ -1591,20 +1591,23 @@ static inline void dec_cpu_load(struct rq *rq, unsigned long load)
 typedef int (*tg_visitor)(struct task_group *, void *);
 
 /*
- * Iterate the full tree, calling @down when first entering a node and @up when
- * leaving it for the final time.
+ * Iterate task_group tree rooted at *from, calling @down when first entering a
+ * node and @up when leaving it for the final time.
+ *
+ * Caller must hold rcu_lock or sufficient equivalent.
  */
-static int walk_tg_tree(tg_visitor down, tg_visitor up, void *data)
+static int walk_tg_tree_from(struct task_group *from,
+			     tg_visitor down, tg_visitor up, void *data)
 {
 	struct task_group *parent, *child;
 	int ret;
 
-	rcu_read_lock();
-	parent = &root_task_group;
+	parent = from;
+
 down:
 	ret = (*down)(parent, data);
 	if (ret)
-		goto out_unlock;
+		goto out;
 	list_for_each_entry_rcu(child, &parent->children, siblings) {
 		parent = child;
 		goto down;
@@ -1613,19 +1616,29 @@ static int walk_tg_tree(tg_visitor down, tg_visitor up, void *data)
 		continue;
 	}
 	ret = (*up)(parent, data);
-	if (ret)
-		goto out_unlock;
+	if (ret || parent == from)
+		goto out;
 
 	child = parent;
 	parent = parent->parent;
 	if (parent)
 		goto up;
-out_unlock:
-	rcu_read_unlock();
-
+out:
 	return ret;
 }
 
+/*
+ * Iterate the full tree, calling @down when first entering a node and @up when
+ * leaving it for the final time.
+ *
+ * Caller must hold rcu_lock or sufficient equivalent.
+ */
+
+static inline int walk_tg_tree(tg_visitor down, tg_visitor up, void *data)
+{
+	return walk_tg_tree_from(&root_task_group, down, up, data);
+}
+
 static int tg_nop(struct task_group *tg, void *data)
 {
 	return 0;
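
The @down and @up callbacks give pre- and post-order hooks over the subtree,
and the new "parent == from" test stops the upward leg at the subtree root
instead of climbing to the hierarchy root. As an illustration only (this
visitor pair is hypothetical, not part of the commit), depth tracking maps
naturally onto the two hooks:

	/* Hypothetical visitors: @down enters a group, @up leaves it. */
	static int tg_depth_down(struct task_group *tg, void *data)
	{
		(*(int *)data)++;	/* entered one level deeper */
		return 0;		/* zero means: keep walking */
	}

	static int tg_depth_up(struct task_group *tg, void *data)
	{
		(*(int *)data)--;	/* done with this subtree */
		return 0;
	}
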
@@ -8870,13 +8883,19 @@ static int tg_rt_schedulable(struct task_group *tg, void *data)
 
 static int __rt_schedulable(struct task_group *tg, u64 period, u64 runtime)
 {
+	int ret;
+
 	struct rt_schedulable_data data = {
 		.tg = tg,
 		.rt_period = period,
 		.rt_runtime = runtime,
 	};
 
-	return walk_tg_tree(tg_rt_schedulable, tg_nop, &data);
+	rcu_read_lock();
+	ret = walk_tg_tree(tg_rt_schedulable, tg_nop, &data);
+	rcu_read_unlock();
+
+	return ret;
 }
 
 static int tg_set_rt_bandwidth(struct task_group *tg,
@@ -9333,6 +9352,7 @@ static int tg_cfs_schedulable_down(struct task_group *tg, void *data)
 
 static int __cfs_schedulable(struct task_group *tg, u64 period, u64 quota)
 {
+	int ret;
 	struct cfs_schedulable_data data = {
 		.tg = tg,
 		.period = period,
@@ -9344,7 +9364,11 @@ static int __cfs_schedulable(struct task_group *tg, u64 period, u64 quota)
 		do_div(data.quota, NSEC_PER_USEC);
 	}
 
-	return walk_tg_tree(tg_cfs_schedulable_down, tg_nop, &data);
+	rcu_read_lock();
+	ret = walk_tg_tree(tg_cfs_schedulable_down, tg_nop, &data);
+	rcu_read_unlock();
+
+	return ret;
 }
 #endif /* CONFIG_CFS_BANDWIDTH */
 #endif /* CONFIG_FAIR_GROUP_SCHED */
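
A non-zero return from either visitor aborts the walk and is propagated back
to the caller, which is how tg_rt_schedulable and tg_cfs_schedulable_down fail
a bandwidth check. A minimal sketch of that early-exit pattern (tg_find and
tg_in_subtree are hypothetical, not part of this commit):

	/* Hypothetical @down visitor: non-zero return stops the walk. */
	static int tg_find(struct task_group *tg, void *data)
	{
		return tg == data;
	}

	/* Returns non-zero iff @target is in the subtree rooted at @from. */
	static int tg_in_subtree(struct task_group *from, struct task_group *target)
	{
		int found;

		rcu_read_lock();
		found = walk_tg_tree_from(from, tg_find, tg_nop, target);
		rcu_read_unlock();

		return found;
	}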
