
Commit 040b9d7

Merge branch 'sched-urgent-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip

Pull scheduler fixes from Ingo Molnar:
 "Three fixes:

   - fix a suspend/resume cpusets bug

   - fix a !CONFIG_NUMA_BALANCING bug

   - fix a kerneldoc warning"

* 'sched-urgent-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip:
  sched/fair: Fix nuisance kernel-doc warning
  sched/cpuset/pm: Fix cpuset vs. suspend-resume bugs
  sched/fair: Fix wake_affine_llc() balancing rules
torvalds committed Sep 12, 2017
2 parents e6328a7 + 4612335 commit 040b9d7
Showing 5 changed files with 30 additions and 8 deletions.
include/linux/cpuset.h: 6 additions & 0 deletions
@@ -51,7 +51,9 @@ static inline void cpuset_dec(void)

 extern int cpuset_init(void);
 extern void cpuset_init_smp(void);
+extern void cpuset_force_rebuild(void);
 extern void cpuset_update_active_cpus(void);
+extern void cpuset_wait_for_hotplug(void);
 extern void cpuset_cpus_allowed(struct task_struct *p, struct cpumask *mask);
 extern void cpuset_cpus_allowed_fallback(struct task_struct *p);
 extern nodemask_t cpuset_mems_allowed(struct task_struct *p);
@@ -164,11 +166,15 @@ static inline bool cpusets_enabled(void) { return false; }
 static inline int cpuset_init(void) { return 0; }
 static inline void cpuset_init_smp(void) {}
 
+static inline void cpuset_force_rebuild(void) { }
+
 static inline void cpuset_update_active_cpus(void)
 {
         partition_sched_domains(1, NULL, NULL);
 }
 
+static inline void cpuset_wait_for_hotplug(void) { }
+
 static inline void cpuset_cpus_allowed(struct task_struct *p,
                                        struct cpumask *mask)
 {
kernel/cgroup/cpuset.c: 15 additions & 1 deletion
@@ -2275,6 +2275,13 @@ static void cpuset_hotplug_update_tasks(struct cpuset *cs)
         mutex_unlock(&cpuset_mutex);
 }
 
+static bool force_rebuild;
+
+void cpuset_force_rebuild(void)
+{
+        force_rebuild = true;
+}
+
 /**
  * cpuset_hotplug_workfn - handle CPU/memory hotunplug for a cpuset
  *
@@ -2349,8 +2356,10 @@ static void cpuset_hotplug_workfn(struct work_struct *work)
         }
 
         /* rebuild sched domains if cpus_allowed has changed */
-        if (cpus_updated)
+        if (cpus_updated || force_rebuild) {
+                force_rebuild = false;
                 rebuild_sched_domains();
+        }
 }
 
 void cpuset_update_active_cpus(void)
@@ -2363,6 +2372,11 @@ void cpuset_update_active_cpus(void)
         schedule_work(&cpuset_hotplug_work);
 }
 
+void cpuset_wait_for_hotplug(void)
+{
+        flush_work(&cpuset_hotplug_work);
+}
+
 /*
  * Keep top_cpuset.mems_allowed tracking node_states[N_MEMORY].
  * Call this routine anytime after node_states[N_MEMORY] changes.
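The two helpers added above are the consumer side of the suspend/resume fix: cpuset_force_rebuild() latches a request so the next run of the hotplug work rebuilds the sched domains even if the cpumasks look unchanged, and cpuset_wait_for_hotplug() lets the PM thaw path (kernel/power/process.c below) wait for that work to finish. The following stand-alone userspace sketch is an illustration only, not kernel code: it uses stub functions and passes a cpus_updated value in directly, and just models how the latched flag is consumed exactly once.

#include <stdbool.h>
#include <stdio.h>

/* Stand-in for the kernel-side flag added by the patch. */
static bool force_rebuild;

static void cpuset_force_rebuild(void)
{
        force_rebuild = true;
}

static void rebuild_sched_domains(void)
{
        printf("rebuilding sched domains from the cpuset configuration\n");
}

/* Simplified model of cpuset_hotplug_workfn(): cpus_updated is passed
 * in directly instead of being recomputed from the cpumasks. */
static void cpuset_hotplug_workfn(bool cpus_updated)
{
        /* rebuild sched domains if cpus_allowed has changed or a rebuild was forced */
        if (cpus_updated || force_rebuild) {
                force_rebuild = false;
                rebuild_sched_domains();
        }
}

int main(void)
{
        cpuset_hotplug_workfn(false);   /* nothing changed: no rebuild */
        cpuset_force_rebuild();         /* resume path requests a rebuild */
        cpuset_hotplug_workfn(false);   /* forced: rebuilds once */
        cpuset_hotplug_workfn(false);   /* flag already consumed: no rebuild */
        return 0;
}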
kernel/power/process.c: 4 additions & 1 deletion
@@ -20,8 +20,9 @@
 #include <linux/workqueue.h>
 #include <linux/kmod.h>
 #include <trace/events/power.h>
+#include <linux/cpuset.h>
 
-/* 
+/*
  * Timeout for stopping processes
  */
 unsigned int __read_mostly freeze_timeout_msecs = 20 * MSEC_PER_SEC;
@@ -202,6 +203,8 @@ void thaw_processes(void)
         __usermodehelper_set_disable_depth(UMH_FREEZING);
         thaw_workqueues();
 
+        cpuset_wait_for_hotplug();
+
         read_lock(&tasklist_lock);
         for_each_process_thread(g, p) {
                 /* No other threads should have PF_SUSPEND_TASK set */
kernel/sched/core.c: 3 additions & 4 deletions
@@ -5556,16 +5556,15 @@ static void cpuset_cpu_active(void)
                  * operation in the resume sequence, just build a single sched
                  * domain, ignoring cpusets.
                  */
-                num_cpus_frozen--;
-                if (likely(num_cpus_frozen)) {
-                        partition_sched_domains(1, NULL, NULL);
+                partition_sched_domains(1, NULL, NULL);
+                if (--num_cpus_frozen)
                         return;
-                }
                 /*
                  * This is the last CPU online operation. So fall through and
                  * restore the original sched domains by considering the
                  * cpuset configurations.
                  */
+                cpuset_force_rebuild();
         }
         cpuset_update_active_cpus();
 }
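The rework above keeps a single flat sched domain while intermediate CPUs come back online during resume and, only when num_cpus_frozen drops to zero, requests the full cpuset-aware rebuild before scheduling the hotplug work. Below is a stand-alone trace of that counting, using made-up stubs and assuming a resume that brings three frozen CPUs back online with the "tasks frozen" condition true throughout; it is illustrative only, not the kernel code.

#include <stdio.h>

/* Stand-ins; in the kernel these are cpuset/sched internals. */
static int num_cpus_frozen = 3;         /* CPUs frozen by the suspend */
static int cpuhp_tasks_frozen = 1;      /* these onlines are part of resume */

static void partition_sched_domains_flat(void)
{
        printf("keep a single flat sched domain\n");
}

static void cpuset_force_rebuild(void)
{
        printf("last CPU: force a cpuset-aware rebuild\n");
}

static void cpuset_update_active_cpus(void)
{
        printf("schedule cpuset hotplug work\n");
}

/* Simplified model of the reworked cpuset_cpu_active(). */
static void cpuset_cpu_active(void)
{
        if (cpuhp_tasks_frozen) {
                partition_sched_domains_flat();
                if (--num_cpus_frozen)
                        return;                 /* not the last CPU yet */
                cpuset_force_rebuild();         /* last CPU online in resume */
        }
        cpuset_update_active_cpus();
}

int main(void)
{
        for (int cpu = 1; cpu <= 3; cpu++)      /* CPUs come back one at a time */
                cpuset_cpu_active();
        return 0;
}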
kernel/sched/fair.c: 2 additions & 2 deletions
@@ -5424,7 +5424,7 @@ wake_affine_llc(struct sched_domain *sd, struct task_struct *p,
                 return false;
 
         /* if this cache has capacity, come here */
-        if (this_stats.has_capacity && this_stats.nr_running < prev_stats.nr_running+1)
+        if (this_stats.has_capacity && this_stats.nr_running+1 < prev_stats.nr_running)
                 return true;
 
         /*
@@ -7708,7 +7708,7 @@ static inline void update_sd_lb_stats(struct lb_env *env, struct sd_lb_stats *sd
  * number.
  *
  * Return: 1 when packing is required and a task should be moved to
- * this CPU. The amount of the imbalance is returned in *imbalance.
+ * this CPU. The amount of the imbalance is returned in env->imbalance.
  *
  * @env: The load balancing environment.
  * @sds: Statistics of the sched_domain which is to be packed
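For the wake_affine_llc() change in the first hunk above, the corrected rule pulls the waking task only if, counting that task, this LLC would still be running fewer tasks than the previous LLC; the old comparison also pulled when both were equally loaded. The stand-alone check below contrasts the two rules with made-up numbers; the pull_old/pull_new helpers are illustrative names, not the kernel's.

#include <stdbool.h>
#include <stdio.h>

struct llc_stats {
        bool has_capacity;
        unsigned long nr_running;
};

/* Old rule: pulls even when both LLCs are equally loaded. */
static bool pull_old(struct llc_stats this_stats, struct llc_stats prev_stats)
{
        return this_stats.has_capacity &&
               this_stats.nr_running < prev_stats.nr_running + 1;
}

/* New rule: pull only if, including the waking task, this LLC would
 * still run fewer tasks than the previous LLC does now. */
static bool pull_new(struct llc_stats this_stats, struct llc_stats prev_stats)
{
        return this_stats.has_capacity &&
               this_stats.nr_running + 1 < prev_stats.nr_running;
}

int main(void)
{
        struct llc_stats this_llc = { .has_capacity = true, .nr_running = 2 };
        struct llc_stats prev_llc = { .has_capacity = true, .nr_running = 2 };

        /* 2 vs 2: old rule pulls, new rule does not. */
        printf("old=%d new=%d\n", pull_old(this_llc, prev_llc),
               pull_new(this_llc, prev_llc));

        prev_llc.nr_running = 4;
        /* 2+1 < 4: pulling still leaves this LLC less loaded, so both pull. */
        printf("old=%d new=%d\n", pull_old(this_llc, prev_llc),
               pull_new(this_llc, prev_llc));
        return 0;
}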
