stop_machine: implement stop_machine_from_inactive_cpu()
Currently, mtrr wants stop_machine functionality while a CPU is being
brought up.  As stop_machine() requires the calling CPU to be active,
mtrr implements its own stop_machine using stop_one_cpu() on each
online CPU.  This not only unnecessarily duplicates complex logic but
also introduces the possibility of deadlock when it races against the
generic stop_machine().

This patch implements stop_machine_from_inactive_cpu() to serve such
use cases.  Its functionality is basically the same as stop_machine(),
except that it is to be called from a CPU which is not active and it
does not depend on working scheduling on the calling CPU.

This is achieved by using busy loops for synchronization and by
open-coding the stop_cpus queuing and waiting, with a direct invocation
of fn() on the local CPU in between.
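
To illustrate the intended call pattern, here is a rough sketch (not
part of this patch; all function names below are hypothetical) of how a
CPU bring-up path might use the new API:

  #include <linux/stop_machine.h>

  /* Hypothetical handler; runs while every CPU is stopped with IRQs off. */
  static int example_rendezvous_handler(void *info)
  {
          /* apply the per-CPU state change here */
          return 0;
  }

  /*
   * Hypothetical bring-up hook: the local CPU is online but not yet
   * active, so the regular stop_machine() cannot be used from here.
   */
  static int example_sync_on_bringup(void *info)
  {
          /*
           * With a NULL mask the handler runs on the first online CPU
           * while all other CPUs, including this inactive one, spin
           * with IRQs disabled; pass an explicit cpumask to run it on
           * specific CPUs instead.
           */
          return stop_machine_from_inactive_cpu(example_rendezvous_handler,
                                                info, NULL);
  }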

Signed-off-by: Tejun Heo <[email protected]>
Link: http://lkml.kernel.org/r/[email protected]
Signed-off-by: Suresh Siddha <[email protected]>
Cc: Ingo Molnar <[email protected]>
Cc: Andrew Morton <[email protected]>
Cc: Linus Torvalds <[email protected]>
Cc: Peter Zijlstra <[email protected]>
Signed-off-by: H. Peter Anvin <[email protected]>
htejun authored and H. Peter Anvin committed Jun 27, 2011
1 parent fd7355b commit f740e6c
Showing 2 changed files with 73 additions and 3 deletions.
14 changes: 12 additions & 2 deletions include/linux/stop_machine.h
@@ -126,15 +126,19 @@ int stop_machine(int (*fn)(void *), void *data, const struct cpumask *cpus);
  */
 int __stop_machine(int (*fn)(void *), void *data, const struct cpumask *cpus);
 
+int stop_machine_from_inactive_cpu(int (*fn)(void *), void *data,
+                                   const struct cpumask *cpus);
+
 #else  /* CONFIG_STOP_MACHINE && CONFIG_SMP */
 
 static inline int __stop_machine(int (*fn)(void *), void *data,
                                  const struct cpumask *cpus)
 {
+        unsigned long flags;
         int ret;
-        local_irq_disable();
+        local_irq_save(flags);
         ret = fn(data);
-        local_irq_enable();
+        local_irq_restore(flags);
         return ret;
 }

@@ -144,5 +148,11 @@ static inline int stop_machine(int (*fn)(void *), void *data,
         return __stop_machine(fn, data, cpus);
 }
 
+static inline int stop_machine_from_inactive_cpu(int (*fn)(void *), void *data,
+                                                 const struct cpumask *cpus)
+{
+        return __stop_machine(fn, data, cpus);
+}
+
 #endif /* CONFIG_STOP_MACHINE && CONFIG_SMP */
 #endif /* _LINUX_STOP_MACHINE */
62 changes: 61 additions & 1 deletion kernel/stop_machine.c
@@ -439,8 +439,15 @@ static int stop_machine_cpu_stop(void *data)
         struct stop_machine_data *smdata = data;
         enum stopmachine_state curstate = STOPMACHINE_NONE;
         int cpu = smp_processor_id(), err = 0;
+        unsigned long flags;
         bool is_active;
 
+        /*
+         * When called from stop_machine_from_inactive_cpu(), irq might
+         * already be disabled.  Save the state and restore it on exit.
+         */
+        local_save_flags(flags);
+
         if (!smdata->active_cpus)
                 is_active = cpu == cpumask_first(cpu_online_mask);
         else
@@ -468,7 +475,7 @@ static int stop_machine_cpu_stop(void *data)
                 }
         } while (curstate != STOPMACHINE_EXIT);
 
-        local_irq_enable();
+        local_irq_restore(flags);
         return err;
 }

@@ -495,4 +502,57 @@ int stop_machine(int (*fn)(void *), void *data, const struct cpumask *cpus)
 }
 EXPORT_SYMBOL_GPL(stop_machine);
 
+/**
+ * stop_machine_from_inactive_cpu - stop_machine() from inactive CPU
+ * @fn: the function to run
+ * @data: the data ptr for the @fn()
+ * @cpus: the cpus to run the @fn() on (NULL = any online cpu)
+ *
+ * This is identical to stop_machine() but can be called from a CPU which
+ * is not active.  The local CPU is in the process of hotplug (so no other
+ * CPU hotplug can start) and not marked active and doesn't have enough
+ * context to sleep.
+ *
+ * This function provides stop_machine() functionality for such state by
+ * using busy-wait for synchronization and executing @fn directly for local
+ * CPU.
+ *
+ * CONTEXT:
+ * Local CPU is inactive.  Temporarily stops all active CPUs.
+ *
+ * RETURNS:
+ * 0 if all executions of @fn returned 0, any non zero return value if any
+ * returned non zero.
+ */
+int stop_machine_from_inactive_cpu(int (*fn)(void *), void *data,
+                                   const struct cpumask *cpus)
+{
+        struct stop_machine_data smdata = { .fn = fn, .data = data,
+                                            .active_cpus = cpus };
+        struct cpu_stop_done done;
+        int ret;
+
+        /* Local CPU must be inactive and CPU hotplug in progress. */
+        BUG_ON(cpu_active(raw_smp_processor_id()));
+        smdata.num_threads = num_active_cpus() + 1;     /* +1 for local */
+
+        /* No proper task established and can't sleep - busy wait for lock. */
+        while (!mutex_trylock(&stop_cpus_mutex))
+                cpu_relax();
+
+        /* Schedule work on other CPUs and execute directly for local CPU */
+        set_state(&smdata, STOPMACHINE_PREPARE);
+        cpu_stop_init_done(&done, num_active_cpus());
+        queue_stop_cpus_work(cpu_active_mask, stop_machine_cpu_stop, &smdata,
+                             &done);
+        ret = stop_machine_cpu_stop(&smdata);
+
+        /* Busy wait for completion. */
+        while (!completion_done(&done.completion))
+                cpu_relax();
+
+        mutex_unlock(&stop_cpus_mutex);
+        return ret ?: done.ret;
+}
+
 #endif /* CONFIG_STOP_MACHINE */
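
A usage note on the RETURNS rule documented above: the aggregated value
is 0 only when every invocation of @fn returned 0.  Below is a hedged,
purely hypothetical sketch of checking that result on the bring-up path
(the handler name is a placeholder, as in the sketch further up):

  #include <linux/kernel.h>
  #include <linux/stop_machine.h>

  static int example_rendezvous_handler(void *info);  /* hypothetical */

  /* Hypothetical caller checking the aggregated result. */
  static int example_sync_and_check(void *info)
  {
          int err;

          err = stop_machine_from_inactive_cpu(example_rendezvous_handler,
                                               info, NULL);
          if (err)
                  pr_warn("handler failed on at least one CPU: %d\n", err);
          return err;
  }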
