Skip to content

Commit

Permalink
generic-ipi: remove CSD_FLAG_WAIT
Browse files Browse the repository at this point in the history
Oleg noticed that we don't strictly need CSD_FLAG_WAIT; rework
the code so that we can use CSD_FLAG_LOCK for both purposes.

Signed-off-by: Peter Zijlstra <[email protected]>
Cc: Oleg Nesterov <[email protected]>
Cc: Linus Torvalds <[email protected]>
Cc: Nick Piggin <[email protected]>
Cc: Jens Axboe <[email protected]>
Cc: "Paul E. McKenney" <[email protected]>
Cc: Rusty Russell <[email protected]>
Signed-off-by: Ingo Molnar <[email protected]>
  • Loading branch information
Peter Zijlstra authored and Ingo Molnar committed Feb 25, 2009
1 parent 8969a5e commit 6e27563
Show file tree
Hide file tree
Showing 5 changed files with 28 additions and 71 deletions.
2 changes: 1 addition & 1 deletion block/blk-softirq.c
Original file line number Diff line number Diff line change
Expand Up @@ -64,7 +64,7 @@ static int raise_blk_irq(int cpu, struct request *rq)
data->info = rq;
data->flags = 0;

__smp_call_function_single(cpu, data);
__smp_call_function_single(cpu, data, 0);
return 0;
}

Expand Down
3 changes: 2 additions & 1 deletion include/linux/smp.h
Original file line number Diff line number Diff line change
Expand Up @@ -82,7 +82,8 @@ smp_call_function_mask(cpumask_t mask, void(*func)(void *info), void *info,
return 0;
}

void __smp_call_function_single(int cpuid, struct call_single_data *data);
void __smp_call_function_single(int cpuid, struct call_single_data *data,
int wait);

/*
* Generic and arch helpers
Expand Down
2 changes: 1 addition & 1 deletion kernel/sched.c
Original file line number Diff line number Diff line change
Expand Up @@ -1093,7 +1093,7 @@ static void hrtick_start(struct rq *rq, u64 delay)
if (rq == this_rq()) {
hrtimer_restart(timer);
} else if (!rq->hrtick_csd_pending) {
__smp_call_function_single(cpu_of(rq), &rq->hrtick_csd);
__smp_call_function_single(cpu_of(rq), &rq->hrtick_csd, 0);
rq->hrtick_csd_pending = 1;
}
}
Expand Down
90 changes: 23 additions & 67 deletions kernel/smp.c
Original file line number Diff line number Diff line change
Expand Up @@ -23,8 +23,7 @@ static struct {
};

enum {
CSD_FLAG_WAIT = 0x01,
CSD_FLAG_LOCK = 0x02,
CSD_FLAG_LOCK = 0x01,
};

struct call_function_data {
Expand Down Expand Up @@ -94,42 +93,22 @@ static int __cpuinit init_call_single_data(void)
}
early_initcall(init_call_single_data);

/*
* csd_wait/csd_complete are used for synchronous ipi calls
*/
static void csd_wait_prepare(struct call_single_data *data)
{
data->flags |= CSD_FLAG_WAIT;
}

static void csd_complete(struct call_single_data *data)
{
if (data->flags & CSD_FLAG_WAIT) {
/*
* ensure we're all done before saying we are
*/
smp_mb();
data->flags &= ~CSD_FLAG_WAIT;
}
}

static void csd_wait(struct call_single_data *data)
{
while (data->flags & CSD_FLAG_WAIT)
cpu_relax();
}

/*
* csd_lock/csd_unlock used to serialize access to per-cpu csd resources
*
* For non-synchronous ipi calls the csd can still be in use by the previous
function call. For multi-cpu calls it's even more interesting as we'll have
* to ensure no other cpu is observing our csd.
*/
static void csd_lock(struct call_single_data *data)
static void csd_lock_wait(struct call_single_data *data)
{
while (data->flags & CSD_FLAG_LOCK)
cpu_relax();
}

static void csd_lock(struct call_single_data *data)
{
csd_lock_wait(data);
data->flags = CSD_FLAG_LOCK;

/*
Expand All @@ -155,11 +134,12 @@ static void csd_unlock(struct call_single_data *data)
* Insert a previously allocated call_single_data element for execution
* on the given CPU. data must already have ->func, ->info, and ->flags set.
*/
static void generic_exec_single(int cpu, struct call_single_data *data)
static
void generic_exec_single(int cpu, struct call_single_data *data, int wait)
{
struct call_single_queue *dst = &per_cpu(call_single_queue, cpu);
int wait = data->flags & CSD_FLAG_WAIT, ipi;
unsigned long flags;
int ipi;

spin_lock_irqsave(&dst->lock, flags);
ipi = list_empty(&dst->list);
Expand All @@ -182,7 +162,7 @@ static void generic_exec_single(int cpu, struct call_single_data *data)
arch_send_call_function_single_ipi(cpu);

if (wait)
csd_wait(data);
csd_lock_wait(data);
}

/*
Expand Down Expand Up @@ -232,7 +212,6 @@ void generic_smp_call_function_interrupt(void)
if (refs)
continue;

csd_complete(&data->csd);
csd_unlock(&data->csd);
}

Expand Down Expand Up @@ -270,9 +249,6 @@ void generic_smp_call_function_single_interrupt(void)

data->func(data->info);

if (data_flags & CSD_FLAG_WAIT)
csd_complete(data);

/*
* Unlocked CSDs are valid through generic_exec_single()
*/
Expand Down Expand Up @@ -313,36 +289,16 @@ int smp_call_function_single(int cpu, void (*func) (void *info), void *info,
func(info);
local_irq_restore(flags);
} else if ((unsigned)cpu < nr_cpu_ids && cpu_online(cpu)) {
struct call_single_data *data;
struct call_single_data *data = &d;

if (!wait) {
/*
* We are calling a function on a single CPU
* and we are not going to wait for it to finish.
* We use a per cpu data to pass the information to
* that CPU. Since all callers of this code will
* use the same data, we must synchronize the
* callers to prevent a new caller from corrupting
* the data before the callee can access it.
*
* The CSD_FLAG_LOCK is used to let us know when
* the IPI handler is done with the data.
* The first caller will set it, and the callee
* will clear it. The next caller must wait for
* it to clear before we set it again. This
* will make sure the callee is done with the
* data before a new caller will use it.
*/
if (!wait)
data = &__get_cpu_var(csd_data);
csd_lock(data);
} else {
data = &d;
csd_wait_prepare(data);
}

csd_lock(data);

data->func = func;
data->info = info;
generic_exec_single(cpu, data);
generic_exec_single(cpu, data, wait);
} else {
err = -ENXIO; /* CPU not online */
}
Expand All @@ -362,12 +318,15 @@ EXPORT_SYMBOL(smp_call_function_single);
* instance.
*
*/
void __smp_call_function_single(int cpu, struct call_single_data *data)
void __smp_call_function_single(int cpu, struct call_single_data *data,
int wait)
{
csd_lock(data);

/* Can deadlock when called with interrupts disabled */
WARN_ON((data->flags & CSD_FLAG_WAIT) && irqs_disabled());
WARN_ON(wait && irqs_disabled());

generic_exec_single(cpu, data);
generic_exec_single(cpu, data, wait);
}

/* FIXME: Shim for archs using old arch_send_call_function_ipi API. */
Expand Down Expand Up @@ -425,9 +384,6 @@ void smp_call_function_many(const struct cpumask *mask,
csd_lock(&data->csd);

spin_lock_irqsave(&data->lock, flags);
if (wait)
csd_wait_prepare(&data->csd);

data->csd.func = func;
data->csd.info = info;
cpumask_and(data->cpumask, mask, cpu_online_mask);
Expand Down Expand Up @@ -456,7 +412,7 @@ void smp_call_function_many(const struct cpumask *mask,

/* optionally wait for the CPUs to complete */
if (wait)
csd_wait(&data->csd);
csd_lock_wait(&data->csd);
}
EXPORT_SYMBOL(smp_call_function_many);

Expand Down
2 changes: 1 addition & 1 deletion kernel/softirq.c
Original file line number Diff line number Diff line change
Expand Up @@ -496,7 +496,7 @@ static int __try_remote_softirq(struct call_single_data *cp, int cpu, int softir
cp->flags = 0;
cp->priv = softirq;

__smp_call_function_single(cpu, cp);
__smp_call_function_single(cpu, cp, 0);
return 0;
}
return 1;
Expand Down

0 comments on commit 6e27563

Please sign in to comment.