percpu: clean up percpu variable definitions
Percpu variable definitions are about to be updated such that all percpu
symbols, including static ones, must be unique.  Update the percpu
variable definitions accordingly (a short sketch of why follows the
rename list below).

* as,cfq: rename ioc_count uniquely

* cpufreq: rename cpu_dbs_info uniquely

* xen: move nesting_count out of xen_evtchn_do_upcall() and rename it

* mm: move ratelimits out of balance_dirty_pages_ratelimited_nr() and
  rename it

* ipv4,6: rename cookie_scratch uniquely

* x86 perf_counter: rename prev_left to pmc_prev_left, irq_entry to
  pmc_irq_entry and nmi_entry to pmc_nmi_entry

* perf_counter: rename disable_count to perf_disable_count

* ftrace: rename test_event_disable to ftrace_test_event_disable

* kmemleak: rename test_pointer to kmemleak_test_pointer

* mce: rename next_interval to mce_next_interval
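
For context, a rough sketch of why uniqueness matters even for static
definitions. This is simplified, assumed pseudocode in the spirit of the
percpu headers of this era, not the actual kernel macros:

    /*
     * DEFINE_PER_CPU() derives a linker-visible symbol from the
     * variable name, roughly:
     */
    #define DEFINE_PER_CPU(type, name) \
            __attribute__((__section__(".data.percpu"))) \
            __typeof__(type) per_cpu__##name

    /*
     * If an architecture has to emit percpu definitions as weak
     * symbols, "static" no longer confines the name to a single
     * object file, so e.g.
     *
     *     static DEFINE_PER_CPU(unsigned long, ioc_count);   [as-iosched.c]
     *     static DEFINE_PER_CPU(unsigned long, ioc_count);   [cfq-iosched.c]
     *
     * would both resolve to per_cpu__ioc_count. Hence the as_/cfq_/
     * mce_/pmc_ file-local prefixes below.
     */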

[ Impact: percpu usage cleanups, no duplicate static percpu var names ]

Signed-off-by: Tejun Heo <[email protected]>
Reviewed-by: Christoph Lameter <[email protected]>
Cc: Ivan Kokshaysky <[email protected]>
Cc: Jens Axboe <[email protected]>
Cc: Dave Jones <[email protected]>
Cc: Jeremy Fitzhardinge <[email protected]>
Cc: linux-mm <[email protected]>
Cc: David S. Miller <[email protected]>
Cc: Peter Zijlstra <[email protected]>
Cc: Steven Rostedt <[email protected]>
Cc: Li Zefan <[email protected]>
Cc: Catalin Marinas <[email protected]>
Cc: Andi Kleen <[email protected]>
htejun committed Jun 24, 2009
1 parent b9bf312 commit 245b2e7
Showing 13 changed files with 58 additions and 53 deletions.
8 changes: 4 additions & 4 deletions arch/x86/kernel/cpu/mcheck/mce.c
@@ -1091,7 +1091,7 @@ void mce_log_therm_throt_event(__u64 status)
*/
static int check_interval = 5 * 60; /* 5 minutes */

-static DEFINE_PER_CPU(int, next_interval); /* in jiffies */
+static DEFINE_PER_CPU(int, mce_next_interval); /* in jiffies */
static DEFINE_PER_CPU(struct timer_list, mce_timer);

static void mcheck_timer(unsigned long data)
@@ -1110,7 +1110,7 @@ static void mcheck_timer(unsigned long data)
* Alert userspace if needed. If we logged an MCE, reduce the
* polling interval, otherwise increase the polling interval.
*/
-n = &__get_cpu_var(next_interval);
+n = &__get_cpu_var(mce_next_interval);
if (mce_notify_irq())
*n = max(*n/2, HZ/100);
else
@@ -1311,7 +1311,7 @@ static void mce_cpu_features(struct cpuinfo_x86 *c)
static void mce_init_timer(void)
{
struct timer_list *t = &__get_cpu_var(mce_timer);
-int *n = &__get_cpu_var(next_interval);
+int *n = &__get_cpu_var(mce_next_interval);

if (mce_ignore_ce)
return;
@@ -1914,7 +1914,7 @@ mce_cpu_callback(struct notifier_block *nfb, unsigned long action, void *hcpu)
case CPU_DOWN_FAILED:
case CPU_DOWN_FAILED_FROZEN:
t->expires = round_jiffies(jiffies +
-__get_cpu_var(next_interval));
+__get_cpu_var(mce_next_interval));
add_timer_on(t, cpu);
smp_call_function_single(cpu, mce_reenable_cpu, &action, 1);
break;
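
The mce hunks implement adaptive polling, per the code comment above: if
an MCE was logged the interval shrinks, otherwise it grows. A sketch of
the full branch; the "increase" arm is elided from the diff context, so
its exact form (shown here as plain doubling capped at check_interval) is
an assumption:

    n = &__get_cpu_var(mce_next_interval);
    if (mce_notify_irq())
            *n = max(*n / 2, HZ / 100);             /* logged: poll faster */
    else
            *n = min(*n * 2, check_interval * HZ);  /* quiet: back off */
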
14 changes: 7 additions & 7 deletions arch/x86/kernel/cpu/perf_counter.c
@@ -862,7 +862,7 @@ amd_pmu_disable_counter(struct hw_perf_counter *hwc, int idx)
x86_pmu_disable_counter(hwc, idx);
}

-static DEFINE_PER_CPU(u64 [X86_PMC_IDX_MAX], prev_left);
+static DEFINE_PER_CPU(u64 [X86_PMC_IDX_MAX], pmc_prev_left);

/*
* Set the next IRQ period, based on the hwc->period_left value.
@@ -901,7 +901,7 @@ x86_perf_counter_set_period(struct perf_counter *counter,
if (left > x86_pmu.max_period)
left = x86_pmu.max_period;

-per_cpu(prev_left[idx], smp_processor_id()) = left;
+per_cpu(pmc_prev_left[idx], smp_processor_id()) = left;

/*
* The hw counter starts counting from this counter offset,
@@ -1089,7 +1089,7 @@ void perf_counter_print_debug(void)
rdmsrl(x86_pmu.eventsel + idx, pmc_ctrl);
rdmsrl(x86_pmu.perfctr + idx, pmc_count);

-prev_left = per_cpu(prev_left[idx], cpu);
+prev_left = per_cpu(pmc_prev_left[idx], cpu);

pr_info("CPU#%d: gen-PMC%d ctrl: %016llx\n",
cpu, idx, pmc_ctrl);
@@ -1561,8 +1561,8 @@ void callchain_store(struct perf_callchain_entry *entry, u64 ip)
entry->ip[entry->nr++] = ip;
}

-static DEFINE_PER_CPU(struct perf_callchain_entry, irq_entry);
-static DEFINE_PER_CPU(struct perf_callchain_entry, nmi_entry);
+static DEFINE_PER_CPU(struct perf_callchain_entry, pmc_irq_entry);
+static DEFINE_PER_CPU(struct perf_callchain_entry, pmc_nmi_entry);


static void
@@ -1709,9 +1709,9 @@ struct perf_callchain_entry *perf_callchain(struct pt_regs *regs)
struct perf_callchain_entry *entry;

if (in_nmi())
-entry = &__get_cpu_var(nmi_entry);
+entry = &__get_cpu_var(pmc_nmi_entry);
else
-entry = &__get_cpu_var(irq_entry);
+entry = &__get_cpu_var(pmc_irq_entry);

entry->nr = 0;

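
Two details stand out in this file. pmc_prev_left is a per-CPU array, so
access composes array indexing with the per-CPU lookup. And irq/nmi get
separate callchain buffers because an NMI can fire while the irq-context
buffer is in use; the in_nmi() test above selects the safe scratch area.
A brief access sketch using the renamed variables (same-era API):

    static DEFINE_PER_CPU(u64 [X86_PMC_IDX_MAX], pmc_prev_left);

    /* element of this CPU's copy */
    per_cpu(pmc_prev_left[idx], smp_processor_id()) = left;

    /* element of an arbitrary CPU's copy, as in the debug dump */
    prev_left = per_cpu(pmc_prev_left[idx], cpu);
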
10 changes: 5 additions & 5 deletions block/as-iosched.c
@@ -146,7 +146,7 @@ enum arq_state {
#define RQ_STATE(rq) ((enum arq_state)(rq)->elevator_private2)
#define RQ_SET_STATE(rq, state) ((rq)->elevator_private2 = (void *) state)

-static DEFINE_PER_CPU(unsigned long, ioc_count);
+static DEFINE_PER_CPU(unsigned long, as_ioc_count);
static struct completion *ioc_gone;
static DEFINE_SPINLOCK(ioc_gone_lock);

@@ -161,15 +161,15 @@ static void as_antic_stop(struct as_data *ad);
static void free_as_io_context(struct as_io_context *aic)
{
kfree(aic);
-elv_ioc_count_dec(ioc_count);
+elv_ioc_count_dec(as_ioc_count);
if (ioc_gone) {
/*
* AS scheduler is exiting, grab exit lock and check
* the pending io context count. If it hits zero,
* complete ioc_gone and set it back to NULL.
*/
spin_lock(&ioc_gone_lock);
-if (ioc_gone && !elv_ioc_count_read(ioc_count)) {
+if (ioc_gone && !elv_ioc_count_read(as_ioc_count)) {
complete(ioc_gone);
ioc_gone = NULL;
}
@@ -211,7 +211,7 @@ static struct as_io_context *alloc_as_io_context(void)
ret->seek_total = 0;
ret->seek_samples = 0;
ret->seek_mean = 0;
-elv_ioc_count_inc(ioc_count);
+elv_ioc_count_inc(as_ioc_count);
}

return ret;
@@ -1507,7 +1507,7 @@ static void __exit as_exit(void)
ioc_gone = &all_gone;
/* ioc_gone's update must be visible before reading ioc_count */
smp_wmb();
-if (elv_ioc_count_read(ioc_count))
+if (elv_ioc_count_read(as_ioc_count))
wait_for_completion(&all_gone);
synchronize_rcu();
}
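
AS here and CFQ below share the same teardown handshake around their
per-CPU io-context counters. A condensed sketch of the two sides, drawn
from the hunks; anything outside the shown context (such as the matching
spin_unlock) is assumed:

    /* module exit: publish the completion, then wait out live contexts */
    ioc_gone = &all_gone;
    smp_wmb();      /* ioc_gone must be visible before reading the count */
    if (elv_ioc_count_read(as_ioc_count))
            wait_for_completion(&all_gone);

    /* per-context free: the last context out fires the completion */
    elv_ioc_count_dec(as_ioc_count);
    if (ioc_gone) {
            spin_lock(&ioc_gone_lock);
            if (ioc_gone && !elv_ioc_count_read(as_ioc_count)) {
                    complete(ioc_gone);
                    ioc_gone = NULL;
            }
            spin_unlock(&ioc_gone_lock);    /* assumed; outside the hunk */
    }
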
10 changes: 5 additions & 5 deletions block/cfq-iosched.c
@@ -48,7 +48,7 @@ static int cfq_slice_idle = HZ / 125;
static struct kmem_cache *cfq_pool;
static struct kmem_cache *cfq_ioc_pool;

-static DEFINE_PER_CPU(unsigned long, ioc_count);
+static DEFINE_PER_CPU(unsigned long, cfq_ioc_count);
static struct completion *ioc_gone;
static DEFINE_SPINLOCK(ioc_gone_lock);

@@ -1422,7 +1422,7 @@ static void cfq_cic_free_rcu(struct rcu_head *head)
cic = container_of(head, struct cfq_io_context, rcu_head);

kmem_cache_free(cfq_ioc_pool, cic);
-elv_ioc_count_dec(ioc_count);
+elv_ioc_count_dec(cfq_ioc_count);

if (ioc_gone) {
/*
@@ -1431,7 +1431,7 @@ static void cfq_cic_free_rcu(struct rcu_head *head)
* complete ioc_gone and set it back to NULL
*/
spin_lock(&ioc_gone_lock);
-if (ioc_gone && !elv_ioc_count_read(ioc_count)) {
+if (ioc_gone && !elv_ioc_count_read(cfq_ioc_count)) {
complete(ioc_gone);
ioc_gone = NULL;
}
@@ -1557,7 +1557,7 @@ cfq_alloc_io_context(struct cfq_data *cfqd, gfp_t gfp_mask)
INIT_HLIST_NODE(&cic->cic_list);
cic->dtor = cfq_free_io_context;
cic->exit = cfq_exit_io_context;
-elv_ioc_count_inc(ioc_count);
+elv_ioc_count_inc(cfq_ioc_count);
}

return cic;
@@ -2658,7 +2658,7 @@ static void __exit cfq_exit(void)
* this also protects us from entering cfq_slab_kill() with
* pending RCU callbacks
*/
-if (elv_ioc_count_read(ioc_count))
+if (elv_ioc_count_read(cfq_ioc_count))
wait_for_completion(&all_gone);
cfq_slab_kill();
}
12 changes: 6 additions & 6 deletions drivers/cpufreq/cpufreq_conservative.c
@@ -65,7 +65,7 @@ struct cpu_dbs_info_s {
int cpu;
unsigned int enable:1;
};
-static DEFINE_PER_CPU(struct cpu_dbs_info_s, cpu_dbs_info);
+static DEFINE_PER_CPU(struct cpu_dbs_info_s, cs_cpu_dbs_info);

static unsigned int dbs_enable; /* number of CPUs using this policy */

@@ -138,7 +138,7 @@ dbs_cpufreq_notifier(struct notifier_block *nb, unsigned long val,
void *data)
{
struct cpufreq_freqs *freq = data;
-struct cpu_dbs_info_s *this_dbs_info = &per_cpu(cpu_dbs_info,
+struct cpu_dbs_info_s *this_dbs_info = &per_cpu(cs_cpu_dbs_info,
freq->cpu);

struct cpufreq_policy *policy;
@@ -298,7 +298,7 @@ static ssize_t store_ignore_nice_load(struct cpufreq_policy *policy,
/* we need to re-evaluate prev_cpu_idle */
for_each_online_cpu(j) {
struct cpu_dbs_info_s *dbs_info;
-dbs_info = &per_cpu(cpu_dbs_info, j);
+dbs_info = &per_cpu(cs_cpu_dbs_info, j);
dbs_info->prev_cpu_idle = get_cpu_idle_time(j,
&dbs_info->prev_cpu_wall);
if (dbs_tuners_ins.ignore_nice)
@@ -388,7 +388,7 @@ static void dbs_check_cpu(struct cpu_dbs_info_s *this_dbs_info)
cputime64_t cur_wall_time, cur_idle_time;
unsigned int idle_time, wall_time;

-j_dbs_info = &per_cpu(cpu_dbs_info, j);
+j_dbs_info = &per_cpu(cs_cpu_dbs_info, j);

cur_idle_time = get_cpu_idle_time(j, &cur_wall_time);

@@ -528,7 +528,7 @@ static int cpufreq_governor_dbs(struct cpufreq_policy *policy,
unsigned int j;
int rc;

-this_dbs_info = &per_cpu(cpu_dbs_info, cpu);
+this_dbs_info = &per_cpu(cs_cpu_dbs_info, cpu);

switch (event) {
case CPUFREQ_GOV_START:
@@ -548,7 +548,7 @@ static int cpufreq_governor_dbs(struct cpufreq_policy *policy,

for_each_cpu(j, policy->cpus) {
struct cpu_dbs_info_s *j_dbs_info;
-j_dbs_info = &per_cpu(cpu_dbs_info, j);
+j_dbs_info = &per_cpu(cs_cpu_dbs_info, j);
j_dbs_info->cur_policy = policy;

j_dbs_info->prev_cpu_idle = get_cpu_idle_time(j,
15 changes: 8 additions & 7 deletions drivers/cpufreq/cpufreq_ondemand.c
@@ -73,7 +73,7 @@ struct cpu_dbs_info_s {
unsigned int enable:1,
sample_type:1;
};
-static DEFINE_PER_CPU(struct cpu_dbs_info_s, cpu_dbs_info);
+static DEFINE_PER_CPU(struct cpu_dbs_info_s, od_cpu_dbs_info);

static unsigned int dbs_enable; /* number of CPUs using this policy */

@@ -151,7 +151,8 @@ static unsigned int powersave_bias_target(struct cpufreq_policy *policy,
unsigned int freq_hi, freq_lo;
unsigned int index = 0;
unsigned int jiffies_total, jiffies_hi, jiffies_lo;
-struct cpu_dbs_info_s *dbs_info = &per_cpu(cpu_dbs_info, policy->cpu);
+struct cpu_dbs_info_s *dbs_info = &per_cpu(od_cpu_dbs_info,
+policy->cpu);

if (!dbs_info->freq_table) {
dbs_info->freq_lo = 0;
@@ -196,7 +197,7 @@ static void ondemand_powersave_bias_init(void)
{
int i;
for_each_online_cpu(i) {
-struct cpu_dbs_info_s *dbs_info = &per_cpu(cpu_dbs_info, i);
+struct cpu_dbs_info_s *dbs_info = &per_cpu(od_cpu_dbs_info, i);
dbs_info->freq_table = cpufreq_frequency_get_table(i);
dbs_info->freq_lo = 0;
}
@@ -297,7 +298,7 @@ static ssize_t store_ignore_nice_load(struct cpufreq_policy *policy,
/* we need to re-evaluate prev_cpu_idle */
for_each_online_cpu(j) {
struct cpu_dbs_info_s *dbs_info;
-dbs_info = &per_cpu(cpu_dbs_info, j);
+dbs_info = &per_cpu(od_cpu_dbs_info, j);
dbs_info->prev_cpu_idle = get_cpu_idle_time(j,
&dbs_info->prev_cpu_wall);
if (dbs_tuners_ins.ignore_nice)
@@ -391,7 +392,7 @@ static void dbs_check_cpu(struct cpu_dbs_info_s *this_dbs_info)
unsigned int load, load_freq;
int freq_avg;

-j_dbs_info = &per_cpu(cpu_dbs_info, j);
+j_dbs_info = &per_cpu(od_cpu_dbs_info, j);

cur_idle_time = get_cpu_idle_time(j, &cur_wall_time);

@@ -548,7 +549,7 @@ static int cpufreq_governor_dbs(struct cpufreq_policy *policy,
unsigned int j;
int rc;

-this_dbs_info = &per_cpu(cpu_dbs_info, cpu);
+this_dbs_info = &per_cpu(od_cpu_dbs_info, cpu);

switch (event) {
case CPUFREQ_GOV_START:
@@ -570,7 +571,7 @@ static int cpufreq_governor_dbs(struct cpufreq_policy *policy,

for_each_cpu(j, policy->cpus) {
struct cpu_dbs_info_s *j_dbs_info;
-j_dbs_info = &per_cpu(cpu_dbs_info, j);
+j_dbs_info = &per_cpu(od_cpu_dbs_info, j);
j_dbs_info->cur_policy = policy;

j_dbs_info->prev_cpu_idle = get_cpu_idle_time(j,
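
These two governors are the clearest instance of the problem the series
fixes: cpufreq_conservative.c and cpufreq_ondemand.c each defined a
static percpu variable literally named cpu_dbs_info, exactly the
cross-file duplicate the new uniqueness rule forbids, hence the cs_ and
od_ prefixes.
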
9 changes: 5 additions & 4 deletions drivers/xen/events.c
@@ -602,6 +602,8 @@ irqreturn_t xen_debug_interrupt(int irq, void *dev_id)
return IRQ_HANDLED;
}

+static DEFINE_PER_CPU(unsigned, xed_nesting_count);
+
/*
* Search the CPUs pending events bitmasks. For each one found, map
* the event number to an irq, and feed it into do_IRQ() for
@@ -617,7 +619,6 @@ void xen_evtchn_do_upcall(struct pt_regs *regs)
struct pt_regs *old_regs = set_irq_regs(regs);
struct shared_info *s = HYPERVISOR_shared_info;
struct vcpu_info *vcpu_info = __get_cpu_var(xen_vcpu);
-static DEFINE_PER_CPU(unsigned, nesting_count);
unsigned count;

exit_idle();
@@ -628,7 +629,7 @@ void xen_evtchn_do_upcall(struct pt_regs *regs)

vcpu_info->evtchn_upcall_pending = 0;

-if (__get_cpu_var(nesting_count)++)
+if (__get_cpu_var(xed_nesting_count)++)
goto out;

#ifndef CONFIG_X86 /* No need for a barrier -- XCHG is a barrier on x86. */
@@ -653,8 +654,8 @@ void xen_evtchn_do_upcall(struct pt_regs *regs)

BUG_ON(!irqs_disabled());

-count = __get_cpu_var(nesting_count);
-__get_cpu_var(nesting_count) = 0;
+count = __get_cpu_var(xed_nesting_count);
+__get_cpu_var(xed_nesting_count) = 0;
} while(count != 1);

out:
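
Besides the rename, this hunk moves the definition from function scope to
file scope; the updated definition macros are expected to emit constructs
that are not valid inside a function body, so a function-local static
DEFINE_PER_CPU() can no longer be used. The counter itself is a
re-entrancy guard. A condensed control-flow sketch, with the elided
context lines compressed into comments:

    static DEFINE_PER_CPU(unsigned, xed_nesting_count);

    void xen_evtchn_do_upcall(struct pt_regs *regs)
    {
            unsigned count;

            do {
                    /* ... clear evtchn_upcall_pending ... */
                    if (__get_cpu_var(xed_nesting_count)++)
                            goto out;       /* nested: outer pass rescans */
                    /* ... scan pending event bitmask, dispatch IRQs ... */
                    count = __get_cpu_var(xed_nesting_count);
                    __get_cpu_var(xed_nesting_count) = 0;
            } while (count != 1);   /* re-entered meanwhile? scan again */
    out:
            ;       /* ... set_irq_regs(old_regs), etc. ... */
    }
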
6 changes: 3 additions & 3 deletions kernel/perf_counter.c
@@ -98,16 +98,16 @@ hw_perf_group_sched_in(struct perf_counter *group_leader,

void __weak perf_counter_print_debug(void) { }

-static DEFINE_PER_CPU(int, disable_count);
+static DEFINE_PER_CPU(int, perf_disable_count);

void __perf_disable(void)
{
-__get_cpu_var(disable_count)++;
+__get_cpu_var(perf_disable_count)++;
}

bool __perf_enable(void)
{
-return !--__get_cpu_var(disable_count);
+return !--__get_cpu_var(perf_disable_count);
}

void perf_disable(void)
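
The counter makes PMU disable/enable nestable per CPU: __perf_disable()
bumps the count and __perf_enable() reports true only when it drops back
to zero. A usage sketch; the perf_disable()/perf_enable() wrappers are
visible in the surrounding context but their bodies are not shown here:

    perf_disable();         /* count 0 -> 1, PMU switched off */
    perf_disable();         /* count 1 -> 2, nested no-op */
    perf_enable();          /* count 2 -> 1, PMU stays off */
    perf_enable();          /* count 1 -> 0, PMU re-enabled */
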
6 changes: 3 additions & 3 deletions kernel/trace/trace_events.c
@@ -1318,7 +1318,7 @@ static __init void event_trace_self_tests(void)

#ifdef CONFIG_FUNCTION_TRACER

-static DEFINE_PER_CPU(atomic_t, test_event_disable);
+static DEFINE_PER_CPU(atomic_t, ftrace_test_event_disable);

static void
function_test_events_call(unsigned long ip, unsigned long parent_ip)
@@ -1334,7 +1334,7 @@ function_test_events_call(unsigned long ip, unsigned long parent_ip)
pc = preempt_count();
resched = ftrace_preempt_disable();
cpu = raw_smp_processor_id();
-disabled = atomic_inc_return(&per_cpu(test_event_disable, cpu));
+disabled = atomic_inc_return(&per_cpu(ftrace_test_event_disable, cpu));

if (disabled != 1)
goto out;
@@ -1352,7 +1352,7 @@ function_test_events_call(unsigned long ip, unsigned long parent_ip)
trace_nowake_buffer_unlock_commit(event, flags, pc);

out:
-atomic_dec(&per_cpu(test_event_disable, cpu));
+atomic_dec(&per_cpu(ftrace_test_event_disable, cpu));
ftrace_preempt_enable(resched);
}

6 changes: 3 additions & 3 deletions mm/kmemleak-test.c
@@ -36,7 +36,7 @@ struct test_node {
};

static LIST_HEAD(test_list);
-static DEFINE_PER_CPU(void *, test_pointer);
+static DEFINE_PER_CPU(void *, kmemleak_test_pointer);

/*
* Some very simple testing. This function needs to be extended for
@@ -86,9 +86,9 @@ static int __init kmemleak_test_init(void)
}

for_each_possible_cpu(i) {
-per_cpu(test_pointer, i) = kmalloc(129, GFP_KERNEL);
+per_cpu(kmemleak_test_pointer, i) = kmalloc(129, GFP_KERNEL);
pr_info("kmemleak: kmalloc(129) = %p\n",
-per_cpu(test_pointer, i));
+per_cpu(kmemleak_test_pointer, i));
}

return 0;
(diffs for the remaining 3 changed files, the ipv4/ipv6 cookie_scratch and mm ratelimits renames listed in the commit message, did not load and are not shown)
