Merge branch 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tj/percpu

* 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tj/percpu: (34 commits)
  m68k: rename global variable vmalloc_end to m68k_vmalloc_end
  percpu: add missing per_cpu_ptr_to_phys() definition for UP
  percpu: Fix kdump failure if booted with percpu_alloc=page
  percpu: make misc percpu symbols unique
  percpu: make percpu symbols in ia64 unique
  percpu: make percpu symbols in powerpc unique
  percpu: make percpu symbols in x86 unique
  percpu: make percpu symbols in xen unique
  percpu: make percpu symbols in cpufreq unique
  percpu: make percpu symbols in oprofile unique
  percpu: make percpu symbols in tracer unique
  percpu: make percpu symbols under kernel/ and mm/ unique
  percpu: remove some sparse warnings
  percpu: make alloc_percpu() handle array types
  vmalloc: fix use of non-existent percpu variable in put_cpu_var()
  this_cpu: Use this_cpu_xx in trace_functions_graph.c
  this_cpu: Use this_cpu_xx for ftrace
  this_cpu: Use this_cpu_xx in nmi handling
  this_cpu: Use this_cpu operations in RCU
  this_cpu: Use this_cpu ops for VM statistics
  ...

Fix up trivial (famous last words) global per-cpu naming conflicts in
	arch/x86/kvm/svm.c
	mm/slab.c
torvalds committed Dec 14, 2009
2 parents fb0bbb9 + 51e99be commit d031655
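
The recurring rename is easiest to see in miniature. A minimal sketch of the collision class the series heads off, assuming the 2.6.32-era percpu API; my_cpu_info() is a hypothetical helper, not from the tree:

    #include <linux/percpu.h>
    #include <asm/processor.h>      /* struct cpuinfo_ia64 */

    /*
     * Once the per_cpu__ symbol prefix is dropped, per-cpu variables share
     * the ordinary global symbol namespace, so an arch percpu variable
     * named "cpu_info" would collide with any other global of that name.
     * Hence the rename:
     */
    DEFINE_PER_CPU(struct cpuinfo_ia64, ia64_cpu_info);    /* was: cpu_info */

    /* hypothetical accessor, for illustration only */
    static inline struct cpuinfo_ia64 *my_cpu_info(int cpu)
    {
            return &per_cpu(ia64_cpu_info, cpu);
    }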
Showing 79 changed files with 1,222 additions and 978 deletions.
3 changes: 0 additions & 3 deletions arch/ia64/Kconfig

@@ -87,9 +87,6 @@ config GENERIC_TIME_VSYSCALL
 	bool
 	default y

-config HAVE_LEGACY_PER_CPU_AREA
-	def_bool y
-
 config HAVE_SETUP_PER_CPU_AREA
 	def_bool y

2 changes: 1 addition & 1 deletion arch/ia64/include/asm/meminit.h

@@ -61,7 +61,7 @@ extern int register_active_ranges(u64 start, u64 len, int nid);

 #ifdef CONFIG_VIRTUAL_MEM_MAP
 # define LARGE_GAP	0x40000000 /* Use virtual mem map if hole is > than this */
-extern unsigned long vmalloc_end;
+extern unsigned long VMALLOC_END;
 extern struct page *vmem_map;
 extern int find_largest_hole(u64 start, u64 end, void *arg);
 extern int create_mem_map_page_table(u64 start, u64 end, void *arg);
3 changes: 1 addition & 2 deletions arch/ia64/include/asm/pgtable.h

@@ -228,8 +228,7 @@ ia64_phys_addr_valid (unsigned long addr)
 #define VMALLOC_START	(RGN_BASE(RGN_GATE) + 0x200000000UL)
 #ifdef CONFIG_VIRTUAL_MEM_MAP
 # define VMALLOC_END_INIT	(RGN_BASE(RGN_GATE) + (1UL << (4*PAGE_SHIFT - 9)))
-# define VMALLOC_END		vmalloc_end
-extern unsigned long vmalloc_end;
+extern unsigned long VMALLOC_END;
 #else
 #if defined(CONFIG_SPARSEMEM) && defined(CONFIG_SPARSEMEM_VMEMMAP)
 /* SPARSEMEM_VMEMMAP uses half of vmalloc... */
6 changes: 3 additions & 3 deletions arch/ia64/include/asm/processor.h

@@ -229,16 +229,16 @@ struct cpuinfo_ia64 {
 #endif
 };

-DECLARE_PER_CPU(struct cpuinfo_ia64, cpu_info);
+DECLARE_PER_CPU(struct cpuinfo_ia64, ia64_cpu_info);

 /*
  * The "local" data variable.  It refers to the per-CPU data of the currently executing
  * CPU, much like "current" points to the per-task data of the currently executing task.
  * Do not use the address of local_cpu_data, since it will be different from
  * cpu_data(smp_processor_id())!
  */
-#define local_cpu_data		(&__ia64_per_cpu_var(cpu_info))
-#define cpu_data(cpu)		(&per_cpu(cpu_info, cpu))
+#define local_cpu_data		(&__ia64_per_cpu_var(ia64_cpu_info))
+#define cpu_data(cpu)		(&per_cpu(ia64_cpu_info, cpu))

 extern void print_cpu_info (struct cpuinfo_ia64 *);

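The warning in that comment is concrete on ia64: __ia64_per_cpu_var() goes through the per-CPU mapped window, which sits at the same virtual address on every CPU, while per_cpu() yields the canonical address of each CPU's copy. A hedged illustration, not from this commit:

    struct cpuinfo_ia64 *a = local_cpu_data;               /* fixed per-CPU window VA */
    struct cpuinfo_ia64 *b = cpu_data(smp_processor_id()); /* canonical percpu VA */
    /*
     * On the running CPU both name the same object, yet a != b as
     * pointers -- so never compare or hash these addresses.
     */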
33 changes: 15 additions & 18 deletions arch/ia64/kernel/acpi.c

@@ -702,11 +702,23 @@ int __init early_acpi_boot_init(void)
 		printk(KERN_ERR PREFIX
 		       "Error parsing MADT - no LAPIC entries\n");

+#ifdef CONFIG_SMP
+	if (available_cpus == 0) {
+		printk(KERN_INFO "ACPI: Found 0 CPUS; assuming 1\n");
+		printk(KERN_INFO "CPU 0 (0x%04x)", hard_smp_processor_id());
+		smp_boot_data.cpu_phys_id[available_cpus] =
+		    hard_smp_processor_id();
+		available_cpus = 1;	/* We've got at least one of these, no? */
+	}
+	smp_boot_data.cpu_count = available_cpus;
+#endif
+	/* Make boot-up look pretty */
+	printk(KERN_INFO "%d CPUs available, %d CPUs total\n", available_cpus,
+	       total_cpus);
+
 	return 0;
 }

-
-
 int __init acpi_boot_init(void)
 {

@@ -769,18 +781,8 @@ int __init acpi_boot_init(void)
 	if (acpi_table_parse(ACPI_SIG_FADT, acpi_parse_fadt))
 		printk(KERN_ERR PREFIX "Can't find FADT\n");

-#ifdef CONFIG_ACPI_NUMA
 #ifdef CONFIG_SMP
-	if (available_cpus == 0) {
-		printk(KERN_INFO "ACPI: Found 0 CPUS; assuming 1\n");
-		printk(KERN_INFO "CPU 0 (0x%04x)", hard_smp_processor_id());
-		smp_boot_data.cpu_phys_id[available_cpus] =
-		    hard_smp_processor_id();
-		available_cpus = 1;	/* We've got at least one of these, no? */
-	}
-	smp_boot_data.cpu_count = available_cpus;
-
 	smp_build_cpu_map();
+# ifdef CONFIG_ACPI_NUMA
 	if (srat_num_cpus == 0) {
 		int cpu, i = 1;
 		for (cpu = 0; cpu < smp_boot_data.cpu_count; cpu++)
@@ -789,14 +791,9 @@ int __init acpi_boot_init(void)
 			node_cpuid[i++].phys_id =
 			    smp_boot_data.cpu_phys_id[cpu];
 	}
+# endif
 #endif
-#ifdef CONFIG_ACPI_NUMA
 	build_cpu_to_node_map();
-#endif
-	/* Make boot-up look pretty */
-	printk(KERN_INFO "%d CPUs available, %d CPUs total\n", available_cpus,
-	       total_cpus);
 	return 0;
 }

4 changes: 2 additions & 2 deletions arch/ia64/kernel/head.S

@@ -1051,7 +1051,7 @@ END(ia64_delay_loop)
  * intermediate precision so that we can produce a full 64-bit result.
  */
 GLOBAL_ENTRY(ia64_native_sched_clock)
-	addl r8=THIS_CPU(cpu_info) + IA64_CPUINFO_NSEC_PER_CYC_OFFSET,r0
+	addl r8=THIS_CPU(ia64_cpu_info) + IA64_CPUINFO_NSEC_PER_CYC_OFFSET,r0
 	mov.m r9=ar.itc		// fetch cycle-counter (35 cyc)
 	;;
 	ldf8 f8=[r8]
@@ -1077,7 +1077,7 @@ sched_clock = ia64_native_sched_clock
 #ifdef CONFIG_VIRT_CPU_ACCOUNTING
 GLOBAL_ENTRY(cycle_to_cputime)
 	alloc r16=ar.pfs,1,0,0,0
-	addl r8=THIS_CPU(cpu_info) + IA64_CPUINFO_NSEC_PER_CYC_OFFSET,r0
+	addl r8=THIS_CPU(ia64_cpu_info) + IA64_CPUINFO_NSEC_PER_CYC_OFFSET,r0
 	;;
 	ldf8 f8=[r8]
 	;;
2 changes: 1 addition & 1 deletion arch/ia64/kernel/ia64_ksyms.c

@@ -30,7 +30,7 @@ EXPORT_SYMBOL(max_low_pfn);	/* defined by bootmem.c, but not exported by generic
 #endif

 #include <asm/processor.h>
-EXPORT_SYMBOL(per_cpu__cpu_info);
+EXPORT_SYMBOL(per_cpu__ia64_cpu_info);
 #ifdef CONFIG_SMP
 EXPORT_SYMBOL(per_cpu__local_per_cpu_offset);
 #endif
2 changes: 1 addition & 1 deletion arch/ia64/kernel/mca_asm.S

@@ -59,7 +59,7 @@
 ia64_do_tlb_purge:
 #define O(member)	IA64_CPUINFO_##member##_OFFSET

-	GET_THIS_PADDR(r2, cpu_info)	// load phys addr of cpu_info into r2
+	GET_THIS_PADDR(r2, ia64_cpu_info)	// load phys addr of cpu_info into r2
 	;;
 	addl r17=O(PTCE_STRIDE),r2
 	addl r2=O(PTCE_BASE),r2
2 changes: 1 addition & 1 deletion arch/ia64/kernel/relocate_kernel.S

@@ -61,7 +61,7 @@ GLOBAL_ENTRY(relocate_new_kernel)

 	// purge all TC entries
 #define O(member)	IA64_CPUINFO_##member##_OFFSET
-	GET_THIS_PADDR(r2, cpu_info)	// load phys addr of cpu_info into r2
+	GET_THIS_PADDR(r2, ia64_cpu_info)	// load phys addr of cpu_info into r2
 	;;
 	addl r17=O(PTCE_STRIDE),r2
 	addl r2=O(PTCE_BASE),r2
27 changes: 7 additions & 20 deletions arch/ia64/kernel/setup.c

@@ -74,7 +74,7 @@ unsigned long __per_cpu_offset[NR_CPUS];
 EXPORT_SYMBOL(__per_cpu_offset);
 #endif

-DEFINE_PER_CPU(struct cpuinfo_ia64, cpu_info);
+DEFINE_PER_CPU(struct cpuinfo_ia64, ia64_cpu_info);
 DEFINE_PER_CPU(unsigned long, local_per_cpu_offset);
 unsigned long ia64_cycles_per_usec;
 struct ia64_boot_param *ia64_boot_param;
@@ -566,19 +566,18 @@ setup_arch (char **cmdline_p)
 	early_acpi_boot_init();
 # ifdef CONFIG_ACPI_NUMA
 	acpi_numa_init();
-#ifdef CONFIG_ACPI_HOTPLUG_CPU
+#  ifdef CONFIG_ACPI_HOTPLUG_CPU
 	prefill_possible_map();
-#endif
+#  endif
 	per_cpu_scan_finalize((cpus_weight(early_cpu_possible_map) == 0 ?
 		32 : cpus_weight(early_cpu_possible_map)),
 		additional_cpus > 0 ? additional_cpus : 0);
 # endif
-#else
-# ifdef CONFIG_SMP
-	smp_build_cpu_map();	/* happens, e.g., with the Ski simulator */
-# endif
 #endif /* CONFIG_APCI_BOOT */

+#ifdef CONFIG_SMP
+	smp_build_cpu_map();
+#endif
 	find_memory();

@@ -855,18 +854,6 @@ identify_cpu (struct cpuinfo_ia64 *c)
 	c->unimpl_pa_mask = ~((1L<<63) | ((1L << phys_addr_size) - 1));
 }

-/*
- * In UP configuration, setup_per_cpu_areas() is defined in
- * include/linux/percpu.h
- */
-#ifdef CONFIG_SMP
-void __init
-setup_per_cpu_areas (void)
-{
-	/* start_kernel() requires this... */
-}
-#endif
-
 /*
  * Do the following calculations:
  *
@@ -980,7 +967,7 @@ cpu_init (void)
	 * depends on the data returned by identify_cpu().  We break the dependency by
	 * accessing cpu_data() through the canonical per-CPU address.
	 */
-	cpu_info = cpu_data + ((char *) &__ia64_per_cpu_var(cpu_info) - __per_cpu_start);
+	cpu_info = cpu_data + ((char *) &__ia64_per_cpu_var(ia64_cpu_info) - __per_cpu_start);
 	identify_cpu(cpu_info);

 #ifdef CONFIG_MCKINLEY
11 changes: 6 additions & 5 deletions arch/ia64/kernel/vmlinux.lds.S

@@ -166,6 +166,12 @@ SECTIONS
 	}
 #endif

+#ifdef CONFIG_SMP
+  . = ALIGN(PERCPU_PAGE_SIZE);
+  __cpu0_per_cpu = .;
+  . = . + PERCPU_PAGE_SIZE;	/* cpu0 per-cpu space */
+#endif
+
   . = ALIGN(PAGE_SIZE);
   __init_end = .;

@@ -198,11 +204,6 @@ SECTIONS
   data : { } :data
   .data : AT(ADDR(.data) - LOAD_OFFSET)
	{
-#ifdef CONFIG_SMP
-  . = ALIGN(PERCPU_PAGE_SIZE);
-  __cpu0_per_cpu = .;
-  . = . + PERCPU_PAGE_SIZE;	/* cpu0 per-cpu space */
-#endif
	INIT_TASK_DATA(PAGE_SIZE)
	CACHELINE_ALIGNED_DATA(SMP_CACHE_BYTES)
	READ_MOSTLY_DATA(SMP_CACHE_BYTES)
99 changes: 80 additions & 19 deletions arch/ia64/mm/contig.c

@@ -154,38 +154,99 @@ static void *cpu_data;
 void * __cpuinit
 per_cpu_init (void)
 {
-	int cpu;
-	static int first_time=1;
+	static bool first_time = true;
+	void *cpu0_data = __cpu0_per_cpu;
+	unsigned int cpu;

+	if (!first_time)
+		goto skip;
+	first_time = false;
+
 	/*
-	 * get_free_pages() cannot be used before cpu_init() done.  BSP
-	 * allocates "NR_CPUS" pages for all CPUs to avoid that AP calls
-	 * get_zeroed_page().
+	 * get_free_pages() cannot be used before cpu_init() done.
+	 * BSP allocates PERCPU_PAGE_SIZE bytes for all possible CPUs
+	 * to avoid that AP calls get_zeroed_page().
	 */
-	if (first_time) {
-		void *cpu0_data = __cpu0_per_cpu;
+	for_each_possible_cpu(cpu) {
+		void *src = cpu == 0 ? cpu0_data : __phys_per_cpu_start;

-		first_time=0;
+		memcpy(cpu_data, src, __per_cpu_end - __per_cpu_start);
+		__per_cpu_offset[cpu] = (char *)cpu_data - __per_cpu_start;
+		per_cpu(local_per_cpu_offset, cpu) = __per_cpu_offset[cpu];

-		__per_cpu_offset[0] = (char *) cpu0_data - __per_cpu_start;
-		per_cpu(local_per_cpu_offset, 0) = __per_cpu_offset[0];
+		/*
+		 * percpu area for cpu0 is moved from the __init area
+		 * which is setup by head.S and used till this point.
+		 * Update ar.k3.  This move ensures that percpu
+		 * area for cpu0 is on the correct node and its
+		 * virtual address isn't insanely far from other
+		 * percpu areas which is important for congruent
+		 * percpu allocator.
+		 */
+		if (cpu == 0)
+			ia64_set_kr(IA64_KR_PER_CPU_DATA, __pa(cpu_data) -
+				    (unsigned long)__per_cpu_start);

-		for (cpu = 1; cpu < NR_CPUS; cpu++) {
-			memcpy(cpu_data, __phys_per_cpu_start, __per_cpu_end - __per_cpu_start);
-			__per_cpu_offset[cpu] = (char *) cpu_data - __per_cpu_start;
-			cpu_data += PERCPU_PAGE_SIZE;
-			per_cpu(local_per_cpu_offset, cpu) = __per_cpu_offset[cpu];
-		}
+		cpu_data += PERCPU_PAGE_SIZE;
 	}
+skip:
 	return __per_cpu_start + __per_cpu_offset[smp_processor_id()];
 }

 static inline void
 alloc_per_cpu_data(void)
 {
-	cpu_data = __alloc_bootmem(PERCPU_PAGE_SIZE * NR_CPUS-1,
+	cpu_data = __alloc_bootmem(PERCPU_PAGE_SIZE * num_possible_cpus(),
				   PERCPU_PAGE_SIZE, __pa(MAX_DMA_ADDRESS));
 }

+/**
+ * setup_per_cpu_areas - setup percpu areas
+ *
+ * Arch code has already allocated and initialized percpu areas.  All
+ * this function has to do is to teach the determined layout to the
+ * dynamic percpu allocator, which happens to be more complex than
+ * creating whole new ones using helpers.
+ */
+void __init
+setup_per_cpu_areas(void)
+{
+	struct pcpu_alloc_info *ai;
+	struct pcpu_group_info *gi;
+	unsigned int cpu;
+	ssize_t static_size, reserved_size, dyn_size;
+	int rc;
+
+	ai = pcpu_alloc_alloc_info(1, num_possible_cpus());
+	if (!ai)
+		panic("failed to allocate pcpu_alloc_info");
+	gi = &ai->groups[0];
+
+	/* units are assigned consecutively to possible cpus */
+	for_each_possible_cpu(cpu)
+		gi->cpu_map[gi->nr_units++] = cpu;
+
+	/* set parameters */
+	static_size = __per_cpu_end - __per_cpu_start;
+	reserved_size = PERCPU_MODULE_RESERVE;
+	dyn_size = PERCPU_PAGE_SIZE - static_size - reserved_size;
+	if (dyn_size < 0)
+		panic("percpu area overflow static=%zd reserved=%zd\n",
+		      static_size, reserved_size);
+
+	ai->static_size   = static_size;
+	ai->reserved_size = reserved_size;
+	ai->dyn_size      = dyn_size;
+	ai->unit_size     = PERCPU_PAGE_SIZE;
+	ai->atom_size     = PAGE_SIZE;
+	ai->alloc_size    = PERCPU_PAGE_SIZE;
+
+	rc = pcpu_setup_first_chunk(ai, __per_cpu_start + __per_cpu_offset[0]);
+	if (rc)
+		panic("failed to setup percpu area (err=%d)", rc);
+
+	pcpu_free_alloc_info(ai);
+}
 #else
 #define alloc_per_cpu_data() do { } while (0)
 #endif /* CONFIG_SMP */
@@ -270,8 +331,8 @@ paging_init (void)

	map_size = PAGE_ALIGN(ALIGN(max_low_pfn, MAX_ORDER_NR_PAGES) *
		sizeof(struct page));
-	vmalloc_end -= map_size;
-	vmem_map = (struct page *) vmalloc_end;
+	VMALLOC_END -= map_size;
+	vmem_map = (struct page *) VMALLOC_END;
	efi_memmap_walk(create_mem_map_page_table, NULL);

	/*
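The size bookkeeping in the new setup_per_cpu_areas() is worth making concrete. A sketch with illustrative numbers only; the real values come from the linker script and PERCPU_PAGE_SIZE:

    /* one PERCPU_PAGE_SIZE unit per possible CPU; whatever the static
     * image and the module reserve don't use becomes dynamic space */
    unit_size     = PERCPU_PAGE_SIZE;                   /* 64K on ia64 */
    static_size   = __per_cpu_end - __per_cpu_start;    /* say 24K of DEFINE_PER_CPU data */
    reserved_size = PERCPU_MODULE_RESERVE;              /* say 8K for module percpu vars */
    dyn_size      = unit_size - static_size - reserved_size;  /* 32K for alloc_percpu() */
    /* dyn_size < 0 would mean the static image plus the reserve no longer
     * fit in one unit, hence the panic() in the code above */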