Merge branch 'akpm' (Andrew's patch-bomb)
Merge third batch of patches from Andrew Morton:
 - Some MM stragglers
 - core SMP library cleanups (on_each_cpu_mask)
 - Some IPI optimisations
 - kexec
 - kdump
 - IPMI
 - the radix-tree iterator work
 - various other misc bits.

 "That'll do for -rc1.  I still have ~10 patches for 3.4, will send
  those along when they've baked a little more."

* emailed from Andrew Morton <[email protected]>: (35 commits)
  backlight: fix typo in tosa_lcd.c
  crc32: add help text for the algorithm select option
  mm: move hugepage test examples to tools/testing/selftests/vm
  mm: move slabinfo.c to tools/vm
  mm: move page-types.c from Documentation to tools/vm
  selftests/Makefile: make `run_tests' depend on `all'
  selftests: launch individual selftests from the main Makefile
  radix-tree: use iterators in find_get_pages* functions
  radix-tree: rewrite gang lookup using iterator
  radix-tree: introduce bit-optimized iterator
  fs/proc/namespaces.c: prevent crash when ns_entries[] is empty
  nbd: rename the nbd_device variable from lo to nbd
  pidns: add reboot_pid_ns() to handle the reboot syscall
  sysctl: use bitmap library functions
  ipmi: use locks on watchdog timeout set on reboot
  ipmi: simplify locking
  ipmi: fix message handling during panics
  ipmi: use a tasklet for handling received messages
  ipmi: increase KCS timeouts
  ipmi: decrease the IPMI message transaction time in interrupt mode
  ...
torvalds committed Mar 29, 2012
2 parents 0195c00 + 8da00ed, commit 532bfc8
Showing 55 changed files with 1,225 additions and 768 deletions.
Documentation/Makefile (2 changes: 1 addition & 1 deletion)

@@ -1,3 +1,3 @@
 obj-m := DocBook/ accounting/ auxdisplay/ connector/ \
        filesystems/ filesystems/configfs/ ia64/ laptops/ networking/ \
-       pcmcia/ spi/ timers/ vm/ watchdog/src/
+       pcmcia/ spi/ timers/ watchdog/src/
Documentation/vm/Makefile (8 changes: 0 additions & 8 deletions)

This file was deleted.

arch/arm/kernel/smp_tlb.c (20 changes: 5 additions & 15 deletions)

@@ -13,18 +13,6 @@
 #include <asm/smp_plat.h>
 #include <asm/tlbflush.h>
 
-static void on_each_cpu_mask(void (*func)(void *), void *info, int wait,
-        const struct cpumask *mask)
-{
-        preempt_disable();
-
-        smp_call_function_many(mask, func, info, wait);
-        if (cpumask_test_cpu(smp_processor_id(), mask))
-                func(info);
-
-        preempt_enable();
-}
-
 /**********************************************************************/
 
 /*
@@ -87,7 +75,7 @@ void flush_tlb_all(void)
 void flush_tlb_mm(struct mm_struct *mm)
 {
         if (tlb_ops_need_broadcast())
-                on_each_cpu_mask(ipi_flush_tlb_mm, mm, 1, mm_cpumask(mm));
+                on_each_cpu_mask(mm_cpumask(mm), ipi_flush_tlb_mm, mm, 1);
         else
                 local_flush_tlb_mm(mm);
 }
@@ -98,7 +86,8 @@ void flush_tlb_page(struct vm_area_struct *vma, unsigned long uaddr)
                 struct tlb_args ta;
                 ta.ta_vma = vma;
                 ta.ta_start = uaddr;
-                on_each_cpu_mask(ipi_flush_tlb_page, &ta, 1, mm_cpumask(vma->vm_mm));
+                on_each_cpu_mask(mm_cpumask(vma->vm_mm), ipi_flush_tlb_page,
+                                        &ta, 1);
         } else
                 local_flush_tlb_page(vma, uaddr);
 }
@@ -121,7 +110,8 @@ void flush_tlb_range(struct vm_area_struct *vma,
                 ta.ta_vma = vma;
                 ta.ta_start = start;
                 ta.ta_end = end;
-                on_each_cpu_mask(ipi_flush_tlb_range, &ta, 1, mm_cpumask(vma->vm_mm));
+                on_each_cpu_mask(mm_cpumask(vma->vm_mm), ipi_flush_tlb_range,
+                                        &ta, 1);
         } else
                 local_flush_tlb_range(vma, start, end);
 }
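The call sites above now take the CPU mask as their first argument because the arch-local helper deleted in the first hunk is replaced by a generic on_each_cpu_mask() in the core SMP library (the "core SMP library cleanups (on_each_cpu_mask)" item in the merge message; the kernel/smp.c side is not among the hunks shown on this page). A minimal sketch of that generic helper, reconstructed from the arch-local versions being removed rather than copied from this diff:

    #include <linux/smp.h>
    #include <linux/cpumask.h>

    /* Run func on every CPU in mask, including the calling CPU if it is set. */
    void on_each_cpu_mask(const struct cpumask *mask, void (*func)(void *),
                          void *info, bool wait)
    {
            int cpu = get_cpu();    /* disables preemption */

            /* IPI every other CPU in mask; optionally wait for completion. */
            smp_call_function_many(mask, func, info, wait);

            /* smp_call_function_many() skips the local CPU, so call it here. */
            if (cpumask_test_cpu(cpu, mask)) {
                    local_irq_disable();
                    func(info);
                    local_irq_enable();
            }
            put_cpu();
    }

Compared with the deleted ARM copy, the local invocation runs with interrupts disabled, so the calling CPU sees the same execution context as the CPUs reached by IPI.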
arch/ia64/kernel/acpi.c (6 changes: 3 additions & 3 deletions)

@@ -843,7 +843,7 @@ early_param("additional_cpus", setup_additional_cpus);
  * are onlined, or offlined. The reason is per-cpu data-structures
  * are allocated by some modules at init time, and dont expect to
  * do this dynamically on cpu arrival/departure.
- * cpu_present_map on the other hand can change dynamically.
+ * cpu_present_mask on the other hand can change dynamically.
  * In case when cpu_hotplug is not compiled, then we resort to current
  * behaviour, which is cpu_possible == cpu_present.
  * - Ashok Raj
@@ -921,7 +921,7 @@ static int __cpuinit _acpi_map_lsapic(acpi_handle handle, int *pcpu)
 
         acpi_map_cpu2node(handle, cpu, physid);
 
-        cpu_set(cpu, cpu_present_map);
+        set_cpu_present(cpu, true);
         ia64_cpu_to_sapicid[cpu] = physid;
 
         acpi_processor_set_pdc(handle);
@@ -940,7 +940,7 @@ EXPORT_SYMBOL(acpi_map_lsapic);
 int acpi_unmap_lsapic(int cpu)
 {
         ia64_cpu_to_sapicid[cpu] = -1;
-        cpu_clear(cpu, cpu_present_map);
+        set_cpu_present(cpu, false);
 
 #ifdef CONFIG_ACPI_NUMA
         /* NUMA specific cleanup's */
arch/ia64/kernel/irq_ia64.c (8 changes: 4 additions & 4 deletions)

@@ -117,7 +117,7 @@ static inline int find_unassigned_vector(cpumask_t domain)
         cpumask_t mask;
         int pos, vector;
 
-        cpus_and(mask, domain, cpu_online_map);
+        cpumask_and(&mask, &domain, cpu_online_mask);
         if (cpus_empty(mask))
                 return -EINVAL;
 
@@ -140,7 +140,7 @@ static int __bind_irq_vector(int irq, int vector, cpumask_t domain)
         BUG_ON((unsigned)irq >= NR_IRQS);
         BUG_ON((unsigned)vector >= IA64_NUM_VECTORS);
 
-        cpus_and(mask, domain, cpu_online_map);
+        cpumask_and(&mask, &domain, cpu_online_mask);
         if (cpus_empty(mask))
                 return -EINVAL;
         if ((cfg->vector == vector) && cpus_equal(cfg->domain, domain))
@@ -178,7 +178,7 @@ static void __clear_irq_vector(int irq)
         BUG_ON(cfg->vector == IRQ_VECTOR_UNASSIGNED);
         vector = cfg->vector;
         domain = cfg->domain;
-        cpus_and(mask, cfg->domain, cpu_online_map);
+        cpumask_and(&mask, &cfg->domain, cpu_online_mask);
         for_each_cpu_mask(cpu, mask)
                 per_cpu(vector_irq, cpu)[vector] = -1;
         cfg->vector = IRQ_VECTOR_UNASSIGNED;
@@ -321,7 +321,7 @@ void irq_complete_move(unsigned irq)
         if (unlikely(cpu_isset(smp_processor_id(), cfg->old_domain)))
                 return;
 
-        cpus_and(cleanup_mask, cfg->old_domain, cpu_online_map);
+        cpumask_and(&cleanup_mask, &cfg->old_domain, cpu_online_mask);
         cfg->move_cleanup_count = cpus_weight(cleanup_mask);
         for_each_cpu_mask(i, cleanup_mask)
                 platform_send_ipi(i, IA64_IRQ_MOVE_VECTOR, IA64_IPI_DM_INT, 0);
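The ia64 hunks in this merge all follow one conversion: the by-value cpumask_t helpers (cpus_and(), cpu_set(), cpu_online_map) give way to the pointer-based API (cpumask_and(), set_cpu_online(), cpu_online_mask). A minimal sketch of the pattern; the helper name below is hypothetical and only shows the before/after shape:

    #include <linux/cpumask.h>

    /* Hypothetical helper: pick one online CPU out of an interrupt domain. */
    static int pick_online_cpu(const cpumask_t *domain)
    {
            cpumask_t mask;

            /* old style: cpus_and(mask, *domain, cpu_online_map); */
            cpumask_and(&mask, domain, cpu_online_mask);

            /* old style: if (cpus_empty(mask)) ... */
            if (cpumask_empty(&mask))
                    return -EINVAL;

            /* old style: first_cpu(mask); */
            return cpumask_first(&mask);
    }

The pointer-based calls keep working if cpumasks are configured as off-stack allocations (CONFIG_CPUMASK_OFFSTACK), which the by-value helpers cannot support; this brings ia64 in line with the rest of the tree.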
arch/ia64/kernel/mca.c (6 changes: 4 additions & 2 deletions)

@@ -1514,7 +1514,8 @@ static void
 ia64_mca_cmc_poll (unsigned long dummy)
 {
         /* Trigger a CMC interrupt cascade */
-        platform_send_ipi(first_cpu(cpu_online_map), IA64_CMCP_VECTOR, IA64_IPI_DM_INT, 0);
+        platform_send_ipi(cpumask_first(cpu_online_mask), IA64_CMCP_VECTOR,
+                                                        IA64_IPI_DM_INT, 0);
 }
 
 /*
@@ -1590,7 +1591,8 @@ static void
 ia64_mca_cpe_poll (unsigned long dummy)
 {
         /* Trigger a CPE interrupt cascade */
-        platform_send_ipi(first_cpu(cpu_online_map), IA64_CPEP_VECTOR, IA64_IPI_DM_INT, 0);
+        platform_send_ipi(cpumask_first(cpu_online_mask), IA64_CPEP_VECTOR,
+                                                        IA64_IPI_DM_INT, 0);
 }
 
 #endif /* CONFIG_ACPI */
arch/ia64/kernel/msi_ia64.c (4 changes: 2 additions & 2 deletions)

@@ -57,7 +57,7 @@ int ia64_setup_msi_irq(struct pci_dev *pdev, struct msi_desc *desc)
                 return irq;
 
         irq_set_msi_desc(irq, desc);
-        cpus_and(mask, irq_to_domain(irq), cpu_online_map);
+        cpumask_and(&mask, &(irq_to_domain(irq)), cpu_online_mask);
         dest_phys_id = cpu_physical_id(first_cpu(mask));
         vector = irq_to_vector(irq);
 
@@ -179,7 +179,7 @@ msi_compose_msg(struct pci_dev *pdev, unsigned int irq, struct msi_msg *msg)
         unsigned dest;
         cpumask_t mask;
 
-        cpus_and(mask, irq_to_domain(irq), cpu_online_map);
+        cpumask_and(&mask, &(irq_to_domain(irq)), cpu_online_mask);
         dest = cpu_physical_id(first_cpu(mask));
 
         msg->address_hi = 0;
arch/ia64/kernel/setup.c (2 changes: 1 addition & 1 deletion)

@@ -485,7 +485,7 @@ mark_bsp_online (void)
 {
 #ifdef CONFIG_SMP
         /* If we register an early console, allow CPU 0 to printk */
-        cpu_set(smp_processor_id(), cpu_online_map);
+        set_cpu_online(smp_processor_id(), true);
 #endif
 }
 
arch/ia64/kernel/smp.c (2 changes: 1 addition & 1 deletion)

@@ -76,7 +76,7 @@ stop_this_cpu(void)
         /*
          * Remove this CPU:
          */
-        cpu_clear(smp_processor_id(), cpu_online_map);
+        set_cpu_online(smp_processor_id(), false);
         max_xtp();
         local_irq_disable();
         cpu_halt();
arch/ia64/kernel/smpboot.c (19 changes: 7 additions & 12 deletions)

@@ -400,7 +400,7 @@ smp_callin (void)
         /* Setup the per cpu irq handling data structures */
         __setup_vector_irq(cpuid);
         notify_cpu_starting(cpuid);
-        cpu_set(cpuid, cpu_online_map);
+        set_cpu_online(cpuid, true);
         per_cpu(cpu_state, cpuid) = CPU_ONLINE;
         spin_unlock(&vector_lock);
         ipi_call_unlock_irq();
@@ -547,7 +547,7 @@ do_boot_cpu (int sapicid, int cpu)
         if (!cpu_isset(cpu, cpu_callin_map)) {
                 printk(KERN_ERR "Processor 0x%x/0x%x is stuck.\n", cpu, sapicid);
                 ia64_cpu_to_sapicid[cpu] = -1;
-                cpu_clear(cpu, cpu_online_map);  /* was set in smp_callin() */
+                set_cpu_online(cpu, false);  /* was set in smp_callin() */
                 return -EINVAL;
         }
         return 0;
@@ -577,8 +577,7 @@ smp_build_cpu_map (void)
         }
 
         ia64_cpu_to_sapicid[0] = boot_cpu_id;
-        cpus_clear(cpu_present_map);
-        set_cpu_present(0, true);
+        init_cpu_present(cpumask_of(0));
         set_cpu_possible(0, true);
         for (cpu = 1, i = 0; i < smp_boot_data.cpu_count; i++) {
                 sapicid = smp_boot_data.cpu_phys_id[i];
@@ -605,10 +604,6 @@ smp_prepare_cpus (unsigned int max_cpus)
 
         smp_setup_percpu_timer();
 
-        /*
-         * We have the boot CPU online for sure.
-         */
-        cpu_set(0, cpu_online_map);
         cpu_set(0, cpu_callin_map);
 
         local_cpu_data->loops_per_jiffy = loops_per_jiffy;
@@ -632,7 +627,7 @@ smp_prepare_cpus (unsigned int max_cpus)
 
 void __devinit smp_prepare_boot_cpu(void)
 {
-        cpu_set(smp_processor_id(), cpu_online_map);
+        set_cpu_online(smp_processor_id(), true);
         cpu_set(smp_processor_id(), cpu_callin_map);
         set_numa_node(cpu_to_node_map[smp_processor_id()]);
         per_cpu(cpu_state, smp_processor_id()) = CPU_ONLINE;
@@ -689,7 +684,7 @@ int migrate_platform_irqs(unsigned int cpu)
                 /*
                  * Now re-target the CPEI to a different processor
                  */
-                new_cpei_cpu = any_online_cpu(cpu_online_map);
+                new_cpei_cpu = cpumask_any(cpu_online_mask);
                 mask = cpumask_of(new_cpei_cpu);
                 set_cpei_target_cpu(new_cpei_cpu);
                 data = irq_get_irq_data(ia64_cpe_irq);
@@ -731,10 +726,10 @@ int __cpu_disable(void)
                 return -EBUSY;
         }
 
-        cpu_clear(cpu, cpu_online_map);
+        set_cpu_online(cpu, false);
 
         if (migrate_platform_irqs(cpu)) {
-                cpu_set(cpu, cpu_online_map);
+                set_cpu_online(cpu, true);
                 return -EBUSY;
         }
 
arch/ia64/kernel/topology.c (3 changes: 2 additions & 1 deletion)

@@ -220,7 +220,8 @@ static ssize_t show_shared_cpu_map(struct cache_info *this_leaf, char *buf)
         ssize_t len;
         cpumask_t shared_cpu_map;
 
-        cpus_and(shared_cpu_map, this_leaf->shared_cpu_map, cpu_online_map);
+        cpumask_and(&shared_cpu_map,
+                                &this_leaf->shared_cpu_map, cpu_online_mask);
         len = cpumask_scnprintf(buf, NR_CPUS+1, &shared_cpu_map);
         len += sprintf(buf+len, "\n");
         return len;
arch/tile/include/asm/smp.h (7 changes: 0 additions & 7 deletions)

@@ -43,10 +43,6 @@ void evaluate_message(int tag);
 /* Boot a secondary cpu */
 void online_secondary(void);
 
-/* Call a function on a specified set of CPUs (may include this one). */
-extern void on_each_cpu_mask(const struct cpumask *mask,
-                             void (*func)(void *), void *info, bool wait);
-
 /* Topology of the supervisor tile grid, and coordinates of boot processor */
 extern HV_Topology smp_topology;
 
@@ -91,9 +87,6 @@ void print_disabled_cpus(void);
 
 #else /* !CONFIG_SMP */
 
-#define on_each_cpu_mask(mask, func, info, wait) \
-        do { if (cpumask_test_cpu(0, (mask))) func(info); } while (0)
-
 #define smp_master_cpu         0
 #define smp_height             1
 #define smp_width              1
arch/tile/kernel/smp.c (19 changes: 0 additions & 19 deletions)

@@ -87,25 +87,6 @@ void send_IPI_allbutself(int tag)
         send_IPI_many(&mask, tag);
 }
 
-
-/*
- * Provide smp_call_function_mask, but also run function locally
- * if specified in the mask.
- */
-void on_each_cpu_mask(const struct cpumask *mask, void (*func)(void *),
-                      void *info, bool wait)
-{
-        int cpu = get_cpu();
-        smp_call_function_many(mask, func, info, wait);
-        if (cpumask_test_cpu(cpu, mask)) {
-                local_irq_disable();
-                func(info);
-                local_irq_enable();
-        }
-        put_cpu();
-}
-
-
 /*
  * Functions related to starting/stopping cpus.
  */
arch/x86/kernel/setup.c (11 changes: 1 addition & 10 deletions)

@@ -508,15 +508,6 @@ static void __init memblock_x86_reserve_range_setup_data(void)
 
 #ifdef CONFIG_KEXEC
 
-static inline unsigned long long get_total_mem(void)
-{
-        unsigned long long total;
-
-        total = max_pfn - min_low_pfn;
-
-        return total << PAGE_SHIFT;
-}
-
 /*
  * Keep the crash kernel below this limit. On 32 bits earlier kernels
  * would limit the kernel to the low 512 MiB due to mapping restrictions.
@@ -535,7 +526,7 @@ static void __init reserve_crashkernel(void)
         unsigned long long crash_size, crash_base;
         int ret;
 
-        total_mem = get_total_mem();
+        total_mem = memblock_phys_mem_size();
 
         ret = parse_crashkernel(boot_command_line, total_mem,
                         &crash_size, &crash_base);
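The deleted get_total_mem() derived "total memory" from the pfn span (max_pfn - min_low_pfn), which can over-count when the physical address map has holes; memblock_phys_mem_size() reports only the memory actually registered with memblock. A minimal sketch of the consumer side, simplified from reserve_crashkernel() with the region search, alignment, and reservation elided (an assumption-level outline, not the full function):

    #include <linux/init.h>          /* boot_command_line */
    #include <linux/kexec.h>         /* parse_crashkernel() */
    #include <linux/memblock.h>      /* memblock_phys_mem_size() */

    static void __init crashkernel_sizing_sketch(void)
    {
            unsigned long long total_mem, crash_size, crash_base;
            int ret;

            /* Bytes of RAM known to memblock, excluding holes in the pfn range. */
            total_mem = memblock_phys_mem_size();

            /* Interpret "crashkernel=size[@offset]" relative to total_mem. */
            ret = parse_crashkernel(boot_command_line, total_mem,
                                    &crash_size, &crash_base);
            if (ret != 0 || crash_size <= 0)
                    return;

            /* ... choose and memblock_reserve() the crash kernel region ... */
    }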
...
