Merge tag 'arm64-fixes' of git://git.kernel.org/pub/scm/linux/kernel/git/arm64/linux

Pull arm64 fixes from Will Deacon:
 "Here are a few more arm64 fixes, but things do finally appear to be
  slowing down.  The main fix is avoiding hibernation in a previously
  unanticipated situation where we have CPUs parked in the kernel, but
  it's all good stuff.

   - Fix icache/dcache sync for anonymous pages under migration
   - Correct the ASID limit check
   - Fix parallel builds of Image and Image.gz
   - Refuse to hibernate when we have CPUs that we can't offline"

* tag 'arm64-fixes' of git://git.kernel.org/pub/scm/linux/kernel/git/arm64/linux:
  arm64: hibernate: Don't hibernate on systems with stuck CPUs
  arm64: smp: Add function to determine if cpus are stuck in the kernel
  arm64: mm: remove page_mapping check in __sync_icache_dcache
  arm64: fix boot image dependencies to not generate invalid images
  arm64: update ASID limit
torvalds committed Jun 25, 2016
2 parents 9c46a6d + d74b4e4 commit d05be0d
Showing 6 changed files with 43 additions and 8 deletions.
2 changes: 1 addition & 1 deletion arch/arm64/Makefile
@@ -95,7 +95,7 @@ boot := arch/arm64/boot
 Image: vmlinux
 	$(Q)$(MAKE) $(build)=$(boot) $(boot)/$@

-Image.%: vmlinux
+Image.%: Image
 	$(Q)$(MAKE) $(build)=$(boot) $(boot)/$@

 zinstall install:
12 changes: 12 additions & 0 deletions arch/arm64/include/asm/smp.h
@@ -124,6 +124,18 @@ static inline void cpu_panic_kernel(void)
 	cpu_park_loop();
 }

+/*
+ * If a secondary CPU enters the kernel but fails to come online
+ * (e.g. due to mismatched features), and cannot exit the kernel,
+ * we increment cpus_stuck_in_kernel and leave the CPU in a
+ * quiescent loop within the kernel text. The memory containing
+ * this loop must not be re-used for anything else as the 'stuck'
+ * core is executing it.
+ *
+ * This function is used to inhibit features like kexec and hibernate.
+ */
+bool cpus_are_stuck_in_kernel(void);
+
 #endif /* ifndef __ASSEMBLY__ */

 #endif /* ifndef __ASM_SMP_H */
6 changes: 6 additions & 0 deletions arch/arm64/kernel/hibernate.c
@@ -33,6 +33,7 @@
 #include <asm/pgtable.h>
 #include <asm/pgtable-hwdef.h>
 #include <asm/sections.h>
+#include <asm/smp.h>
 #include <asm/suspend.h>
 #include <asm/virt.h>

@@ -236,6 +237,11 @@ int swsusp_arch_suspend(void)
 	unsigned long flags;
 	struct sleep_stack_data state;

+	if (cpus_are_stuck_in_kernel()) {
+		pr_err("Can't hibernate: no mechanism to offline secondary CPUs.\n");
+		return -EBUSY;
+	}
+
 	local_dbg_save(flags);

 	if (__cpu_suspend_enter(&state)) {
18 changes: 18 additions & 0 deletions arch/arm64/kernel/smp.c
@@ -909,3 +909,21 @@ int setup_profiling_timer(unsigned int multiplier)
 {
 	return -EINVAL;
 }
+
+static bool have_cpu_die(void)
+{
+#ifdef CONFIG_HOTPLUG_CPU
+	int any_cpu = raw_smp_processor_id();
+
+	if (cpu_ops[any_cpu]->cpu_die)
+		return true;
+#endif
+	return false;
+}
+
+bool cpus_are_stuck_in_kernel(void)
+{
+	bool smp_spin_tables = (num_possible_cpus() > 1 && !have_cpu_die());
+
+	return !!cpus_stuck_in_kernel || smp_spin_tables;
+}
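
The header comment added in smp.h says the new helper is meant to inhibit features like kexec as well as hibernate, but this commit only wires up the hibernate side (the swsusp_arch_suspend() hunk above). A hypothetical kexec-side guard following the same pattern might look like the sketch below; machine_kexec_prepare() and its -EBUSY convention are assumptions for illustration, not part of this commit.

/*
 * Hypothetical sketch, not from this commit: gate kexec image loading
 * on the new helper, mirroring the swsusp_arch_suspend() check.
 */
int machine_kexec_prepare(struct kimage *kimage)
{
	/* Refuse to load a new kernel while CPUs are parked in this one. */
	if (cpus_are_stuck_in_kernel()) {
		pr_err("Can't kexec: CPUs are stuck in the kernel.\n");
		return -EBUSY;
	}

	return 0;
}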
9 changes: 6 additions & 3 deletions arch/arm64/mm/context.c
@@ -179,7 +179,7 @@ static u64 new_context(struct mm_struct *mm, unsigned int cpu)
 						 &asid_generation);
 	flush_context(cpu);

-	/* We have at least 1 ASID per CPU, so this will always succeed */
+	/* We have more ASIDs than CPUs, so this will always succeed */
 	asid = find_next_zero_bit(asid_map, NUM_USER_ASIDS, 1);

 set_asid:
@@ -227,8 +227,11 @@ void check_and_switch_context(struct mm_struct *mm, unsigned int cpu)
 static int asids_init(void)
 {
 	asid_bits = get_cpu_asid_bits();
-	/* If we end up with more CPUs than ASIDs, expect things to crash */
-	WARN_ON(NUM_USER_ASIDS < num_possible_cpus());
+	/*
+	 * Expect allocation after rollover to fail if we don't have at least
+	 * one more ASID than CPUs. ASID #0 is reserved for init_mm.
+	 */
+	WARN_ON(NUM_USER_ASIDS - 1 <= num_possible_cpus());
 	atomic64_set(&asid_generation, ASID_FIRST_VERSION);
 	asid_map = kzalloc(BITS_TO_LONGS(NUM_USER_ASIDS) * sizeof(*asid_map),
 			   GFP_KERNEL);
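
The tightened WARN_ON() bound can be sanity-checked with a little arithmetic: ASID #0 is reserved for init_mm, and after a rollover each of the num_possible_cpus() running tasks keeps its current ASID reserved, so a fresh allocation is only guaranteed to succeed when NUM_USER_ASIDS - 1 > num_possible_cpus(). A minimal userspace sketch (hypothetical 8-bit ASID numbers, not from the commit) shows where the old and new checks diverge:

#include <stdio.h>

/* Hypothetical stand-in for an 8-bit ASID configuration. */
#define NUM_USER_ASIDS (1UL << 8) /* 256 ASIDs, #0 reserved for init_mm */

int main(void)
{
	unsigned long cpus;

	/* Walk across the boundary to compare the old and new checks. */
	for (cpus = 254; cpus <= 257; cpus++)
		printf("cpus=%3lu: old check %s, new check %s\n", cpus,
		       NUM_USER_ASIDS < cpus ? "warns" : "silent",
		       NUM_USER_ASIDS - 1 <= cpus ? "warns" : "silent");
	return 0;
}

With 255 or 256 possible CPUs the old check stays silent even though the 255 usable ASIDs can all be pinned after a rollover; the new check warns in exactly those cases.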
4 changes: 0 additions & 4 deletions arch/arm64/mm/flush.c
@@ -71,10 +71,6 @@ void __sync_icache_dcache(pte_t pte, unsigned long addr)
 {
 	struct page *page = pte_page(pte);

-	/* no flushing needed for anonymous pages */
-	if (!page_mapping(page))
-		return;
-
 	if (!test_and_set_bit(PG_dcache_clean, &page->flags))
 		sync_icache_aliases(page_address(page),
 				    PAGE_SIZE << compound_order(page));
