Merge tag 'arm64-upstream' of git://git.kernel.org/pub/scm/linux/kernel/git/arm64/linux

Pull arm64 updates from Will Deacon:
 "A bunch of good stuff in here. Worth noting is that we've pulled in
  the x86/mm branch from -tip so that we can make use of the core
  ioremap changes which allow us to put down huge mappings in the
  vmalloc area without screwing up the TLB. Much of the positive
  diffstat is because of the rseq selftest for arm64.

  Summary:

   - Wire up support for qspinlock, replacing our trusty ticket lock
     code

   - Add an IPI to flush_icache_range() to ensure that stale
     instructions fetched into the pipeline are discarded along with the
     I-cache lines

   - Support for the GCC "stackleak" plugin

   - Support for restartable sequences, plus an arm64 port for the
     selftest

   - Kexec/kdump support on systems booting with ACPI

   - Rewrite of our syscall entry code in C, which allows us to zero the
     GPRs on entry from userspace

   - Support for chained PMU counters, allowing 64-bit event counters to
     be constructed on current CPUs (a rough sketch of the idea follows
     this list)

   - Ensure scheduler topology information is kept up-to-date with CPU
     hotplug events

   - Re-enable support for huge vmalloc/IO mappings now that the core
     code has the correct hooks to use break-before-make sequences

   - Miscellaneous, non-critical fixes and cleanups"
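
A rough illustration of the chained-counter idea flagged in the list
above; this is an editor's sketch, not the kernel's implementation, and
the read_ctr() helper plus the even/odd index pairing are assumptions
for illustration only:

/* Combine an even-numbered 32-bit counter (low half) with the next odd
 * counter, which counts the even one's overflows, into an effectively
 * 64-bit event count. */
static u64 read_chained_counter(u32 (*read_ctr)(int idx), int even_idx)
{
        u32 hi, lo;

        do {
                hi = read_ctr(even_idx + 1);    /* overflow (high) half */
                lo = read_ctr(even_idx);        /* low 32 bits */
        } while (read_ctr(even_idx + 1) != hi); /* retry on rollover */

        return ((u64)hi << 32) | lo;
}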

* tag 'arm64-upstream' of git://git.kernel.org/pub/scm/linux/kernel/git/arm64/linux: (90 commits)
  arm64: alternative: Use true and false for boolean values
  arm64: kexec: Add comment to explain use of __flush_icache_range()
  arm64: sdei: Mark sdei stack helper functions as static
  arm64, kaslr: export offset in VMCOREINFO ELF notes
  arm64: perf: Add cap_user_time aarch64
  efi/libstub: Only disable stackleak plugin for arm64
  arm64: drop unused kernel_neon_begin_partial() macro
  arm64: kexec: machine_kexec should call __flush_icache_range
  arm64: svc: Ensure hardirq tracing is updated before return
  arm64: mm: Export __sync_icache_dcache() for xen-privcmd
  drivers/perf: arm-ccn: Use devm_ioremap_resource() to map memory
  arm64: Add support for STACKLEAK gcc plugin
  arm64: Add stack information to on_accessible_stack
  drivers/perf: hisi: update the sccl_id/ccl_id when MT is supported
  arm64: fix ACPI dependencies
  rseq/selftests: Add support for arm64
  arm64: acpi: fix alignment fault in accessing ACPI
  efi/arm: map UEFI memory map even w/o runtime services enabled
  efi/arm: preserve early mapping of UEFI memory map longer for BGRT
  drivers: acpi: add dependency of EFI for arm64
  ...
torvalds committed Aug 14, 2018
2 parents d0055f3 + 3c4d913 commit 1202f4f
Showing 91 changed files with 2,154 additions and 1,001 deletions.
1 change: 1 addition & 0 deletions MAINTAINERS
@@ -2270,6 +2270,7 @@ L: [email protected] (moderated for non-subscribers)
T: git git://git.kernel.org/pub/scm/linux/kernel/git/arm64/linux.git
S: Maintained
F: arch/arm64/
X: arch/arm64/boot/dts/
F: Documentation/arm64/

AS3645A LED FLASH CONTROLLER DRIVER
14 changes: 7 additions & 7 deletions arch/arm/include/asm/kvm_emulate.h
@@ -26,13 +26,13 @@
#include <asm/cputype.h>

/* arm64 compatibility macros */
#define COMPAT_PSR_MODE_ABT ABT_MODE
#define COMPAT_PSR_MODE_UND UND_MODE
#define COMPAT_PSR_T_BIT PSR_T_BIT
#define COMPAT_PSR_I_BIT PSR_I_BIT
#define COMPAT_PSR_A_BIT PSR_A_BIT
#define COMPAT_PSR_E_BIT PSR_E_BIT
#define COMPAT_PSR_IT_MASK PSR_IT_MASK
#define PSR_AA32_MODE_ABT ABT_MODE
#define PSR_AA32_MODE_UND UND_MODE
#define PSR_AA32_T_BIT PSR_T_BIT
#define PSR_AA32_I_BIT PSR_I_BIT
#define PSR_AA32_A_BIT PSR_A_BIT
#define PSR_AA32_E_BIT PSR_E_BIT
#define PSR_AA32_IT_MASK PSR_IT_MASK

unsigned long *vcpu_reg(struct kvm_vcpu *vcpu, u8 reg_num);

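The COMPAT_PSR_* to PSR_AA32_* rename gives arm and arm64 KVM a single
naming scheme for AArch32 guest PSR bits. A hypothetical usage sketch
(the helper name is illustrative, not part of the patch):

/* Test whether a 32-bit guest is executing in Thumb state, using the
 * PSR_AA32_* names that now resolve on both architectures. */
static inline bool vcpu_in_thumb_state(struct kvm_vcpu *vcpu)
{
        return !!(*vcpu_cpsr(vcpu) & PSR_AA32_T_BIT);
}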
14 changes: 10 additions & 4 deletions arch/arm/kernel/perf_event_v6.c
@@ -233,7 +233,7 @@ armv6_pmcr_counter_has_overflowed(unsigned long pmcr,
return ret;
}

static inline u32 armv6pmu_read_counter(struct perf_event *event)
static inline u64 armv6pmu_read_counter(struct perf_event *event)
{
struct hw_perf_event *hwc = &event->hw;
int counter = hwc->idx;
@@ -251,7 +251,7 @@ static inline u32 armv6pmu_read_counter(struct perf_event *event)
return value;
}

static inline void armv6pmu_write_counter(struct perf_event *event, u32 value)
static inline void armv6pmu_write_counter(struct perf_event *event, u64 value)
{
struct hw_perf_event *hwc = &event->hw;
int counter = hwc->idx;
@@ -411,6 +411,12 @@ armv6pmu_get_event_idx(struct pmu_hw_events *cpuc,
}
}

static void armv6pmu_clear_event_idx(struct pmu_hw_events *cpuc,
struct perf_event *event)
{
clear_bit(event->hw.idx, cpuc->used_mask);
}

static void armv6pmu_disable_event(struct perf_event *event)
{
unsigned long val, mask, evt, flags;
@@ -491,11 +497,11 @@ static void armv6pmu_init(struct arm_pmu *cpu_pmu)
cpu_pmu->read_counter = armv6pmu_read_counter;
cpu_pmu->write_counter = armv6pmu_write_counter;
cpu_pmu->get_event_idx = armv6pmu_get_event_idx;
cpu_pmu->clear_event_idx = armv6pmu_clear_event_idx;
cpu_pmu->start = armv6pmu_start;
cpu_pmu->stop = armv6pmu_stop;
cpu_pmu->map_event = armv6_map_event;
cpu_pmu->num_events = 3;
cpu_pmu->max_period = (1LLU << 32) - 1;
}

static int armv6_1136_pmu_init(struct arm_pmu *cpu_pmu)
@@ -542,11 +548,11 @@ static int armv6mpcore_pmu_init(struct arm_pmu *cpu_pmu)
cpu_pmu->read_counter = armv6pmu_read_counter;
cpu_pmu->write_counter = armv6pmu_write_counter;
cpu_pmu->get_event_idx = armv6pmu_get_event_idx;
cpu_pmu->clear_event_idx = armv6pmu_clear_event_idx;
cpu_pmu->start = armv6pmu_start;
cpu_pmu->stop = armv6pmu_stop;
cpu_pmu->map_event = armv6mpcore_map_event;
cpu_pmu->num_events = 3;
cpu_pmu->max_period = (1LLU << 32) - 1;

return 0;
}
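The new clear_event_idx callback lets the core arm_pmu layer stop
assuming that a plain clear_bit() on used_mask is enough to release a
counter; Krait and Scorpion, for example, also track region/group bits.
A simplified sketch of the core removal path under that design (not the
verbatim drivers/perf/arm_pmu.c code):

static void armpmu_del(struct perf_event *event, int flags)
{
        struct arm_pmu *armpmu = to_arm_pmu(event->pmu);
        struct pmu_hw_events *hw_events = this_cpu_ptr(armpmu->hw_events);
        struct hw_perf_event *hwc = &event->hw;

        armpmu_stop(event, PERF_EF_UPDATE);
        hw_events->events[hwc->idx] = NULL;
        /* The backend decides how to free the index; was clear_bit(). */
        armpmu->clear_event_idx(hw_events, event);
        perf_event_update_userpage(event);
        hwc->idx = -1;
}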
15 changes: 12 additions & 3 deletions arch/arm/kernel/perf_event_v7.c
@@ -743,7 +743,7 @@ static inline void armv7_pmnc_select_counter(int idx)
isb();
}

static inline u32 armv7pmu_read_counter(struct perf_event *event)
static inline u64 armv7pmu_read_counter(struct perf_event *event)
{
struct arm_pmu *cpu_pmu = to_arm_pmu(event->pmu);
struct hw_perf_event *hwc = &event->hw;
@@ -763,7 +763,7 @@ static inline u32 armv7pmu_read_counter(struct perf_event *event)
return value;
}

static inline void armv7pmu_write_counter(struct perf_event *event, u32 value)
static inline void armv7pmu_write_counter(struct perf_event *event, u64 value)
{
struct arm_pmu *cpu_pmu = to_arm_pmu(event->pmu);
struct hw_perf_event *hwc = &event->hw;
@@ -1058,6 +1058,12 @@ static int armv7pmu_get_event_idx(struct pmu_hw_events *cpuc,
return -EAGAIN;
}

static void armv7pmu_clear_event_idx(struct pmu_hw_events *cpuc,
struct perf_event *event)
{
clear_bit(event->hw.idx, cpuc->used_mask);
}

/*
* Add an event filter to a given event. This will only work for PMUv2 PMUs.
*/
@@ -1167,10 +1173,10 @@ static void armv7pmu_init(struct arm_pmu *cpu_pmu)
cpu_pmu->read_counter = armv7pmu_read_counter;
cpu_pmu->write_counter = armv7pmu_write_counter;
cpu_pmu->get_event_idx = armv7pmu_get_event_idx;
cpu_pmu->clear_event_idx = armv7pmu_clear_event_idx;
cpu_pmu->start = armv7pmu_start;
cpu_pmu->stop = armv7pmu_stop;
cpu_pmu->reset = armv7pmu_reset;
cpu_pmu->max_period = (1LLU << 32) - 1;
};

static void armv7_read_num_pmnc_events(void *info)
@@ -1638,6 +1644,7 @@ static void krait_pmu_clear_event_idx(struct pmu_hw_events *cpuc,
bool venum_event = EVENT_VENUM(hwc->config_base);
bool krait_event = EVENT_CPU(hwc->config_base);

armv7pmu_clear_event_idx(cpuc, event);
if (venum_event || krait_event) {
bit = krait_event_to_bit(event, region, group);
clear_bit(bit, cpuc->used_mask);
@@ -1967,6 +1974,7 @@ static void scorpion_pmu_clear_event_idx(struct pmu_hw_events *cpuc,
bool venum_event = EVENT_VENUM(hwc->config_base);
bool scorpion_event = EVENT_CPU(hwc->config_base);

armv7pmu_clear_event_idx(cpuc, event);
if (venum_event || scorpion_event) {
bit = scorpion_event_to_bit(event, region, group);
clear_bit(bit, cpuc->used_mask);
@@ -2030,6 +2038,7 @@ static struct platform_driver armv7_pmu_driver = {
.driver = {
.name = "armv7-pmu",
.of_match_table = armv7_pmu_of_device_ids,
.suppress_bind_attrs = true,
},
.probe = armv7_pmu_device_probe,
};
18 changes: 12 additions & 6 deletions arch/arm/kernel/perf_event_xscale.c
@@ -292,6 +292,12 @@ xscale1pmu_get_event_idx(struct pmu_hw_events *cpuc,
}
}

static void xscalepmu_clear_event_idx(struct pmu_hw_events *cpuc,
struct perf_event *event)
{
clear_bit(event->hw.idx, cpuc->used_mask);
}

static void xscale1pmu_start(struct arm_pmu *cpu_pmu)
{
unsigned long flags, val;
@@ -316,7 +322,7 @@ static void xscale1pmu_stop(struct arm_pmu *cpu_pmu)
raw_spin_unlock_irqrestore(&events->pmu_lock, flags);
}

static inline u32 xscale1pmu_read_counter(struct perf_event *event)
static inline u64 xscale1pmu_read_counter(struct perf_event *event)
{
struct hw_perf_event *hwc = &event->hw;
int counter = hwc->idx;
@@ -337,7 +343,7 @@ static inline u32 xscale1pmu_read_counter(struct perf_event *event)
return val;
}

static inline void xscale1pmu_write_counter(struct perf_event *event, u32 val)
static inline void xscale1pmu_write_counter(struct perf_event *event, u64 val)
{
struct hw_perf_event *hwc = &event->hw;
int counter = hwc->idx;
@@ -370,11 +376,11 @@ static int xscale1pmu_init(struct arm_pmu *cpu_pmu)
cpu_pmu->read_counter = xscale1pmu_read_counter;
cpu_pmu->write_counter = xscale1pmu_write_counter;
cpu_pmu->get_event_idx = xscale1pmu_get_event_idx;
cpu_pmu->clear_event_idx = xscalepmu_clear_event_idx;
cpu_pmu->start = xscale1pmu_start;
cpu_pmu->stop = xscale1pmu_stop;
cpu_pmu->map_event = xscale_map_event;
cpu_pmu->num_events = 3;
cpu_pmu->max_period = (1LLU << 32) - 1;

return 0;
}
@@ -679,7 +685,7 @@ static void xscale2pmu_stop(struct arm_pmu *cpu_pmu)
raw_spin_unlock_irqrestore(&events->pmu_lock, flags);
}

static inline u32 xscale2pmu_read_counter(struct perf_event *event)
static inline u64 xscale2pmu_read_counter(struct perf_event *event)
{
struct hw_perf_event *hwc = &event->hw;
int counter = hwc->idx;
@@ -706,7 +712,7 @@ static inline u32 xscale2pmu_read_counter(struct perf_event *event)
return val;
}

static inline void xscale2pmu_write_counter(struct perf_event *event, u32 val)
static inline void xscale2pmu_write_counter(struct perf_event *event, u64 val)
{
struct hw_perf_event *hwc = &event->hw;
int counter = hwc->idx;
@@ -739,11 +745,11 @@ static int xscale2pmu_init(struct arm_pmu *cpu_pmu)
cpu_pmu->read_counter = xscale2pmu_read_counter;
cpu_pmu->write_counter = xscale2pmu_write_counter;
cpu_pmu->get_event_idx = xscale2pmu_get_event_idx;
cpu_pmu->clear_event_idx = xscalepmu_clear_event_idx;
cpu_pmu->start = xscale2pmu_start;
cpu_pmu->stop = xscale2pmu_stop;
cpu_pmu->map_event = xscale_map_event;
cpu_pmu->num_events = 5;
cpu_pmu->max_period = (1LLU << 32) - 1;

return 0;
}
18 changes: 18 additions & 0 deletions arch/arm64/Kconfig
@@ -24,6 +24,7 @@ config ARM64
select ARCH_HAS_SG_CHAIN
select ARCH_HAS_STRICT_KERNEL_RWX
select ARCH_HAS_STRICT_MODULE_RWX
select ARCH_HAS_SYSCALL_WRAPPER
select ARCH_HAS_TICK_BROADCAST if GENERIC_CLOCKEVENTS_BROADCAST
select ARCH_HAVE_NMI_SAFE_CMPXCHG
select ARCH_INLINE_READ_LOCK if !PREEMPT
@@ -42,8 +43,19 @@ config ARM64
select ARCH_INLINE_WRITE_UNLOCK_BH if !PREEMPT
select ARCH_INLINE_WRITE_UNLOCK_IRQ if !PREEMPT
select ARCH_INLINE_WRITE_UNLOCK_IRQRESTORE if !PREEMPT
select ARCH_INLINE_SPIN_TRYLOCK if !PREEMPT
select ARCH_INLINE_SPIN_TRYLOCK_BH if !PREEMPT
select ARCH_INLINE_SPIN_LOCK if !PREEMPT
select ARCH_INLINE_SPIN_LOCK_BH if !PREEMPT
select ARCH_INLINE_SPIN_LOCK_IRQ if !PREEMPT
select ARCH_INLINE_SPIN_LOCK_IRQSAVE if !PREEMPT
select ARCH_INLINE_SPIN_UNLOCK if !PREEMPT
select ARCH_INLINE_SPIN_UNLOCK_BH if !PREEMPT
select ARCH_INLINE_SPIN_UNLOCK_IRQ if !PREEMPT
select ARCH_INLINE_SPIN_UNLOCK_IRQRESTORE if !PREEMPT
select ARCH_USE_CMPXCHG_LOCKREF
select ARCH_USE_QUEUED_RWLOCKS
select ARCH_USE_QUEUED_SPINLOCKS
select ARCH_SUPPORTS_MEMORY_FAILURE
select ARCH_SUPPORTS_ATOMIC_RMW
select ARCH_SUPPORTS_INT128 if GCC_VERSION >= 50000 || CC_IS_CLANG
@@ -97,6 +109,7 @@ config ARM64
select HAVE_ARCH_MMAP_RND_BITS
select HAVE_ARCH_MMAP_RND_COMPAT_BITS if COMPAT
select HAVE_ARCH_SECCOMP_FILTER
select HAVE_ARCH_STACKLEAK
select HAVE_ARCH_THREAD_STRUCT_WHITELIST
select HAVE_ARCH_TRACEHOOK
select HAVE_ARCH_TRANSPARENT_HUGEPAGE
@@ -128,6 +141,7 @@ config ARM64
select HAVE_PERF_USER_STACK_DUMP
select HAVE_REGS_AND_STACK_ACCESS_API
select HAVE_RCU_TABLE_FREE
select HAVE_RSEQ
select HAVE_STACKPROTECTOR
select HAVE_SYSCALL_TRACEPOINTS
select HAVE_KPROBES
@@ -773,6 +787,9 @@ config ARCH_SPARSEMEM_DEFAULT
config ARCH_SELECT_MEMORY_MODEL
def_bool ARCH_SPARSEMEM_ENABLE

config ARCH_FLATMEM_ENABLE
def_bool !NUMA

config HAVE_ARCH_PFN_VALID
def_bool ARCH_HAS_HOLES_MEMORYMODEL || !SPARSEMEM

@@ -1244,6 +1261,7 @@ config EFI
bool "UEFI runtime support"
depends on OF && !CPU_BIG_ENDIAN
depends on KERNEL_MODE_NEON
select ARCH_SUPPORTS_ACPI
select LIBFDT
select UCS2_STRING
select EFI_PARAMS_FROM_FDT
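With ARCH_USE_QUEUED_SPINLOCKS selected above, arm64 picks up the
generic queued spinlock rather than carrying its own slowpath; the
asm-generic header routes the arch spinlock operations to it (abridged
from include/asm-generic/qspinlock.h):

#define arch_spin_is_locked(l)  queued_spin_is_locked(l)
#define arch_spin_trylock(l)    queued_spin_trylock(l)
#define arch_spin_lock(l)       queued_spin_lock(l)
#define arch_spin_unlock(l)     queued_spin_unlock(l)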
9 changes: 5 additions & 4 deletions arch/arm64/Makefile
@@ -60,15 +60,16 @@ ifeq ($(CONFIG_CPU_BIG_ENDIAN), y)
KBUILD_CPPFLAGS += -mbig-endian
CHECKFLAGS += -D__AARCH64EB__
AS += -EB
# We must use the linux target here, since distributions don't tend to package
# the ELF linker scripts with binutils, and this results in a build failure.
LDFLAGS += -EB -maarch64linuxb
# Prefer the baremetal ELF build target, but not all toolchains include
# it so fall back to the standard linux version if needed.
LDFLAGS += -EB $(call ld-option, -maarch64elfb, -maarch64linuxb)
UTS_MACHINE := aarch64_be
else
KBUILD_CPPFLAGS += -mlittle-endian
CHECKFLAGS += -D__AARCH64EL__
AS += -EL
LDFLAGS += -EL -maarch64linux # See comment above
# Same as above, prefer ELF but fall back to linux target if needed.
LDFLAGS += -EL $(call ld-option, -maarch64elf, -maarch64linux)
UTS_MACHINE := aarch64
endif

1 change: 1 addition & 0 deletions arch/arm64/include/asm/Kbuild
@@ -16,6 +16,7 @@ generic-y += mm-arch-hooks.h
generic-y += msi.h
generic-y += preempt.h
generic-y += qrwlock.h
generic-y += qspinlock.h
generic-y += rwsem.h
generic-y += segment.h
generic-y += serial.h
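The generic-y += qspinlock.h entry asks Kbuild to emit a wrapper header
instead of arm64 carrying one of its own; the generated file amounts to
a single include (an approximation, assuming the usual generic-y
expansion):

/* arch/arm64/include/generated/asm/qspinlock.h (generated) */
#include <asm-generic/qspinlock.h>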
29 changes: 20 additions & 9 deletions arch/arm64/include/asm/acpi.h
@@ -12,10 +12,12 @@
#ifndef _ASM_ACPI_H
#define _ASM_ACPI_H

#include <linux/efi.h>
#include <linux/memblock.h>
#include <linux/psci.h>

#include <asm/cputype.h>
#include <asm/io.h>
#include <asm/smp_plat.h>
#include <asm/tlbflush.h>

@@ -29,18 +31,22 @@

/* Basic configuration for ACPI */
#ifdef CONFIG_ACPI
pgprot_t __acpi_get_mem_attribute(phys_addr_t addr);

/* ACPI table mapping after acpi_permanent_mmap is set */
static inline void __iomem *acpi_os_ioremap(acpi_physical_address phys,
acpi_size size)
{
/* For normal memory we already have a cacheable mapping. */
if (memblock_is_map_memory(phys))
return (void __iomem *)__phys_to_virt(phys);

/*
* EFI's reserve_regions() call adds memory with the WB attribute
* to memblock via early_init_dt_add_memory_arch().
* We should still honor the memory's attribute here because
* crash dump kernel possibly excludes some ACPI (reclaim)
* regions from memblock list.
*/
if (!memblock_is_memory(phys))
return ioremap(phys, size);

return ioremap_cache(phys, size);
return __ioremap(phys, size, __acpi_get_mem_attribute(phys));
}
#define acpi_os_ioremap acpi_os_ioremap

@@ -129,15 +135,20 @@ static inline const char *acpi_get_enable_method(int cpu)
* for compatibility.
*/
#define acpi_disable_cmcff 1
pgprot_t arch_apei_get_mem_attribute(phys_addr_t addr);
static inline pgprot_t arch_apei_get_mem_attribute(phys_addr_t addr)
{
return __acpi_get_mem_attribute(addr);
}
#endif /* CONFIG_ACPI_APEI */

#ifdef CONFIG_ACPI_NUMA
int arm64_acpi_numa_init(void);
int acpi_numa_get_nid(unsigned int cpu, u64 hwid);
int acpi_numa_get_nid(unsigned int cpu);
void acpi_map_cpus_to_nodes(void);
#else
static inline int arm64_acpi_numa_init(void) { return -ENOSYS; }
static inline int acpi_numa_get_nid(unsigned int cpu, u64 hwid) { return NUMA_NO_NODE; }
static inline int acpi_numa_get_nid(unsigned int cpu) { return NUMA_NO_NODE; }
static inline void acpi_map_cpus_to_nodes(void) { }
#endif /* CONFIG_ACPI_NUMA */

#define ACPI_TABLE_UPGRADE_MAX_PHYS MEMBLOCK_ALLOC_ACCESSIBLE
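The __acpi_get_mem_attribute() helper referenced above consults the EFI
memory map to pick mapping attributes. A sketch approximating the logic
in arch/arm64/kernel/acpi.c (the exact checks and constants may differ):

pgprot_t __acpi_get_mem_attribute(phys_addr_t addr)
{
        u64 attr = efi_mem_attributes(addr);

        if (attr & EFI_MEMORY_WB)       /* write-back cacheable */
                return PAGE_KERNEL;
        if (attr & EFI_MEMORY_WT)       /* write-through */
                return __pgprot(PROT_NORMAL_WT);
        if (attr & EFI_MEMORY_WC)       /* write-combining */
                return __pgprot(PROT_NORMAL_NC);
        return __pgprot(PROT_DEVICE_nGnRnE);    /* device memory */
}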