Merge tag 'arm64-upstream' of git://git.kernel.org/pub/scm/linux/kernel/git/arm64/linux

Pull arm64 updates from Catalin Marinas:
 "Apart from the core arm64 and perf changes, the Spectre v4 mitigation
  touches the arm KVM code and the ACPI PPTT support touches drivers/
  (acpi and cacheinfo). I should have the maintainers' acks in place.

  Summary:

   - Spectre v4 mitigation (Speculative Store Bypass Disable) support
     for arm64, using an SMC firmware call to set a hardware chicken bit

   - ACPI PPTT (Processor Properties Topology Table) parsing support,
     with the feature enabled for arm64

   - Report signal frame size to user via auxv (AT_MINSIGSTKSZ). The
     primary motivation is the Scalable Vector Extension (SVE), which
     requires more space on the signal frame than the currently defined
     MINSIGSTKSZ

   - ARM perf patches: allow building arm-cci as module, demote
     dev_warn() to dev_dbg() in arm-ccn event_init(), miscellaneous
     cleanups

   - cmpwait() WFE optimisation to avoid some spurious wakeups

   - L1_CACHE_BYTES reverted to 64 (for performance reasons that
     have to do with some network allocations) while keeping
     ARCH_DMA_MINALIGN at 128. cache_line_size() returns the actual
     hardware Cache Writeback Granule

   - Turn LSE atomics on by default in Kconfig

   - Kernel fault reporting tidying

   - Some #include and miscellaneous cleanups"

* tag 'arm64-upstream' of git://git.kernel.org/pub/scm/linux/kernel/git/arm64/linux: (53 commits)
  arm64: Fix syscall restarting around signal suppressed by tracer
  arm64: topology: Avoid checking numa mask for scheduler MC selection
  ACPI / PPTT: fix build when CONFIG_ACPI_PPTT is not enabled
  arm64: cpu_errata: include required headers
  arm64: KVM: Move VCPU_WORKAROUND_2_FLAG macros to the top of the file
  arm64: signal: Report signal frame size to userspace via auxv
  arm64/sve: Thin out initialisation sanity-checks for sve_max_vl
  arm64: KVM: Add ARCH_WORKAROUND_2 discovery through ARCH_FEATURES_FUNC_ID
  arm64: KVM: Handle guest's ARCH_WORKAROUND_2 requests
  arm64: KVM: Add ARCH_WORKAROUND_2 support for guests
  arm64: KVM: Add HYP per-cpu accessors
  arm64: ssbd: Add prctl interface for per-thread mitigation
  arm64: ssbd: Introduce thread flag to control userspace mitigation
  arm64: ssbd: Restore mitigation status on CPU resume
  arm64: ssbd: Skip apply_ssbd if not using dynamic mitigation
  arm64: ssbd: Add global mitigation state accessor
  arm64: Add 'ssbd' command-line option
  arm64: Add ARCH_WORKAROUND_2 probing
  arm64: Add per-cpu infrastructure to call ARCH_WORKAROUND_2
  arm64: Call ARCH_WORKAROUND_2 on transitions between EL0 and EL1
  ...
torvalds committed Jun 8, 2018
2 parents 2996148 + 0fe4251 commit 410feb7
Showing 61 changed files with 1,689 additions and 260 deletions.
17 changes: 17 additions & 0 deletions Documentation/admin-guide/kernel-parameters.txt
@@ -4106,6 +4106,23 @@
expediting. Set to zero to disable automatic
expediting.

+ssbd= [ARM64,HW]
+Speculative Store Bypass Disable control
+
+On CPUs that are vulnerable to the Speculative
+Store Bypass vulnerability and offer a
+firmware based mitigation, this parameter
+indicates how the mitigation should be used:
+
+force-on: Unconditionally enable mitigation
+for both kernel and userspace
+force-off: Unconditionally disable mitigation
+for both kernel and userspace
+kernel: Always enable mitigation in the
+kernel, and offer a prctl interface
+to allow userspace to register its
+interest in being mitigated too.
+
stack_guard_gap= [MM]
override the default stack gap protection. The value
is in page units and it defines how many pages prior
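In the kernel mode, a task opts in to the mitigation through the generic speculation-control prctl, which this series wires up for PR_SPEC_STORE_BYPASS ("arm64: ssbd: Add prctl interface for per-thread mitigation"). A minimal user-space sketch, with the constants spelled out in case the installed headers predate the interface:

#include <stdio.h>
#include <sys/prctl.h>

/* Values from <linux/prctl.h>, spelled out in case the installed
 * headers predate the speculation-control interface. */
#ifndef PR_SET_SPECULATION_CTRL
#define PR_GET_SPECULATION_CTRL 52
#define PR_SET_SPECULATION_CTRL 53
#define PR_SPEC_STORE_BYPASS    0
#define PR_SPEC_DISABLE         (1UL << 2)
#endif

int main(void)
{
	/* "Disable speculation" means enabling the SSBD mitigation for
	 * this task; only meaningful in the ssbd=kernel (dynamic) mode. */
	if (prctl(PR_SET_SPECULATION_CTRL, PR_SPEC_STORE_BYPASS,
		  PR_SPEC_DISABLE, 0, 0))
		perror("PR_SET_SPECULATION_CTRL");

	printf("speculation ctrl state: %d\n",
	       prctl(PR_GET_SPECULATION_CTRL, PR_SPEC_STORE_BYPASS, 0, 0, 0));
	return 0;
}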
2 changes: 2 additions & 0 deletions arch/arm/common/mcpm_entry.c
@@ -9,6 +9,7 @@
* published by the Free Software Foundation.
*/

+#include <linux/export.h>
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/irqflags.h>
@@ -174,6 +175,7 @@ bool mcpm_is_available(void)
{
return (platform_ops) ? true : false;
}
+EXPORT_SYMBOL_GPL(mcpm_is_available);

/*
* We can't use regular spinlocks. In the switcher case, it is possible
12 changes: 12 additions & 0 deletions arch/arm/include/asm/kvm_host.h
@@ -325,6 +325,18 @@ static inline bool kvm_arm_harden_branch_predictor(void)
}
}

+#define KVM_SSBD_UNKNOWN -1
+#define KVM_SSBD_FORCE_DISABLE 0
+#define KVM_SSBD_KERNEL 1
+#define KVM_SSBD_FORCE_ENABLE 2
+#define KVM_SSBD_MITIGATED 3
+
+static inline int kvm_arm_have_ssbd(void)
+{
+/* No way to detect it yet, pretend it is not there. */
+return KVM_SSBD_UNKNOWN;
+}
+
static inline void kvm_vcpu_load_sysregs(struct kvm_vcpu *vcpu) {}
static inline void kvm_vcpu_put_sysregs(struct kvm_vcpu *vcpu) {}

5 changes: 5 additions & 0 deletions arch/arm/include/asm/kvm_mmu.h
@@ -356,6 +356,11 @@ static inline int kvm_map_vectors(void)
return 0;
}

+static inline int hyp_map_aux_data(void)
+{
+return 0;
+}
+
#define kvm_phys_to_vttbr(addr) (addr)

#endif /* !__ASSEMBLY__ */
4 changes: 1 addition & 3 deletions arch/arm/kernel/perf_event_v6.c
@@ -303,12 +303,10 @@ static void armv6pmu_enable_event(struct perf_event *event)
}

static irqreturn_t
-armv6pmu_handle_irq(int irq_num,
-void *dev)
+armv6pmu_handle_irq(struct arm_pmu *cpu_pmu)
{
unsigned long pmcr = armv6_pmcr_read();
struct perf_sample_data data;
-struct arm_pmu *cpu_pmu = (struct arm_pmu *)dev;
struct pmu_hw_events *cpuc = this_cpu_ptr(cpu_pmu->hw_events);
struct pt_regs *regs;
int idx;
3 changes: 1 addition & 2 deletions arch/arm/kernel/perf_event_v7.c
@@ -946,11 +946,10 @@ static void armv7pmu_disable_event(struct perf_event *event)
raw_spin_unlock_irqrestore(&events->pmu_lock, flags);
}

-static irqreturn_t armv7pmu_handle_irq(int irq_num, void *dev)
+static irqreturn_t armv7pmu_handle_irq(struct arm_pmu *cpu_pmu)
{
u32 pmnc;
struct perf_sample_data data;
-struct arm_pmu *cpu_pmu = (struct arm_pmu *)dev;
struct pmu_hw_events *cpuc = this_cpu_ptr(cpu_pmu->hw_events);
struct pt_regs *regs;
int idx;
6 changes: 2 additions & 4 deletions arch/arm/kernel/perf_event_xscale.c
@@ -142,11 +142,10 @@ xscale1_pmnc_counter_has_overflowed(unsigned long pmnc,
}

static irqreturn_t
-xscale1pmu_handle_irq(int irq_num, void *dev)
+xscale1pmu_handle_irq(struct arm_pmu *cpu_pmu)
{
unsigned long pmnc;
struct perf_sample_data data;
-struct arm_pmu *cpu_pmu = (struct arm_pmu *)dev;
struct pmu_hw_events *cpuc = this_cpu_ptr(cpu_pmu->hw_events);
struct pt_regs *regs;
int idx;
@@ -489,11 +488,10 @@ xscale2_pmnc_counter_has_overflowed(unsigned long of_flags,
}

static irqreturn_t
-xscale2pmu_handle_irq(int irq_num, void *dev)
+xscale2pmu_handle_irq(struct arm_pmu *cpu_pmu)
{
unsigned long pmnc, of_flags;
struct perf_sample_data data;
-struct arm_pmu *cpu_pmu = (struct arm_pmu *)dev;
struct pmu_hw_events *cpuc = this_cpu_ptr(cpu_pmu->hw_events);
struct pt_regs *regs;
int idx;
15 changes: 14 additions & 1 deletion arch/arm64/Kconfig
@@ -7,11 +7,13 @@ config ARM64
select ACPI_REDUCED_HARDWARE_ONLY if ACPI
select ACPI_MCFG if ACPI
select ACPI_SPCR_TABLE if ACPI
+select ACPI_PPTT if ACPI
select ARCH_CLOCKSOURCE_DATA
select ARCH_HAS_DEBUG_VIRTUAL
select ARCH_HAS_DEVMEM_IS_ALLOWED
select ARCH_HAS_ACPI_TABLE_UPGRADE if ACPI
select ARCH_HAS_ELF_RANDOMIZE
select ARCH_HAS_FAST_MULTIPLIER
select ARCH_HAS_FORTIFY_SOURCE
select ARCH_HAS_GCOV_PROFILE_ALL
select ARCH_HAS_GIGANTIC_PAGE if (MEMORY_ISOLATION && COMPACTION) || CMA
@@ -923,6 +925,15 @@ config HARDEN_EL2_VECTORS

If unsure, say Y.

+config ARM64_SSBD
+bool "Speculative Store Bypass Disable" if EXPERT
+default y
+help
+This enables mitigation of the bypassing of previous stores
+by speculative loads.
+
+If unsure, say Y.
+
menuconfig ARMV8_DEPRECATED
bool "Emulate deprecated/obsolete ARMv8 instructions"
depends on COMPAT
@@ -1034,6 +1045,7 @@ config ARM64_PAN

config ARM64_LSE_ATOMICS
bool "Atomic instructions"
+default y
help
As part of the Large System Extensions, ARMv8.1 introduces new
atomic instructions that are designed specifically to scale in
@@ -1042,7 +1054,8 @@ config ARM64_LSE_ATOMICS
Say Y here to make use of these instructions for the in-kernel
atomic routines. This incurs a small overhead on CPUs that do
not support these instructions and requires the kernel to be
-built with binutils >= 2.25.
+built with binutils >= 2.25 in order for the new instructions
+to be used.

config ARM64_VHE
bool "Enable support for Virtualization Host Extensions (VHE)"
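ARM64_LSE_ATOMICS being default y only makes the kernel capable of using LSE; at boot it still patches between LL/SC and LSE sequences based on what the CPU reports. User space can make the same check through the hwcaps — a small sketch, assuming arm64 where HWCAP_ATOMICS is bit 8 of AT_HWCAP:

#include <stdio.h>
#include <sys/auxv.h>

#ifndef HWCAP_ATOMICS
#define HWCAP_ATOMICS (1 << 8)	/* from arch/arm64/include/uapi/asm/hwcap.h */
#endif

int main(void)
{
	unsigned long hwcaps = getauxval(AT_HWCAP);

	printf("LSE atomics %s supported by this CPU\n",
	       (hwcaps & HWCAP_ATOMICS) ? "are" : "are not");
	return 0;
}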
4 changes: 4 additions & 0 deletions arch/arm64/include/asm/acpi.h
@@ -86,6 +86,10 @@ static inline bool acpi_has_cpu_in_madt(void)
}

struct acpi_madt_generic_interrupt *acpi_cpu_get_madt_gicc(int cpu);
+static inline u32 get_acpi_id_for_cpu(unsigned int cpu)
+{
+return acpi_cpu_get_madt_gicc(cpu)->uid;
+}

static inline void arch_fix_phys_package_id(int num, u32 slot) { }
void __init acpi_init_cpus(void);
6 changes: 3 additions & 3 deletions arch/arm64/include/asm/cache.h
@@ -33,7 +33,7 @@
#define ICACHE_POLICY_VIPT 2
#define ICACHE_POLICY_PIPT 3

-#define L1_CACHE_SHIFT 7
+#define L1_CACHE_SHIFT (6)
#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)

/*
@@ -43,7 +43,7 @@
* cache before the transfer is done, causing old data to be seen by
* the CPU.
*/
-#define ARCH_DMA_MINALIGN L1_CACHE_BYTES
+#define ARCH_DMA_MINALIGN (128)

#ifndef __ASSEMBLY__

@@ -77,7 +77,7 @@ static inline u32 cache_type_cwg(void)
static inline int cache_line_size(void)
{
u32 cwg = cache_type_cwg();
-return cwg ? 4 << cwg : L1_CACHE_BYTES;
+return cwg ? 4 << cwg : ARCH_DMA_MINALIGN;
}

#endif /* __ASSEMBLY__ */
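cache_line_size() above derives the line size from CTR_EL0.CWG (bits [27:24], the log2 of the writeback granule in 4-byte words), falling back to ARCH_DMA_MINALIGN when the field reads as zero. A user-space sketch of the same computation, assuming EL0 reads of CTR_EL0 are permitted or trapped-and-emulated, as Linux arranges on arm64:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint64_t ctr;

	/* CWG lives in CTR_EL0 bits [27:24] and encodes log2 of the
	 * Cache Writeback Granule in 4-byte words. */
	asm volatile("mrs %0, ctr_el0" : "=r" (ctr));

	unsigned int cwg = (ctr >> 24) & 0xf;
	printf("CWG=%u -> cache_line_size()=%u bytes\n",
	       cwg, cwg ? 4U << cwg : 128U);
	return 0;
}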
4 changes: 3 additions & 1 deletion arch/arm64/include/asm/cmpxchg.h
@@ -204,7 +204,9 @@ static inline void __cmpwait_case_##name(volatile void *ptr, \
unsigned long tmp; \
\
asm volatile( \
" ldxr" #sz "\t%" #w "[tmp], %[v]\n" \
" sevl\n" \
" wfe\n" \
" ldxr" #sz "\t%" #w "[tmp], %[v]\n" \
" eor %" #w "[tmp], %" #w "[tmp], %" #w "[val]\n" \
" cbnz %" #w "[tmp], 1f\n" \
" wfe\n" \
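The added sevl/wfe pair drains any event already pending in the CPU's local event register: sevl sets the event, the first wfe consumes it, and only then does the load-exclusive arm the exclusive monitor, so the final wfe sleeps until another observer actually writes the watched location rather than waking spuriously on a stale event. __cmpwait is the primitive behind smp_cond_load_acquire() on arm64; a sketch of a typical caller, kernel context assumed:

#include <linux/atomic.h>

/* Spin (sleeping in WFE) until the flag becomes nonzero, with acquire
 * ordering against the store that set it; VAL is the macro-provided
 * name for the freshly loaded value. */
static inline int wait_for_flag(atomic_t *flag)
{
	return smp_cond_load_acquire(&flag->counter, VAL != 0);
}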
3 changes: 2 additions & 1 deletion arch/arm64/include/asm/cpucaps.h
@@ -48,7 +48,8 @@
#define ARM64_HAS_CACHE_IDC 27
#define ARM64_HAS_CACHE_DIC 28
#define ARM64_HW_DBM 29
+#define ARM64_SSBD 30

-#define ARM64_NCAPS 30
+#define ARM64_NCAPS 31

#endif /* __ASM_CPUCAPS_H */
22 changes: 22 additions & 0 deletions arch/arm64/include/asm/cpufeature.h
@@ -537,6 +537,28 @@ static inline u64 read_zcr_features(void)
return zcr;
}

+#define ARM64_SSBD_UNKNOWN -1
+#define ARM64_SSBD_FORCE_DISABLE 0
+#define ARM64_SSBD_KERNEL 1
+#define ARM64_SSBD_FORCE_ENABLE 2
+#define ARM64_SSBD_MITIGATED 3
+
+static inline int arm64_get_ssbd_state(void)
+{
+#ifdef CONFIG_ARM64_SSBD
+extern int ssbd_state;
+return ssbd_state;
+#else
+return ARM64_SSBD_UNKNOWN;
+#endif
+}
+
+#ifdef CONFIG_ARM64_SSBD
+void arm64_set_ssbd_mitigation(bool state);
+#else
+static inline void arm64_set_ssbd_mitigation(bool state) {}
+#endif
+
#endif /* __ASSEMBLY__ */

#endif
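
A hedged sketch of how a caller might consume these helpers — for example re-applying the mitigation when a CPU returns from a low-power state, which a later patch in this series does on resume (the function name here is hypothetical):

/* Hypothetical caller: re-apply the SSBD mitigation when a CPU comes
 * back from a low-power state, mirroring what the series does on resume. */
static void hypothetical_cpu_resume_ssbd(void)
{
	switch (arm64_get_ssbd_state()) {
	case ARM64_SSBD_FORCE_ENABLE:
	case ARM64_SSBD_KERNEL:
		/* Firmware call flips the chicken bit for this CPU. */
		arm64_set_ssbd_mitigation(true);
		break;
	case ARM64_SSBD_FORCE_DISABLE:
		arm64_set_ssbd_mitigation(false);
		break;
	default:
		/* ARM64_SSBD_UNKNOWN / ARM64_SSBD_MITIGATED: nothing to do. */
		break;
	}
}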
13 changes: 13 additions & 0 deletions arch/arm64/include/asm/elf.h
@@ -121,6 +121,9 @@

#ifndef __ASSEMBLY__

+#include <linux/bug.h>
+#include <asm/processor.h> /* for signal_minsigstksz, used by ARCH_DLINFO */
+
typedef unsigned long elf_greg_t;

#define ELF_NGREG (sizeof(struct user_pt_regs) / sizeof(elf_greg_t))
@@ -148,6 +151,16 @@ typedef struct user_fpsimd_state elf_fpregset_t;
do { \
NEW_AUX_ENT(AT_SYSINFO_EHDR, \
(elf_addr_t)current->mm->context.vdso); \
+\
+/* \
+ * Should always be nonzero unless there's a kernel bug. \
+ * If we haven't determined a sensible value to give to \
+ * userspace, omit the entry: \
+ */ \
+if (likely(signal_minsigstksz)) \
+NEW_AUX_ENT(AT_MINSIGSTKSZ, signal_minsigstksz); \
+else \
+NEW_AUX_ENT(AT_IGNORE, 0); \
} while (0)

#define ARCH_HAS_SETUP_ADDITIONAL_PAGES
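User space can consume the new entry with getauxval(); a minimal sketch that sizes an alternate signal stack from it, falling back to the legacy MINSIGSTKSZ constant on kernels that do not provide AT_MINSIGSTKSZ (value 51, spelled out in case the libc headers predate it):

#include <signal.h>
#include <stdlib.h>
#include <sys/auxv.h>

#ifndef AT_MINSIGSTKSZ
#define AT_MINSIGSTKSZ 51	/* auxv entry for the signal frame size */
#endif

int setup_sigstack(void)
{
	unsigned long minsig = getauxval(AT_MINSIGSTKSZ);
	stack_t ss;

	/* getauxval() returns 0 if the entry is absent; fall back to
	 * the legacy constant on older kernels. */
	ss.ss_size = (minsig ? minsig : MINSIGSTKSZ) + SIGSTKSZ;
	ss.ss_sp = malloc(ss.ss_size);
	ss.ss_flags = 0;
	if (!ss.ss_sp)
		return -1;
	return sigaltstack(&ss, NULL);
}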
12 changes: 7 additions & 5 deletions arch/arm64/include/asm/fpsimdmacros.h
@@ -207,12 +207,14 @@
str w\nxtmp, [\xpfpsr, #4]
.endm

-.macro sve_load nxbase, xpfpsr, xvqminus1, nxtmp
+.macro sve_load nxbase, xpfpsr, xvqminus1, nxtmp, xtmp2
mrs_s x\nxtmp, SYS_ZCR_EL1
-bic x\nxtmp, x\nxtmp, ZCR_ELx_LEN_MASK
-orr x\nxtmp, x\nxtmp, \xvqminus1
-msr_s SYS_ZCR_EL1, x\nxtmp // self-synchronising
-
+bic \xtmp2, x\nxtmp, ZCR_ELx_LEN_MASK
+orr \xtmp2, \xtmp2, \xvqminus1
+cmp \xtmp2, x\nxtmp
+b.eq 921f
+msr_s SYS_ZCR_EL1, \xtmp2 // self-synchronising
+921:
_for n, 0, 31, _sve_ldr_v \n, \nxbase, \n - 34
_sve_ldr_p 0, \nxbase
_sve_wrffr 0
30 changes: 28 additions & 2 deletions arch/arm64/include/asm/kvm_asm.h
@@ -20,6 +20,9 @@

#include <asm/virt.h>

+#define VCPU_WORKAROUND_2_FLAG_SHIFT 0
+#define VCPU_WORKAROUND_2_FLAG (_AC(1, UL) << VCPU_WORKAROUND_2_FLAG_SHIFT)
+
#define ARM_EXIT_WITH_SERROR_BIT 31
#define ARM_EXCEPTION_CODE(x) ((x) & ~(1U << ARM_EXIT_WITH_SERROR_BIT))
#define ARM_SERROR_PENDING(x) !!((x) & (1U << ARM_EXIT_WITH_SERROR_BIT))
@@ -71,14 +74,37 @@ extern u32 __kvm_get_mdcr_el2(void);

extern u32 __init_stage2_translation(void);

+/* Home-grown __this_cpu_{ptr,read} variants that always work at HYP */
+#define __hyp_this_cpu_ptr(sym) \
+({ \
+void *__ptr = hyp_symbol_addr(sym); \
+__ptr += read_sysreg(tpidr_el2); \
+(typeof(&sym))__ptr; \
+})
+
+#define __hyp_this_cpu_read(sym) \
+({ \
+*__hyp_this_cpu_ptr(sym); \
+})

#else /* __ASSEMBLY__ */

-.macro get_host_ctxt reg, tmp
-adr_l \reg, kvm_host_cpu_state
+.macro hyp_adr_this_cpu reg, sym, tmp
+adr_l \reg, \sym
mrs \tmp, tpidr_el2
add \reg, \reg, \tmp
.endm

+.macro hyp_ldr_this_cpu reg, sym, tmp
+adr_l \reg, \sym
+mrs \tmp, tpidr_el2
+ldr \reg, [\reg, \tmp]
+.endm

+.macro get_host_ctxt reg, tmp
+hyp_adr_this_cpu \reg, kvm_host_cpu_state, \tmp
+.endm

.macro get_vcpu_ptr vcpu, ctxt
get_host_ctxt \ctxt, \vcpu
ldr \vcpu, [\ctxt, #HOST_CONTEXT_VCPU]
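These accessors exist because the regular this_cpu ops compute the per-CPU offset from EL1 state, which is not usable at HYP; here the offset comes from tpidr_el2 instead. A hedged sketch of the kind of use later patches in the series make of them (arm64_ssbd_callback_required is the per-CPU flag added by the per-cpu infrastructure patch):

#include <linux/percpu.h>
#include <asm/kvm_hyp.h>

/* Per-CPU flag, set by the host, telling HYP whether this CPU needs
 * the ARCH_WORKAROUND_2 firmware call on guest entry/exit. */
DECLARE_PER_CPU_READ_MOSTLY(u64, arm64_ssbd_callback_required);

static bool __hyp_text cpu_needs_ssbd_call(void)
{
	/* Plain __this_cpu_read() would use the EL1 per-cpu offset;
	 * at HYP we must offset by tpidr_el2 instead. */
	return __hyp_this_cpu_read(arm64_ssbd_callback_required);
}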
26 changes: 26 additions & 0 deletions arch/arm64/include/asm/kvm_host.h
@@ -216,6 +216,9 @@ struct kvm_vcpu_arch {
/* Exception Information */
struct kvm_vcpu_fault_info fault;

+/* State of various workarounds, see kvm_asm.h for bit assignment */
+u64 workaround_flags;
+
/* Guest debug state */
u64 debug_flags;

@@ -452,6 +455,29 @@ static inline bool kvm_arm_harden_branch_predictor(void)
return cpus_have_const_cap(ARM64_HARDEN_BRANCH_PREDICTOR);
}

+#define KVM_SSBD_UNKNOWN -1
+#define KVM_SSBD_FORCE_DISABLE 0
+#define KVM_SSBD_KERNEL 1
+#define KVM_SSBD_FORCE_ENABLE 2
+#define KVM_SSBD_MITIGATED 3
+
+static inline int kvm_arm_have_ssbd(void)
+{
+switch (arm64_get_ssbd_state()) {
+case ARM64_SSBD_FORCE_DISABLE:
+return KVM_SSBD_FORCE_DISABLE;
+case ARM64_SSBD_KERNEL:
+return KVM_SSBD_KERNEL;
+case ARM64_SSBD_FORCE_ENABLE:
+return KVM_SSBD_FORCE_ENABLE;
+case ARM64_SSBD_MITIGATED:
+return KVM_SSBD_MITIGATED;
+case ARM64_SSBD_UNKNOWN:
+default:
+return KVM_SSBD_UNKNOWN;
+}
+}
+
void kvm_vcpu_load_sysregs(struct kvm_vcpu *vcpu);
void kvm_vcpu_put_sysregs(struct kvm_vcpu *vcpu);

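On the discovery side ("arm64: Add ARCH_WORKAROUND_2 probing"), a host or guest can ask firmware whether the workaround is implemented through the SMCCC 1.1 ARCH_FEATURES query. A hedged sketch, error handling elided:

#include <linux/arm-smccc.h>

/* Negative a0 means "not implemented"; non-negative values indicate
 * the supported ARCH_WORKAROUND_2 variants. */
static bool have_arch_workaround_2(void)
{
	struct arm_smccc_res res;

	arm_smccc_1_1_smc(ARM_SMCCC_ARCH_FEATURES_FUNC_ID,
			  ARM_SMCCC_ARCH_WORKAROUND_2, &res);
	return (int)res.a0 >= 0;
}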
...
