Merge branch 'for-next/boot' into for-next/core
* for-next/boot: (34 commits)
  arm64: fix KASAN_INLINE
  arm64: Add an override for ID_AA64SMFR0_EL1.FA64
  arm64: Add the arm64.nosve command line option
  arm64: Add the arm64.nosme command line option
  arm64: Expose a __check_override primitive for oddball features
  arm64: Allow the idreg override to deal with variable field width
  arm64: Factor out checking of a feature against the override into a macro
  arm64: Allow sticky E2H when entering EL1
  arm64: Save state of HCR_EL2.E2H before switch to EL1
  arm64: Rename the VHE switch to "finalise_el2"
  arm64: mm: fix booting with 52-bit address space
  arm64: head: remove __PHYS_OFFSET
  arm64: lds: use PROVIDE instead of conditional definitions
  arm64: setup: drop early FDT pointer helpers
  arm64: head: avoid relocating the kernel twice for KASLR
  arm64: kaslr: defer initialization to initcall where permitted
  arm64: head: record CPU boot mode after enabling the MMU
  arm64: head: populate kernel page tables with MMU and caches on
  arm64: head: factor out TTBR1 assignment into a macro
  arm64: idreg-override: use early FDT mapping in ID map
  ...
willdeacon committed Jul 25, 2022
2 parents 9286773 + 1191b62 · commit f96d67a
Showing 24 changed files with 750 additions and 612 deletions.
6 changes: 6 additions & 0 deletions Documentation/admin-guide/kernel-parameters.txt
@@ -400,6 +400,12 @@
arm64.nomte [ARM64] Unconditionally disable Memory Tagging Extension
support

+	arm64.nosve	[ARM64] Unconditionally disable Scalable Vector
+			Extension support
+
+	arm64.nosme	[ARM64] Unconditionally disable Scalable Matrix
+			Extension support
+
ataflop= [HW,M68k]

atarimouse= [HW,MOUSE] Atari Mouse
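
As a usage illustration (the console= and root= arguments are invented for the example and not part of this change), a boot command line disabling both extensions could look like:

	console=ttyAMA0 root=/dev/vda2 arm64.nosve arm64.nosme
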
11 changes: 6 additions & 5 deletions Documentation/virt/kvm/arm/hyp-abi.rst
@@ -60,12 +60,13 @@ these functions (see arch/arm{,64}/include/asm/virt.h):

* ::

-    x0 = HVC_VHE_RESTART (arm64 only)
+    x0 = HVC_FINALISE_EL2 (arm64 only)

-  Attempt to upgrade the kernel's exception level from EL1 to EL2 by enabling
-  the VHE mode. This is conditioned by the CPU supporting VHE, the EL2 MMU
-  being off, and VHE not being disabled by any other means (command line
-  option, for example).
+  Finish configuring EL2 depending on the command-line options,
+  including an attempt to upgrade the kernel's exception level from
+  EL1 to EL2 by enabling the VHE mode. This is conditioned by the CPU
+  supporting VHE, the EL2 MMU being off, and VHE not being disabled by
+  any other means (command line option, for example).

Any other value of r0/x0 triggers a hypervisor-specific handling,
which is not documented here.
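
For illustration only, a minimal C sketch of how a kernel running at EL1 under the EL2 stub could issue this call (the wrapper name is hypothetical; the kernel itself does this from assembly, and the sketch assumes the stub vectors are installed):

	/* x0 carries the function ID in, and the boot mode plus flags
	 * such as BOOT_CPU_FLAG_E2H back out (hedged sketch only). */
	static inline unsigned long hvc_finalise_el2(void)
	{
		register unsigned long x0 asm("x0") = 3;	/* HVC_FINALISE_EL2 */

		asm volatile("hvc #0" : "+r" (x0) : : "memory");
		return x0;
	}
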
31 changes: 27 additions & 4 deletions arch/arm64/include/asm/assembler.h
@@ -359,6 +359,20 @@ alternative_cb_end
bfi \valreg, \t1sz, #TCR_T1SZ_OFFSET, #TCR_TxSZ_WIDTH
.endm

+/*
+ * idmap_get_t0sz - get the T0SZ value needed to cover the ID map
+ *
+ * Calculate the maximum allowed value for TCR_EL1.T0SZ so that the
+ * entire ID map region can be mapped. As T0SZ == (64 - #bits used),
+ * this number conveniently equals the number of leading zeroes in
+ * the physical address of _end.
+ */
+	.macro	idmap_get_t0sz, reg
+	adrp	\reg, _end
+	orr	\reg, \reg, #(1 << VA_BITS_MIN) - 1
+	clz	\reg, \reg
+	.endm
+
/*
* tcr_compute_pa_size - set TCR.(I)PS to the highest supported
* ID_AA64MMFR0_EL1.PARange value
@@ -465,6 +479,18 @@ alternative_endif
_cond_uaccess_extable .Licache_op\@, \fixup
.endm

+/*
+ * load_ttbr1 - install @pgtbl as a TTBR1 page table
+ * pgtbl preserved
+ * tmp1/tmp2 clobbered, either may overlap with pgtbl
+ */
+	.macro	load_ttbr1, pgtbl, tmp1, tmp2
+	phys_to_ttbr	\tmp1, \pgtbl
+	offset_ttbr1	\tmp1, \tmp2
+	msr	ttbr1_el1, \tmp1
+	isb
+	.endm
+
/*
* To prevent the possibility of old and new partial table walks being visible
* in the tlb, switch the ttbr to a zero page when we invalidate the old
@@ -478,10 +504,7 @@
isb
tlbi vmalle1
dsb nsh
-	phys_to_ttbr	\tmp, \page_table
-	offset_ttbr1	\tmp, \tmp2
-	msr	ttbr1_el1, \tmp
-	isb
+	load_ttbr1 \page_table, \tmp, \tmp2
.endm

/*
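
To make the idmap_get_t0sz arithmetic above concrete, here is a hedged C model of the same computation (the helper name and sample address are illustrative, not kernel code):

	#include <stdint.h>

	/* Model of idmap_get_t0sz: set the low VA_BITS_MIN bits of the
	 * physical address of _end, then count leading zeroes. */
	static int model_idmap_get_t0sz(uint64_t end_pa, int va_bits_min)
	{
		uint64_t reg = end_pa | ((1ULL << va_bits_min) - 1);

		return __builtin_clzll(reg);	/* T0SZ == 64 - #bits used */
	}

For example, _end at 0x40_0000_0000 (bit 38) with VA_BITS_MIN == 48 gives clz == 16, i.e. T0SZ == 16 and a 48-bit T0 region; an _end above 2^48 would yield a smaller T0SZ and hence a wider ID map.
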
3 changes: 3 additions & 0 deletions arch/arm64/include/asm/cpufeature.h
@@ -908,7 +908,10 @@ static inline unsigned int get_vmid_bits(u64 mmfr1)
}

extern struct arm64_ftr_override id_aa64mmfr1_override;
+extern struct arm64_ftr_override id_aa64pfr0_override;
extern struct arm64_ftr_override id_aa64pfr1_override;
+extern struct arm64_ftr_override id_aa64zfr0_override;
+extern struct arm64_ftr_override id_aa64smfr0_override;
extern struct arm64_ftr_override id_aa64isar1_override;
extern struct arm64_ftr_override id_aa64isar2_override;

60 changes: 0 additions & 60 deletions arch/arm64/include/asm/el2_setup.h
@@ -129,64 +129,6 @@
msr cptr_el2, x0 // Disable copro. traps to EL2
.endm

-/* SVE register access */
-.macro __init_el2_nvhe_sve
-	mrs	x1, id_aa64pfr0_el1
-	ubfx	x1, x1, #ID_AA64PFR0_SVE_SHIFT, #4
-	cbz	x1, .Lskip_sve_\@
-
-	bic	x0, x0, #CPTR_EL2_TZ		// Also disable SVE traps
-	msr	cptr_el2, x0			// Disable copro. traps to EL2
-	isb
-	mov	x1, #ZCR_ELx_LEN_MASK		// SVE: Enable full vector
-	msr_s	SYS_ZCR_EL2, x1			// length for EL1.
-.Lskip_sve_\@:
-.endm
-
-/* SME register access and priority mapping */
-.macro __init_el2_nvhe_sme
-	mrs	x1, id_aa64pfr1_el1
-	ubfx	x1, x1, #ID_AA64PFR1_SME_SHIFT, #4
-	cbz	x1, .Lskip_sme_\@
-
-	bic	x0, x0, #CPTR_EL2_TSM		// Also disable SME traps
-	msr	cptr_el2, x0			// Disable copro. traps to EL2
-	isb
-
-	mrs	x1, sctlr_el2
-	orr	x1, x1, #SCTLR_ELx_ENTP2	// Disable TPIDR2 traps
-	msr	sctlr_el2, x1
-	isb
-
-	mov	x1, #0				// SMCR controls
-
-	mrs_s	x2, SYS_ID_AA64SMFR0_EL1
-	ubfx	x2, x2, #ID_AA64SMFR0_EL1_FA64_SHIFT, #1 // Full FP in SM?
-	cbz	x2, .Lskip_sme_fa64_\@
-
-	orr	x1, x1, SMCR_ELx_FA64_MASK
-.Lskip_sme_fa64_\@:
-
-	orr	x1, x1, #SMCR_ELx_LEN_MASK	// Enable full SME vector
-	msr_s	SYS_SMCR_EL2, x1		// length for EL1.
-
-	mrs_s	x1, SYS_SMIDR_EL1		// Priority mapping supported?
-	ubfx	x1, x1, #SMIDR_EL1_SMPS_SHIFT, #1
-	cbz	x1, .Lskip_sme_\@
-
-	msr_s	SYS_SMPRIMAP_EL2, xzr		// Make all priorities equal
-
-	mrs	x1, id_aa64mmfr1_el1		// HCRX_EL2 present?
-	ubfx	x1, x1, #ID_AA64MMFR1_HCX_SHIFT, #4
-	cbz	x1, .Lskip_sme_\@
-
-	mrs_s	x1, SYS_HCRX_EL2
-	orr	x1, x1, #HCRX_EL2_SMPME_MASK	// Enable priority mapping
-	msr_s	SYS_HCRX_EL2, x1
-
-.Lskip_sme_\@:
-.endm
-
/* Disable any fine grained traps */
.macro __init_el2_fgt
mrs x1, id_aa64mmfr0_el1
@@ -250,8 +192,6 @@
__init_el2_hstr
__init_el2_nvhe_idregs
__init_el2_nvhe_cptr
-	__init_el2_nvhe_sve
-	__init_el2_nvhe_sme
__init_el2_fgt
__init_el2_nvhe_prepare_eret
.endm
18 changes: 13 additions & 5 deletions arch/arm64/include/asm/kernel-pgtable.h
@@ -8,6 +8,7 @@
#ifndef __ASM_KERNEL_PGTABLE_H
#define __ASM_KERNEL_PGTABLE_H

+#include <asm/boot.h>
#include <asm/pgtable-hwdef.h>
#include <asm/sparsemem.h>

@@ -35,10 +36,8 @@
*/
#if ARM64_KERNEL_USES_PMD_MAPS
#define SWAPPER_PGTABLE_LEVELS (CONFIG_PGTABLE_LEVELS - 1)
-#define IDMAP_PGTABLE_LEVELS	(ARM64_HW_PGTABLE_LEVELS(PHYS_MASK_SHIFT) - 1)
#else
#define SWAPPER_PGTABLE_LEVELS (CONFIG_PGTABLE_LEVELS)
-#define IDMAP_PGTABLE_LEVELS	(ARM64_HW_PGTABLE_LEVELS(PHYS_MASK_SHIFT))
#endif


@@ -87,7 +86,14 @@
+ EARLY_PUDS((vstart), (vend)) /* each PUD needs a next level page table */ \
+ EARLY_PMDS((vstart), (vend))) /* each PMD needs a next level page table */
#define INIT_DIR_SIZE (PAGE_SIZE * EARLY_PAGES(KIMAGE_VADDR, _end))
-#define IDMAP_DIR_SIZE		(IDMAP_PGTABLE_LEVELS * PAGE_SIZE)
+
+/* the initial ID map may need two extra pages if it needs to be extended */
+#if VA_BITS < 48
+#define INIT_IDMAP_DIR_SIZE	((INIT_IDMAP_DIR_PAGES + 2) * PAGE_SIZE)
+#else
+#define INIT_IDMAP_DIR_SIZE	(INIT_IDMAP_DIR_PAGES * PAGE_SIZE)
+#endif
+#define INIT_IDMAP_DIR_PAGES	EARLY_PAGES(KIMAGE_VADDR, _end + MAX_FDT_SIZE + SWAPPER_BLOCK_SIZE)

/* Initial memory map size */
#if ARM64_KERNEL_USES_PMD_MAPS
@@ -107,9 +113,11 @@
#define SWAPPER_PMD_FLAGS (PMD_TYPE_SECT | PMD_SECT_AF | PMD_SECT_S)

#if ARM64_KERNEL_USES_PMD_MAPS
-#define SWAPPER_MM_MMUFLAGS	(PMD_ATTRINDX(MT_NORMAL) | SWAPPER_PMD_FLAGS)
+#define SWAPPER_RW_MMUFLAGS	(PMD_ATTRINDX(MT_NORMAL) | SWAPPER_PMD_FLAGS)
+#define SWAPPER_RX_MMUFLAGS	(SWAPPER_RW_MMUFLAGS | PMD_SECT_RDONLY)
#else
-#define SWAPPER_MM_MMUFLAGS	(PTE_ATTRINDX(MT_NORMAL) | SWAPPER_PTE_FLAGS)
+#define SWAPPER_RW_MMUFLAGS	(PTE_ATTRINDX(MT_NORMAL) | SWAPPER_PTE_FLAGS)
+#define SWAPPER_RX_MMUFLAGS	(SWAPPER_RW_MMUFLAGS | PTE_RDONLY)
#endif

/*
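
As a rough worked example of the sizing above (illustrative numbers, assuming 4 KiB pages): if EARLY_PAGES(KIMAGE_VADDR, _end + MAX_FDT_SIZE + SWAPPER_BLOCK_SIZE) comes to 4 page-table pages, a VA_BITS < 48 build reserves (4 + 2) * 4096 = 24576 bytes, the two spare pages covering the case where the ID map must be extended (e.g. gaining an extra translation level) to reach a high physical address, while a VA_BITS >= 48 build reserves just 4 * 4096 = 16384 bytes.
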
4 changes: 4 additions & 0 deletions arch/arm64/include/asm/memory.h
@@ -174,7 +174,11 @@
#include <linux/types.h>
#include <asm/bug.h>

+#if VA_BITS > 48
extern u64 vabits_actual;
+#else
+#define vabits_actual		((u64)VA_BITS)
+#endif

extern s64 memstart_addr;
/* PHYS_OFFSET - the physical address of the start of memory. */
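
A hedged sketch of what the constant folding buys (the expression is modelled on the _PAGE_END-style users of vabits_actual and is illustrative only):

	/* With VA_BITS <= 48 this folds to a build-time constant; only
	 * 52-bit-VA kernels, which may have to run with 48-bit VAs on
	 * CPUs lacking 52-bit support, pay for a runtime load here. */
	u64 page_end = -(UL(1) << (vabits_actual - 1));
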
16 changes: 10 additions & 6 deletions arch/arm64/include/asm/mmu_context.h
@@ -60,8 +60,7 @@ static inline void cpu_switch_mm(pgd_t *pgd, struct mm_struct *mm)
* TCR_T0SZ(VA_BITS), unless system RAM is positioned very high in
* physical memory, in which case it will be smaller.
*/
-extern u64 idmap_t0sz;
-extern u64 idmap_ptrs_per_pgd;
+extern int idmap_t0sz;

/*
* Ensure TCR.T0SZ is set to the provided value.
@@ -106,13 +105,18 @@ static inline void cpu_uninstall_idmap(void)
cpu_switch_mm(mm->pgd, mm);
}

-static inline void cpu_install_idmap(void)
+static inline void __cpu_install_idmap(pgd_t *idmap)
{
	cpu_set_reserved_ttbr0();
	local_flush_tlb_all();
	cpu_set_idmap_tcr_t0sz();

-	cpu_switch_mm(lm_alias(idmap_pg_dir), &init_mm);
+	cpu_switch_mm(lm_alias(idmap), &init_mm);
}
+
+static inline void cpu_install_idmap(void)
+{
+	__cpu_install_idmap(idmap_pg_dir);
+}

/*
Expand Down Expand Up @@ -143,7 +147,7 @@ static inline void cpu_install_ttbr0(phys_addr_t ttbr0, unsigned long t0sz)
* Atomically replaces the active TTBR1_EL1 PGD with a new VA-compatible PGD,
* avoiding the possibility of conflicting TLB entries being allocated.
*/
-static inline void __nocfi cpu_replace_ttbr1(pgd_t *pgdp)
+static inline void __nocfi cpu_replace_ttbr1(pgd_t *pgdp, pgd_t *idmap)
{
typedef void (ttbr_replace_func)(phys_addr_t);
extern ttbr_replace_func idmap_cpu_replace_ttbr1;
@@ -166,7 +170,7 @@ static inline void __nocfi cpu_replace_ttbr1(pgd_t *pgdp)

replace_phys = (void *)__pa_symbol(function_nocfi(idmap_cpu_replace_ttbr1));

-	cpu_install_idmap();
+	__cpu_install_idmap(idmap);
replace_phys(ttbr1);
cpu_uninstall_idmap();
}
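
A call-site sketch for the widened interface (mirroring the cpufeature.c hunk further down; the second argument is whichever ID map the caller can trampoline through while TTBR1 is swapped):

	/* Switch TTBR1 to the swapper page tables, bouncing through the
	 * given ID map to avoid conflicting TLB entries. */
	cpu_replace_ttbr1(lm_alias(swapper_pg_dir), idmap_pg_dir);
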
11 changes: 9 additions & 2 deletions arch/arm64/include/asm/virt.h
@@ -36,9 +36,9 @@
#define HVC_RESET_VECTORS 2

/*
- * HVC_VHE_RESTART - Upgrade the CPU from EL1 to EL2, if possible
+ * HVC_FINALISE_EL2 - Upgrade the CPU from EL1 to EL2, if possible
 */
-#define HVC_VHE_RESTART		3
+#define HVC_FINALISE_EL2	3

/* Max number of HYP stub hypercalls */
#define HVC_STUB_HCALL_NR 4
@@ -49,6 +49,13 @@
#define BOOT_CPU_MODE_EL1 (0xe11)
#define BOOT_CPU_MODE_EL2 (0xe12)

+/*
+ * Flags returned together with the boot mode, but not preserved in
+ * __boot_cpu_mode. Used by the idreg override code to work out the
+ * boot state.
+ */
+#define BOOT_CPU_FLAG_E2H	BIT_ULL(32)
+
#ifndef __ASSEMBLY__

#include <asm/ptrace.h>
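
For illustration (a hypothetical consumer, not code from this commit), a caller could split the stub's return value into boot mode and flags like so:

	/* Hypothetical helper: the low bits hold BOOT_CPU_MODE_EL1/EL2,
	 * bit 32 holds BOOT_CPU_FLAG_E2H (not kept in __boot_cpu_mode). */
	static bool booted_at_el2_with_e2h(unsigned long boot)
	{
		return ((boot & 0xfff) == BOOT_CPU_MODE_EL2) &&
		       (boot & BOOT_CPU_FLAG_E2H);
	}
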
2 changes: 1 addition & 1 deletion arch/arm64/kernel/Makefile
@@ -64,7 +64,7 @@ obj-$(CONFIG_ACPI) += acpi.o
obj-$(CONFIG_ACPI_NUMA) += acpi_numa.o
obj-$(CONFIG_ARM64_ACPI_PARKING_PROTOCOL) += acpi_parking_protocol.o
obj-$(CONFIG_PARAVIRT) += paravirt.o
-obj-$(CONFIG_RANDOMIZE_BASE)	+= kaslr.o
+obj-$(CONFIG_RANDOMIZE_BASE)	+= kaslr.o pi/
obj-$(CONFIG_HIBERNATION) += hibernate.o hibernate-asm.o
obj-$(CONFIG_ELF_CORE) += elfcore.o
obj-$(CONFIG_KEXEC_CORE) += machine_kexec.o relocate_kernel.o \
14 changes: 10 additions & 4 deletions arch/arm64/kernel/cpufeature.c
@@ -633,7 +633,10 @@ static const struct arm64_ftr_bits ftr_raz[] = {
__ARM64_FTR_REG_OVERRIDE(#id, id, table, &no_override)

struct arm64_ftr_override __ro_after_init id_aa64mmfr1_override;
+struct arm64_ftr_override __ro_after_init id_aa64pfr0_override;
struct arm64_ftr_override __ro_after_init id_aa64pfr1_override;
+struct arm64_ftr_override __ro_after_init id_aa64zfr0_override;
+struct arm64_ftr_override __ro_after_init id_aa64smfr0_override;
struct arm64_ftr_override __ro_after_init id_aa64isar1_override;
struct arm64_ftr_override __ro_after_init id_aa64isar2_override;

@@ -670,11 +673,14 @@ static const struct __ftr_reg_entry {
ARM64_FTR_REG(SYS_ID_MMFR5_EL1, ftr_id_mmfr5),

/* Op1 = 0, CRn = 0, CRm = 4 */
-	ARM64_FTR_REG(SYS_ID_AA64PFR0_EL1, ftr_id_aa64pfr0),
+	ARM64_FTR_REG_OVERRIDE(SYS_ID_AA64PFR0_EL1, ftr_id_aa64pfr0,
+			       &id_aa64pfr0_override),
	ARM64_FTR_REG_OVERRIDE(SYS_ID_AA64PFR1_EL1, ftr_id_aa64pfr1,
			       &id_aa64pfr1_override),
-	ARM64_FTR_REG(SYS_ID_AA64ZFR0_EL1, ftr_id_aa64zfr0),
-	ARM64_FTR_REG(SYS_ID_AA64SMFR0_EL1, ftr_id_aa64smfr0),
+	ARM64_FTR_REG_OVERRIDE(SYS_ID_AA64ZFR0_EL1, ftr_id_aa64zfr0,
+			       &id_aa64zfr0_override),
+	ARM64_FTR_REG_OVERRIDE(SYS_ID_AA64SMFR0_EL1, ftr_id_aa64smfr0,
+			       &id_aa64smfr0_override),

/* Op1 = 0, CRn = 0, CRm = 5 */
ARM64_FTR_REG(SYS_ID_AA64DFR0_EL1, ftr_id_aa64dfr0),
@@ -3295,7 +3301,7 @@ subsys_initcall_sync(init_32bit_el0_mask);

static void __maybe_unused cpu_enable_cnp(struct arm64_cpu_capabilities const *cap)
{
-	cpu_replace_ttbr1(lm_alias(swapper_pg_dir));
+	cpu_replace_ttbr1(lm_alias(swapper_pg_dir), idmap_pg_dir);
}

/*
...
