Merge branch kvm-arm64/6.6/misc into kvmarm-master/next
* kvm-arm64/6.6/misc:
  : .
  : Misc KVM/arm64 updates for 6.6:
  :
  : - Don't unnecessarily align non-stack allocations in the EL2 VA space
  :
  : - Drop HCR_VIRT_EXCP_MASK, which was never used...
  :
  : - Don't use smp_processor_id() in kvm_arch_vcpu_load(),
  :   but the cpu parameter instead
  :
  : - Drop redundant call to kvm_set_pfn_accessed() in user_mem_abort()
  :
  : - Remove prototypes without implementations
  : .
  KVM: arm64: Remove size-order align in the nVHE hyp private VA range
  KVM: arm64: Remove unused declarations
  KVM: arm64: Remove redundant kvm_set_pfn_accessed() from user_mem_abort()
  KVM: arm64: Drop HCR_VIRT_EXCP_MASK
  KVM: arm64: Use the known cpu id instead of smp_processor_id()

Signed-off-by: Marc Zyngier <[email protected]>
Marc Zyngier committed Aug 28, 2023
2 parents 50a40ff + f156a7d commit 1f66f12
Showing 9 changed files with 139 additions and 96 deletions.
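
The "don't unnecessarily align non-stack allocations" change above drops the order-of-size alignment that hyp_alloc_private_va_range() and pkvm_alloc_private_va_range() previously applied to every private VA allocation; only the stack keeps it, via the new create_hyp_stack()/pkvm_create_stack() helpers. A stand-alone sketch of the arithmetic (PAGE_SIZE, ALIGN() and get_order() are re-implemented here purely for illustration; this is not kernel code):

#include <stdio.h>

#define PAGE_SIZE   4096UL
#define ALIGN(x, a) (((x) + ((a) - 1)) & ~((a) - 1))

/* Smallest 'order' such that (PAGE_SIZE << order) >= size, like get_order(). */
static unsigned int get_order(unsigned long size)
{
	unsigned int order = 0;

	while ((PAGE_SIZE << order) < size)
		order++;
	return order;
}

int main(void)
{
	unsigned long io_map_base = 0x7000f000UL;	/* made-up example base */
	unsigned long size = 3 * PAGE_SIZE;		/* a 3-page allocation */

	/* Old behaviour: align the start on the order of the size (4 pages here). */
	unsigned long order_aligned = ALIGN(io_map_base, PAGE_SIZE << get_order(size));
	/* New behaviour: plain page alignment is enough for non-stack allocations. */
	unsigned long page_aligned = ALIGN(io_map_base, PAGE_SIZE);

	printf("order-aligned start: 0x%lx (skips 0x%lx of VA)\n",
	       order_aligned, order_aligned - page_aligned);
	printf("page-aligned start:  0x%lx\n", page_aligned);
	return 0;
}

For a 3-page allocation the old rounding skips a page of VA for no benefit; the stack still wants the 2 * PAGE_SIZE alignment because the overflow check sketched further down relies on it.
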
1 change: 0 additions & 1 deletion arch/arm64/include/asm/kvm_arm.h
@@ -98,7 +98,6 @@
HCR_BSU_IS | HCR_FB | HCR_TACR | \
HCR_AMO | HCR_SWIO | HCR_TIDCP | HCR_RW | HCR_TLOR | \
HCR_FMO | HCR_IMO | HCR_PTW | HCR_TID3)
#define HCR_VIRT_EXCP_MASK (HCR_VSE | HCR_VI | HCR_VF)
#define HCR_HOST_NVHE_FLAGS (HCR_RW | HCR_API | HCR_APK | HCR_ATA)
#define HCR_HOST_NVHE_PROTECTED_FLAGS (HCR_HOST_NVHE_FLAGS | HCR_TSC)
#define HCR_HOST_VHE_FLAGS (HCR_RW | HCR_TGE | HCR_E2H)
6 changes: 0 additions & 6 deletions arch/arm64/include/asm/kvm_host.h
@@ -974,8 +974,6 @@ void kvm_arm_resume_guest(struct kvm *kvm);
#define kvm_call_hyp_nvhe(f, ...) f(__VA_ARGS__)
#endif /* __KVM_NVHE_HYPERVISOR__ */

void force_vm_exit(const cpumask_t *mask);

int handle_exit(struct kvm_vcpu *vcpu, int exception_index);
void handle_exit_early(struct kvm_vcpu *vcpu, int exception_index);

@@ -1057,8 +1055,6 @@ static inline bool kvm_system_needs_idmapped_vectors(void)
return cpus_have_const_cap(ARM64_SPECTRE_V3A);
}

void kvm_arm_vcpu_ptrauth_trap(struct kvm_vcpu *vcpu);

static inline void kvm_arch_sync_events(struct kvm *kvm) {}
static inline void kvm_arch_sched_in(struct kvm_vcpu *vcpu, int cpu) {}

@@ -1130,8 +1126,6 @@ static inline bool kvm_vm_is_protected(struct kvm *kvm)
return false;
}

void kvm_init_protected_traps(struct kvm_vcpu *vcpu);

int kvm_arm_vcpu_finalize(struct kvm_vcpu *vcpu, int feature);
bool kvm_arm_vcpu_is_finalized(struct kvm_vcpu *vcpu);

1 change: 1 addition & 0 deletions arch/arm64/include/asm/kvm_mmu.h
@@ -168,6 +168,7 @@ int create_hyp_io_mappings(phys_addr_t phys_addr, size_t size,
void __iomem **haddr);
int create_hyp_exec_mappings(phys_addr_t phys_addr, size_t size,
void **haddr);
int create_hyp_stack(phys_addr_t phys_addr, unsigned long *haddr);
void __init free_hyp_pgds(void);

void stage2_unmap_vm(struct kvm *kvm);
28 changes: 2 additions & 26 deletions arch/arm64/kvm/arm.c
@@ -463,7 +463,7 @@ void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
vcpu_ptrauth_disable(vcpu);
kvm_arch_vcpu_load_debug_state_flags(vcpu);

if (!cpumask_test_cpu(smp_processor_id(), vcpu->kvm->arch.supported_cpus))
if (!cpumask_test_cpu(cpu, vcpu->kvm->arch.supported_cpus))
vcpu_set_on_unsupported_cpu(vcpu);
}

@@ -2282,30 +2282,8 @@ static int __init init_hyp_mode(void)
for_each_possible_cpu(cpu) {
struct kvm_nvhe_init_params *params = per_cpu_ptr_nvhe_sym(kvm_init_params, cpu);
char *stack_page = (char *)per_cpu(kvm_arm_hyp_stack_page, cpu);
unsigned long hyp_addr;

/*
* Allocate a contiguous HYP private VA range for the stack
* and guard page. The allocation is also aligned based on
* the order of its size.
*/
err = hyp_alloc_private_va_range(PAGE_SIZE * 2, &hyp_addr);
if (err) {
kvm_err("Cannot allocate hyp stack guard page\n");
goto out_err;
}

/*
* Since the stack grows downwards, map the stack to the page
* at the higher address and leave the lower guard page
* unbacked.
*
* Any valid stack address now has the PAGE_SHIFT bit as 1
* and addresses corresponding to the guard page have the
* PAGE_SHIFT bit as 0 - this is used for overflow detection.
*/
err = __create_hyp_mappings(hyp_addr + PAGE_SIZE, PAGE_SIZE,
__pa(stack_page), PAGE_HYP);
err = create_hyp_stack(__pa(stack_page), &params->stack_hyp_va);
if (err) {
kvm_err("Cannot map hyp stack\n");
goto out_err;
@@ -2318,8 +2296,6 @@ static int __init init_hyp_mode(void)
* has been mapped in the flexible private VA space.
*/
params->stack_pa = __pa(stack_page);

params->stack_hyp_va = hyp_addr + (2 * PAGE_SIZE);
}

for_each_possible_cpu(cpu) {
1 change: 1 addition & 0 deletions arch/arm64/kvm/hyp/include/nvhe/mm.h
@@ -26,6 +26,7 @@ int pkvm_create_mappings_locked(void *from, void *to, enum kvm_pgtable_prot prot
int __pkvm_create_private_mapping(phys_addr_t phys, size_t size,
enum kvm_pgtable_prot prot,
unsigned long *haddr);
int pkvm_create_stack(phys_addr_t phys, unsigned long *haddr);
int pkvm_alloc_private_va_range(size_t size, unsigned long *haddr);

#endif /* __KVM_HYP_MM_H */
83 changes: 66 additions & 17 deletions arch/arm64/kvm/hyp/nvhe/mm.c
@@ -44,6 +44,27 @@ static int __pkvm_create_mappings(unsigned long start, unsigned long size,
return err;
}

static int __pkvm_alloc_private_va_range(unsigned long start, size_t size)
{
unsigned long cur;

hyp_assert_lock_held(&pkvm_pgd_lock);

if (!start || start < __io_map_base)
return -EINVAL;

/* The allocated size is always a multiple of PAGE_SIZE */
cur = start + PAGE_ALIGN(size);

/* Are we overflowing on the vmemmap ? */
if (cur > __hyp_vmemmap)
return -ENOMEM;

__io_map_base = cur;

return 0;
}

/**
* pkvm_alloc_private_va_range - Allocates a private VA range.
* @size: The size of the VA range to reserve.
@@ -56,27 +77,16 @@ static int __pkvm_create_mappings(unsigned long start, unsigned long size,
*/
int pkvm_alloc_private_va_range(size_t size, unsigned long *haddr)
{
unsigned long base, addr;
int ret = 0;
unsigned long addr;
int ret;

hyp_spin_lock(&pkvm_pgd_lock);

/* Align the allocation based on the order of its size */
addr = ALIGN(__io_map_base, PAGE_SIZE << get_order(size));

/* The allocated size is always a multiple of PAGE_SIZE */
base = addr + PAGE_ALIGN(size);

/* Are we overflowing on the vmemmap ? */
if (!addr || base > __hyp_vmemmap)
ret = -ENOMEM;
else {
__io_map_base = base;
*haddr = addr;
}

addr = __io_map_base;
ret = __pkvm_alloc_private_va_range(addr, size);
hyp_spin_unlock(&pkvm_pgd_lock);

*haddr = addr;

return ret;
}

@@ -340,6 +350,45 @@ int hyp_create_idmap(u32 hyp_va_bits)
return __pkvm_create_mappings(start, end - start, start, PAGE_HYP_EXEC);
}

int pkvm_create_stack(phys_addr_t phys, unsigned long *haddr)
{
unsigned long addr, prev_base;
size_t size;
int ret;

hyp_spin_lock(&pkvm_pgd_lock);

prev_base = __io_map_base;
/*
* Efficient stack verification using the PAGE_SHIFT bit implies
* an alignment of our allocation on the order of the size.
*/
size = PAGE_SIZE * 2;
addr = ALIGN(__io_map_base, size);

ret = __pkvm_alloc_private_va_range(addr, size);
if (!ret) {
/*
* Since the stack grows downwards, map the stack to the page
* at the higher address and leave the lower guard page
* unbacked.
*
* Any valid stack address now has the PAGE_SHIFT bit as 1
* and addresses corresponding to the guard page have the
* PAGE_SHIFT bit as 0 - this is used for overflow detection.
*/
ret = kvm_pgtable_hyp_map(&pkvm_pgtable, addr + PAGE_SIZE,
PAGE_SIZE, phys, PAGE_HYP);
if (ret)
__io_map_base = prev_base;
}
hyp_spin_unlock(&pkvm_pgd_lock);

*haddr = addr + size;

return ret;
}

static void *admit_host_page(void *arg)
{
struct kvm_hyp_memcache *host_mc = arg;
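
The comment added in pkvm_create_stack() describes why the stack's two-page VA range must be aligned to its own size: with only the upper page backed, bit PAGE_SHIFT of an address distinguishes stack from guard page. A minimal user-space sketch of that test, assuming 4KiB pages (PAGE_SHIFT = 12) and an illustrative hit_guard_page() helper rather than the kernel's actual overflow handler:

#include <stdbool.h>
#include <stdio.h>

#define PAGE_SHIFT	12
#define PAGE_SIZE	(1UL << PAGE_SHIFT)

/* True if a faulting stack pointer landed in the unbacked guard page. */
static bool hit_guard_page(unsigned long sp)
{
	return !(sp & PAGE_SIZE);	/* PAGE_SHIFT bit clear => guard page */
}

int main(void)
{
	unsigned long base = 0x40000000UL;		   /* 2 * PAGE_SIZE aligned */
	unsigned long stack_hyp_va = base + 2 * PAGE_SIZE; /* end of backed page */

	/* An SP still inside the backed stack page: bit PAGE_SHIFT is set. */
	printf("sp=0x%lx overflow=%d\n", stack_hyp_va - 16,
	       hit_guard_page(stack_hyp_va - 16));
	/* An SP that ran past the stack into the guard page: bit is clear. */
	printf("sp=0x%lx overflow=%d\n", base + PAGE_SIZE - 16,
	       hit_guard_page(base + PAGE_SIZE - 16));
	return 0;
}

stack_hyp_va here mirrors what create_hyp_stack()/pkvm_create_stack() return: the end of the private VA range, i.e. the initial stack pointer of the empty, downward-growing stack.
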
27 changes: 1 addition & 26 deletions arch/arm64/kvm/hyp/nvhe/setup.c
@@ -113,41 +113,16 @@ static int recreate_hyp_mappings(phys_addr_t phys, unsigned long size,

for (i = 0; i < hyp_nr_cpus; i++) {
struct kvm_nvhe_init_params *params = per_cpu_ptr(&kvm_init_params, i);
unsigned long hyp_addr;

start = (void *)kern_hyp_va(per_cpu_base[i]);
end = start + PAGE_ALIGN(hyp_percpu_size);
ret = pkvm_create_mappings(start, end, PAGE_HYP);
if (ret)
return ret;

/*
* Allocate a contiguous HYP private VA range for the stack
* and guard page. The allocation is also aligned based on
* the order of its size.
*/
ret = pkvm_alloc_private_va_range(PAGE_SIZE * 2, &hyp_addr);
ret = pkvm_create_stack(params->stack_pa, &params->stack_hyp_va);
if (ret)
return ret;

/*
* Since the stack grows downwards, map the stack to the page
* at the higher address and leave the lower guard page
* unbacked.
*
* Any valid stack address now has the PAGE_SHIFT bit as 1
* and addresses corresponding to the guard page have the
* PAGE_SHIFT bit as 0 - this is used for overflow detection.
*/
hyp_spin_lock(&pkvm_pgd_lock);
ret = kvm_pgtable_hyp_map(&pkvm_pgtable, hyp_addr + PAGE_SIZE,
PAGE_SIZE, params->stack_pa, PAGE_HYP);
hyp_spin_unlock(&pkvm_pgd_lock);
if (ret)
return ret;

/* Update stack_hyp_va to end of the stack's private VA range */
params->stack_hyp_va = hyp_addr + (2 * PAGE_SIZE);
}

/*
86 changes: 68 additions & 18 deletions arch/arm64/kvm/mmu.c
@@ -600,6 +600,25 @@ int create_hyp_mappings(void *from, void *to, enum kvm_pgtable_prot prot)
return 0;
}

static int __hyp_alloc_private_va_range(unsigned long base)
{
lockdep_assert_held(&kvm_hyp_pgd_mutex);

if (!PAGE_ALIGNED(base))
return -EINVAL;

/*
* Verify that BIT(VA_BITS - 1) hasn't been flipped by
* allocating the new area, as it would indicate we've
* overflowed the idmap/IO address range.
*/
if ((base ^ io_map_base) & BIT(VA_BITS - 1))
return -ENOMEM;

io_map_base = base;

return 0;
}

/**
* hyp_alloc_private_va_range - Allocates a private VA range.
@@ -620,26 +639,16 @@ int hyp_alloc_private_va_range(size_t size, unsigned long *haddr)

/*
* This assumes that we have enough space below the idmap
* page to allocate our VAs. If not, the check below will
* kick. A potential alternative would be to detect that
* overflow and switch to an allocation above the idmap.
* page to allocate our VAs. If not, the check in
* __hyp_alloc_private_va_range() will kick. A potential
* alternative would be to detect that overflow and switch
* to an allocation above the idmap.
*
* The allocated size is always a multiple of PAGE_SIZE.
*/
base = io_map_base - PAGE_ALIGN(size);

/* Align the allocation based on the order of its size */
base = ALIGN_DOWN(base, PAGE_SIZE << get_order(size));

/*
* Verify that BIT(VA_BITS - 1) hasn't been flipped by
* allocating the new area, as it would indicate we've
* overflowed the idmap/IO address range.
*/
if ((base ^ io_map_base) & BIT(VA_BITS - 1))
ret = -ENOMEM;
else
*haddr = io_map_base = base;
size = PAGE_ALIGN(size);
base = io_map_base - size;
ret = __hyp_alloc_private_va_range(base);

mutex_unlock(&kvm_hyp_pgd_mutex);

@@ -676,6 +685,48 @@ static int __create_hyp_private_mapping(phys_addr_t phys_addr, size_t size,
return ret;
}

int create_hyp_stack(phys_addr_t phys_addr, unsigned long *haddr)
{
unsigned long base;
size_t size;
int ret;

mutex_lock(&kvm_hyp_pgd_mutex);
/*
* Efficient stack verification using the PAGE_SHIFT bit implies
* an alignment of our allocation on the order of the size.
*/
size = PAGE_SIZE * 2;
base = ALIGN_DOWN(io_map_base - size, size);

ret = __hyp_alloc_private_va_range(base);

mutex_unlock(&kvm_hyp_pgd_mutex);

if (ret) {
kvm_err("Cannot allocate hyp stack guard page\n");
return ret;
}

/*
* Since the stack grows downwards, map the stack to the page
* at the higher address and leave the lower guard page
* unbacked.
*
* Any valid stack address now has the PAGE_SHIFT bit as 1
* and addresses corresponding to the guard page have the
* PAGE_SHIFT bit as 0 - this is used for overflow detection.
*/
ret = __create_hyp_mappings(base + PAGE_SIZE, PAGE_SIZE, phys_addr,
PAGE_HYP);
if (ret)
kvm_err("Cannot map hyp stack\n");

*haddr = base + size;

return ret;
}

/**
* create_hyp_io_mappings - Map IO into both kernel and HYP
* @phys_addr: The physical start address which gets mapped
@@ -1549,7 +1600,6 @@ static int user_mem_abort(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa,

out_unlock:
read_unlock(&kvm->mmu_lock);
kvm_set_pfn_accessed(pfn);
kvm_release_pfn_clean(pfn);
return ret != -EAGAIN ? ret : 0;
}
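
The new __hyp_alloc_private_va_range() allocates downwards from io_map_base and treats a flip of BIT(VA_BITS - 1) as overflowing the idmap/IO range. A self-contained sketch of that check, with VA_BITS, BIT() and the starting io_map_base value chosen purely for illustration (-1 stands in for -ENOMEM):

#include <stdio.h>

#define VA_BITS		48
#define BIT(n)		(1UL << (n))
#define PAGE_SIZE	4096UL

/* Pretend the allocator starts four pages above the BIT(VA_BITS - 1) boundary. */
static unsigned long io_map_base = BIT(VA_BITS - 1) + 4 * PAGE_SIZE;

/* Reserve 'size' bytes below io_map_base; fail if BIT(VA_BITS - 1) flips. */
static int alloc_private_va(unsigned long size, unsigned long *haddr)
{
	unsigned long base = io_map_base - size;

	/* A flipped top bit means the allocation ran below the idmap/IO range. */
	if ((base ^ io_map_base) & BIT(VA_BITS - 1))
		return -1;

	io_map_base = base;
	*haddr = base;
	return 0;
}

int main(void)
{
	unsigned long va;

	printf("2 pages: %s\n", alloc_private_va(2 * PAGE_SIZE, &va) ? "overflow" : "ok");
	printf("8 pages: %s\n", alloc_private_va(8 * PAGE_SIZE, &va) ? "overflow" : "ok");
	return 0;
}

Only two pages remain above the boundary after the first allocation succeeds, so the eight-page request would cross it and is rejected.
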
2 changes: 0 additions & 2 deletions arch/arm64/kvm/vgic/vgic.h
@@ -199,7 +199,6 @@ void vgic_v2_fold_lr_state(struct kvm_vcpu *vcpu);
void vgic_v2_populate_lr(struct kvm_vcpu *vcpu, struct vgic_irq *irq, int lr);
void vgic_v2_clear_lr(struct kvm_vcpu *vcpu, int lr);
void vgic_v2_set_underflow(struct kvm_vcpu *vcpu);
void vgic_v2_set_npie(struct kvm_vcpu *vcpu);
int vgic_v2_has_attr_regs(struct kvm_device *dev, struct kvm_device_attr *attr);
int vgic_v2_dist_uaccess(struct kvm_vcpu *vcpu, bool is_write,
int offset, u32 *val);
@@ -233,7 +232,6 @@ void vgic_v3_fold_lr_state(struct kvm_vcpu *vcpu);
void vgic_v3_populate_lr(struct kvm_vcpu *vcpu, struct vgic_irq *irq, int lr);
void vgic_v3_clear_lr(struct kvm_vcpu *vcpu, int lr);
void vgic_v3_set_underflow(struct kvm_vcpu *vcpu);
void vgic_v3_set_npie(struct kvm_vcpu *vcpu);
void vgic_v3_set_vmcr(struct kvm_vcpu *vcpu, struct vgic_vmcr *vmcr);
void vgic_v3_get_vmcr(struct kvm_vcpu *vcpu, struct vgic_vmcr *vmcr);
void vgic_v3_enable(struct kvm_vcpu *vcpu);
