Merge tag 'kvm-x86-selftests-6.9' of https://github.com/kvm-x86/linux into HEAD

KVM selftests changes for 6.9:

 - Add macros to reduce the amount of boilerplate code needed to write "simple"
   selftests, and to utilize selftest TAP infrastructure, which is especially
   beneficial for KVM selftests with multiple testcases.

 - Add basic smoke tests for SEV and SEV-ES, along with a pile of library
   support for handling private/encrypted/protected memory (a usage sketch
   follows this list).

 - Fix benign bugs where tests neglect to close() guest_memfd files.
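As a hedged illustration of the SEV bullet above, a minimal smoke test built
on the new library support might look like the sketch below. This is a sketch
under assumptions rather than an excerpt from the merge: vm_sev_create_with_one_vcpu(),
SEV_POLICY_NO_DBG, and the ucall-based guest body are taken to be the
conventions of the new SEV library.

#include "kvm_util.h"
#include "processor.h"
#include "sev.h"

/* Guest side: report completion over the shared ucall page. */
static void guest_sev_code(void)
{
	GUEST_DONE();
}

int main(int argc, char *argv[])
{
	struct kvm_vcpu *vcpu;
	struct kvm_vm *vm;
	struct ucall uc;

	/* Skip (rather than fail) on hosts without SEV. */
	TEST_REQUIRE(kvm_cpu_has(X86_FEATURE_SEV));

	/* Assumed helper: creates, measures, and launches an SEV guest. */
	vm = vm_sev_create_with_one_vcpu(SEV_POLICY_NO_DBG, guest_sev_code, &vcpu);

	vcpu_run(vcpu);
	TEST_ASSERT_EQ(get_ucall(vcpu, &uc), UCALL_DONE);

	kvm_vm_free(vm);
	return 0;
}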
bonzini committed Mar 11, 2024
2 parents f074158 + e9da6f0 commit 4d4c028
Showing 26 changed files with 802 additions and 240 deletions.
2 changes: 2 additions & 0 deletions tools/testing/selftests/kvm/Makefile
@@ -37,6 +37,7 @@ LIBKVM_x86_64 += lib/x86_64/handlers.S
 LIBKVM_x86_64 += lib/x86_64/hyperv.c
 LIBKVM_x86_64 += lib/x86_64/memstress.c
 LIBKVM_x86_64 += lib/x86_64/processor.c
+LIBKVM_x86_64 += lib/x86_64/sev.c
 LIBKVM_x86_64 += lib/x86_64/svm.c
 LIBKVM_x86_64 += lib/x86_64/ucall.c
 LIBKVM_x86_64 += lib/x86_64/vmx.c
@@ -118,6 +119,7 @@ TEST_GEN_PROGS_x86_64 += x86_64/vmx_pmu_caps_test
 TEST_GEN_PROGS_x86_64 += x86_64/xen_shinfo_test
 TEST_GEN_PROGS_x86_64 += x86_64/xen_vmcall_test
 TEST_GEN_PROGS_x86_64 += x86_64/sev_migrate_tests
+TEST_GEN_PROGS_x86_64 += x86_64/sev_smoke_test
 TEST_GEN_PROGS_x86_64 += x86_64/amx_test
 TEST_GEN_PROGS_x86_64 += x86_64/max_vcpuid_cap_test
 TEST_GEN_PROGS_x86_64 += x86_64/triple_fault_event_test
3 changes: 3 additions & 0 deletions tools/testing/selftests/kvm/guest_memfd_test.c
@@ -167,6 +167,9 @@ static void test_create_guest_memfd_multiple(struct kvm_vm *vm)
 	TEST_ASSERT(ret != -1, "memfd fstat should succeed");
 	TEST_ASSERT(st1.st_size == 4096, "first memfd st_size should still match requested size");
 	TEST_ASSERT(st1.st_ino != st2.st_ino, "different memfd should have different inode numbers");
+
+	close(fd2);
+	close(fd1);
 }
 
 int main(int argc, char *argv[])
7 changes: 7 additions & 0 deletions tools/testing/selftests/kvm/include/aarch64/kvm_util_arch.h
@@ -0,0 +1,7 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+#ifndef SELFTEST_KVM_UTIL_ARCH_H
+#define SELFTEST_KVM_UTIL_ARCH_H
+
+struct kvm_vm_arch {};
+
+#endif // SELFTEST_KVM_UTIL_ARCH_H
36 changes: 36 additions & 0 deletions tools/testing/selftests/kvm/include/kvm_test_harness.h
@@ -0,0 +1,36 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Macros for defining a KVM test
+ *
+ * Copyright (C) 2022, Google LLC.
+ */
+
+#ifndef SELFTEST_KVM_TEST_HARNESS_H
+#define SELFTEST_KVM_TEST_HARNESS_H
+
+#include "kselftest_harness.h"
+
+#define KVM_ONE_VCPU_TEST_SUITE(name)				\
+	FIXTURE(name) {						\
+		struct kvm_vcpu *vcpu;				\
+	};							\
+								\
+	FIXTURE_SETUP(name) {					\
+		(void)vm_create_with_one_vcpu(&self->vcpu, NULL); \
+	}							\
+								\
+	FIXTURE_TEARDOWN(name) {				\
+		kvm_vm_free(self->vcpu->vm);			\
+	}
+
+#define KVM_ONE_VCPU_TEST(suite, test, guestcode)		\
+static void __suite##_##test(struct kvm_vcpu *vcpu);		\
+								\
+TEST_F(suite, test)						\
+{								\
+	vcpu_arch_set_entry_point(self->vcpu, guestcode);	\
+	__suite##_##test(self->vcpu);				\
+}								\
+static void __suite##_##test(struct kvm_vcpu *vcpu)
+
+#endif /* SELFTEST_KVM_TEST_HARNESS_H */
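For context, a hypothetical test built on these macros might read as follows;
the suite name, test name, and guest function are invented for illustration,
while KVM_ONE_VCPU_TEST_SUITE(), KVM_ONE_VCPU_TEST(), and TEST_HARNESS_MAIN
come from the harness headers. Each test generated this way is a TEST_F()
fixture case, so the kselftest harness prints one TAP result line per
testcase.

#include "kvm_test_harness.h"
#include "kvm_util.h"
#include "processor.h"

/* Invented guest body: immediately signal completion via ucall. */
static void guest_code(void)
{
	GUEST_DONE();
}

/* Defines the fixture: setup creates a one-vCPU VM, teardown frees it. */
KVM_ONE_VCPU_TEST_SUITE(demo);

/* Expands to a TEST_F() that sets the entry point, then runs this body. */
KVM_ONE_VCPU_TEST(demo, runs_to_done, guest_code)
{
	struct ucall uc;

	vcpu_run(vcpu);
	TEST_ASSERT_EQ(get_ucall(vcpu, &uc), UCALL_DONE);
}

TEST_HARNESS_MAIN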
61 changes: 53 additions & 8 deletions tools/testing/selftests/kvm/include/kvm_util_base.h
@@ -18,9 +18,11 @@
 #include <linux/types.h>
 
 #include <asm/atomic.h>
+#include <asm/kvm.h>
 
 #include <sys/ioctl.h>
 
+#include "kvm_util_arch.h"
 #include "sparsebit.h"
 
 /*
@@ -46,6 +48,7 @@ typedef uint64_t vm_vaddr_t; /* Virtual Machine (Guest) virtual address */
 struct userspace_mem_region {
 	struct kvm_userspace_memory_region2 region;
 	struct sparsebit *unused_phy_pages;
+	struct sparsebit *protected_phy_pages;
 	int fd;
 	off_t offset;
 	enum vm_mem_backing_src_type backing_src_type;
@@ -90,6 +93,7 @@ enum kvm_mem_region_type {
 struct kvm_vm {
 	int mode;
 	unsigned long type;
+	uint8_t subtype;
 	int kvm_fd;
 	int fd;
 	unsigned int pgtable_levels;
@@ -111,6 +115,9 @@ struct kvm_vm {
 	vm_vaddr_t idt;
 	vm_vaddr_t handlers;
 	uint32_t dirty_ring_size;
+	uint64_t gpa_tag_mask;
+
+	struct kvm_vm_arch arch;
 
 	/* Cache of information for binary stats interface */
 	int stats_fd;
@@ -191,10 +198,14 @@ enum vm_guest_mode {
 };
 
 struct vm_shape {
-	enum vm_guest_mode mode;
-	unsigned int type;
+	uint32_t type;
+	uint8_t mode;
+	uint8_t subtype;
+	uint16_t padding;
 };
 
+kvm_static_assert(sizeof(struct vm_shape) == sizeof(uint64_t));
+
 #define VM_TYPE_DEFAULT 0
 
 #define VM_SHAPE(__mode) \
@@ -564,6 +575,13 @@ void vm_mem_add(struct kvm_vm *vm, enum vm_mem_backing_src_type src_type,
 		uint64_t guest_paddr, uint32_t slot, uint64_t npages,
 		uint32_t flags, int guest_memfd_fd, uint64_t guest_memfd_offset);
 
+#ifndef vm_arch_has_protected_memory
+static inline bool vm_arch_has_protected_memory(struct kvm_vm *vm)
+{
+	return false;
+}
+#endif
+
 void vm_mem_region_set_flags(struct kvm_vm *vm, uint32_t slot, uint32_t flags);
 void vm_mem_region_move(struct kvm_vm *vm, uint32_t slot, uint64_t new_gpa);
 void vm_mem_region_delete(struct kvm_vm *vm, uint32_t slot);
@@ -573,6 +591,9 @@ vm_vaddr_t vm_vaddr_unused_gap(struct kvm_vm *vm, size_t sz, vm_vaddr_t vaddr_mi
 vm_vaddr_t vm_vaddr_alloc(struct kvm_vm *vm, size_t sz, vm_vaddr_t vaddr_min);
 vm_vaddr_t __vm_vaddr_alloc(struct kvm_vm *vm, size_t sz, vm_vaddr_t vaddr_min,
 			    enum kvm_mem_region_type type);
+vm_vaddr_t vm_vaddr_alloc_shared(struct kvm_vm *vm, size_t sz,
+				 vm_vaddr_t vaddr_min,
+				 enum kvm_mem_region_type type);
 vm_vaddr_t vm_vaddr_alloc_pages(struct kvm_vm *vm, int nr_pages);
 vm_vaddr_t __vm_vaddr_alloc_page(struct kvm_vm *vm,
 				 enum kvm_mem_region_type type);
@@ -585,6 +606,12 @@ void *addr_gva2hva(struct kvm_vm *vm, vm_vaddr_t gva);
 vm_paddr_t addr_hva2gpa(struct kvm_vm *vm, void *hva);
 void *addr_gpa2alias(struct kvm_vm *vm, vm_paddr_t gpa);
 
+
+static inline vm_paddr_t vm_untag_gpa(struct kvm_vm *vm, vm_paddr_t gpa)
+{
+	return gpa & ~vm->gpa_tag_mask;
+}
+
 void vcpu_run(struct kvm_vcpu *vcpu);
 int _vcpu_run(struct kvm_vcpu *vcpu);
 
@@ -827,10 +854,23 @@ const char *exit_reason_str(unsigned int exit_reason);
 
 vm_paddr_t vm_phy_page_alloc(struct kvm_vm *vm, vm_paddr_t paddr_min,
 			     uint32_t memslot);
-vm_paddr_t vm_phy_pages_alloc(struct kvm_vm *vm, size_t num,
-			      vm_paddr_t paddr_min, uint32_t memslot);
+vm_paddr_t __vm_phy_pages_alloc(struct kvm_vm *vm, size_t num,
+				vm_paddr_t paddr_min, uint32_t memslot,
+				bool protected);
 vm_paddr_t vm_alloc_page_table(struct kvm_vm *vm);
 
+static inline vm_paddr_t vm_phy_pages_alloc(struct kvm_vm *vm, size_t num,
+					    vm_paddr_t paddr_min, uint32_t memslot)
+{
+	/*
+	 * By default, allocate memory as protected for VMs that support
+	 * protected memory, as the majority of memory for such VMs is
+	 * protected, i.e. using shared memory is effectively opt-in.
+	 */
+	return __vm_phy_pages_alloc(vm, num, paddr_min, memslot,
+				    vm_arch_has_protected_memory(vm));
+}
+
 /*
  * ____vm_create() does KVM_CREATE_VM and little else. __vm_create() also
  * loads the test binary into guest memory and creates an IRQ chip (x86 only).
@@ -969,15 +1009,18 @@ static inline void vcpu_dump(FILE *stream, struct kvm_vcpu *vcpu,
  * Input Args:
  *   vm - Virtual Machine
  *   vcpu_id - The id of the VCPU to add to the VM.
- *   guest_code - The vCPU's entry point
  */
-struct kvm_vcpu *vm_arch_vcpu_add(struct kvm_vm *vm, uint32_t vcpu_id,
-				  void *guest_code);
+struct kvm_vcpu *vm_arch_vcpu_add(struct kvm_vm *vm, uint32_t vcpu_id);
+void vcpu_arch_set_entry_point(struct kvm_vcpu *vcpu, void *guest_code);
 
 static inline struct kvm_vcpu *vm_vcpu_add(struct kvm_vm *vm, uint32_t vcpu_id,
 					   void *guest_code)
 {
-	return vm_arch_vcpu_add(vm, vcpu_id, guest_code);
+	struct kvm_vcpu *vcpu = vm_arch_vcpu_add(vm, vcpu_id);
+
+	vcpu_arch_set_entry_point(vcpu, guest_code);
+
+	return vcpu;
 }
 
 /* Re-create a vCPU after restarting a VM, e.g. for state save/restore tests. */
@@ -1081,6 +1124,8 @@ void kvm_selftest_arch_init(void);
 
 void kvm_arch_vm_post_create(struct kvm_vm *vm);
 
+bool vm_is_gpa_protected(struct kvm_vm *vm, vm_paddr_t paddr);
+
 uint32_t guest_get_vcpuid(void);
 
 #endif /* SELFTEST_KVM_UTIL_BASE_H */
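A rough, hypothetical snippet tying the declarations above together:
vm_phy_pages_alloc() now allocates protected pages by default on VMs with
protected memory, __vm_phy_pages_alloc() with protected=false is the opt-in
path for shared pages, and vm_untag_gpa() strips tag bits (such as SEV's
C-bit) from guest-provided GPAs. The helper itself and its use of
KVM_UTIL_MIN_PFN are illustrative assumptions.

#include "kvm_util.h"

/* Hypothetical helper exercising the new allocation/tagging APIs. */
static void demo_protected_vs_shared(struct kvm_vm *vm)
{
	/* Protected by default when vm_arch_has_protected_memory(vm). */
	vm_paddr_t protected_gpa = vm_phy_pages_alloc(vm, 1,
						      KVM_UTIL_MIN_PFN * vm->page_size, 0);

	/* Explicitly opt in to shared memory via the __ variant. */
	vm_paddr_t shared_gpa = __vm_phy_pages_alloc(vm, 1,
						     KVM_UTIL_MIN_PFN * vm->page_size, 0, false);

	/* GPAs pulled from guest PTEs may carry a tag, e.g. SEV's C-bit. */
	vm_paddr_t tagged = protected_gpa | vm->gpa_tag_mask;

	TEST_ASSERT(vm_untag_gpa(vm, tagged) == protected_gpa,
		    "vm_untag_gpa() should strip the tag bits");
	TEST_ASSERT(!vm_is_gpa_protected(vm, shared_gpa),
		    "shared allocation should not be tracked as protected");
}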
7 changes: 7 additions & 0 deletions tools/testing/selftests/kvm/include/riscv/kvm_util_arch.h
@@ -0,0 +1,7 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+#ifndef SELFTEST_KVM_UTIL_ARCH_H
+#define SELFTEST_KVM_UTIL_ARCH_H
+
+struct kvm_vm_arch {};
+
+#endif // SELFTEST_KVM_UTIL_ARCH_H
7 changes: 7 additions & 0 deletions tools/testing/selftests/kvm/include/s390x/kvm_util_arch.h
@@ -0,0 +1,7 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+#ifndef SELFTEST_KVM_UTIL_ARCH_H
+#define SELFTEST_KVM_UTIL_ARCH_H
+
+struct kvm_vm_arch {};
+
+#endif // SELFTEST_KVM_UTIL_ARCH_H
56 changes: 38 additions & 18 deletions tools/testing/selftests/kvm/include/sparsebit.h
@@ -30,26 +30,26 @@ typedef uint64_t sparsebit_num_t;
 
 struct sparsebit *sparsebit_alloc(void);
 void sparsebit_free(struct sparsebit **sbitp);
-void sparsebit_copy(struct sparsebit *dstp, struct sparsebit *src);
+void sparsebit_copy(struct sparsebit *dstp, const struct sparsebit *src);
 
-bool sparsebit_is_set(struct sparsebit *sbit, sparsebit_idx_t idx);
-bool sparsebit_is_set_num(struct sparsebit *sbit,
+bool sparsebit_is_set(const struct sparsebit *sbit, sparsebit_idx_t idx);
+bool sparsebit_is_set_num(const struct sparsebit *sbit,
 			  sparsebit_idx_t idx, sparsebit_num_t num);
-bool sparsebit_is_clear(struct sparsebit *sbit, sparsebit_idx_t idx);
-bool sparsebit_is_clear_num(struct sparsebit *sbit,
+bool sparsebit_is_clear(const struct sparsebit *sbit, sparsebit_idx_t idx);
+bool sparsebit_is_clear_num(const struct sparsebit *sbit,
 			    sparsebit_idx_t idx, sparsebit_num_t num);
-sparsebit_num_t sparsebit_num_set(struct sparsebit *sbit);
-bool sparsebit_any_set(struct sparsebit *sbit);
-bool sparsebit_any_clear(struct sparsebit *sbit);
-bool sparsebit_all_set(struct sparsebit *sbit);
-bool sparsebit_all_clear(struct sparsebit *sbit);
-sparsebit_idx_t sparsebit_first_set(struct sparsebit *sbit);
-sparsebit_idx_t sparsebit_first_clear(struct sparsebit *sbit);
-sparsebit_idx_t sparsebit_next_set(struct sparsebit *sbit, sparsebit_idx_t prev);
-sparsebit_idx_t sparsebit_next_clear(struct sparsebit *sbit, sparsebit_idx_t prev);
-sparsebit_idx_t sparsebit_next_set_num(struct sparsebit *sbit,
+sparsebit_num_t sparsebit_num_set(const struct sparsebit *sbit);
+bool sparsebit_any_set(const struct sparsebit *sbit);
+bool sparsebit_any_clear(const struct sparsebit *sbit);
+bool sparsebit_all_set(const struct sparsebit *sbit);
+bool sparsebit_all_clear(const struct sparsebit *sbit);
+sparsebit_idx_t sparsebit_first_set(const struct sparsebit *sbit);
+sparsebit_idx_t sparsebit_first_clear(const struct sparsebit *sbit);
+sparsebit_idx_t sparsebit_next_set(const struct sparsebit *sbit, sparsebit_idx_t prev);
+sparsebit_idx_t sparsebit_next_clear(const struct sparsebit *sbit, sparsebit_idx_t prev);
+sparsebit_idx_t sparsebit_next_set_num(const struct sparsebit *sbit,
 				       sparsebit_idx_t start, sparsebit_num_t num);
-sparsebit_idx_t sparsebit_next_clear_num(struct sparsebit *sbit,
+sparsebit_idx_t sparsebit_next_clear_num(const struct sparsebit *sbit,
 					 sparsebit_idx_t start, sparsebit_num_t num);
 
 void sparsebit_set(struct sparsebit *sbitp, sparsebit_idx_t idx);
@@ -62,9 +62,29 @@ void sparsebit_clear_num(struct sparsebit *sbitp,
 			 sparsebit_idx_t start, sparsebit_num_t num);
 void sparsebit_clear_all(struct sparsebit *sbitp);
 
-void sparsebit_dump(FILE *stream, struct sparsebit *sbit,
+void sparsebit_dump(FILE *stream, const struct sparsebit *sbit,
 		    unsigned int indent);
-void sparsebit_validate_internal(struct sparsebit *sbit);
+void sparsebit_validate_internal(const struct sparsebit *sbit);

+/*
+ * Iterate over the inclusive, set ranges within sparsebit @s. In each
+ * iteration, @range_begin and @range_end will take the beginning and end of
+ * the set range, which are of type sparsebit_idx_t.
+ *
+ * For example, if the range [3, 7] (inclusive) is set, within the iteration
+ * @range_begin will take the value 3 and @range_end will take the value 7.
+ *
+ * Ensure that there is at least one bit set before using this macro, e.g. by
+ * checking with sparsebit_any_set(), because sparsebit_first_set() will
+ * abort if none are set.
+ */
+#define sparsebit_for_each_set_range(s, range_begin, range_end)	\
+	for (range_begin = sparsebit_first_set(s),			\
+	     range_end = sparsebit_next_clear(s, range_begin) - 1;	\
+	     range_begin && range_end;					\
+	     range_begin = sparsebit_next_set(s, range_end),		\
+	     range_end = sparsebit_next_clear(s, range_begin) - 1)
+
 #ifdef __cplusplus
 }
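A brief usage sketch of the new iterator, in the spirit of how an SEV library
might walk a region's protected_phy_pages to encrypt guest memory at launch.
The helper, encrypt_pages(), and the assumption that bits index pages
relative to the region base are all illustrative.

#include "kvm_util.h"
#include "sparsebit.h"

/* Walk every set range in a region's protected-page bitmap. */
static void walk_protected_pages(struct kvm_vm *vm,
				 struct userspace_mem_region *region)
{
	const struct sparsebit *protected_pages = region->protected_phy_pages;
	const vm_paddr_t gpa_base = region->region.guest_phys_addr;
	sparsebit_idx_t i, j;

	/* The iterator aborts on an empty bitmap, so check first. */
	if (!sparsebit_any_set(protected_pages))
		return;

	sparsebit_for_each_set_range(protected_pages, i, j)
		encrypt_pages(vm, gpa_base + i * vm->page_size,
			      (j - i + 1) * vm->page_size);
}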
23 changes: 23 additions & 0 deletions tools/testing/selftests/kvm/include/x86_64/kvm_util_arch.h
@@ -0,0 +1,23 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+#ifndef SELFTEST_KVM_UTIL_ARCH_H
+#define SELFTEST_KVM_UTIL_ARCH_H
+
+#include <stdbool.h>
+#include <stdint.h>
+
+struct kvm_vm_arch {
+	uint64_t c_bit;
+	uint64_t s_bit;
+	int sev_fd;
+	bool is_pt_protected;
+};
+
+static inline bool __vm_arch_has_protected_memory(struct kvm_vm_arch *arch)
+{
+	return arch->c_bit || arch->s_bit;
+}
+
+#define vm_arch_has_protected_memory(vm) \
+	__vm_arch_has_protected_memory(&(vm)->arch)
+
+#endif // SELFTEST_KVM_UTIL_ARCH_H
8 changes: 8 additions & 0 deletions tools/testing/selftests/kvm/include/x86_64/processor.h
@@ -23,6 +23,12 @@
 extern bool host_cpu_is_intel;
 extern bool host_cpu_is_amd;
 
+enum vm_guest_x86_subtype {
+	VM_SUBTYPE_NONE = 0,
+	VM_SUBTYPE_SEV,
+	VM_SUBTYPE_SEV_ES,
+};
+
 #define NMI_VECTOR 0x02
 
 #define X86_EFLAGS_FIXED (1u << 1)
@@ -273,6 +279,7 @@ struct kvm_x86_cpu_property {
 #define X86_PROPERTY_MAX_EXT_LEAF		KVM_X86_CPU_PROPERTY(0x80000000, 0, EAX, 0, 31)
 #define X86_PROPERTY_MAX_PHY_ADDR		KVM_X86_CPU_PROPERTY(0x80000008, 0, EAX, 0, 7)
 #define X86_PROPERTY_MAX_VIRT_ADDR		KVM_X86_CPU_PROPERTY(0x80000008, 0, EAX, 8, 15)
+#define X86_PROPERTY_SEV_C_BIT			KVM_X86_CPU_PROPERTY(0x8000001F, 0, EBX, 0, 5)
 #define X86_PROPERTY_PHYS_ADDR_REDUCTION	KVM_X86_CPU_PROPERTY(0x8000001F, 0, EBX, 6, 11)
 
 #define X86_PROPERTY_MAX_CENTAUR_LEAF		KVM_X86_CPU_PROPERTY(0xC0000000, 0, EAX, 0, 31)
@@ -1059,6 +1066,7 @@ do {									\
 } while (0)
 
 void kvm_get_cpu_address_width(unsigned int *pa_bits, unsigned int *va_bits);
+void kvm_init_vm_address_properties(struct kvm_vm *vm);
 bool vm_is_unrestricted_guest(struct kvm_vm *vm);
 
 struct ex_regs {
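To connect the dots: kvm_init_vm_address_properties() is the natural home for
the SEV C-bit plumbing, reading X86_PROPERTY_SEV_C_BIT into vm->arch.c_bit
and mirroring it into vm->gpa_tag_mask so that vm_untag_gpa() works on SEV
guests. A hedged sketch of that flow, not necessarily the verbatim
implementation; open_sev_dev_path_or_exit() is assumed to come with the new
SEV library.

void kvm_init_vm_address_properties(struct kvm_vm *vm)
{
	if (vm->subtype == VM_SUBTYPE_SEV || vm->subtype == VM_SUBTYPE_SEV_ES) {
		/* Assumed helper from the new SEV library. */
		vm->arch.sev_fd = open_sev_dev_path_or_exit();

		/* CPUID 0x8000001F.EBX[5:0] gives the C-bit position. */
		vm->arch.c_bit = BIT_ULL(this_cpu_property(X86_PROPERTY_SEV_C_BIT));

		/* Guest PTEs carry the C-bit; strip it when untagging GPAs. */
		vm->gpa_tag_mask = vm->arch.c_bit;
	}
}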