Merge tag 'kvm-x86-selftests-6.8-rcN' of https://github.com/kvm-x86/linux into HEAD

KVM selftests fixes/cleanups (and one KVM x86 cleanup) for 6.8:

 - Remove redundant newlines from error messages.

 - Delete an unused variable in the AMX test (which causes build failures when
   compiling with -Werror).

 - Fail instead of skipping tests if open(), e.g. of /dev/kvm, fails with an
   error code other than ENOENT (a Hyper-V selftest bug resulted in an EMFILE,
   and the test eventually got skipped); a minimal sketch of the intended
   open() pattern follows this list.

 - Fix TSC related bugs in several Hyper-V selftests.

 - Fix a bug in the dirty ring logging test where a sem_post() could be left
   pending across multiple runs, resulting in incorrect synchronization between
   the main thread and the vCPU worker thread.

 - Relax the dirty log split test's assertions on 4KiB mappings to fix false
   positives due to the number of mappings for memslot 0 (used for code and
   data that is NOT being dirty logged) changing, e.g. due to NUMA balancing.

 - Have KVM's gtod_is_based_on_tsc() return "bool" instead of an "int" (the
   function generates boolean values, and all callers treat the return value as
   a bool).
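
Illustrating the open() item above: the intended pattern is to skip only when
the path does not exist and to fail loudly on every other error. The helper
name, messages, and exit codes below are illustrative only, not the selftests'
actual open_path_or_exit() code.

#include <errno.h>
#include <fcntl.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

/*
 * Open a device node for a test.  Skip only if the path doesn't exist
 * (ENOENT); any other error (EMFILE, EACCES, ...) indicates a bug in the
 * test or the environment and must fail the test, not skip it.
 */
static int open_or_fail(const char *path, int flags)
{
	int fd = open(path, flags);

	if (fd < 0) {
		if (errno == ENOENT) {
			fprintf(stderr, "SKIP: %s does not exist\n", path);
			exit(4);	/* kselftest's KSFT_SKIP */
		}
		fprintf(stderr, "FAIL: open(%s): %s\n", path, strerror(errno));
		exit(1);
	}
	return fd;
}

A test would then start with something like: int kvm_fd = open_or_fail("/dev/kvm", O_RDWR);
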
bonzini committed Feb 14, 2024
2 parents 22d0bc0 + 6fd78be commit 2f8ebe4
Showing 59 changed files with 237 additions and 240 deletions.
2 changes: 1 addition & 1 deletion arch/x86/kvm/x86.c
@@ -2506,7 +2506,7 @@ static u64 compute_guest_tsc(struct kvm_vcpu *vcpu, s64 kernel_ns)
}

#ifdef CONFIG_X86_64
- static inline int gtod_is_based_on_tsc(int mode)
+ static inline bool gtod_is_based_on_tsc(int mode)
{
return mode == VDSO_CLOCKMODE_TSC || mode == VDSO_CLOCKMODE_HVCLOCK;
}
12 changes: 6 additions & 6 deletions tools/testing/selftests/kvm/aarch64/arch_timer.c
@@ -248,7 +248,7 @@ static void *test_vcpu_run(void *arg)
REPORT_GUEST_ASSERT(uc);
break;
default:
TEST_FAIL("Unexpected guest exit\n");
TEST_FAIL("Unexpected guest exit");
}

return NULL;
@@ -287,7 +287,7 @@ static int test_migrate_vcpu(unsigned int vcpu_idx)

/* Allow the error where the vCPU thread is already finished */
TEST_ASSERT(ret == 0 || ret == ESRCH,
"Failed to migrate the vCPU:%u to pCPU: %u; ret: %d\n",
"Failed to migrate the vCPU:%u to pCPU: %u; ret: %d",
vcpu_idx, new_pcpu, ret);

return ret;
@@ -326,12 +326,12 @@ static void test_run(struct kvm_vm *vm)

pthread_mutex_init(&vcpu_done_map_lock, NULL);
vcpu_done_map = bitmap_zalloc(test_args.nr_vcpus);
- TEST_ASSERT(vcpu_done_map, "Failed to allocate vcpu done bitmap\n");
+ TEST_ASSERT(vcpu_done_map, "Failed to allocate vcpu done bitmap");

for (i = 0; i < (unsigned long)test_args.nr_vcpus; i++) {
ret = pthread_create(&pt_vcpu_run[i], NULL, test_vcpu_run,
(void *)(unsigned long)i);
TEST_ASSERT(!ret, "Failed to create vCPU-%d pthread\n", i);
TEST_ASSERT(!ret, "Failed to create vCPU-%d pthread", i);
}

/* Spawn a thread to control the vCPU migrations */
@@ -340,7 +340,7 @@ static void test_run(struct kvm_vm *vm)

ret = pthread_create(&pt_vcpu_migration, NULL,
test_vcpu_migration, NULL);
TEST_ASSERT(!ret, "Failed to create the migration pthread\n");
TEST_ASSERT(!ret, "Failed to create the migration pthread");
}


@@ -384,7 +384,7 @@ static struct kvm_vm *test_vm_create(void)
if (kvm_has_cap(KVM_CAP_COUNTER_OFFSET))
vm_ioctl(vm, KVM_ARM_SET_COUNTER_OFFSET, &test_args.offset);
else
TEST_FAIL("no support for global offset\n");
TEST_FAIL("no support for global offset");
}

for (i = 0; i < nr_vcpus; i++)
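
The arch_timer.c hunks above are typical of the newline cleanup across the
selftests: the TEST_ASSERT()/TEST_FAIL() helpers terminate the formatted
message themselves, so a trailing "\n" supplied by the caller only adds a
stray blank line to the output. A minimal sketch of that convention (a
hypothetical FAIL() macro, not the selftests' actual test_assert()
implementation):

#include <stdarg.h>
#include <stdio.h>
#include <stdlib.h>

/* Print "file:line: message" followed by exactly one newline, then abort. */
static void report_failure(const char *file, int line, const char *fmt, ...)
{
	va_list ap;

	fprintf(stderr, "%s:%d: ", file, line);
	va_start(ap, fmt);
	vfprintf(stderr, fmt, ap);
	va_end(ap);
	fputc('\n', stderr);	/* the helper owns the newline ... */
	abort();
}

/* ... so callers pass messages without a trailing "\n". */
#define FAIL(...) report_failure(__FILE__, __LINE__, __VA_ARGS__)
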
16 changes: 8 additions & 8 deletions tools/testing/selftests/kvm/aarch64/hypercalls.c
@@ -175,18 +175,18 @@ static void test_fw_regs_before_vm_start(struct kvm_vcpu *vcpu)
/* First 'read' should be an upper limit of the features supported */
vcpu_get_reg(vcpu, reg_info->reg, &val);
TEST_ASSERT(val == FW_REG_ULIMIT_VAL(reg_info->max_feat_bit),
"Expected all the features to be set for reg: 0x%lx; expected: 0x%lx; read: 0x%lx\n",
"Expected all the features to be set for reg: 0x%lx; expected: 0x%lx; read: 0x%lx",
reg_info->reg, FW_REG_ULIMIT_VAL(reg_info->max_feat_bit), val);

/* Test a 'write' by disabling all the features of the register map */
ret = __vcpu_set_reg(vcpu, reg_info->reg, 0);
TEST_ASSERT(ret == 0,
"Failed to clear all the features of reg: 0x%lx; ret: %d\n",
"Failed to clear all the features of reg: 0x%lx; ret: %d",
reg_info->reg, errno);

vcpu_get_reg(vcpu, reg_info->reg, &val);
TEST_ASSERT(val == 0,
"Expected all the features to be cleared for reg: 0x%lx\n", reg_info->reg);
"Expected all the features to be cleared for reg: 0x%lx", reg_info->reg);

/*
* Test enabling a feature that's not supported.
@@ -195,7 +195,7 @@ static void test_fw_regs_before_vm_start(struct kvm_vcpu *vcpu)
if (reg_info->max_feat_bit < 63) {
ret = __vcpu_set_reg(vcpu, reg_info->reg, BIT(reg_info->max_feat_bit + 1));
TEST_ASSERT(ret != 0 && errno == EINVAL,
"Unexpected behavior or return value (%d) while setting an unsupported feature for reg: 0x%lx\n",
"Unexpected behavior or return value (%d) while setting an unsupported feature for reg: 0x%lx",
errno, reg_info->reg);
}
}
@@ -216,7 +216,7 @@ static void test_fw_regs_after_vm_start(struct kvm_vcpu *vcpu)
*/
vcpu_get_reg(vcpu, reg_info->reg, &val);
TEST_ASSERT(val == 0,
"Expected all the features to be cleared for reg: 0x%lx\n",
"Expected all the features to be cleared for reg: 0x%lx",
reg_info->reg);

/*
@@ -226,7 +226,7 @@ static void test_fw_regs_after_vm_start(struct kvm_vcpu *vcpu)
*/
ret = __vcpu_set_reg(vcpu, reg_info->reg, FW_REG_ULIMIT_VAL(reg_info->max_feat_bit));
TEST_ASSERT(ret != 0 && errno == EBUSY,
"Unexpected behavior or return value (%d) while setting a feature while VM is running for reg: 0x%lx\n",
"Unexpected behavior or return value (%d) while setting a feature while VM is running for reg: 0x%lx",
errno, reg_info->reg);
}
}
@@ -265,7 +265,7 @@ static void test_guest_stage(struct kvm_vm **vm, struct kvm_vcpu **vcpu)
case TEST_STAGE_HVC_IFACE_FALSE_INFO:
break;
default:
TEST_FAIL("Unknown test stage: %d\n", prev_stage);
TEST_FAIL("Unknown test stage: %d", prev_stage);
}
}

@@ -294,7 +294,7 @@ static void test_run(void)
REPORT_GUEST_ASSERT(uc);
break;
default:
TEST_FAIL("Unexpected guest exit\n");
TEST_FAIL("Unexpected guest exit");
}
}

6 changes: 3 additions & 3 deletions tools/testing/selftests/kvm/aarch64/page_fault_test.c
@@ -414,10 +414,10 @@ static bool punch_hole_in_backing_store(struct kvm_vm *vm,
if (fd != -1) {
ret = fallocate(fd, FALLOC_FL_PUNCH_HOLE | FALLOC_FL_KEEP_SIZE,
0, paging_size);
TEST_ASSERT(ret == 0, "fallocate failed\n");
TEST_ASSERT(ret == 0, "fallocate failed");
} else {
ret = madvise(hva, paging_size, MADV_DONTNEED);
TEST_ASSERT(ret == 0, "madvise failed\n");
TEST_ASSERT(ret == 0, "madvise failed");
}

return true;
@@ -501,7 +501,7 @@ static bool handle_cmd(struct kvm_vm *vm, int cmd)

void fail_vcpu_run_no_handler(int ret)
{
TEST_FAIL("Unexpected vcpu run failure\n");
TEST_FAIL("Unexpected vcpu run failure");
}

void fail_vcpu_run_mmio_no_syndrome_handler(int ret)
2 changes: 1 addition & 1 deletion tools/testing/selftests/kvm/aarch64/smccc_filter.c
@@ -178,7 +178,7 @@ static void expect_call_denied(struct kvm_vcpu *vcpu)
struct ucall uc;

if (get_ucall(vcpu, &uc) != UCALL_SYNC)
TEST_FAIL("Unexpected ucall: %lu\n", uc.cmd);
TEST_FAIL("Unexpected ucall: %lu", uc.cmd);

TEST_ASSERT(uc.args[1] == SMCCC_RET_NOT_SUPPORTED,
"Unexpected SMCCC return code: %lu", uc.args[1]);
12 changes: 6 additions & 6 deletions tools/testing/selftests/kvm/aarch64/vpmu_counter_access.c
@@ -517,11 +517,11 @@ static void test_create_vpmu_vm_with_pmcr_n(uint64_t pmcr_n, bool expect_fail)

if (expect_fail)
TEST_ASSERT(pmcr_orig == pmcr,
"PMCR.N modified by KVM to a larger value (PMCR: 0x%lx) for pmcr_n: 0x%lx\n",
"PMCR.N modified by KVM to a larger value (PMCR: 0x%lx) for pmcr_n: 0x%lx",
pmcr, pmcr_n);
else
TEST_ASSERT(pmcr_n == get_pmcr_n(pmcr),
"Failed to update PMCR.N to %lu (received: %lu)\n",
"Failed to update PMCR.N to %lu (received: %lu)",
pmcr_n, get_pmcr_n(pmcr));
}

@@ -594,12 +594,12 @@ static void run_pmregs_validity_test(uint64_t pmcr_n)
*/
vcpu_get_reg(vcpu, KVM_ARM64_SYS_REG(set_reg_id), &reg_val);
TEST_ASSERT((reg_val & (~valid_counters_mask)) == 0,
"Initial read of set_reg: 0x%llx has unimplemented counters enabled: 0x%lx\n",
"Initial read of set_reg: 0x%llx has unimplemented counters enabled: 0x%lx",
KVM_ARM64_SYS_REG(set_reg_id), reg_val);

vcpu_get_reg(vcpu, KVM_ARM64_SYS_REG(clr_reg_id), &reg_val);
TEST_ASSERT((reg_val & (~valid_counters_mask)) == 0,
"Initial read of clr_reg: 0x%llx has unimplemented counters enabled: 0x%lx\n",
"Initial read of clr_reg: 0x%llx has unimplemented counters enabled: 0x%lx",
KVM_ARM64_SYS_REG(clr_reg_id), reg_val);

/*
@@ -611,12 +611,12 @@ static void run_pmregs_validity_test(uint64_t pmcr_n)

vcpu_get_reg(vcpu, KVM_ARM64_SYS_REG(set_reg_id), &reg_val);
TEST_ASSERT((reg_val & (~valid_counters_mask)) == 0,
"Read of set_reg: 0x%llx has unimplemented counters enabled: 0x%lx\n",
"Read of set_reg: 0x%llx has unimplemented counters enabled: 0x%lx",
KVM_ARM64_SYS_REG(set_reg_id), reg_val);

vcpu_get_reg(vcpu, KVM_ARM64_SYS_REG(clr_reg_id), &reg_val);
TEST_ASSERT((reg_val & (~valid_counters_mask)) == 0,
"Read of clr_reg: 0x%llx has unimplemented counters enabled: 0x%lx\n",
"Read of clr_reg: 0x%llx has unimplemented counters enabled: 0x%lx",
KVM_ARM64_SYS_REG(clr_reg_id), reg_val);
}

4 changes: 2 additions & 2 deletions tools/testing/selftests/kvm/demand_paging_test.c
@@ -45,10 +45,10 @@ static void vcpu_worker(struct memstress_vcpu_args *vcpu_args)

/* Let the guest access its memory */
ret = _vcpu_run(vcpu);
TEST_ASSERT(ret == 0, "vcpu_run failed: %d\n", ret);
TEST_ASSERT(ret == 0, "vcpu_run failed: %d", ret);
if (get_ucall(vcpu, NULL) != UCALL_SYNC) {
TEST_ASSERT(false,
"Invalid guest sync status: exit_reason=%s\n",
"Invalid guest sync status: exit_reason=%s",
exit_reason_str(run->exit_reason));
}

4 changes: 2 additions & 2 deletions tools/testing/selftests/kvm/dirty_log_perf_test.c
@@ -88,9 +88,9 @@ static void vcpu_worker(struct memstress_vcpu_args *vcpu_args)
ret = _vcpu_run(vcpu);
ts_diff = timespec_elapsed(start);

TEST_ASSERT(ret == 0, "vcpu_run failed: %d\n", ret);
TEST_ASSERT(ret == 0, "vcpu_run failed: %d", ret);
TEST_ASSERT(get_ucall(vcpu, NULL) == UCALL_SYNC,
"Invalid guest sync status: exit_reason=%s\n",
"Invalid guest sync status: exit_reason=%s",
exit_reason_str(run->exit_reason));

pr_debug("Got sync event from vCPU %d\n", vcpu_idx);
54 changes: 29 additions & 25 deletions tools/testing/selftests/kvm/dirty_log_test.c
@@ -262,7 +262,7 @@ static void default_after_vcpu_run(struct kvm_vcpu *vcpu, int ret, int err)
"vcpu run failed: errno=%d", err);

TEST_ASSERT(get_ucall(vcpu, NULL) == UCALL_SYNC,
"Invalid guest sync status: exit_reason=%s\n",
"Invalid guest sync status: exit_reason=%s",
exit_reason_str(run->exit_reason));

vcpu_handle_sync_stop();
@@ -376,7 +376,10 @@ static void dirty_ring_collect_dirty_pages(struct kvm_vcpu *vcpu, int slot,

cleared = kvm_vm_reset_dirty_ring(vcpu->vm);

- /* Cleared pages should be the same as collected */
+ /*
+  * Cleared pages should be the same as collected, as KVM is supposed to
+  * clear only the entries that have been harvested.
+  */
TEST_ASSERT(cleared == count, "Reset dirty pages (%u) mismatch "
"with collected (%u)", cleared, count);

@@ -410,17 +410,11 @@ static void dirty_ring_after_vcpu_run(struct kvm_vcpu *vcpu, int ret, int err)
pr_info("vcpu continues now.\n");
} else {
TEST_ASSERT(false, "Invalid guest sync status: "
"exit_reason=%s\n",
"exit_reason=%s",
exit_reason_str(run->exit_reason));
}
}

- static void dirty_ring_before_vcpu_join(void)
- {
- /* Kick another round of vcpu just to make sure it will quit */
- sem_post(&sem_vcpu_cont);
- }

struct log_mode {
const char *name;
/* Return true if this mode is supported, otherwise false */
@@ -433,7 +430,6 @@ struct log_mode {
uint32_t *ring_buf_idx);
/* Hook to call when after each vcpu run */
void (*after_vcpu_run)(struct kvm_vcpu *vcpu, int ret, int err);
- void (*before_vcpu_join) (void);
} log_modes[LOG_MODE_NUM] = {
{
.name = "dirty-log",
@@ -452,7 +448,6 @@ struct log_mode {
.supported = dirty_ring_supported,
.create_vm_done = dirty_ring_create_vm_done,
.collect_dirty_pages = dirty_ring_collect_dirty_pages,
- .before_vcpu_join = dirty_ring_before_vcpu_join,
.after_vcpu_run = dirty_ring_after_vcpu_run,
},
};
@@ -513,14 +508,6 @@ static void log_mode_after_vcpu_run(struct kvm_vcpu *vcpu, int ret, int err)
mode->after_vcpu_run(vcpu, ret, err);
}

- static void log_mode_before_vcpu_join(void)
- {
- struct log_mode *mode = &log_modes[host_log_mode];
-
- if (mode->before_vcpu_join)
- mode->before_vcpu_join();
- }

static void generate_random_array(uint64_t *guest_array, uint64_t size)
{
uint64_t i;
@@ -719,6 +706,7 @@ static void run_test(enum vm_guest_mode mode, void *arg)
struct kvm_vm *vm;
unsigned long *bmap;
uint32_t ring_buf_idx = 0;
+ int sem_val;

if (!log_mode_supported()) {
print_skip("Log mode '%s' not supported",
@@ -788,12 +776,22 @@ static void run_test(enum vm_guest_mode mode, void *arg)
/* Start the iterations */
iteration = 1;
sync_global_to_guest(vm, iteration);
- host_quit = false;
+ WRITE_ONCE(host_quit, false);
host_dirty_count = 0;
host_clear_count = 0;
host_track_next_count = 0;
WRITE_ONCE(dirty_ring_vcpu_ring_full, false);

+ /*
+  * Ensure the previous iteration didn't leave a dangling semaphore, i.e.
+  * that the main task and vCPU worker were synchronized and completed
+  * verification of all iterations.
+  */
+ sem_getvalue(&sem_vcpu_stop, &sem_val);
+ TEST_ASSERT_EQ(sem_val, 0);
+ sem_getvalue(&sem_vcpu_cont, &sem_val);
+ TEST_ASSERT_EQ(sem_val, 0);

pthread_create(&vcpu_thread, NULL, vcpu_worker, vcpu);

while (iteration < p->iterations) {
Expand All @@ -819,15 +817,21 @@ static void run_test(enum vm_guest_mode mode, void *arg)
assert(host_log_mode == LOG_MODE_DIRTY_RING ||
atomic_read(&vcpu_sync_stop_requested) == false);
vm_dirty_log_verify(mode, bmap);
- sem_post(&sem_vcpu_cont);

- iteration++;
+ /*
+  * Set host_quit before sem_vcpu_cont in the final iteration to
+  * ensure that the vCPU worker doesn't resume the guest. As
+  * above, the dirty ring test may stop and wait even when not
+  * explicitly request to do so, i.e. would hang waiting for a
+  * "continue" if it's allowed to resume the guest.
+  */
+ if (++iteration == p->iterations)
+ 	WRITE_ONCE(host_quit, true);

+ sem_post(&sem_vcpu_cont);
sync_global_to_guest(vm, iteration);
}

- /* Tell the vcpu thread to quit */
- host_quit = true;
- log_mode_before_vcpu_join();
pthread_join(vcpu_thread, NULL);

pr_info("Total bits checked: dirty (%"PRIu64"), clear (%"PRIu64"), "
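
The dirty_log_test.c change above is the semaphore fix called out in the merge
description: rather than kicking an extra sem_vcpu_cont before joining the
worker (which could leave a post pending into the next run), host_quit is now
set before the final post and each run begins by asserting that both
semaphores are balanced. A standalone sketch of that handshake under
simplified assumptions (hypothetical names, a fixed iteration count, none of
the dirty-ring special cases):

#include <assert.h>
#include <pthread.h>
#include <semaphore.h>
#include <stdbool.h>
#include <stdio.h>

static sem_t sem_stop, sem_cont;
static bool quit;

static void *worker(void *arg)
{
	(void)arg;
	while (!__atomic_load_n(&quit, __ATOMIC_RELAXED)) {
		/* ... run one iteration of guest work ... */
		sem_post(&sem_stop);	/* tell the main thread we paused */
		sem_wait(&sem_cont);	/* wait for permission to continue */
	}
	return NULL;
}

int main(void)
{
	pthread_t thread;
	int i, val;

	sem_init(&sem_stop, 0, 0);
	sem_init(&sem_cont, 0, 0);

	/* A post left over from a previous run would skew the handshake. */
	sem_getvalue(&sem_stop, &val);
	assert(val == 0);
	sem_getvalue(&sem_cont, &val);
	assert(val == 0);

	pthread_create(&thread, NULL, worker, NULL);

	for (i = 1; i <= 32; i++) {
		sem_wait(&sem_stop);	/* worker is parked */
		/* ... verify this iteration's results ... */

		/*
		 * Flip quit before the final post so the worker exits its
		 * loop instead of starting another iteration; every post is
		 * then matched by exactly one wait.
		 */
		if (i == 32)
			__atomic_store_n(&quit, true, __ATOMIC_RELAXED);
		sem_post(&sem_cont);
	}

	pthread_join(thread, NULL);
	printf("all iterations verified, semaphores balanced\n");
	return 0;
}
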
2 changes: 1 addition & 1 deletion tools/testing/selftests/kvm/get-reg-list.c
@@ -152,7 +152,7 @@ static void check_supported(struct vcpu_reg_list *c)
continue;

__TEST_REQUIRE(kvm_has_cap(s->capability),
"%s: %s not available, skipping tests\n",
"%s: %s not available, skipping tests",
config_name(c), s->name);
}
}
8 changes: 4 additions & 4 deletions tools/testing/selftests/kvm/guest_print_test.c
@@ -98,7 +98,7 @@ static void ucall_abort(const char *assert_msg, const char *expected_assert_msg)
int offset = len_str - len_substr;

TEST_ASSERT(len_substr <= len_str,
"Expected '%s' to be a substring of '%s'\n",
"Expected '%s' to be a substring of '%s'",
assert_msg, expected_assert_msg);

TEST_ASSERT(strcmp(&assert_msg[offset], expected_assert_msg) == 0,
@@ -116,7 +116,7 @@ static void run_test(struct kvm_vcpu *vcpu, const char *expected_printf,
vcpu_run(vcpu);

TEST_ASSERT(run->exit_reason == UCALL_EXIT_REASON,
"Unexpected exit reason: %u (%s),\n",
"Unexpected exit reason: %u (%s),",
run->exit_reason, exit_reason_str(run->exit_reason));

switch (get_ucall(vcpu, &uc)) {
@@ -161,11 +161,11 @@ static void test_limits(void)
vcpu_run(vcpu);

TEST_ASSERT(run->exit_reason == UCALL_EXIT_REASON,
"Unexpected exit reason: %u (%s),\n",
"Unexpected exit reason: %u (%s),",
run->exit_reason, exit_reason_str(run->exit_reason));

TEST_ASSERT(get_ucall(vcpu, &uc) == UCALL_ABORT,
"Unexpected ucall command: %lu, Expected: %u (UCALL_ABORT)\n",
"Unexpected ucall command: %lu, Expected: %u (UCALL_ABORT)",
uc.cmd, UCALL_ABORT);

kvm_vm_free(vm);
(Diffs for the remaining changed files are not shown here.)
