Merge tag 'kvm-x86-selftests-6.18' of https://github.com/kvm-x86/linux into HEAD

KVM selftests changes for 6.18

 - Add #DE coverage in the fastops test (the only exception that's guest-
   triggerable in fastop-emulated instructions).

 - Fix PMU selftests errors encountered on Granite Rapids (GNR), Sierra
   Forest (SRF) and Clearwater Forest (CWF).

 - Minor cleanups and improvements.
This commit is contained in:
Paolo Bonzini
2025-09-30 13:23:54 -04:00
16 changed files with 303 additions and 109 deletions

View File

@@ -24,7 +24,7 @@ uint32_t guest_random_seed;
struct guest_random_state guest_rng;
static uint32_t last_guest_seed;
static int vcpu_mmap_sz(void);
static size_t vcpu_mmap_sz(void);
int __open_path_or_exit(const char *path, int flags, const char *enoent_help)
{
@@ -95,7 +95,7 @@ static ssize_t get_module_param(const char *module_name, const char *param,
return bytes_read;
}
static int get_module_param_integer(const char *module_name, const char *param)
int kvm_get_module_param_integer(const char *module_name, const char *param)
{
/*
* 16 bytes to hold a 64-bit value (1 byte per char), 1 byte for the
@@ -119,7 +119,7 @@ static int get_module_param_integer(const char *module_name, const char *param)
return atoi_paranoid(value);
}
static bool get_module_param_bool(const char *module_name, const char *param)
bool kvm_get_module_param_bool(const char *module_name, const char *param)
{
char value;
ssize_t r;
@@ -135,36 +135,6 @@ static bool get_module_param_bool(const char *module_name, const char *param)
TEST_FAIL("Unrecognized value '%c' for boolean module param", value);
}
/* Returns the boolean value of module parameter @param of the "kvm" module. */
bool get_kvm_param_bool(const char *param)
{
	return get_module_param_bool("kvm", param);
}
/* Returns the boolean value of module parameter @param of the "kvm_intel" module. */
bool get_kvm_intel_param_bool(const char *param)
{
	return get_module_param_bool("kvm_intel", param);
}
/* Returns the boolean value of module parameter @param of the "kvm_amd" module. */
bool get_kvm_amd_param_bool(const char *param)
{
	return get_module_param_bool("kvm_amd", param);
}
int get_kvm_param_integer(const char *param)
{
return get_module_param_integer("kvm", param);
}
int get_kvm_intel_param_integer(const char *param)
{
return get_module_param_integer("kvm_intel", param);
}
int get_kvm_amd_param_integer(const char *param)
{
return get_module_param_integer("kvm_amd", param);
}
/*
* Capability
*
@@ -1324,14 +1294,14 @@ void vm_guest_mem_fallocate(struct kvm_vm *vm, uint64_t base, uint64_t size,
}
/* Returns the size of a vCPU's kvm_run structure. */
static int vcpu_mmap_sz(void)
static size_t vcpu_mmap_sz(void)
{
int dev_fd, ret;
dev_fd = open_kvm_dev_path_or_exit();
ret = ioctl(dev_fd, KVM_GET_VCPU_MMAP_SIZE, NULL);
TEST_ASSERT(ret >= sizeof(struct kvm_run),
TEST_ASSERT(ret >= 0 && ret >= sizeof(struct kvm_run),
KVM_IOCTL_ERROR(KVM_GET_VCPU_MMAP_SIZE, ret));
close(dev_fd);
@@ -1372,7 +1342,7 @@ struct kvm_vcpu *__vm_vcpu_add(struct kvm_vm *vm, uint32_t vcpu_id)
TEST_ASSERT_VM_VCPU_IOCTL(vcpu->fd >= 0, KVM_CREATE_VCPU, vcpu->fd, vm);
TEST_ASSERT(vcpu_mmap_sz() >= sizeof(*vcpu->run), "vcpu mmap size "
"smaller than expected, vcpu_mmap_sz: %i expected_min: %zi",
"smaller than expected, vcpu_mmap_sz: %zi expected_min: %zi",
vcpu_mmap_sz(), sizeof(*vcpu->run));
vcpu->run = (struct kvm_run *) mmap(NULL, vcpu_mmap_sz(),
PROT_READ | PROT_WRITE, MAP_SHARED, vcpu->fd, 0);

View File

@@ -8,6 +8,7 @@
#include <linux/kernel.h>
#include "kvm_util.h"
#include "processor.h"
#include "pmu.h"
const uint64_t intel_pmu_arch_events[] = {
@@ -19,6 +20,11 @@ const uint64_t intel_pmu_arch_events[] = {
INTEL_ARCH_BRANCHES_RETIRED,
INTEL_ARCH_BRANCHES_MISPREDICTED,
INTEL_ARCH_TOPDOWN_SLOTS,
INTEL_ARCH_TOPDOWN_BE_BOUND,
INTEL_ARCH_TOPDOWN_BAD_SPEC,
INTEL_ARCH_TOPDOWN_FE_BOUND,
INTEL_ARCH_TOPDOWN_RETIRING,
INTEL_ARCH_LBR_INSERTS,
};
kvm_static_assert(ARRAY_SIZE(intel_pmu_arch_events) == NR_INTEL_ARCH_EVENTS);
@@ -29,3 +35,46 @@ const uint64_t amd_pmu_zen_events[] = {
AMD_ZEN_BRANCHES_MISPREDICTED,
};
kvm_static_assert(ARRAY_SIZE(amd_pmu_zen_events) == NR_AMD_ZEN_EVENTS);
/*
 * On Intel Atom CPUs, the "Instructions Retired" and/or "Branch
 * Instructions Retired" PMU events may overcount on certain instructions,
 * e.g. FAR CALL/JMP, RETF, IRET, VMENTRY/VMEXIT/VMPTRLD, and complex
 * SGX/SMX/CSTATE instructions/flows.
 *
 * Details can be found in the errata (section SRF7):
 * https://edc.intel.com/content/www/us/en/design/products-and-solutions/processors-and-chipsets/sierra-forest/xeon-6700-series-processor-with-e-cores-specification-update/errata-details/
 *
 * On Atom platforms up to and including Sierra Forest, both events are
 * overcounted on the affected instructions; on Clearwater Forest, only
 * "Instructions Retired" is overcounted.
 */
static uint64_t get_pmu_errata(void)
{
	uint64_t errata;

	/* Only Intel family 6 Atom parts are affected. */
	if (!this_cpu_is_intel() || this_cpu_family() != 0x6)
		return 0;

	switch (this_cpu_model()) {
	case 0xDD: /* Clearwater Forest */
		errata = BIT_ULL(INSTRUCTIONS_RETIRED_OVERCOUNT);
		break;
	case 0xAF: /* Sierra Forest */
	case 0x4D: /* Avoton, Rangeley */
	case 0x5F: /* Denverton */
	case 0x86: /* Jacobsville */
		errata = BIT_ULL(INSTRUCTIONS_RETIRED_OVERCOUNT) |
			 BIT_ULL(BRANCHES_RETIRED_OVERCOUNT);
		break;
	default:
		errata = 0;
		break;
	}

	return errata;
}
/* Bitmask of PMU errata that affect the host CPU (see get_pmu_errata()). */
uint64_t pmu_errata_mask;

/* Caches the host CPU's PMU errata mask for use by PMU selftests. */
void kvm_init_pmu_errata(void)
{
	pmu_errata_mask = get_pmu_errata();
}

View File

@@ -6,6 +6,7 @@
#include "linux/bitmap.h"
#include "test_util.h"
#include "kvm_util.h"
#include "pmu.h"
#include "processor.h"
#include "sev.h"
@@ -23,6 +24,39 @@ bool host_cpu_is_intel;
bool is_forced_emulation_enabled;
uint64_t guest_tsc_khz;
/*
 * Return a human-readable name for an exception @vector, for use in assertion
 * and failure messages.  Vector 0 (#DE's architectural vector) is repurposed
 * by the selftests to mean "no exception"; an actual guest #DE is reported
 * via the magic KVM_MAGIC_DE_VECTOR instead.
 */
const char *ex_str(int vector)
{
	switch (vector) {
	case DE_VECTOR:			return "no exception";
	case KVM_MAGIC_DE_VECTOR:	return "#DE";
	case DB_VECTOR:			return "#DB";
	case NMI_VECTOR:		return "#NMI";
	case BP_VECTOR:			return "#BP";
	case OF_VECTOR:			return "#OF";
	case BR_VECTOR:			return "#BR";
	case UD_VECTOR:			return "#UD";
	case NM_VECTOR:			return "#NM";
	case DF_VECTOR:			return "#DF";
	case TS_VECTOR:			return "#TS";
	case NP_VECTOR:			return "#NP";
	case SS_VECTOR:			return "#SS";
	case GP_VECTOR:			return "#GP";
	case PF_VECTOR:			return "#PF";
	case MF_VECTOR:			return "#MF";
	case AC_VECTOR:			return "#AC";
	case MC_VECTOR:			return "#MC";
	case XM_VECTOR:			return "#XM";
	case VE_VECTOR:			return "#VE";
	case CP_VECTOR:			return "#CP";
	case HV_VECTOR:			return "#HV";
	case VC_VECTOR:			return "#VC";
	case SX_VECTOR:			return "#SX";
	default:			return "#??";
	}
}
static void regs_dump(FILE *stream, struct kvm_regs *regs, uint8_t indent)
{
fprintf(stream, "%*srax: 0x%.16llx rbx: 0x%.16llx "
@@ -557,7 +591,7 @@ static bool kvm_fixup_exception(struct ex_regs *regs)
return false;
if (regs->vector == DE_VECTOR)
return false;
regs->vector = KVM_MAGIC_DE_VECTOR;
regs->rip = regs->r11;
regs->r9 = regs->vector;
@@ -638,6 +672,7 @@ void kvm_arch_vm_post_create(struct kvm_vm *vm, unsigned int nr_vcpus)
sync_global_to_guest(vm, host_cpu_is_intel);
sync_global_to_guest(vm, host_cpu_is_amd);
sync_global_to_guest(vm, is_forced_emulation_enabled);
sync_global_to_guest(vm, pmu_errata_mask);
if (is_sev_vm(vm)) {
struct kvm_sev_init init = { 0 };
@@ -1269,6 +1304,8 @@ void kvm_selftest_arch_init(void)
host_cpu_is_intel = this_cpu_is_intel();
host_cpu_is_amd = this_cpu_is_amd();
is_forced_emulation_enabled = kvm_is_forced_emulation_enabled();
kvm_init_pmu_errata();
}
bool sys_clocksource_is_based_on_tsc(void)