Merge tag 'for-linus' of git://git.kernel.org/pub/scm/virt/kvm/kvm

Pull kvm fixes from Paolo Bonzini:

 "ARM:

   - Fix the handling of ZCR_EL2 in NV VMs
   - Pick the correct translation regime when doing a PTW on the back of a SEA
   - Prevent userspace from injecting an event into a vcpu that isn't initialised yet
   - Move timer save/restore to the sysreg handling code, fixing EL2 timer access in the process
   - Add FGT-based trapping of MDSCR_EL1 to reduce the overhead of debug
   - Fix trapping configuration when the host isn't GICv3
   - Improve the detection of HCR_EL2.E2H being RES1
   - Drop a spurious 'break' statement in the S1 PTW
   - Don't try to access SPE when owned by EL3

  Documentation updates:

   - Document the failure modes of event injection
   - Document that a GICv3 guest can be created on a GICv5 host with FEAT_GCIE_LEGACY

  Selftest improvements:

   - Add a selftest for the effective value of HCR_EL2.AMO
   - Address build warning in the timer selftest when building with clang
   - Teach irqfd selftests about non-x86 architectures
   - Add missing sysregs to the set_id_regs selftest
   - Fix vcpu allocation in the vgic_lpi_stress selftest
   - Correctly enable interrupts in the vgic_lpi_stress selftest

  x86:

   - Expand the KVM_PRE_FAULT_MEMORY selftest to add a regression test for the bug fixed by commit 3ccbf6f470 ("KVM: x86/mmu: Return -EAGAIN if userspace deletes/moves memslot during prefault")
   - Don't try to get PMU capabilities from perf when running a CPU with hybrid CPUs/PMUs, as perf will rightly WARN.

  guest_memfd:

   - Rework KVM_CAP_GUEST_MEMFD_MMAP (newly introduced in 6.18) into a more generic KVM_CAP_GUEST_MEMFD_FLAGS
   - Add a guest_memfd INIT_SHARED flag and require userspace to explicitly set said flag to initialize memory as SHARED, irrespective of MMAP. The behavior merged in 6.18 is that enabling mmap() implicitly initializes memory as SHARED, which would result in an ABI collision for x86 CoCo VMs as their memory is currently always initialized PRIVATE.
   - Allow mmap() on guest_memfd for x86 CoCo VMs, i.e. on VMs with private memory, to enable testing such setups, i.e. to hopefully flush out any other lurking ABI issues before 6.18 is officially released.
   - Add testcases to the guest_memfd selftest to cover guest_memfd without MMAP, and host userspace accesses to mmap()'d private memory"

* tag 'for-linus' of git://git.kernel.org/pub/scm/virt/kvm/kvm: (46 commits)
  arm64: Revamp HCR_EL2.E2H RES1 detection
  KVM: arm64: nv: Use FGT write trap of MDSCR_EL1 when available
  KVM: arm64: Compute per-vCPU FGTs at vcpu_load()
  KVM: arm64: selftests: Fix misleading comment about virtual timer encoding
  KVM: arm64: selftests: Add an E2H=0-specific configuration to get_reg_list
  KVM: arm64: selftests: Make dependencies on VHE-specific registers explicit
  KVM: arm64: Kill leftovers of ad-hoc timer userspace access
  KVM: arm64: Fix WFxT handling of nested virt
  KVM: arm64: Move CNT*CT_EL0 userspace accessors to generic infrastructure
  KVM: arm64: Move CNT*_CVAL_EL0 userspace accessors to generic infrastructure
  KVM: arm64: Move CNT*_CTL_EL0 userspace accessors to generic infrastructure
  KVM: arm64: Add timer UAPI workaround to sysreg infrastructure
  KVM: arm64: Make timer_set_offset() generally accessible
  KVM: arm64: Replace timer context vcpu pointer with timer_id
  KVM: arm64: Introduce timer_context_to_vcpu() helper
  KVM: arm64: Hide CNTHV_*_EL2 from userspace for nVHE guests
  Documentation: KVM: Update GICv3 docs for GICv5 hosts
  KVM: arm64: gic-v3: Only set ICH_HCR traps for v2-on-v3 or v3 guests
  KVM: arm64: selftests: Actually enable IRQs in vgic_lpi_stress
  KVM: arm64: selftests: Allocate vcpus with correct size
  ...
commit 02e5f74ef0
@@ -1229,6 +1229,9 @@ It is not possible to read back a pending external abort (injected via
 KVM_SET_VCPU_EVENTS or otherwise) because such an exception is always delivered
 directly to the virtual CPU).
 
+Calling this ioctl on a vCPU that hasn't been initialized will return
+-ENOEXEC.
+
 ::
 
   struct kvm_vcpu_events {
@@ -1309,6 +1312,8 @@ exceptions by manipulating individual registers using the KVM_SET_ONE_REG API.
 
 See KVM_GET_VCPU_EVENTS for the data structure.
 
+Calling this ioctl on a vCPU that hasn't been initialized will return
+-ENOEXEC.
+
 4.33 KVM_GET_DEBUGREGS
 ----------------------
 
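The new -ENOEXEC failure mode is easy to exercise from userspace. Below is a minimal sketch, not part of this diff, with error handling trimmed and the VM/vCPU setup reduced to the essentials; it assumes an arm64 host where KVM_ARM_VCPU_INIT has not yet been issued:

    #include <assert.h>
    #include <errno.h>
    #include <fcntl.h>
    #include <sys/ioctl.h>
    #include <linux/kvm.h>

    int main(void)
    {
            int kvm = open("/dev/kvm", O_RDWR);
            int vm = ioctl(kvm, KVM_CREATE_VM, 0);
            int vcpu = ioctl(vm, KVM_CREATE_VCPU, 0);
            struct kvm_vcpu_events events = {};

            /* No KVM_ARM_VCPU_INIT yet, so the vCPU is uninitialized. */
            int ret = ioctl(vcpu, KVM_GET_VCPU_EVENTS, &events);
            assert(ret == -1 && errno == ENOEXEC);

            /* After KVM_ARM_VCPU_INIT, the same ioctl is expected to succeed. */
            return 0;
    }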
@@ -6432,9 +6437,18 @@ most one mapping per page, i.e. binding multiple memory regions to a single
 guest_memfd range is not allowed (any number of memory regions can be bound to
 a single guest_memfd file, but the bound ranges must not overlap).
 
-When the capability KVM_CAP_GUEST_MEMFD_MMAP is supported, the 'flags' field
-supports GUEST_MEMFD_FLAG_MMAP. Setting this flag on guest_memfd creation
-enables mmap() and faulting of guest_memfd memory to host userspace.
+The capability KVM_CAP_GUEST_MEMFD_FLAGS enumerates the `flags` that can be
+specified via KVM_CREATE_GUEST_MEMFD. Currently defined flags:
+
+============================ ================================================
+GUEST_MEMFD_FLAG_MMAP        Enable using mmap() on the guest_memfd file
+                             descriptor.
+GUEST_MEMFD_FLAG_INIT_SHARED Make all memory in the file shared during
+                             KVM_CREATE_GUEST_MEMFD (memory files created
+                             without INIT_SHARED will be marked private).
+                             Shared memory can be faulted into host userspace
+                             page tables. Private memory cannot.
+============================ ================================================
 
 When the KVM MMU performs a PFN lookup to service a guest fault and the backing
 guest_memfd has the GUEST_MEMFD_FLAG_MMAP set, then the fault will always be
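As a rough illustration of the reworked userspace contract (not part of this diff; the helper name and the pre-existing vm_fd are assumptions, and a uapi header that already defines the new capability and flags is assumed), a caller would query KVM_CAP_GUEST_MEMFD_FLAGS, request both flags explicitly, and then mmap() the returned file descriptor:

    #include <stdint.h>
    #include <sys/ioctl.h>
    #include <sys/mman.h>
    #include <linux/kvm.h>

    /* 'vm_fd' is an already-created VM fd; error handling is trimmed. */
    static void *map_shared_guest_memfd(int vm_fd, uint64_t size)
    {
            uint64_t want = GUEST_MEMFD_FLAG_MMAP | GUEST_MEMFD_FLAG_INIT_SHARED;
            uint64_t supported = ioctl(vm_fd, KVM_CHECK_EXTENSION, KVM_CAP_GUEST_MEMFD_FLAGS);
            struct kvm_create_guest_memfd gmem = {
                    .size  = size,
                    /* INIT_SHARED must now be requested explicitly; MMAP alone
                     * leaves the memory private. */
                    .flags = want,
            };
            int fd;

            if ((supported & want) != want)
                    return NULL;

            fd = ioctl(vm_fd, KVM_CREATE_GUEST_MEMFD, &gmem);
            return mmap(NULL, size, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
    }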
@@ -13,7 +13,8 @@ will act as the VM interrupt controller, requiring emulated user-space devices
 to inject interrupts to the VGIC instead of directly to CPUs. It is not
 possible to create both a GICv3 and GICv2 on the same VM.
 
-Creating a guest GICv3 device requires a host GICv3 as well.
+Creating a guest GICv3 device requires a host GICv3 host, or a GICv5 host with
+support for FEAT_GCIE_LEGACY.
 
 
 Groups:

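For context on what that requirement means in practice, here is a hedged sketch of guest GICv3 creation from userspace (not part of this diff; the helper name and vm_fd are made up). The KVM_CREATE_DEVICE call below is the operation that fails when the host GIC cannot back a guest GICv3:

    #include <sys/ioctl.h>
    #include <linux/kvm.h>

    /* Returns a device fd on success, -1 if the host GIC (GICv3, or GICv5 with
     * FEAT_GCIE_LEGACY) cannot back a guest GICv3. Error handling trimmed. */
    static int create_vgic_v3(int vm_fd)
    {
            struct kvm_create_device dev = {
                    .type = KVM_DEV_TYPE_ARM_VGIC_V3,
            };

            if (ioctl(vm_fd, KVM_CREATE_DEVICE, &dev) < 0)
                    return -1;

            return dev.fd; /* used with KVM_SET_DEVICE_ATTR for further setup */
    }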
@ -24,22 +24,48 @@
|
|||
* ID_AA64MMFR4_EL1.E2H0 < 0. On such CPUs HCR_EL2.E2H is RES1, but it
|
||||
* can reset into an UNKNOWN state and might not read as 1 until it has
|
||||
* been initialized explicitly.
|
||||
*
|
||||
* Fruity CPUs seem to have HCR_EL2.E2H set to RAO/WI, but
|
||||
* don't advertise it (they predate this relaxation).
|
||||
*
|
||||
* Initalize HCR_EL2.E2H so that later code can rely upon HCR_EL2.E2H
|
||||
* indicating whether the CPU is running in E2H mode.
|
||||
*/
|
||||
mrs_s x1, SYS_ID_AA64MMFR4_EL1
|
||||
sbfx x1, x1, #ID_AA64MMFR4_EL1_E2H0_SHIFT, #ID_AA64MMFR4_EL1_E2H0_WIDTH
|
||||
cmp x1, #0
|
||||
b.ge .LnVHE_\@
|
||||
b.lt .LnE2H0_\@
|
||||
|
||||
/*
|
||||
* Unfortunately, HCR_EL2.E2H can be RES1 even if not advertised
|
||||
* as such via ID_AA64MMFR4_EL1.E2H0:
|
||||
*
|
||||
* - Fruity CPUs predate the !FEAT_E2H0 relaxation, and seem to
|
||||
* have HCR_EL2.E2H implemented as RAO/WI.
|
||||
*
|
||||
* - On CPUs that lack FEAT_FGT, a hypervisor can't trap guest
|
||||
* reads of ID_AA64MMFR4_EL1 to advertise !FEAT_E2H0. NV
|
||||
* guests on these hosts can write to HCR_EL2.E2H without
|
||||
* trapping to the hypervisor, but these writes have no
|
||||
* functional effect.
|
||||
*
|
||||
* Handle both cases by checking for an essential VHE property
|
||||
* (system register remapping) to decide whether we're
|
||||
* effectively VHE-only or not.
|
||||
*/
|
||||
msr_hcr_el2 x0 // Setup HCR_EL2 as nVHE
|
||||
isb
|
||||
mov x1, #1 // Write something to FAR_EL1
|
||||
msr far_el1, x1
|
||||
isb
|
||||
mov x1, #2 // Try to overwrite it via FAR_EL2
|
||||
msr far_el2, x1
|
||||
isb
|
||||
mrs x1, far_el1 // If we see the latest write in FAR_EL1,
|
||||
cmp x1, #2 // we can safely assume we are VHE only.
|
||||
b.ne .LnVHE_\@ // Otherwise, we know that nVHE works.
|
||||
|
||||
.LnE2H0_\@:
|
||||
orr x0, x0, #HCR_E2H
|
||||
.LnVHE_\@:
|
||||
msr_hcr_el2 x0
|
||||
isb
|
||||
.LnVHE_\@:
|
||||
.endm
|
||||
|
||||
.macro __init_el2_sctlr
|
||||
|
|
|
@ -816,6 +816,11 @@ struct kvm_vcpu_arch {
|
|||
u64 hcrx_el2;
|
||||
u64 mdcr_el2;
|
||||
|
||||
struct {
|
||||
u64 r;
|
||||
u64 w;
|
||||
} fgt[__NR_FGT_GROUP_IDS__];
|
||||
|
||||
/* Exception Information */
|
||||
struct kvm_vcpu_fault_info fault;
|
||||
|
||||
|
@ -1600,6 +1605,51 @@ static inline bool kvm_arch_has_irq_bypass(void)
|
|||
void compute_fgu(struct kvm *kvm, enum fgt_group_id fgt);
|
||||
void get_reg_fixed_bits(struct kvm *kvm, enum vcpu_sysreg reg, u64 *res0, u64 *res1);
|
||||
void check_feature_map(void);
|
||||
void kvm_vcpu_load_fgt(struct kvm_vcpu *vcpu);
|
||||
|
||||
static __always_inline enum fgt_group_id __fgt_reg_to_group_id(enum vcpu_sysreg reg)
|
||||
{
|
||||
switch (reg) {
|
||||
case HFGRTR_EL2:
|
||||
case HFGWTR_EL2:
|
||||
return HFGRTR_GROUP;
|
||||
case HFGITR_EL2:
|
||||
return HFGITR_GROUP;
|
||||
case HDFGRTR_EL2:
|
||||
case HDFGWTR_EL2:
|
||||
return HDFGRTR_GROUP;
|
||||
case HAFGRTR_EL2:
|
||||
return HAFGRTR_GROUP;
|
||||
case HFGRTR2_EL2:
|
||||
case HFGWTR2_EL2:
|
||||
return HFGRTR2_GROUP;
|
||||
case HFGITR2_EL2:
|
||||
return HFGITR2_GROUP;
|
||||
case HDFGRTR2_EL2:
|
||||
case HDFGWTR2_EL2:
|
||||
return HDFGRTR2_GROUP;
|
||||
default:
|
||||
BUILD_BUG_ON(1);
|
||||
}
|
||||
}
|
||||
|
||||
#define vcpu_fgt(vcpu, reg) \
|
||||
({ \
|
||||
enum fgt_group_id id = __fgt_reg_to_group_id(reg); \
|
||||
u64 *p; \
|
||||
switch (reg) { \
|
||||
case HFGWTR_EL2: \
|
||||
case HDFGWTR_EL2: \
|
||||
case HFGWTR2_EL2: \
|
||||
case HDFGWTR2_EL2: \
|
||||
p = &(vcpu)->arch.fgt[id].w; \
|
||||
break; \
|
||||
default: \
|
||||
p = &(vcpu)->arch.fgt[id].r; \
|
||||
break; \
|
||||
} \
|
||||
\
|
||||
p; \
|
||||
})
|
||||
|
||||
#endif /* __ARM64_KVM_HOST_H__ */
|
||||
|
|
|
@ -66,7 +66,7 @@ static int nr_timers(struct kvm_vcpu *vcpu)
|
|||
|
||||
u32 timer_get_ctl(struct arch_timer_context *ctxt)
|
||||
{
|
||||
struct kvm_vcpu *vcpu = ctxt->vcpu;
|
||||
struct kvm_vcpu *vcpu = timer_context_to_vcpu(ctxt);
|
||||
|
||||
switch(arch_timer_ctx_index(ctxt)) {
|
||||
case TIMER_VTIMER:
|
||||
|
@ -85,7 +85,7 @@ u32 timer_get_ctl(struct arch_timer_context *ctxt)
|
|||
|
||||
u64 timer_get_cval(struct arch_timer_context *ctxt)
|
||||
{
|
||||
struct kvm_vcpu *vcpu = ctxt->vcpu;
|
||||
struct kvm_vcpu *vcpu = timer_context_to_vcpu(ctxt);
|
||||
|
||||
switch(arch_timer_ctx_index(ctxt)) {
|
||||
case TIMER_VTIMER:
|
||||
|
@ -104,7 +104,7 @@ u64 timer_get_cval(struct arch_timer_context *ctxt)
|
|||
|
||||
static void timer_set_ctl(struct arch_timer_context *ctxt, u32 ctl)
|
||||
{
|
||||
struct kvm_vcpu *vcpu = ctxt->vcpu;
|
||||
struct kvm_vcpu *vcpu = timer_context_to_vcpu(ctxt);
|
||||
|
||||
switch(arch_timer_ctx_index(ctxt)) {
|
||||
case TIMER_VTIMER:
|
||||
|
@ -126,7 +126,7 @@ static void timer_set_ctl(struct arch_timer_context *ctxt, u32 ctl)
|
|||
|
||||
static void timer_set_cval(struct arch_timer_context *ctxt, u64 cval)
|
||||
{
|
||||
struct kvm_vcpu *vcpu = ctxt->vcpu;
|
||||
struct kvm_vcpu *vcpu = timer_context_to_vcpu(ctxt);
|
||||
|
||||
switch(arch_timer_ctx_index(ctxt)) {
|
||||
case TIMER_VTIMER:
|
||||
|
@ -146,16 +146,6 @@ static void timer_set_cval(struct arch_timer_context *ctxt, u64 cval)
|
|||
}
|
||||
}
|
||||
|
||||
static void timer_set_offset(struct arch_timer_context *ctxt, u64 offset)
|
||||
{
|
||||
if (!ctxt->offset.vm_offset) {
|
||||
WARN(offset, "timer %ld\n", arch_timer_ctx_index(ctxt));
|
||||
return;
|
||||
}
|
||||
|
||||
WRITE_ONCE(*ctxt->offset.vm_offset, offset);
|
||||
}
|
||||
|
||||
u64 kvm_phys_timer_read(void)
|
||||
{
|
||||
return timecounter->cc->read(timecounter->cc);
|
||||
|
@ -343,7 +333,7 @@ static enum hrtimer_restart kvm_hrtimer_expire(struct hrtimer *hrt)
|
|||
u64 ns;
|
||||
|
||||
ctx = container_of(hrt, struct arch_timer_context, hrtimer);
|
||||
vcpu = ctx->vcpu;
|
||||
vcpu = timer_context_to_vcpu(ctx);
|
||||
|
||||
trace_kvm_timer_hrtimer_expire(ctx);
|
||||
|
||||
|
@ -436,8 +426,9 @@ static void kvm_timer_update_status(struct arch_timer_context *ctx, bool level)
|
|||
*
|
||||
* But hey, it's fast, right?
|
||||
*/
|
||||
if (is_hyp_ctxt(ctx->vcpu) &&
|
||||
(ctx == vcpu_vtimer(ctx->vcpu) || ctx == vcpu_ptimer(ctx->vcpu))) {
|
||||
struct kvm_vcpu *vcpu = timer_context_to_vcpu(ctx);
|
||||
if (is_hyp_ctxt(vcpu) &&
|
||||
(ctx == vcpu_vtimer(vcpu) || ctx == vcpu_ptimer(vcpu))) {
|
||||
unsigned long val = timer_get_ctl(ctx);
|
||||
__assign_bit(__ffs(ARCH_TIMER_CTRL_IT_STAT), &val, level);
|
||||
timer_set_ctl(ctx, val);
|
||||
|
@ -470,7 +461,7 @@ static void timer_emulate(struct arch_timer_context *ctx)
|
|||
trace_kvm_timer_emulate(ctx, should_fire);
|
||||
|
||||
if (should_fire != ctx->irq.level)
|
||||
kvm_timer_update_irq(ctx->vcpu, should_fire, ctx);
|
||||
kvm_timer_update_irq(timer_context_to_vcpu(ctx), should_fire, ctx);
|
||||
|
||||
kvm_timer_update_status(ctx, should_fire);
|
||||
|
||||
|
@ -498,7 +489,7 @@ static void set_cntpoff(u64 cntpoff)
|
|||
|
||||
static void timer_save_state(struct arch_timer_context *ctx)
|
||||
{
|
||||
struct arch_timer_cpu *timer = vcpu_timer(ctx->vcpu);
|
||||
struct arch_timer_cpu *timer = vcpu_timer(timer_context_to_vcpu(ctx));
|
||||
enum kvm_arch_timers index = arch_timer_ctx_index(ctx);
|
||||
unsigned long flags;
|
||||
|
||||
|
@ -609,7 +600,7 @@ static void kvm_timer_unblocking(struct kvm_vcpu *vcpu)
|
|||
|
||||
static void timer_restore_state(struct arch_timer_context *ctx)
|
||||
{
|
||||
struct arch_timer_cpu *timer = vcpu_timer(ctx->vcpu);
|
||||
struct arch_timer_cpu *timer = vcpu_timer(timer_context_to_vcpu(ctx));
|
||||
enum kvm_arch_timers index = arch_timer_ctx_index(ctx);
|
||||
unsigned long flags;
|
||||
|
||||
|
@ -668,7 +659,7 @@ static inline void set_timer_irq_phys_active(struct arch_timer_context *ctx, boo
|
|||
|
||||
static void kvm_timer_vcpu_load_gic(struct arch_timer_context *ctx)
|
||||
{
|
||||
struct kvm_vcpu *vcpu = ctx->vcpu;
|
||||
struct kvm_vcpu *vcpu = timer_context_to_vcpu(ctx);
|
||||
bool phys_active = false;
|
||||
|
||||
/*
|
||||
|
@ -677,7 +668,7 @@ static void kvm_timer_vcpu_load_gic(struct arch_timer_context *ctx)
|
|||
* this point and the register restoration, we'll take the
|
||||
* interrupt anyway.
|
||||
*/
|
||||
kvm_timer_update_irq(ctx->vcpu, kvm_timer_should_fire(ctx), ctx);
|
||||
kvm_timer_update_irq(vcpu, kvm_timer_should_fire(ctx), ctx);
|
||||
|
||||
if (irqchip_in_kernel(vcpu->kvm))
|
||||
phys_active = kvm_vgic_map_is_active(vcpu, timer_irq(ctx));
|
||||
|
@ -1063,7 +1054,7 @@ static void timer_context_init(struct kvm_vcpu *vcpu, int timerid)
|
|||
struct arch_timer_context *ctxt = vcpu_get_timer(vcpu, timerid);
|
||||
struct kvm *kvm = vcpu->kvm;
|
||||
|
||||
ctxt->vcpu = vcpu;
|
||||
ctxt->timer_id = timerid;
|
||||
|
||||
if (timerid == TIMER_VTIMER)
|
||||
ctxt->offset.vm_offset = &kvm->arch.timer_data.voffset;
|
||||
|
@ -1121,49 +1112,6 @@ void kvm_timer_cpu_down(void)
|
|||
disable_percpu_irq(host_ptimer_irq);
|
||||
}
|
||||
|
||||
int kvm_arm_timer_set_reg(struct kvm_vcpu *vcpu, u64 regid, u64 value)
|
||||
{
|
||||
struct arch_timer_context *timer;
|
||||
|
||||
switch (regid) {
|
||||
case KVM_REG_ARM_TIMER_CTL:
|
||||
timer = vcpu_vtimer(vcpu);
|
||||
kvm_arm_timer_write(vcpu, timer, TIMER_REG_CTL, value);
|
||||
break;
|
||||
case KVM_REG_ARM_TIMER_CNT:
|
||||
if (!test_bit(KVM_ARCH_FLAG_VM_COUNTER_OFFSET,
|
||||
&vcpu->kvm->arch.flags)) {
|
||||
timer = vcpu_vtimer(vcpu);
|
||||
timer_set_offset(timer, kvm_phys_timer_read() - value);
|
||||
}
|
||||
break;
|
||||
case KVM_REG_ARM_TIMER_CVAL:
|
||||
timer = vcpu_vtimer(vcpu);
|
||||
kvm_arm_timer_write(vcpu, timer, TIMER_REG_CVAL, value);
|
||||
break;
|
||||
case KVM_REG_ARM_PTIMER_CTL:
|
||||
timer = vcpu_ptimer(vcpu);
|
||||
kvm_arm_timer_write(vcpu, timer, TIMER_REG_CTL, value);
|
||||
break;
|
||||
case KVM_REG_ARM_PTIMER_CNT:
|
||||
if (!test_bit(KVM_ARCH_FLAG_VM_COUNTER_OFFSET,
|
||||
&vcpu->kvm->arch.flags)) {
|
||||
timer = vcpu_ptimer(vcpu);
|
||||
timer_set_offset(timer, kvm_phys_timer_read() - value);
|
||||
}
|
||||
break;
|
||||
case KVM_REG_ARM_PTIMER_CVAL:
|
||||
timer = vcpu_ptimer(vcpu);
|
||||
kvm_arm_timer_write(vcpu, timer, TIMER_REG_CVAL, value);
|
||||
break;
|
||||
|
||||
default:
|
||||
return -1;
|
||||
}
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
static u64 read_timer_ctl(struct arch_timer_context *timer)
|
||||
{
|
||||
/*
|
||||
|
@ -1180,31 +1128,6 @@ static u64 read_timer_ctl(struct arch_timer_context *timer)
|
|||
return ctl;
|
||||
}
|
||||
|
||||
u64 kvm_arm_timer_get_reg(struct kvm_vcpu *vcpu, u64 regid)
|
||||
{
|
||||
switch (regid) {
|
||||
case KVM_REG_ARM_TIMER_CTL:
|
||||
return kvm_arm_timer_read(vcpu,
|
||||
vcpu_vtimer(vcpu), TIMER_REG_CTL);
|
||||
case KVM_REG_ARM_TIMER_CNT:
|
||||
return kvm_arm_timer_read(vcpu,
|
||||
vcpu_vtimer(vcpu), TIMER_REG_CNT);
|
||||
case KVM_REG_ARM_TIMER_CVAL:
|
||||
return kvm_arm_timer_read(vcpu,
|
||||
vcpu_vtimer(vcpu), TIMER_REG_CVAL);
|
||||
case KVM_REG_ARM_PTIMER_CTL:
|
||||
return kvm_arm_timer_read(vcpu,
|
||||
vcpu_ptimer(vcpu), TIMER_REG_CTL);
|
||||
case KVM_REG_ARM_PTIMER_CNT:
|
||||
return kvm_arm_timer_read(vcpu,
|
||||
vcpu_ptimer(vcpu), TIMER_REG_CNT);
|
||||
case KVM_REG_ARM_PTIMER_CVAL:
|
||||
return kvm_arm_timer_read(vcpu,
|
||||
vcpu_ptimer(vcpu), TIMER_REG_CVAL);
|
||||
}
|
||||
return (u64)-1;
|
||||
}
|
||||
|
||||
static u64 kvm_arm_timer_read(struct kvm_vcpu *vcpu,
|
||||
struct arch_timer_context *timer,
|
||||
enum kvm_arch_timer_regs treg)
|
||||
|
|
|
@ -642,6 +642,7 @@ nommu:
|
|||
vcpu->arch.hcr_el2 |= HCR_TWI;
|
||||
|
||||
vcpu_set_pauth_traps(vcpu);
|
||||
kvm_vcpu_load_fgt(vcpu);
|
||||
|
||||
if (is_protected_kvm_enabled()) {
|
||||
kvm_call_hyp_nvhe(__pkvm_vcpu_load,
|
||||
|
@ -1794,6 +1795,9 @@ long kvm_arch_vcpu_ioctl(struct file *filp,
|
|||
case KVM_GET_VCPU_EVENTS: {
|
||||
struct kvm_vcpu_events events;
|
||||
|
||||
if (!kvm_vcpu_initialized(vcpu))
|
||||
return -ENOEXEC;
|
||||
|
||||
if (kvm_arm_vcpu_get_events(vcpu, &events))
|
||||
return -EINVAL;
|
||||
|
||||
|
@ -1805,6 +1809,9 @@ long kvm_arch_vcpu_ioctl(struct file *filp,
|
|||
case KVM_SET_VCPU_EVENTS: {
|
||||
struct kvm_vcpu_events events;
|
||||
|
||||
if (!kvm_vcpu_initialized(vcpu))
|
||||
return -ENOEXEC;
|
||||
|
||||
if (copy_from_user(&events, argp, sizeof(events)))
|
||||
return -EFAULT;
|
||||
|
||||
|
|
|
@ -91,7 +91,6 @@ static enum trans_regime compute_translation_regime(struct kvm_vcpu *vcpu, u32 o
|
|||
case OP_AT_S1E2W:
|
||||
case OP_AT_S1E2A:
|
||||
return vcpu_el2_e2h_is_set(vcpu) ? TR_EL20 : TR_EL2;
|
||||
break;
|
||||
default:
|
||||
return (vcpu_el2_e2h_is_set(vcpu) &&
|
||||
vcpu_el2_tge_is_set(vcpu)) ? TR_EL20 : TR_EL10;
|
||||
|
@ -1602,13 +1601,17 @@ int __kvm_find_s1_desc_level(struct kvm_vcpu *vcpu, u64 va, u64 ipa, int *level)
|
|||
.fn = match_s1_desc,
|
||||
.priv = &dm,
|
||||
},
|
||||
.regime = TR_EL10,
|
||||
.as_el0 = false,
|
||||
.pan = false,
|
||||
};
|
||||
struct s1_walk_result wr = {};
|
||||
int ret;
|
||||
|
||||
if (is_hyp_ctxt(vcpu))
|
||||
wi.regime = vcpu_el2_e2h_is_set(vcpu) ? TR_EL20 : TR_EL2;
|
||||
else
|
||||
wi.regime = TR_EL10;
|
||||
|
||||
ret = setup_s1_walk(vcpu, &wi, &wr, va);
|
||||
if (ret)
|
||||
return ret;
|
||||
|
|
|
@ -5,6 +5,8 @@
|
|||
*/
|
||||
|
||||
#include <linux/kvm_host.h>
|
||||
#include <asm/kvm_emulate.h>
|
||||
#include <asm/kvm_nested.h>
|
||||
#include <asm/sysreg.h>
|
||||
|
||||
/*
|
||||
|
@ -1428,3 +1430,91 @@ void get_reg_fixed_bits(struct kvm *kvm, enum vcpu_sysreg reg, u64 *res0, u64 *r
|
|||
break;
|
||||
}
|
||||
}
|
||||
|
||||
static __always_inline struct fgt_masks *__fgt_reg_to_masks(enum vcpu_sysreg reg)
|
||||
{
|
||||
switch (reg) {
|
||||
case HFGRTR_EL2:
|
||||
return &hfgrtr_masks;
|
||||
case HFGWTR_EL2:
|
||||
return &hfgwtr_masks;
|
||||
case HFGITR_EL2:
|
||||
return &hfgitr_masks;
|
||||
case HDFGRTR_EL2:
|
||||
return &hdfgrtr_masks;
|
||||
case HDFGWTR_EL2:
|
||||
return &hdfgwtr_masks;
|
||||
case HAFGRTR_EL2:
|
||||
return &hafgrtr_masks;
|
||||
case HFGRTR2_EL2:
|
||||
return &hfgrtr2_masks;
|
||||
case HFGWTR2_EL2:
|
||||
return &hfgwtr2_masks;
|
||||
case HFGITR2_EL2:
|
||||
return &hfgitr2_masks;
|
||||
case HDFGRTR2_EL2:
|
||||
return &hdfgrtr2_masks;
|
||||
case HDFGWTR2_EL2:
|
||||
return &hdfgwtr2_masks;
|
||||
default:
|
||||
BUILD_BUG_ON(1);
|
||||
}
|
||||
}
|
||||
|
||||
static __always_inline void __compute_fgt(struct kvm_vcpu *vcpu, enum vcpu_sysreg reg)
|
||||
{
|
||||
u64 fgu = vcpu->kvm->arch.fgu[__fgt_reg_to_group_id(reg)];
|
||||
struct fgt_masks *m = __fgt_reg_to_masks(reg);
|
||||
u64 clear = 0, set = 0, val = m->nmask;
|
||||
|
||||
set |= fgu & m->mask;
|
||||
clear |= fgu & m->nmask;
|
||||
|
||||
if (is_nested_ctxt(vcpu)) {
|
||||
u64 nested = __vcpu_sys_reg(vcpu, reg);
|
||||
set |= nested & m->mask;
|
||||
clear |= ~nested & m->nmask;
|
||||
}
|
||||
|
||||
val |= set;
|
||||
val &= ~clear;
|
||||
*vcpu_fgt(vcpu, reg) = val;
|
||||
}
|
||||
|
||||
static void __compute_hfgwtr(struct kvm_vcpu *vcpu)
|
||||
{
|
||||
__compute_fgt(vcpu, HFGWTR_EL2);
|
||||
|
||||
if (cpus_have_final_cap(ARM64_WORKAROUND_AMPERE_AC03_CPU_38))
|
||||
*vcpu_fgt(vcpu, HFGWTR_EL2) |= HFGWTR_EL2_TCR_EL1;
|
||||
}
|
||||
|
||||
static void __compute_hdfgwtr(struct kvm_vcpu *vcpu)
|
||||
{
|
||||
__compute_fgt(vcpu, HDFGWTR_EL2);
|
||||
|
||||
if (is_hyp_ctxt(vcpu))
|
||||
*vcpu_fgt(vcpu, HDFGWTR_EL2) |= HDFGWTR_EL2_MDSCR_EL1;
|
||||
}
|
||||
|
||||
void kvm_vcpu_load_fgt(struct kvm_vcpu *vcpu)
|
||||
{
|
||||
if (!cpus_have_final_cap(ARM64_HAS_FGT))
|
||||
return;
|
||||
|
||||
__compute_fgt(vcpu, HFGRTR_EL2);
|
||||
__compute_hfgwtr(vcpu);
|
||||
__compute_fgt(vcpu, HFGITR_EL2);
|
||||
__compute_fgt(vcpu, HDFGRTR_EL2);
|
||||
__compute_hdfgwtr(vcpu);
|
||||
__compute_fgt(vcpu, HAFGRTR_EL2);
|
||||
|
||||
if (!cpus_have_final_cap(ARM64_HAS_FGT2))
|
||||
return;
|
||||
|
||||
__compute_fgt(vcpu, HFGRTR2_EL2);
|
||||
__compute_fgt(vcpu, HFGWTR2_EL2);
|
||||
__compute_fgt(vcpu, HFGITR2_EL2);
|
||||
__compute_fgt(vcpu, HDFGRTR2_EL2);
|
||||
__compute_fgt(vcpu, HDFGWTR2_EL2);
|
||||
}
|
||||
|
|
|
@ -15,6 +15,12 @@
|
|||
#include <asm/kvm_arm.h>
|
||||
#include <asm/kvm_emulate.h>
|
||||
|
||||
static int cpu_has_spe(u64 dfr0)
|
||||
{
|
||||
return cpuid_feature_extract_unsigned_field(dfr0, ID_AA64DFR0_EL1_PMSVer_SHIFT) &&
|
||||
!(read_sysreg_s(SYS_PMBIDR_EL1) & PMBIDR_EL1_P);
|
||||
}
|
||||
|
||||
/**
|
||||
* kvm_arm_setup_mdcr_el2 - configure vcpu mdcr_el2 value
|
||||
*
|
||||
|
@ -77,13 +83,12 @@ void kvm_init_host_debug_data(void)
|
|||
*host_data_ptr(debug_brps) = SYS_FIELD_GET(ID_AA64DFR0_EL1, BRPs, dfr0);
|
||||
*host_data_ptr(debug_wrps) = SYS_FIELD_GET(ID_AA64DFR0_EL1, WRPs, dfr0);
|
||||
|
||||
if (cpu_has_spe(dfr0))
|
||||
host_data_set_flag(HAS_SPE);
|
||||
|
||||
if (has_vhe())
|
||||
return;
|
||||
|
||||
if (cpuid_feature_extract_unsigned_field(dfr0, ID_AA64DFR0_EL1_PMSVer_SHIFT) &&
|
||||
!(read_sysreg_s(SYS_PMBIDR_EL1) & PMBIDR_EL1_P))
|
||||
host_data_set_flag(HAS_SPE);
|
||||
|
||||
/* Check if we have BRBE implemented and available at the host */
|
||||
if (cpuid_feature_extract_unsigned_field(dfr0, ID_AA64DFR0_EL1_BRBE_SHIFT))
|
||||
host_data_set_flag(HAS_BRBE);
|
||||
|
@ -102,7 +107,7 @@ void kvm_init_host_debug_data(void)
|
|||
void kvm_debug_init_vhe(void)
|
||||
{
|
||||
/* Clear PMSCR_EL1.E{0,1}SPE which reset to UNKNOWN values. */
|
||||
if (SYS_FIELD_GET(ID_AA64DFR0_EL1, PMSVer, read_sysreg(id_aa64dfr0_el1)))
|
||||
if (host_data_test_flag(HAS_SPE))
|
||||
write_sysreg_el1(0, SYS_PMSCR);
|
||||
}
|
||||
|
||||
|
|
|
@ -591,64 +591,6 @@ static unsigned long num_core_regs(const struct kvm_vcpu *vcpu)
|
|||
return copy_core_reg_indices(vcpu, NULL);
|
||||
}
|
||||
|
||||
static const u64 timer_reg_list[] = {
|
||||
KVM_REG_ARM_TIMER_CTL,
|
||||
KVM_REG_ARM_TIMER_CNT,
|
||||
KVM_REG_ARM_TIMER_CVAL,
|
||||
KVM_REG_ARM_PTIMER_CTL,
|
||||
KVM_REG_ARM_PTIMER_CNT,
|
||||
KVM_REG_ARM_PTIMER_CVAL,
|
||||
};
|
||||
|
||||
#define NUM_TIMER_REGS ARRAY_SIZE(timer_reg_list)
|
||||
|
||||
static bool is_timer_reg(u64 index)
|
||||
{
|
||||
switch (index) {
|
||||
case KVM_REG_ARM_TIMER_CTL:
|
||||
case KVM_REG_ARM_TIMER_CNT:
|
||||
case KVM_REG_ARM_TIMER_CVAL:
|
||||
case KVM_REG_ARM_PTIMER_CTL:
|
||||
case KVM_REG_ARM_PTIMER_CNT:
|
||||
case KVM_REG_ARM_PTIMER_CVAL:
|
||||
return true;
|
||||
}
|
||||
return false;
|
||||
}
|
||||
|
||||
static int copy_timer_indices(struct kvm_vcpu *vcpu, u64 __user *uindices)
|
||||
{
|
||||
for (int i = 0; i < NUM_TIMER_REGS; i++) {
|
||||
if (put_user(timer_reg_list[i], uindices))
|
||||
return -EFAULT;
|
||||
uindices++;
|
||||
}
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
static int set_timer_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg)
|
||||
{
|
||||
void __user *uaddr = (void __user *)(long)reg->addr;
|
||||
u64 val;
|
||||
int ret;
|
||||
|
||||
ret = copy_from_user(&val, uaddr, KVM_REG_SIZE(reg->id));
|
||||
if (ret != 0)
|
||||
return -EFAULT;
|
||||
|
||||
return kvm_arm_timer_set_reg(vcpu, reg->id, val);
|
||||
}
|
||||
|
||||
static int get_timer_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg)
|
||||
{
|
||||
void __user *uaddr = (void __user *)(long)reg->addr;
|
||||
u64 val;
|
||||
|
||||
val = kvm_arm_timer_get_reg(vcpu, reg->id);
|
||||
return copy_to_user(uaddr, &val, KVM_REG_SIZE(reg->id)) ? -EFAULT : 0;
|
||||
}
|
||||
|
||||
static unsigned long num_sve_regs(const struct kvm_vcpu *vcpu)
|
||||
{
|
||||
const unsigned int slices = vcpu_sve_slices(vcpu);
|
||||
|
@ -724,7 +666,6 @@ unsigned long kvm_arm_num_regs(struct kvm_vcpu *vcpu)
|
|||
res += num_sve_regs(vcpu);
|
||||
res += kvm_arm_num_sys_reg_descs(vcpu);
|
||||
res += kvm_arm_get_fw_num_regs(vcpu);
|
||||
res += NUM_TIMER_REGS;
|
||||
|
||||
return res;
|
||||
}
|
||||
|
@ -755,11 +696,6 @@ int kvm_arm_copy_reg_indices(struct kvm_vcpu *vcpu, u64 __user *uindices)
|
|||
return ret;
|
||||
uindices += kvm_arm_get_fw_num_regs(vcpu);
|
||||
|
||||
ret = copy_timer_indices(vcpu, uindices);
|
||||
if (ret < 0)
|
||||
return ret;
|
||||
uindices += NUM_TIMER_REGS;
|
||||
|
||||
return kvm_arm_copy_sys_reg_indices(vcpu, uindices);
|
||||
}
|
||||
|
||||
|
@ -777,9 +713,6 @@ int kvm_arm_get_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg)
|
|||
case KVM_REG_ARM64_SVE: return get_sve_reg(vcpu, reg);
|
||||
}
|
||||
|
||||
if (is_timer_reg(reg->id))
|
||||
return get_timer_reg(vcpu, reg);
|
||||
|
||||
return kvm_arm_sys_reg_get_reg(vcpu, reg);
|
||||
}
|
||||
|
||||
|
@ -797,9 +730,6 @@ int kvm_arm_set_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg)
|
|||
case KVM_REG_ARM64_SVE: return set_sve_reg(vcpu, reg);
|
||||
}
|
||||
|
||||
if (is_timer_reg(reg->id))
|
||||
return set_timer_reg(vcpu, reg);
|
||||
|
||||
return kvm_arm_sys_reg_set_reg(vcpu, reg);
|
||||
}
|
||||
|
||||
|
|
|
@ -147,7 +147,12 @@ static int kvm_handle_wfx(struct kvm_vcpu *vcpu)
|
|||
if (esr & ESR_ELx_WFx_ISS_RV) {
|
||||
u64 val, now;
|
||||
|
||||
now = kvm_arm_timer_get_reg(vcpu, KVM_REG_ARM_TIMER_CNT);
|
||||
now = kvm_phys_timer_read();
|
||||
if (is_hyp_ctxt(vcpu) && vcpu_el2_e2h_is_set(vcpu))
|
||||
now -= timer_get_offset(vcpu_hvtimer(vcpu));
|
||||
else
|
||||
now -= timer_get_offset(vcpu_vtimer(vcpu));
|
||||
|
||||
val = vcpu_get_reg(vcpu, kvm_vcpu_sys_get_rt(vcpu));
|
||||
|
||||
if (now >= val)
|
||||
|
|
|
@ -195,123 +195,6 @@ static inline void __deactivate_cptr_traps(struct kvm_vcpu *vcpu)
|
|||
__deactivate_cptr_traps_nvhe(vcpu);
|
||||
}
|
||||
|
||||
#define reg_to_fgt_masks(reg) \
|
||||
({ \
|
||||
struct fgt_masks *m; \
|
||||
switch(reg) { \
|
||||
case HFGRTR_EL2: \
|
||||
m = &hfgrtr_masks; \
|
||||
break; \
|
||||
case HFGWTR_EL2: \
|
||||
m = &hfgwtr_masks; \
|
||||
break; \
|
||||
case HFGITR_EL2: \
|
||||
m = &hfgitr_masks; \
|
||||
break; \
|
||||
case HDFGRTR_EL2: \
|
||||
m = &hdfgrtr_masks; \
|
||||
break; \
|
||||
case HDFGWTR_EL2: \
|
||||
m = &hdfgwtr_masks; \
|
||||
break; \
|
||||
case HAFGRTR_EL2: \
|
||||
m = &hafgrtr_masks; \
|
||||
break; \
|
||||
case HFGRTR2_EL2: \
|
||||
m = &hfgrtr2_masks; \
|
||||
break; \
|
||||
case HFGWTR2_EL2: \
|
||||
m = &hfgwtr2_masks; \
|
||||
break; \
|
||||
case HFGITR2_EL2: \
|
||||
m = &hfgitr2_masks; \
|
||||
break; \
|
||||
case HDFGRTR2_EL2: \
|
||||
m = &hdfgrtr2_masks; \
|
||||
break; \
|
||||
case HDFGWTR2_EL2: \
|
||||
m = &hdfgwtr2_masks; \
|
||||
break; \
|
||||
default: \
|
||||
BUILD_BUG_ON(1); \
|
||||
} \
|
||||
\
|
||||
m; \
|
||||
})
|
||||
|
||||
#define compute_clr_set(vcpu, reg, clr, set) \
|
||||
do { \
|
||||
u64 hfg = __vcpu_sys_reg(vcpu, reg); \
|
||||
struct fgt_masks *m = reg_to_fgt_masks(reg); \
|
||||
set |= hfg & m->mask; \
|
||||
clr |= ~hfg & m->nmask; \
|
||||
} while(0)
|
||||
|
||||
#define reg_to_fgt_group_id(reg) \
|
||||
({ \
|
||||
enum fgt_group_id id; \
|
||||
switch(reg) { \
|
||||
case HFGRTR_EL2: \
|
||||
case HFGWTR_EL2: \
|
||||
id = HFGRTR_GROUP; \
|
||||
break; \
|
||||
case HFGITR_EL2: \
|
||||
id = HFGITR_GROUP; \
|
||||
break; \
|
||||
case HDFGRTR_EL2: \
|
||||
case HDFGWTR_EL2: \
|
||||
id = HDFGRTR_GROUP; \
|
||||
break; \
|
||||
case HAFGRTR_EL2: \
|
||||
id = HAFGRTR_GROUP; \
|
||||
break; \
|
||||
case HFGRTR2_EL2: \
|
||||
case HFGWTR2_EL2: \
|
||||
id = HFGRTR2_GROUP; \
|
||||
break; \
|
||||
case HFGITR2_EL2: \
|
||||
id = HFGITR2_GROUP; \
|
||||
break; \
|
||||
case HDFGRTR2_EL2: \
|
||||
case HDFGWTR2_EL2: \
|
||||
id = HDFGRTR2_GROUP; \
|
||||
break; \
|
||||
default: \
|
||||
BUILD_BUG_ON(1); \
|
||||
} \
|
||||
\
|
||||
id; \
|
||||
})
|
||||
|
||||
#define compute_undef_clr_set(vcpu, kvm, reg, clr, set) \
|
||||
do { \
|
||||
u64 hfg = kvm->arch.fgu[reg_to_fgt_group_id(reg)]; \
|
||||
struct fgt_masks *m = reg_to_fgt_masks(reg); \
|
||||
set |= hfg & m->mask; \
|
||||
clr |= hfg & m->nmask; \
|
||||
} while(0)
|
||||
|
||||
#define update_fgt_traps_cs(hctxt, vcpu, kvm, reg, clr, set) \
|
||||
do { \
|
||||
struct fgt_masks *m = reg_to_fgt_masks(reg); \
|
||||
u64 c = clr, s = set; \
|
||||
u64 val; \
|
||||
\
|
||||
ctxt_sys_reg(hctxt, reg) = read_sysreg_s(SYS_ ## reg); \
|
||||
if (is_nested_ctxt(vcpu)) \
|
||||
compute_clr_set(vcpu, reg, c, s); \
|
||||
\
|
||||
compute_undef_clr_set(vcpu, kvm, reg, c, s); \
|
||||
\
|
||||
val = m->nmask; \
|
||||
val |= s; \
|
||||
val &= ~c; \
|
||||
write_sysreg_s(val, SYS_ ## reg); \
|
||||
} while(0)
|
||||
|
||||
#define update_fgt_traps(hctxt, vcpu, kvm, reg) \
|
||||
update_fgt_traps_cs(hctxt, vcpu, kvm, reg, 0, 0)
|
||||
|
||||
static inline bool cpu_has_amu(void)
|
||||
{
|
||||
u64 pfr0 = read_sysreg_s(SYS_ID_AA64PFR0_EL1);
|
||||
|
@ -320,33 +203,36 @@ static inline bool cpu_has_amu(void)
|
|||
ID_AA64PFR0_EL1_AMU_SHIFT);
|
||||
}
|
||||
|
||||
#define __activate_fgt(hctxt, vcpu, reg) \
|
||||
do { \
|
||||
ctxt_sys_reg(hctxt, reg) = read_sysreg_s(SYS_ ## reg); \
|
||||
write_sysreg_s(*vcpu_fgt(vcpu, reg), SYS_ ## reg); \
|
||||
} while (0)
|
||||
|
||||
static inline void __activate_traps_hfgxtr(struct kvm_vcpu *vcpu)
|
||||
{
|
||||
struct kvm_cpu_context *hctxt = host_data_ptr(host_ctxt);
|
||||
struct kvm *kvm = kern_hyp_va(vcpu->kvm);
|
||||
|
||||
if (!cpus_have_final_cap(ARM64_HAS_FGT))
|
||||
return;
|
||||
|
||||
update_fgt_traps(hctxt, vcpu, kvm, HFGRTR_EL2);
|
||||
update_fgt_traps_cs(hctxt, vcpu, kvm, HFGWTR_EL2, 0,
|
||||
cpus_have_final_cap(ARM64_WORKAROUND_AMPERE_AC03_CPU_38) ?
|
||||
HFGWTR_EL2_TCR_EL1_MASK : 0);
|
||||
update_fgt_traps(hctxt, vcpu, kvm, HFGITR_EL2);
|
||||
update_fgt_traps(hctxt, vcpu, kvm, HDFGRTR_EL2);
|
||||
update_fgt_traps(hctxt, vcpu, kvm, HDFGWTR_EL2);
|
||||
__activate_fgt(hctxt, vcpu, HFGRTR_EL2);
|
||||
__activate_fgt(hctxt, vcpu, HFGWTR_EL2);
|
||||
__activate_fgt(hctxt, vcpu, HFGITR_EL2);
|
||||
__activate_fgt(hctxt, vcpu, HDFGRTR_EL2);
|
||||
__activate_fgt(hctxt, vcpu, HDFGWTR_EL2);
|
||||
|
||||
if (cpu_has_amu())
|
||||
update_fgt_traps(hctxt, vcpu, kvm, HAFGRTR_EL2);
|
||||
__activate_fgt(hctxt, vcpu, HAFGRTR_EL2);
|
||||
|
||||
if (!cpus_have_final_cap(ARM64_HAS_FGT2))
|
||||
return;
|
||||
|
||||
update_fgt_traps(hctxt, vcpu, kvm, HFGRTR2_EL2);
|
||||
update_fgt_traps(hctxt, vcpu, kvm, HFGWTR2_EL2);
|
||||
update_fgt_traps(hctxt, vcpu, kvm, HFGITR2_EL2);
|
||||
update_fgt_traps(hctxt, vcpu, kvm, HDFGRTR2_EL2);
|
||||
update_fgt_traps(hctxt, vcpu, kvm, HDFGWTR2_EL2);
|
||||
__activate_fgt(hctxt, vcpu, HFGRTR2_EL2);
|
||||
__activate_fgt(hctxt, vcpu, HFGWTR2_EL2);
|
||||
__activate_fgt(hctxt, vcpu, HFGITR2_EL2);
|
||||
__activate_fgt(hctxt, vcpu, HDFGRTR2_EL2);
|
||||
__activate_fgt(hctxt, vcpu, HDFGWTR2_EL2);
|
||||
}
|
||||
|
||||
#define __deactivate_fgt(htcxt, vcpu, reg) \
|
||||
|
|
|
@ -172,6 +172,7 @@ static int pkvm_vcpu_init_traps(struct pkvm_hyp_vcpu *hyp_vcpu)
|
|||
|
||||
/* Trust the host for non-protected vcpu features. */
|
||||
vcpu->arch.hcrx_el2 = host_vcpu->arch.hcrx_el2;
|
||||
memcpy(vcpu->arch.fgt, host_vcpu->arch.fgt, sizeof(vcpu->arch.fgt));
|
||||
return 0;
|
||||
}
|
||||
|
||||
|
|
|
@ -1859,13 +1859,16 @@ void kvm_nested_setup_mdcr_el2(struct kvm_vcpu *vcpu)
|
|||
{
|
||||
u64 guest_mdcr = __vcpu_sys_reg(vcpu, MDCR_EL2);
|
||||
|
||||
if (is_nested_ctxt(vcpu))
|
||||
vcpu->arch.mdcr_el2 |= (guest_mdcr & NV_MDCR_GUEST_INCLUDE);
|
||||
/*
|
||||
* In yet another example where FEAT_NV2 is fscking broken, accesses
|
||||
* to MDSCR_EL1 are redirected to the VNCR despite having an effect
|
||||
* at EL2. Use a big hammer to apply sanity.
|
||||
*
|
||||
* Unless of course we have FEAT_FGT, in which case we can precisely
|
||||
* trap MDSCR_EL1.
|
||||
*/
|
||||
if (is_hyp_ctxt(vcpu))
|
||||
else if (!cpus_have_final_cap(ARM64_HAS_FGT))
|
||||
vcpu->arch.mdcr_el2 |= MDCR_EL2_TDA;
|
||||
else
|
||||
vcpu->arch.mdcr_el2 |= (guest_mdcr & NV_MDCR_GUEST_INCLUDE);
|
||||
}
|
||||
|
|
|
@ -203,7 +203,6 @@ static void locate_register(const struct kvm_vcpu *vcpu, enum vcpu_sysreg reg,
|
|||
MAPPED_EL2_SYSREG(AMAIR_EL2, AMAIR_EL1, NULL );
|
||||
MAPPED_EL2_SYSREG(ELR_EL2, ELR_EL1, NULL );
|
||||
MAPPED_EL2_SYSREG(SPSR_EL2, SPSR_EL1, NULL );
|
||||
MAPPED_EL2_SYSREG(ZCR_EL2, ZCR_EL1, NULL );
|
||||
MAPPED_EL2_SYSREG(CONTEXTIDR_EL2, CONTEXTIDR_EL1, NULL );
|
||||
MAPPED_EL2_SYSREG(SCTLR2_EL2, SCTLR2_EL1, NULL );
|
||||
case CNTHCTL_EL2:
|
||||
|
@ -1595,14 +1594,47 @@ static bool access_arch_timer(struct kvm_vcpu *vcpu,
|
|||
return true;
|
||||
}
|
||||
|
||||
static bool access_hv_timer(struct kvm_vcpu *vcpu,
|
||||
struct sys_reg_params *p,
|
||||
const struct sys_reg_desc *r)
|
||||
static int arch_timer_set_user(struct kvm_vcpu *vcpu,
|
||||
const struct sys_reg_desc *rd,
|
||||
u64 val)
|
||||
{
|
||||
if (!vcpu_el2_e2h_is_set(vcpu))
|
||||
return undef_access(vcpu, p, r);
|
||||
switch (reg_to_encoding(rd)) {
|
||||
case SYS_CNTV_CTL_EL0:
|
||||
case SYS_CNTP_CTL_EL0:
|
||||
case SYS_CNTHV_CTL_EL2:
|
||||
case SYS_CNTHP_CTL_EL2:
|
||||
val &= ~ARCH_TIMER_CTRL_IT_STAT;
|
||||
break;
|
||||
case SYS_CNTVCT_EL0:
|
||||
if (!test_bit(KVM_ARCH_FLAG_VM_COUNTER_OFFSET, &vcpu->kvm->arch.flags))
|
||||
timer_set_offset(vcpu_vtimer(vcpu), kvm_phys_timer_read() - val);
|
||||
return 0;
|
||||
case SYS_CNTPCT_EL0:
|
||||
if (!test_bit(KVM_ARCH_FLAG_VM_COUNTER_OFFSET, &vcpu->kvm->arch.flags))
|
||||
timer_set_offset(vcpu_ptimer(vcpu), kvm_phys_timer_read() - val);
|
||||
return 0;
|
||||
}
|
||||
|
||||
return access_arch_timer(vcpu, p, r);
|
||||
__vcpu_assign_sys_reg(vcpu, rd->reg, val);
|
||||
return 0;
|
||||
}
|
||||
|
||||
static int arch_timer_get_user(struct kvm_vcpu *vcpu,
|
||||
const struct sys_reg_desc *rd,
|
||||
u64 *val)
|
||||
{
|
||||
switch (reg_to_encoding(rd)) {
|
||||
case SYS_CNTVCT_EL0:
|
||||
*val = kvm_phys_timer_read() - timer_get_offset(vcpu_vtimer(vcpu));
|
||||
break;
|
||||
case SYS_CNTPCT_EL0:
|
||||
*val = kvm_phys_timer_read() - timer_get_offset(vcpu_ptimer(vcpu));
|
||||
break;
|
||||
default:
|
||||
*val = __vcpu_sys_reg(vcpu, rd->reg);
|
||||
}
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
static s64 kvm_arm64_ftr_safe_value(u32 id, const struct arm64_ftr_bits *ftrp,
|
||||
|
@ -2507,15 +2539,20 @@ static bool bad_redir_trap(struct kvm_vcpu *vcpu,
|
|||
"trap of EL2 register redirected to EL1");
|
||||
}
|
||||
|
||||
#define EL2_REG_FILTERED(name, acc, rst, v, filter) { \
|
||||
#define SYS_REG_USER_FILTER(name, acc, rst, v, gu, su, filter) { \
|
||||
SYS_DESC(SYS_##name), \
|
||||
.access = acc, \
|
||||
.reset = rst, \
|
||||
.reg = name, \
|
||||
.get_user = gu, \
|
||||
.set_user = su, \
|
||||
.visibility = filter, \
|
||||
.val = v, \
|
||||
}
|
||||
|
||||
#define EL2_REG_FILTERED(name, acc, rst, v, filter) \
|
||||
SYS_REG_USER_FILTER(name, acc, rst, v, NULL, NULL, filter)
|
||||
|
||||
#define EL2_REG(name, acc, rst, v) \
|
||||
EL2_REG_FILTERED(name, acc, rst, v, el2_visibility)
|
||||
|
||||
|
@ -2526,6 +2563,10 @@ static bool bad_redir_trap(struct kvm_vcpu *vcpu,
|
|||
EL2_REG_VNCR_FILT(name, hidden_visibility)
|
||||
#define EL2_REG_REDIR(name, rst, v) EL2_REG(name, bad_redir_trap, rst, v)
|
||||
|
||||
#define TIMER_REG(name, vis) \
|
||||
SYS_REG_USER_FILTER(name, access_arch_timer, reset_val, 0, \
|
||||
arch_timer_get_user, arch_timer_set_user, vis)
|
||||
|
||||
/*
|
||||
* Since reset() callback and field val are not used for idregs, they will be
|
||||
* used for specific purposes for idregs.
|
||||
|
@ -2705,18 +2746,17 @@ static bool access_zcr_el2(struct kvm_vcpu *vcpu,
|
|||
|
||||
if (guest_hyp_sve_traps_enabled(vcpu)) {
|
||||
kvm_inject_nested_sve_trap(vcpu);
|
||||
return true;
|
||||
return false;
|
||||
}
|
||||
|
||||
if (!p->is_write) {
|
||||
p->regval = vcpu_read_sys_reg(vcpu, ZCR_EL2);
|
||||
p->regval = __vcpu_sys_reg(vcpu, ZCR_EL2);
|
||||
return true;
|
||||
}
|
||||
|
||||
vq = SYS_FIELD_GET(ZCR_ELx, LEN, p->regval) + 1;
|
||||
vq = min(vq, vcpu_sve_max_vq(vcpu));
|
||||
vcpu_write_sys_reg(vcpu, vq - 1, ZCR_EL2);
|
||||
|
||||
__vcpu_assign_sys_reg(vcpu, ZCR_EL2, vq - 1);
|
||||
return true;
|
||||
}
|
||||
|
||||
|
@ -2833,6 +2873,16 @@ static unsigned int s1pie_el2_visibility(const struct kvm_vcpu *vcpu,
|
|||
return __el2_visibility(vcpu, rd, s1pie_visibility);
|
||||
}
|
||||
|
||||
static unsigned int cnthv_visibility(const struct kvm_vcpu *vcpu,
|
||||
const struct sys_reg_desc *rd)
|
||||
{
|
||||
if (vcpu_has_nv(vcpu) &&
|
||||
!vcpu_has_feature(vcpu, KVM_ARM_VCPU_HAS_EL2_E2H0))
|
||||
return 0;
|
||||
|
||||
return REG_HIDDEN;
|
||||
}
|
||||
|
||||
static bool access_mdcr(struct kvm_vcpu *vcpu,
|
||||
struct sys_reg_params *p,
|
||||
const struct sys_reg_desc *r)
|
||||
|
@ -3482,17 +3532,19 @@ static const struct sys_reg_desc sys_reg_descs[] = {
|
|||
AMU_AMEVTYPER1_EL0(14),
|
||||
AMU_AMEVTYPER1_EL0(15),
|
||||
|
||||
{ SYS_DESC(SYS_CNTPCT_EL0), access_arch_timer },
|
||||
{ SYS_DESC(SYS_CNTVCT_EL0), access_arch_timer },
|
||||
{ SYS_DESC(SYS_CNTPCT_EL0), .access = access_arch_timer,
|
||||
.get_user = arch_timer_get_user, .set_user = arch_timer_set_user },
|
||||
{ SYS_DESC(SYS_CNTVCT_EL0), .access = access_arch_timer,
|
||||
.get_user = arch_timer_get_user, .set_user = arch_timer_set_user },
|
||||
{ SYS_DESC(SYS_CNTPCTSS_EL0), access_arch_timer },
|
||||
{ SYS_DESC(SYS_CNTVCTSS_EL0), access_arch_timer },
|
||||
{ SYS_DESC(SYS_CNTP_TVAL_EL0), access_arch_timer },
|
||||
{ SYS_DESC(SYS_CNTP_CTL_EL0), access_arch_timer },
|
||||
{ SYS_DESC(SYS_CNTP_CVAL_EL0), access_arch_timer },
|
||||
TIMER_REG(CNTP_CTL_EL0, NULL),
|
||||
TIMER_REG(CNTP_CVAL_EL0, NULL),
|
||||
|
||||
{ SYS_DESC(SYS_CNTV_TVAL_EL0), access_arch_timer },
|
||||
{ SYS_DESC(SYS_CNTV_CTL_EL0), access_arch_timer },
|
||||
{ SYS_DESC(SYS_CNTV_CVAL_EL0), access_arch_timer },
|
||||
TIMER_REG(CNTV_CTL_EL0, NULL),
|
||||
TIMER_REG(CNTV_CVAL_EL0, NULL),
|
||||
|
||||
/* PMEVCNTRn_EL0 */
|
||||
PMU_PMEVCNTR_EL0(0),
|
||||
|
@ -3690,12 +3742,12 @@ static const struct sys_reg_desc sys_reg_descs[] = {
|
|||
EL2_REG_VNCR(CNTVOFF_EL2, reset_val, 0),
|
||||
EL2_REG(CNTHCTL_EL2, access_rw, reset_val, 0),
|
||||
{ SYS_DESC(SYS_CNTHP_TVAL_EL2), access_arch_timer },
|
||||
EL2_REG(CNTHP_CTL_EL2, access_arch_timer, reset_val, 0),
|
||||
EL2_REG(CNTHP_CVAL_EL2, access_arch_timer, reset_val, 0),
|
||||
TIMER_REG(CNTHP_CTL_EL2, el2_visibility),
|
||||
TIMER_REG(CNTHP_CVAL_EL2, el2_visibility),
|
||||
|
||||
{ SYS_DESC(SYS_CNTHV_TVAL_EL2), access_hv_timer },
|
||||
EL2_REG(CNTHV_CTL_EL2, access_hv_timer, reset_val, 0),
|
||||
EL2_REG(CNTHV_CVAL_EL2, access_hv_timer, reset_val, 0),
|
||||
{ SYS_DESC(SYS_CNTHV_TVAL_EL2), access_arch_timer, .visibility = cnthv_visibility },
|
||||
TIMER_REG(CNTHV_CTL_EL2, cnthv_visibility),
|
||||
TIMER_REG(CNTHV_CVAL_EL2, cnthv_visibility),
|
||||
|
||||
{ SYS_DESC(SYS_CNTKCTL_EL12), access_cntkctl_el12 },
|
||||
|
||||
|
@ -5233,15 +5285,28 @@ static int demux_c15_set(struct kvm_vcpu *vcpu, u64 id, void __user *uaddr)
|
|||
}
|
||||
}
|
||||
|
||||
static u64 kvm_one_reg_to_id(const struct kvm_one_reg *reg)
|
||||
{
|
||||
switch(reg->id) {
|
||||
case KVM_REG_ARM_TIMER_CVAL:
|
||||
return TO_ARM64_SYS_REG(CNTV_CVAL_EL0);
|
||||
case KVM_REG_ARM_TIMER_CNT:
|
||||
return TO_ARM64_SYS_REG(CNTVCT_EL0);
|
||||
default:
|
||||
return reg->id;
|
||||
}
|
||||
}
|
||||
|
||||
int kvm_sys_reg_get_user(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg,
|
||||
const struct sys_reg_desc table[], unsigned int num)
|
||||
{
|
||||
u64 __user *uaddr = (u64 __user *)(unsigned long)reg->addr;
|
||||
const struct sys_reg_desc *r;
|
||||
u64 id = kvm_one_reg_to_id(reg);
|
||||
u64 val;
|
||||
int ret;
|
||||
|
||||
r = id_to_sys_reg_desc(vcpu, reg->id, table, num);
|
||||
r = id_to_sys_reg_desc(vcpu, id, table, num);
|
||||
if (!r || sysreg_hidden(vcpu, r))
|
||||
return -ENOENT;
|
||||
|
||||
|
@ -5274,13 +5339,14 @@ int kvm_sys_reg_set_user(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg,
|
|||
{
|
||||
u64 __user *uaddr = (u64 __user *)(unsigned long)reg->addr;
|
||||
const struct sys_reg_desc *r;
|
||||
u64 id = kvm_one_reg_to_id(reg);
|
||||
u64 val;
|
||||
int ret;
|
||||
|
||||
if (get_user(val, uaddr))
|
||||
return -EFAULT;
|
||||
|
||||
r = id_to_sys_reg_desc(vcpu, reg->id, table, num);
|
||||
r = id_to_sys_reg_desc(vcpu, id, table, num);
|
||||
if (!r || sysreg_hidden(vcpu, r))
|
||||
return -ENOENT;
|
||||
|
||||
|
@ -5340,10 +5406,23 @@ static u64 sys_reg_to_index(const struct sys_reg_desc *reg)
|
|||
|
||||
static bool copy_reg_to_user(const struct sys_reg_desc *reg, u64 __user **uind)
|
||||
{
|
||||
u64 idx;
|
||||
|
||||
if (!*uind)
|
||||
return true;
|
||||
|
||||
if (put_user(sys_reg_to_index(reg), *uind))
|
||||
switch (reg_to_encoding(reg)) {
|
||||
case SYS_CNTV_CVAL_EL0:
|
||||
idx = KVM_REG_ARM_TIMER_CVAL;
|
||||
break;
|
||||
case SYS_CNTVCT_EL0:
|
||||
idx = KVM_REG_ARM_TIMER_CNT;
|
||||
break;
|
||||
default:
|
||||
idx = sys_reg_to_index(reg);
|
||||
}
|
||||
|
||||
if (put_user(idx, *uind))
|
||||
return false;
|
||||
|
||||
(*uind)++;
|
||||
|
|
|
@ -257,4 +257,10 @@ int kvm_finalize_sys_regs(struct kvm_vcpu *vcpu);
|
|||
(val); \
|
||||
})
|
||||
|
||||
#define TO_ARM64_SYS_REG(r) ARM64_SYS_REG(sys_reg_Op0(SYS_ ## r), \
|
||||
sys_reg_Op1(SYS_ ## r), \
|
||||
sys_reg_CRn(SYS_ ## r), \
|
||||
sys_reg_CRm(SYS_ ## r), \
|
||||
sys_reg_Op2(SYS_ ## r))
|
||||
|
||||
#endif /* __ARM64_KVM_SYS_REGS_LOCAL_H__ */
|
||||
|
|
|
@ -297,8 +297,11 @@ void vcpu_set_ich_hcr(struct kvm_vcpu *vcpu)
|
|||
{
|
||||
struct vgic_v3_cpu_if *vgic_v3 = &vcpu->arch.vgic_cpu.vgic_v3;
|
||||
|
||||
if (!vgic_is_v3(vcpu->kvm))
|
||||
return;
|
||||
|
||||
/* Hide GICv3 sysreg if necessary */
|
||||
if (!kvm_has_gicv3(vcpu->kvm)) {
|
||||
if (vcpu->kvm->arch.vgic.vgic_model == KVM_DEV_TYPE_ARM_VGIC_V2) {
|
||||
vgic_v3->vgic_hcr |= (ICH_HCR_EL2_TALL0 | ICH_HCR_EL2_TALL1 |
|
||||
ICH_HCR_EL2_TC);
|
||||
return;
|
||||
|
|
|
@ -108,16 +108,18 @@ void kvm_init_pmu_capability(const struct kvm_pmu_ops *pmu_ops)
|
|||
bool is_intel = boot_cpu_data.x86_vendor == X86_VENDOR_INTEL;
|
||||
int min_nr_gp_ctrs = pmu_ops->MIN_NR_GP_COUNTERS;
|
||||
|
||||
perf_get_x86_pmu_capability(&kvm_host_pmu);
|
||||
|
||||
/*
|
||||
* Hybrid PMUs don't play nice with virtualization without careful
|
||||
* configuration by userspace, and KVM's APIs for reporting supported
|
||||
* vPMU features do not account for hybrid PMUs. Disable vPMU support
|
||||
* for hybrid PMUs until KVM gains a way to let userspace opt-in.
|
||||
*/
|
||||
if (cpu_feature_enabled(X86_FEATURE_HYBRID_CPU))
|
||||
if (cpu_feature_enabled(X86_FEATURE_HYBRID_CPU)) {
|
||||
enable_pmu = false;
|
||||
memset(&kvm_host_pmu, 0, sizeof(kvm_host_pmu));
|
||||
} else {
|
||||
perf_get_x86_pmu_capability(&kvm_host_pmu);
|
||||
}
|
||||
|
||||
if (enable_pmu) {
|
||||
/*
|
||||
|
|
|
@ -13941,10 +13941,11 @@ bool kvm_arch_no_poll(struct kvm_vcpu *vcpu)
|
|||
|
||||
#ifdef CONFIG_KVM_GUEST_MEMFD
|
||||
/*
|
||||
* KVM doesn't yet support mmap() on guest_memfd for VMs with private memory
|
||||
* (the private vs. shared tracking needs to be moved into guest_memfd).
|
||||
* KVM doesn't yet support initializing guest_memfd memory as shared for VMs
|
||||
* with private memory (the private vs. shared tracking needs to be moved into
|
||||
* guest_memfd).
|
||||
*/
|
||||
bool kvm_arch_supports_gmem_mmap(struct kvm *kvm)
|
||||
bool kvm_arch_supports_gmem_init_shared(struct kvm *kvm)
|
||||
{
|
||||
return !kvm_arch_has_private_mem(kvm);
|
||||
}
|
||||
|
|
|
@ -51,8 +51,6 @@ struct arch_timer_vm_data {
|
|||
};
|
||||
|
||||
struct arch_timer_context {
|
||||
struct kvm_vcpu *vcpu;
|
||||
|
||||
/* Emulated Timer (may be unused) */
|
||||
struct hrtimer hrtimer;
|
||||
u64 ns_frac;
|
||||
|
@ -71,6 +69,9 @@ struct arch_timer_context {
|
|||
bool level;
|
||||
} irq;
|
||||
|
||||
/* Who am I? */
|
||||
enum kvm_arch_timers timer_id;
|
||||
|
||||
/* Duplicated state from arch_timer.c for convenience */
|
||||
u32 host_timer_irq;
|
||||
};
|
||||
|
@ -106,9 +107,6 @@ void kvm_timer_vcpu_terminate(struct kvm_vcpu *vcpu);
|
|||
|
||||
void kvm_timer_init_vm(struct kvm *kvm);
|
||||
|
||||
u64 kvm_arm_timer_get_reg(struct kvm_vcpu *, u64 regid);
|
||||
int kvm_arm_timer_set_reg(struct kvm_vcpu *, u64 regid, u64 value);
|
||||
|
||||
int kvm_arm_timer_set_attr(struct kvm_vcpu *vcpu, struct kvm_device_attr *attr);
|
||||
int kvm_arm_timer_get_attr(struct kvm_vcpu *vcpu, struct kvm_device_attr *attr);
|
||||
int kvm_arm_timer_has_attr(struct kvm_vcpu *vcpu, struct kvm_device_attr *attr);
|
||||
|
@ -127,9 +125,9 @@ void kvm_timer_init_vhe(void);
|
|||
#define vcpu_hvtimer(v) (&(v)->arch.timer_cpu.timers[TIMER_HVTIMER])
|
||||
#define vcpu_hptimer(v) (&(v)->arch.timer_cpu.timers[TIMER_HPTIMER])
|
||||
|
||||
#define arch_timer_ctx_index(ctx) ((ctx) - vcpu_timer((ctx)->vcpu)->timers)
|
||||
|
||||
#define timer_vm_data(ctx) (&(ctx)->vcpu->kvm->arch.timer_data)
|
||||
#define arch_timer_ctx_index(ctx) ((ctx)->timer_id)
|
||||
#define timer_context_to_vcpu(ctx) container_of((ctx), struct kvm_vcpu, arch.timer_cpu.timers[(ctx)->timer_id])
|
||||
#define timer_vm_data(ctx) (&(timer_context_to_vcpu(ctx)->kvm->arch.timer_data))
|
||||
#define timer_irq(ctx) (timer_vm_data(ctx)->ppi[arch_timer_ctx_index(ctx)])
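The timer_context_to_vcpu() helper above works because each context now records its own index (timer_id) inside the vcpu's timer array, so container_of() can walk back to the enclosing kvm_vcpu. A stand-alone sketch of the same pattern in plain C (illustrative only, not kernel code; the struct names are invented for the example):

    #include <assert.h>
    #include <stddef.h>

    struct timer_ctx { int timer_id; };

    struct vcpu {
            int id;
            struct timer_ctx timers[4];
    };

    /* Step back to timers[0] using the stored index, then strip the offset of
     * the array inside the parent structure. */
    #define ctx_to_vcpu(ctx) \
            ((struct vcpu *)((char *)((ctx) - (ctx)->timer_id) - \
                             offsetof(struct vcpu, timers)))

    int main(void)
    {
            struct vcpu v = { .id = 7 };

            for (int i = 0; i < 4; i++)
                    v.timers[i].timer_id = i;

            /* Every embedded context can recover its owning vcpu. */
            assert(ctx_to_vcpu(&v.timers[2]) == &v);
            return 0;
    }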
|
||||
|
||||
u64 kvm_arm_timer_read_sysreg(struct kvm_vcpu *vcpu,
|
||||
|
@ -178,4 +176,14 @@ static inline u64 timer_get_offset(struct arch_timer_context *ctxt)
|
|||
return offset;
|
||||
}
|
||||
|
||||
static inline void timer_set_offset(struct arch_timer_context *ctxt, u64 offset)
|
||||
{
|
||||
if (!ctxt->offset.vm_offset) {
|
||||
WARN(offset, "timer %d\n", arch_timer_ctx_index(ctxt));
|
||||
return;
|
||||
}
|
||||
|
||||
WRITE_ONCE(*ctxt->offset.vm_offset, offset);
|
||||
}
|
||||
|
||||
#endif
|
||||
|
|
|
@ -729,7 +729,17 @@ static inline bool kvm_arch_has_private_mem(struct kvm *kvm)
|
|||
#endif
|
||||
|
||||
#ifdef CONFIG_KVM_GUEST_MEMFD
|
||||
bool kvm_arch_supports_gmem_mmap(struct kvm *kvm);
|
||||
bool kvm_arch_supports_gmem_init_shared(struct kvm *kvm);
|
||||
|
||||
static inline u64 kvm_gmem_get_supported_flags(struct kvm *kvm)
|
||||
{
|
||||
u64 flags = GUEST_MEMFD_FLAG_MMAP;
|
||||
|
||||
if (!kvm || kvm_arch_supports_gmem_init_shared(kvm))
|
||||
flags |= GUEST_MEMFD_FLAG_INIT_SHARED;
|
||||
|
||||
return flags;
|
||||
}
|
||||
#endif
|
||||
|
||||
#ifndef kvm_arch_has_readonly_mem
|
||||
|
|
|
@ -962,7 +962,7 @@ struct kvm_enable_cap {
|
|||
#define KVM_CAP_ARM_EL2_E2H0 241
|
||||
#define KVM_CAP_RISCV_MP_STATE_RESET 242
|
||||
#define KVM_CAP_ARM_CACHEABLE_PFNMAP_SUPPORTED 243
|
||||
#define KVM_CAP_GUEST_MEMFD_MMAP 244
|
||||
#define KVM_CAP_GUEST_MEMFD_FLAGS 244
|
||||
|
||||
struct kvm_irq_routing_irqchip {
|
||||
__u32 irqchip;
|
||||
|
@ -1599,7 +1599,8 @@ struct kvm_memory_attributes {
|
|||
#define KVM_MEMORY_ATTRIBUTE_PRIVATE (1ULL << 3)
|
||||
|
||||
#define KVM_CREATE_GUEST_MEMFD _IOWR(KVMIO, 0xd4, struct kvm_create_guest_memfd)
|
||||
#define GUEST_MEMFD_FLAG_MMAP (1ULL << 0)
|
||||
#define GUEST_MEMFD_FLAG_MMAP (1ULL << 0)
|
||||
#define GUEST_MEMFD_FLAG_INIT_SHARED (1ULL << 1)
|
||||
|
||||
struct kvm_create_guest_memfd {
|
||||
__u64 size;
|
||||
|
|
|
@ -1020,7 +1020,7 @@ static void set_counter_defaults(void)
|
|||
{
|
||||
const uint64_t MIN_ROLLOVER_SECS = 40ULL * 365 * 24 * 3600;
|
||||
uint64_t freq = read_sysreg(CNTFRQ_EL0);
|
||||
uint64_t width = ilog2(MIN_ROLLOVER_SECS * freq);
|
||||
int width = ilog2(MIN_ROLLOVER_SECS * freq);
|
||||
|
||||
width = clamp(width, 56, 64);
|
||||
CVAL_MAX = GENMASK_ULL(width - 1, 0);
|
||||
|
|
|
@ -359,6 +359,44 @@ static void test_mmio_ease(void)
|
|||
kvm_vm_free(vm);
|
||||
}
|
||||
|
||||
static void test_serror_amo_guest(void)
|
||||
{
|
||||
/*
|
||||
* The ISB is entirely unnecessary (and highlights how FEAT_NV2 is borked)
|
||||
* since the write is redirected to memory. But don't write (intentionally)
|
||||
* broken code!
|
||||
*/
|
||||
sysreg_clear_set(hcr_el2, HCR_EL2_AMO | HCR_EL2_TGE, 0);
|
||||
isb();
|
||||
|
||||
GUEST_SYNC(0);
|
||||
GUEST_ASSERT(read_sysreg(isr_el1) & ISR_EL1_A);
|
||||
|
||||
/*
|
||||
* KVM treats the effective value of AMO as 1 when
|
||||
* HCR_EL2.{E2H,TGE} = {1, 0}, meaning the SError will be taken when
|
||||
* unmasked.
|
||||
*/
|
||||
local_serror_enable();
|
||||
isb();
|
||||
local_serror_disable();
|
||||
|
||||
GUEST_FAIL("Should've taken pending SError exception");
|
||||
}
|
||||
|
||||
static void test_serror_amo(void)
|
||||
{
|
||||
struct kvm_vcpu *vcpu;
|
||||
struct kvm_vm *vm = vm_create_with_dabt_handler(&vcpu, test_serror_amo_guest,
|
||||
unexpected_dabt_handler);
|
||||
|
||||
vm_install_exception_handler(vm, VECTOR_ERROR_CURRENT, expect_serror_handler);
|
||||
vcpu_run_expect_sync(vcpu);
|
||||
vcpu_inject_serror(vcpu);
|
||||
vcpu_run_expect_done(vcpu);
|
||||
kvm_vm_free(vm);
|
||||
}
|
||||
|
||||
int main(void)
|
||||
{
|
||||
test_mmio_abort();
|
||||
|
@ -369,4 +407,9 @@ int main(void)
|
|||
test_serror_emulated();
|
||||
test_mmio_ease();
|
||||
test_s1ptw_abort();
|
||||
|
||||
if (!test_supports_el2())
|
||||
return 0;
|
||||
|
||||
test_serror_amo();
|
||||
}
|
||||
|
|
|
@@ -65,6 +65,9 @@ static struct feature_id_reg feat_id_regs[] = {
	REG_FEAT(SCTLR2_EL1, ID_AA64MMFR3_EL1, SCTLRX, IMP),
	REG_FEAT(VDISR_EL2, ID_AA64PFR0_EL1, RAS, IMP),
	REG_FEAT(VSESR_EL2, ID_AA64PFR0_EL1, RAS, IMP),
	REG_FEAT(VNCR_EL2, ID_AA64MMFR4_EL1, NV_frac, NV2_ONLY),
	REG_FEAT(CNTHV_CTL_EL2, ID_AA64MMFR1_EL1, VH, IMP),
	REG_FEAT(CNTHV_CVAL_EL2, ID_AA64MMFR1_EL1, VH, IMP),
};

bool filter_reg(__u64 reg)

@@ -345,9 +348,20 @@ static __u64 base_regs[] = {
	KVM_REG_ARM_FW_FEAT_BMAP_REG(1),	/* KVM_REG_ARM_STD_HYP_BMAP */
	KVM_REG_ARM_FW_FEAT_BMAP_REG(2),	/* KVM_REG_ARM_VENDOR_HYP_BMAP */
	KVM_REG_ARM_FW_FEAT_BMAP_REG(3),	/* KVM_REG_ARM_VENDOR_HYP_BMAP_2 */
	ARM64_SYS_REG(3, 3, 14, 3, 1),	/* CNTV_CTL_EL0 */
	ARM64_SYS_REG(3, 3, 14, 3, 2),	/* CNTV_CVAL_EL0 */
	ARM64_SYS_REG(3, 3, 14, 0, 2),

	/*
	 * EL0 Virtual Timer Registers
	 *
	 * WARNING:
	 * KVM_REG_ARM_TIMER_CVAL and KVM_REG_ARM_TIMER_CNT are not defined
	 * with the appropriate register encodings. Their values have been
	 * accidentally swapped. As this is set API, the definitions here
	 * must be used, rather than ones derived from the encodings.
	 */
	KVM_ARM64_SYS_REG(SYS_CNTV_CTL_EL0),
	KVM_REG_ARM_TIMER_CVAL,
	KVM_REG_ARM_TIMER_CNT,

	ARM64_SYS_REG(3, 0, 0, 0, 0),	/* MIDR_EL1 */
	ARM64_SYS_REG(3, 0, 0, 0, 6),	/* REVIDR_EL1 */
	ARM64_SYS_REG(3, 1, 0, 0, 1),	/* CLIDR_EL1 */

@@ -755,6 +769,10 @@ static __u64 el2_regs[] = {
	SYS_REG(VSESR_EL2),
};

static __u64 el2_e2h0_regs[] = {
	/* Empty */
};

#define BASE_SUBLIST \
	{ "base", .regs = base_regs, .regs_n = ARRAY_SIZE(base_regs), }
#define VREGS_SUBLIST \

@@ -789,6 +807,15 @@ static __u64 el2_regs[] = {
		.regs = el2_regs, \
		.regs_n = ARRAY_SIZE(el2_regs), \
	}
#define EL2_E2H0_SUBLIST \
	EL2_SUBLIST, \
	{ \
		.name = "EL2 E2H0", \
		.capability = KVM_CAP_ARM_EL2_E2H0, \
		.feature = KVM_ARM_VCPU_HAS_EL2_E2H0, \
		.regs = el2_e2h0_regs, \
		.regs_n = ARRAY_SIZE(el2_e2h0_regs), \
	}

static struct vcpu_reg_list vregs_config = {
	.sublists = {

@@ -897,6 +924,65 @@ static struct vcpu_reg_list el2_pauth_pmu_config = {
	},
};

static struct vcpu_reg_list el2_e2h0_vregs_config = {
	.sublists = {
		BASE_SUBLIST,
		EL2_E2H0_SUBLIST,
		VREGS_SUBLIST,
		{0},
	},
};

static struct vcpu_reg_list el2_e2h0_vregs_pmu_config = {
	.sublists = {
		BASE_SUBLIST,
		EL2_E2H0_SUBLIST,
		VREGS_SUBLIST,
		PMU_SUBLIST,
		{0},
	},
};

static struct vcpu_reg_list el2_e2h0_sve_config = {
	.sublists = {
		BASE_SUBLIST,
		EL2_E2H0_SUBLIST,
		SVE_SUBLIST,
		{0},
	},
};

static struct vcpu_reg_list el2_e2h0_sve_pmu_config = {
	.sublists = {
		BASE_SUBLIST,
		EL2_E2H0_SUBLIST,
		SVE_SUBLIST,
		PMU_SUBLIST,
		{0},
	},
};

static struct vcpu_reg_list el2_e2h0_pauth_config = {
	.sublists = {
		BASE_SUBLIST,
		EL2_E2H0_SUBLIST,
		VREGS_SUBLIST,
		PAUTH_SUBLIST,
		{0},
	},
};

static struct vcpu_reg_list el2_e2h0_pauth_pmu_config = {
	.sublists = {
		BASE_SUBLIST,
		EL2_E2H0_SUBLIST,
		VREGS_SUBLIST,
		PAUTH_SUBLIST,
		PMU_SUBLIST,
		{0},
	},
};

struct vcpu_reg_list *vcpu_configs[] = {
	&vregs_config,
	&vregs_pmu_config,

@@ -911,5 +997,12 @@ struct vcpu_reg_list *vcpu_configs[] = {
	&el2_sve_pmu_config,
	&el2_pauth_config,
	&el2_pauth_pmu_config,

	&el2_e2h0_vregs_config,
	&el2_e2h0_vregs_pmu_config,
	&el2_e2h0_sve_config,
	&el2_e2h0_sve_pmu_config,
	&el2_e2h0_pauth_config,
	&el2_e2h0_pauth_pmu_config,
};
int vcpu_configs_n = ARRAY_SIZE(vcpu_configs);

@@ -249,11 +249,14 @@ static void guest_code(void)
	GUEST_REG_SYNC(SYS_ID_AA64ISAR2_EL1);
	GUEST_REG_SYNC(SYS_ID_AA64ISAR3_EL1);
	GUEST_REG_SYNC(SYS_ID_AA64PFR0_EL1);
	GUEST_REG_SYNC(SYS_ID_AA64PFR1_EL1);
	GUEST_REG_SYNC(SYS_ID_AA64MMFR0_EL1);
	GUEST_REG_SYNC(SYS_ID_AA64MMFR1_EL1);
	GUEST_REG_SYNC(SYS_ID_AA64MMFR2_EL1);
	GUEST_REG_SYNC(SYS_ID_AA64MMFR3_EL1);
	GUEST_REG_SYNC(SYS_ID_AA64ZFR0_EL1);
	GUEST_REG_SYNC(SYS_MPIDR_EL1);
	GUEST_REG_SYNC(SYS_CLIDR_EL1);
	GUEST_REG_SYNC(SYS_CTR_EL0);
	GUEST_REG_SYNC(SYS_MIDR_EL1);
	GUEST_REG_SYNC(SYS_REVIDR_EL1);

@@ -123,6 +123,7 @@ static void guest_setup_gic(void)
static void guest_code(size_t nr_lpis)
{
	guest_setup_gic();
	local_irq_enable();

	GUEST_SYNC(0);

@@ -331,7 +332,7 @@ static void setup_vm(void)
{
	int i;

	vcpus = malloc(test_data.nr_cpus * sizeof(struct kvm_vcpu));
	vcpus = malloc(test_data.nr_cpus * sizeof(struct kvm_vcpu *));
	TEST_ASSERT(vcpus, "Failed to allocate vCPU array");

	vm = vm_create_with_vcpus(test_data.nr_cpus, guest_code, vcpus);

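The vgic_lpi_stress fix above is easy to miss in the noise: vcpus is an array of pointers, so the allocation has to be sized by sizeof(struct kvm_vcpu *), not sizeof(struct kvm_vcpu). A minimal standalone sketch of the safer idiom, with illustrative names that are not taken from the selftest:

#include <stdlib.h>

struct vcpu { long state[64]; };

/* Size the allocation from the array variable itself (sizeof(*array)),
 * which sidesteps the struct-vs-pointer mixup the hunk above corrects. */
static struct vcpu **alloc_vcpu_array(size_t nr)
{
	return malloc(nr * sizeof(*((struct vcpu **)0)) * 0 + nr * sizeof(struct vcpu *));
}

A cleaner way to write the same thing, and the form usually preferred in kernel code, is simply: struct vcpu **array = malloc(nr * sizeof(*array));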
@@ -14,8 +14,6 @@
#include <linux/bitmap.h>
#include <linux/falloc.h>
#include <linux/sizes.h>
#include <setjmp.h>
#include <signal.h>
#include <sys/mman.h>
#include <sys/types.h>
#include <sys/stat.h>

@@ -24,7 +22,9 @@
#include "test_util.h"
#include "ucall_common.h"

static void test_file_read_write(int fd)
static size_t page_size;

static void test_file_read_write(int fd, size_t total_size)
{
	char buf[64];

@@ -38,18 +38,22 @@ static void test_file_read_write(int fd)
		    "pwrite on a guest_mem fd should fail");
}

static void test_mmap_supported(int fd, size_t page_size, size_t total_size)
static void test_mmap_cow(int fd, size_t size)
{
	void *mem;

	mem = mmap(NULL, size, PROT_READ | PROT_WRITE, MAP_PRIVATE, fd, 0);
	TEST_ASSERT(mem == MAP_FAILED, "Copy-on-write not allowed by guest_memfd.");
}

static void test_mmap_supported(int fd, size_t total_size)
{
	const char val = 0xaa;
	char *mem;
	size_t i;
	int ret;

	mem = mmap(NULL, total_size, PROT_READ | PROT_WRITE, MAP_PRIVATE, fd, 0);
	TEST_ASSERT(mem == MAP_FAILED, "Copy-on-write not allowed by guest_memfd.");

	mem = mmap(NULL, total_size, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
	TEST_ASSERT(mem != MAP_FAILED, "mmap() for guest_memfd should succeed.");
	mem = kvm_mmap(total_size, PROT_READ | PROT_WRITE, MAP_SHARED, fd);

	memset(mem, val, total_size);
	for (i = 0; i < total_size; i++)

@@ -68,45 +72,37 @@ static void test_mmap_supported(int fd, size_t page_size, size_t total_size)
	for (i = 0; i < total_size; i++)
		TEST_ASSERT_EQ(READ_ONCE(mem[i]), val);

	ret = munmap(mem, total_size);
	TEST_ASSERT(!ret, "munmap() should succeed.");
	kvm_munmap(mem, total_size);
}

static sigjmp_buf jmpbuf;
void fault_sigbus_handler(int signum)
static void test_fault_sigbus(int fd, size_t accessible_size, size_t map_size)
{
	siglongjmp(jmpbuf, 1);
}

static void test_fault_overflow(int fd, size_t page_size, size_t total_size)
{
	struct sigaction sa_old, sa_new = {
		.sa_handler = fault_sigbus_handler,
	};
	size_t map_size = total_size * 4;
	const char val = 0xaa;
	char *mem;
	size_t i;
	int ret;

	mem = mmap(NULL, map_size, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
	TEST_ASSERT(mem != MAP_FAILED, "mmap() for guest_memfd should succeed.");
	mem = kvm_mmap(map_size, PROT_READ | PROT_WRITE, MAP_SHARED, fd);

	sigaction(SIGBUS, &sa_new, &sa_old);
	if (sigsetjmp(jmpbuf, 1) == 0) {
		memset(mem, 0xaa, map_size);
		TEST_ASSERT(false, "memset() should have triggered SIGBUS.");
	}
	sigaction(SIGBUS, &sa_old, NULL);
	TEST_EXPECT_SIGBUS(memset(mem, val, map_size));
	TEST_EXPECT_SIGBUS((void)READ_ONCE(mem[accessible_size]));

	for (i = 0; i < total_size; i++)
	for (i = 0; i < accessible_size; i++)
		TEST_ASSERT_EQ(READ_ONCE(mem[i]), val);

	ret = munmap(mem, map_size);
	TEST_ASSERT(!ret, "munmap() should succeed.");
	kvm_munmap(mem, map_size);
}

static void test_mmap_not_supported(int fd, size_t page_size, size_t total_size)
static void test_fault_overflow(int fd, size_t total_size)
{
	test_fault_sigbus(fd, total_size, total_size * 4);
}

static void test_fault_private(int fd, size_t total_size)
{
	test_fault_sigbus(fd, 0, total_size);
}

static void test_mmap_not_supported(int fd, size_t total_size)
{
	char *mem;

@@ -117,7 +113,7 @@ static void test_mmap_not_supported(int fd, size_t page_size, size_t total_size)
	TEST_ASSERT_EQ(mem, MAP_FAILED);
}

static void test_file_size(int fd, size_t page_size, size_t total_size)
static void test_file_size(int fd, size_t total_size)
{
	struct stat sb;
	int ret;

@@ -128,7 +124,7 @@ static void test_file_size(int fd, size_t page_size, size_t total_size)
	TEST_ASSERT_EQ(sb.st_blksize, page_size);
}

static void test_fallocate(int fd, size_t page_size, size_t total_size)
static void test_fallocate(int fd, size_t total_size)
{
	int ret;

@@ -165,7 +161,7 @@ static void test_fallocate(int fd, size_t page_size, size_t total_size)
	TEST_ASSERT(!ret, "fallocate to restore punched hole should succeed");
}

static void test_invalid_punch_hole(int fd, size_t page_size, size_t total_size)
static void test_invalid_punch_hole(int fd, size_t total_size)
{
	struct {
		off_t offset;

@@ -196,8 +192,7 @@ static void test_invalid_punch_hole(int fd, size_t page_size, size_t total_size)
}

static void test_create_guest_memfd_invalid_sizes(struct kvm_vm *vm,
						  uint64_t guest_memfd_flags,
						  size_t page_size)
						  uint64_t guest_memfd_flags)
{
	size_t size;
	int fd;

@@ -214,7 +209,6 @@ static void test_create_guest_memfd_multiple(struct kvm_vm *vm)
{
	int fd1, fd2, ret;
	struct stat st1, st2;
	size_t page_size = getpagesize();

	fd1 = __vm_create_guest_memfd(vm, page_size, 0);
	TEST_ASSERT(fd1 != -1, "memfd creation should succeed");

@@ -239,9 +233,9 @@ static void test_create_guest_memfd_multiple(struct kvm_vm *vm)
	close(fd1);
}

static void test_guest_memfd_flags(struct kvm_vm *vm, uint64_t valid_flags)
static void test_guest_memfd_flags(struct kvm_vm *vm)
{
	size_t page_size = getpagesize();
	uint64_t valid_flags = vm_check_cap(vm, KVM_CAP_GUEST_MEMFD_FLAGS);
	uint64_t flag;
	int fd;

@@ -260,43 +254,57 @@ static void test_guest_memfd_flags(struct kvm_vm *vm, uint64_t valid_flags)
	}
}

static void test_guest_memfd(unsigned long vm_type)
#define gmem_test(__test, __vm, __flags) \
do { \
	int fd = vm_create_guest_memfd(__vm, page_size * 4, __flags); \
	\
	test_##__test(fd, page_size * 4); \
	close(fd); \
} while (0)

static void __test_guest_memfd(struct kvm_vm *vm, uint64_t flags)
{
	uint64_t flags = 0;
	struct kvm_vm *vm;
	size_t total_size;
	size_t page_size;
	int fd;

	page_size = getpagesize();
	total_size = page_size * 4;

	vm = vm_create_barebones_type(vm_type);

	if (vm_check_cap(vm, KVM_CAP_GUEST_MEMFD_MMAP))
		flags |= GUEST_MEMFD_FLAG_MMAP;

	test_create_guest_memfd_multiple(vm);
	test_create_guest_memfd_invalid_sizes(vm, flags, page_size);
	test_create_guest_memfd_invalid_sizes(vm, flags);

	fd = vm_create_guest_memfd(vm, total_size, flags);

	test_file_read_write(fd);
	gmem_test(file_read_write, vm, flags);

	if (flags & GUEST_MEMFD_FLAG_MMAP) {
		test_mmap_supported(fd, page_size, total_size);
		test_fault_overflow(fd, page_size, total_size);
		if (flags & GUEST_MEMFD_FLAG_INIT_SHARED) {
			gmem_test(mmap_supported, vm, flags);
			gmem_test(fault_overflow, vm, flags);
		} else {
			gmem_test(fault_private, vm, flags);
		}

		gmem_test(mmap_cow, vm, flags);
	} else {
		test_mmap_not_supported(fd, page_size, total_size);
		gmem_test(mmap_not_supported, vm, flags);
	}

	test_file_size(fd, page_size, total_size);
	test_fallocate(fd, page_size, total_size);
	test_invalid_punch_hole(fd, page_size, total_size);
	gmem_test(file_size, vm, flags);
	gmem_test(fallocate, vm, flags);
	gmem_test(invalid_punch_hole, vm, flags);
}

	test_guest_memfd_flags(vm, flags);
static void test_guest_memfd(unsigned long vm_type)
{
	struct kvm_vm *vm = vm_create_barebones_type(vm_type);
	uint64_t flags;

	test_guest_memfd_flags(vm);

	__test_guest_memfd(vm, 0);

	flags = vm_check_cap(vm, KVM_CAP_GUEST_MEMFD_FLAGS);
	if (flags & GUEST_MEMFD_FLAG_MMAP)
		__test_guest_memfd(vm, GUEST_MEMFD_FLAG_MMAP);

	/* MMAP should always be supported if INIT_SHARED is supported. */
	if (flags & GUEST_MEMFD_FLAG_INIT_SHARED)
		__test_guest_memfd(vm, GUEST_MEMFD_FLAG_MMAP |
				       GUEST_MEMFD_FLAG_INIT_SHARED);

	close(fd);
	kvm_vm_free(vm);
}

@@ -328,22 +336,26 @@ static void test_guest_memfd_guest(void)
	size_t size;
	int fd, i;

	if (!kvm_has_cap(KVM_CAP_GUEST_MEMFD_MMAP))
	if (!kvm_check_cap(KVM_CAP_GUEST_MEMFD_FLAGS))
		return;

	vm = __vm_create_shape_with_one_vcpu(VM_SHAPE_DEFAULT, &vcpu, 1, guest_code);

	TEST_ASSERT(vm_check_cap(vm, KVM_CAP_GUEST_MEMFD_MMAP),
		    "Default VM type should always support guest_memfd mmap()");
	TEST_ASSERT(vm_check_cap(vm, KVM_CAP_GUEST_MEMFD_FLAGS) & GUEST_MEMFD_FLAG_MMAP,
		    "Default VM type should support MMAP, supported flags = 0x%x",
		    vm_check_cap(vm, KVM_CAP_GUEST_MEMFD_FLAGS));
	TEST_ASSERT(vm_check_cap(vm, KVM_CAP_GUEST_MEMFD_FLAGS) & GUEST_MEMFD_FLAG_INIT_SHARED,
		    "Default VM type should support INIT_SHARED, supported flags = 0x%x",
		    vm_check_cap(vm, KVM_CAP_GUEST_MEMFD_FLAGS));

	size = vm->page_size;
	fd = vm_create_guest_memfd(vm, size, GUEST_MEMFD_FLAG_MMAP);
	fd = vm_create_guest_memfd(vm, size, GUEST_MEMFD_FLAG_MMAP |
					     GUEST_MEMFD_FLAG_INIT_SHARED);
	vm_set_user_memory_region2(vm, slot, KVM_MEM_GUEST_MEMFD, gpa, size, NULL, fd, 0);

	mem = mmap(NULL, size, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
	TEST_ASSERT(mem != MAP_FAILED, "mmap() on guest_memfd failed");
	mem = kvm_mmap(size, PROT_READ | PROT_WRITE, MAP_SHARED, fd);
	memset(mem, 0xaa, size);
	munmap(mem, size);
	kvm_munmap(mem, size);

	virt_pg_map(vm, gpa, gpa);
	vcpu_args_set(vcpu, 2, gpa, size);

@@ -351,8 +363,7 @@ static void test_guest_memfd_guest(void)

	TEST_ASSERT_EQ(get_ucall(vcpu, NULL), UCALL_DONE);

	mem = mmap(NULL, size, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
	TEST_ASSERT(mem != MAP_FAILED, "mmap() on guest_memfd failed");
	mem = kvm_mmap(size, PROT_READ | PROT_WRITE, MAP_SHARED, fd);
	for (i = 0; i < size; i++)
		TEST_ASSERT_EQ(mem[i], 0xff);

@@ -366,6 +377,8 @@ int main(int argc, char *argv[])

	TEST_REQUIRE(kvm_has_cap(KVM_CAP_GUEST_MEMFD));

	page_size = getpagesize();

	/*
	 * Not all architectures support KVM_CAP_VM_TYPES. However, those that
	 * support guest_memfd have that support for the default VM type.

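The behavioural split the reworked test enforces: with GUEST_MEMFD_FLAG_MMAP alone, mmap() succeeds but any host access to the (still private) memory faults with SIGBUS; only MMAP together with INIT_SHARED makes the mapping host-accessible. A compact sketch of that expectation, mirroring test_fault_private() above (hypothetical snippet, not one of the hunks):

	/* fd was created with GUEST_MEMFD_FLAG_MMAP but without INIT_SHARED. */
	char *mem = kvm_mmap(size, PROT_READ | PROT_WRITE, MAP_SHARED, fd);

	/* The mapping exists, but faulting private memory from the host must fail. */
	TEST_EXPECT_SIGBUS(*mem = 0xaa);

	kvm_munmap(mem, size);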
@@ -305,7 +305,17 @@ void test_wants_mte(void);
void test_disable_default_vgic(void);

bool vm_supports_el2(struct kvm_vm *vm);
static bool vcpu_has_el2(struct kvm_vcpu *vcpu)

static inline bool test_supports_el2(void)
{
	struct kvm_vm *vm = vm_create(1);
	bool supported = vm_supports_el2(vm);

	kvm_vm_free(vm);
	return supported;
}

static inline bool vcpu_has_el2(struct kvm_vcpu *vcpu)
{
	return vcpu->init.features[0] & BIT(KVM_ARM_VCPU_HAS_EL2);
}

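The helper spins up and frees a throwaway VM purely to probe EL2 support, so callers can gate whole testcases on it without plumbing a VM around. A hypothetical minimal consumer, doing what external_aborts.c's main() does above:

int main(void)
{
	/* Skip the EL2-only cases when nested virt isn't available. */
	if (!test_supports_el2())
		return 0;

	/* ... EL2-specific testcases would run here ... */
	return 0;
}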
@@ -286,6 +286,31 @@ static inline bool kvm_has_cap(long cap)
#define __KVM_SYSCALL_ERROR(_name, _ret) \
	"%s failed, rc: %i errno: %i (%s)", (_name), (_ret), errno, strerror(errno)

static inline void *__kvm_mmap(size_t size, int prot, int flags, int fd,
			       off_t offset)
{
	void *mem;

	mem = mmap(NULL, size, prot, flags, fd, offset);
	TEST_ASSERT(mem != MAP_FAILED, __KVM_SYSCALL_ERROR("mmap()",
							   (int)(unsigned long)MAP_FAILED));

	return mem;
}

static inline void *kvm_mmap(size_t size, int prot, int flags, int fd)
{
	return __kvm_mmap(size, prot, flags, fd, 0);
}

static inline void kvm_munmap(void *mem, size_t size)
{
	int ret;

	ret = munmap(mem, size);
	TEST_ASSERT(!ret, __KVM_SYSCALL_ERROR("munmap()", ret));
}

/*
 * Use the "inner", double-underscore macro when reporting errors from within
 * other macros so that the name of ioctl() and not its literal numeric value

@@ -1273,4 +1298,6 @@ bool vm_is_gpa_protected(struct kvm_vm *vm, vm_paddr_t paddr);

uint32_t guest_get_vcpuid(void);

bool kvm_arch_has_default_irqchip(void);

#endif /* SELFTEST_KVM_UTIL_H */

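Most of the remaining selftest hunks in this pull are mechanical conversions to these helpers. The shape of the conversion, sketched on a hypothetical call site rather than any specific hunk below:

	/* Before: every caller asserted on MAP_FAILED itself. */
	mem = mmap(NULL, size, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
	TEST_ASSERT(mem != MAP_FAILED,
		    __KVM_SYSCALL_ERROR("mmap()", (int)(unsigned long)MAP_FAILED));

	/* After: kvm_mmap() asserts internally and always maps at offset 0;
	 * __kvm_mmap() is the variant for callers that need a file offset. */
	mem = kvm_mmap(size, PROT_READ | PROT_WRITE, MAP_SHARED, fd);
	...
	kvm_munmap(mem, size);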
@@ -8,6 +8,8 @@
#ifndef SELFTEST_KVM_TEST_UTIL_H
#define SELFTEST_KVM_TEST_UTIL_H

#include <setjmp.h>
#include <signal.h>
#include <stdlib.h>
#include <stdarg.h>
#include <stdbool.h>

@@ -78,6 +80,23 @@ do { \
	__builtin_unreachable(); \
} while (0)

extern sigjmp_buf expect_sigbus_jmpbuf;
void expect_sigbus_handler(int signum);

#define TEST_EXPECT_SIGBUS(action) \
do { \
	struct sigaction sa_old, sa_new = { \
		.sa_handler = expect_sigbus_handler, \
	}; \
	\
	sigaction(SIGBUS, &sa_new, &sa_old); \
	if (sigsetjmp(expect_sigbus_jmpbuf, 1) == 0) { \
		action; \
		TEST_FAIL("'%s' should have triggered SIGBUS", #action); \
	} \
	sigaction(SIGBUS, &sa_old, NULL); \
} while (0)

size_t parse_size(const char *size);

int64_t timespec_to_ns(struct timespec ts);

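Usage is a one-liner wrapped around the access that is expected to fault; the macro installs a temporary SIGBUS handler, longjmps back on the signal, and fails the test if the action completes without one. The guest_memfd test above uses it exactly like this:

	TEST_EXPECT_SIGBUS(memset(mem, val, map_size));
	TEST_EXPECT_SIGBUS((void)READ_ONCE(mem[accessible_size]));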
@@ -89,11 +89,19 @@ static void juggle_eventfd_primary(struct kvm_vm *vm, int eventfd)
int main(int argc, char *argv[])
{
	pthread_t racing_thread;
	struct kvm_vcpu *unused;
	int r, i;

	/* Create "full" VMs, as KVM_IRQFD requires an in-kernel IRQ chip. */
	vm1 = vm_create(1);
	vm2 = vm_create(1);
	TEST_REQUIRE(kvm_arch_has_default_irqchip());

	/*
	 * Create "full" VMs, as KVM_IRQFD requires an in-kernel IRQ chip. Also
	 * create an unused vCPU as certain architectures (like arm64) need to
	 * complete IRQ chip initialization after all possible vCPUs for a VM
	 * have been created.
	 */
	vm1 = vm_create_with_one_vcpu(&unused, NULL);
	vm2 = vm_create_with_one_vcpu(&unused, NULL);

	WRITE_ONCE(__eventfd, kvm_new_eventfd());

@@ -725,3 +725,8 @@ void kvm_arch_vm_release(struct kvm_vm *vm)
	if (vm->arch.has_gic)
		close(vm->arch.gic_fd);
}

bool kvm_arch_has_default_irqchip(void)
{
	return request_vgic && kvm_supports_vgic_v3();
}

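kvm_arch_has_default_irqchip() follows the usual selftests pattern: a __weak default in common code (see the kvm_util.c hunk further down) that returns false, overridden by a strong per-arch definition where an in-kernel IRQ chip is the norm. A generic sketch of the mechanism, independent of KVM:

/* common.c: weak fallback, used when no arch file provides an override. */
__attribute__((weak)) bool arch_has_feature(void)
{
	return false;
}

/* arch.c (separate translation unit): the strong definition silently
 * replaces the weak one at link time. */
bool arch_has_feature(void)
{
	return true;
}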
@@ -741,13 +741,11 @@ static void vm_vcpu_rm(struct kvm_vm *vm, struct kvm_vcpu *vcpu)
	int ret;

	if (vcpu->dirty_gfns) {
		ret = munmap(vcpu->dirty_gfns, vm->dirty_ring_size);
		TEST_ASSERT(!ret, __KVM_SYSCALL_ERROR("munmap()", ret));
		kvm_munmap(vcpu->dirty_gfns, vm->dirty_ring_size);
		vcpu->dirty_gfns = NULL;
	}

	ret = munmap(vcpu->run, vcpu_mmap_sz());
	TEST_ASSERT(!ret, __KVM_SYSCALL_ERROR("munmap()", ret));
	kvm_munmap(vcpu->run, vcpu_mmap_sz());

	ret = close(vcpu->fd);
	TEST_ASSERT(!ret, __KVM_SYSCALL_ERROR("close()", ret));

@@ -783,20 +781,16 @@ void kvm_vm_release(struct kvm_vm *vmp)
static void __vm_mem_region_delete(struct kvm_vm *vm,
				   struct userspace_mem_region *region)
{
	int ret;

	rb_erase(&region->gpa_node, &vm->regions.gpa_tree);
	rb_erase(&region->hva_node, &vm->regions.hva_tree);
	hash_del(&region->slot_node);

	sparsebit_free(&region->unused_phy_pages);
	sparsebit_free(&region->protected_phy_pages);
	ret = munmap(region->mmap_start, region->mmap_size);
	TEST_ASSERT(!ret, __KVM_SYSCALL_ERROR("munmap()", ret));
	kvm_munmap(region->mmap_start, region->mmap_size);
	if (region->fd >= 0) {
		/* There's an extra map when using shared memory. */
		ret = munmap(region->mmap_alias, region->mmap_size);
		TEST_ASSERT(!ret, __KVM_SYSCALL_ERROR("munmap()", ret));
		kvm_munmap(region->mmap_alias, region->mmap_size);
		close(region->fd);
	}
	if (region->region.guest_memfd >= 0)

@@ -1053,12 +1047,9 @@ void vm_mem_add(struct kvm_vm *vm, enum vm_mem_backing_src_type src_type,
	region->fd = kvm_memfd_alloc(region->mmap_size,
				     src_type == VM_MEM_SRC_SHARED_HUGETLB);

	region->mmap_start = mmap(NULL, region->mmap_size,
				  PROT_READ | PROT_WRITE,
				  vm_mem_backing_src_alias(src_type)->flag,
				  region->fd, 0);
	TEST_ASSERT(region->mmap_start != MAP_FAILED,
		    __KVM_SYSCALL_ERROR("mmap()", (int)(unsigned long)MAP_FAILED));
	region->mmap_start = kvm_mmap(region->mmap_size, PROT_READ | PROT_WRITE,
				      vm_mem_backing_src_alias(src_type)->flag,
				      region->fd);

	TEST_ASSERT(!is_backing_src_hugetlb(src_type) ||
		    region->mmap_start == align_ptr_up(region->mmap_start, backing_src_pagesz),

@@ -1129,12 +1120,10 @@ void vm_mem_add(struct kvm_vm *vm, enum vm_mem_backing_src_type src_type,

	/* If shared memory, create an alias. */
	if (region->fd >= 0) {
		region->mmap_alias = mmap(NULL, region->mmap_size,
					  PROT_READ | PROT_WRITE,
					  vm_mem_backing_src_alias(src_type)->flag,
					  region->fd, 0);
		TEST_ASSERT(region->mmap_alias != MAP_FAILED,
			    __KVM_SYSCALL_ERROR("mmap()", (int)(unsigned long)MAP_FAILED));
		region->mmap_alias = kvm_mmap(region->mmap_size,
					      PROT_READ | PROT_WRITE,
					      vm_mem_backing_src_alias(src_type)->flag,
					      region->fd);

		/* Align host alias address */
		region->host_alias = align_ptr_up(region->mmap_alias, alignment);

@@ -1344,10 +1333,8 @@ struct kvm_vcpu *__vm_vcpu_add(struct kvm_vm *vm, uint32_t vcpu_id)
	TEST_ASSERT(vcpu_mmap_sz() >= sizeof(*vcpu->run), "vcpu mmap size "
		    "smaller than expected, vcpu_mmap_sz: %zi expected_min: %zi",
		    vcpu_mmap_sz(), sizeof(*vcpu->run));
	vcpu->run = (struct kvm_run *) mmap(NULL, vcpu_mmap_sz(),
					    PROT_READ | PROT_WRITE, MAP_SHARED, vcpu->fd, 0);
	TEST_ASSERT(vcpu->run != MAP_FAILED,
		    __KVM_SYSCALL_ERROR("mmap()", (int)(unsigned long)MAP_FAILED));
	vcpu->run = kvm_mmap(vcpu_mmap_sz(), PROT_READ | PROT_WRITE,
			     MAP_SHARED, vcpu->fd);

	if (kvm_has_cap(KVM_CAP_BINARY_STATS_FD))
		vcpu->stats.fd = vcpu_get_stats_fd(vcpu);

@@ -1794,9 +1781,8 @@ void *vcpu_map_dirty_ring(struct kvm_vcpu *vcpu)
		    page_size * KVM_DIRTY_LOG_PAGE_OFFSET);
	TEST_ASSERT(addr == MAP_FAILED, "Dirty ring mapped exec");

	addr = mmap(NULL, size, PROT_READ | PROT_WRITE, MAP_SHARED, vcpu->fd,
		    page_size * KVM_DIRTY_LOG_PAGE_OFFSET);
	TEST_ASSERT(addr != MAP_FAILED, "Dirty ring map failed");
	addr = __kvm_mmap(size, PROT_READ | PROT_WRITE, MAP_SHARED, vcpu->fd,
			  page_size * KVM_DIRTY_LOG_PAGE_OFFSET);

	vcpu->dirty_gfns = addr;
	vcpu->dirty_gfns_count = size / sizeof(struct kvm_dirty_gfn);

@@ -2344,3 +2330,8 @@ bool vm_is_gpa_protected(struct kvm_vm *vm, vm_paddr_t paddr)
	pg = paddr >> vm->page_shift;
	return sparsebit_is_set(region->protected_phy_pages, pg);
}

__weak bool kvm_arch_has_default_irqchip(void)
{
	return false;
}

@@ -221,3 +221,8 @@ void vcpu_arch_dump(FILE *stream, struct kvm_vcpu *vcpu, uint8_t indent)
void assert_on_unhandled_exception(struct kvm_vcpu *vcpu)
{
}

bool kvm_arch_has_default_irqchip(void)
{
	return true;
}

@@ -18,6 +18,13 @@

#include "test_util.h"

sigjmp_buf expect_sigbus_jmpbuf;

void __attribute__((used)) expect_sigbus_handler(int signum)
{
	siglongjmp(expect_sigbus_jmpbuf, 1);
}

/*
 * Random number generator that is usable from guest code. This is the
 * Park-Miller LCG using standard constants.

@@ -1318,3 +1318,8 @@ bool sys_clocksource_is_based_on_tsc(void)

	return ret;
}

bool kvm_arch_has_default_irqchip(void)
{
	return true;
}

@@ -339,8 +339,7 @@ int main(int argc, char *argv[])
	TEST_ASSERT(max_gpa > (4 * slot_size), "MAXPHYADDR <4gb ");

	fd = kvm_memfd_alloc(slot_size, hugepages);
	mem = mmap(NULL, slot_size, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
	TEST_ASSERT(mem != MAP_FAILED, "mmap() failed");
	mem = kvm_mmap(slot_size, PROT_READ | PROT_WRITE, MAP_SHARED, fd);

	TEST_ASSERT(!madvise(mem, slot_size, MADV_NOHUGEPAGE), "madvise() failed");

@@ -413,7 +412,7 @@ int main(int argc, char *argv[])
	for (slot = (slot - 1) & ~1ull; slot >= first_slot; slot -= 2)
		vm_set_user_memory_region(vm, slot, 0, 0, 0, NULL);

	munmap(mem, slot_size / 2);
	kvm_munmap(mem, slot_size / 2);

	/* Sanity check that the vCPUs actually ran. */
	for (i = 0; i < nr_vcpus; i++)

@@ -10,6 +10,7 @@
#include <test_util.h>
#include <kvm_util.h>
#include <processor.h>
#include <pthread.h>

/* Arbitrarily chosen values */
#define TEST_SIZE (SZ_2M + PAGE_SIZE)

@@ -30,18 +31,66 @@ static void guest_code(uint64_t base_gpa)
	GUEST_DONE();
}

static void pre_fault_memory(struct kvm_vcpu *vcpu, u64 gpa, u64 size,
			     u64 left)
struct slot_worker_data {
	struct kvm_vm *vm;
	u64 gpa;
	uint32_t flags;
	bool worker_ready;
	bool prefault_ready;
	bool recreate_slot;
};

static void *delete_slot_worker(void *__data)
{
	struct slot_worker_data *data = __data;
	struct kvm_vm *vm = data->vm;

	WRITE_ONCE(data->worker_ready, true);

	while (!READ_ONCE(data->prefault_ready))
		cpu_relax();

	vm_mem_region_delete(vm, TEST_SLOT);

	while (!READ_ONCE(data->recreate_slot))
		cpu_relax();

	vm_userspace_mem_region_add(vm, VM_MEM_SRC_ANONYMOUS, data->gpa,
				    TEST_SLOT, TEST_NPAGES, data->flags);

	return NULL;
}

static void pre_fault_memory(struct kvm_vcpu *vcpu, u64 base_gpa, u64 offset,
			     u64 size, u64 expected_left, bool private)
{
	struct kvm_pre_fault_memory range = {
		.gpa = gpa,
		.gpa = base_gpa + offset,
		.size = size,
		.flags = 0,
	};
	u64 prev;
	struct slot_worker_data data = {
		.vm = vcpu->vm,
		.gpa = base_gpa,
		.flags = private ? KVM_MEM_GUEST_MEMFD : 0,
	};
	bool slot_recreated = false;
	pthread_t slot_worker;
	int ret, save_errno;
	u64 prev;

	do {
	/*
	 * Concurrently delete (and recreate) the slot to test KVM's handling
	 * of a racing memslot deletion with prefaulting.
	 */
	pthread_create(&slot_worker, NULL, delete_slot_worker, &data);

	while (!READ_ONCE(data.worker_ready))
		cpu_relax();

	WRITE_ONCE(data.prefault_ready, true);

	for (;;) {
		prev = range.size;
		ret = __vcpu_ioctl(vcpu, KVM_PRE_FAULT_MEMORY, &range);
		save_errno = errno;

@@ -49,18 +98,65 @@ static void pre_fault_memory(struct kvm_vcpu *vcpu, u64 gpa, u64 size,
			    "%sexpecting range.size to change on %s",
			    ret < 0 ? "not " : "",
			    ret < 0 ? "failure" : "success");
	} while (ret >= 0 ? range.size : save_errno == EINTR);

	TEST_ASSERT(range.size == left,
		    "Completed with %lld bytes left, expected %" PRId64,
		    range.size, left);
		/*
		 * Immediately retry prefaulting if KVM was interrupted by an
		 * unrelated signal/event.
		 */
		if (ret < 0 && save_errno == EINTR)
			continue;

	if (left == 0)
		__TEST_ASSERT_VM_VCPU_IOCTL(!ret, "KVM_PRE_FAULT_MEMORY", ret, vcpu->vm);
		/*
		 * Tell the worker to recreate the slot in order to complete
		 * prefaulting (if prefault didn't already succeed before the
		 * slot was deleted) and/or to prepare for the next testcase.
		 * Wait for the worker to exit so that the next invocation of
		 * prefaulting is guaranteed to complete (assuming no KVM bugs).
		 */
		if (!slot_recreated) {
			WRITE_ONCE(data.recreate_slot, true);
			pthread_join(slot_worker, NULL);
			slot_recreated = true;

			/*
			 * Retry prefaulting to get a stable result, i.e. to
			 * avoid seeing random EAGAIN failures. Don't retry if
			 * prefaulting already succeeded, as KVM disallows
			 * prefaulting with size=0, i.e. blindly retrying would
			 * result in test failures due to EINVAL. KVM should
			 * always return success if all bytes are prefaulted,
			 * i.e. there is no need to guard against EAGAIN being
			 * returned.
			 */
			if (range.size)
				continue;
		}

		/*
		 * All done if there are no remaining bytes to prefault, or if
		 * prefaulting failed (EINTR was handled above, and EAGAIN due
		 * to prefaulting a memslot that's being actively deleted should
		 * be impossible since the memslot has already been recreated).
		 */
		if (!range.size || ret < 0)
			break;
	}

	TEST_ASSERT(range.size == expected_left,
		    "Completed with %llu bytes left, expected %lu",
		    range.size, expected_left);

	/*
	 * Assert success if prefaulting the entire range should succeed, i.e.
	 * complete with no bytes remaining. Otherwise prefaulting should have
	 * failed due to ENOENT (due to RET_PF_EMULATE for emulated MMIO when
	 * no memslot exists).
	 */
	if (!expected_left)
		TEST_ASSERT_VM_VCPU_IOCTL(!ret, KVM_PRE_FAULT_MEMORY, ret, vcpu->vm);
	else
		/* No memory slot causes RET_PF_EMULATE. it results in -ENOENT. */
		__TEST_ASSERT_VM_VCPU_IOCTL(ret && save_errno == ENOENT,
					    "KVM_PRE_FAULT_MEMORY", ret, vcpu->vm);
		TEST_ASSERT_VM_VCPU_IOCTL(ret && save_errno == ENOENT,
					  KVM_PRE_FAULT_MEMORY, ret, vcpu->vm);
}

static void __test_pre_fault_memory(unsigned long vm_type, bool private)

@@ -97,9 +193,10 @@ static void __test_pre_fault_memory(unsigned long vm_type, bool private)

	if (private)
		vm_mem_set_private(vm, guest_test_phys_mem, TEST_SIZE);
	pre_fault_memory(vcpu, guest_test_phys_mem, SZ_2M, 0);
	pre_fault_memory(vcpu, guest_test_phys_mem + SZ_2M, PAGE_SIZE * 2, PAGE_SIZE);
	pre_fault_memory(vcpu, guest_test_phys_mem + TEST_SIZE, PAGE_SIZE, PAGE_SIZE);

	pre_fault_memory(vcpu, guest_test_phys_mem, 0, SZ_2M, 0, private);
	pre_fault_memory(vcpu, guest_test_phys_mem, SZ_2M, PAGE_SIZE * 2, PAGE_SIZE, private);
	pre_fault_memory(vcpu, guest_test_phys_mem, TEST_SIZE, PAGE_SIZE, PAGE_SIZE, private);

	vcpu_args_set(vcpu, 1, guest_test_virt_mem);
	vcpu_run(vcpu);

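The prefault/deletion race above is driven by a simple flag handshake: the worker announces readiness, the main thread releases it, and READ_ONCE()/WRITE_ONCE() plus cpu_relax() keep the busy-waits honest. A standalone sketch of the same handshake using C11 atomics instead of the kernel-style macros (names are illustrative, not from the selftest):

#include <pthread.h>
#include <stdatomic.h>
#include <stdbool.h>

static atomic_bool worker_ready;
static atomic_bool go;

static void *worker(void *arg)
{
	atomic_store(&worker_ready, true);	/* announce readiness */
	while (!atomic_load(&go))		/* wait for the main thread */
		;
	/* ... racing work (e.g. deleting the memslot) happens here ... */
	return NULL;
}

int main(void)
{
	pthread_t t;

	pthread_create(&t, NULL, worker, NULL);
	while (!atomic_load(&worker_ready))	/* wait until the worker is spinning */
		;
	atomic_store(&go, true);		/* release it, then race against it */
	/* ... issue the ioctl being raced here ... */
	pthread_join(t, NULL);
	return 0;
}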
@@ -142,19 +142,17 @@ FIXTURE_SETUP(uc_kvm)
	self->kvm_run_size = ioctl(self->kvm_fd, KVM_GET_VCPU_MMAP_SIZE, NULL);
	ASSERT_GE(self->kvm_run_size, sizeof(struct kvm_run))
		  TH_LOG(KVM_IOCTL_ERROR(KVM_GET_VCPU_MMAP_SIZE, self->kvm_run_size));
	self->run = (struct kvm_run *)mmap(NULL, self->kvm_run_size,
		    PROT_READ | PROT_WRITE, MAP_SHARED, self->vcpu_fd, 0);
	ASSERT_NE(self->run, MAP_FAILED);
	self->run = kvm_mmap(self->kvm_run_size, PROT_READ | PROT_WRITE,
			     MAP_SHARED, self->vcpu_fd);
	/**
	 * For virtual cpus that have been created with S390 user controlled
	 * virtual machines, the resulting vcpu fd can be memory mapped at page
	 * offset KVM_S390_SIE_PAGE_OFFSET in order to obtain a memory map of
	 * the virtual cpu's hardware control block.
	 */
	self->sie_block = (struct kvm_s390_sie_block *)mmap(NULL, PAGE_SIZE,
			  PROT_READ | PROT_WRITE, MAP_SHARED,
			  self->vcpu_fd, KVM_S390_SIE_PAGE_OFFSET << PAGE_SHIFT);
	ASSERT_NE(self->sie_block, MAP_FAILED);
	self->sie_block = __kvm_mmap(PAGE_SIZE, PROT_READ | PROT_WRITE,
				     MAP_SHARED, self->vcpu_fd,
				     KVM_S390_SIE_PAGE_OFFSET << PAGE_SHIFT);

	TH_LOG("VM created %p %p", self->run, self->sie_block);

@@ -186,8 +184,8 @@ FIXTURE_SETUP(uc_kvm)

FIXTURE_TEARDOWN(uc_kvm)
{
	munmap(self->sie_block, PAGE_SIZE);
	munmap(self->run, self->kvm_run_size);
	kvm_munmap(self->sie_block, PAGE_SIZE);
	kvm_munmap(self->run, self->kvm_run_size);
	close(self->vcpu_fd);
	close(self->vm_fd);
	close(self->kvm_fd);

@@ -433,10 +433,10 @@ static void test_add_max_memory_regions(void)
	pr_info("Adding slots 0..%i, each memory region with %dK size\n",
		(max_mem_slots - 1), MEM_REGION_SIZE >> 10);

	mem = mmap(NULL, (size_t)max_mem_slots * MEM_REGION_SIZE + alignment,
		   PROT_READ | PROT_WRITE,
		   MAP_PRIVATE | MAP_ANONYMOUS | MAP_NORESERVE, -1, 0);
	TEST_ASSERT(mem != MAP_FAILED, "Failed to mmap() host");

	mem = kvm_mmap((size_t)max_mem_slots * MEM_REGION_SIZE + alignment,
		       PROT_READ | PROT_WRITE,
		       MAP_PRIVATE | MAP_ANONYMOUS | MAP_NORESERVE, -1);
	mem_aligned = (void *)(((size_t) mem + alignment - 1) & ~(alignment - 1));

	for (slot = 0; slot < max_mem_slots; slot++)

@@ -446,9 +446,8 @@ static void test_add_max_memory_regions(void)
			mem_aligned + (uint64_t)slot * MEM_REGION_SIZE);

	/* Check it cannot be added memory slots beyond the limit */
	mem_extra = mmap(NULL, MEM_REGION_SIZE, PROT_READ | PROT_WRITE,
			 MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
	TEST_ASSERT(mem_extra != MAP_FAILED, "Failed to mmap() host");
	mem_extra = kvm_mmap(MEM_REGION_SIZE, PROT_READ | PROT_WRITE,
			     MAP_PRIVATE | MAP_ANONYMOUS, -1);

	ret = __vm_set_user_memory_region(vm, max_mem_slots, 0,
					  (uint64_t)max_mem_slots * MEM_REGION_SIZE,

@@ -456,8 +455,8 @@ static void test_add_max_memory_regions(void)
	TEST_ASSERT(ret == -1 && errno == EINVAL,
		    "Adding one more memory slot should fail with EINVAL");

	munmap(mem, (size_t)max_mem_slots * MEM_REGION_SIZE + alignment);
	munmap(mem_extra, MEM_REGION_SIZE);
	kvm_munmap(mem, (size_t)max_mem_slots * MEM_REGION_SIZE + alignment);
	kvm_munmap(mem_extra, MEM_REGION_SIZE);
	kvm_vm_free(vm);
}

@@ -113,6 +113,7 @@ config KVM_GENERIC_MEMORY_ATTRIBUTES
	bool

config KVM_GUEST_MEMFD
	depends on KVM_GENERIC_MMU_NOTIFIER
	select XARRAY_MULTI
	bool

@@ -102,8 +102,17 @@ static struct folio *kvm_gmem_get_folio(struct inode *inode, pgoff_t index)
	return filemap_grab_folio(inode->i_mapping, index);
}

static void kvm_gmem_invalidate_begin(struct kvm_gmem *gmem, pgoff_t start,
				      pgoff_t end)
static enum kvm_gfn_range_filter kvm_gmem_get_invalidate_filter(struct inode *inode)
{
	if ((u64)inode->i_private & GUEST_MEMFD_FLAG_INIT_SHARED)
		return KVM_FILTER_SHARED;

	return KVM_FILTER_PRIVATE;
}

static void __kvm_gmem_invalidate_begin(struct kvm_gmem *gmem, pgoff_t start,
					pgoff_t end,
					enum kvm_gfn_range_filter attr_filter)
{
	bool flush = false, found_memslot = false;
	struct kvm_memory_slot *slot;

@@ -118,8 +127,7 @@ static void kvm_gmem_invalidate_begin(struct kvm_gmem *gmem, pgoff_t start,
			.end = slot->base_gfn + min(pgoff + slot->npages, end) - pgoff,
			.slot = slot,
			.may_block = true,
			/* guest memfd is relevant to only private mappings. */
			.attr_filter = KVM_FILTER_PRIVATE,
			.attr_filter = attr_filter,
		};

		if (!found_memslot) {

@@ -139,8 +147,21 @@ static void kvm_gmem_invalidate_begin(struct kvm_gmem *gmem, pgoff_t start,
		KVM_MMU_UNLOCK(kvm);
}

static void kvm_gmem_invalidate_end(struct kvm_gmem *gmem, pgoff_t start,
				    pgoff_t end)
static void kvm_gmem_invalidate_begin(struct inode *inode, pgoff_t start,
				      pgoff_t end)
{
	struct list_head *gmem_list = &inode->i_mapping->i_private_list;
	enum kvm_gfn_range_filter attr_filter;
	struct kvm_gmem *gmem;

	attr_filter = kvm_gmem_get_invalidate_filter(inode);

	list_for_each_entry(gmem, gmem_list, entry)
		__kvm_gmem_invalidate_begin(gmem, start, end, attr_filter);
}

static void __kvm_gmem_invalidate_end(struct kvm_gmem *gmem, pgoff_t start,
				      pgoff_t end)
{
	struct kvm *kvm = gmem->kvm;

@@ -151,12 +172,20 @@ static void kvm_gmem_invalidate_end(struct kvm_gmem *gmem, pgoff_t start,
	}
}

static long kvm_gmem_punch_hole(struct inode *inode, loff_t offset, loff_t len)
static void kvm_gmem_invalidate_end(struct inode *inode, pgoff_t start,
				    pgoff_t end)
{
	struct list_head *gmem_list = &inode->i_mapping->i_private_list;
	struct kvm_gmem *gmem;

	list_for_each_entry(gmem, gmem_list, entry)
		__kvm_gmem_invalidate_end(gmem, start, end);
}

static long kvm_gmem_punch_hole(struct inode *inode, loff_t offset, loff_t len)
{
	pgoff_t start = offset >> PAGE_SHIFT;
	pgoff_t end = (offset + len) >> PAGE_SHIFT;
	struct kvm_gmem *gmem;

	/*
	 * Bindings must be stable across invalidation to ensure the start+end

@@ -164,13 +193,11 @@ static long kvm_gmem_punch_hole(struct inode *inode, loff_t offset, loff_t len)
	 */
	filemap_invalidate_lock(inode->i_mapping);

	list_for_each_entry(gmem, gmem_list, entry)
		kvm_gmem_invalidate_begin(gmem, start, end);
	kvm_gmem_invalidate_begin(inode, start, end);

	truncate_inode_pages_range(inode->i_mapping, offset, offset + len - 1);

	list_for_each_entry(gmem, gmem_list, entry)
		kvm_gmem_invalidate_end(gmem, start, end);
	kvm_gmem_invalidate_end(inode, start, end);

	filemap_invalidate_unlock(inode->i_mapping);

@@ -280,8 +307,9 @@ static int kvm_gmem_release(struct inode *inode, struct file *file)
	 * Zap all SPTEs pointed at by this file. Do not free the backing
	 * memory, as its lifetime is associated with the inode, not the file.
	 */
	kvm_gmem_invalidate_begin(gmem, 0, -1ul);
	kvm_gmem_invalidate_end(gmem, 0, -1ul);
	__kvm_gmem_invalidate_begin(gmem, 0, -1ul,
				    kvm_gmem_get_invalidate_filter(inode));
	__kvm_gmem_invalidate_end(gmem, 0, -1ul);

	list_del(&gmem->entry);

@@ -328,6 +356,9 @@ static vm_fault_t kvm_gmem_fault_user_mapping(struct vm_fault *vmf)
	if (((loff_t)vmf->pgoff << PAGE_SHIFT) >= i_size_read(inode))
		return VM_FAULT_SIGBUS;

	if (!((u64)inode->i_private & GUEST_MEMFD_FLAG_INIT_SHARED))
		return VM_FAULT_SIGBUS;

	folio = kvm_gmem_get_folio(inode, vmf->pgoff);
	if (IS_ERR(folio)) {
		int err = PTR_ERR(folio);

@@ -400,8 +431,6 @@ static int kvm_gmem_migrate_folio(struct address_space *mapping,

static int kvm_gmem_error_folio(struct address_space *mapping, struct folio *folio)
{
	struct list_head *gmem_list = &mapping->i_private_list;
	struct kvm_gmem *gmem;
	pgoff_t start, end;

	filemap_invalidate_lock_shared(mapping);

@@ -409,8 +438,7 @@ static int kvm_gmem_error_folio(struct address_space *mapping, struct folio *folio)
	start = folio->index;
	end = start + folio_nr_pages(folio);

	list_for_each_entry(gmem, gmem_list, entry)
		kvm_gmem_invalidate_begin(gmem, start, end);
	kvm_gmem_invalidate_begin(mapping->host, start, end);

	/*
	 * Do not truncate the range, what action is taken in response to the

@@ -421,8 +449,7 @@ static int kvm_gmem_error_folio(struct address_space *mapping, struct folio *folio)
	 * error to userspace.
	 */

	list_for_each_entry(gmem, gmem_list, entry)
		kvm_gmem_invalidate_end(gmem, start, end);
	kvm_gmem_invalidate_end(mapping->host, start, end);

	filemap_invalidate_unlock_shared(mapping);

@@ -458,7 +485,7 @@ static const struct inode_operations kvm_gmem_iops = {
	.setattr	= kvm_gmem_setattr,
};

bool __weak kvm_arch_supports_gmem_mmap(struct kvm *kvm)
bool __weak kvm_arch_supports_gmem_init_shared(struct kvm *kvm)
{
	return true;
}

@@ -522,12 +549,8 @@ int kvm_gmem_create(struct kvm *kvm, struct kvm_create_guest_memfd *args)
{
	loff_t size = args->size;
	u64 flags = args->flags;
	u64 valid_flags = 0;

	if (kvm_arch_supports_gmem_mmap(kvm))
		valid_flags |= GUEST_MEMFD_FLAG_MMAP;

	if (flags & ~valid_flags)
	if (flags & ~kvm_gmem_get_supported_flags(kvm))
		return -EINVAL;

	if (size <= 0 || !PAGE_ALIGNED(size))

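Both kvm_gmem_create() above and the KVM_CAP_GUEST_MEMFD_FLAGS handling below lean on kvm_gmem_get_supported_flags(), whose definition is not part of these hunks. Based purely on how the flags are used here, a plausible shape might be the following; this is an assumption for illustration, not the actual helper:

/* Assumed sketch: MMAP is generally available, INIT_SHARED is gated on the
 * arch hook introduced above (CoCo VMs with always-private memory opt out). */
static u64 kvm_gmem_get_supported_flags(struct kvm *kvm)
{
	u64 flags = GUEST_MEMFD_FLAG_MMAP;

	if (!kvm || kvm_arch_supports_gmem_init_shared(kvm))
		flags |= GUEST_MEMFD_FLAG_INIT_SHARED;

	return flags;
}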
@@ -4928,8 +4928,8 @@ static int kvm_vm_ioctl_check_extension_generic(struct kvm *kvm, long arg)
#ifdef CONFIG_KVM_GUEST_MEMFD
	case KVM_CAP_GUEST_MEMFD:
		return 1;
	case KVM_CAP_GUEST_MEMFD_MMAP:
		return !kvm || kvm_arch_supports_gmem_mmap(kvm);
	case KVM_CAP_GUEST_MEMFD_FLAGS:
		return kvm_gmem_get_supported_flags(kvm);
#endif
	default:
		break;

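From userspace, the new capability is queried per-VM and the return value is the supported flag mask, which can then be passed straight to KVM_CREATE_GUEST_MEMFD. A hedged sketch of that flow, with error handling trimmed and assuming the GUEST_MEMFD_FLAG_* definitions added by this series are available in the uapi headers:

#include <linux/kvm.h>
#include <sys/ioctl.h>
#include <stdint.h>

static int create_gmem(int vm_fd, uint64_t size)
{
	/* KVM_CHECK_EXTENSION on the VM fd returns the supported flag mask. */
	uint64_t supported = ioctl(vm_fd, KVM_CHECK_EXTENSION, KVM_CAP_GUEST_MEMFD_FLAGS);
	struct kvm_create_guest_memfd gmem = {
		.size = size,
		/* Only request what this VM type actually supports. */
		.flags = supported & (GUEST_MEMFD_FLAG_MMAP | GUEST_MEMFD_FLAG_INIT_SHARED),
	};

	return ioctl(vm_fd, KVM_CREATE_GUEST_MEMFD, &gmem);
}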