KVM: arm64: Add helper for swapping guest descriptor

Implementing FEAT_HAFDBS in KVM's software PTWs requires the ability to
CAS a descriptor to update the in-memory value. Add an accessor to do
exactly that, coping with the fact that guest descriptors are in user
memory (duh).

While FEAT_LSE is required on any system that implements NV, KVM now uses
the stage-1 PTW for non-nested use cases, meaning an LL/SC implementation
is necessary as well.
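
As a rough illustration of the sort of caller this enables (hypothetical, not
part of this patch): a walker that wants to set the Access Flag on a stage-1
descriptor CASes the updated value against the one it observed during the
walk, and treats -EAGAIN as a request to redo the walk. The helper name below
is made up; PTE_AF is the existing arm64 descriptor Access Flag bit.

/*
 * Hypothetical caller, illustration only: set the Access Flag on a
 * stage-1 descriptor observed by a software walk. "desc" is the value
 * read during the walk and "ipa" is the descriptor's guest address.
 */
static int set_s1_access_flag(struct kvm *kvm, gpa_t ipa, u64 desc)
{
	if (desc & PTE_AF)
		return 0;	/* already marked accessed */

	/* -EAGAIN means the descriptor changed under us; caller re-walks */
	return __kvm_at_swap_desc(kvm, ipa, desc, desc | PTE_AF);
}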

Reviewed-by: Marc Zyngier <maz@kernel.org>
Tested-by: Marc Zyngier <maz@kernel.org>
Link: https://msgid.link/20251124190158.177318-11-oupton@kernel.org
Signed-off-by: Oliver Upton <oupton@kernel.org>
Oliver Upton
2025-11-24 11:01:52 -08:00
parent 590e694820
commit f6927b41d5
2 changed files with 89 additions and 0 deletions

@@ -403,4 +403,6 @@ void kvm_handle_s1e2_tlbi(struct kvm_vcpu *vcpu, u32 inst, u64 val);
		(FIX_VNCR - __c);					\
	})

int __kvm_at_swap_desc(struct kvm *kvm, gpa_t ipa, u64 old, u64 new);

#endif /* __ARM64_KVM_NESTED_H */
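
Not part of the patch, but a note on the calling contract implied by this
declaration: the implementation below asserts that the kvm->srcu read lock is
held while it resolves the memslot, so a caller outside an existing SRCU
section would bracket the call roughly like this hypothetical wrapper.

/*
 * Hypothetical wrapper, illustration only: hold the kvm->srcu read lock
 * so the memslot backing the IPA cannot go away during the swap.
 */
static int swap_desc_in_srcu(struct kvm *kvm, gpa_t ipa, u64 old, u64 new)
{
	int idx, ret;

	idx = srcu_read_lock(&kvm->srcu);
	ret = __kvm_at_swap_desc(kvm, ipa, old, new);
	srcu_read_unlock(&kvm->srcu, idx);

	return ret;
}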

@@ -1650,3 +1650,90 @@ int __kvm_find_s1_desc_level(struct kvm_vcpu *vcpu, u64 va, u64 ipa, int *level)
return ret;
}
}

static int __lse_swap_desc(u64 __user *ptep, u64 old, u64 new)
{
	u64 tmp = old;
	int ret = 0;

	uaccess_enable_privileged();
	asm volatile(__LSE_PREAMBLE
		     "1: cas %[old], %[new], %[addr]\n"
		     "2:\n"
		     _ASM_EXTABLE_UACCESS_ERR(1b, 2b, %w[ret])
		     : [old] "+r" (old), [addr] "+Q" (*ptep), [ret] "+r" (ret)
		     : [new] "r" (new)
		     : "memory");
	uaccess_disable_privileged();

	if (ret)
		return ret;

	if (tmp != old)
		return -EAGAIN;

	return ret;
}

static int __llsc_swap_desc(u64 __user *ptep, u64 old, u64 new)
{
	int ret = 1;
	u64 tmp;

	uaccess_enable_privileged();
	asm volatile("prfm pstl1strm, %[addr]\n"
		     "1: ldxr %[tmp], %[addr]\n"
		     "sub %[tmp], %[tmp], %[old]\n"
		     "cbnz %[tmp], 3f\n"
		     "2: stlxr %w[ret], %[new], %[addr]\n"
		     "3:\n"
		     _ASM_EXTABLE_UACCESS_ERR(1b, 3b, %w[ret])
		     _ASM_EXTABLE_UACCESS_ERR(2b, 3b, %w[ret])
		     : [ret] "+r" (ret), [addr] "+Q" (*ptep), [tmp] "=&r" (tmp)
		     : [old] "r" (old), [new] "r" (new)
		     : "memory");
	uaccess_disable_privileged();

	/* STLXR didn't update the descriptor, or the compare failed */
	if (ret == 1)
		return -EAGAIN;

	return ret;
}

int __kvm_at_swap_desc(struct kvm *kvm, gpa_t ipa, u64 old, u64 new)
{
	struct kvm_memory_slot *slot;
	unsigned long hva;
	u64 __user *ptep;
	bool writable;
	int offset;
	gfn_t gfn;
	int r;

	lockdep_assert(srcu_read_lock_held(&kvm->srcu));

	gfn = ipa >> PAGE_SHIFT;
	offset = offset_in_page(ipa);
	slot = gfn_to_memslot(kvm, gfn);

	hva = gfn_to_hva_memslot_prot(slot, gfn, &writable);
	if (kvm_is_error_hva(hva))
		return -EINVAL;
	if (!writable)
		return -EPERM;

	ptep = (u64 __user *)(hva + offset);
	if (cpus_have_final_cap(ARM64_HAS_LSE_ATOMICS))
		r = __lse_swap_desc(ptep, old, new);
	else
		r = __llsc_swap_desc(ptep, old, new);
	if (r < 0)
		return r;

	mark_page_dirty_in_slot(kvm, slot, gfn);
	return 0;
}
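
To make the -EAGAIN contract concrete, here is a hedged sketch of a
caller-side retry loop (the helper below is hypothetical and not in this
patch); it assumes the caller already holds kvm->srcu as asserted above, and
simply re-reads the descriptor with kvm_read_guest() before retrying.

/*
 * Hypothetical retry pattern, illustration only: re-read the descriptor
 * and retry the swap whenever another agent updated it first (-EAGAIN).
 * Assumes the caller is inside a kvm->srcu read-side critical section.
 */
static int set_desc_bits(struct kvm *kvm, gpa_t ipa, u64 bits)
{
	u64 old;
	int ret;

	do {
		/* Fetch the current descriptor value before each attempt */
		ret = kvm_read_guest(kvm, ipa, &old, sizeof(old));
		if (ret)
			return ret;

		if ((old & bits) == bits)
			return 0;	/* nothing left to set */

		ret = __kvm_at_swap_desc(kvm, ipa, old, old | bits);
	} while (ret == -EAGAIN);

	return ret;
}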