KVM: s390: KVM page table management functions: clear, replace, exchange

Add page table management functions to be used for KVM guest (gmap)
page tables.

This patch adds functions to clear, replace or exchange DAT table
entries.

Acked-by: Heiko Carstens <hca@linux.ibm.com>
Signed-off-by: Claudio Imbrenda <imbrenda@linux.ibm.com>
This commit is contained in:
Claudio Imbrenda
2026-02-04 16:02:43 +01:00
parent 12f2f61a9e
commit 589071eaaa
2 changed files with 155 additions and 0 deletions

View File

@@ -101,3 +101,118 @@ void dat_free_level(struct crst_table *table, bool owns_ptes)
}
dat_free_crst(table);
}
/**
 * dat_crstep_xchg() - Exchange a gmap CRSTE with another.
 * @crstep: Pointer to the CRST entry.
 * @new: Replacement entry.
 * @gfn: The affected guest address.
 * @asce: The ASCE of the address space.
 *
 * An entry that is already invalid is simply overwritten, since it needs no
 * TLB invalidation.  Otherwise the entry is replaced using CRDTE when EDAT2
 * is available, or invalidated with IDTE and then overwritten.
 *
 * Context: This function is assumed to be called with kvm->mmu_lock held.
 */
void dat_crstep_xchg(union crste *crstep, union crste new, gfn_t gfn, union asce asce)
{
	/* Invalid entry: a plain store is sufficient. */
	if (crstep->h.i) {
		WRITE_ONCE(*crstep, new);
		return;
	}
	/* EDAT2: replace and invalidate in one operation. */
	if (cpu_has_edat2()) {
		crdte_crste(crstep, *crstep, new, gfn, asce);
		return;
	}
	/* Fallback: invalidate the old entry first, then store the new one. */
	if (machine_has_tlb_guest())
		idte_crste(crstep, gfn, IDTE_GUEST_ASCE, asce, IDTE_GLOBAL);
	else
		idte_crste(crstep, gfn, 0, NULL_ASCE, IDTE_GLOBAL);
	WRITE_ONCE(*crstep, new);
}
/**
 * dat_crstep_xchg_atomic() - Atomically exchange a gmap CRSTE with another.
 * @crstep: Pointer to the CRST entry.
 * @old: Expected old value.
 * @new: Replacement entry.
 * @gfn: The affected guest address.
 * @asce: The asce of the address space.
 *
 * This function is needed to atomically exchange a CRSTE that potentially
 * maps a prefix area, without having to invalidate it in between.
 *
 * Context: This function is assumed to be called with kvm->mmu_lock held.
 *
 * Return: %true if the exchange was successful.
 */
bool dat_crstep_xchg_atomic(union crste *crstep, union crste old, union crste new, gfn_t gfn,
			    union asce asce)
{
	/* An invalid entry needs no TLB invalidation; a compare-and-swap suffices. */
	if (old.h.i)
		return arch_try_cmpxchg((long *)crstep, &old.val, new.val);
	/* Valid entry: use CRDTE when EDAT2 is available, CSPG otherwise. */
	return cpu_has_edat2() ? crdte_crste(crstep, old, new, gfn, asce)
			       : cspg_crste(crstep, old, new);
}
static void dat_set_storage_key_from_pgste(union pte pte, union pgste pgste)
{
union skey nkey = { .acc = pgste.acc, .fp = pgste.fp };
page_set_storage_key(pte_origin(pte), nkey.skey, 0);
}
/* Copy the storage key from the page mapped by @old to the page mapped by @new. */
static void dat_move_storage_key(union pte old, union pte new)
{
	unsigned char key = page_get_storage_key(pte_origin(old));

	page_set_storage_key(pte_origin(new), key, 1);
}
static union pgste dat_save_storage_key_into_pgste(union pte pte, union pgste pgste)
{
union skey skey;
skey.skey = page_get_storage_key(pte_origin(pte));
pgste.acc = skey.acc;
pgste.fp = skey.fp;
pgste.gr |= skey.r;
pgste.gc |= skey.c;
return pgste;
}
/**
 * __dat_ptep_xchg() - Exchange a gmap PTE with another.
 * @ptep: Pointer to the PTE to be replaced.
 * @pgste: The PGSTE belonging to @ptep; the caller must hold its lock.
 * @new: Replacement entry.
 * @gfn: The affected guest address.
 * @asce: The ASCE of the address space.
 * @uses_skeys: Whether storage keys need to be kept in sync with the PGSTE.
 *
 * If the old and new entries differ only in software bits, only the software
 * byte is updated and no TLB invalidation is performed.  Otherwise a valid
 * old entry is invalidated with IPTE before the new value is stored, and,
 * when @uses_skeys is set, the storage key is saved to / restored from the
 * PGSTE as appropriate for the validity transition.
 *
 * Context: assumes the PGSTE lock for @ptep is held (see dat_ptep_xchg()).
 * Return: the possibly updated PGSTE, to be written back by the caller.
 */
union pgste __dat_ptep_xchg(union pte *ptep, union pgste pgste, union pte new, gfn_t gfn,
			    union asce asce, bool uses_skeys)
{
	union pte old = READ_ONCE(*ptep);

	/* Updating only the software bits while holding the pgste lock. */
	if (!((ptep->val ^ new.val) & ~_PAGE_SW_BITS)) {
		WRITE_ONCE(ptep->swbyte, new.swbyte);
		return pgste;
	}
	/* The old entry is valid: it must be invalidated before being replaced. */
	if (!old.h.i) {
		unsigned long opts = IPTE_GUEST_ASCE | (pgste.nodat ? IPTE_NODAT : 0);

		if (machine_has_tlb_guest())
			__ptep_ipte(gfn_to_gpa(gfn), (void *)ptep, opts, asce.val, IPTE_GLOBAL);
		else
			__ptep_ipte(gfn_to_gpa(gfn), (void *)ptep, 0, 0, IPTE_GLOBAL);
	}
	if (uses_skeys) {
		if (old.h.i && !new.h.i)
			/* Invalid to valid: restore storage keys from PGSTE. */
			dat_set_storage_key_from_pgste(new, pgste);
		else if (!old.h.i && new.h.i)
			/* Valid to invalid: save storage keys to PGSTE. */
			pgste = dat_save_storage_key_into_pgste(old, pgste);
		else if (!old.h.i && !new.h.i)
			/* Valid to valid: move storage keys (only if the frame changes). */
			if (old.h.pfra != new.h.pfra)
				dat_move_storage_key(old, new);
		/* Invalid to invalid: nothing to do. */
	}
	WRITE_ONCE(*ptep, new);
	return pgste;
}

View File

@@ -430,6 +430,12 @@ struct kvm_s390_mmu_cache {
short int n_rmaps;
};
union pgste __must_check __dat_ptep_xchg(union pte *ptep, union pgste pgste, union pte new,
gfn_t gfn, union asce asce, bool uses_skeys);
bool dat_crstep_xchg_atomic(union crste *crstep, union crste old, union crste new, gfn_t gfn,
union asce asce);
void dat_crstep_xchg(union crste *crstep, union crste new, gfn_t gfn, union asce asce);
void dat_free_level(struct crst_table *table, bool owns_ptes);
struct crst_table *dat_alloc_crst_sleepable(unsigned long init);
@@ -757,6 +763,21 @@ static inline void pgste_set_unlock(union pte *ptep, union pgste pgste)
WRITE_ONCE(*pgste_of(ptep), pgste);
}
/*
 * Exchange the PTE at @ptep with @new while holding the PGSTE lock, and
 * write the updated PGSTE back when unlocking.
 */
static inline void dat_ptep_xchg(union pte *ptep, union pte new, gfn_t gfn, union asce asce,
				 bool has_skeys)
{
	union pgste pgste = pgste_get_lock(ptep);

	pgste_set_unlock(ptep, __dat_ptep_xchg(ptep, pgste, new, gfn, asce, has_skeys));
}
/* Clear the PTE at @ptep by exchanging it with an empty entry. */
static inline void dat_ptep_clear(union pte *ptep, gfn_t gfn, union asce asce, bool has_skeys)
{
	dat_ptep_xchg(ptep, _PTE_EMPTY, gfn, asce, has_skeys);
}
/* Free a page table (page-sized allocation). */
static inline void dat_free_pt(struct page_table *pt)
{
	free_page((unsigned long)pt);
}
@@ -794,4 +815,23 @@ static inline struct kvm_s390_mmu_cache *kvm_s390_new_mmu_cache(void)
return NULL;
}
/*
 * Atomically exchange a PMD entry; thin typed wrapper around
 * dat_crstep_xchg_atomic().  Returns %true on success.
 */
static inline bool dat_pmdp_xchg_atomic(union pmd *pmdp, union pmd old, union pmd new,
					gfn_t gfn, union asce asce)
{
	return dat_crstep_xchg_atomic(_CRSTEP(pmdp), _CRSTE(old), _CRSTE(new), gfn, asce);
}
/*
 * Atomically exchange a PUD entry; thin typed wrapper around
 * dat_crstep_xchg_atomic().  Returns %true on success.
 */
static inline bool dat_pudp_xchg_atomic(union pud *pudp, union pud old, union pud new,
					gfn_t gfn, union asce asce)
{
	return dat_crstep_xchg_atomic(_CRSTEP(pudp), _CRSTE(old), _CRSTE(new), gfn, asce);
}
/* Clear a CRST entry by exchanging it with an empty entry of the same table type. */
static inline void dat_crstep_clear(union crste *crstep, gfn_t gfn, union asce asce)
{
	dat_crstep_xchg(crstep, _CRSTE_EMPTY(crstep->h.tt), gfn, asce);
}
#endif /* __KVM_S390_DAT_H */