mirror of
https://git.kernel.org/pub/scm/linux/kernel/git/netdev/net.git/
synced 2026-04-03 23:37:40 -04:00
Merge tag 'kvm-s390-master-7.0-1' of git://git.kernel.org/pub/scm/linux/kernel/git/kvms390/linux into HEAD
KVM: s390: Fixes for 7.0
- fix deadlock in new memory management
- handle kernel faults on donated memory properly
- fix bounds checking for irq routing + selftest
- fix invalid machine checks + logging
This commit is contained in:
@@ -710,6 +710,9 @@ void kvm_arch_crypto_clear_masks(struct kvm *kvm);
|
||||
void kvm_arch_crypto_set_masks(struct kvm *kvm, unsigned long *apm,
|
||||
unsigned long *aqm, unsigned long *adm);
|
||||
|
||||
#define SIE64_RETURN_NORMAL 0
|
||||
#define SIE64_RETURN_MCCK 1
|
||||
|
||||
int __sie64a(phys_addr_t sie_block_phys, struct kvm_s390_sie_block *sie_block, u64 *rsa,
|
||||
unsigned long gasce);
|
||||
|
||||
|
||||
@@ -62,7 +62,7 @@ struct stack_frame {
|
||||
struct {
|
||||
unsigned long sie_control_block;
|
||||
unsigned long sie_savearea;
|
||||
unsigned long sie_reason;
|
||||
unsigned long sie_return;
|
||||
unsigned long sie_flags;
|
||||
unsigned long sie_control_block_phys;
|
||||
unsigned long sie_guest_asce;
|
||||
|
||||
@@ -63,7 +63,7 @@ int main(void)
|
||||
OFFSET(__SF_EMPTY, stack_frame, empty[0]);
|
||||
OFFSET(__SF_SIE_CONTROL, stack_frame, sie_control_block);
|
||||
OFFSET(__SF_SIE_SAVEAREA, stack_frame, sie_savearea);
|
||||
OFFSET(__SF_SIE_REASON, stack_frame, sie_reason);
|
||||
OFFSET(__SF_SIE_RETURN, stack_frame, sie_return);
|
||||
OFFSET(__SF_SIE_FLAGS, stack_frame, sie_flags);
|
||||
OFFSET(__SF_SIE_CONTROL_PHYS, stack_frame, sie_control_block_phys);
|
||||
OFFSET(__SF_SIE_GUEST_ASCE, stack_frame, sie_guest_asce);
|
||||
|
||||
@@ -200,7 +200,7 @@ SYM_FUNC_START(__sie64a)
|
||||
stg %r3,__SF_SIE_CONTROL(%r15) # ...and virtual addresses
|
||||
stg %r4,__SF_SIE_SAVEAREA(%r15) # save guest register save area
|
||||
stg %r5,__SF_SIE_GUEST_ASCE(%r15) # save guest asce
|
||||
xc __SF_SIE_REASON(8,%r15),__SF_SIE_REASON(%r15) # reason code = 0
|
||||
xc __SF_SIE_RETURN(8,%r15),__SF_SIE_RETURN(%r15) # return code = 0
|
||||
mvc __SF_SIE_FLAGS(8,%r15),__TI_flags(%r14) # copy thread flags
|
||||
lmg %r0,%r13,0(%r4) # load guest gprs 0-13
|
||||
mvi __TI_sie(%r14),1
|
||||
@@ -237,7 +237,7 @@ SYM_INNER_LABEL(sie_exit, SYM_L_GLOBAL)
|
||||
xgr %r4,%r4
|
||||
xgr %r5,%r5
|
||||
lmg %r6,%r14,__SF_GPRS(%r15) # restore kernel registers
|
||||
lg %r2,__SF_SIE_REASON(%r15) # return exit reason code
|
||||
lg %r2,__SF_SIE_RETURN(%r15) # return sie return code
|
||||
BR_EX %r14
|
||||
SYM_FUNC_END(__sie64a)
|
||||
EXPORT_SYMBOL(__sie64a)
|
||||
|
||||
@@ -487,8 +487,8 @@ void notrace s390_do_machine_check(struct pt_regs *regs)
|
||||
mcck_dam_code = (mci.val & MCIC_SUBCLASS_MASK);
|
||||
if (test_cpu_flag(CIF_MCCK_GUEST) &&
|
||||
(mcck_dam_code & MCCK_CODE_NO_GUEST) != mcck_dam_code) {
|
||||
/* Set exit reason code for host's later handling */
|
||||
*((long *)(regs->gprs[15] + __SF_SIE_REASON)) = -EINTR;
|
||||
/* Set sie return code for host's later handling */
|
||||
((struct stack_frame *)regs->gprs[15])->sie_return = SIE64_RETURN_MCCK;
|
||||
}
|
||||
clear_cpu_flag(CIF_MCCK_GUEST);
|
||||
|
||||
|
||||
@@ -1434,7 +1434,8 @@ static int _do_shadow_pte(struct gmap *sg, gpa_t raddr, union pte *ptep_h, union
|
||||
if (rc)
|
||||
return rc;
|
||||
|
||||
pgste = pgste_get_lock(ptep_h);
|
||||
if (!pgste_get_trylock(ptep_h, &pgste))
|
||||
return -EAGAIN;
|
||||
newpte = _pte(f->pfn, f->writable, !p, 0);
|
||||
newpte.s.d |= ptep->s.d;
|
||||
newpte.s.sd |= ptep->s.sd;
|
||||
@@ -1444,7 +1445,8 @@ static int _do_shadow_pte(struct gmap *sg, gpa_t raddr, union pte *ptep_h, union
|
||||
pgste_set_unlock(ptep_h, pgste);
|
||||
|
||||
newpte = _pte(f->pfn, 0, !p, 0);
|
||||
pgste = pgste_get_lock(ptep);
|
||||
if (!pgste_get_trylock(ptep, &pgste))
|
||||
return -EAGAIN;
|
||||
pgste = __dat_ptep_xchg(ptep, pgste, newpte, gpa_to_gfn(raddr), sg->asce, uses_skeys(sg));
|
||||
pgste_set_unlock(ptep, pgste);
|
||||
|
||||
|
||||
@@ -2724,6 +2724,9 @@ static unsigned long get_ind_bit(__u64 addr, unsigned long bit_nr, bool swap)
|
||||
|
||||
bit = bit_nr + (addr % PAGE_SIZE) * 8;
|
||||
|
||||
/* kvm_set_routing_entry() should never allow this to happen */
|
||||
WARN_ON_ONCE(bit > (PAGE_SIZE * BITS_PER_BYTE - 1));
|
||||
|
||||
return swap ? (bit ^ (BITS_PER_LONG - 1)) : bit;
|
||||
}
|
||||
|
||||
@@ -2824,6 +2827,12 @@ void kvm_s390_reinject_machine_check(struct kvm_vcpu *vcpu,
|
||||
int rc;
|
||||
|
||||
mci.val = mcck_info->mcic;
|
||||
|
||||
/* log machine checks being reinjected on all debugs */
|
||||
VCPU_EVENT(vcpu, 2, "guest machine check %lx", mci.val);
|
||||
KVM_EVENT(2, "guest machine check %lx", mci.val);
|
||||
pr_info("guest machine check pid %d: %lx", current->pid, mci.val);
|
||||
|
||||
if (mci.sr)
|
||||
cr14 |= CR14_RECOVERY_SUBMASK;
|
||||
if (mci.dg)
|
||||
@@ -2852,6 +2861,7 @@ int kvm_set_routing_entry(struct kvm *kvm,
|
||||
struct kvm_kernel_irq_routing_entry *e,
|
||||
const struct kvm_irq_routing_entry *ue)
|
||||
{
|
||||
const struct kvm_irq_routing_s390_adapter *adapter;
|
||||
u64 uaddr_s, uaddr_i;
|
||||
int idx;
|
||||
|
||||
@@ -2862,6 +2872,14 @@ int kvm_set_routing_entry(struct kvm *kvm,
|
||||
return -EINVAL;
|
||||
e->set = set_adapter_int;
|
||||
|
||||
adapter = &ue->u.adapter;
|
||||
if (adapter->summary_addr + (adapter->summary_offset / 8) >=
|
||||
(adapter->summary_addr & PAGE_MASK) + PAGE_SIZE)
|
||||
return -EINVAL;
|
||||
if (adapter->ind_addr + (adapter->ind_offset / 8) >=
|
||||
(adapter->ind_addr & PAGE_MASK) + PAGE_SIZE)
|
||||
return -EINVAL;
|
||||
|
||||
idx = srcu_read_lock(&kvm->srcu);
|
||||
uaddr_s = gpa_to_hva(kvm, ue->u.adapter.summary_addr);
|
||||
uaddr_i = gpa_to_hva(kvm, ue->u.adapter.ind_addr);
|
||||
|
||||
@@ -4617,7 +4617,7 @@ static int vcpu_post_run_handle_fault(struct kvm_vcpu *vcpu)
|
||||
return 0;
|
||||
}
|
||||
|
||||
static int vcpu_post_run(struct kvm_vcpu *vcpu, int exit_reason)
|
||||
static int vcpu_post_run(struct kvm_vcpu *vcpu, int sie_return)
|
||||
{
|
||||
struct mcck_volatile_info *mcck_info;
|
||||
struct sie_page *sie_page;
|
||||
@@ -4633,14 +4633,14 @@ static int vcpu_post_run(struct kvm_vcpu *vcpu, int exit_reason)
|
||||
vcpu->run->s.regs.gprs[14] = vcpu->arch.sie_block->gg14;
|
||||
vcpu->run->s.regs.gprs[15] = vcpu->arch.sie_block->gg15;
|
||||
|
||||
if (exit_reason == -EINTR) {
|
||||
VCPU_EVENT(vcpu, 3, "%s", "machine check");
|
||||
if (sie_return == SIE64_RETURN_MCCK) {
|
||||
sie_page = container_of(vcpu->arch.sie_block,
|
||||
struct sie_page, sie_block);
|
||||
mcck_info = &sie_page->mcck_info;
|
||||
kvm_s390_reinject_machine_check(vcpu, mcck_info);
|
||||
return 0;
|
||||
}
|
||||
WARN_ON_ONCE(sie_return != SIE64_RETURN_NORMAL);
|
||||
|
||||
if (vcpu->arch.sie_block->icptcode > 0) {
|
||||
rc = kvm_handle_sie_intercept(vcpu);
|
||||
@@ -4679,7 +4679,7 @@ int noinstr kvm_s390_enter_exit_sie(struct kvm_s390_sie_block *scb,
|
||||
#define PSW_INT_MASK (PSW_MASK_EXT | PSW_MASK_IO | PSW_MASK_MCHECK)
|
||||
static int __vcpu_run(struct kvm_vcpu *vcpu)
|
||||
{
|
||||
int rc, exit_reason;
|
||||
int rc, sie_return;
|
||||
struct sie_page *sie_page = (struct sie_page *)vcpu->arch.sie_block;
|
||||
|
||||
/*
|
||||
@@ -4719,9 +4719,9 @@ xfer_to_guest_mode_check:
|
||||
guest_timing_enter_irqoff();
|
||||
__disable_cpu_timer_accounting(vcpu);
|
||||
|
||||
exit_reason = kvm_s390_enter_exit_sie(vcpu->arch.sie_block,
|
||||
vcpu->run->s.regs.gprs,
|
||||
vcpu->arch.gmap->asce.val);
|
||||
sie_return = kvm_s390_enter_exit_sie(vcpu->arch.sie_block,
|
||||
vcpu->run->s.regs.gprs,
|
||||
vcpu->arch.gmap->asce.val);
|
||||
|
||||
__enable_cpu_timer_accounting(vcpu);
|
||||
guest_timing_exit_irqoff();
|
||||
@@ -4744,7 +4744,7 @@ xfer_to_guest_mode_check:
|
||||
}
|
||||
kvm_vcpu_srcu_read_lock(vcpu);
|
||||
|
||||
rc = vcpu_post_run(vcpu, exit_reason);
|
||||
rc = vcpu_post_run(vcpu, sie_return);
|
||||
if (rc || guestdbg_exit_pending(vcpu)) {
|
||||
kvm_vcpu_srcu_read_unlock(vcpu);
|
||||
break;
|
||||
|
||||
@@ -1122,6 +1122,7 @@ static int do_vsie_run(struct kvm_vcpu *vcpu, struct vsie_page *vsie_page, struc
|
||||
{
|
||||
struct kvm_s390_sie_block *scb_s = &vsie_page->scb_s;
|
||||
struct kvm_s390_sie_block *scb_o = vsie_page->scb_o;
|
||||
unsigned long sie_return = SIE64_RETURN_NORMAL;
|
||||
int guest_bp_isolation;
|
||||
int rc = 0;
|
||||
|
||||
@@ -1163,7 +1164,7 @@ xfer_to_guest_mode_check:
|
||||
goto xfer_to_guest_mode_check;
|
||||
}
|
||||
guest_timing_enter_irqoff();
|
||||
rc = kvm_s390_enter_exit_sie(scb_s, vcpu->run->s.regs.gprs, sg->asce.val);
|
||||
sie_return = kvm_s390_enter_exit_sie(scb_s, vcpu->run->s.regs.gprs, sg->asce.val);
|
||||
guest_timing_exit_irqoff();
|
||||
local_irq_enable();
|
||||
}
|
||||
@@ -1178,12 +1179,13 @@ skip_sie:
|
||||
|
||||
kvm_vcpu_srcu_read_lock(vcpu);
|
||||
|
||||
if (rc == -EINTR) {
|
||||
VCPU_EVENT(vcpu, 3, "%s", "machine check");
|
||||
if (sie_return == SIE64_RETURN_MCCK) {
|
||||
kvm_s390_reinject_machine_check(vcpu, &vsie_page->mcck_info);
|
||||
return 0;
|
||||
}
|
||||
|
||||
WARN_ON_ONCE(sie_return != SIE64_RETURN_NORMAL);
|
||||
|
||||
if (rc > 0)
|
||||
rc = 0; /* we could still have an icpt */
|
||||
else if (current->thread.gmap_int_code)
|
||||
|
||||
@@ -441,10 +441,17 @@ void do_secure_storage_access(struct pt_regs *regs)
|
||||
folio = phys_to_folio(addr);
|
||||
if (unlikely(!folio_try_get(folio)))
|
||||
return;
|
||||
rc = arch_make_folio_accessible(folio);
|
||||
rc = uv_convert_from_secure(folio_to_phys(folio));
|
||||
if (!rc)
|
||||
clear_bit(PG_arch_1, &folio->flags.f);
|
||||
folio_put(folio);
|
||||
/*
|
||||
* There are some valid fixup types for kernel
|
||||
* accesses to donated secure memory. zeropad is one
|
||||
* of them.
|
||||
*/
|
||||
if (rc)
|
||||
BUG();
|
||||
return handle_fault_error_nolock(regs, 0);
|
||||
} else {
|
||||
if (faulthandler_disabled())
|
||||
return handle_fault_error_nolock(regs, 0);
|
||||
|
||||
@@ -206,6 +206,7 @@ TEST_GEN_PROGS_s390 += s390/ucontrol_test
|
||||
TEST_GEN_PROGS_s390 += s390/user_operexec
|
||||
TEST_GEN_PROGS_s390 += s390/keyop
|
||||
TEST_GEN_PROGS_s390 += rseq_test
|
||||
TEST_GEN_PROGS_s390 += s390/irq_routing
|
||||
|
||||
TEST_GEN_PROGS_riscv = $(TEST_GEN_PROGS_COMMON)
|
||||
TEST_GEN_PROGS_riscv += riscv/sbi_pmu_test
|
||||
|
||||
75
tools/testing/selftests/kvm/s390/irq_routing.c
Normal file
75
tools/testing/selftests/kvm/s390/irq_routing.c
Normal file
@@ -0,0 +1,75 @@
|
||||
// SPDX-License-Identifier: GPL-2.0-only
|
||||
/*
|
||||
* IRQ routing offset tests.
|
||||
*
|
||||
* Copyright IBM Corp. 2026
|
||||
*
|
||||
* Authors:
|
||||
* Janosch Frank <frankja@linux.ibm.com>
|
||||
*/
|
||||
#include <stdio.h>
|
||||
#include <stdlib.h>
|
||||
#include <string.h>
|
||||
#include <sys/ioctl.h>
|
||||
|
||||
#include "test_util.h"
|
||||
#include "kvm_util.h"
|
||||
#include "kselftest.h"
|
||||
#include "ucall_common.h"
|
||||
|
||||
/*
 * Minimal guest payload, defined in file-scope asm so the selftest has a
 * real code symbol to hand to vm_create_with_one_vcpu().
 *
 * The guest issues a DIAGNOSE instruction (which intercepts to the host —
 * presumably just to hand control back to KVM; confirm against the s390
 * intercept handling) and then spins forever ("j ." branches to itself).
 * The guest never needs to run for these tests; only the routing ioctls
 * on the host side are exercised.
 */
extern char guest_code[];
asm("guest_code:\n"
    "diag %r0,%r0,0\n"
    "j .\n");
|
||||
|
||||
static void test(void)
|
||||
{
|
||||
struct kvm_irq_routing *routing;
|
||||
struct kvm_vcpu *vcpu;
|
||||
struct kvm_vm *vm;
|
||||
vm_paddr_t mem;
|
||||
int ret;
|
||||
|
||||
struct kvm_irq_routing_entry ue = {
|
||||
.type = KVM_IRQ_ROUTING_S390_ADAPTER,
|
||||
.gsi = 1,
|
||||
};
|
||||
|
||||
vm = vm_create_with_one_vcpu(&vcpu, guest_code);
|
||||
mem = vm_phy_pages_alloc(vm, 2, 4096 * 42, 0);
|
||||
|
||||
routing = kvm_gsi_routing_create();
|
||||
routing->nr = 1;
|
||||
routing->entries[0] = ue;
|
||||
routing->entries[0].u.adapter.summary_addr = (uintptr_t)mem;
|
||||
routing->entries[0].u.adapter.ind_addr = (uintptr_t)mem;
|
||||
|
||||
routing->entries[0].u.adapter.summary_offset = 4096 * 8;
|
||||
ret = __vm_ioctl(vm, KVM_SET_GSI_ROUTING, routing);
|
||||
ksft_test_result(ret == -1 && errno == EINVAL, "summary offset outside of page\n");
|
||||
|
||||
routing->entries[0].u.adapter.summary_offset -= 4;
|
||||
ret = __vm_ioctl(vm, KVM_SET_GSI_ROUTING, routing);
|
||||
ksft_test_result(ret == 0, "summary offset inside of page\n");
|
||||
|
||||
routing->entries[0].u.adapter.ind_offset = 4096 * 8;
|
||||
ret = __vm_ioctl(vm, KVM_SET_GSI_ROUTING, routing);
|
||||
ksft_test_result(ret == -1 && errno == EINVAL, "ind offset outside of page\n");
|
||||
|
||||
routing->entries[0].u.adapter.ind_offset -= 4;
|
||||
ret = __vm_ioctl(vm, KVM_SET_GSI_ROUTING, routing);
|
||||
ksft_test_result(ret == 0, "ind offset inside of page\n");
|
||||
|
||||
kvm_vm_free(vm);
|
||||
}
|
||||
|
||||
int main(int argc, char *argv[])
|
||||
{
|
||||
TEST_REQUIRE(kvm_has_cap(KVM_CAP_IRQ_ROUTING));
|
||||
|
||||
ksft_print_header();
|
||||
ksft_set_plan(4);
|
||||
test();
|
||||
|
||||
ksft_finished(); /* Print results and exit() accordingly */
|
||||
}
|
||||
Reference in New Issue
Block a user