mirror of
https://git.kernel.org/pub/scm/linux/kernel/git/herbert/cryptodev-2.6.git
synced 2026-05-02 18:17:50 -04:00
Merge tag 'for-linus' of git://git.kernel.org/pub/scm/virt/kvm/kvm
Pull kvm updates from Paolo Bonzini:
"RISCV:
- Use common KVM implementation of MMU memory caches
- SBI v0.2 support for Guest
- Initial KVM selftests support
- Fix to avoid spurious virtual interrupts after clearing hideleg CSR
- Update email address for Anup and Atish
ARM:
- Simplification of the 'vcpu first run' by integrating it into KVM's
'pid change' flow
- Refactoring of the FP and SVE state tracking, also leading to a
simpler state and less shared data between EL1 and EL2 in the nVHE
case
- Tidy up the header file usage for the nVHE hyp object
- New HYP unsharing mechanism, finally allowing pages to be unmapped
from the Stage-1 EL2 page-tables
- Various pKVM cleanups around refcounting and sharing
- A couple of vgic fixes for bugs that would trigger once the vcpu
xarray rework is merged, but not sooner
- Add minimal support for ARMv8.7's PMU extension
- Rework kvm_pgtable initialisation ahead of the NV work
- New selftest for IRQ injection
- Teach selftests about the lack of default IPA space and page sizes
- Expand sysreg selftest to deal with Pointer Authentication
- The usual bunch of cleanups and doc updates
s390:
- fix SIGP sense/start/stop inconsistencies
- cleanups
x86:
- Further cleanup of some function prototypes
- improved gfn_to_pfn_cache with proper invalidation, used by Xen
emulation
- add KVM_IRQ_ROUTING_XEN_EVTCHN and event channel delivery
- completely remove potential TOC/TOU races in nested SVM consistency
checks
- update some PMCs on emulated instructions
- Intel AMX support (joint work between Thomas and Intel)
- large MMU cleanups
- module parameter to disable PMU virtualization
- cleanup register cache
- first part of halt handling cleanups
- Hyper-V enlightened MSR bitmap support for nested hypervisors
Generic:
- clean up Makefiles
- introduce CONFIG_HAVE_KVM_DIRTY_RING
- optimize memslot lookup using a tree
- optimize vCPU array usage by converting to xarray"
* tag 'for-linus' of git://git.kernel.org/pub/scm/virt/kvm/kvm: (268 commits)
x86/fpu: Fix inline prefix warnings
selftest: kvm: Add amx selftest
selftest: kvm: Move struct kvm_x86_state to header
selftest: kvm: Reorder vcpu_load_state steps for AMX
kvm: x86: Disable interception for IA32_XFD on demand
x86/fpu: Provide fpu_sync_guest_vmexit_xfd_state()
kvm: selftests: Add support for KVM_CAP_XSAVE2
kvm: x86: Add support for getting/setting expanded xstate buffer
x86/fpu: Add uabi_size to guest_fpu
kvm: x86: Add CPUID support for Intel AMX
kvm: x86: Add XCR0 support for Intel AMX
kvm: x86: Disable RDMSR interception of IA32_XFD_ERR
kvm: x86: Emulate IA32_XFD_ERR for guest
kvm: x86: Intercept #NM for saving IA32_XFD_ERR
x86/fpu: Prepare xfd_err in struct fpu_guest
kvm: x86: Add emulation for IA32_XFD
x86/fpu: Provide fpu_update_guest_xfd() for IA32_XFD emulation
kvm: x86: Enable dynamic xfeatures at KVM_SET_CPUID2
x86/fpu: Provide fpu_enable_guest_xfd_features() for KVM
x86/fpu: Add guest support to xfd_enable_feature()
...
virt/kvm/Kconfig
@@ -4,6 +4,9 @@
 config HAVE_KVM
 	bool
 
+config HAVE_KVM_PFNCACHE
+	bool
+
 config HAVE_KVM_IRQCHIP
 	bool
 
@@ -13,6 +16,9 @@ config HAVE_KVM_IRQFD
 config HAVE_KVM_IRQ_ROUTING
 	bool
 
+config HAVE_KVM_DIRTY_RING
+	bool
+
 config HAVE_KVM_EVENTFD
 	bool
 	select EVENTFD
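For context, a minimal sketch of how an architecture would opt in to the two new symbols. Per the changelog x86 is the expected first user of both, but this select list is illustrative, not part of the diff:

# arch/<arch>/kvm/Kconfig — illustrative sketch, not part of this diff
config KVM
	tristate "Kernel-based Virtual Machine (KVM) support"
	select HAVE_KVM_PFNCACHE
	select HAVE_KVM_DIRTY_RING
	select HAVE_KVM_EVENTFD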
14  virt/kvm/Makefile.kvm  Normal file
@@ -0,0 +1,14 @@
# SPDX-License-Identifier: GPL-2.0
#
# Makefile for Kernel-based Virtual Machine module
#

KVM ?= ../../../virt/kvm

kvm-y := $(KVM)/kvm_main.o $(KVM)/eventfd.o $(KVM)/binary_stats.o
kvm-$(CONFIG_KVM_VFIO) += $(KVM)/vfio.o
kvm-$(CONFIG_KVM_MMIO) += $(KVM)/coalesced_mmio.o
kvm-$(CONFIG_KVM_ASYNC_PF) += $(KVM)/async_pf.o
kvm-$(CONFIG_HAVE_KVM_IRQ_ROUTING) += $(KVM)/irqchip.o
kvm-$(CONFIG_HAVE_KVM_DIRTY_RING) += $(KVM)/dirty_ring.o
kvm-$(CONFIG_HAVE_KVM_PFNCACHE) += $(KVM)/pfncache.o
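The point of Makefile.kvm (part of the "clean up Makefiles" item in the changelog) is that each architecture's KVM Makefile can pull in the common objects with one include instead of repeating the kvm-y list. A sketch of the consuming side; the path and the arch_*.o names are illustrative:

# arch/<arch>/kvm/Makefile — illustrative sketch, not part of this diff
include $(srctree)/virt/kvm/Makefile.kvm

# Architecture-specific objects are appended on top of the common list:
kvm-y += arch_vcpu.o arch_mmu.o

obj-$(CONFIG_KVM) += kvm.o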
virt/kvm/async_pf.c
@@ -85,7 +85,7 @@ static void async_pf_execute(struct work_struct *work)
 
 	trace_kvm_async_pf_completed(addr, cr2_or_gpa);
 
-	rcuwait_wake_up(&vcpu->wait);
+	__kvm_vcpu_wake_up(vcpu);
 
 	mmput(mm);
 	kvm_put_kvm(vcpu->kvm);
virt/kvm/dirty_ring.c
@@ -9,7 +9,7 @@
 #include <linux/vmalloc.h>
 #include <linux/kvm_dirty_ring.h>
 #include <trace/events/kvm.h>
-#include "mmu_lock.h"
+#include "kvm_mm.h"
 
 int __weak kvm_cpu_dirty_log_size(void)
 {
@@ -36,15 +36,6 @@ static bool kvm_dirty_ring_full(struct kvm_dirty_ring *ring)
 	return kvm_dirty_ring_used(ring) >= ring->size;
 }
 
-struct kvm_dirty_ring *kvm_dirty_ring_get(struct kvm *kvm)
-{
-	struct kvm_vcpu *vcpu = kvm_get_running_vcpu();
-
-	WARN_ON_ONCE(vcpu->kvm != kvm);
-
-	return &vcpu->dirty_ring;
-}
-
 static void kvm_reset_dirty_gfn(struct kvm *kvm, u32 slot, u64 offset, u64 mask)
 {
 	struct kvm_memory_slot *memslot;
1144  virt/kvm/kvm_main.c
File diff suppressed because it is too large
44  virt/kvm/kvm_mm.h  Normal file
@@ -0,0 +1,44 @@
// SPDX-License-Identifier: GPL-2.0-only

#ifndef __KVM_MM_H__
#define __KVM_MM_H__ 1

/*
 * Architectures can choose whether to use an rwlock or spinlock
 * for the mmu_lock. These macros, for use in common code
 * only, avoid using #ifdefs in places that must deal with
 * multiple architectures.
 */

#ifdef KVM_HAVE_MMU_RWLOCK
#define KVM_MMU_LOCK_INIT(kvm)		rwlock_init(&(kvm)->mmu_lock)
#define KVM_MMU_LOCK(kvm)		write_lock(&(kvm)->mmu_lock)
#define KVM_MMU_UNLOCK(kvm)		write_unlock(&(kvm)->mmu_lock)
#define KVM_MMU_READ_LOCK(kvm)		read_lock(&(kvm)->mmu_lock)
#define KVM_MMU_READ_UNLOCK(kvm)	read_unlock(&(kvm)->mmu_lock)
#else
#define KVM_MMU_LOCK_INIT(kvm)		spin_lock_init(&(kvm)->mmu_lock)
#define KVM_MMU_LOCK(kvm)		spin_lock(&(kvm)->mmu_lock)
#define KVM_MMU_UNLOCK(kvm)		spin_unlock(&(kvm)->mmu_lock)
#define KVM_MMU_READ_LOCK(kvm)		spin_lock(&(kvm)->mmu_lock)
#define KVM_MMU_READ_UNLOCK(kvm)	spin_unlock(&(kvm)->mmu_lock)
#endif /* KVM_HAVE_MMU_RWLOCK */

kvm_pfn_t hva_to_pfn(unsigned long addr, bool atomic, bool *async,
		     bool write_fault, bool *writable);

#ifdef CONFIG_HAVE_KVM_PFNCACHE
void gfn_to_pfn_cache_invalidate_start(struct kvm *kvm,
				       unsigned long start,
				       unsigned long end,
				       bool may_block);
#else
static inline void gfn_to_pfn_cache_invalidate_start(struct kvm *kvm,
						     unsigned long start,
						     unsigned long end,
						     bool may_block)
{
}
#endif /* CONFIG_HAVE_KVM_PFNCACHE */

#endif /* __KVM_MM_H__ */
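A minimal sketch (not part of the diff) of how common code is meant to use these wrappers: the same call site compiles to write_lock() on architectures that define KVM_HAVE_MMU_RWLOCK and to spin_lock() everywhere else. example_mmu_op() is a hypothetical caller:

/* Illustrative sketch only; example_mmu_op() is not in the tree. */
static void example_mmu_op(struct kvm *kvm)
{
	KVM_MMU_LOCK(kvm);	/* write_lock() or spin_lock(), per arch */
	/* ... modify common MMU state under exclusive protection ... */
	KVM_MMU_UNLOCK(kvm);
}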
virt/kvm/mmu_lock.h (deleted)
@@ -1,23 +0,0 @@
// SPDX-License-Identifier: GPL-2.0-only

#ifndef KVM_MMU_LOCK_H
#define KVM_MMU_LOCK_H 1

/*
 * Architectures can choose whether to use an rwlock or spinlock
 * for the mmu_lock. These macros, for use in common code
 * only, avoids using #ifdefs in places that must deal with
 * multiple architectures.
 */

#ifdef KVM_HAVE_MMU_RWLOCK
#define KVM_MMU_LOCK_INIT(kvm)	rwlock_init(&(kvm)->mmu_lock)
#define KVM_MMU_LOCK(kvm)	write_lock(&(kvm)->mmu_lock)
#define KVM_MMU_UNLOCK(kvm)	write_unlock(&(kvm)->mmu_lock)
#else
#define KVM_MMU_LOCK_INIT(kvm)	spin_lock_init(&(kvm)->mmu_lock)
#define KVM_MMU_LOCK(kvm)	spin_lock(&(kvm)->mmu_lock)
#define KVM_MMU_UNLOCK(kvm)	spin_unlock(&(kvm)->mmu_lock)
#endif /* KVM_HAVE_MMU_RWLOCK */

#endif
337  virt/kvm/pfncache.c  Normal file
@@ -0,0 +1,337 @@
// SPDX-License-Identifier: GPL-2.0-only
/*
 * Kernel-based Virtual Machine driver for Linux
 *
 * This module enables kernel and guest-mode vCPU access to guest physical
 * memory with suitable invalidation mechanisms.
 *
 * Copyright © 2021 Amazon.com, Inc. or its affiliates.
 *
 * Authors:
 *   David Woodhouse <dwmw2@infradead.org>
 */

#include <linux/kvm_host.h>
#include <linux/kvm.h>
#include <linux/highmem.h>
#include <linux/module.h>
#include <linux/errno.h>

#include "kvm_mm.h"

/*
 * MMU notifier 'invalidate_range_start' hook.
 */
void gfn_to_pfn_cache_invalidate_start(struct kvm *kvm, unsigned long start,
				       unsigned long end, bool may_block)
{
	DECLARE_BITMAP(vcpu_bitmap, KVM_MAX_VCPUS);
	struct gfn_to_pfn_cache *gpc;
	bool wake_vcpus = false;

	spin_lock(&kvm->gpc_lock);
	list_for_each_entry(gpc, &kvm->gpc_list, list) {
		write_lock_irq(&gpc->lock);

		/* Only a single page so no need to care about length */
		if (gpc->valid && !is_error_noslot_pfn(gpc->pfn) &&
		    gpc->uhva >= start && gpc->uhva < end) {
			gpc->valid = false;

			/*
			 * If a guest vCPU could be using the physical address,
			 * it needs to be woken.
			 */
			if (gpc->guest_uses_pa) {
				if (!wake_vcpus) {
					wake_vcpus = true;
					bitmap_zero(vcpu_bitmap, KVM_MAX_VCPUS);
				}
				__set_bit(gpc->vcpu->vcpu_idx, vcpu_bitmap);
			}

			/*
			 * We cannot call mark_page_dirty() from here because
			 * this physical CPU might not have an active vCPU
			 * with which to do the KVM dirty tracking.
			 *
			 * Neither is there any point in telling the kernel MM
			 * that the underlying page is dirty. A vCPU in guest
			 * mode might still be writing to it up to the point
			 * where we wake them a few lines further down anyway.
			 *
			 * So all the dirty marking happens on the unmap.
			 */
		}
		write_unlock_irq(&gpc->lock);
	}
	spin_unlock(&kvm->gpc_lock);

	if (wake_vcpus) {
		unsigned int req = KVM_REQ_GPC_INVALIDATE;
		bool called;

		/*
		 * If the OOM reaper is active, then all vCPUs should have
		 * been stopped already, so perform the request without
		 * KVM_REQUEST_WAIT and be sad if any needed to be woken.
		 */
		if (!may_block)
			req &= ~KVM_REQUEST_WAIT;

		called = kvm_make_vcpus_request_mask(kvm, req, vcpu_bitmap);

		WARN_ON_ONCE(called && !may_block);
	}
}

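For the other side of that request, a sketch of the expected vCPU run-loop handling. The arch loop itself is not part of this diff; the point of KVM_REQ_GPC_INVALIDATE is simply to force the vCPU out of guest mode, so a consumer need do nothing beyond observing it:

	/* Illustrative sketch of the arch run-loop consumer, not from this diff. */
	if (kvm_check_request(KVM_REQ_GPC_INVALIDATE, vcpu))
		; /* Nothing to do: forcing a guest-mode exit was the point. */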
bool kvm_gfn_to_pfn_cache_check(struct kvm *kvm, struct gfn_to_pfn_cache *gpc,
				gpa_t gpa, unsigned long len)
{
	struct kvm_memslots *slots = kvm_memslots(kvm);

	if ((gpa & ~PAGE_MASK) + len > PAGE_SIZE)
		return false;

	if (gpc->gpa != gpa || gpc->generation != slots->generation ||
	    kvm_is_error_hva(gpc->uhva))
		return false;

	if (!gpc->valid)
		return false;

	return true;
}
EXPORT_SYMBOL_GPL(kvm_gfn_to_pfn_cache_check);

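Callers are expected to pair this check with a refresh under gpc->lock: validate under the read lock, and on a miss drop the lock, refresh, and retry. A usage sketch assuming the functions above; the loop structure and the example_* naming are illustrative:

static int example_access(struct kvm *kvm, struct gfn_to_pfn_cache *gpc,
			  gpa_t gpa, unsigned long len)
{
	read_lock(&gpc->lock);
	while (!kvm_gfn_to_pfn_cache_check(kvm, gpc, gpa, len)) {
		read_unlock(&gpc->lock);

		/* Redo the GPA→HVA→PFN lookup; fails if the GPA is unmapped */
		if (kvm_gfn_to_pfn_cache_refresh(kvm, gpc, gpa, len, false))
			return -EFAULT;

		read_lock(&gpc->lock);
	}

	/* gpc->khva (if kernel_map was requested) is stable until unlock */

	read_unlock(&gpc->lock);
	return 0;
}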
static void __release_gpc(struct kvm *kvm, kvm_pfn_t pfn, void *khva,
			  gpa_t gpa, bool dirty)
{
	/* Unmap the old page if it was mapped before, and release it */
	if (!is_error_noslot_pfn(pfn)) {
		if (khva) {
			if (pfn_valid(pfn))
				kunmap(pfn_to_page(pfn));
#ifdef CONFIG_HAS_IOMEM
			else
				memunmap(khva);
#endif
		}

		kvm_release_pfn(pfn, dirty);
		if (dirty)
			mark_page_dirty(kvm, gpa);
	}
}

static kvm_pfn_t hva_to_pfn_retry(struct kvm *kvm, unsigned long uhva)
{
	unsigned long mmu_seq;
	kvm_pfn_t new_pfn;
	int retry;

	do {
		mmu_seq = kvm->mmu_notifier_seq;
		smp_rmb();

		/* We always request a writeable mapping */
		new_pfn = hva_to_pfn(uhva, false, NULL, true, NULL);
		if (is_error_noslot_pfn(new_pfn))
			break;

		KVM_MMU_READ_LOCK(kvm);
		retry = mmu_notifier_retry_hva(kvm, mmu_seq, uhva);
		KVM_MMU_READ_UNLOCK(kvm);
		if (!retry)
			break;

		cond_resched();
	} while (1);

	return new_pfn;
}

int kvm_gfn_to_pfn_cache_refresh(struct kvm *kvm, struct gfn_to_pfn_cache *gpc,
				 gpa_t gpa, unsigned long len, bool dirty)
{
	struct kvm_memslots *slots = kvm_memslots(kvm);
	unsigned long page_offset = gpa & ~PAGE_MASK;
	kvm_pfn_t old_pfn, new_pfn;
	unsigned long old_uhva;
	gpa_t old_gpa;
	void *old_khva;
	bool old_valid, old_dirty;
	int ret = 0;

	/*
	 * It must fit within a single page. The 'len' argument is
	 * only to enforce that.
	 */
	if (page_offset + len > PAGE_SIZE)
		return -EINVAL;

	write_lock_irq(&gpc->lock);

	old_gpa = gpc->gpa;
	old_pfn = gpc->pfn;
	old_khva = gpc->khva - offset_in_page(gpc->khva);
	old_uhva = gpc->uhva;
	old_valid = gpc->valid;
	old_dirty = gpc->dirty;

	/* If the userspace HVA is invalid, refresh that first */
	if (gpc->gpa != gpa || gpc->generation != slots->generation ||
	    kvm_is_error_hva(gpc->uhva)) {
		gfn_t gfn = gpa_to_gfn(gpa);

		gpc->dirty = false;
		gpc->gpa = gpa;
		gpc->generation = slots->generation;
		gpc->memslot = __gfn_to_memslot(slots, gfn);
		gpc->uhva = gfn_to_hva_memslot(gpc->memslot, gfn);

		if (kvm_is_error_hva(gpc->uhva)) {
			ret = -EFAULT;
			goto out;
		}

		gpc->uhva += page_offset;
	}

	/*
	 * If the userspace HVA changed or the PFN was already invalid,
	 * drop the lock and do the HVA to PFN lookup again.
	 */
	if (!old_valid || old_uhva != gpc->uhva) {
		unsigned long uhva = gpc->uhva;
		void *new_khva = NULL;

		/* Placeholders for "hva is valid but not yet mapped" */
		gpc->pfn = KVM_PFN_ERR_FAULT;
		gpc->khva = NULL;
		gpc->valid = true;

		write_unlock_irq(&gpc->lock);

		new_pfn = hva_to_pfn_retry(kvm, uhva);
		if (is_error_noslot_pfn(new_pfn)) {
			ret = -EFAULT;
			goto map_done;
		}

		if (gpc->kernel_map) {
			if (new_pfn == old_pfn) {
				new_khva = old_khva;
				old_pfn = KVM_PFN_ERR_FAULT;
				old_khva = NULL;
			} else if (pfn_valid(new_pfn)) {
				new_khva = kmap(pfn_to_page(new_pfn));
#ifdef CONFIG_HAS_IOMEM
			} else {
				new_khva = memremap(pfn_to_hpa(new_pfn), PAGE_SIZE, MEMREMAP_WB);
#endif
			}
			if (new_khva)
				new_khva += page_offset;
			else
				ret = -EFAULT;
		}

 map_done:
		write_lock_irq(&gpc->lock);
		if (ret) {
			gpc->valid = false;
			gpc->pfn = KVM_PFN_ERR_FAULT;
			gpc->khva = NULL;
		} else {
			/* At this point, gpc->valid may already have been cleared */
			gpc->pfn = new_pfn;
			gpc->khva = new_khva;
		}
	} else {
		/* If the HVA→PFN mapping was already valid, don't unmap it. */
		old_pfn = KVM_PFN_ERR_FAULT;
		old_khva = NULL;
	}

 out:
	if (ret)
		gpc->dirty = false;
	else
		gpc->dirty = dirty;

	write_unlock_irq(&gpc->lock);

	__release_gpc(kvm, old_pfn, old_khva, old_gpa, old_dirty);

	return ret;
}
EXPORT_SYMBOL_GPL(kvm_gfn_to_pfn_cache_refresh);

void kvm_gfn_to_pfn_cache_unmap(struct kvm *kvm, struct gfn_to_pfn_cache *gpc)
{
	void *old_khva;
	kvm_pfn_t old_pfn;
	bool old_dirty;
	gpa_t old_gpa;

	write_lock_irq(&gpc->lock);

	gpc->valid = false;

	old_khva = gpc->khva - offset_in_page(gpc->khva);
	old_dirty = gpc->dirty;
	old_gpa = gpc->gpa;
	old_pfn = gpc->pfn;

	/*
	 * We can leave the GPA → uHVA map cache intact but the PFN
	 * lookup will need to be redone even for the same page.
	 */
	gpc->khva = NULL;
	gpc->pfn = KVM_PFN_ERR_FAULT;

	write_unlock_irq(&gpc->lock);

	__release_gpc(kvm, old_pfn, old_khva, old_gpa, old_dirty);
}
EXPORT_SYMBOL_GPL(kvm_gfn_to_pfn_cache_unmap);

int kvm_gfn_to_pfn_cache_init(struct kvm *kvm, struct gfn_to_pfn_cache *gpc,
			      struct kvm_vcpu *vcpu, bool guest_uses_pa,
			      bool kernel_map, gpa_t gpa, unsigned long len,
			      bool dirty)
{
	if (!gpc->active) {
		rwlock_init(&gpc->lock);

		gpc->khva = NULL;
		gpc->pfn = KVM_PFN_ERR_FAULT;
		gpc->uhva = KVM_HVA_ERR_BAD;
		gpc->vcpu = vcpu;
		gpc->kernel_map = kernel_map;
		gpc->guest_uses_pa = guest_uses_pa;
		gpc->valid = false;
		gpc->active = true;

		spin_lock(&kvm->gpc_lock);
		list_add(&gpc->list, &kvm->gpc_list);
		spin_unlock(&kvm->gpc_lock);
	}
	return kvm_gfn_to_pfn_cache_refresh(kvm, gpc, gpa, len, dirty);
}
EXPORT_SYMBOL_GPL(kvm_gfn_to_pfn_cache_init);

void kvm_gfn_to_pfn_cache_destroy(struct kvm *kvm, struct gfn_to_pfn_cache *gpc)
{
	if (gpc->active) {
		spin_lock(&kvm->gpc_lock);
		list_del(&gpc->list);
		spin_unlock(&kvm->gpc_lock);

		kvm_gfn_to_pfn_cache_unmap(kvm, gpc);
		gpc->active = false;
	}
}
EXPORT_SYMBOL_GPL(kvm_gfn_to_pfn_cache_destroy);
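Putting the pieces together, a lifecycle sketch for a consumer such as the Xen shared-info emulation mentioned in the changelog: initialize the cache once against a guest physical address, refresh whenever the check fails, and tear it down when the consumer goes away. The example_* names are hypothetical:

/* Illustrative lifecycle sketch, not part of this diff. */
static struct gfn_to_pfn_cache example_cache;

static int example_map(struct kvm *kvm, struct kvm_vcpu *vcpu, gpa_t gpa)
{
	/* guest_uses_pa: wake vCPUs on invalidation; kernel_map: want khva */
	return kvm_gfn_to_pfn_cache_init(kvm, &example_cache, vcpu,
					 true, true, gpa, PAGE_SIZE, false);
}

static void example_unmap(struct kvm *kvm)
{
	kvm_gfn_to_pfn_cache_destroy(kvm, &example_cache);
}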