Just one series here - Mike Rapoport has taught KEXEC handover to
preserve vmalloc allocations across handover.
 -----BEGIN PGP SIGNATURE-----
 
 iHUEABYKAB0WIQTTMBEPP41GrTpTJgfdBJ7gKXxAjgUCaOmDWAAKCRDdBJ7gKXxA
 jh+MAQDUPBj3mFm238CXI5DC1gJ3ETe3NJjJvfzIjLs51c+dFgD+PUuvDA0GUtKH
 LCl6T+HJXh2FgGn1F2Kl/0hwPtEvuA4=
 =HYr7
 -----END PGP SIGNATURE-----

Merge tag 'mm-nonmm-stable-2025-10-10-15-03' of git://git.kernel.org/pub/scm/linux/kernel/git/akpm/mm

Pull more updates from Andrew Morton:
 "Just one series here - Mike Rappoport has taught KEXEC handover to
  preserve vmalloc allocations across handover"

* tag 'mm-nonmm-stable-2025-10-10-15-03' of git://git.kernel.org/pub/scm/linux/kernel/git/akpm/mm:
  lib/test_kho: use kho_preserve_vmalloc instead of storing addresses in fdt
  kho: add support for preserving vmalloc allocations
  kho: replace kho_preserve_phys() with kho_preserve_pages()
  kho: check if kho is finalized in __kho_preserve_order()
  MAINTAINERS, .mailmap: update Umang's email address
Linus Torvalds 2025-10-11 10:27:52 -07:00
commit ae13bd2310
6 changed files with 384 additions and 60 deletions
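
For orientation before the per-file diffs: the new interface pairs a preservation call in the outgoing kernel with a restoration call in the incoming one, and the fixed-size struct kho_vmalloc handle is what travels across kexec (in practice inside the KHO FDT, as lib/test_kho.c does below). A minimal sketch of the round trip, assuming a KHO-enabled kernel; the buffer size and the example_* names are illustrative, not part of the commit:

	#include <linux/kexec_handover.h>
	#include <linux/vmalloc.h>

	static struct kho_vmalloc example_handle;

	/* Outgoing kernel: preserve a vmalloc'ed buffer before KHO finalization. */
	static int example_preserve(void)
	{
		void *buf = vmalloc(16 * PAGE_SIZE);	/* illustrative size */

		if (!buf)
			return -ENOMEM;
		/* Fills example_handle; the handle must then be handed to the
		 * next kernel, e.g. stored as an FDT property. */
		return kho_preserve_vmalloc(buf, &example_handle);
	}

	/* Incoming kernel: rebuild the mapping from the handed-over handle. */
	static void *example_restore(const struct kho_vmalloc *handle)
	{
		return kho_restore_vmalloc(handle);	/* NULL on failure */
	}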

.mailmap

@@ -803,6 +803,7 @@ Tvrtko Ursulin <tursulin@ursulin.net> <tvrtko.ursulin@onelan.co.uk>
 Tvrtko Ursulin <tursulin@ursulin.net> <tvrtko@ursulin.net>
 Tycho Andersen <tycho@tycho.pizza> <tycho@tycho.ws>
 Tzung-Bi Shih <tzungbi@kernel.org> <tzungbi@google.com>
+Umang Jain <uajain@igalia.com> <umang.jain@ideasonboard.com>
 Uwe Kleine-König <ukleinek@informatik.uni-freiburg.de>
 Uwe Kleine-König <u.kleine-koenig@baylibre.com> <ukleinek@baylibre.com>
 Uwe Kleine-König <u.kleine-koenig@pengutronix.de>

MAINTAINERS

@@ -23911,7 +23911,7 @@ F: drivers/media/i2c/imx274.c
 SONY IMX283 SENSOR DRIVER
 M: Kieran Bingham <kieran.bingham@ideasonboard.com>
-M: Umang Jain <umang.jain@ideasonboard.com>
+R: Umang Jain <uajain@igalia.com>
 L: linux-media@vger.kernel.org
 S: Maintained
 T: git git://linuxtv.org/media.git

include/linux/kexec_handover.h

@@ -18,6 +18,7 @@ enum kho_event {

 struct folio;
 struct notifier_block;
+struct page;

 #define DECLARE_KHOSER_PTR(name, type) \
 	union {                        \
@@ -38,13 +39,24 @@ struct notifier_block;

 struct kho_serialization;
+struct kho_vmalloc_chunk;
+
+struct kho_vmalloc {
+	DECLARE_KHOSER_PTR(first, struct kho_vmalloc_chunk *);
+	unsigned int total_pages;
+	unsigned short flags;
+	unsigned short order;
+};

 #ifdef CONFIG_KEXEC_HANDOVER
 bool kho_is_enabled(void);
 bool is_kho_boot(void);

 int kho_preserve_folio(struct folio *folio);
-int kho_preserve_phys(phys_addr_t phys, size_t size);
+int kho_preserve_pages(struct page *page, unsigned int nr_pages);
+int kho_preserve_vmalloc(void *ptr, struct kho_vmalloc *preservation);
 struct folio *kho_restore_folio(phys_addr_t phys);
+struct page *kho_restore_pages(phys_addr_t phys, unsigned int nr_pages);
+void *kho_restore_vmalloc(const struct kho_vmalloc *preservation);
 int kho_add_subtree(struct kho_serialization *ser, const char *name, void *fdt);
 int kho_retrieve_subtree(const char *name, phys_addr_t *phys);
@@ -71,7 +83,13 @@ static inline int kho_preserve_folio(struct folio *folio)
 	return -EOPNOTSUPP;
 }

-static inline int kho_preserve_phys(phys_addr_t phys, size_t size)
+static inline int kho_preserve_pages(struct page *page, unsigned int nr_pages)
+{
+	return -EOPNOTSUPP;
+}
+
+static inline int kho_preserve_vmalloc(void *ptr,
+				       struct kho_vmalloc *preservation)
 {
 	return -EOPNOTSUPP;
 }
@@ -81,6 +99,17 @@ static inline struct folio *kho_restore_folio(phys_addr_t phys)
 	return NULL;
 }

+static inline struct page *kho_restore_pages(phys_addr_t phys,
+					     unsigned int nr_pages)
+{
+	return NULL;
+}
+
+static inline void *kho_restore_vmalloc(const struct kho_vmalloc *preservation)
+{
+	return NULL;
+}
+
 static inline int kho_add_subtree(struct kho_serialization *ser,
 				  const char *name, void *fdt)
 {
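
The !CONFIG_KEXEC_HANDOVER stubs above mean callers can use the API unconditionally. A hedged sketch of how the new pages-based pair fits together; the two halves run in different kernels and are shown side by side only to illustrate the pairing (the example_* helpers are hypothetical):

	/* Before kexec: preserve nr contiguous order-0 pages. */
	static int example_save_pages(struct page *first, unsigned int nr)
	{
		/* Returns -EBUSY once the KHO FDT has been finalized. */
		return kho_preserve_pages(first, nr);
	}

	/* After kexec: restore them at the physical address handed over. */
	static struct page *example_load_pages(phys_addr_t phys, unsigned int nr)
	{
		return kho_restore_pages(phys, nr);	/* NULL on failure */
	}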

kernel/kexec_handover.c

@@ -18,6 +18,7 @@
 #include <linux/memblock.h>
 #include <linux/notifier.h>
 #include <linux/page-isolation.h>
+#include <linux/vmalloc.h>

 #include <asm/early_ioremap.h>
@@ -107,6 +108,29 @@ struct kho_serialization {
 	struct khoser_mem_chunk *preserved_mem_map;
 };

+struct kho_out {
+	struct blocking_notifier_head chain_head;
+
+	struct dentry *dir;
+
+	struct mutex lock; /* protects KHO FDT finalization */
+
+	struct kho_serialization ser;
+	bool finalized;
+};
+
+static struct kho_out kho_out = {
+	.chain_head = BLOCKING_NOTIFIER_INIT(kho_out.chain_head),
+	.lock = __MUTEX_INITIALIZER(kho_out.lock),
+	.ser = {
+		.fdt_list = LIST_HEAD_INIT(kho_out.ser.fdt_list),
+		.track = {
+			.orders = XARRAY_INIT(kho_out.ser.track.orders, 0),
+		},
+	},
+	.finalized = false,
+};
+
 static void *xa_load_or_alloc(struct xarray *xa, unsigned long index, size_t sz)
 {
 	void *elm, *res;
@@ -165,6 +189,9 @@ static int __kho_preserve_order(struct kho_mem_track *track, unsigned long pfn,

 	might_sleep();

+	if (kho_out.finalized)
+		return -EBUSY;
+
 	physxa = xa_load(&track->orders, order);
 	if (!physxa) {
 		int err;
@@ -248,6 +275,37 @@ struct folio *kho_restore_folio(phys_addr_t phys)
 }
 EXPORT_SYMBOL_GPL(kho_restore_folio);

+/**
+ * kho_restore_pages - restore list of contiguous order 0 pages.
+ * @phys: physical address of the first page.
+ * @nr_pages: number of pages.
+ *
+ * Restore a contiguous list of order 0 pages that was preserved with
+ * kho_preserve_pages().
+ *
+ * Return: pointer to the first struct page on success, NULL on failure.
+ */
+struct page *kho_restore_pages(phys_addr_t phys, unsigned int nr_pages)
+{
+	const unsigned long start_pfn = PHYS_PFN(phys);
+	const unsigned long end_pfn = start_pfn + nr_pages;
+	unsigned long pfn = start_pfn;
+
+	while (pfn < end_pfn) {
+		const unsigned int order =
+			min(count_trailing_zeros(pfn), ilog2(end_pfn - pfn));
+		struct page *page = kho_restore_page(PFN_PHYS(pfn));
+
+		if (!page)
+			return NULL;
+		split_page(page, order);
+		pfn += 1 << order;
+	}
+
+	return pfn_to_page(start_pfn);
+}
+EXPORT_SYMBOL_GPL(kho_restore_pages);
+
 /* Serialize and deserialize struct kho_mem_phys across kexec
  *
  * Record all the bitmaps in a linked list of pages for the next kernel to
@@ -667,29 +725,6 @@ int kho_add_subtree(struct kho_serialization *ser, const char *name, void *fdt)
 }
 EXPORT_SYMBOL_GPL(kho_add_subtree);

-struct kho_out {
-	struct blocking_notifier_head chain_head;
-
-	struct dentry *dir;
-
-	struct mutex lock; /* protects KHO FDT finalization */
-
-	struct kho_serialization ser;
-	bool finalized;
-};
-
-static struct kho_out kho_out = {
-	.chain_head = BLOCKING_NOTIFIER_INIT(kho_out.chain_head),
-	.lock = __MUTEX_INITIALIZER(kho_out.lock),
-	.ser = {
-		.fdt_list = LIST_HEAD_INIT(kho_out.ser.fdt_list),
-		.track = {
-			.orders = XARRAY_INIT(kho_out.ser.track.orders, 0),
-		},
-	},
-	.finalized = false,
-};
-
 int register_kho_notifier(struct notifier_block *nb)
 {
 	return blocking_notifier_chain_register(&kho_out.chain_head, nb);
@@ -717,37 +752,28 @@ int kho_preserve_folio(struct folio *folio)
 	const unsigned int order = folio_order(folio);
 	struct kho_mem_track *track = &kho_out.ser.track;

-	if (kho_out.finalized)
-		return -EBUSY;
-
 	return __kho_preserve_order(track, pfn, order);
 }
 EXPORT_SYMBOL_GPL(kho_preserve_folio);

 /**
- * kho_preserve_phys - preserve a physically contiguous range across kexec.
- * @phys: physical address of the range.
- * @size: size of the range.
+ * kho_preserve_pages - preserve contiguous pages across kexec
+ * @page: first page in the list.
+ * @nr_pages: number of pages.
  *
- * Instructs KHO to preserve the memory range from @phys to @phys + @size
- * across kexec.
+ * Preserve a contiguous list of order 0 pages. Must be restored using
+ * kho_restore_pages() to ensure the pages are restored properly as order 0.
  *
  * Return: 0 on success, error code on failure
  */
-int kho_preserve_phys(phys_addr_t phys, size_t size)
+int kho_preserve_pages(struct page *page, unsigned int nr_pages)
 {
-	unsigned long pfn = PHYS_PFN(phys);
-	unsigned long failed_pfn = 0;
-	const unsigned long start_pfn = pfn;
-	const unsigned long end_pfn = PHYS_PFN(phys + size);
-	int err = 0;
 	struct kho_mem_track *track = &kho_out.ser.track;
+	const unsigned long start_pfn = page_to_pfn(page);
+	const unsigned long end_pfn = start_pfn + nr_pages;
+	unsigned long pfn = start_pfn;
+	unsigned long failed_pfn = 0;
+	int err = 0;

-	if (kho_out.finalized)
-		return -EBUSY;
-
-	if (!PAGE_ALIGNED(phys) || !PAGE_ALIGNED(size))
-		return -EINVAL;
-
 	while (pfn < end_pfn) {
 		const unsigned int order =
@@ -767,7 +793,256 @@ int kho_preserve_phys(phys_addr_t phys, size_t size)

 	return err;
 }
-EXPORT_SYMBOL_GPL(kho_preserve_phys);
+EXPORT_SYMBOL_GPL(kho_preserve_pages);
+
+struct kho_vmalloc_hdr {
+	DECLARE_KHOSER_PTR(next, struct kho_vmalloc_chunk *);
+};
+
+#define KHO_VMALLOC_SIZE \
+	((PAGE_SIZE - sizeof(struct kho_vmalloc_hdr)) / \
+	 sizeof(phys_addr_t))
+
+struct kho_vmalloc_chunk {
+	struct kho_vmalloc_hdr hdr;
+	phys_addr_t phys[KHO_VMALLOC_SIZE];
+};
+
+static_assert(sizeof(struct kho_vmalloc_chunk) == PAGE_SIZE);
+
+/* vmalloc flags KHO supports */
+#define KHO_VMALLOC_SUPPORTED_FLAGS	(VM_ALLOC | VM_ALLOW_HUGE_VMAP)
+
+/* KHO internal flags for vmalloc preservations */
+#define KHO_VMALLOC_ALLOC	0x0001
+#define KHO_VMALLOC_HUGE_VMAP	0x0002
+
+static unsigned short vmalloc_flags_to_kho(unsigned int vm_flags)
+{
+	unsigned short kho_flags = 0;
+
+	if (vm_flags & VM_ALLOC)
+		kho_flags |= KHO_VMALLOC_ALLOC;
+	if (vm_flags & VM_ALLOW_HUGE_VMAP)
+		kho_flags |= KHO_VMALLOC_HUGE_VMAP;
+
+	return kho_flags;
+}
+
+static unsigned int kho_flags_to_vmalloc(unsigned short kho_flags)
+{
+	unsigned int vm_flags = 0;
+
+	if (kho_flags & KHO_VMALLOC_ALLOC)
+		vm_flags |= VM_ALLOC;
+	if (kho_flags & KHO_VMALLOC_HUGE_VMAP)
+		vm_flags |= VM_ALLOW_HUGE_VMAP;
+
+	return vm_flags;
+}
+
+static struct kho_vmalloc_chunk *new_vmalloc_chunk(struct kho_vmalloc_chunk *cur)
+{
+	struct kho_vmalloc_chunk *chunk;
+	int err;
+
+	chunk = (struct kho_vmalloc_chunk *)get_zeroed_page(GFP_KERNEL);
+	if (!chunk)
+		return NULL;
+
+	err = kho_preserve_pages(virt_to_page(chunk), 1);
+	if (err)
+		goto err_free;
+	if (cur)
+		KHOSER_STORE_PTR(cur->hdr.next, chunk);
+	return chunk;
+
+err_free:
+	free_page((unsigned long)chunk);
+	return NULL;
+}
+
+static void kho_vmalloc_unpreserve_chunk(struct kho_vmalloc_chunk *chunk)
+{
+	struct kho_mem_track *track = &kho_out.ser.track;
+	unsigned long pfn = PHYS_PFN(virt_to_phys(chunk));
+
+	__kho_unpreserve(track, pfn, pfn + 1);
+
+	for (int i = 0; chunk->phys[i]; i++) {
+		pfn = PHYS_PFN(chunk->phys[i]);
+		__kho_unpreserve(track, pfn, pfn + 1);
+	}
+}
+
+static void kho_vmalloc_free_chunks(struct kho_vmalloc *kho_vmalloc)
+{
+	struct kho_vmalloc_chunk *chunk = KHOSER_LOAD_PTR(kho_vmalloc->first);
+
+	while (chunk) {
+		struct kho_vmalloc_chunk *tmp = chunk;
+
+		kho_vmalloc_unpreserve_chunk(chunk);
+
+		chunk = KHOSER_LOAD_PTR(chunk->hdr.next);
+		free_page((unsigned long)tmp);
+	}
+}
+
+/**
+ * kho_preserve_vmalloc - preserve memory allocated with vmalloc() across kexec
+ * @ptr: pointer to the area in vmalloc address space
+ * @preservation: placeholder for preservation metadata
+ *
+ * Instructs KHO to preserve the area in vmalloc address space at @ptr. The
+ * physical pages mapped at @ptr will be preserved and on successful return
+ * @preservation will hold the physical address of a structure that describes
+ * the preservation.
+ *
+ * NOTE: Memory allocated with the vmalloc_node() variants cannot be reliably
+ * restored on the same node.
+ *
+ * Return: 0 on success, error code on failure
+ */
+int kho_preserve_vmalloc(void *ptr, struct kho_vmalloc *preservation)
+{
+	struct kho_vmalloc_chunk *chunk;
+	struct vm_struct *vm = find_vm_area(ptr);
+	unsigned int order, flags, nr_contig_pages;
+	unsigned int idx = 0;
+	int err;
+
+	if (!vm)
+		return -EINVAL;
+
+	if (vm->flags & ~KHO_VMALLOC_SUPPORTED_FLAGS)
+		return -EOPNOTSUPP;
+
+	flags = vmalloc_flags_to_kho(vm->flags);
+	order = get_vm_area_page_order(vm);
+
+	chunk = new_vmalloc_chunk(NULL);
+	if (!chunk)
+		return -ENOMEM;
+	KHOSER_STORE_PTR(preservation->first, chunk);
+
+	nr_contig_pages = (1 << order);
+	for (int i = 0; i < vm->nr_pages; i += nr_contig_pages) {
+		phys_addr_t phys = page_to_phys(vm->pages[i]);
+
+		err = kho_preserve_pages(vm->pages[i], nr_contig_pages);
+		if (err)
+			goto err_free;
+
+		chunk->phys[idx++] = phys;
+		if (idx == ARRAY_SIZE(chunk->phys)) {
+			chunk = new_vmalloc_chunk(chunk);
+			if (!chunk)
+				goto err_free;
+			idx = 0;
+		}
+	}
+
+	preservation->total_pages = vm->nr_pages;
+	preservation->flags = flags;
+	preservation->order = order;
+
+	return 0;
+
+err_free:
+	kho_vmalloc_free_chunks(preservation);
+	return err;
+}
+EXPORT_SYMBOL_GPL(kho_preserve_vmalloc);
+
+/**
+ * kho_restore_vmalloc - recreates and populates an area in vmalloc address
+ * space from the preserved memory.
+ * @preservation: preservation metadata.
+ *
+ * Recreates an area in vmalloc address space and populates it with memory that
+ * was preserved using kho_preserve_vmalloc().
+ *
+ * Return: pointer to the area in the vmalloc address space, NULL on failure.
+ */
+void *kho_restore_vmalloc(const struct kho_vmalloc *preservation)
+{
+	struct kho_vmalloc_chunk *chunk = KHOSER_LOAD_PTR(preservation->first);
+	unsigned int align, order, shift, vm_flags;
+	unsigned long total_pages, contig_pages;
+	unsigned long addr, size;
+	struct vm_struct *area;
+	struct page **pages;
+	unsigned int idx = 0;
+	int err;
+
+	vm_flags = kho_flags_to_vmalloc(preservation->flags);
+	if (vm_flags & ~KHO_VMALLOC_SUPPORTED_FLAGS)
+		return NULL;
+
+	total_pages = preservation->total_pages;
+	pages = kvmalloc_array(total_pages, sizeof(*pages), GFP_KERNEL);
+	if (!pages)
+		return NULL;
+	order = preservation->order;
+	contig_pages = (1 << order);
+	shift = PAGE_SHIFT + order;
+	align = 1 << shift;
+
+	while (chunk) {
+		struct page *page;
+
+		for (int i = 0; chunk->phys[i]; i++) {
+			phys_addr_t phys = chunk->phys[i];
+
+			if (idx + contig_pages > total_pages)
+				goto err_free_pages_array;
+
+			page = kho_restore_pages(phys, contig_pages);
+			if (!page)
+				goto err_free_pages_array;
+
+			for (int j = 0; j < contig_pages; j++)
+				pages[idx++] = page;
+
+			phys += contig_pages * PAGE_SIZE;
+		}
+
+		page = kho_restore_pages(virt_to_phys(chunk), 1);
+		if (!page)
+			goto err_free_pages_array;
+		chunk = KHOSER_LOAD_PTR(chunk->hdr.next);
+		__free_page(page);
+	}
+
+	if (idx != total_pages)
+		goto err_free_pages_array;
+
+	area = __get_vm_area_node(total_pages * PAGE_SIZE, align, shift,
+				  vm_flags, VMALLOC_START, VMALLOC_END,
+				  NUMA_NO_NODE, GFP_KERNEL,
+				  __builtin_return_address(0));
+	if (!area)
+		goto err_free_pages_array;
+
+	addr = (unsigned long)area->addr;
+	size = get_vm_area_size(area);
+	err = vmap_pages_range(addr, addr + size, PAGE_KERNEL, pages, shift);
+	if (err)
+		goto err_free_vm_area;
+
+	area->nr_pages = total_pages;
+	area->pages = pages;
+	return area->addr;
+
+err_free_vm_area:
+	free_vm_area(area);
+err_free_pages_array:
+	kvfree(pages);
+	return NULL;
+}
+EXPORT_SYMBOL_GPL(kho_restore_vmalloc);
+
 /* Handling for debug/kho/out */
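
A quick sanity check of the chunk layout defined above: each chunk occupies exactly one page, the header holds a single KHOSER pointer, and the phys[] array is zero-terminated, which is why kho_vmalloc_unpreserve_chunk() can stop at the first zero entry. A userspace mirror of the arithmetic, assuming 4 KiB pages and a 64-bit phys_addr_t (both are assumptions of this sketch, not guarantees of the kernel code):

	#include <assert.h>
	#include <stdint.h>

	#define PAGE_SIZE 4096UL
	typedef uint64_t phys_addr_t;

	/* The KHOSER pointer union stores an 8-byte physical address. */
	struct kho_vmalloc_hdr { phys_addr_t next; };

	#define KHO_VMALLOC_SIZE \
		((PAGE_SIZE - sizeof(struct kho_vmalloc_hdr)) / sizeof(phys_addr_t))

	struct kho_vmalloc_chunk {
		struct kho_vmalloc_hdr hdr;
		phys_addr_t phys[KHO_VMALLOC_SIZE];
	};

	int main(void)
	{
		/* (4096 - 8) / 8 = 511 entries; the struct fills the page exactly. */
		assert(KHO_VMALLOC_SIZE == 511);
		assert(sizeof(struct kho_vmalloc_chunk) == PAGE_SIZE);
		return 0;
	}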

lib/test_kho.c

@@ -32,6 +32,7 @@ module_param(max_mem, long, 0644);
 struct kho_test_state {
 	unsigned int nr_folios;
 	struct folio **folios;
+	phys_addr_t *folios_info;
 	struct folio *fdt;
 	__wsum csum;
 };
@@ -67,18 +68,15 @@ static struct notifier_block kho_test_nb = {

 static int kho_test_save_data(struct kho_test_state *state, void *fdt)
 {
-	phys_addr_t *folios_info;
+	phys_addr_t *folios_info __free(kvfree) = NULL;
+	struct kho_vmalloc folios_info_phys;
 	int err = 0;

-	err |= fdt_begin_node(fdt, "data");
-	err |= fdt_property(fdt, "nr_folios", &state->nr_folios,
-			    sizeof(state->nr_folios));
-	err |= fdt_property_placeholder(fdt, "folios_info",
-					state->nr_folios * sizeof(*folios_info),
-					(void **)&folios_info);
-	err |= fdt_property(fdt, "csum", &state->csum, sizeof(state->csum));
-	err |= fdt_end_node(fdt);
+	folios_info = vmalloc_array(state->nr_folios, sizeof(*folios_info));
+	if (!folios_info)
+		return -ENOMEM;
+
+	err = kho_preserve_vmalloc(folios_info, &folios_info_phys);
 	if (err)
 		return err;
@@ -93,6 +91,17 @@ static int kho_test_save_data(struct kho_test_state *state, void *fdt)
 			break;
 	}

+	err |= fdt_begin_node(fdt, "data");
+	err |= fdt_property(fdt, "nr_folios", &state->nr_folios,
+			    sizeof(state->nr_folios));
+	err |= fdt_property(fdt, "folios_info", &folios_info_phys,
+			    sizeof(folios_info_phys));
+	err |= fdt_property(fdt, "csum", &state->csum, sizeof(state->csum));
+	err |= fdt_end_node(fdt);
+
+	if (!err)
+		state->folios_info = no_free_ptr(folios_info);
+
 	return err;
 }
@@ -209,8 +218,9 @@ err_free_folios:

 static int kho_test_restore_data(const void *fdt, int node)
 {
+	const struct kho_vmalloc *folios_info_phys;
 	const unsigned int *nr_folios;
-	const phys_addr_t *folios_info;
+	phys_addr_t *folios_info;
 	const __wsum *old_csum;
 	__wsum csum = 0;
 	int len;
@@ -225,8 +235,12 @@ static int kho_test_restore_data(const void *fdt, int node)
 	if (!old_csum || len != sizeof(*old_csum))
 		return -EINVAL;

-	folios_info = fdt_getprop(fdt, node, "folios_info", &len);
-	if (!folios_info || len != sizeof(*folios_info) * *nr_folios)
+	folios_info_phys = fdt_getprop(fdt, node, "folios_info", &len);
+	if (!folios_info_phys || len != sizeof(*folios_info_phys))
+		return -EINVAL;
+
+	folios_info = kho_restore_vmalloc(folios_info_phys);
+	if (!folios_info)
 		return -EINVAL;

 	for (int i = 0; i < *nr_folios; i++) {
@@ -246,6 +260,8 @@ static int kho_test_restore_data(const void *fdt, int node)
 		folio_put(folio);
 	}

+	vfree(folios_info);
+
 	if (csum != *old_csum)
 		return -EINVAL;
@@ -304,6 +320,7 @@ static void kho_test_cleanup(void)
 		folio_put(kho_test_state.folios[i]);

 	kvfree(kho_test_state.folios);
+	vfree(kho_test_state.folios_info);
 	folio_put(kho_test_state.fdt);
 }
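
The rewritten save path above leans on the kernel's scope-based cleanup helpers: __free(kvfree) frees folios_info on any early return, and no_free_ptr() disarms that cleanup once ownership is transferred into kho_test_state. The pattern in isolation (a sketch; example_validate() stands in for the fdt_* error checks and is hypothetical):

	#include <linux/cleanup.h>
	#include <linux/slab.h>
	#include <linux/vmalloc.h>

	static bool example_validate(phys_addr_t *info, unsigned int nr)
	{
		return true;	/* hypothetical validation, always passes here */
	}

	static phys_addr_t *example_alloc_info(unsigned int nr)
	{
		/* kvfree() runs automatically on any return before no_free_ptr(). */
		phys_addr_t *info __free(kvfree) = vmalloc_array(nr, sizeof(*info));

		if (!info)
			return NULL;

		if (!example_validate(info, nr))
			return NULL;	/* info is freed here */

		return no_free_ptr(info);	/* disarm cleanup; caller owns it */
	}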

mm/mm_init.c

@@ -2452,8 +2452,10 @@ static int reserve_mem_kho_finalize(struct kho_serialization *ser)

 	for (i = 0; i < reserved_mem_count; i++) {
 		struct reserve_mem_table *map = &reserved_mem_table[i];
+		struct page *page = phys_to_page(map->start);
+		unsigned int nr_pages = map->size >> PAGE_SHIFT;

-		err |= kho_preserve_phys(map->start, map->size);
+		err |= kho_preserve_pages(page, nr_pages);
 	}

 	err |= kho_preserve_folio(page_folio(kho_fdt));
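
Callers converted away from kho_preserve_phys() all follow the recipe in this hunk: translate the range's start to a struct page and its byte size to a page count. A hedged helper capturing the pattern; note that the old PAGE_ALIGNED() check is now the caller's responsibility, since kho_preserve_pages() no longer sees a byte size:

	/* Sketch: preserve a page-aligned physical range with the new API. */
	static int example_preserve_range(phys_addr_t start, size_t size)
	{
		/* kho_preserve_phys() used to reject unaligned ranges itself. */
		if (!PAGE_ALIGNED(start) || !PAGE_ALIGNED(size))
			return -EINVAL;

		return kho_preserve_pages(phys_to_page(start), size >> PAGE_SHIFT);
	}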