mirror of
https://git.kernel.org/pub/scm/linux/kernel/git/herbert/cryptodev-2.6.git
synced 2026-04-04 20:57:45 -04:00
mm: page_alloc: add __split_page()
Factor out the splitting of non-compound page from make_alloc_exact() and split_page() into a new helper function __split_page(). While at it, convert the VM_BUG_ON_PAGE() into a VM_WARN_ON_PAGE(). Link: https://lkml.kernel.org/r/20260109093136.1491549-3-wangkefeng.wang@huawei.com Signed-off-by: Kefeng Wang <wangkefeng.wang@huawei.com> Acked-by: David Hildenbrand <david@redhat.com> Acked-by: Muchun Song <muchun.song@linux.dev> Reviewed-by: Zi Yan <ziy@nvidia.com> Reviewed-by: Sidhartha Kumar <sidhartha.kumar@oracle.com> Cc: Brendan Jackman <jackmanb@google.com> Cc: Jane Chu <jane.chu@oracle.com> Cc: Johannes Weiner <hannes@cmpxchg.org> Cc: Matthew Wilcox (Oracle) <willy@infradead.org> Cc: Oscar Salvador <osalvador@suse.de> Cc: Vlastimil Babka <vbabka@suse.cz> Cc: Claudiu Beznea <claudiu.beznea.uj@bp.renesas.com> Cc: Mark Brown <broonie@kernel.org> Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
This commit is contained in:
committed by
Andrew Morton
parent
01152bd2e4
commit
a9deb800b8
@@ -47,6 +47,15 @@ void vma_iter_dump_tree(const struct vma_iterator *vmi);
|
||||
BUG(); \
|
||||
} \
|
||||
} while (0)
|
||||
/*
 * VM_WARN_ON_PAGE(cond, page) - non-fatal page-state check.
 *
 * If @cond is true, dump the offending struct page state (annotated with
 * the stringified condition) and emit a WARN_ON(1) backtrace, then let
 * execution continue — unlike VM_BUG_ON_PAGE(), which would BUG().
 * The statement expression evaluates to !!(cond) so the macro can be
 * used directly inside an if () condition.
 */
#define VM_WARN_ON_PAGE(cond, page)	({				\
	int __ret_warn = !!(cond);					\
									\
	if (unlikely(__ret_warn)) {					\
		dump_page(page, "VM_WARN_ON_PAGE(" __stringify(cond)")");\
		WARN_ON(1);						\
	}								\
	unlikely(__ret_warn);						\
})
#define VM_WARN_ON_ONCE_PAGE(cond, page) ({ \
|
||||
static bool __section(".data..once") __warned; \
|
||||
int __ret_warn_once = !!(cond); \
|
||||
@@ -122,6 +131,7 @@ void vma_iter_dump_tree(const struct vma_iterator *vmi);
|
||||
/*
 * NOTE(review): these look like the !CONFIG_DEBUG_VM stubs — the enclosing
 * #ifdef/#else is outside this hunk, so confirm against the full header.
 * BUILD_BUG_ON_INVALID() keeps @cond syntax/type-checked at compile time
 * while emitting no runtime code, so the checks vanish in non-debug builds.
 */
#define VM_BUG_ON_MM(cond, mm)			VM_BUG_ON(cond)
#define VM_WARN_ON(cond)			BUILD_BUG_ON_INVALID(cond)
#define VM_WARN_ON_ONCE(cond)			BUILD_BUG_ON_INVALID(cond)
#define VM_WARN_ON_PAGE(cond, page)		BUILD_BUG_ON_INVALID(cond)
#define VM_WARN_ON_ONCE_PAGE(cond, page)	BUILD_BUG_ON_INVALID(cond)
#define VM_WARN_ON_FOLIO(cond, folio)		BUILD_BUG_ON_INVALID(cond)
#define VM_WARN_ON_ONCE_FOLIO(cond, folio)	BUILD_BUG_ON_INVALID(cond)
|
||||
@@ -3107,6 +3107,15 @@ void free_unref_folios(struct folio_batch *folios)
|
||||
folio_batch_reinit(folios);
|
||||
}
|
||||
|
||||
/*
 * __split_page() - split the metadata of a non-compound higher-order page.
 *
 * Updates the page_owner, allocation-tag and memcg bookkeeping so that
 * each of the 1 << @order constituent base pages is tracked as its own
 * order-0 allocation.  Warns (non-fatally) if handed a compound page.
 *
 * Does not touch refcounts: callers that hand out the sub-pages
 * individually must initialise those themselves (see split_page()).
 */
static void __split_page(struct page *page, unsigned int order)
{
	VM_WARN_ON_PAGE(PageCompound(page), page);

	split_page_owner(page, order, 0);
	pgalloc_tag_split(page_folio(page), order, 0);
	split_page_memcg(page, order);
}
|
||||
|
||||
/*
|
||||
* split_page takes a non-compound higher-order page, and splits it into
|
||||
* n (1<<order) sub-pages: page[0..n]
|
||||
@@ -3119,14 +3128,12 @@ void split_page(struct page *page, unsigned int order)
|
||||
{
|
||||
int i;
|
||||
|
||||
VM_BUG_ON_PAGE(PageCompound(page), page);
|
||||
VM_BUG_ON_PAGE(!page_count(page), page);
|
||||
VM_WARN_ON_PAGE(!page_count(page), page);
|
||||
|
||||
for (i = 1; i < (1 << order); i++)
|
||||
set_page_refcounted(page + i);
|
||||
split_page_owner(page, order, 0);
|
||||
pgalloc_tag_split(page_folio(page), order, 0);
|
||||
split_page_memcg(page, order);
|
||||
|
||||
__split_page(page, order);
|
||||
}
|
||||
EXPORT_SYMBOL_GPL(split_page);
|
||||
|
||||
@@ -5389,9 +5396,7 @@ static void *make_alloc_exact(unsigned long addr, unsigned int order,
|
||||
struct page *page = virt_to_page((void *)addr);
|
||||
struct page *last = page + nr;
|
||||
|
||||
split_page_owner(page, order, 0);
|
||||
pgalloc_tag_split(page_folio(page), order, 0);
|
||||
split_page_memcg(page, order);
|
||||
__split_page(page, order);
|
||||
while (page < --last)
|
||||
set_page_refcounted(last);
|
||||
|
||||
|
||||
Reference in New Issue
Block a user