maple_tree: Convert forking to use the sheaf interface

Use the generic interface, which should result in fewer bulk allocations
during forking.

A part of this is to abstract the freeing of the sheaf or maple state
allocations into its own function so mas_destroy() and the tree
duplication code can use the same functionality to return any unused
resources.

[andriy.shevchenko@linux.intel.com: remove unused mt_alloc_bulk()]
Signed-off-by: Liam R. Howlett <Liam.Howlett@Oracle.com>
Reviewed-by: Suren Baghdasaryan <surenb@google.com>
Signed-off-by: Vlastimil Babka <vbabka@suse.cz>
This commit is contained in:
Liam R. Howlett 2025-09-03 15:00:03 +02:00 committed by Vlastimil Babka
parent 6bf377b06c
commit 719a42e563
1 changed file with 23 additions and 24 deletions

View File

@ -172,11 +172,6 @@ static inline struct maple_node *mt_alloc_one(gfp_t gfp)
return kmem_cache_alloc(maple_node_cache, gfp);
}
/*
 * mt_alloc_bulk() - Bulk-allocate maple nodes from the node slab cache.
 * @gfp: Allocation flags passed through to the slab allocator.
 * @size: Number of nodes requested.
 * @nodes: Destination array filled with the allocated node pointers.
 *
 * Thin wrapper around kmem_cache_alloc_bulk() on maple_node_cache.
 * Returns the number of nodes actually allocated (may be fewer than
 * @size on allocator failure).
 *
 * NOTE(review): this function is the one being removed by this patch
 * ("remove unused mt_alloc_bulk()") — it appears here as deleted diff
 * content with the '-' markers stripped.
 */
static inline int mt_alloc_bulk(gfp_t gfp, size_t size, void **nodes)
{
return kmem_cache_alloc_bulk(maple_node_cache, gfp, size, nodes);
}
static inline void mt_free_bulk(size_t size, void __rcu **nodes)
{
kmem_cache_free_bulk(maple_node_cache, size, (void **)nodes);
@ -1150,6 +1145,19 @@ error:
mas_set_err(mas, -ENOMEM);
}
/*
 * mas_empty_nodes() - Return any unused allocation resources held by a
 * maple state.
 * @mas: The maple state.
 *
 * Clears the pending node request count, hands a held sheaf back via
 * mt_return_sheaf(), and frees the mas->alloc buffer. Both pointers are
 * NULLed so the state can be reused safely. Shared by mas_destroy() and
 * the tree duplication code so both release resources the same way.
 */
static inline void mas_empty_nodes(struct ma_state *mas)
{
mas->node_request = 0;
if (mas->sheaf) {
mt_return_sheaf(mas->sheaf);
mas->sheaf = NULL;
}
if (mas->alloc) {
kfree(mas->alloc);
mas->alloc = NULL;
}
}
/*
* mas_free() - Free an encoded maple node
@ -5208,15 +5216,7 @@ EXPORT_SYMBOL_GPL(mas_preallocate);
/*
 * mas_destroy() - Destroy a maple state, returning any unused
 * allocation resources.
 * @mas: The maple state.
 *
 * Drops the preallocation flag and releases held allocations.
 *
 * NOTE(review): the inline cleanup below (node_request/sheaf/alloc)
 * duplicates what mas_empty_nodes() does; this looks like diff residue
 * where the removed lines lost their '-' markers, leaving old and new
 * bodies concatenated — confirm against the upstream commit, where the
 * new body should be only the flag clear plus mas_empty_nodes().
 */
void mas_destroy(struct ma_state *mas)
{
mas->mas_flags &= ~MA_STATE_PREALLOC;
mas->node_request = 0;
if (mas->sheaf)
mt_return_sheaf(mas->sheaf);
mas->sheaf = NULL;
if (mas->alloc)
kfree(mas->alloc);
mas->alloc = NULL;
mas_empty_nodes(mas);
}
EXPORT_SYMBOL_GPL(mas_destroy);
@ -6241,7 +6241,7 @@ static inline void mas_dup_alloc(struct ma_state *mas, struct ma_state *new_mas,
struct maple_node *node = mte_to_node(mas->node);
struct maple_node *new_node = mte_to_node(new_mas->node);
enum maple_type type;
unsigned char request, count, i;
unsigned char count, i;
void __rcu **slots;
void __rcu **new_slots;
unsigned long val;
@ -6249,20 +6249,17 @@ static inline void mas_dup_alloc(struct ma_state *mas, struct ma_state *new_mas,
/* Allocate memory for child nodes. */
type = mte_node_type(mas->node);
new_slots = ma_slots(new_node, type);
request = mas_data_end(mas) + 1;
count = mt_alloc_bulk(gfp, request, (void **)new_slots);
if (unlikely(count < request)) {
memset(new_slots, 0, request * sizeof(void *));
mas_set_err(mas, -ENOMEM);
count = mas->node_request = mas_data_end(mas) + 1;
mas_alloc_nodes(mas, gfp);
if (unlikely(mas_is_err(mas)))
return;
}
/* Restore node type information in slots. */
slots = ma_slots(node, type);
for (i = 0; i < count; i++) {
val = (unsigned long)mt_slot_locked(mas->tree, slots, i);
val &= MAPLE_NODE_MASK;
((unsigned long *)new_slots)[i] |= val;
new_slots[i] = ma_mnode_ptr((unsigned long)mas_pop_node(mas) |
val);
}
}
@ -6316,7 +6313,7 @@ static inline void mas_dup_build(struct ma_state *mas, struct ma_state *new_mas,
/* Only allocate child nodes for non-leaf nodes. */
mas_dup_alloc(mas, new_mas, gfp);
if (unlikely(mas_is_err(mas)))
return;
goto empty_mas;
} else {
/*
* This is the last leaf node and duplication is
@ -6349,6 +6346,8 @@ set_new_tree:
/* Make them the same height */
new_mas->tree->ma_flags = mas->tree->ma_flags;
rcu_assign_pointer(new_mas->tree->ma_root, root);
empty_mas:
mas_empty_nodes(mas);
}
/**