mirror of https://git.kernel.org/pub/scm/linux/kernel/git/herbert/cryptodev-2.6.git
Merge tag 'mm-stable-2024-01-08-15-31' of git://git.kernel.org/pub/scm/linux/kernel/git/akpm/mm
Pull MM updates from Andrew Morton:
"Many singleton patches against the MM code. The patch series which are
included in this merge do the following:
- Peng Zhang has done some maple tree maintenance work in the series
'maple_tree: add mt_free_one() and mt_attr() helpers'
'Some cleanups of maple tree'
- In the series 'mm: use memmap_on_memory semantics for dax/kmem'
Vishal Verma has altered the interworking between memory-hotplug
and dax/kmem so that newly added 'device memory' can more easily
have its memmap placed within that newly added memory.
- Matthew Wilcox continues folio-related work (including a few fixes)
in the patch series
'Add folio_zero_tail() and folio_fill_tail()'
'Make folio_start_writeback return void'
'Fix fault handler's handling of poisoned tail pages'
'Convert aops->error_remove_page to ->error_remove_folio'
'Finish two folio conversions'
'More swap folio conversions'
- Kefeng Wang has also contributed folio-related work in the series
'mm: cleanup and use more folio in page fault'
- Jim Cromie has improved the kmemleak reporting output in the series
'tweak kmemleak report format'.
- In the series 'stackdepot: allow evicting stack traces', Andrey
  Konovalov permits clients (in this case KASAN) to cause eviction
  of no-longer-needed stack traces.
- Charan Teja Kalla has fixed some accounting issues in the page
allocator's atomic reserve calculations in the series 'mm:
  page_alloc: fixes for high atomic reserve calculations'.
- Dmitry Rokosov has added to the samples/ directory some sample code
for a userspace memcg event listener application. See the series
'samples: introduce cgroup events listeners'.
- Some maple tree maintenance work from Liam Howlett in the series
'maple_tree: iterator state changes'.
- Nhat Pham has improved zswap's approach to writeback in the series
'workload-specific and memory pressure-driven zswap writeback'.
- DAMON/DAMOS feature and maintenance work from SeongJae Park in the
series
'mm/damon: let users feed and tame/auto-tune DAMOS'
'selftests/damon: add Python-written DAMON functionality tests'
'mm/damon: misc updates for 6.8'
- Yosry Ahmed has improved memcg's stats flushing in the series 'mm:
memcg: subtree stats flushing and thresholds'.
- In the series 'Multi-size THP for anonymous memory' Ryan Roberts
has added a runtime opt-in feature to transparent hugepages which
improves performance by allocating larger chunks of memory during
anonymous page faults.
- Matthew Wilcox has also contributed some cleanup and maintenance
  work against the buffer_head code in the series 'More buffer_head
  cleanups'.
- Suren Baghdasaryan has done work on Andrea Arcangeli's series
  'userfaultfd move option'. UFFDIO_MOVE permits userspace heap
  compaction algorithms to move userspace's pages around rather than
  UFFDIO_COPY's alloc/copy/free (a minimal userspace sketch follows
  this list).
- Stefan Roesch has developed a 'KSM Advisor', in the series 'mm/ksm:
Add ksm advisor'. This is a governor which tunes KSM's scanning
aggressiveness in response to userspace's current needs.
- Chengming Zhou has optimized zswap's temporary working memory use
in the series 'mm/zswap: dstmem reuse optimizations and cleanups'.
- Matthew Wilcox has performed some maintenance work on the writeback
  code, both in the core code and within filesystems. The series is
  'Clean up the writeback paths'.
- Andrey Konovalov has optimized KASAN's handling of alloc and free
stack traces for secondary-level allocators, in the series 'kasan:
save mempool stack traces'.
- Andrey also performed some KASAN maintenance work in the series
'kasan: assorted clean-ups'.
- David Hildenbrand has gone to town on the rmap code. Cleanups, more
pte batching, folio conversions and more. See the series 'mm/rmap:
interface overhaul'.
- Kinsey Ho has contributed some maintenance work on the MGLRU code
in the series 'mm/mglru: Kconfig cleanup'.
- Matthew Wilcox has contributed lruvec page accounting code cleanups
in the series 'Remove some lruvec page accounting functions'"
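As a concrete illustration of the UFFDIO_MOVE item above, here is a minimal, hypothetical userspace sketch. The uffdio_move layout and the UFFDIO_MOVE_MODE_ALLOW_SRC_HOLES flag come from the new uapi added by this merge; the move_range() helper, fd setup, and error handling are simplified assumptions, not code from the series.

#include <linux/userfaultfd.h>
#include <stdio.h>
#include <sys/ioctl.h>

/*
 * Hypothetical helper: move a page-aligned range from src to dst inside
 * a VMA already registered with UFFDIO_REGISTER, instead of the
 * alloc/copy/free cycle that UFFDIO_COPY would perform. Requires uapi
 * headers from a kernel that includes this merge.
 */
static int move_range(int uffd, unsigned long dst, unsigned long src,
		      unsigned long len)
{
	struct uffdio_move move = {
		.dst = dst,
		.src = src,
		.len = len,
		.mode = UFFDIO_MOVE_MODE_ALLOW_SRC_HOLES,
	};

	if (ioctl(uffd, UFFDIO_MOVE, &move) < 0) {
		perror("UFFDIO_MOVE");
		return -1;
	}
	/* On success, move.move reports how many bytes were moved. */
	return 0;
}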
* tag 'mm-stable-2024-01-08-15-31' of git://git.kernel.org/pub/scm/linux/kernel/git/akpm/mm: (361 commits)
mm, treewide: rename MAX_ORDER to MAX_PAGE_ORDER
mm, treewide: introduce NR_PAGE_ORDERS
selftests/mm: add separate UFFDIO_MOVE test for PMD splitting
selftests/mm: skip test if application doesn't has root privileges
selftests/mm: conform test to TAP format output
selftests: mm: hugepage-mmap: conform to TAP format output
selftests/mm: gup_test: conform test to TAP format output
mm/selftests: hugepage-mremap: conform test to TAP format output
mm/vmstat: move pgdemote_* out of CONFIG_NUMA_BALANCING
mm: zsmalloc: return -ENOSPC rather than -EINVAL in zs_malloc while size is too large
mm/memcontrol: remove __mod_lruvec_page_state()
mm/khugepaged: use a folio more in collapse_file()
slub: use a folio in __kmalloc_large_node
slub: use folio APIs in free_large_kmalloc()
slub: use alloc_pages_node() in alloc_slab_page()
mm: remove inc/dec lruvec page state functions
mm: ratelimit stat flush from workingset shrinker
kasan: stop leaking stack trace handles
mm/mglru: remove CONFIG_TRANSPARENT_HUGEPAGE
mm/mglru: add dummy pmd_dirty()
...
lib/Kconfig (10 changes)
@@ -713,10 +713,20 @@ config ARCH_STACKWALK
 config STACKDEPOT
 	bool
 	select STACKTRACE
+	help
+	  Stack depot: stack trace storage that avoids duplication
 
 config STACKDEPOT_ALWAYS_INIT
 	bool
 	select STACKDEPOT
+	help
+	  Always initialize stack depot during early boot
+
+config STACKDEPOT_MAX_FRAMES
+	int "Maximum number of frames in trace saved in stack depot"
+	range 1 256
+	default 64
+	depends on STACKDEPOT
 
 config REF_TRACKER
 	bool
lib/Kconfig.kasan
@@ -129,7 +129,7 @@ endchoice
 choice
 	prompt "Instrumentation type"
 	depends on KASAN_GENERIC || KASAN_SW_TAGS
-	default KASAN_OUTLINE
+	default KASAN_INLINE if !ARCH_DISABLE_KASAN_INLINE
 
 config KASAN_OUTLINE
 	bool "Outline instrumentation"
@@ -202,4 +202,25 @@ config KASAN_MODULE_TEST
 	  A part of the KASAN test suite that is not integrated with KUnit.
 	  Incompatible with Hardware Tag-Based KASAN.
 
+config KASAN_EXTRA_INFO
+	bool "Record and report more information"
+	depends on KASAN
+	help
+	  Record and report more information to help us find the cause of the
+	  bug and to help us correlate the error with other system events.
+
+	  Currently, the CPU number and timestamp are additionally
+	  recorded for each heap block at allocation and free time, and
+	  8 bytes will be added to each metadata structure that records
+	  allocation or free information.
+
+	  In Generic KASAN, each kmalloc-8 and kmalloc-16 object will add
+	  16 bytes of additional memory consumption, and each kmalloc-32
+	  object will add 8 bytes of additional memory consumption, not
+	  affecting other larger objects.
+
+	  In SW_TAGS KASAN and HW_TAGS KASAN, depending on the stack_ring_size
+	  boot parameter, it will add 8 * stack_ring_size bytes of additional
+	  memory consumption.
+
 endif # KASAN
lib/maple_tree.c (1090 changes)
File diff suppressed because it is too large
lib/stackdepot.c (465 changes)
@@ -18,11 +18,14 @@
 #include <linux/jhash.h>
 #include <linux/kernel.h>
 #include <linux/kmsan.h>
+#include <linux/list.h>
 #include <linux/mm.h>
 #include <linux/mutex.h>
 #include <linux/percpu.h>
 #include <linux/printk.h>
+#include <linux/refcount.h>
 #include <linux/slab.h>
+#include <linux/spinlock.h>
 #include <linux/stacktrace.h>
 #include <linux/stackdepot.h>
 #include <linux/string.h>
@@ -32,14 +35,23 @@
 
 #define DEPOT_HANDLE_BITS (sizeof(depot_stack_handle_t) * 8)
 
-#define DEPOT_VALID_BITS 1
 #define DEPOT_POOL_ORDER 2 /* Pool size order, 4 pages */
 #define DEPOT_POOL_SIZE (1LL << (PAGE_SHIFT + DEPOT_POOL_ORDER))
 #define DEPOT_STACK_ALIGN 4
 #define DEPOT_OFFSET_BITS (DEPOT_POOL_ORDER + PAGE_SHIFT - DEPOT_STACK_ALIGN)
-#define DEPOT_POOL_INDEX_BITS (DEPOT_HANDLE_BITS - DEPOT_VALID_BITS - \
-			       DEPOT_OFFSET_BITS - STACK_DEPOT_EXTRA_BITS)
+#define DEPOT_POOL_INDEX_BITS (DEPOT_HANDLE_BITS - DEPOT_OFFSET_BITS - \
+			       STACK_DEPOT_EXTRA_BITS)
+#if IS_ENABLED(CONFIG_KMSAN) && CONFIG_STACKDEPOT_MAX_FRAMES >= 32
+/*
+ * KMSAN is frequently used in fuzzing scenarios and thus saves a lot of stack
+ * traces. As KMSAN does not support evicting stack traces from the stack
+ * depot, the stack depot capacity might be reached quickly with large stack
+ * records. Adjust the maximum number of stack depot pools for this case.
+ */
+#define DEPOT_POOLS_CAP (8192 * (CONFIG_STACKDEPOT_MAX_FRAMES / 16))
+#else
 #define DEPOT_POOLS_CAP 8192
+#endif
 #define DEPOT_MAX_POOLS \
 	(((1LL << (DEPOT_POOL_INDEX_BITS)) < DEPOT_POOLS_CAP) ? \
 	 (1LL << (DEPOT_POOL_INDEX_BITS)) : DEPOT_POOLS_CAP)
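A quick sanity check of the handle-layout change above: dropping DEPOT_VALID_BITS hands its bit to pool_index. Assuming 4 KiB pages (PAGE_SHIFT = 12) and the usual STACK_DEPOT_EXTRA_BITS of 5, the offset field needs 2 + 12 - 4 = 10 bits, so pool_index grows from 32 - 1 - 10 - 5 = 16 bits to 32 - 10 - 5 = 17. A standalone sketch of the same arithmetic, with those constants hardcoded as assumptions:

#include <assert.h>
#include <stdio.h>

/* Assumptions: 4 KiB pages, u32 handles, STACK_DEPOT_EXTRA_BITS = 5. */
#define HANDLE_BITS	32
#define PAGE_SHIFT	12
#define POOL_ORDER	2	/* 4 pages per pool */
#define STACK_ALIGN	4	/* records aligned to 16 bytes */
#define EXTRA_BITS	5
#define OFFSET_BITS	(POOL_ORDER + PAGE_SHIFT - STACK_ALIGN)

int main(void)
{
	int old_index = HANDLE_BITS - 1 - OFFSET_BITS - EXTRA_BITS; /* had a valid bit */
	int new_index = HANDLE_BITS - OFFSET_BITS - EXTRA_BITS;

	/* The remaining fields must exactly fill the 32-bit handle. */
	assert(new_index + OFFSET_BITS + EXTRA_BITS == HANDLE_BITS);
	printf("pool_index: %d -> %d bits (%d -> %d addressable pools)\n",
	       old_index, new_index, 1 << old_index, 1 << new_index);
	return 0;
}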
@@ -50,19 +62,22 @@ union handle_parts {
 	struct {
 		u32 pool_index	: DEPOT_POOL_INDEX_BITS;
 		u32 offset	: DEPOT_OFFSET_BITS;
-		u32 valid	: DEPOT_VALID_BITS;
 		u32 extra	: STACK_DEPOT_EXTRA_BITS;
 	};
 };
 
 struct stack_record {
-	struct stack_record *next;	/* Link in the hash table */
-	u32 hash;			/* Hash in the hash table */
+	struct list_head list;		/* Links in hash table or freelist */
+	u32 hash;			/* Hash in hash table */
 	u32 size;			/* Number of stored frames */
 	union handle_parts handle;
-	unsigned long entries[];	/* Variable-sized array of frames */
+	refcount_t count;
+	unsigned long entries[CONFIG_STACKDEPOT_MAX_FRAMES];	/* Frames */
 };
 
+#define DEPOT_STACK_RECORD_SIZE \
+	ALIGN(sizeof(struct stack_record), 1 << DEPOT_STACK_ALIGN)
+
 static bool stack_depot_disabled;
 static bool __stack_depot_early_init_requested __initdata = IS_ENABLED(CONFIG_STACKDEPOT_ALWAYS_INIT);
 static bool __stack_depot_early_init_passed __initdata;
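Because stack_record now embeds a fixed CONFIG_STACKDEPOT_MAX_FRAMES-sized array instead of a flexible array, every record has the same size and DEPOT_STACK_RECORD_SIZE is a compile-time constant — which is what lets depot_init_pool() further down carve a pool into a freelist. A rough capacity check, assuming a 64-bit build, the default of 64 frames, and the 16 KiB pool implied by DEPOT_POOL_ORDER = 2; the mirrored struct below is a hypothetical userspace stand-in, not the kernel type:

#include <stdio.h>

#define MAX_FRAMES	64			/* default CONFIG_STACKDEPOT_MAX_FRAMES */
#define POOL_SIZE	(4096 << 2)		/* DEPOT_POOL_ORDER = 2 -> 16 KiB */
#define RECORD_ALIGN	16			/* 1 << DEPOT_STACK_ALIGN */

/* Hypothetical mirror of struct stack_record on a 64-bit kernel. */
struct stack_record_mirror {
	void *list_prev, *list_next;		/* struct list_head */
	unsigned int hash, size, handle, count;
	unsigned long entries[MAX_FRAMES];
};

int main(void)
{
	size_t rec = (sizeof(struct stack_record_mirror) + RECORD_ALIGN - 1)
			& ~(size_t)(RECORD_ALIGN - 1);

	/* Prints roughly: record 544 bytes, 30 records per pool. */
	printf("record: %zu bytes, %zu records per 16 KiB pool\n",
	       rec, (size_t)POOL_SIZE / rec);
	return 0;
}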
@@ -75,40 +90,34 @@ static bool __stack_depot_early_init_passed __initdata;
 /* Initial seed for jhash2. */
 #define STACK_HASH_SEED 0x9747b28c
 
-/* Hash table of pointers to stored stack traces. */
-static struct stack_record **stack_table;
+/* Hash table of stored stack records. */
+static struct list_head *stack_table;
 /* Fixed order of the number of table buckets. Used when KASAN is enabled. */
 static unsigned int stack_bucket_number_order;
 /* Hash mask for indexing the table. */
 static unsigned int stack_hash_mask;
 
-/* Array of memory regions that store stack traces. */
+/* Array of memory regions that store stack records. */
 static void *stack_pools[DEPOT_MAX_POOLS];
-/* Currently used pool in stack_pools. */
-static int pool_index;
-/* Offset to the unused space in the currently used pool. */
-static size_t pool_offset;
-/* Lock that protects the variables above. */
-static DEFINE_RAW_SPINLOCK(pool_lock);
+/* Newly allocated pool that is not yet added to stack_pools. */
+static void *new_pool;
+/* Number of pools in stack_pools. */
+static int pools_num;
+/* Freelist of stack records within stack_pools. */
+static LIST_HEAD(free_stacks);
 /*
  * Stack depot tries to keep an extra pool allocated even before it runs out
- * of space in the currently used pool.
- * This flag marks that this next extra pool needs to be allocated and
- * initialized. It has the value 0 when either the next pool is not yet
- * initialized or the limit on the number of pools is reached.
+ * of space in the currently used pool. This flag marks whether this extra pool
+ * needs to be allocated. It has the value 0 when either an extra pool is not
+ * yet allocated or if the limit on the number of pools is reached.
  */
-static int next_pool_required = 1;
+static bool new_pool_required = true;
+/* Lock that protects the variables above. */
+static DEFINE_RWLOCK(pool_rwlock);
 
 static int __init disable_stack_depot(char *str)
 {
-	int ret;
-
-	ret = kstrtobool(str, &stack_depot_disabled);
-	if (!ret && stack_depot_disabled) {
-		pr_info("disabled\n");
-		stack_table = NULL;
-	}
-	return 0;
+	return kstrtobool(str, &stack_depot_disabled);
 }
 early_param("stack_depot_disable", disable_stack_depot);
 
@@ -120,6 +129,15 @@ void __init stack_depot_request_early_init(void)
 	__stack_depot_early_init_requested = true;
 }
 
+/* Initialize list_head's within the hash table. */
+static void init_stack_table(unsigned long entries)
+{
+	unsigned long i;
+
+	for (i = 0; i < entries; i++)
+		INIT_LIST_HEAD(&stack_table[i]);
+}
+
 /* Allocates a hash table via memblock. Can only be used during early boot. */
 int __init stack_depot_early_init(void)
 {
@@ -130,6 +148,15 @@ int __init stack_depot_early_init(void)
 		return 0;
 	__stack_depot_early_init_passed = true;
 
+	/*
+	 * Print disabled message even if early init has not been requested:
+	 * stack_depot_init() will not print one.
+	 */
+	if (stack_depot_disabled) {
+		pr_info("disabled\n");
+		return 0;
+	}
+
 	/*
 	 * If KASAN is enabled, use the maximum order: KASAN is frequently used
 	 * in fuzzing scenarios, which leads to a large number of different
@@ -138,21 +165,25 @@ int __init stack_depot_early_init(void)
 	if (kasan_enabled() && !stack_bucket_number_order)
 		stack_bucket_number_order = STACK_BUCKET_NUMBER_ORDER_MAX;
 
-	if (!__stack_depot_early_init_requested || stack_depot_disabled)
+	/*
+	 * Check if early init has been requested after setting
+	 * stack_bucket_number_order: stack_depot_init() uses its value.
+	 */
+	if (!__stack_depot_early_init_requested)
 		return 0;
 
 	/*
 	 * If stack_bucket_number_order is not set, leave entries as 0 to rely
-	 * on the automatic calculations performed by alloc_large_system_hash.
+	 * on the automatic calculations performed by alloc_large_system_hash().
 	 */
 	if (stack_bucket_number_order)
 		entries = 1UL << stack_bucket_number_order;
 	pr_info("allocating hash table via alloc_large_system_hash\n");
 	stack_table = alloc_large_system_hash("stackdepot",
-						sizeof(struct stack_record *),
+						sizeof(struct list_head),
 						entries,
 						STACK_HASH_TABLE_SCALE,
-						HASH_EARLY | HASH_ZERO,
+						HASH_EARLY,
 						NULL,
 						&stack_hash_mask,
 						1UL << STACK_BUCKET_NUMBER_ORDER_MIN,
@@ -162,6 +193,14 @@ int __init stack_depot_early_init(void)
 		stack_depot_disabled = true;
 		return -ENOMEM;
 	}
+	if (!entries) {
+		/*
+		 * Obtain the number of entries that was calculated by
+		 * alloc_large_system_hash().
+		 */
+		entries = stack_hash_mask + 1;
+	}
+	init_stack_table(entries);
 
 	return 0;
 }
@@ -202,7 +241,7 @@ int stack_depot_init(void)
 		entries = 1UL << STACK_BUCKET_NUMBER_ORDER_MAX;
 
 	pr_info("allocating hash table of %lu entries via kvcalloc\n", entries);
-	stack_table = kvcalloc(entries, sizeof(struct stack_record *), GFP_KERNEL);
+	stack_table = kvcalloc(entries, sizeof(struct list_head), GFP_KERNEL);
 	if (!stack_table) {
 		pr_err("hash table allocation failed, disabling\n");
 		stack_depot_disabled = true;
@@ -210,6 +249,7 @@ int stack_depot_init(void)
 		goto out_unlock;
 	}
 	stack_hash_mask = entries - 1;
+	init_stack_table(entries);
 
 out_unlock:
 	mutex_unlock(&stack_depot_init_mutex);
@@ -218,41 +258,103 @@ out_unlock:
 }
 EXPORT_SYMBOL_GPL(stack_depot_init);
 
-/* Uses preallocated memory to initialize a new stack depot pool. */
-static void depot_init_pool(void **prealloc)
+/* Initializes a stack depol pool. */
+static void depot_init_pool(void *pool)
 {
+	int offset;
+
+	lockdep_assert_held_write(&pool_rwlock);
+
+	WARN_ON(!list_empty(&free_stacks));
+
+	/* Initialize handles and link stack records into the freelist. */
+	for (offset = 0; offset <= DEPOT_POOL_SIZE - DEPOT_STACK_RECORD_SIZE;
+	     offset += DEPOT_STACK_RECORD_SIZE) {
+		struct stack_record *stack = pool + offset;
+
+		stack->handle.pool_index = pools_num;
+		stack->handle.offset = offset >> DEPOT_STACK_ALIGN;
+		stack->handle.extra = 0;
+
+		list_add(&stack->list, &free_stacks);
+	}
+
+	/* Save reference to the pool to be used by depot_fetch_stack(). */
+	stack_pools[pools_num] = pool;
+	pools_num++;
+}
+
+/* Keeps the preallocated memory to be used for a new stack depot pool. */
+static void depot_keep_new_pool(void **prealloc)
+{
+	lockdep_assert_held_write(&pool_rwlock);
+
 	/*
-	 * If the next pool is already initialized or the maximum number of
+	 * If a new pool is already saved or the maximum number of
 	 * pools is reached, do not use the preallocated memory.
-	 * smp_load_acquire() here pairs with smp_store_release() below and
-	 * in depot_alloc_stack().
 	 */
-	if (!smp_load_acquire(&next_pool_required))
+	if (!new_pool_required)
 		return;
 
-	/* Check if the current pool is not yet allocated. */
-	if (stack_pools[pool_index] == NULL) {
-		/* Use the preallocated memory for the current pool. */
-		stack_pools[pool_index] = *prealloc;
+	/*
+	 * Use the preallocated memory for the new pool
+	 * as long as we do not exceed the maximum number of pools.
+	 */
+	if (pools_num < DEPOT_MAX_POOLS) {
+		new_pool = *prealloc;
 		*prealloc = NULL;
-	} else {
-		/*
-		 * Otherwise, use the preallocated memory for the next pool
-		 * as long as we do not exceed the maximum number of pools.
-		 */
-		if (pool_index + 1 < DEPOT_MAX_POOLS) {
-			stack_pools[pool_index + 1] = *prealloc;
-			*prealloc = NULL;
-		}
-		/*
-		 * At this point, either the next pool is initialized or the
-		 * maximum number of pools is reached. In either case, take
-		 * note that initializing another pool is not required.
-		 * This smp_store_release pairs with smp_load_acquire() above
-		 * and in stack_depot_save().
-		 */
-		smp_store_release(&next_pool_required, 0);
 	}
+
+	/*
+	 * At this point, either a new pool is kept or the maximum
+	 * number of pools is reached. In either case, take note that
+	 * keeping another pool is not required.
+	 */
+	new_pool_required = false;
+}
+
+/* Updates references to the current and the next stack depot pools. */
+static bool depot_update_pools(void **prealloc)
+{
+	lockdep_assert_held_write(&pool_rwlock);
+
+	/* Check if we still have objects in the freelist. */
+	if (!list_empty(&free_stacks))
+		goto out_keep_prealloc;
+
+	/* Check if we have a new pool saved and use it. */
+	if (new_pool) {
+		depot_init_pool(new_pool);
+		new_pool = NULL;
+
+		/* Take note that we might need a new new_pool. */
+		if (pools_num < DEPOT_MAX_POOLS)
+			new_pool_required = true;
+
+		/* Try keeping the preallocated memory for new_pool. */
+		goto out_keep_prealloc;
+	}
+
+	/* Bail out if we reached the pool limit. */
+	if (unlikely(pools_num >= DEPOT_MAX_POOLS)) {
+		WARN_ONCE(1, "Stack depot reached limit capacity");
+		return false;
+	}
+
+	/* Check if we have preallocated memory and use it. */
+	if (*prealloc) {
+		depot_init_pool(*prealloc);
+		*prealloc = NULL;
+		return true;
+	}
+
+	return false;
+
+out_keep_prealloc:
+	/* Keep the preallocated memory for a new pool if required. */
+	if (*prealloc)
+		depot_keep_new_pool(prealloc);
+	return true;
 }
 
 /* Allocates a new stack in a stack depot pool. */
@@ -260,62 +362,72 @@ static struct stack_record *
 depot_alloc_stack(unsigned long *entries, int size, u32 hash, void **prealloc)
 {
 	struct stack_record *stack;
-	size_t required_size = struct_size(stack, entries, size);
 
-	required_size = ALIGN(required_size, 1 << DEPOT_STACK_ALIGN);
+	lockdep_assert_held_write(&pool_rwlock);
 
-	/* Check if there is not enough space in the current pool. */
-	if (unlikely(pool_offset + required_size > DEPOT_POOL_SIZE)) {
-		/* Bail out if we reached the pool limit. */
-		if (unlikely(pool_index + 1 >= DEPOT_MAX_POOLS)) {
-			WARN_ONCE(1, "Stack depot reached limit capacity");
-			return NULL;
-		}
-
-		/*
-		 * Move on to the next pool.
-		 * WRITE_ONCE pairs with potential concurrent read in
-		 * stack_depot_fetch().
-		 */
-		WRITE_ONCE(pool_index, pool_index + 1);
-		pool_offset = 0;
-		/*
-		 * If the maximum number of pools is not reached, take note
-		 * that the next pool needs to initialized.
-		 * smp_store_release() here pairs with smp_load_acquire() in
-		 * stack_depot_save() and depot_init_pool().
-		 */
-		if (pool_index + 1 < DEPOT_MAX_POOLS)
-			smp_store_release(&next_pool_required, 1);
-	}
-
-	/* Assign the preallocated memory to a pool if required. */
-	if (*prealloc)
-		depot_init_pool(prealloc);
-
-	/* Check if we have a pool to save the stack trace. */
-	if (stack_pools[pool_index] == NULL)
+	/* Update current and new pools if required and possible. */
+	if (!depot_update_pools(prealloc))
 		return NULL;
 
+	/* Check if we have a stack record to save the stack trace. */
+	if (list_empty(&free_stacks))
+		return NULL;
+
+	/* Get and unlink the first entry from the freelist. */
+	stack = list_first_entry(&free_stacks, struct stack_record, list);
+	list_del(&stack->list);
+
+	/* Limit number of saved frames to CONFIG_STACKDEPOT_MAX_FRAMES. */
+	if (size > CONFIG_STACKDEPOT_MAX_FRAMES)
+		size = CONFIG_STACKDEPOT_MAX_FRAMES;
+
 	/* Save the stack trace. */
-	stack = stack_pools[pool_index] + pool_offset;
 	stack->hash = hash;
 	stack->size = size;
-	stack->handle.pool_index = pool_index;
-	stack->handle.offset = pool_offset >> DEPOT_STACK_ALIGN;
-	stack->handle.valid = 1;
-	stack->handle.extra = 0;
+	/* stack->handle is already filled in by depot_init_pool(). */
+	refcount_set(&stack->count, 1);
 	memcpy(stack->entries, entries, flex_array_size(stack, entries, size));
-	pool_offset += required_size;
 
 	/*
 	 * Let KMSAN know the stored stack record is initialized. This shall
 	 * prevent false positive reports if instrumented code accesses it.
 	 */
-	kmsan_unpoison_memory(stack, required_size);
+	kmsan_unpoison_memory(stack, DEPOT_STACK_RECORD_SIZE);
 
 	return stack;
 }
 
+static struct stack_record *depot_fetch_stack(depot_stack_handle_t handle)
+{
+	union handle_parts parts = { .handle = handle };
+	void *pool;
+	size_t offset = parts.offset << DEPOT_STACK_ALIGN;
+	struct stack_record *stack;
+
+	lockdep_assert_held(&pool_rwlock);
+
+	if (parts.pool_index > pools_num) {
+		WARN(1, "pool index %d out of bounds (%d) for stack id %08x\n",
+		     parts.pool_index, pools_num, handle);
+		return NULL;
+	}
+
+	pool = stack_pools[parts.pool_index];
+	if (!pool)
+		return NULL;
+
+	stack = pool + offset;
+	return stack;
+}
+
+/* Links stack into the freelist. */
+static void depot_free_stack(struct stack_record *stack)
+{
+	lockdep_assert_held_write(&pool_rwlock);
+
+	list_add(&stack->list, &free_stacks);
+}
+
 /* Calculates the hash for a stack. */
 static inline u32 hash_stack(unsigned long *entries, unsigned int size)
 {
@@ -340,13 +452,17 @@ int stackdepot_memcmp(const unsigned long *u1, const unsigned long *u2,
 }
 
 /* Finds a stack in a bucket of the hash table. */
-static inline struct stack_record *find_stack(struct stack_record *bucket,
+static inline struct stack_record *find_stack(struct list_head *bucket,
 					      unsigned long *entries, int size,
 					      u32 hash)
 {
+	struct list_head *pos;
 	struct stack_record *found;
 
-	for (found = bucket; found; found = found->next) {
+	lockdep_assert_held(&pool_rwlock);
+
+	list_for_each(pos, bucket) {
+		found = list_entry(pos, struct stack_record, list);
 		if (found->hash == hash &&
 		    found->size == size &&
 		    !stackdepot_memcmp(entries, found->entries, size))
@@ -355,17 +471,24 @@ static inline struct stack_record *find_stack(struct stack_record *bucket,
 	return NULL;
 }
 
-depot_stack_handle_t __stack_depot_save(unsigned long *entries,
-					unsigned int nr_entries,
-					gfp_t alloc_flags, bool can_alloc)
+depot_stack_handle_t stack_depot_save_flags(unsigned long *entries,
+					    unsigned int nr_entries,
+					    gfp_t alloc_flags,
+					    depot_flags_t depot_flags)
 {
-	struct stack_record *found = NULL, **bucket;
-	union handle_parts retval = { .handle = 0 };
+	struct list_head *bucket;
+	struct stack_record *found = NULL;
+	depot_stack_handle_t handle = 0;
 	struct page *page = NULL;
 	void *prealloc = NULL;
+	bool can_alloc = depot_flags & STACK_DEPOT_FLAG_CAN_ALLOC;
+	bool need_alloc = false;
 	unsigned long flags;
 	u32 hash;
 
+	if (WARN_ON(depot_flags & ~STACK_DEPOT_FLAGS_MASK))
+		return 0;
+
 	/*
 	 * If this stack trace is from an interrupt, including anything before
 	 * interrupt entry usually leads to unbounded stack depot growth.
@@ -377,28 +500,36 @@ depot_stack_handle_t __stack_depot_save(unsigned long *entries,
 	nr_entries = filter_irq_stacks(entries, nr_entries);
 
 	if (unlikely(nr_entries == 0) || stack_depot_disabled)
-		goto fast_exit;
+		return 0;
 
 	hash = hash_stack(entries, nr_entries);
 	bucket = &stack_table[hash & stack_hash_mask];
 
-	/*
-	 * Fast path: look the stack trace up without locking.
-	 * The smp_load_acquire() here pairs with smp_store_release() to
-	 * |bucket| below.
-	 */
-	found = find_stack(smp_load_acquire(bucket), entries, nr_entries, hash);
-	if (found)
+	read_lock_irqsave(&pool_rwlock, flags);
+	printk_deferred_enter();
+
+	/* Fast path: look the stack trace up without full locking. */
+	found = find_stack(bucket, entries, nr_entries, hash);
+	if (found) {
+		if (depot_flags & STACK_DEPOT_FLAG_GET)
+			refcount_inc(&found->count);
+		printk_deferred_exit();
+		read_unlock_irqrestore(&pool_rwlock, flags);
 		goto exit;
+	}
+
+	/* Take note if another stack pool needs to be allocated. */
+	if (new_pool_required)
+		need_alloc = true;
+
+	printk_deferred_exit();
+	read_unlock_irqrestore(&pool_rwlock, flags);
 
 	/*
-	 * Check if another stack pool needs to be initialized. If so, allocate
-	 * the memory now - we won't be able to do that under the lock.
-	 *
-	 * The smp_load_acquire() here pairs with smp_store_release() to
-	 * |next_pool_inited| in depot_alloc_stack() and depot_init_pool().
+	 * Allocate memory for a new pool if required now:
+	 * we won't be able to do that under the lock.
	 */
-	if (unlikely(can_alloc && smp_load_acquire(&next_pool_required))) {
+	if (unlikely(can_alloc && need_alloc)) {
 		/*
 		 * Zero out zone modifiers, as we don't have specific zone
 		 * requirements. Keep the flags related to allocation in atomic
@@ -412,63 +543,56 @@ depot_stack_handle_t __stack_depot_save(unsigned long *entries,
 		prealloc = page_address(page);
 	}
 
-	raw_spin_lock_irqsave(&pool_lock, flags);
+	write_lock_irqsave(&pool_rwlock, flags);
+	printk_deferred_enter();
 
-	found = find_stack(*bucket, entries, nr_entries, hash);
+	found = find_stack(bucket, entries, nr_entries, hash);
 	if (!found) {
 		struct stack_record *new =
 			depot_alloc_stack(entries, nr_entries, hash, &prealloc);
 
 		if (new) {
-			new->next = *bucket;
-			/*
-			 * This smp_store_release() pairs with
-			 * smp_load_acquire() from |bucket| above.
-			 */
-			smp_store_release(bucket, new);
+			list_add(&new->list, bucket);
 			found = new;
 		}
-	} else if (prealloc) {
+	} else {
+		if (depot_flags & STACK_DEPOT_FLAG_GET)
+			refcount_inc(&found->count);
 		/*
 		 * Stack depot already contains this stack trace, but let's
-		 * keep the preallocated memory for the future.
+		 * keep the preallocated memory for future.
 		 */
-		depot_init_pool(&prealloc);
+		if (prealloc)
+			depot_keep_new_pool(&prealloc);
 	}
 
-	raw_spin_unlock_irqrestore(&pool_lock, flags);
+	printk_deferred_exit();
+	write_unlock_irqrestore(&pool_rwlock, flags);
 exit:
 	if (prealloc) {
 		/* Stack depot didn't use this memory, free it. */
 		free_pages((unsigned long)prealloc, DEPOT_POOL_ORDER);
 	}
 	if (found)
-		retval.handle = found->handle.handle;
-fast_exit:
-	return retval.handle;
+		handle = found->handle.handle;
+	return handle;
 }
-EXPORT_SYMBOL_GPL(__stack_depot_save);
+EXPORT_SYMBOL_GPL(stack_depot_save_flags);
 
 depot_stack_handle_t stack_depot_save(unsigned long *entries,
 				      unsigned int nr_entries,
 				      gfp_t alloc_flags)
 {
-	return __stack_depot_save(entries, nr_entries, alloc_flags, true);
+	return stack_depot_save_flags(entries, nr_entries, alloc_flags,
+				      STACK_DEPOT_FLAG_CAN_ALLOC);
 }
 EXPORT_SYMBOL_GPL(stack_depot_save);
 
 unsigned int stack_depot_fetch(depot_stack_handle_t handle,
 			       unsigned long **entries)
 {
-	union handle_parts parts = { .handle = handle };
-	/*
-	 * READ_ONCE pairs with potential concurrent write in
-	 * depot_alloc_stack.
-	 */
-	int pool_index_cached = READ_ONCE(pool_index);
-	void *pool;
-	size_t offset = parts.offset << DEPOT_STACK_ALIGN;
 	struct stack_record *stack;
+	unsigned long flags;
 
 	*entries = NULL;
 	/*
@@ -477,24 +601,51 @@ unsigned int stack_depot_fetch(depot_stack_handle_t handle,
 	 */
 	kmsan_unpoison_memory(entries, sizeof(*entries));
 
-	if (!handle)
+	if (!handle || stack_depot_disabled)
 		return 0;
 
-	if (parts.pool_index > pool_index_cached) {
-		WARN(1, "pool index %d out of bounds (%d) for stack id %08x\n",
-			parts.pool_index, pool_index_cached, handle);
-		return 0;
-	}
-	pool = stack_pools[parts.pool_index];
-	if (!pool)
-		return 0;
-	stack = pool + offset;
+	read_lock_irqsave(&pool_rwlock, flags);
+	printk_deferred_enter();
+
+	stack = depot_fetch_stack(handle);
+
+	printk_deferred_exit();
+	read_unlock_irqrestore(&pool_rwlock, flags);
 
 	*entries = stack->entries;
 	return stack->size;
 }
 EXPORT_SYMBOL_GPL(stack_depot_fetch);
 
+void stack_depot_put(depot_stack_handle_t handle)
+{
+	struct stack_record *stack;
+	unsigned long flags;
+
+	if (!handle || stack_depot_disabled)
+		return;
+
+	write_lock_irqsave(&pool_rwlock, flags);
+	printk_deferred_enter();
+
+	stack = depot_fetch_stack(handle);
+	if (WARN_ON(!stack))
+		goto out;
+
+	if (refcount_dec_and_test(&stack->count)) {
+		/* Unlink stack from the hash table. */
+		list_del(&stack->list);
+
+		/* Free stack. */
+		depot_free_stack(stack);
+	}
+
+out:
+	printk_deferred_exit();
+	write_unlock_irqrestore(&pool_rwlock, flags);
+}
+EXPORT_SYMBOL_GPL(stack_depot_put);
+
 void stack_depot_print(depot_stack_handle_t stack)
 {
 	unsigned long *entries;
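Taken together, stack_depot_save_flags() with STACK_DEPOT_FLAG_GET and stack_depot_put() give records a reference-counted lifetime: the last put returns the record to the freelist. A minimal sketch of how a client such as KASAN might use the pair — the two stack depot calls and flags are from the diff above, while the surrounding helpers are illustrative assumptions only:

#include <linux/stackdepot.h>
#include <linux/stacktrace.h>

/*
 * Save the current stack trace and take a reference on the record so
 * it survives until the matching stack_depot_put(). Sketch only;
 * storing the handle and error handling are left to the caller.
 */
static depot_stack_handle_t example_save_stack(gfp_t gfp)
{
	unsigned long entries[64];
	unsigned int nr;

	nr = stack_trace_save(entries, ARRAY_SIZE(entries), 0);
	/* CAN_ALLOC lets the depot grab a new pool; GET pins the record. */
	return stack_depot_save_flags(entries, nr, gfp,
				      STACK_DEPOT_FLAG_CAN_ALLOC |
				      STACK_DEPOT_FLAG_GET);
}

static void example_release_stack(depot_stack_handle_t handle)
{
	/*
	 * Drops the reference taken by STACK_DEPOT_FLAG_GET; when the
	 * last reference is gone, the record can be evicted and reused.
	 */
	stack_depot_put(handle);
}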
lib/test_maple_tree.c
@@ -43,6 +43,7 @@ atomic_t maple_tree_tests_passed;
 /* #define BENCH_NODE_STORE */
 /* #define BENCH_AWALK */
 /* #define BENCH_WALK */
+/* #define BENCH_LOAD */
 /* #define BENCH_MT_FOR_EACH */
 /* #define BENCH_FORK */
 /* #define BENCH_MAS_FOR_EACH */
@@ -54,6 +55,11 @@ atomic_t maple_tree_tests_passed;
 #else
 #define cond_resched()			do {} while (0)
 #endif
 
+#define mas_is_none(x)		((x)->status == ma_none)
+#define mas_is_overflow(x)	((x)->status == ma_overflow)
+#define mas_is_underflow(x)	((x)->status == ma_underflow)
+
 static int __init mtree_insert_index(struct maple_tree *mt,
 				     unsigned long index, gfp_t gfp)
 {
@@ -582,7 +588,7 @@ static noinline void __init check_find(struct maple_tree *mt)
 	MT_BUG_ON(mt, last != mas.last);
 
 
-	mas.node = MAS_NONE;
+	mas.status = ma_none;
 	mas.index = ULONG_MAX;
 	mas.last = ULONG_MAX;
 	entry2 = mas_prev(&mas, 0);
@@ -1749,6 +1755,19 @@ static noinline void __init bench_walk(struct maple_tree *mt)
 }
 #endif
 
+#if defined(BENCH_LOAD)
+static noinline void __init bench_load(struct maple_tree *mt)
+{
+	int i, max = 2500, count = 550000000;
+
+	for (i = 0; i < max; i += 10)
+		mtree_store_range(mt, i, i + 5, xa_mk_value(i), GFP_KERNEL);
+
+	for (i = 0; i < count; i++)
+		mtree_load(mt, 1470);
+}
+#endif
+
 #if defined(BENCH_MT_FOR_EACH)
 static noinline void __init bench_mt_for_each(struct maple_tree *mt)
 {
@@ -1834,47 +1853,48 @@ static noinline void __init bench_mas_prev(struct maple_tree *mt)
 }
 #endif
 /* check_forking - simulate the kernel forking sequence with the tree. */
-static noinline void __init check_forking(struct maple_tree *mt)
+static noinline void __init check_forking(void)
 {
-
-	struct maple_tree newmt;
-	int i, nr_entries = 134;
+	struct maple_tree mt, newmt;
+	int i, nr_entries = 134, ret;
 	void *val;
-	MA_STATE(mas, mt, 0, 0);
-	MA_STATE(newmas, mt, 0, 0);
-	struct rw_semaphore newmt_lock;
+	MA_STATE(mas, &mt, 0, 0);
+	MA_STATE(newmas, &newmt, 0, 0);
+	struct rw_semaphore mt_lock, newmt_lock;
 
+	init_rwsem(&mt_lock);
 	init_rwsem(&newmt_lock);
 
-	for (i = 0; i <= nr_entries; i++)
-		mtree_store_range(mt, i*10, i*10 + 5,
-				  xa_mk_value(i), GFP_KERNEL);
+	mt_init_flags(&mt, MT_FLAGS_ALLOC_RANGE | MT_FLAGS_LOCK_EXTERN);
+	mt_set_external_lock(&mt, &mt_lock);
 
-	mt_set_non_kernel(99999);
 	mt_init_flags(&newmt, MT_FLAGS_ALLOC_RANGE | MT_FLAGS_LOCK_EXTERN);
 	mt_set_external_lock(&newmt, &newmt_lock);
-	newmas.tree = &newmt;
-	mas_reset(&newmas);
-	mas_reset(&mas);
-	down_write(&newmt_lock);
-	mas.index = 0;
-	mas.last = 0;
-	if (mas_expected_entries(&newmas, nr_entries)) {
+
+	down_write(&mt_lock);
+	for (i = 0; i <= nr_entries; i++) {
+		mas_set_range(&mas, i*10, i*10 + 5);
+		mas_store_gfp(&mas, xa_mk_value(i), GFP_KERNEL);
+	}
+
+	down_write_nested(&newmt_lock, SINGLE_DEPTH_NESTING);
+	ret = __mt_dup(&mt, &newmt, GFP_KERNEL);
+	if (ret) {
 		pr_err("OOM!");
 		BUG_ON(1);
 	}
-	rcu_read_lock();
-	mas_for_each(&mas, val, ULONG_MAX) {
-		newmas.index = mas.index;
-		newmas.last = mas.last;
+
+	mas_set(&newmas, 0);
+	mas_for_each(&newmas, val, ULONG_MAX)
 		mas_store(&newmas, val);
-	}
-	rcu_read_unlock();
+
 	mas_destroy(&newmas);
+	mas_destroy(&mas);
 	mt_validate(&newmt);
-	mt_set_non_kernel(0);
 	__mt_destroy(&newmt);
+	__mt_destroy(&mt);
 	up_write(&newmt_lock);
+	up_write(&mt_lock);
 }
 
 static noinline void __init check_iteration(struct maple_tree *mt)
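The rewritten check_forking() above exercises __mt_dup(), the new bulk-duplication entry point used on the fork() path: duplicate the tree structure in one pass, then walk the copy and re-store entries that need per-child fixups. A condensed sketch of that pattern, lifted from the test; the dup_tree() wrapper and its locking arguments are illustrative assumptions:

#include <linux/maple_tree.h>
#include <linux/rwsem.h>

/*
 * Duplicate 'mt' into 'newmt' the way check_forking() does. 'mt' is
 * assumed to use an external lock already held for write, as above.
 */
static int dup_tree(struct maple_tree *mt, struct maple_tree *newmt,
		    struct rw_semaphore *newmt_lock)
{
	MA_STATE(newmas, newmt, 0, 0);
	void *val;
	int ret;

	mt_init_flags(newmt, MT_FLAGS_ALLOC_RANGE | MT_FLAGS_LOCK_EXTERN);
	mt_set_external_lock(newmt, newmt_lock);

	down_write_nested(newmt_lock, SINGLE_DEPTH_NESTING);
	ret = __mt_dup(mt, newmt, GFP_KERNEL);	/* 1) copy the whole tree */
	if (ret) {
		up_write(newmt_lock);
		return ret;			/* typically -ENOMEM */
	}

	/* 2) Walk the copy and overwrite entries that need fixing up. */
	mas_set(&newmas, 0);
	mas_for_each(&newmas, val, ULONG_MAX)
		mas_store(&newmas, val);	/* store the fixed-up value */
	mas_destroy(&newmas);

	up_write(newmt_lock);
	return 0;
}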
@@ -1977,49 +1997,51 @@ static noinline void __init check_mas_store_gfp(struct maple_tree *mt)
 }
 
 #if defined(BENCH_FORK)
-static noinline void __init bench_forking(struct maple_tree *mt)
+static noinline void __init bench_forking(void)
 {
-
-	struct maple_tree newmt;
-	int i, nr_entries = 134, nr_fork = 80000;
+	struct maple_tree mt, newmt;
+	int i, nr_entries = 134, nr_fork = 80000, ret;
 	void *val;
-	MA_STATE(mas, mt, 0, 0);
-	MA_STATE(newmas, mt, 0, 0);
-	struct rw_semaphore newmt_lock;
+	MA_STATE(mas, &mt, 0, 0);
+	MA_STATE(newmas, &newmt, 0, 0);
+	struct rw_semaphore mt_lock, newmt_lock;
 
+	init_rwsem(&mt_lock);
 	init_rwsem(&newmt_lock);
-	mt_set_external_lock(&newmt, &newmt_lock);
 
-	for (i = 0; i <= nr_entries; i++)
-		mtree_store_range(mt, i*10, i*10 + 5,
-				  xa_mk_value(i), GFP_KERNEL);
+	mt_init_flags(&mt, MT_FLAGS_ALLOC_RANGE | MT_FLAGS_LOCK_EXTERN);
+	mt_set_external_lock(&mt, &mt_lock);
+
+	down_write(&mt_lock);
+	for (i = 0; i <= nr_entries; i++) {
+		mas_set_range(&mas, i*10, i*10 + 5);
+		mas_store_gfp(&mas, xa_mk_value(i), GFP_KERNEL);
+	}
 
 	for (i = 0; i < nr_fork; i++) {
-		mt_set_non_kernel(99999);
-		mt_init_flags(&newmt, MT_FLAGS_ALLOC_RANGE);
-		newmas.tree = &newmt;
-		mas_reset(&newmas);
-		mas_reset(&mas);
-		mas.index = 0;
-		mas.last = 0;
-		rcu_read_lock();
-		down_write(&newmt_lock);
-		if (mas_expected_entries(&newmas, nr_entries)) {
-			printk("OOM!");
+		mt_init_flags(&newmt,
+			      MT_FLAGS_ALLOC_RANGE | MT_FLAGS_LOCK_EXTERN);
+		mt_set_external_lock(&newmt, &newmt_lock);
+
+		down_write_nested(&newmt_lock, SINGLE_DEPTH_NESTING);
+		ret = __mt_dup(&mt, &newmt, GFP_KERNEL);
+		if (ret) {
+			pr_err("OOM!");
 			BUG_ON(1);
 		}
-		mas_for_each(&mas, val, ULONG_MAX) {
-			newmas.index = mas.index;
-			newmas.last = mas.last;
+
+		mas_set(&newmas, 0);
+		mas_for_each(&newmas, val, ULONG_MAX)
 			mas_store(&newmas, val);
-		}
+
 		mas_destroy(&newmas);
-		rcu_read_unlock();
 		mt_validate(&newmt);
-		mt_set_non_kernel(0);
 		__mt_destroy(&newmt);
 		up_write(&newmt_lock);
 	}
+	mas_destroy(&mas);
+	__mt_destroy(&mt);
+	up_write(&mt_lock);
 }
 #endif
@@ -2175,7 +2197,7 @@ static noinline void __init next_prev_test(struct maple_tree *mt)
 	MT_BUG_ON(mt, val != NULL);
 	MT_BUG_ON(mt, mas.index != 0);
 	MT_BUG_ON(mt, mas.last != 5);
-	MT_BUG_ON(mt, mas.node != MAS_UNDERFLOW);
+	MT_BUG_ON(mt, !mas_is_underflow(&mas));
 
 	mas.index = 0;
 	mas.last = 5;
@@ -3039,10 +3061,6 @@ static noinline void __init check_empty_area_fill(struct maple_tree *mt)
  * DNE	active		active		range of NULL
  */
 
-#define	mas_active(x)		(((x).node != MAS_ROOT) && \
-				 ((x).node != MAS_START) && \
-				 ((x).node != MAS_PAUSE) && \
-				 ((x).node != MAS_NONE))
 static noinline void __init check_state_handling(struct maple_tree *mt)
 {
 	MA_STATE(mas, mt, 0, 0);
@@ -3057,7 +3075,7 @@ static noinline void __init check_state_handling(struct maple_tree *mt)
 	/* prev: Start -> underflow*/
 	entry = mas_prev(&mas, 0);
 	MT_BUG_ON(mt, entry != NULL);
-	MT_BUG_ON(mt, mas.node != MAS_UNDERFLOW);
+	MT_BUG_ON(mt, mas.status != ma_underflow);
 
 	/* prev: Start -> root */
 	mas_set(&mas, 10);
@@ -3065,7 +3083,7 @@ static noinline void __init check_state_handling(struct maple_tree *mt)
 	MT_BUG_ON(mt, entry != ptr);
 	MT_BUG_ON(mt, mas.index != 0);
 	MT_BUG_ON(mt, mas.last != 0);
-	MT_BUG_ON(mt, mas.node != MAS_ROOT);
+	MT_BUG_ON(mt, mas.status != ma_root);
 
 	/* prev: pause -> root */
 	mas_set(&mas, 10);
@@ -3074,7 +3092,7 @@ static noinline void __init check_state_handling(struct maple_tree *mt)
 	MT_BUG_ON(mt, entry != ptr);
 	MT_BUG_ON(mt, mas.index != 0);
 	MT_BUG_ON(mt, mas.last != 0);
-	MT_BUG_ON(mt, mas.node != MAS_ROOT);
+	MT_BUG_ON(mt, mas.status != ma_root);
 
 	/* next: start -> none */
 	mas_set(&mas, 0);
@@ -3082,7 +3100,7 @@ static noinline void __init check_state_handling(struct maple_tree *mt)
 	MT_BUG_ON(mt, mas.index != 1);
 	MT_BUG_ON(mt, mas.last != ULONG_MAX);
 	MT_BUG_ON(mt, entry != NULL);
-	MT_BUG_ON(mt, mas.node != MAS_NONE);
+	MT_BUG_ON(mt, mas.status != ma_none);
 
 	/* next: start -> none*/
 	mas_set(&mas, 10);
@@ -3090,7 +3108,7 @@ static noinline void __init check_state_handling(struct maple_tree *mt)
 	MT_BUG_ON(mt, mas.index != 1);
 	MT_BUG_ON(mt, mas.last != ULONG_MAX);
 	MT_BUG_ON(mt, entry != NULL);
-	MT_BUG_ON(mt, mas.node != MAS_NONE);
+	MT_BUG_ON(mt, mas.status != ma_none);
 
 	/* find: start -> root */
 	mas_set(&mas, 0);
@@ -3098,21 +3116,21 @@ static noinline void __init check_state_handling(struct maple_tree *mt)
 	MT_BUG_ON(mt, entry != ptr);
 	MT_BUG_ON(mt, mas.index != 0);
 	MT_BUG_ON(mt, mas.last != 0);
-	MT_BUG_ON(mt, mas.node != MAS_ROOT);
+	MT_BUG_ON(mt, mas.status != ma_root);
 
 	/* find: root -> none */
 	entry = mas_find(&mas, ULONG_MAX);
 	MT_BUG_ON(mt, entry != NULL);
 	MT_BUG_ON(mt, mas.index != 1);
 	MT_BUG_ON(mt, mas.last != ULONG_MAX);
-	MT_BUG_ON(mt, mas.node != MAS_NONE);
+	MT_BUG_ON(mt, mas.status != ma_none);
 
 	/* find: none -> none */
 	entry = mas_find(&mas, ULONG_MAX);
 	MT_BUG_ON(mt, entry != NULL);
 	MT_BUG_ON(mt, mas.index != 1);
 	MT_BUG_ON(mt, mas.last != ULONG_MAX);
-	MT_BUG_ON(mt, mas.node != MAS_NONE);
+	MT_BUG_ON(mt, mas.status != ma_none);
 
 	/* find: start -> none */
 	mas_set(&mas, 10);
@@ -3120,14 +3138,14 @@ static noinline void __init check_state_handling(struct maple_tree *mt)
 	MT_BUG_ON(mt, entry != NULL);
 	MT_BUG_ON(mt, mas.index != 1);
 	MT_BUG_ON(mt, mas.last != ULONG_MAX);
-	MT_BUG_ON(mt, mas.node != MAS_NONE);
+	MT_BUG_ON(mt, mas.status != ma_none);
 
 	/* find_rev: none -> root */
 	entry = mas_find_rev(&mas, 0);
 	MT_BUG_ON(mt, entry != ptr);
 	MT_BUG_ON(mt, mas.index != 0);
 	MT_BUG_ON(mt, mas.last != 0);
-	MT_BUG_ON(mt, mas.node != MAS_ROOT);
+	MT_BUG_ON(mt, mas.status != ma_root);
 
 	/* find_rev: start -> root */
 	mas_set(&mas, 0);
@@ -3135,21 +3153,21 @@ static noinline void __init check_state_handling(struct maple_tree *mt)
 	MT_BUG_ON(mt, entry != ptr);
 	MT_BUG_ON(mt, mas.index != 0);
 	MT_BUG_ON(mt, mas.last != 0);
-	MT_BUG_ON(mt, mas.node != MAS_ROOT);
+	MT_BUG_ON(mt, mas.status != ma_root);
 
 	/* find_rev: root -> none */
 	entry = mas_find_rev(&mas, 0);
 	MT_BUG_ON(mt, entry != NULL);
 	MT_BUG_ON(mt, mas.index != 0);
 	MT_BUG_ON(mt, mas.last != 0);
-	MT_BUG_ON(mt, mas.node != MAS_NONE);
+	MT_BUG_ON(mt, mas.status != ma_none);
 
 	/* find_rev: none -> none */
 	entry = mas_find_rev(&mas, 0);
 	MT_BUG_ON(mt, entry != NULL);
 	MT_BUG_ON(mt, mas.index != 0);
 	MT_BUG_ON(mt, mas.last != 0);
-	MT_BUG_ON(mt, mas.node != MAS_NONE);
+	MT_BUG_ON(mt, mas.status != ma_none);
 
 	/* find_rev: start -> root */
 	mas_set(&mas, 10);
@@ -3157,7 +3175,7 @@ static noinline void __init check_state_handling(struct maple_tree *mt)
 	MT_BUG_ON(mt, entry != ptr);
 	MT_BUG_ON(mt, mas.index != 0);
 	MT_BUG_ON(mt, mas.last != 0);
-	MT_BUG_ON(mt, mas.node != MAS_ROOT);
+	MT_BUG_ON(mt, mas.status != ma_root);
 
 	/* walk: start -> none */
 	mas_set(&mas, 10);
@@ -3165,7 +3183,7 @@ static noinline void __init check_state_handling(struct maple_tree *mt)
 	MT_BUG_ON(mt, entry != NULL);
 	MT_BUG_ON(mt, mas.index != 1);
 	MT_BUG_ON(mt, mas.last != ULONG_MAX);
-	MT_BUG_ON(mt, mas.node != MAS_NONE);
+	MT_BUG_ON(mt, mas.status != ma_none);
 
 	/* walk: pause -> none*/
 	mas_set(&mas, 10);
@@ -3174,7 +3192,7 @@ static noinline void __init check_state_handling(struct maple_tree *mt)
 	MT_BUG_ON(mt, entry != NULL);
 	MT_BUG_ON(mt, mas.index != 1);
 	MT_BUG_ON(mt, mas.last != ULONG_MAX);
-	MT_BUG_ON(mt, mas.node != MAS_NONE);
+	MT_BUG_ON(mt, mas.status != ma_none);
 
 	/* walk: none -> none */
 	mas.index = mas.last = 10;
@@ -3182,14 +3200,14 @@ static noinline void __init check_state_handling(struct maple_tree *mt)
 	MT_BUG_ON(mt, entry != NULL);
 	MT_BUG_ON(mt, mas.index != 1);
 	MT_BUG_ON(mt, mas.last != ULONG_MAX);
-	MT_BUG_ON(mt, mas.node != MAS_NONE);
+	MT_BUG_ON(mt, mas.status != ma_none);
 
 	/* walk: none -> none */
 	entry = mas_walk(&mas);
 	MT_BUG_ON(mt, entry != NULL);
 	MT_BUG_ON(mt, mas.index != 1);
 	MT_BUG_ON(mt, mas.last != ULONG_MAX);
-	MT_BUG_ON(mt, mas.node != MAS_NONE);
+	MT_BUG_ON(mt, mas.status != ma_none);
 
 	/* walk: start -> root */
 	mas_set(&mas, 0);
@@ -3197,7 +3215,7 @@ static noinline void __init check_state_handling(struct maple_tree *mt)
 	MT_BUG_ON(mt, entry != ptr);
 	MT_BUG_ON(mt, mas.index != 0);
 	MT_BUG_ON(mt, mas.last != 0);
-	MT_BUG_ON(mt, mas.node != MAS_ROOT);
+	MT_BUG_ON(mt, mas.status != ma_root);
 
 	/* walk: pause -> root */
 	mas_set(&mas, 0);
@@ -3206,22 +3224,22 @@ static noinline void __init check_state_handling(struct maple_tree *mt)
 	MT_BUG_ON(mt, entry != ptr);
 	MT_BUG_ON(mt, mas.index != 0);
 	MT_BUG_ON(mt, mas.last != 0);
-	MT_BUG_ON(mt, mas.node != MAS_ROOT);
+	MT_BUG_ON(mt, mas.status != ma_root);
 
 	/* walk: none -> root */
-	mas.node = MAS_NONE;
+	mas.status = ma_none;
 	entry = mas_walk(&mas);
 	MT_BUG_ON(mt, entry != ptr);
 	MT_BUG_ON(mt, mas.index != 0);
 	MT_BUG_ON(mt, mas.last != 0);
-	MT_BUG_ON(mt, mas.node != MAS_ROOT);
+	MT_BUG_ON(mt, mas.status != ma_root);
 
 	/* walk: root -> root */
 	entry = mas_walk(&mas);
 	MT_BUG_ON(mt, entry != ptr);
 	MT_BUG_ON(mt, mas.index != 0);
 	MT_BUG_ON(mt, mas.last != 0);
-	MT_BUG_ON(mt, mas.node != MAS_ROOT);
+	MT_BUG_ON(mt, mas.status != ma_root);
 
 	/* walk: root -> none */
 	mas_set(&mas, 10);
@@ -3229,7 +3247,7 @@ static noinline void __init check_state_handling(struct maple_tree *mt)
 	MT_BUG_ON(mt, entry != NULL);
 	MT_BUG_ON(mt, mas.index != 1);
 	MT_BUG_ON(mt, mas.last != ULONG_MAX);
-	MT_BUG_ON(mt, mas.node != MAS_NONE);
+	MT_BUG_ON(mt, mas.status != ma_none);
 
 	/* walk: none -> root */
 	mas.index = mas.last = 0;
@@ -3237,7 +3255,7 @@ static noinline void __init check_state_handling(struct maple_tree *mt)
 	MT_BUG_ON(mt, entry != ptr);
 	MT_BUG_ON(mt, mas.index != 0);
 	MT_BUG_ON(mt, mas.last != 0);
-	MT_BUG_ON(mt, mas.node != MAS_ROOT);
+	MT_BUG_ON(mt, mas.status != ma_root);
 
 	mas_unlock(&mas);
 
@@ -3255,7 +3273,7 @@ static noinline void __init check_state_handling(struct maple_tree *mt)
 	MT_BUG_ON(mt, entry != ptr);
 	MT_BUG_ON(mt, mas.index != 0);
 	MT_BUG_ON(mt, mas.last != 0x1500);
-	MT_BUG_ON(mt, !mas_active(mas));
+	MT_BUG_ON(mt, !mas_is_active(&mas));
 
 	/* next: pause ->active */
 	mas_set(&mas, 0);
@@ -3264,126 +3282,132 @@ static noinline void __init check_state_handling(struct maple_tree *mt)
 	MT_BUG_ON(mt, entry != ptr);
 	MT_BUG_ON(mt, mas.index != 0x1000);
 	MT_BUG_ON(mt, mas.last != 0x1500);
-	MT_BUG_ON(mt, !mas_active(mas));
+	MT_BUG_ON(mt, !mas_is_active(&mas));
 
 	/* next: none ->active */
 	mas.index = mas.last = 0;
 	mas.offset = 0;
-	mas.node = MAS_NONE;
+	mas.status = ma_none;
 	entry = mas_next(&mas, ULONG_MAX);
 	MT_BUG_ON(mt, entry != ptr);
 	MT_BUG_ON(mt, mas.index != 0x1000);
 	MT_BUG_ON(mt, mas.last != 0x1500);
-	MT_BUG_ON(mt, !mas_active(mas));
+	MT_BUG_ON(mt, !mas_is_active(&mas));
 
-	/* next:active ->active */
-	entry = mas_next(&mas, ULONG_MAX);
+	/* next:active ->active (spanning limit) */
+	entry = mas_next(&mas, 0x2100);
 	MT_BUG_ON(mt, entry != ptr2);
 	MT_BUG_ON(mt, mas.index != 0x2000);
 	MT_BUG_ON(mt, mas.last != 0x2500);
-	MT_BUG_ON(mt, !mas_active(mas));
+	MT_BUG_ON(mt, !mas_is_active(&mas));
 
-	/* next:active -> active beyond data */
+	/* next:active -> overflow (limit reached) beyond data */
 	entry = mas_next(&mas, 0x2999);
 	MT_BUG_ON(mt, entry != NULL);
 	MT_BUG_ON(mt, mas.index != 0x2501);
 	MT_BUG_ON(mt, mas.last != 0x2fff);
-	MT_BUG_ON(mt, !mas_active(mas));
+	MT_BUG_ON(mt, !mas_is_overflow(&mas));
 
-	/* Continue after last range ends after max */
+	/* next:overflow -> active (limit changed) */
 	entry = mas_next(&mas, ULONG_MAX);
 	MT_BUG_ON(mt, entry != ptr3);
 	MT_BUG_ON(mt, mas.index != 0x3000);
 	MT_BUG_ON(mt, mas.last != 0x3500);
-	MT_BUG_ON(mt, !mas_active(mas));
+	MT_BUG_ON(mt, !mas_is_active(&mas));
 
-	/* next:active -> active continued */
+	/* next:active -> overflow (limit reached) */
 	entry = mas_next(&mas, ULONG_MAX);
 	MT_BUG_ON(mt, entry != NULL);
 	MT_BUG_ON(mt, mas.index != 0x3501);
 	MT_BUG_ON(mt, mas.last != ULONG_MAX);
-	MT_BUG_ON(mt, !mas_active(mas));
-
-	/* next:active -> overflow */
-	entry = mas_next(&mas, ULONG_MAX);
-	MT_BUG_ON(mt, entry != NULL);
-	MT_BUG_ON(mt, mas.index != 0x3501);
-	MT_BUG_ON(mt, mas.last != ULONG_MAX);
-	MT_BUG_ON(mt, mas.node != MAS_OVERFLOW);
+	MT_BUG_ON(mt, !mas_is_overflow(&mas));
 
 	/* next:overflow -> overflow */
 	entry = mas_next(&mas, ULONG_MAX);
 	MT_BUG_ON(mt, entry != NULL);
 	MT_BUG_ON(mt, mas.index != 0x3501);
 	MT_BUG_ON(mt, mas.last != ULONG_MAX);
-	MT_BUG_ON(mt, mas.node != MAS_OVERFLOW);
+	MT_BUG_ON(mt, !mas_is_overflow(&mas));
 
 	/* prev:overflow -> active */
 	entry = mas_prev(&mas, 0);
 	MT_BUG_ON(mt, entry != ptr3);
 	MT_BUG_ON(mt, mas.index != 0x3000);
 	MT_BUG_ON(mt, mas.last != 0x3500);
-	MT_BUG_ON(mt, !mas_active(mas));
+	MT_BUG_ON(mt, !mas_is_active(&mas));
 
 	/* next: none -> active, skip value at location */
 	mas_set(&mas, 0);
 	entry = mas_next(&mas, ULONG_MAX);
-	mas.node = MAS_NONE;
+	mas.status = ma_none;
 	mas.offset = 0;
 	entry = mas_next(&mas, ULONG_MAX);
 	MT_BUG_ON(mt, entry != ptr2);
 	MT_BUG_ON(mt, mas.index != 0x2000);
 	MT_BUG_ON(mt, mas.last != 0x2500);
-	MT_BUG_ON(mt, !mas_active(mas));
+	MT_BUG_ON(mt, !mas_is_active(&mas));
 
 	/* prev:active ->active */
 	entry = mas_prev(&mas, 0);
 	MT_BUG_ON(mt, entry != ptr);
 	MT_BUG_ON(mt, mas.index != 0x1000);
 	MT_BUG_ON(mt, mas.last != 0x1500);
-	MT_BUG_ON(mt, !mas_active(mas));
+	MT_BUG_ON(mt, !mas_is_active(&mas));
 
-	/* prev:active -> active spanning end range */
+	/* prev:active -> underflow (span limit) */
+	mas_next(&mas, ULONG_MAX);
 	entry = mas_prev(&mas, 0x1200);
+	MT_BUG_ON(mt, entry != ptr);
+	MT_BUG_ON(mt, mas.index != 0x1000);
+	MT_BUG_ON(mt, mas.last != 0x1500);
+	MT_BUG_ON(mt, !mas_is_active(&mas)); /* spanning limit */
+	entry = mas_prev(&mas, 0x1200); /* underflow */
+	MT_BUG_ON(mt, entry != NULL);
+	MT_BUG_ON(mt, mas.index != 0x1000);
+	MT_BUG_ON(mt, mas.last != 0x1500);
+	MT_BUG_ON(mt, !mas_is_underflow(&mas));
+
+	/* prev:underflow -> underflow (lower limit) spanning end range */
+	entry = mas_prev(&mas, 0x0100);
 	MT_BUG_ON(mt, entry != NULL);
 	MT_BUG_ON(mt, mas.index != 0);
 	MT_BUG_ON(mt, mas.last != 0x0FFF);
-	MT_BUG_ON(mt, !mas_active(mas));
+	MT_BUG_ON(mt, !mas_is_underflow(&mas));
 
-	/* prev:active -> underflow */
+	/* prev:underflow -> underflow */
 	entry = mas_prev(&mas, 0);
 	MT_BUG_ON(mt, entry != NULL);
 	MT_BUG_ON(mt, mas.index != 0);
 	MT_BUG_ON(mt, mas.last != 0x0FFF);
-	MT_BUG_ON(mt, mas.node != MAS_UNDERFLOW);
+	MT_BUG_ON(mt, !mas_is_underflow(&mas));
 
 	/* prev:underflow -> underflow */
 	entry = mas_prev(&mas, 0);
 	MT_BUG_ON(mt, entry != NULL);
 	MT_BUG_ON(mt, mas.index != 0);
 	MT_BUG_ON(mt, mas.last != 0x0FFF);
-	MT_BUG_ON(mt, mas.node != MAS_UNDERFLOW);
+	MT_BUG_ON(mt, !mas_is_underflow(&mas));
 
 	/* next:underflow -> active */
 	entry = mas_next(&mas, ULONG_MAX);
 	MT_BUG_ON(mt, entry != ptr);
 	MT_BUG_ON(mt, mas.index != 0x1000);
 	MT_BUG_ON(mt, mas.last != 0x1500);
-	MT_BUG_ON(mt, !mas_active(mas));
+	MT_BUG_ON(mt, !mas_is_active(&mas));
 
 	/* prev:first value -> underflow */
 	entry = mas_prev(&mas, 0x1000);
 	MT_BUG_ON(mt, entry != NULL);
 	MT_BUG_ON(mt, mas.index != 0x1000);
 	MT_BUG_ON(mt, mas.last != 0x1500);
-	MT_BUG_ON(mt, mas.node != MAS_UNDERFLOW);
+	MT_BUG_ON(mt, !mas_is_underflow(&mas));
 
 	/* find:underflow -> first value */
 	entry = mas_find(&mas, ULONG_MAX);
 	MT_BUG_ON(mt, entry != ptr);
 	MT_BUG_ON(mt, mas.index != 0x1000);
 	MT_BUG_ON(mt, mas.last != 0x1500);
-	MT_BUG_ON(mt, !mas_active(mas));
+	MT_BUG_ON(mt, !mas_is_active(&mas));
 
 	/* prev: pause ->active */
 	mas_set(&mas, 0x3600);
@@ -3394,21 +3418,21 @@ static noinline void __init check_state_handling(struct maple_tree *mt)
 	MT_BUG_ON(mt, entry != ptr2);
 	MT_BUG_ON(mt, mas.index != 0x2000);
 	MT_BUG_ON(mt, mas.last != 0x2500);
-	MT_BUG_ON(mt, !mas_active(mas));
+	MT_BUG_ON(mt, !mas_is_active(&mas));
 
-	/* prev:active -> active spanning min */
+	/* prev:active -> underflow spanning min */
 	entry = mas_prev(&mas, 0x1600);
 	MT_BUG_ON(mt, entry != NULL);
 	MT_BUG_ON(mt, mas.index != 0x1501);
 	MT_BUG_ON(mt, mas.last != 0x1FFF);
-	MT_BUG_ON(mt, !mas_active(mas));
+	MT_BUG_ON(mt, !mas_is_underflow(&mas));
 
 	/* prev: active ->active, continue */
 	entry = mas_prev(&mas, 0);
 	MT_BUG_ON(mt, entry != ptr);
 	MT_BUG_ON(mt, mas.index != 0x1000);
 	MT_BUG_ON(mt, mas.last != 0x1500);
-	MT_BUG_ON(mt, !mas_active(mas));
+	MT_BUG_ON(mt, !mas_is_active(&mas));
 
 	/* find: start ->active */
 	mas_set(&mas, 0);
@@ -3416,7 +3440,7 @@ static noinline void __init check_state_handling(struct maple_tree *mt)
 	MT_BUG_ON(mt, entry != ptr);
 	MT_BUG_ON(mt, mas.index != 0x1000);
 	MT_BUG_ON(mt, mas.last != 0x1500);
-	MT_BUG_ON(mt, !mas_active(mas));
+	MT_BUG_ON(mt, !mas_is_active(&mas));
 
 	/* find: pause ->active */
 	mas_set(&mas, 0);
@@ -3425,7 +3449,7 @@ static noinline void __init check_state_handling(struct maple_tree *mt)
 	MT_BUG_ON(mt, entry != ptr);
 	MT_BUG_ON(mt, mas.index != 0x1000);
 	MT_BUG_ON(mt, mas.last != 0x1500);
-	MT_BUG_ON(mt, !mas_active(mas));
+	MT_BUG_ON(mt, !mas_is_active(&mas));
 
 	/* find: start ->active on value */;
 	mas_set(&mas, 1200);
@@ -3433,14 +3457,14 @@ static noinline void __init check_state_handling(struct maple_tree *mt)
 	MT_BUG_ON(mt, entry != ptr);
 	MT_BUG_ON(mt, mas.index != 0x1000);
 	MT_BUG_ON(mt, mas.last != 0x1500);
-	MT_BUG_ON(mt, !mas_active(mas));
+	MT_BUG_ON(mt, !mas_is_active(&mas));
 
 	/* find:active ->active */
 	entry = mas_find(&mas, ULONG_MAX);
 	MT_BUG_ON(mt, entry != ptr2);
 	MT_BUG_ON(mt, mas.index != 0x2000);
 	MT_BUG_ON(mt, mas.last != 0x2500);
-	MT_BUG_ON(mt, !mas_active(mas));
+	MT_BUG_ON(mt, !mas_is_active(&mas));
 
 
 	/* find:active -> active (NULL)*/
@@ -3448,35 +3472,35 @@ static noinline void __init check_state_handling(struct maple_tree *mt)
 	MT_BUG_ON(mt, entry != NULL);
 	MT_BUG_ON(mt, mas.index != 0x2501);
 	MT_BUG_ON(mt, mas.last != 0x2FFF);
-	MT_BUG_ON(mt, !mas_active(mas));
+	MAS_BUG_ON(&mas, !mas_is_active(&mas));
 
 	/* find: overflow ->active */
 	entry = mas_find(&mas, 0x5000);
 	MT_BUG_ON(mt, entry != ptr3);
 	MT_BUG_ON(mt, mas.index != 0x3000);
 	MT_BUG_ON(mt, mas.last != 0x3500);
-	MT_BUG_ON(mt, !mas_active(mas));
+	MT_BUG_ON(mt, !mas_is_active(&mas));
 
 	/* find:active -> active (NULL) end*/
 	entry = mas_find(&mas, ULONG_MAX);
 	MT_BUG_ON(mt, entry != NULL);
 	MT_BUG_ON(mt, mas.index != 0x3501);
 	MT_BUG_ON(mt, mas.last != ULONG_MAX);
-	MT_BUG_ON(mt, !mas_active(mas));
+	MAS_BUG_ON(&mas, !mas_is_active(&mas));
 
 	/* find_rev: active (END) ->active */
 	entry = mas_find_rev(&mas, 0);
 	MT_BUG_ON(mt, entry != ptr3);
 	MT_BUG_ON(mt, mas.index != 0x3000);
 	MT_BUG_ON(mt, mas.last != 0x3500);
-	MT_BUG_ON(mt, !mas_active(mas));
+	MT_BUG_ON(mt, !mas_is_active(&mas));
 
 	/* find_rev:active ->active */
 	entry = mas_find_rev(&mas, 0);
 	MT_BUG_ON(mt, entry != ptr2);
 	MT_BUG_ON(mt, mas.index != 0x2000);
 	MT_BUG_ON(mt, mas.last != 0x2500);
-	MT_BUG_ON(mt, !mas_active(mas));
+	MT_BUG_ON(mt, !mas_is_active(&mas));
 
 	/* find_rev: pause ->active */
 	mas_pause(&mas);
@@ -3484,14 +3508,14 @@ static noinline void __init check_state_handling(struct maple_tree *mt)
 	MT_BUG_ON(mt, entry != ptr);
 	MT_BUG_ON(mt, mas.index != 0x1000);
 	MT_BUG_ON(mt, mas.last != 0x1500);
-	MT_BUG_ON(mt, !mas_active(mas));
+	MT_BUG_ON(mt, !mas_is_active(&mas));
 
-	/* find_rev:active -> active */
+	/* find_rev:active -> underflow */
 	entry = mas_find_rev(&mas, 0);
 	MT_BUG_ON(mt, entry != NULL);
 	MT_BUG_ON(mt, mas.index != 0);
 	MT_BUG_ON(mt, mas.last != 0x0FFF);
-	MT_BUG_ON(mt, !mas_active(mas));
+	MT_BUG_ON(mt, !mas_is_underflow(&mas));
 
 	/* find_rev: start ->active */
mas_set(&mas, 0x1200);
|
||||
@@ -3499,7 +3523,7 @@ static noinline void __init check_state_handling(struct maple_tree *mt)
|
||||
MT_BUG_ON(mt, entry != ptr);
|
||||
MT_BUG_ON(mt, mas.index != 0x1000);
|
||||
MT_BUG_ON(mt, mas.last != 0x1500);
|
||||
MT_BUG_ON(mt, !mas_active(mas));
|
||||
MT_BUG_ON(mt, !mas_is_active(&mas));
|
||||
|
||||
/* mas_walk start ->active */
|
||||
mas_set(&mas, 0x1200);
|
||||
@@ -3507,7 +3531,7 @@ static noinline void __init check_state_handling(struct maple_tree *mt)
|
||||
MT_BUG_ON(mt, entry != ptr);
|
||||
MT_BUG_ON(mt, mas.index != 0x1000);
|
||||
MT_BUG_ON(mt, mas.last != 0x1500);
|
||||
MT_BUG_ON(mt, !mas_active(mas));
|
||||
MT_BUG_ON(mt, !mas_is_active(&mas));
|
||||
|
||||
/* mas_walk start ->active */
|
||||
mas_set(&mas, 0x1600);
|
||||
@@ -3515,7 +3539,7 @@ static noinline void __init check_state_handling(struct maple_tree *mt)
|
||||
MT_BUG_ON(mt, entry != NULL);
|
||||
MT_BUG_ON(mt, mas.index != 0x1501);
|
||||
MT_BUG_ON(mt, mas.last != 0x1fff);
|
||||
MT_BUG_ON(mt, !mas_active(mas));
|
||||
MT_BUG_ON(mt, !mas_is_active(&mas));
|
||||
|
||||
/* mas_walk pause ->active */
|
||||
mas_set(&mas, 0x1200);
|
||||
@@ -3524,7 +3548,7 @@ static noinline void __init check_state_handling(struct maple_tree *mt)
|
||||
MT_BUG_ON(mt, entry != ptr);
|
||||
MT_BUG_ON(mt, mas.index != 0x1000);
|
||||
MT_BUG_ON(mt, mas.last != 0x1500);
|
||||
MT_BUG_ON(mt, !mas_active(mas));
|
||||
MT_BUG_ON(mt, !mas_is_active(&mas));
|
||||
|
||||
/* mas_walk pause -> active */
|
||||
mas_set(&mas, 0x1600);
|
||||
@@ -3533,25 +3557,25 @@ static noinline void __init check_state_handling(struct maple_tree *mt)
|
||||
MT_BUG_ON(mt, entry != NULL);
|
||||
MT_BUG_ON(mt, mas.index != 0x1501);
|
||||
MT_BUG_ON(mt, mas.last != 0x1fff);
|
||||
MT_BUG_ON(mt, !mas_active(mas));
|
||||
MT_BUG_ON(mt, !mas_is_active(&mas));
|
||||
|
||||
/* mas_walk none -> active */
|
||||
mas_set(&mas, 0x1200);
|
||||
mas.node = MAS_NONE;
|
||||
mas.status = ma_none;
|
||||
entry = mas_walk(&mas);
|
||||
MT_BUG_ON(mt, entry != ptr);
|
||||
MT_BUG_ON(mt, mas.index != 0x1000);
|
||||
MT_BUG_ON(mt, mas.last != 0x1500);
|
||||
MT_BUG_ON(mt, !mas_active(mas));
|
||||
MT_BUG_ON(mt, !mas_is_active(&mas));
|
||||
|
||||
/* mas_walk none -> active */
|
||||
mas_set(&mas, 0x1600);
|
||||
mas.node = MAS_NONE;
|
||||
mas.status = ma_none;
|
||||
entry = mas_walk(&mas);
|
||||
MT_BUG_ON(mt, entry != NULL);
|
||||
MT_BUG_ON(mt, mas.index != 0x1501);
|
||||
MT_BUG_ON(mt, mas.last != 0x1fff);
|
||||
MT_BUG_ON(mt, !mas_active(mas));
|
||||
MT_BUG_ON(mt, !mas_is_active(&mas));
|
||||
|
||||
/* mas_walk active -> active */
|
||||
mas.index = 0x1200;
|
||||
@@ -3561,7 +3585,7 @@ static noinline void __init check_state_handling(struct maple_tree *mt)
|
||||
MT_BUG_ON(mt, entry != ptr);
|
||||
MT_BUG_ON(mt, mas.index != 0x1000);
|
||||
MT_BUG_ON(mt, mas.last != 0x1500);
|
||||
MT_BUG_ON(mt, !mas_active(mas));
|
||||
MT_BUG_ON(mt, !mas_is_active(&mas));
|
||||
|
||||
/* mas_walk active -> active */
|
||||
mas.index = 0x1600;
|
||||
@@ -3570,7 +3594,7 @@ static noinline void __init check_state_handling(struct maple_tree *mt)
|
||||
MT_BUG_ON(mt, entry != NULL);
|
||||
MT_BUG_ON(mt, mas.index != 0x1501);
|
||||
MT_BUG_ON(mt, mas.last != 0x1fff);
|
||||
MT_BUG_ON(mt, !mas_active(mas));
|
||||
MT_BUG_ON(mt, !mas_is_active(&mas));
|
||||
|
||||
mas_unlock(&mas);
|
||||
}
|
||||
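The hunks above all make the same substitution: the test assertions stop reaching into the iterator's internals (comparing mas.node against sentinels such as MAS_NONE and MAS_UNDERFLOW, or going through the test-local mas_active() macro) and instead call the predicate helpers mas_is_active() and mas_is_underflow(), with the not-in-tree state now carried in a separate mas.status field (ma_none and friends). A minimal sketch of the new-style usage follows; it assumes only the calls visible in the diff plus the standard MA_STATE()/mas_lock() helpers, and is illustrative rather than a copy of the test code.

#include <linux/maple_tree.h>

static void state_sketch(struct maple_tree *mt)
{
	void *entry;
	MA_STATE(mas, mt, 0x1200, 0x1200);	/* iterator over mt at index 0x1200 */

	mas_lock(&mas);
	entry = mas_walk(&mas);			/* walk to the range containing 0x1200 */
	if (mas_is_active(&mas))		/* was: open-coded mas.node comparisons */
		pr_info("entry %p spans [%lx, %lx]\n", entry, mas.index, mas.last);

	if (!mas_prev(&mas, 0) && mas_is_underflow(&mas))
		pr_info("walked below the lower limit\n");	/* was: mas.node != MAS_UNDERFLOW */

	mas.status = ma_none;			/* was: mas.node = MAS_NONE */
	mas_unlock(&mas);
}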
@@ -3585,10 +3609,6 @@ static int __init maple_tree_seed(void)

 	pr_info("\nTEST STARTING\n\n");

-	mt_init_flags(&tree, MT_FLAGS_ALLOC_RANGE);
-	check_root_expand(&tree);
-	mtree_destroy(&tree);
-
 #if defined(BENCH_SLOT_STORE)
 #define BENCH
 	mt_init_flags(&tree, MT_FLAGS_ALLOC_RANGE);
@@ -3617,13 +3637,18 @@ static int __init maple_tree_seed(void)
 	mtree_destroy(&tree);
 	goto skip;
 #endif
-#if defined(BENCH_FORK)
+#if defined(BENCH_LOAD)
 #define BENCH
 	mt_init_flags(&tree, MT_FLAGS_ALLOC_RANGE);
-	bench_forking(&tree);
+	bench_load(&tree);
 	mtree_destroy(&tree);
 	goto skip;
 #endif
+#if defined(BENCH_FORK)
+#define BENCH
+	bench_forking();
+	goto skip;
+#endif
 #if defined(BENCH_MT_FOR_EACH)
 #define BENCH
 	mt_init_flags(&tree, MT_FLAGS_ALLOC_RANGE);
@@ -3647,13 +3672,15 @@ static int __init maple_tree_seed(void)
 #endif

 	mt_init_flags(&tree, MT_FLAGS_ALLOC_RANGE);
-	check_iteration(&tree);
+	check_root_expand(&tree);
 	mtree_destroy(&tree);

 	mt_init_flags(&tree, MT_FLAGS_ALLOC_RANGE);
-	check_forking(&tree);
+	check_iteration(&tree);
 	mtree_destroy(&tree);

+	check_forking();
+
 	mt_init_flags(&tree, MT_FLAGS_ALLOC_RANGE);
 	check_mas_store_gfp(&tree);
 	mtree_destroy(&tree);
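Throughout maple_tree_seed() the pattern is the same: initialize the shared tree with mt_init_flags(), run one check, destroy the tree so the next check starts clean. After this change the forking tests are the exception; check_forking() and bench_forking() take no tree argument and manage their own trees internally. A hedged sketch of the recurring pattern, where run_one_check() is a hypothetical helper (the real file simply repeats the three calls inline for each test):

#include <linux/maple_tree.h>

static struct maple_tree tree;		/* the test file uses one shared tree */

/* Hypothetical helper illustrating the init/check/destroy sequence. */
static void run_one_check(void (*check)(struct maple_tree *))
{
	mt_init_flags(&tree, MT_FLAGS_ALLOC_RANGE);	/* fresh allocation-range tree */
	check(&tree);					/* exercise one scenario */
	mtree_destroy(&tree);				/* no state leaks into the next test */
}

A call such as run_one_check(check_mas_store_gfp) would then stand in for one of the three-line stanzas above.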
@@ -93,7 +93,7 @@ static int __init test_pages(int *total_failures)
 	int failures = 0, num_tests = 0;
 	int i;

-	for (i = 0; i <= MAX_ORDER; i++)
+	for (i = 0; i < NR_PAGE_ORDERS; i++)
 		num_tests += do_alloc_pages_order(i, &failures);

 	REPORT_FAILURES_IN_FN();
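The loop-bound change reflects this cycle's page-order cleanup: instead of iterating inclusively up to the highest order, code iterates exclusively below NR_PAGE_ORDERS, which counts the valid page orders (highest order plus one), so the off-by-one-prone <= bound disappears. A small sketch of the convention, with pages_per_all_orders() as a hypothetical example function:

#include <linux/mmzone.h>	/* NR_PAGE_ORDERS */

/* Hypothetical example: pages covered by one block of every order. */
static unsigned long pages_per_all_orders(void)
{
	unsigned long total = 0;
	int order;

	/* exclusive bound, matching the new convention above */
	for (order = 0; order < NR_PAGE_ORDERS; order++)
		total += 1UL << order;	/* an order-n block spans 2^n pages */

	return total;
}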
@@ -204,8 +204,8 @@ static void ubsan_prologue(struct source_location *loc, const char *reason)
 {
 	current->in_ubsan++;

-	pr_err("========================================"
-		"========================================\n");
+	pr_warn(CUT_HERE);

 	pr_err("UBSAN: %s in %s:%d:%d\n", reason, loc->file_name,
 		loc->line & LINE_MASK, loc->column & COLUMN_MASK);
@@ -215,8 +215,7 @@ static void ubsan_prologue(struct source_location *loc, const char *reason)
 static void ubsan_epilogue(void)
 {
 	dump_stack();
-	pr_err("========================================"
-		"========================================\n");
+	pr_warn("---[ end trace ]---\n");

 	current->in_ubsan--;
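Rather than a private banner of '=' characters, UBSAN reports are now bracketed by the markers other kernel diagnostics already use: the CUT_HERE opener (the "------------[ cut here ]------------" line defined in asm-generic/bug.h) and a "---[ end trace ]---" closer. Tooling that already delimits WARN()-style reports can therefore recognize UBSAN output with no extra cases. A minimal sketch of the same bracketing in a hypothetical reporter (my_report() is an illustrative name, not kernel API):

#include <linux/bug.h>		/* CUT_HERE */
#include <linux/printk.h>	/* pr_warn(), pr_err(), dump_stack() */

static void my_report(const char *what)
{
	pr_warn(CUT_HERE);			/* shared opener that log scrapers key on */
	pr_err("example: %s\n", what);		/* report body */
	dump_stack();
	pr_warn("---[ end trace ]---\n");	/* shared closer */
}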