mirror of
https://git.kernel.org/pub/scm/linux/kernel/git/herbert/cryptodev-2.6.git
synced 2026-05-02 18:17:50 -04:00
Merge tag 'locking-core-2025-05-25' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip
Pull locking updates from Ingo Molnar:
"Futexes:
- Add support for task local hash maps (Sebastian Andrzej Siewior,
Peter Zijlstra)
- Implement the FUTEX2_NUMA ABI, a feature that extends the futex
interface to be NUMA-aware. On NUMA-aware futexes a second u32 word
containing the NUMA node is added after the u32 futex value word
(Peter Zijlstra)
- Implement the FUTEX2_MPOL ABI, a feature that extends the futex
interface to be mempolicy-aware as well, to further refine futex
node mappings and lookups (Peter Zijlstra)
Locking primitives:
- Misc cleanups (Andy Shevchenko, Borislav Petkov, Colin Ian King,
Ingo Molnar, Nam Cao, Peter Zijlstra)
Lockdep:
- Prevent abuse of lockdep subclasses (Waiman Long)
- Add number of dynamic keys to /proc/lockdep_stats (Waiman Long)
Plus misc cleanups and fixes"
* tag 'locking-core-2025-05-25' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip: (35 commits)
selftests/futex: Fix spelling mistake "unitiliazed" -> "uninitialized"
futex: Correct the kernedoc return value for futex_wait_setup().
tools headers: Synchronize prctl.h ABI header
futex: Use RCU_INIT_POINTER() in futex_mm_init().
selftests/futex: Use TAP output in futex_numa_mpol
selftests/futex: Use TAP output in futex_priv_hash
futex: Fix kernel-doc comments
futex: Relax the rcu_assign_pointer() assignment of mm->futex_phash in futex_mm_init()
futex: Fix outdated comment in struct restart_block
locking/lockdep: Add number of dynamic keys to /proc/lockdep_stats
locking/lockdep: Prevent abuse of lockdep subclass
locking/lockdep: Move hlock_equal() to the respective #ifdeffery
futex,selftests: Add another FUTEX2_NUMA selftest
selftests/futex: Add futex_numa_mpol
selftests/futex: Add futex_priv_hash
selftests/futex: Build without headers nonsense
tools/perf: Allow to select the number of hash buckets
tools headers: Synchronize prctl.h ABI header
futex: Implement FUTEX2_MPOL
futex: Implement FUTEX2_NUMA
...
This commit is contained in:
@@ -219,6 +219,7 @@ static DECLARE_BITMAP(list_entries_in_use, MAX_LOCKDEP_ENTRIES);
|
||||
static struct hlist_head lock_keys_hash[KEYHASH_SIZE];
|
||||
unsigned long nr_lock_classes;
|
||||
unsigned long nr_zapped_classes;
|
||||
unsigned long nr_dynamic_keys;
|
||||
unsigned long max_lock_class_idx;
|
||||
struct lock_class lock_classes[MAX_LOCKDEP_KEYS];
|
||||
DECLARE_BITMAP(lock_classes_in_use, MAX_LOCKDEP_KEYS);
|
||||
@@ -1238,6 +1239,7 @@ void lockdep_register_key(struct lock_class_key *key)
|
||||
goto out_unlock;
|
||||
}
|
||||
hlist_add_head_rcu(&key->hash_entry, hash_head);
|
||||
nr_dynamic_keys++;
|
||||
out_unlock:
|
||||
graph_unlock();
|
||||
restore_irqs:
|
||||
@@ -1976,41 +1978,6 @@ print_circular_bug_header(struct lock_list *entry, unsigned int depth,
|
||||
print_circular_bug_entry(entry, depth);
|
||||
}
|
||||
|
||||
/*
|
||||
* We are about to add A -> B into the dependency graph, and in __bfs() a
|
||||
* strong dependency path A -> .. -> B is found: hlock_class equals
|
||||
* entry->class.
|
||||
*
|
||||
* If A -> .. -> B can replace A -> B in any __bfs() search (means the former
|
||||
* is _stronger_ than or equal to the latter), we consider A -> B as redundant.
|
||||
* For example if A -> .. -> B is -(EN)-> (i.e. A -(E*)-> .. -(*N)-> B), and A
|
||||
* -> B is -(ER)-> or -(EN)->, then we don't need to add A -> B into the
|
||||
* dependency graph, as any strong path ..-> A -> B ->.. we can get with
|
||||
* having dependency A -> B, we could already get a equivalent path ..-> A ->
|
||||
* .. -> B -> .. with A -> .. -> B. Therefore A -> B is redundant.
|
||||
*
|
||||
* We need to make sure both the start and the end of A -> .. -> B is not
|
||||
* weaker than A -> B. For the start part, please see the comment in
|
||||
* check_redundant(). For the end part, we need:
|
||||
*
|
||||
* Either
|
||||
*
|
||||
* a) A -> B is -(*R)-> (everything is not weaker than that)
|
||||
*
|
||||
* or
|
||||
*
|
||||
* b) A -> .. -> B is -(*N)-> (nothing is stronger than this)
|
||||
*
|
||||
*/
|
||||
static inline bool hlock_equal(struct lock_list *entry, void *data)
|
||||
{
|
||||
struct held_lock *hlock = (struct held_lock *)data;
|
||||
|
||||
return hlock_class(hlock) == entry->class && /* Found A -> .. -> B */
|
||||
(hlock->read == 2 || /* A -> B is -(*R)-> */
|
||||
!entry->only_xr); /* A -> .. -> B is -(*N)-> */
|
||||
}
|
||||
|
||||
/*
|
||||
* We are about to add B -> A into the dependency graph, and in __bfs() a
|
||||
* strong dependency path A -> .. -> B is found: hlock_class equals
|
||||
@@ -2915,6 +2882,41 @@ static inline bool usage_skip(struct lock_list *entry, void *mask)
|
||||
#endif /* CONFIG_TRACE_IRQFLAGS */
|
||||
|
||||
#ifdef CONFIG_LOCKDEP_SMALL
|
||||
/*
|
||||
* We are about to add A -> B into the dependency graph, and in __bfs() a
|
||||
* strong dependency path A -> .. -> B is found: hlock_class equals
|
||||
* entry->class.
|
||||
*
|
||||
* If A -> .. -> B can replace A -> B in any __bfs() search (means the former
|
||||
* is _stronger_ than or equal to the latter), we consider A -> B as redundant.
|
||||
* For example if A -> .. -> B is -(EN)-> (i.e. A -(E*)-> .. -(*N)-> B), and A
|
||||
* -> B is -(ER)-> or -(EN)->, then we don't need to add A -> B into the
|
||||
* dependency graph, as any strong path ..-> A -> B ->.. we can get with
|
||||
* having dependency A -> B, we could already get a equivalent path ..-> A ->
|
||||
* .. -> B -> .. with A -> .. -> B. Therefore A -> B is redundant.
|
||||
*
|
||||
* We need to make sure both the start and the end of A -> .. -> B is not
|
||||
* weaker than A -> B. For the start part, please see the comment in
|
||||
* check_redundant(). For the end part, we need:
|
||||
*
|
||||
* Either
|
||||
*
|
||||
* a) A -> B is -(*R)-> (everything is not weaker than that)
|
||||
*
|
||||
* or
|
||||
*
|
||||
* b) A -> .. -> B is -(*N)-> (nothing is stronger than this)
|
||||
*
|
||||
*/
|
||||
static inline bool hlock_equal(struct lock_list *entry, void *data)
|
||||
{
|
||||
struct held_lock *hlock = (struct held_lock *)data;
|
||||
|
||||
return hlock_class(hlock) == entry->class && /* Found A -> .. -> B */
|
||||
(hlock->read == 2 || /* A -> B is -(*R)-> */
|
||||
!entry->only_xr); /* A -> .. -> B is -(*N)-> */
|
||||
}
|
||||
|
||||
/*
|
||||
* Check that the dependency graph starting at <src> can lead to
|
||||
* <target> or not. If it can, <src> -> <target> dependency is already
|
||||
@@ -5101,6 +5103,9 @@ static int __lock_acquire(struct lockdep_map *lock, unsigned int subclass,
|
||||
lockevent_inc(lockdep_nocheck);
|
||||
}
|
||||
|
||||
if (DEBUG_LOCKS_WARN_ON(subclass >= MAX_LOCKDEP_SUBCLASSES))
|
||||
return 0;
|
||||
|
||||
if (subclass < NR_LOCKDEP_CACHING_CLASSES)
|
||||
class = lock->class_cache[subclass];
|
||||
/*
|
||||
@@ -6606,6 +6611,7 @@ void lockdep_unregister_key(struct lock_class_key *key)
|
||||
pf = get_pending_free();
|
||||
__lockdep_free_key_range(pf, key, 1);
|
||||
need_callback = prepare_call_rcu_zapped(pf);
|
||||
nr_dynamic_keys--;
|
||||
}
|
||||
lockdep_unlock();
|
||||
raw_local_irq_restore(flags);
|
||||
|
||||
@@ -138,6 +138,7 @@ extern unsigned long nr_lock_classes;
|
||||
extern unsigned long nr_zapped_classes;
|
||||
extern unsigned long nr_zapped_lock_chains;
|
||||
extern unsigned long nr_list_entries;
|
||||
extern unsigned long nr_dynamic_keys;
|
||||
long lockdep_next_lockchain(long i);
|
||||
unsigned long lock_chain_count(void);
|
||||
extern unsigned long nr_stack_trace_entries;
|
||||
|
||||
@@ -286,6 +286,8 @@ static int lockdep_stats_show(struct seq_file *m, void *v)
|
||||
#endif
|
||||
seq_printf(m, " lock-classes: %11lu [max: %lu]\n",
|
||||
nr_lock_classes, MAX_LOCKDEP_KEYS);
|
||||
seq_printf(m, " dynamic-keys: %11lu\n",
|
||||
nr_dynamic_keys);
|
||||
seq_printf(m, " direct dependencies: %11lu [max: %lu]\n",
|
||||
nr_list_entries, MAX_LOCKDEP_ENTRIES);
|
||||
seq_printf(m, " indirect dependencies: %11lu\n",
|
||||
|
||||
Reference in New Issue
Block a user