mirror of
https://git.kernel.org/pub/scm/linux/kernel/git/herbert/cryptodev-2.6.git
synced 2026-04-18 03:23:53 -04:00
futex: Convert to get/put_user_inline()
Replace the open coded implementation with the new get/put_user_inline() helpers. This might be replaced by a regular get/put_user(), but that needs a proper performance evaluation. No functional change intended. Signed-off-by: Thomas Gleixner <tglx@linutronix.de> Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org> Signed-off-by: Ingo Molnar <mingo@kernel.org> Link: https://patch.msgid.link/20251027083745.736737934@linutronix.de
This commit is contained in:
committed by
Ingo Molnar
parent
b2cfc0cd68
commit
e4e28fd698
@@ -581,7 +581,7 @@ int get_futex_key(u32 __user *uaddr, unsigned int flags, union futex_key *key,
 	if (flags & FLAGS_NUMA) {
 		u32 __user *naddr = (void *)uaddr + size / 2;

-		if (futex_get_value(&node, naddr))
+		if (get_user_inline(node, naddr))
 			return -EFAULT;

 		if ((node != FUTEX_NO_NODE) &&
@@ -601,7 +601,7 @@ int get_futex_key(u32 __user *uaddr, unsigned int flags, union futex_key *key,
 			node = numa_node_id();
 			node_updated = true;
 		}
-		if (node_updated && futex_put_value(node, naddr))
+		if (node_updated && put_user_inline(node, naddr))
 			return -EFAULT;
 	}

@@ -281,63 +281,11 @@ static inline int futex_cmpxchg_value_locked(u32 *curval, u32 __user *uaddr, u32
 	return ret;
 }

-/*
- * This does a plain atomic user space read, and the user pointer has
- * already been verified earlier by get_futex_key() to be both aligned
- * and actually in user space, just like futex_atomic_cmpxchg_inatomic().
- *
- * We still want to avoid any speculation, and while __get_user() is
- * the traditional model for this, it's actually slower than doing
- * this manually these days.
- *
- * We could just have a per-architecture special function for it,
- * the same way we do futex_atomic_cmpxchg_inatomic(), but rather
- * than force everybody to do that, write it out long-hand using
- * the low-level user-access infrastructure.
- *
- * This looks a bit overkill, but generally just results in a couple
- * of instructions.
- */
-static __always_inline int futex_get_value(u32 *dest, u32 __user *from)
-{
-	u32 val;
-
-	if (can_do_masked_user_access())
-		from = masked_user_access_begin(from);
-	else if (!user_read_access_begin(from, sizeof(*from)))
-		return -EFAULT;
-	unsafe_get_user(val, from, Efault);
-	user_read_access_end();
-	*dest = val;
-	return 0;
-Efault:
-	user_read_access_end();
-	return -EFAULT;
-}
-
-static __always_inline int futex_put_value(u32 val, u32 __user *to)
-{
-	if (can_do_masked_user_access())
-		to = masked_user_access_begin(to);
-	else if (!user_write_access_begin(to, sizeof(*to)))
-		return -EFAULT;
-	unsafe_put_user(val, to, Efault);
-	user_write_access_end();
-	return 0;
-Efault:
-	user_write_access_end();
-	return -EFAULT;
-}
-
+/* Read from user memory with pagefaults disabled */
 static inline int futex_get_value_locked(u32 *dest, u32 __user *from)
 {
-	int ret;
-
-	pagefault_disable();
-	ret = futex_get_value(dest, from);
-	pagefault_enable();
-
-	return ret;
+	guard(pagefault)();
+	return get_user_inline(*dest, from);
 }

 extern void __futex_unqueue(struct futex_q *q);
Reference in New Issue
Block a user