mirror of
https://git.kernel.org/pub/scm/linux/kernel/git/netdev/net.git/
The old 'copy_user_generic_unrolled' function was oddly implemented for
largely historical reasons: it had been based on the uncached copy case,
which has some other concerns.

For example, the __copy_user_nocache() function uses 'movnti' for the
destination stores, and those want the destination to be aligned. In
contrast, the regular copy function doesn't really care, and trying to
align things only complicates matters.

Also, like the clear_user function, the copy function had some odd
handling of the repeat counts, complicating the exception handling for
no really good reason. So as with clear_user, just write it to keep all
the byte counts in the %rcx register, exactly like the 'rep movs'
functionality that this replaces.

Unlike a real 'rep movs', we do allow for this to trash a few temporary
registers to not have to unnecessarily save/restore registers on the
stack.

And like the clearing case, rename this to what it now clearly is:
'rep_movs_alternative', and make it one coherent function, so that it
shows up as such in profiles (instead of the odd split between
"copy_user_generic_unrolled" and "copy_user_short_string", the latter
of which was not about strings at all, and which was shared with the
uncached case).

Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
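To make the register contract concrete (an illustrative sketch, not code from
this patch): the byte count goes in through %rcx and the not-yet-copied
remainder comes back out in %rcx, so at the C level copy_user_generic() and
the raw_copy_*_user() helpers below simply return "bytes left uncopied", 0 on
success. A caller along the lines of the kernel's generic _copy_from_user()
wrapper consumes that value like so; copy_from_user_sketch() is a
hypothetical name used only for this example:

#include <linux/uaccess.h>
#include <linux/string.h>

/* Hypothetical wrapper, modeled on the generic _copy_from_user() pattern. */
static unsigned long copy_from_user_sketch(void *to, const void __user *from,
					   unsigned long n)
{
	unsigned long res = n;

	might_fault();
	if (access_ok(from, n))
		/* Returns the number of bytes it could NOT copy. */
		res = raw_copy_from_user(to, from, n);
	if (unlikely(res))
		/* Zero the uncopied tail so callers never see stale data. */
		memset(to + (n - res), 0, res);
	return res;
}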
118 lines
2.8 KiB
C
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _ASM_X86_UACCESS_64_H
#define _ASM_X86_UACCESS_64_H

/*
 * User space memory access functions
 */
#include <linux/compiler.h>
#include <linux/lockdep.h>
#include <linux/kasan-checks.h>
#include <asm/alternative.h>
#include <asm/cpufeatures.h>
#include <asm/page.h>

/*
 * Copy To/From Userspace
 */

/* Handles exceptions in both to and from, but doesn't do access_ok */
__must_check unsigned long
rep_movs_alternative(void *to, const void *from, unsigned len);

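/*
 * Generic user copy: returns the number of bytes NOT copied (0 on success).
 * The count is kept in %rcx throughout, matching the 'rep movs' register
 * usage, and the non-FSRM fallback may clobber a few temporaries (see the
 * clobber list below) rather than saving them on the stack.
 */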
static __always_inline __must_check unsigned long
copy_user_generic(void *to, const void *from, unsigned long len)
{
	stac();
	/*
	 * If CPU has FSRM feature, use 'rep movs'.
	 * Otherwise, use rep_movs_alternative.
	 */
	asm volatile(
		"1:\n\t"
		ALTERNATIVE("rep movsb",
			    "call rep_movs_alternative", ALT_NOT(X86_FEATURE_FSRM))
		"2:\n"
		_ASM_EXTABLE_UA(1b, 2b)
		:"+c" (len), "+D" (to), "+S" (from), ASM_CALL_CONSTRAINT
		: : "memory", "rax", "r8", "r9", "r10", "r11");
	clac();
	return len;
}

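/* Raw hooks used by the generic copy_{from,to}_user(); no access_ok() here. */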
static __always_inline __must_check unsigned long
raw_copy_from_user(void *dst, const void __user *src, unsigned long size)
{
	return copy_user_generic(dst, (__force void *)src, size);
}

static __always_inline __must_check unsigned long
raw_copy_to_user(void __user *dst, const void *src, unsigned long size)
{
	return copy_user_generic((__force void *)dst, src, size);
}

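/*
 * Cache-bypassing copy using non-temporal 'movnti' stores; 'zerorest'
 * selects whether the destination tail is zeroed after a fault (the
 * caller below passes 0).
 */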
extern long __copy_user_nocache(void *dst, const void __user *src,
				unsigned size, int zerorest);

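/*
 * Flushing variants: make sure the copied destination is flushed out of
 * the CPU caches (used for writes to persistent memory).
 */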
extern long __copy_user_flushcache(void *dst, const void __user *src, unsigned size);
extern void memcpy_page_flushcache(char *to, struct page *page, size_t offset,
				   size_t len);

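/* May be used in atomic context: no might_fault(), just SMAP open/close. */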
static inline int
__copy_from_user_inatomic_nocache(void *dst, const void __user *src,
				  unsigned size)
{
	long ret;
	kasan_check_write(dst, size);
	stac();
	ret = __copy_user_nocache(dst, src, size, 0);
	clac();
	return ret;
}

static inline int
__copy_from_user_flushcache(void *dst, const void __user *src, unsigned size)
{
	kasan_check_write(dst, size);
	return __copy_user_flushcache(dst, src, size);
}

/*
 * Zero Userspace.
 */

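/* Handles exceptions, but doesn't do access_ok */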
__must_check unsigned long
rep_stos_alternative(void __user *addr, unsigned long len);

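/* Returns the number of bytes that could NOT be cleared (0 on success). */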
static __always_inline __must_check unsigned long __clear_user(void __user *addr, unsigned long size)
{
	might_fault();
	stac();

	/*
	 * No memory constraint because it doesn't change any memory gcc
	 * knows about.
	 */
	asm volatile(
		"1:\n\t"
		ALTERNATIVE("rep stosb",
			    "call rep_stos_alternative", ALT_NOT(X86_FEATURE_FSRS))
		"2:\n"
		_ASM_EXTABLE_UA(1b, 2b)
		: "+c" (size), "+D" (addr), ASM_CALL_CONSTRAINT
		: "a" (0));

	clac();

	return size;
}

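/* access_ok() wrapper: an invalid range clears nothing and returns n. */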
static __always_inline unsigned long clear_user(void __user *to, unsigned long n)
{
	if (access_ok(to, n))
		return __clear_user(to, n);
	return n;
}

#endif /* _ASM_X86_UACCESS_64_H */