mirror of
https://git.kernel.org/pub/scm/linux/kernel/git/netdev/net.git/
synced 2026-04-18 06:33:43 -04:00
The new PowerPC VMX fast path (__copy_tofrom_user_power7_vmx) is not
exercised by existing copyloops selftests. This patch updates
the selftest to exercise the VMX variant, ensuring the VMX copy path
is validated.
Changes include:
- COPY_LOOP=test___copy_tofrom_user_power7_vmx with -D VMX_TEST is used
in existing selftest build targets.
- Inclusion of ../utils.c to provide get_auxv_entry() for hardware
feature detection.
- At runtime, the test skips execution if Altivec is not available.
- Copy sizes above VMX_COPY_THRESHOLD are used to ensure the VMX
path is taken.
This enables validation of the VMX fast path without affecting systems
that do not support Altivec.
Signed-off-by: Sayali Patil <sayalip@linux.ibm.com>
Tested-by: Venkat Rao Bagalkote <venkat88@linux.ibm.com>
Signed-off-by: Madhavan Srinivasan <maddy@linux.ibm.com>
Link: https://patch.msgid.link/20260304122201.153049-2-sayalip@linux.ibm.com
114 lines
2.5 KiB
C
114 lines
2.5 KiB
C
// SPDX-License-Identifier: GPL-2.0
|
|
#include <malloc.h>
|
|
#include <string.h>
|
|
#include <stdlib.h>
|
|
#include <stdbool.h>
|
|
|
|
#include "utils.h"
|
|
|
|
#define MAX_LEN 8192
|
|
#define MAX_OFFSET 16
|
|
#define MIN_REDZONE 128
|
|
#define BUFLEN (MAX_LEN+MAX_OFFSET+2*MIN_REDZONE)
|
|
#define POISON 0xa5
|
|
|
|
#ifdef VMX_TEST
|
|
#define VMX_COPY_THRESHOLD 3328
|
|
#endif
|
|
|
|
unsigned long COPY_LOOP(void *to, const void *from, unsigned long size);
|
|
|
|
static void do_one(char *src, char *dst, unsigned long src_off,
|
|
unsigned long dst_off, unsigned long len, void *redzone,
|
|
void *fill)
|
|
{
|
|
char *srcp, *dstp;
|
|
unsigned long ret;
|
|
unsigned long i;
|
|
|
|
srcp = src + MIN_REDZONE + src_off;
|
|
dstp = dst + MIN_REDZONE + dst_off;
|
|
|
|
memset(src, POISON, BUFLEN);
|
|
memset(dst, POISON, BUFLEN);
|
|
memcpy(srcp, fill, len);
|
|
|
|
ret = COPY_LOOP(dstp, srcp, len);
|
|
if (ret && ret != (unsigned long)dstp) {
|
|
printf("(%p,%p,%ld) returned %ld\n", dstp, srcp, len, ret);
|
|
abort();
|
|
}
|
|
|
|
if (memcmp(dstp, srcp, len)) {
|
|
printf("(%p,%p,%ld) miscompare\n", dstp, srcp, len);
|
|
printf("src: ");
|
|
for (i = 0; i < len; i++)
|
|
printf("%02x ", srcp[i]);
|
|
printf("\ndst: ");
|
|
for (i = 0; i < len; i++)
|
|
printf("%02x ", dstp[i]);
|
|
printf("\n");
|
|
abort();
|
|
}
|
|
|
|
if (memcmp(dst, redzone, dstp - dst)) {
|
|
printf("(%p,%p,%ld) redzone before corrupted\n",
|
|
dstp, srcp, len);
|
|
abort();
|
|
}
|
|
|
|
if (memcmp(dstp+len, redzone, dst+BUFLEN-(dstp+len))) {
|
|
printf("(%p,%p,%ld) redzone after corrupted\n",
|
|
dstp, srcp, len);
|
|
abort();
|
|
}
|
|
}
|
|
|
|
int test_copy_loop(void)
|
|
{
|
|
char *src, *dst, *redzone, *fill;
|
|
unsigned long len, src_off, dst_off;
|
|
unsigned long i;
|
|
|
|
src = memalign(BUFLEN, BUFLEN);
|
|
dst = memalign(BUFLEN, BUFLEN);
|
|
redzone = malloc(BUFLEN);
|
|
fill = malloc(BUFLEN);
|
|
|
|
if (!src || !dst || !redzone || !fill) {
|
|
fprintf(stderr, "malloc failed\n");
|
|
exit(1);
|
|
}
|
|
|
|
memset(redzone, POISON, BUFLEN);
|
|
|
|
/* Fill with sequential bytes */
|
|
for (i = 0; i < BUFLEN; i++)
|
|
fill[i] = i & 0xff;
|
|
#ifdef VMX_TEST
|
|
/* Force sizes above kernel VMX threshold (3328) */
|
|
for (len = VMX_COPY_THRESHOLD + 1; len < MAX_LEN; len++) {
|
|
#else
|
|
for (len = 1; len < MAX_LEN; len++) {
|
|
#endif
|
|
for (src_off = 0; src_off < MAX_OFFSET; src_off++) {
|
|
for (dst_off = 0; dst_off < MAX_OFFSET; dst_off++) {
|
|
do_one(src, dst, src_off, dst_off, len,
|
|
redzone, fill);
|
|
}
|
|
}
|
|
}
|
|
|
|
return 0;
|
|
}
|
|
|
|
/*
 * Entry point: run test_copy_loop() under the selftest harness, naming
 * the test after the copy routine under test (str() stringifies the
 * COPY_LOOP macro; both come from utils.h / the build system).
 */
int main(void)
{
#ifdef VMX_TEST
	/*
	 * Skip if Altivec not present.  SKIP_IF_MSG presumably returns a
	 * skip status from main() when the hwcap bit is absent, so it must
	 * run before the harness — TODO confirm against utils.h.
	 */
	SKIP_IF_MSG(!have_hwcap(PPC_FEATURE_HAS_ALTIVEC), "ALTIVEC not supported");
#endif

	return test_harness(test_copy_loop, str(COPY_LOOP));
}
|