Merge tag 'perf-tools-for-v7.0-1-2026-02-21' of git://git.kernel.org/pub/scm/linux/kernel/git/perf/perf-tools

Pull perf tools updates from Arnaldo Carvalho de Melo:

 - Introduce 'perf sched stats' tool with record/report/diff workflows
   using schedstat counters

 - Add a faster libdw based addr2line implementation and allow selecting
   it or its alternatives via 'perf config addr2line.style='

 - Data-type profiling fixes and improvements including the ability to
   select fields using 'perf report's -F/--fields option, e.g.:

     'perf report --fields overhead,type'

 - Add 'perf test' regression tests for Data-type profiling with C and
   Rust workloads

 - Fix srcline printing with inlines in callchains, make sure this has
   coverage in 'perf test'

 - Fix printing of leaf IP in LBR callchains

 - Fix display of metrics without sufficient permission in 'perf stat'

 - Print all machines in 'perf kvm report -vvv', not just the host

 - Switch from SHA-1 to BLAKE2s for build ID generation, remove SHA-1
   code

 - Fix 'perf report's histogram entry collapsing with '-F' option

 - Use system's cacheline size instead of a hardcoded value in 'perf
   report'

 - Allow filtering conversion by time range in 'perf data'

 - Cover conversion to CTF using 'perf data' in 'perf test'

 - Address newer glibc const-correctness (-Werror=discarded-qualifiers)
   issues

 - Fixes and improvements for ARM's CoreSight support, simplify ARM SPE
   event config in 'perf mem', update docs for 'perf c2c' including the
   ARM events it can be used with

 - Build support for generating metrics from arch specific python
   script, add extra AMD, Intel, ARM64 metrics using it

 - Add AMD Zen 6 events and metrics

 - Add JSON file with OpenHW Risc-V CVA6 hardware counters

 - Add 'perf kvm' stats live testing

 - Add more 'perf stat' tests to 'perf test'

 - Fix segfault in 'perf lock contention -b/--use-bpf'

 - Fix various 'perf test' cases for s390

 - Build system cleanups, bump minimum shellcheck version to 0.7.2

 - Support building the capstone based annotation routines as a plugin

 - Allow passing extra Clang flags via EXTRA_BPF_FLAGS

* tag 'perf-tools-for-v7.0-1-2026-02-21' of git://git.kernel.org/pub/scm/linux/kernel/git/perf/perf-tools: (255 commits)
  perf test script: Add python script testing support
  perf test script: Add perl script testing support
  perf script: Allow the generated script to be a path
  perf test: perf data --to-ctf testing
  perf test: Test pipe mode with data conversion --to-json
  perf json: Pipe mode --to-ctf support
  perf json: Pipe mode --to-json support
  perf check: Add libbabeltrace to the listed features
  perf build: Allow passing extra Clang flags via EXTRA_BPF_FLAGS
  perf test data_type_profiling.sh: Skip just the Rust tests if code_with_type workload is missing
  tools build: Fix feature test for rust compiler
  perf libunwind: Fix calls to thread__e_machine()
  perf stat: Add no-affinity flag
  perf evlist: Reduce affinity use and move into iterator, fix no affinity
  perf evlist: Missing TPEBS close in evlist__close()
  perf evlist: Special map propagation for tool events that read on 1 CPU
  perf stat-shadow: In prepare_metric fix guard on reading NULL perf_stat_evsel
  Revert "perf tool_pmu: More accurately set the cpus for tool events"
  tools build: Emit dependencies file for test-rust.bin
  tools build: Make test-rust.bin be removed by the 'clean' target
  ...
This commit is contained in:
Linus Torvalds
2026-02-21 10:51:08 -08:00
324 changed files with 15622 additions and 4400 deletions

View File

@@ -153,6 +153,11 @@ static struct test_workload *workloads[] = {
&workload__datasym,
&workload__landlock,
&workload__traploop,
&workload__inlineloop,
#ifdef HAVE_RUST_SUPPORT
&workload__code_with_type,
#endif
};
#define workloads__for_each(workload) \

View File

@@ -30,7 +30,6 @@
#include "symbol.h"
#include "synthetic-events.h"
#include "util.h"
#include "archinsn.h"
#include "dlfilter.h"
#include "tests.h"
#include "util/sample.h"

View File

@@ -148,6 +148,7 @@ static int test__kallsyms_split(struct test_suite *test __maybe_unused,
ret = TEST_OK;
out:
map__put(map);
remove_proc_dir(0);
machine__exit(&m);
return ret;

View File

@@ -70,6 +70,7 @@ make_python_perf_so := $(python_perf_so)
make_debug := DEBUG=1
make_nondistro := BUILD_NONDISTRO=1
make_extra_tests := EXTRA_TESTS=1
make_no_jevents := NO_JEVENTS=1
make_jevents_all := JEVENTS_ARCH=all
make_no_bpf_skel := BUILD_BPF_SKEL=0
make_gen_vmlinux_h := GEN_VMLINUX_H=1
@@ -83,9 +84,9 @@ make_no_demangle := NO_DEMANGLE=1
make_no_libelf := NO_LIBELF=1
make_no_libdw := NO_LIBDW=1
make_libunwind := LIBUNWIND=1
make_no_libdw_dwarf_unwind := NO_LIBDW_DWARF_UNWIND=1
make_no_backtrace := NO_BACKTRACE=1
make_no_libcapstone := NO_CAPSTONE=1
make_libcapstone_dlopen := LIBCAPSTONE_DLOPEN=1
make_no_libnuma := NO_LIBNUMA=1
make_no_libbionic := NO_LIBBIONIC=1
make_no_libbpf := NO_LIBBPF=1
@@ -120,9 +121,12 @@ make_static := LDFLAGS=-static NO_PERF_READ_VDSO32=1 NO_PERF_READ_VDSOX3
make_minimal := NO_LIBPYTHON=1 NO_GTK2=1
make_minimal += NO_DEMANGLE=1 NO_LIBELF=1 NO_BACKTRACE=1
make_minimal += NO_LIBNUMA=1 NO_LIBBIONIC=1 NO_LIBDW=1
make_minimal += NO_LIBDW_DWARF_UNWIND=1 NO_LIBBPF=1
make_minimal += NO_LIBBPF=1
make_minimal += NO_SDT=1 NO_JVMTI=1 NO_LIBZSTD=1
make_minimal += NO_LIBCAP=1 NO_CAPSTONE=1
make_minimal += NO_CAPSTONE=1
# binutils 2_42 and newer have bfd_thread_init()
new_libbfd := $(shell echo '#include <bfd.h>' | $(CC) -E -x c - | grep bfd_thread_init)
# $(run) contains all available tests
run := make_pure
@@ -137,8 +141,11 @@ MAKE_F := $(MAKE) -f $(MK)
endif
run += make_python_perf_so
run += make_debug
ifneq ($(new_libbfd),)
run += make_nondistro
endif
run += make_extra_tests
run += make_no_jevents
run += make_jevents_all
run += make_no_bpf_skel
run += make_gen_vmlinux_h
@@ -155,6 +162,7 @@ run += make_libunwind
run += make_no_libdw_dwarf_unwind
run += make_no_backtrace
run += make_no_libcapstone
run += make_libcapstone_dlopen
run += make_no_libnuma
run += make_no_libbionic
run += make_no_libbpf

View File

@@ -2609,8 +2609,8 @@ static int test_events(const struct evlist_test *events, int cnt)
for (int i = 0; i < cnt; i++) {
struct evlist_test e = events[i];
int test_ret;
const char *pos = e.name;
char buf[1024], *buf_pos = buf, *end;
const char *pos = e.name, *end;
char buf[1024], *buf_pos = buf;
while ((end = strstr(pos, "default_core"))) {
size_t len = end - pos;
@@ -2627,7 +2627,7 @@ static int test_events(const struct evlist_test *events, int cnt)
pr_debug("running test %d '%s'\n", i, e.name);
test_ret = test_event(&e);
if (test_ret != TEST_OK) {
pr_debug("Event test failure: test %d '%s'", i, e.name);
pr_debug("Event test failure: test %d '%s'\n", i, e.name);
ret = combine_test_results(ret, test_ret);
}
}
@@ -2764,7 +2764,7 @@ static int test__pmu_events(struct test_suite *test __maybe_unused, int subtest
test_ret = test_event(&e);
if (test_ret != TEST_OK) {
pr_debug("Test PMU event failed for '%s'", name);
pr_debug("Test PMU event failed for '%s'\n", name);
ret = combine_test_results(ret, test_ret);
}
@@ -2790,7 +2790,7 @@ static int test__pmu_events(struct test_suite *test __maybe_unused, int subtest
e.check = test__checkevent_pmu_events_mix;
test_ret = test_event(&e);
if (test_ret != TEST_OK) {
pr_debug("Test PMU event failed for '%s'", name);
pr_debug("Test PMU event failed for '%s'\n", name);
ret = combine_test_results(ret, test_ret);
}
}

View File

@@ -41,6 +41,8 @@ static void load_runtime_stat(struct evlist *evlist, struct value *vals)
count = find_value(evsel->name, vals);
evsel->supported = true;
evsel->stats->aggr->counts.val = count;
evsel->stats->aggr->counts.ena = 1;
evsel->stats->aggr->counts.run = 1;
}
}

View File

@@ -192,7 +192,6 @@ static int test__pmu_format(struct test_suite *test __maybe_unused, int subtest
}
if (attr.config2 != 0x0400000020041d07) {
pr_err("Unexpected config2 value %llx\n", attr.config2);
goto err_out;
}
ret = TEST_OK;
@@ -202,6 +201,97 @@ err_out:
return ret;
}
/*
 * Test evsel__set_config_if_unset(): format terms whose bits were already
 * set by the parsed event string must keep their value, while terms with
 * no bits set may be overwritten.
 */
static int test__pmu_usr_chgs(struct test_suite *test __maybe_unused, int subtest __maybe_unused)
{
	const char *event = "perf-pmu-test/config=15,config1=4,krava02=170,"
			    "krava03=1,krava11=27,krava12=1/";
	struct parse_events_terms terms;
	struct parse_events_error err;
	LIST_HEAD(config_terms);
	struct evlist *evlist;
	struct perf_pmu *pmu;
	struct evsel *evsel;
	int ret = TEST_FAIL;
	char dir[PATH_MAX];
	u64 val;

	pmu = test_pmu_get(dir, sizeof(dir));
	if (!pmu)
		return TEST_FAIL;

	/*
	 * Initialize before any "goto err_out": the error path calls
	 * parse_events_terms__exit(&terms), which would otherwise run on an
	 * uninitialized structure (the original only initialized it after
	 * the evlist allocation could already have jumped to err_out).
	 */
	parse_events_terms__init(&terms);

	evlist = evlist__new();
	if (evlist == NULL) {
		pr_err("Failed allocation\n");
		goto err_out;
	}
	/* NOTE(review): err is passed uninitialized; confirm parse_events()
	 * initializes it (other tests call parse_events_error__init first).
	 */
	ret = parse_events(evlist, event, &err);
	if (ret) {
		pr_debug("failed to parse event '%s', err %d\n", event, ret);
		parse_events_error__print(&err, event);
		if (parse_events_error__contains(&err, "can't access trace events"))
			ret = TEST_SKIP;
		goto err_out;
	}
	evsel = evlist__first(evlist);

	/*
	 * Set via config=15, krava01 bits 0-1
	 * Set via config1=4, krava11 bit 1
	 * Set values: krava02=170, krava03=1, krava11=27, krava12=1
	 *
	 * Test that already set values aren't overwritten.
	 */
	evsel__set_config_if_unset(evsel, "krava01", 16);
	evsel__get_config_val(evsel, "krava01", &val);
	TEST_ASSERT_EQUAL("krava01 overwritten", (int) val, (15 & 0b11));

	evsel__set_config_if_unset(evsel, "krava11", 45);
	evsel__get_config_val(evsel, "krava11", &val);
	TEST_ASSERT_EQUAL("krava11 overwritten", (int) val, (27 | (4 << 1)));

	evsel__set_config_if_unset(evsel, "krava02", 32);
	evsel__get_config_val(evsel, "krava02", &val);
	TEST_ASSERT_EQUAL("krava02 overwritten", (int) val, 170);

	evsel__set_config_if_unset(evsel, "krava03", 0);
	evsel__get_config_val(evsel, "krava03", &val);
	TEST_ASSERT_EQUAL("krava03 overwritten", (int) val, 1);

	/*
	 * krava13 doesn't have any bits set by either krava13= or config1=
	 * but setting _any_ raw value for config1 implies that krava13
	 * shouldn't be overwritten. So its value should remain as 0.
	 */
	evsel__set_config_if_unset(evsel, "krava13", 5);
	evsel__get_config_val(evsel, "krava13", &val);
	TEST_ASSERT_EQUAL("krava13 overwritten", (int) val, 0);

	/*
	 * Unset values: krava21, krava22, krava23
	 *
	 * Test that unset values are overwritten.
	 */
	evsel__set_config_if_unset(evsel, "krava21", 13905);
	evsel__get_config_val(evsel, "krava21", &val);
	TEST_ASSERT_EQUAL("krava21 not overwritten", (int) val, 13905);

	evsel__set_config_if_unset(evsel, "krava22", 11);
	evsel__get_config_val(evsel, "krava22", &val);
	TEST_ASSERT_EQUAL("krava22 not overwritten", (int) val, 11);

	evsel__set_config_if_unset(evsel, "krava23", 0);
	evsel__get_config_val(evsel, "krava23", &val);
	TEST_ASSERT_EQUAL("krava23 not overwritten", (int) val, 0);

	ret = TEST_OK;
err_out:
	parse_events_terms__exit(&terms);
	evlist__delete(evlist);
	test_pmu_put(dir, pmu);
	return ret;
}
static int test__pmu_events(struct test_suite *test __maybe_unused, int subtest __maybe_unused)
{
char dir[PATH_MAX];
@@ -539,6 +629,7 @@ static struct test_case tests__pmu[] = {
TEST_CASE("PMU name combining", name_len),
TEST_CASE("PMU name comparison", name_cmp),
TEST_CASE("PMU cmdline match", pmu_match),
TEST_CASE("PMU user config changes", pmu_usr_chgs),
{ .name = NULL, }
};

View File

@@ -0,0 +1,96 @@
#!/bin/bash
# test addr2line inline unwinding
# SPDX-License-Identifier: GPL-2.0
set -e
err=0
test_dir=$(mktemp -d /tmp/perf-test-inline-addr2line.XXXXXXXXXX)
perf_data="${test_dir}/perf.data"
perf_script_txt="${test_dir}/perf_script.txt"
cleanup() {
	# Restore default signal handling, then drop the scratch directory.
	trap - EXIT TERM INT
	rm -rf "${test_dir}"
}
trap_cleanup() {
	# Invoked on EXIT/TERM/INT: report which function the signal hit,
	# tidy up the scratch files and exit non-zero so the harness sees
	# a failure.
	echo "Unexpected signal in ${FUNCNAME[1]}"
	cleanup
	exit 1
}
trap trap_cleanup EXIT TERM INT
test_fp() {
	# Verify that inlined frames appear in srcline output when unwinding
	# frame-pointer based callchains.
	echo "Inline unwinding fp verification test"
	# Record frame-pointer callchains for the inlineloop test workload.
	# (The "only dwarf supports inlines" comment from the dwarf variant
	# does not apply here.)
	perf record --call-graph fp -e task-clock:u -o "${perf_data}" -- perf test -w inlineloop 1
	# Check output with inline (default) and srcline
	perf script -i "${perf_data}" --fields +srcline > "${perf_script_txt}"
	# Expect the leaf and middle functions to occur on lines in the 20s, with
	# the non-inlined parent function on a line in the 30s.
	if grep -q "inlineloop.c:2. (inlined)" "${perf_script_txt}" &&
		grep -q "inlineloop.c:3.$" "${perf_script_txt}"
	then
		echo "Inline unwinding fp verification test [Success]"
	else
		echo "Inline unwinding fp verification test [Failed missing inlined functions]"
		err=1
	fi
}
test_dwarf() {
	# Verify that inlined frames appear in srcline output when unwinding
	# DWARF callchains.
	echo "Inline unwinding dwarf verification test"
	# Record data. Currently only dwarf callchains support inlined functions.
	perf record --call-graph dwarf -e task-clock:u -o "${perf_data}" -- perf test -w inlineloop 1
	# Check output with inline (default) and srcline
	perf script -i "${perf_data}" --fields +srcline > "${perf_script_txt}"
	# Expect the leaf and middle functions to occur on lines in the 20s, with
	# the non-inlined parent function on a line in the 30s.
	if ! grep -q "inlineloop.c:2. (inlined)" "${perf_script_txt}" ||
		! grep -q "inlineloop.c:3.$" "${perf_script_txt}"
	then
		echo "Inline unwinding dwarf verification test [Failed missing inlined functions]"
		err=1
	else
		echo "Inline unwinding dwarf verification test [Success]"
	fi
}
test_lbr() {
	# Verify that inlined frames appear in srcline output when unwinding
	# LBR (last branch record) callchains. Skipped on non-x86 machines.
	echo "Inline unwinding LBR verification test"
	if [ ! -f /sys/bus/event_source/devices/cpu/caps/branches ] &&
		[ ! -f /sys/bus/event_source/devices/cpu_core/caps/branches ]
	then
		echo "Skip: only x86 CPUs support LBR"
		return
	fi
	# Record LBR callchains for the inlineloop test workload.
	# (The "only dwarf supports inlines" comment from the dwarf variant
	# does not apply here.)
	perf record --call-graph lbr -e cycles:u -o "${perf_data}" -- perf test -w inlineloop 1
	# Check output with inline (default) and srcline
	perf script -i "${perf_data}" --fields +srcline > "${perf_script_txt}"
	# Expect the leaf and middle functions to occur on lines in the 20s, with
	# the non-inlined parent function on a line in the 30s.
	if grep -q "inlineloop.c:2. (inlined)" "${perf_script_txt}" &&
		grep -q "inlineloop.c:3.$" "${perf_script_txt}"
	then
		echo "Inline unwinding lbr verification test [Success]"
	else
		echo "Inline unwinding lbr verification test [Failed missing inlined functions]"
		err=1
	fi
}
test_fp
test_dwarf
test_lbr
cleanup
exit $err

View File

@@ -0,0 +1,89 @@
#!/bin/bash
# perf data type profiling tests
# SPDX-License-Identifier: GPL-2.0
set -e
# The logic below follows the same line as the annotate test, but looks for a
# data type profiling manifestation
# Values in testtypes and testprogs should match
testtypes=("# data-type: struct Buf" "# data-type: struct _buf")
testprogs=("perf test -w code_with_type" "perf test -w datasym")
err=0
perfdata=$(mktemp /tmp/__perf_test.perf.data.XXXXX)
perfout=$(mktemp /tmp/__perf_test.perf.out.XXXXX)
cleanup() {
rm -rf "${perfdata}" "${perfout}"
rm -rf "${perfdata}".old
trap - EXIT TERM INT
}
trap_cleanup() {
echo "Unexpected signal in ${FUNCNAME[1]}"
cleanup
exit 1
}
trap trap_cleanup EXIT TERM INT
test_basic_annotate() {
	# $1 mode: "Basic" (record to a file) or "Pipe" (record via stdout/stdin).
	# $2 runtime: "Rust" or "C", selecting into testtypes/testprogs.
	mode=$1
	runtime=$2
	echo "${mode} ${runtime} perf annotate test"
	case "x${runtime}" in
	"xRust")
		if ! perf check feature -q rust
		then
			echo "Skip: code_with_type workload not built in 'perf test'"
			return
		fi
		index=0 ;;
	"xC")
		index=1 ;;
	esac
	# Capture the record status via "|| rec_err=$?": the script runs with
	# "set -e", so a plain failing "perf mem record" would abort the whole
	# script and the original "$?" check below could never run.
	rec_err=0
	if [ "x${mode}" == "xBasic" ]
	then
		perf mem record -o "${perfdata}" ${testprogs[$index]} 2> /dev/null || rec_err=$?
	else
		perf mem record -o - ${testprogs[$index]} 2> /dev/null > "${perfdata}" || rec_err=$?
	fi
	if [ "x${rec_err}" != "x0" ]
	then
		echo "${mode} annotate [Failed: perf record]"
		err=1
		return
	fi
	# Generate the annotated output file
	if [ "x${mode}" == "xBasic" ]
	then
		perf annotate --code-with-type -i "${perfdata}" --stdio --percent-limit 1 2> /dev/null > "${perfout}"
	else
		perf annotate --code-with-type -i - --stdio 2> /dev/null --percent-limit 1 < "${perfdata}" > "${perfout}"
	fi
	# check if it has the target data type
	if ! grep -q "${testtypes[$index]}" "${perfout}"
	then
		echo "${mode} annotate [Failed: missing target data type]"
		cat "${perfout}"
		err=1
		return
	fi
	echo "${mode} annotate test [Success]"
}
test_basic_annotate Basic Rust
test_basic_annotate Pipe Rust
test_basic_annotate Basic C
test_basic_annotate Pipe C
cleanup
exit $err

View File

@@ -21,13 +21,13 @@ trap trap_cleanup EXIT TERM INT
test_evlist_simple() {
echo "Simple evlist test"
if ! perf record -e cycles -o "${perfdata}" true 2> /dev/null
if ! perf record -e cpu-clock -o "${perfdata}" true 2> /dev/null
then
echo "Simple evlist [Failed record]"
err=1
return
fi
if ! perf evlist -i "${perfdata}" | grep -q "cycles"
if ! perf evlist -i "${perfdata}" | grep -q "cpu-clock"
then
echo "Simple evlist [Failed to list event]"
err=1
@@ -38,13 +38,14 @@ test_evlist_simple() {
test_evlist_group() {
echo "Group evlist test"
if ! perf record -e "{cycles,instructions}" -o "${perfdata}" true 2> /dev/null
if ! perf record -e "{cpu-clock,task-clock}" -o "${perfdata}" \
-- perf test -w noploop 2> /dev/null
then
echo "Group evlist [Skipped event group recording failed]"
return
fi
if ! perf evlist -i "${perfdata}" -g | grep -q "{.*cycles.*,.*instructions.*}"
if ! perf evlist -i "${perfdata}" -g | grep -q "{.*cpu-clock.*,.*task-clock.*}"
then
echo "Group evlist [Failed to list event group]"
err=1

View File

@@ -0,0 +1,45 @@
#!/bin/bash
# perf inject to convert DWARF callchains to regular ones
# SPDX-License-Identifier: GPL-2.0
if ! perf check feature -q dwarf; then
echo "SKIP: DWARF support is not available"
exit 2
fi
TESTDATA=$(mktemp /tmp/perf-test.XXXXXX)
err=0
cleanup()
{
trap - EXIT TERM INT
rm -f ${TESTDATA}*
}
trap_cleanup()
{
cleanup
exit 1
}
trap trap_cleanup EXIT TERM INT
echo "recording data with DWARF callchain"
perf record -F 999 --call-graph dwarf -o "${TESTDATA}" -- perf test -w noploop
echo "convert DWARF callchain using perf inject"
perf inject -i "${TESTDATA}" --convert-callchain -o "${TESTDATA}.new"
perf report -i "${TESTDATA}" --no-children -q --percent-limit=1 > ${TESTDATA}.out
perf report -i "${TESTDATA}.new" --no-children -q --percent-limit=1 > ${TESTDATA}.new.out
echo "compare the both result excluding inlined functions"
if diff -u "${TESTDATA}.out" "${TESTDATA}.new.out" | grep "^- " | grep -qv "(inlined)"; then
echo "Found some differences"
diff -u "${TESTDATA}.out" "${TESTDATA}.new.out"
err=1
fi
cleanup
exit $err

View File

@@ -7,9 +7,10 @@ set -e
err=0
perfdata=$(mktemp /tmp/__perf_kvm_test.perf.data.XXXXX)
qemu_pid_file=$(mktemp /tmp/__perf_kvm_test.qemu.pid.XXXXX)
log_file=$(mktemp /tmp/__perf_kvm_test.live_log.XXXXX)
cleanup() {
rm -f "${perfdata}"
rm -f "${perfdata}" "${log_file}"
if [ -f "${qemu_pid_file}" ]; then
if [ -s "${qemu_pid_file}" ]; then
qemu_pid=$(cat "${qemu_pid_file}")
@@ -96,6 +97,32 @@ test_kvm_buildid_list() {
echo "perf kvm buildid-list test [Success]"
}
test_kvm_stat_live() {
	echo "Testing perf kvm stat live"
	# Run perf kvm stat live for 5 seconds, monitoring the qemu PID.
	# sleep keeps stdin open but silent, preventing an EOF loop or
	# interactive spam.
	#
	# Capture the pipeline status directly: inside the body of
	# "if ! cmd; then", $? holds the (negated) condition status — always
	# 0 — so the original "retval=$?" could never see the real exit code
	# and the failure branch was dead.
	retval=0
	sleep 10 | timeout 5s perf kvm stat live -p "${qemu_pid}" > "${log_file}" 2>&1 || retval=$?
	# timeout(1) exits 124 when it had to kill the command, which is the
	# expected outcome here; 0 means live mode ended on its own.
	if [ $retval -ne 124 ] && [ $retval -ne 0 ]; then
		echo "perf kvm stat live [Failed: perf kvm stat live failed to start or run (ret=$retval)]"
		head -n 50 "${log_file}"
		err=1
		return
	fi
	# Check for some sample data (percentage)
	if ! grep -E -q "[0-9]+\.[0-9]+%" "${log_file}"; then
		echo "perf kvm stat live [Failed: no sample percentage found]"
		head -n 50 "${log_file}"
		err=1
		return
	fi
	echo "perf kvm stat live test [Success]"
}
setup_qemu() {
# Find qemu
if [ "$(uname -m)" = "x86_64" ]; then
@@ -148,6 +175,7 @@ if [ $err -eq 0 ]; then
test_kvm_stat
test_kvm_record_report
test_kvm_buildid_list
test_kvm_stat_live
fi
cleanup

View File

@@ -0,0 +1,64 @@
#!/bin/sh
# perf sched stats tests
# SPDX-License-Identifier: GPL-2.0
set -e
err=0
test_perf_sched_stats_record() {
	echo "Basic perf sched stats record test"
	# Match the completion banner as a fixed string: with grep -E the
	# original unescaped "[ ... ]" is a bracket expression matching any
	# ONE character from the set, so the test could pass on unrelated
	# output.
	if ! perf sched stats record true 2>&1 | \
		grep -q -F "perf sched stats: Wrote samples to perf.data"
	then
		echo "Basic perf sched stats record test [Failed]"
		err=1
		return
	fi
	echo "Basic perf sched stats record test [Success]"
}
test_perf_sched_stats_report() {
	# Record a fresh perf.data, then check that the report contains the
	# schedstat "Description" header. perf.data is removed either way.
	echo "Basic perf sched stats report test"
	perf sched stats record true > /dev/null
	if perf sched stats report 2>&1 | grep -E -q "Description"
	then
		rm perf.data
		echo "Basic perf sched stats report test [Success]"
	else
		echo "Basic perf sched stats report test [Failed]"
		err=1
		rm perf.data
		return
	fi
}
test_perf_sched_stats_live() {
	# Live mode runs the workload and prints the report directly;
	# look for the "Description" header in its output.
	echo "Basic perf sched stats live mode test"
	if perf sched stats true 2>&1 | grep -E -q "Description"
	then
		echo "Basic perf sched stats live mode test [Success]"
	else
		echo "Basic perf sched stats live mode test [Failed]"
		err=1
		return
	fi
}
test_perf_sched_stats_diff() {
	# Record twice so both perf.data and perf.data.old exist, then diff
	# them. Both data files are removed either way.
	echo "Basic perf sched stats diff test"
	perf sched stats record true > /dev/null
	perf sched stats record true > /dev/null
	if perf sched stats diff > /dev/null
	then
		rm perf.data.old perf.data
		echo "Basic perf sched stats diff test [Success]"
	else
		echo "Basic perf sched stats diff test [Failed]"
		err=1
		rm perf.data.old perf.data
		return
	fi
}
test_perf_sched_stats_record
test_perf_sched_stats_report
test_perf_sched_stats_live
test_perf_sched_stats_diff
exit $err

View File

@@ -260,7 +260,21 @@ test_uid() {
test_leader_sampling() {
echo "Basic leader sampling test"
if ! perf record -o "${perfdata}" -e "{cycles,cycles}:Su" -- \
events="{cycles,cycles}:Su"
[ "$(uname -m)" = "s390x" ] && {
[ ! -d /sys/devices/cpum_sf ] && {
echo "No CPUMF [Skipped record]"
return
}
events="{cpum_sf/SF_CYCLES_BASIC/,cycles}:Su"
perf record -o "${perfdata}" -e "$events" -- perf test -w brstack 2> /dev/null
# Perf grouping might be unsupported, depends on version.
[ "$?" -ne 0 ] && {
echo "Grouping not support [Skipped record]"
return
}
}
if ! perf record -o "${perfdata}" -e "$events" -- \
perf test -w brstack 2> /dev/null
then
echo "Leader sampling [Failed record]"

View File

@@ -53,7 +53,7 @@ start_noploops() {
}
cleanup_noploops() {
kill "$PID1" "$PID2"
kill "$PID1" "$PID2" || true
}
test_sched_record() {

View File

@@ -68,17 +68,17 @@ test_dlfilter() {
fi
# Build the dlfilter
if ! cc -c -I tools/perf/include -fpic -x c "${dlfilter_c}" -o "${dlfilter_so}.o"
if ! cc -c -I ${shelldir}/../../include -fpic -x c "${dlfilter_c}" -o "${dlfilter_so}.o"
then
echo "Basic --dlfilter test [Failed to build dlfilter object]"
err=1
echo "Basic --dlfilter test [Skip - failed to build dlfilter object]"
err=2
return
fi
if ! cc -shared -o "${dlfilter_so}" "${dlfilter_so}.o"
then
echo "Basic --dlfilter test [Failed to link dlfilter shared object]"
err=1
echo "Basic --dlfilter test [Skip - failed to link dlfilter shared object]"
err=2
return
fi

View File

@@ -0,0 +1,102 @@
#!/bin/bash
# perf script perl tests
# SPDX-License-Identifier: GPL-2.0
set -e
# set PERF_EXEC_PATH to find scripts in the source directory
perfdir=$(dirname "$0")/../..
if [ -e "$perfdir/scripts/perl/Perf-Trace-Util" ]; then
export PERF_EXEC_PATH=$perfdir
fi
perfdata=$(mktemp /tmp/__perf_test_script_perl.perf.data.XXXXX)
generated_script=$(mktemp /tmp/__perf_test_script.XXXXX.pl)
cleanup() {
rm -f "${perfdata}"
rm -f "${generated_script}"
trap - EXIT TERM INT
}
trap_cleanup() {
echo "Unexpected signal in ${FUNCNAME[1]}"
cleanup
exit 1
}
trap trap_cleanup TERM INT
trap cleanup EXIT
check_perl_support() {
	# Return 2 (skip) when perf was built without libperl scripting.
	if ! perf check feature -q libperl; then
		echo "perf script perl test [Skipped: no libperl support]"
		return 2
	fi
	return 0
}
# Record event $1 (with record options $3), auto-generate a Perl script via
# "perf script -g", execute it and look for $2 in its output.
# Returns: 0 on success, 1 on a hard failure, 2 when the event cannot be
# recorded (the caller treats 2 as "try another event / skip").
test_script() {
	local event_name=$1
	local expected_output=$2
	local record_opts=$3
	echo "Testing event: $event_name"
	# Try to record. If this fails, it might be permissions or lack of support.
	# We return 2 to indicate "skip this event" rather than "fail test".
	if ! perf record -o "${perfdata}" -e "$event_name" $record_opts -- perf test -w thloop > /dev/null 2>&1; then
		echo "perf script perl test [Skipped: failed to record $event_name]"
		return 2
	fi
	echo "Generating perl script..."
	# -g emits a skeleton handler script for the recorded events.
	if ! perf script -i "${perfdata}" -g "${generated_script}"; then
		echo "perf script perl test [Failed: script generation for $event_name]"
		return 1
	fi
	if [ ! -f "${generated_script}" ]; then
		echo "perf script perl test [Failed: script not generated for $event_name]"
		return 1
	fi
	echo "Executing perl script..."
	# Run the generated script against the same data and grep its output.
	output=$(perf script -i "${perfdata}" -s "${generated_script}" 2>&1)
	if echo "$output" | grep -q "$expected_output"; then
		echo "perf script perl test [Success: $event_name triggered $expected_output]"
		return 0
	else
		echo "perf script perl test [Failed: $event_name did not trigger $expected_output]"
		echo "Output was:"
		echo "$output" | head -n 20
		return 1
	fi
}
check_perl_support || exit 2
# Try tracepoint first
test_script "sched:sched_switch" "sched::sched_switch" "-c 1" && res=0 || res=$?
if [ $res -eq 0 ]; then
exit 0
elif [ $res -eq 1 ]; then
exit 1
fi
# If tracepoint skipped (res=2), try task-clock
# For generic events like task-clock, the generated script uses process_event()
# which dumps data using Data::Dumper. We check for "$VAR1" which is standard Dumper output.
test_script "task-clock" "\$VAR1" "-c 100" && res=0 || res=$?
if [ $res -eq 0 ]; then
exit 0
elif [ $res -eq 1 ]; then
exit 1
fi
# If both skipped
echo "perf script perl test [Skipped: Could not record tracepoint or task-clock]"
exit 2

View File

@@ -0,0 +1,113 @@
#!/bin/bash
# perf script python tests
# SPDX-License-Identifier: GPL-2.0
set -e
# set PERF_EXEC_PATH to find scripts in the source directory
perfdir=$(dirname "$0")/../..
if [ -e "$perfdir/scripts/python/Perf-Trace-Util" ]; then
export PERF_EXEC_PATH=$perfdir
fi
perfdata=$(mktemp /tmp/__perf_test_script_python.perf.data.XXXXX)
generated_script=$(mktemp /tmp/__perf_test_script.XXXXX.py)
cleanup() {
rm -f "${perfdata}"
rm -f "${generated_script}"
trap - EXIT TERM INT
}
trap_cleanup() {
echo "Unexpected signal in ${FUNCNAME[1]}"
cleanup
exit 1
}
trap trap_cleanup TERM INT
trap cleanup EXIT
check_python_support() {
	# Return 2 (skip) when perf was built without libpython scripting.
	if ! perf check feature -q libpython; then
		echo "perf script python test [Skipped: no libpython support]"
		return 2
	fi
	return 0
}
# Record event $1 (with record options $3), auto-generate a Python script via
# "perf script -g", execute it and look for $2 in its output.
# Returns: 0 on success, 1 on a hard failure, 2 when the event cannot be
# recorded (the caller treats 2 as "try another event / skip").
test_script() {
	local event_name=$1
	local expected_output=$2
	local record_opts=$3
	echo "Testing event: $event_name"
	# Try to record. If this fails, it might be permissions or lack of
	# support. Return 2 to indicate "skip this event" rather than "fail
	# test".
	if ! perf record -o "${perfdata}" -e "$event_name" $record_opts -- perf test -w thloop > /dev/null 2>&1; then
		echo "perf script python test [Skipped: failed to record $event_name]"
		return 2
	fi
	echo "Generating python script..."
	if ! perf script -i "${perfdata}" -g "${generated_script}"; then
		echo "perf script python test [Failed: script generation for $event_name]"
		return 1
	fi
	if [ ! -f "${generated_script}" ]; then
		echo "perf script python test [Failed: script not generated for $event_name]"
		return 1
	fi
	# Perf script -g python doesn't generate process_event for generic
	# events so append it manually to test that the callback works.
	if ! grep -q "def process_event" "${generated_script}"; then
		cat <<EOF >> "${generated_script}"
def process_event(param_dict):
	print("param_dict: %s" % param_dict)
EOF
	fi
	echo "Executing python script..."
	# Run the generated script against the same data and grep its output.
	output=$(perf script -i "${perfdata}" -s "${generated_script}" 2>&1)
	if echo "$output" | grep -q "$expected_output"; then
		echo "perf script python test [Success: $event_name triggered $expected_output]"
		return 0
	else
		echo "perf script python test [Failed: $event_name did not trigger $expected_output]"
		echo "Output was:"
		echo "$output" | head -n 20
		return 1
	fi
}
check_python_support || exit 2
# Try tracepoint first
test_script "sched:sched_switch" "sched__sched_switch" "-c 1" && res=0 || res=$?
if [ $res -eq 0 ]; then
exit 0
elif [ $res -eq 1 ]; then
exit 1
fi
# If tracepoint skipped (res=2), try task-clock
# For generic events like task-clock, the generated script uses process_event()
# which prints the param_dict.
test_script "task-clock" "param_dict" "-c 100" && res=0 || res=$?
if [ $res -eq 0 ]; then
exit 0
elif [ $res -eq 1 ]; then
exit 1
fi
# If both skipped
echo "perf script python test [Skipped: Could not record tracepoint or task-clock]"
exit 2

View File

@@ -5,6 +5,21 @@
set -e
err=0
stat_output=$(mktemp /tmp/perf-stat-test-output.XXXXX)
cleanup() {
rm -f "${stat_output}"
trap - EXIT TERM INT
}
trap_cleanup() {
echo "Unexpected signal in ${FUNCNAME[1]}"
cleanup
exit 1
}
trap trap_cleanup EXIT TERM INT
test_default_stat() {
echo "Basic stat command test"
if ! perf stat true 2>&1 | grep -E -q "Performance counter stats for 'true':"
@@ -233,7 +248,7 @@ test_hybrid() {
fi
# Run default Perf stat
cycles_events=$(perf stat -a -- sleep 0.1 2>&1 | grep -E "/cpu-cycles/[uH]*| cpu-cycles[:uH]* " -c)
cycles_events=$(perf stat -a -- sleep 0.1 2>&1 | grep -E "/cpu-cycles/[uH]*| cpu-cycles[:uH]* " | wc -l)
# The expectation is that default output will have a cycles events on each
# hybrid PMU. In situations with no cycles PMU events, like virtualized, this
@@ -248,6 +263,226 @@ test_hybrid() {
echo "hybrid test [Success]"
}
test_stat_cpu() {
echo "stat -C <cpu> test"
# Test the full online CPU list (ranges and lists)
online_cpus=$(cat /sys/devices/system/cpu/online)
if ! perf stat -C "$online_cpus" -a true > "${stat_output}" 2>&1
then
echo "stat -C <cpu> test [Failed - command failed for cpus $online_cpus]"
cat "${stat_output}"
err=1
return
fi
if ! grep -E -q "Performance counter stats for" "${stat_output}"
then
echo "stat -C <cpu> test [Failed - missing output for cpus $online_cpus]"
cat "${stat_output}"
err=1
return
fi
# Test each individual online CPU
for cpu_dir in /sys/devices/system/cpu/cpu[0-9]*; do
cpu=${cpu_dir##*/cpu}
# Check if online
if [ -f "$cpu_dir/online" ] && [ "$(cat "$cpu_dir/online")" -eq 0 ]
then
continue
fi
if ! perf stat -C "$cpu" -a true > "${stat_output}" 2>&1
then
echo "stat -C <cpu> test [Failed - command failed for cpu $cpu]"
cat "${stat_output}"
err=1
return
fi
if ! grep -E -q "Performance counter stats for" "${stat_output}"
then
echo "stat -C <cpu> test [Failed - missing output for cpu $cpu]"
cat "${stat_output}"
err=1
return
fi
done
# Test synthetic list and range if cpu0 and cpu1 are online
c0_online=0
c1_online=0
if [ -d "/sys/devices/system/cpu/cpu0" ]
then
if [ ! -f "/sys/devices/system/cpu/cpu0/online" ] || [ "$(cat /sys/devices/system/cpu/cpu0/online)" -eq 1 ]
then
c0_online=1
fi
fi
if [ -d "/sys/devices/system/cpu/cpu1" ]
then
if [ ! -f "/sys/devices/system/cpu/cpu1/online" ] || [ "$(cat /sys/devices/system/cpu/cpu1/online)" -eq 1 ]
then
c1_online=1
fi
fi
if [ $c0_online -eq 1 ] && [ $c1_online -eq 1 ]
then
# Test list "0,1"
if ! perf stat -C "0,1" -a true > "${stat_output}" 2>&1
then
echo "stat -C <cpu> test [Failed - command failed for cpus 0,1]"
cat "${stat_output}"
err=1
return
fi
if ! grep -E -q "Performance counter stats for" "${stat_output}"
then
echo "stat -C <cpu> test [Failed - missing output for cpus 0,1]"
cat "${stat_output}"
err=1
return
fi
# Test range "0-1"
if ! perf stat -C "0-1" -a true > "${stat_output}" 2>&1
then
echo "stat -C <cpu> test [Failed - command failed for cpus 0-1]"
cat "${stat_output}"
err=1
return
fi
if ! grep -E -q "Performance counter stats for" "${stat_output}"
then
echo "stat -C <cpu> test [Failed - missing output for cpus 0-1]"
cat "${stat_output}"
err=1
return
fi
fi
echo "stat -C <cpu> test [Success]"
}
test_stat_no_aggr() {
echo "stat -A test"
if ! perf stat -A -a true > "${stat_output}" 2>&1
then
echo "stat -A test [Failed - command failed]"
cat "${stat_output}"
err=1
return
fi
if ! grep -E -q "CPU" "${stat_output}"
then
echo "stat -A test [Failed - missing CPU column]"
cat "${stat_output}"
err=1
return
fi
echo "stat -A test [Success]"
}
test_stat_detailed() {
echo "stat -d test"
if ! perf stat -d true > "${stat_output}" 2>&1
then
echo "stat -d test [Failed - command failed]"
cat "${stat_output}"
err=1
return
fi
if ! grep -E -q "Performance counter stats" "${stat_output}"
then
echo "stat -d test [Failed - missing output]"
cat "${stat_output}"
err=1
return
fi
if ! perf stat -dd true > "${stat_output}" 2>&1
then
echo "stat -dd test [Failed - command failed]"
cat "${stat_output}"
err=1
return
fi
if ! grep -E -q "Performance counter stats" "${stat_output}"
then
echo "stat -dd test [Failed - missing output]"
cat "${stat_output}"
err=1
return
fi
if ! perf stat -ddd true > "${stat_output}" 2>&1
then
echo "stat -ddd test [Failed - command failed]"
cat "${stat_output}"
err=1
return
fi
if ! grep -E -q "Performance counter stats" "${stat_output}"
then
echo "stat -ddd test [Failed - missing output]"
cat "${stat_output}"
err=1
return
fi
echo "stat -d test [Success]"
}
test_stat_repeat() {
echo "stat -r test"
if ! perf stat -r 2 true > "${stat_output}" 2>&1
then
echo "stat -r test [Failed - command failed]"
cat "${stat_output}"
err=1
return
fi
if ! grep -E -q "\([[:space:]]*\+-.*%[[:space:]]*\)" "${stat_output}"
then
echo "stat -r test [Failed - missing variance]"
cat "${stat_output}"
err=1
return
fi
echo "stat -r test [Success]"
}
test_stat_pid() {
echo "stat -p test"
sleep 1 &
pid=$!
if ! perf stat -p $pid > "${stat_output}" 2>&1
then
echo "stat -p test [Failed - command failed]"
cat "${stat_output}"
err=1
kill $pid 2>/dev/null || true
wait $pid 2>/dev/null || true
return
fi
if ! grep -E -q "Performance counter stats" "${stat_output}"
then
echo "stat -p test [Failed - missing output]"
cat "${stat_output}"
err=1
else
echo "stat -p test [Success]"
fi
kill $pid 2>/dev/null || true
wait $pid 2>/dev/null || true
}
# Run every stat sub-test; each one sets err=1 on failure.
# (A stray diff hunk header had replaced the test_topdown_groups call;
# restored from the hunk's context line.)
test_default_stat
test_null_stat
test_offline_cpu_stat
test_topdown_groups
test_topdown_weak_groups
test_cputype
test_hybrid
test_stat_cpu
test_stat_no_aggr
test_stat_detailed
test_stat_repeat
test_stat_pid
cleanup
exit $err

View File

@@ -12,31 +12,32 @@ if ParanoidAndNotRoot 0
then
system_wide_flag=""
fi
err=0
err=3
skip=0
for m in $(perf list --raw-dump metricgroups)
do
echo "Testing $m"
result=$(perf stat -M "$m" $system_wide_flag sleep 0.01 2>&1)
result_err=$?
if [[ $result_err -gt 0 ]]
if [[ $result_err -eq 0 ]]
then
if [[ "$err" -ne 1 ]]
then
err=0
fi
else
if [[ "$result" =~ \
"Access to performance monitoring and observability operations is limited" ]]
then
echo "Permission failure"
echo $result
if [[ $err -eq 0 ]]
then
err=2 # Skip
fi
skip=1
elif [[ "$result" =~ "in per-thread mode, enable system wide" ]]
then
echo "Permissions - need system wide mode"
echo $result
if [[ $err -eq 0 ]]
then
err=2 # Skip
fi
skip=1
elif [[ "$m" == @(Default2|Default3|Default4) ]]
then
echo "Ignoring failures in $m that may contain unsupported legacy events"
@@ -48,4 +49,9 @@ do
fi
done
if [[ "$err" -eq 3 && "$skip" -eq 1 ]]
then
err=2
fi
exit $err

View File

@@ -15,7 +15,8 @@ then
test_prog="perf test -w noploop"
fi
err=0
skip=0
err=3
for m in $(perf list --raw-dump metrics); do
echo "Testing $m"
result=$(perf stat -M "$m" $system_wide_flag -- $test_prog 2>&1)
@@ -23,6 +24,10 @@ for m in $(perf list --raw-dump metrics); do
if [[ $result_err -eq 0 && "$result" =~ ${m:0:50} ]]
then
# No error result and metric shown.
if [[ "$err" -ne 1 ]]
then
err=0
fi
continue
fi
if [[ "$result" =~ "Cannot resolve IDs for" || "$result" =~ "No supported events found" ]]
@@ -44,7 +49,7 @@ for m in $(perf list --raw-dump metrics); do
echo $result
if [[ $err -eq 0 ]]
then
err=2 # Skip
skip=1
fi
continue
elif [[ "$result" =~ "in per-thread mode, enable system wide" ]]
@@ -53,7 +58,7 @@ for m in $(perf list --raw-dump metrics); do
echo $result
if [[ $err -eq 0 ]]
then
err=2 # Skip
skip=1
fi
continue
elif [[ "$result" =~ "<not supported>" ]]
@@ -68,7 +73,7 @@ for m in $(perf list --raw-dump metrics); do
echo $result
if [[ $err -eq 0 ]]
then
err=2 # Skip
skip=1
fi
continue
elif [[ "$result" =~ "<not counted>" ]]
@@ -77,7 +82,7 @@ for m in $(perf list --raw-dump metrics); do
echo $result
if [[ $err -eq 0 ]]
then
err=2 # Skip
skip=1
fi
continue
elif [[ "$result" =~ "FP_ARITH" || "$result" =~ "AMX" ]]
@@ -86,7 +91,7 @@ for m in $(perf list --raw-dump metrics); do
echo $result
if [[ $err -eq 0 ]]
then
err=2 # Skip
skip=1
fi
continue
elif [[ "$result" =~ "PMM" ]]
@@ -95,7 +100,7 @@ for m in $(perf list --raw-dump metrics); do
echo $result
if [[ $err -eq 0 ]]
then
err=2 # Skip
skip=1
fi
continue
fi
@@ -106,6 +111,10 @@ for m in $(perf list --raw-dump metrics); do
if [[ $result_err -eq 0 && "$result" =~ ${m:0:50} ]]
then
# No error result and metric shown.
if [[ "$err" -ne 1 ]]
then
err=0
fi
continue
fi
echo "[Failed $m] has non-zero error '$result_err' or not printed in:"
@@ -113,4 +122,10 @@ for m in $(perf list --raw-dump metrics); do
err=1
done
# return SKIP only if no success returned
if [[ "$err" -eq 3 && "$skip" -eq 1 ]]
then
err=2
fi
exit "$err"

View File

@@ -198,6 +198,58 @@ arm_cs_etm_basic_test() {
arm_cs_report "CoreSight basic testing with '$*'" $err
}
arm_cs_etm_test_cpu_list() {
	# Record CoreSight data only on the given comma-separated CPU list,
	# pinning the workload to those same CPUs, then verify that branch
	# samples were produced.
	cpu_list=$1
	echo "Testing sparse CPU list: $cpu_list"
	perf record -o ${perfdata} -e cs_etm//u -C $cpu_list \
		-- taskset --cpu-list $cpu_list true > /dev/null 2>&1
	perf_script_branch_samples true
	err=$?
	arm_cs_report "CoreSight sparse CPUs with '$*'" $err
}
arm_cs_etm_sparse_cpus_test() {
	# Build the list of CPUs that have an ETM device, one entry per device.
	cpus=()
	for dev in /sys/bus/event_source/devices/cs_etm/cpu*; do
		# Canonicalize the path (use $() rather than legacy backticks)
		dev=$(readlink -f "$dev")
		# Find which CPU this ETM device belongs to
		cpus+=("$(cat "$dev/cpu")")
	done
	# Sort numerically so the halves below are contiguous CPU ranges
	mapfile -t cpus < <(printf '%s\n' "${cpus[@]}" | sort -n)
	total=${#cpus[@]}
	# Need more than 1 to test
	if [ "$total" -le 1 ]; then
		return 0
	fi
	half=$((total / 2))
	# First half
	first_half=$(IFS=,; echo "${cpus[*]:0:$half}")
	arm_cs_etm_test_cpu_list "$first_half"
	# Second half
	second_half=$(IFS=,; echo "${cpus[*]:$half}")
	arm_cs_etm_test_cpu_list "$second_half"
	# Odd list is the same as halves unless >= 4 CPUs
	if [ "$total" -lt 4 ]; then
		return 0
	fi
	# Every other CPU (odd indices)
	odd_cpus=()
	for ((i = 1; i < total; i += 2)); do
		odd_cpus+=("${cpus[$i]}")
	done
	odd_list=$(IFS=,; echo "${odd_cpus[*]}")
	arm_cs_etm_test_cpu_list "$odd_list"
}
arm_cs_etm_traverse_path_test
arm_cs_etm_system_wide_test
arm_cs_etm_snapshot_test
@@ -211,4 +263,6 @@ arm_cs_etm_basic_test -e cs_etm/timestamp=1/ -a
arm_cs_etm_basic_test -e cs_etm/timestamp=0/
arm_cs_etm_basic_test -e cs_etm/timestamp=1/
arm_cs_etm_sparse_cpus_test
exit $glb_err

View File

@@ -22,10 +22,13 @@ cleanup_files()
trap cleanup_files exit term int
PERF_DIR=$(dirname "$(which perf)")
if [ -e "$PWD/tools/perf/libperf-jvmti.so" ]; then
LIBJVMTI=$PWD/tools/perf/libperf-jvmti.so
elif [ -e "$PWD/libperf-jvmti.so" ]; then
LIBJVMTI=$PWD/libperf-jvmti.so
elif [ -e "$PERF_DIR/libperf-jvmti.so" ]; then
LIBJVMTI=$PERF_DIR/libperf-jvmti.so
elif [ -e "$PREFIX/lib64/libperf-jvmti.so" ]; then
LIBJVMTI=$PREFIX/lib64/libperf-jvmti.so
elif [ -e "$PREFIX/lib/libperf-jvmti.so" ]; then
@@ -34,6 +37,7 @@ elif [ -e "/usr/lib/linux-tools-$(uname -a | awk '{ print $3 }' | sed -r 's/-gen
LIBJVMTI=/usr/lib/linux-tools-$(uname -a | awk '{ print $3 }' | sed -r 's/-generic//')/libperf-jvmti.so
else
echo "Fail to find libperf-jvmti.so"
# JVMTI is a build option, skip the test if fail to find lib
exit 2
fi

View File

@@ -0,0 +1,104 @@
#!/bin/bash
# 'perf data convert --to-ctf' command test
# SPDX-License-Identifier: GPL-2.0
set -e
err=0
perfdata=$(mktemp /tmp/__perf_test.perf.data.XXXXX)
ctf_dir=$(mktemp -d /tmp/__perf_test.ctf.XXXXX)
cleanup()
{
# Remove the recorded perf.data file and the CTF output directory,
# then disarm the traps so a subsequent 'exit' is not intercepted.
rm -f "${perfdata}"
rm -rf "${ctf_dir}"
trap - exit term int
}
trap_cleanup()
{
# Runs on INT/TERM (and on EXIT while the trap is still armed):
# report which function we were in, remove the temp files and
# propagate whatever error status has been recorded so far.
# NOTE(review): the sibling JSON-conversion test exits 1 here so a
# signal always counts as a failure — confirm whether exiting with
# ${err} (possibly 0) is intended.
echo "Unexpected signal in ${FUNCNAME[1]}"
cleanup
exit ${err}
}
trap trap_cleanup exit term int
check_babeltrace_support()
{
	# CTF conversion requires a perf binary linked with libbabeltrace;
	# exit with the 'skip' code (2) when the feature is missing.
	if perf check feature libbabeltrace
	then
		return
	fi
	echo "perf not linked with libbabeltrace, skipping test"
	exit 2
}
test_ctf_converter_file()
{
	echo "Testing Perf Data Conversion Command to CTF (File input)"
	# Record some data
	if ! perf record -o "$perfdata" -F 99 -g -- perf test -w noploop
	then
		echo "Failed to record perf data"
		err=1
		return
	fi
	# Start from a clean output directory
	rm -rf "${ctf_dir}"
	# Convert the recorded file
	if ! perf data convert --to-ctf "$ctf_dir" --force -i "$perfdata"
	then
		echo "Perf Data Converter Command to CTF (File input) [FAILED]"
		err=1
		return
	fi
	# The conversion must have created and populated the directory
	if [ ! -d "${ctf_dir}" ] || [ -z "$(ls -A "${ctf_dir}")" ]
	then
		echo "Perf Data Converter Command to CTF (File input) [FAILED]"
		echo " Output directory empty or missing"
		err=1
		return
	fi
	echo "Perf Data Converter Command to CTF (File input) [SUCCESS]"
}
test_ctf_converter_pipe()
{
	echo "Testing Perf Data Conversion Command to CTF (Pipe mode)"
	# Start from a clean output directory
	rm -rf "${ctf_dir}"
	# Record in pipe mode (stdout) and capture the stream into $perfdata
	if ! perf record -o - -F 99 -g -- perf test -w noploop > "$perfdata"
	then
		echo "Failed to record perf data"
		err=1
		return
	fi
	# Convert the piped-format file
	if ! perf data convert --to-ctf "$ctf_dir" --force -i "$perfdata"
	then
		echo "Perf Data Converter Command to CTF (Pipe mode) [FAILED]"
		err=1
		return
	fi
	# Success only if the output directory exists and is non-empty
	if [ -d "${ctf_dir}" ] && [ -n "$(ls -A "${ctf_dir}")" ]
	then
		echo "Perf Data Converter Command to CTF (Pipe mode) [SUCCESS]"
		return
	fi
	echo "Perf Data Converter Command to CTF (Pipe mode) [FAILED]"
	echo " Output directory empty or missing"
	err=1
}
check_babeltrace_support
test_ctf_converter_file
test_ctf_converter_pipe
# Remove temp files and disarm the EXIT trap before exiting; without
# this, the normal 'exit' below fires trap_cleanup and prints a bogus
# "Unexpected signal" message on every successful run.
cleanup
exit ${err}

View File

@@ -15,29 +15,42 @@ result=$(mktemp /tmp/__perf_test.output.json.XXXXX)
cleanup()
{
rm -f "${perfdata}"
rm -f "${perfdata}*"
rm -f "${result}"
trap - exit term int
}
trap_cleanup()
{
echo "Unexpected signal in ${FUNCNAME[1]}"
cleanup
exit ${err}
exit 1
}
trap trap_cleanup exit term int
test_json_converter_command()
{
echo "Testing Perf Data Convertion Command to JSON"
perf record -o "$perfdata" -F 99 -g -- perf test -w noploop > /dev/null 2>&1
perf data convert --to-json "$result" --force -i "$perfdata" >/dev/null 2>&1
echo "Testing Perf Data Conversion Command to JSON"
perf record -o "$perfdata" -F 99 -g -- perf test -w noploop
perf data convert --to-json "$result" --force -i "$perfdata"
if [ "$(cat ${result} | wc -l)" -gt "0" ] ; then
echo "Perf Data Converter Command to JSON [SUCCESS]"
else
echo "Perf Data Converter Command to JSON [FAILED]"
err=1
exit
fi
}
test_json_converter_pipe()
{
echo "Testing Perf Data Conversion Command to JSON (Pipe mode)"
perf record -o - -F 99 -g -- perf test -w noploop > "$perfdata"
cat "$perfdata" | perf data convert --to-json "$result" --force -i -
if [ "$(cat ${result} | wc -l)" -gt "0" ] ; then
echo "Perf Data Converter Command to JSON (Pipe mode) [SUCCESS]"
else
echo "Perf Data Converter Command to JSON (Pipe mode) [FAILED]"
err=1
fi
}
@@ -50,16 +63,18 @@ validate_json_format()
else
echo "The file does not contain valid JSON format [FAILED]"
err=1
exit
fi
else
echo "File not found [FAILED]"
err=2
exit
err=1
fi
}
test_json_converter_command
validate_json_format
test_json_converter_pipe
validate_json_format
cleanup
exit ${err}

View File

@@ -95,10 +95,36 @@ static int test__exclude_cmdnames(struct test_suite *test __maybe_unused,
return TEST_OK;
}
/*
 * Check that exclude_cmds() is a no-op when the two command-name lists
 * share no entries: both lists keep their single element, and only the
 * name actually added to cmds1 can be looked up there afterwards.
 */
static int test__exclude_cmdnames_no_overlap(struct test_suite *test __maybe_unused,
int subtest __maybe_unused)
{
struct cmdnames cmds1 = {};
struct cmdnames cmds2 = {};
/* One disjoint name per list ("read-vdso32" vs "archive"). */
add_cmdname(&cmds1, "read-vdso32", 11);
add_cmdname(&cmds2, "archive", 7);
TEST_ASSERT_VAL("invalid original size", cmds1.cnt == 1);
TEST_ASSERT_VAL("invalid original size", cmds2.cnt == 1);
exclude_cmds(&cmds1, &cmds2);
/* Nothing overlapped, so nothing should have been removed. */
TEST_ASSERT_VAL("invalid excluded size", cmds1.cnt == 1);
TEST_ASSERT_VAL("invalid excluded size", cmds2.cnt == 1);
TEST_ASSERT_VAL("cannot find cmd", is_in_cmdlist(&cmds1, "read-vdso32") == 1);
TEST_ASSERT_VAL("wrong cmd", is_in_cmdlist(&cmds1, "archive") == 0);
clean_cmdnames(&cmds1);
clean_cmdnames(&cmds2);
return TEST_OK;
}
/* Sub-tests of the 'subcmd help' suite; the NULL name terminates the list. */
static struct test_case tests__subcmd_help[] = {
TEST_CASE("Load subcmd names", load_cmdnames),
TEST_CASE("Uniquify subcmd names", uniq_cmdnames),
TEST_CASE("Exclude duplicate subcmd names", exclude_cmdnames),
TEST_CASE("Exclude disjoint subcmd names", exclude_cmdnames_no_overlap),
{ .name = NULL, }
};

View File

@@ -5,6 +5,7 @@
#include <stdlib.h>
#include <signal.h>
#include <sys/mman.h>
#include <linux/compiler.h>
#include <linux/string.h>
#include "tests.h"
@@ -28,7 +29,7 @@
static int __test__sw_clock_freq(enum perf_sw_ids clock_id)
{
int i, err = -1;
volatile int tmp = 0;
volatile int tmp __maybe_unused = 0;
u64 total_periods = 0;
int nr_samples = 0;
char sbuf[STRERR_BUFSIZE];

View File

@@ -240,6 +240,11 @@ DECLARE_WORKLOAD(brstack);
DECLARE_WORKLOAD(datasym);
DECLARE_WORKLOAD(landlock);
DECLARE_WORKLOAD(traploop);
DECLARE_WORKLOAD(inlineloop);
#ifdef HAVE_RUST_SUPPORT
DECLARE_WORKLOAD(code_with_type);
#endif
extern const char *dso_to_test;
extern const char *test_objdump_path;

View File

@@ -1,7 +1,7 @@
// SPDX-License-Identifier: GPL-2.0
#include "tests.h"
#include "util/blake2s.h"
#include "util/debug.h"
#include "util/sha1.h"
#include <linux/compiler.h>
#include <stdlib.h>
@@ -17,45 +17,72 @@ static int test_strreplace(char needle, const char *haystack,
return ret == 0;
}
#define MAX_LEN 512
/* Maximum data length tested by test_blake2s() */
#define MAX_DATA_LEN 512
/* Test sha1() for all lengths from 0 to MAX_LEN inclusively. */
static int test_sha1(void)
/*
* Hash length tested by test_blake2s(). BLAKE2s supports variable-length
* hashes. However, the only user of BLAKE2s in 'perf' uses 20-byte hashes,
* matching the length of the ELF build ID field. So that's the length we test.
*/
#define HASH_LEN 20
/* Test the implementation of the BLAKE2s hash algorithm. */
static int test_blake2s(void)
{
u8 data[MAX_LEN];
size_t digests_size = (MAX_LEN + 1) * SHA1_DIGEST_SIZE;
u8 *digests;
u8 digest_of_digests[SHA1_DIGEST_SIZE];
u8 data[MAX_DATA_LEN];
u8 hash[HASH_LEN];
u8 hash2[HASH_LEN];
struct blake2s_ctx main_ctx;
/*
* The correctness of this value was verified by running this test with
* sha1() replaced by OpenSSL's SHA1().
* This value was generated by the following Python code:
*
* import hashlib
*
* data = bytes(i % 256 for i in range(513))
* h = hashlib.blake2s(digest_size=20)
* for i in range(513):
* h.update(hashlib.blake2s(data=data[:i], digest_size=20).digest())
* print(h.hexdigest())
*/
static const u8 expected_digest_of_digests[SHA1_DIGEST_SIZE] = {
0x74, 0xcd, 0x4c, 0xb9, 0xd8, 0xa6, 0xd5, 0x95, 0x22, 0x8b,
0x7e, 0xd6, 0x8b, 0x7e, 0x46, 0x95, 0x31, 0x9b, 0xa2, 0x43,
static const u8 expected_hash_of_hashes[20] = {
0xef, 0x9b, 0x13, 0x98, 0x78, 0x8e, 0x74, 0x59, 0x9c, 0xd5,
0x0c, 0xf0, 0x33, 0x97, 0x79, 0x3d, 0x3e, 0xd0, 0x95, 0xa6
};
size_t i;
digests = malloc(digests_size);
TEST_ASSERT_VAL("failed to allocate digests", digests != NULL);
/* Generate MAX_LEN bytes of data. */
for (i = 0; i < MAX_LEN; i++)
/* Generate MAX_DATA_LEN bytes of data. */
for (i = 0; i < MAX_DATA_LEN; i++)
data[i] = i;
/* Calculate a SHA-1 for each length 0 through MAX_LEN inclusively. */
for (i = 0; i <= MAX_LEN; i++)
sha1(data, i, &digests[i * SHA1_DIGEST_SIZE]);
blake2s_init(&main_ctx, sizeof(hash));
for (i = 0; i <= MAX_DATA_LEN; i++) {
struct blake2s_ctx ctx;
/* Calculate digest of all digests calculated above. */
sha1(digests, digests_size, digest_of_digests);
/* Compute the BLAKE2s hash of 'i' data bytes. */
blake2s_init(&ctx, HASH_LEN);
blake2s_update(&ctx, data, i);
blake2s_final(&ctx, hash);
free(digests);
/* Verify that multiple updates produce the same result. */
blake2s_init(&ctx, HASH_LEN);
blake2s_update(&ctx, data, i / 2);
blake2s_update(&ctx, &data[i / 2], i - (i / 2));
blake2s_final(&ctx, hash2);
TEST_ASSERT_VAL("inconsistent BLAKE2s hashes",
memcmp(hash, hash2, HASH_LEN) == 0);
/* Check for the expected result. */
TEST_ASSERT_VAL("wrong output from sha1()",
memcmp(digest_of_digests, expected_digest_of_digests,
SHA1_DIGEST_SIZE) == 0);
/*
* Pass the hash to another BLAKE2s context, so that we
* incrementally compute the hash of all the hashes.
*/
blake2s_update(&main_ctx, hash, HASH_LEN);
}
/* Verify the hash of all the hashes. */
blake2s_final(&main_ctx, hash);
TEST_ASSERT_VAL("wrong BLAKE2s hashes",
memcmp(hash, expected_hash_of_hashes, HASH_LEN) == 0);
return 0;
}
@@ -68,7 +95,7 @@ static int test__util(struct test_suite *t __maybe_unused, int subtest __maybe_u
TEST_ASSERT_VAL("replace long", test_strreplace('a', "abcabc", "longlong",
"longlongbclonglongbc"));
return test_sha1();
return test_blake2s();
}
DEFINE_SUITE("util", util);

View File

@@ -8,9 +8,16 @@ perf-test-y += brstack.o
perf-test-y += datasym.o
perf-test-y += landlock.o
perf-test-y += traploop.o
perf-test-y += inlineloop.o
ifeq ($(CONFIG_RUST_SUPPORT),y)
perf-test-y += code_with_type.o
perf-test-y += code_with_type.a
endif
CFLAGS_sqrtloop.o = -g -O0 -fno-inline -U_FORTIFY_SOURCE
CFLAGS_leafloop.o = -g -O0 -fno-inline -fno-omit-frame-pointer -U_FORTIFY_SOURCE
CFLAGS_brstack.o = -g -O0 -fno-inline -U_FORTIFY_SOURCE
CFLAGS_datasym.o = -g -O0 -fno-inline -U_FORTIFY_SOURCE
CFLAGS_traploop.o = -g -O0 -fno-inline -U_FORTIFY_SOURCE
CFLAGS_inlineloop.o = -g -O2

View File

@@ -0,0 +1,46 @@
// SPDX-License-Identifier: GPL-2.0
#include <pthread.h>
#include <stdlib.h>
#include <signal.h>
#include <unistd.h>
#include <linux/compiler.h>
#include "../tests.h"
extern void test_rs(uint count);
static volatile sig_atomic_t done;
/* Signal handler: just flag the main loop in code_with_type() to stop. */
static void sighandler(int sig __maybe_unused)
{
done = 1;
}
/*
 * Workload for data-type profiling tests: repeatedly call the Rust
 * routine test_rs() until SIGALRM (after 'sec' seconds) or SIGINT
 * sets 'done'.
 *
 * argv[0] (optional): run time in seconds, default 1.
 * argv[1] (optional): loop count passed to each test_rs() call,
 *                     default 100.
 */
static int code_with_type(int argc, const char **argv)
{
	int sec = 1, num_loops = 100;

	/*
	 * NOTE(review): Linux limits thread names to 15 chars + NUL;
	 * "perf-code-with-type" is 19 chars, so this call likely fails
	 * with ERANGE and the thread name stays unchanged — confirm
	 * whether a shorter name was intended.
	 */
	pthread_setname_np(pthread_self(), "perf-code-with-type");

	if (argc > 0)
		sec = atoi(argv[0]);
	if (argc > 1)
		num_loops = atoi(argv[1]);

	signal(SIGINT, sighandler);
	signal(SIGALRM, sighandler);
	alarm(sec);

	/*
	 * Rust doesn't have signal management in the standard library. To
	 * not deal with any external crates, offload signal handling to the
	 * outside code.  (Dropped a redundant 'continue' that was the only
	 * statement after the call.)
	 */
	while (!done)
		test_rs(num_loops);

	return 0;
}
DEFINE_WORKLOAD(code_with_type);

View File

@@ -0,0 +1,27 @@
// SPDX-License-Identifier: GPL-2.0
// We're going to look for this structure in the data type profiling report
#[allow(dead_code)]
struct Buf {
    // test_rs() below only increments data1 and data3; data2 sits
    // between them so the touched fields live at distinct offsets.
    data1: u64,
    data2: String,
    data3: u64,
}
#[no_mangle]
pub extern "C" fn test_rs(count: u32) {
    // Called from C (see the extern declaration in the C workload), so
    // keep the C ABI and the unmangled symbol name.
    let mut b = Buf {
        data1: 0,
        data2: String::from("data"),
        data3: 0,
    };
    // Busy loop touching b.data1 and b.data3 so profiling samples land
    // on accesses to struct Buf fields; the == 123 branch adds a
    // data-dependent path. Note 1..count runs count-1 iterations.
    for _ in 1..count {
        b.data1 += 1;
        if b.data1 == 123 {
            b.data1 += 1;
        }
        b.data3 += b.data1;
    }
}

View File

@@ -0,0 +1,52 @@
// SPDX-License-Identifier: GPL-2.0
#include <pthread.h>
#include <stdlib.h>
#include <signal.h>
#include <unistd.h>
#include <linux/compiler.h>
#include "../tests.h"
static volatile int a;
static volatile sig_atomic_t done;
/* Signal handler: just flag the spin loop in leaf() to stop. */
static void sighandler(int sig __maybe_unused)
{
done = 1;
}
/*
 * Innermost frame: spin on the volatile accumulator until a signal
 * sets 'done'. Forced inline (always_inline) so this frame appears
 * only as an inlined entry in unwound callchains.
 */
static inline void __attribute__((always_inline)) leaf(int b)
{
again:
a += b;
if (!done)
goto again;
}
/* Forced-inline middle frame between parent() and leaf(). */
static inline void __attribute__((always_inline)) middle(int b)
{
leaf(b);
}
/*
 * Kept out-of-line (noinline) so there is a real, non-inlined frame
 * above the two always_inline frames (middle/leaf).
 */
static noinline void parent(int b)
{
middle(b);
}
/*
 * Workload entry point: spin inside the parent -> middle -> leaf call
 * chain for a number of seconds (argv[0], default 1), stopping when
 * SIGALRM or SIGINT fires.
 */
static int inlineloop(int argc, const char **argv)
{
	int duration = (argc > 0) ? atoi(argv[0]) : 1;

	pthread_setname_np(pthread_self(), "perf-inlineloop");

	/* Either signal makes leaf() fall out of its loop. */
	signal(SIGINT, sighandler);
	signal(SIGALRM, sighandler);
	alarm(duration);

	parent(duration);
	return 0;
}
DEFINE_WORKLOAD(inlineloop);