Commit 397a9794 authored by Linus Torvalds

Merge tag 'linux-kselftest-5.7-rc1' of git://git.kernel.org/pub/scm/linux/kernel/git/shuah/linux-kselftest

Pull kselftest update from Shuah Khan:
 "This kselftest update consists of:

   - resctrl_tests for the resctrl file system. resctrl isn't included in
     the default TARGETS list in the kselftest Makefile; it can be run
     manually.

   - Kselftest harness improvements.

   - Kselftest framework and individual test fixes to support runs on
     Kernel CI rings and other environments that use relocatable build
     and install features.

   - Minor cleanups and typo fixes"

* tag 'linux-kselftest-5.7-rc1' of git://git.kernel.org/pub/scm/linux/kernel/git/shuah/linux-kselftest: (25 commits)
  selftests: enforce local header dependency in lib.mk
  selftests: Fix memfd to support relocatable build (O=objdir)
  selftests: Fix seccomp to support relocatable build (O=objdir)
  selftests/harness: Handle timeouts cleanly
  selftests/harness: Move test child waiting logic
  selftests: android: Fix custom install from skipping test progs
  selftests: android: ion: Fix ionmap_test compile error
  selftests: Fix kselftest O=objdir build from cluttering top level objdir
  selftests/seccomp: Adjust test fixture counts
  selftests/ftrace: Fix typo in trigger-multihist.tc
  selftests/timens: Remove duplicated include <time.h>
  selftests/resctrl: fix spelling mistake "Errror" -> "Error"
  selftests/resctrl: Add the test in MAINTAINERS
  selftests/resctrl: Disable MBA and MBM tests for AMD
  selftests/resctrl: Use cache index3 id for AMD schemata masks
  selftests/resctrl: Add vendor detection mechanism
  selftests/resctrl: Add Cache Allocation Technology (CAT) selftest
  selftests/resctrl: Add Cache QoS Monitoring (CQM) selftest
  selftests/resctrl: Add MBA test
  selftests/resctrl: Add MBM test
  ...
parents ffc1c20c 1056d3d2
@@ -14189,6 +14189,7 @@ S: Supported
 F: arch/x86/kernel/cpu/resctrl/
 F: arch/x86/include/asm/resctrl_sched.h
 F: Documentation/x86/resctrl*
+F: tools/testing/selftests/resctrl/
 READ-COPY UPDATE (RCU)
 M: "Paul E. McKenney" <paulmck@kernel.org>
......
@@ -91,7 +91,7 @@ override LDFLAGS =
 override MAKEFLAGS =
 endif
-# Append kselftest to KBUILD_OUTPUT to avoid cluttering
+# Append kselftest to KBUILD_OUTPUT and O to avoid cluttering
 # KBUILD_OUTPUT with selftest objects and headers installed
 # by selftests Makefile or lib.mk.
 ifdef building_out_of_srctree
@@ -99,7 +99,7 @@ override LDFLAGS =
 endif
 ifneq ($(O),)
-	BUILD := $(O)
+	BUILD := $(O)/kselftest
 else
 	ifneq ($(KBUILD_OUTPUT),)
 		BUILD := $(KBUILD_OUTPUT)/kselftest
......
@@ -21,7 +21,7 @@ all:
 override define INSTALL_RULE
 	mkdir -p $(INSTALL_PATH)
-	install -t $(INSTALL_PATH) $(TEST_PROGS) $(TEST_PROGS_EXTENDED) $(TEST_FILES)
+	install -t $(INSTALL_PATH) $(TEST_PROGS) $(TEST_PROGS_EXTENDED) $(TEST_FILES) $(TEST_GEN_PROGS) $(TEST_CUSTOM_PROGS) $(TEST_GEN_PROGS_EXTENDED) $(TEST_GEN_FILES)
 	@for SUBDIR in $(SUBDIRS); do \
 		BUILD_TARGET=$(OUTPUT)/$$SUBDIR; \
......
@@ -17,4 +17,4 @@ include ../../lib.mk
 $(OUTPUT)/ionapp_export: ionapp_export.c ipcsocket.c ionutils.c
 $(OUTPUT)/ionapp_import: ionapp_import.c ipcsocket.c ionutils.c
-$(OUTPUT)/ionmap_test: ionmap_test.c ionutils.c
+$(OUTPUT)/ionmap_test: ionmap_test.c ionutils.c ipcsocket.c
@@ -23,7 +23,7 @@ if [ ! -f events/sched/sched_process_fork/hist ]; then
     exit_unsupported
 fi
-echo "Test histogram multiple tiggers"
+echo "Test histogram multiple triggers"
 echo 'hist:keys=parent_pid:vals=child_pid' > events/sched/sched_process_fork/trigger
 echo 'hist:keys=parent_comm:vals=child_pid' >> events/sched/sched_process_fork/trigger
......
@@ -635,10 +635,12 @@
 struct __test_metadata {
 	const char *name;
 	void (*fn)(struct __test_metadata *);
+	pid_t pid;	/* pid of test when being run */
 	int termsig;
 	int passed;
 	int trigger; /* extra handler after the evaluation */
-	int timeout;
+	int timeout;	/* seconds to wait for test timeout */
+	bool timed_out;	/* did this test timeout instead of exiting? */
 	__u8 step;
 	bool no_print; /* manual trigger when TH_LOG_STREAM is not available */
 	struct __test_metadata *prev, *next;
@@ -695,64 +697,116 @@ static inline int __bail(int for_realz, bool no_print, __u8 step)
 	return 0;
 }
-void __run_test(struct __test_metadata *t)
+struct __test_metadata *__active_test;
+static void __timeout_handler(int sig, siginfo_t *info, void *ucontext)
 {
-	pid_t child_pid;
+	struct __test_metadata *t = __active_test;
+
+	/* Sanity check handler execution environment. */
+	if (!t) {
+		fprintf(TH_LOG_STREAM,
+			"no active test in SIGALRM handler!?\n");
+		abort();
+	}
+	if (sig != SIGALRM || sig != info->si_signo) {
+		fprintf(TH_LOG_STREAM,
+			"%s: SIGALRM handler caught signal %d!?\n",
+			t->name, sig != SIGALRM ? sig : info->si_signo);
+		abort();
+	}
+
+	t->timed_out = true;
+	kill(t->pid, SIGKILL);
+}
+
+void __wait_for_test(struct __test_metadata *t)
+{
+	struct sigaction action = {
+		.sa_sigaction = __timeout_handler,
+		.sa_flags = SA_SIGINFO,
+	};
+	struct sigaction saved_action;
 	int status;
+
+	if (sigaction(SIGALRM, &action, &saved_action)) {
+		t->passed = 0;
+		fprintf(TH_LOG_STREAM,
+			"%s: unable to install SIGALRM handler\n",
+			t->name);
+		return;
+	}
+	__active_test = t;
+	t->timed_out = false;
+	alarm(t->timeout);
+	waitpid(t->pid, &status, 0);
+	alarm(0);
+	if (sigaction(SIGALRM, &saved_action, NULL)) {
+		t->passed = 0;
+		fprintf(TH_LOG_STREAM,
+			"%s: unable to uninstall SIGALRM handler\n",
+			t->name);
+		return;
+	}
+	__active_test = NULL;
+
+	if (t->timed_out) {
+		t->passed = 0;
+		fprintf(TH_LOG_STREAM,
+			"%s: Test terminated by timeout\n", t->name);
+	} else if (WIFEXITED(status)) {
+		t->passed = t->termsig == -1 ? !WEXITSTATUS(status) : 0;
+		if (t->termsig != -1) {
+			fprintf(TH_LOG_STREAM,
+				"%s: Test exited normally "
+				"instead of by signal (code: %d)\n",
+				t->name,
+				WEXITSTATUS(status));
+		} else if (!t->passed) {
+			fprintf(TH_LOG_STREAM,
				"%s: Test failed at step #%d\n",
+				t->name,
+				WEXITSTATUS(status));
+		}
+	} else if (WIFSIGNALED(status)) {
+		t->passed = 0;
+		if (WTERMSIG(status) == SIGABRT) {
+			fprintf(TH_LOG_STREAM,
+				"%s: Test terminated by assertion\n",
+				t->name);
+		} else if (WTERMSIG(status) == t->termsig) {
+			t->passed = 1;
+		} else {
+			fprintf(TH_LOG_STREAM,
+				"%s: Test terminated unexpectedly "
+				"by signal %d\n",
+				t->name,
+				WTERMSIG(status));
+		}
+	} else {
+		fprintf(TH_LOG_STREAM,
+			"%s: Test ended in some other way [%u]\n",
+			t->name,
+			status);
+	}
+}
+
+void __run_test(struct __test_metadata *t)
+{
 	t->passed = 1;
 	t->trigger = 0;
 	printf("[ RUN ] %s\n", t->name);
-	alarm(t->timeout);
-	child_pid = fork();
-	if (child_pid < 0) {
+	t->pid = fork();
+	if (t->pid < 0) {
 		printf("ERROR SPAWNING TEST CHILD\n");
 		t->passed = 0;
-	} else if (child_pid == 0) {
+	} else if (t->pid == 0) {
 		t->fn(t);
 		/* return the step that failed or 0 */
 		_exit(t->passed ? 0 : t->step);
 	} else {
-		/* TODO(wad) add timeout support. */
-		waitpid(child_pid, &status, 0);
-		if (WIFEXITED(status)) {
-			t->passed = t->termsig == -1 ? !WEXITSTATUS(status) : 0;
-			if (t->termsig != -1) {
-				fprintf(TH_LOG_STREAM,
-					"%s: Test exited normally "
-					"instead of by signal (code: %d)\n",
-					t->name,
-					WEXITSTATUS(status));
-			} else if (!t->passed) {
-				fprintf(TH_LOG_STREAM,
-					"%s: Test failed at step #%d\n",
-					t->name,
-					WEXITSTATUS(status));
-			}
-		} else if (WIFSIGNALED(status)) {
-			t->passed = 0;
-			if (WTERMSIG(status) == SIGABRT) {
-				fprintf(TH_LOG_STREAM,
-					"%s: Test terminated by assertion\n",
-					t->name);
-			} else if (WTERMSIG(status) == t->termsig) {
-				t->passed = 1;
-			} else {
-				fprintf(TH_LOG_STREAM,
-					"%s: Test terminated unexpectedly "
-					"by signal %d\n",
-					t->name,
-					WTERMSIG(status));
-			}
-		} else {
-			fprintf(TH_LOG_STREAM,
-				"%s: Test ended in some other way [%u]\n",
-				t->name,
-				status);
-		}
+		__wait_for_test(t);
 	}
 	printf("[ %4s ] %s\n", (t->passed ? "OK" : "FAIL"), t->name);
-	alarm(0);
 }
 static int test_harness_run(int __attribute__((unused)) argc,
......
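For context, a minimal illustrative sketch (not part of this merge) of a test that exercises the new timeout path: the child never exits, so the SIGALRM handler armed by __wait_for_test() kills it after t->timeout seconds (30 by default) and the run reports "Test terminated by timeout" instead of hanging. The include path is an assumption about where the test file lives.

/* Illustrative only -- not from this patch set. */
#include <unistd.h>
#include "kselftest_harness.h"	/* path relative to the test directory is assumed */

TEST(hang_forever)
{
	/* Never exits; the harness parent kills us after the timeout. */
	for (;;)
		sleep(1);
}

TEST_HARNESS_MAIN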
@@ -137,7 +137,8 @@ endif
 # Selftest makefiles can override those targets by setting
 # OVERRIDE_TARGETS = 1.
 ifeq ($(OVERRIDE_TARGETS),)
-$(OUTPUT)/%:%.c
+LOCAL_HDRS := $(selfdir)/kselftest_harness.h $(selfdir)/kselftest.h
+$(OUTPUT)/%:%.c $(LOCAL_HDRS)
 	$(LINK.c) $^ $(LDLIBS) -o $@
 $(OUTPUT)/%.o:%.S
......
@@ -4,9 +4,8 @@ CFLAGS += -I../../../../include/uapi/
 CFLAGS += -I../../../../include/
 CFLAGS += -I../../../../usr/include/
-TEST_GEN_PROGS := memfd_test
+TEST_GEN_PROGS := memfd_test fuse_test fuse_mnt
 TEST_PROGS := run_fuse_test.sh run_hugetlbfs_test.sh
-TEST_GEN_FILES := fuse_mnt fuse_test
 fuse_mnt.o: CFLAGS += $(shell pkg-config fuse --cflags)
@@ -14,7 +13,7 @@ include ../lib.mk
 $(OUTPUT)/fuse_mnt: LDLIBS += $(shell pkg-config fuse --libs)
-$(OUTPUT)/memfd_test: memfd_test.c common.o
+$(OUTPUT)/memfd_test: memfd_test.c common.c
-$(OUTPUT)/fuse_test: fuse_test.c common.o
+$(OUTPUT)/fuse_test: fuse_test.c common.c
-EXTRA_CLEAN = common.o
+EXTRA_CLEAN = $(OUTPUT)/common.o
CC = $(CROSS_COMPILE)gcc
CFLAGS = -g -Wall
SRCS=$(wildcard *.c)
OBJS=$(SRCS:.c=.o)
all: resctrl_tests
$(OBJS): $(SRCS)
$(CC) $(CFLAGS) -c $(SRCS)
resctrl_tests: $(OBJS)
$(CC) $(CFLAGS) -o $@ $^
.PHONY: clean
clean:
$(RM) $(OBJS) resctrl_tests
resctrl_tests - resctrl file system test suite
Authors:
Fenghua Yu <fenghua.yu@intel.com>
Sai Praneeth Prakhya <sai.praneeth.prakhya@intel.com>,
resctrl_tests tests various resctrl functionalities and interfaces including
both software and hardware.
Currently it supports Memory Bandwidth Monitoring test and Memory Bandwidth
Allocation test on Intel RDT hardware. More tests will be added in the future.
The test suite can be extended to cover AMD QoS and ARM MPAM hardware
as well.
BUILD
-----
Run "make" to build executable file "resctrl_tests".
RUN
---
To use resctrl_tests, root or sudo privileges are required. This is because
the test needs to mount resctrl file system and change contents in the file
system.
Executing the test without any parameter will run all supported tests:
sudo ./resctrl_tests
OVERVIEW OF EXECUTION
---------------------
A test case has four stages:
- setup: mount resctrl file system, create group, setup schemata, move test
process pids to tasks, start benchmark.
- execute: let benchmark run
- verify: get resctrl data and verify the data with another source, e.g.
perf event.
- teardown: umount resctrl and clear temporary files.
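To make the setup stage concrete, here is a small illustrative C sketch; it is not taken from the suite itself, and the group name "c1" and the schemata line "MB:0=10" are example values only:

/* Illustrative sketch of the "setup" stage against the resctrl FS. */
#include <errno.h>
#include <stdio.h>
#include <sys/mount.h>
#include <sys/stat.h>
#include <sys/types.h>

static int setup_example(pid_t bm_pid)
{
	FILE *fp;

	/* Mount the resctrl file system (needs root); tolerate it already being mounted. */
	if (mount("resctrl", "/sys/fs/resctrl", "resctrl", 0, NULL) && errno != EBUSY)
		return -1;

	/* Create a control/monitor group. */
	if (mkdir("/sys/fs/resctrl/c1", 0755) && errno != EEXIST)
		return -1;

	/* Write a schemata line, e.g. 10% memory bandwidth on domain 0. */
	fp = fopen("/sys/fs/resctrl/c1/schemata", "w");
	if (!fp)
		return -1;
	fprintf(fp, "MB:0=10\n");
	fclose(fp);

	/* Move the benchmark pid into the group's tasks file. */
	fp = fopen("/sys/fs/resctrl/c1/tasks", "w");
	if (!fp)
		return -1;
	fprintf(fp, "%d\n", (int)bm_pid);
	fclose(fp);

	return 0;
}

The suite's own helpers remount_resctrlfs(), write_schemata() and write_bm_pid_to_resctrl() (declared in resctrl.h below) wrap these same steps.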
ARGUMENTS
---------
Parameter '-h' shows usage information.
usage: resctrl_tests [-h] [-b "benchmark_cmd [options]"] [-t test list] [-n no_of_bits]
-b benchmark_cmd [options]: run specified benchmark for MBM, MBA and CQM; default benchmark is builtin fill_buf
-t test list: run tests specified in the test list, e.g. -t mbm, mba, cqm, cat
-n no_of_bits: run cache tests using specified no of bits in cache bit mask
-p cpu_no: specify CPU number to run the test. 1 is default
-h: help
// SPDX-License-Identifier: GPL-2.0
#include <stdint.h>
#include "resctrl.h"
struct read_format {
__u64 nr; /* The number of events */
struct {
__u64 value; /* The value of the event */
} values[2];
};
static struct perf_event_attr pea_llc_miss;
static struct read_format rf_cqm;
static int fd_lm;
char llc_occup_path[1024];
static void initialize_perf_event_attr(void)
{
pea_llc_miss.type = PERF_TYPE_HARDWARE;
pea_llc_miss.size = sizeof(struct perf_event_attr);
pea_llc_miss.read_format = PERF_FORMAT_GROUP;
pea_llc_miss.exclude_kernel = 1;
pea_llc_miss.exclude_hv = 1;
pea_llc_miss.exclude_idle = 1;
pea_llc_miss.exclude_callchain_kernel = 1;
pea_llc_miss.inherit = 1;
pea_llc_miss.exclude_guest = 1;
pea_llc_miss.disabled = 1;
}
static void ioctl_perf_event_ioc_reset_enable(void)
{
ioctl(fd_lm, PERF_EVENT_IOC_RESET, 0);
ioctl(fd_lm, PERF_EVENT_IOC_ENABLE, 0);
}
static int perf_event_open_llc_miss(pid_t pid, int cpu_no)
{
fd_lm = perf_event_open(&pea_llc_miss, pid, cpu_no, -1,
PERF_FLAG_FD_CLOEXEC);
if (fd_lm == -1) {
perror("Error opening leader");
ctrlc_handler(0, NULL, NULL);
return -1;
}
return 0;
}
static int initialize_llc_perf(void)
{
memset(&pea_llc_miss, 0, sizeof(struct perf_event_attr));
memset(&rf_cqm, 0, sizeof(struct read_format));
/* Initialize perf_event_attr structures for HW_CACHE_MISSES */
initialize_perf_event_attr();
pea_llc_miss.config = PERF_COUNT_HW_CACHE_MISSES;
rf_cqm.nr = 1;
return 0;
}
static int reset_enable_llc_perf(pid_t pid, int cpu_no)
{
int ret = 0;
ret = perf_event_open_llc_miss(pid, cpu_no);
if (ret < 0)
return ret;
/* Start counters to log values */
ioctl_perf_event_ioc_reset_enable();
return 0;
}
/*
* get_llc_perf: llc cache miss through perf events
 * @cpu_no: CPU number that the benchmark PID is bound to
*
* Perf events like HW_CACHE_MISSES could be used to validate number of
* cache lines allocated.
*
* Return: =0 on success. <0 on failure.
*/
static int get_llc_perf(unsigned long *llc_perf_miss)
{
__u64 total_misses;
/* Stop counters after one span to get miss rate */
ioctl(fd_lm, PERF_EVENT_IOC_DISABLE, 0);
if (read(fd_lm, &rf_cqm, sizeof(struct read_format)) == -1) {
perror("Could not get llc misses through perf");
return -1;
}
total_misses = rf_cqm.values[0].value;
close(fd_lm);
*llc_perf_miss = total_misses;
return 0;
}
/*
* Get LLC Occupancy as reported by RESCTRL FS
* For CQM,
* 1. If con_mon grp and mon grp given, then read from mon grp in
* con_mon grp
* 2. If only con_mon grp given, then read from con_mon grp
* 3. If both not given, then read from root con_mon grp
* For CAT,
* 1. If con_mon grp given, then read from it
* 2. If con_mon grp not given, then read from root con_mon grp
*
* Return: =0 on success. <0 on failure.
*/
static int get_llc_occu_resctrl(unsigned long *llc_occupancy)
{
FILE *fp;
fp = fopen(llc_occup_path, "r");
if (!fp) {
perror("Failed to open results file");
return errno;
}
if (fscanf(fp, "%lu", llc_occupancy) <= 0) {
perror("Could not get llc occupancy");
fclose(fp);
return -1;
}
fclose(fp);
return 0;
}
/*
* print_results_cache: the cache results are stored in a file
* @filename: file that stores the results
* @bm_pid: child pid that runs benchmark
* @llc_value: perf miss value /
* llc occupancy value reported by resctrl FS
*
* Return: 0 on success. non-zero on failure.
*/
static int print_results_cache(char *filename, int bm_pid,
unsigned long llc_value)
{
FILE *fp;
if (strcmp(filename, "stdio") == 0 || strcmp(filename, "stderr") == 0) {
printf("Pid: %d \t LLC_value: %lu\n", bm_pid,
llc_value);
} else {
fp = fopen(filename, "a");
if (!fp) {
perror("Cannot open results file");
return errno;
}
fprintf(fp, "Pid: %d \t llc_value: %lu\n", bm_pid, llc_value);
fclose(fp);
}
return 0;
}
int measure_cache_vals(struct resctrl_val_param *param, int bm_pid)
{
unsigned long llc_perf_miss = 0, llc_occu_resc = 0, llc_value = 0;
int ret;
/*
* Measure cache miss from perf.
*/
if (!strcmp(param->resctrl_val, "cat")) {
ret = get_llc_perf(&llc_perf_miss);
if (ret < 0)
return ret;
llc_value = llc_perf_miss;
}
/*
* Measure llc occupancy from resctrl.
*/
if (!strcmp(param->resctrl_val, "cqm")) {
ret = get_llc_occu_resctrl(&llc_occu_resc);
if (ret < 0)
return ret;
llc_value = llc_occu_resc;
}
ret = print_results_cache(param->filename, bm_pid, llc_value);
if (ret)
return ret;
return 0;
}
/*
* cache_val: execute benchmark and measure LLC occupancy resctrl
* and perf cache miss for the benchmark
* @param: parameters passed to cache_val()
*
* Return: 0 on success. non-zero on failure.
*/
int cat_val(struct resctrl_val_param *param)
{
int malloc_and_init_memory = 1, memflush = 1, operation = 0, ret = 0;
char *resctrl_val = param->resctrl_val;
pid_t bm_pid;
if (strcmp(param->filename, "") == 0)
sprintf(param->filename, "stdio");
bm_pid = getpid();
/* Taskset benchmark to specified cpu */
ret = taskset_benchmark(bm_pid, param->cpu_no);
if (ret)
return ret;
/* Write benchmark to specified con_mon grp, mon_grp in resctrl FS*/
ret = write_bm_pid_to_resctrl(bm_pid, param->ctrlgrp, param->mongrp,
resctrl_val);
if (ret)
return ret;
if ((strcmp(resctrl_val, "cat") == 0)) {
ret = initialize_llc_perf();
if (ret)
return ret;
}
/* Test runs until the callback setup() tells the test to stop. */
while (1) {
if (strcmp(resctrl_val, "cat") == 0) {
ret = param->setup(1, param);
if (ret) {
ret = 0;
break;
}
ret = reset_enable_llc_perf(bm_pid, param->cpu_no);
if (ret)
break;
if (run_fill_buf(param->span, malloc_and_init_memory,
memflush, operation, resctrl_val)) {
fprintf(stderr, "Error-running fill buffer\n");
ret = -1;
break;
}
sleep(1);
ret = measure_cache_vals(param, bm_pid);
if (ret)
break;
} else {
break;
}
}
return ret;
}
// SPDX-License-Identifier: GPL-2.0
/*
* Cache Allocation Technology (CAT) test
*
* Copyright (C) 2018 Intel Corporation
*
* Authors:
* Sai Praneeth Prakhya <sai.praneeth.prakhya@intel.com>,
* Fenghua Yu <fenghua.yu@intel.com>
*/
#include "resctrl.h"
#include <unistd.h>
#define RESULT_FILE_NAME1 "result_cat1"
#define RESULT_FILE_NAME2 "result_cat2"
#define NUM_OF_RUNS 5
#define MAX_DIFF_PERCENT 4
#define MAX_DIFF 1000000
int count_of_bits;
char cbm_mask[256];
unsigned long long_mask;
unsigned long cache_size;
/*
* Change schemata. Write schemata to specified
* con_mon grp, mon_grp in resctrl FS.
* Run 5 times in order to get average values.
*/
static int cat_setup(int num, ...)
{
struct resctrl_val_param *p;
char schemata[64];
va_list param;
int ret = 0;
va_start(param, num);
p = va_arg(param, struct resctrl_val_param *);
va_end(param);
/* Run NUM_OF_RUNS times */
if (p->num_of_runs >= NUM_OF_RUNS)
return -1;
if (p->num_of_runs == 0) {
sprintf(schemata, "%lx", p->mask);
ret = write_schemata(p->ctrlgrp, schemata, p->cpu_no,
p->resctrl_val);
}
p->num_of_runs++;
return ret;
}
static void show_cache_info(unsigned long sum_llc_perf_miss, int no_of_bits,
unsigned long span)
{
unsigned long allocated_cache_lines = span / 64;
unsigned long avg_llc_perf_miss = 0;
float diff_percent;
avg_llc_perf_miss = sum_llc_perf_miss / (NUM_OF_RUNS - 1);
diff_percent = ((float)allocated_cache_lines - avg_llc_perf_miss) /
allocated_cache_lines * 100;
printf("%sok CAT: cache miss rate within %d%%\n",
!is_amd && abs((int)diff_percent) > MAX_DIFF_PERCENT ?
"not " : "", MAX_DIFF_PERCENT);
tests_run++;
printf("# Percent diff=%d\n", abs((int)diff_percent));
printf("# Number of bits: %d\n", no_of_bits);
printf("# Avg_llc_perf_miss: %lu\n", avg_llc_perf_miss);
printf("# Allocated cache lines: %lu\n", allocated_cache_lines);
}
static int check_results(struct resctrl_val_param *param)
{
char *token_array[8], temp[512];
unsigned long sum_llc_perf_miss = 0;
int runs = 0, no_of_bits = 0;
FILE *fp;
printf("# Checking for pass/fail\n");
fp = fopen(param->filename, "r");
if (!fp) {
perror("# Cannot open file");
return errno;
}
while (fgets(temp, sizeof(temp), fp)) {
char *token = strtok(temp, ":\t");
int fields = 0;
while (token) {
token_array[fields++] = token;
token = strtok(NULL, ":\t");
}
/*
* Discard the first value which is inaccurate due to monitoring
* setup transition phase.
*/
if (runs > 0)
sum_llc_perf_miss += strtoul(token_array[3], NULL, 0);
runs++;
}
fclose(fp);
no_of_bits = count_bits(param->mask);
show_cache_info(sum_llc_perf_miss, no_of_bits, param->span);
return 0;
}
void cat_test_cleanup(void)
{
remove(RESULT_FILE_NAME1);
remove(RESULT_FILE_NAME2);
}
int cat_perf_miss_val(int cpu_no, int n, char *cache_type)
{
unsigned long l_mask, l_mask_1;
int ret, pipefd[2], sibling_cpu_no;
char pipe_message;
pid_t bm_pid;
cache_size = 0;
ret = remount_resctrlfs(true);
if (ret)
return ret;
if (!validate_resctrl_feature_request("cat"))
return -1;
/* Get default cbm mask for L3/L2 cache */
ret = get_cbm_mask(cache_type);
if (ret)
return ret;
long_mask = strtoul(cbm_mask, NULL, 16);
/* Get L3/L2 cache size */
ret = get_cache_size(cpu_no, cache_type, &cache_size);
if (ret)
return ret;
printf("cache size :%lu\n", cache_size);
/* Get max number of bits from default-cabm mask */
count_of_bits = count_bits(long_mask);
if (n < 1 || n > count_of_bits - 1) {
printf("Invalid input value for no_of_bits n!\n");
printf("Please Enter value in range 1 to %d\n",
count_of_bits - 1);
return -1;
}
/* Get core id from same socket for running another thread */
sibling_cpu_no = get_core_sibling(cpu_no);
if (sibling_cpu_no < 0)
return -1;
struct resctrl_val_param param = {
.resctrl_val = "cat",
.cpu_no = cpu_no,
.mum_resctrlfs = 0,
.setup = cat_setup,
};
l_mask = long_mask >> n;
l_mask_1 = ~l_mask & long_mask;
/* Set param values for parent thread which will be allocated bitmask
* with (max_bits - n) bits
*/
param.span = cache_size * (count_of_bits - n) / count_of_bits;
strcpy(param.ctrlgrp, "c2");
strcpy(param.mongrp, "m2");
strcpy(param.filename, RESULT_FILE_NAME2);
param.mask = l_mask;
param.num_of_runs = 0;
if (pipe(pipefd)) {
perror("# Unable to create pipe");
return errno;
}
bm_pid = fork();
/* Set param values for child thread which will be allocated bitmask
* with n bits
*/
if (bm_pid == 0) {
param.mask = l_mask_1;
strcpy(param.ctrlgrp, "c1");
strcpy(param.mongrp, "m1");
param.span = cache_size * n / count_of_bits;
strcpy(param.filename, RESULT_FILE_NAME1);
param.num_of_runs = 0;
param.cpu_no = sibling_cpu_no;
}
remove(param.filename);
ret = cat_val(&param);
if (ret)
return ret;
ret = check_results(&param);
if (ret)
return ret;
if (bm_pid == 0) {
/* Tell parent that child is ready */
close(pipefd[0]);
pipe_message = 1;
if (write(pipefd[1], &pipe_message, sizeof(pipe_message)) <
sizeof(pipe_message)) {
close(pipefd[1]);
perror("# failed signaling parent process");
return errno;
}
close(pipefd[1]);
while (1)
;
} else {
/* Parent waits for child to be ready. */
close(pipefd[1]);
pipe_message = 0;
while (pipe_message != 1) {
if (read(pipefd[0], &pipe_message,
sizeof(pipe_message)) < sizeof(pipe_message)) {
perror("# failed reading from child process");
break;
}
}
close(pipefd[0]);
kill(bm_pid, SIGKILL);
}
cat_test_cleanup();
if (bm_pid)
umount_resctrlfs();
return 0;
}
// SPDX-License-Identifier: GPL-2.0
/*
* Cache Monitoring Technology (CQM) test
*
* Copyright (C) 2018 Intel Corporation
*
* Authors:
* Sai Praneeth Prakhya <sai.praneeth.prakhya@intel.com>,
* Fenghua Yu <fenghua.yu@intel.com>
*/
#include "resctrl.h"
#include <unistd.h>
#define RESULT_FILE_NAME "result_cqm"
#define NUM_OF_RUNS 5
#define MAX_DIFF 2000000
#define MAX_DIFF_PERCENT 15
int count_of_bits;
char cbm_mask[256];
unsigned long long_mask;
unsigned long cache_size;
static int cqm_setup(int num, ...)
{
struct resctrl_val_param *p;
va_list param;
va_start(param, num);
p = va_arg(param, struct resctrl_val_param *);
va_end(param);
/* Run NUM_OF_RUNS times */
if (p->num_of_runs >= NUM_OF_RUNS)
return -1;
p->num_of_runs++;
return 0;
}
static void show_cache_info(unsigned long sum_llc_occu_resc, int no_of_bits,
unsigned long span)
{
unsigned long avg_llc_occu_resc = 0;
float diff_percent;
long avg_diff = 0;
bool res;
avg_llc_occu_resc = sum_llc_occu_resc / (NUM_OF_RUNS - 1);
avg_diff = (long)abs(span - avg_llc_occu_resc);
diff_percent = (((float)span - avg_llc_occu_resc) / span) * 100;
if ((abs((int)diff_percent) <= MAX_DIFF_PERCENT) ||
(abs(avg_diff) <= MAX_DIFF))
res = true;
else
res = false;
printf("%sok CQM: diff within %d, %d\%%\n", res ? "" : "not ",
MAX_DIFF, (int)MAX_DIFF_PERCENT);
printf("# diff: %ld\n", avg_diff);
printf("# percent diff=%d\n", abs((int)diff_percent));
printf("# Results are displayed in (Bytes)\n");
printf("# Number of bits: %d\n", no_of_bits);
printf("# Avg_llc_occu_resc: %lu\n", avg_llc_occu_resc);
printf("# llc_occu_exp (span): %lu\n", span);
tests_run++;
}
static int check_results(struct resctrl_val_param *param, int no_of_bits)
{
char *token_array[8], temp[512];
unsigned long sum_llc_occu_resc = 0;
int runs = 0;
FILE *fp;
printf("# checking for pass/fail\n");
fp = fopen(param->filename, "r");
if (!fp) {
perror("# Error in opening file\n");
return errno;
}
while (fgets(temp, 1024, fp)) {
char *token = strtok(temp, ":\t");
int fields = 0;
while (token) {
token_array[fields++] = token;
token = strtok(NULL, ":\t");
}
/* Field 3 is llc occ resc value */
if (runs > 0)
sum_llc_occu_resc += strtoul(token_array[3], NULL, 0);
runs++;
}
fclose(fp);
show_cache_info(sum_llc_occu_resc, no_of_bits, param->span);
return 0;
}
void cqm_test_cleanup(void)
{
remove(RESULT_FILE_NAME);
}
int cqm_resctrl_val(int cpu_no, int n, char **benchmark_cmd)
{
int ret, mum_resctrlfs;
cache_size = 0;
mum_resctrlfs = 1;
ret = remount_resctrlfs(mum_resctrlfs);
if (ret)
return ret;
if (!validate_resctrl_feature_request("cqm"))
return -1;
ret = get_cbm_mask("L3");
if (ret)
return ret;
long_mask = strtoul(cbm_mask, NULL, 16);
ret = get_cache_size(cpu_no, "L3", &cache_size);
if (ret)
return ret;
printf("cache size :%lu\n", cache_size);
count_of_bits = count_bits(long_mask);
if (n < 1 || n > count_of_bits) {
printf("Invalid input value for number_of_bits n!\n");
printf("Please Enter value in range 1 to %d\n", count_of_bits);
return -1;
}
struct resctrl_val_param param = {
.resctrl_val = "cqm",
.ctrlgrp = "c1",
.mongrp = "m1",
.cpu_no = cpu_no,
.mum_resctrlfs = 0,
.filename = RESULT_FILE_NAME,
.mask = ~(long_mask << n) & long_mask,
.span = cache_size * n / count_of_bits,
.num_of_runs = 0,
.setup = cqm_setup,
};
if (strcmp(benchmark_cmd[0], "fill_buf") == 0)
sprintf(benchmark_cmd[1], "%lu", param.span);
remove(RESULT_FILE_NAME);
ret = resctrl_val(benchmark_cmd, &param);
if (ret)
return ret;
ret = check_results(&param, n);
if (ret)
return ret;
cqm_test_cleanup();
return 0;
}
// SPDX-License-Identifier: GPL-2.0
/*
* fill_buf benchmark
*
* Copyright (C) 2018 Intel Corporation
*
* Authors:
* Sai Praneeth Prakhya <sai.praneeth.prakhya@intel.com>,
* Fenghua Yu <fenghua.yu@intel.com>
*/
#include <stdio.h>
#include <unistd.h>
#include <stdlib.h>
#include <sys/types.h>
#include <sys/wait.h>
#include <inttypes.h>
#include <malloc.h>
#include <string.h>
#include "resctrl.h"
#define CL_SIZE (64)
#define PAGE_SIZE (4 * 1024)
#define MB (1024 * 1024)
static unsigned char *startptr;
static void sb(void)
{
#if defined(__i386) || defined(__x86_64)
asm volatile("sfence\n\t"
: : : "memory");
#endif
}
static void ctrl_handler(int signo)
{
free(startptr);
printf("\nEnding\n");
sb();
exit(EXIT_SUCCESS);
}
static void cl_flush(void *p)
{
#if defined(__i386) || defined(__x86_64)
asm volatile("clflush (%0)\n\t"
: : "r"(p) : "memory");
#endif
}
static void mem_flush(void *p, size_t s)
{
char *cp = (char *)p;
size_t i = 0;
s = s / CL_SIZE; /* mem size in cache lines */
for (i = 0; i < s; i++)
cl_flush(&cp[i * CL_SIZE]);
sb();
}
static void *malloc_and_init_memory(size_t s)
{
uint64_t *p64;
size_t s64;
void *p = memalign(PAGE_SIZE, s);
p64 = (uint64_t *)p;
s64 = s / sizeof(uint64_t);
while (s64 > 0) {
*p64 = (uint64_t)rand();
p64 += (CL_SIZE / sizeof(uint64_t));
s64 -= (CL_SIZE / sizeof(uint64_t));
}
return p;
}
static int fill_one_span_read(unsigned char *start_ptr, unsigned char *end_ptr)
{
unsigned char sum, *p;
sum = 0;
p = start_ptr;
while (p < end_ptr) {
sum += *p;
p += (CL_SIZE / 2);
}
return sum;
}
static
void fill_one_span_write(unsigned char *start_ptr, unsigned char *end_ptr)
{
unsigned char *p;
p = start_ptr;
while (p < end_ptr) {
*p = '1';
p += (CL_SIZE / 2);
}
}
static int fill_cache_read(unsigned char *start_ptr, unsigned char *end_ptr,
char *resctrl_val)
{
int ret = 0;
FILE *fp;
while (1) {
ret = fill_one_span_read(start_ptr, end_ptr);
if (!strcmp(resctrl_val, "cat"))
break;
}
/* Consume read result so that reading memory is not optimized out. */
fp = fopen("/dev/null", "w");
if (!fp)
perror("Unable to write to /dev/null");
fprintf(fp, "Sum: %d ", ret);
fclose(fp);
return 0;
}
static int fill_cache_write(unsigned char *start_ptr, unsigned char *end_ptr,
char *resctrl_val)
{
while (1) {
fill_one_span_write(start_ptr, end_ptr);
if (!strcmp(resctrl_val, "cat"))
break;
}
return 0;
}
static int
fill_cache(unsigned long long buf_size, int malloc_and_init, int memflush,
int op, char *resctrl_val)
{
unsigned char *start_ptr, *end_ptr;
unsigned long long i;
int ret;
if (malloc_and_init)
start_ptr = malloc_and_init_memory(buf_size);
else
start_ptr = malloc(buf_size);
if (!start_ptr)
return -1;
startptr = start_ptr;
end_ptr = start_ptr + buf_size;
/*
* It's better to touch the memory once to avoid any compiler
* optimizations
*/
if (!malloc_and_init) {
for (i = 0; i < buf_size; i++)
*start_ptr++ = (unsigned char)rand();
}
start_ptr = startptr;
/* Flush the memory before using to avoid "cache hot pages" effect */
if (memflush)
mem_flush(start_ptr, buf_size);
if (op == 0)
ret = fill_cache_read(start_ptr, end_ptr, resctrl_val);
else
ret = fill_cache_write(start_ptr, end_ptr, resctrl_val);
if (ret) {
printf("\n Error in fill cache read/write...\n");
return -1;
}
free(startptr);
return 0;
}
int run_fill_buf(unsigned long span, int malloc_and_init_memory,
int memflush, int op, char *resctrl_val)
{
unsigned long long cache_size = span;
int ret;
/* set up ctrl-c handler */
if (signal(SIGINT, ctrl_handler) == SIG_ERR)
printf("Failed to catch SIGINT!\n");
if (signal(SIGHUP, ctrl_handler) == SIG_ERR)
printf("Failed to catch SIGHUP!\n");
ret = fill_cache(cache_size, malloc_and_init_memory, memflush, op,
resctrl_val);
if (ret) {
printf("\n Error in fill cache\n");
return -1;
}
return 0;
}
// SPDX-License-Identifier: GPL-2.0
/*
* Memory Bandwidth Allocation (MBA) test
*
* Copyright (C) 2018 Intel Corporation
*
* Authors:
* Sai Praneeth Prakhya <sai.praneeth.prakhya@intel.com>,
* Fenghua Yu <fenghua.yu@intel.com>
*/
#include "resctrl.h"
#define RESULT_FILE_NAME "result_mba"
#define NUM_OF_RUNS 5
#define MAX_DIFF 300
#define ALLOCATION_MAX 100
#define ALLOCATION_MIN 10
#define ALLOCATION_STEP 10
/*
* Change schemata percentage from 100 to 10%. Write schemata to specified
* con_mon grp, mon_grp in resctrl FS.
* For each allocation, run 5 times in order to get average values.
*/
static int mba_setup(int num, ...)
{
static int runs_per_allocation, allocation = 100;
struct resctrl_val_param *p;
char allocation_str[64];
va_list param;
va_start(param, num);
p = va_arg(param, struct resctrl_val_param *);
va_end(param);
if (runs_per_allocation >= NUM_OF_RUNS)
runs_per_allocation = 0;
/* Only set up schemata once every NUM_OF_RUNS of allocations */
if (runs_per_allocation++ != 0)
return 0;
if (allocation < ALLOCATION_MIN || allocation > ALLOCATION_MAX)
return -1;
sprintf(allocation_str, "%d", allocation);
write_schemata(p->ctrlgrp, allocation_str, p->cpu_no, p->resctrl_val);
allocation -= ALLOCATION_STEP;
return 0;
}
static void show_mba_info(unsigned long *bw_imc, unsigned long *bw_resc)
{
int allocation, runs;
bool failed = false;
printf("# Results are displayed in (MB)\n");
/* Memory bandwidth from 100% down to 10% */
for (allocation = 0; allocation < ALLOCATION_MAX / ALLOCATION_STEP;
allocation++) {
unsigned long avg_bw_imc, avg_bw_resc;
unsigned long sum_bw_imc = 0, sum_bw_resc = 0;
unsigned long avg_diff;
/*
* The first run is discarded due to inaccurate value from
* phase transition.
*/
for (runs = NUM_OF_RUNS * allocation + 1;
runs < NUM_OF_RUNS * allocation + NUM_OF_RUNS ; runs++) {
sum_bw_imc += bw_imc[runs];
sum_bw_resc += bw_resc[runs];
}
avg_bw_imc = sum_bw_imc / (NUM_OF_RUNS - 1);
avg_bw_resc = sum_bw_resc / (NUM_OF_RUNS - 1);
avg_diff = labs((long)(avg_bw_resc - avg_bw_imc));
printf("%sok MBA schemata percentage %u smaller than %d %%\n",
avg_diff > MAX_DIFF ? "not " : "",
ALLOCATION_MAX - ALLOCATION_STEP * allocation,
MAX_DIFF);
tests_run++;
printf("# avg_diff: %lu\n", avg_diff);
printf("# avg_bw_imc: %lu\n", avg_bw_imc);
printf("# avg_bw_resc: %lu\n", avg_bw_resc);
if (avg_diff > MAX_DIFF)
failed = true;
}
printf("%sok schemata change using MBA%s\n", failed ? "not " : "",
failed ? " # at least one test failed" : "");
tests_run++;
}
static int check_results(void)
{
char *token_array[8], output[] = RESULT_FILE_NAME, temp[512];
unsigned long bw_imc[1024], bw_resc[1024];
int runs;
FILE *fp;
fp = fopen(output, "r");
if (!fp) {
perror(output);
return errno;
}
runs = 0;
while (fgets(temp, sizeof(temp), fp)) {
char *token = strtok(temp, ":\t");
int fields = 0;
while (token) {
token_array[fields++] = token;
token = strtok(NULL, ":\t");
}
/* Field 3 is perf imc value */
bw_imc[runs] = strtoul(token_array[3], NULL, 0);
/* Field 5 is resctrl value */
bw_resc[runs] = strtoul(token_array[5], NULL, 0);
runs++;
}
fclose(fp);
show_mba_info(bw_imc, bw_resc);
return 0;
}
void mba_test_cleanup(void)
{
remove(RESULT_FILE_NAME);
}
int mba_schemata_change(int cpu_no, char *bw_report, char **benchmark_cmd)
{
struct resctrl_val_param param = {
.resctrl_val = "mba",
.ctrlgrp = "c1",
.mongrp = "m1",
.cpu_no = cpu_no,
.mum_resctrlfs = 1,
.filename = RESULT_FILE_NAME,
.bw_report = bw_report,
.setup = mba_setup
};
int ret;
remove(RESULT_FILE_NAME);
if (!validate_resctrl_feature_request("mba"))
return -1;
ret = resctrl_val(benchmark_cmd, &param);
if (ret)
return ret;
ret = check_results();
if (ret)
return ret;
mba_test_cleanup();
return 0;
}
// SPDX-License-Identifier: GPL-2.0
/*
* Memory Bandwidth Monitoring (MBM) test
*
* Copyright (C) 2018 Intel Corporation
*
* Authors:
* Sai Praneeth Prakhya <sai.praneeth.prakhya@intel.com>,
* Fenghua Yu <fenghua.yu@intel.com>
*/
#include "resctrl.h"
#define RESULT_FILE_NAME "result_mbm"
#define MAX_DIFF 300
#define NUM_OF_RUNS 5
static void
show_bw_info(unsigned long *bw_imc, unsigned long *bw_resc, int span)
{
unsigned long avg_bw_imc = 0, avg_bw_resc = 0;
unsigned long sum_bw_imc = 0, sum_bw_resc = 0;
long avg_diff = 0;
int runs;
/*
* Discard the first value which is inaccurate due to monitoring setup
* transition phase.
*/
for (runs = 1; runs < NUM_OF_RUNS ; runs++) {
sum_bw_imc += bw_imc[runs];
sum_bw_resc += bw_resc[runs];
}
avg_bw_imc = sum_bw_imc / 4;
avg_bw_resc = sum_bw_resc / 4;
avg_diff = avg_bw_resc - avg_bw_imc;
printf("%sok MBM: diff within %d%%\n",
labs(avg_diff) > MAX_DIFF ? "not " : "", MAX_DIFF);
tests_run++;
printf("# avg_diff: %lu\n", labs(avg_diff));
printf("# Span (MB): %d\n", span);
printf("# avg_bw_imc: %lu\n", avg_bw_imc);
printf("# avg_bw_resc: %lu\n", avg_bw_resc);
}
static int check_results(int span)
{
unsigned long bw_imc[NUM_OF_RUNS], bw_resc[NUM_OF_RUNS];
char temp[1024], *token_array[8];
char output[] = RESULT_FILE_NAME;
int runs;
FILE *fp;
printf("# Checking for pass/fail\n");
fp = fopen(output, "r");
if (!fp) {
perror(output);
return errno;
}
runs = 0;
while (fgets(temp, sizeof(temp), fp)) {
char *token = strtok(temp, ":\t");
int i = 0;
while (token) {
token_array[i++] = token;
token = strtok(NULL, ":\t");
}
bw_resc[runs] = strtoul(token_array[5], NULL, 0);
bw_imc[runs] = strtoul(token_array[3], NULL, 0);
runs++;
}
show_bw_info(bw_imc, bw_resc, span);
fclose(fp);
return 0;
}
static int mbm_setup(int num, ...)
{
struct resctrl_val_param *p;
static int num_of_runs;
va_list param;
int ret = 0;
/* Run NUM_OF_RUNS times */
if (num_of_runs++ >= NUM_OF_RUNS)
return -1;
va_start(param, num);
p = va_arg(param, struct resctrl_val_param *);
va_end(param);
/* Set up schemata with 100% allocation on the first run. */
if (num_of_runs == 0)
ret = write_schemata(p->ctrlgrp, "100", p->cpu_no,
p->resctrl_val);
return ret;
}
void mbm_test_cleanup(void)
{
remove(RESULT_FILE_NAME);
}
int mbm_bw_change(int span, int cpu_no, char *bw_report, char **benchmark_cmd)
{
struct resctrl_val_param param = {
.resctrl_val = "mbm",
.ctrlgrp = "c1",
.mongrp = "m1",
.span = span,
.cpu_no = cpu_no,
.mum_resctrlfs = 1,
.filename = RESULT_FILE_NAME,
.bw_report = bw_report,
.setup = mbm_setup
};
int ret;
remove(RESULT_FILE_NAME);
if (!validate_resctrl_feature_request("mbm"))
return -1;
ret = resctrl_val(benchmark_cmd, &param);
if (ret)
return ret;
ret = check_results(span);
if (ret)
return ret;
mbm_test_cleanup();
return 0;
}
/* SPDX-License-Identifier: GPL-2.0 */
#define _GNU_SOURCE
#ifndef RESCTRL_H
#define RESCTRL_H
#include <stdio.h>
#include <stdarg.h>
#include <math.h>
#include <errno.h>
#include <sched.h>
#include <stdlib.h>
#include <unistd.h>
#include <string.h>
#include <signal.h>
#include <dirent.h>
#include <stdbool.h>
#include <sys/stat.h>
#include <sys/ioctl.h>
#include <sys/mount.h>
#include <sys/types.h>
#include <sys/wait.h>
#include <sys/select.h>
#include <sys/time.h>
#include <sys/eventfd.h>
#include <asm/unistd.h>
#include <linux/perf_event.h>
#define MB (1024 * 1024)
#define RESCTRL_PATH "/sys/fs/resctrl"
#define PHYS_ID_PATH "/sys/devices/system/cpu/cpu"
#define CBM_MASK_PATH "/sys/fs/resctrl/info"
#define PARENT_EXIT(err_msg) \
do { \
perror(err_msg); \
kill(ppid, SIGKILL); \
exit(EXIT_FAILURE); \
} while (0)
/*
* resctrl_val_param: resctrl test parameters
* @resctrl_val: Resctrl feature (Eg: mbm, mba.. etc)
* @ctrlgrp: Name of the control monitor group (con_mon grp)
* @mongrp: Name of the monitor group (mon grp)
 * @cpu_no: CPU number to which the benchmark would be bound
* @span: Memory bytes accessed in each benchmark iteration
* @mum_resctrlfs: Should the resctrl FS be remounted?
* @filename: Name of file to which the o/p should be written
* @bw_report: Bandwidth report type (reads vs writes)
* @setup: Call back function to setup test environment
*/
struct resctrl_val_param {
char *resctrl_val;
char ctrlgrp[64];
char mongrp[64];
int cpu_no;
unsigned long span;
int mum_resctrlfs;
char filename[64];
char *bw_report;
unsigned long mask;
int num_of_runs;
int (*setup)(int num, ...);
};
pid_t bm_pid, ppid;
int tests_run;
char llc_occup_path[1024];
bool is_amd;
bool check_resctrlfs_support(void);
int filter_dmesg(void);
int remount_resctrlfs(bool mum_resctrlfs);
int get_resource_id(int cpu_no, int *resource_id);
int umount_resctrlfs(void);
int validate_bw_report_request(char *bw_report);
bool validate_resctrl_feature_request(char *resctrl_val);
char *fgrep(FILE *inf, const char *str);
int taskset_benchmark(pid_t bm_pid, int cpu_no);
void run_benchmark(int signum, siginfo_t *info, void *ucontext);
int write_schemata(char *ctrlgrp, char *schemata, int cpu_no,
char *resctrl_val);
int write_bm_pid_to_resctrl(pid_t bm_pid, char *ctrlgrp, char *mongrp,
char *resctrl_val);
int perf_event_open(struct perf_event_attr *hw_event, pid_t pid, int cpu,
int group_fd, unsigned long flags);
int run_fill_buf(unsigned long span, int malloc_and_init_memory, int memflush,
int op, char *resctrl_va);
int resctrl_val(char **benchmark_cmd, struct resctrl_val_param *param);
int mbm_bw_change(int span, int cpu_no, char *bw_report, char **benchmark_cmd);
void tests_cleanup(void);
void mbm_test_cleanup(void);
int mba_schemata_change(int cpu_no, char *bw_report, char **benchmark_cmd);
void mba_test_cleanup(void);
int get_cbm_mask(char *cache_type);
int get_cache_size(int cpu_no, char *cache_type, unsigned long *cache_size);
void ctrlc_handler(int signum, siginfo_t *info, void *ptr);
int cat_val(struct resctrl_val_param *param);
void cat_test_cleanup(void);
int cat_perf_miss_val(int cpu_no, int no_of_bits, char *cache_type);
int cqm_resctrl_val(int cpu_no, int n, char **benchmark_cmd);
unsigned int count_bits(unsigned long n);
void cqm_test_cleanup(void);
int get_core_sibling(int cpu_no);
int measure_cache_vals(struct resctrl_val_param *param, int bm_pid);
#endif /* RESCTRL_H */
// SPDX-License-Identifier: GPL-2.0
/*
* Resctrl tests
*
* Copyright (C) 2018 Intel Corporation
*
* Authors:
* Sai Praneeth Prakhya <sai.praneeth.prakhya@intel.com>,
* Fenghua Yu <fenghua.yu@intel.com>
*/
#include "resctrl.h"
#define BENCHMARK_ARGS 64
#define BENCHMARK_ARG_SIZE 64
bool is_amd;
void detect_amd(void)
{
FILE *inf = fopen("/proc/cpuinfo", "r");
char *res;
if (!inf)
return;
res = fgrep(inf, "vendor_id");
if (res) {
char *s = strchr(res, ':');
is_amd = s && !strcmp(s, ": AuthenticAMD\n");
free(res);
}
fclose(inf);
}
static void cmd_help(void)
{
printf("usage: resctrl_tests [-h] [-b \"benchmark_cmd [options]\"] [-t test list] [-n no_of_bits]\n");
printf("\t-b benchmark_cmd [options]: run specified benchmark for MBM, MBA and CQM");
printf("\t default benchmark is builtin fill_buf\n");
printf("\t-t test list: run tests specified in the test list, ");
printf("e.g. -t mbm, mba, cqm, cat\n");
printf("\t-n no_of_bits: run cache tests using specified no of bits in cache bit mask\n");
printf("\t-p cpu_no: specify CPU number to run the test. 1 is default\n");
printf("\t-h: help\n");
}
void tests_cleanup(void)
{
mbm_test_cleanup();
mba_test_cleanup();
cqm_test_cleanup();
cat_test_cleanup();
}
int main(int argc, char **argv)
{
bool has_ben = false, mbm_test = true, mba_test = true, cqm_test = true;
int res, c, cpu_no = 1, span = 250, argc_new = argc, i, no_of_bits = 5;
char *benchmark_cmd[BENCHMARK_ARGS], bw_report[64], bm_type[64];
char benchmark_cmd_area[BENCHMARK_ARGS][BENCHMARK_ARG_SIZE];
int ben_ind, ben_count;
bool cat_test = true;
for (i = 0; i < argc; i++) {
if (strcmp(argv[i], "-b") == 0) {
ben_ind = i + 1;
ben_count = argc - ben_ind;
argc_new = ben_ind - 1;
has_ben = true;
break;
}
}
while ((c = getopt(argc_new, argv, "ht:b:")) != -1) {
char *token;
switch (c) {
case 't':
token = strtok(optarg, ",");
mbm_test = false;
mba_test = false;
cqm_test = false;
cat_test = false;
while (token) {
if (!strcmp(token, "mbm")) {
mbm_test = true;
} else if (!strcmp(token, "mba")) {
mba_test = true;
} else if (!strcmp(token, "cqm")) {
cqm_test = true;
} else if (!strcmp(token, "cat")) {
cat_test = true;
} else {
printf("invalid argument\n");
return -1;
}
token = strtok(NULL, ":\t");
}
break;
case 'p':
cpu_no = atoi(optarg);
break;
case 'n':
no_of_bits = atoi(optarg);
break;
case 'h':
cmd_help();
return 0;
default:
printf("invalid argument\n");
return -1;
}
}
printf("TAP version 13\n");
/*
* Typically we need root privileges, because:
* 1. We write to resctrl FS
* 2. We execute perf commands
*/
if (geteuid() != 0)
printf("# WARNING: not running as root, tests may fail.\n");
/* Detect AMD vendor */
detect_amd();
if (has_ben) {
/* Extract benchmark command from command line. */
for (i = ben_ind; i < argc; i++) {
benchmark_cmd[i - ben_ind] = benchmark_cmd_area[i];
sprintf(benchmark_cmd[i - ben_ind], "%s", argv[i]);
}
benchmark_cmd[ben_count] = NULL;
} else {
/* If no benchmark is given by "-b" argument, use fill_buf. */
for (i = 0; i < 6; i++)
benchmark_cmd[i] = benchmark_cmd_area[i];
strcpy(benchmark_cmd[0], "fill_buf");
sprintf(benchmark_cmd[1], "%d", span);
strcpy(benchmark_cmd[2], "1");
strcpy(benchmark_cmd[3], "1");
strcpy(benchmark_cmd[4], "0");
strcpy(benchmark_cmd[5], "");
benchmark_cmd[6] = NULL;
}
sprintf(bw_report, "reads");
sprintf(bm_type, "fill_buf");
check_resctrlfs_support();
filter_dmesg();
if (!is_amd && mbm_test) {
printf("# Starting MBM BW change ...\n");
if (!has_ben)
sprintf(benchmark_cmd[5], "%s", "mba");
res = mbm_bw_change(span, cpu_no, bw_report, benchmark_cmd);
printf("%sok MBM: bw change\n", res ? "not " : "");
mbm_test_cleanup();
tests_run++;
}
if (!is_amd && mba_test) {
printf("# Starting MBA Schemata change ...\n");
if (!has_ben)
sprintf(benchmark_cmd[1], "%d", span);
res = mba_schemata_change(cpu_no, bw_report, benchmark_cmd);
printf("%sok MBA: schemata change\n", res ? "not " : "");
mba_test_cleanup();
tests_run++;
}
if (cqm_test) {
printf("# Starting CQM test ...\n");
if (!has_ben)
sprintf(benchmark_cmd[5], "%s", "cqm");
res = cqm_resctrl_val(cpu_no, no_of_bits, benchmark_cmd);
printf("%sok CQM: test\n", res ? "not " : "");
cqm_test_cleanup();
tests_run++;
}
if (cat_test) {
printf("# Starting CAT test ...\n");
res = cat_perf_miss_val(cpu_no, no_of_bits, "L3");
printf("%sok CAT: test\n", res ? "not " : "");
tests_run++;
cat_test_cleanup();
}
printf("1..%d\n", tests_run);
return 0;
}
 # SPDX-License-Identifier: GPL-2.0
-all:
-include ../lib.mk
-.PHONY: all clean
-BINARIES := seccomp_bpf seccomp_benchmark
 CFLAGS += -Wl,-no-as-needed -Wall
-seccomp_bpf: seccomp_bpf.c ../kselftest_harness.h
-	$(CC) $(CFLAGS) $(LDFLAGS) $< -lpthread -o $@
-TEST_PROGS += $(BINARIES)
-EXTRA_CLEAN := $(BINARIES)
-all: $(BINARIES)
+LDFLAGS += -lpthread
+TEST_GEN_PROGS := seccomp_bpf seccomp_benchmark
+include ../lib.mk
@@ -913,7 +913,7 @@ TEST(ERRNO_order)
 	EXPECT_EQ(12, errno);
 }
-FIXTURE_DATA(TRAP) {
+FIXTURE(TRAP) {
 	struct sock_fprog prog;
 };
@@ -1024,7 +1024,7 @@ TEST_F(TRAP, handler)
 	EXPECT_NE(0, (unsigned long)sigsys->_call_addr);
 }
-FIXTURE_DATA(precedence) {
+FIXTURE(precedence) {
 	struct sock_fprog allow;
 	struct sock_fprog log;
 	struct sock_fprog trace;
@@ -1513,7 +1513,7 @@ void tracer_poke(struct __test_metadata *_metadata, pid_t tracee, int status,
 	EXPECT_EQ(0, ret);
 }
-FIXTURE_DATA(TRACE_poke) {
+FIXTURE(TRACE_poke) {
 	struct sock_fprog prog;
 	pid_t tracer;
 	long poked;
@@ -1821,7 +1821,7 @@ void tracer_ptrace(struct __test_metadata *_metadata, pid_t tracee,
 	change_syscall(_metadata, tracee, -1, -ESRCH);
 }
-FIXTURE_DATA(TRACE_syscall) {
+FIXTURE(TRACE_syscall) {
 	struct sock_fprog prog;
 	pid_t tracer, mytid, mypid, parent;
 };
@@ -2326,7 +2326,7 @@ struct tsync_sibling {
 	} \
 } while (0)
-FIXTURE_DATA(TSYNC) {
+FIXTURE(TSYNC) {
 	struct sock_fprog root_prog, apply_prog;
 	struct tsync_sibling sibling[TSYNC_SIBLINGS];
 	sem_t started;
......
@@ -11,7 +11,6 @@
 #include <sys/wait.h>
 #include <time.h>
 #include <unistd.h>
-#include <time.h>
 #include <string.h>
 #include "log.h"
......
@@ -12,7 +12,6 @@
 #include <sys/types.h>
 #include <time.h>
 #include <unistd.h>
-#include <time.h>
 #include "log.h"
 #include "timens.h"
......
@@ -10,7 +10,6 @@
 #include <sys/types.h>
 #include <time.h>
 #include <unistd.h>
-#include <time.h>
 #include <string.h>
 #include "log.h"
......
@@ -11,7 +11,6 @@
 #include <stdio.h>
 #include <stdint.h>
 #include <signal.h>
-#include <time.h>
 #include "log.h"
 #include "timens.h"
......