Commit e4e31cf0 authored by David S. Miller

Merge git://git.kernel.org/pub/scm/linux/kernel/git/bpf/bpf-next

Daniel Borkmann says:

====================
pull-request: bpf-next 2018-03-03

The following pull-request contains BPF updates for your *net-next* tree.

The main changes are:

1) Extend bpftool to build up CFG information of eBPF programs and add an
   option to dump this in DOT format such that this can later be used with
   DOT graph tools (xdot, graphviz, etc.) to visualize it. Part of the
   analysis performed is sub-program detection and basic-block partitioning,
   from Jiong.

2) Multiple enhancements for bpftool's batch mode: the parser now understands
   comments (#), continuation lines (\), and arguments enclosed in quotes.
   Also allow reading commands from stdin by passing '-' as the input file,
   all from Quentin.

3) Improve BPF kselftests by i) unifying the rlimit handling into a helper
   that is then used by all tests, and ii) adding support for testing tail
   calls to test_verifier plus adding tests covering all corner cases. The
   latter is especially useful for testing JITs, from Daniel.

4) Remove x64 JIT's bpf_flush_icache() since flush_icache_range() is a noop
   on x64, from Daniel.

5) Fix one more occasion in BPF samples where we do not detach the BPF program
   from the cgroup after completion, from Prashant.
====================
Signed-off-by: David S. Miller <davem@davemloft.net>
parents b72c8a7e c51a6379
......@@ -11,10 +11,10 @@
#include <linux/netdevice.h>
#include <linux/filter.h>
#include <linux/if_vlan.h>
#include <asm/cacheflush.h>
#include <linux/bpf.h>
#include <asm/set_memory.h>
#include <asm/nospec-branch.h>
#include <linux/bpf.h>
/*
* assembly code in arch/x86/net/bpf_jit.S
......@@ -103,16 +103,6 @@ static int bpf_size_to_x86_bytes(int bpf_size)
#define X86_JLE 0x7E
#define X86_JG 0x7F
static void bpf_flush_icache(void *start, void *end)
{
mm_segment_t old_fs = get_fs();
set_fs(KERNEL_DS);
smp_wmb();
flush_icache_range((unsigned long)start, (unsigned long)end);
set_fs(old_fs);
}
#define CHOOSE_LOAD_FUNC(K, func) \
((int)K < 0 ? ((int)K >= SKF_LL_OFF ? func##_negative_offset : func) : func##_positive_offset)
......@@ -1266,7 +1256,6 @@ struct bpf_prog *bpf_int_jit_compile(struct bpf_prog *prog)
bpf_jit_dump(prog->len, proglen, pass + 1, image);
if (image) {
bpf_flush_icache(header, image + proglen);
if (!prog->is_func || extra_pass) {
bpf_jit_binary_lock_ro(header);
} else {
......
......@@ -61,6 +61,7 @@ cleanup_and_exit()
[ -n "$msg" ] && echo "ERROR: $msg"
test_cgrp2_sock -d ${CGRP_MNT}/sockopts
ip li del cgrp2_sock
umount ${CGRP_MNT}
......
......@@ -28,6 +28,9 @@ function attach_bpf {
}
function cleanup {
if [ -d /tmp/cgroupv2/foo ]; then
test_cgrp2_sock -d /tmp/cgroupv2/foo
fi
ip link del veth0b
ip netns delete at_ns0
umount /tmp/cgroupv2
......
......@@ -21,7 +21,7 @@ MAP COMMANDS
=============
| **bpftool** **prog { show | list }** [*PROG*]
| **bpftool** **prog dump xlated** *PROG* [{**file** *FILE* | **opcodes**}]
| **bpftool** **prog dump xlated** *PROG* [{**file** *FILE* | **opcodes** | **visual**}]
| **bpftool** **prog dump jited** *PROG* [{**file** *FILE* | **opcodes**}]
| **bpftool** **prog pin** *PROG* *FILE*
| **bpftool** **prog load** *OBJ* *FILE*
......@@ -39,12 +39,18 @@ DESCRIPTION
Output will start with program ID followed by program type and
zero or more named attributes (depending on kernel version).
**bpftool prog dump xlated** *PROG* [{ **file** *FILE* | **opcodes** }]
Dump eBPF instructions of the program from the kernel.
If *FILE* is specified image will be written to a file,
otherwise it will be disassembled and printed to stdout.
**bpftool prog dump xlated** *PROG* [{ **file** *FILE* | **opcodes** | **visual** }]
Dump eBPF instructions of the program from the kernel. By
default, eBPF will be disassembled and printed to standard
output in human-readable format. In this case, **opcodes**
controls if raw opcodes should be printed as well.
**opcodes** controls if raw opcodes will be printed.
If **file** is specified, the binary image will instead be
written to *FILE*.
If **visual** is specified, a control flow graph (CFG) will be
built instead, and the eBPF instructions will be presented as a
CFG in DOT format on standard output.
**bpftool prog dump jited** *PROG* [{ **file** *FILE* | **opcodes** }]
Dump jited image (host machine code) of the program.
......
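As a usage sketch (illustration only, not part of the patch; the program id is just an example), the DOT output of the new **visual** mode is meant to be handed to graphviz:

    # bpftool prog dump xlated id 3 visual > cfg.dot
    # dot -Tsvg cfg.dot -o cfg.svg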
......@@ -147,7 +147,7 @@ _bpftool()
# Deal with simplest keywords
case $prev in
help|key|opcodes)
help|key|opcodes|visual)
return 0
;;
tag)
......@@ -223,11 +223,16 @@ _bpftool()
return 0
;;
*)
_bpftool_once_attr 'file'
_bpftool_once_attr 'file'
if _bpftool_search_list 'xlated'; then
COMPREPLY+=( $( compgen -W 'opcodes visual' -- \
"$cur" ) )
else
COMPREPLY+=( $( compgen -W 'opcodes' -- \
"$cur" ) )
return 0
;;
fi
return 0
;;
esac
;;
pin)
......
This diff is collapsed.
// SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
/*
* Copyright (C) 2018 Netronome Systems, Inc.
*
* This software is dual licensed under the GNU General Public License Version 2,
* June 1991 as shown in the file COPYING in the top-level directory of this
* source tree or the BSD 2-Clause License provided below. You have the
* option to license this software under the complete terms of either license.
*
* The BSD 2-Clause License:
*
* Redistribution and use in source and binary forms, with or
* without modification, are permitted provided that the following
* conditions are met:
*
* 1. Redistributions of source code must retain the above
* copyright notice, this list of conditions and the following
* disclaimer.
*
* 2. Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following
* disclaimer in the documentation and/or other materials
* provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
* LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
*/
#ifndef __BPF_TOOL_CFG_H
#define __BPF_TOOL_CFG_H
void dump_xlated_cfg(void *buf, unsigned int len);
#endif /* __BPF_TOOL_CFG_H */
......@@ -46,6 +46,9 @@
#include "main.h"
#define BATCH_LINE_LEN_MAX 65536
#define BATCH_ARG_NB_MAX 4096
const char *bin_name;
static int last_argc;
static char **last_argv;
......@@ -157,6 +160,54 @@ void fprint_hex(FILE *f, void *arg, unsigned int n, const char *sep)
}
}
/* Split command line into argument vector. */
static int make_args(char *line, char *n_argv[], int maxargs, int cmd_nb)
{
static const char ws[] = " \t\r\n";
char *cp = line;
int n_argc = 0;
while (*cp) {
/* Skip leading whitespace. */
cp += strspn(cp, ws);
if (*cp == '\0')
break;
if (n_argc >= (maxargs - 1)) {
p_err("too many arguments to command %d", cmd_nb);
return -1;
}
/* Word begins with quote. */
if (*cp == '\'' || *cp == '"') {
char quote = *cp++;
n_argv[n_argc++] = cp;
/* Find ending quote. */
cp = strchr(cp, quote);
if (!cp) {
p_err("unterminated quoted string in command %d",
cmd_nb);
return -1;
}
} else {
n_argv[n_argc++] = cp;
/* Find end of word. */
cp += strcspn(cp, ws);
if (*cp == '\0')
break;
}
/* Separate words. */
*cp++ = 0;
}
n_argv[n_argc] = NULL;
return n_argc;
}
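/* Illustration only (not part of the patch): after do_batch() has stripped
 * comments and joined continuation lines, a batch line such as
 *     prog dump xlated id 10 file "/tmp/a b.bin"
 * is split by make_args() into
 *     { "prog", "dump", "xlated", "id", "10", "file", "/tmp/a b.bin" }
 * with n_argc == 7: the surrounding quotes are consumed and the embedded
 * space is preserved.
 */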
static int do_batch(int argc, char **argv);
static const struct cmd cmds[] = {
......@@ -171,11 +222,12 @@ static const struct cmd cmds[] = {
static int do_batch(int argc, char **argv)
{
char buf[BATCH_LINE_LEN_MAX], contline[BATCH_LINE_LEN_MAX];
char *n_argv[BATCH_ARG_NB_MAX];
unsigned int lines = 0;
char *n_argv[4096];
char buf[65536];
int n_argc;
FILE *fp;
char *cp;
int err;
int i;
......@@ -191,7 +243,10 @@ static int do_batch(int argc, char **argv)
}
NEXT_ARG();
fp = fopen(*argv, "r");
if (!strcmp(*argv, "-"))
fp = stdin;
else
fp = fopen(*argv, "r");
if (!fp) {
p_err("Can't open file (%s): %s", *argv, strerror(errno));
return -1;
......@@ -200,27 +255,45 @@ static int do_batch(int argc, char **argv)
if (json_output)
jsonw_start_array(json_wtr);
while (fgets(buf, sizeof(buf), fp)) {
cp = strchr(buf, '#');
if (cp)
*cp = '\0';
if (strlen(buf) == sizeof(buf) - 1) {
errno = E2BIG;
break;
}
n_argc = 0;
n_argv[n_argc] = strtok(buf, " \t\n");
while (n_argv[n_argc]) {
n_argc++;
if (n_argc == ARRAY_SIZE(n_argv)) {
p_err("line %d has too many arguments, skip",
/* Append continuation lines if any (coming after a line ending
* with '\' in the batch file).
*/
while ((cp = strstr(buf, "\\\n")) != NULL) {
if (!fgets(contline, sizeof(contline), fp) ||
strlen(contline) == 0) {
p_err("missing continuation line on command %d",
lines);
n_argc = 0;
break;
err = -1;
goto err_close;
}
cp = strchr(contline, '#');
if (cp)
*cp = '\0';
if (strlen(buf) + strlen(contline) + 1 > sizeof(buf)) {
p_err("command %d is too long", lines);
err = -1;
goto err_close;
}
n_argv[n_argc] = strtok(NULL, " \t\n");
buf[strlen(buf) - 2] = '\0';
strcat(buf, contline);
}
n_argc = make_args(buf, n_argv, BATCH_ARG_NB_MAX, lines);
if (!n_argc)
continue;
if (n_argc < 0)
goto err_close;
if (json_output) {
jsonw_start_object(json_wtr);
......@@ -247,11 +320,12 @@ static int do_batch(int argc, char **argv)
p_err("reading batch file failed: %s", strerror(errno));
err = -1;
} else {
p_info("processed %d lines", lines);
p_info("processed %d commands", lines);
err = 0;
}
err_close:
fclose(fp);
if (fp != stdin)
fclose(fp);
if (json_output)
jsonw_end_array(json_wtr);
......
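As a quick usage sketch (hypothetical command, not taken from the patch), the new '-' handling lets batch commands be piped in directly:

    # echo "prog show" | bpftool batch file -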
......@@ -47,8 +47,9 @@
#include <bpf.h>
#include <libbpf.h>
#include "cfg.h"
#include "main.h"
#include "disasm.h"
#include "xlated_dumper.h"
static const char * const prog_type_name[] = {
[BPF_PROG_TYPE_UNSPEC] = "unspec",
......@@ -407,259 +408,6 @@ static int do_show(int argc, char **argv)
return err;
}
#define SYM_MAX_NAME 256
struct kernel_sym {
unsigned long address;
char name[SYM_MAX_NAME];
};
struct dump_data {
unsigned long address_call_base;
struct kernel_sym *sym_mapping;
__u32 sym_count;
char scratch_buff[SYM_MAX_NAME];
};
static int kernel_syms_cmp(const void *sym_a, const void *sym_b)
{
return ((struct kernel_sym *)sym_a)->address -
((struct kernel_sym *)sym_b)->address;
}
static void kernel_syms_load(struct dump_data *dd)
{
struct kernel_sym *sym;
char buff[256];
void *tmp, *address;
FILE *fp;
fp = fopen("/proc/kallsyms", "r");
if (!fp)
return;
while (!feof(fp)) {
if (!fgets(buff, sizeof(buff), fp))
break;
tmp = realloc(dd->sym_mapping,
(dd->sym_count + 1) *
sizeof(*dd->sym_mapping));
if (!tmp) {
out:
free(dd->sym_mapping);
dd->sym_mapping = NULL;
fclose(fp);
return;
}
dd->sym_mapping = tmp;
sym = &dd->sym_mapping[dd->sym_count];
if (sscanf(buff, "%p %*c %s", &address, sym->name) != 2)
continue;
sym->address = (unsigned long)address;
if (!strcmp(sym->name, "__bpf_call_base")) {
dd->address_call_base = sym->address;
/* sysctl kernel.kptr_restrict was set */
if (!sym->address)
goto out;
}
if (sym->address)
dd->sym_count++;
}
fclose(fp);
qsort(dd->sym_mapping, dd->sym_count,
sizeof(*dd->sym_mapping), kernel_syms_cmp);
}
static void kernel_syms_destroy(struct dump_data *dd)
{
free(dd->sym_mapping);
}
static struct kernel_sym *kernel_syms_search(struct dump_data *dd,
unsigned long key)
{
struct kernel_sym sym = {
.address = key,
};
return dd->sym_mapping ?
bsearch(&sym, dd->sym_mapping, dd->sym_count,
sizeof(*dd->sym_mapping), kernel_syms_cmp) : NULL;
}
static void print_insn(struct bpf_verifier_env *env, const char *fmt, ...)
{
va_list args;
va_start(args, fmt);
vprintf(fmt, args);
va_end(args);
}
static const char *print_call_pcrel(struct dump_data *dd,
struct kernel_sym *sym,
unsigned long address,
const struct bpf_insn *insn)
{
if (sym)
snprintf(dd->scratch_buff, sizeof(dd->scratch_buff),
"%+d#%s", insn->off, sym->name);
else
snprintf(dd->scratch_buff, sizeof(dd->scratch_buff),
"%+d#0x%lx", insn->off, address);
return dd->scratch_buff;
}
static const char *print_call_helper(struct dump_data *dd,
struct kernel_sym *sym,
unsigned long address)
{
if (sym)
snprintf(dd->scratch_buff, sizeof(dd->scratch_buff),
"%s", sym->name);
else
snprintf(dd->scratch_buff, sizeof(dd->scratch_buff),
"0x%lx", address);
return dd->scratch_buff;
}
static const char *print_call(void *private_data,
const struct bpf_insn *insn)
{
struct dump_data *dd = private_data;
unsigned long address = dd->address_call_base + insn->imm;
struct kernel_sym *sym;
sym = kernel_syms_search(dd, address);
if (insn->src_reg == BPF_PSEUDO_CALL)
return print_call_pcrel(dd, sym, address, insn);
else
return print_call_helper(dd, sym, address);
}
static const char *print_imm(void *private_data,
const struct bpf_insn *insn,
__u64 full_imm)
{
struct dump_data *dd = private_data;
if (insn->src_reg == BPF_PSEUDO_MAP_FD)
snprintf(dd->scratch_buff, sizeof(dd->scratch_buff),
"map[id:%u]", insn->imm);
else
snprintf(dd->scratch_buff, sizeof(dd->scratch_buff),
"0x%llx", (unsigned long long)full_imm);
return dd->scratch_buff;
}
static void dump_xlated_plain(struct dump_data *dd, void *buf,
unsigned int len, bool opcodes)
{
const struct bpf_insn_cbs cbs = {
.cb_print = print_insn,
.cb_call = print_call,
.cb_imm = print_imm,
.private_data = dd,
};
struct bpf_insn *insn = buf;
bool double_insn = false;
unsigned int i;
for (i = 0; i < len / sizeof(*insn); i++) {
if (double_insn) {
double_insn = false;
continue;
}
double_insn = insn[i].code == (BPF_LD | BPF_IMM | BPF_DW);
printf("% 4d: ", i);
print_bpf_insn(&cbs, NULL, insn + i, true);
if (opcodes) {
printf(" ");
fprint_hex(stdout, insn + i, 8, " ");
if (double_insn && i < len - 1) {
printf(" ");
fprint_hex(stdout, insn + i + 1, 8, " ");
}
printf("\n");
}
}
}
static void print_insn_json(struct bpf_verifier_env *env, const char *fmt, ...)
{
unsigned int l = strlen(fmt);
char chomped_fmt[l];
va_list args;
va_start(args, fmt);
if (l > 0) {
strncpy(chomped_fmt, fmt, l - 1);
chomped_fmt[l - 1] = '\0';
}
jsonw_vprintf_enquote(json_wtr, chomped_fmt, args);
va_end(args);
}
static void dump_xlated_json(struct dump_data *dd, void *buf,
unsigned int len, bool opcodes)
{
const struct bpf_insn_cbs cbs = {
.cb_print = print_insn_json,
.cb_call = print_call,
.cb_imm = print_imm,
.private_data = dd,
};
struct bpf_insn *insn = buf;
bool double_insn = false;
unsigned int i;
jsonw_start_array(json_wtr);
for (i = 0; i < len / sizeof(*insn); i++) {
if (double_insn) {
double_insn = false;
continue;
}
double_insn = insn[i].code == (BPF_LD | BPF_IMM | BPF_DW);
jsonw_start_object(json_wtr);
jsonw_name(json_wtr, "disasm");
print_bpf_insn(&cbs, NULL, insn + i, true);
if (opcodes) {
jsonw_name(json_wtr, "opcodes");
jsonw_start_object(json_wtr);
jsonw_name(json_wtr, "code");
jsonw_printf(json_wtr, "\"0x%02hhx\"", insn[i].code);
jsonw_name(json_wtr, "src_reg");
jsonw_printf(json_wtr, "\"0x%hhx\"", insn[i].src_reg);
jsonw_name(json_wtr, "dst_reg");
jsonw_printf(json_wtr, "\"0x%hhx\"", insn[i].dst_reg);
jsonw_name(json_wtr, "off");
print_hex_data_json((uint8_t *)(&insn[i].off), 2);
jsonw_name(json_wtr, "imm");
if (double_insn && i < len - 1)
print_hex_data_json((uint8_t *)(&insn[i].imm),
12);
else
print_hex_data_json((uint8_t *)(&insn[i].imm),
4);
jsonw_end_object(json_wtr);
}
jsonw_end_object(json_wtr);
}
jsonw_end_array(json_wtr);
}
static int do_dump(int argc, char **argv)
{
struct bpf_prog_info info = {};
......@@ -668,6 +416,7 @@ static int do_dump(int argc, char **argv)
unsigned int buf_size;
char *filepath = NULL;
bool opcodes = false;
bool visual = false;
unsigned char *buf;
__u32 *member_len;
__u64 *member_ptr;
......@@ -706,6 +455,9 @@ static int do_dump(int argc, char **argv)
} else if (is_prefix(*argv, "opcodes")) {
opcodes = true;
NEXT_ARG();
} else if (is_prefix(*argv, "visual")) {
visual = true;
NEXT_ARG();
}
if (argc) {
......@@ -777,27 +529,30 @@ static int do_dump(int argc, char **argv)
if (json_output)
jsonw_null(json_wtr);
} else {
if (member_len == &info.jited_prog_len) {
const char *name = NULL;
if (info.ifindex) {
name = ifindex_to_bfd_name_ns(info.ifindex,
info.netns_dev,
info.netns_ino);
if (!name)
goto err_free;
}
disasm_print_insn(buf, *member_len, opcodes, name);
} else {
kernel_syms_load(&dd);
if (json_output)
dump_xlated_json(&dd, buf, *member_len, opcodes);
else
dump_xlated_plain(&dd, buf, *member_len, opcodes);
kernel_syms_destroy(&dd);
} else if (member_len == &info.jited_prog_len) {
const char *name = NULL;
if (info.ifindex) {
name = ifindex_to_bfd_name_ns(info.ifindex,
info.netns_dev,
info.netns_ino);
if (!name)
goto err_free;
}
disasm_print_insn(buf, *member_len, opcodes, name);
} else if (visual) {
if (json_output)
jsonw_null(json_wtr);
else
dump_xlated_cfg(buf, *member_len);
} else {
kernel_syms_load(&dd);
if (json_output)
dump_xlated_json(&dd, buf, *member_len, opcodes);
else
dump_xlated_plain(&dd, buf, *member_len, opcodes);
kernel_syms_destroy(&dd);
}
free(buf);
......@@ -851,7 +606,7 @@ static int do_help(int argc, char **argv)
fprintf(stderr,
"Usage: %s %s { show | list } [PROG]\n"
" %s %s dump xlated PROG [{ file FILE | opcodes }]\n"
" %s %s dump xlated PROG [{ file FILE | opcodes | visual }]\n"
" %s %s dump jited PROG [{ file FILE | opcodes }]\n"
" %s %s pin PROG FILE\n"
" %s %s load OBJ FILE\n"
......
// SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
/*
* Copyright (C) 2018 Netronome Systems, Inc.
*
* This software is dual licensed under the GNU General Public License Version 2,
* June 1991 as shown in the file COPYING in the top-level directory of this
* source tree or the BSD 2-Clause License provided below. You have the
* option to license this software under the complete terms of either license.
*
* The BSD 2-Clause License:
*
* Redistribution and use in source and binary forms, with or
* without modification, are permitted provided that the following
* conditions are met:
*
* 1. Redistributions of source code must retain the above
* copyright notice, this list of conditions and the following
* disclaimer.
*
* 2. Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following
* disclaimer in the documentation and/or other materials
* provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
* LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
*/
#include <stdarg.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <sys/types.h>
#include "disasm.h"
#include "json_writer.h"
#include "main.h"
#include "xlated_dumper.h"
static int kernel_syms_cmp(const void *sym_a, const void *sym_b)
{
return ((struct kernel_sym *)sym_a)->address -
((struct kernel_sym *)sym_b)->address;
}
void kernel_syms_load(struct dump_data *dd)
{
struct kernel_sym *sym;
char buff[256];
void *tmp, *address;
FILE *fp;
fp = fopen("/proc/kallsyms", "r");
if (!fp)
return;
while (!feof(fp)) {
if (!fgets(buff, sizeof(buff), fp))
break;
tmp = realloc(dd->sym_mapping,
(dd->sym_count + 1) *
sizeof(*dd->sym_mapping));
if (!tmp) {
out:
free(dd->sym_mapping);
dd->sym_mapping = NULL;
fclose(fp);
return;
}
dd->sym_mapping = tmp;
sym = &dd->sym_mapping[dd->sym_count];
if (sscanf(buff, "%p %*c %s", &address, sym->name) != 2)
continue;
sym->address = (unsigned long)address;
if (!strcmp(sym->name, "__bpf_call_base")) {
dd->address_call_base = sym->address;
/* sysctl kernel.kptr_restrict was set */
if (!sym->address)
goto out;
}
if (sym->address)
dd->sym_count++;
}
fclose(fp);
qsort(dd->sym_mapping, dd->sym_count,
sizeof(*dd->sym_mapping), kernel_syms_cmp);
}
void kernel_syms_destroy(struct dump_data *dd)
{
free(dd->sym_mapping);
}
static struct kernel_sym *kernel_syms_search(struct dump_data *dd,
unsigned long key)
{
struct kernel_sym sym = {
.address = key,
};
return dd->sym_mapping ?
bsearch(&sym, dd->sym_mapping, dd->sym_count,
sizeof(*dd->sym_mapping), kernel_syms_cmp) : NULL;
}
static void print_insn(struct bpf_verifier_env *env, const char *fmt, ...)
{
va_list args;
va_start(args, fmt);
vprintf(fmt, args);
va_end(args);
}
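/* Callback for the CFG dump: print one formatted instruction, escaped for use
 * inside a DOT label. A '\n' becomes "\l\" followed by the newline (left-align
 * the row, then continue the string), and '<', '>', '|', '&' get a leading
 * backslash.
 */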
static void
print_insn_for_graph(struct bpf_verifier_env *env, const char *fmt, ...)
{
char buf[64], *p;
va_list args;
va_start(args, fmt);
vsnprintf(buf, sizeof(buf), fmt, args);
va_end(args);
p = buf;
while (*p != '\0') {
if (*p == '\n') {
memmove(p + 3, p, strlen(buf) + 1 - (p - buf));
/* Align each instruction dump row left. */
*p++ = '\\';
*p++ = 'l';
/* Output multiline concatenation. */
*p++ = '\\';
} else if (*p == '<' || *p == '>' || *p == '|' || *p == '&') {
memmove(p + 1, p, strlen(buf) + 1 - (p - buf));
/* Escape special character. */
*p++ = '\\';
}
p++;
}
printf("%s", buf);
}
static void print_insn_json(struct bpf_verifier_env *env, const char *fmt, ...)
{
unsigned int l = strlen(fmt);
char chomped_fmt[l];
va_list args;
va_start(args, fmt);
if (l > 0) {
strncpy(chomped_fmt, fmt, l - 1);
chomped_fmt[l - 1] = '\0';
}
jsonw_vprintf_enquote(json_wtr, chomped_fmt, args);
va_end(args);
}
static const char *print_call_pcrel(struct dump_data *dd,
struct kernel_sym *sym,
unsigned long address,
const struct bpf_insn *insn)
{
if (sym)
snprintf(dd->scratch_buff, sizeof(dd->scratch_buff),
"%+d#%s", insn->off, sym->name);
else
snprintf(dd->scratch_buff, sizeof(dd->scratch_buff),
"%+d#0x%lx", insn->off, address);
return dd->scratch_buff;
}
static const char *print_call_helper(struct dump_data *dd,
struct kernel_sym *sym,
unsigned long address)
{
if (sym)
snprintf(dd->scratch_buff, sizeof(dd->scratch_buff),
"%s", sym->name);
else
snprintf(dd->scratch_buff, sizeof(dd->scratch_buff),
"0x%lx", address);
return dd->scratch_buff;
}
static const char *print_call(void *private_data,
const struct bpf_insn *insn)
{
struct dump_data *dd = private_data;
unsigned long address = dd->address_call_base + insn->imm;
struct kernel_sym *sym;
sym = kernel_syms_search(dd, address);
if (insn->src_reg == BPF_PSEUDO_CALL)
return print_call_pcrel(dd, sym, address, insn);
else
return print_call_helper(dd, sym, address);
}
static const char *print_imm(void *private_data,
const struct bpf_insn *insn,
__u64 full_imm)
{
struct dump_data *dd = private_data;
if (insn->src_reg == BPF_PSEUDO_MAP_FD)
snprintf(dd->scratch_buff, sizeof(dd->scratch_buff),
"map[id:%u]", insn->imm);
else
snprintf(dd->scratch_buff, sizeof(dd->scratch_buff),
"0x%llx", (unsigned long long)full_imm);
return dd->scratch_buff;
}
void dump_xlated_json(struct dump_data *dd, void *buf, unsigned int len,
bool opcodes)
{
const struct bpf_insn_cbs cbs = {
.cb_print = print_insn_json,
.cb_call = print_call,
.cb_imm = print_imm,
.private_data = dd,
};
struct bpf_insn *insn = buf;
bool double_insn = false;
unsigned int i;
jsonw_start_array(json_wtr);
for (i = 0; i < len / sizeof(*insn); i++) {
if (double_insn) {
double_insn = false;
continue;
}
double_insn = insn[i].code == (BPF_LD | BPF_IMM | BPF_DW);
jsonw_start_object(json_wtr);
jsonw_name(json_wtr, "disasm");
print_bpf_insn(&cbs, NULL, insn + i, true);
if (opcodes) {
jsonw_name(json_wtr, "opcodes");
jsonw_start_object(json_wtr);
jsonw_name(json_wtr, "code");
jsonw_printf(json_wtr, "\"0x%02hhx\"", insn[i].code);
jsonw_name(json_wtr, "src_reg");
jsonw_printf(json_wtr, "\"0x%hhx\"", insn[i].src_reg);
jsonw_name(json_wtr, "dst_reg");
jsonw_printf(json_wtr, "\"0x%hhx\"", insn[i].dst_reg);
jsonw_name(json_wtr, "off");
print_hex_data_json((uint8_t *)(&insn[i].off), 2);
jsonw_name(json_wtr, "imm");
if (double_insn && i < len - 1)
print_hex_data_json((uint8_t *)(&insn[i].imm),
12);
else
print_hex_data_json((uint8_t *)(&insn[i].imm),
4);
jsonw_end_object(json_wtr);
}
jsonw_end_object(json_wtr);
}
jsonw_end_array(json_wtr);
}
void dump_xlated_plain(struct dump_data *dd, void *buf, unsigned int len,
bool opcodes)
{
const struct bpf_insn_cbs cbs = {
.cb_print = print_insn,
.cb_call = print_call,
.cb_imm = print_imm,
.private_data = dd,
};
struct bpf_insn *insn = buf;
bool double_insn = false;
unsigned int i;
for (i = 0; i < len / sizeof(*insn); i++) {
if (double_insn) {
double_insn = false;
continue;
}
double_insn = insn[i].code == (BPF_LD | BPF_IMM | BPF_DW);
printf("% 4d: ", i);
print_bpf_insn(&cbs, NULL, insn + i, true);
if (opcodes) {
printf(" ");
fprint_hex(stdout, insn + i, 8, " ");
if (double_insn && i < len - 1) {
printf(" ");
fprint_hex(stdout, insn + i + 1, 8, " ");
}
printf("\n");
}
}
}
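/* Dump one contiguous instruction range (buf_start..buf_end, inclusive), as
 * used for basic blocks in the CFG output: each instruction goes through
 * print_insn_for_graph() and instructions are joined with " | " so that a
 * block can be rendered as a single DOT label.
 */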
void dump_xlated_for_graph(struct dump_data *dd, void *buf_start, void *buf_end,
unsigned int start_idx)
{
const struct bpf_insn_cbs cbs = {
.cb_print = print_insn_for_graph,
.cb_call = print_call,
.cb_imm = print_imm,
.private_data = dd,
};
struct bpf_insn *insn_start = buf_start;
struct bpf_insn *insn_end = buf_end;
struct bpf_insn *cur = insn_start;
for (; cur <= insn_end; cur++) {
printf("% 4d: ", (int)(cur - insn_start + start_idx));
print_bpf_insn(&cbs, NULL, cur, true);
if (cur != insn_end)
printf(" | ");
}
}
// SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
/*
* Copyright (C) 2018 Netronome Systems, Inc.
*
* This software is dual licensed under the GNU General Public License Version 2,
* June 1991 as shown in the file COPYING in the top-level directory of this
* source tree or the BSD 2-Clause License provided below. You have the
* option to license this software under the complete terms of either license.
*
* The BSD 2-Clause License:
*
* Redistribution and use in source and binary forms, with or
* without modification, are permitted provided that the following
* conditions are met:
*
* 1. Redistributions of source code must retain the above
* copyright notice, this list of conditions and the following
* disclaimer.
*
* 2. Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following
* disclaimer in the documentation and/or other materials
* provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
* LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
*/
#ifndef __BPF_TOOL_XLATED_DUMPER_H
#define __BPF_TOOL_XLATED_DUMPER_H
#define SYM_MAX_NAME 256
struct kernel_sym {
unsigned long address;
char name[SYM_MAX_NAME];
};
struct dump_data {
unsigned long address_call_base;
struct kernel_sym *sym_mapping;
__u32 sym_count;
char scratch_buff[SYM_MAX_NAME];
};
void kernel_syms_load(struct dump_data *dd);
void kernel_syms_destroy(struct dump_data *dd);
void dump_xlated_json(struct dump_data *dd, void *buf, unsigned int len,
bool opcodes);
void dump_xlated_plain(struct dump_data *dd, void *buf, unsigned int len,
bool opcodes);
void dump_xlated_for_graph(struct dump_data *dd, void *buf, void *buf_end,
unsigned int start_index);
#endif
#include <sys/resource.h>
#include <stdio.h>
static __attribute__((constructor)) void bpf_rlimit_ctor(void)
{
struct rlimit rlim_old, rlim_new = {
.rlim_cur = RLIM_INFINITY,
.rlim_max = RLIM_INFINITY,
};
getrlimit(RLIMIT_MEMLOCK, &rlim_old);
/* For the sake of running the test cases, we temporarily
* set rlimit to infinity in order for kernel to focus on
* errors from actual test cases and not getting noise
* from hitting memlock limits. The limit is on per-process
* basis and not a global one, hence destructor not really
* needed here.
*/
if (setrlimit(RLIMIT_MEMLOCK, &rlim_new) < 0) {
perror("Unable to lift memlock rlimit");
/* Trying out lower limit, but expect potential test
* case failures from this!
*/
rlim_new.rlim_cur = rlim_old.rlim_cur + (1UL << 20);
rlim_new.rlim_max = rlim_old.rlim_max + (1UL << 20);
setrlimit(RLIMIT_MEMLOCK, &rlim_new);
}
}
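/* Usage note (illustration): a test only needs to #include "bpf_rlimit.h"; the
 * constructor above then runs before main() and lifts RLIMIT_MEMLOCK, which is
 * why the per-test setrlimit() calls are dropped in the hunks below.
 */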
......@@ -9,8 +9,6 @@
#include <stddef.h>
#include <stdbool.h>
#include <sys/resource.h>
#include <linux/unistd.h>
#include <linux/filter.h>
#include <linux/bpf_perf_event.h>
......@@ -19,6 +17,7 @@
#include <bpf/bpf.h>
#include "../../../include/linux/filter.h"
#include "bpf_rlimit.h"
#ifndef ARRAY_SIZE
# define ARRAY_SIZE(x) (sizeof(x) / sizeof((x)[0]))
......@@ -702,9 +701,6 @@ static int do_test(unsigned int from, unsigned int to)
int main(int argc, char **argv)
{
unsigned int from = 0, to = ARRAY_SIZE(tests);
struct rlimit rinf = { RLIM_INFINITY, RLIM_INFINITY };
setrlimit(RLIMIT_MEMLOCK, &rinf);
if (argc == 3) {
unsigned int l = atoi(argv[argc - 2]);
......
......@@ -11,13 +11,13 @@
#include <errno.h>
#include <assert.h>
#include <sys/time.h>
#include <sys/resource.h>
#include <linux/bpf.h>
#include <bpf/bpf.h>
#include <bpf/libbpf.h>
#include "cgroup_helpers.h"
#include "bpf_rlimit.h"
#define DEV_CGROUP_PROG "./dev_cgroup.o"
......@@ -25,15 +25,11 @@
int main(int argc, char **argv)
{
struct rlimit limit = { RLIM_INFINITY, RLIM_INFINITY };
struct bpf_object *obj;
int error = EXIT_FAILURE;
int prog_fd, cgroup_fd;
__u32 prog_cnt;
if (setrlimit(RLIMIT_MEMLOCK, &limit) < 0)
perror("Unable to lift memlock rlimit");
if (bpf_prog_load(DEV_CGROUP_PROG, BPF_PROG_TYPE_CGROUP_DEVICE,
&obj, &prog_fd)) {
printf("Failed to load DEV_CGROUP program\n");
......
......@@ -22,10 +22,11 @@
#include <unistd.h>
#include <arpa/inet.h>
#include <sys/time.h>
#include <sys/resource.h>
#include <bpf/bpf.h>
#include "bpf_util.h"
#include "bpf_rlimit.h"
struct tlpm_node {
struct tlpm_node *next;
......@@ -736,17 +737,11 @@ static void test_lpm_multi_thread(void)
int main(void)
{
struct rlimit limit = { RLIM_INFINITY, RLIM_INFINITY };
int i, ret;
int i;
/* we want predictable, pseudo random tests */
srand(0xf00ba1);
/* allow unlimited locked memory */
ret = setrlimit(RLIMIT_MEMLOCK, &limit);
if (ret < 0)
perror("Unable to lift memlock rlimit");
test_lpm_basic();
test_lpm_order();
......@@ -755,11 +750,8 @@ int main(void)
test_lpm_map(i);
test_lpm_ipaddr();
test_lpm_delete();
test_lpm_get_next_key();
test_lpm_multi_thread();
printf("test_lpm: OK\n");
......
......@@ -16,10 +16,11 @@
#include <time.h>
#include <sys/wait.h>
#include <sys/resource.h>
#include <bpf/bpf.h>
#include "bpf_util.h"
#include "bpf_rlimit.h"
#define LOCAL_FREE_TARGET (128)
#define PERCPU_FREE_TARGET (4)
......@@ -613,7 +614,6 @@ static void test_lru_sanity6(int map_type, int map_flags, int tgt_free)
int main(int argc, char **argv)
{
struct rlimit r = {RLIM_INFINITY, RLIM_INFINITY};
int map_types[] = {BPF_MAP_TYPE_LRU_HASH,
BPF_MAP_TYPE_LRU_PERCPU_HASH};
int map_flags[] = {0, BPF_F_NO_COMMON_LRU};
......@@ -621,8 +621,6 @@ int main(int argc, char **argv)
setbuf(stdout, NULL);
assert(!setrlimit(RLIMIT_MEMLOCK, &r));
nr_cpus = bpf_num_possible_cpus();
assert(nr_cpus != -1);
printf("nr_cpus:%d\n\n", nr_cpus);
......
......@@ -17,13 +17,14 @@
#include <stdlib.h>
#include <sys/wait.h>
#include <sys/resource.h>
#include <linux/bpf.h>
#include <bpf/bpf.h>
#include <bpf/libbpf.h>
#include "bpf_util.h"
#include "bpf_rlimit.h"
static int map_flags;
......@@ -1126,10 +1127,6 @@ static void run_all_tests(void)
int main(void)
{
struct rlimit rinf = { RLIM_INFINITY, RLIM_INFINITY };
setrlimit(RLIMIT_MEMLOCK, &rinf);
map_flags = 0;
run_all_tests();
......
......@@ -26,7 +26,6 @@ typedef __u16 __sum16;
#include <sys/ioctl.h>
#include <sys/wait.h>
#include <sys/resource.h>
#include <sys/types.h>
#include <fcntl.h>
......@@ -34,9 +33,11 @@ typedef __u16 __sum16;
#include <linux/err.h>
#include <bpf/bpf.h>
#include <bpf/libbpf.h>
#include "test_iptunnel_common.h"
#include "bpf_util.h"
#include "bpf_endian.h"
#include "bpf_rlimit.h"
static int error_cnt, pass_cnt;
......@@ -965,10 +966,6 @@ static void test_stacktrace_map()
int main(void)
{
struct rlimit rinf = { RLIM_INFINITY, RLIM_INFINITY };
setrlimit(RLIMIT_MEMLOCK, &rinf);
test_pkt_access();
test_xdp();
test_l4lb_all();
......
......@@ -12,7 +12,6 @@
#include <assert.h>
#include <sys/socket.h>
#include <sys/resource.h>
#include <linux/filter.h>
#include <linux/bpf.h>
......@@ -21,6 +20,7 @@
#include <bpf/bpf.h>
#include "../../../include/linux/filter.h"
#include "bpf_rlimit.h"
static struct bpf_insn prog[BPF_MAXINSNS];
......@@ -184,11 +184,9 @@ static void do_test(uint32_t *tests, int start_insns, int fd_map,
int main(void)
{
struct rlimit rinf = { RLIM_INFINITY, RLIM_INFINITY };
uint32_t tests = 0;
int i, fd_map;
setrlimit(RLIMIT_MEMLOCK, &rinf);
fd_map = bpf_create_map(BPF_MAP_TYPE_HASH, sizeof(int),
sizeof(int), 1, BPF_F_NO_PREALLOC);
assert(fd_map > 0);
......
......@@ -12,13 +12,13 @@
#include <linux/bpf.h>
#include <sys/ioctl.h>
#include <sys/time.h>
#include <sys/resource.h>
#include <sys/types.h>
#include <sys/stat.h>
#include <fcntl.h>
#include <bpf/bpf.h>
#include <bpf/libbpf.h>
#include "bpf_util.h"
#include "bpf_rlimit.h"
#include <linux/perf_event.h>
#include "test_tcpbpf.h"
......@@ -44,7 +44,6 @@ static int bpf_find_map(const char *test, struct bpf_object *obj,
int main(int argc, char **argv)
{
struct rlimit limit = { RLIM_INFINITY, RLIM_INFINITY };
const char *file = "test_tcpbpf_kern.o";
struct tcpbpf_globals g = {0};
int cg_fd, prog_fd, map_fd;
......@@ -57,9 +56,6 @@ int main(int argc, char **argv)
int pid;
int rv;
if (setrlimit(RLIMIT_MEMLOCK, &limit) < 0)
perror("Unable to lift memlock rlimit");
if (argc > 1 && strcmp(argv[1], "-d") == 0)
debug_flag = true;
......
......@@ -24,7 +24,6 @@
#include <limits.h>
#include <sys/capability.h>
#include <sys/resource.h>
#include <linux/unistd.h>
#include <linux/filter.h>
......@@ -41,7 +40,7 @@
# define CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS 1
# endif
#endif
#include "bpf_rlimit.h"
#include "../../../include/linux/filter.h"
#ifndef ARRAY_SIZE
......@@ -2589,6 +2588,62 @@ static struct bpf_test tests[] = {
.result_unpriv = REJECT,
.result = ACCEPT,
},
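/* The tail_call tests below run against the prog array set up by
 * create_prog_array() (see further down): slot 0 holds a program returning 42,
 * slot 1 holds a program that tail-calls itself until the tail call limit is
 * hit and then returns 41. An empty or out-of-range index makes
 * bpf_tail_call() fall through, so the caller's own return value (1 or 2
 * below) is observed.
 */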
{
"runtime/jit: tail_call within bounds, prog once",
.insns = {
BPF_MOV64_IMM(BPF_REG_3, 0),
BPF_LD_MAP_FD(BPF_REG_2, 0),
BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
BPF_FUNC_tail_call),
BPF_MOV64_IMM(BPF_REG_0, 1),
BPF_EXIT_INSN(),
},
.fixup_prog = { 1 },
.result = ACCEPT,
.retval = 42,
},
{
"runtime/jit: tail_call within bounds, prog loop",
.insns = {
BPF_MOV64_IMM(BPF_REG_3, 1),
BPF_LD_MAP_FD(BPF_REG_2, 0),
BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
BPF_FUNC_tail_call),
BPF_MOV64_IMM(BPF_REG_0, 1),
BPF_EXIT_INSN(),
},
.fixup_prog = { 1 },
.result = ACCEPT,
.retval = 41,
},
{
"runtime/jit: tail_call within bounds, no prog",
.insns = {
BPF_MOV64_IMM(BPF_REG_3, 2),
BPF_LD_MAP_FD(BPF_REG_2, 0),
BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
BPF_FUNC_tail_call),
BPF_MOV64_IMM(BPF_REG_0, 1),
BPF_EXIT_INSN(),
},
.fixup_prog = { 1 },
.result = ACCEPT,
.retval = 1,
},
{
"runtime/jit: tail_call out of bounds",
.insns = {
BPF_MOV64_IMM(BPF_REG_3, 256),
BPF_LD_MAP_FD(BPF_REG_2, 0),
BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
BPF_FUNC_tail_call),
BPF_MOV64_IMM(BPF_REG_0, 2),
BPF_EXIT_INSN(),
},
.fixup_prog = { 1 },
.result = ACCEPT,
.retval = 2,
},
{
"runtime/jit: pass negative index to tail_call",
.insns = {
......@@ -2596,11 +2651,12 @@ static struct bpf_test tests[] = {
BPF_LD_MAP_FD(BPF_REG_2, 0),
BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
BPF_FUNC_tail_call),
BPF_MOV64_IMM(BPF_REG_0, 0),
BPF_MOV64_IMM(BPF_REG_0, 2),
BPF_EXIT_INSN(),
},
.fixup_prog = { 1 },
.result = ACCEPT,
.retval = 2,
},
{
"runtime/jit: pass > 32bit index to tail_call",
......@@ -2609,11 +2665,12 @@ static struct bpf_test tests[] = {
BPF_LD_MAP_FD(BPF_REG_2, 0),
BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
BPF_FUNC_tail_call),
BPF_MOV64_IMM(BPF_REG_0, 0),
BPF_MOV64_IMM(BPF_REG_0, 2),
BPF_EXIT_INSN(),
},
.fixup_prog = { 2 },
.result = ACCEPT,
.retval = 42,
},
{
"stack pointer arithmetic",
......@@ -11279,16 +11336,61 @@ static int create_map(uint32_t size_value, uint32_t max_elem)
return fd;
}
static int create_prog_dummy1(void)
{
struct bpf_insn prog[] = {
BPF_MOV64_IMM(BPF_REG_0, 42),
BPF_EXIT_INSN(),
};
return bpf_load_program(BPF_PROG_TYPE_SOCKET_FILTER, prog,
ARRAY_SIZE(prog), "GPL", 0, NULL, 0);
}
static int create_prog_dummy2(int mfd, int idx)
{
struct bpf_insn prog[] = {
BPF_MOV64_IMM(BPF_REG_3, idx),
BPF_LD_MAP_FD(BPF_REG_2, mfd),
BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
BPF_FUNC_tail_call),
BPF_MOV64_IMM(BPF_REG_0, 41),
BPF_EXIT_INSN(),
};
return bpf_load_program(BPF_PROG_TYPE_SOCKET_FILTER, prog,
ARRAY_SIZE(prog), "GPL", 0, NULL, 0);
}
static int create_prog_array(void)
{
int fd;
int p1key = 0, p2key = 1;
int mfd, p1fd, p2fd;
fd = bpf_create_map(BPF_MAP_TYPE_PROG_ARRAY, sizeof(int),
sizeof(int), 4, 0);
if (fd < 0)
mfd = bpf_create_map(BPF_MAP_TYPE_PROG_ARRAY, sizeof(int),
sizeof(int), 4, 0);
if (mfd < 0) {
printf("Failed to create prog array '%s'!\n", strerror(errno));
return -1;
}
return fd;
p1fd = create_prog_dummy1();
p2fd = create_prog_dummy2(mfd, p2key);
if (p1fd < 0 || p2fd < 0)
goto out;
if (bpf_map_update_elem(mfd, &p1key, &p1fd, BPF_ANY) < 0)
goto out;
if (bpf_map_update_elem(mfd, &p2key, &p2fd, BPF_ANY) < 0)
goto out;
close(p2fd);
close(p1fd);
return mfd;
out:
close(p2fd);
close(p1fd);
close(mfd);
return -1;
}
static int create_map_in_map(void)
......@@ -11543,8 +11645,6 @@ static int do_test(bool unpriv, unsigned int from, unsigned int to)
int main(int argc, char **argv)
{
struct rlimit rinf = { RLIM_INFINITY, RLIM_INFINITY };
struct rlimit rlim = { 1 << 20, 1 << 20 };
unsigned int from = 0, to = ARRAY_SIZE(tests);
bool unpriv = !is_admin();
......@@ -11572,6 +11672,5 @@ int main(int argc, char **argv)
return EXIT_FAILURE;
}
setrlimit(RLIMIT_MEMLOCK, unpriv ? &rlim : &rinf);
return do_test(unpriv, from, to);
}
......@@ -4,7 +4,6 @@
#include <string.h>
#include <unistd.h>
#include <sys/time.h>
#include <sys/resource.h>
#include <linux/bpf.h>
#include <linux/filter.h>
......@@ -12,6 +11,8 @@
#include <bpf/bpf.h>
#include "bpf_rlimit.h"
#define LOG_SIZE (1 << 20)
#define err(str...) printf("ERROR: " str)
......@@ -133,16 +134,11 @@ static void test_log_bad(char *log, size_t log_len, int log_level)
int main(int argc, char **argv)
{
struct rlimit limit = { RLIM_INFINITY, RLIM_INFINITY };
char full_log[LOG_SIZE];
char log[LOG_SIZE];
size_t want_len;
int i;
/* allow unlimited locked memory to have more consistent error code */
if (setrlimit(RLIMIT_MEMLOCK, &limit) < 0)
perror("Unable to lift memlock rlimit");
memset(log, 1, LOG_SIZE);
/* Test incorrect attr */
......