Commit 53f8dd43 authored by Andrii Nakryiko, committed by Alexei Starovoitov

libbpf: Fix global variable relocation

Similarly to commit a0d7da26 ("libbpf: Fix call relocation offset calculation
bug"), relocations against global variables need to take into account the
referenced symbol's st_value, which holds the offset into the corresponding data
section (and, subsequently, the offset into the internal backing map). For static
variables this offset is always zero, and the data offset is completely described
by the respective instruction's imm field.

Convert a bunch of selftests to global variables. Previously they were relying
on `static volatile` trick to ensure Clang doesn't inline static variables,
which with global variables is not necessary anymore.

Fixes: 393cdfbe ("libbpf: Support initialized global variables")
Signed-off-by: Andrii Nakryiko <andriin@fb.com>
Signed-off-by: Alexei Starovoitov <ast@kernel.org>
Acked-by: Yonghong Song <yhs@fb.com>
Link: https://lore.kernel.org/bpf/20191127200651.1381348-1-andriin@fb.com
parent b5684058
...@@ -171,10 +171,8 @@ struct bpf_program { ...@@ -171,10 +171,8 @@ struct bpf_program {
RELO_DATA, RELO_DATA,
} type; } type;
int insn_idx; int insn_idx;
union { int map_idx;
int map_idx; int sym_off;
int text_off;
};
} *reloc_desc; } *reloc_desc;
int nr_reloc; int nr_reloc;
int log_level; int log_level;
...@@ -1824,7 +1822,7 @@ static int bpf_program__record_reloc(struct bpf_program *prog, ...@@ -1824,7 +1822,7 @@ static int bpf_program__record_reloc(struct bpf_program *prog,
} }
reloc_desc->type = RELO_CALL; reloc_desc->type = RELO_CALL;
reloc_desc->insn_idx = insn_idx; reloc_desc->insn_idx = insn_idx;
reloc_desc->text_off = sym->st_value / 8; reloc_desc->sym_off = sym->st_value;
obj->has_pseudo_calls = true; obj->has_pseudo_calls = true;
return 0; return 0;
} }
...@@ -1868,6 +1866,7 @@ static int bpf_program__record_reloc(struct bpf_program *prog, ...@@ -1868,6 +1866,7 @@ static int bpf_program__record_reloc(struct bpf_program *prog,
reloc_desc->type = RELO_LD64; reloc_desc->type = RELO_LD64;
reloc_desc->insn_idx = insn_idx; reloc_desc->insn_idx = insn_idx;
reloc_desc->map_idx = map_idx; reloc_desc->map_idx = map_idx;
reloc_desc->sym_off = 0; /* sym->st_value determines map_idx */
return 0; return 0;
} }
...@@ -1899,6 +1898,7 @@ static int bpf_program__record_reloc(struct bpf_program *prog, ...@@ -1899,6 +1898,7 @@ static int bpf_program__record_reloc(struct bpf_program *prog,
reloc_desc->type = RELO_DATA; reloc_desc->type = RELO_DATA;
reloc_desc->insn_idx = insn_idx; reloc_desc->insn_idx = insn_idx;
reloc_desc->map_idx = map_idx; reloc_desc->map_idx = map_idx;
reloc_desc->sym_off = sym->st_value;
return 0; return 0;
} }
...@@ -3563,8 +3563,8 @@ bpf_program__reloc_text(struct bpf_program *prog, struct bpf_object *obj, ...@@ -3563,8 +3563,8 @@ bpf_program__reloc_text(struct bpf_program *prog, struct bpf_object *obj,
return -LIBBPF_ERRNO__RELOC; return -LIBBPF_ERRNO__RELOC;
if (prog->idx == obj->efile.text_shndx) { if (prog->idx == obj->efile.text_shndx) {
pr_warn("relo in .text insn %d into off %d\n", pr_warn("relo in .text insn %d into off %d (insn #%d)\n",
relo->insn_idx, relo->text_off); relo->insn_idx, relo->sym_off, relo->sym_off / 8);
return -LIBBPF_ERRNO__RELOC; return -LIBBPF_ERRNO__RELOC;
} }
...@@ -3599,7 +3599,7 @@ bpf_program__reloc_text(struct bpf_program *prog, struct bpf_object *obj, ...@@ -3599,7 +3599,7 @@ bpf_program__reloc_text(struct bpf_program *prog, struct bpf_object *obj,
prog->section_name); prog->section_name);
} }
insn = &prog->insns[relo->insn_idx]; insn = &prog->insns[relo->insn_idx];
insn->imm += relo->text_off + prog->main_prog_cnt - relo->insn_idx; insn->imm += relo->sym_off / 8 + prog->main_prog_cnt - relo->insn_idx;
return 0; return 0;
} }
...@@ -3622,31 +3622,26 @@ bpf_program__relocate(struct bpf_program *prog, struct bpf_object *obj) ...@@ -3622,31 +3622,26 @@ bpf_program__relocate(struct bpf_program *prog, struct bpf_object *obj)
return 0; return 0;
for (i = 0; i < prog->nr_reloc; i++) { for (i = 0; i < prog->nr_reloc; i++) {
if (prog->reloc_desc[i].type == RELO_LD64 || struct reloc_desc *relo = &prog->reloc_desc[i];
prog->reloc_desc[i].type == RELO_DATA) {
bool relo_data = prog->reloc_desc[i].type == RELO_DATA;
struct bpf_insn *insns = prog->insns;
int insn_idx, map_idx;
insn_idx = prog->reloc_desc[i].insn_idx; if (relo->type == RELO_LD64 || relo->type == RELO_DATA) {
map_idx = prog->reloc_desc[i].map_idx; struct bpf_insn *insn = &prog->insns[relo->insn_idx];
if (insn_idx + 1 >= (int)prog->insns_cnt) { if (relo->insn_idx + 1 >= (int)prog->insns_cnt) {
pr_warn("relocation out of range: '%s'\n", pr_warn("relocation out of range: '%s'\n",
prog->section_name); prog->section_name);
return -LIBBPF_ERRNO__RELOC; return -LIBBPF_ERRNO__RELOC;
} }
if (!relo_data) { if (relo->type != RELO_DATA) {
insns[insn_idx].src_reg = BPF_PSEUDO_MAP_FD; insn[0].src_reg = BPF_PSEUDO_MAP_FD;
} else { } else {
insns[insn_idx].src_reg = BPF_PSEUDO_MAP_VALUE; insn[0].src_reg = BPF_PSEUDO_MAP_VALUE;
insns[insn_idx + 1].imm = insns[insn_idx].imm; insn[1].imm = insn[0].imm + relo->sym_off;
} }
insns[insn_idx].imm = obj->maps[map_idx].fd; insn[0].imm = obj->maps[relo->map_idx].fd;
} else if (prog->reloc_desc[i].type == RELO_CALL) { } else if (relo->type == RELO_CALL) {
err = bpf_program__reloc_text(prog, obj, err = bpf_program__reloc_text(prog, obj, relo);
&prog->reloc_desc[i]);
if (err) if (err)
return err; return err;
} }
......
...@@ -6,28 +6,28 @@ ...@@ -6,28 +6,28 @@
char _license[] SEC("license") = "GPL"; char _license[] SEC("license") = "GPL";
static volatile __u64 test1_result; __u64 test1_result = 0;
BPF_TRACE_1("fentry/bpf_fentry_test1", test1, int, a) BPF_TRACE_1("fentry/bpf_fentry_test1", test1, int, a)
{ {
test1_result = a == 1; test1_result = a == 1;
return 0; return 0;
} }
static volatile __u64 test2_result; __u64 test2_result = 0;
BPF_TRACE_2("fentry/bpf_fentry_test2", test2, int, a, __u64, b) BPF_TRACE_2("fentry/bpf_fentry_test2", test2, int, a, __u64, b)
{ {
test2_result = a == 2 && b == 3; test2_result = a == 2 && b == 3;
return 0; return 0;
} }
static volatile __u64 test3_result; __u64 test3_result = 0;
BPF_TRACE_3("fentry/bpf_fentry_test3", test3, char, a, int, b, __u64, c) BPF_TRACE_3("fentry/bpf_fentry_test3", test3, char, a, int, b, __u64, c)
{ {
test3_result = a == 4 && b == 5 && c == 6; test3_result = a == 4 && b == 5 && c == 6;
return 0; return 0;
} }
static volatile __u64 test4_result; __u64 test4_result = 0;
BPF_TRACE_4("fentry/bpf_fentry_test4", test4, BPF_TRACE_4("fentry/bpf_fentry_test4", test4,
void *, a, char, b, int, c, __u64, d) void *, a, char, b, int, c, __u64, d)
{ {
...@@ -35,7 +35,7 @@ BPF_TRACE_4("fentry/bpf_fentry_test4", test4, ...@@ -35,7 +35,7 @@ BPF_TRACE_4("fentry/bpf_fentry_test4", test4,
return 0; return 0;
} }
static volatile __u64 test5_result; __u64 test5_result = 0;
BPF_TRACE_5("fentry/bpf_fentry_test5", test5, BPF_TRACE_5("fentry/bpf_fentry_test5", test5,
__u64, a, void *, b, short, c, int, d, __u64, e) __u64, a, void *, b, short, c, int, d, __u64, e)
{ {
...@@ -44,7 +44,7 @@ BPF_TRACE_5("fentry/bpf_fentry_test5", test5, ...@@ -44,7 +44,7 @@ BPF_TRACE_5("fentry/bpf_fentry_test5", test5,
return 0; return 0;
} }
static volatile __u64 test6_result; __u64 test6_result = 0;
BPF_TRACE_6("fentry/bpf_fentry_test6", test6, BPF_TRACE_6("fentry/bpf_fentry_test6", test6,
__u64, a, void *, b, short, c, int, d, void *, e, __u64, f) __u64, a, void *, b, short, c, int, d, void *, e, __u64, f)
{ {
......
...@@ -8,7 +8,7 @@ struct sk_buff { ...@@ -8,7 +8,7 @@ struct sk_buff {
unsigned int len; unsigned int len;
}; };
static volatile __u64 test_result; __u64 test_result = 0;
BPF_TRACE_2("fexit/test_pkt_access", test_main, BPF_TRACE_2("fexit/test_pkt_access", test_main,
struct sk_buff *, skb, int, ret) struct sk_buff *, skb, int, ret)
{ {
...@@ -23,7 +23,7 @@ BPF_TRACE_2("fexit/test_pkt_access", test_main, ...@@ -23,7 +23,7 @@ BPF_TRACE_2("fexit/test_pkt_access", test_main,
return 0; return 0;
} }
static volatile __u64 test_result_subprog1; __u64 test_result_subprog1 = 0;
BPF_TRACE_2("fexit/test_pkt_access_subprog1", test_subprog1, BPF_TRACE_2("fexit/test_pkt_access_subprog1", test_subprog1,
struct sk_buff *, skb, int, ret) struct sk_buff *, skb, int, ret)
{ {
...@@ -56,7 +56,7 @@ struct args_subprog2 { ...@@ -56,7 +56,7 @@ struct args_subprog2 {
__u64 args[5]; __u64 args[5];
__u64 ret; __u64 ret;
}; };
static volatile __u64 test_result_subprog2; __u64 test_result_subprog2 = 0;
SEC("fexit/test_pkt_access_subprog2") SEC("fexit/test_pkt_access_subprog2")
int test_subprog2(struct args_subprog2 *ctx) int test_subprog2(struct args_subprog2 *ctx)
{ {
......
...@@ -6,28 +6,28 @@ ...@@ -6,28 +6,28 @@
char _license[] SEC("license") = "GPL"; char _license[] SEC("license") = "GPL";
static volatile __u64 test1_result; __u64 test1_result = 0;
BPF_TRACE_2("fexit/bpf_fentry_test1", test1, int, a, int, ret) BPF_TRACE_2("fexit/bpf_fentry_test1", test1, int, a, int, ret)
{ {
test1_result = a == 1 && ret == 2; test1_result = a == 1 && ret == 2;
return 0; return 0;
} }
static volatile __u64 test2_result; __u64 test2_result = 0;
BPF_TRACE_3("fexit/bpf_fentry_test2", test2, int, a, __u64, b, int, ret) BPF_TRACE_3("fexit/bpf_fentry_test2", test2, int, a, __u64, b, int, ret)
{ {
test2_result = a == 2 && b == 3 && ret == 5; test2_result = a == 2 && b == 3 && ret == 5;
return 0; return 0;
} }
static volatile __u64 test3_result; __u64 test3_result = 0;
BPF_TRACE_4("fexit/bpf_fentry_test3", test3, char, a, int, b, __u64, c, int, ret) BPF_TRACE_4("fexit/bpf_fentry_test3", test3, char, a, int, b, __u64, c, int, ret)
{ {
test3_result = a == 4 && b == 5 && c == 6 && ret == 15; test3_result = a == 4 && b == 5 && c == 6 && ret == 15;
return 0; return 0;
} }
static volatile __u64 test4_result; __u64 test4_result = 0;
BPF_TRACE_5("fexit/bpf_fentry_test4", test4, BPF_TRACE_5("fexit/bpf_fentry_test4", test4,
void *, a, char, b, int, c, __u64, d, int, ret) void *, a, char, b, int, c, __u64, d, int, ret)
{ {
...@@ -37,7 +37,7 @@ BPF_TRACE_5("fexit/bpf_fentry_test4", test4, ...@@ -37,7 +37,7 @@ BPF_TRACE_5("fexit/bpf_fentry_test4", test4,
return 0; return 0;
} }
static volatile __u64 test5_result; __u64 test5_result = 0;
BPF_TRACE_6("fexit/bpf_fentry_test5", test5, BPF_TRACE_6("fexit/bpf_fentry_test5", test5,
__u64, a, void *, b, short, c, int, d, __u64, e, int, ret) __u64, a, void *, b, short, c, int, d, __u64, e, int, ret)
{ {
...@@ -46,7 +46,7 @@ BPF_TRACE_6("fexit/bpf_fentry_test5", test5, ...@@ -46,7 +46,7 @@ BPF_TRACE_6("fexit/bpf_fentry_test5", test5,
return 0; return 0;
} }
static volatile __u64 test6_result; __u64 test6_result = 0;
BPF_TRACE_7("fexit/bpf_fentry_test6", test6, BPF_TRACE_7("fexit/bpf_fentry_test6", test6,
__u64, a, void *, b, short, c, int, d, void *, e, __u64, f, __u64, a, void *, b, short, c, int, d, void *, e, __u64, f,
int, ret) int, ret)
......
...@@ -15,8 +15,8 @@ struct { ...@@ -15,8 +15,8 @@ struct {
__type(value, __u64); __type(value, __u64);
} data_map SEC(".maps"); } data_map SEC(".maps");
static volatile __u64 in_val; __u64 in_val = 0;
static volatile __u64 out_val; __u64 out_val = 0;
SEC("raw_tracepoint/sys_enter") SEC("raw_tracepoint/sys_enter")
int test_mmap(void *ctx) int test_mmap(void *ctx)
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment