Commit 3f5754b5 authored by Joanne Hugé

Update XDP, filter out packets not sent on the correct port

parent 166a884d
*.o
*.ll
*.d
latency-measure/build/main
......
......@@ -45,6 +45,21 @@ static inline uint64_t calcdiff_ns(struct timespec t1, struct timespec t2);
static inline uint64_t max(uint64_t a, uint64_t b);
static inline uint64_t min(uint64_t a, uint64_t b);
static void help(char *argv[]) {
printf(
"Usage: %s [-a CPU -p PRIO -i USEC -r USEC -s NS -l N -v]\n\n"
" -h Show help\n"
" -a CPU Pin the real time thread to CPU\n"
" -p PRIO RT thread priority\n"
" -i USEC RT thread wake-up interval (default: 10ms)\n"
" -r USEC non-RT main thread refresh interval\n"
" -l N_CYCLES RT thread cycles amount (default: infinite)\n"
" -f Trace\n"
" -g Use function_graph when tracing\n"
"\n",
argv[0]);
}
// Real-time thread
static void *timerthread(void *p) {
struct timespec previous, current, next;
......@@ -174,11 +189,15 @@ int main(int argc, char *argv[]) {
static void process_options(int argc, char *argv[], thread_param_t *param,
main_param_t *main_param) {
for (;;) {
int c = getopt(argc, argv, "l:p:i:r:f:ag");
int c = getopt(argc, argv, "hl:p:i:r:f:ag");
if (c == -1) break;
switch (c) {
case 'h':
help(argv);
exit(EXIT_SUCCESS);
break;
case 'p':
param->priority = atoi(optarg);
break;
......@@ -202,6 +221,7 @@ static void process_options(int argc, char *argv[], thread_param_t *param,
main_param->enable_graph = 1;
break;
default:
help(argv);
exit(EXIT_FAILURE);
break;
}
......
/* SPDX-License-Identifier: GPL-2.0 */
/* Copied from $(LINUX)/tools/testing/selftests/bpf/bpf_endian.h */
#ifndef __BPF_ENDIAN__
#define __BPF_ENDIAN__
#include <linux/swab.h>
/* LLVM's BPF target selects the endianness of the CPU
* it compiles on, or the user specifies (bpfel/bpfeb),
* respectively. The used __BYTE_ORDER__ is defined by
* the compiler, we cannot rely on __BYTE_ORDER from
* libc headers, since it doesn't reflect the actual
* requested byte order.
*
* Note, LLVM's BPF target has different __builtin_bswapX()
* semantics. It does map to BPF_ALU | BPF_END | BPF_TO_BE
* in bpfel and bpfeb case, which means below, that we map
* to cpu_to_be16(). We could use it unconditionally in BPF
* case, but better not rely on it, so that this header here
* can be used from application and BPF program side, which
* use different targets.
*/
#if __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__
# define __bpf_ntohs(x) __builtin_bswap16(x)
# define __bpf_htons(x) __builtin_bswap16(x)
# define __bpf_constant_ntohs(x) ___constant_swab16(x)
# define __bpf_constant_htons(x) ___constant_swab16(x)
# define __bpf_ntohl(x) __builtin_bswap32(x)
# define __bpf_htonl(x) __builtin_bswap32(x)
# define __bpf_constant_ntohl(x) ___constant_swab32(x)
# define __bpf_constant_htonl(x) ___constant_swab32(x)
#elif __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
# define __bpf_ntohs(x) (x)
# define __bpf_htons(x) (x)
# define __bpf_constant_ntohs(x) (x)
# define __bpf_constant_htons(x) (x)
# define __bpf_ntohl(x) (x)
# define __bpf_htonl(x) (x)
# define __bpf_constant_ntohl(x) (x)
# define __bpf_constant_htonl(x) (x)
#else
# error "Fix your compiler's __BYTE_ORDER__?!"
#endif
#define bpf_htons(x) \
(__builtin_constant_p(x) ? \
__bpf_constant_htons(x) : __bpf_htons(x))
#define bpf_ntohs(x) \
(__builtin_constant_p(x) ? \
__bpf_constant_ntohs(x) : __bpf_ntohs(x))
#define bpf_htonl(x) \
(__builtin_constant_p(x) ? \
__bpf_constant_htonl(x) : __bpf_htonl(x))
#define bpf_ntohl(x) \
(__builtin_constant_p(x) ? \
__bpf_constant_ntohl(x) : __bpf_ntohl(x))
#endif /* __BPF_ENDIAN__ */
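A minimal usage sketch (hypothetical, not part of this diff) of why the constant/non-constant split matters, assuming <linux/udp.h> is also included: a constant operand folds to __bpf_constant_htons() at compile time, while a run-time value would go through the __builtin_bswap16() path.
#include <linux/udp.h>
/* Hypothetical helper: check a UDP header's destination port. udp->dest is
 * in network byte order, so the constant 50000 is converted once at compile
 * time rather than swapping the field on every packet. */
static inline int dest_port_is_50000(const struct udphdr *udp)
{
	return udp->dest == bpf_htons(50000);
}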
......@@ -166,6 +166,8 @@ static void *packet_sending_thread(void *p) {
}
}
printf("Will start at %" PRIu64 "\n", ts_to_uint(next));
// Packet sending loop
for (nb_cycles = 0;; nb_cycles++) {
if (thread_params.max_cycles &&
......
......@@ -82,6 +82,14 @@ void add_ns(struct timespec *t, uint64_t ns) {
t->tv_nsec -= NSEC_PER_SEC;
}
}
void substract_ns(struct timespec *t, uint64_t ns) {
t->tv_nsec -= ns;
while (t->tv_nsec < 0) {
t->tv_sec -= 1;
t->tv_nsec += NSEC_PER_SEC;
}
}
uint64_t calcdiff_ns(struct timespec t1, struct timespec t2) {
uint64_t diff;
......@@ -89,6 +97,12 @@ uint64_t calcdiff_ns(struct timespec t1, struct timespec t2) {
diff += ((int)t1.tv_nsec - (int)t2.tv_nsec);
return diff;
}
int64_t calcdiff_ns_signed(struct timespec t1, struct timespec t2) {
int64_t diff;
diff = NSEC_PER_SEC * ((int)t1.tv_sec - (int)t2.tv_sec);
diff += ((int)t1.tv_nsec - (int)t2.tv_nsec);
return diff;
}
int _max_(int a, int b) { return a > b ? a : b; }
int _min_(int a, int b) { return a < b ? a : b; }
......
......@@ -41,7 +41,9 @@
uint64_t ts_to_uint(struct timespec t);
struct timespec uint_to_ts(uint64_t t);
void add_ns(struct timespec *t, uint64_t ns);
void substract_ns(struct timespec *t, uint64_t ns);
uint64_t calcdiff_ns(struct timespec t1, struct timespec t2);
int64_t calcdiff_ns_signed(struct timespec t1, struct timespec t2);
void set_latency_target(void);
void init_signals(void (*_sighand)(int));
......
/* SPDX-License-Identifier: GPL-2.0 */
/*
* This file contains parsing functions that are used in the packetXX XDP
* programs. The functions are marked as __always_inline, and fully defined in
* this header file to be included in the BPF program.
*
* Each helper parses a packet header, including doing bounds checking, and
* returns the type of its contents if successful, and -1 otherwise.
*
* For Ethernet and IP headers, the content type is the type of the payload
* (h_proto for Ethernet, nexthdr for IPv6), for ICMP it is the ICMP type field.
* All return values are in host byte order.
*
* The versions of the functions included here are slightly expanded versions of
* the functions in the packet01 lesson. For instance, the Ethernet header
* parsing has support for parsing VLAN tags.
*/
#ifndef __PARSING_HELPERS_H
#define __PARSING_HELPERS_H
#include <stddef.h>
#include <linux/if_ether.h>
#include <linux/if_packet.h>
#include <linux/ip.h>
#include <linux/ipv6.h>
#include <linux/icmp.h>
#include <linux/icmpv6.h>
#include <linux/udp.h>
#include <linux/tcp.h>
/* Header cursor to keep track of current parsing position */
struct hdr_cursor {
void *pos;
};
/*
* struct vlan_hdr - vlan header
* @h_vlan_TCI: priority and VLAN ID
* @h_vlan_encapsulated_proto: packet type ID or len
*/
struct vlan_hdr {
__be16 h_vlan_TCI;
__be16 h_vlan_encapsulated_proto;
};
/*
* Struct icmphdr_common represents the common part of the icmphdr and icmp6hdr
* structures.
*/
struct icmphdr_common {
__u8 type;
__u8 code;
__sum16 cksum;
};
/* Allow users of header file to redefine VLAN max depth */
#ifndef VLAN_MAX_DEPTH
#define VLAN_MAX_DEPTH 4
#endif
static __always_inline int proto_is_vlan(__u16 h_proto)
{
return !!(h_proto == bpf_htons(ETH_P_8021Q) ||
h_proto == bpf_htons(ETH_P_8021AD));
}
/* Notice, parse_ethhdr() will skip VLAN tags, by advancing nh->pos and returns
* next header EtherType, BUT the ethhdr pointer supplied still points to the
* Ethernet header. Thus, caller can look at eth->h_proto to see if this was a
* VLAN tagged packet.
*/
static __always_inline int parse_ethhdr(struct hdr_cursor *nh, void *data_end,
struct ethhdr **ethhdr)
{
struct ethhdr *eth = nh->pos;
int hdrsize = sizeof(*eth);
struct vlan_hdr *vlh;
__u16 h_proto;
int i;
/* Byte-count bounds check; check if current pointer + size of header
* is after data_end.
*/
if (nh->pos + hdrsize > data_end)
return -1;
nh->pos += hdrsize;
*ethhdr = eth;
vlh = nh->pos;
h_proto = eth->h_proto;
/* Use loop unrolling to avoid the verifier restriction on loops;
* support up to VLAN_MAX_DEPTH layers of VLAN encapsulation.
*/
#pragma unroll
for (i = 0; i < VLAN_MAX_DEPTH; i++) {
if (!proto_is_vlan(h_proto))
break;
if (vlh + 1 > data_end)
break;
h_proto = vlh->h_vlan_encapsulated_proto;
vlh++;
}
nh->pos = vlh;
return h_proto; /* network-byte-order */
}
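A caller-side sketch of the note above (hypothetical, not part of this header): after parse_ethhdr() has skipped any VLAN tags, eth still points at the outer Ethernet header, so the caller can tell whether the frame was tagged.
/* Hypothetical example built on the helpers above. Returns 1 if the frame
 * carried a VLAN tag, 0 if not, -1 on a truncated header. */
static __always_inline int frame_was_vlan_tagged(struct hdr_cursor *nh,
						 void *data_end)
{
	struct ethhdr *eth;

	if (parse_ethhdr(nh, data_end, &eth) < 0)
		return -1;
	/* nh->pos was advanced past the tags, but eth->h_proto still holds
	 * the outer EtherType, which reveals the tagging. */
	return proto_is_vlan(eth->h_proto);
}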
static __always_inline int parse_ip6hdr(struct hdr_cursor *nh,
void *data_end,
struct ipv6hdr **ip6hdr)
{
struct ipv6hdr *ip6h = nh->pos;
/* Pointer-arithmetic bounds check; pointer +1 points to after end of
* thing being pointed to. We will be using this style in the remainder
* of the tutorial.
*/
if (ip6h + 1 > data_end)
return -1;
nh->pos = ip6h + 1;
*ip6hdr = ip6h;
return ip6h->nexthdr;
}
static __always_inline int parse_iphdr(struct hdr_cursor *nh,
void *data_end,
struct iphdr **iphdr)
{
struct iphdr *iph = nh->pos;
int hdrsize;
if (iph + 1 > data_end)
return -1;
hdrsize = iph->ihl * 4;
/* Variable-length IPv4 header, need to use byte-based arithmetic */
if (nh->pos + hdrsize > data_end)
return -1;
nh->pos += hdrsize;
*iphdr = iph;
return iph->protocol;
}
static __always_inline int parse_icmp6hdr(struct hdr_cursor *nh,
void *data_end,
struct icmp6hdr **icmp6hdr)
{
struct icmp6hdr *icmp6h = nh->pos;
if (icmp6h + 1 > data_end)
return -1;
nh->pos = icmp6h + 1;
*icmp6hdr = icmp6h;
return icmp6h->icmp6_type;
}
static __always_inline int parse_icmphdr(struct hdr_cursor *nh,
void *data_end,
struct icmphdr **icmphdr)
{
struct icmphdr *icmph = nh->pos;
if (icmph + 1 > data_end)
return -1;
nh->pos = icmph + 1;
*icmphdr = icmph;
return icmph->type;
}
static __always_inline int parse_icmphdr_common(struct hdr_cursor *nh,
void *data_end,
struct icmphdr_common **icmphdr)
{
struct icmphdr_common *h = nh->pos;
if (h + 1 > data_end)
return -1;
nh->pos = h + 1;
*icmphdr = h;
return h->type;
}
/*
 * parse_udphdr: parse the UDP header and return the length of the UDP payload
 */
static __always_inline int parse_udphdr(struct hdr_cursor *nh,
void *data_end,
struct udphdr **udphdr)
{
int len;
struct udphdr *h = nh->pos;
if (h + 1 > data_end)
return -1;
nh->pos = h + 1;
*udphdr = h;
len = bpf_ntohs(h->len) - sizeof(struct udphdr);
if (len < 0)
return -1;
return len;
}
/*
* parse_tcphdr: parse and return the length of the tcp header
*/
static __always_inline int parse_tcphdr(struct hdr_cursor *nh,
void *data_end,
struct tcphdr **tcphdr)
{
int len;
struct tcphdr *h = nh->pos;
if (h + 1 > data_end)
return -1;
len = h->doff * 4;
if ((void *) h + len > data_end)
return -1;
nh->pos = h + 1;
*tcphdr = h;
return len;
}
#endif /* __PARSING_HELPERS_H */
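For reference, a minimal XDP program chaining these helpers end to end; a hedged sketch only, assuming the bpf_helpers.h and bpf_endian.h headers from this repository (the real consumer is xdp_kern.c further down in this diff).
#include <linux/bpf.h>
#include <linux/in.h>
#include "bpf_helpers.h"
#include "bpf_endian.h"
#include "parsing_helpers.h"

SEC("xdp")
int xdp_parse_example(struct xdp_md *ctx)
{
	void *data_end = (void *)(long)ctx->data_end;
	void *data = (void *)(long)ctx->data;
	struct hdr_cursor nh = { .pos = data };
	struct ethhdr *eth;
	struct iphdr *iph;
	struct udphdr *udp;

	/* Each helper bounds-checks, advances nh.pos and returns the type of
	 * the next header (or the UDP payload length), or -1 on error. */
	if (parse_ethhdr(&nh, data_end, &eth) != bpf_htons(ETH_P_IP))
		return XDP_PASS;
	if (parse_iphdr(&nh, data_end, &iph) != IPPROTO_UDP)
		return XDP_PASS;
	if (parse_udphdr(&nh, data_end, &udp) < 0)
		return XDP_PASS;

	/* nh.pos now points at the UDP payload. */
	return XDP_PASS;
}
char _license[] SEC("license") = "GPL";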
......@@ -256,11 +256,11 @@ static void open_xdp_socket(char *network_if) {
/*
* Init XDP socket
*/
void init_xdp_recv(ingress_param_t * _params) {
void init_xdp_recv(ingress_param_t * _params, ingress_stat_t *_stats) {
int ret, prog_fd, xsks_map = 0;
struct bpf_prog_load_attr prog_load_attr = {
.prog_type = BPF_PROG_TYPE_XDP,
.file = "/home/oli/rt-measures/packet-exchange/build/xdp_kern.o",
.file = "/home/oli/tsn-measures/packet-exchange/build/xdp_kern.o",
};
struct xsk_umem_config cfg = {
.fill_size = XSK_RING_PROD__DEFAULT_NUM_DESCS,
......@@ -274,6 +274,7 @@ void init_xdp_recv(ingress_param_t * _params) {
void *buffer = NULL;
params = _params;
stats = _stats;
ret = bpf_prog_load_xattr(&prog_load_attr, &obj, &prog_fd);
if (ret || prog_fd < 0) err("bpf_prog_load_xattr() failed");
......@@ -312,11 +313,55 @@ void setup_poll_fd(void) {
static int received;
static uint32_t idx_rx = 0, idx;
static void poll_wakeup(struct timespec ts, int margin) {
int ret;
struct timespec ts_prev, current;
// Sleep until `margin` microseconds before the target timestamp ts...
ts_prev = ts;
substract_ns(&ts_prev, margin * 1000);
ret = clock_nanosleep(CLOCK_REALTIME, TIMER_ABSTIME, &ts_prev, NULL);
if (ret) {
fprintf(stderr, "clock_nanosleep returned error: %d, aborting...\n", ret);
exit(EXIT_FAILURE);
}
// ...then busy-wait on the clock until less than 1 us remains before ts.
do {
clock_gettime(CLOCK_REALTIME, &current);
} while(calcdiff_ns_signed(ts, current) > 1000);
}
static void parse_raw_packet(uint64_t addr, size_t len)
{
char *packet;
struct ethhdr *eth;
struct iphdr *ip;
struct udphdr *udp;
size_t min_len = sizeof(*eth) + sizeof(*ip) + sizeof(*udp);
if (len <= min_len) {
stats->xdp_data = NULL;
return;
}
packet = xsk_umem__get_data(xdp_socket.umem.buffer, addr);
eth = (struct ethhdr *)packet;
ip = (struct iphdr *)(packet + sizeof(*eth));
udp = (struct udphdr *)(packet + sizeof(*eth) + sizeof(*ip));
stats->xdp_data = packet + sizeof(*eth) + sizeof(*ip) + sizeof(*udp);
}
/*
 * Receive one packet from the XDP socket. In active polling mode, sleep
 * until shortly before the expected arrival time `next` and busy-poll the
 * RX ring; otherwise wait with poll(). The frame is then parsed and its
 * payload exposed through stats->xdp_data.
 */
int recv_xdp_packet(void) {
int recv_xdp_packet(struct timespec next) {
int ret;
uint64_t addr;
uint32_t len;
struct timespec next_pre, current;
int k = 0;
if (params->xdp_polling_mode == 0) {
ret = poll(fds, 1, -1);
......@@ -327,17 +372,31 @@ int recv_xdp_packet(void) {
error(EXIT_FAILURE, errno, "poll failed");
received = xsk_ring_cons__peek(&xdp_socket.rx, 1, &idx_rx);
} else {
do {
received =
xsk_ring_cons__peek(&xdp_socket.rx, 1, &idx_rx);
} while (!received);
}
if (!received) return 1;
next_pre = next;
substract_ns(&next_pre, 120 * 1000);
ret = clock_nanosleep(CLOCK_REALTIME, TIMER_ABSTIME, &next_pre, NULL);
if (ret) {
fprintf(stderr, "clock_nanosleep returned error: %d, aborting...\n", ret);
exit(EXIT_FAILURE);
}
do {
received = xsk_ring_cons__peek(&xdp_socket.rx, 1, &idx_rx);
} while(!received);
}
if (received != 1)
error(EXIT_FAILURE, errno,
"Received more packets than expected");
error(EXIT_FAILURE, errno, "Received %d packets", received);
/* Get the packet */
addr = xsk_ring_cons__rx_desc(&xdp_socket.rx, idx_rx)->addr;
len = xsk_ring_cons__rx_desc(&xdp_socket.rx, idx_rx)->len;
/* Parse it */
parse_raw_packet(xsk_umem__add_offset_to_addr(addr), len);
return 0;
}
void recv_xdp_cleanup(void) {
......@@ -368,10 +427,10 @@ void close_xdp_socket(void) {
}
#else
void init_xdp_recv(ingress_param_t * _params) { (void) _params; }
void init_xdp_recv(ingress_param_t * _params, ingress_stat_t *_stats) { (void) _params; (void) _stats; }
void setup_poll_fd(void) {}
void close_xdp_socket(void) {}
int recv_xdp_packet(void) { return 0; }
int recv_xdp_packet(struct timespec next) { (void) next; return 0; }
void recv_xdp_cleanup(void) {}
#endif
......
......@@ -29,6 +29,7 @@ typedef struct ingress_stat {
uint64_t high_jitter;
char data[MAX_BUFFER_SIZE];
char * xdp_data;
} ingress_stat_t;
void init_udp_recv(ingress_param_t *_params, ingress_stat_t *stats,
......@@ -55,8 +56,8 @@ struct xdpsock {
};
#endif
void init_xdp_recv(ingress_param_t * _params);
int recv_xdp_packet(void);
void init_xdp_recv(ingress_param_t * _params, ingress_stat_t *_stats);
int recv_xdp_packet(struct timespec next);
void recv_xdp_cleanup(void);
void setup_poll_fd(void);
void close_xdp_socket(void);
......
......@@ -30,6 +30,8 @@
#include "send_packet.h"
#include "tracer.h"
#define ERROR_MARGIN_NS 400000
// Structs
enum TSNTask { RECV_PACKET_TASK, RTT_TASK, XDP_TASK };
......@@ -44,6 +46,11 @@ typedef struct thread_param {
int enable_diff_ts;
int enable_receive_tracemark;
uint64_t start_ts;
int poll;
int poll_margin;
} thread_param_t;
typedef struct main_params {
......@@ -95,6 +102,8 @@ static void help(char *argv[]) {
" -r USEC non-RT main thread refresh interval\n"
" -d BUF_LEN Set the length of tx buffer\n"
" -c Receive timestamp and emit signal\n"
" -s NS Common start time reference\n"
" -P USEC Do polling to wakeup signal thread with specified margin\n"
" -C Receive timestamp and print difference with current time\n"
" -b CLIENT_IP Server side RTT\n"
" -g Print histograms to sdtout on exit\n"
......@@ -110,10 +119,30 @@ static void help(char *argv[]) {
argv[0]);
}
static void poll_wakeup(struct timespec ts, int margin) {
int ret;
struct timespec ts_prev, current;
ts_prev = ts;
substract_ns(&ts_prev, margin * 1000);
ret = clock_nanosleep(CLOCK_REALTIME, TIMER_ABSTIME, &ts_prev, NULL);
if (ret) {
fprintf(stderr, "clock_nanosleep returned error: %d, aborting...\n", ret);
exit(EXIT_FAILURE);
}
do {
clock_gettime(CLOCK_REALTIME, &current);
} while(calcdiff_ns_signed(ts, current) > 1000);
}
static void *emit_signal_thread(void *p) {
(void)p;
cpu_set_t mask;
struct timespec current;
struct timespec previous_emit, previous_ts;
int64_t emit_diff, ts_diff;
// Set thread CPU affinity
if (thread_params.affinity_cpu) {
......@@ -130,9 +159,28 @@ static void *emit_signal_thread(void *p) {
pthread_cond_wait(&emit_signal_ts_received, &emit_signal_mutex);
clock_gettime(CLOCK_REALTIME, &current);
clock_nanosleep(CLOCK_REALTIME, TIMER_ABSTIME, &emit_signal_next, NULL);
poll_wakeup(emit_signal_next, thread_params.poll_margin);
toggle_gpio();
// Check if something went wrong
if(i > 0) {
clock_gettime(CLOCK_REALTIME, &current);
emit_diff = calcdiff_ns_signed(current, previous_emit);
ts_diff = calcdiff_ns_signed(emit_signal_next, previous_ts);
if((emit_diff < ((int64_t)thread_params.interval) - ERROR_MARGIN_NS) ||
(emit_diff > ((int64_t)thread_params.interval) + ERROR_MARGIN_NS)) {
fprintf(stderr, "Signal emission interval reached error threshold: %" PRIi64 "\n", emit_diff);
exit(EXIT_FAILURE);
}
if((ts_diff < ((int64_t)thread_params.interval) - ERROR_MARGIN_NS) ||
(ts_diff > ((int64_t)thread_params.interval) + ERROR_MARGIN_NS)) {
fprintf(stderr, "Timestamp interval reached error threshold: %" PRIi64 "\n", ts_diff);
exit(EXIT_FAILURE);
}
}
previous_emit = current;
previous_ts = emit_signal_next;
}
pthread_mutex_unlock(&emit_signal_mutex);
......@@ -144,7 +192,7 @@ static void *emit_signal_thread(void *p) {
*/
static void *tsn_thread(void *p) {
(void)p;
struct timespec current, previous;
struct timespec current, previous, next;
cpu_set_t mask;
char tracemark_message[128];
......@@ -161,6 +209,23 @@ static void *tsn_thread(void *p) {
if (tsn_task == XDP_TASK) setup_poll_fd();
if (thread_params.start_ts) {
clock_gettime(CLOCK_REALTIME, &next);
if (thread_params.start_ts < ts_to_uint(next)) {
fprintf(stderr, "start timestamp is in the past, aborting...\n");
exit(EXIT_FAILURE);
}
if (thread_params.start_ts > (ts_to_uint(next) + UINT64_C(3600000000000))) {
fprintf(stderr, "start timestamp is too high, aborting...\n");
exit(EXIT_FAILURE);
}
next = uint_to_ts(thread_params.start_ts);
add_ns(&next, thread_params.interval);
}
if(thread_params.start_ts)
printf("Will start at %" PRIu64 "\n", ts_to_uint(next));
// Packet receiving loop
for (ingress_stats.packets_received = 0;; ingress_stats.packets_received++) {
// RTT
......@@ -170,32 +235,33 @@ static void *tsn_thread(void *p) {
// Receive packet
} else if (tsn_task == RECV_PACKET_TASK || tsn_task == XDP_TASK) {
// Receive UDP or XDP packet
if (tsn_task == RECV_PACKET_TASK)
recv_udp_packet();
else
recv_xdp_packet();
recv_xdp_packet(next);
// Get time for statistics
clock_gettime(CLOCK_REALTIME, &current);
recv_xdp_cleanup();
if (thread_params.start_ts)
add_ns(&next, thread_params.interval);
if(thread_params.enable_receive_tracemark) {
sprintf(ts_tracemark_buf, "%" PRIu64, ts_to_uint(current));
tracemark(ts_tracemark_buf);
}
if(thread_params.enable_diff_ts) {
uint64_t send_ts = decode(ingress_stats.data);
int64_t diff_us = (((int64_t) ts_to_uint(current)) - ((int64_t)send_ts)) / 1000;
min_diff_ts = _min_(diff_us, min_diff_ts);
max_diff_ts = _max_(diff_us, max_diff_ts);
avg_diff_ts = (avg_diff_ts * ingress_stats.packets_received + diff_us) / (ingress_stats.packets_received + 1);
}
// Emit signal
// Get signal timestamp
if (thread_params.emit_signal) {
uint64_t emit_signal_t = decode(ingress_stats.data);
uint64_t emit_signal_t;
if(tsn_task == XDP_TASK)
emit_signal_t = decode(ingress_stats.xdp_data);
else
emit_signal_t = decode(ingress_stats.data);
pthread_mutex_lock(&emit_signal_mutex);
emit_signal_next = uint_to_ts(emit_signal_t);
......@@ -203,41 +269,68 @@ static void *tsn_thread(void *p) {
pthread_mutex_unlock(&emit_signal_mutex);
}
// Update stats
if (ingress_stats.packets_received) {
int interval_us = calcdiff_ns(current, previous) / 1000;
ingress_stats.min_interval =
_min_(interval_us, ingress_stats.min_interval);
ingress_stats.max_interval =
_max_(interval_us, ingress_stats.max_interval);
ingress_stats.avg_interval =
(ingress_stats.avg_interval * ingress_stats.packets_received +
interval_us) /
(ingress_stats.packets_received + 1);
// Histogram
if (enable_histograms) {
int dist_to_interval = interval_us - (thread_params.interval / 1000);
dist_to_interval += MAX_JITTER / 2;
if (dist_to_interval > ((int)MAX_JITTER) || dist_to_interval < 0)
ingress_stats.high_jitter++;
else
jitter_hist[dist_to_interval]++;
// Update packet latency estimation statistics
if(thread_params.enable_diff_ts) {
uint64_t send_ts;
if(tsn_task == XDP_TASK)
send_ts = decode(ingress_stats.xdp_data);
else
send_ts = decode(ingress_stats.data);
int64_t diff_us = (((int64_t) ts_to_uint(current)) - ((int64_t)send_ts)) / 1000;
min_diff_ts = _min_(diff_us, min_diff_ts);
max_diff_ts = _max_(diff_us, max_diff_ts);
avg_diff_ts = (avg_diff_ts * ingress_stats.packets_received + diff_us) / (ingress_stats.packets_received + 1);
// If the latency hits the tracing threshold, stop tracing
if (main_params.enable_tracing &&
(max_diff_ts > ((int64_t)thread_params.latency_threshold))) {
sprintf(tracemark_message, "Threshold hit: %" PRIi64 "\n",
max_diff_ts);
tracemark(tracemark_message);
tracing(0);
printf(tracemark_message);
exit(EXIT_SUCCESS);
}
}
// If the latency hits the tracing threshold, stop tracing
int jitter = ingress_stats.max_interval - ingress_stats.min_interval;
if (main_params.enable_tracing &&
(jitter > ((int)thread_params.latency_threshold))) {
sprintf(tracemark_message, "Jitter threshold hit: %dus\n",
jitter);
tracemark(tracemark_message);
tracing(0);
printf(tracemark_message);
exit(EXIT_SUCCESS);
else {
// Update stats
if (ingress_stats.packets_received) {
int interval_us = calcdiff_ns(current, previous) / 1000;
ingress_stats.min_interval =
_min_(interval_us, ingress_stats.min_interval);
ingress_stats.max_interval =
_max_(interval_us, ingress_stats.max_interval);
ingress_stats.avg_interval =
(ingress_stats.avg_interval * ingress_stats.packets_received +
interval_us) /
(ingress_stats.packets_received + 1);
// Histogram
if (enable_histograms) {
int dist_to_interval = interval_us - (thread_params.interval / 1000);
dist_to_interval += MAX_JITTER / 2;
if (dist_to_interval > ((int)MAX_JITTER) || dist_to_interval < 0)
ingress_stats.high_jitter++;
else
jitter_hist[dist_to_interval]++;
}
// If the latency hits the tracing threshold, stop tracing
int jitter = ingress_stats.max_interval - ingress_stats.min_interval;
if (main_params.enable_tracing &&
(jitter > ((int)thread_params.latency_threshold))) {
sprintf(tracemark_message, "Jitter threshold hit: %dus\n",
jitter);
tracemark(tracemark_message);
tracing(0);
printf(tracemark_message);
exit(EXIT_SUCCESS);
}
}
}
previous = current;
......@@ -304,6 +397,9 @@ int main(int argc, char *argv[]) {
thread_params.affinity_cpu = 0;
thread_params.enable_diff_ts = 0;
thread_params.enable_receive_tracemark = 0;
thread_params.poll = 0;
thread_params.poll_margin = 75;
thread_params.start_ts = 0;
main_params.refresh_rate = 50000;
main_params.verbose = 0;
main_params.enable_tracing = 0;
......@@ -335,7 +431,7 @@ int main(int argc, char *argv[]) {
// Initialize the XDP or UDP packet receiving socket
if (tsn_task == XDP_TASK)
init_xdp_recv(&ingress_params);
init_xdp_recv(&ingress_params, &ingress_stats);
else
init_udp_recv(&ingress_params, &ingress_stats, enable_histograms,
kernel_latency_hist);
......@@ -461,7 +557,7 @@ static void process_options(int argc, char *argv[]) {
int network_if_specified = 0;
for (;;) {
int c = getopt(argc, argv, "a:b:cCd:f:ghi:p:r:tvx:XT:GMS");
int c = getopt(argc, argv, "a:b:cCs:d:f:ghi:p:r:tvx:XT:GMSP:");
if (c == -1) break;
......@@ -479,6 +575,9 @@ static void process_options(int argc, char *argv[]) {
case 'C':
thread_params.enable_diff_ts = 1;
break;
case 's':
thread_params.start_ts = strtoull(optarg, NULL, 10);
break;
case 'd':
ingress_params.tx_buffer_len = atoi(optarg);
if (ingress_params.tx_buffer_len < 1) {
......@@ -529,6 +628,10 @@ static void process_options(int argc, char *argv[]) {
case 'S':
ingress_params.enable_ts_tracemark = 1;
break;
case 'P':
thread_params.poll = 1;
thread_params.poll_margin = atoi(optarg);
break;
}
}
......
......@@ -9,6 +9,10 @@
#include <linux/udp.h>
#include "bpf_helpers.h"
#include "bpf_endian.h"
#include "parsing_helpers.h"
#define UDP_PORT 50000
#define bpf_printk(fmt, ...) \
({ \
......@@ -17,44 +21,51 @@
})
struct bpf_map_def SEC("maps") xsks_map = {
.type = BPF_MAP_TYPE_XSKMAP,
.key_size = sizeof(int),
.value_size = sizeof(int),
.max_entries = 64,
.type = BPF_MAP_TYPE_XSKMAP,
.key_size = sizeof(int),
.value_size = sizeof(int),
.max_entries = 64,
};
static inline int parse_ipv4(void *data, unsigned long long nh_off,
void *data_end) {
struct iphdr *iph = data + nh_off;
if ((void *)(iph + 1) > data_end) return 0;
return iph->protocol;
}
SEC("xdp_sock")
int xdp_sock_prog(struct xdp_md *ctx) {
int xdp_sock_prog(struct xdp_md *ctx)
{
int eth_type, ip_type, index;
struct ethhdr *eth;
struct iphdr *iphdr;
struct ipv6hdr *ipv6hdr;
void *data_end = (void *)(long)ctx->data_end;
void *data = (void *)(long)ctx->data;
struct ethhdr *eth = data;
int idx = ctx->rx_queue_index;
unsigned int ipproto = 0;
unsigned long long nh_off;
struct hdr_cursor nh = { .pos = data };
index = ctx->rx_queue_index;
eth_type = parse_ethhdr(&nh, data_end, &eth);
if (eth_type < 0)
return XDP_PASS;
/* Check if it's a UDP frame on the expected port: if so, redirect to the
 * active xsk for user space, otherwise pass to the stack.
 */
nh_off = sizeof(*eth);
if (data + nh_off > data_end) return XDP_PASS;
if (eth_type == bpf_htons(ETH_P_IP)) {
ip_type = parse_iphdr(&nh, data_end, &iphdr);
} else if (eth_type == bpf_htons(ETH_P_IPV6)) {
ip_type = parse_ip6hdr(&nh, data_end, &ipv6hdr);
} else {
return XDP_PASS;
}
if (eth->h_proto == __builtin_bswap16(ETH_P_IP))
ipproto = parse_ipv4(data, nh_off, data_end);
// only support UDP for now
if (ip_type != IPPROTO_UDP)
return XDP_PASS;
if (ipproto != IPPROTO_UDP) return XDP_PASS;
struct udphdr *udphdr;
// don't mess with ports outside our purview, if specified
if (parse_udphdr(&nh, data_end, &udphdr) < 0)
return XDP_PASS;
if (bpf_ntohs(udphdr->dest) != UDP_PORT)
return XDP_PASS;
/* If socket bound to rx_queue then redirect to user space */
if (bpf_map_lookup_elem(&xsks_map, &idx))
return bpf_redirect_map(&xsks_map, idx, 0);
if (bpf_map_lookup_elem(&xsks_map, &index))
return bpf_redirect_map(&xsks_map, index, 0);
/* Else pass to Linux' network stack */
return XDP_PASS;
......
......@@ -7,7 +7,7 @@ send -- "sudo echo\r"
expect "assword"
send -- "olimex\r"
expect "oli@"
send -- "sudo nohup [lindex $argv 1] > /dev/null > /dev/null &\r"
send -- "sudo nohup [lindex $argv 1] > /dev/null > [lindex $argv 2] &\r"
expect "stdout\r"
send -- \x03
expect "oli@"
......@@ -97,8 +97,8 @@ stop_opt_filename=${stop_opt_name}${stop_opt_index}
# Client histogram
if [ -n "$client_histogram" ]; then
$script_dir/exec-ssh-nohup $server_board "run-server";
$script_dir/exec-ssh-nohup $client_board "run-client -p -i $interval -t -g $server_board";
$script_dir/exec-ssh-nohup $server_board "run-server" server_log;
$script_dir/exec-ssh-nohup $client_board "run-client -p -i $interval -t -g $server_board" client_log;
echo "$stop_opts -c client_i${interval}_pfast -i $interval $client_board $server_board" > $script_dir/${stop_opt_filename};
......@@ -107,16 +107,16 @@ elif [ -n "$server_histogram" ]; then
# Server pfifo_fast qdisc histogram
if [ -n "$server_pfifo" ]; then
$script_dir/exec-ssh-nohup $server_board "run-server -g ${interval}$server_opts";
$script_dir/exec-ssh-nohup $client_board "run-client -p -i $interval $server_board";
$script_dir/exec-ssh-nohup $server_board "run-server -g ${interval}$server_opts" server_log;
$script_dir/exec-ssh-nohup $client_board "run-client -p -i $interval $server_board" client_log;
echo "$stop_opts -s server_i${interval} $server_opts -p $client_board $server_board" > $script_dir/${stop_opt_filename};
# Server ETF qdisc histogram
elif [ -n "$server_etf" ]; then
$script_dir/exec-ssh-nohup $server_board "run-server -g ${interval}$server_opts";
$script_dir/exec-ssh-nohup $client_board "run-client -e $etf_delta -o $etf_offset -i $interval $server_board";
$script_dir/exec-ssh-nohup $server_board "run-server -g ${interval}$server_opts" server_log;
$script_dir/exec-ssh-nohup $client_board "run-client -e $etf_delta -o $etf_offset -i $interval $server_board" client_log;
echo "$stop_opts -s server_i${interval} $server_opts -e $etf_delta -o $etf_offset $client_board $server_board" > $script_dir/${stop_opt_filename};
......@@ -127,7 +127,7 @@ elif [ -n "$server_histogram" ]; then
# cyclictest histogram
elif [ -n "$cyclictest_histogram" ]; then
$script_dir/exec-ssh-nohup $board "run-cyclictest -g $cyclictest_opts -i $interval";
$script_dir/exec-ssh-nohup $board "run-cyclictest -g $cyclictest_opts -i $interval" cyclictest_log;
echo "$stop_opts -C cyclictest_hist $cyclictest_opts $board" > $script_dir/${stop_opt_filename};
......
......@@ -6,14 +6,13 @@ usage() {
cat << ENDUSAGE
Usage: $0 [-h] QDISC_OPT [CLIENT_OPTS] BOARD_HOSTNAME
-h Show help
QDISC_OPTS: (-e DELTA [-o USEC] -H | -p) [-q]
QDISC_OPTS: [-e DELTA [-o USEC] -H] [-q]
Which qdisc to use (will call create-qdisc script)
-e DELTA Use ETF (Earliest TxTime First) qdisc with specified delta
(check tc-etf man page for more information)
-o USEC Offset in userspace program to set the timestamp passed to ETF qdisc
-H Use hardware offloading for the ETF qdisc (not all hardware supports this)
-p Use pfifo_fast qdisc (default one)
-q Don't setup qdiscs with create-qdisc script
-q Setup qdiscs with create-qdisc script
CLIENT_OPTS: -bgt (-c DELAY -s NS | -d TX_BUF_LEN) -i INTERVAL -I if -a CPU [TRACE_OPTS]
Options passed to the C client program (everything here is optional)
-b Measure round trip time
......@@ -46,19 +45,22 @@ ENDUSAGE
}
# Default interval
interval=100000
interval=1000
# Default options
interface="eth0"
cpu="1"
client_options="-p 98"
client_options="-p 95"
qdisc_options=""
etf_offset=500
tracecmd_events="-e irq -e sched -e net_dev_start_xmit -e net_dev_xmit -e net_dev_xmit_timeout"
tracecmd_opts=""
while getopts "a:bc:d:e:o:ghi:pqs:tB:E:I:HP:TS:" opt; do
while getopts "a:bc:d:e:o:ghi:qs:tB:E:I:HP:TS:" opt; do
case "${opt}" in
h )
usage
;;
a )
cpu=${OPTARG}
;;
......@@ -74,7 +76,6 @@ while getopts "a:bc:d:e:o:ghi:pqs:tB:E:I:HP:TS:" opt; do
e )
use_etf=1
delta=$((${OPTARG}*1000))
qdisc_options+="-e $delta"
;;
o )
etf_offset=${OPTARG}
......@@ -83,20 +84,14 @@ while getopts "a:bc:d:e:o:ghi:pqs:tB:E:I:HP:TS:" opt; do
client_options+=" -g"
use_histogram=1
;;
h )
usage
exit 1
;;
i )
interval=${OPTARG}
;;
p )
use_pfast=1
client_options+=" -q 1"
qdisc_options+="-p"
;;
q )
dont_create_qdisc=1
create_qdisc=1
;;
s )
client_options+=" -s ${OPTARG}"
......@@ -133,27 +128,16 @@ while getopts "a:bc:d:e:o:ghi:pqs:tB:E:I:HP:TS:" opt; do
;;
esac
done
shift $((OPTIND-1))
if [ -z "$1" ]; then
usage
fi
qdisc_options+=" -I $interface"
client_options+=" -a $cpu"
board_name=$1
board_ip=$(cat /etc/hosts | grep $board_name | awk '{print $1}')
if [ -z "${use_etf}" ] && [ -z "${use_pfast}" ]; then
usage
fi
if [ -n "${use_etf}" ] && [ -n "${use_pfast}" ]; then
usage
fi
if [ -z "${use_histogram}" ]; then
client_options+=" -v"
else
......@@ -170,11 +154,15 @@ fi
if [ -n "${use_etf}" ]; then
client_options+=" -e $etf_offset -q 8"
qdisc_options+="-e $delta"
else
client_options+=" -q 1"
qdisc_options+="-p"
fi
client_options+=" -i $interval"
if [ -z "$dont_create_qdisc" ]; then
if [ -n "$create_qdisc" ]; then
echo "create-qdisc $qdisc_options";
$script_dir/create-qdisc $qdisc_options;
fi
......@@ -185,13 +173,19 @@ make client;
cd $script_dir;
if [ -n "${use_histogram}" ]; then
echo "client $client_options $interface $board_ip > $output;mv $output ~/";
$script_dir/../packet-exchange/build/client $client_options $interface $board_ip > $output;
mv $output ~/;
elif [ -n "${use_tracer}" ]; then
echo "trace-cmd record $tracecmd_opts $tracecmd_events ./client $client_options $interface $board_ip";
trace-cmd record $tracecmd_opts $tracecmd_events $script_dir/../packet-exchange/build/client $client_options $interface $board_ip;
else
echo "client $client_options $interface $board_ip";
$script_dir/../packet-exchange/build/client $client_options $interface $board_ip;
fi
......@@ -13,14 +13,18 @@ Usage: $0 [-h] [-I if] [SERVER] | TCPDUMP [TRACE_OPTS]
SERVER: -bct ((-x | -X) POLL) -g INTERVAL -a CPU
Options passed to the C server program (everything here is optional)
-b Send back packets for round trip measurements
-c Emit a signal on GPIO at the timestamp given in packet
-c USEC Emit a signal on GPIO at the timestamp given in the packet; USEC is the signal interval
(to be used with -c option in client program)
-C Measure difference between current time and timestamp sent in tx data
-O USEC Do polling to wakeup signal thread with specified margin
-C USEC Measure difference between current time and timestamp sent in tx data
-t Use SO_TIMESTAMPS to see how much time packet spent in kernel
-x POLL Use XDP sockets, with a global libbpf installation
-X POLL Use XDP sockets, with libbpf located in \$HOME/libbpf folder
POLL: Polling mode used in server program, 0 to poll with poll function,
1 to do active polling
-s NS Specify a CLOCK_REALTIME timestamp at which client should start
(to be used with PTP) (interval needs to be specified with -j)
-j USEC Specify interval (used with -s)
-g USEC Generate histograms for measures with the specified interval
-a CPU CPU on which to pin the program
TCPDUMP: -d NB_PACKETS [-i INTERVAL]
......@@ -48,7 +52,7 @@ ENDUSAGE
# Default options
interface="eth0"
server_options="-p 98"
server_options="-p 95"
make_opts=""
ip="10.100.21."
tcpdump_interval=1000000
......@@ -56,8 +60,14 @@ tracecmd_events="-e irq -e sched -e net -e napi"
tracecmd_opts=""
cpu=1
while getopts "a:b:cChtx:X:d:i:g:I:T:E:P:B:MSQ" opt; do
while getopts "j:a:b:c:C:htx:X:s:d:i:g:I:T:E:P:B:MSQO:" opt; do
case "${opt}" in
h )
usage
;;
j )
server_options+=" -i ${OPTARG}"
;;
a )
cpu=${OPTARG}
;;
......@@ -66,10 +76,13 @@ while getopts "a:b:cChtx:X:d:i:g:I:T:E:P:B:MSQ" opt; do
board_name=${OPTARG}
;;
c )
server_options+=" -c"
server_options+=" -c -i ${OPTARG}"
;;
O )
server_options+=" -P ${OPTARG} "
;;
C )
server_options+=" -C"
server_options+=" -C -i ${OPTARG}"
;;
d )
use_tcpdump=1
......@@ -83,10 +96,6 @@ while getopts "a:b:cChtx:X:d:i:g:I:T:E:P:B:MSQ" opt; do
server_options+=" -g -i $interval"
use_histogram=1
;;
h )
usage
exit 1
;;
t )
server_options+=" -t"
;;
......@@ -101,6 +110,9 @@ while getopts "a:b:cChtx:X:d:i:g:I:T:E:P:B:MSQ" opt; do
server_options+=" -x ${OPTARG}"
make_opts=" -e WITH_GIT_XDP=1"
;;
s )
server_options+=" -s ${OPTARG}"
;;
B )
tracecmd_opts+=" -m ${OPTARG} -b ${OPTARG}"
;;
......
......@@ -9,6 +9,7 @@ Usage: $0 [-h] [-i USEC -g -s -t MSEC] BOARD1_HOSTNAME BOARD2_HOSTNAME
-i USEC Signal period
-g Use GPIO instead of serial
-t MSEC Set the start timestamp offset
-P USEC Do polling to wakeup with specified margin
BOARD_HOSTNAME Uses /etc/hosts to find the IP address associated to the hostname
ENDUSAGE
1>&2;
......@@ -20,7 +21,7 @@ interval=1000
pp_opts=""
ts_offset=6000
while getopts "hi:gt:" opt; do
while getopts "hi:gt:P:" opt; do
case "$opt" in
h)
usage
......@@ -34,6 +35,9 @@ while getopts "hi:gt:" opt; do
t )
ts_offset=${OPTARG}
;;
P )
pps_opts+=" -P ${OPTARG} "
;;
*)
usage
;;
......@@ -54,7 +58,9 @@ ts=$($script_dir/get-ptp-time -m $ts_offset)
echo "Timestamp: $ts";
ssh $board1 "cd tsn-measures/software-pps/build;make";
$script_dir/exec-ssh-nohup $board1 "tsn-measures/software-pps/build/software-pps -a1 -p97 -i $interval $pps_opts -s $ts";
echo "$script_dir/exec-ssh-nohup $board1 \"tsn-measures/software-pps/build/software-pps -a1 -p97 -i $interval $pps_opts -s $ts\"" server_log;
$script_dir/exec-ssh-nohup $board1 "tsn-measures/software-pps/build/software-pps -a1 -p97 -i $interval $pps_opts -s $ts" server_log;
ssh $board2 "cd tsn-measures/software-pps/build;make";
$script_dir/exec-ssh-nohup $board2 "tsn-measures/software-pps/build/software-pps -a1 -p97 -i $interval $pps_opts -s $ts";
echo "$script_dir/exec-ssh-nohup $board2 \"tsn-measures/software-pps/build/software-pps -a1 -p97 -i $interval $pps_opts -s $ts\"" server_log;
$script_dir/exec-ssh-nohup $board2 "tsn-measures/software-pps/build/software-pps -a1 -p97 -i $interval $pps_opts -s $ts" server_log;
......@@ -9,6 +9,8 @@ Usage: $0 [-h] [-i USEC -c USEC -t MSEC] [-T] BOARD1_HOSTNAME BOARD2_HOSTNAME
-i USEC Specify which interval to use in client
-c USEC Specify which offset to use for the timestamp in the packet
-t MSEC Set the start timestamp offset
-P USEC Do polling to wakeup signal thread with specified margin
-X POLL_MODE Use XDP with specified poll mode
-T Enable tracing on the boards
BOARD_HOSTNAME Uses /etc/hosts to find the IP address associated to the hostname
ENDUSAGE
......@@ -21,7 +23,7 @@ interval=1000
server_opts=""
ts_offset=2000
while getopts "hc:i:o:rt:T" opt; do
while getopts "hc:i:o:rt:TP:X:" opt; do
case "${opt}" in
h )
usage
......@@ -38,6 +40,12 @@ while getopts "hc:i:o:rt:T" opt; do
T )
enable_tracing=1
;;
P )
server_opts+=" -P ${OPTARG} "
;;
X )
server_opts+=" -X ${OPTARG} "
;;
* )
usage
;;
......@@ -62,8 +70,8 @@ $script_dir/sudossh $board2 "killall server";
killall client;
killall run-client;
$script_dir/exec-ssh-nohup $board1 "run-server -c $server_opts";
$script_dir/exec-ssh-nohup $board2 "run-server -c $server_opts";
$script_dir/exec-ssh-nohup $board1 "run-server -c $interval $server_opts" server_log;
$script_dir/exec-ssh-nohup $board2 "run-server -c $interval $server_opts" server_log;
ts=$($script_dir/get-ptp-time -m $ts_offset)
......@@ -76,8 +84,8 @@ if [ -z "$delay" ]; then
delay=$((interval / 2))
fi
echo "$script_dir/run-client -a 1 -q -i $interval -p -I enp1s0 -c $delay -s $ts $board1 &> client_enp1s0_log&";
echo "$script_dir/run-client -a 2 -q -i $interval -p -I enp2s0 -c $delay -s $ts $board2 &> client_enp2s0_log&";
echo "$script_dir/run-client -a 1 -i $interval -I enp1s0 -c $delay -s $ts $board1 &> client_enp1s0_log&";
echo "$script_dir/run-client -a 2 -i $interval -I enp2s0 -c $delay -s $ts $board2 &> client_enp2s0_log&";
$script_dir/run-client -a 1 -q -i $interval -p -I enp1s0 -c $delay -s $ts $board1 &> client_enp1s0_log&
$script_dir/run-client -a 2 -q -i $interval -p -I enp2s0 -c $delay -s $ts $board2 &> client_enp2s0_log&
$script_dir/run-client -a 1 -i $interval -I enp1s0 -c $delay -s $ts $board1 &> client_enp1s0_log&
$script_dir/run-client -a 2 -i $interval -I enp2s0 -c $delay -s $ts $board2 &> client_enp2s0_log&
......@@ -28,6 +28,15 @@ void add_ns(struct timespec *t, uint64_t ns) {
}
}
void substract_ns(struct timespec *t, uint64_t ns) {
t->tv_nsec -= ns;
while (t->tv_nsec < 0) {
t->tv_sec -= 1;
t->tv_nsec += NSEC_PER_SEC;
}
}
uint64_t calcdiff_ns(struct timespec t1, struct timespec t2) {
uint64_t diff;
diff = NSEC_PER_SEC * (uint64_t)((int)t1.tv_sec - (int)t2.tv_sec);
......@@ -35,6 +44,13 @@ uint64_t calcdiff_ns(struct timespec t1, struct timespec t2) {
return diff;
}
int64_t calcdiff_ns_signed(struct timespec t1, struct timespec t2) {
int64_t diff;
diff = NSEC_PER_SEC * ((int)t1.tv_sec - (int)t2.tv_sec);
diff += ((int)t1.tv_nsec - (int)t2.tv_nsec);
return diff;
}
int _max_(int a, int b) { return a > b ? a : b; }
int _min_(int a, int b) { return a < b ? a : b; }
......@@ -41,7 +41,9 @@
uint64_t ts_to_uint(struct timespec t);
struct timespec uint_to_ts(uint64_t t);
void add_ns(struct timespec *t, uint64_t ns);
void substract_ns(struct timespec *t, uint64_t ns);
uint64_t calcdiff_ns(struct timespec t1, struct timespec t2);
int64_t calcdiff_ns_signed(struct timespec t1, struct timespec t2);
void init_signals(void (*_sighand)(int));
......
......@@ -20,9 +20,9 @@
#include <time.h>
#include <unistd.h>
#include "pulse.h"
#include "gpio.h"
#include "common.h"
#include "gpio.h"
#include "pulse.h"
// Structs
......@@ -31,6 +31,8 @@ typedef struct thread_param {
unsigned int max_cycles;
int priority;
int affinity_cpu;
int poll;
int poll_margin;
uint64_t start_ts;
} thread_param_t;
......@@ -62,11 +64,30 @@ static void help(char *argv[]) {
" -l N_CYCLES RT thread cycles amount (default: infinite)\n"
" -s NS Common start time reference\n"
" -g Use GPIO instead of serial\n"
" -P USEC Do polling to wakeup with specified margin\n"
" -v Verbose\n"
"\n",
argv[0]);
}
static void poll_wakeup(struct timespec ts, int margin) {
int ret;
struct timespec ts_prev, current;
ts_prev = ts;
substract_ns(&ts_prev, margin * 1000);
ret = clock_nanosleep(CLOCK_REALTIME, TIMER_ABSTIME, &ts_prev, NULL);
if (ret) {
fprintf(stderr, "clock_nanosleep returned error: %d, aborting...\n", ret);
exit(EXIT_FAILURE);
}
do {
clock_gettime(CLOCK_REALTIME, &current);
} while(calcdiff_ns_signed(ts, current) > 1000);
}
/*
* Real-time thread:
*/
......@@ -88,11 +109,11 @@ static void *pps_thread(void *p) {
clock_gettime(CLOCK_REALTIME, &next);
if (thread_params.start_ts) {
if(thread_params.start_ts < ts_to_uint(next)) {
if (thread_params.start_ts < ts_to_uint(next)) {
fprintf(stderr, "start timestamp is in the past, aborting...\n");
exit(EXIT_FAILURE);
}
if(thread_params.start_ts > (ts_to_uint(next) + UINT64_C(3600000000000))) {
if (thread_params.start_ts > (ts_to_uint(next) + UINT64_C(3600000000000))) {
fprintf(stderr, "start timestamp is too high, aborting...\n");
exit(EXIT_FAILURE);
}
......@@ -105,18 +126,22 @@ static void *pps_thread(void *p) {
nb_cycles >= ((unsigned int)thread_params.max_cycles))
break;
if(use_gpio)
if (use_gpio)
toggle_gpio();
else
send_pulse();
add_ns(&next, thread_params.interval);
ret = clock_nanosleep(CLOCK_REALTIME, TIMER_ABSTIME, &next, NULL);
if(ret) {
fprintf(stderr, "clock_nanosleep returned error: %d, aborting...\n", ret);
exit(EXIT_FAILURE);
if(thread_params.poll) {
poll_wakeup(next, thread_params.poll_margin);
}
else {
ret = clock_nanosleep(CLOCK_REALTIME, TIMER_ABSTIME, &next, NULL);
if (ret) {
fprintf(stderr, "clock_nanosleep returned error: %d, aborting...\n", ret);
exit(EXIT_FAILURE);
}
}
}
......@@ -139,6 +164,8 @@ int main(int argc, char *argv[]) {
thread_params.priority = 98;
thread_params.affinity_cpu = -1;
thread_params.start_ts = 0;
thread_params.poll = 0;
thread_params.poll_margin = 75;
main_params.refresh_rate = 50000;
main_params.verbose = 0;
use_gpio = 0;
......@@ -153,7 +180,7 @@ int main(int argc, char *argv[]) {
// Process bash options
process_options(argc, argv);
if(use_gpio)
if (use_gpio)
enable_gpio(86);
else
enable_pulse();
......@@ -193,8 +220,8 @@ int main(int argc, char *argv[]) {
usleep(main_params.refresh_rate);
if (main_params.verbose) {
printf("Toggles: %9" PRIu64 "\n", nb_cycles);
printf("\033[%dA", 1);
printf("Toggles: %9" PRIu64 "\n", nb_cycles);
printf("\033[%dA", 1);
}
if (thread_params.max_cycles)
......@@ -204,12 +231,11 @@ int main(int argc, char *argv[]) {
exit(EXIT_SUCCESS);
}
/* Process bash options
*/
static void process_options(int argc, char *argv[]) {
for (;;) {
int c = getopt(argc, argv, "a:hgi:l:p:r:s:v");
int c = getopt(argc, argv, "a:hgi:l:p:r:s:vP:");
if (c == -1) break;
......@@ -242,6 +268,10 @@ static void process_options(int argc, char *argv[]) {
case 'v':
main_params.verbose = 1;
break;
case 'P':
thread_params.poll = 1;
thread_params.poll_margin = atoi(optarg);
break;
}
}
}
IFLAGS += -I ${HOME}/libbpf/include
xdp_%.o: xdp_%.c
clang $(IFLAGS) -isystem /usr/include/arm-linux-gnueabihf -S -target bpf -D __BPF_TRACING__ -Wall -O2 -emit-llvm -c -g -o xdp_$*.ll $^
llc -march=bpf -filetype=obj -o $@ xdp_$*.ll
clean:
$(RM) xdp_*.o xdp_*.ll
.PHONY: clean
/* SPDX-License-Identifier: GPL-2.0 */
/* Copied from $(LINUX)/tools/testing/selftests/bpf/bpf_helpers.h */
/* Added to fix compilation on old Ubuntu systems - please preserve when
updating file! */
#ifndef __always_inline
# define __always_inline inline __attribute__((always_inline))
#endif
#ifndef __BPF_HELPERS_H
#define __BPF_HELPERS_H
/* helper macro to place programs, maps, license in
* different sections in elf_bpf file. Section names
* are interpreted by elf_bpf loader
*/
#define SEC(NAME) __attribute__((section(NAME), used))
/* helper functions called from eBPF programs written in C */
static void *(*bpf_map_lookup_elem)(void *map, void *key) =
(void *) BPF_FUNC_map_lookup_elem;
static int (*bpf_map_update_elem)(void *map, void *key, void *value,
unsigned long long flags) =
(void *) BPF_FUNC_map_update_elem;
static int (*bpf_map_delete_elem)(void *map, void *key) =
(void *) BPF_FUNC_map_delete_elem;
static int (*bpf_map_push_elem)(void *map, void *value,
unsigned long long flags) =
(void *) BPF_FUNC_map_push_elem;
static int (*bpf_map_pop_elem)(void *map, void *value) =
(void *) BPF_FUNC_map_pop_elem;
static int (*bpf_map_peek_elem)(void *map, void *value) =
(void *) BPF_FUNC_map_peek_elem;
static int (*bpf_probe_read)(void *dst, int size, void *unsafe_ptr) =
(void *) BPF_FUNC_probe_read;
static unsigned long long (*bpf_ktime_get_ns)(void) =
(void *) BPF_FUNC_ktime_get_ns;
static int (*bpf_trace_printk)(const char *fmt, int fmt_size, ...) =
(void *) BPF_FUNC_trace_printk;
static void (*bpf_tail_call)(void *ctx, void *map, int index) =
(void *) BPF_FUNC_tail_call;
static unsigned long long (*bpf_get_smp_processor_id)(void) =
(void *) BPF_FUNC_get_smp_processor_id;
static unsigned long long (*bpf_get_current_pid_tgid)(void) =
(void *) BPF_FUNC_get_current_pid_tgid;
static unsigned long long (*bpf_get_current_uid_gid)(void) =
(void *) BPF_FUNC_get_current_uid_gid;
static int (*bpf_get_current_comm)(void *buf, int buf_size) =
(void *) BPF_FUNC_get_current_comm;
static unsigned long long (*bpf_perf_event_read)(void *map,
unsigned long long flags) =
(void *) BPF_FUNC_perf_event_read;
static int (*bpf_clone_redirect)(void *ctx, int ifindex, int flags) =
(void *) BPF_FUNC_clone_redirect;
static int (*bpf_redirect)(int ifindex, int flags) =
(void *) BPF_FUNC_redirect;
static int (*bpf_redirect_map)(void *map, int key, int flags) =
(void *) BPF_FUNC_redirect_map;
static int (*bpf_perf_event_output)(void *ctx, void *map,
unsigned long long flags, void *data,
int size) =
(void *) BPF_FUNC_perf_event_output;
static int (*bpf_get_stackid)(void *ctx, void *map, int flags) =
(void *) BPF_FUNC_get_stackid;
static int (*bpf_probe_write_user)(void *dst, void *src, int size) =
(void *) BPF_FUNC_probe_write_user;
static int (*bpf_current_task_under_cgroup)(void *map, int index) =
(void *) BPF_FUNC_current_task_under_cgroup;
static int (*bpf_skb_get_tunnel_key)(void *ctx, void *key, int size, int flags) =
(void *) BPF_FUNC_skb_get_tunnel_key;
static int (*bpf_skb_set_tunnel_key)(void *ctx, void *key, int size, int flags) =
(void *) BPF_FUNC_skb_set_tunnel_key;
static int (*bpf_skb_get_tunnel_opt)(void *ctx, void *md, int size) =
(void *) BPF_FUNC_skb_get_tunnel_opt;
static int (*bpf_skb_set_tunnel_opt)(void *ctx, void *md, int size) =
(void *) BPF_FUNC_skb_set_tunnel_opt;
static unsigned long long (*bpf_get_prandom_u32)(void) =
(void *) BPF_FUNC_get_prandom_u32;
static int (*bpf_xdp_adjust_head)(void *ctx, int offset) =
(void *) BPF_FUNC_xdp_adjust_head;
static int (*bpf_xdp_adjust_meta)(void *ctx, int offset) =
(void *) BPF_FUNC_xdp_adjust_meta;
static int (*bpf_get_socket_cookie)(void *ctx) =
(void *) BPF_FUNC_get_socket_cookie;
static int (*bpf_setsockopt)(void *ctx, int level, int optname, void *optval,
int optlen) =
(void *) BPF_FUNC_setsockopt;
static int (*bpf_getsockopt)(void *ctx, int level, int optname, void *optval,
int optlen) =
(void *) BPF_FUNC_getsockopt;
static int (*bpf_sock_ops_cb_flags_set)(void *ctx, int flags) =
(void *) BPF_FUNC_sock_ops_cb_flags_set;
static int (*bpf_sk_redirect_map)(void *ctx, void *map, int key, int flags) =
(void *) BPF_FUNC_sk_redirect_map;
static int (*bpf_sk_redirect_hash)(void *ctx, void *map, void *key, int flags) =
(void *) BPF_FUNC_sk_redirect_hash;
static int (*bpf_sock_map_update)(void *map, void *key, void *value,
unsigned long long flags) =
(void *) BPF_FUNC_sock_map_update;
static int (*bpf_sock_hash_update)(void *map, void *key, void *value,
unsigned long long flags) =
(void *) BPF_FUNC_sock_hash_update;
static int (*bpf_perf_event_read_value)(void *map, unsigned long long flags,
void *buf, unsigned int buf_size) =
(void *) BPF_FUNC_perf_event_read_value;
static int (*bpf_perf_prog_read_value)(void *ctx, void *buf,
unsigned int buf_size) =
(void *) BPF_FUNC_perf_prog_read_value;
static int (*bpf_override_return)(void *ctx, unsigned long rc) =
(void *) BPF_FUNC_override_return;
static int (*bpf_msg_redirect_map)(void *ctx, void *map, int key, int flags) =
(void *) BPF_FUNC_msg_redirect_map;
static int (*bpf_msg_redirect_hash)(void *ctx,
void *map, void *key, int flags) =
(void *) BPF_FUNC_msg_redirect_hash;
static int (*bpf_msg_apply_bytes)(void *ctx, int len) =
(void *) BPF_FUNC_msg_apply_bytes;
static int (*bpf_msg_cork_bytes)(void *ctx, int len) =
(void *) BPF_FUNC_msg_cork_bytes;
static int (*bpf_msg_pull_data)(void *ctx, int start, int end, int flags) =
(void *) BPF_FUNC_msg_pull_data;
static int (*bpf_msg_push_data)(void *ctx, int start, int end, int flags) =
(void *) BPF_FUNC_msg_push_data;
static int (*bpf_msg_pop_data)(void *ctx, int start, int cut, int flags) =
(void *) BPF_FUNC_msg_pop_data;
static int (*bpf_bind)(void *ctx, void *addr, int addr_len) =
(void *) BPF_FUNC_bind;
static int (*bpf_xdp_adjust_tail)(void *ctx, int offset) =
(void *) BPF_FUNC_xdp_adjust_tail;
static int (*bpf_skb_get_xfrm_state)(void *ctx, int index, void *state,
int size, int flags) =
(void *) BPF_FUNC_skb_get_xfrm_state;
static int (*bpf_sk_select_reuseport)(void *ctx, void *map, void *key, __u32 flags) =
(void *) BPF_FUNC_sk_select_reuseport;
static int (*bpf_get_stack)(void *ctx, void *buf, int size, int flags) =
(void *) BPF_FUNC_get_stack;
static int (*bpf_fib_lookup)(void *ctx, struct bpf_fib_lookup *params,
int plen, __u32 flags) =
(void *) BPF_FUNC_fib_lookup;
static int (*bpf_lwt_push_encap)(void *ctx, unsigned int type, void *hdr,
unsigned int len) =
(void *) BPF_FUNC_lwt_push_encap;
static int (*bpf_lwt_seg6_store_bytes)(void *ctx, unsigned int offset,
void *from, unsigned int len) =
(void *) BPF_FUNC_lwt_seg6_store_bytes;
static int (*bpf_lwt_seg6_action)(void *ctx, unsigned int action, void *param,
unsigned int param_len) =
(void *) BPF_FUNC_lwt_seg6_action;
static int (*bpf_lwt_seg6_adjust_srh)(void *ctx, unsigned int offset,
unsigned int len) =
(void *) BPF_FUNC_lwt_seg6_adjust_srh;
static int (*bpf_rc_repeat)(void *ctx) =
(void *) BPF_FUNC_rc_repeat;
static int (*bpf_rc_keydown)(void *ctx, unsigned int protocol,
unsigned long long scancode, unsigned int toggle) =
(void *) BPF_FUNC_rc_keydown;
static unsigned long long (*bpf_get_current_cgroup_id)(void) =
(void *) BPF_FUNC_get_current_cgroup_id;
static void *(*bpf_get_local_storage)(void *map, unsigned long long flags) =
(void *) BPF_FUNC_get_local_storage;
static unsigned long long (*bpf_skb_cgroup_id)(void *ctx) =
(void *) BPF_FUNC_skb_cgroup_id;
static unsigned long long (*bpf_skb_ancestor_cgroup_id)(void *ctx, int level) =
(void *) BPF_FUNC_skb_ancestor_cgroup_id;
static struct bpf_sock *(*bpf_sk_lookup_tcp)(void *ctx,
struct bpf_sock_tuple *tuple,
int size, unsigned long long netns_id,
unsigned long long flags) =
(void *) BPF_FUNC_sk_lookup_tcp;
static struct bpf_sock *(*bpf_sk_lookup_udp)(void *ctx,
struct bpf_sock_tuple *tuple,
int size, unsigned long long netns_id,
unsigned long long flags) =
(void *) BPF_FUNC_sk_lookup_udp;
static int (*bpf_sk_release)(struct bpf_sock *sk) =
(void *) BPF_FUNC_sk_release;
static int (*bpf_skb_vlan_push)(void *ctx, __be16 vlan_proto, __u16 vlan_tci) =
(void *) BPF_FUNC_skb_vlan_push;
static int (*bpf_skb_vlan_pop)(void *ctx) =
(void *) BPF_FUNC_skb_vlan_pop;
static int (*bpf_rc_pointer_rel)(void *ctx, int rel_x, int rel_y) =
(void *) BPF_FUNC_rc_pointer_rel;
static void (*bpf_spin_lock)(struct bpf_spin_lock *lock) =
(void *) BPF_FUNC_spin_lock;
static void (*bpf_spin_unlock)(struct bpf_spin_lock *lock) =
(void *) BPF_FUNC_spin_unlock;
static struct bpf_sock *(*bpf_sk_fullsock)(struct bpf_sock *sk) =
(void *) BPF_FUNC_sk_fullsock;
static struct bpf_tcp_sock *(*bpf_tcp_sock)(struct bpf_sock *sk) =
(void *) BPF_FUNC_tcp_sock;
static struct bpf_sock *(*bpf_get_listener_sock)(struct bpf_sock *sk) =
(void *) BPF_FUNC_get_listener_sock;
static int (*bpf_skb_ecn_set_ce)(void *ctx) =
(void *) BPF_FUNC_skb_ecn_set_ce;
/* llvm builtin functions that eBPF C program may use to
* emit BPF_LD_ABS and BPF_LD_IND instructions
*/
struct sk_buff;
unsigned long long load_byte(void *skb,
unsigned long long off) asm("llvm.bpf.load.byte");
unsigned long long load_half(void *skb,
unsigned long long off) asm("llvm.bpf.load.half");
unsigned long long load_word(void *skb,
unsigned long long off) asm("llvm.bpf.load.word");
/* a helper structure used by eBPF C program
* to describe map attributes to elf_bpf loader
*/
struct bpf_map_def {
unsigned int type;
unsigned int key_size;
unsigned int value_size;
unsigned int max_entries;
unsigned int map_flags;
unsigned int inner_map_idx;
unsigned int numa_node;
};
#define BPF_ANNOTATE_KV_PAIR(name, type_key, type_val) \
struct ____btf_map_##name { \
type_key key; \
type_val value; \
}; \
struct ____btf_map_##name \
__attribute__ ((section(".maps." #name), used)) \
____btf_map_##name = { }
static int (*bpf_skb_load_bytes)(void *ctx, int off, void *to, int len) =
(void *) BPF_FUNC_skb_load_bytes;
static int (*bpf_skb_load_bytes_relative)(void *ctx, int off, void *to, int len, __u32 start_header) =
(void *) BPF_FUNC_skb_load_bytes_relative;
static int (*bpf_skb_store_bytes)(void *ctx, int off, void *from, int len, int flags) =
(void *) BPF_FUNC_skb_store_bytes;
static int (*bpf_l3_csum_replace)(void *ctx, int off, int from, int to, int flags) =
(void *) BPF_FUNC_l3_csum_replace;
static int (*bpf_l4_csum_replace)(void *ctx, int off, int from, int to, int flags) =
(void *) BPF_FUNC_l4_csum_replace;
static int (*bpf_csum_diff)(void *from, int from_size, void *to, int to_size, int seed) =
(void *) BPF_FUNC_csum_diff;
static int (*bpf_skb_under_cgroup)(void *ctx, void *map, int index) =
(void *) BPF_FUNC_skb_under_cgroup;
static int (*bpf_skb_change_head)(void *, int len, int flags) =
(void *) BPF_FUNC_skb_change_head;
static int (*bpf_skb_pull_data)(void *, int len) =
(void *) BPF_FUNC_skb_pull_data;
static unsigned int (*bpf_get_cgroup_classid)(void *ctx) =
(void *) BPF_FUNC_get_cgroup_classid;
static unsigned int (*bpf_get_route_realm)(void *ctx) =
(void *) BPF_FUNC_get_route_realm;
static int (*bpf_skb_change_proto)(void *ctx, __be16 proto, __u64 flags) =
(void *) BPF_FUNC_skb_change_proto;
static int (*bpf_skb_change_type)(void *ctx, __u32 type) =
(void *) BPF_FUNC_skb_change_type;
static unsigned int (*bpf_get_hash_recalc)(void *ctx) =
(void *) BPF_FUNC_get_hash_recalc;
static unsigned long long (*bpf_get_current_task)(void *ctx) =
(void *) BPF_FUNC_get_current_task;
static int (*bpf_skb_change_tail)(void *ctx, __u32 len, __u64 flags) =
(void *) BPF_FUNC_skb_change_tail;
static long long (*bpf_csum_update)(void *ctx, __u32 csum) =
(void *) BPF_FUNC_csum_update;
static void (*bpf_set_hash_invalid)(void *ctx) =
(void *) BPF_FUNC_set_hash_invalid;
static int (*bpf_get_numa_node_id)(void) =
(void *) BPF_FUNC_get_numa_node_id;
static int (*bpf_probe_read_str)(void *ctx, __u32 size,
const void *unsafe_ptr) =
(void *) BPF_FUNC_probe_read_str;
static unsigned int (*bpf_get_socket_uid)(void *ctx) =
(void *) BPF_FUNC_get_socket_uid;
static unsigned int (*bpf_set_hash)(void *ctx, __u32 hash) =
(void *) BPF_FUNC_set_hash;
static int (*bpf_skb_adjust_room)(void *ctx, __s32 len_diff, __u32 mode,
unsigned long long flags) =
(void *) BPF_FUNC_skb_adjust_room;
/* Scan the ARCH passed in from ARCH env variable (see Makefile) */
#if defined(__TARGET_ARCH_x86)
#define bpf_target_x86
#define bpf_target_defined
#elif defined(__TARGET_ARCH_s930x)
#define bpf_target_s930x
#define bpf_target_defined
#elif defined(__TARGET_ARCH_arm64)
#define bpf_target_arm64
#define bpf_target_defined
#elif defined(__TARGET_ARCH_mips)
#define bpf_target_mips
#define bpf_target_defined
#elif defined(__TARGET_ARCH_powerpc)
#define bpf_target_powerpc
#define bpf_target_defined
#elif defined(__TARGET_ARCH_sparc)
#define bpf_target_sparc
#define bpf_target_defined
#else
#undef bpf_target_defined
#endif
/* Fall back to what the compiler says */
#ifndef bpf_target_defined
#if defined(__x86_64__)
#define bpf_target_x86
#elif defined(__s390x__)
#define bpf_target_s930x
#elif defined(__aarch64__)
#define bpf_target_arm64
#elif defined(__mips__)
#define bpf_target_mips
#elif defined(__powerpc__)
#define bpf_target_powerpc
#elif defined(__sparc__)
#define bpf_target_sparc
#endif
#endif
#if defined(bpf_target_x86)
#define PT_REGS_PARM1(x) ((x)->di)
#define PT_REGS_PARM2(x) ((x)->si)
#define PT_REGS_PARM3(x) ((x)->dx)
#define PT_REGS_PARM4(x) ((x)->cx)
#define PT_REGS_PARM5(x) ((x)->r8)
#define PT_REGS_RET(x) ((x)->sp)
#define PT_REGS_FP(x) ((x)->bp)
#define PT_REGS_RC(x) ((x)->ax)
#define PT_REGS_SP(x) ((x)->sp)
#define PT_REGS_IP(x) ((x)->ip)
#elif defined(bpf_target_s390x)
#define PT_REGS_PARM1(x) ((x)->gprs[2])
#define PT_REGS_PARM2(x) ((x)->gprs[3])
#define PT_REGS_PARM3(x) ((x)->gprs[4])
#define PT_REGS_PARM4(x) ((x)->gprs[5])
#define PT_REGS_PARM5(x) ((x)->gprs[6])
#define PT_REGS_RET(x) ((x)->gprs[14])
#define PT_REGS_FP(x) ((x)->gprs[11]) /* Works only with CONFIG_FRAME_POINTER */
#define PT_REGS_RC(x) ((x)->gprs[2])
#define PT_REGS_SP(x) ((x)->gprs[15])
#define PT_REGS_IP(x) ((x)->psw.addr)
#elif defined(bpf_target_arm64)
#define PT_REGS_PARM1(x) ((x)->regs[0])
#define PT_REGS_PARM2(x) ((x)->regs[1])
#define PT_REGS_PARM3(x) ((x)->regs[2])
#define PT_REGS_PARM4(x) ((x)->regs[3])
#define PT_REGS_PARM5(x) ((x)->regs[4])
#define PT_REGS_RET(x) ((x)->regs[30])
#define PT_REGS_FP(x) ((x)->regs[29]) /* Works only with CONFIG_FRAME_POINTER */
#define PT_REGS_RC(x) ((x)->regs[0])
#define PT_REGS_SP(x) ((x)->sp)
#define PT_REGS_IP(x) ((x)->pc)
#elif defined(bpf_target_mips)
#define PT_REGS_PARM1(x) ((x)->regs[4])
#define PT_REGS_PARM2(x) ((x)->regs[5])
#define PT_REGS_PARM3(x) ((x)->regs[6])
#define PT_REGS_PARM4(x) ((x)->regs[7])
#define PT_REGS_PARM5(x) ((x)->regs[8])
#define PT_REGS_RET(x) ((x)->regs[31])
#define PT_REGS_FP(x) ((x)->regs[30]) /* Works only with CONFIG_FRAME_POINTER */
#define PT_REGS_RC(x) ((x)->regs[1])
#define PT_REGS_SP(x) ((x)->regs[29])
#define PT_REGS_IP(x) ((x)->cp0_epc)
#elif defined(bpf_target_powerpc)
#define PT_REGS_PARM1(x) ((x)->gpr[3])
#define PT_REGS_PARM2(x) ((x)->gpr[4])
#define PT_REGS_PARM3(x) ((x)->gpr[5])
#define PT_REGS_PARM4(x) ((x)->gpr[6])
#define PT_REGS_PARM5(x) ((x)->gpr[7])
#define PT_REGS_RC(x) ((x)->gpr[3])
#define PT_REGS_SP(x) ((x)->sp)
#define PT_REGS_IP(x) ((x)->nip)
#elif defined(bpf_target_sparc)
#define PT_REGS_PARM1(x) ((x)->u_regs[UREG_I0])
#define PT_REGS_PARM2(x) ((x)->u_regs[UREG_I1])
#define PT_REGS_PARM3(x) ((x)->u_regs[UREG_I2])
#define PT_REGS_PARM4(x) ((x)->u_regs[UREG_I3])
#define PT_REGS_PARM5(x) ((x)->u_regs[UREG_I4])
#define PT_REGS_RET(x) ((x)->u_regs[UREG_I7])
#define PT_REGS_RC(x) ((x)->u_regs[UREG_I0])
#define PT_REGS_SP(x) ((x)->u_regs[UREG_FP])
/* Should this also be a bpf_target check for the sparc case? */
#if defined(__arch64__)
#define PT_REGS_IP(x) ((x)->tpc)
#else
#define PT_REGS_IP(x) ((x)->pc)
#endif
#endif
#ifdef bpf_target_powerpc
#define BPF_KPROBE_READ_RET_IP(ip, ctx) ({ (ip) = (ctx)->link; })
#define BPF_KRETPROBE_READ_RET_IP BPF_KPROBE_READ_RET_IP
#elif bpf_target_sparc
#define BPF_KPROBE_READ_RET_IP(ip, ctx) ({ (ip) = PT_REGS_RET(ctx); })
#define BPF_KRETPROBE_READ_RET_IP BPF_KPROBE_READ_RET_IP
#else
#define BPF_KPROBE_READ_RET_IP(ip, ctx) ({ \
bpf_probe_read(&(ip), sizeof(ip), (void *)PT_REGS_RET(ctx)); })
#define BPF_KRETPROBE_READ_RET_IP(ip, ctx) ({ \
bpf_probe_read(&(ip), sizeof(ip), \
(void *)(PT_REGS_FP(ctx) + sizeof(ip))); })
#endif
#endif
#define KBUILD_MODNAME "blub"
#include <linux/bpf.h>
#include <linux/if_ether.h>
#include <linux/if_link.h>
#include <linux/if_xdp.h>
#include <linux/in.h>
#include <linux/ip.h>
#include <linux/udp.h>
#include "bpf_helpers.h"
#define bpf_printk(fmt, ...) \
({ \
char ____fmt[] = fmt; \
bpf_trace_printk(____fmt, sizeof(____fmt), ##__VA_ARGS__); \
})
struct bpf_map_def SEC("maps") xsks_map = {
.type = BPF_MAP_TYPE_XSKMAP,
.key_size = sizeof(int),
.value_size = sizeof(int),
.max_entries = 64,
};
static inline int parse_ipv4(void *data, unsigned long long nh_off,
void *data_end) {
struct iphdr *iph = data + nh_off;
if ((void *)(iph + 1) > data_end) return 0;
return iph->protocol;
}
SEC("xdp_sock")
int xdp_sock_prog(struct xdp_md *ctx) {
void *data_end = (void *)(long)ctx->data_end;
void *data = (void *)(long)ctx->data;
struct ethhdr *eth = data;
int idx = ctx->rx_queue_index;
unsigned int ipproto = 0;
unsigned long long nh_off;
/* Check if it's a UDP frame: If UDP -> Redirect to active xsk for user
* space. If not -> pass to stack.
*/
nh_off = sizeof(*eth);
if (data + nh_off > data_end) return XDP_PASS;
if (eth->h_proto == __builtin_bswap16(ETH_P_IP))
ipproto = parse_ipv4(data, nh_off, data_end);
if (ipproto != IPPROTO_UDP) return XDP_PASS;
/* If socket bound to rx_queue then redirect to user space */
if (bpf_map_lookup_elem(&xsks_map, &idx))
return bpf_redirect_map(&xsks_map, idx, 0);
/* Else pass to Linux' network stack */
return XDP_PASS;
}
char _license[] SEC("license") = "GPL";
/* SPDX-License-Identifier: GPL-2.0 */
#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>
SEC("xdp")
int xdp_prog_simple(struct xdp_md *ctx)
{
return XDP_PASS;
}
char _license[] SEC("license") = "GPL";