Commit a693ff3e authored by Maciej Fijalkowski's avatar Maciej Fijalkowski Committed by Daniel Borkmann

selftests/xsk: Add support for executing tests on physical device

Currently, architecture of xdpxceiver is designed strictly for
conducting veth based tests. Veth pair is created together with a
network namespace and one of the veth interfaces is moved to the
mentioned netns. Then, separate threads for Tx and Rx are spawned which
will utilize described setup.

Infrastructure described in the paragraph above cannot be used for
testing AF_XDP support on physical devices. That testing will be
conducted on a single network interface and same queue. Xskxceiver
needs to be extended to distinguish between veth tests and physical
interface tests.

Since the same iface/queue id pair will be used by both Tx/Rx threads for
physical device testing, the Tx thread, which happens to run after the Rx
thread, is going to create an XSK socket with the shared umem flag. In order to
track this setting throughout the lifetime of spawned threads, introduce
'shared_umem' boolean variable to struct ifobject and set it to true
when xdpxceiver is run against physical device. In such case, UMEM size
needs to be doubled, so half of it will be used by Rx thread and other
half by Tx thread. For two step based test types, value of XSKMAP
element under key 0 has to be updated as there is now another socket for
the second step. Also, to avoid race conditions when destroying XSK
resources, move this activity to the main thread after the spawned Rx and Tx
threads have finished their jobs. This way it is possible to gracefully
remove the shared umem without introducing synchronization mechanisms.

To run xsk selftests suite on physical device, append "-i $IFACE" when
invoking test_xsk.sh. For veth based tests, simply skip it. When "-i
$IFACE" is in place, under the hood test_xsk.sh will use $IFACE for both
interfaces supplied to xdpxceiver, which in turn will interpret that
this execution of test suite is for a physical device.

Note that currently this makes it possible only to test SKB and DRV mode
(in case underlying device has native XDP support). ZC testing support
is added in a later patch.
Signed-off-by: default avatarMaciej Fijalkowski <maciej.fijalkowski@intel.com>
Signed-off-by: default avatarDaniel Borkmann <daniel@iogearbox.net>
Acked-by: default avatarMagnus Karlsson <magnus.karlsson@intel.com>
Link: https://lore.kernel.org/bpf/20220901114813.16275-5-maciej.fijalkowski@intel.com
parent 24037ba7
...@@ -73,14 +73,20 @@ ...@@ -73,14 +73,20 @@
# #
# Run and dump packet contents: # Run and dump packet contents:
# sudo ./test_xsk.sh -D # sudo ./test_xsk.sh -D
#
# Run test suite for physical device in loopback mode
# sudo ./test_xsk.sh -i IFACE
. xsk_prereqs.sh . xsk_prereqs.sh
while getopts "vD" flag ETH=""
while getopts "vDi:" flag
do do
case "${flag}" in case "${flag}" in
v) verbose=1;; v) verbose=1;;
D) dump_pkts=1;; D) dump_pkts=1;;
i) ETH=${OPTARG};;
esac esac
done done
...@@ -132,18 +138,25 @@ setup_vethPairs() { ...@@ -132,18 +138,25 @@ setup_vethPairs() {
ip link set ${VETH0} up ip link set ${VETH0} up
} }
validate_root_exec if [ ! -z $ETH ]; then
validate_veth_support ${VETH0} VETH0=${ETH}
validate_ip_utility VETH1=${ETH}
setup_vethPairs NS1=""
else
retval=$? validate_root_exec
if [ $retval -ne 0 ]; then validate_veth_support ${VETH0}
test_status $retval "${TEST_NAME}" validate_ip_utility
cleanup_exit ${VETH0} ${VETH1} ${NS1} setup_vethPairs
exit $retval
retval=$?
if [ $retval -ne 0 ]; then
test_status $retval "${TEST_NAME}"
cleanup_exit ${VETH0} ${VETH1} ${NS1}
exit $retval
fi
fi fi
if [[ $verbose -eq 1 ]]; then if [[ $verbose -eq 1 ]]; then
ARGS+="-v " ARGS+="-v "
fi fi
...@@ -152,26 +165,33 @@ if [[ $dump_pkts -eq 1 ]]; then ...@@ -152,26 +165,33 @@ if [[ $dump_pkts -eq 1 ]]; then
ARGS="-D " ARGS="-D "
fi fi
retval=$?
test_status $retval "${TEST_NAME}" test_status $retval "${TEST_NAME}"
## START TESTS ## START TESTS
statusList=() statusList=()
TEST_NAME="XSK_SELFTESTS_SOFTIRQ" TEST_NAME="XSK_SELFTESTS_${VETH0}_SOFTIRQ"
exec_xskxceiver exec_xskxceiver
cleanup_exit ${VETH0} ${VETH1} ${NS1} if [ -z $ETH ]; then
TEST_NAME="XSK_SELFTESTS_BUSY_POLL" cleanup_exit ${VETH0} ${VETH1} ${NS1}
fi
TEST_NAME="XSK_SELFTESTS_${VETH0}_BUSY_POLL"
busy_poll=1 busy_poll=1
setup_vethPairs if [ -z $ETH ]; then
setup_vethPairs
fi
exec_xskxceiver exec_xskxceiver
## END TESTS ## END TESTS
cleanup_exit ${VETH0} ${VETH1} ${NS1} if [ -z $ETH ]; then
cleanup_exit ${VETH0} ${VETH1} ${NS1}
fi
failures=0 failures=0
echo -e "\nSummary:" echo -e "\nSummary:"
......
...@@ -301,8 +301,8 @@ static void enable_busy_poll(struct xsk_socket_info *xsk) ...@@ -301,8 +301,8 @@ static void enable_busy_poll(struct xsk_socket_info *xsk)
exit_with_error(errno); exit_with_error(errno);
} }
static int xsk_configure_socket(struct xsk_socket_info *xsk, struct xsk_umem_info *umem, static int __xsk_configure_socket(struct xsk_socket_info *xsk, struct xsk_umem_info *umem,
struct ifobject *ifobject, bool shared) struct ifobject *ifobject, bool shared)
{ {
struct xsk_socket_config cfg = {}; struct xsk_socket_config cfg = {};
struct xsk_ring_cons *rxr; struct xsk_ring_cons *rxr;
...@@ -448,6 +448,9 @@ static void __test_spec_init(struct test_spec *test, struct ifobject *ifobj_tx, ...@@ -448,6 +448,9 @@ static void __test_spec_init(struct test_spec *test, struct ifobject *ifobj_tx,
memset(ifobj->umem, 0, sizeof(*ifobj->umem)); memset(ifobj->umem, 0, sizeof(*ifobj->umem));
ifobj->umem->num_frames = DEFAULT_UMEM_BUFFERS; ifobj->umem->num_frames = DEFAULT_UMEM_BUFFERS;
ifobj->umem->frame_size = XSK_UMEM__DEFAULT_FRAME_SIZE; ifobj->umem->frame_size = XSK_UMEM__DEFAULT_FRAME_SIZE;
if (ifobj->shared_umem && ifobj->rx_on)
ifobj->umem->base_addr = DEFAULT_UMEM_BUFFERS *
XSK_UMEM__DEFAULT_FRAME_SIZE;
for (j = 0; j < MAX_SOCKETS; j++) { for (j = 0; j < MAX_SOCKETS; j++) {
memset(&ifobj->xsk_arr[j], 0, sizeof(ifobj->xsk_arr[j])); memset(&ifobj->xsk_arr[j], 0, sizeof(ifobj->xsk_arr[j]));
...@@ -1146,6 +1149,70 @@ static int validate_tx_invalid_descs(struct ifobject *ifobject) ...@@ -1146,6 +1149,70 @@ static int validate_tx_invalid_descs(struct ifobject *ifobject)
return TEST_PASS; return TEST_PASS;
} }
static void xsk_configure_socket(struct test_spec *test, struct ifobject *ifobject,
struct xsk_umem_info *umem, bool tx)
{
int i, ret;
for (i = 0; i < test->nb_sockets; i++) {
bool shared = (ifobject->shared_umem && tx) ? true : !!i;
u32 ctr = 0;
while (ctr++ < SOCK_RECONF_CTR) {
ret = __xsk_configure_socket(&ifobject->xsk_arr[i], umem,
ifobject, shared);
if (!ret)
break;
/* Retry if it fails as xsk_socket__create() is asynchronous */
if (ctr >= SOCK_RECONF_CTR)
exit_with_error(-ret);
usleep(USLEEP_MAX);
}
if (ifobject->busy_poll)
enable_busy_poll(&ifobject->xsk_arr[i]);
}
}
static void thread_common_ops_tx(struct test_spec *test, struct ifobject *ifobject)
{
xsk_configure_socket(test, ifobject, test->ifobj_rx->umem, true);
ifobject->xsk = &ifobject->xsk_arr[0];
ifobject->xsk_map_fd = test->ifobj_rx->xsk_map_fd;
memcpy(ifobject->umem, test->ifobj_rx->umem, sizeof(struct xsk_umem_info));
}
static void xsk_populate_fill_ring(struct xsk_umem_info *umem, struct pkt_stream *pkt_stream)
{
u32 idx = 0, i, buffers_to_fill;
int ret;
if (umem->num_frames < XSK_RING_PROD__DEFAULT_NUM_DESCS)
buffers_to_fill = umem->num_frames;
else
buffers_to_fill = XSK_RING_PROD__DEFAULT_NUM_DESCS;
ret = xsk_ring_prod__reserve(&umem->fq, buffers_to_fill, &idx);
if (ret != buffers_to_fill)
exit_with_error(ENOSPC);
for (i = 0; i < buffers_to_fill; i++) {
u64 addr;
if (pkt_stream->use_addr_for_fill) {
struct pkt *pkt = pkt_stream_get_pkt(pkt_stream, i);
if (!pkt)
break;
addr = pkt->addr;
} else {
addr = i * umem->frame_size;
}
*xsk_ring_prod__fill_addr(&umem->fq, idx++) = addr;
}
xsk_ring_prod__submit(&umem->fq, buffers_to_fill);
}
static void thread_common_ops(struct test_spec *test, struct ifobject *ifobject) static void thread_common_ops(struct test_spec *test, struct ifobject *ifobject)
{ {
u64 umem_sz = ifobject->umem->num_frames * ifobject->umem->frame_size; u64 umem_sz = ifobject->umem->num_frames * ifobject->umem->frame_size;
...@@ -1153,13 +1220,15 @@ static void thread_common_ops(struct test_spec *test, struct ifobject *ifobject) ...@@ -1153,13 +1220,15 @@ static void thread_common_ops(struct test_spec *test, struct ifobject *ifobject)
LIBBPF_OPTS(bpf_xdp_query_opts, opts); LIBBPF_OPTS(bpf_xdp_query_opts, opts);
int ret, ifindex; int ret, ifindex;
void *bufs; void *bufs;
u32 i;
ifobject->ns_fd = switch_namespace(ifobject->nsname); ifobject->ns_fd = switch_namespace(ifobject->nsname);
if (ifobject->umem->unaligned_mode) if (ifobject->umem->unaligned_mode)
mmap_flags |= MAP_HUGETLB; mmap_flags |= MAP_HUGETLB;
if (ifobject->shared_umem)
umem_sz *= 2;
bufs = mmap(NULL, umem_sz, PROT_READ | PROT_WRITE, mmap_flags, -1, 0); bufs = mmap(NULL, umem_sz, PROT_READ | PROT_WRITE, mmap_flags, -1, 0);
if (bufs == MAP_FAILED) if (bufs == MAP_FAILED)
exit_with_error(errno); exit_with_error(errno);
...@@ -1168,24 +1237,9 @@ static void thread_common_ops(struct test_spec *test, struct ifobject *ifobject) ...@@ -1168,24 +1237,9 @@ static void thread_common_ops(struct test_spec *test, struct ifobject *ifobject)
if (ret) if (ret)
exit_with_error(-ret); exit_with_error(-ret);
for (i = 0; i < test->nb_sockets; i++) { xsk_populate_fill_ring(ifobject->umem, ifobject->pkt_stream);
u32 ctr = 0;
while (ctr++ < SOCK_RECONF_CTR) {
ret = xsk_configure_socket(&ifobject->xsk_arr[i], ifobject->umem,
ifobject, !!i);
if (!ret)
break;
/* Retry if it fails as xsk_socket__create() is asynchronous */
if (ctr >= SOCK_RECONF_CTR)
exit_with_error(-ret);
usleep(USLEEP_MAX);
}
if (ifobject->busy_poll) xsk_configure_socket(test, ifobject, ifobject->umem, false);
enable_busy_poll(&ifobject->xsk_arr[i]);
}
ifobject->xsk = &ifobject->xsk_arr[0]; ifobject->xsk = &ifobject->xsk_arr[0];
...@@ -1221,22 +1275,18 @@ static void thread_common_ops(struct test_spec *test, struct ifobject *ifobject) ...@@ -1221,22 +1275,18 @@ static void thread_common_ops(struct test_spec *test, struct ifobject *ifobject)
exit_with_error(-ret); exit_with_error(-ret);
} }
static void testapp_cleanup_xsk_res(struct ifobject *ifobj)
{
print_verbose("Destroying socket\n");
xsk_socket__delete(ifobj->xsk->xsk);
munmap(ifobj->umem->buffer, ifobj->umem->num_frames * ifobj->umem->frame_size);
xsk_umem__delete(ifobj->umem->umem);
}
static void *worker_testapp_validate_tx(void *arg) static void *worker_testapp_validate_tx(void *arg)
{ {
struct test_spec *test = (struct test_spec *)arg; struct test_spec *test = (struct test_spec *)arg;
struct ifobject *ifobject = test->ifobj_tx; struct ifobject *ifobject = test->ifobj_tx;
int err; int err;
if (test->current_step == 1) if (test->current_step == 1) {
thread_common_ops(test, ifobject); if (!ifobject->shared_umem)
thread_common_ops(test, ifobject);
else
thread_common_ops_tx(test, ifobject);
}
print_verbose("Sending %d packets on interface %s\n", ifobject->pkt_stream->nb_pkts, print_verbose("Sending %d packets on interface %s\n", ifobject->pkt_stream->nb_pkts,
ifobject->ifname); ifobject->ifname);
...@@ -1247,53 +1297,23 @@ static void *worker_testapp_validate_tx(void *arg) ...@@ -1247,53 +1297,23 @@ static void *worker_testapp_validate_tx(void *arg)
if (err) if (err)
report_failure(test); report_failure(test);
if (test->total_steps == test->current_step || err)
testapp_cleanup_xsk_res(ifobject);
pthread_exit(NULL); pthread_exit(NULL);
} }
static void xsk_populate_fill_ring(struct xsk_umem_info *umem, struct pkt_stream *pkt_stream)
{
u32 idx = 0, i, buffers_to_fill;
int ret;
if (umem->num_frames < XSK_RING_PROD__DEFAULT_NUM_DESCS)
buffers_to_fill = umem->num_frames;
else
buffers_to_fill = XSK_RING_PROD__DEFAULT_NUM_DESCS;
ret = xsk_ring_prod__reserve(&umem->fq, buffers_to_fill, &idx);
if (ret != buffers_to_fill)
exit_with_error(ENOSPC);
for (i = 0; i < buffers_to_fill; i++) {
u64 addr;
if (pkt_stream->use_addr_for_fill) {
struct pkt *pkt = pkt_stream_get_pkt(pkt_stream, i);
if (!pkt)
break;
addr = pkt->addr;
} else {
addr = i * umem->frame_size;
}
*xsk_ring_prod__fill_addr(&umem->fq, idx++) = addr;
}
xsk_ring_prod__submit(&umem->fq, buffers_to_fill);
}
static void *worker_testapp_validate_rx(void *arg) static void *worker_testapp_validate_rx(void *arg)
{ {
struct test_spec *test = (struct test_spec *)arg; struct test_spec *test = (struct test_spec *)arg;
struct ifobject *ifobject = test->ifobj_rx; struct ifobject *ifobject = test->ifobj_rx;
struct pollfd fds = { }; struct pollfd fds = { };
int id = 0;
int err; int err;
if (test->current_step == 1) if (test->current_step == 1) {
thread_common_ops(test, ifobject); thread_common_ops(test, ifobject);
} else {
xsk_populate_fill_ring(ifobject->umem, ifobject->pkt_stream); bpf_map_delete_elem(ifobject->xsk_map_fd, &id);
xsk_socket__update_xskmap(ifobject->xsk->xsk, ifobject->xsk_map_fd);
}
fds.fd = xsk_socket__fd(ifobject->xsk->xsk); fds.fd = xsk_socket__fd(ifobject->xsk->xsk);
fds.events = POLLIN; fds.events = POLLIN;
...@@ -1311,25 +1331,38 @@ static void *worker_testapp_validate_rx(void *arg) ...@@ -1311,25 +1331,38 @@ static void *worker_testapp_validate_rx(void *arg)
pthread_mutex_unlock(&pacing_mutex); pthread_mutex_unlock(&pacing_mutex);
} }
if (test->total_steps == test->current_step || err)
testapp_cleanup_xsk_res(ifobject);
pthread_exit(NULL); pthread_exit(NULL);
} }
static void testapp_clean_xsk_umem(struct ifobject *ifobj)
{
u64 umem_sz = ifobj->umem->num_frames * ifobj->umem->frame_size;
if (ifobj->shared_umem)
umem_sz *= 2;
xsk_umem__delete(ifobj->umem->umem);
munmap(ifobj->umem->buffer, umem_sz);
}
static int testapp_validate_traffic_single_thread(struct test_spec *test, struct ifobject *ifobj, static int testapp_validate_traffic_single_thread(struct test_spec *test, struct ifobject *ifobj,
enum test_type type) enum test_type type)
{ {
bool old_shared_umem = ifobj->shared_umem;
pthread_t t0; pthread_t t0;
if (pthread_barrier_init(&barr, NULL, 2)) if (pthread_barrier_init(&barr, NULL, 2))
exit_with_error(errno); exit_with_error(errno);
test->current_step++; test->current_step++;
if (type == TEST_TYPE_POLL_RXQ_TMOUT) if (type == TEST_TYPE_POLL_RXQ_TMOUT)
pkt_stream_reset(ifobj->pkt_stream); pkt_stream_reset(ifobj->pkt_stream);
pkts_in_flight = 0; pkts_in_flight = 0;
/*Spawn thread */ test->ifobj_rx->shared_umem = false;
test->ifobj_tx->shared_umem = false;
/* Spawn thread */
pthread_create(&t0, NULL, ifobj->func_ptr, test); pthread_create(&t0, NULL, ifobj->func_ptr, test);
if (type != TEST_TYPE_POLL_TXQ_TMOUT) if (type != TEST_TYPE_POLL_TXQ_TMOUT)
...@@ -1340,6 +1373,14 @@ static int testapp_validate_traffic_single_thread(struct test_spec *test, struct ...@@ -1340,6 +1373,14 @@ static int testapp_validate_traffic_single_thread(struct test_spec *test, struct
pthread_join(t0, NULL); pthread_join(t0, NULL);
if (test->total_steps == test->current_step || test->fail) {
xsk_socket__delete(ifobj->xsk->xsk);
testapp_clean_xsk_umem(ifobj);
}
test->ifobj_rx->shared_umem = old_shared_umem;
test->ifobj_tx->shared_umem = old_shared_umem;
return !!test->fail; return !!test->fail;
} }
...@@ -1369,6 +1410,14 @@ static int testapp_validate_traffic(struct test_spec *test) ...@@ -1369,6 +1410,14 @@ static int testapp_validate_traffic(struct test_spec *test)
pthread_join(t1, NULL); pthread_join(t1, NULL);
pthread_join(t0, NULL); pthread_join(t0, NULL);
if (test->total_steps == test->current_step || test->fail) {
xsk_socket__delete(ifobj_tx->xsk->xsk);
xsk_socket__delete(ifobj_rx->xsk->xsk);
testapp_clean_xsk_umem(ifobj_rx);
if (!ifobj_tx->shared_umem)
testapp_clean_xsk_umem(ifobj_tx);
}
return !!test->fail; return !!test->fail;
} }
...@@ -1448,9 +1497,9 @@ static void testapp_headroom(struct test_spec *test) ...@@ -1448,9 +1497,9 @@ static void testapp_headroom(struct test_spec *test)
static void testapp_stats_rx_dropped(struct test_spec *test) static void testapp_stats_rx_dropped(struct test_spec *test)
{ {
test_spec_set_name(test, "STAT_RX_DROPPED"); test_spec_set_name(test, "STAT_RX_DROPPED");
pkt_stream_replace_half(test, MIN_PKT_SIZE * 4, 0);
test->ifobj_rx->umem->frame_headroom = test->ifobj_rx->umem->frame_size - test->ifobj_rx->umem->frame_headroom = test->ifobj_rx->umem->frame_size -
XDP_PACKET_HEADROOM - MIN_PKT_SIZE * 3; XDP_PACKET_HEADROOM - MIN_PKT_SIZE * 3;
pkt_stream_replace_half(test, MIN_PKT_SIZE * 4, 0);
pkt_stream_receive_half(test); pkt_stream_receive_half(test);
test->ifobj_rx->validation_func = validate_rx_dropped; test->ifobj_rx->validation_func = validate_rx_dropped;
testapp_validate_traffic(test); testapp_validate_traffic(test);
...@@ -1573,6 +1622,11 @@ static void testapp_invalid_desc(struct test_spec *test) ...@@ -1573,6 +1622,11 @@ static void testapp_invalid_desc(struct test_spec *test)
pkts[7].valid = false; pkts[7].valid = false;
} }
if (test->ifobj_tx->shared_umem) {
pkts[4].addr += UMEM_SIZE;
pkts[5].addr += UMEM_SIZE;
}
pkt_stream_generate_custom(test, pkts, ARRAY_SIZE(pkts)); pkt_stream_generate_custom(test, pkts, ARRAY_SIZE(pkts));
testapp_validate_traffic(test); testapp_validate_traffic(test);
pkt_stream_restore_default(test); pkt_stream_restore_default(test);
...@@ -1769,6 +1823,7 @@ int main(int argc, char **argv) ...@@ -1769,6 +1823,7 @@ int main(int argc, char **argv)
int modes = TEST_MODE_SKB + 1; int modes = TEST_MODE_SKB + 1;
u32 i, j, failed_tests = 0; u32 i, j, failed_tests = 0;
struct test_spec test; struct test_spec test;
bool shared_umem;
/* Use libbpf 1.0 API mode */ /* Use libbpf 1.0 API mode */
libbpf_set_strict_mode(LIBBPF_STRICT_ALL); libbpf_set_strict_mode(LIBBPF_STRICT_ALL);
...@@ -1783,6 +1838,10 @@ int main(int argc, char **argv) ...@@ -1783,6 +1838,10 @@ int main(int argc, char **argv)
setlocale(LC_ALL, ""); setlocale(LC_ALL, "");
parse_command_line(ifobj_tx, ifobj_rx, argc, argv); parse_command_line(ifobj_tx, ifobj_rx, argc, argv);
shared_umem = !strcmp(ifobj_tx->ifname, ifobj_rx->ifname);
ifobj_tx->shared_umem = shared_umem;
ifobj_rx->shared_umem = shared_umem;
if (!validate_interface(ifobj_tx) || !validate_interface(ifobj_rx)) { if (!validate_interface(ifobj_tx) || !validate_interface(ifobj_rx)) {
usage(basename(argv[0])); usage(basename(argv[0]));
...@@ -1819,6 +1878,9 @@ int main(int argc, char **argv) ...@@ -1819,6 +1878,9 @@ int main(int argc, char **argv)
pkt_stream_delete(tx_pkt_stream_default); pkt_stream_delete(tx_pkt_stream_default);
pkt_stream_delete(rx_pkt_stream_default); pkt_stream_delete(rx_pkt_stream_default);
free(ifobj_rx->umem);
if (!ifobj_tx->shared_umem)
free(ifobj_tx->umem);
ifobject_delete(ifobj_tx); ifobject_delete(ifobj_tx);
ifobject_delete(ifobj_rx); ifobject_delete(ifobj_rx);
......
...@@ -153,6 +153,7 @@ struct ifobject { ...@@ -153,6 +153,7 @@ struct ifobject {
bool busy_poll; bool busy_poll;
bool use_fill_ring; bool use_fill_ring;
bool release_rx; bool release_rx;
bool shared_umem;
u8 dst_mac[ETH_ALEN]; u8 dst_mac[ETH_ALEN];
u8 src_mac[ETH_ALEN]; u8 src_mac[ETH_ALEN];
}; };
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment