Commit 3143d10b authored by Shibin Koikkara Reeny, committed by Daniel Borkmann

selftests/xsk: Update poll test cases

The poll test case did not exercise all of the poll functionality in the
test suite. This patch updates it: two test cases now cover the RX and TX
poll paths, and two additional test cases check the timeout behaviour of
the poll event.

The poll test suite has 4 test cases:

1. TEST_TYPE_RX_POLL: Check that the RX path POLLIN functionality works as
   expected. The TX path can use any method to send the traffic.

2. TEST_TYPE_TX_POLL: Check that the TX path POLLOUT functionality works as
   expected. The RX path can use any method to receive the traffic.

3. TEST_TYPE_POLL_RXQ_EMPTY: Calling poll() with POLLIN on an empty RX queue
   should time out. The test case passes if the timeout occurs.

4. TEST_TYPE_POLL_TXQ_FULL: When the TX queue is full and packets are not
   cleaned up by the kernel, invoking poll() with POLLOUT should trigger a
   timeout. A minimal sketch of this timeout check follows the list.
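
As a rough illustration only (not part of the patch), the two timeout cases
reduce to calling poll() on the AF_XDP socket fd and treating a return value
of 0 as the expected outcome. The helper name and timeout parameter below are
hypothetical; in the selftest the fd comes from xsk_socket__fd() and the
timeout is POLL_TMOUT.

#include <poll.h>
#include <stdio.h>

/* Hypothetical helper: returns 0 when poll() times out (the outcome the
 * RXQ_EMPTY/TXQ_FULL cases expect), 1 when the fd becomes ready, and -1
 * on a poll error.
 */
static int expect_poll_timeout(int xsk_fd, short events, int timeout_ms)
{
	struct pollfd fds = { .fd = xsk_fd, .events = events };
	int ret = poll(&fds, 1, timeout_ms);

	if (ret < 0) {
		perror("poll");
		return -1;
	}
	return ret ? 1 : 0;
}

The RXQ_EMPTY case would use POLLIN on the RX socket, and the TXQ_FULL case
POLLOUT on the TX socket.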
Signed-off-by: Shibin Koikkara Reeny <shibin.koikkara.reeny@intel.com>
Signed-off-by: Daniel Borkmann <daniel@iogearbox.net>
Reviewed-by: Maciej Fijalkowski <maciej.fijalkowski@intel.com>
Link: https://lore.kernel.org/bpf/20220803144354.98122-1-shibin.koikkara.reeny@intel.com
parent dc444be8
@@ -244,6 +244,11 @@ static void gen_udp_hdr(u32 payload, void *pkt, struct ifobject *ifobject,
 	memset32_htonl(pkt + PKT_HDR_SIZE, payload, UDP_PKT_DATA_SIZE);
 }
 
+static bool is_umem_valid(struct ifobject *ifobj)
+{
+	return !!ifobj->umem->umem;
+}
+
 static void gen_udp_csum(struct udphdr *udp_hdr, struct iphdr *ip_hdr)
 {
 	udp_hdr->check = 0;
@@ -817,12 +822,13 @@ static int complete_pkts(struct xsk_socket_info *xsk, int batch_size)
 	return TEST_PASS;
 }
 
-static int receive_pkts(struct ifobject *ifobj, struct pollfd *fds)
+static int receive_pkts(struct test_spec *test, struct pollfd *fds)
 {
-	struct timeval tv_end, tv_now, tv_timeout = {RECV_TMOUT, 0};
+	struct timeval tv_end, tv_now, tv_timeout = {THREAD_TMOUT, 0};
+	struct pkt_stream *pkt_stream = test->ifobj_rx->pkt_stream;
 	u32 idx_rx = 0, idx_fq = 0, rcvd, i, pkts_sent = 0;
-	struct pkt_stream *pkt_stream = ifobj->pkt_stream;
-	struct xsk_socket_info *xsk = ifobj->xsk;
+	struct xsk_socket_info *xsk = test->ifobj_rx->xsk;
+	struct ifobject *ifobj = test->ifobj_rx;
 	struct xsk_umem_info *umem = xsk->umem;
 	struct pkt *pkt;
 	int ret;
@@ -843,17 +849,28 @@ static int receive_pkts(struct ifobject *ifobj, struct pollfd *fds)
 		}
 
 		kick_rx(xsk);
+		if (ifobj->use_poll) {
+			ret = poll(fds, 1, POLL_TMOUT);
+			if (ret < 0)
+				exit_with_error(-ret);
+
+			if (!ret) {
+				if (!is_umem_valid(test->ifobj_tx))
+					return TEST_PASS;
+
+				ksft_print_msg("ERROR: [%s] Poll timed out\n", __func__);
+				return TEST_FAILURE;
+			}
+
+			if (!(fds->revents & POLLIN))
+				continue;
+		}
 
 		rcvd = xsk_ring_cons__peek(&xsk->rx, BATCH_SIZE, &idx_rx);
-		if (!rcvd) {
-			if (xsk_ring_prod__needs_wakeup(&umem->fq)) {
-				ret = poll(fds, 1, POLL_TMOUT);
-				if (ret < 0)
-					exit_with_error(-ret);
-			}
-			continue;
-		}
+		if (!rcvd)
+			continue;
 
 		if (ifobj->use_fill_ring) {
 			ret = xsk_ring_prod__reserve(&umem->fq, rcvd, &idx_fq);
 			while (ret != rcvd) {
@@ -900,13 +917,35 @@ static int receive_pkts(struct ifobject *ifobj, struct pollfd *fds)
 	return TEST_PASS;
 }
 
-static int __send_pkts(struct ifobject *ifobject, u32 *pkt_nb)
+static int __send_pkts(struct ifobject *ifobject, u32 *pkt_nb, struct pollfd *fds,
+		       bool timeout)
 {
 	struct xsk_socket_info *xsk = ifobject->xsk;
-	u32 i, idx, valid_pkts = 0;
+	bool use_poll = ifobject->use_poll;
+	u32 i, idx, ret, valid_pkts = 0;
 
-	while (xsk_ring_prod__reserve(&xsk->tx, BATCH_SIZE, &idx) < BATCH_SIZE)
+	while (xsk_ring_prod__reserve(&xsk->tx, BATCH_SIZE, &idx) < BATCH_SIZE) {
+		if (use_poll) {
+			ret = poll(fds, 1, POLL_TMOUT);
+			if (timeout) {
+				if (ret < 0) {
+					ksft_print_msg("ERROR: [%s] Poll error %d\n",
						       __func__, ret);
+					return TEST_FAILURE;
+				}
+				if (ret == 0)
+					return TEST_PASS;
+				break;
+			}
+			if (ret <= 0) {
+				ksft_print_msg("ERROR: [%s] Poll error %d\n",
					       __func__, ret);
+				return TEST_FAILURE;
+			}
+		}
+
 		complete_pkts(xsk, BATCH_SIZE);
+	}
 
 	for (i = 0; i < BATCH_SIZE; i++) {
 		struct xdp_desc *tx_desc = xsk_ring_prod__tx_desc(&xsk->tx, idx + i);
@@ -933,11 +972,27 @@ static int __send_pkts(struct ifobject *ifobject, u32 *pkt_nb)
 	xsk_ring_prod__submit(&xsk->tx, i);
 	xsk->outstanding_tx += valid_pkts;
 
-	if (complete_pkts(xsk, i))
-		return TEST_FAILURE;
+	if (use_poll) {
+		ret = poll(fds, 1, POLL_TMOUT);
+		if (ret <= 0) {
+			if (ret == 0 && timeout)
+				return TEST_PASS;
+
+			ksft_print_msg("ERROR: [%s] Poll error %d\n", __func__, ret);
+			return TEST_FAILURE;
+		}
+	}
+
+	if (!timeout) {
+		if (complete_pkts(xsk, i))
+			return TEST_FAILURE;
 
-	usleep(10);
-	return TEST_PASS;
+		usleep(10);
+		return TEST_PASS;
+	}
+
+	return TEST_CONTINUE;
 }
 
 static void wait_for_tx_completion(struct xsk_socket_info *xsk)
@@ -948,29 +1003,19 @@ static void wait_for_tx_completion(struct xsk_socket_info *xsk)
 
 static int send_pkts(struct test_spec *test, struct ifobject *ifobject)
 {
+	bool timeout = !is_umem_valid(test->ifobj_rx);
 	struct pollfd fds = { };
-	u32 pkt_cnt = 0;
+	u32 pkt_cnt = 0, ret;
 
 	fds.fd = xsk_socket__fd(ifobject->xsk->xsk);
 	fds.events = POLLOUT;
 
 	while (pkt_cnt < ifobject->pkt_stream->nb_pkts) {
-		int err;
-
-		if (ifobject->use_poll) {
-			int ret;
-
-			ret = poll(&fds, 1, POLL_TMOUT);
-			if (ret <= 0)
-				continue;
-
-			if (!(fds.revents & POLLOUT))
-				continue;
-		}
-
-		err = __send_pkts(ifobject, &pkt_cnt);
-		if (err || test->fail)
+		ret = __send_pkts(ifobject, &pkt_cnt, &fds, timeout);
+		if ((ret || test->fail) && !timeout)
 			return TEST_FAILURE;
+		else if (ret == TEST_PASS && timeout)
+			return ret;
 	}
 
 	wait_for_tx_completion(ifobject->xsk);
@@ -1235,7 +1280,7 @@ static void *worker_testapp_validate_rx(void *arg)
 
 	pthread_barrier_wait(&barr);
 
-	err = receive_pkts(ifobject, &fds);
+	err = receive_pkts(test, &fds);
 
 	if (!err && ifobject->validation_func)
 		err = ifobject->validation_func(ifobject);
@@ -1251,6 +1296,33 @@ static void *worker_testapp_validate_rx(void *arg)
 	pthread_exit(NULL);
 }
 
+static int testapp_validate_traffic_single_thread(struct test_spec *test, struct ifobject *ifobj,
+						  enum test_type type)
+{
+	pthread_t t0;
+
+	if (pthread_barrier_init(&barr, NULL, 2))
+		exit_with_error(errno);
+
+	test->current_step++;
+	if (type == TEST_TYPE_POLL_RXQ_TMOUT)
+		pkt_stream_reset(ifobj->pkt_stream);
+	pkts_in_flight = 0;
+
+	/*Spawn thread */
+	pthread_create(&t0, NULL, ifobj->func_ptr, test);
+
+	if (type != TEST_TYPE_POLL_TXQ_TMOUT)
+		pthread_barrier_wait(&barr);
+
+	if (pthread_barrier_destroy(&barr))
+		exit_with_error(errno);
+
+	pthread_join(t0, NULL);
+
+	return !!test->fail;
+}
+
 static int testapp_validate_traffic(struct test_spec *test)
 {
 	struct ifobject *ifobj_tx = test->ifobj_tx;
@@ -1548,12 +1620,30 @@ static void run_pkt_test(struct test_spec *test, enum test_mode mode, enum test_
 		pkt_stream_restore_default(test);
 		break;
-	case TEST_TYPE_POLL:
-		test->ifobj_tx->use_poll = true;
+	case TEST_TYPE_RX_POLL:
 		test->ifobj_rx->use_poll = true;
-		test_spec_set_name(test, "POLL");
+		test_spec_set_name(test, "POLL_RX");
 		testapp_validate_traffic(test);
 		break;
+	case TEST_TYPE_TX_POLL:
+		test->ifobj_tx->use_poll = true;
+		test_spec_set_name(test, "POLL_TX");
+		testapp_validate_traffic(test);
+		break;
+	case TEST_TYPE_POLL_TXQ_TMOUT:
+		test_spec_set_name(test, "POLL_TXQ_FULL");
+		test->ifobj_tx->use_poll = true;
+		/* create invalid frame by set umem frame_size and pkt length equal to 2048 */
+		test->ifobj_tx->umem->frame_size = 2048;
+		pkt_stream_replace(test, 2 * DEFAULT_PKT_CNT, 2048);
+		testapp_validate_traffic_single_thread(test, test->ifobj_tx, type);
+		pkt_stream_restore_default(test);
+		break;
+	case TEST_TYPE_POLL_RXQ_TMOUT:
+		test_spec_set_name(test, "POLL_RXQ_EMPTY");
+		test->ifobj_rx->use_poll = true;
+		testapp_validate_traffic_single_thread(test, test->ifobj_rx, type);
+		break;
 	case TEST_TYPE_ALIGNED_INV_DESC:
		test_spec_set_name(test, "ALIGNED_INV_DESC");
 		testapp_invalid_desc(test);
@@ -27,6 +27,7 @@
 #define TEST_PASS 0
 #define TEST_FAILURE -1
+#define TEST_CONTINUE 1
 #define MAX_INTERFACES 2
 #define MAX_INTERFACE_NAME_CHARS 7
 #define MAX_INTERFACES_NAMESPACE_CHARS 10
@@ -48,7 +49,7 @@
 #define SOCK_RECONF_CTR 10
 #define BATCH_SIZE 64
 #define POLL_TMOUT 1000
-#define RECV_TMOUT 3
+#define THREAD_TMOUT 3
 #define DEFAULT_PKT_CNT (4 * 1024)
 #define DEFAULT_UMEM_BUFFERS (DEFAULT_PKT_CNT / 4)
 #define UMEM_SIZE (DEFAULT_UMEM_BUFFERS * XSK_UMEM__DEFAULT_FRAME_SIZE)
@@ -68,7 +69,10 @@ enum test_type {
 	TEST_TYPE_RUN_TO_COMPLETION,
 	TEST_TYPE_RUN_TO_COMPLETION_2K_FRAME,
 	TEST_TYPE_RUN_TO_COMPLETION_SINGLE_PKT,
-	TEST_TYPE_POLL,
+	TEST_TYPE_RX_POLL,
+	TEST_TYPE_TX_POLL,
+	TEST_TYPE_POLL_RXQ_TMOUT,
+	TEST_TYPE_POLL_TXQ_TMOUT,
 	TEST_TYPE_UNALIGNED,
 	TEST_TYPE_ALIGNED_INV_DESC,
 	TEST_TYPE_ALIGNED_INV_DESC_2K_FRAME,