Commit c15800b6 authored by Jakub Kicinski

Merge branch 'xdp-redirect-implementation-for-ena-driver'

Shay Agroskin says:

====================
XDP Redirect implementation for ENA driver

ENA is adding XDP Redirect support to its driver, along with some other
small tweaks.

This series adds the following:

- Make log messages in the driver use a uniform format via the
  netdev_* functions
- Improve code readability
- Add support for XDP Redirect
====================

Link: https://lore.kernel.org/r/20201208180208.26111-1-shayagr@amazon.com
Signed-off-by: Jakub Kicinski <kuba@kernel.org>
parents da948233 f1a25589
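
For context, XDP_REDIRECT is the verdict an XDP program returns to ask the driver to forward the received frame through the kernel's redirect path to another device; the driver passes the frame to the core, which later transmits it via the target device's ndo_xdp_xmit(). A minimal sketch of such a program follows; the program name and the hard-coded egress ifindex are illustrative assumptions, not part of this series.

/* Hypothetical, minimal XDP program used only to illustrate what the
 * driver-side XDP_REDIRECT support enables. The egress ifindex is an
 * assumption; real programs usually read it from a BPF map.
 */
#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>

#define OUT_IFINDEX 2	/* assumed ifindex of the egress device */

SEC("xdp")
int xdp_redirect_all(struct xdp_md *ctx)
{
	/* bpf_redirect() returns XDP_REDIRECT; the driver hands the frame to
	 * the core redirect path, which transmits it via the target device's
	 * ndo_xdp_xmit().
	 */
	return bpf_redirect(OUT_IFINDEX, 0);
}

char _license[] SEC("license") = "GPL";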
@@ -303,6 +303,7 @@ struct ena_com_dev {
 	u8 __iomem *reg_bar;
 	void __iomem *mem_bar;
 	void *dmadev;
+	struct net_device *net_device;

 	enum ena_admin_placement_policy_type tx_mem_queue_type;
 	u32 tx_max_header_size;
@@ -604,7 +605,7 @@ int ena_com_get_eni_stats(struct ena_com_dev *ena_dev,
  *
  * @return: 0 on Success and negative value otherwise.
  */
-int ena_com_set_dev_mtu(struct ena_com_dev *ena_dev, int mtu);
+int ena_com_set_dev_mtu(struct ena_com_dev *ena_dev, u32 mtu);

 /* ena_com_get_offload_settings - Retrieve the device offloads capabilities
  * @ena_dev: ENA communication layer struct
@@ -931,6 +932,26 @@ int ena_com_config_dev_mode(struct ena_com_dev *ena_dev,
 			    struct ena_admin_feature_llq_desc *llq_features,
 			    struct ena_llq_configurations *llq_default_config);

+/* ena_com_io_sq_to_ena_dev - Extract ena_com_dev using contained field io_sq.
+ * @io_sq: IO submit queue struct
+ *
+ * @return - ena_com_dev struct extracted from io_sq
+ */
+static inline struct ena_com_dev *ena_com_io_sq_to_ena_dev(struct ena_com_io_sq *io_sq)
+{
+	return container_of(io_sq, struct ena_com_dev, io_sq_queues[io_sq->qid]);
+}
+
+/* ena_com_io_cq_to_ena_dev - Extract ena_com_dev using contained field io_cq.
+ * @io_sq: IO submit queue struct
+ *
+ * @return - ena_com_dev struct extracted from io_sq
+ */
+static inline struct ena_com_dev *ena_com_io_cq_to_ena_dev(struct ena_com_io_cq *io_cq)
+{
+	return container_of(io_cq, struct ena_com_dev, io_cq_queues[io_cq->qid]);
+}
+
 static inline bool ena_com_get_adaptive_moderation_enabled(struct ena_com_dev *ena_dev)
 {
 	return ena_dev->adaptive_coalescing;
......
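The two helpers above recover the owning ena_com_dev from an embedded queue struct via container_of(), so the I/O path can reach net_device for logging without carrying an extra pointer. A rough userspace illustration of the same pattern, using hypothetical structs rather than driver code:

#include <stddef.h>
#include <stdio.h>

/* Userspace stand-in for the kernel's container_of(); the real macro also
 * type-checks the pointer.
 */
#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct queue { int qid; };

struct device_ctx {
	int id;
	struct queue queues[4];
};

int main(void)
{
	struct device_ctx dev = { .id = 42 };
	struct queue *q = &dev.queues[2];

	/* Walk back from the embedded element to its container; the driver
	 * helpers do the same with io_sq_queues[io_sq->qid].
	 */
	struct device_ctx *owner = container_of(q, struct device_ctx, queues[2]);

	printf("owner id = %d\n", owner->id); /* prints 42 */
	return 0;
}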
@@ -58,13 +58,15 @@ static int ena_com_write_bounce_buffer_to_dev(struct ena_com_io_sq *io_sq,

 	if (is_llq_max_tx_burst_exists(io_sq)) {
 		if (unlikely(!io_sq->entries_in_tx_burst_left)) {
-			pr_err("Error: trying to send more packets than tx burst allows\n");
+			netdev_err(ena_com_io_sq_to_ena_dev(io_sq)->net_device,
+				   "Error: trying to send more packets than tx burst allows\n");
 			return -ENOSPC;
 		}

 		io_sq->entries_in_tx_burst_left--;
-		pr_debug("Decreasing entries_in_tx_burst_left of queue %d to %d\n",
-			 io_sq->qid, io_sq->entries_in_tx_burst_left);
+		netdev_dbg(ena_com_io_sq_to_ena_dev(io_sq)->net_device,
+			   "Decreasing entries_in_tx_burst_left of queue %d to %d\n",
+			   io_sq->qid, io_sq->entries_in_tx_burst_left);
 	}

 	/* Make sure everything was written into the bounce buffer before
@@ -102,12 +104,14 @@ static int ena_com_write_header_to_bounce(struct ena_com_io_sq *io_sq,
 	if (unlikely((header_offset + header_len) >
 		     llq_info->desc_list_entry_size)) {
-		pr_err("Trying to write header larger than llq entry can accommodate\n");
+		netdev_err(ena_com_io_sq_to_ena_dev(io_sq)->net_device,
+			   "Trying to write header larger than llq entry can accommodate\n");
 		return -EFAULT;
 	}

 	if (unlikely(!bounce_buffer)) {
-		pr_err("Bounce buffer is NULL\n");
+		netdev_err(ena_com_io_sq_to_ena_dev(io_sq)->net_device,
+			   "Bounce buffer is NULL\n");
 		return -EFAULT;
 	}
@@ -125,7 +129,8 @@ static void *get_sq_desc_llq(struct ena_com_io_sq *io_sq)
 	bounce_buffer = pkt_ctrl->curr_bounce_buf;

 	if (unlikely(!bounce_buffer)) {
-		pr_err("Bounce buffer is NULL\n");
+		netdev_err(ena_com_io_sq_to_ena_dev(io_sq)->net_device,
+			   "Bounce buffer is NULL\n");
 		return NULL;
 	}
@@ -250,8 +255,9 @@ static u16 ena_com_cdesc_rx_pkt_get(struct ena_com_io_cq *io_cq,
 		io_cq->cur_rx_pkt_cdesc_count = 0;
 		io_cq->cur_rx_pkt_cdesc_start_idx = head_masked;

-		pr_debug("ENA q_id: %d packets were completed. first desc idx %u descs# %d\n",
-			 io_cq->qid, *first_cdesc_idx, count);
+		netdev_dbg(ena_com_io_cq_to_ena_dev(io_cq)->net_device,
+			   "ENA q_id: %d packets were completed. first desc idx %u descs# %d\n",
+			   io_cq->qid, *first_cdesc_idx, count);
 	} else {
 		io_cq->cur_rx_pkt_cdesc_count += count;
 		count = 0;
@@ -335,7 +341,8 @@ static int ena_com_create_and_store_tx_meta_desc(struct ena_com_io_sq *io_sq,
 	return 0;
 }

-static void ena_com_rx_set_flags(struct ena_com_rx_ctx *ena_rx_ctx,
+static void ena_com_rx_set_flags(struct ena_com_io_cq *io_cq,
+				 struct ena_com_rx_ctx *ena_rx_ctx,
 				 struct ena_eth_io_rx_cdesc_base *cdesc)
 {
 	ena_rx_ctx->l3_proto = cdesc->status &
@@ -357,10 +364,11 @@ static void ena_com_rx_set_flags(struct ena_com_rx_ctx *ena_rx_ctx,
 		(cdesc->status & ENA_ETH_IO_RX_CDESC_BASE_IPV4_FRAG_MASK) >>
 		ENA_ETH_IO_RX_CDESC_BASE_IPV4_FRAG_SHIFT;

-	pr_debug("l3_proto %d l4_proto %d l3_csum_err %d l4_csum_err %d hash %d frag %d cdesc_status %x\n",
-		 ena_rx_ctx->l3_proto, ena_rx_ctx->l4_proto,
-		 ena_rx_ctx->l3_csum_err, ena_rx_ctx->l4_csum_err,
-		 ena_rx_ctx->hash, ena_rx_ctx->frag, cdesc->status);
+	netdev_dbg(ena_com_io_cq_to_ena_dev(io_cq)->net_device,
+		   "l3_proto %d l4_proto %d l3_csum_err %d l4_csum_err %d hash %d frag %d cdesc_status %x\n",
+		   ena_rx_ctx->l3_proto, ena_rx_ctx->l4_proto,
+		   ena_rx_ctx->l3_csum_err, ena_rx_ctx->l4_csum_err,
+		   ena_rx_ctx->hash, ena_rx_ctx->frag, cdesc->status);
 }

 /*****************************************************************************/
@@ -385,13 +393,15 @@ int ena_com_prepare_tx(struct ena_com_io_sq *io_sq,
 	/* num_bufs +1 for potential meta desc */
 	if (unlikely(!ena_com_sq_have_enough_space(io_sq, num_bufs + 1))) {
-		pr_debug("Not enough space in the tx queue\n");
+		netdev_dbg(ena_com_io_sq_to_ena_dev(io_sq)->net_device,
+			   "Not enough space in the tx queue\n");
 		return -ENOMEM;
 	}

 	if (unlikely(header_len > io_sq->tx_max_header_size)) {
-		pr_err("Header size is too large %d max header: %d\n",
-		       header_len, io_sq->tx_max_header_size);
+		netdev_err(ena_com_io_sq_to_ena_dev(io_sq)->net_device,
+			   "Header size is too large %d max header: %d\n",
+			   header_len, io_sq->tx_max_header_size);
 		return -EINVAL;
 	}
@@ -405,7 +415,8 @@ int ena_com_prepare_tx(struct ena_com_io_sq *io_sq,
 	rc = ena_com_create_and_store_tx_meta_desc(io_sq, ena_tx_ctx, &have_meta);
 	if (unlikely(rc)) {
-		pr_err("Failed to create and store tx meta desc\n");
+		netdev_err(ena_com_io_sq_to_ena_dev(io_sq)->net_device,
+			   "Failed to create and store tx meta desc\n");
 		return rc;
 	}
@@ -529,12 +540,14 @@ int ena_com_rx_pkt(struct ena_com_io_cq *io_cq,
 		return 0;
 	}

-	pr_debug("Fetch rx packet: queue %d completed desc: %d\n", io_cq->qid,
-		 nb_hw_desc);
+	netdev_dbg(ena_com_io_cq_to_ena_dev(io_cq)->net_device,
+		   "Fetch rx packet: queue %d completed desc: %d\n", io_cq->qid,
+		   nb_hw_desc);

 	if (unlikely(nb_hw_desc > ena_rx_ctx->max_bufs)) {
-		pr_err("Too many RX cdescs (%d) > MAX(%d)\n", nb_hw_desc,
-		       ena_rx_ctx->max_bufs);
+		netdev_err(ena_com_io_cq_to_ena_dev(io_cq)->net_device,
+			   "Too many RX cdescs (%d) > MAX(%d)\n", nb_hw_desc,
+			   ena_rx_ctx->max_bufs);
 		return -ENOSPC;
 	}
@@ -557,13 +570,15 @@ int ena_com_rx_pkt(struct ena_com_io_cq *io_cq,
 	/* Update SQ head ptr */
 	io_sq->next_to_comp += nb_hw_desc;

-	pr_debug("[%s][QID#%d] Updating SQ head to: %d\n", __func__, io_sq->qid,
-		 io_sq->next_to_comp);
+	netdev_dbg(ena_com_io_cq_to_ena_dev(io_cq)->net_device,
+		   "[%s][QID#%d] Updating SQ head to: %d\n", __func__,
+		   io_sq->qid, io_sq->next_to_comp);

 	/* Get rx flags from the last pkt */
-	ena_com_rx_set_flags(ena_rx_ctx, cdesc);
+	ena_com_rx_set_flags(io_cq, ena_rx_ctx, cdesc);

 	ena_rx_ctx->descs = nb_hw_desc;

 	return 0;
 }
@@ -588,11 +603,15 @@ int ena_com_add_single_rx_desc(struct ena_com_io_sq *io_sq,
 	desc->ctrl = ENA_ETH_IO_RX_DESC_FIRST_MASK |
 		     ENA_ETH_IO_RX_DESC_LAST_MASK |
-		     (io_sq->phase & ENA_ETH_IO_RX_DESC_PHASE_MASK) |
-		     ENA_ETH_IO_RX_DESC_COMP_REQ_MASK;
+		     ENA_ETH_IO_RX_DESC_COMP_REQ_MASK |
+		     (io_sq->phase & ENA_ETH_IO_RX_DESC_PHASE_MASK);

 	desc->req_id = req_id;

+	netdev_dbg(ena_com_io_sq_to_ena_dev(io_sq)->net_device,
+		   "[%s] Adding single RX desc, Queue: %u, req_id: %u\n",
+		   __func__, io_sq->qid, req_id);
+
 	desc->buff_addr_lo = (u32)ena_buf->paddr;
 	desc->buff_addr_hi =
 		((ena_buf->paddr & GENMASK_ULL(io_sq->dma_addr_bits - 1, 32)) >> 32);
......
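Every conversion in the file above follows the same mechanical pattern: pr_err()/pr_debug() becomes netdev_err()/netdev_dbg(), with the net_device pointer recovered through the new helpers, so each message is prefixed with the interface name. A standalone sketch of that convention in an out-of-tree module; the module name and the interface name "eth0" are assumptions, not driver code:

#include <linux/module.h>
#include <linux/netdevice.h>
#include <net/net_namespace.h>

static int __init netdev_log_demo_init(void)
{
	struct net_device *ndev = dev_get_by_name(&init_net, "eth0");

	if (!ndev) {
		/* No device context available, fall back to a bare pr_err() */
		pr_err("netdev_log_demo: no such interface\n");
		return -ENODEV;
	}

	/* Same message, but now tagged "eth0: ..." in the kernel log */
	netdev_err(ndev, "demo: logging with device context\n");

	dev_put(ndev);
	return 0;
}

static void __exit netdev_log_demo_exit(void)
{
}

module_init(netdev_log_demo_init);
module_exit(netdev_log_demo_exit);
MODULE_LICENSE("GPL");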
@@ -140,8 +140,9 @@ static inline bool ena_com_is_doorbell_needed(struct ena_com_io_sq *io_sq,
 						   llq_info->descs_per_entry);
 	}

-	pr_debug("Queue: %d num_descs: %d num_entries_needed: %d\n", io_sq->qid,
-		 num_descs, num_entries_needed);
+	netdev_dbg(ena_com_io_sq_to_ena_dev(io_sq)->net_device,
+		   "Queue: %d num_descs: %d num_entries_needed: %d\n",
+		   io_sq->qid, num_descs, num_entries_needed);

 	return num_entries_needed > io_sq->entries_in_tx_burst_left;
 }
@@ -151,14 +152,16 @@ static inline int ena_com_write_sq_doorbell(struct ena_com_io_sq *io_sq)
 	u16 max_entries_in_tx_burst = io_sq->llq_info.max_entries_in_tx_burst;
 	u16 tail = io_sq->tail;

-	pr_debug("Write submission queue doorbell for queue: %d tail: %d\n",
-		 io_sq->qid, tail);
+	netdev_dbg(ena_com_io_sq_to_ena_dev(io_sq)->net_device,
+		   "Write submission queue doorbell for queue: %d tail: %d\n",
+		   io_sq->qid, tail);

 	writel(tail, io_sq->db_addr);

 	if (is_llq_max_tx_burst_exists(io_sq)) {
-		pr_debug("Reset available entries in tx burst for queue %d to %d\n",
-			 io_sq->qid, max_entries_in_tx_burst);
+		netdev_dbg(ena_com_io_sq_to_ena_dev(io_sq)->net_device,
+			   "Reset available entries in tx burst for queue %d to %d\n",
+			   io_sq->qid, max_entries_in_tx_burst);
 		io_sq->entries_in_tx_burst_left = max_entries_in_tx_burst;
 	}
@@ -176,8 +179,9 @@ static inline int ena_com_update_dev_comp_head(struct ena_com_io_cq *io_cq)
 	need_update = unreported_comp > (io_cq->q_depth / ENA_COMP_HEAD_THRESH);

 	if (unlikely(need_update)) {
-		pr_debug("Write completion queue doorbell for queue %d: head: %d\n",
-			 io_cq->qid, head);
+		netdev_dbg(ena_com_io_cq_to_ena_dev(io_cq)->net_device,
+			   "Write completion queue doorbell for queue %d: head: %d\n",
+			   io_cq->qid, head);
 		writel(head, io_cq->cq_head_db_reg);
 		io_cq->last_head_update = head;
 	}
@@ -240,7 +244,8 @@ static inline int ena_com_tx_comp_req_id_get(struct ena_com_io_cq *io_cq,
 	*req_id = READ_ONCE(cdesc->req_id);
 	if (unlikely(*req_id >= io_cq->q_depth)) {
-		pr_err("Invalid req id %d\n", cdesc->req_id);
+		netdev_err(ena_com_io_cq_to_ena_dev(io_cq)->net_device,
+			   "Invalid req id %d\n", cdesc->req_id);
 		return -EINVAL;
 	}
......
@@ -95,6 +95,7 @@ static const struct ena_stats ena_stats_rx_strings[] = {
 	ENA_STAT_RX_ENTRY(xdp_pass),
 	ENA_STAT_RX_ENTRY(xdp_tx),
 	ENA_STAT_RX_ENTRY(xdp_invalid),
+	ENA_STAT_RX_ENTRY(xdp_redirect),
 };

 static const struct ena_stats ena_stats_ena_com_strings[] = {
@@ -839,7 +840,7 @@ static int ena_set_channels(struct net_device *netdev,
 	/* The check for max value is already done in ethtool */
 	if (count < ENA_MIN_NUM_IO_QUEUES ||
 	    (ena_xdp_present(adapter) &&
-	     !ena_xdp_legal_queue_count(adapter, channels->combined_count)))
+	     !ena_xdp_legal_queue_count(adapter, count)))
 		return -EINVAL;

 	return ena_update_queue_count(adapter, count);
......
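The channel-count check above rejects configurations that would leave no room for the paired XDP TX queues: with an XDP program attached, only requests where twice the requested count still fits within the device's I/O queue budget are accepted, matching ena_xdp_legal_queue_count() shown further down. A trivial standalone sketch of that arithmetic, with an assumed device maximum of 8 queues:

#include <stdbool.h>
#include <stdio.h>

/* Mirrors the 2 * queues <= max_num_io_queues rule from this series;
 * not driver code. */
static bool xdp_legal_queue_count(unsigned int max_num_io_queues,
				  unsigned int queues)
{
	return 2 * queues <= max_num_io_queues;
}

int main(void)
{
	unsigned int max_num_io_queues = 8; /* assumed device maximum */
	unsigned int q;

	for (q = 1; q <= 6; q++)
		printf("combined %u -> %s\n", q,
		       xdp_legal_queue_count(max_num_io_queues, q) ?
		       "ok" : "rejected");
	return 0;
}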
@@ -170,12 +170,6 @@ struct ena_tx_buffer {
 	 * the xdp queues
 	 */
 	struct xdp_frame *xdpf;
-	/* The rx page for the rx buffer that was received in rx and
-	 * re transmitted on xdp tx queues as a result of XDP_TX action.
-	 * We need to free the page once we finished cleaning the buffer in
-	 * clean_xdp_irq()
-	 */
-	struct page *xdp_rx_page;

 	/* Indicate if bufs[0] map the linear data of the skb. */
 	u8 map_linear_data;
@@ -239,6 +233,7 @@ struct ena_stats_rx {
 	u64 xdp_pass;
 	u64 xdp_tx;
 	u64 xdp_invalid;
+	u64 xdp_redirect;
 };

 struct ena_ring {
@@ -263,6 +258,7 @@ struct ena_ring {
 	struct ena_com_io_sq *ena_com_io_sq;
 	struct bpf_prog *xdp_bpf_prog;
 	struct xdp_rxq_info xdp_rxq;
+	spinlock_t xdp_tx_lock;	/* synchronize XDP TX/Redirect traffic */
 	u16 next_to_use;
 	u16 next_to_clean;
@@ -433,8 +429,8 @@ static inline bool ena_xdp_present_ring(struct ena_ring *ring)
 	return !!ring->xdp_bpf_prog;
 }

-static inline int ena_xdp_legal_queue_count(struct ena_adapter *adapter,
-					    u32 queues)
+static inline bool ena_xdp_legal_queue_count(struct ena_adapter *adapter,
+					     u32 queues)
 {
 	return 2 * queues <= adapter->max_num_io_queues;
 }
......
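The new xdp_tx_lock in struct ena_ring exists because two paths can now post to the same XDP TX ring: the local XDP_TX verdict handled in NAPI context, and frames redirected in from other interfaces via ndo_xdp_xmit(), possibly on another CPU. A hedged sketch of how such a ring lock is typically used; the struct and function names below are illustrative, not the driver's:

#include <linux/types.h>
#include <linux/spinlock.h>
#include <net/xdp.h>

struct demo_xdp_ring {
	spinlock_t xdp_tx_lock;	/* serializes XDP_TX and ndo_xdp_xmit() senders */
	u16 next_to_use;
};

static void demo_xdp_ring_init(struct demo_xdp_ring *ring)
{
	spin_lock_init(&ring->xdp_tx_lock);
	ring->next_to_use = 0;
}

/* Post one frame to the ring; callers may run concurrently (NAPI XDP_TX on
 * the local CPU, ndo_xdp_xmit() from any CPU), so descriptor writes are
 * serialized by the ring lock.
 */
static int demo_xdp_xmit_frame(struct demo_xdp_ring *ring, struct xdp_frame *xdpf)
{
	spin_lock(&ring->xdp_tx_lock);

	/* ... DMA-map xdpf->data, write a TX descriptor, ring the doorbell ... */
	ring->next_to_use++;

	spin_unlock(&ring->xdp_tx_lock);
	return 0;
}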