Commit 43c7f919 authored by Krzysztof Kazimierczak, committed by Tony Nguyen

ice: Refactor ice_setup_rx_ctx

Move AF_XDP logic and buffer allocation out of ice_setup_rx_ctx() to a
new function ice_vsi_cfg_rxq(), so the function actually sets up the Rx
context.
Signed-off-by: Krzysztof Kazimierczak <krzysztof.kazimierczak@intel.com>
Co-developed-by: Tony Nguyen <anthony.l.nguyen@intel.com>
Signed-off-by: Tony Nguyen <anthony.l.nguyen@intel.com>
Tested-by: Kiran Bhandare <kiranx.bhandare@intel.com>
parent f28cd5ce
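
After this change, ice_setup_rx_ctx() programs only the RLAN Rx context, while the new ice_vsi_cfg_rxq() wraps it and carries the XDP/AF_XDP registration and buffer allocation that callers previously reached through ice_setup_rx_ctx(). A minimal sketch of the resulting split, condensed from the diff below (not verbatim driver code; the XDP branches are abbreviated into comments):

int ice_vsi_cfg_rxq(struct ice_ring *ring)
{
	int err;

	ring->rx_buf_len = ring->vsi->rx_buf_len;

	if (ring->vsi->type == ICE_VSI_PF) {
		/* Register xdp_rxq_info and pick the memory model:
		 * MEM_TYPE_XSK_BUFF_POOL when an AF_XDP pool owns the
		 * queue, MEM_TYPE_PAGE_SHARED otherwise.
		 */
	}

	/* Rx context programming is all that is left in the
	 * (now static) ice_setup_rx_ctx().
	 */
	err = ice_setup_rx_ctx(ring);
	if (err)
		return err;

	/* Finally pre-fill the ring: ice_alloc_rx_bufs_zc() on the
	 * AF_XDP zero-copy path, the regular allocation otherwise.
	 */
	return 0;
}

Callers configure a queue through ice_vsi_cfg_rxq() instead of ice_setup_rx_ctx(), as the last two hunks show.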
--- a/drivers/net/ethernet/intel/ice/ice_base.c
+++ b/drivers/net/ethernet/intel/ice/ice_base.c
@@ -319,11 +319,9 @@ static unsigned int ice_rx_offset(struct ice_ring *rx_ring)
  *
  * Configure the Rx descriptor ring in RLAN context.
  */
-int ice_setup_rx_ctx(struct ice_ring *ring)
+static int ice_setup_rx_ctx(struct ice_ring *ring)
 {
-	struct device *dev = ice_pf_to_dev(ring->vsi->back);
 	int chain_len = ICE_MAX_CHAINED_RX_BUFS;
-	u16 num_bufs = ICE_DESC_UNUSED(ring);
 	struct ice_vsi *vsi = ring->vsi;
 	u32 rxdid = ICE_RXDID_FLEX_NIC;
 	struct ice_rlan_ctx rlan_ctx;
@@ -339,48 +337,6 @@ int ice_setup_rx_ctx(struct ice_ring *ring)
 	/* clear the context structure first */
 	memset(&rlan_ctx, 0, sizeof(rlan_ctx));
 
-	ring->rx_buf_len = vsi->rx_buf_len;
-
-	if (ring->vsi->type == ICE_VSI_PF) {
-		if (!xdp_rxq_info_is_reg(&ring->xdp_rxq))
-			/* coverity[check_return] */
-			xdp_rxq_info_reg(&ring->xdp_rxq, ring->netdev,
-					 ring->q_index, ring->q_vector->napi.napi_id);
-
-		ring->xsk_pool = ice_xsk_pool(ring);
-		if (ring->xsk_pool) {
-			xdp_rxq_info_unreg_mem_model(&ring->xdp_rxq);
-
-			ring->rx_buf_len =
-				xsk_pool_get_rx_frame_size(ring->xsk_pool);
-			/* For AF_XDP ZC, we disallow packets to span on
-			 * multiple buffers, thus letting us skip that
-			 * handling in the fast-path.
-			 */
-			chain_len = 1;
-			err = xdp_rxq_info_reg_mem_model(&ring->xdp_rxq,
-							 MEM_TYPE_XSK_BUFF_POOL,
-							 NULL);
-			if (err)
-				return err;
-			xsk_pool_set_rxq_info(ring->xsk_pool, &ring->xdp_rxq);
-
-			dev_info(dev, "Registered XDP mem model MEM_TYPE_XSK_BUFF_POOL on Rx ring %d\n",
-				 ring->q_index);
-		} else {
-			if (!xdp_rxq_info_is_reg(&ring->xdp_rxq))
-				/* coverity[check_return] */
-				xdp_rxq_info_reg(&ring->xdp_rxq,
-						 ring->netdev,
-						 ring->q_index, ring->q_vector->napi.napi_id);
-
-			err = xdp_rxq_info_reg_mem_model(&ring->xdp_rxq,
-							 MEM_TYPE_PAGE_SHARED,
-							 NULL);
-			if (err)
-				return err;
-		}
-	}
-
 	/* Receive Queue Base Address.
 	 * Indicates the starting address of the descriptor queue defined in
 	 * 128 Byte units.
@@ -415,6 +371,12 @@ int ice_setup_rx_ctx(struct ice_ring *ring)
 	 */
 	rlan_ctx.showiv = 0;
 
+	/* For AF_XDP ZC, we disallow packets to span on
+	 * multiple buffers, thus letting us skip that
+	 * handling in the fast-path.
+	 */
+	if (ring->xsk_pool)
+		chain_len = 1;
 	/* Max packet size for this queue - must not be set to a larger value
 	 * than 5 x DBUF
 	 */
@@ -438,7 +400,7 @@ int ice_setup_rx_ctx(struct ice_ring *ring)
 	/* Absolute queue number out of 2K needs to be passed */
 	err = ice_write_rxq_ctx(hw, &rlan_ctx, pf_q);
 	if (err) {
-		dev_err(dev, "Failed to set LAN Rx queue context for absolute Rx queue %d error: %d\n",
+		dev_err(ice_pf_to_dev(vsi->back), "Failed to set LAN Rx queue context for absolute Rx queue %d error: %d\n",
 			pf_q, err);
 		return -EIO;
 	}
@@ -458,6 +420,66 @@ int ice_setup_rx_ctx(struct ice_ring *ring)
 	ring->tail = hw->hw_addr + QRX_TAIL(pf_q);
 	writel(0, ring->tail);
 
+	return 0;
+}
+
+/**
+ * ice_vsi_cfg_rxq - Configure an Rx queue
+ * @ring: the ring being configured
+ *
+ * Return 0 on success and a negative value on error.
+ */
+int ice_vsi_cfg_rxq(struct ice_ring *ring)
+{
+	struct device *dev = ice_pf_to_dev(ring->vsi->back);
+	u16 num_bufs = ICE_DESC_UNUSED(ring);
+	int err;
+
+	ring->rx_buf_len = ring->vsi->rx_buf_len;
+
+	if (ring->vsi->type == ICE_VSI_PF) {
+		if (!xdp_rxq_info_is_reg(&ring->xdp_rxq))
+			/* coverity[check_return] */
+			xdp_rxq_info_reg(&ring->xdp_rxq, ring->netdev,
+					 ring->q_index, ring->q_vector->napi.napi_id);
+
+		ring->xsk_pool = ice_xsk_pool(ring);
+		if (ring->xsk_pool) {
+			xdp_rxq_info_unreg_mem_model(&ring->xdp_rxq);
+
+			ring->rx_buf_len =
+				xsk_pool_get_rx_frame_size(ring->xsk_pool);
+			err = xdp_rxq_info_reg_mem_model(&ring->xdp_rxq,
+							 MEM_TYPE_XSK_BUFF_POOL,
+							 NULL);
+			if (err)
+				return err;
+			xsk_pool_set_rxq_info(ring->xsk_pool, &ring->xdp_rxq);
+
+			dev_info(dev, "Registered XDP mem model MEM_TYPE_XSK_BUFF_POOL on Rx ring %d\n",
+				 ring->q_index);
+		} else {
+			if (!xdp_rxq_info_is_reg(&ring->xdp_rxq))
+				/* coverity[check_return] */
+				xdp_rxq_info_reg(&ring->xdp_rxq,
+						 ring->netdev,
+						 ring->q_index, ring->q_vector->napi.napi_id);
+
+			err = xdp_rxq_info_reg_mem_model(&ring->xdp_rxq,
+							 MEM_TYPE_PAGE_SHARED,
+							 NULL);
+			if (err)
+				return err;
+		}
+	}
+
+	err = ice_setup_rx_ctx(ring);
+	if (err) {
+		dev_err(dev, "ice_setup_rx_ctx failed for RxQ %d, err %d\n",
+			ring->q_index, err);
+		return err;
+	}
+
 	if (ring->xsk_pool) {
 		bool ok;
@@ -470,9 +492,13 @@ int ice_setup_rx_ctx(struct ice_ring *ring)
 		}
 
 		ok = ice_alloc_rx_bufs_zc(ring, num_bufs);
-		if (!ok)
+		if (!ok) {
+			u16 pf_q = ring->vsi->rxq_map[ring->q_index];
+
 			dev_info(dev, "Failed to allocate some buffers on XSK buffer pool enabled Rx ring %d (pf_q %d)\n",
 				 ring->q_index, pf_q);
+		}
 
 		return 0;
 	}
--- a/drivers/net/ethernet/intel/ice/ice_base.h
+++ b/drivers/net/ethernet/intel/ice/ice_base.h
@@ -6,7 +6,7 @@
 
 #include "ice.h"
 
-int ice_setup_rx_ctx(struct ice_ring *ring);
+int ice_vsi_cfg_rxq(struct ice_ring *ring);
 int __ice_vsi_get_qs(struct ice_qs_cfg *qs_cfg);
 int
 ice_vsi_ctrl_one_rx_ring(struct ice_vsi *vsi, bool ena, u16 rxq_idx, bool wait);
--- a/drivers/net/ethernet/intel/ice/ice_lib.c
+++ b/drivers/net/ethernet/intel/ice/ice_lib.c
@@ -1698,16 +1698,12 @@ int ice_vsi_cfg_rxqs(struct ice_vsi *vsi)
 		ice_vsi_cfg_frame_size(vsi);
 setup_rings:
 	/* set up individual rings */
-	for (i = 0; i < vsi->num_rxq; i++) {
-		int err;
+	ice_for_each_rxq(vsi, i) {
+		int err = ice_vsi_cfg_rxq(vsi->rx_rings[i]);
 
-		err = ice_setup_rx_ctx(vsi->rx_rings[i]);
-		if (err) {
-			dev_err(ice_pf_to_dev(vsi->back), "ice_setup_rx_ctx failed for RxQ %d, err %d\n",
-				i, err);
+		if (err)
 			return err;
-		}
 	}
 
 	return 0;
 }
--- a/drivers/net/ethernet/intel/ice/ice_xsk.c
+++ b/drivers/net/ethernet/intel/ice/ice_xsk.c
@@ -236,7 +236,7 @@ static int ice_qp_ena(struct ice_vsi *vsi, u16 q_idx)
 		xdp_ring->xsk_pool = ice_xsk_pool(xdp_ring);
 	}
 
-	err = ice_setup_rx_ctx(rx_ring);
+	err = ice_vsi_cfg_rxq(rx_ring);
 	if (err)
 		goto free_buf;