Commit f3fef2b6 authored by Jan Sokolowski, committed by Jeff Kirsher

i40e: Remove umem from VSI

As the current netdev implementation already contains and provides
UMEMs for us, we no longer need to keep these structures
in i40e_vsi.

Refactor the code to operate on netdev-provided umems.
Signed-off-by: Jan Sokolowski <jan.sokolowski@intel.com>
Acked-by: Björn Töpel <bjorn.topel@intel.com>
Tested-by: Andrew Bowers <andrewx.bowers@intel.com>
Signed-off-by: Jeff Kirsher <jeffrey.t.kirsher@intel.com>
parent 5f4f3b2d
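
The patch leans on the per-queue UMEM pointers that the XDP core now keeps on the netdev itself. For orientation, here is a sketch of the core-side lookup helper the driver switches to; the body is reconstructed from the ~4.20-era net/xdp/xdp_umem.c and should be read as an approximation, not the authoritative source:

/* Approximate shape of the core helper (net/xdp/xdp_umem.c, ~v4.20).
 * The netdev keeps one umem pointer per real RX/TX queue, which is
 * why drivers no longer need a private per-VSI umem array.
 */
struct xdp_umem *xdp_get_umem_from_qid(struct net_device *dev,
				       u16 queue_id)
{
	if (queue_id < dev->real_num_rx_queues)
		return dev->_rx[queue_id].umem;
	if (queue_id < dev->real_num_tx_queues)
		return dev->_tx[queue_id].umem;

	return NULL;	/* no umem bound to this queue id */
}

This is also why the enable path below can simply bound-check qid against real_num_rx_queues/real_num_tx_queues: queue ids outside those ranges cannot have a netdev-side umem slot.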
--- a/drivers/net/ethernet/intel/i40e/i40e.h
+++ b/drivers/net/ethernet/intel/i40e/i40e.h
@@ -34,6 +34,7 @@
 #include <net/pkt_cls.h>
 #include <net/tc_act/tc_gact.h>
 #include <net/tc_act/tc_mirred.h>
+#include <net/xdp_sock.h>
 #include "i40e_type.h"
 #include "i40e_prototype.h"
 #include "i40e_client.h"
@@ -787,11 +788,6 @@ struct i40e_vsi {

 	/* VSI specific handlers */
 	irqreturn_t (*irq_handler)(int irq, void *data);
-
-	/* AF_XDP zero-copy */
-	struct xdp_umem **xsk_umems;
-	u16 num_xsk_umems_used;
-	u16 num_xsk_umems;
 } ____cacheline_internodealigned_in_smp;

 struct i40e_netdev_priv {
@@ -1104,10 +1100,10 @@ static inline struct xdp_umem *i40e_xsk_umem(struct i40e_ring *ring)
 	if (ring_is_xdp(ring))
 		qid -= ring->vsi->alloc_queue_pairs;

-	if (!ring->vsi->xsk_umems || !ring->vsi->xsk_umems[qid] || !xdp_on)
+	if (!xdp_on)
 		return NULL;

-	return ring->vsi->xsk_umems[qid];
+	return xdp_get_umem_from_qid(ring->vsi->netdev, qid);
 }

 int i40e_create_queue_channel(struct i40e_vsi *vsi, struct i40e_channel *ch);
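
Stitched together with the unchanged lines above the hunk, the accessor reads roughly as follows after this change (the opening lines of the function sit outside the hunk, so they are a best-effort reconstruction):

static inline struct xdp_umem *i40e_xsk_umem(struct i40e_ring *ring)
{
	bool xdp_on = i40e_enabled_xdp_vsi(ring->vsi);
	int qid = ring->queue_index;

	/* XDP Tx rings are stacked after the regular rings */
	if (ring_is_xdp(ring))
		qid -= ring->vsi->alloc_queue_pairs;

	if (!xdp_on)
		return NULL;

	/* Single source of truth: ask the netdev, not a vsi-local array */
	return xdp_get_umem_from_qid(ring->vsi->netdev, qid);
}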
--- a/drivers/net/ethernet/intel/i40e/i40e_xsk.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_xsk.c
@@ -9,69 +9,6 @@
 #include "i40e_txrx_common.h"
 #include "i40e_xsk.h"

-/**
- * i40e_alloc_xsk_umems - Allocate an array to store per ring UMEMs
- * @vsi: Current VSI
- *
- * Returns 0 on success, <0 on failure
- **/
-static int i40e_alloc_xsk_umems(struct i40e_vsi *vsi)
-{
-	if (vsi->xsk_umems)
-		return 0;
-
-	vsi->num_xsk_umems_used = 0;
-	vsi->num_xsk_umems = vsi->alloc_queue_pairs;
-	vsi->xsk_umems = kcalloc(vsi->num_xsk_umems, sizeof(*vsi->xsk_umems),
-				 GFP_KERNEL);
-	if (!vsi->xsk_umems) {
-		vsi->num_xsk_umems = 0;
-		return -ENOMEM;
-	}
-
-	return 0;
-}
-
-/**
- * i40e_add_xsk_umem - Store a UMEM for a certain ring/qid
- * @vsi: Current VSI
- * @umem: UMEM to store
- * @qid: Ring/qid to associate with the UMEM
- *
- * Returns 0 on success, <0 on failure
- **/
-static int i40e_add_xsk_umem(struct i40e_vsi *vsi, struct xdp_umem *umem,
-			     u16 qid)
-{
-	int err;
-
-	err = i40e_alloc_xsk_umems(vsi);
-	if (err)
-		return err;
-
-	vsi->xsk_umems[qid] = umem;
-	vsi->num_xsk_umems_used++;
-
-	return 0;
-}
-
-/**
- * i40e_remove_xsk_umem - Remove a UMEM for a certain ring/qid
- * @vsi: Current VSI
- * @qid: Ring/qid associated with the UMEM
- **/
-static void i40e_remove_xsk_umem(struct i40e_vsi *vsi, u16 qid)
-{
-	vsi->xsk_umems[qid] = NULL;
-	vsi->num_xsk_umems_used--;
-
-	if (vsi->num_xsk_umems == 0) {
-		kfree(vsi->xsk_umems);
-		vsi->xsk_umems = NULL;
-		vsi->num_xsk_umems = 0;
-	}
-}
-
 /**
  * i40e_xsk_umem_dma_map - DMA maps all UMEM memory for the netdev
  * @vsi: Current VSI
@@ -140,6 +77,7 @@ static void i40e_xsk_umem_dma_unmap(struct i40e_vsi *vsi, struct xdp_umem *umem)
 static int i40e_xsk_umem_enable(struct i40e_vsi *vsi, struct xdp_umem *umem,
 				u16 qid)
 {
+	struct net_device *netdev = vsi->netdev;
 	struct xdp_umem_fq_reuse *reuseq;
 	bool if_running;
 	int err;
@@ -150,12 +88,9 @@ static int i40e_xsk_umem_enable(struct i40e_vsi *vsi, struct xdp_umem *umem,
 	if (qid >= vsi->num_queue_pairs)
 		return -EINVAL;

-	if (vsi->xsk_umems) {
-		if (qid >= vsi->num_xsk_umems)
-			return -EINVAL;
-		if (vsi->xsk_umems[qid])
-			return -EBUSY;
-	}
+	if (qid >= netdev->real_num_rx_queues ||
+	    qid >= netdev->real_num_tx_queues)
+		return -EINVAL;

 	reuseq = xsk_reuseq_prepare(vsi->rx_rings[0]->count);
 	if (!reuseq)
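
Note that the old driver-side -EBUSY check (umem already attached to this qid) is not simply lost. By this point the core bind path performs an equivalent guard before invoking the driver's setup hook; sketched from the same-era core code, and again an approximation:

/* In the core bind path (xdp_umem_assign_dev(), net/xdp/xdp_umem.c),
 * before the driver's ndo_bpf(XDP_SETUP_XSK_UMEM) hook runs:
 */
if (xdp_get_umem_from_qid(dev, queue_id))
	return -EBUSY;	/* a umem is already bound to this queue */

With the core refusing double-binding, a driver-side -EBUSY test would be dead code.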
@@ -173,13 +108,7 @@ static int i40e_xsk_umem_enable(struct i40e_vsi *vsi, struct xdp_umem *umem,
 		err = i40e_queue_pair_disable(vsi, qid);
 		if (err)
 			return err;
-	}
-
-	err = i40e_add_xsk_umem(vsi, umem, qid);
-	if (err)
-		return err;

-	if (if_running) {
 		err = i40e_queue_pair_enable(vsi, qid);
 		if (err)
 			return err;
@@ -197,11 +126,13 @@ static int i40e_xsk_umem_enable(struct i40e_vsi *vsi, struct xdp_umem *umem,
  **/
 static int i40e_xsk_umem_disable(struct i40e_vsi *vsi, u16 qid)
 {
+	struct net_device *netdev = vsi->netdev;
+	struct xdp_umem *umem;
 	bool if_running;
 	int err;

-	if (!vsi->xsk_umems || qid >= vsi->num_xsk_umems ||
-	    !vsi->xsk_umems[qid])
+	umem = xdp_get_umem_from_qid(netdev, qid);
+	if (!umem)
 		return -EINVAL;

 	if_running = netif_running(vsi->netdev) && i40e_enabled_xdp_vsi(vsi);
@@ -212,8 +143,7 @@ static int i40e_xsk_umem_disable(struct i40e_vsi *vsi, u16 qid)
 			return err;
 	}

-	i40e_xsk_umem_dma_unmap(vsi, vsi->xsk_umems[qid]);
-	i40e_remove_xsk_umem(vsi, qid);
+	i40e_xsk_umem_dma_unmap(vsi, umem);

 	if (if_running) {
 		err = i40e_queue_pair_enable(vsi, qid);
@@ -237,20 +167,18 @@ static int i40e_xsk_umem_disable(struct i40e_vsi *vsi, u16 qid)
 int i40e_xsk_umem_query(struct i40e_vsi *vsi, struct xdp_umem **umem,
 			u16 qid)
 {
+	struct net_device *netdev = vsi->netdev;
+	struct xdp_umem *queried_umem;
+
 	if (vsi->type != I40E_VSI_MAIN)
 		return -EINVAL;

-	if (qid >= vsi->num_queue_pairs)
-		return -EINVAL;
+	queried_umem = xdp_get_umem_from_qid(netdev, qid);

-	if (vsi->xsk_umems) {
-		if (qid >= vsi->num_xsk_umems)
-			return -EINVAL;
-		*umem = vsi->xsk_umems[qid];
-		return 0;
-	}
+	if (!queried_umem)
+		return -EINVAL;

-	*umem = NULL;
+	*umem = queried_umem;
 	return 0;
 }

@@ -945,13 +873,11 @@ void i40e_xsk_clean_tx_ring(struct i40e_ring *tx_ring)
  **/
 bool i40e_xsk_any_rx_ring_enabled(struct i40e_vsi *vsi)
 {
+	struct net_device *netdev = vsi->netdev;
 	int i;

-	if (!vsi->xsk_umems)
-		return false;
-
 	for (i = 0; i < vsi->num_queue_pairs; i++) {
-		if (vsi->xsk_umems[i])
+		if (xdp_get_umem_from_qid(netdev, i))
 			return true;
 	}
