Commit 095174da authored by Robert-Ionut Alexa, committed by David S. Miller

net: dpaa2-eth: add support for multiple buffer pools per DPNI

This patch allows multiple buffer pools to be configured for a single DPNI
object, so that a distinct DPBP object is no longer necessarily shared among
all queues.
The user can query both the number of buffer pools and the buffer count in
each buffer pool through the .get_ethtool_stats() callback.
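For illustration only (not part of this patch), below is a minimal sketch of how a
later consumer, e.g. per-queue buffer pools for AF_XDP, might use the helpers and
fields introduced here (dpaa2_eth_allocate_dpbp(), priv->bp[], priv->num_bps,
channel->bp); the function name dpaa2_eth_example_attach_pool() and the choice of
channel 0 are made up for the example:

static int dpaa2_eth_example_attach_pool(struct dpaa2_eth_priv *priv)
{
        struct dpaa2_eth_bp *bp;

        /* Respect the driver's upper bound on buffer pools per interface */
        if (priv->num_bps >= DPAA2_ETH_MAX_BPS)
                return -EINVAL;

        /* Allocate, open and enable a new DPBP object for this interface */
        bp = dpaa2_eth_allocate_dpbp(priv);
        if (IS_ERR(bp))
                return PTR_ERR(bp);

        /* Track it in the per-interface array of buffer pools... */
        priv->bp[priv->num_bps++] = bp;

        /* ...and have channel 0 source its Rx buffers from it. The pool is
         * only seeded on ndo_open through dpaa2_eth_seed_pools(), and the
         * DPNI would still have to be told about it via dpni_set_pools(),
         * which is not shown here.
         */
        priv->channel[0]->bp = bp;

        return 0;
}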
Signed-off-by: Robert-Ionut Alexa <robert-ionut.alexa@nxp.com>
Signed-off-by: Ioana Ciornei <ioana.ciornei@nxp.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
parent 33132068
// SPDX-License-Identifier: (GPL-2.0+ OR BSD-3-Clause)
/* Copyright 2014-2016 Freescale Semiconductor Inc.
* Copyright 2016-2020 NXP
* Copyright 2016-2022 NXP
*/
#include <linux/init.h>
#include <linux/module.h>
@@ -304,7 +304,7 @@ static void dpaa2_eth_recycle_buf(struct dpaa2_eth_priv *priv,
if (ch->recycled_bufs_cnt < DPAA2_ETH_BUFS_PER_CMD)
return;
while ((err = dpaa2_io_service_release(ch->dpio, priv->bpid,
while ((err = dpaa2_io_service_release(ch->dpio, ch->bp->bpid,
ch->recycled_bufs,
ch->recycled_bufs_cnt)) == -EBUSY) {
if (retries++ >= DPAA2_ETH_SWP_BUSY_RETRIES)
@@ -1631,7 +1631,7 @@ static int dpaa2_eth_set_tx_csum(struct dpaa2_eth_priv *priv, bool enable)
* to the specified buffer pool
*/
static int dpaa2_eth_add_bufs(struct dpaa2_eth_priv *priv,
struct dpaa2_eth_channel *ch, u16 bpid)
struct dpaa2_eth_channel *ch)
{
struct device *dev = priv->net_dev->dev.parent;
u64 buf_array[DPAA2_ETH_BUFS_PER_CMD];
@@ -1663,12 +1663,12 @@ static int dpaa2_eth_add_bufs(struct dpaa2_eth_priv *priv,
trace_dpaa2_eth_buf_seed(priv->net_dev, page_address(page),
DPAA2_ETH_RX_BUF_RAW_SIZE,
addr, priv->rx_buf_size,
bpid);
ch->bp->bpid);
}
release_bufs:
/* In case the portal is busy, retry until successful */
while ((err = dpaa2_io_service_release(ch->dpio, bpid,
while ((err = dpaa2_io_service_release(ch->dpio, ch->bp->bpid,
buf_array, i)) == -EBUSY) {
if (retries++ >= DPAA2_ETH_SWP_BUSY_RETRIES)
break;
@@ -1697,39 +1697,59 @@ static int dpaa2_eth_add_bufs(struct dpaa2_eth_priv *priv,
return 0;
}
static int dpaa2_eth_seed_pool(struct dpaa2_eth_priv *priv, u16 bpid)
static int dpaa2_eth_seed_pool(struct dpaa2_eth_priv *priv,
struct dpaa2_eth_channel *ch)
{
int i, j;
int i;
int new_count;
for (j = 0; j < priv->num_channels; j++) {
for (i = 0; i < DPAA2_ETH_NUM_BUFS;
i += DPAA2_ETH_BUFS_PER_CMD) {
new_count = dpaa2_eth_add_bufs(priv, priv->channel[j], bpid);
priv->channel[j]->buf_count += new_count;
for (i = 0; i < DPAA2_ETH_NUM_BUFS; i += DPAA2_ETH_BUFS_PER_CMD) {
new_count = dpaa2_eth_add_bufs(priv, ch);
ch->buf_count += new_count;
if (new_count < DPAA2_ETH_BUFS_PER_CMD) {
return -ENOMEM;
}
}
if (new_count < DPAA2_ETH_BUFS_PER_CMD)
return -ENOMEM;
}
return 0;
}
static void dpaa2_eth_seed_pools(struct dpaa2_eth_priv *priv)
{
struct net_device *net_dev = priv->net_dev;
struct dpaa2_eth_channel *channel;
int i, err = 0;
for (i = 0; i < priv->num_channels; i++) {
channel = priv->channel[i];
err = dpaa2_eth_seed_pool(priv, channel);
/* Not much to do; the buffer pool, though not filled up,
* may still contain some buffers which would enable us
* to limp on.
*/
if (err)
netdev_err(net_dev, "Buffer seeding failed for DPBP %d (bpid=%d)\n",
channel->bp->dev->obj_desc.id,
channel->bp->bpid);
}
}
/*
 * Drain the specified number of buffers from the DPNI's private buffer pool.
 * Drain the specified number of buffers from one of the DPNI's private buffer
 * pools.
 * @count must not exceed DPAA2_ETH_BUFS_PER_CMD
 */
static void dpaa2_eth_drain_bufs(struct dpaa2_eth_priv *priv, int count)
static void dpaa2_eth_drain_bufs(struct dpaa2_eth_priv *priv, int bpid,
int count)
{
u64 buf_array[DPAA2_ETH_BUFS_PER_CMD];
int retries = 0;
int ret;
do {
ret = dpaa2_io_service_acquire(NULL, priv->bpid,
buf_array, count);
ret = dpaa2_io_service_acquire(NULL, bpid, buf_array, count);
if (ret < 0) {
if (ret == -EBUSY &&
retries++ < DPAA2_ETH_SWP_BUSY_RETRIES)
@@ -1742,23 +1762,35 @@ static void dpaa2_eth_drain_bufs(struct dpaa2_eth_priv *priv, int count)
} while (ret);
}
static void dpaa2_eth_drain_pool(struct dpaa2_eth_priv *priv)
static void dpaa2_eth_drain_pool(struct dpaa2_eth_priv *priv, int bpid)
{
int i;
dpaa2_eth_drain_bufs(priv, DPAA2_ETH_BUFS_PER_CMD);
dpaa2_eth_drain_bufs(priv, 1);
/* Drain the buffer pool */
dpaa2_eth_drain_bufs(priv, bpid, DPAA2_ETH_BUFS_PER_CMD);
dpaa2_eth_drain_bufs(priv, bpid, 1);
/* Set to zero the buffer count of all channels which were
 * using this buffer pool.
 */
for (i = 0; i < priv->num_channels; i++)
priv->channel[i]->buf_count = 0;
if (priv->channel[i]->bp->bpid == bpid)
priv->channel[i]->buf_count = 0;
}
static void dpaa2_eth_drain_pools(struct dpaa2_eth_priv *priv)
{
int i;
for (i = 0; i < priv->num_bps; i++)
dpaa2_eth_drain_pool(priv, priv->bp[i]->bpid);
}
/* Function is called from softirq context only, so we don't need to guard
* the access to percpu count
*/
static int dpaa2_eth_refill_pool(struct dpaa2_eth_priv *priv,
struct dpaa2_eth_channel *ch,
u16 bpid)
struct dpaa2_eth_channel *ch)
{
int new_count;
@@ -1766,7 +1798,7 @@ static int dpaa2_eth_refill_pool(struct dpaa2_eth_priv *priv,
return 0;
do {
new_count = dpaa2_eth_add_bufs(priv, ch, bpid);
new_count = dpaa2_eth_add_bufs(priv, ch);
if (unlikely(!new_count)) {
/* Out of memory; abort for now, we'll try later on */
break;
@@ -1848,7 +1880,7 @@ static int dpaa2_eth_poll(struct napi_struct *napi, int budget)
break;
/* Refill pool if appropriate */
dpaa2_eth_refill_pool(priv, ch, priv->bpid);
dpaa2_eth_refill_pool(priv, ch);
store_cleaned = dpaa2_eth_consume_frames(ch, &fq);
if (store_cleaned <= 0)
@@ -2047,15 +2079,7 @@ static int dpaa2_eth_open(struct net_device *net_dev)
struct dpaa2_eth_priv *priv = netdev_priv(net_dev);
int err;
err = dpaa2_eth_seed_pool(priv, priv->bpid);
if (err) {
/* Not much to do; the buffer pool, though not filled up,
* may still contain some buffers which would enable us
* to limp on.
*/
netdev_err(net_dev, "Buffer seeding failed for DPBP %d (bpid=%d)\n",
priv->dpbp_dev->obj_desc.id, priv->bpid);
}
dpaa2_eth_seed_pools(priv);
if (!dpaa2_eth_is_type_phy(priv)) {
/* We'll only start the txqs when the link is actually ready;
@@ -2088,7 +2112,7 @@ static int dpaa2_eth_open(struct net_device *net_dev)
enable_err:
dpaa2_eth_disable_ch_napi(priv);
dpaa2_eth_drain_pool(priv);
dpaa2_eth_drain_pools(priv);
return err;
}
@@ -2193,7 +2217,7 @@ static int dpaa2_eth_stop(struct net_device *net_dev)
dpaa2_eth_disable_ch_napi(priv);
/* Empty the buffer pool */
dpaa2_eth_drain_pool(priv);
dpaa2_eth_drain_pools(priv);
/* Empty the Scatter-Gather Buffer cache */
dpaa2_eth_sgt_cache_drain(priv);
@@ -3204,13 +3228,14 @@ static void dpaa2_eth_setup_fqs(struct dpaa2_eth_priv *priv)
dpaa2_eth_set_fq_affinity(priv);
}
/* Allocate and configure one buffer pool for each interface */
static int dpaa2_eth_setup_dpbp(struct dpaa2_eth_priv *priv)
/* Allocate and configure a buffer pool */
struct dpaa2_eth_bp *dpaa2_eth_allocate_dpbp(struct dpaa2_eth_priv *priv)
{
int err;
struct fsl_mc_device *dpbp_dev;
struct device *dev = priv->net_dev->dev.parent;
struct fsl_mc_device *dpbp_dev;
struct dpbp_attr dpbp_attrs;
struct dpaa2_eth_bp *bp;
int err;
err = fsl_mc_object_allocate(to_fsl_mc_device(dev), FSL_MC_POOL_DPBP,
&dpbp_dev);
@@ -3219,12 +3244,16 @@ static int dpaa2_eth_setup_dpbp(struct dpaa2_eth_priv *priv)
err = -EPROBE_DEFER;
else
dev_err(dev, "DPBP device allocation failed\n");
return err;
return ERR_PTR(err);
}
priv->dpbp_dev = dpbp_dev;
bp = kzalloc(sizeof(*bp), GFP_KERNEL);
if (!bp) {
err = -ENOMEM;
goto err_alloc;
}
err = dpbp_open(priv->mc_io, 0, priv->dpbp_dev->obj_desc.id,
err = dpbp_open(priv->mc_io, 0, dpbp_dev->obj_desc.id,
&dpbp_dev->mc_handle);
if (err) {
dev_err(dev, "dpbp_open() failed\n");
@@ -3249,9 +3278,11 @@ static int dpaa2_eth_setup_dpbp(struct dpaa2_eth_priv *priv)
dev_err(dev, "dpbp_get_attributes() failed\n");
goto err_get_attr;
}
priv->bpid = dpbp_attrs.bpid;
return 0;
bp->dev = dpbp_dev;
bp->bpid = dpbp_attrs.bpid;
return bp;
err_get_attr:
dpbp_disable(priv->mc_io, 0, dpbp_dev->mc_handle);
@@ -3259,17 +3290,58 @@ static int dpaa2_eth_setup_dpbp(struct dpaa2_eth_priv *priv)
err_reset:
dpbp_close(priv->mc_io, 0, dpbp_dev->mc_handle);
err_open:
kfree(bp);
err_alloc:
fsl_mc_object_free(dpbp_dev);
return err;
return ERR_PTR(err);
}
static int dpaa2_eth_setup_default_dpbp(struct dpaa2_eth_priv *priv)
{
struct dpaa2_eth_bp *bp;
int i;
bp = dpaa2_eth_allocate_dpbp(priv);
if (IS_ERR(bp))
return PTR_ERR(bp);
priv->bp[DPAA2_ETH_DEFAULT_BP_IDX] = bp;
priv->num_bps++;
for (i = 0; i < priv->num_channels; i++)
priv->channel[i]->bp = bp;
return 0;
}
void dpaa2_eth_free_dpbp(struct dpaa2_eth_priv *priv, struct dpaa2_eth_bp *bp)
{
int idx_bp;
/* Find the index at which this BP is stored */
for (idx_bp = 0; idx_bp < priv->num_bps; idx_bp++)
if (priv->bp[idx_bp] == bp)
break;
/* Drain the pool and disable the associated MC object */
dpaa2_eth_drain_pool(priv, bp->bpid);
dpbp_disable(priv->mc_io, 0, bp->dev->mc_handle);
dpbp_close(priv->mc_io, 0, bp->dev->mc_handle);
fsl_mc_object_free(bp->dev);
kfree(bp);
/* Move the last in-use DPBP into this position */
priv->bp[idx_bp] = priv->bp[priv->num_bps - 1];
priv->num_bps--;
}
static void dpaa2_eth_free_dpbp(struct dpaa2_eth_priv *priv)
static void dpaa2_eth_free_dpbps(struct dpaa2_eth_priv *priv)
{
dpaa2_eth_drain_pool(priv);
dpbp_disable(priv->mc_io, 0, priv->dpbp_dev->mc_handle);
dpbp_close(priv->mc_io, 0, priv->dpbp_dev->mc_handle);
fsl_mc_object_free(priv->dpbp_dev);
int i;
for (i = 0; i < priv->num_bps; i++)
dpaa2_eth_free_dpbp(priv, priv->bp[i]);
}
static int dpaa2_eth_set_buffer_layout(struct dpaa2_eth_priv *priv)
@@ -4154,6 +4226,7 @@ static int dpaa2_eth_set_default_cls(struct dpaa2_eth_priv *priv)
*/
static int dpaa2_eth_bind_dpni(struct dpaa2_eth_priv *priv)
{
struct dpaa2_eth_bp *bp = priv->bp[DPAA2_ETH_DEFAULT_BP_IDX];
struct net_device *net_dev = priv->net_dev;
struct device *dev = net_dev->dev.parent;
struct dpni_pools_cfg pools_params;
@@ -4162,7 +4235,7 @@ static int dpaa2_eth_bind_dpni(struct dpaa2_eth_priv *priv)
int i;
pools_params.num_dpbp = 1;
pools_params.pools[0].dpbp_id = priv->dpbp_dev->obj_desc.id;
pools_params.pools[0].dpbp_id = bp->dev->obj_desc.id;
pools_params.pools[0].backup_pool = 0;
pools_params.pools[0].buffer_size = priv->rx_buf_size;
err = dpni_set_pools(priv->mc_io, 0, priv->mc_token, &pools_params);
@@ -4641,7 +4714,7 @@ static int dpaa2_eth_probe(struct fsl_mc_device *dpni_dev)
dpaa2_eth_setup_fqs(priv);
err = dpaa2_eth_setup_dpbp(priv);
err = dpaa2_eth_setup_default_dpbp(priv);
if (err)
goto err_dpbp_setup;
@@ -4777,7 +4850,7 @@ static int dpaa2_eth_probe(struct fsl_mc_device *dpni_dev)
err_alloc_percpu_stats:
dpaa2_eth_del_ch_napi(priv);
err_bind:
dpaa2_eth_free_dpbp(priv);
dpaa2_eth_free_dpbps(priv);
err_dpbp_setup:
dpaa2_eth_free_dpio(priv);
err_dpio_setup:
@@ -4830,7 +4903,7 @@ static int dpaa2_eth_remove(struct fsl_mc_device *ls_dev)
free_percpu(priv->percpu_extras);
dpaa2_eth_del_ch_napi(priv);
dpaa2_eth_free_dpbp(priv);
dpaa2_eth_free_dpbps(priv);
dpaa2_eth_free_dpio(priv);
dpaa2_eth_free_dpni(priv);
if (priv->onestep_reg_base)
/* SPDX-License-Identifier: (GPL-2.0+ OR BSD-3-Clause) */
/* Copyright 2014-2016 Freescale Semiconductor Inc.
* Copyright 2016-2020 NXP
* Copyright 2016-2022 NXP
*/
#ifndef __DPAA2_ETH_H
@@ -109,6 +109,14 @@
#define DPAA2_ETH_RX_BUF_ALIGN_REV1 256
#define DPAA2_ETH_RX_BUF_ALIGN 64
/* The firmware allows assigning multiple buffer pools to a single DPNI -
 * maximum 8 DPBP objects. By default, only the first DPBP (idx 0) is used for
 * all queues. Thus, when enabling AF_XDP we must accommodate up to 9 DPBP
 * objects: the default one and 8 other distinct buffer pools, one for each queue.
 */
#define DPAA2_ETH_DEFAULT_BP_IDX 0
#define DPAA2_ETH_MAX_BPS 9
/* We are accommodating a skb backpointer and some S/G info
* in the frame's software annotation. The hardware
* options are either 0 or 64, so we choose the latter.
@@ -454,6 +462,11 @@ struct dpaa2_eth_ch_xdp {
unsigned int res;
};
struct dpaa2_eth_bp {
struct fsl_mc_device *dev;
int bpid;
};
struct dpaa2_eth_channel {
struct dpaa2_io_notification_ctx nctx;
struct fsl_mc_device *dpcon;
@@ -472,6 +485,8 @@ struct dpaa2_eth_channel {
/* Buffers to be recycled back in the buffer pool */
u64 recycled_bufs[DPAA2_ETH_BUFS_PER_CMD];
int recycled_bufs_cnt;
struct dpaa2_eth_bp *bp;
};
struct dpaa2_eth_dist_fields {
@@ -535,14 +550,16 @@ struct dpaa2_eth_priv {
u8 ptp_correction_off;
void (*dpaa2_set_onestep_params_cb)(struct dpaa2_eth_priv *priv,
u32 offset, u8 udp);
struct fsl_mc_device *dpbp_dev;
u16 rx_buf_size;
u16 bpid;
struct iommu_domain *iommu_domain;
enum hwtstamp_tx_types tx_tstamp_type; /* Tx timestamping type */
bool rx_tstamp; /* Rx timestamping enabled */
/* Buffer pool management */
struct dpaa2_eth_bp *bp[DPAA2_ETH_MAX_BPS];
int num_bps;
u16 tx_qdid;
struct fsl_mc_io *mc_io;
/* Cores which have an affine DPIO/DPCON.
@@ -771,4 +788,7 @@ void dpaa2_eth_dl_traps_unregister(struct dpaa2_eth_priv *priv);
struct dpaa2_eth_trap_item *dpaa2_eth_dl_get_trap(struct dpaa2_eth_priv *priv,
struct dpaa2_fapr *fapr);
struct dpaa2_eth_bp *dpaa2_eth_allocate_dpbp(struct dpaa2_eth_priv *priv);
void dpaa2_eth_free_dpbp(struct dpaa2_eth_priv *priv, struct dpaa2_eth_bp *bp);
#endif /* __DPAA2_ETH_H */
@@ -241,9 +241,9 @@ static void dpaa2_eth_get_ethtool_stats(struct net_device *net_dev,
u32 bcnt_rx_total = 0, bcnt_tx_total = 0;
struct dpaa2_eth_ch_stats *ch_stats;
struct dpaa2_eth_drv_stats *extras;
u32 buf_cnt, buf_cnt_total = 0;
int j, k, err, num_cnt, i = 0;
u32 fcnt, bcnt;
u32 buf_cnt;
memset(data, 0,
sizeof(u64) * (DPAA2_ETH_NUM_STATS + DPAA2_ETH_NUM_EXTRA_STATS));
@@ -305,12 +305,15 @@ static void dpaa2_eth_get_ethtool_stats(struct net_device *net_dev,
*(data + i++) = fcnt_tx_total;
*(data + i++) = bcnt_tx_total;
err = dpaa2_io_query_bp_count(NULL, priv->bpid, &buf_cnt);
if (err) {
netdev_warn(net_dev, "Buffer count query error %d\n", err);
return;
for (j = 0; j < priv->num_bps; j++) {
err = dpaa2_io_query_bp_count(NULL, priv->bp[j]->bpid, &buf_cnt);
if (err) {
netdev_warn(net_dev, "Buffer count query error %d\n", err);
return;
}
buf_cnt_total += buf_cnt;
}
*(data + i++) = buf_cnt;
*(data + i++) = buf_cnt_total;
if (dpaa2_eth_has_mac(priv))
dpaa2_mac_get_ethtool_stats(priv->mac, data + i);