Commit 157611c8 authored by David S. Miller

Merge branch 'enetc-cleanups'

Vladimir Oltean says:

====================
Refactoring/cleanup for NXP ENETC

This series performs the following:
- makes the API for Control Buffer Descriptor Rings in enetc_cbdr.c a
  bit more tightly knit (see the sketch after this message).
- moves more logic into enetc_rxbd_next to make the callers simpler
- moves more logic into enetc_refill_rx_ring to make the callers simpler
- removes forward declarations
- simplifies the probe path to unify probing for used and unused PFs.

Nothing radical.
====================
Signed-off-by: David S. Miller <davem@davemloft.net>
parents e2359fad 7a5222cb
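
For orientation, a hedged before/after sketch of the CBDR API change from a
caller's point of view. It is condensed from the hunks below (error handling
abbreviated), so treat it as an illustration rather than verbatim driver code:

	/* Before: the caller set bd_count itself and paired four calls,
	 * passing dev and hw around separately.
	 */
	si->cbd_ring.bd_count = ENETC_CBDR_DEFAULT_SIZE;
	err = enetc_alloc_cbdr(dev, &si->cbd_ring);
	if (err)
		return err;
	enetc_setup_cbdr(hw, &si->cbd_ring);
	/* ... use the ring ... */
	enetc_clear_cbdr(hw);
	enetc_free_cbdr(dev, &si->cbd_ring);

	/* After: one setup/teardown pair; the ring caches its DMA device
	 * and register addresses, so teardown needs only the ring.
	 */
	err = enetc_setup_cbdr(dev, hw, ENETC_CBDR_DEFAULT_SIZE, &si->cbd_ring);
	if (err)
		return err;
	/* ... use the ring ... */
	enetc_teardown_cbdr(&si->cbd_ring);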
@@ -13,44 +13,6 @@
#define ENETC_MAX_SKB_FRAGS 13
#define ENETC_TXBDS_MAX_NEEDED ENETC_TXBDS_NEEDED(ENETC_MAX_SKB_FRAGS + 1)
static int enetc_map_tx_buffs(struct enetc_bdr *tx_ring, struct sk_buff *skb,
int active_offloads);
netdev_tx_t enetc_xmit(struct sk_buff *skb, struct net_device *ndev)
{
struct enetc_ndev_priv *priv = netdev_priv(ndev);
struct enetc_bdr *tx_ring;
int count;
tx_ring = priv->tx_ring[skb->queue_mapping];
if (unlikely(skb_shinfo(skb)->nr_frags > ENETC_MAX_SKB_FRAGS))
if (unlikely(skb_linearize(skb)))
goto drop_packet_err;
count = skb_shinfo(skb)->nr_frags + 1; /* fragments + head */
if (enetc_bd_unused(tx_ring) < ENETC_TXBDS_NEEDED(count)) {
netif_stop_subqueue(ndev, tx_ring->index);
return NETDEV_TX_BUSY;
}
enetc_lock_mdio();
count = enetc_map_tx_buffs(tx_ring, skb, priv->active_offloads);
enetc_unlock_mdio();
if (unlikely(!count))
goto drop_packet_err;
if (enetc_bd_unused(tx_ring) < ENETC_TXBDS_MAX_NEEDED)
netif_stop_subqueue(ndev, tx_ring->index);
return NETDEV_TX_OK;
drop_packet_err:
dev_kfree_skb_any(skb);
return NETDEV_TX_OK;
}
static void enetc_unmap_tx_buff(struct enetc_bdr *tx_ring,
struct enetc_tx_swbd *tx_swbd)
{
@@ -221,6 +183,41 @@ static int enetc_map_tx_buffs(struct enetc_bdr *tx_ring, struct sk_buff *skb,
return 0;
}
netdev_tx_t enetc_xmit(struct sk_buff *skb, struct net_device *ndev)
{
struct enetc_ndev_priv *priv = netdev_priv(ndev);
struct enetc_bdr *tx_ring;
int count;
tx_ring = priv->tx_ring[skb->queue_mapping];
if (unlikely(skb_shinfo(skb)->nr_frags > ENETC_MAX_SKB_FRAGS))
if (unlikely(skb_linearize(skb)))
goto drop_packet_err;
count = skb_shinfo(skb)->nr_frags + 1; /* fragments + head */
if (enetc_bd_unused(tx_ring) < ENETC_TXBDS_NEEDED(count)) {
netif_stop_subqueue(ndev, tx_ring->index);
return NETDEV_TX_BUSY;
}
enetc_lock_mdio();
count = enetc_map_tx_buffs(tx_ring, skb, priv->active_offloads);
enetc_unlock_mdio();
if (unlikely(!count))
goto drop_packet_err;
if (enetc_bd_unused(tx_ring) < ENETC_TXBDS_MAX_NEEDED)
netif_stop_subqueue(ndev, tx_ring->index);
return NETDEV_TX_OK;
drop_packet_err:
dev_kfree_skb_any(skb);
return NETDEV_TX_OK;
}
static irqreturn_t enetc_msix(int irq, void *data)
{
struct enetc_int_vector *v = data;
@@ -242,10 +239,6 @@ static irqreturn_t enetc_msix(int irq, void *data)
return IRQ_HANDLED;
}
static bool enetc_clean_tx_ring(struct enetc_bdr *tx_ring, int napi_budget);
static int enetc_clean_rx_ring(struct enetc_bdr *rx_ring,
struct napi_struct *napi, int work_limit);
static void enetc_rx_dim_work(struct work_struct *w)
{
struct dim *dim = container_of(w, struct dim, work);
@@ -274,50 +267,6 @@ static void enetc_rx_net_dim(struct enetc_int_vector *v)
net_dim(&v->rx_dim, dim_sample);
}
static int enetc_poll(struct napi_struct *napi, int budget)
{
struct enetc_int_vector
*v = container_of(napi, struct enetc_int_vector, napi);
bool complete = true;
int work_done;
int i;
enetc_lock_mdio();
for (i = 0; i < v->count_tx_rings; i++)
if (!enetc_clean_tx_ring(&v->tx_ring[i], budget))
complete = false;
work_done = enetc_clean_rx_ring(&v->rx_ring, napi, budget);
if (work_done == budget)
complete = false;
if (work_done)
v->rx_napi_work = true;
if (!complete) {
enetc_unlock_mdio();
return budget;
}
napi_complete_done(napi, work_done);
if (likely(v->rx_dim_en))
enetc_rx_net_dim(v);
v->rx_napi_work = false;
/* enable interrupts */
enetc_wr_reg_hot(v->rbier, ENETC_RBIER_RXTIE);
for_each_set_bit(i, &v->tx_rings_map, ENETC_MAX_NUM_TXQS)
enetc_wr_reg_hot(v->tbier_base + ENETC_BDR_OFF(i),
ENETC_TBIER_TXTIE);
enetc_unlock_mdio();
return work_done;
}
static int enetc_bd_ready_count(struct enetc_bdr *tx_ring, int ci)
{
int pi = enetc_rd_reg_hot(tx_ring->tcir) & ENETC_TBCIR_IDX_MASK;
@@ -479,18 +428,16 @@ static int enetc_refill_rx_ring(struct enetc_bdr *rx_ring, const int buff_cnt)
/* clear 'R' as well */
rxbd->r.lstatus = 0;
rxbd = enetc_rxbd_next(rx_ring, rxbd, i);
rx_swbd++;
i++;
if (unlikely(i == rx_ring->bd_count)) {
i = 0;
rx_swbd = rx_ring->rx_swbd;
}
enetc_rxbd_next(rx_ring, &rxbd, &i);
rx_swbd = &rx_ring->rx_swbd[i];
}
if (likely(j)) {
rx_ring->next_to_alloc = i; /* keep track for page reuse */
rx_ring->next_to_use = i;
/* update ENETC's consumer index */
enetc_wr_reg_hot(rx_ring->rcir, rx_ring->next_to_use);
}
return j;
@@ -676,13 +623,9 @@ static int enetc_clean_rx_ring(struct enetc_bdr *rx_ring,
u32 bd_status;
u16 size;
if (cleaned_cnt >= ENETC_RXBD_BUNDLE) {
int count = enetc_refill_rx_ring(rx_ring, cleaned_cnt);
/* update ENETC's consumer index */
enetc_wr_reg_hot(rx_ring->rcir, rx_ring->next_to_use);
cleaned_cnt -= count;
}
if (cleaned_cnt >= ENETC_RXBD_BUNDLE)
cleaned_cnt -= enetc_refill_rx_ring(rx_ring,
cleaned_cnt);
rxbd = enetc_rxbd(rx_ring, i);
bd_status = le32_to_cpu(rxbd->r.lstatus);
@@ -700,9 +643,7 @@ static int enetc_clean_rx_ring(struct enetc_bdr *rx_ring,
cleaned_cnt++;
rxbd = enetc_rxbd_next(rx_ring, rxbd, i);
if (unlikely(++i == rx_ring->bd_count))
i = 0;
enetc_rxbd_next(rx_ring, &rxbd, &i);
if (unlikely(bd_status &
ENETC_RXBD_LSTATUS(ENETC_RXBD_ERR_MASK))) {
@@ -711,9 +652,7 @@ static int enetc_clean_rx_ring(struct enetc_bdr *rx_ring,
dma_rmb();
bd_status = le32_to_cpu(rxbd->r.lstatus);
rxbd = enetc_rxbd_next(rx_ring, rxbd, i);
if (unlikely(++i == rx_ring->bd_count))
i = 0;
enetc_rxbd_next(rx_ring, &rxbd, &i);
}
rx_ring->ndev->stats.rx_dropped++;
@@ -736,9 +675,7 @@ static int enetc_clean_rx_ring(struct enetc_bdr *rx_ring,
cleaned_cnt++;
rxbd = enetc_rxbd_next(rx_ring, rxbd, i);
if (unlikely(++i == rx_ring->bd_count))
i = 0;
enetc_rxbd_next(rx_ring, &rxbd, &i);
}
rx_byte_cnt += skb->len;
@@ -758,6 +695,50 @@ static int enetc_clean_rx_ring(struct enetc_bdr *rx_ring,
return rx_frm_cnt;
}
static int enetc_poll(struct napi_struct *napi, int budget)
{
struct enetc_int_vector
*v = container_of(napi, struct enetc_int_vector, napi);
bool complete = true;
int work_done;
int i;
enetc_lock_mdio();
for (i = 0; i < v->count_tx_rings; i++)
if (!enetc_clean_tx_ring(&v->tx_ring[i], budget))
complete = false;
work_done = enetc_clean_rx_ring(&v->rx_ring, napi, budget);
if (work_done == budget)
complete = false;
if (work_done)
v->rx_napi_work = true;
if (!complete) {
enetc_unlock_mdio();
return budget;
}
napi_complete_done(napi, work_done);
if (likely(v->rx_dim_en))
enetc_rx_net_dim(v);
v->rx_napi_work = false;
/* enable interrupts */
enetc_wr_reg_hot(v->rbier, ENETC_RBIER_RXTIE);
for_each_set_bit(i, &v->tx_rings_map, ENETC_MAX_NUM_TXQS)
enetc_wr_reg_hot(v->tbier_base + ENETC_BDR_OFF(i),
ENETC_TBIER_TXTIE);
enetc_unlock_mdio();
return work_done;
}
/* Probing and Init */
#define ENETC_MAX_RFS_SIZE 64
void enetc_get_si_caps(struct enetc_si *si)
@@ -991,60 +972,6 @@ static void enetc_free_rxtx_rings(struct enetc_ndev_priv *priv)
enetc_free_tx_ring(priv->tx_ring[i]);
}
int enetc_alloc_cbdr(struct device *dev, struct enetc_cbdr *cbdr)
{
int size = cbdr->bd_count * sizeof(struct enetc_cbd);
cbdr->bd_base = dma_alloc_coherent(dev, size, &cbdr->bd_dma_base,
GFP_KERNEL);
if (!cbdr->bd_base)
return -ENOMEM;
/* h/w requires 128B alignment */
if (!IS_ALIGNED(cbdr->bd_dma_base, 128)) {
dma_free_coherent(dev, size, cbdr->bd_base, cbdr->bd_dma_base);
return -EINVAL;
}
cbdr->next_to_clean = 0;
cbdr->next_to_use = 0;
return 0;
}
void enetc_free_cbdr(struct device *dev, struct enetc_cbdr *cbdr)
{
int size = cbdr->bd_count * sizeof(struct enetc_cbd);
dma_free_coherent(dev, size, cbdr->bd_base, cbdr->bd_dma_base);
cbdr->bd_base = NULL;
}
void enetc_setup_cbdr(struct enetc_hw *hw, struct enetc_cbdr *cbdr)
{
/* set CBDR cache attributes */
enetc_wr(hw, ENETC_SICAR2,
ENETC_SICAR_RD_COHERENT | ENETC_SICAR_WR_COHERENT);
enetc_wr(hw, ENETC_SICBDRBAR0, lower_32_bits(cbdr->bd_dma_base));
enetc_wr(hw, ENETC_SICBDRBAR1, upper_32_bits(cbdr->bd_dma_base));
enetc_wr(hw, ENETC_SICBDRLENR, ENETC_RTBLENR_LEN(cbdr->bd_count));
enetc_wr(hw, ENETC_SICBDRPIR, 0);
enetc_wr(hw, ENETC_SICBDRCIR, 0);
/* enable ring */
enetc_wr(hw, ENETC_SICBDRMR, BIT(31));
cbdr->pir = hw->reg + ENETC_SICBDRPIR;
cbdr->cir = hw->reg + ENETC_SICBDRCIR;
}
void enetc_clear_cbdr(struct enetc_hw *hw)
{
enetc_wr(hw, ENETC_SICBDRMR, 0);
}
static int enetc_setup_default_rss_table(struct enetc_si *si, int num_groups)
{
int *rss_table;
@@ -1104,45 +1031,22 @@ void enetc_init_si_rings_params(struct enetc_ndev_priv *priv)
priv->bdr_int_num = cpus;
priv->ic_mode = ENETC_IC_RX_ADAPTIVE | ENETC_IC_TX_MANUAL;
priv->tx_ictt = ENETC_TXIC_TIMETHR;
/* SI specific */
si->cbd_ring.bd_count = ENETC_CBDR_DEFAULT_SIZE;
}
int enetc_alloc_si_resources(struct enetc_ndev_priv *priv)
{
struct enetc_si *si = priv->si;
int err;
err = enetc_alloc_cbdr(priv->dev, &si->cbd_ring);
if (err)
return err;
enetc_setup_cbdr(&si->hw, &si->cbd_ring);
priv->cls_rules = kcalloc(si->num_fs_entries, sizeof(*priv->cls_rules),
GFP_KERNEL);
if (!priv->cls_rules) {
err = -ENOMEM;
goto err_alloc_cls;
}
if (!priv->cls_rules)
return -ENOMEM;
return 0;
err_alloc_cls:
enetc_clear_cbdr(&si->hw);
enetc_free_cbdr(priv->dev, &si->cbd_ring);
return err;
}
void enetc_free_si_resources(struct enetc_ndev_priv *priv)
{
struct enetc_si *si = priv->si;
enetc_clear_cbdr(&si->hw);
enetc_free_cbdr(priv->dev, &si->cbd_ring);
kfree(priv->cls_rules);
}
@@ -1213,9 +1117,9 @@ static void enetc_setup_rxbdr(struct enetc_hw *hw, struct enetc_bdr *rx_ring)
rx_ring->rcir = hw->reg + ENETC_BDR(RX, idx, ENETC_RBCIR);
rx_ring->idr = hw->reg + ENETC_SIRXIDR;
enetc_lock_mdio();
enetc_refill_rx_ring(rx_ring, enetc_bd_unused(rx_ring));
/* update ENETC's consumer index */
enetc_rxbdr_wr(hw, idx, ENETC_RBCIR, rx_ring->next_to_use);
enetc_unlock_mdio();
/* enable ring */
enetc_rxbdr_wr(hw, idx, ENETC_RBMR, rbmr);
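
The enetc_refill_rx_ring() change above is what shortens its callers;
condensed from the enetc_clean_rx_ring() hunk, the pattern is roughly:

	/* Before: refill, then write ENETC's consumer index by hand */
	int count = enetc_refill_rx_ring(rx_ring, cleaned_cnt);

	enetc_wr_reg_hot(rx_ring->rcir, rx_ring->next_to_use);
	cleaned_cnt -= count;

	/* After: the consumer-index write lives inside the helper */
	cleaned_cnt -= enetc_refill_rx_ring(rx_ring, cleaned_cnt);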
@@ -98,12 +98,14 @@ struct enetc_cbdr {
void *bd_base; /* points to Rx or Tx BD ring */
void __iomem *pir;
void __iomem *cir;
void __iomem *mr; /* mode register */
int bd_count; /* # of BDs */
int next_to_use;
int next_to_clean;
dma_addr_t bd_dma_base;
struct device *dma_dev;
};
#define ENETC_TXBD(BDR, i) (&(((union enetc_tx_bd *)((BDR).bd_base))[i]))
@@ -119,19 +121,26 @@ static inline union enetc_rx_bd *enetc_rxbd(struct enetc_bdr *rx_ring, int i)
return &(((union enetc_rx_bd *)rx_ring->bd_base)[hw_idx]);
}
static inline union enetc_rx_bd *enetc_rxbd_next(struct enetc_bdr *rx_ring,
union enetc_rx_bd *rxbd,
int i)
static inline void enetc_rxbd_next(struct enetc_bdr *rx_ring,
union enetc_rx_bd **old_rxbd, int *old_index)
{
rxbd++;
union enetc_rx_bd *new_rxbd = *old_rxbd;
int new_index = *old_index;
new_rxbd++;
#ifdef CONFIG_FSL_ENETC_PTP_CLOCK
if (rx_ring->ext_en)
rxbd++;
new_rxbd++;
#endif
if (unlikely(++i == rx_ring->bd_count))
rxbd = rx_ring->bd_base;
return rxbd;
if (unlikely(++new_index == rx_ring->bd_count)) {
new_rxbd = rx_ring->bd_base;
new_index = 0;
}
*old_rxbd = new_rxbd;
*old_index = new_index;
}
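
/* Caller-side illustration, matching the enetc.c hunks above: the old
 * open-coded advance
 *
 *	rxbd = enetc_rxbd_next(rx_ring, rxbd, i);
 *	if (unlikely(++i == rx_ring->bd_count))
 *		i = 0;
 *
 * collapses into a single call that updates the BD pointer and the
 * index together, in place:
 *
 *	enetc_rxbd_next(rx_ring, &rxbd, &i);
 */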
static inline union enetc_rx_bd *enetc_rxbd_ext(union enetc_rx_bd *rxbd)
@@ -252,7 +261,7 @@ struct enetc_ndev_priv {
u16 rx_bd_count, tx_bd_count;
u16 msg_enable;
int active_offloads;
enum enetc_active_offloads active_offloads;
u32 speed; /* store speed for compare update pspeed */
@@ -310,10 +319,9 @@ int enetc_setup_tc(struct net_device *ndev, enum tc_setup_type type,
void enetc_set_ethtool_ops(struct net_device *ndev);
/* control buffer descriptor ring (CBDR) */
int enetc_alloc_cbdr(struct device *dev, struct enetc_cbdr *cbdr);
void enetc_free_cbdr(struct device *dev, struct enetc_cbdr *cbdr);
void enetc_setup_cbdr(struct enetc_hw *hw, struct enetc_cbdr *cbdr);
void enetc_clear_cbdr(struct enetc_hw *hw);
int enetc_setup_cbdr(struct device *dev, struct enetc_hw *hw, int bd_count,
struct enetc_cbdr *cbdr);
void enetc_teardown_cbdr(struct enetc_cbdr *cbdr);
int enetc_set_mac_flt_entry(struct enetc_si *si, int index,
char *mac_addr, int si_map);
int enetc_clear_mac_flt_entry(struct enetc_si *si, int index);
@@ -3,9 +3,63 @@
#include "enetc.h"
static void enetc_clean_cbdr(struct enetc_si *si)
int enetc_setup_cbdr(struct device *dev, struct enetc_hw *hw, int bd_count,
struct enetc_cbdr *cbdr)
{
int size = bd_count * sizeof(struct enetc_cbd);
cbdr->bd_base = dma_alloc_coherent(dev, size, &cbdr->bd_dma_base,
GFP_KERNEL);
if (!cbdr->bd_base)
return -ENOMEM;
/* h/w requires 128B alignment */
if (!IS_ALIGNED(cbdr->bd_dma_base, 128)) {
dma_free_coherent(dev, size, cbdr->bd_base,
cbdr->bd_dma_base);
return -EINVAL;
}
cbdr->next_to_clean = 0;
cbdr->next_to_use = 0;
cbdr->dma_dev = dev;
cbdr->bd_count = bd_count;
cbdr->pir = hw->reg + ENETC_SICBDRPIR;
cbdr->cir = hw->reg + ENETC_SICBDRCIR;
cbdr->mr = hw->reg + ENETC_SICBDRMR;
/* set CBDR cache attributes */
enetc_wr(hw, ENETC_SICAR2,
ENETC_SICAR_RD_COHERENT | ENETC_SICAR_WR_COHERENT);
enetc_wr(hw, ENETC_SICBDRBAR0, lower_32_bits(cbdr->bd_dma_base));
enetc_wr(hw, ENETC_SICBDRBAR1, upper_32_bits(cbdr->bd_dma_base));
enetc_wr(hw, ENETC_SICBDRLENR, ENETC_RTBLENR_LEN(cbdr->bd_count));
enetc_wr_reg(cbdr->pir, cbdr->next_to_clean);
enetc_wr_reg(cbdr->cir, cbdr->next_to_use);
/* enable ring */
enetc_wr_reg(cbdr->mr, BIT(31));
return 0;
}
void enetc_teardown_cbdr(struct enetc_cbdr *cbdr)
{
int size = cbdr->bd_count * sizeof(struct enetc_cbd);
/* disable ring */
enetc_wr_reg(cbdr->mr, 0);
dma_free_coherent(cbdr->dma_dev, size, cbdr->bd_base,
cbdr->bd_dma_base);
cbdr->bd_base = NULL;
cbdr->dma_dev = NULL;
}
static void enetc_clean_cbdr(struct enetc_cbdr *ring)
{
struct enetc_cbdr *ring = &si->cbd_ring;
struct enetc_cbd *dest_cbd;
int i, status;
@@ -15,7 +69,7 @@ static void enetc_clean_cbdr(struct enetc_si *si)
dest_cbd = ENETC_CBD(*ring, i);
status = dest_cbd->status_flags & ENETC_CBD_STATUS_MASK;
if (status)
dev_warn(&si->pdev->dev, "CMD err %04x for cmd %04x\n",
dev_warn(ring->dma_dev, "CMD err %04x for cmd %04x\n",
status, dest_cbd->cmd);
memset(dest_cbd, 0, sizeof(*dest_cbd));
@@ -43,7 +97,7 @@ int enetc_send_cmd(struct enetc_si *si, struct enetc_cbd *cbd)
return -EIO;
if (unlikely(!enetc_cbd_unused(ring)))
enetc_clean_cbdr(si);
enetc_clean_cbdr(ring);
i = ring->next_to_use;
dest_cbd = ENETC_CBD(*ring, i);
@@ -69,7 +123,7 @@ int enetc_send_cmd(struct enetc_si *si, struct enetc_cbd *cbd)
/* CBD may writeback data, feedback up level */
*cbd = *dest_cbd;
enetc_clean_cbdr(si);
enetc_clean_cbdr(ring);
return 0;
}
@@ -117,6 +171,7 @@ int enetc_set_mac_flt_entry(struct enetc_si *si, int index,
int enetc_set_fs_entry(struct enetc_si *si, struct enetc_cmd_rfse *rfse,
int index)
{
struct enetc_cbdr *ring = &si->cbd_ring;
struct enetc_cbd cbd = {.cmd = 0};
dma_addr_t dma, dma_align;
void *tmp, *tmp_align;
@@ -129,10 +184,10 @@ int enetc_set_fs_entry(struct enetc_si *si, struct enetc_cmd_rfse *rfse,
cbd.length = cpu_to_le16(sizeof(*rfse));
cbd.opt[3] = cpu_to_le32(0); /* SI */
tmp = dma_alloc_coherent(&si->pdev->dev, sizeof(*rfse) + RFSE_ALIGN,
tmp = dma_alloc_coherent(ring->dma_dev, sizeof(*rfse) + RFSE_ALIGN,
&dma, GFP_KERNEL);
if (!tmp) {
dev_err(&si->pdev->dev, "DMA mapping of RFS entry failed!\n");
dev_err(ring->dma_dev, "DMA mapping of RFS entry failed!\n");
return -ENOMEM;
}
@@ -145,9 +200,9 @@ int enetc_set_fs_entry(struct enetc_si *si, struct enetc_cmd_rfse *rfse,
err = enetc_send_cmd(si, &cbd);
if (err)
dev_err(&si->pdev->dev, "FS entry add failed (%d)!", err);
dev_err(ring->dma_dev, "FS entry add failed (%d)!", err);
dma_free_coherent(&si->pdev->dev, sizeof(*rfse) + RFSE_ALIGN,
dma_free_coherent(ring->dma_dev, sizeof(*rfse) + RFSE_ALIGN,
tmp, dma);
return err;
@@ -157,6 +212,7 @@ int enetc_set_fs_entry(struct enetc_si *si, struct enetc_cmd_rfse *rfse,
static int enetc_cmd_rss_table(struct enetc_si *si, u32 *table, int count,
bool read)
{
struct enetc_cbdr *ring = &si->cbd_ring;
struct enetc_cbd cbd = {.cmd = 0};
dma_addr_t dma, dma_align;
u8 *tmp, *tmp_align;
@@ -166,10 +222,10 @@ static int enetc_cmd_rss_table(struct enetc_si *si, u32 *table, int count,
/* HW only takes in a full 64 entry table */
return -EINVAL;
tmp = dma_alloc_coherent(&si->pdev->dev, count + RSSE_ALIGN,
tmp = dma_alloc_coherent(ring->dma_dev, count + RSSE_ALIGN,
&dma, GFP_KERNEL);
if (!tmp) {
dev_err(&si->pdev->dev, "DMA mapping of RSS table failed!\n");
dev_err(ring->dma_dev, "DMA mapping of RSS table failed!\n");
return -ENOMEM;
}
dma_align = ALIGN(dma, RSSE_ALIGN);
@@ -189,13 +245,13 @@ static int enetc_cmd_rss_table(struct enetc_si *si, u32 *table, int count,
err = enetc_send_cmd(si, &cbd);
if (err)
dev_err(&si->pdev->dev, "RSS cmd failed (%d)!", err);
dev_err(ring->dma_dev, "RSS cmd failed (%d)!", err);
if (read)
for (i = 0; i < count; i++)
table[i] = tmp_align[i];
dma_free_coherent(&si->pdev->dev, count + RSSE_ALIGN, tmp, dma);
dma_free_coherent(ring->dma_dev, count + RSSE_ALIGN, tmp, dma);
return err;
}
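
The new dma_dev member is what lets enetc_cbdr.c shed its struct enetc_si
plumbing; the substitution recurring through the hunks above is, roughly:

	/* Before: reach the DMA device through the station interface */
	tmp = dma_alloc_coherent(&si->pdev->dev, size, &dma, GFP_KERNEL);

	/* After: the ring remembers the device it was created with */
	tmp = dma_alloc_coherent(ring->dma_dev, size, &dma, GFP_KERNEL);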
@@ -1081,26 +1081,6 @@ static int enetc_init_port_rss_memory(struct enetc_si *si)
return err;
}
static void enetc_init_unused_port(struct enetc_si *si)
{
struct device *dev = &si->pdev->dev;
struct enetc_hw *hw = &si->hw;
int err;
si->cbd_ring.bd_count = ENETC_CBDR_DEFAULT_SIZE;
err = enetc_alloc_cbdr(dev, &si->cbd_ring);
if (err)
return;
enetc_setup_cbdr(hw, &si->cbd_ring);
enetc_init_port_rfs_memory(si);
enetc_init_port_rss_memory(si);
enetc_clear_cbdr(hw);
enetc_free_cbdr(dev, &si->cbd_ring);
}
static int enetc_pf_probe(struct pci_dev *pdev,
const struct pci_device_id *ent)
{
@@ -1124,8 +1104,24 @@ static int enetc_pf_probe(struct pci_dev *pdev,
goto err_map_pf_space;
}
err = enetc_setup_cbdr(&pdev->dev, &si->hw, ENETC_CBDR_DEFAULT_SIZE,
&si->cbd_ring);
if (err)
goto err_setup_cbdr;
err = enetc_init_port_rfs_memory(si);
if (err) {
dev_err(&pdev->dev, "Failed to initialize RFS memory\n");
goto err_init_port_rfs;
}
err = enetc_init_port_rss_memory(si);
if (err) {
dev_err(&pdev->dev, "Failed to initialize RSS memory\n");
goto err_init_port_rss;
}
if (node && !of_device_is_available(node)) {
enetc_init_unused_port(si);
dev_info(&pdev->dev, "device is disabled, skipping\n");
err = -ENODEV;
goto err_device_disabled;
@@ -1158,18 +1154,6 @@ static int enetc_pf_probe(struct pci_dev *pdev,
goto err_alloc_si_res;
}
err = enetc_init_port_rfs_memory(si);
if (err) {
dev_err(&pdev->dev, "Failed to initialize RFS memory\n");
goto err_init_port_rfs;
}
err = enetc_init_port_rss_memory(si);
if (err) {
dev_err(&pdev->dev, "Failed to initialize RSS memory\n");
goto err_init_port_rss;
}
err = enetc_configure_si(priv);
if (err) {
dev_err(&pdev->dev, "Failed to configure SI\n");
@@ -1205,15 +1189,17 @@ static int enetc_pf_probe(struct pci_dev *pdev,
err_mdiobus_create:
enetc_free_msix(priv);
err_config_si:
err_init_port_rss:
err_init_port_rfs:
err_alloc_msix:
enetc_free_si_resources(priv);
err_alloc_si_res:
si->ndev = NULL;
free_netdev(ndev);
err_alloc_netdev:
err_init_port_rss:
err_init_port_rfs:
err_device_disabled:
enetc_teardown_cbdr(&si->cbd_ring);
err_setup_cbdr:
err_map_pf_space:
enetc_pci_remove(pdev);
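
With enetc_init_unused_port() gone, used and unused PFs share one probe
prefix. A hedged condensation of the new ordering in enetc_pf_probe()
(labels as in the hunks above):

	err = enetc_setup_cbdr(&pdev->dev, &si->hw, ENETC_CBDR_DEFAULT_SIZE,
			       &si->cbd_ring);
	if (err)
		goto err_setup_cbdr;

	err = enetc_init_port_rfs_memory(si);	/* now done for every PF */
	if (err)
		goto err_init_port_rfs;

	err = enetc_init_port_rss_memory(si);
	if (err)
		goto err_init_port_rss;

	/* a PF disabled in the device tree bails out here, unwinding
	 * through the same error labels instead of a dedicated helper
	 */
	if (node && !of_device_is_available(node)) {
		err = -ENODEV;
		goto err_device_disabled;
	}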
@@ -165,6 +165,11 @@ static int enetc_vf_probe(struct pci_dev *pdev,
enetc_init_si_rings_params(priv);
err = enetc_setup_cbdr(priv->dev, &si->hw, ENETC_CBDR_DEFAULT_SIZE,
&si->cbd_ring);
if (err)
goto err_setup_cbdr;
err = enetc_alloc_si_resources(priv);
if (err) {
dev_err(&pdev->dev, "SI resource alloc failed\n");
@@ -197,6 +202,8 @@ static int enetc_vf_probe(struct pci_dev *pdev,
err_alloc_msix:
enetc_free_si_resources(priv);
err_alloc_si_res:
enetc_teardown_cbdr(&si->cbd_ring);
err_setup_cbdr:
si->ndev = NULL;
free_netdev(ndev);
err_alloc_netdev: