Commit 5eeba397 authored by David S. Miller

Merge branch 'stmmac-multi-queue-fixes-and-cleanups'

Niklas Cassel says:

====================
stmmac multi-queue fixes and cleanups
====================
Reviewed-by: Jose Abreu <joabreu@synopsys.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
parents 3fef2b62 ce339abc
@@ -120,7 +120,7 @@ static void dwmac4_tx_queue_priority(struct mac_device_info *hw,
 	writel(value, ioaddr + base_register);
 }
 
-static void dwmac4_tx_queue_routing(struct mac_device_info *hw,
+static void dwmac4_rx_queue_routing(struct mac_device_info *hw,
 				    u8 packet, u32 queue)
 {
 	void __iomem *ioaddr = hw->pcsr;
@@ -713,7 +713,7 @@ static const struct stmmac_ops dwmac4_ops = {
 	.rx_queue_enable = dwmac4_rx_queue_enable,
 	.rx_queue_prio = dwmac4_rx_queue_priority,
 	.tx_queue_prio = dwmac4_tx_queue_priority,
-	.rx_queue_routing = dwmac4_tx_queue_routing,
+	.rx_queue_routing = dwmac4_rx_queue_routing,
 	.prog_mtl_rx_algorithms = dwmac4_prog_mtl_rx_algorithms,
 	.prog_mtl_tx_algorithms = dwmac4_prog_mtl_tx_algorithms,
 	.set_mtl_tx_queue_weight = dwmac4_set_mtl_tx_queue_weight,
@@ -744,7 +744,7 @@ static const struct stmmac_ops dwmac410_ops = {
 	.rx_queue_enable = dwmac4_rx_queue_enable,
 	.rx_queue_prio = dwmac4_rx_queue_priority,
 	.tx_queue_prio = dwmac4_tx_queue_priority,
-	.rx_queue_routing = dwmac4_tx_queue_routing,
+	.rx_queue_routing = dwmac4_rx_queue_routing,
 	.prog_mtl_rx_algorithms = dwmac4_prog_mtl_rx_algorithms,
 	.prog_mtl_tx_algorithms = dwmac4_prog_mtl_tx_algorithms,
 	.set_mtl_tx_queue_weight = dwmac4_set_mtl_tx_queue_weight,
...
@@ -58,6 +58,7 @@ struct stmmac_tx_queue {
 	unsigned int dirty_tx;
 	dma_addr_t dma_tx_phy;
 	u32 tx_tail_addr;
+	u32 mss;
 };
 
 struct stmmac_rx_queue {
@@ -138,7 +139,6 @@ struct stmmac_priv {
 	spinlock_t ptp_lock;
 	void __iomem *mmcaddr;
 	void __iomem *ptpaddr;
-	u32 mss;
 
 #ifdef CONFIG_DEBUG_FS
 	struct dentry *dbgfs_dir;
...
@@ -1355,6 +1355,7 @@ static int init_dma_tx_desc_rings(struct net_device *dev)
 		tx_q->dirty_tx = 0;
 		tx_q->cur_tx = 0;
+		tx_q->mss = 0;
 
 		netdev_tx_reset_queue(netdev_get_tx_queue(priv->dev, queue));
 	}
@@ -1946,6 +1947,7 @@ static void stmmac_tx_err(struct stmmac_priv *priv, u32 chan)
 						     (i == DMA_TX_SIZE - 1));
 	tx_q->dirty_tx = 0;
 	tx_q->cur_tx = 0;
+	tx_q->mss = 0;
 
 	netdev_tx_reset_queue(netdev_get_tx_queue(priv->dev, chan));
 	stmmac_start_tx_dma(priv, chan);
@@ -2430,7 +2432,7 @@ static void stmmac_mac_config_rx_queues_routing(struct stmmac_priv *priv)
 			continue;
 
 		packet = priv->plat->rx_queues_cfg[queue].pkt_route;
-		priv->hw->mac->rx_queue_prio(priv->hw, packet, queue);
+		priv->hw->mac->rx_queue_routing(priv->hw, packet, queue);
 	}
 }
@@ -2632,7 +2634,6 @@ static int stmmac_open(struct net_device *dev)
 	priv->dma_buf_sz = STMMAC_ALIGN(buf_sz);
 	priv->rx_copybreak = STMMAC_RX_COPYBREAK;
-	priv->mss = 0;
 
 	ret = alloc_dma_desc_resources(priv);
 	if (ret < 0) {
@@ -2793,6 +2794,7 @@ static void stmmac_tso_allocator(struct stmmac_priv *priv, unsigned int des,
 	while (tmp_len > 0) {
 		tx_q->cur_tx = STMMAC_GET_ENTRY(tx_q->cur_tx, DMA_TX_SIZE);
+		WARN_ON(tx_q->tx_skbuff[tx_q->cur_tx]);
 		desc = tx_q->dma_tx + tx_q->cur_tx;
 
 		desc->des0 = cpu_to_le32(des + (total_len - tmp_len));
@@ -2872,11 +2874,12 @@ static netdev_tx_t stmmac_tso_xmit(struct sk_buff *skb, struct net_device *dev)
 	mss = skb_shinfo(skb)->gso_size;
 
 	/* set new MSS value if needed */
-	if (mss != priv->mss) {
+	if (mss != tx_q->mss) {
 		mss_desc = tx_q->dma_tx + tx_q->cur_tx;
 		priv->hw->desc->set_mss(mss_desc, mss);
-		priv->mss = mss;
+		tx_q->mss = mss;
 		tx_q->cur_tx = STMMAC_GET_ENTRY(tx_q->cur_tx, DMA_TX_SIZE);
+		WARN_ON(tx_q->tx_skbuff[tx_q->cur_tx]);
 	}
 
 	if (netif_msg_tx_queued(priv)) {
@@ -2887,6 +2890,7 @@ static netdev_tx_t stmmac_tso_xmit(struct sk_buff *skb, struct net_device *dev)
 	}
 
 	first_entry = tx_q->cur_tx;
+	WARN_ON(tx_q->tx_skbuff[first_entry]);
 	desc = tx_q->dma_tx + first_entry;
 	first = desc;
@@ -2926,7 +2930,6 @@ static netdev_tx_t stmmac_tso_xmit(struct sk_buff *skb, struct net_device *dev)
 
 		tx_q->tx_skbuff_dma[tx_q->cur_tx].buf = des;
 		tx_q->tx_skbuff_dma[tx_q->cur_tx].len = skb_frag_size(frag);
-		tx_q->tx_skbuff[tx_q->cur_tx] = NULL;
 		tx_q->tx_skbuff_dma[tx_q->cur_tx].map_as_page = true;
 	}
@@ -3062,6 +3065,7 @@ static netdev_tx_t stmmac_xmit(struct sk_buff *skb, struct net_device *dev)
 	entry = tx_q->cur_tx;
 	first_entry = entry;
+	WARN_ON(tx_q->tx_skbuff[first_entry]);
 
 	csum_insertion = (skb->ip_summed == CHECKSUM_PARTIAL);
@@ -3090,6 +3094,7 @@ static netdev_tx_t stmmac_xmit(struct sk_buff *skb, struct net_device *dev)
 		bool last_segment = (i == (nfrags - 1));
 
 		entry = STMMAC_GET_ENTRY(entry, DMA_TX_SIZE);
+		WARN_ON(tx_q->tx_skbuff[entry]);
 
 		if (likely(priv->extend_desc))
 			desc = (struct dma_desc *)(tx_q->dma_etx + entry);
@@ -3101,8 +3106,6 @@ static netdev_tx_t stmmac_xmit(struct sk_buff *skb, struct net_device *dev)
 		if (dma_mapping_error(priv->device, des))
 			goto dma_map_err; /* should reuse desc w/o issues */
 
-		tx_q->tx_skbuff[entry] = NULL;
-
 		tx_q->tx_skbuff_dma[entry].buf = des;
 
 		if (unlikely(priv->synopsys_id >= DWMAC_CORE_4_00))
 			desc->des0 = cpu_to_le32(des);
@@ -4436,6 +4439,7 @@ static void stmmac_reset_queues_param(struct stmmac_priv *priv)
 		tx_q->cur_tx = 0;
 		tx_q->dirty_tx = 0;
+		tx_q->mss = 0;
 	}
 }
@@ -4481,11 +4485,6 @@ int stmmac_resume(struct device *dev)
 	stmmac_reset_queues_param(priv);
 
-	/* reset private mss value to force mss context settings at
-	 * next tso xmit (only used for gmac4).
-	 */
-	priv->mss = 0;
-
 	stmmac_clear_descriptors(priv);
 
 	stmmac_hw_setup(ndev, false);
...
@@ -135,13 +135,14 @@ static struct stmmac_axi *stmmac_axi_setup(struct platform_device *pdev)
  * stmmac_mtl_setup - parse DT parameters for multiple queues configuration
  * @pdev: platform device
  */
-static void stmmac_mtl_setup(struct platform_device *pdev,
-			     struct plat_stmmacenet_data *plat)
+static int stmmac_mtl_setup(struct platform_device *pdev,
+			    struct plat_stmmacenet_data *plat)
 {
 	struct device_node *q_node;
 	struct device_node *rx_node;
 	struct device_node *tx_node;
 	u8 queue = 0;
+	int ret = 0;
 
 	/* For backwards-compatibility with device trees that don't have any
 	 * snps,mtl-rx-config or snps,mtl-tx-config properties, we fall back
@@ -159,12 +160,12 @@ static void stmmac_mtl_setup(struct platform_device *pdev,
 	rx_node = of_parse_phandle(pdev->dev.of_node, "snps,mtl-rx-config", 0);
 	if (!rx_node)
-		return;
+		return ret;
 
 	tx_node = of_parse_phandle(pdev->dev.of_node, "snps,mtl-tx-config", 0);
 	if (!tx_node) {
 		of_node_put(rx_node);
-		return;
+		return ret;
 	}
 
 	/* Processing RX queues common config */
@@ -220,6 +221,11 @@ static void stmmac_mtl_setup(struct platform_device *pdev,
 		queue++;
 	}
 
+	if (queue != plat->rx_queues_to_use) {
+		ret = -EINVAL;
+		dev_err(&pdev->dev, "Not all RX queues were configured\n");
+		goto out;
+	}
+
 	/* Processing TX queues common config */
 	if (of_property_read_u32(tx_node, "snps,tx-queues-to-use",
@@ -281,10 +287,18 @@ static void stmmac_mtl_setup(struct platform_device *pdev,
 		queue++;
 	}
 
+	if (queue != plat->tx_queues_to_use) {
+		ret = -EINVAL;
+		dev_err(&pdev->dev, "Not all TX queues were configured\n");
+		goto out;
+	}
+
+out:
 	of_node_put(rx_node);
 	of_node_put(tx_node);
 	of_node_put(q_node);
+
+	return ret;
 }
 
 /**
@@ -376,6 +390,7 @@ stmmac_probe_config_dt(struct platform_device *pdev, const char **mac)
 	struct device_node *np = pdev->dev.of_node;
 	struct plat_stmmacenet_data *plat;
 	struct stmmac_dma_cfg *dma_cfg;
+	int rc;
 
 	plat = devm_kzalloc(&pdev->dev, sizeof(*plat), GFP_KERNEL);
 	if (!plat)
@@ -402,8 +417,9 @@ stmmac_probe_config_dt(struct platform_device *pdev, const char **mac)
 		dev_warn(&pdev->dev, "snps,phy-addr property is deprecated\n");
 
 	/* To Configure PHY by using all device-tree supported properties */
-	if (stmmac_dt_phy(plat, np, &pdev->dev))
-		return ERR_PTR(-ENODEV);
+	rc = stmmac_dt_phy(plat, np, &pdev->dev);
+	if (rc)
+		return ERR_PTR(rc);
 
 	of_property_read_u32(np, "tx-fifo-depth", &plat->tx_fifo_size);
@@ -499,7 +515,11 @@ stmmac_probe_config_dt(struct platform_device *pdev, const char **mac)
 	plat->axi = stmmac_axi_setup(pdev);
 
-	stmmac_mtl_setup(pdev, plat);
+	rc = stmmac_mtl_setup(pdev, plat);
+	if (rc) {
+		stmmac_remove_config_dt(pdev, plat);
+		return ERR_PTR(rc);
+	}
 
 	/* clock setup */
 	plat->stmmac_clk = devm_clk_get(&pdev->dev,
...
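Note on the stmmac_mtl_setup() change above: the parser counts the queue child nodes it walks, so with this series a device tree must declare exactly as many child nodes under the MTL config nodes as snps,rx-queues-to-use / snps,tx-queues-to-use claim, or probe now fails with -EINVAL and stmmac_probe_config_dt() unwinds via stmmac_remove_config_dt(). A minimal sketch of a conforming configuration, loosely following the stmmac DT binding example (the node labels, the two-queue split, and the per-queue properties are illustrative, not taken from this commit):

	mtl_rx_setup: rx-queues-config {
		snps,rx-queues-to-use = <2>;	/* must match the number of queue nodes below */
		queue0 {
			snps,dcb-algorithm;	/* per-queue properties remain optional */
		};
		queue1 {
			snps,dcb-algorithm;
		};
	};

	mtl_tx_setup: tx-queues-config {
		snps,tx-queues-to-use = <2>;	/* same rule applies on the TX side */
		queue0 {
		};
		queue1 {
		};
	};

If, say, queue1 were missing while snps,rx-queues-to-use stayed at <2>, the loop would exit with queue == 1, trip the new "Not all RX queues were configured" check, and the driver would refuse to probe rather than run with a half-configured MTL.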