Commit ce736788 authored by Joao Pinto, committed by David S. Miller

net: stmmac: adding multiple buffers for TX

This patch adds the structure stmmac_tx_queue, which contains TX queue
specific data (previously held directly in stmmac_priv).
Signed-off-by: Joao Pinto <jpinto@synopsys.com>
Tested-by: Niklas Cassel <niklas.cassel@axis.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
parent 54139cf3
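The change follows one pattern throughout the hunks below: per-queue TX state
that used to be read and written directly through stmmac_priv is now reached
through a struct stmmac_tx_queue instance, which carries a priv_data
back-pointer to the driver context. A minimal sketch of the access pattern, as
implied by the diff (how queue_index and priv_data get initialized is part of
the driver core, not of this excerpt):

        /* before: one global TX ring per driver instance */
        entry = priv->cur_tx;
        desc  = priv->dma_tx + entry;

        /* after: one ring per TX queue; tx_q->priv_data points back to priv */
        struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];

        entry = tx_q->cur_tx;
        desc  = tx_q->dma_tx + entry;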
@@ -26,12 +26,15 @@
 static int stmmac_jumbo_frm(void *p, struct sk_buff *skb, int csum)
 {
-        struct stmmac_priv *priv = (struct stmmac_priv *)p;
-        unsigned int entry = priv->cur_tx;
-        struct dma_desc *desc = priv->dma_tx + entry;
+        struct stmmac_tx_queue *tx_q = (struct stmmac_tx_queue *)p;
         unsigned int nopaged_len = skb_headlen(skb);
+        struct stmmac_priv *priv = tx_q->priv_data;
+        unsigned int entry = tx_q->cur_tx;
         unsigned int bmax, des2;
         unsigned int i = 1, len;
+        struct dma_desc *desc;
+
+        desc = tx_q->dma_tx + entry;
 
         if (priv->plat->enh_desc)
                 bmax = BUF_SIZE_8KiB;
@@ -45,16 +48,16 @@ static int stmmac_jumbo_frm(void *p, struct sk_buff *skb, int csum)
         desc->des2 = cpu_to_le32(des2);
         if (dma_mapping_error(priv->device, des2))
                 return -1;
-        priv->tx_skbuff_dma[entry].buf = des2;
-        priv->tx_skbuff_dma[entry].len = bmax;
+        tx_q->tx_skbuff_dma[entry].buf = des2;
+        tx_q->tx_skbuff_dma[entry].len = bmax;
         /* do not close the descriptor and do not set own bit */
         priv->hw->desc->prepare_tx_desc(desc, 1, bmax, csum, STMMAC_CHAIN_MODE,
                         0, false);
 
         while (len != 0) {
-                priv->tx_skbuff[entry] = NULL;
+                tx_q->tx_skbuff[entry] = NULL;
                 entry = STMMAC_GET_ENTRY(entry, DMA_TX_SIZE);
-                desc = priv->dma_tx + entry;
+                desc = tx_q->dma_tx + entry;
 
                 if (len > bmax) {
                         des2 = dma_map_single(priv->device,
@@ -63,8 +66,8 @@ static int stmmac_jumbo_frm(void *p, struct sk_buff *skb, int csum)
                         desc->des2 = cpu_to_le32(des2);
                         if (dma_mapping_error(priv->device, des2))
                                 return -1;
-                        priv->tx_skbuff_dma[entry].buf = des2;
-                        priv->tx_skbuff_dma[entry].len = bmax;
+                        tx_q->tx_skbuff_dma[entry].buf = des2;
+                        tx_q->tx_skbuff_dma[entry].len = bmax;
                         priv->hw->desc->prepare_tx_desc(desc, 0, bmax, csum,
                                         STMMAC_CHAIN_MODE, 1,
                                         false);
@@ -77,8 +80,8 @@ static int stmmac_jumbo_frm(void *p, struct sk_buff *skb, int csum)
                         desc->des2 = cpu_to_le32(des2);
                         if (dma_mapping_error(priv->device, des2))
                                 return -1;
-                        priv->tx_skbuff_dma[entry].buf = des2;
-                        priv->tx_skbuff_dma[entry].len = len;
+                        tx_q->tx_skbuff_dma[entry].buf = des2;
+                        tx_q->tx_skbuff_dma[entry].len = len;
                         /* last descriptor can be set now */
                         priv->hw->desc->prepare_tx_desc(desc, 0, len, csum,
                                         STMMAC_CHAIN_MODE, 1,
@@ -87,7 +90,7 @@ static int stmmac_jumbo_frm(void *p, struct sk_buff *skb, int csum)
                 }
         }
 
-        priv->cur_tx = entry;
+        tx_q->cur_tx = entry;
 
         return entry;
 }
@@ -152,17 +155,18 @@ static void stmmac_refill_desc3(void *priv_ptr, struct dma_desc *p)
 static void stmmac_clean_desc3(void *priv_ptr, struct dma_desc *p)
 {
-        struct stmmac_priv *priv = (struct stmmac_priv *)priv_ptr;
-        unsigned int entry = priv->dirty_tx;
+        struct stmmac_tx_queue *tx_q = (struct stmmac_tx_queue *)priv_ptr;
+        struct stmmac_priv *priv = tx_q->priv_data;
+        unsigned int entry = tx_q->dirty_tx;
 
-        if (priv->tx_skbuff_dma[entry].last_segment && !priv->extend_desc &&
+        if (tx_q->tx_skbuff_dma[entry].last_segment && !priv->extend_desc &&
             priv->hwts_tx_en)
                 /* NOTE: Device will overwrite des3 with timestamp value if
                  * 1588-2002 time stamping is enabled, hence reinitialize it
                  * to keep explicit chaining in the descriptor.
                  */
-                p->des3 = cpu_to_le32((unsigned int)((priv->dma_tx_phy +
-                                      ((priv->dirty_tx + 1) % DMA_TX_SIZE))
+                p->des3 = cpu_to_le32((unsigned int)((tx_q->dma_tx_phy +
+                                      ((tx_q->dirty_tx + 1) % DMA_TX_SIZE))
                                       * sizeof(struct dma_desc)));
 }
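The chain-mode jumbo path above keeps its original shape: the linear part of
the skb is mapped in bmax-sized chunks, each chunk takes one descriptor, and
the ring index wraps around DMA_TX_SIZE; only the bookkeeping (tx_skbuff,
tx_skbuff_dma, cur_tx) now lives in tx_q. Below is a small user-space sketch of
just that splitting and index arithmetic, with stand-in values for the driver's
macros (the real bmax and ring size come from the hardware configuration):

        #include <stdio.h>

        #define DMA_TX_SIZE     512u    /* stand-in ring size */
        #define BMAX            8192u   /* stand-in per-descriptor buffer limit */

        /* advance the ring index with wrap-around, as STMMAC_GET_ENTRY does */
        static unsigned int next_entry(unsigned int entry)
        {
                return (entry + 1) % DMA_TX_SIZE;
        }

        int main(void)
        {
                unsigned int entry = 0;                 /* tx_q->cur_tx */
                unsigned int nopaged_len = 20000;       /* a jumbo skb_headlen() */
                unsigned int len = nopaged_len - BMAX;

                printf("desc %u: first chunk, %u bytes\n", entry, BMAX);

                while (len != 0) {
                        unsigned int chunk = len > BMAX ? BMAX : len;

                        entry = next_entry(entry);
                        printf("desc %u: chunk of %u bytes%s\n", entry, chunk,
                               chunk == len ? " (last)" : "");
                        len -= chunk;
                }

                /* the driver stores the final index back into tx_q->cur_tx */
                return 0;
        }

Running it prints one line per descriptor consumed by the jumbo frame. The
ring-mode implementation below receives the same per-queue treatment.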
@@ -26,16 +26,17 @@
 static int stmmac_jumbo_frm(void *p, struct sk_buff *skb, int csum)
 {
-        struct stmmac_priv *priv = (struct stmmac_priv *)p;
-        unsigned int entry = priv->cur_tx;
-        struct dma_desc *desc;
+        struct stmmac_tx_queue *tx_q = (struct stmmac_tx_queue *)p;
         unsigned int nopaged_len = skb_headlen(skb);
+        struct stmmac_priv *priv = tx_q->priv_data;
+        unsigned int entry = tx_q->cur_tx;
         unsigned int bmax, len, des2;
+        struct dma_desc *desc;
 
         if (priv->extend_desc)
-                desc = (struct dma_desc *)(priv->dma_etx + entry);
+                desc = (struct dma_desc *)(tx_q->dma_etx + entry);
         else
-                desc = priv->dma_tx + entry;
+                desc = tx_q->dma_tx + entry;
 
         if (priv->plat->enh_desc)
                 bmax = BUF_SIZE_8KiB;
@@ -52,29 +53,29 @@ static int stmmac_jumbo_frm(void *p, struct sk_buff *skb, int csum)
                 if (dma_mapping_error(priv->device, des2))
                         return -1;
-                priv->tx_skbuff_dma[entry].buf = des2;
-                priv->tx_skbuff_dma[entry].len = bmax;
-                priv->tx_skbuff_dma[entry].is_jumbo = true;
+                tx_q->tx_skbuff_dma[entry].buf = des2;
+                tx_q->tx_skbuff_dma[entry].len = bmax;
+                tx_q->tx_skbuff_dma[entry].is_jumbo = true;
 
                 desc->des3 = cpu_to_le32(des2 + BUF_SIZE_4KiB);
                 priv->hw->desc->prepare_tx_desc(desc, 1, bmax, csum,
                                 STMMAC_RING_MODE, 0, false);
-                priv->tx_skbuff[entry] = NULL;
+                tx_q->tx_skbuff[entry] = NULL;
                 entry = STMMAC_GET_ENTRY(entry, DMA_TX_SIZE);
 
                 if (priv->extend_desc)
-                        desc = (struct dma_desc *)(priv->dma_etx + entry);
+                        desc = (struct dma_desc *)(tx_q->dma_etx + entry);
                 else
-                        desc = priv->dma_tx + entry;
+                        desc = tx_q->dma_tx + entry;
 
                 des2 = dma_map_single(priv->device, skb->data + bmax, len,
                                 DMA_TO_DEVICE);
                 desc->des2 = cpu_to_le32(des2);
                 if (dma_mapping_error(priv->device, des2))
                         return -1;
-                priv->tx_skbuff_dma[entry].buf = des2;
-                priv->tx_skbuff_dma[entry].len = len;
-                priv->tx_skbuff_dma[entry].is_jumbo = true;
+                tx_q->tx_skbuff_dma[entry].buf = des2;
+                tx_q->tx_skbuff_dma[entry].len = len;
+                tx_q->tx_skbuff_dma[entry].is_jumbo = true;
 
                 desc->des3 = cpu_to_le32(des2 + BUF_SIZE_4KiB);
                 priv->hw->desc->prepare_tx_desc(desc, 0, len, csum,
@@ -85,15 +86,15 @@ static int stmmac_jumbo_frm(void *p, struct sk_buff *skb, int csum)
                 desc->des2 = cpu_to_le32(des2);
                 if (dma_mapping_error(priv->device, des2))
                         return -1;
-                priv->tx_skbuff_dma[entry].buf = des2;
-                priv->tx_skbuff_dma[entry].len = nopaged_len;
-                priv->tx_skbuff_dma[entry].is_jumbo = true;
+                tx_q->tx_skbuff_dma[entry].buf = des2;
+                tx_q->tx_skbuff_dma[entry].len = nopaged_len;
+                tx_q->tx_skbuff_dma[entry].is_jumbo = true;
 
                 desc->des3 = cpu_to_le32(des2 + BUF_SIZE_4KiB);
                 priv->hw->desc->prepare_tx_desc(desc, 1, nopaged_len, csum,
                                 STMMAC_RING_MODE, 0, true);
         }
 
-        priv->cur_tx = entry;
+        tx_q->cur_tx = entry;
 
         return entry;
 }
@@ -125,12 +126,13 @@ static void stmmac_init_desc3(struct dma_desc *p)
 static void stmmac_clean_desc3(void *priv_ptr, struct dma_desc *p)
 {
-        struct stmmac_priv *priv = (struct stmmac_priv *)priv_ptr;
-        unsigned int entry = priv->dirty_tx;
+        struct stmmac_tx_queue *tx_q = (struct stmmac_tx_queue *)priv_ptr;
+        struct stmmac_priv *priv = tx_q->priv_data;
+        unsigned int entry = tx_q->dirty_tx;
 
         /* des3 is only used for jumbo frames tx or time stamping */
-        if (unlikely(priv->tx_skbuff_dma[entry].is_jumbo ||
-                     (priv->tx_skbuff_dma[entry].last_segment &&
+        if (unlikely(tx_q->tx_skbuff_dma[entry].is_jumbo ||
+                     (tx_q->tx_skbuff_dma[entry].last_segment &&
                       !priv->extend_desc && priv->hwts_tx_en)))
                 p->des3 = 0;
 }
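Both mode implementations (the chain-mode hunks first, the ring-mode hunks just
above) now cast the opaque pointer they are handed to struct stmmac_tx_queue,
so the callers in the driver core, which are not part of this excerpt, have to
pass the per-queue context instead of priv. Purely as an illustration of the
calling convention implied by those casts, the transmit and cleanup paths would
look roughly like this (skb, csum_insertion, p and queue are placeholders):

        struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];

        /* jumbo transmit: the mode op now expects tx_q behind the void * */
        entry = priv->hw->mode->jumbo_frm(tx_q, skb, csum_insertion);

        /* TX completion: clean_desc3() likewise gets the per-queue context */
        priv->hw->mode->clean_desc3(tx_q, p);

The header changes below add the structure itself and embed a per-queue array
in stmmac_priv.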
@@ -46,6 +46,20 @@ struct stmmac_tx_info {
         bool is_jumbo;
 };
 
+/* Frequently used values are kept adjacent for cache effect */
+struct stmmac_tx_queue {
+        u32 queue_index;
+        struct stmmac_priv *priv_data;
+        struct dma_extended_desc *dma_etx ____cacheline_aligned_in_smp;
+        struct dma_desc *dma_tx;
+        struct sk_buff **tx_skbuff;
+        struct stmmac_tx_info *tx_skbuff_dma;
+        unsigned int cur_tx;
+        unsigned int dirty_tx;
+        dma_addr_t dma_tx_phy;
+        u32 tx_tail_addr;
+};
+
 struct stmmac_rx_queue {
         u32 queue_index;
         struct stmmac_priv *priv_data;
@@ -62,16 +76,10 @@
 struct stmmac_priv {
         /* Frequently used values are kept adjacent for cache effect */
-        struct dma_extended_desc *dma_etx ____cacheline_aligned_in_smp;
-        struct dma_desc *dma_tx;
-        struct sk_buff **tx_skbuff;
-        unsigned int cur_tx;
-        unsigned int dirty_tx;
         u32 tx_count_frames;
         u32 tx_coal_frames;
         u32 tx_coal_timer;
-        struct stmmac_tx_info *tx_skbuff_dma;
-        dma_addr_t dma_tx_phy;
         int tx_coalesce;
         int hwts_tx_en;
         bool tx_path_in_lpi_mode;
@@ -94,6 +102,9 @@
         /* RX Queue */
         struct stmmac_rx_queue rx_queue[MTL_MAX_RX_QUEUES];
 
+        /* TX Queue */
+        struct stmmac_tx_queue tx_queue[MTL_MAX_TX_QUEUES];
+
         int oldlink;
         int speed;
         int oldduplex;
@@ -128,7 +139,6 @@
         spinlock_t ptp_lock;
         void __iomem *mmcaddr;
         void __iomem *ptpaddr;
-        u32 tx_tail_addr;
         u32 mss;
 
 #ifdef CONFIG_DEBUG_FS
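With struct stmmac_tx_queue defined and a tx_queue[MTL_MAX_TX_QUEUES] array
embedded in stmmac_priv, per-queue TX state is naturally set up and torn down
in loops over the configured queues. A hedged sketch of that initialization
pattern (the tx_queues_to_use field and the allocation details belong to the
driver core and are assumptions here, not part of this diff):

        u32 queue;

        for (queue = 0; queue < priv->plat->tx_queues_to_use; queue++) {
                struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];

                tx_q->queue_index = queue;
                tx_q->priv_data = priv;
                /* dma_tx/dma_etx, tx_skbuff[] and tx_skbuff_dma[] are
                 * allocated per queue elsewhere in the driver
                 */
        }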