Commit 4d494cdc authored by Fugang Duan, committed by David S. Miller

net: fec: change data structure to support multiqueue

This patch just changes the data structures to support multiqueue.
Only one queue is enabled.

The Ethernet multiqueue mechanism can improve performance on SMP systems.
With a single hardware queue, multiqueue can balance CPU load.
With multiple hardware queues, multiple cores can process network packets in parallel.
See the following article for a detailed discussion of the advantages of multiqueue:
http://vger.kernel.org/~davem/davem_nyc09.pdf

Signed-off-by: Fugang Duan <B38611@freescale.com>
Signed-off-by: Frank Li <frank.li@freescale.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
parent 95a77470
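The diff below indexes the descriptor-active registers by queue number via macros such as FEC_X_DES_ACTIVE(X), and the driver kicks a ring with writel(0, fep->hwp + FEC_X_DES_ACTIVE(queue)). The following is a minimal user-space sketch of that queue-indexed register scheme, for illustration only: the macro shape and register offsets are taken from the patch, while fake_regs and kick_tx_queue() are hypothetical stand-ins for the driver's MMIO write, not code from this commit.

/* Illustration only: user-space sketch of the per-queue register
 * selection scheme introduced by this patch.  Offsets copied from
 * the patch; the "hardware" is a plain array.
 */
#include <stdint.h>
#include <stdio.h>

#define FEC_X_DES_ACTIVE_0	0x014	/* Tx descriptor active, ring 0 */
#define FEC_X_DES_ACTIVE_1	0x1e4	/* Tx descriptor active, ring 1 */
#define FEC_X_DES_ACTIVE_2	0x1ec	/* Tx descriptor active, ring 2 */

/* Same shape as the patch's macro: map a queue index to its register. */
#define FEC_X_DES_ACTIVE(X)	((X) == 1 ? FEC_X_DES_ACTIVE_1 : \
				 ((X) == 2 ? FEC_X_DES_ACTIVE_2 : \
					     FEC_X_DES_ACTIVE_0))

static uint32_t fake_regs[0x400 / 4];	/* stand-in for the MMIO block */

/* Writing any value to X_DES_ACTIVE tells the DMA engine to rescan that
 * ring; per-queue registers let each ring be kicked independently. */
static void kick_tx_queue(unsigned int queue)
{
	fake_regs[FEC_X_DES_ACTIVE(queue) / 4] = 0;
	printf("queue %u -> register offset 0x%03x\n",
	       queue, FEC_X_DES_ACTIVE(queue));
}

int main(void)
{
	for (unsigned int q = 0; q < 3; q++)
		kick_tx_queue(q);
	return 0;
}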
...@@ -27,8 +27,8 @@ ...@@ -27,8 +27,8 @@
*/ */
#define FEC_IEVENT 0x004 /* Interrupt event reg */ #define FEC_IEVENT 0x004 /* Interrupt event reg */
#define FEC_IMASK 0x008 /* Interrupt mask reg */ #define FEC_IMASK 0x008 /* Interrupt mask reg */
#define FEC_R_DES_ACTIVE 0x010 /* Receive descriptor reg */ #define FEC_R_DES_ACTIVE_0 0x010 /* Receive descriptor reg */
#define FEC_X_DES_ACTIVE 0x014 /* Transmit descriptor reg */ #define FEC_X_DES_ACTIVE_0 0x014 /* Transmit descriptor reg */
#define FEC_ECNTRL 0x024 /* Ethernet control reg */ #define FEC_ECNTRL 0x024 /* Ethernet control reg */
#define FEC_MII_DATA 0x040 /* MII manage frame reg */ #define FEC_MII_DATA 0x040 /* MII manage frame reg */
#define FEC_MII_SPEED 0x044 /* MII speed control reg */ #define FEC_MII_SPEED 0x044 /* MII speed control reg */
...@@ -45,14 +45,26 @@ ...@@ -45,14 +45,26 @@
#define FEC_X_WMRK 0x144 /* FIFO transmit water mark */ #define FEC_X_WMRK 0x144 /* FIFO transmit water mark */
#define FEC_R_BOUND 0x14c /* FIFO receive bound reg */ #define FEC_R_BOUND 0x14c /* FIFO receive bound reg */
#define FEC_R_FSTART 0x150 /* FIFO receive start reg */ #define FEC_R_FSTART 0x150 /* FIFO receive start reg */
#define FEC_R_DES_START 0x180 /* Receive descriptor ring */ #define FEC_R_DES_START_1 0x160 /* Receive descriptor ring 1 */
#define FEC_X_DES_START 0x184 /* Transmit descriptor ring */ #define FEC_X_DES_START_1 0x164 /* Transmit descriptor ring 1 */
#define FEC_R_DES_START_2 0x16c /* Receive descriptor ring 2 */
#define FEC_X_DES_START_2 0x170 /* Transmit descriptor ring 2 */
#define FEC_R_DES_START_0 0x180 /* Receive descriptor ring */
#define FEC_X_DES_START_0 0x184 /* Transmit descriptor ring */
#define FEC_R_BUFF_SIZE 0x188 /* Maximum receive buff size */ #define FEC_R_BUFF_SIZE 0x188 /* Maximum receive buff size */
#define FEC_R_FIFO_RSFL 0x190 /* Receive FIFO section full threshold */ #define FEC_R_FIFO_RSFL 0x190 /* Receive FIFO section full threshold */
#define FEC_R_FIFO_RSEM 0x194 /* Receive FIFO section empty threshold */ #define FEC_R_FIFO_RSEM 0x194 /* Receive FIFO section empty threshold */
#define FEC_R_FIFO_RAEM 0x198 /* Receive FIFO almost empty threshold */ #define FEC_R_FIFO_RAEM 0x198 /* Receive FIFO almost empty threshold */
#define FEC_R_FIFO_RAFL 0x19c /* Receive FIFO almost full threshold */ #define FEC_R_FIFO_RAFL 0x19c /* Receive FIFO almost full threshold */
#define FEC_RACC 0x1C4 /* Receive Accelerator function */ #define FEC_RACC 0x1C4 /* Receive Accelerator function */
#define FEC_RCMR_1 0x1c8 /* Receive classification match ring 1 */
#define FEC_RCMR_2 0x1cc /* Receive classification match ring 2 */
#define FEC_DMA_CFG_1 0x1d8 /* DMA class configuration for ring 1 */
#define FEC_DMA_CFG_2 0x1dc /* DMA class Configuration for ring 2 */
#define FEC_R_DES_ACTIVE_1 0x1e0 /* Rx descriptor active for ring 1 */
#define FEC_X_DES_ACTIVE_1 0x1e4 /* Tx descriptor active for ring 1 */
#define FEC_R_DES_ACTIVE_2 0x1e8 /* Rx descriptor active for ring 2 */
#define FEC_X_DES_ACTIVE_2 0x1ec /* Tx descriptor active for ring 2 */
#define FEC_MIIGSK_CFGR 0x300 /* MIIGSK Configuration reg */ #define FEC_MIIGSK_CFGR 0x300 /* MIIGSK Configuration reg */
#define FEC_MIIGSK_ENR 0x308 /* MIIGSK Enable reg */ #define FEC_MIIGSK_ENR 0x308 /* MIIGSK Enable reg */
...@@ -233,6 +245,43 @@ struct bufdesc_ex { ...@@ -233,6 +245,43 @@ struct bufdesc_ex {
/* This device has up to three irqs on some platforms */ /* This device has up to three irqs on some platforms */
#define FEC_IRQ_NUM 3 #define FEC_IRQ_NUM 3
/* Maximum number of queues supported
* ENET with AVB IP can support up to 3 independent tx queues and rx queues.
* User can point the queue number that is less than or equal to 3.
*/
#define FEC_ENET_MAX_TX_QS 3
#define FEC_ENET_MAX_RX_QS 3
#define FEC_R_DES_START(X) ((X == 1) ? FEC_R_DES_START_1 : \
((X == 2) ? \
FEC_R_DES_START_2 : FEC_R_DES_START_0))
#define FEC_X_DES_START(X) ((X == 1) ? FEC_X_DES_START_1 : \
((X == 2) ? \
FEC_X_DES_START_2 : FEC_X_DES_START_0))
#define FEC_R_DES_ACTIVE(X) ((X == 1) ? FEC_R_DES_ACTIVE_1 : \
((X == 2) ? \
FEC_R_DES_ACTIVE_2 : FEC_R_DES_ACTIVE_0))
#define FEC_X_DES_ACTIVE(X) ((X == 1) ? FEC_X_DES_ACTIVE_1 : \
((X == 2) ? \
FEC_X_DES_ACTIVE_2 : FEC_X_DES_ACTIVE_0))
#define FEC_DMA_CFG(X) ((X == 2) ? FEC_DMA_CFG_2 : FEC_DMA_CFG_1)
#define DMA_CLASS_EN (1 << 16)
#define FEC_RCMR(X) ((X == 2) ? FEC_RCMR_2 : FEC_RCMR_1)
#define IDLE_SLOPE_MASK 0xFFFF
#define IDLE_SLOPE_1 0x200 /* BW fraction: 0.5 */
#define IDLE_SLOPE_2 0x200 /* BW fraction: 0.5 */
#define IDLE_SLOPE(X) ((X == 1) ? (IDLE_SLOPE_1 & IDLE_SLOPE_MASK) : \
(IDLE_SLOPE_2 & IDLE_SLOPE_MASK))
#define RCMR_MATCHEN (0x1 << 16)
#define RCMR_CMP_CFG(v, n) ((v & 0x7) << (n << 2))
#define RCMR_CMP_1 (RCMR_CMP_CFG(0, 0) | RCMR_CMP_CFG(1, 1) | \
RCMR_CMP_CFG(2, 2) | RCMR_CMP_CFG(3, 3))
#define RCMR_CMP_2 (RCMR_CMP_CFG(4, 0) | RCMR_CMP_CFG(5, 1) | \
RCMR_CMP_CFG(6, 2) | RCMR_CMP_CFG(7, 3))
#define RCMR_CMP(X) ((X == 1) ? RCMR_CMP_1 : RCMR_CMP_2)
/* The number of Tx and Rx buffers. These are allocated from the page /* The number of Tx and Rx buffers. These are allocated from the page
* pool. The code may assume these are power of two, so it it best * pool. The code may assume these are power of two, so it it best
* to keep them that size. * to keep them that size.
...@@ -256,6 +305,35 @@ struct bufdesc_ex { ...@@ -256,6 +305,35 @@ struct bufdesc_ex {
#define FLAG_RX_CSUM_ENABLED (BD_ENET_RX_ICE | BD_ENET_RX_PCR) #define FLAG_RX_CSUM_ENABLED (BD_ENET_RX_ICE | BD_ENET_RX_PCR)
#define FLAG_RX_CSUM_ERROR (BD_ENET_RX_ICE | BD_ENET_RX_PCR) #define FLAG_RX_CSUM_ERROR (BD_ENET_RX_ICE | BD_ENET_RX_PCR)
struct fec_enet_priv_tx_q {
int index;
unsigned char *tx_bounce[TX_RING_SIZE];
struct sk_buff *tx_skbuff[TX_RING_SIZE];
dma_addr_t bd_dma;
struct bufdesc *tx_bd_base;
uint tx_ring_size;
unsigned short tx_stop_threshold;
unsigned short tx_wake_threshold;
struct bufdesc *cur_tx;
struct bufdesc *dirty_tx;
char *tso_hdrs;
dma_addr_t tso_hdrs_dma;
};
struct fec_enet_priv_rx_q {
int index;
struct sk_buff *rx_skbuff[RX_RING_SIZE];
dma_addr_t bd_dma;
struct bufdesc *rx_bd_base;
uint rx_ring_size;
struct bufdesc *cur_rx;
};
/* The FEC buffer descriptors track the ring buffers. The rx_bd_base and /* The FEC buffer descriptors track the ring buffers. The rx_bd_base and
* tx_bd_base always point to the base of the buffer descriptors. The * tx_bd_base always point to the base of the buffer descriptors. The
* cur_rx and cur_tx point to the currently available buffer. * cur_rx and cur_tx point to the currently available buffer.
...@@ -280,29 +358,18 @@ struct fec_enet_private { ...@@ -280,29 +358,18 @@ struct fec_enet_private {
struct mutex ptp_clk_mutex; struct mutex ptp_clk_mutex;
/* The saved address of a sent-in-place packet/buffer, for skfree(). */ /* The saved address of a sent-in-place packet/buffer, for skfree(). */
unsigned char *tx_bounce[TX_RING_SIZE]; struct fec_enet_priv_tx_q *tx_queue[FEC_ENET_MAX_TX_QS];
struct sk_buff *tx_skbuff[TX_RING_SIZE]; struct fec_enet_priv_rx_q *rx_queue[FEC_ENET_MAX_RX_QS];
struct sk_buff *rx_skbuff[RX_RING_SIZE];
/* CPM dual port RAM relative addresses */ unsigned int total_tx_ring_size;
dma_addr_t bd_dma; unsigned int total_rx_ring_size;
/* Address of Rx and Tx buffers */
struct bufdesc *rx_bd_base;
struct bufdesc *tx_bd_base;
/* The next free ring entry */
struct bufdesc *cur_rx, *cur_tx;
/* The ring entries to be free()ed */
struct bufdesc *dirty_tx;
unsigned short bufdesc_size; unsigned long work_tx;
unsigned short tx_ring_size; unsigned long work_rx;
unsigned short rx_ring_size; unsigned long work_ts;
unsigned short tx_stop_threshold; unsigned long work_mdio;
unsigned short tx_wake_threshold;
/* Software TSO */ unsigned short bufdesc_size;
char *tso_hdrs;
dma_addr_t tso_hdrs_dma;
struct platform_device *pdev; struct platform_device *pdev;
......
...@@ -72,6 +72,8 @@ static void set_multicast_list(struct net_device *ndev); ...@@ -72,6 +72,8 @@ static void set_multicast_list(struct net_device *ndev);
#define DRIVER_NAME "fec" #define DRIVER_NAME "fec"
#define FEC_ENET_GET_QUQUE(_x) ((_x == 0) ? 1 : ((_x == 1) ? 2 : 0))
/* Pause frame feild and FIFO threshold */ /* Pause frame feild and FIFO threshold */
#define FEC_ENET_FCE (1 << 5) #define FEC_ENET_FCE (1 << 5)
#define FEC_ENET_RSEM_V 0x84 #define FEC_ENET_RSEM_V 0x84
...@@ -258,22 +260,26 @@ MODULE_PARM_DESC(macaddr, "FEC Ethernet MAC address"); ...@@ -258,22 +260,26 @@ MODULE_PARM_DESC(macaddr, "FEC Ethernet MAC address");
static int mii_cnt; static int mii_cnt;
static inline static inline
struct bufdesc *fec_enet_get_nextdesc(struct bufdesc *bdp, struct fec_enet_private *fep) struct bufdesc *fec_enet_get_nextdesc(struct bufdesc *bdp,
struct fec_enet_private *fep,
int queue_id)
{ {
struct bufdesc *new_bd = bdp + 1; struct bufdesc *new_bd = bdp + 1;
struct bufdesc_ex *ex_new_bd = (struct bufdesc_ex *)bdp + 1; struct bufdesc_ex *ex_new_bd = (struct bufdesc_ex *)bdp + 1;
struct fec_enet_priv_tx_q *txq = fep->tx_queue[queue_id];
struct fec_enet_priv_rx_q *rxq = fep->rx_queue[queue_id];
struct bufdesc_ex *ex_base; struct bufdesc_ex *ex_base;
struct bufdesc *base; struct bufdesc *base;
int ring_size; int ring_size;
if (bdp >= fep->tx_bd_base) { if (bdp >= txq->tx_bd_base) {
base = fep->tx_bd_base; base = txq->tx_bd_base;
ring_size = fep->tx_ring_size; ring_size = txq->tx_ring_size;
ex_base = (struct bufdesc_ex *)fep->tx_bd_base; ex_base = (struct bufdesc_ex *)txq->tx_bd_base;
} else { } else {
base = fep->rx_bd_base; base = rxq->rx_bd_base;
ring_size = fep->rx_ring_size; ring_size = rxq->rx_ring_size;
ex_base = (struct bufdesc_ex *)fep->rx_bd_base; ex_base = (struct bufdesc_ex *)rxq->rx_bd_base;
} }
if (fep->bufdesc_ex) if (fep->bufdesc_ex)
...@@ -285,22 +291,26 @@ struct bufdesc *fec_enet_get_nextdesc(struct bufdesc *bdp, struct fec_enet_priva ...@@ -285,22 +291,26 @@ struct bufdesc *fec_enet_get_nextdesc(struct bufdesc *bdp, struct fec_enet_priva
} }
static inline static inline
struct bufdesc *fec_enet_get_prevdesc(struct bufdesc *bdp, struct fec_enet_private *fep) struct bufdesc *fec_enet_get_prevdesc(struct bufdesc *bdp,
struct fec_enet_private *fep,
int queue_id)
{ {
struct bufdesc *new_bd = bdp - 1; struct bufdesc *new_bd = bdp - 1;
struct bufdesc_ex *ex_new_bd = (struct bufdesc_ex *)bdp - 1; struct bufdesc_ex *ex_new_bd = (struct bufdesc_ex *)bdp - 1;
struct fec_enet_priv_tx_q *txq = fep->tx_queue[queue_id];
struct fec_enet_priv_rx_q *rxq = fep->rx_queue[queue_id];
struct bufdesc_ex *ex_base; struct bufdesc_ex *ex_base;
struct bufdesc *base; struct bufdesc *base;
int ring_size; int ring_size;
if (bdp >= fep->tx_bd_base) { if (bdp >= txq->tx_bd_base) {
base = fep->tx_bd_base; base = txq->tx_bd_base;
ring_size = fep->tx_ring_size; ring_size = txq->tx_ring_size;
ex_base = (struct bufdesc_ex *)fep->tx_bd_base; ex_base = (struct bufdesc_ex *)txq->tx_bd_base;
} else { } else {
base = fep->rx_bd_base; base = rxq->rx_bd_base;
ring_size = fep->rx_ring_size; ring_size = rxq->rx_ring_size;
ex_base = (struct bufdesc_ex *)fep->rx_bd_base; ex_base = (struct bufdesc_ex *)rxq->rx_bd_base;
} }
if (fep->bufdesc_ex) if (fep->bufdesc_ex)
...@@ -316,14 +326,15 @@ static int fec_enet_get_bd_index(struct bufdesc *base, struct bufdesc *bdp, ...@@ -316,14 +326,15 @@ static int fec_enet_get_bd_index(struct bufdesc *base, struct bufdesc *bdp,
return ((const char *)bdp - (const char *)base) / fep->bufdesc_size; return ((const char *)bdp - (const char *)base) / fep->bufdesc_size;
} }
static int fec_enet_get_free_txdesc_num(struct fec_enet_private *fep) static int fec_enet_get_free_txdesc_num(struct fec_enet_private *fep,
struct fec_enet_priv_tx_q *txq)
{ {
int entries; int entries;
entries = ((const char *)fep->dirty_tx - entries = ((const char *)txq->dirty_tx -
(const char *)fep->cur_tx) / fep->bufdesc_size - 1; (const char *)txq->cur_tx) / fep->bufdesc_size - 1;
return entries > 0 ? entries : entries + fep->tx_ring_size; return entries > 0 ? entries : entries + txq->tx_ring_size;
} }
static void *swap_buffer(void *bufaddr, int len) static void *swap_buffer(void *bufaddr, int len)
...@@ -340,22 +351,26 @@ static void *swap_buffer(void *bufaddr, int len) ...@@ -340,22 +351,26 @@ static void *swap_buffer(void *bufaddr, int len)
static void fec_dump(struct net_device *ndev) static void fec_dump(struct net_device *ndev)
{ {
struct fec_enet_private *fep = netdev_priv(ndev); struct fec_enet_private *fep = netdev_priv(ndev);
struct bufdesc *bdp = fep->tx_bd_base; struct bufdesc *bdp;
unsigned int index = 0; struct fec_enet_priv_tx_q *txq;
int index = 0;
netdev_info(ndev, "TX ring dump\n"); netdev_info(ndev, "TX ring dump\n");
pr_info("Nr SC addr len SKB\n"); pr_info("Nr SC addr len SKB\n");
txq = fep->tx_queue[0];
bdp = txq->tx_bd_base;
do { do {
pr_info("%3u %c%c 0x%04x 0x%08lx %4u %p\n", pr_info("%3u %c%c 0x%04x 0x%08lx %4u %p\n",
index, index,
bdp == fep->cur_tx ? 'S' : ' ', bdp == txq->cur_tx ? 'S' : ' ',
bdp == fep->dirty_tx ? 'H' : ' ', bdp == txq->dirty_tx ? 'H' : ' ',
bdp->cbd_sc, bdp->cbd_bufaddr, bdp->cbd_datlen, bdp->cbd_sc, bdp->cbd_bufaddr, bdp->cbd_datlen,
fep->tx_skbuff[index]); txq->tx_skbuff[index]);
bdp = fec_enet_get_nextdesc(bdp, fep); bdp = fec_enet_get_nextdesc(bdp, fep, 0);
index++; index++;
} while (bdp != fep->tx_bd_base); } while (bdp != txq->tx_bd_base);
} }
static inline bool is_ipv4_pkt(struct sk_buff *skb) static inline bool is_ipv4_pkt(struct sk_buff *skb)
...@@ -381,14 +396,17 @@ fec_enet_clear_csum(struct sk_buff *skb, struct net_device *ndev) ...@@ -381,14 +396,17 @@ fec_enet_clear_csum(struct sk_buff *skb, struct net_device *ndev)
} }
static int static int
fec_enet_txq_submit_frag_skb(struct sk_buff *skb, struct net_device *ndev) fec_enet_txq_submit_frag_skb(struct fec_enet_priv_tx_q *txq,
struct sk_buff *skb,
struct net_device *ndev)
{ {
struct fec_enet_private *fep = netdev_priv(ndev); struct fec_enet_private *fep = netdev_priv(ndev);
const struct platform_device_id *id_entry = const struct platform_device_id *id_entry =
platform_get_device_id(fep->pdev); platform_get_device_id(fep->pdev);
struct bufdesc *bdp = fep->cur_tx; struct bufdesc *bdp = txq->cur_tx;
struct bufdesc_ex *ebdp; struct bufdesc_ex *ebdp;
int nr_frags = skb_shinfo(skb)->nr_frags; int nr_frags = skb_shinfo(skb)->nr_frags;
unsigned short queue = skb_get_queue_mapping(skb);
int frag, frag_len; int frag, frag_len;
unsigned short status; unsigned short status;
unsigned int estatus = 0; unsigned int estatus = 0;
...@@ -400,7 +418,7 @@ fec_enet_txq_submit_frag_skb(struct sk_buff *skb, struct net_device *ndev) ...@@ -400,7 +418,7 @@ fec_enet_txq_submit_frag_skb(struct sk_buff *skb, struct net_device *ndev)
for (frag = 0; frag < nr_frags; frag++) { for (frag = 0; frag < nr_frags; frag++) {
this_frag = &skb_shinfo(skb)->frags[frag]; this_frag = &skb_shinfo(skb)->frags[frag];
bdp = fec_enet_get_nextdesc(bdp, fep); bdp = fec_enet_get_nextdesc(bdp, fep, queue);
ebdp = (struct bufdesc_ex *)bdp; ebdp = (struct bufdesc_ex *)bdp;
status = bdp->cbd_sc; status = bdp->cbd_sc;
...@@ -428,11 +446,11 @@ fec_enet_txq_submit_frag_skb(struct sk_buff *skb, struct net_device *ndev) ...@@ -428,11 +446,11 @@ fec_enet_txq_submit_frag_skb(struct sk_buff *skb, struct net_device *ndev)
bufaddr = page_address(this_frag->page.p) + this_frag->page_offset; bufaddr = page_address(this_frag->page.p) + this_frag->page_offset;
index = fec_enet_get_bd_index(fep->tx_bd_base, bdp, fep); index = fec_enet_get_bd_index(txq->tx_bd_base, bdp, fep);
if (((unsigned long) bufaddr) & FEC_ALIGNMENT || if (((unsigned long) bufaddr) & FEC_ALIGNMENT ||
id_entry->driver_data & FEC_QUIRK_SWAP_FRAME) { id_entry->driver_data & FEC_QUIRK_SWAP_FRAME) {
memcpy(fep->tx_bounce[index], bufaddr, frag_len); memcpy(txq->tx_bounce[index], bufaddr, frag_len);
bufaddr = fep->tx_bounce[index]; bufaddr = txq->tx_bounce[index];
if (id_entry->driver_data & FEC_QUIRK_SWAP_FRAME) if (id_entry->driver_data & FEC_QUIRK_SWAP_FRAME)
swap_buffer(bufaddr, frag_len); swap_buffer(bufaddr, frag_len);
...@@ -452,21 +470,22 @@ fec_enet_txq_submit_frag_skb(struct sk_buff *skb, struct net_device *ndev) ...@@ -452,21 +470,22 @@ fec_enet_txq_submit_frag_skb(struct sk_buff *skb, struct net_device *ndev)
bdp->cbd_sc = status; bdp->cbd_sc = status;
} }
fep->cur_tx = bdp; txq->cur_tx = bdp;
return 0; return 0;
dma_mapping_error: dma_mapping_error:
bdp = fep->cur_tx; bdp = txq->cur_tx;
for (i = 0; i < frag; i++) { for (i = 0; i < frag; i++) {
bdp = fec_enet_get_nextdesc(bdp, fep); bdp = fec_enet_get_nextdesc(bdp, fep, queue);
dma_unmap_single(&fep->pdev->dev, bdp->cbd_bufaddr, dma_unmap_single(&fep->pdev->dev, bdp->cbd_bufaddr,
bdp->cbd_datlen, DMA_TO_DEVICE); bdp->cbd_datlen, DMA_TO_DEVICE);
} }
return NETDEV_TX_OK; return NETDEV_TX_OK;
} }
static int fec_enet_txq_submit_skb(struct sk_buff *skb, struct net_device *ndev) static int fec_enet_txq_submit_skb(struct fec_enet_priv_tx_q *txq,
struct sk_buff *skb, struct net_device *ndev)
{ {
struct fec_enet_private *fep = netdev_priv(ndev); struct fec_enet_private *fep = netdev_priv(ndev);
const struct platform_device_id *id_entry = const struct platform_device_id *id_entry =
...@@ -477,12 +496,13 @@ static int fec_enet_txq_submit_skb(struct sk_buff *skb, struct net_device *ndev) ...@@ -477,12 +496,13 @@ static int fec_enet_txq_submit_skb(struct sk_buff *skb, struct net_device *ndev)
dma_addr_t addr; dma_addr_t addr;
unsigned short status; unsigned short status;
unsigned short buflen; unsigned short buflen;
unsigned short queue;
unsigned int estatus = 0; unsigned int estatus = 0;
unsigned int index; unsigned int index;
int entries_free; int entries_free;
int ret; int ret;
entries_free = fec_enet_get_free_txdesc_num(fep); entries_free = fec_enet_get_free_txdesc_num(fep, txq);
if (entries_free < MAX_SKB_FRAGS + 1) { if (entries_free < MAX_SKB_FRAGS + 1) {
dev_kfree_skb_any(skb); dev_kfree_skb_any(skb);
if (net_ratelimit()) if (net_ratelimit())
...@@ -497,7 +517,7 @@ static int fec_enet_txq_submit_skb(struct sk_buff *skb, struct net_device *ndev) ...@@ -497,7 +517,7 @@ static int fec_enet_txq_submit_skb(struct sk_buff *skb, struct net_device *ndev)
} }
/* Fill in a Tx ring entry */ /* Fill in a Tx ring entry */
bdp = fep->cur_tx; bdp = txq->cur_tx;
status = bdp->cbd_sc; status = bdp->cbd_sc;
status &= ~BD_ENET_TX_STATS; status &= ~BD_ENET_TX_STATS;
...@@ -505,11 +525,12 @@ static int fec_enet_txq_submit_skb(struct sk_buff *skb, struct net_device *ndev) ...@@ -505,11 +525,12 @@ static int fec_enet_txq_submit_skb(struct sk_buff *skb, struct net_device *ndev)
bufaddr = skb->data; bufaddr = skb->data;
buflen = skb_headlen(skb); buflen = skb_headlen(skb);
index = fec_enet_get_bd_index(fep->tx_bd_base, bdp, fep); queue = skb_get_queue_mapping(skb);
index = fec_enet_get_bd_index(txq->tx_bd_base, bdp, fep);
if (((unsigned long) bufaddr) & FEC_ALIGNMENT || if (((unsigned long) bufaddr) & FEC_ALIGNMENT ||
id_entry->driver_data & FEC_QUIRK_SWAP_FRAME) { id_entry->driver_data & FEC_QUIRK_SWAP_FRAME) {
memcpy(fep->tx_bounce[index], skb->data, buflen); memcpy(txq->tx_bounce[index], skb->data, buflen);
bufaddr = fep->tx_bounce[index]; bufaddr = txq->tx_bounce[index];
if (id_entry->driver_data & FEC_QUIRK_SWAP_FRAME) if (id_entry->driver_data & FEC_QUIRK_SWAP_FRAME)
swap_buffer(bufaddr, buflen); swap_buffer(bufaddr, buflen);
...@@ -525,7 +546,7 @@ static int fec_enet_txq_submit_skb(struct sk_buff *skb, struct net_device *ndev) ...@@ -525,7 +546,7 @@ static int fec_enet_txq_submit_skb(struct sk_buff *skb, struct net_device *ndev)
} }
if (nr_frags) { if (nr_frags) {
ret = fec_enet_txq_submit_frag_skb(skb, ndev); ret = fec_enet_txq_submit_frag_skb(txq, skb, ndev);
if (ret) if (ret)
return ret; return ret;
} else { } else {
...@@ -553,10 +574,10 @@ static int fec_enet_txq_submit_skb(struct sk_buff *skb, struct net_device *ndev) ...@@ -553,10 +574,10 @@ static int fec_enet_txq_submit_skb(struct sk_buff *skb, struct net_device *ndev)
ebdp->cbd_esc = estatus; ebdp->cbd_esc = estatus;
} }
last_bdp = fep->cur_tx; last_bdp = txq->cur_tx;
index = fec_enet_get_bd_index(fep->tx_bd_base, last_bdp, fep); index = fec_enet_get_bd_index(txq->tx_bd_base, last_bdp, fep);
/* Save skb pointer */ /* Save skb pointer */
fep->tx_skbuff[index] = skb; txq->tx_skbuff[index] = skb;
bdp->cbd_datlen = buflen; bdp->cbd_datlen = buflen;
bdp->cbd_bufaddr = addr; bdp->cbd_bufaddr = addr;
...@@ -568,20 +589,21 @@ static int fec_enet_txq_submit_skb(struct sk_buff *skb, struct net_device *ndev) ...@@ -568,20 +589,21 @@ static int fec_enet_txq_submit_skb(struct sk_buff *skb, struct net_device *ndev)
bdp->cbd_sc = status; bdp->cbd_sc = status;
/* If this was the last BD in the ring, start at the beginning again. */ /* If this was the last BD in the ring, start at the beginning again. */
bdp = fec_enet_get_nextdesc(last_bdp, fep); bdp = fec_enet_get_nextdesc(last_bdp, fep, queue);
skb_tx_timestamp(skb); skb_tx_timestamp(skb);
fep->cur_tx = bdp; txq->cur_tx = bdp;
/* Trigger transmission start */ /* Trigger transmission start */
writel(0, fep->hwp + FEC_X_DES_ACTIVE); writel(0, fep->hwp + FEC_X_DES_ACTIVE(queue));
return 0; return 0;
} }
static int static int
fec_enet_txq_put_data_tso(struct sk_buff *skb, struct net_device *ndev, fec_enet_txq_put_data_tso(struct fec_enet_priv_tx_q *txq, struct sk_buff *skb,
struct net_device *ndev,
struct bufdesc *bdp, int index, char *data, struct bufdesc *bdp, int index, char *data,
int size, bool last_tcp, bool is_last) int size, bool last_tcp, bool is_last)
{ {
...@@ -600,8 +622,8 @@ fec_enet_txq_put_data_tso(struct sk_buff *skb, struct net_device *ndev, ...@@ -600,8 +622,8 @@ fec_enet_txq_put_data_tso(struct sk_buff *skb, struct net_device *ndev,
if (((unsigned long) data) & FEC_ALIGNMENT || if (((unsigned long) data) & FEC_ALIGNMENT ||
id_entry->driver_data & FEC_QUIRK_SWAP_FRAME) { id_entry->driver_data & FEC_QUIRK_SWAP_FRAME) {
memcpy(fep->tx_bounce[index], data, size); memcpy(txq->tx_bounce[index], data, size);
data = fep->tx_bounce[index]; data = txq->tx_bounce[index];
if (id_entry->driver_data & FEC_QUIRK_SWAP_FRAME) if (id_entry->driver_data & FEC_QUIRK_SWAP_FRAME)
swap_buffer(data, size); swap_buffer(data, size);
...@@ -640,7 +662,8 @@ fec_enet_txq_put_data_tso(struct sk_buff *skb, struct net_device *ndev, ...@@ -640,7 +662,8 @@ fec_enet_txq_put_data_tso(struct sk_buff *skb, struct net_device *ndev,
} }
static int static int
fec_enet_txq_put_hdr_tso(struct sk_buff *skb, struct net_device *ndev, fec_enet_txq_put_hdr_tso(struct fec_enet_priv_tx_q *txq,
struct sk_buff *skb, struct net_device *ndev,
struct bufdesc *bdp, int index) struct bufdesc *bdp, int index)
{ {
struct fec_enet_private *fep = netdev_priv(ndev); struct fec_enet_private *fep = netdev_priv(ndev);
...@@ -657,12 +680,12 @@ fec_enet_txq_put_hdr_tso(struct sk_buff *skb, struct net_device *ndev, ...@@ -657,12 +680,12 @@ fec_enet_txq_put_hdr_tso(struct sk_buff *skb, struct net_device *ndev,
status &= ~BD_ENET_TX_STATS; status &= ~BD_ENET_TX_STATS;
status |= (BD_ENET_TX_TC | BD_ENET_TX_READY); status |= (BD_ENET_TX_TC | BD_ENET_TX_READY);
bufaddr = fep->tso_hdrs + index * TSO_HEADER_SIZE; bufaddr = txq->tso_hdrs + index * TSO_HEADER_SIZE;
dmabuf = fep->tso_hdrs_dma + index * TSO_HEADER_SIZE; dmabuf = txq->tso_hdrs_dma + index * TSO_HEADER_SIZE;
if (((unsigned long) bufaddr) & FEC_ALIGNMENT || if (((unsigned long) bufaddr) & FEC_ALIGNMENT ||
id_entry->driver_data & FEC_QUIRK_SWAP_FRAME) { id_entry->driver_data & FEC_QUIRK_SWAP_FRAME) {
memcpy(fep->tx_bounce[index], skb->data, hdr_len); memcpy(txq->tx_bounce[index], skb->data, hdr_len);
bufaddr = fep->tx_bounce[index]; bufaddr = txq->tx_bounce[index];
if (id_entry->driver_data & FEC_QUIRK_SWAP_FRAME) if (id_entry->driver_data & FEC_QUIRK_SWAP_FRAME)
swap_buffer(bufaddr, hdr_len); swap_buffer(bufaddr, hdr_len);
...@@ -692,17 +715,20 @@ fec_enet_txq_put_hdr_tso(struct sk_buff *skb, struct net_device *ndev, ...@@ -692,17 +715,20 @@ fec_enet_txq_put_hdr_tso(struct sk_buff *skb, struct net_device *ndev,
return 0; return 0;
} }
static int fec_enet_txq_submit_tso(struct sk_buff *skb, struct net_device *ndev) static int fec_enet_txq_submit_tso(struct fec_enet_priv_tx_q *txq,
struct sk_buff *skb,
struct net_device *ndev)
{ {
struct fec_enet_private *fep = netdev_priv(ndev); struct fec_enet_private *fep = netdev_priv(ndev);
int hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb); int hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb);
int total_len, data_left; int total_len, data_left;
struct bufdesc *bdp = fep->cur_tx; struct bufdesc *bdp = txq->cur_tx;
unsigned short queue = skb_get_queue_mapping(skb);
struct tso_t tso; struct tso_t tso;
unsigned int index = 0; unsigned int index = 0;
int ret; int ret;
if (tso_count_descs(skb) >= fec_enet_get_free_txdesc_num(fep)) { if (tso_count_descs(skb) >= fec_enet_get_free_txdesc_num(fep, txq)) {
dev_kfree_skb_any(skb); dev_kfree_skb_any(skb);
if (net_ratelimit()) if (net_ratelimit())
netdev_err(ndev, "NOT enough BD for TSO!\n"); netdev_err(ndev, "NOT enough BD for TSO!\n");
...@@ -722,14 +748,14 @@ static int fec_enet_txq_submit_tso(struct sk_buff *skb, struct net_device *ndev) ...@@ -722,14 +748,14 @@ static int fec_enet_txq_submit_tso(struct sk_buff *skb, struct net_device *ndev)
while (total_len > 0) { while (total_len > 0) {
char *hdr; char *hdr;
index = fec_enet_get_bd_index(fep->tx_bd_base, bdp, fep); index = fec_enet_get_bd_index(txq->tx_bd_base, bdp, fep);
data_left = min_t(int, skb_shinfo(skb)->gso_size, total_len); data_left = min_t(int, skb_shinfo(skb)->gso_size, total_len);
total_len -= data_left; total_len -= data_left;
/* prepare packet headers: MAC + IP + TCP */ /* prepare packet headers: MAC + IP + TCP */
hdr = fep->tso_hdrs + index * TSO_HEADER_SIZE; hdr = txq->tso_hdrs + index * TSO_HEADER_SIZE;
tso_build_hdr(skb, hdr, &tso, data_left, total_len == 0); tso_build_hdr(skb, hdr, &tso, data_left, total_len == 0);
ret = fec_enet_txq_put_hdr_tso(skb, ndev, bdp, index); ret = fec_enet_txq_put_hdr_tso(txq, skb, ndev, bdp, index);
if (ret) if (ret)
goto err_release; goto err_release;
...@@ -737,10 +763,13 @@ static int fec_enet_txq_submit_tso(struct sk_buff *skb, struct net_device *ndev) ...@@ -737,10 +763,13 @@ static int fec_enet_txq_submit_tso(struct sk_buff *skb, struct net_device *ndev)
int size; int size;
size = min_t(int, tso.size, data_left); size = min_t(int, tso.size, data_left);
bdp = fec_enet_get_nextdesc(bdp, fep); bdp = fec_enet_get_nextdesc(bdp, fep, queue);
index = fec_enet_get_bd_index(fep->tx_bd_base, bdp, fep); index = fec_enet_get_bd_index(txq->tx_bd_base,
ret = fec_enet_txq_put_data_tso(skb, ndev, bdp, index, tso.data, bdp, fep);
size, size == data_left, ret = fec_enet_txq_put_data_tso(txq, skb, ndev,
bdp, index,
tso.data, size,
size == data_left,
total_len == 0); total_len == 0);
if (ret) if (ret)
goto err_release; goto err_release;
...@@ -749,17 +778,17 @@ static int fec_enet_txq_submit_tso(struct sk_buff *skb, struct net_device *ndev) ...@@ -749,17 +778,17 @@ static int fec_enet_txq_submit_tso(struct sk_buff *skb, struct net_device *ndev)
tso_build_data(skb, &tso, size); tso_build_data(skb, &tso, size);
} }
bdp = fec_enet_get_nextdesc(bdp, fep); bdp = fec_enet_get_nextdesc(bdp, fep, queue);
} }
/* Save skb pointer */ /* Save skb pointer */
fep->tx_skbuff[index] = skb; txq->tx_skbuff[index] = skb;
skb_tx_timestamp(skb); skb_tx_timestamp(skb);
fep->cur_tx = bdp; txq->cur_tx = bdp;
/* Trigger transmission start */ /* Trigger transmission start */
writel(0, fep->hwp + FEC_X_DES_ACTIVE); writel(0, fep->hwp + FEC_X_DES_ACTIVE(queue));
return 0; return 0;
...@@ -773,18 +802,25 @@ fec_enet_start_xmit(struct sk_buff *skb, struct net_device *ndev) ...@@ -773,18 +802,25 @@ fec_enet_start_xmit(struct sk_buff *skb, struct net_device *ndev)
{ {
struct fec_enet_private *fep = netdev_priv(ndev); struct fec_enet_private *fep = netdev_priv(ndev);
int entries_free; int entries_free;
unsigned short queue;
struct fec_enet_priv_tx_q *txq;
struct netdev_queue *nq;
int ret; int ret;
queue = skb_get_queue_mapping(skb);
txq = fep->tx_queue[queue];
nq = netdev_get_tx_queue(ndev, queue);
if (skb_is_gso(skb)) if (skb_is_gso(skb))
ret = fec_enet_txq_submit_tso(skb, ndev); ret = fec_enet_txq_submit_tso(txq, skb, ndev);
else else
ret = fec_enet_txq_submit_skb(skb, ndev); ret = fec_enet_txq_submit_skb(txq, skb, ndev);
if (ret) if (ret)
return ret; return ret;
entries_free = fec_enet_get_free_txdesc_num(fep); entries_free = fec_enet_get_free_txdesc_num(fep, txq);
if (entries_free <= fep->tx_stop_threshold) if (entries_free <= txq->tx_stop_threshold)
netif_stop_queue(ndev); netif_tx_stop_queue(nq);
return NETDEV_TX_OK; return NETDEV_TX_OK;
} }
...@@ -794,46 +830,51 @@ fec_enet_start_xmit(struct sk_buff *skb, struct net_device *ndev) ...@@ -794,46 +830,51 @@ fec_enet_start_xmit(struct sk_buff *skb, struct net_device *ndev)
static void fec_enet_bd_init(struct net_device *dev) static void fec_enet_bd_init(struct net_device *dev)
{ {
struct fec_enet_private *fep = netdev_priv(dev); struct fec_enet_private *fep = netdev_priv(dev);
struct fec_enet_priv_tx_q *txq;
struct fec_enet_priv_rx_q *rxq;
struct bufdesc *bdp; struct bufdesc *bdp;
unsigned int i; unsigned int i;
/* Initialize the receive buffer descriptors. */ /* Initialize the receive buffer descriptors. */
bdp = fep->rx_bd_base; rxq = fep->rx_queue[0];
for (i = 0; i < fep->rx_ring_size; i++) { bdp = rxq->rx_bd_base;
for (i = 0; i < rxq->rx_ring_size; i++) {
/* Initialize the BD for every fragment in the page. */ /* Initialize the BD for every fragment in the page. */
if (bdp->cbd_bufaddr) if (bdp->cbd_bufaddr)
bdp->cbd_sc = BD_ENET_RX_EMPTY; bdp->cbd_sc = BD_ENET_RX_EMPTY;
else else
bdp->cbd_sc = 0; bdp->cbd_sc = 0;
bdp = fec_enet_get_nextdesc(bdp, fep); bdp = fec_enet_get_nextdesc(bdp, fep, 0);
} }
/* Set the last buffer to wrap */ /* Set the last buffer to wrap */
bdp = fec_enet_get_prevdesc(bdp, fep); bdp = fec_enet_get_prevdesc(bdp, fep, 0);
bdp->cbd_sc |= BD_SC_WRAP; bdp->cbd_sc |= BD_SC_WRAP;
fep->cur_rx = fep->rx_bd_base; rxq->cur_rx = rxq->rx_bd_base;
/* ...and the same for transmit */ /* ...and the same for transmit */
bdp = fep->tx_bd_base; txq = fep->tx_queue[0];
fep->cur_tx = bdp; bdp = txq->tx_bd_base;
for (i = 0; i < fep->tx_ring_size; i++) { txq->cur_tx = bdp;
for (i = 0; i < txq->tx_ring_size; i++) {
/* Initialize the BD for every fragment in the page. */ /* Initialize the BD for every fragment in the page. */
bdp->cbd_sc = 0; bdp->cbd_sc = 0;
if (fep->tx_skbuff[i]) { if (txq->tx_skbuff[i]) {
dev_kfree_skb_any(fep->tx_skbuff[i]); dev_kfree_skb_any(txq->tx_skbuff[i]);
fep->tx_skbuff[i] = NULL; txq->tx_skbuff[i] = NULL;
} }
bdp->cbd_bufaddr = 0; bdp->cbd_bufaddr = 0;
bdp = fec_enet_get_nextdesc(bdp, fep); bdp = fec_enet_get_nextdesc(bdp, fep, 0);
} }
/* Set the last buffer to wrap */ /* Set the last buffer to wrap */
bdp = fec_enet_get_prevdesc(bdp, fep); bdp = fec_enet_get_prevdesc(bdp, fep, 0);
bdp->cbd_sc |= BD_SC_WRAP; bdp->cbd_sc |= BD_SC_WRAP;
fep->dirty_tx = bdp; txq->dirty_tx = bdp;
} }
/* /*
...@@ -852,6 +893,8 @@ fec_restart(struct net_device *ndev) ...@@ -852,6 +893,8 @@ fec_restart(struct net_device *ndev)
u32 temp_mac[2]; u32 temp_mac[2];
u32 rcntl = OPT_FRAME_SIZE | 0x04; u32 rcntl = OPT_FRAME_SIZE | 0x04;
u32 ecntl = 0x2; /* ETHEREN */ u32 ecntl = 0x2; /* ETHEREN */
struct fec_enet_priv_tx_q *txq;
struct fec_enet_priv_rx_q *rxq;
/* Whack a reset. We should wait for this. */ /* Whack a reset. We should wait for this. */
writel(1, fep->hwp + FEC_ECNTRL); writel(1, fep->hwp + FEC_ECNTRL);
...@@ -876,19 +919,21 @@ fec_restart(struct net_device *ndev) ...@@ -876,19 +919,21 @@ fec_restart(struct net_device *ndev)
fec_enet_bd_init(ndev); fec_enet_bd_init(ndev);
/* Set receive and transmit descriptor base. */ /* Set receive and transmit descriptor base. */
writel(fep->bd_dma, fep->hwp + FEC_R_DES_START); rxq = fep->rx_queue[0];
writel(rxq->bd_dma, fep->hwp + FEC_R_DES_START(0));
if (fep->bufdesc_ex) if (fep->bufdesc_ex)
writel((unsigned long)fep->bd_dma + sizeof(struct bufdesc_ex) writel((unsigned long)rxq->bd_dma + sizeof(struct bufdesc_ex)
* fep->rx_ring_size, fep->hwp + FEC_X_DES_START); * rxq->rx_ring_size, fep->hwp + FEC_X_DES_START(0));
else else
writel((unsigned long)fep->bd_dma + sizeof(struct bufdesc) writel((unsigned long)rxq->bd_dma + sizeof(struct bufdesc)
* fep->rx_ring_size, fep->hwp + FEC_X_DES_START); * rxq->rx_ring_size, fep->hwp + FEC_X_DES_START(0));
txq = fep->tx_queue[0];
for (i = 0; i <= TX_RING_MOD_MASK; i++) { for (i = 0; i <= TX_RING_MOD_MASK; i++) {
if (fep->tx_skbuff[i]) { if (txq->tx_skbuff[i]) {
dev_kfree_skb_any(fep->tx_skbuff[i]); dev_kfree_skb_any(txq->tx_skbuff[i]);
fep->tx_skbuff[i] = NULL; txq->tx_skbuff[i] = NULL;
} }
} }
...@@ -1012,7 +1057,7 @@ fec_restart(struct net_device *ndev) ...@@ -1012,7 +1057,7 @@ fec_restart(struct net_device *ndev)
/* And last, enable the transmit and receive processing */ /* And last, enable the transmit and receive processing */
writel(ecntl, fep->hwp + FEC_ECNTRL); writel(ecntl, fep->hwp + FEC_ECNTRL);
writel(0, fep->hwp + FEC_R_DES_ACTIVE); writel(0, fep->hwp + FEC_R_DES_ACTIVE(0));
if (fep->bufdesc_ex) if (fep->bufdesc_ex)
fec_ptp_start_cyclecounter(ndev); fec_ptp_start_cyclecounter(ndev);
...@@ -1097,37 +1142,45 @@ fec_enet_hwtstamp(struct fec_enet_private *fep, unsigned ts, ...@@ -1097,37 +1142,45 @@ fec_enet_hwtstamp(struct fec_enet_private *fep, unsigned ts,
} }
static void static void
fec_enet_tx(struct net_device *ndev) fec_enet_tx_queue(struct net_device *ndev, u16 queue_id)
{ {
struct fec_enet_private *fep; struct fec_enet_private *fep;
struct bufdesc *bdp; struct bufdesc *bdp;
unsigned short status; unsigned short status;
struct sk_buff *skb; struct sk_buff *skb;
struct fec_enet_priv_tx_q *txq;
struct netdev_queue *nq;
int index = 0; int index = 0;
int entries_free; int entries_free;
fep = netdev_priv(ndev); fep = netdev_priv(ndev);
bdp = fep->dirty_tx;
queue_id = FEC_ENET_GET_QUQUE(queue_id);
txq = fep->tx_queue[queue_id];
/* get next bdp of dirty_tx */
nq = netdev_get_tx_queue(ndev, queue_id);
bdp = txq->dirty_tx;
/* get next bdp of dirty_tx */ /* get next bdp of dirty_tx */
bdp = fec_enet_get_nextdesc(bdp, fep); bdp = fec_enet_get_nextdesc(bdp, fep, queue_id);
while (((status = bdp->cbd_sc) & BD_ENET_TX_READY) == 0) { while (((status = bdp->cbd_sc) & BD_ENET_TX_READY) == 0) {
/* current queue is empty */ /* current queue is empty */
if (bdp == fep->cur_tx) if (bdp == txq->cur_tx)
break; break;
index = fec_enet_get_bd_index(fep->tx_bd_base, bdp, fep); index = fec_enet_get_bd_index(txq->tx_bd_base, bdp, fep);
skb = fep->tx_skbuff[index]; skb = txq->tx_skbuff[index];
fep->tx_skbuff[index] = NULL; txq->tx_skbuff[index] = NULL;
if (!IS_TSO_HEADER(fep, bdp->cbd_bufaddr)) if (!IS_TSO_HEADER(txq, bdp->cbd_bufaddr))
dma_unmap_single(&fep->pdev->dev, bdp->cbd_bufaddr, dma_unmap_single(&fep->pdev->dev, bdp->cbd_bufaddr,
bdp->cbd_datlen, DMA_TO_DEVICE); bdp->cbd_datlen, DMA_TO_DEVICE);
bdp->cbd_bufaddr = 0; bdp->cbd_bufaddr = 0;
if (!skb) { if (!skb) {
bdp = fec_enet_get_nextdesc(bdp, fep); bdp = fec_enet_get_nextdesc(bdp, fep, queue_id);
continue; continue;
} }
...@@ -1169,23 +1222,37 @@ fec_enet_tx(struct net_device *ndev) ...@@ -1169,23 +1222,37 @@ fec_enet_tx(struct net_device *ndev)
/* Free the sk buffer associated with this last transmit */ /* Free the sk buffer associated with this last transmit */
dev_kfree_skb_any(skb); dev_kfree_skb_any(skb);
fep->dirty_tx = bdp; txq->dirty_tx = bdp;
/* Update pointer to next buffer descriptor to be transmitted */ /* Update pointer to next buffer descriptor to be transmitted */
bdp = fec_enet_get_nextdesc(bdp, fep); bdp = fec_enet_get_nextdesc(bdp, fep, queue_id);
/* Since we have freed up a buffer, the ring is no longer full /* Since we have freed up a buffer, the ring is no longer full
*/ */
if (netif_queue_stopped(ndev)) { if (netif_queue_stopped(ndev)) {
entries_free = fec_enet_get_free_txdesc_num(fep); entries_free = fec_enet_get_free_txdesc_num(fep, txq);
if (entries_free >= fep->tx_wake_threshold) if (entries_free >= txq->tx_wake_threshold)
netif_wake_queue(ndev); netif_tx_wake_queue(nq);
} }
} }
/* ERR006538: Keep the transmitter going */ /* ERR006538: Keep the transmitter going */
if (bdp != fep->cur_tx && readl(fep->hwp + FEC_X_DES_ACTIVE) == 0) if (bdp != txq->cur_tx &&
writel(0, fep->hwp + FEC_X_DES_ACTIVE); readl(fep->hwp + FEC_X_DES_ACTIVE(queue_id)) == 0)
writel(0, fep->hwp + FEC_X_DES_ACTIVE(queue_id));
}
static void
fec_enet_tx(struct net_device *ndev)
{
struct fec_enet_private *fep = netdev_priv(ndev);
u16 queue_id;
/* First process class A queue, then Class B and Best Effort queue */
for_each_set_bit(queue_id, &fep->work_tx, FEC_ENET_MAX_TX_QS) {
clear_bit(queue_id, &fep->work_tx);
fec_enet_tx_queue(ndev, queue_id);
}
return;
} }
/* During a receive, the cur_rx points to the current incoming buffer. /* During a receive, the cur_rx points to the current incoming buffer.
...@@ -1194,11 +1261,12 @@ fec_enet_tx(struct net_device *ndev) ...@@ -1194,11 +1261,12 @@ fec_enet_tx(struct net_device *ndev)
* effectively tossing the packet. * effectively tossing the packet.
*/ */
static int static int
fec_enet_rx(struct net_device *ndev, int budget) fec_enet_rx_queue(struct net_device *ndev, int budget, u16 queue_id)
{ {
struct fec_enet_private *fep = netdev_priv(ndev); struct fec_enet_private *fep = netdev_priv(ndev);
const struct platform_device_id *id_entry = const struct platform_device_id *id_entry =
platform_get_device_id(fep->pdev); platform_get_device_id(fep->pdev);
struct fec_enet_priv_rx_q *rxq;
struct bufdesc *bdp; struct bufdesc *bdp;
unsigned short status; unsigned short status;
struct sk_buff *skb; struct sk_buff *skb;
...@@ -1213,11 +1281,13 @@ fec_enet_rx(struct net_device *ndev, int budget) ...@@ -1213,11 +1281,13 @@ fec_enet_rx(struct net_device *ndev, int budget)
#ifdef CONFIG_M532x #ifdef CONFIG_M532x
flush_cache_all(); flush_cache_all();
#endif #endif
queue_id = FEC_ENET_GET_QUQUE(queue_id);
rxq = fep->rx_queue[queue_id];
/* First, grab all of the stats for the incoming packet. /* First, grab all of the stats for the incoming packet.
* These get messed up if we get called due to a busy condition. * These get messed up if we get called due to a busy condition.
*/ */
bdp = fep->cur_rx; bdp = rxq->cur_rx;
while (!((status = bdp->cbd_sc) & BD_ENET_RX_EMPTY)) { while (!((status = bdp->cbd_sc) & BD_ENET_RX_EMPTY)) {
...@@ -1231,7 +1301,6 @@ fec_enet_rx(struct net_device *ndev, int budget) ...@@ -1231,7 +1301,6 @@ fec_enet_rx(struct net_device *ndev, int budget)
if ((status & BD_ENET_RX_LAST) == 0) if ((status & BD_ENET_RX_LAST) == 0)
netdev_err(ndev, "rcv is not +last\n"); netdev_err(ndev, "rcv is not +last\n");
writel(FEC_ENET_RXF, fep->hwp + FEC_IEVENT);
/* Check for errors. */ /* Check for errors. */
if (status & (BD_ENET_RX_LG | BD_ENET_RX_SH | BD_ENET_RX_NO | if (status & (BD_ENET_RX_LG | BD_ENET_RX_SH | BD_ENET_RX_NO |
...@@ -1264,8 +1333,8 @@ fec_enet_rx(struct net_device *ndev, int budget) ...@@ -1264,8 +1333,8 @@ fec_enet_rx(struct net_device *ndev, int budget)
pkt_len = bdp->cbd_datlen; pkt_len = bdp->cbd_datlen;
ndev->stats.rx_bytes += pkt_len; ndev->stats.rx_bytes += pkt_len;
index = fec_enet_get_bd_index(fep->rx_bd_base, bdp, fep); index = fec_enet_get_bd_index(rxq->rx_bd_base, bdp, fep);
data = fep->rx_skbuff[index]->data; data = rxq->rx_skbuff[index]->data;
dma_sync_single_for_cpu(&fep->pdev->dev, bdp->cbd_bufaddr, dma_sync_single_for_cpu(&fep->pdev->dev, bdp->cbd_bufaddr,
FEC_ENET_RX_FRSIZE, DMA_FROM_DEVICE); FEC_ENET_RX_FRSIZE, DMA_FROM_DEVICE);
...@@ -1357,19 +1426,48 @@ fec_enet_rx(struct net_device *ndev, int budget) ...@@ -1357,19 +1426,48 @@ fec_enet_rx(struct net_device *ndev, int budget)
} }
/* Update BD pointer to next entry */ /* Update BD pointer to next entry */
bdp = fec_enet_get_nextdesc(bdp, fep); bdp = fec_enet_get_nextdesc(bdp, fep, queue_id);
/* Doing this here will keep the FEC running while we process /* Doing this here will keep the FEC running while we process
* incoming frames. On a heavily loaded network, we should be * incoming frames. On a heavily loaded network, we should be
* able to keep up at the expense of system resources. * able to keep up at the expense of system resources.
*/ */
writel(0, fep->hwp + FEC_R_DES_ACTIVE); writel(0, fep->hwp + FEC_R_DES_ACTIVE(queue_id));
} }
fep->cur_rx = bdp; rxq->cur_rx = bdp;
return pkt_received;
}
static int
fec_enet_rx(struct net_device *ndev, int budget)
{
int pkt_received = 0;
u16 queue_id;
struct fec_enet_private *fep = netdev_priv(ndev);
for_each_set_bit(queue_id, &fep->work_rx, FEC_ENET_MAX_RX_QS) {
clear_bit(queue_id, &fep->work_rx);
pkt_received += fec_enet_rx_queue(ndev,
budget - pkt_received, queue_id);
}
return pkt_received; return pkt_received;
} }
static bool
fec_enet_collect_events(struct fec_enet_private *fep, uint int_events)
{
if (int_events == 0)
return false;
if (int_events & FEC_ENET_RXF)
fep->work_rx |= (1 << 2);
if (int_events & FEC_ENET_TXF)
fep->work_tx |= (1 << 2);
return true;
}
static irqreturn_t static irqreturn_t
fec_enet_interrupt(int irq, void *dev_id) fec_enet_interrupt(int irq, void *dev_id)
{ {
...@@ -1381,6 +1479,7 @@ fec_enet_interrupt(int irq, void *dev_id) ...@@ -1381,6 +1479,7 @@ fec_enet_interrupt(int irq, void *dev_id)
int_events = readl(fep->hwp + FEC_IEVENT); int_events = readl(fep->hwp + FEC_IEVENT);
writel(int_events & ~napi_mask, fep->hwp + FEC_IEVENT); writel(int_events & ~napi_mask, fep->hwp + FEC_IEVENT);
fec_enet_collect_events(fep, int_events);
if (int_events & napi_mask) { if (int_events & napi_mask) {
ret = IRQ_HANDLED; ret = IRQ_HANDLED;
...@@ -2132,25 +2231,29 @@ static void fec_enet_free_buffers(struct net_device *ndev) ...@@ -2132,25 +2231,29 @@ static void fec_enet_free_buffers(struct net_device *ndev)
unsigned int i; unsigned int i;
struct sk_buff *skb; struct sk_buff *skb;
struct bufdesc *bdp; struct bufdesc *bdp;
struct fec_enet_priv_tx_q *txq;
bdp = fep->rx_bd_base; struct fec_enet_priv_rx_q *rxq;
for (i = 0; i < fep->rx_ring_size; i++) {
skb = fep->rx_skbuff[i]; rxq = fep->rx_queue[0];
fep->rx_skbuff[i] = NULL; bdp = rxq->rx_bd_base;
for (i = 0; i < rxq->rx_ring_size; i++) {
skb = rxq->rx_skbuff[i];
rxq->rx_skbuff[i] = NULL;
if (skb) { if (skb) {
dma_unmap_single(&fep->pdev->dev, bdp->cbd_bufaddr, dma_unmap_single(&fep->pdev->dev, bdp->cbd_bufaddr,
FEC_ENET_RX_FRSIZE, DMA_FROM_DEVICE); FEC_ENET_RX_FRSIZE, DMA_FROM_DEVICE);
dev_kfree_skb(skb); dev_kfree_skb(skb);
} }
bdp = fec_enet_get_nextdesc(bdp, fep); bdp = fec_enet_get_nextdesc(bdp, fep, 0);
} }
bdp = fep->tx_bd_base; txq = fep->tx_queue[0];
for (i = 0; i < fep->tx_ring_size; i++) { bdp = txq->tx_bd_base;
kfree(fep->tx_bounce[i]); for (i = 0; i < txq->tx_ring_size; i++) {
fep->tx_bounce[i] = NULL; kfree(txq->tx_bounce[i]);
skb = fep->tx_skbuff[i]; txq->tx_bounce[i] = NULL;
fep->tx_skbuff[i] = NULL; skb = txq->tx_skbuff[i];
txq->tx_skbuff[i] = NULL;
dev_kfree_skb(skb); dev_kfree_skb(skb);
} }
} }
...@@ -2161,9 +2264,12 @@ static int fec_enet_alloc_buffers(struct net_device *ndev) ...@@ -2161,9 +2264,12 @@ static int fec_enet_alloc_buffers(struct net_device *ndev)
unsigned int i; unsigned int i;
struct sk_buff *skb; struct sk_buff *skb;
struct bufdesc *bdp; struct bufdesc *bdp;
struct fec_enet_priv_tx_q *txq;
struct fec_enet_priv_rx_q *rxq;
bdp = fep->rx_bd_base; rxq = fep->rx_queue[0];
for (i = 0; i < fep->rx_ring_size; i++) { bdp = rxq->rx_bd_base;
for (i = 0; i < rxq->rx_ring_size; i++) {
dma_addr_t addr; dma_addr_t addr;
skb = netdev_alloc_skb(ndev, FEC_ENET_RX_FRSIZE); skb = netdev_alloc_skb(ndev, FEC_ENET_RX_FRSIZE);
...@@ -2179,7 +2285,7 @@ static int fec_enet_alloc_buffers(struct net_device *ndev) ...@@ -2179,7 +2285,7 @@ static int fec_enet_alloc_buffers(struct net_device *ndev)
goto err_alloc; goto err_alloc;
} }
fep->rx_skbuff[i] = skb; rxq->rx_skbuff[i] = skb;
bdp->cbd_bufaddr = addr; bdp->cbd_bufaddr = addr;
bdp->cbd_sc = BD_ENET_RX_EMPTY; bdp->cbd_sc = BD_ENET_RX_EMPTY;
...@@ -2188,17 +2294,18 @@ static int fec_enet_alloc_buffers(struct net_device *ndev) ...@@ -2188,17 +2294,18 @@ static int fec_enet_alloc_buffers(struct net_device *ndev)
ebdp->cbd_esc = BD_ENET_RX_INT; ebdp->cbd_esc = BD_ENET_RX_INT;
} }
bdp = fec_enet_get_nextdesc(bdp, fep); bdp = fec_enet_get_nextdesc(bdp, fep, 0);
} }
/* Set the last buffer to wrap. */ /* Set the last buffer to wrap. */
bdp = fec_enet_get_prevdesc(bdp, fep); bdp = fec_enet_get_prevdesc(bdp, fep, 0);
bdp->cbd_sc |= BD_SC_WRAP; bdp->cbd_sc |= BD_SC_WRAP;
bdp = fep->tx_bd_base; txq = fep->tx_queue[0];
for (i = 0; i < fep->tx_ring_size; i++) { bdp = txq->tx_bd_base;
fep->tx_bounce[i] = kmalloc(FEC_ENET_TX_FRSIZE, GFP_KERNEL); for (i = 0; i < txq->tx_ring_size; i++) {
if (!fep->tx_bounce[i]) txq->tx_bounce[i] = kmalloc(FEC_ENET_TX_FRSIZE, GFP_KERNEL);
if (!txq->tx_bounce[i])
goto err_alloc; goto err_alloc;
bdp->cbd_sc = 0; bdp->cbd_sc = 0;
...@@ -2209,11 +2316,11 @@ static int fec_enet_alloc_buffers(struct net_device *ndev) ...@@ -2209,11 +2316,11 @@ static int fec_enet_alloc_buffers(struct net_device *ndev)
ebdp->cbd_esc = BD_ENET_TX_INT; ebdp->cbd_esc = BD_ENET_TX_INT;
} }
bdp = fec_enet_get_nextdesc(bdp, fep); bdp = fec_enet_get_nextdesc(bdp, fep, 0);
} }
/* Set the last buffer to wrap. */ /* Set the last buffer to wrap. */
bdp = fec_enet_get_prevdesc(bdp, fep); bdp = fec_enet_get_prevdesc(bdp, fep, 0);
bdp->cbd_sc |= BD_SC_WRAP; bdp->cbd_sc |= BD_SC_WRAP;
return 0; return 0;
...@@ -2252,7 +2359,8 @@ fec_enet_open(struct net_device *ndev) ...@@ -2252,7 +2359,8 @@ fec_enet_open(struct net_device *ndev)
fec_restart(ndev); fec_restart(ndev);
napi_enable(&fep->napi); napi_enable(&fep->napi);
phy_start(fep->phy_dev); phy_start(fep->phy_dev);
netif_start_queue(ndev); netif_tx_start_all_queues(ndev);
return 0; return 0;
} }
...@@ -2426,7 +2534,7 @@ static int fec_set_features(struct net_device *netdev, ...@@ -2426,7 +2534,7 @@ static int fec_set_features(struct net_device *netdev,
/* Resume the device after updates */ /* Resume the device after updates */
if (netif_running(netdev) && changed & FEATURES_NEED_QUIESCE) { if (netif_running(netdev) && changed & FEATURES_NEED_QUIESCE) {
fec_restart(netdev); fec_restart(netdev);
netif_wake_queue(netdev); netif_tx_wake_all_queues(netdev);
netif_tx_unlock_bh(netdev); netif_tx_unlock_bh(netdev);
napi_enable(&fep->napi); napi_enable(&fep->napi);
} }
...@@ -2434,10 +2542,17 @@ static int fec_set_features(struct net_device *netdev, ...@@ -2434,10 +2542,17 @@ static int fec_set_features(struct net_device *netdev,
return 0; return 0;
} }
u16 fec_enet_select_queue(struct net_device *ndev, struct sk_buff *skb,
void *accel_priv, select_queue_fallback_t fallback)
{
return skb_tx_hash(ndev, skb);
}
static const struct net_device_ops fec_netdev_ops = { static const struct net_device_ops fec_netdev_ops = {
.ndo_open = fec_enet_open, .ndo_open = fec_enet_open,
.ndo_stop = fec_enet_close, .ndo_stop = fec_enet_close,
.ndo_start_xmit = fec_enet_start_xmit, .ndo_start_xmit = fec_enet_start_xmit,
.ndo_select_queue = fec_enet_select_queue,
.ndo_set_rx_mode = set_multicast_list, .ndo_set_rx_mode = set_multicast_list,
.ndo_change_mtu = eth_change_mtu, .ndo_change_mtu = eth_change_mtu,
.ndo_validate_addr = eth_validate_addr, .ndo_validate_addr = eth_validate_addr,
...@@ -2459,39 +2574,60 @@ static int fec_enet_init(struct net_device *ndev) ...@@ -2459,39 +2574,60 @@ static int fec_enet_init(struct net_device *ndev)
struct fec_enet_private *fep = netdev_priv(ndev); struct fec_enet_private *fep = netdev_priv(ndev);
const struct platform_device_id *id_entry = const struct platform_device_id *id_entry =
platform_get_device_id(fep->pdev); platform_get_device_id(fep->pdev);
struct fec_enet_priv_tx_q *txq;
struct fec_enet_priv_rx_q *rxq;
struct bufdesc *cbd_base; struct bufdesc *cbd_base;
dma_addr_t bd_dma;
int bd_size; int bd_size;
/* init the tx & rx ring size */ txq = kzalloc(sizeof(*txq), GFP_KERNEL);
fep->tx_ring_size = TX_RING_SIZE; if (!txq)
fep->rx_ring_size = RX_RING_SIZE; return -ENOMEM;
fep->tx_queue[0] = txq;
rxq = kzalloc(sizeof(*rxq), GFP_KERNEL);
if (!rxq) {
kfree(txq);
return -ENOMEM;
}
fep->rx_queue[0] = rxq;
txq->tx_ring_size = TX_RING_SIZE;
rxq->rx_ring_size = RX_RING_SIZE;
fep->total_tx_ring_size = txq->tx_ring_size;
fep->total_rx_ring_size = rxq->rx_ring_size;
fep->tx_stop_threshold = FEC_MAX_SKB_DESCS; txq->tx_stop_threshold = FEC_MAX_SKB_DESCS;
fep->tx_wake_threshold = (fep->tx_ring_size - fep->tx_stop_threshold) / 2; txq->tx_wake_threshold = (txq->tx_ring_size - txq->tx_stop_threshold) / 2;
if (fep->bufdesc_ex) if (fep->bufdesc_ex)
fep->bufdesc_size = sizeof(struct bufdesc_ex); fep->bufdesc_size = sizeof(struct bufdesc_ex);
else else
fep->bufdesc_size = sizeof(struct bufdesc); fep->bufdesc_size = sizeof(struct bufdesc);
bd_size = (fep->tx_ring_size + fep->rx_ring_size) * bd_size = (fep->total_tx_ring_size + fep->total_rx_ring_size) *
fep->bufdesc_size; fep->bufdesc_size;
/* Allocate memory for buffer descriptors. */ /* Allocate memory for buffer descriptors. */
cbd_base = dma_alloc_coherent(NULL, bd_size, &fep->bd_dma, cbd_base = dma_alloc_coherent(NULL, bd_size, &bd_dma,
GFP_KERNEL); GFP_KERNEL);
if (!cbd_base) if (!cbd_base) {
kfree(rxq);
kfree(txq);
return -ENOMEM; return -ENOMEM;
}
fep->tso_hdrs = dma_alloc_coherent(NULL, fep->tx_ring_size * TSO_HEADER_SIZE, txq->tso_hdrs = dma_alloc_coherent(NULL, txq->tx_ring_size * TSO_HEADER_SIZE,
&fep->tso_hdrs_dma, GFP_KERNEL); &txq->tso_hdrs_dma, GFP_KERNEL);
if (!fep->tso_hdrs) { if (!txq->tso_hdrs) {
dma_free_coherent(NULL, bd_size, cbd_base, fep->bd_dma); kfree(rxq);
kfree(txq);
dma_free_coherent(NULL, bd_size, cbd_base, bd_dma);
return -ENOMEM; return -ENOMEM;
} }
memset(cbd_base, 0, PAGE_SIZE); memset(cbd_base, 0, bd_size);
fep->netdev = ndev;
/* Get the Ethernet address */ /* Get the Ethernet address */
fec_get_mac(ndev); fec_get_mac(ndev);
...@@ -2499,12 +2635,13 @@ static int fec_enet_init(struct net_device *ndev) ...@@ -2499,12 +2635,13 @@ static int fec_enet_init(struct net_device *ndev)
fec_set_mac_address(ndev, NULL); fec_set_mac_address(ndev, NULL);
/* Set receive and transmit descriptor base. */ /* Set receive and transmit descriptor base. */
fep->rx_bd_base = cbd_base; rxq->rx_bd_base = cbd_base;
if (fep->bufdesc_ex) if (fep->bufdesc_ex)
fep->tx_bd_base = (struct bufdesc *) txq->tx_bd_base = (struct bufdesc *)
(((struct bufdesc_ex *)cbd_base) + fep->rx_ring_size); (((struct bufdesc_ex *)cbd_base) + rxq->rx_ring_size);
else else
fep->tx_bd_base = cbd_base + fep->rx_ring_size; txq->tx_bd_base = cbd_base + rxq->rx_ring_size;
/* The FEC Ethernet specific entries in the device structure */ /* The FEC Ethernet specific entries in the device structure */
ndev->watchdog_timeo = TX_TIMEOUT; ndev->watchdog_timeo = TX_TIMEOUT;
......