Commit f773abf6 authored by David S. Miller

Merge branch 'fec-next'

Troy Kisky says:

====================
net: fec: cleanup/fixes

V2 is a rebase on top of Johannes' endian-safe patch and
contains only the first eight patches.
The testing for this series was done on a nitrogen6x.
The base commit was
commit b45efa30
    Merge git://git.kernel.org/pub/scm/linux/kernel/git/davem/net

Testing showed no change in performance.
Testing used imx_v6_v7_defconfig + CONFIG_MICREL_PHY.
The processor was running at 996 MHz.
The following commands were used to get the transfer rates.

On an x86 Ubuntu system,
iperf -s -i.5 -u

On a nitrogen6x board, running via SD Card.
I first stopped some background processes:

stop cron
stop upstart-file-bridge
stop upstart-socket-bridge
stop upstart-udev-bridge
stop rsyslog
stop dbus
killall dhclient
echo performance >/sys/devices/system/cpu/cpu0/cpufreq/scaling_governor

taskset 0x2 iperf -c 192.168.0.201 -u -t60 -b500M -r

There is a branch available on github with this series, and the rest of
my fec patches, for those who would like to test it.
https://github.com:boundarydevices/linux-imx6.git branch net-next_master
====================
Signed-off-by: David S. Miller <davem@davemloft.net>
parents 7f20cd25 fc75ba51
@@ -64,6 +64,7 @@
 #define FEC_R_FIFO_RSEM     0x194 /* Receive FIFO section empty threshold */
 #define FEC_R_FIFO_RAEM     0x198 /* Receive FIFO almost empty threshold */
 #define FEC_R_FIFO_RAFL     0x19c /* Receive FIFO almost full threshold */
+#define FEC_FTRL            0x1b0 /* Frame truncation receive length */
 #define FEC_RACC            0x1c4 /* Receive Accelerator function */
 #define FEC_RCMR_1          0x1c8 /* Receive classification match ring 1 */
 #define FEC_RCMR_2          0x1cc /* Receive classification match ring 2 */
@@ -309,12 +310,6 @@ struct bufdesc_ex {
 #define FEC_R_BUFF_SIZE(X)  (((X) == 1) ? FEC_R_BUFF_SIZE_1 : \
                             (((X) == 2) ? \
                                 FEC_R_BUFF_SIZE_2 : FEC_R_BUFF_SIZE_0))
-#define FEC_R_DES_ACTIVE(X) (((X) == 1) ? FEC_R_DES_ACTIVE_1 : \
-                            (((X) == 2) ? \
-                                FEC_R_DES_ACTIVE_2 : FEC_R_DES_ACTIVE_0))
-#define FEC_X_DES_ACTIVE(X) (((X) == 1) ? FEC_X_DES_ACTIVE_1 : \
-                            (((X) == 2) ? \
-                                FEC_X_DES_ACTIVE_2 : FEC_X_DES_ACTIVE_0))
 #define FEC_DMA_CFG(X)      (((X) == 2) ? FEC_DMA_CFG_2 : FEC_DMA_CFG_1)
@@ -380,6 +375,7 @@ struct bufdesc_ex {
 #define FEC_ENET_TS_TIMER       ((uint)0x00008000)

 #define FEC_DEFAULT_IMASK (FEC_ENET_TXF | FEC_ENET_RXF | FEC_ENET_MII | FEC_ENET_TS_TIMER)
+#define FEC_NAPI_IMASK (FEC_ENET_MII | FEC_ENET_TS_TIMER)
 #define FEC_RX_DISABLED_IMASK (FEC_DEFAULT_IMASK & (~FEC_ENET_RXF))

 /* ENET interrupt coalescing macro define */
@@ -447,33 +443,35 @@ struct bufdesc_ex {
 /* Controller supports RACC register */
 #define FEC_QUIRK_HAS_RACC  (1 << 12)

+struct bufdesc_prop {
+    int qid;
+    /* Address of Rx and Tx buffers */
+    struct bufdesc  *base;
+    struct bufdesc  *last;
+    struct bufdesc  *cur;
+    void __iomem    *reg_desc_active;
+    dma_addr_t  dma;
+    unsigned short ring_size;
+    unsigned char dsize;
+    unsigned char dsize_log2;
+};
+
 struct fec_enet_priv_tx_q {
-    int index;
+    struct bufdesc_prop bd;
     unsigned char *tx_bounce[TX_RING_SIZE];
     struct sk_buff *tx_skbuff[TX_RING_SIZE];

-    dma_addr_t  bd_dma;
-    struct bufdesc  *tx_bd_base;
-    uint tx_ring_size;
-
     unsigned short tx_stop_threshold;
     unsigned short tx_wake_threshold;

-    struct bufdesc  *cur_tx;
     struct bufdesc  *dirty_tx;
     char *tso_hdrs;
     dma_addr_t tso_hdrs_dma;
 };

 struct fec_enet_priv_rx_q {
-    int index;
+    struct bufdesc_prop bd;
     struct sk_buff *rx_skbuff[RX_RING_SIZE];
-
-    dma_addr_t  bd_dma;
-    struct bufdesc  *rx_bd_base;
-    uint rx_ring_size;
-
-    struct bufdesc  *cur_rx;
 };

 /* The FEC buffer descriptors track the ring buffers. The rx_bd_base and
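Note on the new bufdesc_prop fields (my reading, not text from the patch): dsize is the byte size of one descriptor and dsize_log2 its base-2 logarithm, so ring indices can be derived with a shift instead of a divide, which is what the reworked fec_enet_get_bd_index() does later in this diff. A minimal standalone sketch of that arithmetic, assuming the extended descriptor is 32 bytes (all names and values here are illustrative):

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

/* Compute a ring index the way fec_enet_get_bd_index() does:
 * pointer delta from the ring base, shifted right by log2(descriptor size).
 */
static unsigned bd_index(uintptr_t base, uintptr_t bdp, unsigned dsize_log2)
{
    return (unsigned)((bdp - base) >> dsize_log2);
}

int main(void)
{
    unsigned dsize = 32;        /* assumed sizeof(struct bufdesc_ex) */
    unsigned dsize_log2 = 5;    /* __fls(32) */
    uintptr_t base = 0x1000;    /* hypothetical ring base address */

    /* Mirrors the WARN_ON(dsize != (1 << dsize_log2)) added in fec_enet_init(). */
    assert(dsize == (1u << dsize_log2));
    printf("descriptor at base + 3 * dsize has index %u\n",
           bd_index(base, base + 3 * dsize, dsize_log2));
    return 0;
}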
@@ -513,8 +511,6 @@ struct fec_enet_private {
     unsigned long work_ts;
     unsigned long work_mdio;

-    unsigned short bufdesc_size;
-
     struct platform_device *pdev;

     int dev_id;
...
@@ -217,86 +217,38 @@ MODULE_PARM_DESC(macaddr, "FEC Ethernet MAC address");
 #define IS_TSO_HEADER(txq, addr) \
     ((addr >= txq->tso_hdrs_dma) && \
-    (addr < txq->tso_hdrs_dma + txq->tx_ring_size * TSO_HEADER_SIZE))
+     (addr < txq->tso_hdrs_dma + txq->bd.ring_size * TSO_HEADER_SIZE))

 static int mii_cnt;

-static inline
-struct bufdesc *fec_enet_get_nextdesc(struct bufdesc *bdp,
-                      struct fec_enet_private *fep,
-                      int queue_id)
-{
-    struct bufdesc *new_bd = bdp + 1;
-    struct bufdesc_ex *ex_new_bd = (struct bufdesc_ex *)bdp + 1;
-    struct fec_enet_priv_tx_q *txq = fep->tx_queue[queue_id];
-    struct fec_enet_priv_rx_q *rxq = fep->rx_queue[queue_id];
-    struct bufdesc_ex *ex_base;
-    struct bufdesc *base;
-    int ring_size;
-
-    if (bdp >= txq->tx_bd_base) {
-        base = txq->tx_bd_base;
-        ring_size = txq->tx_ring_size;
-        ex_base = (struct bufdesc_ex *)txq->tx_bd_base;
-    } else {
-        base = rxq->rx_bd_base;
-        ring_size = rxq->rx_ring_size;
-        ex_base = (struct bufdesc_ex *)rxq->rx_bd_base;
-    }
-
-    if (fep->bufdesc_ex)
-        return (struct bufdesc *)((ex_new_bd >= (ex_base + ring_size)) ?
-            ex_base : ex_new_bd);
-    else
-        return (new_bd >= (base + ring_size)) ?
-            base : new_bd;
-}
+static struct bufdesc *fec_enet_get_nextdesc(struct bufdesc *bdp,
+                         struct bufdesc_prop *bd)
+{
+    return (bdp >= bd->last) ? bd->base
+            : (struct bufdesc *)(((unsigned)bdp) + bd->dsize);
+}

-static inline
-struct bufdesc *fec_enet_get_prevdesc(struct bufdesc *bdp,
-                      struct fec_enet_private *fep,
-                      int queue_id)
-{
-    struct bufdesc *new_bd = bdp - 1;
-    struct bufdesc_ex *ex_new_bd = (struct bufdesc_ex *)bdp - 1;
-    struct fec_enet_priv_tx_q *txq = fep->tx_queue[queue_id];
-    struct fec_enet_priv_rx_q *rxq = fep->rx_queue[queue_id];
-    struct bufdesc_ex *ex_base;
-    struct bufdesc *base;
-    int ring_size;
-
-    if (bdp >= txq->tx_bd_base) {
-        base = txq->tx_bd_base;
-        ring_size = txq->tx_ring_size;
-        ex_base = (struct bufdesc_ex *)txq->tx_bd_base;
-    } else {
-        base = rxq->rx_bd_base;
-        ring_size = rxq->rx_ring_size;
-        ex_base = (struct bufdesc_ex *)rxq->rx_bd_base;
-    }
-
-    if (fep->bufdesc_ex)
-        return (struct bufdesc *)((ex_new_bd < ex_base) ?
-            (ex_new_bd + ring_size) : ex_new_bd);
-    else
-        return (new_bd < base) ? (new_bd + ring_size) : new_bd;
-}
+static struct bufdesc *fec_enet_get_prevdesc(struct bufdesc *bdp,
+                         struct bufdesc_prop *bd)
+{
+    return (bdp <= bd->base) ? bd->last
+            : (struct bufdesc *)(((unsigned)bdp) - bd->dsize);
+}

-static int fec_enet_get_bd_index(struct bufdesc *base, struct bufdesc *bdp,
-                 struct fec_enet_private *fep)
+static int fec_enet_get_bd_index(struct bufdesc *bdp,
+                 struct bufdesc_prop *bd)
 {
-    return ((const char *)bdp - (const char *)base) / fep->bufdesc_size;
+    return ((const char *)bdp - (const char *)bd->base) >> bd->dsize_log2;
 }

-static int fec_enet_get_free_txdesc_num(struct fec_enet_private *fep,
-                    struct fec_enet_priv_tx_q *txq)
+static int fec_enet_get_free_txdesc_num(struct fec_enet_priv_tx_q *txq)
 {
     int entries;

-    entries = ((const char *)txq->dirty_tx -
-            (const char *)txq->cur_tx) / fep->bufdesc_size - 1;
+    entries = (((const char *)txq->dirty_tx -
+            (const char *)txq->bd.cur) >> txq->bd.dsize_log2) - 1;

-    return entries > 0 ? entries : entries + txq->tx_ring_size;
+    return entries >= 0 ? entries : entries + txq->bd.ring_size;
 }
 static void swap_buffer(void *bufaddr, int len)
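For reference (a reviewer-style note, not part of the patch): the reworked fec_enet_get_free_txdesc_num() counts the descriptors between the cleanup pointer (dirty_tx) and the producer pointer (bd.cur), minus one, and wraps by ring_size when the difference goes negative; the change from "entries > 0" to "entries >= 0" also keeps a legitimate result of zero from being bumped up to ring_size. A small standalone sketch of the same arithmetic on a hypothetical 8-entry ring, using indices in place of descriptor pointers:

#include <stdio.h>

/* Same wrap logic as the driver: (dirty_tx - cur) - 1, folded into the ring. */
static int free_txdesc_num(int dirty_tx, int cur, int ring_size)
{
    int entries = dirty_tx - cur - 1;

    return entries >= 0 ? entries : entries + ring_size;
}

int main(void)
{
    /* Producer sitting just behind the cleanup pointer: the ring is full. */
    printf("%d\n", free_txdesc_num(4, 3, 8));   /* prints 0 */
    /* Producer ahead of the cleanup pointer: the count wraps around. */
    printf("%d\n", free_txdesc_num(2, 5, 8));   /* prints 4 */
    return 0;
}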
@@ -329,20 +281,20 @@ static void fec_dump(struct net_device *ndev)
     pr_info("Nr     SC     addr       len  SKB\n");

     txq = fep->tx_queue[0];
-    bdp = txq->tx_bd_base;
+    bdp = txq->bd.base;

     do {
         pr_info("%3u %c%c 0x%04x 0x%08x %4u %p\n",
             index,
-            bdp == txq->cur_tx ? 'S' : ' ',
+            bdp == txq->bd.cur ? 'S' : ' ',
             bdp == txq->dirty_tx ? 'H' : ' ',
             fec16_to_cpu(bdp->cbd_sc),
             fec32_to_cpu(bdp->cbd_bufaddr),
             fec16_to_cpu(bdp->cbd_datlen),
             txq->tx_skbuff[index]);
-        bdp = fec_enet_get_nextdesc(bdp, fep, 0);
+        bdp = fec_enet_get_nextdesc(bdp, &txq->bd);
         index++;
-    } while (bdp != txq->tx_bd_base);
+    } while (bdp != txq->bd.base);
 }

 static inline bool is_ipv4_pkt(struct sk_buff *skb)
@@ -373,10 +325,9 @@ fec_enet_txq_submit_frag_skb(struct fec_enet_priv_tx_q *txq,
                  struct net_device *ndev)
 {
     struct fec_enet_private *fep = netdev_priv(ndev);
-    struct bufdesc *bdp = txq->cur_tx;
+    struct bufdesc *bdp = txq->bd.cur;
     struct bufdesc_ex *ebdp;
     int nr_frags = skb_shinfo(skb)->nr_frags;
-    unsigned short queue = skb_get_queue_mapping(skb);
     int frag, frag_len;
     unsigned short status;
     unsigned int estatus = 0;
@@ -388,7 +339,7 @@ fec_enet_txq_submit_frag_skb(struct fec_enet_priv_tx_q *txq,
     for (frag = 0; frag < nr_frags; frag++) {
         this_frag = &skb_shinfo(skb)->frags[frag];
-        bdp = fec_enet_get_nextdesc(bdp, fep, queue);
+        bdp = fec_enet_get_nextdesc(bdp, &txq->bd);
         ebdp = (struct bufdesc_ex *)bdp;

         status = fec16_to_cpu(bdp->cbd_sc);
@@ -409,7 +360,7 @@ fec_enet_txq_submit_frag_skb(struct fec_enet_priv_tx_q *txq,
         if (fep->bufdesc_ex) {
             if (fep->quirks & FEC_QUIRK_HAS_AVB)
-                estatus |= FEC_TX_BD_FTYPE(queue);
+                estatus |= FEC_TX_BD_FTYPE(txq->bd.qid);
             if (skb->ip_summed == CHECKSUM_PARTIAL)
                 estatus |= BD_ENET_TX_PINS | BD_ENET_TX_IINS;
             ebdp->cbd_bdu = 0;
@@ -418,7 +369,7 @@ fec_enet_txq_submit_frag_skb(struct fec_enet_priv_tx_q *txq,
         bufaddr = page_address(this_frag->page.p) + this_frag->page_offset;

-        index = fec_enet_get_bd_index(txq->tx_bd_base, bdp, fep);
+        index = fec_enet_get_bd_index(bdp, &txq->bd);
         if (((unsigned long) bufaddr) & fep->tx_align ||
             fep->quirks & FEC_QUIRK_SWAP_FRAME) {
             memcpy(txq->tx_bounce[index], bufaddr, frag_len);
@@ -431,7 +382,6 @@ fec_enet_txq_submit_frag_skb(struct fec_enet_priv_tx_q *txq,
         addr = dma_map_single(&fep->pdev->dev, bufaddr, frag_len,
                       DMA_TO_DEVICE);
         if (dma_mapping_error(&fep->pdev->dev, addr)) {
-            dev_kfree_skb_any(skb);
             if (net_ratelimit())
                 netdev_err(ndev, "Tx DMA memory map failed\n");
             goto dma_mapping_error;
@@ -439,14 +389,18 @@ fec_enet_txq_submit_frag_skb(struct fec_enet_priv_tx_q *txq,

         bdp->cbd_bufaddr = cpu_to_fec32(addr);
         bdp->cbd_datlen = cpu_to_fec16(frag_len);
+        /* Make sure the updates to rest of the descriptor are
+         * performed before transferring ownership.
+         */
+        wmb();
         bdp->cbd_sc = cpu_to_fec16(status);
     }

     return bdp;
 dma_mapping_error:
-    bdp = txq->cur_tx;
+    bdp = txq->bd.cur;
     for (i = 0; i < frag; i++) {
-        bdp = fec_enet_get_nextdesc(bdp, fep, queue);
+        bdp = fec_enet_get_nextdesc(bdp, &txq->bd);
         dma_unmap_single(&fep->pdev->dev, fec32_to_cpu(bdp->cbd_bufaddr),
                  fec16_to_cpu(bdp->cbd_datlen), DMA_TO_DEVICE);
     }
@@ -463,12 +417,11 @@ static int fec_enet_txq_submit_skb(struct fec_enet_priv_tx_q *txq,
     dma_addr_t addr;
     unsigned short status;
     unsigned short buflen;
-    unsigned short queue;
     unsigned int estatus = 0;
     unsigned int index;
     int entries_free;

-    entries_free = fec_enet_get_free_txdesc_num(fep, txq);
+    entries_free = fec_enet_get_free_txdesc_num(txq);
     if (entries_free < MAX_SKB_FRAGS + 1) {
         dev_kfree_skb_any(skb);
         if (net_ratelimit())
@@ -483,7 +436,7 @@ static int fec_enet_txq_submit_skb(struct fec_enet_priv_tx_q *txq,
     }

     /* Fill in a Tx ring entry */
-    bdp = txq->cur_tx;
+    bdp = txq->bd.cur;
     last_bdp = bdp;
     status = fec16_to_cpu(bdp->cbd_sc);
     status &= ~BD_ENET_TX_STATS;
@@ -492,8 +445,7 @@ static int fec_enet_txq_submit_skb(struct fec_enet_priv_tx_q *txq,
     bufaddr = skb->data;
     buflen = skb_headlen(skb);

-    queue = skb_get_queue_mapping(skb);
-    index = fec_enet_get_bd_index(txq->tx_bd_base, bdp, fep);
+    index = fec_enet_get_bd_index(bdp, &txq->bd);
     if (((unsigned long) bufaddr) & fep->tx_align ||
         fep->quirks & FEC_QUIRK_SWAP_FRAME) {
         memcpy(txq->tx_bounce[index], skb->data, buflen);
@@ -514,8 +466,12 @@ static int fec_enet_txq_submit_skb(struct fec_enet_priv_tx_q *txq,

     if (nr_frags) {
         last_bdp = fec_enet_txq_submit_frag_skb(txq, skb, ndev);
-        if (IS_ERR(last_bdp))
+        if (IS_ERR(last_bdp)) {
+            dma_unmap_single(&fep->pdev->dev, addr,
+                     buflen, DMA_TO_DEVICE);
+            dev_kfree_skb_any(skb);
             return NETDEV_TX_OK;
+        }
     } else {
         status |= (BD_ENET_TX_INTR | BD_ENET_TX_LAST);
         if (fep->bufdesc_ex) {
@@ -525,6 +481,8 @@ static int fec_enet_txq_submit_skb(struct fec_enet_priv_tx_q *txq,
             estatus |= BD_ENET_TX_TS;
         }
     }
+    bdp->cbd_bufaddr = cpu_to_fec32(addr);
+    bdp->cbd_datlen = cpu_to_fec16(buflen);

     if (fep->bufdesc_ex) {
@@ -535,7 +493,7 @@ static int fec_enet_txq_submit_skb(struct fec_enet_priv_tx_q *txq,
             skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;

         if (fep->quirks & FEC_QUIRK_HAS_AVB)
-            estatus |= FEC_TX_BD_FTYPE(queue);
+            estatus |= FEC_TX_BD_FTYPE(txq->bd.qid);

         if (skb->ip_summed == CHECKSUM_PARTIAL)
             estatus |= BD_ENET_TX_PINS | BD_ENET_TX_IINS;
@@ -544,12 +502,14 @@ static int fec_enet_txq_submit_skb(struct fec_enet_priv_tx_q *txq,
         ebdp->cbd_esc = cpu_to_fec32(estatus);
     }

-    index = fec_enet_get_bd_index(txq->tx_bd_base, last_bdp, fep);
+    index = fec_enet_get_bd_index(last_bdp, &txq->bd);
     /* Save skb pointer */
     txq->tx_skbuff[index] = skb;

-    bdp->cbd_datlen = cpu_to_fec16(buflen);
-    bdp->cbd_bufaddr = cpu_to_fec32(addr);
+    /* Make sure the updates to rest of the descriptor are performed before
+     * transferring ownership.
+     */
+    wmb();

     /* Send it on its way.  Tell FEC it's ready, interrupt when done,
      * it's the last BD of the frame, and to put the CRC on the end.
@@ -558,18 +518,18 @@ static int fec_enet_txq_submit_skb(struct fec_enet_priv_tx_q *txq,
     bdp->cbd_sc = cpu_to_fec16(status);

     /* If this was the last BD in the ring, start at the beginning again. */
-    bdp = fec_enet_get_nextdesc(last_bdp, fep, queue);
+    bdp = fec_enet_get_nextdesc(last_bdp, &txq->bd);

     skb_tx_timestamp(skb);

     /* Make sure the update to bdp and tx_skbuff are performed before
-     * cur_tx.
+     * txq->bd.cur.
      */
     wmb();
-    txq->cur_tx = bdp;
+    txq->bd.cur = bdp;

     /* Trigger transmission start */
-    writel(0, fep->hwp + FEC_X_DES_ACTIVE(queue));
+    writel(0, txq->bd.reg_desc_active);

     return 0;
 }
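A side note on the wmb() placement introduced in this function and in the frag path above (my interpretation, not wording from the patch): every field the hardware will read is written first, the barrier orders those stores, and only then is cbd_sc updated to flip ownership, so the DMA engine can never see a descriptor it owns with a stale address or length. A compressed sketch of that pattern with placeholder types standing in for struct bufdesc and wmb():

/* Placeholder descriptor and barrier; illustrative only. */
struct desc { unsigned short sc; unsigned short len; unsigned int addr; };
#define DESC_READY 0x8000

static void publish_desc(volatile struct desc *d, unsigned int addr,
                         unsigned short len, void (*barrier)(void))
{
    d->addr = addr;         /* fill in everything the DMA engine will read */
    d->len = len;
    barrier();              /* order those stores before the ownership flip */
    d->sc |= DESC_READY;    /* hand the descriptor to the hardware last */
}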
@@ -582,7 +542,6 @@ fec_enet_txq_put_data_tso(struct fec_enet_priv_tx_q *txq, struct sk_buff *skb,
 {
     struct fec_enet_private *fep = netdev_priv(ndev);
     struct bufdesc_ex *ebdp = container_of(bdp, struct bufdesc_ex, desc);
-    unsigned short queue = skb_get_queue_mapping(skb);
     unsigned short status;
     unsigned int estatus = 0;
     dma_addr_t addr;
@@ -614,7 +573,7 @@ fec_enet_txq_put_data_tso(struct fec_enet_priv_tx_q *txq, struct sk_buff *skb,

     if (fep->bufdesc_ex) {
         if (fep->quirks & FEC_QUIRK_HAS_AVB)
-            estatus |= FEC_TX_BD_FTYPE(queue);
+            estatus |= FEC_TX_BD_FTYPE(txq->bd.qid);
         if (skb->ip_summed == CHECKSUM_PARTIAL)
             estatus |= BD_ENET_TX_PINS | BD_ENET_TX_IINS;
         ebdp->cbd_bdu = 0;
@@ -643,7 +602,6 @@ fec_enet_txq_put_hdr_tso(struct fec_enet_priv_tx_q *txq,
     struct fec_enet_private *fep = netdev_priv(ndev);
     int hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb);
     struct bufdesc_ex *ebdp = container_of(bdp, struct bufdesc_ex, desc);
-    unsigned short queue = skb_get_queue_mapping(skb);
     void *bufaddr;
     unsigned long dmabuf;
     unsigned short status;
@@ -678,7 +636,7 @@ fec_enet_txq_put_hdr_tso(struct fec_enet_priv_tx_q *txq,

     if (fep->bufdesc_ex) {
         if (fep->quirks & FEC_QUIRK_HAS_AVB)
-            estatus |= FEC_TX_BD_FTYPE(queue);
+            estatus |= FEC_TX_BD_FTYPE(txq->bd.qid);
         if (skb->ip_summed == CHECKSUM_PARTIAL)
             estatus |= BD_ENET_TX_PINS | BD_ENET_TX_IINS;
         ebdp->cbd_bdu = 0;
@@ -697,13 +655,12 @@ static int fec_enet_txq_submit_tso(struct fec_enet_priv_tx_q *txq,
     struct fec_enet_private *fep = netdev_priv(ndev);
     int hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb);
     int total_len, data_left;
-    struct bufdesc *bdp = txq->cur_tx;
-    unsigned short queue = skb_get_queue_mapping(skb);
+    struct bufdesc *bdp = txq->bd.cur;
     struct tso_t tso;
     unsigned int index = 0;
     int ret;

-    if (tso_count_descs(skb) >= fec_enet_get_free_txdesc_num(fep, txq)) {
+    if (tso_count_descs(skb) >= fec_enet_get_free_txdesc_num(txq)) {
         dev_kfree_skb_any(skb);
         if (net_ratelimit())
             netdev_err(ndev, "NOT enough BD for TSO!\n");
@@ -723,7 +680,7 @@ static int fec_enet_txq_submit_tso(struct fec_enet_priv_tx_q *txq,
     while (total_len > 0) {
         char *hdr;

-        index = fec_enet_get_bd_index(txq->tx_bd_base, bdp, fep);
+        index = fec_enet_get_bd_index(bdp, &txq->bd);
         data_left = min_t(int, skb_shinfo(skb)->gso_size, total_len);
         total_len -= data_left;
@@ -738,9 +695,8 @@ static int fec_enet_txq_submit_tso(struct fec_enet_priv_tx_q *txq,
             int size;

             size = min_t(int, tso.size, data_left);
-            bdp = fec_enet_get_nextdesc(bdp, fep, queue);
-            index = fec_enet_get_bd_index(txq->tx_bd_base,
-                              bdp, fep);
+            bdp = fec_enet_get_nextdesc(bdp, &txq->bd);
+            index = fec_enet_get_bd_index(bdp, &txq->bd);
             ret = fec_enet_txq_put_data_tso(txq, skb, ndev,
                             bdp, index,
                             tso.data, size,
@@ -753,22 +709,22 @@ static int fec_enet_txq_submit_tso(struct fec_enet_priv_tx_q *txq,
             tso_build_data(skb, &tso, size);
         }

-        bdp = fec_enet_get_nextdesc(bdp, fep, queue);
+        bdp = fec_enet_get_nextdesc(bdp, &txq->bd);
     }

     /* Save skb pointer */
     txq->tx_skbuff[index] = skb;

     skb_tx_timestamp(skb);
-    txq->cur_tx = bdp;
+    txq->bd.cur = bdp;

     /* Trigger transmission start */
     if (!(fep->quirks & FEC_QUIRK_ERR007885) ||
-        !readl(fep->hwp + FEC_X_DES_ACTIVE(queue)) ||
-        !readl(fep->hwp + FEC_X_DES_ACTIVE(queue)) ||
-        !readl(fep->hwp + FEC_X_DES_ACTIVE(queue)) ||
-        !readl(fep->hwp + FEC_X_DES_ACTIVE(queue)))
-        writel(0, fep->hwp + FEC_X_DES_ACTIVE(queue));
+        !readl(txq->bd.reg_desc_active) ||
+        !readl(txq->bd.reg_desc_active) ||
+        !readl(txq->bd.reg_desc_active) ||
+        !readl(txq->bd.reg_desc_active))
+        writel(0, txq->bd.reg_desc_active);

     return 0;
@@ -798,7 +754,7 @@ fec_enet_start_xmit(struct sk_buff *skb, struct net_device *ndev)
     if (ret)
         return ret;

-    entries_free = fec_enet_get_free_txdesc_num(fep, txq);
+    entries_free = fec_enet_get_free_txdesc_num(txq);
     if (entries_free <= txq->tx_stop_threshold)
         netif_tx_stop_queue(nq);
@@ -819,32 +775,32 @@ static void fec_enet_bd_init(struct net_device *dev)
     for (q = 0; q < fep->num_rx_queues; q++) {
         /* Initialize the receive buffer descriptors. */
         rxq = fep->rx_queue[q];
-        bdp = rxq->rx_bd_base;
+        bdp = rxq->bd.base;

-        for (i = 0; i < rxq->rx_ring_size; i++) {
+        for (i = 0; i < rxq->bd.ring_size; i++) {

             /* Initialize the BD for every fragment in the page. */
             if (bdp->cbd_bufaddr)
                 bdp->cbd_sc = cpu_to_fec16(BD_ENET_RX_EMPTY);
             else
                 bdp->cbd_sc = cpu_to_fec16(0);
-            bdp = fec_enet_get_nextdesc(bdp, fep, q);
+            bdp = fec_enet_get_nextdesc(bdp, &rxq->bd);
         }

         /* Set the last buffer to wrap */
-        bdp = fec_enet_get_prevdesc(bdp, fep, q);
+        bdp = fec_enet_get_prevdesc(bdp, &rxq->bd);
         bdp->cbd_sc |= cpu_to_fec16(BD_SC_WRAP);

-        rxq->cur_rx = rxq->rx_bd_base;
+        rxq->bd.cur = rxq->bd.base;
     }

     for (q = 0; q < fep->num_tx_queues; q++) {
         /* ...and the same for transmit */
         txq = fep->tx_queue[q];
-        bdp = txq->tx_bd_base;
-        txq->cur_tx = bdp;
+        bdp = txq->bd.base;
+        txq->bd.cur = bdp;

-        for (i = 0; i < txq->tx_ring_size; i++) {
+        for (i = 0; i < txq->bd.ring_size; i++) {
             /* Initialize the BD for every fragment in the page. */
             bdp->cbd_sc = cpu_to_fec16(0);
             if (txq->tx_skbuff[i]) {
@@ -852,11 +808,11 @@ static void fec_enet_bd_init(struct net_device *dev)
                 txq->tx_skbuff[i] = NULL;
             }
             bdp->cbd_bufaddr = cpu_to_fec32(0);
-            bdp = fec_enet_get_nextdesc(bdp, fep, q);
+            bdp = fec_enet_get_nextdesc(bdp, &txq->bd);
         }

         /* Set the last buffer to wrap */
-        bdp = fec_enet_get_prevdesc(bdp, fep, q);
+        bdp = fec_enet_get_prevdesc(bdp, &txq->bd);
         bdp->cbd_sc |= cpu_to_fec16(BD_SC_WRAP);
         txq->dirty_tx = bdp;
     }
@@ -868,7 +824,7 @@ static void fec_enet_active_rxring(struct net_device *ndev)
     int i;

     for (i = 0; i < fep->num_rx_queues; i++)
-        writel(0, fep->hwp + FEC_R_DES_ACTIVE(i));
+        writel(0, fep->rx_queue[i]->bd.reg_desc_active);
 }

 static void fec_enet_enable_ring(struct net_device *ndev)
@@ -880,7 +836,7 @@ static void fec_enet_enable_ring(struct net_device *ndev)

     for (i = 0; i < fep->num_rx_queues; i++) {
         rxq = fep->rx_queue[i];
-        writel(rxq->bd_dma, fep->hwp + FEC_R_DES_START(i));
+        writel(rxq->bd.dma, fep->hwp + FEC_R_DES_START(i));
         writel(PKT_MAXBLR_SIZE, fep->hwp + FEC_R_BUFF_SIZE(i));

         /* enable DMA1/2 */
@@ -891,7 +847,7 @@ static void fec_enet_enable_ring(struct net_device *ndev)

     for (i = 0; i < fep->num_tx_queues; i++) {
         txq = fep->tx_queue[i];
-        writel(txq->bd_dma, fep->hwp + FEC_X_DES_START(i));
+        writel(txq->bd.dma, fep->hwp + FEC_X_DES_START(i));

         /* enable DMA1/2 */
         if (i)
for (i = 0; i < fep->num_tx_queues; i++) { for (i = 0; i < fep->num_tx_queues; i++) {
txq = fep->tx_queue[i]; txq = fep->tx_queue[i];
for (j = 0; j < txq->tx_ring_size; j++) { for (j = 0; j < txq->bd.ring_size; j++) {
if (txq->tx_skbuff[j]) { if (txq->tx_skbuff[j]) {
dev_kfree_skb_any(txq->tx_skbuff[j]); dev_kfree_skb_any(txq->tx_skbuff[j]);
txq->tx_skbuff[j] = NULL; txq->tx_skbuff[j] = NULL;
@@ -988,6 +944,7 @@ fec_restart(struct net_device *ndev)
         val &= ~FEC_RACC_OPTIONS;
         writel(val, fep->hwp + FEC_RACC);
     }
+    writel(PKT_MAXBUF_SIZE, fep->hwp + FEC_FTRL);
 #endif

     /*
@@ -1221,16 +1178,16 @@ fec_enet_tx_queue(struct net_device *ndev, u16 queue_id)
     bdp = txq->dirty_tx;

     /* get next bdp of dirty_tx */
-    bdp = fec_enet_get_nextdesc(bdp, fep, queue_id);
+    bdp = fec_enet_get_nextdesc(bdp, &txq->bd);

-    while (bdp != READ_ONCE(txq->cur_tx)) {
-        /* Order the load of cur_tx and cbd_sc */
+    while (bdp != READ_ONCE(txq->bd.cur)) {
+        /* Order the load of bd.cur and cbd_sc */
         rmb();
         status = fec16_to_cpu(READ_ONCE(bdp->cbd_sc));
         if (status & BD_ENET_TX_READY)
             break;

-        index = fec_enet_get_bd_index(txq->tx_bd_base, bdp, fep);
+        index = fec_enet_get_bd_index(bdp, &txq->bd);

         skb = txq->tx_skbuff[index];
         txq->tx_skbuff[index] = NULL;
@@ -1241,7 +1198,7 @@ fec_enet_tx_queue(struct net_device *ndev, u16 queue_id)
                  DMA_TO_DEVICE);
         bdp->cbd_bufaddr = cpu_to_fec32(0);
         if (!skb) {
-            bdp = fec_enet_get_nextdesc(bdp, fep, queue_id);
+            bdp = fec_enet_get_nextdesc(bdp, &txq->bd);
             continue;
         }
@@ -1290,21 +1247,21 @@ fec_enet_tx_queue(struct net_device *ndev, u16 queue_id)
         txq->dirty_tx = bdp;

         /* Update pointer to next buffer descriptor to be transmitted */
-        bdp = fec_enet_get_nextdesc(bdp, fep, queue_id);
+        bdp = fec_enet_get_nextdesc(bdp, &txq->bd);

         /* Since we have freed up a buffer, the ring is no longer full
          */
         if (netif_queue_stopped(ndev)) {
-            entries_free = fec_enet_get_free_txdesc_num(fep, txq);
+            entries_free = fec_enet_get_free_txdesc_num(txq);
             if (entries_free >= txq->tx_wake_threshold)
                 netif_tx_wake_queue(nq);
         }
     }

     /* ERR006538: Keep the transmitter going */
-    if (bdp != txq->cur_tx &&
-        readl(fep->hwp + FEC_X_DES_ACTIVE(queue_id)) == 0)
-        writel(0, fep->hwp + FEC_X_DES_ACTIVE(queue_id));
+    if (bdp != txq->bd.cur &&
+        readl(txq->bd.reg_desc_active) == 0)
+        writel(0, txq->bd.reg_desc_active);
 }

 static void
@@ -1366,7 +1323,7 @@ static bool fec_enet_copybreak(struct net_device *ndev, struct sk_buff **skb,
     return true;
 }

-/* During a receive, the cur_rx points to the current incoming buffer.
+/* During a receive, the bd_rx.cur points to the current incoming buffer.
  * When we update through the ring, if the next incoming buffer has
  * not been given to the system, we just set the empty indicator,
  * effectively tossing the packet.
@@ -1399,7 +1356,7 @@ fec_enet_rx_queue(struct net_device *ndev, int budget, u16 queue_id)
     /* First, grab all of the stats for the incoming packet.
      * These get messed up if we get called due to a busy condition.
      */
-    bdp = rxq->cur_rx;
+    bdp = rxq->bd.cur;

     while (!((status = fec16_to_cpu(bdp->cbd_sc)) & BD_ENET_RX_EMPTY)) {
@@ -1407,37 +1364,31 @@ fec_enet_rx_queue(struct net_device *ndev, int budget, u16 queue_id)
             break;
         pkt_received++;

-        /* Since we have allocated space to hold a complete frame,
-         * the last indicator should be set.
-         */
-        if ((status & BD_ENET_RX_LAST) == 0)
-            netdev_err(ndev, "rcv is not +last\n");
-
         writel(FEC_ENET_RXF, fep->hwp + FEC_IEVENT);

         /* Check for errors. */
+        status ^= BD_ENET_RX_LAST;
         if (status & (BD_ENET_RX_LG | BD_ENET_RX_SH | BD_ENET_RX_NO |
-               BD_ENET_RX_CR | BD_ENET_RX_OV)) {
+               BD_ENET_RX_CR | BD_ENET_RX_OV | BD_ENET_RX_LAST |
+               BD_ENET_RX_CL)) {
             ndev->stats.rx_errors++;
-            if (status & (BD_ENET_RX_LG | BD_ENET_RX_SH)) {
+            if (status & BD_ENET_RX_OV) {
+                /* FIFO overrun */
+                ndev->stats.rx_fifo_errors++;
+                goto rx_processing_done;
+            }
+            if (status & (BD_ENET_RX_LG | BD_ENET_RX_SH
+                        | BD_ENET_RX_LAST)) {
                 /* Frame too long or too short. */
                 ndev->stats.rx_length_errors++;
+                if (status & BD_ENET_RX_LAST)
+                    netdev_err(ndev, "rcv is not +last\n");
             }
-            if (status & BD_ENET_RX_NO)    /* Frame alignment */
-                ndev->stats.rx_frame_errors++;
             if (status & BD_ENET_RX_CR)    /* CRC Error */
                 ndev->stats.rx_crc_errors++;
-            if (status & BD_ENET_RX_OV)    /* FIFO overrun */
-                ndev->stats.rx_fifo_errors++;
-        }
-
-        /* Report late collisions as a frame error.
-         * On this error, the BD is closed, but we don't know what we
-         * have in the buffer.  So, just drop this frame on the floor.
-         */
-        if (status & BD_ENET_RX_CL) {
-            ndev->stats.rx_errors++;
-            ndev->stats.rx_frame_errors++;
+            /* Report late collisions as a frame error. */
+            if (status & (BD_ENET_RX_NO | BD_ENET_RX_CL))
+                ndev->stats.rx_frame_errors++;
             goto rx_processing_done;
         }
@@ -1446,7 +1397,7 @@ fec_enet_rx_queue(struct net_device *ndev, int budget, u16 queue_id)
         pkt_len = fec16_to_cpu(bdp->cbd_datlen);
         ndev->stats.rx_bytes += pkt_len;

-        index = fec_enet_get_bd_index(rxq->rx_bd_base, bdp, fep);
+        index = fec_enet_get_bd_index(bdp, &rxq->bd);
         skb = rxq->rx_skbuff[index];

         /* The packet length includes FCS, but we don't want to
@@ -1535,7 +1486,6 @@ fec_enet_rx_queue(struct net_device *ndev, int budget, u16 queue_id)

         /* Mark the buffer empty */
         status |= BD_ENET_RX_EMPTY;
-        bdp->cbd_sc = cpu_to_fec16(status);

         if (fep->bufdesc_ex) {
             struct bufdesc_ex *ebdp = (struct bufdesc_ex *)bdp;
@@ -1544,17 +1494,22 @@ fec_enet_rx_queue(struct net_device *ndev, int budget, u16 queue_id)
             ebdp->cbd_prot = 0;
             ebdp->cbd_bdu = 0;
         }
+        /* Make sure the updates to rest of the descriptor are
+         * performed before transferring ownership.
+         */
+        wmb();
+        bdp->cbd_sc = cpu_to_fec16(status);

         /* Update BD pointer to next entry */
-        bdp = fec_enet_get_nextdesc(bdp, fep, queue_id);
+        bdp = fec_enet_get_nextdesc(bdp, &rxq->bd);

         /* Doing this here will keep the FEC running while we process
          * incoming frames.  On a heavily loaded network, we should be
          * able to keep up at the expense of system resources.
          */
-        writel(0, fep->hwp + FEC_R_DES_ACTIVE(queue_id));
+        writel(0, rxq->bd.reg_desc_active);
     }
-    rxq->cur_rx = bdp;
+    rxq->bd.cur = bdp;
     return pkt_received;
 }
@@ -1613,7 +1568,7 @@ fec_enet_interrupt(int irq, void *dev_id)

         if (napi_schedule_prep(&fep->napi)) {
             /* Disable the NAPI interrupts */
-            writel(FEC_ENET_MII, fep->hwp + FEC_IMASK);
+            writel(FEC_NAPI_IMASK, fep->hwp + FEC_IMASK);
             __napi_schedule(&fep->napi);
         }
     }
@@ -2663,8 +2618,8 @@ static void fec_enet_free_buffers(struct net_device *ndev)

     for (q = 0; q < fep->num_rx_queues; q++) {
         rxq = fep->rx_queue[q];
-        bdp = rxq->rx_bd_base;
-        for (i = 0; i < rxq->rx_ring_size; i++) {
+        bdp = rxq->bd.base;
+        for (i = 0; i < rxq->bd.ring_size; i++) {
             skb = rxq->rx_skbuff[i];
             rxq->rx_skbuff[i] = NULL;
             if (skb) {
@@ -2674,14 +2629,14 @@ static void fec_enet_free_buffers(struct net_device *ndev)
                          DMA_FROM_DEVICE);
                 dev_kfree_skb(skb);
             }
-            bdp = fec_enet_get_nextdesc(bdp, fep, q);
+            bdp = fec_enet_get_nextdesc(bdp, &rxq->bd);
         }
     }

     for (q = 0; q < fep->num_tx_queues; q++) {
         txq = fep->tx_queue[q];
-        bdp = txq->tx_bd_base;
-        for (i = 0; i < txq->tx_ring_size; i++) {
+        bdp = txq->bd.base;
+        for (i = 0; i < txq->bd.ring_size; i++) {
             kfree(txq->tx_bounce[i]);
             txq->tx_bounce[i] = NULL;
             skb = txq->tx_skbuff[i];
@@ -2701,7 +2656,7 @@ static void fec_enet_free_queue(struct net_device *ndev)
         if (fep->tx_queue[i] && fep->tx_queue[i]->tso_hdrs) {
             txq = fep->tx_queue[i];
             dma_free_coherent(NULL,
-                      txq->tx_ring_size * TSO_HEADER_SIZE,
+                      txq->bd.ring_size * TSO_HEADER_SIZE,
                       txq->tso_hdrs,
                       txq->tso_hdrs_dma);
         }
@@ -2727,15 +2682,15 @@ static int fec_enet_alloc_queue(struct net_device *ndev)
         }

         fep->tx_queue[i] = txq;
-        txq->tx_ring_size = TX_RING_SIZE;
-        fep->total_tx_ring_size += fep->tx_queue[i]->tx_ring_size;
+        txq->bd.ring_size = TX_RING_SIZE;
+        fep->total_tx_ring_size += fep->tx_queue[i]->bd.ring_size;

         txq->tx_stop_threshold = FEC_MAX_SKB_DESCS;
         txq->tx_wake_threshold =
-            (txq->tx_ring_size - txq->tx_stop_threshold) / 2;
+            (txq->bd.ring_size - txq->tx_stop_threshold) / 2;

         txq->tso_hdrs = dma_alloc_coherent(NULL,
-                    txq->tx_ring_size * TSO_HEADER_SIZE,
+                    txq->bd.ring_size * TSO_HEADER_SIZE,
                     &txq->tso_hdrs_dma,
                     GFP_KERNEL);
         if (!txq->tso_hdrs) {
@@ -2752,8 +2707,8 @@ static int fec_enet_alloc_queue(struct net_device *ndev)
             goto alloc_failed;
         }

-        fep->rx_queue[i]->rx_ring_size = RX_RING_SIZE;
-        fep->total_rx_ring_size += fep->rx_queue[i]->rx_ring_size;
+        fep->rx_queue[i]->bd.ring_size = RX_RING_SIZE;
+        fep->total_rx_ring_size += fep->rx_queue[i]->bd.ring_size;
     }

     return ret;
@@ -2772,8 +2727,8 @@ fec_enet_alloc_rxq_buffers(struct net_device *ndev, unsigned int queue)
     struct fec_enet_priv_rx_q *rxq;

     rxq = fep->rx_queue[queue];
-    bdp = rxq->rx_bd_base;
-    for (i = 0; i < rxq->rx_ring_size; i++) {
+    bdp = rxq->bd.base;
+    for (i = 0; i < rxq->bd.ring_size; i++) {
         skb = netdev_alloc_skb(ndev, FEC_ENET_RX_FRSIZE);
         if (!skb)
             goto err_alloc;
@@ -2791,11 +2746,11 @@ fec_enet_alloc_rxq_buffers(struct net_device *ndev, unsigned int queue)
             ebdp->cbd_esc = cpu_to_fec32(BD_ENET_RX_INT);
         }

-        bdp = fec_enet_get_nextdesc(bdp, fep, queue);
+        bdp = fec_enet_get_nextdesc(bdp, &rxq->bd);
     }

     /* Set the last buffer to wrap. */
-    bdp = fec_enet_get_prevdesc(bdp, fep, queue);
+    bdp = fec_enet_get_prevdesc(bdp, &rxq->bd);
     bdp->cbd_sc |= cpu_to_fec16(BD_SC_WRAP);

     return 0;
@@ -2813,8 +2768,8 @@ fec_enet_alloc_txq_buffers(struct net_device *ndev, unsigned int queue)
     struct fec_enet_priv_tx_q *txq;

     txq = fep->tx_queue[queue];
-    bdp = txq->tx_bd_base;
-    for (i = 0; i < txq->tx_ring_size; i++) {
+    bdp = txq->bd.base;
+    for (i = 0; i < txq->bd.ring_size; i++) {
         txq->tx_bounce[i] = kmalloc(FEC_ENET_TX_FRSIZE, GFP_KERNEL);
         if (!txq->tx_bounce[i])
             goto err_alloc;
@@ -2827,11 +2782,11 @@ fec_enet_alloc_txq_buffers(struct net_device *ndev, unsigned int queue)
             ebdp->cbd_esc = cpu_to_fec32(BD_ENET_TX_INT);
         }

-        bdp = fec_enet_get_nextdesc(bdp, fep, queue);
+        bdp = fec_enet_get_nextdesc(bdp, &txq->bd);
     }

     /* Set the last buffer to wrap. */
-    bdp = fec_enet_get_prevdesc(bdp, fep, queue);
+    bdp = fec_enet_get_prevdesc(bdp, &txq->bd);
     bdp->cbd_sc |= cpu_to_fec16(BD_SC_WRAP);

     return 0;
@@ -3115,6 +3070,14 @@ static const struct net_device_ops fec_netdev_ops = {
     .ndo_set_features   = fec_set_features,
 };

+static const unsigned short offset_des_active_rxq[] = {
+    FEC_R_DES_ACTIVE_0, FEC_R_DES_ACTIVE_1, FEC_R_DES_ACTIVE_2
+};
+
+static const unsigned short offset_des_active_txq[] = {
+    FEC_X_DES_ACTIVE_0, FEC_X_DES_ACTIVE_1, FEC_X_DES_ACTIVE_2
+};
+
 /*
  * XXX:  We need to clean up on failure exits here.
  *
@@ -3122,13 +3085,15 @@ static const struct net_device_ops fec_netdev_ops = {
 static int fec_enet_init(struct net_device *ndev)
 {
     struct fec_enet_private *fep = netdev_priv(ndev);
-    struct fec_enet_priv_tx_q *txq;
-    struct fec_enet_priv_rx_q *rxq;
     struct bufdesc *cbd_base;
     dma_addr_t bd_dma;
     int bd_size;
     unsigned int i;
+    unsigned dsize = fep->bufdesc_ex ? sizeof(struct bufdesc_ex) :
+            sizeof(struct bufdesc);
+    unsigned dsize_log2 = __fls(dsize);

+    WARN_ON(dsize != (1 << dsize_log2));
 #if defined(CONFIG_ARM)
     fep->rx_align = 0xf;
     fep->tx_align = 0xf;
@@ -3139,12 +3104,7 @@ static int fec_enet_init(struct net_device *ndev)

     fec_enet_alloc_queue(ndev);

-    if (fep->bufdesc_ex)
-        fep->bufdesc_size = sizeof(struct bufdesc_ex);
-    else
-        fep->bufdesc_size = sizeof(struct bufdesc);
-    bd_size = (fep->total_tx_ring_size + fep->total_rx_ring_size) *
-            fep->bufdesc_size;
+    bd_size = (fep->total_tx_ring_size + fep->total_rx_ring_size) * dsize;

     /* Allocate memory for buffer descriptors. */
     cbd_base = dmam_alloc_coherent(&fep->pdev->dev, bd_size, &bd_dma,
@@ -3162,33 +3122,35 @@ static int fec_enet_init(struct net_device *ndev)

     /* Set receive and transmit descriptor base. */
     for (i = 0; i < fep->num_rx_queues; i++) {
-        rxq = fep->rx_queue[i];
-        rxq->index = i;
-        rxq->rx_bd_base = (struct bufdesc *)cbd_base;
-        rxq->bd_dma = bd_dma;
-        if (fep->bufdesc_ex) {
-            bd_dma += sizeof(struct bufdesc_ex) * rxq->rx_ring_size;
-            cbd_base = (struct bufdesc *)
-                (((struct bufdesc_ex *)cbd_base) + rxq->rx_ring_size);
-        } else {
-            bd_dma += sizeof(struct bufdesc) * rxq->rx_ring_size;
-            cbd_base += rxq->rx_ring_size;
-        }
+        struct fec_enet_priv_rx_q *rxq = fep->rx_queue[i];
+        unsigned size = dsize * rxq->bd.ring_size;
+
+        rxq->bd.qid = i;
+        rxq->bd.base = cbd_base;
+        rxq->bd.cur = cbd_base;
+        rxq->bd.dma = bd_dma;
+        rxq->bd.dsize = dsize;
+        rxq->bd.dsize_log2 = dsize_log2;
+        rxq->bd.reg_desc_active = fep->hwp + offset_des_active_rxq[i];
+        bd_dma += size;
+        cbd_base = (struct bufdesc *)(((void *)cbd_base) + size);
+        rxq->bd.last = (struct bufdesc *)(((void *)cbd_base) - dsize);
     }

     for (i = 0; i < fep->num_tx_queues; i++) {
-        txq = fep->tx_queue[i];
-        txq->index = i;
-        txq->tx_bd_base = (struct bufdesc *)cbd_base;
-        txq->bd_dma = bd_dma;
-        if (fep->bufdesc_ex) {
-            bd_dma += sizeof(struct bufdesc_ex) * txq->tx_ring_size;
-            cbd_base = (struct bufdesc *)
-                (((struct bufdesc_ex *)cbd_base) + txq->tx_ring_size);
-        } else {
-            bd_dma += sizeof(struct bufdesc) * txq->tx_ring_size;
-            cbd_base += txq->tx_ring_size;
-        }
+        struct fec_enet_priv_tx_q *txq = fep->tx_queue[i];
+        unsigned size = dsize * txq->bd.ring_size;
+
+        txq->bd.qid = i;
+        txq->bd.base = cbd_base;
+        txq->bd.cur = cbd_base;
+        txq->bd.dma = bd_dma;
+        txq->bd.dsize = dsize;
+        txq->bd.dsize_log2 = dsize_log2;
+        txq->bd.reg_desc_active = fep->hwp + offset_des_active_txq[i];
+        bd_dma += size;
+        cbd_base = (struct bufdesc *)(((void *)cbd_base) + size);
+        txq->bd.last = (struct bufdesc *)(((void *)cbd_base) - dsize);
     }
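To restate what the two loops above do (an illustration under stated assumptions, not code from the patch): a single coherent allocation holds all rings back to back, and each queue's bd.base/bd.dma point at its slice, with bd.last left at the final descriptor of that slice. A standalone sketch of the same carve-up with made-up addresses and ring sizes:

#include <stdio.h>

struct ring { unsigned long base, dma, last; unsigned ring_size; };

/* Walk cpu/dma cursors across consecutive rings, the way fec_enet_init()
 * advances cbd_base and bd_dma over the rx queues and then the tx queues. */
static void carve(struct ring *r, int n, unsigned long cpu_base,
                  unsigned long dma_base, unsigned dsize)
{
    for (int i = 0; i < n; i++) {
        unsigned size = dsize * r[i].ring_size;

        r[i].base = cpu_base;
        r[i].dma = dma_base;
        cpu_base += size;
        dma_base += size;
        r[i].last = cpu_base - dsize;   /* last descriptor in this slice */
    }
}

int main(void)
{
    struct ring rings[2] = { { .ring_size = 512 }, { .ring_size = 512 } };

    carve(rings, 2, 0x1000, 0x80001000, 32);
    for (int i = 0; i < 2; i++)
        printf("ring %d: base=%#lx last=%#lx dma=%#lx\n",
               i, rings[i].base, rings[i].last, rings[i].dma);
    return 0;
}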
...