Commit 4a7d666a authored by Giuseppe CAVALLARO, committed by David S. Miller

stmmac: reorganize chain/ring modes removing Koptions

Previously there were two Kconfig options to decide whether the stmmac
had to use a ring or a chain to manage its descriptors.
This patch removes those kernel configuration options and allows the
chain mode to be selected at run time via a module parameter.
Ring mode remains the default.

With this patch it is also easier to validate the driver build and to
guarantee that both modes always compile fine.
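
For example, when the driver is built as the stmmac module, chain mode
can be requested at load time via the new parameter:

    # ring mode (default)
    modprobe stmmac
    # force chain mode via the new chain_mode module parameter
    modprobe stmmac chain_mode=1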
Signed-off-by: Giuseppe Cavallaro <peppe.cavallaro@st.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
parent ad999eee
@@ -54,22 +54,4 @@ config STMMAC_DA
       By default, the DMA arbitration scheme is based on Round-robin
       (rx:tx priority is 1:1).
-choice
-    prompt "Select the DMA TX/RX descriptor operating modes"
-    depends on STMMAC_ETH
-    ---help---
-      This driver supports DMA descriptor to operate both in dual buffer
-      (RING) and linked-list(CHAINED) mode. In RING mode each descriptor
-      points to two data buffer pointers whereas in CHAINED mode they
-      points to only one data buffer pointer.
-config STMMAC_RING
-    bool "Enable Descriptor Ring Mode"
-config STMMAC_CHAINED
-    bool "Enable Descriptor Chained Mode"
-endchoice
 endif
 obj-$(CONFIG_STMMAC_ETH) += stmmac.o
-stmmac-$(CONFIG_STMMAC_RING) += ring_mode.o
-stmmac-$(CONFIG_STMMAC_CHAINED) += chain_mode.o
 stmmac-$(CONFIG_STMMAC_PLATFORM) += stmmac_platform.o
 stmmac-$(CONFIG_STMMAC_PCI) += stmmac_pci.o
-stmmac-objs:= stmmac_main.o stmmac_ethtool.o stmmac_mdio.o \
-              dwmac_lib.o dwmac1000_core.o dwmac1000_dma.o \
+stmmac-objs:= stmmac_main.o stmmac_ethtool.o stmmac_mdio.o ring_mode.o \
+              chain_mode.o dwmac_lib.o dwmac1000_core.o dwmac1000_dma.o \
               dwmac100_core.o dwmac100_dma.o enh_desc.o norm_desc.o \
               mmc_core.o $(stmmac-y)
@@ -28,7 +28,7 @@
 #include "stmmac.h"
-unsigned int stmmac_jumbo_frm(void *p, struct sk_buff *skb, int csum)
+static unsigned int stmmac_jumbo_frm(void *p, struct sk_buff *skb, int csum)
 {
     struct stmmac_priv *priv = (struct stmmac_priv *) p;
     unsigned int txsize = priv->dma_tx_size;
@@ -47,7 +47,7 @@ unsigned int stmmac_jumbo_frm(void *p, struct sk_buff *skb, int csum)
         desc->des2 = dma_map_single(priv->device, skb->data,
                                     bmax, DMA_TO_DEVICE);
-        priv->hw->desc->prepare_tx_desc(desc, 1, bmax, csum);
+        priv->hw->desc->prepare_tx_desc(desc, 1, bmax, csum, STMMAC_CHAIN_MODE);
         while (len != 0) {
             entry = (++priv->cur_tx) % txsize;
@@ -57,8 +57,8 @@ unsigned int stmmac_jumbo_frm(void *p, struct sk_buff *skb, int csum)
                 desc->des2 = dma_map_single(priv->device,
                                             (skb->data + bmax * i),
                                             bmax, DMA_TO_DEVICE);
-                priv->hw->desc->prepare_tx_desc(desc, 0, bmax,
-                                                csum);
+                priv->hw->desc->prepare_tx_desc(desc, 0, bmax, csum,
+                                                STMMAC_CHAIN_MODE);
                 priv->hw->desc->set_tx_owner(desc);
                 priv->tx_skbuff[entry] = NULL;
                 len -= bmax;
@@ -67,8 +67,8 @@ unsigned int stmmac_jumbo_frm(void *p, struct sk_buff *skb, int csum)
                 desc->des2 = dma_map_single(priv->device,
                                             (skb->data + bmax * i), len,
                                             DMA_TO_DEVICE);
-                priv->hw->desc->prepare_tx_desc(desc, 0, len,
-                                                csum);
+                priv->hw->desc->prepare_tx_desc(desc, 0, len, csum,
+                                                STMMAC_CHAIN_MODE);
                 priv->hw->desc->set_tx_owner(desc);
                 priv->tx_skbuff[entry] = NULL;
                 len = 0;
@@ -89,18 +89,6 @@ static unsigned int stmmac_is_jumbo_frm(int len, int enh_desc)
     return ret;
 }
-static void stmmac_refill_desc3(int bfsize, struct dma_desc *p)
-{
-}
-static void stmmac_init_desc3(int des3_as_data_buf, struct dma_desc *p)
-{
-}
-static void stmmac_clean_desc3(struct dma_desc *p)
-{
-}
 static void stmmac_init_dma_chain(struct dma_desc *des, dma_addr_t phy_addr,
                                   unsigned int size)
 {
@@ -120,18 +108,8 @@ static void stmmac_init_dma_chain(struct dma_desc *des, dma_addr_t phy_addr,
     p->des3 = (unsigned int)phy_addr;
 }
-static int stmmac_set_16kib_bfsize(int mtu)
-{
-    /* Not supported */
-    return 0;
-}
-const struct stmmac_ring_mode_ops ring_mode_ops = {
+const struct stmmac_chain_mode_ops chain_mode_ops = {
     .is_jumbo_frm = stmmac_is_jumbo_frm,
     .jumbo_frm = stmmac_jumbo_frm,
-    .refill_desc3 = stmmac_refill_desc3,
-    .init_desc3 = stmmac_init_desc3,
     .init_dma_chain = stmmac_init_dma_chain,
-    .clean_desc3 = stmmac_clean_desc3,
-    .set_16kib_bfsize = stmmac_set_16kib_bfsize,
 };
@@ -255,23 +255,27 @@ struct dma_features {
 #define STMMAC_DEFAULT_LIT_LS_TIMER 0x3E8
 #define STMMAC_DEFAULT_TWT_LS_TIMER 0x0
+#define STMMAC_CHAIN_MODE   0x1
+#define STMMAC_RING_MODE    0x2
 struct stmmac_desc_ops {
     /* DMA RX descriptor ring initialization */
     void (*init_rx_desc) (struct dma_desc *p, unsigned int ring_size,
-                          int disable_rx_ic);
+                          int disable_rx_ic, int mode);
     /* DMA TX descriptor ring initialization */
-    void (*init_tx_desc) (struct dma_desc *p, unsigned int ring_size);
+    void (*init_tx_desc) (struct dma_desc *p, unsigned int ring_size,
+                          int mode);
     /* Invoked by the xmit function to prepare the tx descriptor */
     void (*prepare_tx_desc) (struct dma_desc *p, int is_fs, int len,
-                             int csum_flag);
+                             int csum_flag, int mode);
     /* Set/get the owner of the descriptor */
     void (*set_tx_owner) (struct dma_desc *p);
     int (*get_tx_owner) (struct dma_desc *p);
     /* Invoked by the xmit function to close the tx descriptor */
     void (*close_tx_desc) (struct dma_desc *p);
     /* Clean the tx descriptor as soon as the tx irq is received */
-    void (*release_tx_desc) (struct dma_desc *p);
+    void (*release_tx_desc) (struct dma_desc *p, int mode);
     /* Clear interrupt on tx frame completion. When this bit is
      * set an interrupt happens as soon as the frame is transmitted */
     void (*clear_tx_ic) (struct dma_desc *p);
@@ -361,18 +365,24 @@ struct stmmac_ring_mode_ops {
     unsigned int (*is_jumbo_frm) (int len, int ehn_desc);
     unsigned int (*jumbo_frm) (void *priv, struct sk_buff *skb, int csum);
     void (*refill_desc3) (int bfsize, struct dma_desc *p);
-    void (*init_desc3) (int des3_as_data_buf, struct dma_desc *p);
-    void (*init_dma_chain) (struct dma_desc *des, dma_addr_t phy_addr,
-                            unsigned int size);
+    void (*init_desc3) (struct dma_desc *p);
     void (*clean_desc3) (struct dma_desc *p);
     int (*set_16kib_bfsize) (int mtu);
 };
+struct stmmac_chain_mode_ops {
+    unsigned int (*is_jumbo_frm) (int len, int ehn_desc);
+    unsigned int (*jumbo_frm) (void *priv, struct sk_buff *skb, int csum);
+    void (*init_dma_chain) (struct dma_desc *des, dma_addr_t phy_addr,
+                            unsigned int size);
+};
 struct mac_device_info {
     const struct stmmac_ops *mac;
     const struct stmmac_desc_ops *desc;
     const struct stmmac_dma_ops *dma;
     const struct stmmac_ring_mode_ops *ring;
+    const struct stmmac_chain_mode_ops *chain;
     struct mii_regs mii;    /* MII register Addresses */
     struct mac_link link;
     unsigned int synopsys_uid;
@@ -390,5 +400,6 @@ extern void stmmac_set_mac(void __iomem *ioaddr, bool enable);
 extern void dwmac_dma_flush_tx_fifo(void __iomem *ioaddr);
 extern const struct stmmac_ring_mode_ops ring_mode_ops;
+extern const struct stmmac_chain_mode_ops chain_mode_ops;
 #endif /* __COMMON_H__ */
@@ -30,26 +30,28 @@
 #ifndef __DESC_COM_H__
 #define __DESC_COM_H__
-#if defined(CONFIG_STMMAC_RING)
-static inline void ehn_desc_rx_set_on_ring_chain(struct dma_desc *p, int end)
+/* Specific functions used for Ring mode */
+/* Enhanced descriptors */
+static inline void ehn_desc_rx_set_on_ring(struct dma_desc *p, int end)
 {
     p->des01.erx.buffer2_size = BUF_SIZE_8KiB - 1;
     if (end)
         p->des01.erx.end_ring = 1;
 }
-static inline void ehn_desc_tx_set_on_ring_chain(struct dma_desc *p, int end)
+static inline void ehn_desc_tx_set_on_ring(struct dma_desc *p, int end)
 {
     if (end)
         p->des01.etx.end_ring = 1;
 }
-static inline void enh_desc_end_tx_desc(struct dma_desc *p, int ter)
+static inline void enh_desc_end_tx_desc_on_ring(struct dma_desc *p, int ter)
 {
     p->des01.etx.end_ring = ter;
 }
-static inline void enh_set_tx_desc_len(struct dma_desc *p, int len)
+static inline void enh_set_tx_desc_len_on_ring(struct dma_desc *p, int len)
 {
     if (unlikely(len > BUF_SIZE_4KiB)) {
         p->des01.etx.buffer1_size = BUF_SIZE_4KiB;
@@ -58,25 +60,26 @@ static inline void enh_set_tx_desc_len(struct dma_desc *p, int len)
         p->des01.etx.buffer1_size = len;
 }
-static inline void ndesc_rx_set_on_ring_chain(struct dma_desc *p, int end)
+/* Normal descriptors */
+static inline void ndesc_rx_set_on_ring(struct dma_desc *p, int end)
 {
     p->des01.rx.buffer2_size = BUF_SIZE_2KiB - 1;
     if (end)
         p->des01.rx.end_ring = 1;
 }
-static inline void ndesc_tx_set_on_ring_chain(struct dma_desc *p, int end)
+static inline void ndesc_tx_set_on_ring(struct dma_desc *p, int end)
 {
     if (end)
         p->des01.tx.end_ring = 1;
 }
-static inline void ndesc_end_tx_desc(struct dma_desc *p, int ter)
+static inline void ndesc_end_tx_desc_on_ring(struct dma_desc *p, int ter)
 {
     p->des01.tx.end_ring = ter;
 }
-static inline void norm_set_tx_desc_len(struct dma_desc *p, int len)
+static inline void norm_set_tx_desc_len_on_ring(struct dma_desc *p, int len)
 {
     if (unlikely(len > BUF_SIZE_2KiB)) {
         p->des01.etx.buffer1_size = BUF_SIZE_2KiB - 1;
@@ -85,47 +88,48 @@ static inline void norm_set_tx_desc_len(struct dma_desc *p, int len)
     p->des01.tx.buffer1_size = len;
 }
-#else
-static inline void ehn_desc_rx_set_on_ring_chain(struct dma_desc *p, int end)
+/* Specific functions used for Chain mode */
+/* Enhanced descriptors */
+static inline void ehn_desc_rx_set_on_chain(struct dma_desc *p, int end)
 {
     p->des01.erx.second_address_chained = 1;
 }
-static inline void ehn_desc_tx_set_on_ring_chain(struct dma_desc *p, int end)
+static inline void ehn_desc_tx_set_on_chain(struct dma_desc *p, int end)
 {
     p->des01.etx.second_address_chained = 1;
 }
-static inline void enh_desc_end_tx_desc(struct dma_desc *p, int ter)
+static inline void enh_desc_end_tx_desc_on_chain(struct dma_desc *p, int ter)
 {
     p->des01.etx.second_address_chained = 1;
 }
-static inline void enh_set_tx_desc_len(struct dma_desc *p, int len)
+static inline void enh_set_tx_desc_len_on_chain(struct dma_desc *p, int len)
 {
     p->des01.etx.buffer1_size = len;
 }
-static inline void ndesc_rx_set_on_ring_chain(struct dma_desc *p, int end)
+/* Normal descriptors */
+static inline void ndesc_rx_set_on_chain(struct dma_desc *p, int end)
 {
     p->des01.rx.second_address_chained = 1;
 }
-static inline void ndesc_tx_set_on_ring_chain(struct dma_desc *p, int ring_size)
+static inline void ndesc_tx_set_on_chain(struct dma_desc *p, int
+                                         ring_size)
 {
     p->des01.tx.second_address_chained = 1;
 }
-static inline void ndesc_end_tx_desc(struct dma_desc *p, int ter)
+static inline void ndesc_end_tx_desc_on_chain(struct dma_desc *p, int ter)
 {
     p->des01.tx.second_address_chained = 1;
 }
-static inline void norm_set_tx_desc_len(struct dma_desc *p, int len)
+static inline void norm_set_tx_desc_len_on_chain(struct dma_desc *p, int len)
 {
     p->des01.tx.buffer1_size = len;
 }
-#endif
 #endif /* __DESC_COM_H__ */
@@ -229,14 +229,17 @@ static int enh_desc_get_rx_status(void *data, struct stmmac_extra_stats *x,
 }
 static void enh_desc_init_rx_desc(struct dma_desc *p, unsigned int ring_size,
-                                  int disable_rx_ic)
+                                  int disable_rx_ic, int mode)
 {
     int i;
     for (i = 0; i < ring_size; i++) {
         p->des01.erx.own = 1;
         p->des01.erx.buffer1_size = BUF_SIZE_8KiB - 1;
-        ehn_desc_rx_set_on_ring_chain(p, (i == ring_size - 1));
+        if (mode == STMMAC_CHAIN_MODE)
+            ehn_desc_rx_set_on_chain(p, (i == ring_size - 1));
+        else
+            ehn_desc_rx_set_on_ring(p, (i == ring_size - 1));
         if (disable_rx_ic)
             p->des01.erx.disable_ic = 1;
@@ -244,13 +247,17 @@ static void enh_desc_init_rx_desc(struct dma_desc *p, unsigned int ring_size,
     }
 }
-static void enh_desc_init_tx_desc(struct dma_desc *p, unsigned int ring_size)
+static void enh_desc_init_tx_desc(struct dma_desc *p, unsigned int ring_size,
+                                  int mode)
 {
     int i;
     for (i = 0; i < ring_size; i++) {
         p->des01.etx.own = 0;
-        ehn_desc_tx_set_on_ring_chain(p, (i == ring_size - 1));
+        if (mode == STMMAC_CHAIN_MODE)
+            ehn_desc_tx_set_on_chain(p, (i == ring_size - 1));
+        else
+            ehn_desc_tx_set_on_ring(p, (i == ring_size - 1));
         p++;
     }
 }
@@ -280,20 +287,26 @@ static int enh_desc_get_tx_ls(struct dma_desc *p)
     return p->des01.etx.last_segment;
 }
-static void enh_desc_release_tx_desc(struct dma_desc *p)
+static void enh_desc_release_tx_desc(struct dma_desc *p, int mode)
 {
     int ter = p->des01.etx.end_ring;
     memset(p, 0, offsetof(struct dma_desc, des2));
-    enh_desc_end_tx_desc(p, ter);
+    if (mode == STMMAC_CHAIN_MODE)
+        enh_desc_end_tx_desc_on_chain(p, ter);
+    else
+        enh_desc_end_tx_desc_on_ring(p, ter);
 }
 static void enh_desc_prepare_tx_desc(struct dma_desc *p, int is_fs, int len,
-                                     int csum_flag)
+                                     int csum_flag, int mode)
 {
     p->des01.etx.first_segment = is_fs;
-    enh_set_tx_desc_len(p, len);
+    if (mode == STMMAC_CHAIN_MODE)
+        enh_set_tx_desc_len_on_chain(p, len);
+    else
+        enh_set_tx_desc_len_on_ring(p, len);
     if (likely(csum_flag))
         p->des01.etx.checksum_insertion = cic_full;
...
@@ -123,14 +123,17 @@ static int ndesc_get_rx_status(void *data, struct stmmac_extra_stats *x,
 }
 static void ndesc_init_rx_desc(struct dma_desc *p, unsigned int ring_size,
-                               int disable_rx_ic)
+                               int disable_rx_ic, int mode)
 {
     int i;
     for (i = 0; i < ring_size; i++) {
         p->des01.rx.own = 1;
         p->des01.rx.buffer1_size = BUF_SIZE_2KiB - 1;
-        ndesc_rx_set_on_ring_chain(p, (i == ring_size - 1));
+        if (mode == STMMAC_CHAIN_MODE)
+            ndesc_rx_set_on_chain(p, (i == ring_size - 1));
+        else
+            ndesc_rx_set_on_ring(p, (i == ring_size - 1));
         if (disable_rx_ic)
             p->des01.rx.disable_ic = 1;
@@ -138,12 +141,16 @@ static void ndesc_init_rx_desc(struct dma_desc *p, unsigned int ring_size,
     }
 }
-static void ndesc_init_tx_desc(struct dma_desc *p, unsigned int ring_size)
+static void ndesc_init_tx_desc(struct dma_desc *p, unsigned int ring_size,
+                               int mode)
 {
     int i;
     for (i = 0; i < ring_size; i++) {
         p->des01.tx.own = 0;
-        ndesc_tx_set_on_ring_chain(p, (i == (ring_size - 1)));
+        if (mode == STMMAC_CHAIN_MODE)
+            ndesc_tx_set_on_chain(p, (i == (ring_size - 1)));
+        else
+            ndesc_tx_set_on_ring(p, (i == (ring_size - 1)));
         p++;
     }
 }
@@ -173,19 +180,25 @@ static int ndesc_get_tx_ls(struct dma_desc *p)
     return p->des01.tx.last_segment;
 }
-static void ndesc_release_tx_desc(struct dma_desc *p)
+static void ndesc_release_tx_desc(struct dma_desc *p, int mode)
 {
     int ter = p->des01.tx.end_ring;
     memset(p, 0, offsetof(struct dma_desc, des2));
-    ndesc_end_tx_desc(p, ter);
+    if (mode == STMMAC_CHAIN_MODE)
+        ndesc_end_tx_desc_on_chain(p, ter);
+    else
+        ndesc_end_tx_desc_on_ring(p, ter);
 }
 static void ndesc_prepare_tx_desc(struct dma_desc *p, int is_fs, int len,
-                                  int csum_flag)
+                                  int csum_flag, int mode)
 {
     p->des01.tx.first_segment = is_fs;
-    norm_set_tx_desc_len(p, len);
+    if (mode == STMMAC_CHAIN_MODE)
+        norm_set_tx_desc_len_on_chain(p, len);
+    else
+        norm_set_tx_desc_len_on_ring(p, len);
     if (likely(csum_flag))
         p->des01.tx.checksum_insertion = cic_full;
...
@@ -49,8 +49,8 @@ static unsigned int stmmac_jumbo_frm(void *p, struct sk_buff *skb, int csum)
         desc->des2 = dma_map_single(priv->device, skb->data,
                                     bmax, DMA_TO_DEVICE);
         desc->des3 = desc->des2 + BUF_SIZE_4KiB;
-        priv->hw->desc->prepare_tx_desc(desc, 1, bmax,
-                                        csum);
+        priv->hw->desc->prepare_tx_desc(desc, 1, bmax, csum,
+                                        STMMAC_RING_MODE);
         wmb();
         entry = (++priv->cur_tx) % txsize;
         desc = priv->dma_tx + entry;
@@ -58,7 +58,8 @@ static unsigned int stmmac_jumbo_frm(void *p, struct sk_buff *skb, int csum)
         desc->des2 = dma_map_single(priv->device, skb->data + bmax,
                                     len, DMA_TO_DEVICE);
         desc->des3 = desc->des2 + BUF_SIZE_4KiB;
-        priv->hw->desc->prepare_tx_desc(desc, 0, len, csum);
+        priv->hw->desc->prepare_tx_desc(desc, 0, len, csum,
+                                        STMMAC_RING_MODE);
         wmb();
         priv->hw->desc->set_tx_owner(desc);
         priv->tx_skbuff[entry] = NULL;
@@ -66,7 +67,8 @@ static unsigned int stmmac_jumbo_frm(void *p, struct sk_buff *skb, int csum)
         desc->des2 = dma_map_single(priv->device, skb->data,
                                     nopaged_len, DMA_TO_DEVICE);
         desc->des3 = desc->des2 + BUF_SIZE_4KiB;
-        priv->hw->desc->prepare_tx_desc(desc, 1, nopaged_len, csum);
+        priv->hw->desc->prepare_tx_desc(desc, 1, nopaged_len, csum,
+                                        STMMAC_RING_MODE);
     }
     return entry;
@@ -89,19 +91,12 @@ static void stmmac_refill_desc3(int bfsize, struct dma_desc *p)
         p->des3 = p->des2 + BUF_SIZE_8KiB;
 }
-/* In ring mode we need to fill the desc3 because it is used
- * as buffer */
-static void stmmac_init_desc3(int des3_as_data_buf, struct dma_desc *p)
+/* In ring mode we need to fill the desc3 because it is used as buffer */
+static void stmmac_init_desc3(struct dma_desc *p)
 {
-    if (unlikely(des3_as_data_buf))
         p->des3 = p->des2 + BUF_SIZE_8KiB;
 }
-static void stmmac_init_dma_chain(struct dma_desc *des, dma_addr_t phy_addr,
-                                  unsigned int size)
-{
-}
 static void stmmac_clean_desc3(struct dma_desc *p)
 {
     if (unlikely(p->des3))
@@ -121,7 +116,6 @@ const struct stmmac_ring_mode_ops ring_mode_ops = {
     .jumbo_frm = stmmac_jumbo_frm,
     .refill_desc3 = stmmac_refill_desc3,
     .init_desc3 = stmmac_init_desc3,
-    .init_dma_chain = stmmac_init_dma_chain,
     .clean_desc3 = stmmac_clean_desc3,
     .set_16kib_bfsize = stmmac_set_16kib_bfsize,
 };
@@ -93,6 +93,7 @@ struct stmmac_priv {
     u32 tx_coal_timer;
     int use_riwt;
     u32 rx_riwt;
+    unsigned int mode;
 };
 extern int phyaddr;
...
@@ -130,6 +130,13 @@ module_param(eee_timer, int, S_IRUGO | S_IWUSR);
 MODULE_PARM_DESC(eee_timer, "LPI tx expiration time in msec");
 #define STMMAC_LPI_TIMER(x) (jiffies + msecs_to_jiffies(x))
+/* By default the driver will use the ring mode to manage tx and rx descriptors
+ * but passing this value so user can force to use the chain instead of the ring
+ */
+static unsigned int chain_mode;
+module_param(chain_mode, int, S_IRUGO);
+MODULE_PARM_DESC(chain_mode, "To use chain instead of ring mode");
 static irqreturn_t stmmac_interrupt(int irq, void *dev_id);
 #ifdef CONFIG_STMMAC_DEBUG_FS
@@ -514,17 +521,15 @@ static void init_dma_desc_rings(struct net_device *dev)
     struct sk_buff *skb;
     unsigned int txsize = priv->dma_tx_size;
     unsigned int rxsize = priv->dma_rx_size;
-    unsigned int bfsize;
+    unsigned int bfsize = 0;
     int dis_ic = 0;
-    int des3_as_data_buf = 0;
     /* Set the max buffer size according to the DESC mode
      * and the MTU. Note that RING mode allows 16KiB bsize. */
+    if (priv->mode == STMMAC_RING_MODE)
         bfsize = priv->hw->ring->set_16kib_bfsize(dev->mtu);
-    if (bfsize == BUF_SIZE_16KiB)
-        des3_as_data_buf = 1;
-    else
+    if (bfsize < BUF_SIZE_16KiB)
         bfsize = stmmac_set_bfsize(dev->mtu, priv->dma_buf_sz);
     DBG(probe, INFO, "stmmac: txsize %d, rxsize %d, bfsize %d\n",
@@ -571,7 +576,9 @@ static void init_dma_desc_rings(struct net_device *dev)
         p->des2 = priv->rx_skbuff_dma[i];
-        priv->hw->ring->init_desc3(des3_as_data_buf, p);
+        if ((priv->mode == STMMAC_RING_MODE) &&
+            (bfsize == BUF_SIZE_16KiB))
+            priv->hw->ring->init_desc3(p);
         DBG(probe, INFO, "[%p]\t[%p]\t[%x]\n", priv->rx_skbuff[i],
             priv->rx_skbuff[i]->data, priv->rx_skbuff_dma[i]);
@@ -589,17 +596,20 @@
     /* In case of Chained mode this sets the des3 to the next
      * element in the chain */
-    priv->hw->ring->init_dma_chain(priv->dma_rx, priv->dma_rx_phy, rxsize);
-    priv->hw->ring->init_dma_chain(priv->dma_tx, priv->dma_tx_phy, txsize);
+    if (priv->mode == STMMAC_CHAIN_MODE) {
+        priv->hw->chain->init_dma_chain(priv->dma_rx, priv->dma_rx_phy,
+                                        rxsize);
+        priv->hw->chain->init_dma_chain(priv->dma_tx, priv->dma_tx_phy,
+                                        txsize);
+    }
     priv->dirty_tx = 0;
     priv->cur_tx = 0;
     if (priv->use_riwt)
         dis_ic = 1;
     /* Clear the Rx/Tx descriptors */
-    priv->hw->desc->init_rx_desc(priv->dma_rx, rxsize, dis_ic);
-    priv->hw->desc->init_tx_desc(priv->dma_tx, txsize);
+    priv->hw->desc->init_rx_desc(priv->dma_rx, rxsize, dis_ic, priv->mode);
+    priv->hw->desc->init_tx_desc(priv->dma_tx, txsize, priv->mode);
     if (netif_msg_hw(priv)) {
         pr_info("RX descriptor ring:\n");
@@ -726,6 +736,7 @@ static void stmmac_tx_clean(struct stmmac_priv *priv)
             dma_unmap_single(priv->device, p->des2,
                              priv->hw->desc->get_tx_len(p),
                              DMA_TO_DEVICE);
+        if (priv->mode == STMMAC_RING_MODE)
             priv->hw->ring->clean_desc3(p);
         if (likely(skb != NULL)) {
@@ -733,7 +744,7 @@ static void stmmac_tx_clean(struct stmmac_priv *priv)
             priv->tx_skbuff[entry] = NULL;
         }
-        priv->hw->desc->release_tx_desc(p);
+        priv->hw->desc->release_tx_desc(p, priv->mode);
         priv->dirty_tx++;
     }
@@ -778,7 +789,8 @@ static void stmmac_tx_err(struct stmmac_priv *priv)
     priv->hw->dma->stop_tx(priv->ioaddr);
     dma_free_tx_skbufs(priv);
-    priv->hw->desc->init_tx_desc(priv->dma_tx, priv->dma_tx_size);
+    priv->hw->desc->init_tx_desc(priv->dma_tx, priv->dma_tx_size,
+                                 priv->mode);
     priv->dirty_tx = 0;
     priv->cur_tx = 0;
     priv->hw->dma->start_tx(priv->ioaddr);
@@ -1190,7 +1202,7 @@ static netdev_tx_t stmmac_xmit(struct sk_buff *skb, struct net_device *dev)
     struct stmmac_priv *priv = netdev_priv(dev);
     unsigned int txsize = priv->dma_tx_size;
     unsigned int entry;
-    int i, csum_insertion = 0;
+    int i, csum_insertion = 0, is_jumbo = 0;
     int nfrags = skb_shinfo(skb)->nr_frags;
     struct dma_desc *desc, *first;
     unsigned int nopaged_len = skb_headlen(skb);
@@ -1236,15 +1248,27 @@ static netdev_tx_t stmmac_xmit(struct sk_buff *skb, struct net_device *dev)
 #endif
     priv->tx_skbuff[entry] = skb;
-    if (priv->hw->ring->is_jumbo_frm(skb->len, priv->plat->enh_desc)) {
-        entry = priv->hw->ring->jumbo_frm(priv, skb, csum_insertion);
-        desc = priv->dma_tx + entry;
+    /* To program the descriptors according to the size of the frame */
+    if (priv->mode == STMMAC_RING_MODE) {
+        is_jumbo = priv->hw->ring->is_jumbo_frm(skb->len,
+                                                priv->plat->enh_desc);
+        if (unlikely(is_jumbo))
+            entry = priv->hw->ring->jumbo_frm(priv, skb,
+                                              csum_insertion);
     } else {
+        is_jumbo = priv->hw->chain->is_jumbo_frm(skb->len,
+                                                 priv->plat->enh_desc);
+        if (unlikely(is_jumbo))
+            entry = priv->hw->chain->jumbo_frm(priv, skb,
+                                               csum_insertion);
+    }
+    if (likely(!is_jumbo)) {
         desc->des2 = dma_map_single(priv->device, skb->data,
                                     nopaged_len, DMA_TO_DEVICE);
         priv->hw->desc->prepare_tx_desc(desc, 1, nopaged_len,
-                                        csum_insertion);
-    }
+                                        csum_insertion, priv->mode);
+    } else
+        desc = priv->dma_tx + entry;
     for (i = 0; i < nfrags; i++) {
         const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
@@ -1257,7 +1281,8 @@ static netdev_tx_t stmmac_xmit(struct sk_buff *skb, struct net_device *dev)
         desc->des2 = skb_frag_dma_map(priv->device, frag, 0, len,
                                       DMA_TO_DEVICE);
         priv->tx_skbuff[entry] = NULL;
-        priv->hw->desc->prepare_tx_desc(desc, 0, len, csum_insertion);
+        priv->hw->desc->prepare_tx_desc(desc, 0, len, csum_insertion,
+                                        priv->mode);
         wmb();
         priv->hw->desc->set_tx_owner(desc);
         wmb();
@@ -1338,7 +1363,8 @@ static inline void stmmac_rx_refill(struct stmmac_priv *priv)
         (p + entry)->des2 = priv->rx_skbuff_dma[entry];
-        if (unlikely(priv->plat->has_gmac))
+        if (unlikely((priv->mode == STMMAC_RING_MODE) &&
+                     (priv->plat->has_gmac)))
             priv->hw->ring->refill_desc3(bfsize, p + entry);
         RX_DBG(KERN_INFO "\trefill entry #%d\n", entry);
@@ -1884,12 +1910,20 @@ static int stmmac_hw_init(struct stmmac_priv *priv)
     priv->hw = mac;
-    /* To use the chained or ring mode */
-    priv->hw->ring = &ring_mode_ops;
     /* Get and dump the chip ID */
     priv->synopsys_id = stmmac_get_synopsys_id(priv);
+    /* To use the chained or ring mode */
+    if (chain_mode) {
+        priv->hw->chain = &chain_mode_ops;
+        pr_info(" Chain mode enabled\n");
+        priv->mode = STMMAC_CHAIN_MODE;
+    } else {
+        priv->hw->ring = &ring_mode_ops;
+        pr_info(" Ring mode enabled\n");
+        priv->mode = STMMAC_RING_MODE;
+    }
     /* Get the HW capability (new GMAC newer than 3.50a) */
     priv->hw_cap_support = stmmac_get_hw_features(priv);
     if (priv->hw_cap_support) {
@@ -2109,8 +2143,9 @@ int stmmac_suspend(struct net_device *ndev)
     priv->hw->dma->stop_rx(priv->ioaddr);
     /* Clear the Rx/Tx descriptors */
     priv->hw->desc->init_rx_desc(priv->dma_rx, priv->dma_rx_size,
-                                 dis_ic);
-    priv->hw->desc->init_tx_desc(priv->dma_tx, priv->dma_tx_size);
+                                 dis_ic, priv->mode);
+    priv->hw->desc->init_tx_desc(priv->dma_tx, priv->dma_tx_size,
+                                 priv->mode);
     /* Enable Power down mode by programming the PMT regs */
     if (device_may_wakeup(priv->device))
@@ -2249,6 +2284,9 @@ static int __init stmmac_cmdline_opt(char *str)
         } else if (!strncmp(opt, "eee_timer:", 10)) {
             if (kstrtoint(opt + 10, 0, &eee_timer))
                 goto err;
+        } else if (!strncmp(opt, "chain_mode:", 11)) {
+            if (kstrtoint(opt + 11, 0, &chain_mode))
+                goto err;
         }
     }
     return 0;
...
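
The last hunk above wires "chain_mode:" into stmmac_cmdline_opt(), so built-in
(non-modular) configurations can also select the mode from the kernel command
line. A minimal sketch, assuming the driver's usual stmmaceth= boot parameter
is routed through this parser:

    # hypothetical kernel command line fragment
    stmmaceth=chain_mode:1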