Commit f49809fe authored by Linus Torvalds
parents cdbbde14 c1ef1f35
...@@ -1555,6 +1555,7 @@ config SIS900
tristate "SiS 900/7016 PCI Fast Ethernet Adapter support"
depends on NET_PCI && PCI
select CRC32
+select MII
---help---
This is a driver for the Fast Ethernet PCI network cards based on
the SiS 900 and SiS 7016 chips. The SiS 900 core is also embedded in
......
...@@ -81,6 +81,7 @@
* cause DMA to kfree'd memory.
* 0.31: 14 Nov 2004: ethtool support for getting/setting link
* capabilities.
+* 0.32: 16 Apr 2005: RX_ERROR4 handling added.
*
* Known bugs:
* We suspect that on some hardware no TX done interrupts are generated.
...@@ -92,7 +93,7 @@
* DEV_NEED_TIMERIRQ will not harm you on sane hardware, only generating a few
* superfluous timer interrupts from the nic.
*/
-#define FORCEDETH_VERSION "0.31"
+#define FORCEDETH_VERSION "0.32"
#define DRV_NAME "forcedeth"
#include <linux/module.h>
...@@ -109,6 +110,7 @@
#include <linux/mii.h>
#include <linux/random.h>
#include <linux/init.h>
+#include <linux/if_vlan.h>
#include <asm/irq.h>
#include <asm/io.h>
...@@ -1013,6 +1015,59 @@ static void nv_tx_timeout(struct net_device *dev)
spin_unlock_irq(&np->lock);
}
/*
* Called when the nic notices a mismatch between the actual data len on the
* wire and the len indicated in the 802 header
*/
static int nv_getlen(struct net_device *dev, void *packet, int datalen)
{
int hdrlen; /* length of the 802 header */
int protolen; /* length as stored in the proto field */
/* 1) calculate len according to header */
if ( ((struct vlan_ethhdr *)packet)->h_vlan_proto == __constant_htons(ETH_P_8021Q)) {
protolen = ntohs( ((struct vlan_ethhdr *)packet)->h_vlan_encapsulated_proto );
hdrlen = VLAN_HLEN;
} else {
protolen = ntohs( ((struct ethhdr *)packet)->h_proto);
hdrlen = ETH_HLEN;
}
dprintk(KERN_DEBUG "%s: nv_getlen: datalen %d, protolen %d, hdrlen %d\n",
dev->name, datalen, protolen, hdrlen);
if (protolen > ETH_DATA_LEN)
return datalen; /* Value in proto field not a len, no checks possible */
protolen += hdrlen;
/* consistency checks: */
if (datalen > ETH_ZLEN) {
if (datalen >= protolen) {
/* more data on wire than in 802 header, trim off
* additional data.
*/
dprintk(KERN_DEBUG "%s: nv_getlen: accepting %d bytes.\n",
dev->name, protolen);
return protolen;
} else {
/* less data on wire than mentioned in header.
* Discard the packet.
*/
dprintk(KERN_DEBUG "%s: nv_getlen: discarding long packet.\n",
dev->name);
return -1;
}
} else {
/* short packet. Accept only if 802 values are also short */
if (protolen > ETH_ZLEN) {
dprintk(KERN_DEBUG "%s: nv_getlen: discarding short packet.\n",
dev->name);
return -1;
}
dprintk(KERN_DEBUG "%s: nv_getlen: accepting %d bytes.\n",
dev->name, datalen);
return datalen;
}
}
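A few worked examples of the rules above, using hypothetical frames:
/* Worked examples (hypothetical frames):
 *
 *   datalen = 70, proto field = 0x0800 (IPv4 EtherType, > ETH_DATA_LEN)
 *       -> not a length field, no check possible, return 70 unchanged
 *
 *   datalen = 70, proto field = 46 (802.3 length), hdrlen = ETH_HLEN = 14
 *       -> protolen = 60 <= datalen, trailing padding is trimmed, return 60
 *
 *   datalen = 70, proto field = 100, hdrlen = 14
 *       -> protolen = 114 > datalen, the header claims more data than
 *          arrived, return -1 and the caller drops the frame
 */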
static void nv_rx_process(struct net_device *dev)
{
struct fe_priv *np = get_nvpriv(dev);
...@@ -1064,7 +1119,7 @@ static void nv_rx_process(struct net_device *dev)
np->stats.rx_errors++;
goto next_pkt;
}
-if (Flags & (NV_RX_ERROR1|NV_RX_ERROR2|NV_RX_ERROR3|NV_RX_ERROR4)) {
+if (Flags & (NV_RX_ERROR1|NV_RX_ERROR2|NV_RX_ERROR3)) {
np->stats.rx_errors++;
goto next_pkt;
}
...@@ -1078,22 +1133,24 @@ static void nv_rx_process(struct net_device *dev)
np->stats.rx_errors++;
goto next_pkt;
}
-if (Flags & NV_RX_ERROR) {
-/* framing errors are soft errors, the rest is fatal. */
-if (Flags & NV_RX_FRAMINGERR) {
-if (Flags & NV_RX_SUBSTRACT1) {
-len--;
-}
-} else {
-np->stats.rx_errors++;
-goto next_pkt;
-}
-}
+if (Flags & NV_RX_ERROR4) {
+len = nv_getlen(dev, np->rx_skbuff[i]->data, len);
+if (len < 0) {
+np->stats.rx_errors++;
+goto next_pkt;
+}
+}
+/* framing errors are soft errors. */
+if (Flags & NV_RX_FRAMINGERR) {
+if (Flags & NV_RX_SUBSTRACT1) {
+len--;
+}
+}
} else {
if (!(Flags & NV_RX2_DESCRIPTORVALID))
goto next_pkt;
-if (Flags & (NV_RX2_ERROR1|NV_RX2_ERROR2|NV_RX2_ERROR3|NV_RX2_ERROR4)) {
+if (Flags & (NV_RX2_ERROR1|NV_RX2_ERROR2|NV_RX2_ERROR3)) {
np->stats.rx_errors++;
goto next_pkt;
}
...@@ -1107,17 +1164,19 @@ static void nv_rx_process(struct net_device *dev)
np->stats.rx_errors++;
goto next_pkt;
}
-if (Flags & NV_RX2_ERROR) {
-/* framing errors are soft errors, the rest is fatal. */
-if (Flags & NV_RX2_FRAMINGERR) {
-if (Flags & NV_RX2_SUBSTRACT1) {
-len--;
-}
-} else {
-np->stats.rx_errors++;
-goto next_pkt;
-}
-}
+if (Flags & NV_RX2_ERROR4) {
+len = nv_getlen(dev, np->rx_skbuff[i]->data, len);
+if (len < 0) {
+np->stats.rx_errors++;
+goto next_pkt;
+}
+}
+/* framing errors are soft errors */
+if (Flags & NV_RX2_FRAMINGERR) {
+if (Flags & NV_RX2_SUBSTRACT1) {
+len--;
+}
+}
Flags &= NV_RX2_CHECKSUMMASK;
if (Flags == NV_RX2_CHECKSUMOK1 ||
Flags == NV_RX2_CHECKSUMOK2 ||
...@@ -1480,6 +1539,13 @@ static void nv_do_nic_poll(unsigned long data)
enable_irq(dev->irq);
}
#ifdef CONFIG_NET_POLL_CONTROLLER
static void nv_poll_controller(struct net_device *dev)
{
nv_do_nic_poll((unsigned long) dev);
}
#endif
static void nv_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
{
struct fe_priv *np = get_nvpriv(dev);
...@@ -1962,6 +2028,9 @@ static int __devinit nv_probe(struct pci_dev *pci_dev, const struct pci_device_i
dev->get_stats = nv_get_stats;
dev->change_mtu = nv_change_mtu;
dev->set_multicast_list = nv_set_multicast;
+#ifdef CONFIG_NET_POLL_CONTROLLER
+dev->poll_controller = nv_poll_controller;
+#endif
SET_ETHTOOL_OPS(dev, &ops);
dev->tx_timeout = nv_tx_timeout;
dev->watchdog_timeo = NV_WATCHDOG_TIMEO;
......
...@@ -924,7 +924,7 @@ static int veth_transmit_to_one(struct sk_buff *skb, HvLpIndex rlp,
spin_lock_irqsave(&cnx->lock, flags);
-if (! cnx->state & VETH_STATE_READY)
+if (! (cnx->state & VETH_STATE_READY))
goto drop;
if ((skb->len - 14) > VETH_MAX_MTU)
...@@ -1023,6 +1023,8 @@ static int veth_start_xmit(struct sk_buff *skb, struct net_device *dev)
lpmask = veth_transmit_to_many(skb, lpmask, dev);
+dev->trans_start = jiffies;
if (! lpmask) {
dev_kfree_skb(skb);
} else {
...@@ -1262,13 +1264,18 @@ static void veth_receive(struct veth_lpar_connection *cnx,
vlan = skb->data[9];
dev = veth_dev[vlan];
-if (! dev)
-/* Some earlier versions of the driver sent
-broadcasts down all connections, even to
-lpars that weren't on the relevant vlan.
-So ignore packets belonging to a vlan we're
-not on. */
+if (! dev) {
+/*
+* Some earlier versions of the driver sent
+* broadcasts down all connections, even to lpars
+* that weren't on the relevant vlan. So ignore
+* packets belonging to a vlan we're not on.
+* We can also be here if we receive packets while
+* the driver is going down, because then dev is NULL.
+*/
+dev_kfree_skb_irq(skb);
continue;
+}
port = (struct veth_port *)dev->priv;
dest = *((u64 *) skb->data) & 0xFFFFFFFFFFFF0000;
...@@ -1381,18 +1388,25 @@ void __exit veth_module_cleanup(void)
{
int i;
-vio_unregister_driver(&veth_driver);
+/* Stop the queues first to stop any new packets being sent. */
+for (i = 0; i < HVMAXARCHITECTEDVIRTUALLANS; i++)
+if (veth_dev[i])
+netif_stop_queue(veth_dev[i]);
+/* Stop the connections before we unregister the driver. This
+* ensures there's no skbs lying around holding the device open. */
for (i = 0; i < HVMAXARCHITECTEDLPS; ++i)
veth_stop_connection(i);
HvLpEvent_unregisterHandler(HvLpEvent_Type_VirtualLan);
/* Hypervisor callbacks may have scheduled more work while we
-* were destroying connections. Now that we've disconnected from
+* were stopping connections. Now that we've disconnected from
* the hypervisor make sure everything's finished. */
flush_scheduled_work();
+vio_unregister_driver(&veth_driver);
for (i = 0; i < HVMAXARCHITECTEDLPS; ++i)
veth_destroy_connection(i);
......
...@@ -2433,9 +2433,9 @@ static void __set_rx_mode(struct net_device *dev)
rx_mode = RxFilterEnable | AcceptBroadcast
| AcceptMulticast | AcceptMyPhys;
for (i = 0; i < 64; i += 2) {
-writew(HASH_TABLE + i, ioaddr + RxFilterAddr);
-writew((mc_filter[i+1]<<8) + mc_filter[i],
+writel(HASH_TABLE + i, ioaddr + RxFilterAddr);
+writel((mc_filter[i + 1] << 8) + mc_filter[i],
ioaddr + RxFilterData);
}
}
writel(rx_mode, ioaddr + RxFilterAddr);
......
-#define _VERSION "0.20"
+#define VERSION "0.22"
/* ns83820.c by Benjamin LaHaise with contributions.
*
* Questions/comments/discussion to linux-ns83820@kvack.org.
...@@ -63,9 +63,11 @@
* - fix missed txok introduced during performance
* tuning
* 0.20 - fix stupid RFEN thinko. i am such a smurf.
-*
* 20040828 0.21 - add hardware vlan accleration
* by Neil Horman <nhorman@redhat.com>
+* 20050406 0.22 - improved DAC ifdefs from Andi Kleen
+* - removal of dead code from Adrian Bunk
+* - fix half duplex collision behaviour
* Driver Overview
* ===============
*
...@@ -129,18 +131,6 @@ static int lnksts = 0; /* CFG_LNKSTS bit polarity */
#undef Dprintk
#define Dprintk dprintk
#if defined(CONFIG_HIGHMEM64G) || defined(__ia64__)
#define USE_64BIT_ADDR "+"
#endif
#if defined(USE_64BIT_ADDR)
#define VERSION _VERSION USE_64BIT_ADDR
#define TRY_DAC 1
#else
#define VERSION _VERSION
#define TRY_DAC 0
#endif
/* tunables */
#define RX_BUF_SIZE 1500 /* 8192 */
#if defined(CONFIG_VLAN_8021Q) || defined(CONFIG_VLAN_8021Q_MODULE)
...@@ -386,22 +376,16 @@ static int lnksts = 0; /* CFG_LNKSTS bit polarity */
#define LINK_DOWN 0x02
#define LINK_UP 0x04
-#ifdef USE_64BIT_ADDR
-#define HW_ADDR_LEN 8
-#define desc_addr_set(desc, addr) \
-do { \
-u64 __addr = (addr); \
-(desc)[0] = cpu_to_le32(__addr); \
-(desc)[1] = cpu_to_le32(__addr >> 32); \
-} while(0)
-#define desc_addr_get(desc) \
-(((u64)le32_to_cpu((desc)[1]) << 32) \
-| le32_to_cpu((desc)[0]))
-#else
-#define HW_ADDR_LEN 4
-#define desc_addr_set(desc, addr) ((desc)[0] = cpu_to_le32(addr))
-#define desc_addr_get(desc) (le32_to_cpu((desc)[0]))
-#endif
+#define HW_ADDR_LEN sizeof(dma_addr_t)
+#define desc_addr_set(desc, addr) \
+do { \
+((desc)[0] = cpu_to_le32(addr)); \
+if (HW_ADDR_LEN == 8) \
+(desc)[1] = cpu_to_le32(((u64)addr) >> 32); \
+} while(0)
+#define desc_addr_get(desc) \
+(le32_to_cpu((desc)[0]) | \
+(HW_ADDR_LEN == 8 ? ((dma_addr_t)le32_to_cpu((desc)[1]))<<32 : 0))
#define DESC_LINK 0
#define DESC_BUFPTR (DESC_LINK + HW_ADDR_LEN/4)
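The new macros fold the old #ifdef USE_64BIT_ADDR variants into one definition keyed on sizeof(dma_addr_t). A standalone sketch (plain C, little-endian host assumed so cpu_to_le32() would be the identity) of the split and round trip they perform; none of this is driver code:

#include <stdint.h>
#include <stdio.h>

typedef uint64_t dma_addr_t;	/* stand-in for the kernel type */

int main(void)
{
	uint32_t desc[2] = { 0, 0 };
	dma_addr_t addr = 0x123456789aULL;

	desc[0] = (uint32_t)addr;			/* low 32 bits  */
	if (sizeof(dma_addr_t) == 8)
		desc[1] = (uint32_t)(addr >> 32);	/* high 32 bits */

	dma_addr_t back = desc[0] |
		(sizeof(dma_addr_t) == 8 ? (dma_addr_t)desc[1] << 32 : 0);

	printf("desc[0]=%#x desc[1]=%#x roundtrip=%#llx\n",
	       desc[0], desc[1], (unsigned long long)back);
	return 0;
}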
...@@ -727,11 +711,23 @@ static void fastcall phy_intr(struct net_device *ndev) ...@@ -727,11 +711,23 @@ static void fastcall phy_intr(struct net_device *ndev)
speed = ((cfg / CFG_SPDSTS0) & 3);
fullduplex = (cfg & CFG_DUPSTS);
-if (fullduplex)
+if (fullduplex) {
new_cfg |= CFG_SB;
+writel(readl(dev->base + TXCFG)
+| TXCFG_CSI | TXCFG_HBI,
+dev->base + TXCFG);
+writel(readl(dev->base + RXCFG) | RXCFG_RX_FD,
+dev->base + RXCFG);
+} else {
+writel(readl(dev->base + TXCFG)
+& ~(TXCFG_CSI | TXCFG_HBI),
+dev->base + TXCFG);
+writel(readl(dev->base + RXCFG) & ~(RXCFG_RX_FD),
+dev->base + RXCFG);
+}
if ((cfg & CFG_LNKSTS) &&
-((new_cfg ^ dev->CFG_cache) & CFG_MODE_1000)) {
+((new_cfg ^ dev->CFG_cache) != 0)) {
writel(new_cfg, dev->base + CFG);
dev->CFG_cache = new_cfg;
}
...@@ -1189,7 +1185,6 @@ static int ns83820_hard_start_xmit(struct sk_buff *skb, struct net_device *ndev) ...@@ -1189,7 +1185,6 @@ static int ns83820_hard_start_xmit(struct sk_buff *skb, struct net_device *ndev)
for (;;) { for (;;) {
volatile u32 *desc = dev->tx_descs + (free_idx * DESC_SIZE); volatile u32 *desc = dev->tx_descs + (free_idx * DESC_SIZE);
u32 residue = 0;
dprintk("frag[%3u]: %4u @ 0x%08Lx\n", free_idx, len, dprintk("frag[%3u]: %4u @ 0x%08Lx\n", free_idx, len,
(unsigned long long)buf); (unsigned long long)buf);
...@@ -1199,17 +1194,11 @@ static int ns83820_hard_start_xmit(struct sk_buff *skb, struct net_device *ndev) ...@@ -1199,17 +1194,11 @@ static int ns83820_hard_start_xmit(struct sk_buff *skb, struct net_device *ndev)
desc_addr_set(desc + DESC_BUFPTR, buf); desc_addr_set(desc + DESC_BUFPTR, buf);
desc[DESC_EXTSTS] = cpu_to_le32(extsts); desc[DESC_EXTSTS] = cpu_to_le32(extsts);
-cmdsts = ((nr_frags|residue) ? CMDSTS_MORE : do_intr ? CMDSTS_INTR : 0);
+cmdsts = ((nr_frags) ? CMDSTS_MORE : do_intr ? CMDSTS_INTR : 0);
cmdsts |= (desc == first_desc) ? 0 : CMDSTS_OWN; cmdsts |= (desc == first_desc) ? 0 : CMDSTS_OWN;
cmdsts |= len; cmdsts |= len;
desc[DESC_CMDSTS] = cpu_to_le32(cmdsts); desc[DESC_CMDSTS] = cpu_to_le32(cmdsts);
if (residue) {
buf += len;
len = residue;
continue;
}
if (!nr_frags) if (!nr_frags)
break; break;
...@@ -1841,7 +1830,8 @@ static int __devinit ns83820_init_one(struct pci_dev *pci_dev, const struct pci_ ...@@ -1841,7 +1830,8 @@ static int __devinit ns83820_init_one(struct pci_dev *pci_dev, const struct pci_
int using_dac = 0;
/* See if we can set the dma mask early on; failure is fatal. */
-if (TRY_DAC && !pci_set_dma_mask(pci_dev, 0xffffffffffffffffULL)) {
+if (sizeof(dma_addr_t) == 8 &&
+!pci_set_dma_mask(pci_dev, 0xffffffffffffffffULL)) {
using_dac = 1;
} else if (!pci_set_dma_mask(pci_dev, 0xffffffff)) {
using_dac = 0;
...@@ -1972,9 +1962,8 @@ static int __devinit ns83820_init_one(struct pci_dev *pci_dev, const struct pci_
/* When compiled with 64 bit addressing, we must always enable
* the 64 bit descriptor format.
*/
-#ifdef USE_64BIT_ADDR
+if (sizeof(dma_addr_t) == 8)
dev->CFG_cache |= CFG_M64ADDR;
-#endif
if (using_dac)
dev->CFG_cache |= CFG_T64ADDR;
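Because sizeof(dma_addr_t) is a compile-time constant, the compiler simply drops the 64-bit branch on 32-bit builds, which is what the removed USE_64BIT_ADDR/TRY_DAC defines achieved with the preprocessor. A hedged sketch of the same probe-then-fall-back idiom in isolation (error path simplified, not the driver's exact code):

static int example_set_dma_mask(struct pci_dev *pci_dev, int *using_dac)
{
	if (sizeof(dma_addr_t) == 8 &&
	    !pci_set_dma_mask(pci_dev, 0xffffffffffffffffULL)) {
		*using_dac = 1;		/* 64-bit DAC addressing is usable */
	} else if (!pci_set_dma_mask(pci_dev, 0xffffffffULL)) {
		*using_dac = 0;		/* fall back to 32-bit addressing */
	} else {
		return -ENODEV;		/* no usable DMA mask at all */
	}
	return 0;
}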
......
...@@ -162,6 +162,7 @@ struct sis900_private { ...@@ -162,6 +162,7 @@ struct sis900_private {
struct mii_phy * mii; struct mii_phy * mii;
struct mii_phy * first_mii; /* record the first mii structure */ struct mii_phy * first_mii; /* record the first mii structure */
unsigned int cur_phy; unsigned int cur_phy;
struct mii_if_info mii_info;
struct timer_list timer; /* Link status detection timer. */ struct timer_list timer; /* Link status detection timer. */
u8 autong_complete; /* 1: auto-negotiate complete */ u8 autong_complete; /* 1: auto-negotiate complete */
...@@ -203,7 +204,7 @@ static int sis900_open(struct net_device *net_dev); ...@@ -203,7 +204,7 @@ static int sis900_open(struct net_device *net_dev);
static int sis900_mii_probe (struct net_device * net_dev); static int sis900_mii_probe (struct net_device * net_dev);
static void sis900_init_rxfilter (struct net_device * net_dev); static void sis900_init_rxfilter (struct net_device * net_dev);
static u16 read_eeprom(long ioaddr, int location); static u16 read_eeprom(long ioaddr, int location);
-static u16 mdio_read(struct net_device *net_dev, int phy_id, int location);
+static int mdio_read(struct net_device *net_dev, int phy_id, int location);
static void mdio_write(struct net_device *net_dev, int phy_id, int location, int val); static void mdio_write(struct net_device *net_dev, int phy_id, int location, int val);
static void sis900_timer(unsigned long data); static void sis900_timer(unsigned long data);
static void sis900_check_mode (struct net_device *net_dev, struct mii_phy *mii_phy); static void sis900_check_mode (struct net_device *net_dev, struct mii_phy *mii_phy);
...@@ -478,7 +479,13 @@ static int __devinit sis900_probe(struct pci_dev *pci_dev, ...@@ -478,7 +479,13 @@ static int __devinit sis900_probe(struct pci_dev *pci_dev,
sis_priv->msg_enable = sis900_debug; sis_priv->msg_enable = sis900_debug;
else else
sis_priv->msg_enable = SIS900_DEF_MSG; sis_priv->msg_enable = SIS900_DEF_MSG;
sis_priv->mii_info.dev = net_dev;
sis_priv->mii_info.mdio_read = mdio_read;
sis_priv->mii_info.mdio_write = mdio_write;
sis_priv->mii_info.phy_id_mask = 0x1f;
sis_priv->mii_info.reg_num_mask = 0x1f;
/* Get Mac address according to the chip revision */ /* Get Mac address according to the chip revision */
pci_read_config_byte(pci_dev, PCI_CLASS_REVISION, &(sis_priv->chipset_rev)); pci_read_config_byte(pci_dev, PCI_CLASS_REVISION, &(sis_priv->chipset_rev));
if(netif_msg_probe(sis_priv)) if(netif_msg_probe(sis_priv))
...@@ -725,6 +732,8 @@ static u16 sis900_default_phy(struct net_device * net_dev) ...@@ -725,6 +732,8 @@ static u16 sis900_default_phy(struct net_device * net_dev)
pci_name(sis_priv->pci_dev), sis_priv->cur_phy); pci_name(sis_priv->pci_dev), sis_priv->cur_phy);
} }
sis_priv->mii_info.phy_id = sis_priv->cur_phy;
status = mdio_read(net_dev, sis_priv->cur_phy, MII_CONTROL); status = mdio_read(net_dev, sis_priv->cur_phy, MII_CONTROL);
status &= (~MII_CNTL_ISOLATE); status &= (~MII_CNTL_ISOLATE);
...@@ -852,7 +861,7 @@ static void mdio_reset(long mdio_addr) ...@@ -852,7 +861,7 @@ static void mdio_reset(long mdio_addr)
* Please see SiS7014 or ICS spec * Please see SiS7014 or ICS spec
*/ */
-static u16 mdio_read(struct net_device *net_dev, int phy_id, int location)
+static int mdio_read(struct net_device *net_dev, int phy_id, int location)
{ {
long mdio_addr = net_dev->base_addr + mear; long mdio_addr = net_dev->base_addr + mear;
int mii_cmd = MIIread|(phy_id<<MIIpmdShift)|(location<<MIIregShift); int mii_cmd = MIIread|(phy_id<<MIIpmdShift)|(location<<MIIregShift);
...@@ -1966,10 +1975,47 @@ static void sis900_set_msglevel(struct net_device *net_dev, u32 value) ...@@ -1966,10 +1975,47 @@ static void sis900_set_msglevel(struct net_device *net_dev, u32 value)
sis_priv->msg_enable = value; sis_priv->msg_enable = value;
} }
static u32 sis900_get_link(struct net_device *net_dev)
{
struct sis900_private *sis_priv = net_dev->priv;
return mii_link_ok(&sis_priv->mii_info);
}
static int sis900_get_settings(struct net_device *net_dev,
struct ethtool_cmd *cmd)
{
struct sis900_private *sis_priv = net_dev->priv;
spin_lock_irq(&sis_priv->lock);
mii_ethtool_gset(&sis_priv->mii_info, cmd);
spin_unlock_irq(&sis_priv->lock);
return 0;
}
static int sis900_set_settings(struct net_device *net_dev,
struct ethtool_cmd *cmd)
{
struct sis900_private *sis_priv = net_dev->priv;
int rt;
spin_lock_irq(&sis_priv->lock);
rt = mii_ethtool_sset(&sis_priv->mii_info, cmd);
spin_unlock_irq(&sis_priv->lock);
return rt;
}
static int sis900_nway_reset(struct net_device *net_dev)
{
struct sis900_private *sis_priv = net_dev->priv;
return mii_nway_restart(&sis_priv->mii_info);
}
static struct ethtool_ops sis900_ethtool_ops = {
.get_drvinfo = sis900_get_drvinfo,
.get_msglevel = sis900_get_msglevel,
.set_msglevel = sis900_set_msglevel,
+.get_link = sis900_get_link,
+.get_settings = sis900_get_settings,
+.set_settings = sis900_set_settings,
+.nway_reset = sis900_nway_reset,
};
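The new ethtool handlers only work because the probe code above fills a struct mii_if_info (phy_id, masks, mdio_read/mdio_write callbacks) and mdio_read() now returns int; the generic MII helpers require exactly those callback signatures. An abridged sketch of the relevant fields of struct mii_if_info from <linux/mii.h>, written from memory, so treat it as an approximation:

struct mii_if_info {
	int phy_id;
	int phy_id_mask;
	int reg_num_mask;
	struct net_device *dev;
	/* callbacks must use exactly these prototypes, hence the change
	 * of this driver's mdio_read() from returning u16 to int */
	int  (*mdio_read) (struct net_device *dev, int phy_id, int location);
	void (*mdio_write) (struct net_device *dev, int phy_id, int location, int val);
};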
/** /**
......
...@@ -193,6 +193,12 @@ static int aui[MAX_TLAN_BOARDS]; ...@@ -193,6 +193,12 @@ static int aui[MAX_TLAN_BOARDS];
static int duplex[MAX_TLAN_BOARDS]; static int duplex[MAX_TLAN_BOARDS];
static int speed[MAX_TLAN_BOARDS]; static int speed[MAX_TLAN_BOARDS];
static int boards_found; static int boards_found;
module_param_array(aui, int, NULL, 0);
module_param_array(duplex, int, NULL, 0);
module_param_array(speed, int, NULL, 0);
MODULE_PARM_DESC(aui, "ThunderLAN use AUI port(s) (0-1)");
MODULE_PARM_DESC(duplex, "ThunderLAN duplex setting(s) (0-default, 1-half, 2-full)");
MODULE_PARM_DESC(speed, "ThunderLAN port speed setting(s) (0,10,100)");
MODULE_AUTHOR("Maintainer: Samuel Chessman <chessman@tux.org>"); MODULE_AUTHOR("Maintainer: Samuel Chessman <chessman@tux.org>");
MODULE_DESCRIPTION("Driver for TI ThunderLAN based ethernet PCI adapters"); MODULE_DESCRIPTION("Driver for TI ThunderLAN based ethernet PCI adapters");
...@@ -204,8 +210,13 @@ MODULE_LICENSE("GPL"); ...@@ -204,8 +210,13 @@ MODULE_LICENSE("GPL");
/* Turn on debugging. See Documentation/networking/tlan.txt for details */ /* Turn on debugging. See Documentation/networking/tlan.txt for details */
static int debug; static int debug;
module_param(debug, int, 0);
MODULE_PARM_DESC(debug, "ThunderLAN debug mask");
static int bbuf; static int bbuf;
module_param(bbuf, int, 0);
MODULE_PARM_DESC(bbuf, "ThunderLAN use big buffer (0-1)");
static u8 *TLanPadBuffer; static u8 *TLanPadBuffer;
static dma_addr_t TLanPadBufferDMA; static dma_addr_t TLanPadBufferDMA;
static char TLanSignature[] = "TLAN"; static char TLanSignature[] = "TLAN";
...@@ -2381,6 +2392,7 @@ TLan_FinishReset( struct net_device *dev ) ...@@ -2381,6 +2392,7 @@ TLan_FinishReset( struct net_device *dev )
TLan_SetTimer( dev, (10*HZ), TLAN_TIMER_FINISH_RESET );
return;
}
+TLan_SetMulticastList(dev);
} /* TLan_FinishReset */
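With the module parameters registered above, per-adapter settings can be passed at load time; a hypothetical invocation for two boards (values chosen purely for illustration):

modprobe tlan speed=100,10 duplex=2,1 debug=0x1 bbuf=1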
......
...@@ -321,6 +321,7 @@ static struct { ...@@ -321,6 +321,7 @@ static struct {
{ 0x01bf, 0x3302, NULL, ATMEL_FW_TYPE_502E, "Belkin F5D6020-V2" }, { 0x01bf, 0x3302, NULL, ATMEL_FW_TYPE_502E, "Belkin F5D6020-V2" },
{ 0, 0, "BT/Voyager 1020 Laptop Adapter", ATMEL_FW_TYPE_502, "BT Voyager 1020" }, { 0, 0, "BT/Voyager 1020 Laptop Adapter", ATMEL_FW_TYPE_502, "BT Voyager 1020" },
{ 0, 0, "IEEE 802.11b/Wireless LAN PC Card", ATMEL_FW_TYPE_502, "Siemens Gigaset PC Card II" }, { 0, 0, "IEEE 802.11b/Wireless LAN PC Card", ATMEL_FW_TYPE_502, "Siemens Gigaset PC Card II" },
{ 0, 0, "IEEE 802.11b/Wireless LAN Card S", ATMEL_FW_TYPE_504_2958, "Siemens Gigaset PC Card II" },
{ 0, 0, "CNet/CNWLC 11Mbps Wireless PC Card V-5", ATMEL_FW_TYPE_502E, "CNet CNWLC-811ARL" }, { 0, 0, "CNet/CNWLC 11Mbps Wireless PC Card V-5", ATMEL_FW_TYPE_502E, "CNet CNWLC-811ARL" },
{ 0, 0, "Wireless/PC_CARD", ATMEL_FW_TYPE_502D, "Planet WL-3552" }, { 0, 0, "Wireless/PC_CARD", ATMEL_FW_TYPE_502D, "Planet WL-3552" },
{ 0, 0, "OEM/11Mbps Wireless LAN PC Card V-3", ATMEL_FW_TYPE_502, "OEM 11Mbps WLAN PCMCIA Card" }, { 0, 0, "OEM/11Mbps Wireless LAN PC Card V-3", ATMEL_FW_TYPE_502, "OEM 11Mbps WLAN PCMCIA Card" },
......
...@@ -9,6 +9,7 @@ obj-$(CONFIG_NETIUCV) += netiucv.o fsm.o
obj-$(CONFIG_SMSGIUCV) += smsgiucv.o
obj-$(CONFIG_CTC) += ctc.o fsm.o cu3088.o
obj-$(CONFIG_LCS) += lcs.o cu3088.o
+obj-$(CONFIG_CLAW) += claw.o cu3088.o
-qeth-y := qeth_main.o qeth_mpc.o qeth_sys.o qeth_eddp.o qeth_tso.o
+qeth-y := qeth_main.o qeth_mpc.o qeth_sys.o qeth_eddp.o
qeth-$(CONFIG_PROC_FS) += qeth_proc.o
obj-$(CONFIG_QETH) += qeth.o
/* /*
* *
-* linux/drivers/s390/net/ctcdbug.h ($Revision: 1.4 $)
+* linux/drivers/s390/net/ctcdbug.h ($Revision: 1.5 $)
* *
* CTC / ESCON network driver - s390 dbf exploit. * CTC / ESCON network driver - s390 dbf exploit.
* *
...@@ -9,7 +9,7 @@ ...@@ -9,7 +9,7 @@
* Author(s): Original Code written by * Author(s): Original Code written by
* Peter Tiedemann (ptiedem@de.ibm.com) * Peter Tiedemann (ptiedem@de.ibm.com)
* *
-* $Revision: 1.4 $ $Date: 2004/10/15 09:26:58 $
+* $Revision: 1.5 $ $Date: 2005/02/27 19:46:44 $
* *
* This program is free software; you can redistribute it and/or modify * This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by * it under the terms of the GNU General Public License as published by
...@@ -25,9 +25,11 @@ ...@@ -25,9 +25,11 @@
* along with this program; if not, write to the Free Software * along with this program; if not, write to the Free Software
* Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*/ */
#ifndef _CTCDBUG_H_
#define _CTCDBUG_H_
#include <asm/debug.h> #include <asm/debug.h>
#include "ctcmain.h"
/** /**
* Debug Facility stuff * Debug Facility stuff
*/ */
...@@ -41,7 +43,7 @@ ...@@ -41,7 +43,7 @@
#define CTC_DBF_DATA_LEN 128 #define CTC_DBF_DATA_LEN 128
#define CTC_DBF_DATA_INDEX 3 #define CTC_DBF_DATA_INDEX 3
#define CTC_DBF_DATA_NR_AREAS 1 #define CTC_DBF_DATA_NR_AREAS 1
-#define CTC_DBF_DATA_LEVEL 2
+#define CTC_DBF_DATA_LEVEL 3
#define CTC_DBF_TRACE_NAME "ctc_trace" #define CTC_DBF_TRACE_NAME "ctc_trace"
#define CTC_DBF_TRACE_LEN 16 #define CTC_DBF_TRACE_LEN 16
...@@ -121,3 +123,5 @@ hex_dump(unsigned char *buf, size_t len) ...@@ -121,3 +123,5 @@ hex_dump(unsigned char *buf, size_t len)
printk("\n"); printk("\n");
} }
#endif
/*
* $Id: ctcmain.h,v 1.4 2005/03/24 09:04:17 mschwide Exp $
*
* CTC / ESCON network driver
*
* Copyright (C) 2001 IBM Deutschland Entwicklung GmbH, IBM Corporation
* Author(s): Fritz Elfert (elfert@de.ibm.com, felfert@millenux.com)
Peter Tiedemann (ptiedem@de.ibm.com)
*
*
* Documentation used:
* - Principles of Operation (IBM doc#: SA22-7201-06)
* - Common IO/-Device Commands and Self Description (IBM doc#: SA22-7204-02)
* - Common IO/-Device Commands and Self Description (IBM doc#: SN22-5535)
* - ESCON Channel-to-Channel Adapter (IBM doc#: SA22-7203-00)
* - ESCON I/O Interface (IBM doc#: SA22-7202-029
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2, or (at your option)
* any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*
* RELEASE-TAG: CTC/ESCON network driver $Revision: 1.4 $
*
*/
#ifndef _CTCMAIN_H_
#define _CTCMAIN_H_
#include <asm/ccwdev.h>
#include <asm/ccwgroup.h>
#include "ctctty.h"
#include "fsm.h"
#include "cu3088.h"
/**
* CCW commands, used in this driver.
*/
#define CCW_CMD_WRITE 0x01
#define CCW_CMD_READ 0x02
#define CCW_CMD_SET_EXTENDED 0xc3
#define CCW_CMD_PREPARE 0xe3
#define CTC_PROTO_S390 0
#define CTC_PROTO_LINUX 1
#define CTC_PROTO_LINUX_TTY 2
#define CTC_PROTO_OS390 3
#define CTC_PROTO_MAX 3
#define CTC_BUFSIZE_LIMIT 65535
#define CTC_BUFSIZE_DEFAULT 32768
#define CTC_TIMEOUT_5SEC 5000
#define CTC_INITIAL_BLOCKLEN 2
#define READ 0
#define WRITE 1
#define CTC_ID_SIZE BUS_ID_SIZE+3
struct ctc_profile {
unsigned long maxmulti;
unsigned long maxcqueue;
unsigned long doios_single;
unsigned long doios_multi;
unsigned long txlen;
unsigned long tx_time;
struct timespec send_stamp;
};
/**
* Definition of one channel
*/
struct channel {
/**
* Pointer to next channel in list.
*/
struct channel *next;
char id[CTC_ID_SIZE];
struct ccw_device *cdev;
/**
* Type of this channel.
* CTC/A or Escon for valid channels.
*/
enum channel_types type;
/**
* Misc. flags. See CHANNEL_FLAGS_... below
*/
__u32 flags;
/**
* The protocol of this channel
*/
__u16 protocol;
/**
* I/O and irq related stuff
*/
struct ccw1 *ccw;
struct irb *irb;
/**
* RX/TX buffer size
*/
int max_bufsize;
/**
* Transmit/Receive buffer.
*/
struct sk_buff *trans_skb;
/**
* Universal I/O queue.
*/
struct sk_buff_head io_queue;
/**
* TX queue for collecting skb's during busy.
*/
struct sk_buff_head collect_queue;
/**
* Amount of data in collect_queue.
*/
int collect_len;
/**
* spinlock for collect_queue and collect_len
*/
spinlock_t collect_lock;
/**
* Timer for detecting unresponsive
* I/O operations.
*/
fsm_timer timer;
/**
* Retry counter for misc. operations.
*/
int retry;
/**
* The finite state machine of this channel
*/
fsm_instance *fsm;
/**
* The corresponding net_device this channel
* belongs to.
*/
struct net_device *netdev;
struct ctc_profile prof;
unsigned char *trans_skb_data;
__u16 logflags;
};
#define CHANNEL_FLAGS_READ 0
#define CHANNEL_FLAGS_WRITE 1
#define CHANNEL_FLAGS_INUSE 2
#define CHANNEL_FLAGS_BUFSIZE_CHANGED 4
#define CHANNEL_FLAGS_FAILED 8
#define CHANNEL_FLAGS_WAITIRQ 16
#define CHANNEL_FLAGS_RWMASK 1
#define CHANNEL_DIRECTION(f) (f & CHANNEL_FLAGS_RWMASK)
#define LOG_FLAG_ILLEGALPKT 1
#define LOG_FLAG_ILLEGALSIZE 2
#define LOG_FLAG_OVERRUN 4
#define LOG_FLAG_NOMEM 8
#define CTC_LOGLEVEL_INFO 1
#define CTC_LOGLEVEL_NOTICE 2
#define CTC_LOGLEVEL_WARN 4
#define CTC_LOGLEVEL_EMERG 8
#define CTC_LOGLEVEL_ERR 16
#define CTC_LOGLEVEL_DEBUG 32
#define CTC_LOGLEVEL_CRIT 64
#define CTC_LOGLEVEL_DEFAULT \
(CTC_LOGLEVEL_INFO | CTC_LOGLEVEL_NOTICE | CTC_LOGLEVEL_WARN | CTC_LOGLEVEL_CRIT)
#define CTC_LOGLEVEL_MAX ((CTC_LOGLEVEL_CRIT<<1)-1)
#define ctc_pr_debug(fmt, arg...) \
do { if (loglevel & CTC_LOGLEVEL_DEBUG) printk(KERN_DEBUG fmt,##arg); } while (0)
#define ctc_pr_info(fmt, arg...) \
do { if (loglevel & CTC_LOGLEVEL_INFO) printk(KERN_INFO fmt,##arg); } while (0)
#define ctc_pr_notice(fmt, arg...) \
do { if (loglevel & CTC_LOGLEVEL_NOTICE) printk(KERN_NOTICE fmt,##arg); } while (0)
#define ctc_pr_warn(fmt, arg...) \
do { if (loglevel & CTC_LOGLEVEL_WARN) printk(KERN_WARNING fmt,##arg); } while (0)
#define ctc_pr_emerg(fmt, arg...) \
do { if (loglevel & CTC_LOGLEVEL_EMERG) printk(KERN_EMERG fmt,##arg); } while (0)
#define ctc_pr_err(fmt, arg...) \
do { if (loglevel & CTC_LOGLEVEL_ERR) printk(KERN_ERR fmt,##arg); } while (0)
#define ctc_pr_crit(fmt, arg...) \
do { if (loglevel & CTC_LOGLEVEL_CRIT) printk(KERN_CRIT fmt,##arg); } while (0)
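A minimal usage sketch of the logging macros above; 'loglevel' is the driver's global mask defined elsewhere in ctcmain.c and is assumed here to still hold CTC_LOGLEVEL_DEFAULT:

static void example_log(struct net_device *dev)
{
	/* INFO is part of the default mask, so this reaches printk() */
	ctc_pr_info("%s: link up\n", dev->name);
	/* DEBUG is not, so this only evaluates the mask test and is skipped */
	ctc_pr_debug("%s: dumping CCWs\n", dev->name);
}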
struct ctc_priv {
struct net_device_stats stats;
unsigned long tbusy;
/**
* The finite state machine of this interface.
*/
fsm_instance *fsm;
/**
* The protocol of this device
*/
__u16 protocol;
/**
* Timer for restarting after I/O Errors
*/
fsm_timer restart_timer;
int buffer_size;
struct channel *channel[2];
};
/**
* Definition of our link level header.
*/
struct ll_header {
__u16 length;
__u16 type;
__u16 unused;
};
#define LL_HEADER_LENGTH (sizeof(struct ll_header))
/**
* Compatibility macros for busy handling
* of network devices.
*/
static __inline__ void
ctc_clear_busy(struct net_device * dev)
{
clear_bit(0, &(((struct ctc_priv *) dev->priv)->tbusy));
if (((struct ctc_priv *)dev->priv)->protocol != CTC_PROTO_LINUX_TTY)
netif_wake_queue(dev);
}
static __inline__ int
ctc_test_and_set_busy(struct net_device * dev)
{
if (((struct ctc_priv *)dev->priv)->protocol != CTC_PROTO_LINUX_TTY)
netif_stop_queue(dev);
return test_and_set_bit(0, &((struct ctc_priv *) dev->priv)->tbusy);
}
#endif
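A sketch of how the busy helpers above are typically paired around a transmit routine; this illustrates the intended pattern and is not a verbatim excerpt from ctcmain.c:

static int example_hard_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
	if (ctc_test_and_set_busy(dev))
		return 1;	/* already busy: the stack should requeue */

	/* ... hand the skb to the write channel's state machine here ... */

	ctc_clear_busy(dev);	/* re-enables the queue unless in tty protocol mode */
	return 0;
}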
/* /*
-* $Id: ctctty.c,v 1.26 2004/08/04 11:06:55 mschwide Exp $
+* $Id: ctctty.c,v 1.29 2005/04/05 08:50:44 mschwide Exp $
* *
* CTC / ESCON network driver, tty interface. * CTC / ESCON network driver, tty interface.
* *
...@@ -1056,8 +1056,7 @@ ctc_tty_close(struct tty_struct *tty, struct file *filp) ...@@ -1056,8 +1056,7 @@ ctc_tty_close(struct tty_struct *tty, struct file *filp)
info->tty = 0; info->tty = 0;
tty->closing = 0; tty->closing = 0;
if (info->blocked_open) { if (info->blocked_open) {
-set_current_state(TASK_INTERRUPTIBLE);
-schedule_timeout(HZ/2);
+msleep_interruptible(500);
wake_up_interruptible(&info->open_wait); wake_up_interruptible(&info->open_wait);
} }
info->flags &= ~(CTC_ASYNC_NORMAL_ACTIVE | CTC_ASYNC_CLOSING); info->flags &= ~(CTC_ASYNC_NORMAL_ACTIVE | CTC_ASYNC_CLOSING);
......
/* /*
-* $Id: cu3088.c,v 1.34 2004/06/15 13:16:27 pavlic Exp $
+* $Id: cu3088.c,v 1.35 2005/03/30 19:28:52 richtera Exp $
* *
* CTC / LCS ccw_device driver * CTC / LCS ccw_device driver
* *
...@@ -39,6 +39,7 @@ const char *cu3088_type[] = { ...@@ -39,6 +39,7 @@ const char *cu3088_type[] = {
"FICON channel", "FICON channel",
"P390 LCS card", "P390 LCS card",
"OSA LCS card", "OSA LCS card",
"CLAW channel device",
"unknown channel type", "unknown channel type",
"unsupported channel type", "unsupported channel type",
}; };
...@@ -51,6 +52,7 @@ static struct ccw_device_id cu3088_ids[] = { ...@@ -51,6 +52,7 @@ static struct ccw_device_id cu3088_ids[] = {
{ CCW_DEVICE(0x3088, 0x1e), .driver_info = channel_type_ficon }, { CCW_DEVICE(0x3088, 0x1e), .driver_info = channel_type_ficon },
{ CCW_DEVICE(0x3088, 0x01), .driver_info = channel_type_p390 }, { CCW_DEVICE(0x3088, 0x01), .driver_info = channel_type_p390 },
{ CCW_DEVICE(0x3088, 0x60), .driver_info = channel_type_osa2 }, { CCW_DEVICE(0x3088, 0x60), .driver_info = channel_type_osa2 },
{ CCW_DEVICE(0x3088, 0x61), .driver_info = channel_type_claw },
{ /* end of list */ } { /* end of list */ }
}; };
......
...@@ -23,6 +23,9 @@ enum channel_types { ...@@ -23,6 +23,9 @@ enum channel_types {
/* Device is a OSA2 card */ /* Device is a OSA2 card */
channel_type_osa2, channel_type_osa2,
/* Device is a CLAW channel device */
channel_type_claw,
/* Device is a channel, but we don't know /* Device is a channel, but we don't know
* anything about it */ * anything about it */
channel_type_unknown, channel_type_unknown,
......
/* /*
-* $Id: iucv.c,v 1.43 2005/02/09 14:47:43 braunu Exp $
+* $Id: iucv.c,v 1.45 2005/04/26 22:59:06 braunu Exp $
* *
* IUCV network driver * IUCV network driver
* *
...@@ -29,7 +29,7 @@ ...@@ -29,7 +29,7 @@
* along with this program; if not, write to the Free Software * along with this program; if not, write to the Free Software
* Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
* *
-* RELEASE-TAG: IUCV lowlevel driver $Revision: 1.43 $
+* RELEASE-TAG: IUCV lowlevel driver $Revision: 1.45 $
* *
*/ */
...@@ -355,7 +355,7 @@ do { \ ...@@ -355,7 +355,7 @@ do { \
static void static void
iucv_banner(void) iucv_banner(void)
{ {
-char vbuf[] = "$Revision: 1.43 $";
+char vbuf[] = "$Revision: 1.45 $";
char *version = vbuf; char *version = vbuf;
if ((version = strchr(version, ':'))) { if ((version = strchr(version, ':'))) {
...@@ -2553,12 +2553,12 @@ EXPORT_SYMBOL (iucv_resume); ...@@ -2553,12 +2553,12 @@ EXPORT_SYMBOL (iucv_resume);
#endif #endif
EXPORT_SYMBOL (iucv_reply_prmmsg);
EXPORT_SYMBOL (iucv_send);
-#if 0
EXPORT_SYMBOL (iucv_send2way);
EXPORT_SYMBOL (iucv_send2way_array);
-EXPORT_SYMBOL (iucv_send_array);
EXPORT_SYMBOL (iucv_send2way_prmmsg);
EXPORT_SYMBOL (iucv_send2way_prmmsg_array);
+#if 0
+EXPORT_SYMBOL (iucv_send_array);
EXPORT_SYMBOL (iucv_send_prmmsg);
EXPORT_SYMBOL (iucv_setmask);
#endif
......
...@@ -11,7 +11,7 @@ ...@@ -11,7 +11,7 @@
* Frank Pavlic (pavlic@de.ibm.com) and * Frank Pavlic (pavlic@de.ibm.com) and
* Martin Schwidefsky <schwidefsky@de.ibm.com> * Martin Schwidefsky <schwidefsky@de.ibm.com>
* *
-* $Revision: 1.96 $ $Date: 2004/11/11 13:42:33 $
+* $Revision: 1.98 $ $Date: 2005/04/18 13:41:29 $
* *
* This program is free software; you can redistribute it and/or modify * This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by * it under the terms of the GNU General Public License as published by
...@@ -59,7 +59,7 @@ ...@@ -59,7 +59,7 @@
/** /**
* initialization string for output * initialization string for output
*/ */
-#define VERSION_LCS_C "$Revision: 1.96 $"
+#define VERSION_LCS_C "$Revision: 1.98 $"
static char version[] __initdata = "LCS driver ("VERSION_LCS_C "/" VERSION_LCS_H ")"; static char version[] __initdata = "LCS driver ("VERSION_LCS_C "/" VERSION_LCS_H ")";
static char debug_buffer[255]; static char debug_buffer[255];
...@@ -1098,14 +1098,6 @@ lcs_check_multicast_support(struct lcs_card *card) ...@@ -1098,14 +1098,6 @@ lcs_check_multicast_support(struct lcs_card *card)
PRINT_ERR("Query IPAssist failed. Assuming unsupported!\n"); PRINT_ERR("Query IPAssist failed. Assuming unsupported!\n");
return -EOPNOTSUPP; return -EOPNOTSUPP;
} }
/* Print out supported assists: IPv6 */
PRINT_INFO("LCS device %s %s IPv6 support\n", card->dev->name,
(card->ip_assists_supported & LCS_IPASS_IPV6_SUPPORT) ?
"with" : "without");
/* Print out supported assist: Multicast */
PRINT_INFO("LCS device %s %s Multicast support\n", card->dev->name,
(card->ip_assists_supported & LCS_IPASS_MULTICAST_SUPPORT) ?
"with" : "without");
if (card->ip_assists_supported & LCS_IPASS_MULTICAST_SUPPORT) if (card->ip_assists_supported & LCS_IPASS_MULTICAST_SUPPORT)
return 0; return 0;
return -EOPNOTSUPP; return -EOPNOTSUPP;
...@@ -1160,7 +1152,7 @@ lcs_fix_multicast_list(struct lcs_card *card) ...@@ -1160,7 +1152,7 @@ lcs_fix_multicast_list(struct lcs_card *card)
} }
} }
/* re-insert all entries from the failed_list into ipm_list */ /* re-insert all entries from the failed_list into ipm_list */
-list_for_each_entry(ipm, &failed_list, list) {
+list_for_each_entry_safe(ipm, tmp, &failed_list, list) {
list_del_init(&ipm->list); list_del_init(&ipm->list);
list_add_tail(&ipm->list, &card->ipm_list); list_add_tail(&ipm->list, &card->ipm_list);
} }
...@@ -2198,30 +2190,39 @@ lcs_new_device(struct ccwgroup_device *ccwgdev)
if (!dev)
goto out;
card->dev = dev;
-netdev_out:
card->dev->priv = card;
card->dev->open = lcs_open_device;
card->dev->stop = lcs_stop_device;
card->dev->hard_start_xmit = lcs_start_xmit;
card->dev->get_stats = lcs_getstats;
SET_MODULE_OWNER(dev);
-if (lcs_register_netdev(ccwgdev) != 0)
-goto out;
memcpy(card->dev->dev_addr, card->mac, LCS_MAC_LENGTH);
#ifdef CONFIG_IP_MULTICAST
if (!lcs_check_multicast_support(card))
card->dev->set_multicast_list = lcs_set_multicast_list;
#endif
-netif_stop_queue(card->dev);
+netdev_out:
lcs_set_allowed_threads(card,0xffffffff);
if (recover_state == DEV_STATE_RECOVER) {
lcs_set_multicast_list(card->dev);
card->dev->flags |= IFF_UP;
netif_wake_queue(card->dev);
card->state = DEV_STATE_UP;
-} else
+} else {
lcs_stopcard(card);
+}
+if (lcs_register_netdev(ccwgdev) != 0)
+goto out;
+/* Print out supported assists: IPv6 */
+PRINT_INFO("LCS device %s %s IPv6 support\n", card->dev->name,
+(card->ip_assists_supported & LCS_IPASS_IPV6_SUPPORT) ?
+"with" : "without");
+/* Print out supported assist: Multicast */
+PRINT_INFO("LCS device %s %s Multicast support\n", card->dev->name,
+(card->ip_assists_supported & LCS_IPASS_MULTICAST_SUPPORT) ?
+"with" : "without");
return 0;
out:
......
...@@ -24,7 +24,7 @@ ...@@ -24,7 +24,7 @@
#include "qeth_mpc.h" #include "qeth_mpc.h"
-#define VERSION_QETH_H "$Revision: 1.135 $"
+#define VERSION_QETH_H "$Revision: 1.139 $"
#ifdef CONFIG_QETH_IPV6 #ifdef CONFIG_QETH_IPV6
#define QETH_VERSION_IPV6 ":IPv6" #define QETH_VERSION_IPV6 ":IPv6"
...@@ -288,7 +288,8 @@ qeth_is_ipa_enabled(struct qeth_ipa_info *ipa, enum qeth_ipa_funcs func) ...@@ -288,7 +288,8 @@ qeth_is_ipa_enabled(struct qeth_ipa_info *ipa, enum qeth_ipa_funcs func)
#define QETH_TX_TIMEOUT 100 * HZ #define QETH_TX_TIMEOUT 100 * HZ
#define QETH_HEADER_SIZE 32 #define QETH_HEADER_SIZE 32
#define MAX_PORTNO 15 #define MAX_PORTNO 15
-#define QETH_FAKE_LL_LEN ETH_HLEN
+#define QETH_FAKE_LL_LEN_ETH ETH_HLEN
#define QETH_FAKE_LL_LEN_TR (sizeof(struct trh_hdr)-TR_MAXRIFLEN+sizeof(struct trllc))
#define QETH_FAKE_LL_V6_ADDR_POS 24 #define QETH_FAKE_LL_V6_ADDR_POS 24
/*IPv6 address autoconfiguration stuff*/ /*IPv6 address autoconfiguration stuff*/
...@@ -369,6 +370,25 @@ struct qeth_hdr { ...@@ -369,6 +370,25 @@ struct qeth_hdr {
} hdr; } hdr;
} __attribute__ ((packed)); } __attribute__ ((packed));
/*TCP Segmentation Offload header*/
struct qeth_hdr_ext_tso {
__u16 hdr_tot_len;
__u8 imb_hdr_no;
__u8 reserved;
__u8 hdr_type;
__u8 hdr_version;
__u16 hdr_len;
__u32 payload_len;
__u16 mss;
__u16 dg_hdr_len;
__u8 padding[16];
} __attribute__ ((packed));
struct qeth_hdr_tso {
struct qeth_hdr hdr; /*hdr->hdr.l3.xxx*/
struct qeth_hdr_ext_tso ext;
} __attribute__ ((packed));
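The field widths above add up to 32 bytes of extension, so the complete TSO header occupies the usual 32-byte qeth_hdr plus another 32 bytes. A standalone sanity sketch that mirrors the layout (plain C, not driver code):

#include <stdint.h>

struct ext_tso_layout {			/* mirrors qeth_hdr_ext_tso above */
	uint16_t hdr_tot_len;
	uint8_t  imb_hdr_no;
	uint8_t  reserved;
	uint8_t  hdr_type;
	uint8_t  hdr_version;
	uint16_t hdr_len;
	uint32_t payload_len;
	uint16_t mss;
	uint16_t dg_hdr_len;
	uint8_t  padding[16];
} __attribute__ ((packed));

/* 2+1+1+1+1+2+4+2+2+16 = 32; fails to compile if the layout ever drifts */
typedef char ext_tso_is_32_bytes[(sizeof(struct ext_tso_layout) == 32) ? 1 : -1];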
/* flags for qeth_hdr.flags */ /* flags for qeth_hdr.flags */
#define QETH_HDR_PASSTHRU 0x10 #define QETH_HDR_PASSTHRU 0x10
...@@ -866,6 +886,7 @@ qeth_push_skb(struct qeth_card *card, struct sk_buff **skb, int size) ...@@ -866,6 +886,7 @@ qeth_push_skb(struct qeth_card *card, struct sk_buff **skb, int size)
return hdr; return hdr;
} }
inline static int inline static int
qeth_get_hlen(__u8 link_type) qeth_get_hlen(__u8 link_type)
{ {
...@@ -873,19 +894,19 @@ qeth_get_hlen(__u8 link_type) ...@@ -873,19 +894,19 @@ qeth_get_hlen(__u8 link_type)
switch (link_type) {
case QETH_LINK_TYPE_HSTR:
case QETH_LINK_TYPE_LANE_TR:
-return sizeof(struct qeth_hdr) + TR_HLEN;
+return sizeof(struct qeth_hdr_tso) + TR_HLEN;
default:
#ifdef CONFIG_QETH_VLAN
-return sizeof(struct qeth_hdr) + VLAN_ETH_HLEN;
+return sizeof(struct qeth_hdr_tso) + VLAN_ETH_HLEN;
#else
-return sizeof(struct qeth_hdr) + ETH_HLEN;
+return sizeof(struct qeth_hdr_tso) + ETH_HLEN;
#endif
}
#else /* CONFIG_QETH_IPV6 */
#ifdef CONFIG_QETH_VLAN
-return sizeof(struct qeth_hdr) + VLAN_HLEN;
+return sizeof(struct qeth_hdr_tso) + VLAN_HLEN;
#else
-return sizeof(struct qeth_hdr);
+return sizeof(struct qeth_hdr_tso);
#endif
#endif /* CONFIG_QETH_IPV6 */
}
......
/* /*
* *
-* linux/drivers/s390/net/qeth_eddp.c ($Revision: 1.11 $)
+* linux/drivers/s390/net/qeth_eddp.c ($Revision: 1.13 $)
* *
* Enhanced Device Driver Packing (EDDP) support for the qeth driver. * Enhanced Device Driver Packing (EDDP) support for the qeth driver.
* *
...@@ -8,7 +8,7 @@ ...@@ -8,7 +8,7 @@
* *
* Author(s): Thomas Spatzier <tspat@de.ibm.com> * Author(s): Thomas Spatzier <tspat@de.ibm.com>
* *
-* $Revision: 1.11 $ $Date: 2005/03/24 09:04:18 $
+* $Revision: 1.13 $ $Date: 2005/05/04 20:19:18 $
* *
*/ */
#include <linux/config.h> #include <linux/config.h>
...@@ -85,7 +85,7 @@ void ...@@ -85,7 +85,7 @@ void
qeth_eddp_buf_release_contexts(struct qeth_qdio_out_buffer *buf) qeth_eddp_buf_release_contexts(struct qeth_qdio_out_buffer *buf)
{ {
struct qeth_eddp_context_reference *ref; struct qeth_eddp_context_reference *ref;
QETH_DBF_TEXT(trace, 6, "eddprctx"); QETH_DBF_TEXT(trace, 6, "eddprctx");
while (!list_empty(&buf->ctx_list)){ while (!list_empty(&buf->ctx_list)){
ref = list_entry(buf->ctx_list.next, ref = list_entry(buf->ctx_list.next,
...@@ -139,7 +139,7 @@ qeth_eddp_fill_buffer(struct qeth_qdio_out_q *queue, ...@@ -139,7 +139,7 @@ qeth_eddp_fill_buffer(struct qeth_qdio_out_q *queue,
"buffer!\n"); "buffer!\n");
goto out; goto out;
} }
} }
/* check if the whole next skb fits into current buffer */ /* check if the whole next skb fits into current buffer */
if ((QETH_MAX_BUFFER_ELEMENTS(queue->card) - if ((QETH_MAX_BUFFER_ELEMENTS(queue->card) -
buf->next_element_to_fill) buf->next_element_to_fill)
...@@ -152,7 +152,7 @@ qeth_eddp_fill_buffer(struct qeth_qdio_out_q *queue, ...@@ -152,7 +152,7 @@ qeth_eddp_fill_buffer(struct qeth_qdio_out_q *queue,
* and increment ctx's refcnt */ * and increment ctx's refcnt */
must_refcnt = 1; must_refcnt = 1;
continue; continue;
} }
if (must_refcnt){ if (must_refcnt){
must_refcnt = 0; must_refcnt = 0;
if (qeth_eddp_buf_ref_context(buf, ctx)){ if (qeth_eddp_buf_ref_context(buf, ctx)){
...@@ -202,40 +202,29 @@ qeth_eddp_fill_buffer(struct qeth_qdio_out_q *queue, ...@@ -202,40 +202,29 @@ qeth_eddp_fill_buffer(struct qeth_qdio_out_q *queue,
return flush_cnt; return flush_cnt;
} }
static inline int
qeth_get_skb_data_len(struct sk_buff *skb)
{
int len = skb->len;
int i;
for (i = 0; i < skb_shinfo(skb)->nr_frags; ++i)
len -= skb_shinfo(skb)->frags[i].size;
return len;
}
static inline void
qeth_eddp_create_segment_hdrs(struct qeth_eddp_context *ctx,
-struct qeth_eddp_data *eddp)
+struct qeth_eddp_data *eddp, int data_len)
{
u8 *page;
int page_remainder;
int page_offset;
-int hdr_len;
+int pkt_len;
struct qeth_eddp_element *element;
QETH_DBF_TEXT(trace, 5, "eddpcrsh");
page = ctx->pages[ctx->offset >> PAGE_SHIFT];
page_offset = ctx->offset % PAGE_SIZE;
element = &ctx->elements[ctx->num_elements];
-hdr_len = eddp->nhl + eddp->thl;
+pkt_len = eddp->nhl + eddp->thl + data_len;
/* FIXME: layer2 and VLAN !!! */
if (eddp->qh.hdr.l2.id == QETH_HEADER_TYPE_LAYER2)
-hdr_len += ETH_HLEN;
+pkt_len += ETH_HLEN;
if (eddp->mac.h_proto == __constant_htons(ETH_P_8021Q))
-hdr_len += VLAN_HLEN;
+pkt_len += VLAN_HLEN;
-/* does complete header fit in current page ? */
+/* does complete packet fit in current page ? */
page_remainder = PAGE_SIZE - page_offset;
-if (page_remainder < (sizeof(struct qeth_hdr) + hdr_len)){
+if (page_remainder < (sizeof(struct qeth_hdr) + pkt_len)){
/* no -> go to start of next page */
ctx->offset += page_remainder;
page = ctx->pages[ctx->offset >> PAGE_SHIFT];
...@@ -281,7 +270,7 @@ qeth_eddp_copy_data_tcp(char *dst, struct qeth_eddp_data *eddp, int len, ...@@ -281,7 +270,7 @@ qeth_eddp_copy_data_tcp(char *dst, struct qeth_eddp_data *eddp, int len,
int left_in_frag; int left_in_frag;
int copy_len; int copy_len;
u8 *src; u8 *src;
QETH_DBF_TEXT(trace, 5, "eddpcdtc"); QETH_DBF_TEXT(trace, 5, "eddpcdtc");
if (skb_shinfo(eddp->skb)->nr_frags == 0) { if (skb_shinfo(eddp->skb)->nr_frags == 0) {
memcpy(dst, eddp->skb->data + eddp->skb_offset, len); memcpy(dst, eddp->skb->data + eddp->skb_offset, len);
...@@ -292,7 +281,7 @@ qeth_eddp_copy_data_tcp(char *dst, struct qeth_eddp_data *eddp, int len, ...@@ -292,7 +281,7 @@ qeth_eddp_copy_data_tcp(char *dst, struct qeth_eddp_data *eddp, int len,
while (len > 0) { while (len > 0) {
if (eddp->frag < 0) { if (eddp->frag < 0) {
/* we're in skb->data */ /* we're in skb->data */
-left_in_frag = qeth_get_skb_data_len(eddp->skb)
+left_in_frag = (eddp->skb->len - eddp->skb->data_len)
- eddp->skb_offset; - eddp->skb_offset;
src = eddp->skb->data + eddp->skb_offset; src = eddp->skb->data + eddp->skb_offset;
} else { } else {
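The replacement works because skb->data_len counts only the bytes that live in paged fragments, so no loop over the frags is needed; a note on the identity being relied on (illustrative, not driver code):

/* skb->len      = linear bytes + bytes held in frags
 * skb->data_len =                bytes held in frags
 * so skb->len - skb->data_len is the length of the linear head, exactly
 * what the removed qeth_get_skb_data_len() computed by summing frag sizes.
 */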
...@@ -424,7 +413,7 @@ __qeth_eddp_fill_context_tcp(struct qeth_eddp_context *ctx, ...@@ -424,7 +413,7 @@ __qeth_eddp_fill_context_tcp(struct qeth_eddp_context *ctx,
struct tcphdr *tcph; struct tcphdr *tcph;
int data_len; int data_len;
u32 hcsum; u32 hcsum;
QETH_DBF_TEXT(trace, 5, "eddpftcp"); QETH_DBF_TEXT(trace, 5, "eddpftcp");
eddp->skb_offset = sizeof(struct qeth_hdr) + eddp->nhl + eddp->thl; eddp->skb_offset = sizeof(struct qeth_hdr) + eddp->nhl + eddp->thl;
tcph = eddp->skb->h.th; tcph = eddp->skb->h.th;
...@@ -464,7 +453,7 @@ __qeth_eddp_fill_context_tcp(struct qeth_eddp_context *ctx, ...@@ -464,7 +453,7 @@ __qeth_eddp_fill_context_tcp(struct qeth_eddp_context *ctx,
else else
hcsum = qeth_eddp_check_tcp6_hdr(eddp, data_len); hcsum = qeth_eddp_check_tcp6_hdr(eddp, data_len);
/* fill the next segment into the context */ /* fill the next segment into the context */
-qeth_eddp_create_segment_hdrs(ctx, eddp);
+qeth_eddp_create_segment_hdrs(ctx, eddp, data_len);
qeth_eddp_create_segment_data_tcp(ctx, eddp, data_len, hcsum); qeth_eddp_create_segment_data_tcp(ctx, eddp, data_len, hcsum);
if (eddp->skb_offset >= eddp->skb->len) if (eddp->skb_offset >= eddp->skb->len)
break; break;
...@@ -474,13 +463,13 @@ __qeth_eddp_fill_context_tcp(struct qeth_eddp_context *ctx, ...@@ -474,13 +463,13 @@ __qeth_eddp_fill_context_tcp(struct qeth_eddp_context *ctx,
eddp->th.tcp.h.seq += data_len; eddp->th.tcp.h.seq += data_len;
} }
} }
static inline int static inline int
qeth_eddp_fill_context_tcp(struct qeth_eddp_context *ctx, qeth_eddp_fill_context_tcp(struct qeth_eddp_context *ctx,
struct sk_buff *skb, struct qeth_hdr *qhdr) struct sk_buff *skb, struct qeth_hdr *qhdr)
{ {
struct qeth_eddp_data *eddp = NULL; struct qeth_eddp_data *eddp = NULL;
QETH_DBF_TEXT(trace, 5, "eddpficx"); QETH_DBF_TEXT(trace, 5, "eddpficx");
/* create our segmentation headers and copy original headers */ /* create our segmentation headers and copy original headers */
if (skb->protocol == ETH_P_IP) if (skb->protocol == ETH_P_IP)
...@@ -520,7 +509,7 @@ qeth_eddp_calc_num_pages(struct qeth_eddp_context *ctx, struct sk_buff *skb, ...@@ -520,7 +509,7 @@ qeth_eddp_calc_num_pages(struct qeth_eddp_context *ctx, struct sk_buff *skb,
int hdr_len) int hdr_len)
{ {
int skbs_per_page; int skbs_per_page;
QETH_DBF_TEXT(trace, 5, "eddpcanp"); QETH_DBF_TEXT(trace, 5, "eddpcanp");
/* can we put multiple skbs in one page? */ /* can we put multiple skbs in one page? */
skbs_per_page = PAGE_SIZE / (skb_shinfo(skb)->tso_size + hdr_len); skbs_per_page = PAGE_SIZE / (skb_shinfo(skb)->tso_size + hdr_len);
...@@ -600,7 +589,7 @@ qeth_eddp_create_context_tcp(struct qeth_card *card, struct sk_buff *skb, ...@@ -600,7 +589,7 @@ qeth_eddp_create_context_tcp(struct qeth_card *card, struct sk_buff *skb,
struct qeth_hdr *qhdr) struct qeth_hdr *qhdr)
{ {
struct qeth_eddp_context *ctx = NULL; struct qeth_eddp_context *ctx = NULL;
QETH_DBF_TEXT(trace, 5, "creddpct"); QETH_DBF_TEXT(trace, 5, "creddpct");
if (skb->protocol == ETH_P_IP) if (skb->protocol == ETH_P_IP)
ctx = qeth_eddp_create_context_generic(card, skb, ctx = qeth_eddp_create_context_generic(card, skb,
......
/*
* linux/drivers/s390/net/qeth_tso.c ($Revision: 1.6 $)
*
* Header file for qeth TCP Segmentation Offload support.
*
* Copyright 2004 IBM Corporation
*
* Author(s): Frank Pavlic <pavlic@de.ibm.com>
*
* $Revision: 1.6 $ $Date: 2005/03/24 09:04:18 $
*
*/
#include <linux/skbuff.h>
#include <linux/tcp.h>
#include <linux/ip.h>
#include <linux/ipv6.h>
#include <net/ip6_checksum.h>
#include "qeth.h"
#include "qeth_mpc.h"
#include "qeth_tso.h"
/**
* skb already partially prepared
* classic qdio header in skb->data
* */
static inline struct qeth_hdr_tso *
qeth_tso_prepare_skb(struct qeth_card *card, struct sk_buff **skb)
{
int rc = 0;
QETH_DBF_TEXT(trace, 5, "tsoprsk");
rc = qeth_realloc_headroom(card, skb,sizeof(struct qeth_hdr_ext_tso));
if (rc)
return NULL;
return qeth_push_skb(card, skb, sizeof(struct qeth_hdr_ext_tso));
}
/**
* fill header for a TSO packet
*/
static inline void
qeth_tso_fill_header(struct qeth_card *card, struct sk_buff *skb)
{
struct qeth_hdr_tso *hdr;
struct tcphdr *tcph;
struct iphdr *iph;
QETH_DBF_TEXT(trace, 5, "tsofhdr");
hdr = (struct qeth_hdr_tso *) skb->data;
iph = skb->nh.iph;
tcph = skb->h.th;
/*fix header to TSO values ...*/
hdr->hdr.hdr.l3.id = QETH_HEADER_TYPE_TSO;
/*set values which are fix for the first approach ...*/
hdr->ext.hdr_tot_len = (__u16) sizeof(struct qeth_hdr_ext_tso);
hdr->ext.imb_hdr_no = 1;
hdr->ext.hdr_type = 1;
hdr->ext.hdr_version = 1;
hdr->ext.hdr_len = 28;
/*insert non-fix values */
hdr->ext.mss = skb_shinfo(skb)->tso_size;
hdr->ext.dg_hdr_len = (__u16)(iph->ihl*4 + tcph->doff*4);
hdr->ext.payload_len = (__u16)(skb->len - hdr->ext.dg_hdr_len -
sizeof(struct qeth_hdr_tso));
}
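A worked example of the length fields set above, with purely hypothetical numbers: a 20-byte IPv4 header, a 20-byte TCP header, 60000 bytes of TCP data, and the 64-byte TSO header already pushed in front (it sits at skb->data, so skb->len counts it):

int dg_hdr_len  = 20 + 20;			/* iph->ihl*4 + tcph->doff*4 */
int skb_len     = 64 + dg_hdr_len + 60000;	/* 64 = sizeof(struct qeth_hdr_tso) */
int payload_len = skb_len - dg_hdr_len - 64;	/* = 60000, the TCP payload */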
/**
* change some header values as requested by hardware
*/
static inline void
qeth_tso_set_tcpip_header(struct qeth_card *card, struct sk_buff *skb)
{
struct iphdr *iph;
struct ipv6hdr *ip6h;
struct tcphdr *tcph;
iph = skb->nh.iph;
ip6h = skb->nh.ipv6h;
tcph = skb->h.th;
tcph->check = 0;
if (skb->protocol == ETH_P_IPV6) {
ip6h->payload_len = 0;
tcph->check = ~csum_ipv6_magic(&ip6h->saddr, &ip6h->daddr,
0, IPPROTO_TCP, 0);
return;
}
/*OSA want us to set these values ...*/
tcph->check = ~csum_tcpudp_magic(iph->saddr, iph->daddr,
0, IPPROTO_TCP, 0);
iph->tot_len = 0;
iph->check = 0;
}
static inline struct qeth_hdr_tso *
qeth_tso_prepare_packet(struct qeth_card *card, struct sk_buff *skb,
int ipv, int cast_type)
{
struct qeth_hdr_tso *hdr;
int rc = 0;
QETH_DBF_TEXT(trace, 5, "tsoprep");
/*get headroom for tso qdio header */
hdr = (struct qeth_hdr_tso *) qeth_tso_prepare_skb(card, &skb);
if (hdr == NULL) {
QETH_DBF_TEXT_(trace, 4, "2err%d", rc);
return NULL;
}
memset(hdr, 0, sizeof(struct qeth_hdr_tso));
/*fill first 32 bytes of qdio header as used
 *FIXME: TSO has two struct members
 *       with different names but same size
 */
qeth_fill_header(card, &hdr->hdr, skb, ipv, cast_type);
qeth_tso_fill_header(card, skb);
qeth_tso_set_tcpip_header(card, skb);
return hdr;
}
static inline int
qeth_tso_get_queue_buffer(struct qeth_qdio_out_q *queue)
{
struct qeth_qdio_out_buffer *buffer;
int flush_cnt = 0;
QETH_DBF_TEXT(trace, 5, "tsobuf");
/* force to non-packing*/
if (queue->do_pack)
queue->do_pack = 0;
buffer = &queue->bufs[queue->next_buf_to_fill];
/* get a new buffer if the current one is already partially filled */
if ((atomic_read(&buffer->state) == QETH_QDIO_BUF_EMPTY) &&
(buffer->next_element_to_fill > 0)) {
atomic_set(&buffer->state, QETH_QDIO_BUF_PRIMED);
queue->next_buf_to_fill = (queue->next_buf_to_fill + 1) %
QDIO_MAX_BUFFERS_PER_Q;
flush_cnt++;
}
return flush_cnt;
}
static inline void
__qeth_tso_fill_buffer_frag(struct qeth_qdio_out_buffer *buf,
struct sk_buff *skb)
{
struct skb_frag_struct *frag;
struct qdio_buffer *buffer;
int fragno, cnt, element;
unsigned long addr;
QETH_DBF_TEXT(trace, 6, "tsfilfrg");
/*initialize variables ...*/
fragno = skb_shinfo(skb)->nr_frags;
buffer = buf->buffer;
element = buf->next_element_to_fill;
/*fill buffer elements .....*/
for (cnt = 0; cnt < fragno; cnt++) {
frag = &skb_shinfo(skb)->frags[cnt];
addr = (page_to_pfn(frag->page) << PAGE_SHIFT) +
frag->page_offset;
buffer->element[element].addr = (char *)addr;
buffer->element[element].length = frag->size;
if (cnt < (fragno - 1))
buffer->element[element].flags =
SBAL_FLAGS_MIDDLE_FRAG;
else
buffer->element[element].flags =
SBAL_FLAGS_LAST_FRAG;
element++;
}
buf->next_element_to_fill = element;
}
static inline int
qeth_tso_fill_buffer(struct qeth_qdio_out_buffer *buf,
struct sk_buff *skb)
{
int length, length_here, element;
int hdr_len;
struct qdio_buffer *buffer;
struct qeth_hdr_tso *hdr;
char *data;
QETH_DBF_TEXT(trace, 3, "tsfilbuf");
/*increment user count and queue skb ...*/
atomic_inc(&skb->users);
skb_queue_tail(&buf->skb_list, skb);
/*initialize all variables...*/
buffer = buf->buffer;
hdr = (struct qeth_hdr_tso *)skb->data;
hdr_len = sizeof(struct qeth_hdr_tso) + hdr->ext.dg_hdr_len;
data = skb->data + hdr_len;
length = skb->len - hdr_len;
element = buf->next_element_to_fill;
/*fill first buffer entry only with header information */
buffer->element[element].addr = skb->data;
buffer->element[element].length = hdr_len;
buffer->element[element].flags = SBAL_FLAGS_FIRST_FRAG;
buf->next_element_to_fill++;
if (skb_shinfo(skb)->nr_frags > 0) {
__qeth_tso_fill_buffer_frag(buf, skb);
goto out;
}
/*start filling buffer entries ...*/
element++;
while (length > 0) {
/* length_here is the number of bytes that still fit into the current page */
length_here = PAGE_SIZE - ((unsigned long) data % PAGE_SIZE);
if (length < length_here)
length_here = length;
buffer->element[element].addr = data;
buffer->element[element].length = length_here;
length -= length_here;
if (!length)
buffer->element[element].flags =
SBAL_FLAGS_LAST_FRAG;
else
buffer->element[element].flags =
SBAL_FLAGS_MIDDLE_FRAG;
data += length_here;
element++;
}
/*set the buffer to primed ...*/
buf->next_element_to_fill = element;
out:
atomic_set(&buf->state, QETH_QDIO_BUF_PRIMED);
return 1;
}
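The while loop above splits the linear part of the skb at page boundaries, since one SBAL element may only describe memory inside a single page. A stand-alone sketch of the same splitting rule, with an assumed 4 KiB page size:

#include <stdio.h>

#define EXAMPLE_PAGE_SIZE 4096UL   /* assumed page size for the sketch */

/* Print the (addr, len) chunks a linear buffer is split into, never
 * letting a chunk cross a page boundary - the rule the length_here
 * computation above implements. */
static int split_at_page_boundaries(unsigned long data, int length)
{
        int elements = 0;

        while (length > 0) {
                /* bytes left in the page that 'data' currently points into */
                int length_here = EXAMPLE_PAGE_SIZE - (data % EXAMPLE_PAGE_SIZE);

                if (length < length_here)
                        length_here = length;
                printf("element %d: addr=0x%lx len=%d\n",
                       elements, data, length_here);
                data += length_here;
                length -= length_here;
                elements++;
        }
        return elements;
}

int main(void)
{
        /* a buffer starting 100 bytes before a page boundary, 6000 bytes long */
        split_at_page_boundaries(EXAMPLE_PAGE_SIZE - 100, 6000);
        return 0;
}

For a buffer that starts 100 bytes before a page boundary and is 6000 bytes long this yields three elements of 100, 4096 and 1804 bytes.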
int
qeth_tso_send_packet(struct qeth_card *card, struct sk_buff *skb,
struct qeth_qdio_out_q *queue, int ipv, int cast_type)
{
int flush_cnt = 0;
struct qeth_hdr_tso *hdr;
struct qeth_qdio_out_buffer *buffer;
int start_index;
QETH_DBF_TEXT(trace, 3, "tsosend");
if (!(hdr = qeth_tso_prepare_packet(card, skb, ipv, cast_type)))
return -ENOMEM;
/*check if skb fits in one SBAL ...*/
if (!(qeth_get_elements_no(card, (void*)hdr, skb)))
return -EINVAL;
/*lock queue, force switching to non-packing and send it ...*/
while (atomic_compare_and_swap(QETH_OUT_Q_UNLOCKED,
QETH_OUT_Q_LOCKED,
&queue->state));
start_index = queue->next_buf_to_fill;
buffer = &queue->bufs[queue->next_buf_to_fill];
/*check if card is too busy ...*/
if (atomic_read(&buffer->state) != QETH_QDIO_BUF_EMPTY) {
card->stats.tx_dropped++;
goto out;
}
/*let's force to non-packing and get a new SBAL*/
flush_cnt += qeth_tso_get_queue_buffer(queue);
buffer = &queue->bufs[queue->next_buf_to_fill];
if (atomic_read(&buffer->state) != QETH_QDIO_BUF_EMPTY) {
card->stats.tx_dropped++;
goto out;
}
flush_cnt += qeth_tso_fill_buffer(buffer, skb);
queue->next_buf_to_fill = (queue->next_buf_to_fill + 1) %
QDIO_MAX_BUFFERS_PER_Q;
out:
atomic_set(&queue->state, QETH_OUT_Q_UNLOCKED);
if (flush_cnt)
qeth_flush_buffers(queue, 0, start_index, flush_cnt);
/*do some statistics */
card->stats.tx_packets++;
card->stats.tx_bytes += skb->len;
return 0;
}
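qeth_tso_send_packet() serialises queue access by spinning on a compare-and-swap until the queue state flips from QETH_OUT_Q_UNLOCKED to QETH_OUT_Q_LOCKED. The sketch below shows the same busy-wait idiom with C11 atomics; the names are invented for the example, and the driver itself relies on the s390 atomic_compare_and_swap() helper rather than <stdatomic.h>.

#include <stdatomic.h>
#include <stdio.h>

enum { EXAMPLE_Q_UNLOCKED = 0, EXAMPLE_Q_LOCKED = 1 };

static atomic_int queue_state = EXAMPLE_Q_UNLOCKED;

/* Spin until the state moves from UNLOCKED to LOCKED - the same
 * busy-wait idiom as the while (atomic_compare_and_swap(...)) above. */
static void example_queue_lock(void)
{
        int expected = EXAMPLE_Q_UNLOCKED;

        while (!atomic_compare_exchange_weak(&queue_state, &expected,
                                             EXAMPLE_Q_LOCKED))
                expected = EXAMPLE_Q_UNLOCKED;   /* CAS failed: reset and retry */
}

static void example_queue_unlock(void)
{
        atomic_store(&queue_state, EXAMPLE_Q_UNLOCKED);
}

int main(void)
{
        example_queue_lock();
        printf("queue locked, filling buffers...\n");
        example_queue_unlock();
        return 0;
}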
/*
- * linux/drivers/s390/net/qeth_tso.h ($Revision: 1.4 $)
+ * linux/drivers/s390/net/qeth_tso.h ($Revision: 1.7 $)
 *
 * Header file for qeth TCP Segmentation Offload support.
 *
...@@ -7,52 +7,148 @@
 *
 * Author(s): Frank Pavlic <pavlic@de.ibm.com>
 *
- * $Revision: 1.4 $	 $Date: 2005/03/24 09:04:18 $
+ * $Revision: 1.7 $	 $Date: 2005/05/04 20:19:18 $
 *
 */
#ifndef __QETH_TSO_H__
#define __QETH_TSO_H__

+#include <linux/skbuff.h>
+#include <linux/tcp.h>
+#include <linux/ip.h>
+#include <linux/ipv6.h>
+#include <net/ip6_checksum.h>
+#include "qeth.h"
+#include "qeth_mpc.h"

-extern int
-qeth_tso_send_packet(struct qeth_card *, struct sk_buff *,
-                     struct qeth_qdio_out_q *, int , int);
-
-struct qeth_hdr_ext_tso {
-        __u16 hdr_tot_len;
-        __u8  imb_hdr_no;
-        __u8  reserved;
-        __u8  hdr_type;
-        __u8  hdr_version;
-        __u16 hdr_len;
-        __u32 payload_len;
-        __u16 mss;
-        __u16 dg_hdr_len;
-        __u8  padding[16];
-} __attribute__ ((packed));
-
-struct qeth_hdr_tso {
-        struct qeth_hdr hdr;    /*hdr->hdr.l3.xxx*/
-        struct qeth_hdr_ext_tso ext;
-} __attribute__ ((packed));
-
-/*some helper functions*/
+static inline struct qeth_hdr_tso *
+qeth_tso_prepare_skb(struct qeth_card *card, struct sk_buff **skb)
+{
+        QETH_DBF_TEXT(trace, 5, "tsoprsk");
+        return qeth_push_skb(card, skb, sizeof(struct qeth_hdr_tso));
+}
+
+/**
+ * fill header for a TSO packet
+ */
+static inline void
+qeth_tso_fill_header(struct qeth_card *card, struct sk_buff *skb)
+{
+        struct qeth_hdr_tso *hdr;
+        struct tcphdr *tcph;
+        struct iphdr *iph;
+
+        QETH_DBF_TEXT(trace, 5, "tsofhdr");
+
+        hdr = (struct qeth_hdr_tso *) skb->data;
+        iph = skb->nh.iph;
+        tcph = skb->h.th;
+        /*fix header to TSO values ...*/
+        hdr->hdr.hdr.l3.id = QETH_HEADER_TYPE_TSO;
+        /*set values which are fix for the first approach ...*/
+        hdr->ext.hdr_tot_len = (__u16) sizeof(struct qeth_hdr_ext_tso);
+        hdr->ext.imb_hdr_no = 1;
+        hdr->ext.hdr_type = 1;
+        hdr->ext.hdr_version = 1;
+        hdr->ext.hdr_len = 28;
+        /*insert non-fix values */
+        hdr->ext.mss = skb_shinfo(skb)->tso_size;
+        hdr->ext.dg_hdr_len = (__u16)(iph->ihl*4 + tcph->doff*4);
+        hdr->ext.payload_len = (__u16)(skb->len - hdr->ext.dg_hdr_len -
+                                       sizeof(struct qeth_hdr_tso));
+}
+
+/**
+ * change some header values as requested by hardware
+ */
+static inline void
+qeth_tso_set_tcpip_header(struct qeth_card *card, struct sk_buff *skb)
+{
+        struct iphdr *iph;
+        struct ipv6hdr *ip6h;
+        struct tcphdr *tcph;
+
+        iph = skb->nh.iph;
+        ip6h = skb->nh.ipv6h;
+        tcph = skb->h.th;
+
+        tcph->check = 0;
+        if (skb->protocol == ETH_P_IPV6) {
+                ip6h->payload_len = 0;
+                tcph->check = ~csum_ipv6_magic(&ip6h->saddr, &ip6h->daddr,
+                                               0, IPPROTO_TCP, 0);
+                return;
+        }
+        /*OSA want us to set these values ...*/
+        tcph->check = ~csum_tcpudp_magic(iph->saddr, iph->daddr,
+                                         0, IPPROTO_TCP, 0);
+        iph->tot_len = 0;
+        iph->check = 0;
+}

static inline int
-qeth_get_elements_no(struct qeth_card *card, void *hdr, struct sk_buff *skb)
+qeth_tso_prepare_packet(struct qeth_card *card, struct sk_buff *skb,
+                        int ipv, int cast_type)
+{
+        struct qeth_hdr_tso *hdr;
+
+        QETH_DBF_TEXT(trace, 5, "tsoprep");
+
+        hdr = (struct qeth_hdr_tso *) qeth_tso_prepare_skb(card, &skb);
+        if (hdr == NULL) {
+                QETH_DBF_TEXT(trace, 4, "tsoperr");
+                return -ENOMEM;
+        }
+        memset(hdr, 0, sizeof(struct qeth_hdr_tso));
+        /*fill first 32 bytes of qdio header as used
+         *FIXME: TSO has two struct members
+         *       with different names but same size
+         */
+        qeth_fill_header(card, &hdr->hdr, skb, ipv, cast_type);
+        qeth_tso_fill_header(card, skb);
+        qeth_tso_set_tcpip_header(card, skb);
+        return 0;
+}
+
+static inline void
+__qeth_fill_buffer_frag(struct sk_buff *skb, struct qdio_buffer *buffer,
+                        int is_tso, int *next_element_to_fill)
{
-        int elements_needed = 0;
-
-        if (skb_shinfo(skb)->nr_frags > 0)
-                elements_needed = (skb_shinfo(skb)->nr_frags + 1);
-        if (elements_needed == 0 )
-                elements_needed = 1 + (((((unsigned long) hdr) % PAGE_SIZE)
-                                        + skb->len) >> PAGE_SHIFT);
-        if (elements_needed > QETH_MAX_BUFFER_ELEMENTS(card)){
-                PRINT_ERR("qeth_do_send_packet: invalid size of "
-                          "IP packet. Discarded.");
-                return 0;
-        }
-        return elements_needed;
+        struct skb_frag_struct *frag;
+        int fragno;
+        unsigned long addr;
+        int element, cnt, dlen;
+
+        fragno = skb_shinfo(skb)->nr_frags;
+        element = *next_element_to_fill;
+        dlen = 0;
+
+        if (is_tso)
+                buffer->element[element].flags =
+                        SBAL_FLAGS_MIDDLE_FRAG;
+        else
+                buffer->element[element].flags =
+                        SBAL_FLAGS_FIRST_FRAG;
+        if ( (dlen = (skb->len - skb->data_len)) ) {
+                buffer->element[element].addr = skb->data;
+                buffer->element[element].length = dlen;
+                element++;
+        }
+        for (cnt = 0; cnt < fragno; cnt++) {
+                frag = &skb_shinfo(skb)->frags[cnt];
+                addr = (page_to_pfn(frag->page) << PAGE_SHIFT) +
+                        frag->page_offset;
+                buffer->element[element].addr = (char *)addr;
+                buffer->element[element].length = frag->size;
+                if (cnt < (fragno - 1))
+                        buffer->element[element].flags =
+                                SBAL_FLAGS_MIDDLE_FRAG;
+                else
+                        buffer->element[element].flags =
+                                SBAL_FLAGS_LAST_FRAG;
+                element++;
+        }
+        *next_element_to_fill = element;
}
#endif /* __QETH_TSO_H__ */