Commit e37a72de authored by Linus Torvalds

Merge master.kernel.org:/pub/scm/linux/kernel/git/davem/net-2.6

* master.kernel.org:/pub/scm/linux/kernel/git/davem/net-2.6:
  [IPV6]: Added GSO support for TCPv6
  [NET]: Generalise TSO-specific bits from skb_setup_caps
  [IPV6]: Remove redundant length check on input
  [NETFILTER]: SCTP conntrack: fix crash triggered by packet without chunks
  [TG3]: Update version and reldate
  [TG3]: Add TSO workaround using GSO
  [TG3]: Turn on hw fix for ASF problems
  [TG3]: Add rx BD workaround
  [TG3]: Add tg3_netif_stop() in vlan functions
  [TCP]: Reset gso_segs if packet is dodgy
parents 93fdf10d f83ef8c0
@@ -3959,7 +3959,7 @@ static int s2io_xmit(struct sk_buff *skb, struct net_device *dev)
 	txdp->Control_2 = 0;
 #ifdef NETIF_F_TSO
 	mss = skb_shinfo(skb)->gso_size;
-	if (skb_shinfo(skb)->gso_type == SKB_GSO_TCPV4) {
+	if (skb_shinfo(skb)->gso_type & (SKB_GSO_TCPV4 | SKB_GSO_TCPV6)) {
 		txdp->Control_1 |= TXD_TCP_LSO_EN;
 		txdp->Control_1 |= TXD_TCP_LSO_MSS(mss);
 	}
@@ -3979,7 +3979,7 @@ static int s2io_xmit(struct sk_buff *skb, struct net_device *dev)
 	}
 
 	frg_len = skb->len - skb->data_len;
-	if (skb_shinfo(skb)->gso_type == SKB_GSO_UDPV4) {
+	if (skb_shinfo(skb)->gso_type == SKB_GSO_UDP) {
 		int ufo_size;
 
 		ufo_size = skb_shinfo(skb)->gso_size;
@@ -4008,7 +4008,7 @@ static int s2io_xmit(struct sk_buff *skb, struct net_device *dev)
 	txdp->Host_Control = (unsigned long) skb;
 	txdp->Control_1 |= TXD_BUFFER0_SIZE(frg_len);
-	if (skb_shinfo(skb)->gso_type == SKB_GSO_UDPV4)
+	if (skb_shinfo(skb)->gso_type == SKB_GSO_UDP)
 		txdp->Control_1 |= TXD_UFO_EN;
 
 	frg_cnt = skb_shinfo(skb)->nr_frags;
@@ -4023,12 +4023,12 @@ static int s2io_xmit(struct sk_buff *skb, struct net_device *dev)
 			    (sp->pdev, frag->page, frag->page_offset,
 			     frag->size, PCI_DMA_TODEVICE);
 		txdp->Control_1 = TXD_BUFFER0_SIZE(frag->size);
-		if (skb_shinfo(skb)->gso_type == SKB_GSO_UDPV4)
+		if (skb_shinfo(skb)->gso_type == SKB_GSO_UDP)
 			txdp->Control_1 |= TXD_UFO_EN;
 	}
 	txdp->Control_1 |= TXD_GATHER_CODE_LAST;
-	if (skb_shinfo(skb)->gso_type == SKB_GSO_UDPV4)
+	if (skb_shinfo(skb)->gso_type == SKB_GSO_UDP)
 		frg_cnt++; /* as Txd0 was used for inband header */
 
 	tx_fifo = mac_control->tx_FIFO_start[queue];
@@ -4042,7 +4042,7 @@ static int s2io_xmit(struct sk_buff *skb, struct net_device *dev)
 	if (mss)
 		val64 |= TX_FIFO_SPECIAL_FUNC;
 #endif
-	if (skb_shinfo(skb)->gso_type == SKB_GSO_UDPV4)
+	if (skb_shinfo(skb)->gso_type == SKB_GSO_UDP)
 		val64 |= TX_FIFO_SPECIAL_FUNC;
 	writeq(val64, &tx_fifo->List_Control);
@@ -7019,6 +7019,9 @@ s2io_init_nic(struct pci_dev *pdev, const struct pci_device_id *pre)
 		dev->features |= NETIF_F_HIGHDMA;
 #ifdef NETIF_F_TSO
 	dev->features |= NETIF_F_TSO;
+#endif
+#ifdef NETIF_F_TSO6
+	dev->features |= NETIF_F_TSO6;
 #endif
 	if (sp->device_type & XFRAME_II_DEVICE) {
 		dev->features |= NETIF_F_UFO;
...
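Note that the s2io LSO hunk above changes the comparison operator, not just the constant: gso_type is a bit mask, and once TCP_ECN_send() sets SKB_GSO_TCP_ECN unconditionally (see the tcp_output.c hunk below), an equality test against SKB_GSO_TCPV4 would no longer match an ECN-marked segment. A minimal illustration of why the mask test is needed, using the values from the skbuff.h hunk below:

	/* A TCPv4 segment with CWR set carries two gso_type bits: */
	unsigned int gso_type = SKB_GSO_TCPV4 | SKB_GSO_TCP_ECN;	/* 0x1 | 0x8 = 0x9 */

	(gso_type == SKB_GSO_TCPV4);			/* false: 0x9 != 0x1 */
	(gso_type & (SKB_GSO_TCPV4 | SKB_GSO_TCPV6));	/* nonzero: LSO path is taken */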
@@ -68,8 +68,8 @@
 #define DRV_MODULE_NAME		"tg3"
 #define PFX DRV_MODULE_NAME	": "
-#define DRV_MODULE_VERSION	"3.60"
-#define DRV_MODULE_RELDATE	"June 17, 2006"
+#define DRV_MODULE_VERSION	"3.61"
+#define DRV_MODULE_RELDATE	"June 29, 2006"
 
 #define TG3_DEF_MAC_MODE	0
 #define TG3_DEF_RX_MODE		0
@@ -3194,7 +3194,7 @@ static int tg3_vlan_rx(struct tg3 *tp, struct sk_buff *skb, u16 vlan_tag)
  */
 static int tg3_rx(struct tg3 *tp, int budget)
 {
-	u32 work_mask;
+	u32 work_mask, rx_std_posted = 0;
 	u32 sw_idx = tp->rx_rcb_ptr;
 	u16 hw_idx;
 	int received;
@@ -3221,6 +3221,7 @@ static int tg3_rx(struct tg3 *tp, int budget)
 						  mapping);
 			skb = tp->rx_std_buffers[desc_idx].skb;
 			post_ptr = &tp->rx_std_ptr;
+			rx_std_posted++;
 		} else if (opaque_key == RXD_OPAQUE_RING_JUMBO) {
 			dma_addr = pci_unmap_addr(&tp->rx_jumbo_buffers[desc_idx],
 						  mapping);
@@ -3308,6 +3309,15 @@ static int tg3_rx(struct tg3 *tp, int budget)
 next_pkt:
 		(*post_ptr)++;
+
+		if (unlikely(rx_std_posted >= tp->rx_std_max_post)) {
+			u32 idx = *post_ptr % TG3_RX_RING_SIZE;
+
+			tw32_rx_mbox(MAILBOX_RCV_STD_PROD_IDX +
+				     TG3_64BIT_REG_LOW, idx);
+			work_mask &= ~RXD_OPAQUE_RING_STD;
+			rx_std_posted = 0;
+		}
 next_pkt_nopost:
 		sw_idx++;
 		sw_idx %= TG3_RX_RCB_RING_SIZE(tp);
@@ -3869,6 +3879,40 @@ static int tg3_start_xmit(struct sk_buff *skb, struct net_device *dev)
 	return NETDEV_TX_OK;
 }
 
+#if TG3_TSO_SUPPORT != 0
+static int tg3_start_xmit_dma_bug(struct sk_buff *, struct net_device *);
+
+/* Use GSO to workaround a rare TSO bug that may be triggered when the
+ * TSO header is greater than 80 bytes.
+ */
+static int tg3_tso_bug(struct tg3 *tp, struct sk_buff *skb)
+{
+	struct sk_buff *segs, *nskb;
+
+	/* Estimate the number of fragments in the worst case */
+	if (unlikely(TX_BUFFS_AVAIL(tp) <= (skb_shinfo(skb)->gso_segs * 3))) {
+		netif_stop_queue(tp->dev);
+		return NETDEV_TX_BUSY;
+	}
+
+	segs = skb_gso_segment(skb, tp->dev->features & ~NETIF_F_TSO);
+	if (unlikely(IS_ERR(segs)))
+		goto tg3_tso_bug_end;
+
+	do {
+		nskb = segs;
+		segs = segs->next;
+		nskb->next = NULL;
+		tg3_start_xmit_dma_bug(nskb, tp->dev);
+	} while (segs);
+
+tg3_tso_bug_end:
+	dev_kfree_skb(skb);
+
+	return NETDEV_TX_OK;
+}
+#endif
+
 /* hard_start_xmit for devices that have the 4G bug and/or 40-bit bug and
  * support TG3_FLG2_HW_TSO_1 or firmware TSO only.
  */
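As a rough worked example of the queue-space guard in tg3_tso_bug() above: assuming a full 64 kB TSO request with an MSS of 1448 bytes (illustrative numbers, not from the driver), gso_segs = ceil(65536 / 1448) = 46, so the test requires more than 46 * 3 = 138 free tx descriptors before software segmentation is attempted; the factor of 3 appears to budget up to three descriptors per resulting segment in the worst case.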
@@ -3905,7 +3949,7 @@ static int tg3_start_xmit_dma_bug(struct sk_buff *skb, struct net_device *dev)
 	mss = 0;
 	if (skb->len > (tp->dev->mtu + ETH_HLEN) &&
 	    (mss = skb_shinfo(skb)->gso_size) != 0) {
-		int tcp_opt_len, ip_tcp_len;
+		int tcp_opt_len, ip_tcp_len, hdr_len;
 
 		if (skb_header_cloned(skb) &&
 		    pskb_expand_head(skb, 0, 0, GFP_ATOMIC)) {
@@ -3916,11 +3960,16 @@ static int tg3_start_xmit_dma_bug(struct sk_buff *skb, struct net_device *dev)
 		tcp_opt_len = ((skb->h.th->doff - 5) * 4);
 		ip_tcp_len = (skb->nh.iph->ihl * 4) + sizeof(struct tcphdr);
 
+		hdr_len = ip_tcp_len + tcp_opt_len;
+		if (unlikely((ETH_HLEN + hdr_len) > 80) &&
+			     (tp->tg3_flags2 & TG3_FLG2_HW_TSO_1_BUG))
+			return (tg3_tso_bug(tp, skb));
+
 		base_flags |= (TXD_FLAG_CPU_PRE_DMA |
 			       TXD_FLAG_CPU_POST_DMA);
 
 		skb->nh.iph->check = 0;
-		skb->nh.iph->tot_len = htons(mss + ip_tcp_len + tcp_opt_len);
+		skb->nh.iph->tot_len = htons(mss + hdr_len);
 		if (tp->tg3_flags2 & TG3_FLG2_HW_TSO) {
 			skb->h.th->check = 0;
 			base_flags &= ~TXD_FLAG_TCPUDP_CSUM;
@@ -5980,7 +6029,13 @@ static int tg3_reset_hw(struct tg3 *tp, int reset_phy)
 	}
 
 	/* Setup replenish threshold. */
-	tw32(RCVBDI_STD_THRESH, tp->rx_pending / 8);
+	val = tp->rx_pending / 8;
+	if (val == 0)
+		val = 1;
+	else if (val > tp->rx_std_max_post)
+		val = tp->rx_std_max_post;
+	tw32(RCVBDI_STD_THRESH, val);
 
 	/* Initialize TG3_BDINFO's at:
 	 * RCVDBDI_STD_BD:	standard eth size rx ring
@@ -6140,8 +6195,12 @@ static int tg3_reset_hw(struct tg3 *tp, int reset_phy)
 #endif
 
 	/* Receive/send statistics. */
-	if ((rdmac_mode & RDMAC_MODE_FIFO_SIZE_128) &&
-	    (tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE)) {
+	if (tp->tg3_flags2 & TG3_FLG2_5750_PLUS) {
+		val = tr32(RCVLPC_STATS_ENABLE);
+		val &= ~RCVLPC_STATSENAB_DACK_FIX;
+		tw32(RCVLPC_STATS_ENABLE, val);
+	} else if ((rdmac_mode & RDMAC_MODE_FIFO_SIZE_128) &&
+		   (tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE)) {
 		val = tr32(RCVLPC_STATS_ENABLE);
 		val &= ~RCVLPC_STATSENAB_LNGBRST_RFIX;
 		tw32(RCVLPC_STATS_ENABLE, val);
@@ -8737,6 +8796,9 @@ static void tg3_vlan_rx_register(struct net_device *dev, struct vlan_group *grp)
 {
 	struct tg3 *tp = netdev_priv(dev);
 
+	if (netif_running(dev))
+		tg3_netif_stop(tp);
+
 	tg3_full_lock(tp, 0);
 
 	tp->vlgrp = grp;
@@ -8745,16 +8807,25 @@ static void tg3_vlan_rx_register(struct net_device *dev, struct vlan_group *grp)
 	__tg3_set_rx_mode(dev);
 
 	tg3_full_unlock(tp);
+
+	if (netif_running(dev))
+		tg3_netif_start(tp);
 }
 
 static void tg3_vlan_rx_kill_vid(struct net_device *dev, unsigned short vid)
 {
 	struct tg3 *tp = netdev_priv(dev);
 
+	if (netif_running(dev))
+		tg3_netif_stop(tp);
+
 	tg3_full_lock(tp, 0);
 	if (tp->vlgrp)
 		tp->vlgrp->vlan_devices[vid] = NULL;
 	tg3_full_unlock(tp);
+
+	if (netif_running(dev))
+		tg3_netif_start(tp);
 }
 #endif
@@ -10159,8 +10230,14 @@ static int __devinit tg3_get_invariants(struct tg3 *tp)
 		    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787) {
 			tp->tg3_flags2 |= TG3_FLG2_HW_TSO_2;
 			tp->tg3_flags2 |= TG3_FLG2_1SHOT_MSI;
-		} else
-			tp->tg3_flags2 |= TG3_FLG2_HW_TSO_1;
+		} else {
+			tp->tg3_flags2 |= TG3_FLG2_HW_TSO_1 |
+					  TG3_FLG2_HW_TSO_1_BUG;
+			if (GET_ASIC_REV(tp->pci_chip_rev_id) ==
+				ASIC_REV_5750 &&
+			    tp->pci_chip_rev_id >= CHIPREV_ID_5750_C2)
+				tp->tg3_flags2 &= ~TG3_FLG2_HW_TSO_1_BUG;
+		}
 	}
 
 	if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5705 &&
@@ -10532,6 +10609,16 @@ static int __devinit tg3_get_invariants(struct tg3 *tp)
 	    (tp->tg3_flags & TG3_FLAG_PCIX_MODE) != 0)
 		tp->rx_offset = 0;
 
+	tp->rx_std_max_post = TG3_RX_RING_SIZE;
+
+	/* Increment the rx prod index on the rx std ring by at most
+	 * 8 for these chips to workaround hw errata.
+	 */
+	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750 ||
+	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752 ||
+	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755)
+		tp->rx_std_max_post = 8;
+
 	/* By default, disable wake-on-lan.  User can change this
 	 * using ETHTOOL_SWOL.
 	 */
...
@@ -125,6 +125,7 @@
 #define  CHIPREV_ID_5750_A0		 0x4000
 #define  CHIPREV_ID_5750_A1		 0x4001
 #define  CHIPREV_ID_5750_A3		 0x4003
+#define  CHIPREV_ID_5750_C2		 0x4202
 #define  CHIPREV_ID_5752_A0_HW		 0x5000
 #define  CHIPREV_ID_5752_A0		 0x6000
 #define  CHIPREV_ID_5752_A1		 0x6001
@@ -760,6 +761,7 @@
 #define  RCVLPC_STATSCTRL_ENABLE	 0x00000001
 #define  RCVLPC_STATSCTRL_FASTUPD	 0x00000002
 #define RCVLPC_STATS_ENABLE		0x00002018
+#define  RCVLPC_STATSENAB_DACK_FIX	 0x00040000
 #define  RCVLPC_STATSENAB_LNGBRST_RFIX	 0x00400000
 #define RCVLPC_STATS_INCMASK		0x0000201c
 /* 0x2020 --> 0x2100 unused */
@@ -2137,6 +2139,7 @@ struct tg3 {
 	struct tg3_rx_buffer_desc	*rx_std;
 	struct ring_info		*rx_std_buffers;
 	dma_addr_t			rx_std_mapping;
+	u32				rx_std_max_post;
 
 	struct tg3_rx_buffer_desc	*rx_jumbo;
 	struct ring_info		*rx_jumbo_buffers;
@@ -2191,7 +2194,7 @@ struct tg3 {
 #define TG3_FLAG_INIT_COMPLETE		0x80000000
 	u32				tg3_flags2;
 #define TG3_FLG2_RESTART_TIMER		0x00000001
-/* 0x00000002 available */
+#define TG3_FLG2_HW_TSO_1_BUG		0x00000002
 #define TG3_FLG2_NO_ETH_WIRE_SPEED	0x00000004
 #define TG3_FLG2_IS_5788		0x00000008
 #define TG3_FLG2_MAX_RXPEND_64		0x00000010
...
@@ -313,10 +313,12 @@ struct net_device
 
 	/* Segmentation offload features */
 #define NETIF_F_GSO_SHIFT	16
+#define NETIF_F_GSO_MASK	0xffff0000
 #define NETIF_F_TSO		(SKB_GSO_TCPV4 << NETIF_F_GSO_SHIFT)
-#define NETIF_F_UFO		(SKB_GSO_UDPV4 << NETIF_F_GSO_SHIFT)
+#define NETIF_F_UFO		(SKB_GSO_UDP << NETIF_F_GSO_SHIFT)
 #define NETIF_F_GSO_ROBUST	(SKB_GSO_DODGY << NETIF_F_GSO_SHIFT)
-#define NETIF_F_TSO_ECN		(SKB_GSO_TCPV4_ECN << NETIF_F_GSO_SHIFT)
+#define NETIF_F_TSO_ECN		(SKB_GSO_TCP_ECN << NETIF_F_GSO_SHIFT)
+#define NETIF_F_TSO6		(SKB_GSO_TCPV6 << NETIF_F_GSO_SHIFT)
 
 #define NETIF_F_GEN_CSUM	(NETIF_F_NO_CSUM | NETIF_F_HW_CSUM)
 #define NETIF_F_ALL_CSUM	(NETIF_F_IP_CSUM | NETIF_F_GEN_CSUM)
@@ -991,13 +993,18 @@ extern void dev_seq_stop(struct seq_file *seq, void *v);
 
 extern void linkwatch_run_queue(void);
 
-static inline int skb_gso_ok(struct sk_buff *skb, int features)
+static inline int net_gso_ok(int features, int gso_type)
 {
-	int feature = skb_shinfo(skb)->gso_size ?
-		      skb_shinfo(skb)->gso_type << NETIF_F_GSO_SHIFT : 0;
+	int feature = gso_type << NETIF_F_GSO_SHIFT;
 	return (features & feature) == feature;
 }
 
+static inline int skb_gso_ok(struct sk_buff *skb, int features)
+{
+	return net_gso_ok(features, skb_shinfo(skb)->gso_size ?
+				    skb_shinfo(skb)->gso_type : 0);
+}
+
 static inline int netif_needs_gso(struct net_device *dev, struct sk_buff *skb)
 {
 	return !skb_gso_ok(skb, dev->features);
...
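The netdevice.h hunks above are the heart of the generalisation: each SKB_GSO_* packet-type bit maps to a device feature bit 16 positions higher, so checking whether a device can offload a given skb reduces to a shift and a mask. A minimal sketch of the resulting correspondence (hexadecimal values computed from the definitions above, shown purely as illustration):

	/* SKB_GSO_TCPV4 = 1 << 0  -->  NETIF_F_TSO  = 0x00010000
	 * SKB_GSO_UDP   = 1 << 1  -->  NETIF_F_UFO  = 0x00020000
	 * SKB_GSO_TCPV6 = 1 << 4  -->  NETIF_F_TSO6 = 0x00100000
	 */
	int ok = net_gso_ok(dev->features, SKB_GSO_TCPV6 | SKB_GSO_TCP_ECN);
	/* nonzero only if the device advertises both NETIF_F_TSO6 and
	 * NETIF_F_TSO_ECN, since every shifted bit must be present. */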
@@ -171,13 +171,15 @@ enum {
 
 enum {
 	SKB_GSO_TCPV4 = 1 << 0,
-	SKB_GSO_UDPV4 = 1 << 1,
+	SKB_GSO_UDP = 1 << 1,
 
 	/* This indicates the skb is from an untrusted source. */
 	SKB_GSO_DODGY = 1 << 2,
 
 	/* This indicates the tcp segment has CWR set. */
-	SKB_GSO_TCPV4_ECN = 1 << 3,
+	SKB_GSO_TCP_ECN = 1 << 3,
+
+	SKB_GSO_TCPV6 = 1 << 4,
 };
 
 /**
...
@@ -146,7 +146,7 @@ static inline void ip6_dst_store(struct sock *sk, struct dst_entry *dst,
 	struct rt6_info *rt = (struct rt6_info *) dst;
 
 	write_lock(&sk->sk_dst_lock);
-	__sk_dst_set(sk, dst);
+	sk_setup_caps(sk, dst);
 	np->daddr_cache = daddr;
 	np->dst_cookie = rt->rt6i_node ? rt->rt6i_node->fn_sernum : 0;
 	write_unlock(&sk->sk_dst_lock);
...
@@ -50,11 +50,17 @@ struct inet6_protocol
 					struct inet6_skb_parm *opt,
 					int type, int code, int offset,
 					__u32 info);
+
+	struct sk_buff *(*gso_segment)(struct sk_buff *skb,
+				       int features);
+
 	unsigned int	flags;	/* INET6_PROTO_xxx */
 };
 
 #define INET6_PROTO_NOPOLICY	0x1
 #define INET6_PROTO_FINAL	0x2
+/* This should be set for any extension header which is compatible with GSO. */
+#define INET6_PROTO_GSO_EXTHDR	0x4
 #endif
 
 /* This is used to register socket interfaces for IP protocols.  */
...
@@ -140,6 +140,7 @@ struct sock_common {
  *	@sk_flags: %SO_LINGER (l_onoff), %SO_BROADCAST, %SO_KEEPALIVE, %SO_OOBINLINE settings
  *	@sk_no_check: %SO_NO_CHECK setting, wether or not checkup packets
  *	@sk_route_caps: route capabilities (e.g. %NETIF_F_TSO)
+ *	@sk_gso_type: GSO type (e.g. %SKB_GSO_TCPV4)
  *	@sk_lingertime: %SO_LINGER l_linger setting
  *	@sk_backlog: always used with the per-socket spinlock held
  *	@sk_callback_lock: used with the callbacks in the end of this struct
@@ -211,6 +212,7 @@ struct sock {
 	gfp_t			sk_allocation;
 	int			sk_sndbuf;
 	int			sk_route_caps;
+	int			sk_gso_type;
 	int			sk_rcvlowat;
 	unsigned long 		sk_flags;
 	unsigned long	        sk_lingertime;
@@ -1025,15 +1027,20 @@ extern struct dst_entry *__sk_dst_check(struct sock *sk, u32 cookie);
 
 extern struct dst_entry *sk_dst_check(struct sock *sk, u32 cookie);
 
+static inline int sk_can_gso(const struct sock *sk)
+{
+	return net_gso_ok(sk->sk_route_caps, sk->sk_gso_type);
+}
+
 static inline void sk_setup_caps(struct sock *sk, struct dst_entry *dst)
 {
 	__sk_dst_set(sk, dst);
 	sk->sk_route_caps = dst->dev->features;
 	if (sk->sk_route_caps & NETIF_F_GSO)
-		sk->sk_route_caps |= NETIF_F_TSO;
-	if (sk->sk_route_caps & NETIF_F_TSO) {
+		sk->sk_route_caps |= NETIF_F_GSO_MASK;
+	if (sk_can_gso(sk)) {
 		if (dst->header_len)
-			sk->sk_route_caps &= ~NETIF_F_TSO;
+			sk->sk_route_caps &= ~NETIF_F_GSO_MASK;
 		else
 			sk->sk_route_caps |= NETIF_F_SG | NETIF_F_HW_CSUM;
 	}
...
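Taken together with the tcp_ipv4.c and tcp_ipv6.c hunks below, the socket-side flow becomes: the protocol stamps sk_gso_type once when the route is committed, sk_setup_caps() copies the device features into sk_route_caps (widening software NETIF_F_GSO to the whole NETIF_F_GSO_MASK), and sk_can_gso() reduces "may this socket build oversized packets?" to a single mask test. A hedged sketch of that sequence for a TCPv6 socket:

	sk->sk_gso_type = SKB_GSO_TCPV6;	/* stamped by tcp_v6_connect() */
	sk_setup_caps(sk, dst);			/* sk_route_caps <- dst->dev->features */
	if (sk_can_gso(sk))			/* net_gso_ok(caps, SKB_GSO_TCPV6) */
		/* TCP may build segments larger than one MSS */;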
@@ -751,7 +751,7 @@ static inline int tcp_is_cwnd_limited(const struct sock *sk, u32 in_flight)
 	if (in_flight >= tp->snd_cwnd)
 		return 1;
 
-	if (!(sk->sk_route_caps & NETIF_F_TSO))
+	if (!sk_can_gso(sk))
 		return 0;
 
 	left = tp->snd_cwnd - in_flight;
@@ -55,9 +55,7 @@ static inline void TCP_ECN_send(struct sock *sk, struct tcp_sock *tp,
 		if (tp->ecn_flags&TCP_ECN_QUEUE_CWR) {
 			tp->ecn_flags &= ~TCP_ECN_QUEUE_CWR;
 			skb->h.th->cwr = 1;
-			if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV4)
-				skb_shinfo(skb)->gso_type |=
-					SKB_GSO_TCPV4_ECN;
+			skb_shinfo(skb)->gso_type |= SKB_GSO_TCP_ECN;
 		}
 	} else {
 		/* ACK or retransmitted segment: clear ECT|CE */
...
@@ -743,7 +743,7 @@ static inline int ip_ufo_append_data(struct sock *sk,
 	if (!err) {
 		/* specify the length of each IP datagram fragment*/
 		skb_shinfo(skb)->gso_size = mtu - fragheaderlen;
-		skb_shinfo(skb)->gso_type = SKB_GSO_UDPV4;
+		skb_shinfo(skb)->gso_type = SKB_GSO_UDP;
 		__skb_queue_tail(&sk->sk_write_queue, skb);
 
 		return 0;
@@ -1088,7 +1088,7 @@ ssize_t	ip_append_page(struct sock *sk, struct page *page,
 	if ((sk->sk_protocol == IPPROTO_UDP) &&
 	    (rt->u.dst.dev->features & NETIF_F_UFO)) {
 		skb_shinfo(skb)->gso_size = mtu - fragheaderlen;
-		skb_shinfo(skb)->gso_type = SKB_GSO_UDPV4;
+		skb_shinfo(skb)->gso_type = SKB_GSO_UDP;
 	}
...
@@ -254,7 +254,7 @@ static int do_basic_checks(struct ip_conntrack *conntrack,
 	}
 
 	DEBUGP("Basic checks passed\n");
-	return 0;
+	return count == 0;
 }
 
 static int new_state(enum ip_conntrack_dir dir,
...
@@ -642,7 +642,7 @@ static inline int select_size(struct sock *sk, struct tcp_sock *tp)
 	int tmp = tp->mss_cache;
 
 	if (sk->sk_route_caps & NETIF_F_SG) {
-		if (sk->sk_route_caps & NETIF_F_TSO)
+		if (sk_can_gso(sk))
 			tmp = 0;
 		else {
 			int pgbreak = SKB_MAX_HEAD(MAX_TCP_HEADER);
@@ -2165,13 +2165,19 @@ struct sk_buff *tcp_tso_segment(struct sk_buff *skb, int features)
 	if (!pskb_may_pull(skb, thlen))
 		goto out;
 
-	segs = NULL;
-	if (skb_gso_ok(skb, features | NETIF_F_GSO_ROBUST))
-		goto out;
-
 	oldlen = (u16)~skb->len;
 	__skb_pull(skb, thlen);
 
+	if (skb_gso_ok(skb, features | NETIF_F_GSO_ROBUST)) {
+		/* Packet is from an untrusted source, reset gso_segs. */
+		int mss = skb_shinfo(skb)->gso_size;
+
+		skb_shinfo(skb)->gso_segs = (skb->len + mss - 1) / mss;
+
+		segs = NULL;
+		goto out;
+	}
+
 	segs = skb_segment(skb, features);
 	if (IS_ERR(segs))
 		goto out;
@@ -2208,6 +2214,7 @@ struct sk_buff *tcp_tso_segment(struct sk_buff *skb, int features)
 out:
 	return segs;
 }
+EXPORT_SYMBOL(tcp_tso_segment);
 
 extern void __skb_cb_too_small_for_tcp(int, int);
 extern struct tcp_congestion_ops tcp_reno;
...
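A short worked example of the gso_segs reset above: the TCP header has already been pulled at this point, so skb->len covers payload only. With 4000 bytes of payload and gso_size = 1448 (both illustrative numbers), gso_segs = (4000 + 1448 - 1) / 1448 = 3, a ceiling division that overwrites whatever count the untrusted (SKB_GSO_DODGY) source supplied.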
@@ -241,6 +241,7 @@ int tcp_v4_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len)
 		goto failure;
 
 	/* OK, now commit destination to socket.  */
+	sk->sk_gso_type = SKB_GSO_TCPV4;
 	sk_setup_caps(sk, &rt->u.dst);
 
 	if (!tp->write_seq)
@@ -883,6 +884,7 @@ struct sock *tcp_v4_syn_recv_sock(struct sock *sk, struct sk_buff *skb,
 	if (!newsk)
 		goto exit;
 
+	newsk->sk_gso_type = SKB_GSO_TCPV4;
 	sk_setup_caps(newsk, dst);
 
 	newtp = tcp_sk(newsk);
...
@@ -510,8 +510,7 @@ static void tcp_queue_skb(struct sock *sk, struct sk_buff *skb)
 
 static void tcp_set_skb_tso_segs(struct sock *sk, struct sk_buff *skb, unsigned int mss_now)
 {
-	if (skb->len <= mss_now ||
-	    !(sk->sk_route_caps & NETIF_F_TSO)) {
+	if (skb->len <= mss_now || !sk_can_gso(sk)) {
 		/* Avoid the costly divide in the normal
 		 * non-TSO case.
 		 */
@@ -525,7 +524,7 @@ static void tcp_set_skb_tso_segs(struct sock *sk, struct sk_buff *skb, unsigned
 		factor /= mss_now;
 		skb_shinfo(skb)->gso_segs = factor;
 		skb_shinfo(skb)->gso_size = mss_now;
-		skb_shinfo(skb)->gso_type = SKB_GSO_TCPV4;
+		skb_shinfo(skb)->gso_type = sk->sk_gso_type;
 	}
 }
 
@@ -824,9 +823,7 @@ unsigned int tcp_current_mss(struct sock *sk, int large_allowed)
 
 	mss_now = tp->mss_cache;
 
-	if (large_allowed &&
-	    (sk->sk_route_caps & NETIF_F_TSO) &&
-	    !tp->urg_mode)
+	if (large_allowed && sk_can_gso(sk) && !tp->urg_mode)
 		doing_tso = 1;
 
 	if (dst) {
...
@@ -659,8 +659,6 @@ int inet6_sk_rebuild_header(struct sock *sk)
 		}
 
 		ip6_dst_store(sk, dst, NULL);
-		sk->sk_route_caps = dst->dev->features &
-			~(NETIF_F_IP_CSUM | NETIF_F_TSO);
 	}
 
 	return 0;
...
@@ -179,7 +179,7 @@ static int ipv6_destopt_rcv(struct sk_buff **skbp)
 
 static struct inet6_protocol destopt_protocol = {
 	.handler	=	ipv6_destopt_rcv,
-	.flags		=	INET6_PROTO_NOPOLICY,
+	.flags		=	INET6_PROTO_NOPOLICY | INET6_PROTO_GSO_EXTHDR,
 };
 
 void __init ipv6_destopt_init(void)
@@ -340,7 +340,7 @@ static int ipv6_rthdr_rcv(struct sk_buff **skbp)
 
 static struct inet6_protocol rthdr_protocol = {
 	.handler	=	ipv6_rthdr_rcv,
-	.flags		=	INET6_PROTO_NOPOLICY,
+	.flags		=	INET6_PROTO_NOPOLICY | INET6_PROTO_GSO_EXTHDR,
 };
 
 void __init ipv6_rthdr_init(void)
...
@@ -186,8 +186,6 @@ int inet6_csk_xmit(struct sk_buff *skb, int ipfragok)
 		}
 
 		ip6_dst_store(sk, dst, NULL);
-		sk->sk_route_caps = dst->dev->features &
-			~(NETIF_F_IP_CSUM | NETIF_F_TSO);
 	}
 
 	skb->dst = dst_clone(dst);
...
@@ -84,14 +84,9 @@ int ipv6_rcv(struct sk_buff *skb, struct net_device *dev, struct packet_type *pt
 	 */
 	IP6CB(skb)->iif = skb->dst ? ((struct rt6_info *)skb->dst)->rt6i_idev->dev->ifindex : dev->ifindex;
 
-	if (skb->len < sizeof(struct ipv6hdr))
+	if (unlikely(!pskb_may_pull(skb, sizeof(*hdr))))
 		goto err;
 
-	if (!pskb_may_pull(skb, sizeof(struct ipv6hdr))) {
-		IP6_INC_STATS_BH(IPSTATS_MIB_INHDRERRORS);
-		goto drop;
-	}
-
 	hdr = skb->nh.ipv6h;
 
 	if (hdr->version != 6)
...
@@ -229,7 +229,7 @@ int ip6_xmit(struct sock *sk, struct sk_buff *skb, struct flowi *fl,
 	skb->priority = sk->sk_priority;
 
 	mtu = dst_mtu(dst);
-	if ((skb->len <= mtu) || ipfragok) {
+	if ((skb->len <= mtu) || ipfragok || skb_shinfo(skb)->gso_size) {
 		IP6_INC_STATS(IPSTATS_MIB_OUTREQUESTS);
 		return NF_HOOK(PF_INET6, NF_IP6_LOCAL_OUT, skb, NULL, dst->dev,
 				dst_output);
@@ -834,7 +834,7 @@ static inline int ip6_ufo_append_data(struct sock *sk,
 		/* specify the length of each IP datagram fragment*/
 		skb_shinfo(skb)->gso_size = mtu - fragheaderlen -
 					    sizeof(struct frag_hdr);
-		skb_shinfo(skb)->gso_type = SKB_GSO_UDPV4;
+		skb_shinfo(skb)->gso_type = SKB_GSO_UDP;
 		ipv6_select_ident(skb, &fhdr);
 		skb_shinfo(skb)->ip6_frag_id = fhdr.identification;
 		__skb_queue_tail(&sk->sk_write_queue, skb);
...
@@ -57,9 +57,71 @@
 
 DEFINE_SNMP_STAT(struct ipstats_mib, ipv6_statistics) __read_mostly;
 
+static struct sk_buff *ipv6_gso_segment(struct sk_buff *skb, int features)
+{
+	struct sk_buff *segs = ERR_PTR(-EINVAL);
+	struct ipv6hdr *ipv6h;
+	struct inet6_protocol *ops;
+	int proto;
+
+	if (unlikely(!pskb_may_pull(skb, sizeof(*ipv6h))))
+		goto out;
+
+	ipv6h = skb->nh.ipv6h;
+	proto = ipv6h->nexthdr;
+	__skb_pull(skb, sizeof(*ipv6h));
+
+	rcu_read_lock();
+	for (;;) {
+		struct ipv6_opt_hdr *opth;
+		int len;
+
+		if (proto != NEXTHDR_HOP) {
+			ops = rcu_dereference(inet6_protos[proto]);
+
+			if (unlikely(!ops))
+				goto unlock;
+
+			if (!(ops->flags & INET6_PROTO_GSO_EXTHDR))
+				break;
+		}
+
+		if (unlikely(!pskb_may_pull(skb, 8)))
+			goto unlock;
+
+		opth = (void *)skb->data;
+		len = opth->hdrlen * 8 + 8;
+
+		if (unlikely(!pskb_may_pull(skb, len)))
+			goto unlock;
+
+		proto = opth->nexthdr;
+		__skb_pull(skb, len);
+	}
+
+	skb->h.raw = skb->data;
+	if (likely(ops->gso_segment))
+		segs = ops->gso_segment(skb, features);
+
+unlock:
+	rcu_read_unlock();
+
+	if (unlikely(IS_ERR(segs)))
+		goto out;
+
+	for (skb = segs; skb; skb = skb->next) {
+		ipv6h = skb->nh.ipv6h;
+		ipv6h->payload_len = htons(skb->len - skb->mac_len);
+	}
+
+out:
+	return segs;
+}
+
 static struct packet_type ipv6_packet_type = {
 	.type = __constant_htons(ETH_P_IPV6),
 	.func = ipv6_rcv,
+	.gso_segment = ipv6_gso_segment,
 };
 
 struct ip6_ra_chain *ip6_ra_chain;
...
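The extension-header walk in ipv6_gso_segment() above relies on the generic IPv6 option-header layout, where hdrlen counts 8-octet units excluding the first 8 octets, hence len = opth->hdrlen * 8 + 8. For instance, a routing header with hdrlen = 2 spans 2 * 8 + 8 = 24 bytes: pskb_may_pull() is first called for the fixed 8 bytes needed to read hdrlen, then for the full 24 before skipping to the next header.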
@@ -269,9 +269,8 @@ static int tcp_v6_connect(struct sock *sk, struct sockaddr *uaddr,
 		ipv6_addr_copy(&np->saddr, saddr);
 	inet->rcv_saddr = LOOPBACK4_IPV6;
 
+	sk->sk_gso_type = SKB_GSO_TCPV6;
 	ip6_dst_store(sk, dst, NULL);
-	sk->sk_route_caps = dst->dev->features &
-		~(NETIF_F_IP_CSUM | NETIF_F_TSO);
 
 	icsk->icsk_ext_hdr_len = 0;
 	if (np->opt)
@@ -929,9 +928,8 @@ static struct sock * tcp_v6_syn_recv_sock(struct sock *sk, struct sk_buff *skb,
 	 * comment in that function for the gory details. -acme
 	 */
 
+	sk->sk_gso_type = SKB_GSO_TCPV6;
 	ip6_dst_store(newsk, dst, NULL);
-	newsk->sk_route_caps = dst->dev->features &
-		~(NETIF_F_IP_CSUM | NETIF_F_TSO);
 
 	newtcp6sk = (struct tcp6_sock *)newsk;
 	inet_sk(newsk)->pinet6 = &newtcp6sk->inet6;
@@ -1605,6 +1603,7 @@ struct proto tcpv6_prot = {
 static struct inet6_protocol tcpv6_protocol = {
 	.handler	=	tcp_v6_rcv,
 	.err_handler	=	tcp_v6_err,
+	.gso_segment	=	tcp_tso_segment,
 	.flags		=	INET6_PROTO_NOPOLICY|INET6_PROTO_FINAL,
 };
...
@@ -261,7 +261,7 @@ static int do_basic_checks(struct nf_conn *conntrack,
 	}
 
 	DEBUGP("Basic checks passed\n");
-	return 0;
+	return count == 0;
 }
 
 static int new_state(enum ip_conntrack_dir dir,
...