Kirill Smelkov / linux / Commits

Commit a4657141, authored Aug 09, 2006 by Greg Kroah-Hartman

    Merge gregkh@master.kernel.org:/pub/scm/linux/kernel/git/davem/net-2.6

Parents: 54f58d6c 7c91767a

Showing 10 changed files with 72 additions and 41 deletions:
    drivers/net/tg3.c        +28  -23
    drivers/net/tg3.h         +3   -5
    include/linux/skbuff.h    +2   -2
    net/core/dst.c            +1   -2
    net/core/pktgen.c         +4   -0
    net/core/rtnetlink.c     +14   -1
    net/core/skbuff.c         +3   -1
    net/ipv4/route.c          +1   -1
    net/ipv4/tcp_output.c     +9   -3
    net/ipx/af_ipx.c          +7   -3
drivers/net/tg3.c

@@ -68,8 +68,8 @@
 #define DRV_MODULE_NAME		"tg3"
 #define PFX DRV_MODULE_NAME	": "
-#define DRV_MODULE_VERSION	"3.64"
-#define DRV_MODULE_RELDATE	"July 31, 2006"
+#define DRV_MODULE_VERSION	"3.65"
+#define DRV_MODULE_RELDATE	"August 07, 2006"
 #define TG3_DEF_MAC_MODE	0
 #define TG3_DEF_RX_MODE		0
@@ -123,9 +123,6 @@
 					 TG3_RX_RCB_RING_SIZE(tp))
 #define TG3_TX_RING_BYTES	(sizeof(struct tg3_tx_buffer_desc) * \
 				 TG3_TX_RING_SIZE)
-#define TX_BUFFS_AVAIL(TP)						\
-	((TP)->tx_pending -						\
-	 (((TP)->tx_prod - (TP)->tx_cons) & (TG3_TX_RING_SIZE - 1)))
 #define NEXT_TX(N)		(((N) + 1) & (TG3_TX_RING_SIZE - 1))
 #define RX_PKT_BUF_SZ		(1536 + tp->rx_offset + 64)
@@ -2987,6 +2984,13 @@ static void tg3_tx_recover(struct tg3 *tp)
 	spin_unlock(&tp->lock);
 }
 
+static inline u32 tg3_tx_avail(struct tg3 *tp)
+{
+	smp_mb();
+	return (tp->tx_pending -
+		((tp->tx_prod - tp->tx_cons) & (TG3_TX_RING_SIZE - 1)));
+}
+
 /* Tigon3 never reports partial packet sends.  So we do not
  * need special logic to handle SKBs that have not had all
  * of their frags sent yet, like SunGEM does.
@@ -3038,12 +3042,20 @@ static void tg3_tx(struct tg3 *tp)
 	tp->tx_cons = sw_idx;
 
-	if (unlikely(netif_queue_stopped(tp->dev))) {
-		spin_lock(&tp->tx_lock);
+	/* Need to make the tx_cons update visible to tg3_start_xmit()
+	 * before checking for netif_queue_stopped().  Without the
+	 * memory barrier, there is a small possibility that tg3_start_xmit()
+	 * will miss it and cause the queue to be stopped forever.
+	 */
+	smp_mb();
+
+	if (unlikely(netif_queue_stopped(tp->dev) &&
+		     (tg3_tx_avail(tp) > TG3_TX_WAKEUP_THRESH))) {
+		netif_tx_lock(tp->dev);
 		if (netif_queue_stopped(tp->dev) &&
-		    (TX_BUFFS_AVAIL(tp) > TG3_TX_WAKEUP_THRESH))
+		    (tg3_tx_avail(tp) > TG3_TX_WAKEUP_THRESH))
 			netif_wake_queue(tp->dev);
-		spin_unlock(&tp->tx_lock);
+		netif_tx_unlock(tp->dev);
 	}
 }
@@ -3101,7 +3113,6 @@ static int tg3_alloc_rx_skb(struct tg3 *tp, u32 opaque_key,
 	if (skb == NULL)
 		return -ENOMEM;
 
-	skb->dev = tp->dev;
 	skb_reserve(skb, tp->rx_offset);
 
 	mapping = pci_map_single(tp->pdev, skb->data,
@@ -3274,7 +3285,6 @@ static int tg3_rx(struct tg3 *tp, int budget)
 			if (copy_skb == NULL)
 				goto drop_it_no_recycle;
 
-			copy_skb->dev = tp->dev;
 			skb_reserve(copy_skb, 2);
 			skb_put(copy_skb, len);
 			pci_dma_sync_single_for_cpu(tp->pdev, dma_addr, len, PCI_DMA_FROMDEVICE);
@@ -3797,7 +3807,7 @@ static int tg3_start_xmit(struct sk_buff *skb, struct net_device *dev)
 	 * interrupt.  Furthermore, IRQ processing runs lockless so we have
 	 * no IRQ context deadlocks to worry about either.  Rejoice!
 	 */
-	if (unlikely(TX_BUFFS_AVAIL(tp) <= (skb_shinfo(skb)->nr_frags + 1))) {
+	if (unlikely(tg3_tx_avail(tp) <= (skb_shinfo(skb)->nr_frags + 1))) {
 		if (!netif_queue_stopped(dev)) {
 			netif_stop_queue(dev);
@@ -3893,12 +3903,10 @@ static int tg3_start_xmit(struct sk_buff *skb, struct net_device *dev)
 	tw32_tx_mbox((MAILBOX_SNDHOST_PROD_IDX_0 + TG3_64BIT_REG_LOW), entry);
 
 	tp->tx_prod = entry;
-	if (unlikely(TX_BUFFS_AVAIL(tp) <= (MAX_SKB_FRAGS + 1))) {
-		spin_lock(&tp->tx_lock);
+	if (unlikely(tg3_tx_avail(tp) <= (MAX_SKB_FRAGS + 1))) {
 		netif_stop_queue(dev);
-		if (TX_BUFFS_AVAIL(tp) > TG3_TX_WAKEUP_THRESH)
+		if (tg3_tx_avail(tp) > TG3_TX_WAKEUP_THRESH)
 			netif_wake_queue(tp->dev);
-		spin_unlock(&tp->tx_lock);
 	}
 
 out_unlock:
@@ -3920,7 +3928,7 @@ static int tg3_tso_bug(struct tg3 *tp, struct sk_buff *skb)
 	struct sk_buff *segs, *nskb;
 
 	/* Estimate the number of fragments in the worst case */
-	if (unlikely(TX_BUFFS_AVAIL(tp) <= (skb_shinfo(skb)->gso_segs * 3))) {
+	if (unlikely(tg3_tx_avail(tp) <= (skb_shinfo(skb)->gso_segs * 3))) {
 		netif_stop_queue(tp->dev);
 		return NETDEV_TX_BUSY;
 	}
@@ -3960,7 +3968,7 @@ static int tg3_start_xmit_dma_bug(struct sk_buff *skb, struct net_device *dev)
 	 * interrupt.  Furthermore, IRQ processing runs lockless so we have
 	 * no IRQ context deadlocks to worry about either.  Rejoice!
 	 */
-	if (unlikely(TX_BUFFS_AVAIL(tp) <= (skb_shinfo(skb)->nr_frags + 1))) {
+	if (unlikely(tg3_tx_avail(tp) <= (skb_shinfo(skb)->nr_frags + 1))) {
 		if (!netif_queue_stopped(dev)) {
 			netif_stop_queue(dev);
@@ -4110,12 +4118,10 @@ static int tg3_start_xmit_dma_bug(struct sk_buff *skb, struct net_device *dev)
 	tw32_tx_mbox((MAILBOX_SNDHOST_PROD_IDX_0 + TG3_64BIT_REG_LOW), entry);
 
 	tp->tx_prod = entry;
-	if (unlikely(TX_BUFFS_AVAIL(tp) <= (MAX_SKB_FRAGS + 1))) {
-		spin_lock(&tp->tx_lock);
+	if (unlikely(tg3_tx_avail(tp) <= (MAX_SKB_FRAGS + 1))) {
 		netif_stop_queue(dev);
-		if (TX_BUFFS_AVAIL(tp) > TG3_TX_WAKEUP_THRESH)
+		if (tg3_tx_avail(tp) > TG3_TX_WAKEUP_THRESH)
 			netif_wake_queue(tp->dev);
-		spin_unlock(&tp->tx_lock);
 	}
 
 out_unlock:
@@ -11474,7 +11480,6 @@ static int __devinit tg3_init_one(struct pci_dev *pdev,
 		tp->grc_mode |= GRC_MODE_BSWAP_NONFRM_DATA;
 #endif
 	spin_lock_init(&tp->lock);
-	spin_lock_init(&tp->tx_lock);
 	spin_lock_init(&tp->indirect_lock);
 
 	INIT_WORK(&tp->reset_task, tg3_reset_task, tp);
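The tg3 change replaces the TX_BUFFS_AVAIL() macro and the driver-private tx_lock with tg3_tx_avail() plus memory barriers: tg3_tx() publishes the new tp->tx_cons and only then re-checks the stopped queue, while tg3_start_xmit() stops the queue and then re-checks availability, so at least one side always observes the other's update and the queue cannot stay stopped forever. The availability arithmetic itself is plain power-of-two ring math; a minimal userspace sketch of that formula (illustrative names, not part of the commit):

#include <assert.h>
#include <stdint.h>

#define RING_SIZE 512u                  /* power of two, like TG3_TX_RING_SIZE */

/* Free descriptors, tolerating wraparound of the 32-bit indices -- the same
 * formula tg3_tx_avail() uses, minus the smp_mb() that orders it against the
 * producer/consumer updates. */
static uint32_t tx_avail(uint32_t pending, uint32_t prod, uint32_t cons)
{
	return pending - ((prod - cons) & (RING_SIZE - 1));
}

int main(void)
{
	uint32_t pending = RING_SIZE - 1;   /* ring configured for 511 pending */

	assert(tx_avail(pending, 0, 0) == pending);                  /* empty ring    */
	assert(tx_avail(pending, 100, 40) == pending - 60);          /* 60 in flight  */
	assert(tx_avail(pending, 5, RING_SIZE - 5) == pending - 10); /* wrapped index */
	return 0;
}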
drivers/net/tg3.h

@@ -2079,9 +2079,9 @@ struct tg3 {
 	 * lock: Held during reset, PHY access, timer, and when
 	 *       updating tg3_flags and tg3_flags2.
 	 *
-	 * tx_lock: Held during tg3_start_xmit and tg3_tx only
-	 *          when calling netif_[start|stop]_queue.
-	 *          tg3_start_xmit is protected by netif_tx_lock.
+	 * netif_tx_lock: Held during tg3_start_xmit. tg3_tx holds
+	 *                netif_tx_lock when it needs to call
+	 *                netif_wake_queue.
 	 *
 	 * Both of these locks are to be held with BH safety.
 	 *
@@ -2118,8 +2118,6 @@ struct tg3 {
 	u32				tx_cons;
 	u32				tx_pending;
 
-	spinlock_t			tx_lock;
-
 	struct tg3_tx_buffer_desc	*tx_ring;
 	struct tx_ring_info		*tx_buffers;
 	dma_addr_t			tx_desc_mapping;
include/linux/skbuff.h

@@ -1081,7 +1081,7 @@ static inline void __skb_queue_purge(struct sk_buff_head *list)
  *	the headroom they think they need without accounting for the
  *	built in space. The built in space is used for optimisations.
  *
- *	%NULL is returned in there is no free memory.
+ *	%NULL is returned if there is no free memory.
  */
 static inline struct sk_buff *__dev_alloc_skb(unsigned int length,
 					      gfp_t gfp_mask)
@@ -1101,7 +1101,7 @@ static inline struct sk_buff *__dev_alloc_skb(unsigned int length,
  *	the headroom they think they need without accounting for the
  *	built in space. The built in space is used for optimisations.
  *
- *	%NULL is returned in there is no free memory. Although this function
+ *	%NULL is returned if there is no free memory. Although this function
  *	allocates memory it can be called from an interrupt.
  */
 static inline struct sk_buff *dev_alloc_skb(unsigned int length)
net/core/dst.c

@@ -95,12 +95,11 @@ static void dst_run_gc(unsigned long dummy)
 		dst_gc_timer_inc = DST_GC_INC;
 		dst_gc_timer_expires = DST_GC_MIN;
 	}
-	dst_gc_timer.expires = jiffies + dst_gc_timer_expires;
 #if RT_CACHE_DEBUG >= 2
 	printk("dst_total: %d/%d %ld\n",
 	       atomic_read(&dst_total), delayed,  dst_gc_timer_expires);
 #endif
-	add_timer(&dst_gc_timer);
+	mod_timer(&dst_gc_timer, jiffies + dst_gc_timer_expires);
 
 out:
 	spin_unlock(&dst_lock);
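The dst.c hunk folds the "set expires, then add_timer()" pair into a single mod_timer() call. mod_timer() is safe to call whether or not the timer is already pending, whereas add_timer() on a pending timer is a bug, so this closes the window where dst_gc_timer could be armed twice. A kernel-style sketch of the two forms (hypothetical helper names, not from the commit):

#include <linux/jiffies.h>
#include <linux/timer.h>

/* Old pattern: only valid when the timer is known not to be pending. */
static void arm_gc_timer_old(struct timer_list *t, unsigned long delay)
{
	t->expires = jiffies + delay;
	add_timer(t);
}

/* New pattern: mod_timer() (re)arms the timer in one call and is safe
 * whether or not it is already pending. */
static void arm_gc_timer_new(struct timer_list *t, unsigned long delay)
{
	mod_timer(t, jiffies + delay);
}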
net/core/pktgen.c

@@ -2149,6 +2149,8 @@ static struct sk_buff *fill_packet_ipv4(struct net_device *odev,
 	skb->mac.raw = ((u8 *)iph) - 14 - pkt_dev->nr_labels*sizeof(u32);
 	skb->dev = odev;
 	skb->pkt_type = PACKET_HOST;
+	skb->nh.iph = iph;
+	skb->h.uh = udph;
 
 	if (pkt_dev->nfrags <= 0)
 		pgh = (struct pktgen_hdr *)skb_put(skb, datalen);
@@ -2460,6 +2462,8 @@ static struct sk_buff *fill_packet_ipv6(struct net_device *odev,
 	skb->protocol = protocol;
 	skb->dev = odev;
 	skb->pkt_type = PACKET_HOST;
+	skb->nh.ipv6h = iph;
+	skb->h.uh = udph;
 
 	if (pkt_dev->nfrags <= 0)
 		pgh = (struct pktgen_hdr *)skb_put(skb, datalen);
net/core/rtnetlink.c

@@ -394,6 +394,9 @@ static int do_setlink(struct sk_buff *skb, struct nlmsghdr *nlh, void *arg)
 	}
 
 	if (ida[IFLA_ADDRESS - 1]) {
+		struct sockaddr *sa;
+		int len;
+
 		if (!dev->set_mac_address) {
 			err = -EOPNOTSUPP;
 			goto out;
@@ -405,7 +408,17 @@ static int do_setlink(struct sk_buff *skb, struct nlmsghdr *nlh, void *arg)
 		if (ida[IFLA_ADDRESS - 1]->rta_len != RTA_LENGTH(dev->addr_len))
 			goto out;
 
-		err = dev->set_mac_address(dev, RTA_DATA(ida[IFLA_ADDRESS - 1]));
+		len = sizeof(sa_family_t) + dev->addr_len;
+		sa = kmalloc(len, GFP_KERNEL);
+		if (!sa) {
+			err = -ENOMEM;
+			goto out;
+		}
+		sa->sa_family = dev->type;
+		memcpy(sa->sa_data, RTA_DATA(ida[IFLA_ADDRESS - 1]),
+		       dev->addr_len);
+		err = dev->set_mac_address(dev, sa);
+		kfree(sa);
 		if (err)
 			goto out;
 		send_addr_notify = 1;
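The rtnetlink fix matters because dev->set_mac_address() takes a struct sockaddr: drivers copy the hardware address out of sa->sa_data. The old code handed RTA_DATA() (the raw MAC bytes from IFLA_ADDRESS) straight to the driver, so the first bytes of the address landed where the family field lives and the driver read the address from the wrong offset, past the end of the attribute. The new code builds a properly laid out sockaddr first; a small userspace sketch of that layout (illustrative values, not from the commit):

#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <sys/socket.h>

int main(void)
{
	const unsigned char mac[6] = { 0x00, 0x11, 0x22, 0x33, 0x44, 0x55 };
	const unsigned int addr_len = sizeof(mac);
	const unsigned short dev_type = 1;          /* ARPHRD_ETHER */

	/* Mirror of what do_setlink() now does: allocate room for the family
	 * field plus the device's address length, then fill both. */
	size_t len = sizeof(sa_family_t) + addr_len;
	struct sockaddr *sa = malloc(len < sizeof(*sa) ? sizeof(*sa) : len);

	if (!sa)
		return 1;
	sa->sa_family = dev_type;
	memcpy(sa->sa_data, mac, addr_len);

	printf("family=%u first octet=%02x\n", (unsigned)sa->sa_family,
	       (unsigned)(sa->sa_data[0] & 0xff));
	free(sa);
	return 0;
}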
net/core/skbuff.c

@@ -268,8 +268,10 @@ struct sk_buff *__netdev_alloc_skb(struct net_device *dev,
 	struct sk_buff *skb;
 
 	skb = alloc_skb(length + NET_SKB_PAD, gfp_mask);
-	if (likely(skb))
+	if (likely(skb)) {
 		skb_reserve(skb, NET_SKB_PAD);
+		skb->dev = dev;
+	}
 	return skb;
 }
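With this hunk, __netdev_alloc_skb() (and thus netdev_alloc_skb()) hands back an skb whose dev field is already set, which is consistent with the tg3 hunks above dropping their explicit skb->dev = tp->dev assignments. A kernel-style sketch of the resulting caller pattern (hypothetical helper, not from the commit):

#include <linux/netdevice.h>
#include <linux/skbuff.h>

/* RX buffer allocation after this change: skb->dev is already dev. */
static struct sk_buff *rx_buf_alloc(struct net_device *dev, unsigned int size)
{
	struct sk_buff *skb = netdev_alloc_skb(dev, size);

	if (!skb)
		return NULL;
	/* no explicit "skb->dev = dev;" needed here any more */
	return skb;
}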
net/ipv4/route.c

@@ -3157,7 +3157,7 @@ int __init ip_rt_init(void)
 					rhash_entries,
 					(num_physpages >= 128 * 1024) ?
 					15 : 17,
-					HASH_HIGHMEM,
+					0,
 					&rt_hash_log,
 					&rt_hash_mask,
 					0);
net/ipv4/tcp_output.c

@@ -466,6 +466,7 @@ static int tcp_transmit_skb(struct sock *sk, struct sk_buff *skb, int clone_it,
 	if (skb->len != tcp_header_size)
 		tcp_event_data_sent(tp, skb, sk);
 
-	TCP_INC_STATS(TCP_MIB_OUTSEGS);
+	if (after(tcb->end_seq, tp->snd_nxt) || tcb->seq == tcb->end_seq)
+		TCP_INC_STATS(TCP_MIB_OUTSEGS);
 
 	err = icsk->icsk_af_ops->queue_xmit(skb, 0);
@@ -2157,10 +2158,9 @@ int tcp_connect(struct sock *sk)
 	skb_shinfo(buff)->gso_size = 0;
 	skb_shinfo(buff)->gso_type = 0;
 	buff->csum = 0;
+	tp->snd_nxt = tp->write_seq;
 	TCP_SKB_CB(buff)->seq = tp->write_seq++;
 	TCP_SKB_CB(buff)->end_seq = tp->write_seq;
-	tp->snd_nxt = tp->write_seq;
-	tp->pushed_seq = tp->write_seq;
 
 	/* Send it off. */
 	TCP_SKB_CB(buff)->when = tcp_time_stamp;
@@ -2170,6 +2170,12 @@ int tcp_connect(struct sock *sk)
 	sk_charge_skb(sk, buff);
 	tp->packets_out += tcp_skb_pcount(buff);
 	tcp_transmit_skb(sk, buff, 1, GFP_KERNEL);
+
+	/* We change tp->snd_nxt after the tcp_transmit_skb() call
+	 * in order to make this packet get counted in tcpOutSegs.
+	 */
+	tp->snd_nxt = tp->write_seq;
+	tp->pushed_seq = tp->write_seq;
 	TCP_INC_STATS(TCP_MIB_ACTIVEOPENS);
 
 	/* Timer for repeating the SYN until an answer. */
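The tcp_output.c changes make tcpOutSegs count only segments that carry new data or no data at all (for example pure ACKs), so retransmissions are excluded; tcp_connect() then has to delay the tp->snd_nxt update until after tcp_transmit_skb() so the initial SYN still satisfies that test. The counting rule, restated as a standalone check (illustrative reimplementation, not kernel code):

#include <assert.h>
#include <stdbool.h>
#include <stdint.h>

/* after(end_seq, snd_nxt) in the kernel is a signed 32-bit sequence-space
 * comparison; the segment is counted when it carries new data or is empty. */
static bool counts_as_outseg(uint32_t seq, uint32_t end_seq, uint32_t snd_nxt)
{
	return (int32_t)(end_seq - snd_nxt) > 0 || seq == end_seq;
}

int main(void)
{
	assert(counts_as_outseg(1000, 1100, 1000));   /* new data           */
	assert(counts_as_outseg(1000, 1000, 2000));   /* empty (pure ACK)   */
	assert(!counts_as_outseg(1000, 1100, 2000));  /* retransmitted data */
	return 0;
}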
net/ipx/af_ipx.c

@@ -1642,13 +1642,17 @@ static int ipx_rcv(struct sk_buff *skb, struct net_device *dev, struct packet_ty
 	if ((skb = skb_share_check(skb, GFP_ATOMIC)) == NULL)
 		goto out;
 
-	ipx = ipx_hdr(skb);
-	ipx_pktsize = ntohs(ipx->ipx_pktsize);
+	if (!pskb_may_pull(skb, sizeof(struct ipxhdr)))
+		goto drop;
+
+	ipx_pktsize = ntohs(ipx_hdr(skb)->ipx_pktsize);
 
 	/* Too small or invalid header? */
-	if (ipx_pktsize < sizeof(struct ipxhdr) || ipx_pktsize > skb->len)
+	if (ipx_pktsize < sizeof(struct ipxhdr) ||
+	    !pskb_may_pull(skb, ipx_pktsize))
 		goto drop;
 
+	ipx = ipx_hdr(skb);
 	if (ipx->ipx_checksum != IPX_NO_CHECKSUM &&
 	    ipx->ipx_checksum != ipx_cksum(ipx, ipx_pktsize))
 		goto drop;
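The af_ipx.c change reorders validation so nothing in the IPX header is dereferenced before pskb_may_pull() has confirmed it is actually present in the linear area, and the packet size the header claims is itself revalidated before use. The same discipline applied to a plain buffer (toy header layout, purely illustrative):

#include <assert.h>
#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>

struct toy_hdr {
	uint8_t pktsize_be[2];    /* declared packet size, big-endian */
};

/* 1) header present?  2) read the declared size  3) declared size sane and
 * fully present?  Only then is the payload touched. */
static bool toy_rcv_ok(const unsigned char *buf, size_t buflen)
{
	uint16_t pktsize;

	if (buflen < sizeof(struct toy_hdr))            /* pskb_may_pull(hdr)     */
		return false;
	pktsize = (uint16_t)((buf[0] << 8) | buf[1]);   /* ntohs()                */
	if (pktsize < sizeof(struct toy_hdr) ||
	    pktsize > buflen)                           /* pskb_may_pull(pktsize) */
		return false;
	return true;
}

int main(void)
{
	unsigned char pkt[4] = { 0x00, 0x04, 0xaa, 0xbb };   /* claims 4 bytes */

	assert(toy_rcv_ok(pkt, sizeof(pkt)));
	assert(!toy_rcv_ok(pkt, 1));          /* truncated header */
	pkt[1] = 0x08;                        /* now claims 8 bytes */
	assert(!toy_rcv_ok(pkt, sizeof(pkt)));
	return 0;
}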