Kirill Smelkov / linux / Commits

Commit 5f6c1284, authored Jun 07, 2002 by David S. Miller

    Hand merge.

parents 75541efd 574b2d87

Changes: 2 files, 469 additions and 493 deletions

    include/linux/skbuff.h   +245  -254
    net/core/skbuff.c        +224  -239
include/linux/skbuff.h (view file @ 5f6c1284)
@@ -35,10 +35,13 @@
 #define CHECKSUM_HW 1
 #define CHECKSUM_UNNECESSARY 2
 
-#define SKB_DATA_ALIGN(X)	(((X) + (SMP_CACHE_BYTES-1)) & ~(SMP_CACHE_BYTES-1))
-#define SKB_MAX_ORDER(X,ORDER)	(((PAGE_SIZE<<(ORDER)) - (X) - sizeof(struct skb_shared_info))&~(SMP_CACHE_BYTES-1))
-#define SKB_MAX_HEAD(X)		(SKB_MAX_ORDER((X),0))
-#define SKB_MAX_ALLOC		(SKB_MAX_ORDER(0,2))
+#define SKB_DATA_ALIGN(X)	(((X) + (SMP_CACHE_BYTES - 1)) & \
+				 ~(SMP_CACHE_BYTES - 1))
+#define SKB_MAX_ORDER(X, ORDER)	(((PAGE_SIZE << (ORDER)) - (X) - \
+				  sizeof(struct skb_shared_info)) & \
+				  ~(SMP_CACHE_BYTES - 1))
+#define SKB_MAX_HEAD(X)		(SKB_MAX_ORDER((X), 0))
+#define SKB_MAX_ALLOC		(SKB_MAX_ORDER(0, 2))
 
 /* A. Checksumming of received packets by device.
  *
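SKB_DATA_ALIGN rounds a size up to a multiple of SMP_CACHE_BYTES so that the skb_shared_info placed after the data area lands on a cache-line boundary. A minimal, self-contained userspace sketch of the same arithmetic (the SMP_CACHE_BYTES value here is assumed purely for illustration):

	#include <stdio.h>

	#define SMP_CACHE_BYTES 32	/* assumed value, illustration only */
	#define SKB_DATA_ALIGN(X) (((X) + (SMP_CACHE_BYTES - 1)) & \
				   ~(SMP_CACHE_BYTES - 1))

	int main(void)
	{
		/* 100 rounds up to 128; an exact multiple stays put */
		printf("%d -> %lu\n", 100, (unsigned long)SKB_DATA_ALIGN(100));
		printf("%d -> %lu\n", 128, (unsigned long)SKB_DATA_ALIGN(128));
		return 0;
	}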
@@ -79,7 +82,7 @@
  */
 #ifdef __i386__
-#define NET_CALLER(arg) (*(((void **)&arg)-1))
+#define NET_CALLER(arg) (*(((void **)&arg) - 1))
 #else
 #define NET_CALLER(arg) __builtin_return_address(0)
 #endif
@@ -97,8 +100,8 @@ struct nf_ct_info {
 struct sk_buff_head {
 	/* These two members must be first. */
-	struct sk_buff	* next;
-	struct sk_buff	* prev;
+	struct sk_buff	*next;
+	struct sk_buff	*prev;
 
 	__u32		qlen;
 	spinlock_t	lock;
@@ -110,8 +113,7 @@ struct sk_buff;
 typedef struct skb_frag_struct skb_frag_t;
 
-struct skb_frag_struct
-{
+struct skb_frag_struct {
 	struct page *page;
 	__u16 page_offset;
 	__u16 size;
@@ -127,19 +129,54 @@ struct skb_shared_info {
 	skb_frag_t	frags[MAX_SKB_FRAGS];
 };
 
+/**
+ *	struct sk_buff - socket buffer
+ *	@next: Next buffer in list
+ *	@prev: Previous buffer in list
+ *	@list: List we are on
+ *	@sk: Socket we are owned by
+ *	@stamp: Time we arrived
+ *	@dev: Device we arrived on/are leaving by
+ *	@h: Transport layer header
+ *	@nh: Network layer header
+ *	@mac: Link layer header
+ *	@dst: FIXME: Describe this field
+ *	@cb: Control buffer. Free for use by every layer. Put private vars here
+ *	@len: Length of actual data
+ *	@data_len: Data length
+ *	@csum: Checksum
+ *	@__unused: Dead field, may be reused
+ *	@cloned: Head may be cloned (check refcnt to be sure)
+ *	@pkt_type: Packet class
+ *	@ip_summed: Driver fed us an IP checksum
+ *	@priority: Packet queueing priority
+ *	@users: User count - see {datagram,tcp}.c
+ *	@protocol: Packet protocol from driver
+ *	@security: Security level of packet
+ *	@truesize: Buffer size
+ *	@head: Head of buffer
+ *	@data: Data head pointer
+ *	@tail: Tail pointer
+ *	@end: End pointer
+ *	@destructor: Destruct function
+ *	@nfmark: Can be used for communication between hooks
+ *	@nfcache: Cache info
+ *	@nfct: Associated connection, if any
+ *	@nf_debug: Netfilter debugging
+ *	@tc_index: Traffic control index
+ */
+
 struct sk_buff {
 	/* These two members must be first. */
-	struct sk_buff	* next;			/* Next buffer in list 			*/
-	struct sk_buff	* prev;			/* Previous buffer in list 		*/
+	struct sk_buff		*next;
+	struct sk_buff		*prev;
 
-	struct sk_buff_head * list;		/* List we are on			*/
-	struct sock	*sk;			/* Socket we are owned by 		*/
-	struct timeval	stamp;			/* Time we arrived			*/
-	struct net_device	*dev;		/* Device we arrived on/are leaving by	*/
+	struct sk_buff_head	*list;
+	struct sock		*sk;
+	struct timeval		stamp;
+	struct net_device	*dev;
 
 	/* Transport layer header */
-	union
-	{
+	union {
 		struct tcphdr	*th;
 		struct udphdr	*uh;
 		struct icmphdr	*icmph;
@@ -149,9 +186,7 @@ struct sk_buff {
 		unsigned char	*raw;
 	} h;
 
 	/* Network layer header */
-	union
-	{
+	union {
 		struct iphdr	*iph;
 		struct ipv6hdr	*ipv6h;
 		struct arphdr	*arph;
@@ -159,9 +194,7 @@ struct sk_buff {
 		unsigned char	*raw;
 	} nh;
 
 	/* Link layer header */
-	union
-	{
+	union {
 		struct ethhdr	*ethernet;
 		unsigned char	*raw;
 	} mac;
@@ -176,43 +209,38 @@ struct sk_buff {
 	 */
 	char			cb[48];
 
-	unsigned int	len;			/* Length of actual data		*/
-	unsigned int	data_len;
-	unsigned int	csum;			/* Checksum				*/
-	unsigned char	__unused,		/* Dead field, may be reused		*/
-			cloned,			/* head may be cloned (check refcnt to be sure). */
-			pkt_type,		/* Packet class				*/
-			ip_summed;		/* Driver fed us an IP checksum		*/
-	__u32		priority;		/* Packet queueing priority		*/
-	atomic_t	users;			/* User count - see datagram.c,tcp.c	*/
-	unsigned short	protocol;		/* Packet protocol from driver.		*/
-	unsigned short	security;		/* Security level of packet		*/
-	unsigned int	truesize;		/* Buffer size				*/
-	unsigned char	*head;			/* Head of buffer			*/
-	unsigned char	*data;			/* Data head pointer			*/
-	unsigned char	*tail;			/* Tail pointer				*/
-	unsigned char	*end;			/* End pointer				*/
-	void		(*destructor)(struct sk_buff *);	/* Destruct function	*/
+	unsigned int		len,
+				data_len,
+				csum;
+	unsigned char		__unused,
+				cloned,
+				pkt_type,
+				ip_summed;
+	__u32			priority;
+	atomic_t		users;
+	unsigned short		protocol,
+				security;
+	unsigned int		truesize;
+	unsigned char		*head,
+				*data,
+				*tail,
+				*end;
+	void			(*destructor)(struct sk_buff *skb);
 #ifdef CONFIG_NETFILTER
-	/* Can be used for communication between hooks. */
-	unsigned long	nfmark;
-	/* Cache info */
-	__u32		nfcache;
-	/* Associated connection, if any */
-	struct nf_ct_info *nfct;
+	unsigned long		nfmark;
+	__u32			nfcache;
+	struct nf_ct_info	*nfct;
 #ifdef CONFIG_NETFILTER_DEBUG
-	unsigned int nf_debug;
+	unsigned int		nf_debug;
 #endif
-#endif /*CONFIG_NETFILTER*/
+#endif /* CONFIG_NETFILTER */
 #if defined(CONFIG_HIPPI)
-	union{
+	union {
 		__u32	ifield;
 	} private;
 #endif
 #ifdef CONFIG_NET_SCHED
-	__u32           tc_index;               /* traffic control index */
+	__u32			tc_index;	/* traffic control index */
 #endif
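The four buffer pointers declared above always satisfy head <= data <= tail <= end: [head, data) is headroom, [data, tail) is the linear data, and [tail, end) is tailroom. A hypothetical helper, not part of this commit, that merely restates that invariant against the 2.5-era layout shown here:

	/* Sketch only: assumes the struct sk_buff layout above. */
	static inline int skb_layout_ok(const struct sk_buff *skb)
	{
		return skb->head <= skb->data &&
		       skb->data <= skb->tail &&
		       skb->tail <= skb->end;
	}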
@@ -230,20 +258,23 @@ struct sk_buff {
 #include <asm/system.h>
 
-extern void			__kfree_skb(struct sk_buff *skb);
-extern struct sk_buff *		alloc_skb(unsigned int size, int priority);
-extern void			kfree_skbmem(struct sk_buff *skb);
-extern struct sk_buff *		skb_clone(struct sk_buff *skb, int priority);
-extern struct sk_buff *		skb_copy(const struct sk_buff *skb, int priority);
-extern struct sk_buff *		pskb_copy(struct sk_buff *skb, int gfp_mask);
-extern int			pskb_expand_head(struct sk_buff *skb, int nhead, int ntail, int gfp_mask);
-extern struct sk_buff *		skb_realloc_headroom(struct sk_buff *skb, unsigned int headroom);
-extern struct sk_buff *		skb_copy_expand(const struct sk_buff *skb,
-						int newheadroom,
-						int newtailroom,
+extern void	       __kfree_skb(struct sk_buff *skb);
+extern struct sk_buff *alloc_skb(unsigned int size, int priority);
+extern void	       kfree_skbmem(struct sk_buff *skb);
+extern struct sk_buff *skb_clone(struct sk_buff *skb, int priority);
+extern struct sk_buff *skb_copy(const struct sk_buff *skb, int priority);
+extern struct sk_buff *pskb_copy(struct sk_buff *skb, int gfp_mask);
+extern int	       pskb_expand_head(struct sk_buff *skb, int nhead,
+					int ntail, int gfp_mask);
+extern struct sk_buff *skb_realloc_headroom(struct sk_buff *skb,
+					    unsigned int headroom);
+extern struct sk_buff *skb_copy_expand(const struct sk_buff *skb,
+				       int newheadroom, int newtailroom,
 				       int priority);
 #define dev_kfree_skb(a)	kfree_skb(a)
-extern void	skb_over_panic(struct sk_buff *skb, int len, void *here);
-extern void	skb_under_panic(struct sk_buff *skb, int len, void *here);
+extern void	       skb_over_panic(struct sk_buff *skb, int len,
+				      void *here);
+extern void	       skb_under_panic(struct sk_buff *skb, int len,
+				       void *here);
 
 /* Internal */
 #define skb_shinfo(SKB)	((struct skb_shared_info *)((SKB)->end))
@@ -254,10 +285,9 @@ extern void skb_under_panic(struct sk_buff *skb, int len, void *here);
  *
  *	Returns true if the queue is empty, false otherwise.
  */
 static inline int skb_queue_empty(struct sk_buff_head *list)
 {
-	return (list->next == (struct sk_buff *) list);
+	return list->next == (struct sk_buff *)list;
 }
 
 /**
@@ -267,7 +297,6 @@ static inline int skb_queue_empty(struct sk_buff_head *list)
  *	Makes another reference to a socket buffer and returns a pointer
  *	to the buffer.
  */
 static inline struct sk_buff *skb_get(struct sk_buff *skb)
 {
 	atomic_inc(&skb->users);
@@ -275,7 +304,7 @@ static inline struct sk_buff *skb_get(struct sk_buff *skb)
}
/*
* If users
==
1, we are the only owner and are can avoid redundant
* If users
==
1, we are the only owner and are can avoid redundant
* atomic change.
*/
...
...
@@ -286,7 +315,6 @@ static inline struct sk_buff *skb_get(struct sk_buff *skb)
  *	Drop a reference to the buffer and free it if the usage count has
  *	hit zero.
  */
 static inline void kfree_skb(struct sk_buff *skb)
 {
 	if (atomic_read(&skb->users) == 1 || atomic_dec_and_test(&skb->users))
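kfree_skb() only frees the buffer when the last reference is dropped, so every skb_get() must be balanced by exactly one kfree_skb(). A sketch of the expected pairing (the surrounding function names are hypothetical, not from this commit):

	static struct sk_buff *stashed;

	static void keep_for_later(struct sk_buff *skb)
	{
		stashed = skb_get(skb);	/* users++: we now co-own the buffer */
	}

	static void done_with_it(void)
	{
		kfree_skb(stashed);	/* users--: freed only if we were last */
		stashed = NULL;
	}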
@@ -308,7 +336,6 @@ static inline void kfree_skb_fast(struct sk_buff *skb)
  *	one of multiple shared copies of the buffer. Cloned buffers are
  *	shared data so must not be written to under normal circumstances.
  */
 static inline int skb_cloned(struct sk_buff *skb)
 {
 	return skb->cloned && atomic_read(&skb_shinfo(skb)->dataref) != 1;
@@ -321,10 +348,9 @@ static inline int skb_cloned(struct sk_buff *skb)
  *
  *	Returns true if more than one person has a reference to this
  *	buffer.
  */
 static inline int skb_shared(struct sk_buff *skb)
 {
-	return (atomic_read(&skb->users) != 1);
+	return atomic_read(&skb->users) != 1;
 }
 
 /**
@@ -340,19 +366,16 @@ static inline int skb_shared(struct sk_buff *skb)
  *
  *	NULL is returned on a memory allocation failure.
  */
 static inline struct sk_buff *skb_share_check(struct sk_buff *skb, int pri)
 {
 	if (skb_shared(skb)) {
-		struct sk_buff *nskb;
-		nskb = skb_clone(skb, pri);
+		struct sk_buff *nskb = skb_clone(skb, pri);
 		kfree_skb(skb);
-		return nskb;
+		skb = nskb;
 	}
 	return skb;
 }
 
 /*
  *	Copy shared buffers into a new sk_buff. We effectively do COW on
  *	packets to handle cases where we have a local reader and forward
@@ -373,15 +396,14 @@ static inline struct sk_buff *skb_share_check(struct sk_buff *skb, int pri)
  *
  *	%NULL is returned on a memory allocation failure.
  */
 static inline struct sk_buff *skb_unshare(struct sk_buff *skb, int pri)
 {
-	struct sk_buff *nskb;
-	if (!skb_cloned(skb))
-		return skb;
-	nskb = skb_copy(skb, pri);
-	kfree_skb(skb);		/* Free our shared copy */
-	return nskb;
+	if (skb_cloned(skb)) {
+		struct sk_buff *nskb = skb_copy(skb, pri);
+		kfree_skb(skb);	/* Free our shared copy */
+		skb = nskb;
+	}
+	return skb;
 }
 
 /**
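skb_share_check() and skb_unshare() implement the copy-on-write discipline described in the comment above. A sketch of how a forwarding path might apply them before writing to a buffer (the wrapper function is hypothetical; GFP_ATOMIC is the usual priority in this context):

	static struct sk_buff *make_writable(struct sk_buff *skb)
	{
		/* First get a reference nobody else holds... */
		skb = skb_share_check(skb, GFP_ATOMIC);
		if (!skb)
			return NULL;
		/* ...then a private, uncloned data area. */
		return skb_unshare(skb, GFP_ATOMIC);
	}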
@@ -397,7 +419,6 @@ static inline struct sk_buff *skb_unshare(struct sk_buff *skb, int pri)
  *	The reference count is not incremented and the reference is therefore
  *	volatile. Use with caution.
  */
 static inline struct sk_buff *skb_peek(struct sk_buff_head *list_)
 {
 	struct sk_buff *list = ((struct sk_buff *)list_)->next;
@@ -419,7 +440,6 @@ static inline struct sk_buff *skb_peek(struct sk_buff_head *list_)
  *	The reference count is not incremented and the reference is therefore
  *	volatile. Use with caution.
  */
 static inline struct sk_buff *skb_peek_tail(struct sk_buff_head *list_)
 {
 	struct sk_buff *list = ((struct sk_buff *)list_)->prev;
@@ -434,17 +454,15 @@ static inline struct sk_buff *skb_peek_tail(struct sk_buff_head *list_)
  *
  *	Return the length of an &sk_buff queue.
  */
 static inline __u32 skb_queue_len(struct sk_buff_head *list_)
 {
-	return (list_->qlen);
+	return list_->qlen;
 }
 
 static inline void skb_queue_head_init(struct sk_buff_head *list)
 {
 	spin_lock_init(&list->lock);
-	list->prev = (struct sk_buff *)list;
-	list->next = (struct sk_buff *)list;
+	list->prev = list->next = (struct sk_buff *)list;
 	list->qlen = 0;
 }
@@ -465,8 +483,8 @@ static inline void skb_queue_head_init(struct sk_buff_head *list)
  *
  *	A buffer cannot be placed on two lists at the same time.
  */
-static inline void __skb_queue_head(struct sk_buff_head *list, struct sk_buff *newsk)
+static inline void __skb_queue_head(struct sk_buff_head *list,
+				    struct sk_buff *newsk)
 {
 	struct sk_buff *prev, *next;
@@ -476,8 +494,7 @@ static inline void __skb_queue_head(struct sk_buff_head *list, struct sk_buff *n
next
=
prev
->
next
;
newsk
->
next
=
next
;
newsk
->
prev
=
prev
;
next
->
prev
=
newsk
;
prev
->
next
=
newsk
;
next
->
prev
=
prev
->
next
=
newsk
;
}
...
...
@@ -492,8 +509,8 @@ static inline void __skb_queue_head(struct sk_buff_head *list, struct sk_buff *n
  *
  *	A buffer cannot be placed on two lists at the same time.
  */
-static inline void skb_queue_head(struct sk_buff_head *list, struct sk_buff *newsk)
+static inline void skb_queue_head(struct sk_buff_head *list,
+				  struct sk_buff *newsk)
 {
 	unsigned long flags;
@@ -512,9 +529,8 @@ static inline void skb_queue_head(struct sk_buff_head *list, struct sk_buff *new
  *
  *	A buffer cannot be placed on two lists at the same time.
  */
-static inline void __skb_queue_tail(struct sk_buff_head *list, struct sk_buff *newsk)
+static inline void __skb_queue_tail(struct sk_buff_head *list,
+				    struct sk_buff *newsk)
 {
 	struct sk_buff *prev, *next;
@@ -524,8 +540,7 @@ static inline void __skb_queue_tail(struct sk_buff_head *list, struct sk_buff *n
 	prev = next->prev;
 	newsk->next = next;
 	newsk->prev = prev;
-	next->prev = newsk;
-	prev->next = newsk;
+	next->prev  = prev->next = newsk;
 }
 
 /**
@@ -539,8 +554,8 @@ static inline void __skb_queue_tail(struct sk_buff_head *list, struct sk_buff *n
  *
  *	A buffer cannot be placed on two lists at the same time.
  */
-static inline void skb_queue_tail(struct sk_buff_head *list, struct sk_buff *newsk)
+static inline void skb_queue_tail(struct sk_buff_head *list,
+				  struct sk_buff *newsk)
 {
 	unsigned long flags;
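The locked and unlocked queue primitives pair up as in this hypothetical producer/consumer sketch: skb_queue_tail() and skb_dequeue() take list->lock themselves, while the __-prefixed variants require the caller to hold it already.

	static struct sk_buff_head rxq;

	static void rx_init(void)
	{
		skb_queue_head_init(&rxq);
	}

	static void rx_enqueue(struct sk_buff *skb)
	{
		skb_queue_tail(&rxq, skb);	/* takes rxq.lock internally */
	}

	static struct sk_buff *rx_next(void)
	{
		return skb_dequeue(&rxq);	/* NULL when the queue is empty */
	}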
@@ -557,7 +572,6 @@ static inline void skb_queue_tail(struct sk_buff_head *list, struct sk_buff *new
  *	so must be used with appropriate locks held only. The head item is
  *	returned or %NULL if the list is empty.
  */
 static inline struct sk_buff *__skb_dequeue(struct sk_buff_head *list)
 {
 	struct sk_buff *next, *prev, *result;
@@ -571,8 +585,7 @@ static inline struct sk_buff *__skb_dequeue(struct sk_buff_head *list)
 		list->qlen--;
 		next->prev = prev;
 		prev->next = next;
-		result->next = NULL;
-		result->prev = NULL;
+		result->next = result->prev = NULL;
 		result->list = NULL;
 	}
 	return result;
@@ -603,13 +616,12 @@ static inline struct sk_buff *skb_dequeue(struct sk_buff_head *list)
  */
 static inline void __skb_insert(struct sk_buff *newsk,
-				struct sk_buff * prev, struct sk_buff *next,
-				struct sk_buff_head * list)
+				struct sk_buff *prev, struct sk_buff *next,
+				struct sk_buff_head *list)
 {
 	newsk->next = next;
 	newsk->prev = prev;
-	next->prev = newsk;
-	prev->next = newsk;
+	next->prev  = prev->next = newsk;
 	newsk->list = list;
 	list->qlen++;
 }
@@ -666,16 +678,14 @@ static inline void skb_append(struct sk_buff *old, struct sk_buff *newsk)
  * remove sk_buff from list. _Must_ be called atomically, and with
  * the list known..
  */
 static inline void __skb_unlink(struct sk_buff *skb, struct sk_buff_head *list)
 {
-	struct sk_buff * next, * prev;
+	struct sk_buff *next, *prev;
 
 	list->qlen--;
 	next = skb->next;
 	prev = skb->prev;
-	skb->next = NULL;
-	skb->prev = NULL;
+	skb->next = skb->prev = NULL;
 	skb->list = NULL;
 	next->prev = prev;
 	prev->next = next;
@@ -693,16 +703,15 @@ static inline void __skb_unlink(struct sk_buff *skb, struct sk_buff_head *list)
  *	unlink. Thus a list must have its contents unlinked before it is
  *	destroyed.
  */
 static inline void skb_unlink(struct sk_buff *skb)
 {
 	struct sk_buff_head *list = skb->list;
 
-	if(list) {
+	if (list) {
 		unsigned long flags;
 
 		spin_lock_irqsave(&list->lock, flags);
-		if(skb->list == list)
+		if (skb->list == list)
 			__skb_unlink(skb, skb->list);
 		spin_unlock_irqrestore(&list->lock, flags);
 	}
@@ -718,7 +727,6 @@ static inline void skb_unlink(struct sk_buff *skb)
  *	so must be used with appropriate locks held only. The tail item is
  *	returned or %NULL if the list is empty.
  */
 static inline struct sk_buff *__skb_dequeue_tail(struct sk_buff_head *list)
 {
 	struct sk_buff *skb = skb_peek_tail(list);
@@ -735,7 +743,6 @@ static inline struct sk_buff *__skb_dequeue_tail(struct sk_buff_head *list)
  *	may be used safely with other locking list functions. The tail item is
  *	returned or %NULL if the list is empty.
  */
 static inline struct sk_buff *skb_dequeue_tail(struct sk_buff_head *list)
 {
 	unsigned long flags;
@@ -757,20 +764,22 @@ static inline int skb_headlen(const struct sk_buff *skb)
 	return skb->len - skb->data_len;
 }
 
-#define SKB_PAGE_ASSERT(skb) do { if (skb_shinfo(skb)->nr_frags) BUG(); } while (0)
-#define SKB_FRAG_ASSERT(skb) do { if (skb_shinfo(skb)->frag_list) BUG(); } while (0)
-#define SKB_LINEAR_ASSERT(skb) do { if (skb_is_nonlinear(skb)) BUG(); } while (0)
+#define SKB_PAGE_ASSERT(skb)	do { if (skb_shinfo(skb)->nr_frags) \
+					BUG(); } while (0)
+#define SKB_FRAG_ASSERT(skb)	do { if (skb_shinfo(skb)->frag_list) \
+					BUG(); } while (0)
+#define SKB_LINEAR_ASSERT(skb)	do { if (skb_is_nonlinear(skb)) \
+					BUG(); } while (0)
 
 /*
  *	Add data to an sk_buff
  */
 static inline unsigned char *__skb_put(struct sk_buff *skb, unsigned int len)
 {
-	unsigned char *tmp=skb->tail;
+	unsigned char *tmp = skb->tail;
 	SKB_LINEAR_ASSERT(skb);
-	skb->tail+=len;
-	skb->len+=len;
+	skb->tail += len;
+	skb->len  += len;
 	return tmp;
 }
@@ -783,23 +792,21 @@ static inline unsigned char *__skb_put(struct sk_buff *skb, unsigned int len)
  *	exceed the total buffer size the kernel will panic. A pointer to the
  *	first byte of the extra data is returned.
  */
 static inline unsigned char *skb_put(struct sk_buff *skb, unsigned int len)
 {
-	unsigned char *tmp=skb->tail;
+	unsigned char *tmp = skb->tail;
 	SKB_LINEAR_ASSERT(skb);
-	skb->tail+=len;
-	skb->len+=len;
-	if(skb->tail>skb->end) {
+	skb->tail += len;
+	skb->len  += len;
+	if (skb->tail > skb->end)
 		skb_over_panic(skb, len, current_text_addr());
-	}
 	return tmp;
 }
 
 static inline unsigned char *__skb_push(struct sk_buff *skb, unsigned int len)
 {
-	skb->data-=len;
-	skb->len+=len;
+	skb->data -= len;
+	skb->len  += len;
 	return skb->data;
 }
@@ -812,23 +819,21 @@ static inline unsigned char *__skb_push(struct sk_buff *skb, unsigned int len)
  *	start. If this would exceed the total buffer headroom the kernel will
  *	panic. A pointer to the first byte of the extra data is returned.
  */
 static inline unsigned char *skb_push(struct sk_buff *skb, unsigned int len)
 {
-	skb->data-=len;
-	skb->len+=len;
-	if(skb->data<skb->head) {
+	skb->data -= len;
+	skb->len  += len;
+	if (skb->data < skb->head)
 		skb_under_panic(skb, len, current_text_addr());
-	}
 	return skb->data;
 }
 
 static inline char *__skb_pull(struct sk_buff *skb, unsigned int len)
 {
-	skb->len-=len;
+	skb->len -= len;
 	if (skb->len < skb->data_len)
 		BUG();
-	return 	skb->data+=len;
+	return skb->data += len;
 }
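skb_reserve(), skb_put() and skb_push() together implement the usual transmit pattern: reserve headroom up front, append the payload, then push each lower-layer header in front of the data pointer. A sketch against the API shown in this diff (the 16- and 14-byte sizes are illustrative assumptions, not taken from the commit):

	static struct sk_buff *build_frame(const void *payload, unsigned int plen)
	{
		struct sk_buff *skb = alloc_skb(plen + 16, GFP_ATOMIC);
		unsigned char *hdr;

		if (!skb)
			return NULL;
		skb_reserve(skb, 16);			/* headroom for headers */
		memcpy(skb_put(skb, plen), payload, plen);	/* append payload */
		hdr = skb_push(skb, 14);		/* prepend a 14-byte header */
		memset(hdr, 0, 14);
		return skb;
	}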
@@ -841,30 +846,25 @@ static inline char *__skb_pull(struct sk_buff *skb, unsigned int len)
  *	is returned. Once the data has been pulled future pushes will overwrite
  *	the old data.
  */
-static inline unsigned char * skb_pull(struct sk_buff *skb, unsigned int len)
-{
-	if (len > skb->len)
-		return NULL;
-	return __skb_pull(skb,len);
+static inline unsigned char *skb_pull(struct sk_buff *skb, unsigned int len)
+{
+	return (len > skb->len) ? NULL : __skb_pull(skb, len);
 }
 
-extern unsigned char * __pskb_pull_tail(struct sk_buff *skb, int delta);
+extern unsigned char *__pskb_pull_tail(struct sk_buff *skb, int delta);
 
 static inline char *__pskb_pull(struct sk_buff *skb, unsigned int len)
 {
 	if (len > skb_headlen(skb) &&
-	    __pskb_pull_tail(skb, len-skb_headlen(skb)) == NULL)
+	    !__pskb_pull_tail(skb, len - skb_headlen(skb)))
 		return NULL;
 	skb->len -= len;
 	return skb->data += len;
 }
 
-static inline unsigned char * pskb_pull(struct sk_buff *skb, unsigned int len)
-{
-	if (len > skb->len)
-		return NULL;
-	return __pskb_pull(skb,len);
+static inline unsigned char *pskb_pull(struct sk_buff *skb, unsigned int len)
+{
+	return (len > skb->len) ? NULL : __pskb_pull(skb, len);
 }
 
 static inline int pskb_may_pull(struct sk_buff *skb, unsigned int len)
@@ -873,7 +873,7 @@ static inline int pskb_may_pull(struct sk_buff *skb, unsigned int len)
 		return 1;
 	if (len > skb->len)
 		return 0;
-	return (__pskb_pull_tail(skb, len-skb_headlen(skb)) != NULL);
+	return __pskb_pull_tail(skb, len - skb_headlen(skb)) != NULL;
 }
 
 /**
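pskb_may_pull() is the guard a protocol parser calls before touching the linear area: it succeeds only if the first len bytes can be made directly addressable, pulling them in from fragments when necessary. A sketch with an assumed fixed-size header (struct myhdr and parse() are hypothetical):

	struct myhdr {			/* hypothetical protocol header */
		__u32 id;
		__u32 len;
	};

	static int parse(struct sk_buff *skb)
	{
		struct myhdr *h;

		if (!pskb_may_pull(skb, sizeof(*h)))
			return -1;		/* packet too short */
		h = (struct myhdr *)skb->data;	/* now safe to dereference */
		return ntohl(h->len);
	}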
@@ -882,10 +882,9 @@ static inline int pskb_may_pull(struct sk_buff *skb, unsigned int len)
  *
  *	Return the number of bytes of free space at the head of an &sk_buff.
  */
 static inline int skb_headroom(const struct sk_buff *skb)
 {
-	return skb->data-skb->head;
+	return skb->data - skb->head;
 }
 
 /**
@@ -894,10 +893,9 @@ static inline int skb_headroom(const struct sk_buff *skb)
  *
  *	Return the number of bytes of free space at the tail of an sk_buff
  */
 static inline int skb_tailroom(const struct sk_buff *skb)
 {
-	return skb_is_nonlinear(skb) ? 0 : skb->end-skb->tail;
+	return skb_is_nonlinear(skb) ? 0 : skb->end - skb->tail;
 }
 
 /**
@@ -908,11 +906,10 @@ static inline int skb_tailroom(const struct sk_buff *skb)
  *	Increase the headroom of an empty &sk_buff by reducing the tail
  *	room. This is only allowed for an empty buffer.
  */
 static inline void skb_reserve(struct sk_buff *skb, unsigned int len)
 {
-	skb->data+=len;
-	skb->tail+=len;
+	skb->data += len;
+	skb->tail += len;
 }
 
 extern int ___pskb_trim(struct sk_buff *skb, unsigned int len, int realloc);
@@ -921,10 +918,9 @@ static inline void __skb_trim(struct sk_buff *skb, unsigned int len)
 {
 	if (!skb->data_len) {
-		skb->len = len;
-		skb->tail = skb->data+len;
-	} else {
+		skb->len  = len;
+		skb->tail = skb->data + len;
+	} else
 		___pskb_trim(skb, len, 0);
-	}
 }
 
 /**
@@ -935,12 +931,10 @@ static inline void __skb_trim(struct sk_buff *skb, unsigned int len)
  *	Cut the length of a buffer down by removing data from the tail. If
  *	the buffer is already under the length specified it is not modified.
  */
 static inline void skb_trim(struct sk_buff *skb, unsigned int len)
 {
-	if (skb->len > len) {
+	if (skb->len > len)
 		__skb_trim(skb, len);
-	}
 }
@@ -950,16 +944,13 @@ static inline int __pskb_trim(struct sk_buff *skb, unsigned int len)
-		skb->len = len;
-		skb->tail = skb->data+len;
+		skb->len  = len;
+		skb->tail = skb->data + len;
 		return 0;
-	} else {
-		return ___pskb_trim(skb, len, 1);
 	}
+	return ___pskb_trim(skb, len, 1);
 }
 
 static inline int pskb_trim(struct sk_buff *skb, unsigned int len)
 {
-	if (len < skb->len)
-		return __pskb_trim(skb, len);
-	return 0;
+	return (len < skb->len) ? __pskb_trim(skb, len) : 0;
 }
 
 /**
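pskb_trim() (and the unlocked __pskb_trim()) cut a buffer down from the tail, deferring to ___pskb_trim() when paged data is involved. A typical use is stripping link-layer padding once the true datagram length is known; a sketch (strip_padding() is hypothetical):

	static int strip_padding(struct sk_buff *skb, unsigned int datalen)
	{
		if (skb->len > datalen)
			return pskb_trim(skb, datalen);	/* drop trailing pad */
		return 0;
	}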
@@ -970,8 +961,6 @@ static inline int pskb_trim(struct sk_buff *skb, unsigned int len)
  *	destructor function and make the @skb unowned. The buffer continues
  *	to exist but is no longer charged to its former owner.
  */
 static inline void skb_orphan(struct sk_buff *skb)
 {
 	if (skb->destructor)
@@ -981,36 +970,32 @@ static inline void skb_orphan(struct sk_buff *skb)
 }
 
 /**
- *	skb_purge - empty a list
+ *	skb_queue_purge - empty a list
  *	@list: list to empty
  *
  *	Delete all buffers on an &sk_buff list. Each buffer is removed from
  *	the list and one reference dropped. This function takes the list
  *	lock and is atomic with respect to other list locking functions.
  */
 static inline void skb_queue_purge(struct sk_buff_head *list)
 {
 	struct sk_buff *skb;
-	while ((skb=skb_dequeue(list))!=NULL)
+	while ((skb = skb_dequeue(list)) != NULL)
 		kfree_skb(skb);
 }
 
 /**
- *	__skb_purge - empty a list
+ *	__skb_queue_purge - empty a list
  *	@list: list to empty
  *
  *	Delete all buffers on an &sk_buff list. Each buffer is removed from
  *	the list and one reference dropped. This function does not take the
  *	list lock and the caller must hold the relevant locks to use it.
  */
 static inline void __skb_queue_purge(struct sk_buff_head *list)
 {
 	struct sk_buff *skb;
-	while ((skb=__skb_dequeue(list))!=NULL)
+	while ((skb = __skb_dequeue(list)) != NULL)
 		kfree_skb(skb);
 }
@@ -1026,15 +1011,12 @@ static inline void __skb_queue_purge(struct sk_buff_head *list)
  *
  *	%NULL is returned in there is no free memory.
  */
 static inline struct sk_buff *__dev_alloc_skb(unsigned int length,
 					      int gfp_mask)
 {
-	struct sk_buff *skb;
-
-	skb = alloc_skb(length+16, gfp_mask);
+	struct sk_buff *skb = alloc_skb(length + 16, gfp_mask);
 	if (skb)
-		skb_reserve(skb,16);
+		skb_reserve(skb, 16);
 	return skb;
 }
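The 16 bytes reserved by __dev_alloc_skb() leave headroom for the hardware header, so a driver can skb_put() the received frame directly behind it. A minimal RX-path sketch (rx_to_skb(), pkt and pktlen are stand-ins for driver-specific DMA'd data, not from this commit):

	static struct sk_buff *rx_to_skb(const void *pkt, unsigned int pktlen)
	{
		struct sk_buff *skb = dev_alloc_skb(pktlen);

		if (!skb)
			return NULL;	/* drop: out of memory */
		memcpy(skb_put(skb, pktlen), pkt, pktlen);
		return skb;
	}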
@@ -1050,7 +1032,6 @@ static inline struct sk_buff *__dev_alloc_skb(unsigned int length,
  *	%NULL is returned in there is no free memory. Although this function
  *	allocates memory it can be called from an interrupt.
  */
 static inline struct sk_buff *dev_alloc_skb(unsigned int length)
 {
 	return __dev_alloc_skb(length, GFP_ATOMIC);
@@ -1068,9 +1049,7 @@ static inline struct sk_buff *dev_alloc_skb(unsigned int length)
  *	The result is skb with writable area skb->head...skb->tail
  *	and at least @headroom of space at head.
  */
-static inline int
-skb_cow(struct sk_buff *skb, unsigned int headroom)
+static inline int skb_cow(struct sk_buff *skb, unsigned int headroom)
 {
 	int delta = (headroom > 16 ? headroom : 16) - skb_headroom(skb);
@@ -1078,7 +1057,7 @@ skb_cow(struct sk_buff *skb, unsigned int headroom)
 		delta = 0;
 
 	if (delta || skb_cloned(skb))
-		return pskb_expand_head(skb, (delta+15)&~15, 0, GFP_ATOMIC);
+		return pskb_expand_head(skb, (delta + 15) & ~15, 0, GFP_ATOMIC);
 	return 0;
 }
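skb_cow() combines the two conditions above: it reallocates if headroom is short or the buffer is cloned, and afterwards skb->head...skb->tail is privately writable. A sketch of the usual call-before-mangle pattern (mangle() and the byte edit are illustrative only):

	static int mangle(struct sk_buff *skb)
	{
		if (skb_cow(skb, 16))		/* want >= 16 bytes of headroom */
			return -ENOMEM;
		skb->data[0] ^= 1;		/* now safe: data area is ours */
		return 0;
	}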
@@ -1088,7 +1067,8 @@ skb_cow(struct sk_buff *skb, unsigned int headroom)
  *	@gfp: allocation mode
  *
  *	If there is no free memory -ENOMEM is returned, otherwise zero
- *	is returned and the old skb data released.  */
+ *	is returned and the old skb data released.
+ */
 int skb_linearize(struct sk_buff *skb, int gfp);
 
 static inline void *kmap_skb_frag(const skb_frag_t *frag)
@@ -1113,34 +1093,45 @@ static inline void kunmap_skb_frag(void *vaddr)
 #define skb_queue_walk(queue, skb) \
 		for (skb = (queue)->next;			\
 		     (skb != (struct sk_buff *)(queue));	\
-		     skb=skb->next)
+		     skb = skb->next)
 
-extern struct sk_buff *		skb_recv_datagram(struct sock *sk,unsigned flags,int noblock, int *err);
-extern unsigned int		datagram_poll(struct file *file, struct socket *sock, struct poll_table_struct *wait);
-extern int			skb_copy_datagram(const struct sk_buff *from, int offset, char *to,int size);
-extern int			skb_copy_datagram_iovec(const struct sk_buff *from, int offset, struct iovec *to,int size);
-extern int			skb_copy_and_csum_datagram(const struct sk_buff *skb, int offset, u8 *to, int len, unsigned int *csump);
-extern int			skb_copy_and_csum_datagram_iovec(const struct sk_buff *skb, int hlen, struct iovec *iov);
-extern void			skb_free_datagram(struct sock * sk, struct sk_buff *skb);
-extern unsigned int		skb_checksum(const struct sk_buff *skb, int offset, int len, unsigned int csum);
-extern int			skb_copy_bits(const struct sk_buff *skb, int offset, void *to, int len);
-extern unsigned int		skb_copy_and_csum_bits(const struct sk_buff *skb, int offset, u8 *to, int len, unsigned int csum);
+extern struct sk_buff *skb_recv_datagram(struct sock *sk, unsigned flags,
+					 int noblock, int *err);
+extern unsigned int    datagram_poll(struct file *file, struct socket *sock,
+				     struct poll_table_struct *wait);
+extern int	       skb_copy_datagram(const struct sk_buff *from,
+					 int offset, char *to, int size);
+extern int	       skb_copy_datagram_iovec(const struct sk_buff *from,
+					       int offset, struct iovec *to,
+					       int size);
+extern int	       skb_copy_and_csum_datagram(const struct sk_buff *skb,
+						  int offset, u8 *to, int len,
+						  unsigned int *csump);
+extern int	       skb_copy_and_csum_datagram_iovec(const struct sk_buff *skb,
+							int hlen,
+							struct iovec *iov);
+extern void	       skb_free_datagram(struct sock *sk, struct sk_buff *skb);
+extern unsigned int    skb_checksum(const struct sk_buff *skb, int offset,
+				    int len, unsigned int csum);
+extern int	       skb_copy_bits(const struct sk_buff *skb, int offset,
+				     void *to, int len);
+extern unsigned int    skb_copy_and_csum_bits(const struct sk_buff *skb,
+					      int offset, u8 *to, int len,
+					      unsigned int csum);
 extern void	       skb_copy_and_csum_dev(const struct sk_buff *skb, u8 *to);
 
 extern void	       skb_init(void);
 extern void	       skb_add_mtu(int mtu);
 
 #ifdef CONFIG_NETFILTER
-static inline void
-nf_conntrack_put(struct nf_ct_info *nfct)
+static inline void nf_conntrack_put(struct nf_ct_info *nfct)
 {
 	if (nfct && atomic_dec_and_test(&nfct->master->use))
 		nfct->master->destroy(nfct->master);
 }
-static inline void
-nf_conntrack_get(struct nf_ct_info *nfct)
+static inline void nf_conntrack_get(struct nf_ct_info *nfct)
 {
 	if (nfct)
 		atomic_inc(&nfct->master->use);
net/core/skbuff.c (view file @ 5f6c1284)
@@ -7,7 +7,8 @@
  *	Version:	$Id: skbuff.c,v 1.90 2001/11/07 05:56:19 davem Exp $
  *
  *	Fixes:
- *		Alan Cox	:	Fixed the worst of the load balancer bugs.
+ *		Alan Cox	:	Fixed the worst of the load
+ *					balancer bugs.
  *		Dave Platt	:	Interrupt stacking fix.
  *		Richard Kooijman	:	Timestamp fixes.
  *		Alan Cox	:	Changed buffer format.
@@ -84,10 +85,9 @@ static union {
  *
  *	Out of line support code for skb_put(). Not user callable.
  */
 void skb_over_panic(struct sk_buff *skb, int sz, void *here)
 {
-	printk("skput:over: %p:%d put:%d dev:%s",
+	printk(KERN_INFO "skput:over: %p:%d put:%d dev:%s",
 		here, skb->len, sz, skb->dev ? skb->dev->name : "<NULL>");
 	BUG();
 }
@@ -101,10 +101,9 @@ void skb_over_panic(struct sk_buff *skb, int sz, void *here)
  *	Out of line support code for skb_push(). Not user callable.
  */
 void skb_under_panic(struct sk_buff *skb, int sz, void *here)
 {
-	printk("skput:under: %p:%d put:%d dev:%s",
+	printk(KERN_INFO "skput:under: %p:%d put:%d dev:%s",
 		here, skb->len, sz, skb->dev ? skb->dev->name : "<NULL>");
 	BUG();
 }
@@ -116,7 +115,6 @@ static __inline__ struct sk_buff *skb_head_from_pool(void)
 	unsigned long flags;
 
 	local_irq_save(flags);
 	list = &skb_head_pool[smp_processor_id()].list;
 	if (skb_queue_len(list))
@@ -138,11 +136,7 @@ static __inline__ void skb_head_to_pool(struct sk_buff *skb)
 		__skb_queue_head(list, skb);
 		local_irq_restore(flags);
 		return;
 	}
 	local_irq_restore(flags);
 	kmem_cache_free(skbuff_head_cache, skb);
 }
@@ -164,14 +158,13 @@ static __inline__ void skb_head_to_pool(struct sk_buff *skb)
  *	Buffers may only be allocated from interrupts using a @gfp_mask of
  *	%GFP_ATOMIC.
  */
-struct sk_buff *alloc_skb(unsigned int size,int gfp_mask)
+struct sk_buff *alloc_skb(unsigned int size, int gfp_mask)
 {
 	struct sk_buff *skb;
 	u8 *data;
 
 	if (in_interrupt() && (gfp_mask & __GFP_WAIT)) {
-		static int count = 0;
+		static int count;
 		if (++count < 5) {
 			printk(KERN_ERR "alloc_skb called nonatomically "
 			       "from interrupt %p\n", NET_CALLER(size));
@@ -182,25 +175,24 @@ struct sk_buff *alloc_skb(unsigned int size,int gfp_mask)
 	/* Get the HEAD */
 	skb = skb_head_from_pool();
-	if (skb == NULL) {
-		skb = kmem_cache_alloc(skbuff_head_cache, gfp_mask & ~__GFP_DMA);
-		if (skb == NULL)
-			goto nohead;
+	if (!skb) {
+		skb = kmem_cache_alloc(skbuff_head_cache,
+				       gfp_mask & ~__GFP_DMA);
+		if (!skb)
+			goto out;
 	}
 
 	/* Get the DATA. Size must match skb_add_mtu(). */
 	size = SKB_DATA_ALIGN(size);
 	data = kmalloc(size + sizeof(struct skb_shared_info), gfp_mask);
-	if (data == NULL)
+	if (!data)
 		goto nodata;
 
 	/* XXX: does not include slab overhead */
 	skb->truesize = size + sizeof(struct sk_buff);
 
 	/* Load the data pointers. */
-	skb->head = data;
-	skb->data = data;
-	skb->tail = data;
+	skb->head = skb->data = skb->tail = data;
 	skb->end = data + size;
 
 	/* Set up other state */
@@ -212,12 +204,12 @@ struct sk_buff *alloc_skb(unsigned int size,int gfp_mask)
 	atomic_set(&(skb_shinfo(skb)->dataref), 1);
 	skb_shinfo(skb)->nr_frags = 0;
 	skb_shinfo(skb)->frag_list = NULL;
+out:
 	return skb;
-
 nodata:
 	skb_head_to_pool(skb);
-nohead:
-	return NULL;
+	skb = NULL;
+	goto out;
 }
@@ -229,11 +221,10 @@ static inline void skb_headerinit(void *p, kmem_cache_t *cache,
 {
 	struct sk_buff *skb = p;
 
-	skb->next = NULL;
-	skb->prev = NULL;
+	skb->next = skb->prev = NULL;
 	skb->list = NULL;
 	skb->sk = NULL;
-	skb->stamp.tv_sec=0;	/* No idea about time */
+	skb->stamp.tv_sec = 0;	/* No idea about time */
 	skb->dev = NULL;
 	skb->dst = NULL;
 	memset(skb->cb, 0, sizeof(skb->cb));
@@ -272,7 +263,7 @@ static void skb_clone_fraglist(struct sk_buff *skb)
 {
 	struct sk_buff *list;
 
-	for (list = skb_shinfo(skb)->frag_list; list; list=list->next)
+	for (list = skb_shinfo(skb)->frag_list; list; list = list->next)
 		skb_get(list);
 }
@@ -321,10 +312,9 @@ void __kfree_skb(struct sk_buff *skb)
 	dst_release(skb->dst);
 	if (skb->destructor) {
-		if (in_irq()) {
-			printk(KERN_WARNING "Warning: kfree_skb on hard IRQ %p\n",
-				NET_CALLER(skb));
-		}
+		if (in_irq())
+			printk(KERN_WARNING "Warning: kfree_skb on "
+					    "hard IRQ %p\n", NET_CALLER(skb));
 		skb->destructor(skb);
 	}
 #ifdef CONFIG_NETFILTER
@@ -350,9 +340,8 @@ void __kfree_skb(struct sk_buff *skb)
 struct sk_buff *skb_clone(struct sk_buff *skb, int gfp_mask)
 {
-	struct sk_buff *n;
+	struct sk_buff *n = skb_head_from_pool();
 
-	n = skb_head_from_pool();
 	if (!n) {
 		n = kmem_cache_alloc(skbuff_head_cache, gfp_mask);
 		if (!n)
@@ -418,28 +407,28 @@ static void copy_skb_header(struct sk_buff *new, const struct sk_buff *old)
 	 */
 	unsigned long offset = new->data - old->data;
 
-	new->list=NULL;
-	new->sk=NULL;
-	new->dev=old->dev;
-	new->priority=old->priority;
-	new->protocol=old->protocol;
-	new->dst=dst_clone(old->dst);
-	new->h.raw=old->h.raw+offset;
-	new->nh.raw=old->nh.raw+offset;
-	new->mac.raw=old->mac.raw+offset;
+	new->list	= NULL;
+	new->sk		= NULL;
+	new->dev	= old->dev;
+	new->priority	= old->priority;
+	new->protocol	= old->protocol;
+	new->dst	= dst_clone(old->dst);
+	new->h.raw	= old->h.raw + offset;
+	new->nh.raw	= old->nh.raw + offset;
+	new->mac.raw	= old->mac.raw + offset;
 	memcpy(new->cb, old->cb, sizeof(old->cb));
 	atomic_set(&new->users, 1);
-	new->pkt_type=old->pkt_type;
-	new->stamp=old->stamp;
+	new->pkt_type	= old->pkt_type;
+	new->stamp	= old->stamp;
 	new->destructor = NULL;
-	new->security=old->security;
+	new->security	= old->security;
 #ifdef CONFIG_NETFILTER
-	new->nfmark=old->nfmark;
-	new->nfcache=old->nfcache;
-	new->nfct=old->nfct;
+	new->nfmark	= old->nfmark;
+	new->nfcache	= old->nfcache;
+	new->nfct	= old->nfct;
 	nf_conntrack_get(new->nfct);
 #ifdef CONFIG_NETFILTER_DEBUG
-	new->nf_debug=old->nf_debug;
+	new->nf_debug	= old->nf_debug;
 #endif
 #endif
 #ifdef CONFIG_NET_SCHED
@@ -466,28 +455,26 @@ static void copy_skb_header(struct sk_buff *new, const struct sk_buff *old)
 struct sk_buff *skb_copy(const struct sk_buff *skb, int gfp_mask)
 {
-	struct sk_buff *n;
-	int headerlen = skb->data-skb->head;
+	int headerlen = skb->data - skb->head;
 	/*
 	 *	Allocate the copy buffer
 	 */
-	n=alloc_skb(skb->end - skb->head + skb->data_len, gfp_mask);
-	if(n==NULL)
+	struct sk_buff *n = alloc_skb(skb->end - skb->head + skb->data_len,
+				      gfp_mask);
+
+	if (!n)
 		return NULL;
 
 	/* Set the data pointer */
-	skb_reserve(n,headerlen);
+	skb_reserve(n, headerlen);
 	/* Set the tail pointer and length */
-	skb_put(n,skb->len);
-	n->csum = skb->csum;
-	n->ip_summed = skb->ip_summed;
+	skb_put(n, skb->len);
+	n->csum	     = skb->csum;
+	n->ip_summed = skb->ip_summed;
 
-	if (skb_copy_bits(skb, -headerlen, n->head, headerlen+skb->len))
+	if (skb_copy_bits(skb, -headerlen, n->head, headerlen + skb->len))
 		BUG();
 
 	copy_skb_header(n, skb);
 	return n;
 }
@@ -498,7 +485,7 @@ int skb_linearize(struct sk_buff *skb, int gfp_mask)
 	u8 *data;
 	long offset;
 	int headerlen = skb->data - skb->head;
-	int expand = (skb->tail+skb->data_len) - skb->end;
+	int expand = (skb->tail + skb->data_len) - skb->end;
 
 	if (skb_shared(skb))
 		BUG();
@@ -506,14 +493,14 @@ int skb_linearize(struct sk_buff *skb, int gfp_mask)
 	if (expand <= 0)
 		expand = 0;
 
-	size = (skb->end - skb->head + expand);
+	size = skb->end - skb->head + expand;
 	size = SKB_DATA_ALIGN(size);
 	data = kmalloc(size + sizeof(struct skb_shared_info), gfp_mask);
-	if (data == NULL)
+	if (!data)
 		return -ENOMEM;
 
 	/* Copy entire thing */
-	if (skb_copy_bits(skb, -headerlen, data, headerlen+skb->len))
+	if (skb_copy_bits(skb, -headerlen, data, headerlen + skb->len))
 		BUG();
 
 	/* Offset between the two in bytes */
@@ -561,19 +548,18 @@ int skb_linearize(struct sk_buff *skb, int gfp_mask)
 struct sk_buff *pskb_copy(struct sk_buff *skb, int gfp_mask)
 {
-	struct sk_buff *n;
-
 	/*
 	 *	Allocate the copy buffer
 	 */
-	n=alloc_skb(skb->end - skb->head, gfp_mask);
-	if(n==NULL)
-		return NULL;
+	struct sk_buff *n = alloc_skb(skb->end - skb->head, gfp_mask);
+
+	if (!n)
+		goto out;
 
 	/* Set the data pointer */
-	skb_reserve(n,skb->data-skb->head);
+	skb_reserve(n, skb->data - skb->head);
 	/* Set the tail pointer and length */
-	skb_put(n,skb_headlen(skb));
+	skb_put(n, skb_headlen(skb));
 	/* Copy the bytes */
 	memcpy(n->data, skb->data, n->len);
 	n->csum = skb->csum;
@@ -598,7 +584,7 @@ struct sk_buff *pskb_copy(struct sk_buff *skb, int gfp_mask)
 	}
 
 	copy_skb_header(n, skb);
-
+out:
 	return n;
 }
@@ -631,15 +617,15 @@ int pskb_expand_head(struct sk_buff *skb, int nhead, int ntail, int gfp_mask)
 	size = SKB_DATA_ALIGN(size);
 	data = kmalloc(size + sizeof(struct skb_shared_info), gfp_mask);
-	if (data == NULL)
+	if (!data)
 		goto nodata;
 
 	/* Copy only real data... and, alas, header. This should be
 	 * optimized for the cases when header is void. */
-	memcpy(data+nhead, skb->head, skb->tail-skb->head);
-	memcpy(data+size, skb->end, sizeof(struct skb_shared_info));
+	memcpy(data + nhead, skb->head, skb->tail - skb->head);
+	memcpy(data + size, skb->end, sizeof(struct skb_shared_info));
 
-	for (i=0; i<skb_shinfo(skb)->nr_frags; i++)
+	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++)
 		get_page(skb_shinfo(skb)->frags[i].page);
 
 	if (skb_shinfo(skb)->frag_list)
@@ -647,11 +633,10 @@ int pskb_expand_head(struct sk_buff *skb, int nhead, int ntail, int gfp_mask)
 	skb_release_data(skb);
 
-	off = (data+nhead) - skb->head;
+	off = (data + nhead) - skb->head;
 
 	skb->head = data;
-	skb->end  = data+size;
+	skb->end  = data + size;
 	skb->data += off;
 	skb->tail += off;
 	skb->mac.raw += off;
@@ -667,22 +652,22 @@ int pskb_expand_head(struct sk_buff *skb, int nhead, int ntail, int gfp_mask)
 /* Make private copy of skb with writable head and some headroom */
 
-struct sk_buff *
-skb_realloc_headroom(struct sk_buff *skb, unsigned int headroom)
+struct sk_buff *skb_realloc_headroom(struct sk_buff *skb, unsigned int headroom)
 {
 	struct sk_buff *skb2;
 	int delta = headroom - skb_headroom(skb);
 
 	if (delta <= 0)
-		return pskb_copy(skb, GFP_ATOMIC);
-
-	skb2 = skb_clone(skb, GFP_ATOMIC);
-	if (skb2 == NULL ||
-	    !pskb_expand_head(skb2, SKB_DATA_ALIGN(delta), 0, GFP_ATOMIC))
-		return skb2;
-
-	kfree_skb(skb2);
-	return NULL;
+		skb2 = pskb_copy(skb, GFP_ATOMIC);
+	else {
+		skb2 = skb_clone(skb, GFP_ATOMIC);
+		if (skb2 && pskb_expand_head(skb2, SKB_DATA_ALIGN(delta), 0,
+					     GFP_ATOMIC)) {
+			kfree_skb(skb2);
+			skb2 = NULL;
+		}
+	}
+	return skb2;
 }
@@ -704,34 +689,28 @@ skb_realloc_headroom(struct sk_buff *skb, unsigned int headroom)
  *	You must pass %GFP_ATOMIC as the allocation priority if this function
  *	is called from an interrupt.
  */
 struct sk_buff *skb_copy_expand(const struct sk_buff *skb,
-				int newheadroom,
-				int newtailroom,
-				int gfp_mask)
+				int newheadroom, int newtailroom, int gfp_mask)
 {
-	struct sk_buff *n;
-
 	/*
 	 *	Allocate the copy buffer
 	 */
-	n=alloc_skb(newheadroom + skb->len + newtailroom,
-		    gfp_mask);
-	if(n==NULL)
+	struct sk_buff *n = alloc_skb(newheadroom + skb->len + newtailroom,
+				      gfp_mask);
+	if (!n)
 		return NULL;
 
-	skb_reserve(n,newheadroom);
+	skb_reserve(n, newheadroom);
 
 	/* Set the tail pointer and length */
-	skb_put(n,skb->len);
+	skb_put(n, skb->len);
 
 	/* Copy the data only. */
 	if (skb_copy_bits(skb, 0, n->data, skb->len))
 		BUG();
 
 	copy_skb_header(n, skb);
 	return n;
 }
@@ -746,7 +725,7 @@ int ___pskb_trim(struct sk_buff *skb, unsigned int len, int realloc)
 	int nfrags = skb_shinfo(skb)->nr_frags;
 	int i;
 
-	for (i=0; i<nfrags; i++) {
+	for (i = 0; i < nfrags; i++) {
 		int end = offset + skb_shinfo(skb)->frags[i].size;
 		if (end > len) {
 			if (skb_cloned(skb)) {
@@ -759,7 +738,7 @@ int ___pskb_trim(struct sk_buff *skb, unsigned int len, int realloc)
 				put_page(skb_shinfo(skb)->frags[i].page);
 				skb_shinfo(skb)->nr_frags--;
 			} else {
-				skb_shinfo(skb)->frags[i].size = len-offset;
+				skb_shinfo(skb)->frags[i].size = len - offset;
 			}
 		}
 		offset = end;
@@ -809,18 +788,17 @@ int ___pskb_trim(struct sk_buff *skb, unsigned int len, int realloc)
  *
  * It is pretty complicated. Luckily, it is called only in exceptional cases.
  */
-unsigned char * __pskb_pull_tail(struct sk_buff *skb, int delta)
+unsigned char *__pskb_pull_tail(struct sk_buff *skb, int delta)
 {
-	int i, k, eat;
-
-	/* If skb has not enough free space at tail, get new one
+	/* If skb has not enough free space at tail, get new one
 	 * plus 128 bytes for future expansions. If we have enough
 	 * room at tail, reallocate without expansion only if skb is cloned.
 	 */
-	eat = (skb->tail+delta) - skb->end;
+	int i, k, eat = (skb->tail + delta) - skb->end;
 
 	if (eat > 0 || skb_cloned(skb)) {
-		if (pskb_expand_head(skb, 0, eat>0 ? eat+128 : 0, GFP_ATOMIC))
+		if (pskb_expand_head(skb, 0, eat > 0 ? eat + 128 : 0,
+				     GFP_ATOMIC))
 			return NULL;
 	}
@@ -830,12 +808,12 @@ unsigned char * __pskb_pull_tail(struct sk_buff *skb, int delta)
 	/* Optimization: no fragments, no reasons to preestimate
 	 * size of pulled pages. Superb.
 	 */
-	if (skb_shinfo(skb)->frag_list == NULL)
+	if (!skb_shinfo(skb)->frag_list)
 		goto pull_pages;
 
 	/* Estimate size of pulled pages. */
 	eat = delta;
-	for (i=0; i<skb_shinfo(skb)->nr_frags; i++) {
+	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
 		if (skb_shinfo(skb)->frags[i].size >= eat)
 			goto pull_pages;
 		eat -= skb_shinfo(skb)->frags[i].size;
@@ -854,7 +832,7 @@ unsigned char * __pskb_pull_tail(struct sk_buff *skb, int delta)
 		struct sk_buff *insp = NULL;
 
 		do {
-			if (list == NULL)
+			if (!list)
 				BUG();
 
 			if (list->len <= eat) {
@@ -868,7 +846,7 @@ unsigned char * __pskb_pull_tail(struct sk_buff *skb, int delta)
 				if (skb_shared(list)) {
 					/* Sucks! We need to fork list. :-( */
 					clone = skb_clone(list, GFP_ATOMIC);
-					if (clone == NULL)
+					if (!clone)
 						return NULL;
 					insp = list->next;
 					list = clone;
@@ -877,7 +855,7 @@ unsigned char * __pskb_pull_tail(struct sk_buff *skb, int delta)
 				 * problems. */
 				insp = list;
 			}
-			if (pskb_pull(list, eat) == NULL) {
+			if (!pskb_pull(list, eat)) {
 				if (clone)
 					kfree_skb(clone);
 				return NULL;
@@ -902,7 +880,7 @@ unsigned char * __pskb_pull_tail(struct sk_buff *skb, int delta)
 pull_pages:
 	eat = delta;
 	k = 0;
-	for (i=0; i<skb_shinfo(skb)->nr_frags; i++) {
+	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
 		if (skb_shinfo(skb)->frags[i].size <= eat) {
 			put_page(skb_shinfo(skb)->frags[i].page);
 			eat -= skb_shinfo(skb)->frags[i].size;
@@ -931,11 +909,11 @@ int skb_copy_bits(const struct sk_buff *skb, int offset, void *to, int len)
 	int i, copy;
 	int start = skb->len - skb->data_len;
 
-	if (offset > (int)skb->len-len)
+	if (offset > (int)skb->len - len)
 		goto fault;
 
 	/* Copy header. */
-	if ((copy = start-offset) > 0) {
+	if ((copy = start - offset) > 0) {
 		if (copy > len)
 			copy = len;
 		memcpy(to, skb->data + offset, copy);
@@ -948,18 +926,19 @@ int skb_copy_bits(const struct sk_buff *skb, int offset, void *to, int len)
 	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
 		int end;
 
-		BUG_TRAP(start <= offset+len);
+		BUG_TRAP(start <= offset + len);
 
 		end = start + skb_shinfo(skb)->frags[i].size;
-		if ((copy = end-offset) > 0) {
+		if ((copy = end - offset) > 0) {
 			u8 *vaddr;
 
 			if (copy > len)
 				copy = len;
 
 			vaddr = kmap_skb_frag(&skb_shinfo(skb)->frags[i]);
-			memcpy(to, vaddr+skb_shinfo(skb)->frags[i].page_offset+
-			       offset-start, copy);
+			memcpy(to,
+			       vaddr + skb_shinfo(skb)->frags[i].page_offset +
+			       offset - start, copy);
 			kunmap_skb_frag(vaddr);
 
 			if ((len -= copy) == 0)
@@ -971,18 +950,19 @@ int skb_copy_bits(const struct sk_buff *skb, int offset, void *to, int len)
 	}
 
 	if (skb_shinfo(skb)->frag_list) {
-		struct sk_buff *list;
+		struct sk_buff *list = skb_shinfo(skb)->frag_list;
 
-		for (list = skb_shinfo(skb)->frag_list; list; list=list->next) {
+		for (; list; list = list->next) {
 			int end;
 
-			BUG_TRAP(start <= offset+len);
+			BUG_TRAP(start <= offset + len);
 
 			end = start + list->len;
-			if ((copy = end-offset) > 0) {
+			if ((copy = end - offset) > 0) {
 				if (copy > len)
 					copy = len;
-				if (skb_copy_bits(list, offset-start, to, copy))
+				if (skb_copy_bits(list, offset - start,
+						  to, copy))
 					goto fault;
 				if ((len -= copy) == 0)
 					return 0;
@@ -992,7 +972,7 @@ int skb_copy_bits(const struct sk_buff *skb, int offset, void *to, int len)
 			start = end;
 		}
 	}
-	if (len == 0)
+	if (!len)
 		return 0;
 
 fault:
@@ -1001,30 +981,31 @@ int skb_copy_bits(const struct sk_buff *skb, int offset, void *to, int len)
 /* Checksum skb data. */
 
-unsigned int skb_checksum(const struct sk_buff *skb, int offset, int len, unsigned int csum)
+unsigned int skb_checksum(const struct sk_buff *skb, int offset,
+			  int len, unsigned int csum)
 {
-	int i, copy;
-	int start = skb->len - skb->data_len;
+	int start = skb->len - skb->data_len;
+	int i, copy = start - offset;
 	int pos = 0;
 
 	/* Checksum header. */
-	if ( (copy = start-offset) > 0) {
+	if (copy > 0) {
 		if (copy > len)
 			copy = len;
-		csum = csum_partial(skb->data+offset, copy, csum);
+		csum = csum_partial(skb->data + offset, copy, csum);
 		if ((len -= copy) == 0)
 			return csum;
 		offset += copy;
 		pos	= copy;
 	}
 
-	for (i=0; i<skb_shinfo(skb)->nr_frags; i++) {
+	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
 		int end;
 
-		BUG_TRAP(start <= offset+len);
+		BUG_TRAP(start <= offset + len);
 
 		end = start + skb_shinfo(skb)->frags[i].size;
-		if ((copy = end-offset) > 0) {
+		if ((copy = end - offset) > 0) {
 			unsigned int csum2;
 			u8 *vaddr;
 			skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
@@ -1033,7 +1014,7 @@ unsigned int skb_checksum(const struct sk_buff *skb, int offset, int len, unsign
 				copy = len;
 			vaddr = kmap_skb_frag(frag);
 			csum2 = csum_partial(vaddr + frag->page_offset +
-					     offset-start, copy, 0);
+					     offset - start, copy, 0);
 			kunmap_skb_frag(vaddr);
 			csum = csum_block_add(csum, csum2, pos);
 			if (!(len -= copy))
@@ -1045,19 +1026,20 @@ unsigned int skb_checksum(const struct sk_buff *skb, int offset, int len, unsign
 	}
 
 	if (skb_shinfo(skb)->frag_list) {
-		struct sk_buff *list;
+		struct sk_buff *list = skb_shinfo(skb)->frag_list;
 
-		for (list = skb_shinfo(skb)->frag_list; list; list=list->next) {
+		for (; list; list = list->next) {
 			int end;
 
-			BUG_TRAP(start <= offset+len);
+			BUG_TRAP(start <= offset + len);
 
 			end = start + list->len;
-			if ((copy = end-offset) > 0) {
+			if ((copy = end - offset) > 0) {
 				unsigned int csum2;
 				if (copy > len)
 					copy = len;
-				csum2 = skb_checksum(list, offset-start, copy, 0);
+				csum2 = skb_checksum(list, offset - start,
+						     copy, 0);
 				csum = csum_block_add(csum, csum2, pos);
 				if ((len -= copy) == 0)
 					return csum;
@@ -1067,26 +1049,27 @@ unsigned int skb_checksum(const struct sk_buff *skb, int offset, int len, unsign
 			start = end;
 		}
 	}
-	if (len == 0)
-		return csum;
-
+	if (len)
+		BUG();
+	return csum;
 }
 
 /* Both of above in one bottle. */
 
-unsigned int skb_copy_and_csum_bits(const struct sk_buff *skb, int offset, u8 *to, int len, unsigned int csum)
+unsigned int skb_copy_and_csum_bits(const struct sk_buff *skb, int offset,
+				    u8 *to, int len, unsigned int csum)
 {
-	int i, copy;
-	int start = skb->len - skb->data_len;
+	int start = skb->len - skb->data_len;
+	int i, copy = start - offset;
 	int pos = 0;
 
 	/* Copy header. */
-	if ( (copy = start-offset) > 0) {
+	if (copy > 0) {
 		if (copy > len)
 			copy = len;
-		csum = csum_partial_copy_nocheck(skb->data+offset, to, copy, csum);
+		csum = csum_partial_copy_nocheck(skb->data + offset, to,
+						 copy, csum);
 		if ((len -= copy) == 0)
 			return csum;
 		offset += copy;
@@ -1094,13 +1077,13 @@ unsigned int skb_copy_and_csum_bits(const struct sk_buff *skb, int offset, u8 *t
 		pos	= copy;
 	}
 
-	for (i=0; i<skb_shinfo(skb)->nr_frags; i++) {
+	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
 		int end;
 
-		BUG_TRAP(start <= offset+len);
+		BUG_TRAP(start <= offset + len);
 
 		end = start + skb_shinfo(skb)->frags[i].size;
-		if ((copy = end-offset) > 0) {
+		if ((copy = end - offset) > 0) {
 			unsigned int csum2;
 			u8 *vaddr;
 			skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
@@ -1108,8 +1091,10 @@ unsigned int skb_copy_and_csum_bits(const struct sk_buff *skb, int offset, u8 *t
 			if (copy > len)
 				copy = len;
 			vaddr = kmap_skb_frag(frag);
-			csum2 = csum_partial_copy_nocheck(vaddr + frag->page_offset +
-						      offset-start, to, copy, 0);
+			csum2 = csum_partial_copy_nocheck(vaddr +
+							  frag->page_offset +
+							  offset - start, to,
+							  copy, 0);
 			kunmap_skb_frag(vaddr);
 			csum = csum_block_add(csum, csum2, pos);
 			if (!(len -= copy))
@@ -1122,19 +1107,21 @@ unsigned int skb_copy_and_csum_bits(const struct sk_buff *skb, int offset, u8 *t
 	}
 
 	if (skb_shinfo(skb)->frag_list) {
-		struct sk_buff *list;
+		struct sk_buff *list = skb_shinfo(skb)->frag_list;
 
-		for (list = skb_shinfo(skb)->frag_list; list; list=list->next) {
+		for (; list; list = list->next) {
 			unsigned int csum2;
 			int end;
 
-			BUG_TRAP(start <= offset+len);
+			BUG_TRAP(start <= offset + len);
 
 			end = start + list->len;
-			if ((copy = end-offset) > 0) {
+			if ((copy = end - offset) > 0) {
 				if (copy > len)
 					copy = len;
-				csum2 = skb_copy_and_csum_bits(list, offset-start, to, copy, 0);
+				csum2 = skb_copy_and_csum_bits(list,
+							       offset - start,
+							       to, copy, 0);
 				csum = csum_block_add(csum, csum2, pos);
 				if ((len -= copy) == 0)
 					return csum;
@@ -1145,9 +1132,7 @@ unsigned int skb_copy_and_csum_bits(const struct sk_buff *skb, int offset, u8 *t
 			start = end;
 		}
 	}
-	if (len == 0)
-		return csum;
-
+	if (len)
+		BUG();
 	return csum;
 }
@@ -1169,8 +1154,8 @@ void skb_copy_and_csum_dev(const struct sk_buff *skb, u8 *to)
 	csum = 0;
 	if (csstart != skb->len)
 		csum = skb_copy_and_csum_bits(skb, csstart, to + csstart,
-					      skb->len-csstart, 0);
+					      skb->len - csstart, 0);
 
 	if (skb->ip_summed == CHECKSUM_HW) {
 		long csstuff = csstart + skb->csum;
@@ -1204,6 +1189,6 @@ void __init skb_init(void)
 	if (!skbuff_head_cache)
 		panic("cannot create skbuff cache");
 
-	for (i=0; i<NR_CPUS; i++)
+	for (i = 0; i < NR_CPUS; i++)
 		skb_queue_head_init(&skb_head_pool[i].list);
 }