Commit 8728b834 authored by David S. Miller, committed by David S. Miller

[NET]: Kill skb->list

Remove the "list" member of struct sk_buff, as it is entirely
redundant.  All SKB list removal callers know which list the
SKB is on, so storing this in sk_buff does nothing other than
taking up some space.

Two tricky bits were SCTP, which I took care of, and two ATM
drivers which Francois Romieu <romieu@fr.zoreil.com> fixed
up.
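
Before reading the diff: the change is purely mechanical, replacing "the skb
knows its list" with "the caller names the list". Below is a minimal
user-space model of the before/after, with simplified types that show only
the fields involved; model_skb_unlink() mirrors the __skb_unlink() visible in
the skbuff.h hunk further down, and is an illustration, not the kernel code
itself:

	#include <stddef.h>

	struct sk_buff;

	struct sk_buff_head {			/* the head doubles as a node */
		struct sk_buff *next, *prev;
		unsigned int	qlen;
	};

	struct sk_buff {
		struct sk_buff *next, *prev;
		/* Before this commit there was also:
		 *	struct sk_buff_head *list;
		 * which let skb_unlink(skb) find the queue by itself.
		 */
	};

	/* After this commit, every unlink names the queue explicitly: */
	static void model_skb_unlink(struct sk_buff *skb, struct sk_buff_head *list)
	{
		struct sk_buff *next = skb->next, *prev = skb->prev;

		list->qlen--;
		skb->next = skb->prev = NULL;
		next->prev = prev;
		prev->next = next;
	}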
Signed-off-by: David S. Miller <davem@davemloft.net>
Signed-off-by: Francois Romieu <romieu@fr.zoreil.com>
parent 6869c4d8
...
@@ -103,8 +103,14 @@
 #define NS_IOREMAP_SIZE 4096
-#define BUF_SM 0x00000000	/* These two are used for push_rxbufs() */
-#define BUF_LG 0x00000001	/* CMD, Write_FreeBufQ, LBUF bit */
+/*
+ * BUF_XX distinguish the Rx buffers depending on their (small/large) size.
+ * BUG_SM and BUG_LG are both used by the driver and the device.
+ * BUF_NONE is only used by the driver.
+ */
+#define BUF_SM   0x00000000	/* These two are used for push_rxbufs() */
+#define BUF_LG   0x00000001	/* CMD, Write_FreeBufQ, LBUF bit */
+#define BUF_NONE 0xffffffff	/* Software only: */
 #define NS_HBUFSIZE 65568	/* Size of max. AAL5 PDU */
 #define NS_MAX_IOVECS (2 + (65568 - NS_SMBUFSIZE) / \
@@ -684,6 +690,12 @@ enum ns_regs
 /* Device driver structures ***************************************************/
+struct ns_skb_cb {
+	u32 buf_type;			/* BUF_SM/BUF_LG/BUF_NONE */
+};
+
+#define NS_SKB_CB(skb)	((struct ns_skb_cb *)((skb)->cb))
+
 typedef struct tsq_info
 {
 	void *org;
...
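
The ns_skb_cb block above gives the nicstar driver a place in the skb's cb[]
scratch area to record which free-buffer pool, if any, a buffer belongs to --
information the driver could previously infer from the now-removed skb->list.
A hedged sketch of what tagging and testing might look like; the real call
sites sit in the collapsed parts of this diff, and these helper names are
illustrative only:

	/* Illustrative helpers, not from the patch. */
	static void ns_mark_small(struct sk_buff *skb)
	{
		NS_SKB_CB(skb)->buf_type = BUF_SM;	/* joins the small-buffer pool */
	}

	static int ns_on_free_queue(struct sk_buff *skb)
	{
		return NS_SKB_CB(skb)->buf_type != BUF_NONE;
	}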
@@ -417,10 +417,12 @@ printk("dummy: 0x%08lx, 0x%08lx\n",dummy[0],dummy[1]);
 		chan = (here[3] & uPD98401_AAL5_CHAN) >>
 		    uPD98401_AAL5_CHAN_SHIFT;
 		if (chan < zatm_dev->chans && zatm_dev->rx_map[chan]) {
+			int pos = ZATM_VCC(vcc)->pool;
+
 			vcc = zatm_dev->rx_map[chan];
-			if (skb == zatm_dev->last_free[ZATM_VCC(vcc)->pool])
-				zatm_dev->last_free[ZATM_VCC(vcc)->pool] = NULL;
-			skb_unlink(skb);
+			if (skb == zatm_dev->last_free[pos])
+				zatm_dev->last_free[pos] = NULL;
+			skb_unlink(skb, zatm_dev->pool + pos);
 		}
 		else {
 			printk(KERN_ERR DEV_LABEL "(itf %d): RX indication "
...
@@ -158,7 +158,7 @@ static int bfusb_send_bulk(struct bfusb *bfusb, struct sk_buff *skb)
 	if (err) {
 		BT_ERR("%s bulk tx submit failed urb %p err %d",
 		       bfusb->hdev->name, urb, err);
-		skb_unlink(skb);
+		skb_unlink(skb, &bfusb->pending_q);
 		usb_free_urb(urb);
 	} else
 		atomic_inc(&bfusb->pending_tx);
@@ -212,7 +212,7 @@ static void bfusb_tx_complete(struct urb *urb, struct pt_regs *regs)
 	read_lock(&bfusb->lock);

-	skb_unlink(skb);
+	skb_unlink(skb, &bfusb->pending_q);
 	skb_queue_tail(&bfusb->completed_q, skb);
 	bfusb_tx_wakeup(bfusb);
@@ -253,7 +253,7 @@ static int bfusb_rx_submit(struct bfusb *bfusb, struct urb *urb)
 	if (err) {
 		BT_ERR("%s bulk rx submit failed urb %p err %d",
 		       bfusb->hdev->name, urb, err);
-		skb_unlink(skb);
+		skb_unlink(skb, &bfusb->pending_q);
 		kfree_skb(skb);
 		usb_free_urb(urb);
 	}
@@ -398,7 +398,7 @@ static void bfusb_rx_complete(struct urb *urb, struct pt_regs *regs)
 		buf += len;
 	}

-	skb_unlink(skb);
+	skb_unlink(skb, &bfusb->pending_q);
 	kfree_skb(skb);

 	bfusb_rx_submit(bfusb, urb);
...
@@ -681,7 +681,7 @@ static void handle_packet_response(struct hpsb_host *host, int tcode,
 		return;
 	}

-	__skb_unlink(skb, skb->list);
+	__skb_unlink(skb, &host->pending_packet_queue);

 	if (packet->state == hpsb_queued) {
 		packet->sendtime = jiffies;
@@ -989,7 +989,7 @@ void abort_timedouts(unsigned long __opaque)
 		packet = (struct hpsb_packet *)skb->data;

 		if (time_before(packet->sendtime + expire, jiffies)) {
-			__skb_unlink(skb, skb->list);
+			__skb_unlink(skb, &host->pending_packet_queue);
 			packet->state = hpsb_complete;
 			packet->ack_code = ACKX_TIMEOUT;
 			queue_packet_complete(packet);
...
@@ -606,7 +606,7 @@ handle_ack(act2000_card *card, act2000_chan *chan, __u8 blocknr) {
 		if ((((m->msg.data_b3_req.fakencci >> 8) & 0xff) == chan->ncci) &&
 		    (m->msg.data_b3_req.blocknr == blocknr)) {
 			/* found corresponding DATA_B3_REQ */
-			skb_unlink(tmp);
+			skb_unlink(tmp, &card->ackq);
 			chan->queued -= m->msg.data_b3_req.datalen;
 			if (m->msg.data_b3_req.flags)
 				ret = m->msg.data_b3_req.datalen;
...
@@ -156,52 +156,6 @@ static int shaper_start_xmit(struct sk_buff *skb, struct net_device *dev)
 	SHAPERCB(skb)->shapelen= shaper_clocks(shaper,skb);

-#ifdef SHAPER_COMPLEX /* and broken.. */
-	while(ptr && ptr!=(struct sk_buff *)&shaper->sendq)
-	{
-		if(ptr->pri<skb->pri
-			&& jiffies - SHAPERCB(ptr)->shapeclock < SHAPER_MAXSLIP)
-		{
-			struct sk_buff *tmp=ptr->prev;
-
-			/*
-			 *	It goes before us therefore we slip the length
-			 *	of the new frame.
-			 */
-			SHAPERCB(ptr)->shapeclock+=SHAPERCB(skb)->shapelen;
-			SHAPERCB(ptr)->shapelatency+=SHAPERCB(skb)->shapelen;
-
-			/*
-			 *	The packet may have slipped so far back it
-			 *	fell off.
-			 */
-			if(SHAPERCB(ptr)->shapelatency > SHAPER_LATENCY)
-			{
-				skb_unlink(ptr);
-				dev_kfree_skb(ptr);
-			}
-			ptr=tmp;
-		}
-		else
-			break;
-	}
-	if(ptr==NULL || ptr==(struct sk_buff *)&shaper->sendq)
-		skb_queue_head(&shaper->sendq,skb);
-	else
-	{
-		struct sk_buff *tmp;
-		/*
-		 *	Set the packet clock out time according to the
-		 *	frames ahead. Im sure a bit of thought could drop
-		 *	this loop.
-		 */
-		for(tmp=skb_peek(&shaper->sendq); tmp!=NULL && tmp!=ptr; tmp=tmp->next)
-			SHAPERCB(skb)->shapeclock+=tmp->shapelen;
-		skb_append(ptr,skb);
-	}
-#else
 	{
 		struct sk_buff *tmp;
 		/*
@@ -220,7 +174,7 @@ static int shaper_start_xmit(struct sk_buff *skb, struct net_device *dev)
 		} else
 			skb_queue_tail(&shaper->sendq, skb);
 	}
-#endif
 	if(sh_debug)
 		printk("Frame queued.\n");
 	if(skb_queue_len(&shaper->sendq)>SHAPER_QLEN)
@@ -302,7 +256,7 @@ static void shaper_kick(struct shaper *shaper)
 		 *	Pull the frame and get interrupts back on.
 		 */
-		skb_unlink(skb);
+		skb_unlink(skb, &shaper->sendq);
 		if (shaper->recovery <
 		    SHAPERCB(skb)->shapeclock + SHAPERCB(skb)->shapelen)
 			shaper->recovery = SHAPERCB(skb)->shapeclock + SHAPERCB(skb)->shapelen;
...
@@ -445,7 +445,7 @@ void s508_s514_unlock(sdla_t *card, unsigned long *smp_flags);
 void s508_s514_lock(sdla_t *card, unsigned long *smp_flags);
 unsigned short calc_checksum (char *, int);
-static int setup_fr_header(struct sk_buff** skb,
+static int setup_fr_header(struct sk_buff *skb,
 		struct net_device* dev, char op_mode);
@@ -1372,7 +1372,7 @@ static int if_send(struct sk_buff* skb, struct net_device* dev)
 	/* Move the if_header() code to here. By inserting frame
	 * relay header in if_header() we would break the
	 * tcpdump and other packet sniffers */
-	chan->fr_header_len = setup_fr_header(&skb,dev,chan->common.usedby);
+	chan->fr_header_len = setup_fr_header(skb,dev,chan->common.usedby);
 	if (chan->fr_header_len < 0 ){
 		++chan->ifstats.tx_dropped;
 		++card->wandev.stats.tx_dropped;
@@ -1597,8 +1597,6 @@ static int setup_for_delayed_transmit(struct net_device* dev,
 		return 1;
 	}

-	skb_unlink(skb);
-
 	chan->transmit_length = len;
 	chan->delay_skb = skb;
@@ -4871,18 +4869,15 @@ static void unconfig_fr (sdla_t *card)
 	}
 }

-static int setup_fr_header(struct sk_buff **skb_orig, struct net_device* dev,
+static int setup_fr_header(struct sk_buff *skb, struct net_device* dev,
 			   char op_mode)
 {
-	struct sk_buff *skb = *skb_orig;
 	fr_channel_t *chan=dev->priv;

-	if (op_mode == WANPIPE){
+	if (op_mode == WANPIPE) {
 		chan->fr_header[0]=Q922_UI;

 		switch (htons(skb->protocol)){
 		case ETH_P_IP:
 			chan->fr_header[1]=NLPID_IP;
 			break;
@@ -4894,16 +4889,14 @@ static int setup_fr_header(struct sk_buff **skb_orig, struct net_device* dev,
 	}

 	/* If we are in bridging mode, we must apply
-	 * an Ethernet header */
-	if (op_mode == BRIDGE || op_mode == BRIDGE_NODE){
+	 * an Ethernet header
+	 */
+	if (op_mode == BRIDGE || op_mode == BRIDGE_NODE) {

 		/* Encapsulate the packet as a bridged Ethernet frame. */
 #ifdef DEBUG
 		printk(KERN_INFO "%s: encapsulating skb for frame relay\n",
 			dev->name);
 #endif
 		chan->fr_header[0] = 0x03;
 		chan->fr_header[1] = 0x00;
 		chan->fr_header[2] = 0x80;
@@ -4916,7 +4909,6 @@ static int setup_fr_header(struct sk_buff **skb_orig, struct net_device* dev,
 		/* Yuck. */
 		skb->protocol = ETH_P_802_3;
-		*skb_orig = skb;
 		return 8;
 	}
 	return 0;
...
@@ -2903,19 +2903,18 @@ static struct net_device_stats *usbnet_get_stats (struct net_device *net)
  * completion callbacks.  2.5 should have fixed those bugs...
  */
-static void defer_bh (struct usbnet *dev, struct sk_buff *skb)
+static void defer_bh(struct usbnet *dev, struct sk_buff *skb, struct sk_buff_head *list)
 {
-	struct sk_buff_head	*list = skb->list;
 	unsigned long		flags;

-	spin_lock_irqsave (&list->lock, flags);
-	__skb_unlink (skb, list);
-	spin_unlock (&list->lock);
-	spin_lock (&dev->done.lock);
-	__skb_queue_tail (&dev->done, skb);
+	spin_lock_irqsave(&list->lock, flags);
+	__skb_unlink(skb, list);
+	spin_unlock(&list->lock);
+	spin_lock(&dev->done.lock);
+	__skb_queue_tail(&dev->done, skb);
 	if (dev->done.qlen == 1)
-		tasklet_schedule (&dev->bh);
-	spin_unlock_irqrestore (&dev->done.lock, flags);
+		tasklet_schedule(&dev->bh);
+	spin_unlock_irqrestore(&dev->done.lock, flags);
 }

 /* some work can't be done in tasklets, so we use keventd
@@ -3120,7 +3119,7 @@ static void rx_complete (struct urb *urb, struct pt_regs *regs)
 		break;
 	}

-	defer_bh (dev, skb);
+	defer_bh(dev, skb, &dev->rxq);

 	if (urb) {
 		if (netif_running (dev->net)
@@ -3490,7 +3489,7 @@ static void tx_complete (struct urb *urb, struct pt_regs *regs)
 	urb->dev = NULL;
 	entry->state = tx_done;
-	defer_bh (dev, skb);
+	defer_bh(dev, skb, &dev->txq);
 }

 /*-------------------------------------------------------------------------*/
...
@@ -204,7 +204,6 @@ struct sk_buff {
 	struct sk_buff		*next;
 	struct sk_buff		*prev;

-	struct sk_buff_head	*list;
 	struct sock		*sk;
 	struct timeval		stamp;
 	struct net_device	*dev;
@@ -597,7 +596,6 @@ static inline void __skb_queue_head(struct sk_buff_head *list,
 {
 	struct sk_buff *prev, *next;

-	newsk->list = list;
 	list->qlen++;
 	prev = (struct sk_buff *)list;
 	next = prev->next;
@@ -622,7 +620,6 @@ static inline void __skb_queue_tail(struct sk_buff_head *list,
 {
 	struct sk_buff *prev, *next;

-	newsk->list = list;
 	list->qlen++;
 	next = (struct sk_buff *)list;
 	prev = next->prev;
@@ -655,7 +652,6 @@ static inline struct sk_buff *__skb_dequeue(struct sk_buff_head *list)
 		next->prev   = prev;
 		prev->next   = next;
 		result->next = result->prev = NULL;
-		result->list = NULL;
 	}
 	return result;
 }
@@ -664,7 +660,7 @@ static inline struct sk_buff *__skb_dequeue(struct sk_buff_head *list)
 /*
  *	Insert a packet on a list.
  */
-extern void skb_insert(struct sk_buff *old, struct sk_buff *newsk);
+extern void skb_insert(struct sk_buff *old, struct sk_buff *newsk, struct sk_buff_head *list);
 static inline void __skb_insert(struct sk_buff *newsk,
 				struct sk_buff *prev, struct sk_buff *next,
 				struct sk_buff_head *list)
@@ -672,24 +668,23 @@ static inline void __skb_insert(struct sk_buff *newsk,
 	newsk->next = next;
 	newsk->prev = prev;
 	next->prev = prev->next = newsk;
-	newsk->list = list;
 	list->qlen++;
 }

 /*
  *	Place a packet after a given packet in a list.
  */
-extern void skb_append(struct sk_buff *old, struct sk_buff *newsk);
-static inline void __skb_append(struct sk_buff *old, struct sk_buff *newsk)
+extern void skb_append(struct sk_buff *old, struct sk_buff *newsk, struct sk_buff_head *list);
+static inline void __skb_append(struct sk_buff *old, struct sk_buff *newsk, struct sk_buff_head *list)
 {
-	__skb_insert(newsk, old, old->next, old->list);
+	__skb_insert(newsk, old, old->next, list);
 }

 /*
  * remove sk_buff from list. _Must_ be called atomically, and with
  * the list known..
  */
-extern void skb_unlink(struct sk_buff *skb);
+extern void skb_unlink(struct sk_buff *skb, struct sk_buff_head *list);
 static inline void __skb_unlink(struct sk_buff *skb, struct sk_buff_head *list)
 {
 	struct sk_buff *next, *prev;
@@ -698,7 +693,6 @@ static inline void __skb_unlink(struct sk_buff *skb, struct sk_buff_head *list)
 	next	   = skb->next;
 	prev	   = skb->prev;
 	skb->next  = skb->prev = NULL;
-	skb->list  = NULL;
 	next->prev = prev;
 	prev->next = next;
 }
...
@@ -34,7 +34,6 @@
 void skb_migrate(struct sk_buff_head *from,struct sk_buff_head *to)
 {
-	struct sk_buff *skb;
 	unsigned long flags;
 	struct sk_buff *skb_from = (struct sk_buff *) from;
 	struct sk_buff *skb_to = (struct sk_buff *) to;
@@ -47,8 +46,6 @@ void skb_migrate(struct sk_buff_head *from,struct sk_buff_head *to)
 	prev->next = skb_to;
 	to->prev->next = from->next;
 	to->prev = from->prev;
-	for (skb = from->next; skb != skb_to; skb = skb->next)
-		skb->list = to;
 	to->qlen += from->qlen;
 	spin_unlock(&to->lock);
 	from->prev = skb_from;
...
@@ -76,7 +76,7 @@ void ax25_requeue_frames(ax25_cb *ax25)
 		if (skb_prev == NULL)
 			skb_queue_head(&ax25->write_queue, skb);
 		else
-			skb_append(skb_prev, skb);
+			skb_append(skb_prev, skb, &ax25->write_queue);
 		skb_prev = skb;
 	}
 }
...
@@ -281,8 +281,6 @@ void kfree_skbmem(struct sk_buff *skb)
 void __kfree_skb(struct sk_buff *skb)
 {
-	BUG_ON(skb->list != NULL);
-
 	dst_release(skb->dst);
 #ifdef CONFIG_XFRM
 	secpath_put(skb->sp);
@@ -333,7 +331,6 @@ struct sk_buff *skb_clone(struct sk_buff *skb, unsigned int __nocast gfp_mask)
 #define C(x) n->x = skb->x

 	n->next = n->prev = NULL;
-	n->list = NULL;
 	n->sk = NULL;
 	C(stamp);
 	C(dev);
@@ -403,7 +400,6 @@ static void copy_skb_header(struct sk_buff *new, const struct sk_buff *old)
 	 */
 	unsigned long offset = new->data - old->data;

-	new->list	= NULL;
 	new->sk		= NULL;
 	new->dev	= old->dev;
 	new->real_dev	= old->real_dev;
@@ -1342,50 +1338,43 @@ void skb_queue_tail(struct sk_buff_head *list, struct sk_buff *newsk)
 	__skb_queue_tail(list, newsk);
 	spin_unlock_irqrestore(&list->lock, flags);
 }

 /**
  *	skb_unlink	-	remove a buffer from a list
  *	@skb: buffer to remove
+ *	@list: list to use
  *
- *	Place a packet after a given packet in a list. The list locks are taken
- *	and this function is atomic with respect to other list locked calls
+ *	Remove a packet from a list. The list locks are taken and this
+ *	function is atomic with respect to other list locked calls
  *
- *	Works even without knowing the list it is sitting on, which can be
- *	handy at times. It also means that THE LIST MUST EXIST when you
- *	unlink. Thus a list must have its contents unlinked before it is
- *	destroyed.
+ *	You must know what list the SKB is on.
  */
-void skb_unlink(struct sk_buff *skb)
+void skb_unlink(struct sk_buff *skb, struct sk_buff_head *list)
 {
-	struct sk_buff_head *list = skb->list;
-
-	if (list) {
-		unsigned long flags;
+	unsigned long flags;

-		spin_lock_irqsave(&list->lock, flags);
-		if (skb->list == list)
-			__skb_unlink(skb, skb->list);
-		spin_unlock_irqrestore(&list->lock, flags);
-	}
+	spin_lock_irqsave(&list->lock, flags);
+	__skb_unlink(skb, list);
+	spin_unlock_irqrestore(&list->lock, flags);
 }

 /**
  *	skb_append	-	append a buffer
  *	@old: buffer to insert after
  *	@newsk: buffer to insert
+ *	@list: list to use
  *
  *	Place a packet after a given packet in a list. The list locks are taken
  *	and this function is atomic with respect to other list locked calls.
  *	A buffer cannot be placed on two lists at the same time.
  */
-void skb_append(struct sk_buff *old, struct sk_buff *newsk)
+void skb_append(struct sk_buff *old, struct sk_buff *newsk, struct sk_buff_head *list)
 {
 	unsigned long flags;

-	spin_lock_irqsave(&old->list->lock, flags);
-	__skb_append(old, newsk);
-	spin_unlock_irqrestore(&old->list->lock, flags);
+	spin_lock_irqsave(&list->lock, flags);
+	__skb_append(old, newsk, list);
+	spin_unlock_irqrestore(&list->lock, flags);
 }

@@ -1393,19 +1382,21 @@ void skb_append(struct sk_buff *old, struct sk_buff *newsk)
 /**
  *	skb_insert	-	insert a buffer
  *	@old: buffer to insert before
  *	@newsk: buffer to insert
+ *	@list: list to use
+ *
+ *	Place a packet before a given packet in a list. The list locks are
+ *	taken and this function is atomic with respect to other list locked
+ *	calls.
  *
- *	Place a packet before a given packet in a list. The list locks are taken
- *	and this function is atomic with respect to other list locked calls
 *	A buffer cannot be placed on two lists at the same time.
  */
-void skb_insert(struct sk_buff *old, struct sk_buff *newsk)
+void skb_insert(struct sk_buff *old, struct sk_buff *newsk, struct sk_buff_head *list)
 {
 	unsigned long flags;

-	spin_lock_irqsave(&old->list->lock, flags);
-	__skb_insert(newsk, old->prev, old, old->list);
-	spin_unlock_irqrestore(&old->list->lock, flags);
+	spin_lock_irqsave(&list->lock, flags);
+	__skb_insert(newsk, old->prev, old, list);
+	spin_unlock_irqrestore(&list->lock, flags);
 }

 #if 0
...
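
One behavioral consequence of the skb_unlink() rewrite above deserves a note:
the old version re-checked skb->list == list under the lock, so it quietly
tolerated an skb that was on no list or had already been moved; the new
version unlinks unconditionally. Callers must now guarantee membership
themselves, typically via the lock that serializes all users of the queue.
A sketch of that caller-side obligation; queue_lock and still_queued are
stand-ins for whatever lock and state the caller really has:

	spin_lock_irqsave(&queue_lock, flags);	/* same lock all queue users take */
	if (still_queued(skb))			/* caller-side proof of membership */
		__skb_unlink(skb, q);		/* safe: skb is known to be on q */
	spin_unlock_irqrestore(&queue_lock, flags);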
@@ -1763,7 +1763,7 @@ static int dn_recvmsg(struct kiocb *iocb, struct socket *sock,
 		nskb = skb->next;

 		if (skb->len == 0) {
-			skb_unlink(skb);
+			skb_unlink(skb, queue);
 			kfree_skb(skb);
 			/*
 			 * N.B. Don't refer to skb or cb after this point
...
@@ -479,7 +479,7 @@ int dn_nsp_check_xmit_queue(struct sock *sk, struct sk_buff *skb, struct sk_buff
 		xmit_count = cb2->xmit_count;
 		segnum = cb2->segnum;
 		/* Remove and drop ack'ed packet */
-		skb_unlink(ack);
+		skb_unlink(ack, q);
 		kfree_skb(ack);
 		ack = NULL;
...
@@ -869,7 +869,7 @@ static void aun_tx_ack(unsigned long seq, int result)
 foundit:
 	tx_result(skb->sk, eb->cookie, result);
-	skb_unlink(skb);
+	skb_unlink(skb, &aun_queue);
 	spin_unlock_irqrestore(&aun_queue_lock, flags);
 	kfree_skb(skb);
 }
@@ -947,7 +947,7 @@ static void ab_cleanup(unsigned long h)
 		{
 			tx_result(skb->sk, eb->cookie,
 				  ECTYPE_TRANSMIT_NOT_PRESENT);
-			skb_unlink(skb);
+			skb_unlink(skb, &aun_queue);
 			kfree_skb(skb);
 		}
 		skb = newskb;
...
@@ -975,7 +975,7 @@ int tcp_sendmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
 			if (!skb->len) {
 				if (sk->sk_send_head == skb)
 					sk->sk_send_head = NULL;
-				__skb_unlink(skb, skb->list);
+				__skb_unlink(skb, &sk->sk_write_queue);
 				sk_stream_free_skb(sk, skb);
 			}
...
@@ -2085,7 +2085,7 @@ static int tcp_clean_rtx_queue(struct sock *sk, __s32 *seq_rtt_p, s32 *seq_usrtt
 			seq_rtt = now - scb->when;
 		tcp_dec_pcount_approx(&tp->fackets_out, skb);
 		tcp_packets_out_dec(tp, skb);
-		__skb_unlink(skb, skb->list);
+		__skb_unlink(skb, &sk->sk_write_queue);
 		sk_stream_free_skb(sk, skb);
 	}
@@ -2853,7 +2853,7 @@ static void tcp_ofo_queue(struct sock *sk)
 		if (!after(TCP_SKB_CB(skb)->end_seq, tp->rcv_nxt)) {
 			SOCK_DEBUG(sk, "ofo packet was already received \n");
-			__skb_unlink(skb, skb->list);
+			__skb_unlink(skb, &tp->out_of_order_queue);
 			__kfree_skb(skb);
 			continue;
 		}
@@ -2861,7 +2861,7 @@ static void tcp_ofo_queue(struct sock *sk)
 			   tp->rcv_nxt, TCP_SKB_CB(skb)->seq,
 			   TCP_SKB_CB(skb)->end_seq);

-		__skb_unlink(skb, skb->list);
+		__skb_unlink(skb, &tp->out_of_order_queue);
 		__skb_queue_tail(&sk->sk_receive_queue, skb);
 		tp->rcv_nxt = TCP_SKB_CB(skb)->end_seq;
 		if(skb->h.th->fin)
@@ -3027,7 +3027,7 @@ static void tcp_data_queue(struct sock *sk, struct sk_buff *skb)
 		u32 end_seq = TCP_SKB_CB(skb)->end_seq;

 		if (seq == TCP_SKB_CB(skb1)->end_seq) {
-			__skb_append(skb1, skb);
+			__skb_append(skb1, skb, &tp->out_of_order_queue);

 			if (!tp->rx_opt.num_sacks ||
 			    tp->selective_acks[0].end_seq != seq)
@@ -3071,7 +3071,7 @@ static void tcp_data_queue(struct sock *sk, struct sk_buff *skb)
 				tcp_dsack_extend(tp, TCP_SKB_CB(skb1)->seq, end_seq);
 				break;
 			}
-			__skb_unlink(skb1, skb1->list);
+			__skb_unlink(skb1, &tp->out_of_order_queue);
 			tcp_dsack_extend(tp, TCP_SKB_CB(skb1)->seq, TCP_SKB_CB(skb1)->end_seq);
 			__kfree_skb(skb1);
 		}
@@ -3088,8 +3088,9 @@ static void tcp_data_queue(struct sock *sk, struct sk_buff *skb)
  * simplifies code)
  */
 static void
-tcp_collapse(struct sock *sk, struct sk_buff *head,
-	     struct sk_buff *tail, u32 start, u32 end)
+tcp_collapse(struct sock *sk, struct sk_buff_head *list,
+	     struct sk_buff *head, struct sk_buff *tail,
+	     u32 start, u32 end)
 {
 	struct sk_buff *skb;
@@ -3099,7 +3100,7 @@ tcp_collapse(struct sock *sk, struct sk_buff_head *list,
 		/* No new bits? It is possible on ofo queue. */
 		if (!before(start, TCP_SKB_CB(skb)->end_seq)) {
 			struct sk_buff *next = skb->next;
-			__skb_unlink(skb, skb->list);
+			__skb_unlink(skb, list);
 			__kfree_skb(skb);
 			NET_INC_STATS_BH(LINUX_MIB_TCPRCVCOLLAPSED);
 			skb = next;
@@ -3145,7 +3146,7 @@ tcp_collapse(struct sock *sk, struct sk_buff_head *list,
 		nskb->mac.raw = nskb->head + (skb->mac.raw-skb->head);
 		memcpy(nskb->cb, skb->cb, sizeof(skb->cb));
 		TCP_SKB_CB(nskb)->seq = TCP_SKB_CB(nskb)->end_seq = start;
-		__skb_insert(nskb, skb->prev, skb, skb->list);
+		__skb_insert(nskb, skb->prev, skb, list);
 		sk_stream_set_owner_r(nskb, sk);

 		/* Copy data, releasing collapsed skbs. */
@@ -3164,7 +3165,7 @@ tcp_collapse(struct sock *sk, struct sk_buff_head *list,
 			}
 			if (!before(start, TCP_SKB_CB(skb)->end_seq)) {
 				struct sk_buff *next = skb->next;
-				__skb_unlink(skb, skb->list);
+				__skb_unlink(skb, list);
 				__kfree_skb(skb);
 				NET_INC_STATS_BH(LINUX_MIB_TCPRCVCOLLAPSED);
 				skb = next;
@@ -3200,7 +3201,8 @@ static void tcp_collapse_ofo_queue(struct sock *sk)
 		if (skb == (struct sk_buff *)&tp->out_of_order_queue ||
 		    after(TCP_SKB_CB(skb)->seq, end) ||
 		    before(TCP_SKB_CB(skb)->end_seq, start)) {
-			tcp_collapse(sk, head, skb, start, end);
+			tcp_collapse(sk, &tp->out_of_order_queue,
+				     head, skb, start, end);
 			head = skb;
 			if (skb == (struct sk_buff *)&tp->out_of_order_queue)
 				break;
@@ -3237,7 +3239,8 @@ static int tcp_prune_queue(struct sock *sk)
 	tp->rcv_ssthresh = min(tp->rcv_ssthresh, 4U * tp->advmss);

 	tcp_collapse_ofo_queue(sk);
-	tcp_collapse(sk, sk->sk_receive_queue.next,
+	tcp_collapse(sk, &sk->sk_receive_queue,
+		     sk->sk_receive_queue.next,
 		     (struct sk_buff*)&sk->sk_receive_queue,
 		     tp->copied_seq, tp->rcv_nxt);

 	sk_stream_mem_reclaim(sk);
@@ -3462,7 +3465,7 @@ static void tcp_check_urg(struct sock * sk, struct tcphdr * th)
 		struct sk_buff *skb = skb_peek(&sk->sk_receive_queue);
 		tp->copied_seq++;
 		if (skb && !before(tp->copied_seq, TCP_SKB_CB(skb)->end_seq)) {
-			__skb_unlink(skb, skb->list);
+			__skb_unlink(skb, &sk->sk_receive_queue);
 			__kfree_skb(skb);
 		}
 	}
...
@@ -505,7 +505,7 @@ static int tcp_fragment(struct sock *sk, struct sk_buff *skb, u32 len, unsigned
 	/* Link BUFF into the send queue. */
 	skb_header_release(buff);
-	__skb_append(skb, buff);
+	__skb_append(skb, buff, &sk->sk_write_queue);

 	return 0;
 }
@@ -893,7 +893,7 @@ static int tso_fragment(struct sock *sk, struct sk_buff *skb, unsigned int len,
 	/* Link BUFF into the send queue. */
 	skb_header_release(buff);
-	__skb_append(skb, buff);
+	__skb_append(skb, buff, &sk->sk_write_queue);

 	return 0;
 }
@@ -1238,7 +1238,7 @@ static void tcp_retrans_try_collapse(struct sock *sk, struct sk_buff *skb, int m
 		       tcp_skb_pcount(next_skb) != 1);

 	/* Ok.	We will be able to collapse the packet. */
-	__skb_unlink(next_skb, next_skb->list);
+	__skb_unlink(next_skb, &sk->sk_write_queue);

 	memcpy(skb_put(skb, next_skb_size), next_skb->data, next_skb_size);
...
@@ -988,9 +988,6 @@ void irlap_resend_rejected_frames(struct irlap_cb *self, int command)
 			IRDA_DEBUG(0, "%s(), unable to copy\n", __FUNCTION__);
 			return;
 		}
-		/* Unlink tx_skb from list */
-		tx_skb->next = tx_skb->prev = NULL;
-		tx_skb->list = NULL;

 		/* Clear old Nr field + poll bit */
 		tx_skb->data[1] &= 0x0f;
@@ -1063,9 +1060,6 @@ void irlap_resend_rejected_frame(struct irlap_cb *self, int command)
 		IRDA_DEBUG(0, "%s(), unable to copy\n", __FUNCTION__);
 		return;
 	}
-	/* Unlink tx_skb from list */
-	tx_skb->next = tx_skb->prev = NULL;
-	tx_skb->list = NULL;

 	/* Clear old Nr field + poll bit */
 	tx_skb->data[1] &= 0x0f;
...
@@ -78,7 +78,7 @@ void lapb_requeue_frames(struct lapb_cb *lapb)
 		if (!skb_prev)
 			skb_queue_head(&lapb->write_queue, skb);
 		else
-			skb_append(skb_prev, skb);
+			skb_append(skb_prev, skb, &lapb->write_queue);
 		skb_prev = skb;
 	}
 }
...
@@ -714,7 +714,7 @@ static int llc_ui_recvmsg(struct kiocb *iocb, struct socket *sock,
 		if (uaddr)
 			memcpy(uaddr, llc_ui_skb_cb(skb), sizeof(*uaddr));
 		msg->msg_namelen = sizeof(*uaddr);
-		if (!skb->list) {
+		if (!skb->next) {
 dgram_free:
 			kfree_skb(skb);
 		}
...
@@ -71,7 +71,11 @@ int llc_conn_state_process(struct sock *sk, struct sk_buff *skb)
 	if (!ev->ind_prim && !ev->cfm_prim) {
 		/* indicate or confirm not required */
-		if (!skb->list)
+		/* XXX this is not very pretty, perhaps we should store
+		 * XXX indicate/confirm-needed state in the llc_conn_state_ev
+		 * XXX control block of the SKB instead? -DaveM
+		 */
+		if (!skb->next)
 			goto out_kfree_skb;
 		goto out_skb_put;
 	}
...
@@ -77,7 +77,7 @@ void nr_requeue_frames(struct sock *sk)
 		if (skb_prev == NULL)
 			skb_queue_head(&sk->sk_write_queue, skb);
 		else
-			skb_append(skb_prev, skb);
+			skb_append(skb_prev, skb, &sk->sk_write_queue);
 		skb_prev = skb;
 	}
 }
...
@@ -74,7 +74,7 @@ void rose_requeue_frames(struct sock *sk)
 		if (skb_prev == NULL)
 			skb_queue_head(&sk->sk_write_queue, skb);
 		else
-			skb_append(skb_prev, skb);
+			skb_append(skb_prev, skb, &sk->sk_write_queue);
 		skb_prev = skb;
 	}
 }
...
@@ -4892,7 +4892,7 @@ static void sctp_sock_migrate(struct sock *oldsk, struct sock *newsk,
 	sctp_skb_for_each(skb, &oldsk->sk_receive_queue, tmp) {
 		event = sctp_skb2event(skb);
 		if (event->asoc == assoc) {
-			__skb_unlink(skb, skb->list);
+			__skb_unlink(skb, &oldsk->sk_receive_queue);
 			__skb_queue_tail(&newsk->sk_receive_queue, skb);
 		}
 	}
@@ -4921,7 +4921,7 @@ static void sctp_sock_migrate(struct sock *oldsk, struct sock *newsk,
 		sctp_skb_for_each(skb, &oldsp->pd_lobby, tmp) {
 			event = sctp_skb2event(skb);
 			if (event->asoc == assoc) {
-				__skb_unlink(skb, skb->list);
+				__skb_unlink(skb, &oldsp->pd_lobby);
 				__skb_queue_tail(queue, skb);
 			}
 		}
...
@@ -50,9 +50,9 @@
 /* Forward declarations for internal helpers. */
 static struct sctp_ulpevent * sctp_ulpq_reasm(struct sctp_ulpq *ulpq,
 					      struct sctp_ulpevent *);
 static struct sctp_ulpevent * sctp_ulpq_order(struct sctp_ulpq *,
 					      struct sctp_ulpevent *);

 /* 1st Level Abstractions */
@@ -125,7 +125,9 @@ int sctp_ulpq_tail_data(struct sctp_ulpq *ulpq, struct sctp_chunk *chunk,
 		event = sctp_ulpq_order(ulpq, event);
 	}

-	/* Send event to the ULP. */
+	/* Send event to the ULP.  'event' is the sctp_ulpevent for
+	 * very first SKB on the 'temp' list.
+	 */
 	if (event)
 		sctp_ulpq_tail_event(ulpq, event);
@@ -158,14 +160,18 @@ static int sctp_ulpq_clear_pd(struct sctp_ulpq *ulpq)
 	return sctp_clear_pd(ulpq->asoc->base.sk);
 }

+/* If the SKB of 'event' is on a list, it is the first such member
+ * of that list.
+ */
 int sctp_ulpq_tail_event(struct sctp_ulpq *ulpq, struct sctp_ulpevent *event)
 {
 	struct sock *sk = ulpq->asoc->base.sk;
-	struct sk_buff_head *queue;
+	struct sk_buff_head *queue, *skb_list;
+	struct sk_buff *skb = sctp_event2skb(event);
 	int clear_pd = 0;

+	skb_list = (struct sk_buff_head *) skb->prev;
+
 	/* If the socket is just going to throw this away, do not
 	 * even try to deliver it.
 	 */
@@ -197,10 +203,10 @@ int sctp_ulpq_tail_event(struct sctp_ulpq *ulpq, struct sctp_ulpevent *event)
 	/* If we are harvesting multiple skbs they will be
 	 * collected on a list.
 	 */
-	if (sctp_event2skb(event)->list)
-		sctp_skb_list_tail(sctp_event2skb(event)->list, queue);
+	if (skb_list)
+		sctp_skb_list_tail(skb_list, queue);
 	else
-		__skb_queue_tail(queue, sctp_event2skb(event));
+		__skb_queue_tail(queue, skb);

 	/* Did we just complete partial delivery and need to get
 	 * rolling again?  Move pending data to the receive
@@ -214,10 +220,11 @@ int sctp_ulpq_tail_event(struct sctp_ulpq *ulpq, struct sctp_ulpevent *event)
 	return 1;

 out_free:
-	if (sctp_event2skb(event)->list)
-		sctp_queue_purge_ulpevents(sctp_event2skb(event)->list);
+	if (skb_list)
+		sctp_queue_purge_ulpevents(skb_list);
 	else
 		sctp_ulpevent_free(event);
+
 	return 0;
 }
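
The (struct sk_buff_head *) skb->prev cast above leans on the queue layout:
struct sk_buff_head begins with the same next/prev pair as struct sk_buff,
and the queues are circular with the head acting as a node. So for the first
skb on a queue, skb->prev points back at the head itself, which is exactly
the invariant the new comment before sctp_ulpq_tail_event() records. In the
simplified model from the note near the top of this page:

	struct sk_buff_head q;		/* head doubles as a list node       */
	struct sk_buff a;		/* first (and only) skb on the queue */

	/* After queueing 'a' at the tail of the empty 'q':
	 *	q.next == &a		q.prev == &a
	 *	a.next == (struct sk_buff *)&q
	 *	a.prev == (struct sk_buff *)&q
	 * hence (struct sk_buff_head *)a.prev == &q, recovering the queue.
	 */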
@@ -269,7 +276,7 @@ static inline void sctp_ulpq_store_reasm(struct sctp_ulpq *ulpq,
  * payload was fragmented on the way and ip had to reassemble them.
  * We add the rest of skb's to the first skb's fraglist.
  */
-static struct sctp_ulpevent *sctp_make_reassembled_event(struct sk_buff *f_frag, struct sk_buff *l_frag)
+static struct sctp_ulpevent *sctp_make_reassembled_event(struct sk_buff_head *queue, struct sk_buff *f_frag, struct sk_buff *l_frag)
 {
 	struct sk_buff *pos;
 	struct sctp_ulpevent *event;
@@ -294,7 +301,7 @@ static struct sctp_ulpevent *sctp_make_reassembled_event(struct sk_buff *f_frag,
 		skb_shinfo(f_frag)->frag_list = pos;

 	/* Remove the first fragment from the reassembly queue.  */
-	__skb_unlink(f_frag, f_frag->list);
+	__skb_unlink(f_frag, queue);
 	while (pos) {

 		pnext = pos->next;
@@ -304,7 +311,7 @@ static struct sctp_ulpevent *sctp_make_reassembled_event(struct sk_buff *f_frag,
 		f_frag->data_len += pos->len;

 		/* Remove the fragment from the reassembly queue.  */
-		__skb_unlink(pos, pos->list);
+		__skb_unlink(pos, queue);

 		/* Break if we have reached the last fragment.  */
 		if (pos == l_frag)
@@ -375,7 +382,7 @@ static inline struct sctp_ulpevent *sctp_ulpq_retrieve_reassembled(struct sctp_u
 done:
 	return retval;
 found:
-	retval = sctp_make_reassembled_event(first_frag, pos);
+	retval = sctp_make_reassembled_event(&ulpq->reasm, first_frag, pos);
 	if (retval)
 		retval->msg_flags |= MSG_EOR;
 	goto done;
@@ -435,7 +442,7 @@ static inline struct sctp_ulpevent *sctp_ulpq_retrieve_partial(struct sctp_ulpq
 	 * further.
 	 */
done:
-	retval = sctp_make_reassembled_event(first_frag, last_frag);
+	retval = sctp_make_reassembled_event(&ulpq->reasm, first_frag, last_frag);
 	if (retval && is_last)
 		retval->msg_flags |= MSG_EOR;
@@ -527,7 +534,7 @@ static inline struct sctp_ulpevent *sctp_ulpq_retrieve_first(struct sctp_ulpq *u
 	 * further.
 	 */
done:
-	retval = sctp_make_reassembled_event(first_frag, last_frag);
+	retval = sctp_make_reassembled_event(&ulpq->reasm, first_frag, last_frag);
 	return retval;
 }
@@ -537,6 +544,7 @@ static inline struct sctp_ulpevent *sctp_ulpq_retrieve_first(struct sctp_ulpq *u
 static inline void sctp_ulpq_retrieve_ordered(struct sctp_ulpq *ulpq,
 					      struct sctp_ulpevent *event)
 {
+	struct sk_buff_head *event_list;
 	struct sk_buff *pos, *tmp;
 	struct sctp_ulpevent *cevent;
 	struct sctp_stream *in;
@@ -547,6 +555,8 @@ static inline void sctp_ulpq_retrieve_ordered(struct sctp_ulpq *ulpq,
 	ssn = event->ssn;
 	in  = &ulpq->asoc->ssnmap->in;

+	event_list = (struct sk_buff_head *) sctp_event2skb(event)->prev;
+
 	/* We are holding the chunks by stream, by SSN.  */
 	sctp_skb_for_each(pos, &ulpq->lobby, tmp) {
 		cevent = (struct sctp_ulpevent *) pos->cb;
@@ -567,10 +577,10 @@ static inline void sctp_ulpq_retrieve_ordered(struct sctp_ulpq *ulpq,
 		/* Found it, so mark in the ssnmap. */
 		sctp_ssn_next(in, sid);

-		__skb_unlink(pos, pos->list);
+		__skb_unlink(pos, &ulpq->lobby);

 		/* Attach all gathered skbs to the event.  */
-		__skb_queue_tail(sctp_event2skb(event)->list, pos);
+		__skb_queue_tail(event_list, pos);
 	}
 }
@@ -626,7 +636,7 @@ static inline void sctp_ulpq_store_ordered(struct sctp_ulpq *ulpq,
 }

 static struct sctp_ulpevent *sctp_ulpq_order(struct sctp_ulpq *ulpq,
 					     struct sctp_ulpevent *event)
 {
 	__u16 sid, ssn;
 	struct sctp_stream *in;
@@ -667,7 +677,7 @@ static inline void sctp_ulpq_reap_ordered(struct sctp_ulpq *ulpq)
 {
 	struct sk_buff *pos, *tmp;
 	struct sctp_ulpevent *cevent;
-	struct sctp_ulpevent *event = NULL;
+	struct sctp_ulpevent *event;
 	struct sctp_stream *in;
 	struct sk_buff_head temp;
 	__u16 csid, cssn;
@@ -675,6 +685,8 @@ static inline void sctp_ulpq_reap_ordered(struct sctp_ulpq *ulpq)
 	in  = &ulpq->asoc->ssnmap->in;

 	/* We are holding the chunks by stream, by SSN.  */
+	skb_queue_head_init(&temp);
+	event = NULL;
 	sctp_skb_for_each(pos, &ulpq->lobby, tmp) {
 		cevent = (struct sctp_ulpevent *) pos->cb;
 		csid = cevent->stream;
@@ -686,19 +698,20 @@ static inline void sctp_ulpq_reap_ordered(struct sctp_ulpq *ulpq)
 		/* Found it, so mark in the ssnmap. */
 		sctp_ssn_next(in, csid);

-		__skb_unlink(pos, pos->list);
+		__skb_unlink(pos, &ulpq->lobby);
 		if (!event) {
 			/* Create a temporary list to collect chunks on.  */
 			event = sctp_skb2event(pos);
-			skb_queue_head_init(&temp);
 			__skb_queue_tail(&temp, sctp_event2skb(event));
 		} else {
 			/* Attach all gathered skbs to the event.  */
-			__skb_queue_tail(sctp_event2skb(event)->list, pos);
+			__skb_queue_tail(&temp, pos);
 		}
 	}

-	/* Send event to the ULP. */
+	/* Send event to the ULP.  'event' is the sctp_ulpevent for
+	 * very first SKB on the 'temp' list.
+	 */
 	if (event)
 		sctp_ulpq_tail_event(ulpq, event);
 }
...
@@ -286,16 +286,16 @@ void unix_gc(void)
 			skb = skb_peek(&s->sk_receive_queue);
 			while (skb &&
 			       skb != (struct sk_buff *)&s->sk_receive_queue) {
-				nextsk=skb->next;
+				nextsk = skb->next;
 				/*
 				 *	Do we have file descriptors ?
 				 */
-				if(UNIXCB(skb).fp)
-				{
-					__skb_unlink(skb, skb->list);
-					__skb_queue_tail(&hitlist,skb);
+				if (UNIXCB(skb).fp) {
+					__skb_unlink(skb,
+						     &s->sk_receive_queue);
+					__skb_queue_tail(&hitlist, skb);
 				}
-				skb=nextsk;
+				skb = nextsk;
 			}
 			spin_unlock(&s->sk_receive_queue.lock);
 		}
...
@@ -80,7 +80,7 @@ void x25_requeue_frames(struct sock *sk)
 		if (!skb_prev)
 			skb_queue_head(&sk->sk_write_queue, skb);
 		else
-			skb_append(skb_prev, skb);
+			skb_append(skb_prev, skb, &sk->sk_write_queue);
 		skb_prev = skb;
 	}
 }
...