Commit 83fd84ce authored by Kai Germaschewski's avatar Kai Germaschewski

ISDN: new xmit handling for ISDN net interfaces

Instead of using per-channel locking, just lock the entire ISDN network
interface as an entity, getting rid of the convoluted locking scheme.
  
Basically, ::hard_start_xmit() is already serialized by the network core,
so we could just rely on that. However, we want to send keep-alive
frames, PPP messages from ipppd and similar things directly without going
through the network stack, so we additionally lock
isdn_net_local->xmit_lock in hard_start_xmit(), which is taken in
the other paths dealing with transmitting frames as well.
parent 5a7728c6
...@@ -33,102 +33,90 @@ ...@@ -33,102 +33,90 @@
#include "isdn_concap.h" #include "isdn_concap.h"
#include "isdn_ciscohdlck.h" #include "isdn_ciscohdlck.h"
/* #define ISDN_NET_MAX_QUEUE_LENGTH 2
* Outline of new tbusy handling:
*
* Old method, roughly spoken, consisted of setting tbusy when entering
* isdn_net_start_xmit() and at several other locations and clearing
* it from isdn_net_start_xmit() thread when sending was successful.
*
* With 2.3.x multithreaded network core, to prevent problems, tbusy should
* only be set by the isdn_net_start_xmit() thread and only when a tx-busy
* condition is detected. Other threads (in particular isdn_net_stat_callb())
* are only allowed to clear tbusy.
*
* -HE
*/
/* /*
* About SOFTNET: * is this particular channel busy?
* Most of the changes were pretty obvious and basically done by HE already.
*
* One problem of the isdn net device code is that is uses struct net_device
* for masters and slaves. However, only master interface are registered to
* the network layer, and therefore, it only makes sense to call netif_*
* functions on them.
*
* --KG
*/
/*
* Find out if the netdevice has been ifup-ed yet.
*/ */
static inline int static inline int
isdn_net_device_started(isdn_net_dev *idev) isdn_net_dev_busy(isdn_net_dev *idev)
{ {
return netif_running(&idev->mlp->dev); return idev->frame_cnt >= ISDN_NET_MAX_QUEUE_LENGTH;
} }
/* /*
* stop the network -> net_device queue. * find out if the net_device which this mlp is belongs to is busy.
* It's busy iff all channels are busy.
* must hold mlp->xmit_lock
* FIXME: Use a mlp->frame_cnt instead of loop?
*/ */
static inline void static inline int
isdn_net_dev_stop_queue(isdn_net_dev *idev) isdn_net_local_busy(isdn_net_local *mlp)
{ {
netif_stop_queue(&idev->mlp->dev); isdn_net_dev *idev;
list_for_each_entry(idev, &mlp->online, online) {
if (!isdn_net_dev_busy(idev))
return 0;
}
return 1;
} }
/* /*
* find out if the net_device which this lp belongs to (lp can be * For the given net device, this will get a non-busy channel out of the
* master or slave) is busy. It's busy iff all (master and slave) * corresponding bundle.
* queues are busy
*/ */
static inline int static inline isdn_net_dev *
isdn_net_device_busy(isdn_net_dev *idev) isdn_net_get_xmit_dev(isdn_net_local *mlp)
{ {
isdn_net_local *mlp = idev->mlp; isdn_net_dev *idev;
unsigned long flags;
int retval = 1;
if (!isdn_net_dev_busy(idev))
return 0;
spin_lock_irqsave(&mlp->online_lock, flags);
list_for_each_entry(idev, &mlp->online, online) { list_for_each_entry(idev, &mlp->online, online) {
if (!isdn_net_dev_busy(idev)) { if (!isdn_net_dev_busy(idev)) {
retval = 0; /* point the head to next online channel */
break; list_del(&mlp->online);
list_add(&mlp->online, &idev->online);
return idev;
} }
} }
spin_unlock_irqrestore(&mlp->online_lock, flags); return NULL;
return retval;
} }
static inline /* mlp->xmit_lock must be held */
void isdn_net_inc_frame_cnt(isdn_net_dev *idev) static inline void
isdn_net_inc_frame_cnt(isdn_net_dev *idev)
{ {
atomic_inc(&idev->frame_cnt); isdn_net_local *mlp = idev->mlp;
if (isdn_net_device_busy(idev))
isdn_net_dev_stop_queue(idev); if (isdn_net_local_busy(mlp))
isdn_BUG();
idev->frame_cnt++;
if (isdn_net_local_busy(mlp))
netif_stop_queue(&mlp->dev);
} }
/* mlp->xmit_lock must be held */
static inline void static inline void
isdn_net_dec_frame_cnt(isdn_net_dev *idev) isdn_net_dec_frame_cnt(isdn_net_dev *idev)
{ {
atomic_dec(&idev->frame_cnt); isdn_net_local *mlp = idev->mlp;
int was_busy;
if (!isdn_net_device_busy(idev)) {
if (!skb_queue_empty(&idev->super_tx_queue))
tasklet_schedule(&idev->tlet);
else
isdn_net_dev_wake_queue(idev);
}
}
static inline was_busy = isdn_net_local_busy(mlp);
void isdn_net_zero_frame_cnt(isdn_net_dev *idev)
{ idev->frame_cnt--;
atomic_set(&idev->frame_cnt, 0);
if (isdn_net_local_busy(mlp))
isdn_BUG();
if (!was_busy)
return;
if (!skb_queue_empty(&idev->super_tx_queue))
tasklet_schedule(&idev->tlet);
else
netif_wake_queue(&mlp->dev);
} }
/* Prototypes */ /* Prototypes */
...@@ -143,8 +131,11 @@ int ...@@ -143,8 +131,11 @@ int
isdn_net_bsent(isdn_net_dev *idev, isdn_ctrl *c) isdn_net_bsent(isdn_net_dev *idev, isdn_ctrl *c)
{ {
isdn_net_local *mlp = idev->mlp; isdn_net_local *mlp = idev->mlp;
unsigned long flags;
spin_lock_irqsave(&mlp->xmit_lock, flags);
isdn_net_dec_frame_cnt(idev); isdn_net_dec_frame_cnt(idev);
spin_unlock_irqrestore(&mlp->xmit_lock, flags);
mlp->stats.tx_packets++; mlp->stats.tx_packets++;
mlp->stats.tx_bytes += c->parm.length; mlp->stats.tx_bytes += c->parm.length;
return 1; return 1;
...@@ -212,33 +203,24 @@ isdn_net_log_skb(struct sk_buff *skb, isdn_net_dev *idev) ...@@ -212,33 +203,24 @@ isdn_net_log_skb(struct sk_buff *skb, isdn_net_dev *idev)
* this function is used to send supervisory data, i.e. data which was * this function is used to send supervisory data, i.e. data which was
* not received from the network layer, but e.g. frames from ipppd, CCP * not received from the network layer, but e.g. frames from ipppd, CCP
* reset frames etc. * reset frames etc.
* must hold mlp->xmit_lock
*/ */
void void
isdn_net_write_super(isdn_net_dev *idev, struct sk_buff *skb) isdn_net_write_super(isdn_net_dev *idev, struct sk_buff *skb)
{ {
if (in_irq()) { if (!isdn_net_dev_busy(idev))
// we can't grab the lock from irq context,
// so we just queue the packet
skb_queue_tail(&idev->super_tx_queue, skb);
tasklet_schedule(&idev->tlet);
return;
}
spin_lock_bh(&idev->xmit_lock);
if (!isdn_net_dev_busy(idev)) {
isdn_net_writebuf_skb(idev, skb); isdn_net_writebuf_skb(idev, skb);
} else { else
skb_queue_tail(&idev->super_tx_queue, skb); skb_queue_tail(&idev->super_tx_queue, skb);
}
spin_unlock_bh(&idev->xmit_lock);
} }
/* /*
* all frames sent from the (net) LL to a HL driver should go via this function * all frames sent from the (net) LL to a HL driver should go via this function
* it's serialized by the caller holding the idev->xmit_lock spinlock * it's serialized by the caller holding the idev->xmit_lock spinlock
* must hold mlp->xmit_lock
*/ */
void isdn_net_writebuf_skb(isdn_net_dev *idev, struct sk_buff *skb) void
isdn_net_writebuf_skb(isdn_net_dev *idev, struct sk_buff *skb)
{ {
isdn_net_local *mlp = idev->mlp; isdn_net_local *mlp = idev->mlp;
int ret; int ret;
...@@ -298,22 +280,27 @@ isdn_net_start_xmit(struct sk_buff *skb, struct net_device *ndev) ...@@ -298,22 +280,27 @@ isdn_net_start_xmit(struct sk_buff *skb, struct net_device *ndev)
{ {
isdn_net_dev *idev; isdn_net_dev *idev;
isdn_net_local *mlp = ndev->priv; isdn_net_local *mlp = ndev->priv;
unsigned long flags;
int retval;
ndev->trans_start = jiffies; ndev->trans_start = jiffies;
if (list_empty(&mlp->online)) spin_lock_irqsave(&mlp->xmit_lock, flags);
return isdn_net_autodial(skb, ndev);
if (list_empty(&mlp->online)) {
retval = isdn_net_autodial(skb, ndev);
goto out;
}
idev = isdn_net_get_locked_dev(mlp); idev = isdn_net_get_xmit_dev(mlp);
if (!idev) { if (!idev) {
printk(KERN_WARNING "%s: all channels busy - requeuing!\n", ndev->name); printk(KERN_INFO "%s: all channels busy - requeuing!\n", ndev->name);
netif_stop_queue(ndev); netif_stop_queue(ndev);
return 1; retval = 1;
goto out;
} }
/* we have our idev locked from now on */
isdn_net_writebuf_skb(idev, skb); isdn_net_writebuf_skb(idev, skb);
spin_unlock_bh(&idev->xmit_lock);
/* the following stuff is here for backwards compatibility. /* the following stuff is here for backwards compatibility.
* in future, start-up and hangup of slaves (based on current load) * in future, start-up and hangup of slaves (based on current load)
...@@ -348,7 +335,10 @@ isdn_net_start_xmit(struct sk_buff *skb, struct net_device *ndev) ...@@ -348,7 +335,10 @@ isdn_net_start_xmit(struct sk_buff *skb, struct net_device *ndev)
list_add_tail(&mlp->online, &idev->online); list_add_tail(&mlp->online, &idev->online);
} }
return 0; retval = 0;
out:
spin_unlock_irqrestore(&mlp->xmit_lock, flags);
return retval;
} }
int int
......
...@@ -62,85 +62,6 @@ enum { ...@@ -62,85 +62,6 @@ enum {
ST_CHARGE_HAVE_CINT, /* got a second chare info and thus the timing */ ST_CHARGE_HAVE_CINT, /* got a second chare info and thus the timing */
}; };
#define ISDN_NET_MAX_QUEUE_LENGTH 2
/*
* is this particular channel busy?
*/
static inline int
isdn_net_dev_busy(isdn_net_dev *idev)
{
if (atomic_read(&idev->frame_cnt) < ISDN_NET_MAX_QUEUE_LENGTH)
return 0;
else
return 1;
}
/*
* For the given net device, this will get a non-busy channel out of the
* corresponding bundle. The returned channel is locked.
*/
static inline isdn_net_dev *
isdn_net_get_locked_dev(isdn_net_local *mlp)
{
unsigned long flags;
isdn_net_dev *idev;
spin_lock_irqsave(&mlp->online_lock, flags);
list_for_each_entry(idev, &mlp->online, online) {
spin_lock_bh(&idev->xmit_lock);
if (!isdn_net_dev_busy(idev)) {
/* point the head to next online channel */
list_del(&mlp->online);
list_add(&mlp->online, &idev->online);
goto found;
}
spin_unlock_bh(&idev->xmit_lock);
}
idev = NULL;
found:
spin_unlock_irqrestore(&mlp->online_lock, flags);
return idev;
}
/*
* add a channel to a bundle
*/
static inline void
isdn_net_add_to_bundle(isdn_net_local *mlp, isdn_net_dev *idev)
{
unsigned long flags;
spin_lock_irqsave(&mlp->online_lock, flags);
list_add(&idev->online, &mlp->online);
spin_unlock_irqrestore(&mlp->online_lock, flags);
}
/*
* remove a channel from the bundle it belongs to
*/
static inline void
isdn_net_rm_from_bundle(isdn_net_dev *idev)
{
isdn_net_local *mlp = idev->mlp;
unsigned long flags;
spin_lock_irqsave(&mlp->online_lock, flags);
// list_del(&idev->online); FIXME
spin_unlock_irqrestore(&mlp->online_lock, flags);
}
/*
* wake up the network -> net_device queue.
* For slaves, wake the corresponding master interface.
*/
static inline void
isdn_net_dev_wake_queue(isdn_net_dev *idev)
{
netif_wake_queue(&idev->mlp->dev);
}
static inline int static inline int
isdn_net_bound(isdn_net_dev *idev) isdn_net_bound(isdn_net_dev *idev)
{ {
......
...@@ -348,7 +348,6 @@ isdn_net_addif(char *name, isdn_net_local *mlp) ...@@ -348,7 +348,6 @@ isdn_net_addif(char *name, isdn_net_local *mlp)
strcpy(idev->name, name); strcpy(idev->name, name);
tasklet_init(&idev->tlet, isdn_net_tasklet, (unsigned long) idev); tasklet_init(&idev->tlet, isdn_net_tasklet, (unsigned long) idev);
spin_lock_init(&idev->xmit_lock);
skb_queue_head_init(&idev->super_tx_queue); skb_queue_head_init(&idev->super_tx_queue);
idev->isdn_slot = -1; idev->isdn_slot = -1;
...@@ -380,6 +379,7 @@ isdn_net_addif(char *name, isdn_net_local *mlp) ...@@ -380,6 +379,7 @@ isdn_net_addif(char *name, isdn_net_local *mlp)
mlp->magic = ISDN_NET_MAGIC; mlp->magic = ISDN_NET_MAGIC;
INIT_LIST_HEAD(&mlp->slaves); INIT_LIST_HEAD(&mlp->slaves);
INIT_LIST_HEAD(&mlp->online); INIT_LIST_HEAD(&mlp->online);
spin_lock_init(&mlp->xmit_lock);
mlp->p_encap = -1; mlp->p_encap = -1;
isdn_net_set_encap(mlp, ISDN_NET_ENCAP_RAWIP); isdn_net_set_encap(mlp, ISDN_NET_ENCAP_RAWIP);
...@@ -1152,16 +1152,16 @@ static void ...@@ -1152,16 +1152,16 @@ static void
isdn_net_tasklet(unsigned long data) isdn_net_tasklet(unsigned long data)
{ {
isdn_net_dev *idev = (isdn_net_dev *) data; isdn_net_dev *idev = (isdn_net_dev *) data;
isdn_net_local *mlp = idev->mlp;
struct sk_buff *skb; struct sk_buff *skb;
unsigned long flags;
spin_lock_bh(&idev->xmit_lock); spin_lock_irqsave(&mlp->xmit_lock, flags);
while (!isdn_net_dev_busy(idev)) { while (!isdn_net_dev_busy(idev) &&
skb = skb_dequeue(&idev->super_tx_queue); (skb = skb_dequeue(&idev->super_tx_queue))) {
if (!skb)
break;
isdn_net_writebuf_skb(idev, skb); isdn_net_writebuf_skb(idev, skb);
} }
spin_unlock_bh(&idev->xmit_lock); spin_unlock_irqrestore(&mlp->xmit_lock, flags);
} }
/* ====================================================================== */ /* ====================================================================== */
...@@ -1629,6 +1629,7 @@ bconn(struct fsm_inst *fi, int pr, void *arg) ...@@ -1629,6 +1629,7 @@ bconn(struct fsm_inst *fi, int pr, void *arg)
{ {
isdn_net_dev *idev = fi->userdata; isdn_net_dev *idev = fi->userdata;
isdn_net_local *mlp = idev->mlp; isdn_net_local *mlp = idev->mlp;
unsigned long flags;
fsm_change_state(&idev->fi, ST_ACTIVE); fsm_change_state(&idev->fi, ST_ACTIVE);
...@@ -1640,14 +1641,16 @@ bconn(struct fsm_inst *fi, int pr, void *arg) ...@@ -1640,14 +1641,16 @@ bconn(struct fsm_inst *fi, int pr, void *arg)
del_timer(&idev->dial_timer); del_timer(&idev->dial_timer);
} }
isdn_net_add_to_bundle(mlp, idev); spin_lock_irqsave(&mlp->xmit_lock, flags);
list_add(&idev->online, &mlp->online);
spin_unlock_irqrestore(&mlp->xmit_lock, flags);
printk(KERN_INFO "%s connected\n", idev->name); printk(KERN_INFO "%s connected\n", idev->name);
/* If first Chargeinfo comes before B-Channel connect, /* If first Chargeinfo comes before B-Channel connect,
* we correct the timestamp here. * we correct the timestamp here.
*/ */
idev->chargetime = jiffies; idev->chargetime = jiffies;
idev->frame_cnt = 0;
idev->transcount = 0; idev->transcount = 0;
idev->cps = 0; idev->cps = 0;
idev->last_jiffies = jiffies; idev->last_jiffies = jiffies;
...@@ -1655,7 +1658,7 @@ bconn(struct fsm_inst *fi, int pr, void *arg) ...@@ -1655,7 +1658,7 @@ bconn(struct fsm_inst *fi, int pr, void *arg)
if (mlp->ops->connected) if (mlp->ops->connected)
mlp->ops->connected(idev); mlp->ops->connected(idev);
else else
isdn_net_dev_wake_queue(idev); netif_wake_queue(&idev->mlp->dev);
return 0; return 0;
} }
...@@ -1665,15 +1668,18 @@ bhup(struct fsm_inst *fi, int pr, void *arg) ...@@ -1665,15 +1668,18 @@ bhup(struct fsm_inst *fi, int pr, void *arg)
{ {
isdn_net_dev *idev = fi->userdata; isdn_net_dev *idev = fi->userdata;
isdn_net_local *mlp = idev->mlp; isdn_net_local *mlp = idev->mlp;
unsigned long flags;
del_timer(&idev->dial_timer); del_timer(&idev->dial_timer);
if (mlp->ops->disconnected) if (mlp->ops->disconnected)
mlp->ops->disconnected(idev); mlp->ops->disconnected(idev);
spin_lock_irqsave(&mlp->xmit_lock, flags);
list_del(&idev->online);
spin_unlock_irqrestore(&mlp->xmit_lock, flags);
printk(KERN_INFO "%s: disconnected\n", idev->name); printk(KERN_INFO "%s: disconnected\n", idev->name);
fsm_change_state(fi, ST_WAIT_DHUP); fsm_change_state(fi, ST_WAIT_DHUP);
isdn_net_rm_from_bundle(idev);
return 0;
} }
static int static int
......
...@@ -338,10 +338,18 @@ typedef struct isdn_net_local_s { ...@@ -338,10 +338,18 @@ typedef struct isdn_net_local_s {
/* phone[0] = Incoming Numbers */ /* phone[0] = Incoming Numbers */
/* phone[1] = Outgoing Numbers */ /* phone[1] = Outgoing Numbers */
struct list_head slaves; /* list of all bundled channels */ struct list_head slaves; /* list of all bundled channels
struct list_head online; /* list of all bundled channels, protected by serializing config
which are currently online */ ioctls / no change allowed when
spinlock_t online_lock; /* lock to protect online list */ interface is running */
struct list_head online; /* circular list of all bundled
channels, which are currently
online
protected by xmit_lock */
spinlock_t xmit_lock; /* used to protect the xmit path of
a net_device, including all
associated channels's frame_cnt */
struct list_head running_devs; /* member of global running_devs */ struct list_head running_devs; /* member of global running_devs */
atomic_t refcnt; /* references held by ISDN code */ atomic_t refcnt; /* references held by ISDN code */
...@@ -393,12 +401,9 @@ typedef struct isdn_net_dev_s { ...@@ -393,12 +401,9 @@ typedef struct isdn_net_dev_s {
int pppbind; /* ippp device for bindings */ int pppbind; /* ippp device for bindings */
int ppp_slot; /* PPPD device slot number */ int ppp_slot; /* PPPD device slot number */
spinlock_t xmit_lock; /* used to protect the xmit path of */
/* a particular channel (including */
/* the frame_cnt */
struct sk_buff_head super_tx_queue; /* List of supervisory frames to */ struct sk_buff_head super_tx_queue; /* List of supervisory frames to */
/* be transmitted asap */ /* be transmitted asap */
atomic_t frame_cnt; /* number of frames currently */ int frame_cnt; /* number of frames currently */
/* queued in HL driver */ /* queued in HL driver */
struct tasklet_struct tlet; struct tasklet_struct tlet;
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment