Commit 2f1efd68 authored by Kai Germaschewski

ISDN: More moving of per-channel stuff into isdn_net_dev

parent 65c9ec4d
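In short: this commit continues moving per-channel transmit state out of isdn_net_local and into isdn_net_dev, and re-types the helpers that touch that state (isdn_net_lp_busy() becomes isdn_net_dev_busy(), isdn_net_get_locked_lp() becomes isdn_net_get_locked_dev(), and isdn_net_write_super(), isdn_net_writebuf_skb() and isdn_net_dial_req() now take an isdn_net_dev *). What follows is only a condensed sketch of the resulting isdn_net_dev layout, assembled from the hunks below; field order is illustrative and most members are elided.

/* condensed sketch, not the full definition */
typedef struct isdn_net_dev_s {
        isdn_net_local local;                   /* interface-level state (encap, protocols,
                                                   bundle config) stays here */
        spinlock_t xmit_lock;                   /* protects this channel's xmit path,
                                                   including frame_cnt */
        struct sk_buff_head super_tx_queue;     /* supervisory frames to send asap */
        atomic_t frame_cnt;                     /* frames currently queued in the HL driver */
        struct tq_struct tqueue;                /* softint that drains super_tx_queue */
        int sqfull;                             /* flag: netdev-queue overloaded */
        ulong sqfull_stamp;                     /* start time of the overload */
        /* ... */
} isdn_net_dev;

/* was isdn_net_lp_busy(isdn_net_local *lp); behaviour unchanged */
static inline int
isdn_net_dev_busy(isdn_net_dev *idev)
{
        return atomic_read(&idev->frame_cnt) >= ISDN_NET_MAX_QUEUE_LENGTH;
}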
@@ -164,7 +164,7 @@ isdn_net_ciscohdlck_slarp_send_keepalive(unsigned long data)
p += put_u32(p, lp->cisco_yourseq);
p += put_u16(p, 0xffff); // reliability, always 0xffff
isdn_net_write_super(lp, skb);
isdn_net_write_super(idev, skb);
lp->cisco_timer.expires = jiffies + lp->cisco_keepalive_period * HZ;
@@ -174,6 +174,7 @@ isdn_net_ciscohdlck_slarp_send_keepalive(unsigned long data)
static void
isdn_net_ciscohdlck_slarp_send_request(isdn_net_local *lp)
{
isdn_net_dev *idev = lp->netdev;
struct sk_buff *skb;
unsigned char *p;
@@ -194,7 +195,7 @@ isdn_net_ciscohdlck_slarp_send_request(isdn_net_local *lp)
p += put_u32(p, 0); // netmask
p += put_u16(p, 0); // unused
isdn_net_write_super(lp, skb);
isdn_net_write_super(idev, skb);
}
static void
@@ -218,7 +219,7 @@ isdn_ciscohdlck_connected(isdn_net_local *lp)
lp->cisco_timer.expires = jiffies + lp->cisco_keepalive_period * HZ;
add_timer(&lp->cisco_timer);
}
isdn_net_device_wake_queue(lp);
isdn_net_dev_wake_queue(lp->netdev);
}
static void
@@ -232,6 +233,7 @@ isdn_ciscohdlck_disconnected(isdn_net_local *lp)
static void
isdn_net_ciscohdlck_slarp_send_reply(isdn_net_local *lp)
{
isdn_net_dev *idev = lp->netdev;
struct sk_buff *skb;
unsigned char *p;
struct in_device *in_dev = NULL;
@@ -265,7 +267,7 @@ isdn_net_ciscohdlck_slarp_send_reply(isdn_net_local *lp)
p += put_u32(p, mask); // netmask
p += put_u16(p, 0); // unused
isdn_net_write_super(lp, skb);
isdn_net_write_super(idev, skb);
}
static void
@@ -92,7 +92,8 @@ LIST_HEAD(isdn_net_devs); /* Linked list of isdn_net_dev's */
* Find out if the netdevice has been ifup-ed yet.
* For slaves, look at the corresponding master.
*/
static __inline__ int isdn_net_device_started(isdn_net_dev *n)
static inline int
isdn_net_device_started(isdn_net_dev *n)
{
isdn_net_local *lp = &n->local;
struct net_device *dev;
@@ -108,8 +109,11 @@ static __inline__ int isdn_net_device_started(isdn_net_dev *n)
* stop the network -> net_device queue.
* For slaves, stop the corresponding master interface.
*/
static __inline__ void isdn_net_device_stop_queue(isdn_net_local *lp)
static inline void
isdn_net_dev_stop_queue(isdn_net_dev *idev)
{
isdn_net_local *lp = &idev->local;
if (lp->master)
netif_stop_queue(lp->master);
else
@@ -121,15 +125,18 @@ static __inline__ void isdn_net_device_stop_queue(isdn_net_local *lp)
* master or slave) is busy. It's busy iff all (master and slave)
* queues are busy
*/
static __inline__ int isdn_net_device_busy(isdn_net_local *lp)
static inline int
isdn_net_device_busy(isdn_net_dev *idev)
{
isdn_net_local *nlp;
isdn_net_local *lp, *nlp;
isdn_net_dev *nd;
unsigned long flags;
if (!isdn_net_lp_busy(lp))
if (!isdn_net_dev_busy(idev))
return 0;
lp = &idev->local;
if (lp->master)
nd = ((isdn_net_local *) lp->master->priv)->netdev;
else
@@ -138,7 +145,7 @@ static __inline__ int isdn_net_device_busy(isdn_net_local *lp)
spin_lock_irqsave(&nd->queue_lock, flags);
nlp = lp->next;
while (nlp != lp) {
if (!isdn_net_lp_busy(nlp)) {
if (!isdn_net_dev_busy(nlp->netdev)) {
spin_unlock_irqrestore(&nd->queue_lock, flags);
return 0;
}
@@ -148,30 +155,33 @@ static __inline__ int isdn_net_device_busy(isdn_net_local *lp)
return 1;
}
static __inline__ void isdn_net_inc_frame_cnt(isdn_net_local *lp)
static inline
void isdn_net_inc_frame_cnt(isdn_net_dev *idev)
{
atomic_inc(&lp->frame_cnt);
if (isdn_net_device_busy(lp))
isdn_net_device_stop_queue(lp);
atomic_inc(&idev->frame_cnt);
if (isdn_net_device_busy(idev))
isdn_net_dev_stop_queue(idev);
}
static __inline__ void isdn_net_dec_frame_cnt(isdn_net_local *lp)
static inline void
isdn_net_dec_frame_cnt(isdn_net_dev *idev)
{
atomic_dec(&lp->frame_cnt);
atomic_dec(&idev->frame_cnt);
if (!(isdn_net_device_busy(lp))) {
if (!skb_queue_empty(&lp->super_tx_queue)) {
queue_task(&lp->tqueue, &tq_immediate);
if (!(isdn_net_device_busy(idev))) {
if (!skb_queue_empty(&idev->super_tx_queue)) {
queue_task(&idev->tqueue, &tq_immediate);
mark_bh(IMMEDIATE_BH);
} else {
isdn_net_device_wake_queue(lp);
isdn_net_dev_wake_queue(idev);
}
}
}
static __inline__ void isdn_net_zero_frame_cnt(isdn_net_local *lp)
static inline
void isdn_net_zero_frame_cnt(isdn_net_dev *idev)
{
atomic_set(&lp->frame_cnt, 0);
atomic_set(&idev->frame_cnt, 0);
}
/* For 2.2.x we leave the transmitter busy timeout at 2 secs, just
@@ -208,7 +218,7 @@ int isdn_net_online(isdn_net_dev *idev)
/* Prototypes */
static int isdn_net_force_dial_lp(isdn_net_local *);
static int isdn_net_force_dial_idev(isdn_net_dev *);
static int isdn_net_start_xmit(struct sk_buff *, struct net_device *);
static void do_dialout(isdn_net_local *lp);
static int isdn_net_set_encap(isdn_net_dev *p, int encap);
@@ -269,7 +279,7 @@ isdn_net_unbind_channel(isdn_net_local * lp)
if (lp->ops->unbind)
lp->ops->unbind(lp);
skb_queue_purge(&lp->super_tx_queue);
skb_queue_purge(&idev->super_tx_queue);
if (!lp->master) { /* reset only master device */
/* Moral equivalent of dev_purge_queues():
@@ -412,7 +422,7 @@ static void isdn_net_connected(isdn_net_local *lp)
if (lp->ops->connected)
lp->ops->connected(lp);
else
isdn_net_device_wake_queue(lp);
isdn_net_dev_wake_queue(idev);
}
/*
@@ -562,7 +572,7 @@ isdn_net_handle_event(isdn_net_local *lp, int pr, void *arg)
switch (pr) {
case ISDN_STAT_BSENT:
/* A packet has successfully been sent out */
isdn_net_dec_frame_cnt(lp);
isdn_net_dec_frame_cnt(idev);
lp->stats.tx_packets++;
lp->stats.tx_bytes += c->parm.length;
return 1;
@@ -846,24 +856,25 @@ isdn_net_log_skb(struct sk_buff * skb, isdn_net_local * lp)
* not received from the network layer, but e.g. frames from ipppd, CCP
* reset frames etc.
*/
void isdn_net_write_super(isdn_net_local *lp, struct sk_buff *skb)
void
isdn_net_write_super(isdn_net_dev *idev, struct sk_buff *skb)
{
if (in_irq()) {
// we can't grab the lock from irq context,
// so we just queue the packet
skb_queue_tail(&lp->super_tx_queue, skb);
queue_task(&lp->tqueue, &tq_immediate);
skb_queue_tail(&idev->super_tx_queue, skb);
queue_task(&idev->tqueue, &tq_immediate);
mark_bh(IMMEDIATE_BH);
return;
}
spin_lock_bh(&lp->xmit_lock);
if (!isdn_net_lp_busy(lp)) {
isdn_net_writebuf_skb(lp, skb);
spin_lock_bh(&idev->xmit_lock);
if (!isdn_net_dev_busy(idev)) {
isdn_net_writebuf_skb(idev, skb);
} else {
skb_queue_tail(&lp->super_tx_queue, skb);
skb_queue_tail(&idev->super_tx_queue, skb);
}
spin_unlock_bh(&lp->xmit_lock);
spin_unlock_bh(&idev->xmit_lock);
}
/*
@@ -871,32 +882,32 @@ void isdn_net_write_super(isdn_net_local *lp, struct sk_buff *skb)
*/
static void isdn_net_softint(void *private)
{
isdn_net_local *lp = private;
isdn_net_dev *idev = private;
struct sk_buff *skb;
spin_lock_bh(&lp->xmit_lock);
while (!isdn_net_lp_busy(lp)) {
skb = skb_dequeue(&lp->super_tx_queue);
spin_lock_bh(&idev->xmit_lock);
while (!isdn_net_dev_busy(idev)) {
skb = skb_dequeue(&idev->super_tx_queue);
if (!skb)
break;
isdn_net_writebuf_skb(lp, skb);
isdn_net_writebuf_skb(idev, skb);
}
spin_unlock_bh(&lp->xmit_lock);
spin_unlock_bh(&idev->xmit_lock);
}
/*
* all frames sent from the (net) LL to a HL driver should go via this function
* it's serialized by the caller holding the lp->xmit_lock spinlock
* it's serialized by the caller holding the idev->xmit_lock spinlock
*/
void isdn_net_writebuf_skb(isdn_net_local *lp, struct sk_buff *skb)
void isdn_net_writebuf_skb(isdn_net_dev *idev, struct sk_buff *skb)
{
isdn_net_dev *idev = lp->netdev;
isdn_net_local *lp = &idev->local;
int ret;
int len = skb->len; /* save len */
/* before obtaining the lock the caller should have checked that
the lp isn't busy */
if (isdn_net_lp_busy(lp)) {
if (isdn_net_dev_busy(idev)) {
isdn_BUG();
goto error;
}
@@ -913,7 +924,7 @@ void isdn_net_writebuf_skb(isdn_net_local *lp, struct sk_buff *skb)
}
idev->transcount += len;
isdn_net_inc_frame_cnt(lp);
isdn_net_inc_frame_cnt(idev);
return;
error:
@@ -952,18 +963,18 @@ isdn_net_xmit(struct net_device *ndev, struct sk_buff *skb)
return isdn_ppp_xmit(skb, ndev);
}
nd = ((isdn_net_local *) ndev->priv)->netdev;
lp = isdn_net_get_locked_lp(nd);
if (!lp) {
idev = isdn_net_get_locked_dev(nd);
if (!idev) {
printk(KERN_WARNING "%s: all channels busy - requeuing!\n", ndev->name);
return 1;
}
idev = lp->netdev;
/* we have our lp locked from now on */
/* we have our idev locked from now on */
lp = &idev->local;
/* Reset hangup-timeout */
idev->huptimer = 0; // FIXME?
isdn_net_writebuf_skb(lp, skb);
spin_unlock_bh(&lp->xmit_lock);
isdn_net_writebuf_skb(idev, skb);
spin_unlock_bh(&idev->xmit_lock);
/* the following stuff is here for backwards compatibility.
* in future, start-up and hangup of slaves (based on current load)
@@ -980,23 +991,23 @@ isdn_net_xmit(struct net_device *ndev, struct sk_buff *skb)
if (idev->cps > lp->triggercps) {
if (lp->slave) {
if (!lp->sqfull) {
if (!idev->sqfull) {
/* First time overload: set timestamp only */
lp->sqfull = 1;
lp->sqfull_stamp = jiffies;
idev->sqfull = 1;
idev->sqfull_stamp = jiffies;
} else {
/* subsequent overload: if slavedelay exceeded, start dialing */
if (time_after(jiffies, lp->sqfull_stamp + lp->slavedelay)) {
if (time_after(jiffies, idev->sqfull_stamp + lp->slavedelay)) {
slp = lp->slave->priv;
if (!isdn_net_bound(slp->netdev)) {
isdn_net_force_dial_lp((isdn_net_local *) lp->slave->priv);
isdn_net_force_dial_idev(((isdn_net_local *) lp->slave->priv)->netdev);
}
}
}
}
} else {
if (lp->sqfull && time_after(jiffies, lp->sqfull_stamp + lp->slavedelay + (10 * HZ))) {
lp->sqfull = 0;
if (idev->sqfull && time_after(jiffies, idev->sqfull_stamp + lp->slavedelay + 10 * HZ)) {
idev->sqfull = 0;
}
/* this is a hack to allow auto-hangup for slaves on moderate loads */
nd->queue = &nd->local;
@@ -1053,7 +1064,7 @@ isdn_net_autodial(struct sk_buff *skb, struct net_device *ndev)
idev->dialwait_timer = 0;
}
if (isdn_net_force_dial_lp(lp) < 0)
if (isdn_net_force_dial_idev(idev) < 0)
goto discard;
/* Log packet, which triggered dialing */
@@ -1474,11 +1485,11 @@ isdn_net_findif(char *name)
* from isdn_net_start_xmit().
*/
static int
isdn_net_force_dial_lp(isdn_net_local *lp)
isdn_net_force_dial_idev(isdn_net_dev *idev)
{
isdn_net_dev *idev = lp->netdev;
int slot;
unsigned long flags;
isdn_net_local *lp = &idev->local;
if (isdn_net_bound(idev))
return -EBUSY;
@@ -1515,12 +1526,13 @@ isdn_net_force_dial_lp(isdn_net_local *lp)
* themselves.
*/
int
isdn_net_dial_req(isdn_net_local * lp)
isdn_net_dial_req(isdn_net_dev *idev)
{
isdn_net_local *lp = &idev->local;
/* is there a better error code? */
if (!(ISDN_NET_DIALMODE(*lp) == ISDN_NET_DM_AUTO)) return -EBUSY;
return isdn_net_force_dial_lp(lp);
return isdn_net_force_dial_idev(idev);
}
/*
@@ -1534,7 +1546,8 @@ isdn_net_force_dial(char *name)
if (!p)
return -ENODEV;
return (isdn_net_force_dial_lp(&p->local));
return isdn_net_force_dial_idev(p);
}
/*
@@ -1595,10 +1608,10 @@ isdn_net_new(char *name, struct net_device *master)
netdev->local.netdev = netdev;
netdev->local.next = &netdev->local;
netdev->local.tqueue.sync = 0;
netdev->local.tqueue.routine = isdn_net_softint;
netdev->local.tqueue.data = &netdev->local;
spin_lock_init(&netdev->local.xmit_lock);
netdev->tqueue.sync = 0;
netdev->tqueue.routine = isdn_net_softint;
netdev->tqueue.data = netdev;
spin_lock_init(&netdev->xmit_lock);
netdev->isdn_slot = -1;
netdev->pre_device = -1;
@@ -1609,7 +1622,7 @@ isdn_net_new(char *name, struct net_device *master)
netdev->pppbind = -1;
netdev->local.p_encap = -1;
skb_queue_head_init(&netdev->local.super_tx_queue);
skb_queue_head_init(&netdev->super_tx_queue);
netdev->local.l2_proto = ISDN_PROTO_L2_X75I;
netdev->local.l3_proto = ISDN_PROTO_L3_TRANS;
netdev->local.triggercps = 6000;
@@ -52,10 +52,10 @@ extern int isdn_net_force_hangup(char *);
extern int isdn_net_force_dial(char *);
extern isdn_net_dev *isdn_net_findif(char *);
extern int isdn_net_rcv_skb(int, struct sk_buff *);
extern int isdn_net_dial_req(isdn_net_local *);
extern void isdn_net_writebuf_skb(isdn_net_local *lp, struct sk_buff *skb);
extern void isdn_net_write_super(isdn_net_local *lp, struct sk_buff *skb);
extern int isdn_net_online(isdn_net_dev *idev);
extern int isdn_net_dial_req(isdn_net_dev *);
extern void isdn_net_writebuf_skb(isdn_net_dev *, struct sk_buff *skb);
extern void isdn_net_write_super(isdn_net_dev *, struct sk_buff *skb);
extern int isdn_net_online(isdn_net_dev *);
static inline void
isdn_net_reset_huptimer(isdn_net_dev *idev, isdn_net_dev *idev2)
@@ -69,9 +69,10 @@ isdn_net_reset_huptimer(isdn_net_dev *idev, isdn_net_dev *idev2)
/*
* is this particular channel busy?
*/
static __inline__ int isdn_net_lp_busy(isdn_net_local *lp)
static inline int
isdn_net_dev_busy(isdn_net_dev *idev)
{
if (atomic_read(&lp->frame_cnt) < ISDN_NET_MAX_QUEUE_LENGTH)
if (atomic_read(&idev->frame_cnt) < ISDN_NET_MAX_QUEUE_LENGTH)
return 0;
else
return 1;
@@ -81,34 +82,39 @@ static __inline__ int isdn_net_lp_busy(isdn_net_local *lp)
* For the given net device, this will get a non-busy channel out of the
* corresponding bundle. The returned channel is locked.
*/
static __inline__ isdn_net_local * isdn_net_get_locked_lp(isdn_net_dev *nd)
static inline isdn_net_dev *
isdn_net_get_locked_dev(isdn_net_dev *nd)
{
unsigned long flags;
isdn_net_local *lp;
isdn_net_dev *idev;
spin_lock_irqsave(&nd->queue_lock, flags);
lp = nd->queue; /* get lp on top of queue */
spin_lock_bh(&nd->queue->xmit_lock);
while (isdn_net_lp_busy(nd->queue)) {
spin_unlock_bh(&nd->queue->xmit_lock);
idev = nd->queue->netdev;
spin_lock_bh(&idev->xmit_lock);
while (isdn_net_dev_busy(idev)) {
spin_unlock_bh(&idev->xmit_lock);
nd->queue = nd->queue->next;
idev = nd->queue->netdev;
if (nd->queue == lp) { /* not found -- should never happen */
lp = NULL;
goto errout;
}
spin_lock_bh(&nd->queue->xmit_lock);
spin_lock_bh(&idev->xmit_lock);
}
lp = nd->queue;
nd->queue = nd->queue->next;
errout:
spin_unlock_irqrestore(&nd->queue_lock, flags);
return lp;
return lp ? lp->netdev : NULL;
}
/*
* add a channel to a bundle
*/
static __inline__ void isdn_net_add_to_bundle(isdn_net_dev *nd, isdn_net_local *nlp)
static inline void
isdn_net_add_to_bundle(isdn_net_dev *nd, isdn_net_local *nlp)
{
isdn_net_local *lp;
unsigned long flags;
@@ -127,7 +133,8 @@ static __inline__ void isdn_net_add_to_bundle(isdn_net_dev *nd, isdn_net_local *
/*
* remove a channel from the bundle it belongs to
*/
static __inline__ void isdn_net_rm_from_bundle(isdn_net_local *lp)
static inline void
isdn_net_rm_from_bundle(isdn_net_local *lp)
{
isdn_net_local *master_lp = lp;
unsigned long flags;
@@ -152,15 +159,19 @@ static __inline__ void isdn_net_rm_from_bundle(isdn_net_local *lp)
* wake up the network -> net_device queue.
* For slaves, wake the corresponding master interface.
*/
static inline void isdn_net_device_wake_queue(isdn_net_local *lp)
static inline void
isdn_net_dev_wake_queue(isdn_net_dev *idev)
{
isdn_net_local *lp = &idev->local;
if (lp->master)
netif_wake_queue(lp->master);
else
netif_wake_queue(&lp->netdev->dev);
}
static inline int isdn_net_bound(isdn_net_dev *idev)
static inline int
isdn_net_bound(isdn_net_dev *idev)
{
return idev->isdn_slot >= 0;
}
@@ -585,7 +585,6 @@ static unsigned int
isdn_ppp_poll(struct file *file, poll_table * wait)
{
unsigned int mask;
unsigned long flags;
struct ippp_struct *is;
is = file->private_data;
@@ -782,7 +781,7 @@ isdn_ppp_write(struct file *file, const char *buf, size_t count, loff_t *off)
isdn_ppp_send_ccp(idev,&idev->local,skb); /* keeps CCP/compression states in sync */
isdn_net_write_super(&idev->local, skb);
isdn_net_write_super(idev, skb);
}
}
retval = count;
@@ -1127,7 +1126,7 @@ static unsigned char *isdn_ppp_skb_push(struct sk_buff **skb_p,int len)
int
isdn_ppp_xmit(struct sk_buff *skb, struct net_device *netdev)
{
isdn_net_local *lp,*mlp;
isdn_net_local *mlp;
isdn_net_dev *idev;
isdn_net_dev *nd;
unsigned int proto = PPP_IP; /* 0x21 */
@@ -1166,13 +1165,12 @@ isdn_ppp_xmit(struct sk_buff *skb, struct net_device *netdev)
return 0;
}
lp = isdn_net_get_locked_lp(nd);
if (!lp) {
idev = isdn_net_get_locked_dev(nd);
if (!idev) {
printk(KERN_WARNING "%s: all channels busy - requeuing!\n", netdev->name);
return 1;
}
/* we have our lp locked from now on */
idev = lp->netdev;
slot = idev->ppp_slot;
if (slot < 0 || slot > ISDN_MAX_CHANNELS) {
printk(KERN_ERR "isdn_ppp_xmit: lp->ppp_slot(%d)\n",
@@ -1325,10 +1323,10 @@ isdn_ppp_xmit(struct sk_buff *skb, struct net_device *netdev)
isdn_ppp_frame_log("xmit", skb->data, skb->len, 32,ipt->unit,idev->ppp_slot);
}
isdn_net_writebuf_skb(lp, skb);
isdn_net_writebuf_skb(idev, skb);
unlock:
spin_unlock_bh(&lp->xmit_lock);
spin_unlock_bh(&idev->xmit_lock);
return 0;
}
@@ -1938,7 +1936,7 @@ isdn_ppp_dial_slave(char *name)
if (!sdev)
return 2;
isdn_net_dial_req((isdn_net_local *) sdev->priv);
isdn_net_dial_req(((isdn_net_local *) sdev->priv)->netdev);
return 0;
#else
return -1;
@@ -2079,7 +2077,7 @@ static void isdn_ppp_ccp_xmit_reset(struct ippp_struct *is, int proto,
printk(KERN_DEBUG "Sending CCP Frame:\n");
isdn_ppp_frame_log("ccp-xmit", skb->data, skb->len, 32, is->unit,idev->ppp_slot);
isdn_net_write_super(&idev->local, skb);
isdn_net_write_super(idev, skb);
}
/* Allocate the reset state vector */
@@ -329,8 +329,6 @@ typedef struct isdn_net_local_s {
u_char l2_proto; /* Layer-2-protocol */
u_char l3_proto; /* Layer-3-protocol */
int sqfull; /* Flag: netdev-queue overloaded */
ulong sqfull_stamp; /* Start-Time of overload */
ulong slavedelay; /* Dynamic bundling delaytime */
int triggercps; /* BogoCPS needed for trigger slave */
struct list_head phone[2]; /* List of remote-phonenumbers */
@@ -341,14 +339,6 @@ typedef struct isdn_net_local_s {
struct isdn_net_local_s *next; /* Ptr to next link in bundle */
struct isdn_net_local_s *last; /* Ptr to last link in bundle */
struct isdn_net_dev_s *netdev; /* Ptr to netdev */
struct sk_buff_head super_tx_queue; /* List of supervisory frames to */
/* be transmitted asap */
atomic_t frame_cnt; /* number of frames currently */
/* queued in HL driver */
/* Ptr to orig. hard_header_cache */
spinlock_t xmit_lock; /* used to protect the xmit path of */
/* a particular channel (including */
/* the frame_cnt */
#ifdef CONFIG_ISDN_X25
struct concap_device_ops *dops; /* callbacks used by encapsulator */
@@ -362,7 +352,6 @@ typedef struct isdn_net_local_s {
char cisco_line_state; /* state of line according to keepalive packets */
char cisco_debserint; /* debugging flag of cisco hdlc with slarp */
struct timer_list cisco_timer;
struct tq_struct tqueue;
struct isdn_netif_ops *ops;
} isdn_net_local;
@@ -386,6 +375,8 @@ typedef struct isdn_net_dev_s {
int cps; /* current speed of this interface */
int transcount; /* byte-counter for cps-calculation */
int last_jiffies; /* when transcount was reset */
int sqfull; /* Flag: netdev-queue overloaded */
ulong sqfull_stamp; /* Start-Time of overload */
struct timer_list hup_timer; /* auto hangup timer */
int huptimer; /* Timeout-counter for auto-hangup */
@@ -397,6 +388,15 @@ typedef struct isdn_net_dev_s {
int pppbind; /* ippp device for bindings */
int ppp_slot; /* PPPD device slot number */
spinlock_t xmit_lock; /* used to protect the xmit path of */
/* a particular channel (including */
/* the frame_cnt */
struct sk_buff_head super_tx_queue; /* List of supervisory frames to */
/* be transmitted asap */
atomic_t frame_cnt; /* number of frames currently */
/* queued in HL driver */
struct tq_struct tqueue;
isdn_net_local *queue; /* circular list of all bundled
channels, which are currently
online */
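For reference, the transmit path a caller now sees, condensed from the isdn_net_xmit() hunk above (error handling, accounting and the slave-dialing heuristics elided): isdn_net_get_locked_dev() picks a non-busy channel from the bundle and hands it back with idev->xmit_lock already held.

        idev = isdn_net_get_locked_dev(nd);     /* non-busy channel, xmit_lock held */
        if (!idev)
                return 1;                       /* all channels busy: requeue */
        lp = &idev->local;                      /* interface-level data still via lp */
        idev->huptimer = 0;                     /* reset hangup timeout */
        isdn_net_writebuf_skb(idev, skb);       /* hand the frame to the HL driver,
                                                   bumps idev->frame_cnt */
        spin_unlock_bh(&idev->xmit_lock);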