Commit 2db950ee authored by David S. Miller

Merge nuts.ninka.net:/home/davem/src/BK/network-2.5

into nuts.ninka.net:/home/davem/src/BK/net-2.5
parents defec5af f6578e8d
......@@ -34,81 +34,67 @@
#include <linux/init.h>
#include "x25_asy.h"
typedef struct x25_ctrl {
struct x25_asy ctrl; /* X.25 things */
struct net_device dev; /* the device */
} x25_asy_ctrl_t;
static x25_asy_ctrl_t **x25_asy_ctrls = NULL;
int x25_asy_maxdev = SL_NRUNIT; /* Can be overridden with insmod! */
static struct net_device **x25_asy_devs;
static int x25_asy_maxdev = SL_NRUNIT;
MODULE_PARM(x25_asy_maxdev, "i");
MODULE_LICENSE("GPL");
static int x25_asy_esc(unsigned char *p, unsigned char *d, int len);
static void x25_asy_unesc(struct x25_asy *sl, unsigned char c);
static void x25_asy_setup(struct net_device *dev);
/* Find a free X.25 channel, and link in this `tty' line. */
static inline struct x25_asy *x25_asy_alloc(void)
static struct x25_asy *x25_asy_alloc(void)
{
x25_asy_ctrl_t *slp = NULL;
struct net_device *dev = NULL;
struct x25_asy *sl;
int i;
if (x25_asy_ctrls == NULL)
if (x25_asy_devs == NULL)
return NULL; /* Master array missing ! */
for (i = 0; i < x25_asy_maxdev; i++)
{
slp = x25_asy_ctrls[i];
for (i = 0; i < x25_asy_maxdev; i++) {
dev = x25_asy_devs[i];
/* Not allocated ? */
if (slp == NULL)
if (dev == NULL)
break;
sl = dev->priv;
/* Not in use ? */
if (!test_and_set_bit(SLF_INUSE, &slp->ctrl.flags))
break;
if (!test_and_set_bit(SLF_INUSE, &sl->flags))
return sl;
}
/* SLP is set.. */
/* Sorry, too many, all slots in use */
if (i >= x25_asy_maxdev)
return NULL;
/* If no channels are available, allocate one */
if (!slp &&
(x25_asy_ctrls[i] = (x25_asy_ctrl_t *)kmalloc(sizeof(x25_asy_ctrl_t),
GFP_KERNEL)) != NULL) {
slp = x25_asy_ctrls[i];
memset(slp, 0, sizeof(x25_asy_ctrl_t));
if (!dev) {
char name[IFNAMSIZ];
sprintf(name, "x25asy%d", i);
dev = alloc_netdev(sizeof(struct x25_asy),
name, x25_asy_setup);
if (!dev)
return NULL;
/* Initialize channel control data */
set_bit(SLF_INUSE, &slp->ctrl.flags);
slp->ctrl.tty = NULL;
sprintf(slp->dev.name, "x25asy%d", i);
slp->dev.base_addr = i;
slp->dev.priv = (void*)&(slp->ctrl);
slp->dev.next = NULL;
slp->dev.init = x25_asy_init;
}
if (slp != NULL)
{
sl = dev->priv;
dev->base_addr = i;
/* register device so that it can be ifconfig'ed */
/* x25_asy_init() will be called as a side-effect */
/* SIDE-EFFECT WARNING: x25_asy_init() CLEARS slp->ctrl ! */
if (register_netdev(&(slp->dev)) == 0)
{
if (register_netdev(dev) == 0) {
/* (Re-)Set the INUSE bit. Very Important! */
set_bit(SLF_INUSE, &slp->ctrl.flags);
slp->ctrl.dev = &(slp->dev);
slp->dev.priv = (void*)&(slp->ctrl);
return (&(slp->ctrl));
}
else
{
clear_bit(SLF_INUSE,&(slp->ctrl.flags));
set_bit(SLF_INUSE, &sl->flags);
x25_asy_devs[i] = dev;
return sl;
} else {
printk("x25_asy_alloc() - register_netdev() failure.\n");
kfree(dev);
}
}
return NULL;
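/*
 * Sketch, not part of this patch: the alloc_netdev() idiom the new
 * allocation path above relies on.  The sizeof_priv area is allocated
 * together with the net_device and reached through dev->priv, so the
 * driver no longer needs a separately kmalloc()ed control block.
 */
static struct net_device *x25_asy_alloc_sketch(int i)
{
	char name[IFNAMSIZ];
	struct net_device *dev;
	struct x25_asy *sl;

	sprintf(name, "x25asy%d", i);
	dev = alloc_netdev(sizeof(struct x25_asy), name, x25_asy_setup);
	if (!dev)
		return NULL;

	sl = dev->priv;			/* the embedded x25_asy control block */
	dev->base_addr = i;
	if (register_netdev(dev) != 0) {
		kfree(dev);		/* as in the code above; later kernels use free_netdev() */
		return NULL;
	}
	set_bit(SLF_INUSE, &sl->flags);
	return dev;
}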
......@@ -116,8 +102,7 @@ static inline struct x25_asy *x25_asy_alloc(void)
/* Free an X.25 channel. */
static inline void x25_asy_free(struct x25_asy *sl)
static void x25_asy_free(struct x25_asy *sl)
{
/* Free all X.25 frame buffers. */
if (sl->rbuff) {
......@@ -134,18 +119,11 @@ static inline void x25_asy_free(struct x25_asy *sl)
}
}
/* MTU has been changed by the IP layer. Unfortunately we are not told
about this, but we spot it ourselves and fix things up. We could be
in an upcall from the tty driver, or in an ip packet queue. */
static void x25_asy_changed_mtu(struct x25_asy *sl)
static int x25_asy_change_mtu(struct net_device *dev, int newmtu)
{
struct net_device *dev = sl->dev;
unsigned char *xbuff, *rbuff, *oxbuff, *orbuff;
int len;
unsigned long flags;
len = dev->mtu * 2;
struct x25_asy *sl = dev->priv;
unsigned char *xbuff, *rbuff;
int len = 2 * newmtu;
xbuff = (unsigned char *) kmalloc (len + 4, GFP_ATOMIC);
rbuff = (unsigned char *) kmalloc (len + 4, GFP_ATOMIC);
......@@ -153,52 +131,47 @@ static void x25_asy_changed_mtu(struct x25_asy *sl)
if (xbuff == NULL || rbuff == NULL)
{
printk("%s: unable to grow X.25 buffers, MTU change cancelled.\n",
sl->dev->name);
dev->mtu = sl->mtu;
dev->name);
if (xbuff != NULL)
kfree(xbuff);
if (rbuff != NULL)
kfree(rbuff);
return;
return -ENOMEM;
}
save_flags(flags);
cli();
oxbuff = sl->xbuff;
sl->xbuff = xbuff;
orbuff = sl->rbuff;
sl->rbuff = rbuff;
spin_lock_bh(&sl->lock);
xbuff = xchg(&sl->xbuff, xbuff);
if (sl->xleft) {
if (sl->xleft <= len) {
memcpy(sl->xbuff, sl->xhead, sl->xleft);
} else {
sl->xleft = 0;
sl->tx_dropped++;
sl->stats.tx_dropped++;
}
}
sl->xhead = sl->xbuff;
rbuff = xchg(&sl->rbuff, rbuff);
if (sl->rcount) {
if (sl->rcount <= len) {
memcpy(sl->rbuff, orbuff, sl->rcount);
memcpy(sl->rbuff, rbuff, sl->rcount);
} else {
sl->rcount = 0;
sl->rx_over_errors++;
sl->stats.rx_over_errors++;
set_bit(SLF_ERROR, &sl->flags);
}
}
sl->mtu = dev->mtu;
dev->mtu = newmtu;
sl->buffsize = len;
restore_flags(flags);
spin_unlock_bh(&sl->lock);
if (oxbuff != NULL)
kfree(oxbuff);
if (orbuff != NULL)
kfree(orbuff);
if (xbuff != NULL)
kfree(xbuff);
if (rbuff != NULL)
kfree(rbuff);
return 0;
}
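/*
 * Sketch, not part of this patch: the swap-then-free idiom the rewritten
 * change_mtu handler uses.  xchg() installs the new buffer and returns the
 * old pointer in one step, so the old buffer can be freed after the
 * spinlock is dropped.
 */
static int x25_asy_resize_sketch(struct x25_asy *sl, int len)
{
	unsigned char *xbuff = kmalloc(len + 4, GFP_ATOMIC);

	if (xbuff == NULL)
		return -ENOMEM;

	spin_lock_bh(&sl->lock);
	xbuff = xchg(&sl->xbuff, xbuff);	/* xbuff now holds the old buffer */
	sl->buffsize = len;
	spin_unlock_bh(&sl->lock);

	kfree(xbuff);				/* old buffer freed outside the lock */
	return 0;
}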
......@@ -226,13 +199,13 @@ static void x25_asy_bump(struct x25_asy *sl)
int err;
count = sl->rcount;
sl->rx_bytes+=count;
sl->stats.rx_bytes+=count;
skb = dev_alloc_skb(count+1);
if (skb == NULL)
{
printk("%s: memory squeeze, dropping packet.\n", sl->dev->name);
sl->rx_dropped++;
sl->stats.rx_dropped++;
return;
}
skb_push(skb,1); /* LAPB internal control */
......@@ -249,7 +222,7 @@ static void x25_asy_bump(struct x25_asy *sl)
{
netif_rx(skb);
sl->dev->last_rx = jiffies;
sl->rx_packets++;
sl->stats.rx_packets++;
}
}
......@@ -257,19 +230,13 @@ static void x25_asy_bump(struct x25_asy *sl)
static void x25_asy_encaps(struct x25_asy *sl, unsigned char *icp, int len)
{
unsigned char *p;
int actual, count;
int actual, count, mtu = sl->dev->mtu;
if (sl->mtu != sl->dev->mtu) { /* Someone has been ifconfigging */
x25_asy_changed_mtu(sl);
}
if (len > sl->mtu)
if (len > mtu)
{ /* Sigh, shouldn't occur BUT ... */
len = sl->mtu;
len = mtu;
printk ("%s: truncating oversized transmit packet!\n", sl->dev->name);
sl->tx_dropped++;
sl->stats.tx_dropped++;
x25_asy_unlock(sl);
return;
}
......@@ -310,7 +277,7 @@ static void x25_asy_write_wakeup(struct tty_struct *tty)
{
/* Now serial buffer is almost free & we can start
* transmission of another packet */
sl->tx_packets++;
sl->stats.tx_packets++;
tty->flags &= ~(1 << TTY_DO_WRITE_WAKEUP);
x25_asy_unlock(sl);
return;
......@@ -324,15 +291,20 @@ static void x25_asy_write_wakeup(struct tty_struct *tty)
static void x25_asy_timeout(struct net_device *dev)
{
struct x25_asy *sl = (struct x25_asy*)(dev->priv);
/* May be we must check transmitter timeout here ?
* 14 Oct 1994 Dmitry Gorodchanin.
*/
printk(KERN_WARNING "%s: transmit timed out, %s?\n", dev->name,
(sl->tty->driver->chars_in_buffer(sl->tty) || sl->xleft) ?
"bad line quality" : "driver error");
sl->xleft = 0;
sl->tty->flags &= ~(1 << TTY_DO_WRITE_WAKEUP);
x25_asy_unlock(sl);
spin_lock(&sl->lock);
if (netif_queue_stopped(dev)) {
/* May be we must check transmitter timeout here ?
* 14 Oct 1994 Dmitry Gorodchanin.
*/
printk(KERN_WARNING "%s: transmit timed out, %s?\n", dev->name,
(sl->tty->driver->chars_in_buffer(sl->tty) || sl->xleft) ?
"bad line quality" : "driver error");
sl->xleft = 0;
sl->tty->flags &= ~(1 << TTY_DO_WRITE_WAKEUP);
x25_asy_unlock(sl);
}
spin_unlock(&sl->lock);
}
/* Encapsulate an IP datagram and kick it into a TTY queue. */
......@@ -342,10 +314,10 @@ static int x25_asy_xmit(struct sk_buff *skb, struct net_device *dev)
struct x25_asy *sl = (struct x25_asy*)(dev->priv);
int err;
if (!netif_running(sl->dev))
{
if (!netif_running(sl->dev)) {
printk("%s: xmit call when iface is down\n", dev->name);
return 1;
kfree_skb(skb);
return 0;
}
switch(skb->data[0])
......@@ -409,8 +381,11 @@ static int x25_asy_data_indication(void *token, struct sk_buff *skb)
static void x25_asy_data_transmit(void *token, struct sk_buff *skb)
{
struct x25_asy *sl=token;
if (netif_queue_stopped(sl->dev))
spin_lock(&sl->lock);
if (netif_queue_stopped(sl->dev) || sl->tty == NULL)
{
spin_unlock(&sl->lock);
printk(KERN_ERR "x25_asy: tbusy drop\n");
kfree_skb(skb);
return;
......@@ -419,10 +394,11 @@ static void x25_asy_data_transmit(void *token, struct sk_buff *skb)
if (skb != NULL)
{
x25_asy_lock(sl);
sl->tx_bytes+=skb->len;
sl->stats.tx_bytes+=skb->len;
x25_asy_encaps(sl, skb->data, skb->len);
dev_kfree_skb(skb);
}
spin_unlock(&sl->lock);
}
/*
......@@ -475,12 +451,20 @@ static void x25_asy_disconnected(void *token, int reason)
sl->dev->last_rx = jiffies;
}
static struct lapb_register_struct x25_asy_callbacks = {
.connect_confirmation = x25_asy_connected,
.connect_indication = x25_asy_connected,
.disconnect_confirmation = x25_asy_disconnected,
.disconnect_indication = x25_asy_disconnected,
.data_indication = x25_asy_data_indication,
.data_transmit = x25_asy_data_transmit,
};
/* Open the low-level part of the X.25 channel. Easy! */
static int x25_asy_open(struct net_device *dev)
{
struct lapb_register_struct x25_asy_callbacks;
struct x25_asy *sl = (struct x25_asy*)(dev->priv);
unsigned long len;
int err;
......@@ -505,7 +489,7 @@ static int x25_asy_open(struct net_device *dev)
if (sl->xbuff == NULL) {
goto noxbuff;
}
sl->mtu = dev->mtu;
sl->buffsize = len;
sl->rcount = 0;
sl->xleft = 0;
......@@ -516,14 +500,6 @@ static int x25_asy_open(struct net_device *dev)
/*
* Now attach LAPB
*/
x25_asy_callbacks.connect_confirmation=x25_asy_connected;
x25_asy_callbacks.connect_indication=x25_asy_connected;
x25_asy_callbacks.disconnect_confirmation=x25_asy_disconnected;
x25_asy_callbacks.disconnect_indication=x25_asy_disconnected;
x25_asy_callbacks.data_indication=x25_asy_data_indication;
x25_asy_callbacks.data_transmit=x25_asy_data_transmit;
if((err=lapb_register(sl, &x25_asy_callbacks))==LAPB_OK)
return 0;
......@@ -542,13 +518,16 @@ static int x25_asy_close(struct net_device *dev)
struct x25_asy *sl = (struct x25_asy*)(dev->priv);
int err;
if (sl->tty == NULL)
return -EBUSY;
spin_lock(&sl->lock);
if (sl->tty)
sl->tty->flags &= ~(1 << TTY_DO_WRITE_WAKEUP);
sl->tty->flags &= ~(1 << TTY_DO_WRITE_WAKEUP);
netif_stop_queue(dev);
sl->rcount = 0;
sl->xleft = 0;
if((err=lapb_unregister(sl))!=LAPB_OK)
printk(KERN_ERR "x25_asy_close: lapb_unregister error -%d\n",err);
spin_unlock(&sl->lock);
return 0;
}
......@@ -571,20 +550,12 @@ static void x25_asy_receive_buf(struct tty_struct *tty, const unsigned char *cp,
if (!sl || sl->magic != X25_ASY_MAGIC || !netif_running(sl->dev))
return;
/*
* Argh! mtu change time! - costs us the packet part received
* at the change
*/
if (sl->mtu != sl->dev->mtu) {
x25_asy_changed_mtu(sl);
}
/* Read the characters out of the buffer */
while (count--) {
if (fp && *fp++) {
if (!test_and_set_bit(SLF_ERROR, &sl->flags)) {
sl->rx_errors++;
sl->stats.rx_errors++;
}
cp++;
continue;
......@@ -659,27 +630,14 @@ static void x25_asy_close_tty(struct tty_struct *tty)
tty->disc_data = 0;
sl->tty = NULL;
x25_asy_free(sl);
unregister_netdev(sl->dev);
}
static struct net_device_stats *x25_asy_get_stats(struct net_device *dev)
{
static struct net_device_stats stats;
struct x25_asy *sl = (struct x25_asy*)(dev->priv);
memset(&stats, 0, sizeof(struct net_device_stats));
stats.rx_packets = sl->rx_packets;
stats.tx_packets = sl->tx_packets;
stats.rx_bytes = sl->rx_bytes;
stats.tx_bytes = sl->tx_bytes;
stats.rx_dropped = sl->rx_dropped;
stats.tx_dropped = sl->tx_dropped;
stats.tx_errors = sl->tx_errors;
stats.rx_errors = sl->rx_errors;
stats.rx_over_errors = sl->rx_over_errors;
return (&stats);
return &sl->stats;
}
......@@ -757,7 +715,7 @@ static void x25_asy_unesc(struct x25_asy *sl, unsigned char s)
sl->rbuff[sl->rcount++] = s;
return;
}
sl->rx_over_errors++;
sl->stats.rx_over_errors++;
set_bit(SLF_ERROR, &sl->flags);
}
}
......@@ -799,18 +757,14 @@ static int x25_asy_open_dev(struct net_device *dev)
}
/* Initialise the X.25 driver. Called by the device init code */
int x25_asy_init(struct net_device *dev)
static void x25_asy_setup(struct net_device *dev)
{
struct x25_asy *sl = (struct x25_asy*)(dev->priv);
if (sl == NULL) /* Allocation failed ?? */
return -ENODEV;
struct x25_asy *sl = dev->priv;
/* Set up the control block. (And clear statistics) */
memset(sl, 0, sizeof (struct x25_asy));
sl->magic = X25_ASY_MAGIC;
sl->dev = dev;
spin_lock_init(&sl->lock);
set_bit(SLF_INUSE, &sl->flags);
/*
* Finish setting up the DEVICE info.
......@@ -823,6 +777,7 @@ int x25_asy_init(struct net_device *dev)
dev->open = x25_asy_open_dev;
dev->stop = x25_asy_close;
dev->get_stats = x25_asy_get_stats;
dev->change_mtu = x25_asy_change_mtu;
dev->hard_header_len = 0;
dev->addr_len = 0;
dev->type = ARPHRD_X25;
......@@ -830,8 +785,6 @@ int x25_asy_init(struct net_device *dev)
/* New-style flags. */
dev->flags = IFF_NOARP;
return 0;
}
static struct tty_ldisc x25_ldisc = {
......@@ -853,13 +806,15 @@ static int __init init_x25_asy(void)
printk(KERN_INFO "X.25 async: version 0.00 ALPHA "
"(dynamic channels, max=%d).\n", x25_asy_maxdev );
x25_asy_ctrls = kmalloc(sizeof(void*)*x25_asy_maxdev, GFP_KERNEL);
if (!x25_asy_ctrls) {
x25_asy_devs = kmalloc(sizeof(struct net_device *)*x25_asy_maxdev,
GFP_KERNEL);
if (!x25_asy_devs) {
printk(KERN_WARNING "X25 async: Can't allocate x25_asy_ctrls[] "
"array! Uaargh! (-> No X.25 available)\n");
return -ENOMEM;
}
memset(x25_asy_ctrls, 0, sizeof(void*)*x25_asy_maxdev); /* Pointers */
memset(x25_asy_devs, 0, sizeof(struct net_device *)*x25_asy_maxdev);
return tty_register_ldisc(N_X25, &x25_ldisc);
}
......@@ -867,22 +822,29 @@ static int __init init_x25_asy(void)
static void __exit exit_x25_asy(void)
{
struct net_device *dev;
int i;
for (i = 0; i < x25_asy_maxdev; i++) {
if (x25_asy_ctrls[i]) {
dev = x25_asy_devs[i];
if (dev) {
struct x25_asy *sl = dev->priv;
spin_lock_bh(&sl->lock);
if (sl->tty)
tty_hangup(sl->tty);
spin_unlock_bh(&sl->lock);
/*
* VSV = if dev->start==0, then device
* unregistered while close proc.
*/
if (netif_running(&(x25_asy_ctrls[i]->dev)))
unregister_netdev(&(x25_asy_ctrls[i]->dev));
kfree(x25_asy_ctrls[i]);
unregister_netdev(dev);
kfree(dev);
}
}
kfree(x25_asy_ctrls);
kfree(x25_asy_devs);
tty_register_ldisc(N_X25, NULL);
}
......
......@@ -18,8 +18,9 @@ struct x25_asy {
int magic;
/* Various fields. */
spinlock_t lock;
struct tty_struct *tty; /* ptr to TTY structure */
struct net_device *dev; /* easy for intr handling */
/* These are pointers to the malloc()ed frame buffers. */
unsigned char *rbuff; /* receiver buffer */
......@@ -29,17 +30,8 @@ struct x25_asy {
int xleft; /* bytes left in XMIT queue */
/* X.25 interface statistics. */
unsigned long rx_packets; /* inbound frames counter */
unsigned long tx_packets; /* outbound frames counter */
unsigned long rx_bytes; /* inbound byte counte */
unsigned long tx_bytes; /* outbound byte counter */
unsigned long rx_errors; /* Parity, etc. errors */
unsigned long tx_errors; /* Planned stuff */
unsigned long rx_dropped; /* No memory for skb */
unsigned long tx_dropped; /* When MTU change */
unsigned long rx_over_errors; /* Frame bigger then X.25 buf. */
int mtu; /* Our mtu (to spot changes!) */
struct net_device_stats stats;
int buffsize; /* Max buffers sizes */
unsigned long flags; /* Flag values/ mode etc */
......
......@@ -471,13 +471,54 @@ typedef struct sctp_cwr_chunk {
sctp_cwrhdr_t cwr_hdr;
} sctp_cwr_chunk_t __attribute__((packed));
/* FIXME: Cleanup needs to continue below this line. */
/*
* ADDIP Section 3.1 New Chunk Types
*/
/* ADDIP
* Section 3.1.1 Address Configuration Change Chunk (ASCONF)
*
* Serial Number: 32 bits (unsigned integer)
* This value represents a Serial Number for the ASCONF Chunk. The
* valid range of Serial Number is from 0 to 2^32-1.
* Serial Numbers wrap back to 0 after reaching 2^32 -1.
*
* Address Parameter: 8 or 20 bytes (depending on type)
* The address is an address of the sender of the ASCONF chunk,
* the address MUST be considered part of the association by the
* peer endpoint. This field may be used by the receiver of the
* ASCONF to help in finding the association. This parameter MUST
* be present in every ASCONF message i.e. it is a mandatory TLV
* parameter.
*
* ASCONF Parameter: TLV format
* Each Address configuration change is represented by a TLV
* parameter as defined in Section 3.2. One or more requests may
* be present in an ASCONF Chunk.
*
* Section 3.1.2 Address Configuration Acknowledgement Chunk (ASCONF-ACK)
*
* Serial Number: 32 bits (unsigned integer)
* This value represents the Serial Number for the received ASCONF
* Chunk that is acknowledged by this chunk. This value is copied
* from the received ASCONF Chunk.
*
* ASCONF Parameter Response: TLV format
* The ASCONF Parameter Response is used in the ASCONF-ACK to
* report status of ASCONF processing.
*/
typedef struct sctp_addiphdr {
__u32 serial;
__u8 params[0];
} sctp_addiphdr_t __attribute__((packed));
typedef struct sctp_addip_chunk {
sctp_chunkhdr_t chunk_hdr;
sctp_addiphdr_t addip_hdr;
} sctp_addip_chunk_t __attribute__((packed));
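/*
 * Illustrative note, not part of this patch: since the serial number is an
 * unsigned 32-bit value that wraps after 2^32 - 1, ordering two serials is
 * done with modular (serial-number) arithmetic rather than a plain compare.
 * A wrap-safe "a is newer than b" test could look like:
 */
static inline int addip_serial_after(__u32 a, __u32 b)
{
	return (__s32)(a - b) > 0;	/* e.g. a = 0, b = 0xffffffff  =>  true */
}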
/* FIXME: Cleanup needs to continue below this line. */
/* ADDIP Section 3.1.1
*
......
......@@ -140,6 +140,9 @@ typedef struct {
#define rose_sk(__sk) ((rose_cb *)(__sk)->sk_protinfo)
/* Magic value indicating first entry in /proc (ie header) */
#define ROSE_PROC_START ((void *) 1)
/* af_rose.c */
extern ax25_address rose_callsign;
extern int sysctl_rose_restart_request_timeout;
......@@ -154,7 +157,7 @@ extern int sysctl_rose_maximum_vcs;
extern int sysctl_rose_window_size;
extern int rosecmp(rose_address *, rose_address *);
extern int rosecmpm(rose_address *, rose_address *, unsigned short);
extern char *rose2asc(rose_address *);
extern const char *rose2asc(const rose_address *);
extern struct sock *rose_find_socket(unsigned int, struct rose_neigh *);
extern void rose_kill_by_neigh(struct rose_neigh *);
extern unsigned int rose_new_lci(struct rose_neigh *);
......@@ -163,7 +166,7 @@ extern void rose_destroy_socket(struct sock *);
/* rose_dev.c */
extern int rose_rx_ip(struct sk_buff *, struct net_device *);
extern int rose_init(struct net_device *);
extern void rose_setup(struct net_device *);
/* rose_in.c */
extern int rose_process_rx_frame(struct sock *, struct sk_buff *);
......@@ -193,6 +196,9 @@ extern void rose_enquiry_response(struct sock *);
/* rose_route.c */
extern struct rose_neigh *rose_loopback_neigh;
extern struct file_operations rose_neigh_fops;
extern struct file_operations rose_nodes_fops;
extern struct file_operations rose_routes_fops;
extern int rose_add_loopback_neigh(void);
extern int rose_add_loopback_node(rose_address *);
......@@ -207,9 +213,6 @@ extern struct rose_neigh *rose_get_neigh(rose_address *, unsigned char *, unsign
extern int rose_rt_ioctl(unsigned int, void *);
extern void rose_link_failed(ax25_cb *, int);
extern int rose_route_frame(struct sk_buff *, ax25_cb *);
extern int rose_nodes_get_info(char *, char **, off_t, int);
extern int rose_neigh_get_info(char *, char **, off_t, int);
extern int rose_routes_get_info(char *, char **, off_t, int);
extern void rose_rt_free(void);
/* rose_subr.c */
......
......@@ -87,6 +87,7 @@ typedef enum {
SCTP_CMD_RTO_PENDING, /* Set transport's rto_pending. */
SCTP_CMD_PART_DELIVER, /* Partial data delivery considerations. */
SCTP_CMD_RENEGE, /* Renege data on an association. */
SCTP_CMD_SETUP_T4, /* ADDIP, setup T4 RTO timer parms. */
SCTP_CMD_LAST
} sctp_verb_t;
......
......@@ -75,6 +75,9 @@ enum { SCTP_DEFAULT_INSTREAMS = SCTP_MAX_STREAM };
#define SCTP_NUM_BASE_CHUNK_TYPES (SCTP_CID_BASE_MAX + 1)
#define SCTP_NUM_CHUNK_TYPES (SCTP_NUM_BASE_CHUNKTYPES + 2)
#define SCTP_CID_ADDIP_MIN SCTP_CID_ASCONF
#define SCTP_CID_ADDIP_MAX SCTP_CID_ASCONF_ACK
#define SCTP_NUM_ADDIP_CHUNK_TYPES 2
/* These are the different flavours of event. */
typedef enum {
......@@ -99,6 +102,7 @@ typedef enum {
SCTP_EVENT_TIMEOUT_T1_INIT,
SCTP_EVENT_TIMEOUT_T2_SHUTDOWN,
SCTP_EVENT_TIMEOUT_T3_RTX,
SCTP_EVENT_TIMEOUT_T4_RTO,
SCTP_EVENT_TIMEOUT_T5_SHUTDOWN_GUARD,
SCTP_EVENT_TIMEOUT_HEARTBEAT,
SCTP_EVENT_TIMEOUT_SACK,
......@@ -122,9 +126,10 @@ typedef enum {
SCTP_PRIMITIVE_ABORT,
SCTP_PRIMITIVE_SEND,
SCTP_PRIMITIVE_REQUESTHEARTBEAT,
SCTP_PRIMITIVE_ASCONF,
} sctp_event_primitive_t;
#define SCTP_EVENT_PRIMITIVE_MAX SCTP_PRIMITIVE_REQUESTHEARTBEAT
#define SCTP_EVENT_PRIMITIVE_MAX SCTP_PRIMITIVE_ASCONF
#define SCTP_NUM_PRIMITIVE_TYPES (SCTP_EVENT_PRIMITIVE_MAX + 1)
/* We define here a utility type for manipulating subtypes.
......
......@@ -117,6 +117,7 @@ sctp_state_fn_t sctp_sf_tabort_8_4_8;
sctp_state_fn_t sctp_sf_operr_notify;
sctp_state_fn_t sctp_sf_t1_timer_expire;
sctp_state_fn_t sctp_sf_t2_timer_expire;
sctp_state_fn_t sctp_sf_t4_timer_expire;
sctp_state_fn_t sctp_sf_t5_timer_expire;
sctp_state_fn_t sctp_sf_sendbeat_8_3;
sctp_state_fn_t sctp_sf_beat_8_3;
......@@ -137,6 +138,8 @@ sctp_state_fn_t sctp_sf_unk_chunk;
sctp_state_fn_t sctp_sf_do_8_5_1_E_sa;
sctp_state_fn_t sctp_sf_cookie_echoed_err;
sctp_state_fn_t sctp_sf_do_5_2_6_stale;
sctp_state_fn_t sctp_sf_do_asconf;
sctp_state_fn_t sctp_sf_do_asconf_ack;
/* Prototypes for primitive event state functions. */
sctp_state_fn_t sctp_sf_do_prm_asoc;
......@@ -154,6 +157,7 @@ sctp_state_fn_t sctp_sf_error_closed;
sctp_state_fn_t sctp_sf_error_shutdown;
sctp_state_fn_t sctp_sf_ignore_primitive;
sctp_state_fn_t sctp_sf_do_prm_requestheartbeat;
sctp_state_fn_t sctp_sf_do_prm_asconf;
/* Prototypes for other event state functions. */
sctp_state_fn_t sctp_sf_do_9_2_start_shutdown;
......@@ -184,10 +188,6 @@ sctp_state_fn_t sctp_do_9_2_reshutack;
sctp_state_fn_t sctp_do_8_3_hb_err;
sctp_state_fn_t sctp_heartoff;
/* Prototypes for addip related state functions. Not in use. */
sctp_state_fn_t sctp_addip_do_asconf;
sctp_state_fn_t sctp_addip_do_asconf_ack;
/* Prototypes for utility support functions. */
__u8 sctp_get_chunk_type(struct sctp_chunk *chunk);
const sctp_sm_table_entry_t *sctp_sm_lookup_event(sctp_event_t,
......@@ -260,6 +260,14 @@ struct sctp_chunk *sctp_make_op_error(const struct sctp_association *,
__u16 cause_code,
const void *payload,
size_t paylen);
struct sctp_chunk *sctp_make_asconf(struct sctp_association *asoc,
union sctp_addr *addr,
int vparam_len);
struct sctp_chunk *sctp_process_asconf(struct sctp_association *asoc,
struct sctp_chunk *asconf,
int vparam_len);
void sctp_chunk_assign_tsn(struct sctp_chunk *);
void sctp_chunk_assign_ssn(struct sctp_chunk *);
......
......@@ -169,11 +169,11 @@ extern struct sctp_globals {
/* This is the hash of all endpoints. */
int ep_hashsize;
struct sctp_hashbucket *ep_hashbucket;
struct sctp_hashbucket *ep_hashtable;
/* This is the hash of all associations. */
int assoc_hashsize;
struct sctp_hashbucket *assoc_hashbucket;
struct sctp_hashbucket *assoc_hashtable;
/* This is the sctp port control hash. */
int port_hashsize;
......@@ -207,9 +207,9 @@ extern struct sctp_globals {
#define sctp_max_outstreams (sctp_globals.max_outstreams)
#define sctp_address_families (sctp_globals.address_families)
#define sctp_ep_hashsize (sctp_globals.ep_hashsize)
#define sctp_ep_hashbucket (sctp_globals.ep_hashbucket)
#define sctp_ep_hashtable (sctp_globals.ep_hashtable)
#define sctp_assoc_hashsize (sctp_globals.assoc_hashsize)
#define sctp_assoc_hashbucket (sctp_globals.assoc_hashbucket)
#define sctp_assoc_hashtable (sctp_globals.assoc_hashtable)
#define sctp_port_hashsize (sctp_globals.port_hashsize)
#define sctp_port_rover (sctp_globals.port_rover)
#define sctp_port_alloc_lock (sctp_globals.port_alloc_lock)
......@@ -571,6 +571,7 @@ struct sctp_chunk {
struct sctp_ecnehdr *ecne_hdr;
struct sctp_cwrhdr *ecn_cwr_hdr;
struct sctp_errhdr *err_hdr;
struct sctp_addiphdr *addip_hdr;
} subh;
__u8 *chunk_end;
......@@ -1385,8 +1386,10 @@ struct sctp_association {
int cookie_len;
void *cookie;
/* ADDIP Extention (ADDIP) --xguo */
/* <expected peer-serial-number> minus 1 (ADDIP sec. 4.2 C1) */
/* ADDIP Section 4.2 Upon reception of an ASCONF Chunk.
* C1) ... "Peer-Serial-Number'. This value MUST be initialized to the
* Initial TSN Value minus 1
*/
__u32 addip_serial;
} peer;
......@@ -1623,12 +1626,12 @@ struct sctp_association {
/* ADDIP Section 4.1 ASCONF Chunk Procedures
*
* A2) A serial number should be assigned to the Chunk. The
* serial number should be a monotonically increasing
* number. All serial numbers are defined to be initialized at
* serial number SHOULD be a monotonically increasing
* number. The serial number SHOULD be initialized at
* the start of the association to the same value as the
* Initial TSN.
*
* [and]
* Initial TSN and every time a new ASCONF chunk is created
* it is incremented by one after assigning the serial number
* to the newly created chunk.
*
* ADDIP
* 3.1.1 Address/Stream Configuration Change Chunk (ASCONF)
......@@ -1637,14 +1640,11 @@ struct sctp_association {
*
* This value represents a Serial Number for the ASCONF
* Chunk. The valid range of Serial Number is from 0 to
* 4294967295 (2**32 - 1). Serial Numbers wrap back to 0
* 4294967295 (2^32 - 1). Serial Numbers wrap back to 0
* after reaching 4294967295.
*/
__u32 addip_serial;
/* Is the ADDIP extension enabled for this association? */
char addip_enable;
/* Need to send an ECNE Chunk? */
char need_ecne;
......
......@@ -349,7 +349,7 @@ void ax25_destroy_socket(ax25_cb *ax25)
ax25->timer.data = (unsigned long)ax25;
add_timer(&ax25->timer);
} else {
sk_free(ax25->sk);
sock_put(ax25->sk);
}
} else {
ax25_free_cb(ax25);
......@@ -944,15 +944,13 @@ static int ax25_release(struct socket *sock)
switch (ax25->state) {
case AX25_STATE_0:
ax25_disconnect(ax25, 0);
ax25_destroy_socket(ax25);
break;
goto drop;
case AX25_STATE_1:
case AX25_STATE_2:
ax25_send_control(ax25, AX25_DISC, AX25_POLLON, AX25_COMMAND);
ax25_disconnect(ax25, 0);
ax25_destroy_socket(ax25);
break;
goto drop;
case AX25_STATE_3:
case AX25_STATE_4:
......@@ -995,13 +993,16 @@ static int ax25_release(struct socket *sock)
sk->sk_shutdown |= SEND_SHUTDOWN;
sk->sk_state_change(sk);
sock_set_flag(sk, SOCK_DEAD);
ax25_destroy_socket(ax25);
goto drop;
}
sock->sk = NULL;
sk->sk_socket = NULL; /* Not used, but we should do this */
release_sock(sk);
return 0;
drop:
release_sock(sk);
ax25_destroy_socket(ax25);
return 0;
}
......
......@@ -39,11 +39,12 @@
#include <linux/notifier.h>
#include <net/rose.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <net/tcp.h>
#include <net/ip.h>
#include <net/arp.h>
int rose_ndevs = 10;
static int rose_ndevs = 10;
int sysctl_rose_restart_request_timeout = ROSE_DEFAULT_T0;
int sysctl_rose_call_request_timeout = ROSE_DEFAULT_T1;
......@@ -57,7 +58,7 @@ int sysctl_rose_maximum_vcs = ROSE_DEFAULT_MAXVC;
int sysctl_rose_window_size = ROSE_DEFAULT_WINDOW_SIZE;
HLIST_HEAD(rose_list);
static spinlock_t rose_list_lock = SPIN_LOCK_UNLOCKED;
spinlock_t rose_list_lock = SPIN_LOCK_UNLOCKED;
static struct proto_ops rose_proto_ops;
......@@ -66,7 +67,7 @@ ax25_address rose_callsign;
/*
* Convert a ROSE address into text.
*/
char *rose2asc(rose_address *addr)
const char *rose2asc(const rose_address *addr)
{
static char buffer[11];
......@@ -1332,29 +1333,57 @@ static int rose_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg)
return 0;
}
static int rose_get_info(char *buffer, char **start, off_t offset, int length)
#ifdef CONFIG_PROC_FS
static void *rose_info_start(struct seq_file *seq, loff_t *pos)
{
int i;
struct sock *s;
struct hlist_node *node;
struct net_device *dev;
const char *devname, *callsign;
int len = 0;
off_t pos = 0;
off_t begin = 0;
spin_lock_bh(&rose_list_lock);
if (*pos == 0)
return ROSE_PROC_START;
i = 1;
sk_for_each(s, node, &rose_list) {
if (i == *pos)
return s;
++i;
}
return NULL;
}
len += sprintf(buffer, "dest_addr dest_call src_addr src_call dev lci neigh st vs vr va t t1 t2 t3 hb idle Snd-Q Rcv-Q inode\n");
static void *rose_info_next(struct seq_file *seq, void *v, loff_t *pos)
{
++*pos;
sk_for_each(s, node, &rose_list) {
return (v == ROSE_PROC_START) ? sk_head(&rose_list)
: sk_next((struct sock *)v);
}
static void rose_info_stop(struct seq_file *seq, void *v)
{
spin_unlock_bh(&rose_list_lock);
}
static int rose_info_show(struct seq_file *seq, void *v)
{
if (v == ROSE_PROC_START)
seq_puts(seq,
"dest_addr dest_call src_addr src_call dev lci neigh st vs vr va t t1 t2 t3 hb idle Snd-Q Rcv-Q inode\n");
else {
struct sock *s = v;
rose_cb *rose = rose_sk(s);
const char *devname, *callsign;
const struct net_device *dev = rose->device;
if ((dev = rose->device) == NULL)
if (!dev)
devname = "???";
else
devname = dev->name;
len += sprintf(buffer + len, "%-10s %-9s ",
seq_printf(seq, "%-10s %-9s ",
rose2asc(&rose->dest_addr),
ax2asc(&rose->dest_call));
......@@ -1363,7 +1392,8 @@ static int rose_get_info(char *buffer, char **start, off_t offset, int length)
else
callsign = ax2asc(&rose->source_call);
len += sprintf(buffer + len, "%-10s %-9s %-5s %3.3X %05d %d %d %d %d %3lu %3lu %3lu %3lu %3lu %3lu/%03lu %5d %5d %ld\n",
seq_printf(seq,
"%-10s %-9s %-5s %3.3X %05d %d %d %d %d %3lu %3lu %3lu %3lu %3lu %3lu/%03lu %5d %5d %ld\n",
rose2asc(&rose->source_addr),
callsign,
devname,
......@@ -1383,27 +1413,32 @@ static int rose_get_info(char *buffer, char **start, off_t offset, int length)
atomic_read(&s->sk_wmem_alloc),
atomic_read(&s->sk_rmem_alloc),
s->sk_socket ? SOCK_INODE(s->sk_socket)->i_ino : 0L);
pos = begin + len;
if (pos < offset) {
len = 0;
begin = pos;
}
if (pos > offset + length)
break;
}
spin_unlock_bh(&rose_list_lock);
*start = buffer + (offset - begin);
len -= (offset - begin);
return 0;
}
if (len > length) len = length;
static struct seq_operations rose_info_seqops = {
.start = rose_info_start,
.next = rose_info_next,
.stop = rose_info_stop,
.show = rose_info_show,
};
return len;
static int rose_info_open(struct inode *inode, struct file *file)
{
return seq_open(file, &rose_info_seqops);
}
static struct file_operations rose_info_fops = {
.owner = THIS_MODULE,
.open = rose_info_open,
.read = seq_read,
.llseek = seq_lseek,
.release = seq_release,
};
#endif /* CONFIG_PROC_FS */
static struct net_proto_family rose_family_ops = {
.family = PF_ROSE,
.create = rose_create,
......@@ -1435,7 +1470,7 @@ static struct notifier_block rose_dev_notifier = {
.notifier_call = rose_device_event,
};
static struct net_device *dev_rose;
static struct net_device **dev_rose;
static const char banner[] = KERN_INFO "F6FBB/G4KLX ROSE for Linux. Version 0.62 for AX25.037 Linux 2.4\n";
......@@ -1450,17 +1485,39 @@ static int __init rose_proto_init(void)
return -1;
}
if ((dev_rose = kmalloc(rose_ndevs * sizeof(struct net_device), GFP_KERNEL)) == NULL) {
dev_rose = kmalloc(rose_ndevs * sizeof(struct net_device *), GFP_KERNEL);
if (dev_rose == NULL) {
printk(KERN_ERR "ROSE: rose_proto_init - unable to allocate device structure\n");
return -1;
}
memset(dev_rose, 0x00, rose_ndevs * sizeof(struct net_device));
memset(dev_rose, 0x00, rose_ndevs * sizeof(struct net_device*));
for (i = 0; i < rose_ndevs; i++) {
struct net_device *dev;
char name[IFNAMSIZ];
sprintf(name, "rose%d", i);
dev = alloc_netdev(sizeof(struct net_device_stats),
name, rose_setup);
if (!dev) {
printk(KERN_ERR "ROSE: rose_proto_init - unable to allocate memory\n");
while (--i >= 0)
kfree(dev_rose[i]);
return -ENOMEM;
}
dev_rose[i] = dev;
}
for (i = 0; i < rose_ndevs; i++) {
sprintf(dev_rose[i].name, "rose%d", i);
dev_rose[i].init = rose_init;
register_netdev(&dev_rose[i]);
if (register_netdev(dev_rose[i])) {
printk(KERN_ERR "ROSE: netdevice registration failed\n");
while (--i >= 0) {
unregister_netdev(dev_rose[i]);
kfree(dev_rose[i]);
}
return -EIO;
}
}
sock_register(&rose_family_ops);
......@@ -1477,10 +1534,11 @@ static int __init rose_proto_init(void)
rose_add_loopback_neigh();
proc_net_create("rose", 0, rose_get_info);
proc_net_create("rose_neigh", 0, rose_neigh_get_info);
proc_net_create("rose_nodes", 0, rose_nodes_get_info);
proc_net_create("rose_routes", 0, rose_routes_get_info);
proc_net_fops_create("rose", S_IRUGO, &rose_info_fops);
proc_net_fops_create("rose_neigh", S_IRUGO, &rose_neigh_fops);
proc_net_fops_create("rose_nodes", S_IRUGO, &rose_nodes_fops);
proc_net_fops_create("rose_routes", S_IRUGO, &rose_routes_fops);
return 0;
}
module_init(rose_proto_init);
......@@ -1518,10 +1576,11 @@ static void __exit rose_exit(void)
sock_unregister(PF_ROSE);
for (i = 0; i < rose_ndevs; i++) {
if (dev_rose[i].priv != NULL) {
kfree(dev_rose[i].priv);
dev_rose[i].priv = NULL;
unregister_netdev(&dev_rose[i]);
struct net_device *dev = dev_rose[i];
if (dev) {
unregister_netdev(dev);
kfree(dev);
}
}
......
......@@ -165,7 +165,7 @@ static struct net_device_stats *rose_get_stats(struct net_device *dev)
return (struct net_device_stats *)dev->priv;
}
int rose_init(struct net_device *dev)
void rose_setup(struct net_device *dev)
{
SET_MODULE_OWNER(dev);
dev->mtu = ROSE_MAX_PACKET_SIZE - 2;
......@@ -182,13 +182,5 @@ int rose_init(struct net_device *dev)
/* New-style flags. */
dev->flags = 0;
if ((dev->priv = kmalloc(sizeof(struct net_device_stats), GFP_KERNEL)) == NULL)
return -ENOMEM;
memset(dev->priv, 0, sizeof(struct net_device_stats));
dev->get_stats = rose_get_stats;
return 0;
};
}
......@@ -35,12 +35,13 @@
#include <linux/netfilter.h>
#include <linux/init.h>
#include <net/rose.h>
#include <linux/seq_file.h>
static unsigned int rose_neigh_no = 1;
static struct rose_node *rose_node_list;
static spinlock_t rose_node_list_lock = SPIN_LOCK_UNLOCKED;
static struct rose_neigh *rose_neigh_list;
struct rose_neigh *rose_neigh_list;
static spinlock_t rose_neigh_list_lock = SPIN_LOCK_UNLOCKED;
static struct rose_route *rose_route_list;
static spinlock_t rose_route_list_lock = SPIN_LOCK_UNLOCKED;
......@@ -1066,165 +1067,248 @@ int rose_route_frame(struct sk_buff *skb, ax25_cb *ax25)
return res;
}
int rose_nodes_get_info(char *buffer, char **start, off_t offset, int length)
#ifdef CONFIG_PROC_FS
static void *rose_node_start(struct seq_file *seq, loff_t *pos)
{
struct rose_node *rose_node;
int len = 0;
off_t pos = 0;
off_t begin = 0;
int i;
int i = 1;
spin_lock_bh(&rose_neigh_list_lock);
if (*pos == 0)
return ROSE_PROC_START;
for (rose_node = rose_node_list; rose_node && i < *pos;
rose_node = rose_node->next, ++i);
return (i == *pos) ? rose_node : NULL;
}
len += sprintf(buffer, "address mask n neigh neigh neigh\n");
static void *rose_node_next(struct seq_file *seq, void *v, loff_t *pos)
{
++*pos;
return (v == ROSE_PROC_START) ? rose_node_list
: ((struct rose_node *)v)->next;
}
for (rose_node = rose_node_list; rose_node != NULL; rose_node = rose_node->next) {
static void rose_node_stop(struct seq_file *seq, void *v)
{
spin_unlock_bh(&rose_neigh_list_lock);
}
static int rose_node_show(struct seq_file *seq, void *v)
{
int i;
if (v == ROSE_PROC_START)
seq_puts(seq, "address mask n neigh neigh neigh\n");
else {
const struct rose_node *rose_node = v;
/* if (rose_node->loopback) {
len += sprintf(buffer + len, "%-10s %04d 1 loopback\n",
seq_printf(seq, "%-10s %04d 1 loopback\n",
rose2asc(&rose_node->address),
rose_node->mask);
} else { */
len += sprintf(buffer + len, "%-10s %04d %d",
seq_printf(seq, "%-10s %04d %d",
rose2asc(&rose_node->address),
rose_node->mask,
rose_node->count);
for (i = 0; i < rose_node->count; i++)
len += sprintf(buffer + len, " %05d",
seq_printf(seq, " %05d",
rose_node->neighbour[i]->number);
len += sprintf(buffer + len, "\n");
seq_puts(seq, "\n");
/* } */
pos = begin + len;
if (pos < offset) {
len = 0;
begin = pos;
}
if (pos > offset + length)
break;
}
spin_unlock_bh(&rose_neigh_list_lock);
*start = buffer + (offset - begin);
len -= (offset - begin);
return 0;
}
if (len > length)
len = length;
static struct seq_operations rose_node_seqops = {
.start = rose_node_start,
.next = rose_node_next,
.stop = rose_node_stop,
.show = rose_node_show,
};
return len;
static int rose_nodes_open(struct inode *inode, struct file *file)
{
return seq_open(file, &rose_node_seqops);
}
int rose_neigh_get_info(char *buffer, char **start, off_t offset, int length)
struct file_operations rose_nodes_fops = {
.owner = THIS_MODULE,
.open = rose_nodes_open,
.read = seq_read,
.llseek = seq_lseek,
.release = seq_release,
};
static void *rose_neigh_start(struct seq_file *seq, loff_t *pos)
{
struct rose_neigh *rose_neigh;
int len = 0;
off_t pos = 0;
off_t begin = 0;
int i;
int i = 1;
spin_lock_bh(&rose_neigh_list_lock);
if (*pos == 0)
return ROSE_PROC_START;
len += sprintf(buffer, "addr callsign dev count use mode restart t0 tf digipeaters\n");
for (rose_neigh = rose_neigh_list; rose_neigh && i < *pos;
rose_neigh = rose_neigh->next, ++i);
for (rose_neigh = rose_neigh_list; rose_neigh != NULL; rose_neigh = rose_neigh->next) {
/* if (!rose_neigh->loopback) { */
len += sprintf(buffer + len, "%05d %-9s %-4s %3d %3d %3s %3s %3lu %3lu",
rose_neigh->number,
(rose_neigh->loopback) ? "RSLOOP-0" : ax2asc(&rose_neigh->callsign),
rose_neigh->dev ? rose_neigh->dev->name : "???",
rose_neigh->count,
rose_neigh->use,
(rose_neigh->dce_mode) ? "DCE" : "DTE",
(rose_neigh->restarted) ? "yes" : "no",
ax25_display_timer(&rose_neigh->t0timer) / HZ,
ax25_display_timer(&rose_neigh->ftimer) / HZ);
if (rose_neigh->digipeat != NULL) {
for (i = 0; i < rose_neigh->digipeat->ndigi; i++)
len += sprintf(buffer + len, " %s", ax2asc(&rose_neigh->digipeat->calls[i]));
}
return (i == *pos) ? rose_neigh : NULL;
}
len += sprintf(buffer + len, "\n");
static void *rose_neigh_next(struct seq_file *seq, void *v, loff_t *pos)
{
++*pos;
return (v == ROSE_PROC_START) ? rose_neigh_list
: ((struct rose_neigh *)v)->next;
}
pos = begin + len;
static void rose_neigh_stop(struct seq_file *seq, void *v)
{
spin_unlock_bh(&rose_neigh_list_lock);
}
if (pos < offset) {
len = 0;
begin = pos;
}
static int rose_neigh_show(struct seq_file *seq, void *v)
{
int i;
if (pos > offset + length)
break;
/* } */
}
if (v == ROSE_PROC_START)
seq_puts(seq,
"addr callsign dev count use mode restart t0 tf digipeaters\n");
else {
struct rose_neigh *rose_neigh = v;
spin_unlock_bh(&rose_neigh_list_lock);
/* if (!rose_neigh->loopback) { */
seq_printf(seq, "%05d %-9s %-4s %3d %3d %3s %3s %3lu %3lu",
rose_neigh->number,
(rose_neigh->loopback) ? "RSLOOP-0" : ax2asc(&rose_neigh->callsign),
rose_neigh->dev ? rose_neigh->dev->name : "???",
rose_neigh->count,
rose_neigh->use,
(rose_neigh->dce_mode) ? "DCE" : "DTE",
(rose_neigh->restarted) ? "yes" : "no",
ax25_display_timer(&rose_neigh->t0timer) / HZ,
ax25_display_timer(&rose_neigh->ftimer) / HZ);
if (rose_neigh->digipeat != NULL) {
for (i = 0; i < rose_neigh->digipeat->ndigi; i++)
seq_printf(seq, " %s", ax2asc(&rose_neigh->digipeat->calls[i]));
}
seq_puts(seq, "\n");
}
return 0;
}
*start = buffer + (offset - begin);
len -= (offset - begin);
if (len > length)
len = length;
static struct seq_operations rose_neigh_seqops = {
.start = rose_neigh_start,
.next = rose_neigh_next,
.stop = rose_neigh_stop,
.show = rose_neigh_show,
};
return len;
static int rose_neigh_open(struct inode *inode, struct file *file)
{
return seq_open(file, &rose_neigh_seqops);
}
int rose_routes_get_info(char *buffer, char **start, off_t offset, int length)
struct file_operations rose_neigh_fops = {
.owner = THIS_MODULE,
.open = rose_neigh_open,
.read = seq_read,
.llseek = seq_lseek,
.release = seq_release,
};
static void *rose_route_start(struct seq_file *seq, loff_t *pos)
{
struct rose_route *rose_route;
int len = 0;
off_t pos = 0;
off_t begin = 0;
int i = 1;
spin_lock_bh(&rose_route_list_lock);
if (*pos == 0)
return ROSE_PROC_START;
len += sprintf(buffer, "lci address callsign neigh <-> lci address callsign neigh\n");
for (rose_route = rose_route_list; rose_route && i < *pos;
rose_route = rose_route->next, ++i);
for (rose_route = rose_route_list; rose_route != NULL; rose_route = rose_route->next) {
if (rose_route->neigh1 != NULL) {
len += sprintf(buffer + len, "%3.3X %-10s %-9s %05d ",
rose_route->lci1,
rose2asc(&rose_route->src_addr),
ax2asc(&rose_route->src_call),
rose_route->neigh1->number);
} else {
len += sprintf(buffer + len, "000 * * 00000 ");
}
return (i == *pos) ? rose_route : NULL;
}
static void *rose_route_next(struct seq_file *seq, void *v, loff_t *pos)
{
++*pos;
return (v == ROSE_PROC_START) ? rose_route_list
: ((struct rose_route *)v)->next;
}
if (rose_route->neigh2 != NULL) {
len += sprintf(buffer + len, "%3.3X %-10s %-9s %05d\n",
static void rose_route_stop(struct seq_file *seq, void *v)
{
spin_unlock_bh(&rose_route_list_lock);
}
static int rose_route_show(struct seq_file *seq, void *v)
{
if (v == ROSE_PROC_START)
seq_puts(seq,
"lci address callsign neigh <-> lci address callsign neigh\n");
else {
struct rose_route *rose_route = v;
if (rose_route->neigh1)
seq_printf(seq,
"%3.3X %-10s %-9s %05d ",
rose_route->lci1,
rose2asc(&rose_route->src_addr),
ax2asc(&rose_route->src_call),
rose_route->neigh1->number);
else
seq_puts(seq,
"000 * * 00000 ");
if (rose_route->neigh2)
seq_printf(seq,
"%3.3X %-10s %-9s %05d\n",
rose_route->lci2,
rose2asc(&rose_route->dest_addr),
ax2asc(&rose_route->dest_call),
rose_route->neigh2->number);
} else {
len += sprintf(buffer + len, "000 * * 00000\n");
}
pos = begin + len;
if (pos < offset) {
len = 0;
begin = pos;
else
seq_puts(seq,
"000 * * 00000\n");
}
return 0;
}
if (pos > offset + length)
break;
}
static struct seq_operations rose_route_seqops = {
.start = rose_route_start,
.next = rose_route_next,
.stop = rose_route_stop,
.show = rose_route_show,
};
spin_unlock_bh(&rose_route_list_lock);
*start = buffer + (offset - begin);
len -= (offset - begin);
static int rose_route_open(struct inode *inode, struct file *file)
{
return seq_open(file, &rose_route_seqops);
}
if (len > length)
len = length;
struct file_operations rose_routes_fops = {
.owner = THIS_MODULE,
.open = rose_route_open,
.read = seq_read,
.llseek = seq_lseek,
.release = seq_release,
};
return len;
}
#endif /* CONFIG_PROC_FS */
/*
* Release all memory associated with ROSE routing structures.
......
......@@ -221,12 +221,14 @@ struct sctp_association *sctp_association_init(struct sctp_association *asoc,
* remote endpoint it should do the following:
* ...
* A2) a serial number should be assigned to the chunk. The serial
* number should be a monotonically increasing number. All serial
* numbers are defined to be initialized at the start of the
* number SHOULD be a monotonically increasing number. The serial
* numbers SHOULD be initialized at the start of the
* association to the same value as the initial TSN.
*/
asoc->addip_serial = asoc->c.initial_tsn;
skb_queue_head_init(&asoc->addip_chunks);
/* Make an empty list of remote transport addresses. */
INIT_LIST_HEAD(&asoc->peer.transport_addr_list);
......@@ -264,8 +266,6 @@ struct sctp_association *sctp_association_init(struct sctp_association *asoc,
/* Set up the tsn tracking. */
sctp_tsnmap_init(&asoc->peer.tsn_map, SCTP_TSN_MAP_SIZE, 0);
skb_queue_head_init(&asoc->addip_chunks);
asoc->need_ecne = 0;
asoc->eyecatcher = SCTP_ASSOC_EYECATCHER;
......
......@@ -185,6 +185,7 @@ static const char *sctp_timer_tbl[] = {
"TIMEOUT_T1_INIT",
"TIMEOUT_T2_SHUTDOWN",
"TIMEOUT_T3_RTX",
"TIMEOUT_T4_RTO",
"TIMEOUT_T5_SHUTDOWN_GUARD",
"TIMEOUT_HEARTBEAT",
"TIMEOUT_SACK",
......
......@@ -131,6 +131,7 @@ struct sctp_endpoint *sctp_endpoint_init(struct sctp_endpoint *ep,
ep->timeouts[SCTP_EVENT_TIMEOUT_T2_SHUTDOWN] =
sp->rtoinfo.srto_initial * HZ / 1000;
ep->timeouts[SCTP_EVENT_TIMEOUT_T3_RTX] = 0;
ep->timeouts[SCTP_EVENT_TIMEOUT_T4_RTO] = 0;
/* sctpimpguide-05 Section 2.12.2
* If the 'T5-shutdown-guard' timer is used, it SHOULD be set to the
......
......@@ -528,7 +528,7 @@ void __sctp_hash_endpoint(struct sctp_endpoint *ep)
epb = &ep->base;
epb->hashent = sctp_ep_hashfn(epb->bind_addr.port);
head = &sctp_ep_hashbucket[epb->hashent];
head = &sctp_ep_hashtable[epb->hashent];
sctp_write_lock(&head->lock);
epp = &head->chain;
......@@ -558,7 +558,7 @@ void __sctp_unhash_endpoint(struct sctp_endpoint *ep)
epb->hashent = sctp_ep_hashfn(epb->bind_addr.port);
head = &sctp_ep_hashbucket[epb->hashent];
head = &sctp_ep_hashtable[epb->hashent];
sctp_write_lock(&head->lock);
......@@ -589,7 +589,7 @@ struct sctp_endpoint *__sctp_rcv_lookup_endpoint(const union sctp_addr *laddr)
int hash;
hash = sctp_ep_hashfn(laddr->v4.sin_port);
head = &sctp_ep_hashbucket[hash];
head = &sctp_ep_hashtable[hash];
read_lock(&head->lock);
for (epb = head->chain; epb; epb = epb->next) {
ep = sctp_ep(epb);
......@@ -627,7 +627,7 @@ void __sctp_hash_established(struct sctp_association *asoc)
/* Calculate which chain this entry will belong to. */
epb->hashent = sctp_assoc_hashfn(epb->bind_addr.port, asoc->peer.port);
head = &sctp_assoc_hashbucket[epb->hashent];
head = &sctp_assoc_hashtable[epb->hashent];
sctp_write_lock(&head->lock);
epp = &head->chain;
......@@ -658,7 +658,7 @@ void __sctp_unhash_established(struct sctp_association *asoc)
epb->hashent = sctp_assoc_hashfn(epb->bind_addr.port,
asoc->peer.port);
head = &sctp_assoc_hashbucket[epb->hashent];
head = &sctp_assoc_hashtable[epb->hashent];
sctp_write_lock(&head->lock);
......@@ -688,7 +688,7 @@ struct sctp_association *__sctp_lookup_association(
* have wildcards anyways.
*/
hash = sctp_assoc_hashfn(local->v4.sin_port, peer->v4.sin_port);
head = &sctp_assoc_hashbucket[hash];
head = &sctp_assoc_hashtable[hash];
read_lock(&head->lock);
for (epb = head->chain; epb; epb = epb->next) {
asoc = sctp_assoc(epb);
......
......@@ -172,7 +172,7 @@ static int sctp_eps_seq_show(struct seq_file *seq, void *v)
seq_printf(seq, " ENDPT SOCK STY SST HBKT LPORT LADDRS\n");
for (hash = 0; hash < sctp_ep_hashsize; hash++) {
head = &sctp_ep_hashbucket[hash];
head = &sctp_ep_hashtable[hash];
read_lock(&head->lock);
for (epb = head->chain; epb; epb = epb->next) {
ep = sctp_ep(epb);
......@@ -234,7 +234,7 @@ static int sctp_assocs_seq_show(struct seq_file *seq, void *v)
seq_printf(seq, " ASSOC SOCK STY SST ST HBKT LPORT RPORT "
"LADDRS <-> RADDRS\n");
for (hash = 0; hash < sctp_assoc_hashsize; hash++) {
head = &sctp_assoc_hashbucket[hash];
head = &sctp_assoc_hashtable[hash];
read_lock(&head->lock);
for (epb = head->chain; epb; epb = epb->next) {
assoc = sctp_assoc(epb);
......
......@@ -934,6 +934,8 @@ __init int sctp_init(void)
{
int i;
int status = 0;
unsigned long goal;
int order;
/* SCTP_DEBUG sanity check. */
if (!sctp_sanity_check())
......@@ -1017,52 +1019,75 @@ __init int sctp_init(void)
sctp_max_instreams = SCTP_DEFAULT_INSTREAMS;
sctp_max_outstreams = SCTP_DEFAULT_OUTSTREAMS;
/* Allocate and initialize the association hash table. */
sctp_assoc_hashsize = 4096;
sctp_assoc_hashbucket = (struct sctp_hashbucket *)
kmalloc(4096 * sizeof(struct sctp_hashbucket), GFP_KERNEL);
if (!sctp_assoc_hashbucket) {
/* Size and allocate the association hash table.
* The methodology is similar to that of the tcp hash tables.
*/
if (num_physpages >= (128 * 1024))
goal = num_physpages >> (22 - PAGE_SHIFT);
else
goal = num_physpages >> (24 - PAGE_SHIFT);
for (order = 0; (1UL << order) < goal; order++)
;
do {
sctp_assoc_hashsize = (1UL << order) * PAGE_SIZE /
sizeof(struct sctp_hashbucket);
if ((sctp_assoc_hashsize > (64 * 1024)) && order > 0)
continue;
sctp_assoc_hashtable = (struct sctp_hashbucket *)
__get_free_pages(GFP_ATOMIC, order);
} while (!sctp_assoc_hashtable && --order > 0);
if (!sctp_assoc_hashtable) {
printk(KERN_ERR "SCTP: Failed association hash alloc.\n");
status = -ENOMEM;
goto err_ahash_alloc;
}
for (i = 0; i < sctp_assoc_hashsize; i++) {
sctp_assoc_hashbucket[i].lock = RW_LOCK_UNLOCKED;
sctp_assoc_hashbucket[i].chain = NULL;
sctp_assoc_hashtable[i].lock = RW_LOCK_UNLOCKED;
sctp_assoc_hashtable[i].chain = NULL;
}
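/*
 * Worked example, not part of this patch, of the sizing logic above
 * (numbers assume 4 KB pages and an 8-byte struct sctp_hashbucket):
 *
 *   512 MB RAM  =>  num_physpages = 131072, which is >= 128 * 1024
 *   goal  = 131072 >> (22 - 12) = 128 pages
 *   order = 7, the smallest order with (1 << order) >= goal
 *   sctp_assoc_hashsize = 128 * 4096 / 8 = 65536 buckets (512 KB of pages),
 *   which does not exceed the 64K bucket cap, so order is not reduced further.
 */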
/* Allocate and initialize the endpoint hash table. */
sctp_ep_hashsize = 64;
sctp_ep_hashbucket = (struct sctp_hashbucket *)
sctp_ep_hashtable = (struct sctp_hashbucket *)
kmalloc(64 * sizeof(struct sctp_hashbucket), GFP_KERNEL);
if (!sctp_ep_hashbucket) {
if (!sctp_ep_hashtable) {
printk(KERN_ERR "SCTP: Failed endpoint_hash alloc.\n");
status = -ENOMEM;
goto err_ehash_alloc;
}
for (i = 0; i < sctp_ep_hashsize; i++) {
sctp_ep_hashbucket[i].lock = RW_LOCK_UNLOCKED;
sctp_ep_hashbucket[i].chain = NULL;
sctp_ep_hashtable[i].lock = RW_LOCK_UNLOCKED;
sctp_ep_hashtable[i].chain = NULL;
}
/* Allocate and initialize the SCTP port hash table. */
sctp_port_hashsize = 4096;
sctp_port_hashtable = (struct sctp_bind_hashbucket *)
kmalloc(4096 * sizeof(struct sctp_bind_hashbucket),GFP_KERNEL);
do {
sctp_port_hashsize = (1UL << order) * PAGE_SIZE /
sizeof(struct sctp_bind_hashbucket);
if ((sctp_port_hashsize > (64 * 1024)) && order > 0)
continue;
sctp_port_hashtable = (struct sctp_bind_hashbucket *)
__get_free_pages(GFP_ATOMIC, order);
} while (!sctp_port_hashtable && --order > 0);
if (!sctp_port_hashtable) {
printk(KERN_ERR "SCTP: Failed bind hash alloc.");
status = -ENOMEM;
goto err_bhash_alloc;
}
sctp_port_alloc_lock = SPIN_LOCK_UNLOCKED;
sctp_port_rover = sysctl_local_port_range[0] - 1;
for (i = 0; i < sctp_port_hashsize; i++) {
sctp_port_hashtable[i].lock = SPIN_LOCK_UNLOCKED;
sctp_port_hashtable[i].chain = NULL;
}
sctp_port_alloc_lock = SPIN_LOCK_UNLOCKED;
sctp_port_rover = sysctl_local_port_range[0] - 1;
printk(KERN_INFO "SCTP: Hash tables configured "
"(established %d bind %d)\n",
sctp_assoc_hashsize, sctp_port_hashsize);
sctp_sysctl_register();
INIT_LIST_HEAD(&sctp_address_families);
......@@ -1096,11 +1121,15 @@ __init int sctp_init(void)
err_v6_init:
sctp_sysctl_unregister();
list_del(&sctp_ipv4_specific.list);
kfree(sctp_port_hashtable);
free_pages((unsigned long)sctp_port_hashtable,
get_order(sctp_port_hashsize *
sizeof(struct sctp_bind_hashbucket)));
err_bhash_alloc:
kfree(sctp_ep_hashbucket);
kfree(sctp_ep_hashtable);
err_ehash_alloc:
kfree(sctp_assoc_hashbucket);
free_pages((unsigned long)sctp_assoc_hashtable,
get_order(sctp_assoc_hashsize *
sizeof(struct sctp_hashbucket)));
err_ahash_alloc:
sctp_dbg_objcnt_exit();
sctp_proc_exit();
......@@ -1136,9 +1165,13 @@ __exit void sctp_exit(void)
sctp_sysctl_unregister();
list_del(&sctp_ipv4_specific.list);
kfree(sctp_assoc_hashbucket);
kfree(sctp_ep_hashbucket);
kfree(sctp_port_hashtable);
free_pages((unsigned long)sctp_assoc_hashtable,
get_order(sctp_assoc_hashsize *
sizeof(struct sctp_hashbucket)));
kfree(sctp_ep_hashtable);
free_pages((unsigned long)sctp_port_hashtable,
get_order(sctp_port_hashsize *
sizeof(struct sctp_bind_hashbucket)));
kmem_cache_destroy(sctp_chunk_cachep);
kmem_cache_destroy(sctp_bucket_cachep);
......
......@@ -2088,3 +2088,85 @@ int sockaddr2sctp_addr(const union sctp_addr *sa, union sctp_addr_param *p)
return len;
}
/*
* ADDIP 3.1.1 Address Configuration Change Chunk (ASCONF)
* 0 1 2 3
* 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
* +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
* | Type = 0xC1 | Chunk Flags | Chunk Length |
* +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
* | Serial Number |
* +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
* | Address Parameter |
* +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
* | ASCONF Parameter #1 |
* +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
* \ \
* / .... /
* \ \
* +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
* | ASCONF Parameter #N |
* +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
*
* Address Parameter and other parameters will not be wrapped in this function
*/
struct sctp_chunk *sctp_make_asconf(struct sctp_association *asoc,
union sctp_addr *addr, int vparam_len)
{
sctp_addiphdr_t asconf;
struct sctp_chunk *retval;
int length = sizeof(asconf) + vparam_len;
union sctp_addr_param addrparam;
int addrlen;
addrlen = sockaddr2sctp_addr(addr, &addrparam);
if (!addrlen)
return NULL;
length += addrlen;
/* Create the chunk. */
retval = sctp_make_chunk(asoc, SCTP_CID_ASCONF, 0, length);
if (!retval)
return NULL;
asconf.serial = asoc->addip_serial++;
retval->subh.addip_hdr =
sctp_addto_chunk(retval, sizeof(asconf), &asconf);
retval->param_hdr.v =
sctp_addto_chunk(retval, addrlen, &addrparam);
return retval;
}
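/*
 * Usage sketch, not part of this patch.  A caller advertising a new local
 * address might reserve room for one TLV and append it with
 * sctp_addto_chunk(); SCTP_PARAM_ADD_IP and the bare-header TLV layout are
 * assumptions here, since this patch only adds the chunk builder itself.
 */
static struct sctp_chunk *asconf_add_ip_sketch(struct sctp_association *asoc,
					       union sctp_addr *new_addr)
{
	struct sctp_chunk *asconf;
	sctp_paramhdr_t param;
	int paramlen = sizeof(param);

	asconf = sctp_make_asconf(asoc, new_addr, paramlen);
	if (!asconf)
		return NULL;

	param.type = SCTP_PARAM_ADD_IP;		/* hypothetical ADDIP TLV type */
	param.length = htons(paramlen);
	sctp_addto_chunk(asconf, paramlen, &param);
	return asconf;
}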
/*
* Unpack the parameters in an ASCONF chunk into an association and
* generate ASCONF-ACK chunk.
*
* ADDIP 3.1.2 Address Configuration Acknowledgement Chunk (ASCONF-ACK)
* 0 1 2 3
* 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
* +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
* | Type = 0x80 | Chunk Flags | Chunk Length |
* +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
* | Serial Number |
* +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
* | ASCONF Parameter Response#1 |
* +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
* \ \
* / .... /
* \ \
* +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
* | ASCONF Parameter Response#N |
* +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
*
* All the parameter responses will be added in this function.
*/
struct sctp_chunk *sctp_process_asconf(struct sctp_association *asoc,
struct sctp_chunk *asconf,
int vparam_len)
{
// FIXME: process asconf chunk
return NULL;
}
......@@ -294,6 +294,12 @@ void sctp_generate_t2_shutdown_event(unsigned long data)
sctp_generate_timeout_event(asoc, SCTP_EVENT_TIMEOUT_T2_SHUTDOWN);
}
void sctp_generate_t4_rto_event(unsigned long data)
{
struct sctp_association *asoc = (struct sctp_association *) data;
sctp_generate_timeout_event(asoc, SCTP_EVENT_TIMEOUT_T4_RTO);
}
void sctp_generate_t5_shutdown_guard_event(unsigned long data)
{
struct sctp_association *asoc = (struct sctp_association *)data;
......@@ -359,6 +365,7 @@ sctp_timer_event_t *sctp_timer_events[SCTP_NUM_TIMEOUT_TYPES] = {
sctp_generate_t1_init_event,
sctp_generate_t2_shutdown_event,
NULL,
sctp_generate_t4_rto_event,
sctp_generate_t5_shutdown_guard_event,
sctp_generate_heartbeat_event,
sctp_generate_sack_event,
......@@ -666,6 +673,23 @@ static void sctp_cmd_delete_tcb(sctp_cmd_seq_t *cmds,
sctp_association_free(asoc);
}
/*
* ADDIP Section 4.1 ASCONF Chunk Procedures
* A4) Start a T-4 RTO timer, using the RTO value of the selected
* destination address (normally the primary path; see RFC2960
* section 6.4 for details).
*/
static void sctp_cmd_setup_t4(sctp_cmd_seq_t *cmds,
struct sctp_association *asoc,
struct sctp_chunk *chunk)
{
struct sctp_transport *t;
t = asoc->peer.primary_path;
asoc->timeouts[SCTP_EVENT_TIMEOUT_T4_RTO] = t->rto;
chunk->transport = t;
}
/* These three macros allow us to pull the debugging code out of the
* main flow of sctp_do_sm() to keep attention focused on the real
* functionality there.
......@@ -1177,6 +1201,10 @@ int sctp_cmd_interpreter(sctp_event_t event_type, sctp_subtype_t subtype,
GFP_ATOMIC);
break;
case SCTP_CMD_SETUP_T4:
sctp_cmd_setup_t4(commands, asoc, cmd->obj.ptr);
break;
default:
printk(KERN_WARNING "Impossible command: %u, %p\n",
cmd->verb, cmd->obj.ptr);
......
......@@ -3062,6 +3062,36 @@ sctp_disposition_t sctp_sf_do_8_5_1_E_sa(const struct sctp_endpoint *ep,
return sctp_sf_shut_8_4_5(ep, NULL, type, arg, commands);
}
/*
* ADDIP Section 4.2 Upon reception of an ASCONF Chunk
* When an endpoint receives an ASCONF Chunk from the remote peer
* special procedures MAY be needed to identify the association the
* ASCONF Chunk is associated with. To properly find the association
* the following procedures, L1 to L4 and C1 to C5, should be followed.
*/
sctp_disposition_t sctp_sf_do_asconf(const struct sctp_endpoint *ep,
const struct sctp_association *asoc,
const sctp_subtype_t type, void *arg,
sctp_cmd_seq_t *commands)
{
// FIXME: Handle the ASCONF chunk
return SCTP_DISPOSITION_CONSUME;
}
/*
* ADDIP Section 4.3 General rules for address manipulation
* When building TLV parameters for the ASCONF Chunk that will add or
* delete IP addresses the D0 to D13 rules should be applied:
*/
sctp_disposition_t sctp_sf_do_asconf_ack(const struct sctp_endpoint *ep,
const struct sctp_association *asoc,
const sctp_subtype_t type, void *arg,
sctp_cmd_seq_t *commands)
{
// FIXME: Handle the ASCONF-ACK chunk
return SCTP_DISPOSITION_CONSUME;
}
/*
* Process an unknown chunk.
*
......@@ -3815,6 +3845,26 @@ sctp_disposition_t sctp_sf_do_prm_requestheartbeat(
commands);
}
/*
* ADDIP Section 4.1 ASCONF Chunk Procedures
* When an endpoint has an ASCONF signaled change to be sent to the
* remote endpoint it should follow steps A1 to A9.
*/
sctp_disposition_t sctp_sf_do_prm_asconf(const struct sctp_endpoint *ep,
const struct sctp_association *asoc,
const sctp_subtype_t type,
void *arg,
sctp_cmd_seq_t *commands)
{
struct sctp_chunk *chunk = arg;
sctp_add_cmd_sf(commands, SCTP_CMD_SETUP_T4, SCTP_CHUNK(chunk));
sctp_add_cmd_sf(commands, SCTP_CMD_TIMER_START,
SCTP_TO(SCTP_EVENT_TIMEOUT_T4_RTO));
sctp_add_cmd_sf(commands, SCTP_CMD_REPLY, SCTP_CHUNK(chunk));
return SCTP_DISPOSITION_CONSUME;
}
/*
* Ignore the primitive event
*
......@@ -4213,6 +4263,21 @@ sctp_disposition_t sctp_sf_t2_timer_expire(const struct sctp_endpoint *ep,
return SCTP_DISPOSITION_NOMEM;
}
/*
* ADDIP Section 4.1 ASCONF Chunk Procedures
* If the T-4 RTO timer expires the endpoint should follow steps B1 to B5.
*/
sctp_disposition_t sctp_sf_t4_timer_expire(
const struct sctp_endpoint *ep,
const struct sctp_association *asoc,
const sctp_subtype_t type,
void *arg,
sctp_cmd_seq_t *commands)
{
	/* FIXME: Handle the T-4 RTO timer expiration. */
return SCTP_DISPOSITION_CONSUME;
}
/* sctpimpguide-05 Section 2.12.2
* The sender of the SHUTDOWN MAY also start an overall guard timer
* 'T5-shutdown-guard' to bound the overall time for shutdown sequence.
......
......@@ -436,6 +436,55 @@ const sctp_sm_table_entry_t chunk_event_table[SCTP_NUM_BASE_CHUNK_TYPES][SCTP_ST
TYPE_SCTP_SHUTDOWN_COMPLETE,
}; /* state_fn_t chunk_event_table[][] */
#define TYPE_SCTP_ASCONF { \
/* SCTP_STATE_EMPTY */ \
{.fn = sctp_sf_ootb, .name = "sctp_sf_ootb"}, \
/* SCTP_STATE_CLOSED */ \
{.fn = sctp_sf_discard_chunk, .name = "sctp_sf_discard_chunk"}, \
/* SCTP_STATE_COOKIE_WAIT */ \
{.fn = sctp_sf_discard_chunk, .name = "sctp_sf_discard_chunk"}, \
/* SCTP_STATE_COOKIE_ECHOED */ \
{.fn = sctp_sf_discard_chunk, .name = "sctp_sf_discard_chunk"}, \
/* SCTP_STATE_ESTABLISHED */ \
{.fn = sctp_sf_do_asconf, .name = "sctp_sf_do_asconf"}, \
/* SCTP_STATE_SHUTDOWN_PENDING */ \
{.fn = sctp_sf_discard_chunk, .name = "sctp_sf_discard_chunk"}, \
/* SCTP_STATE_SHUTDOWN_SENT */ \
{.fn = sctp_sf_discard_chunk, .name = "sctp_sf_discard_chunk"}, \
/* SCTP_STATE_SHUTDOWN_RECEIVED */ \
{.fn = sctp_sf_discard_chunk, .name = "sctp_sf_discard_chunk"}, \
/* SCTP_STATE_SHUTDOWN_ACK_SENT */ \
{.fn = sctp_sf_discard_chunk, .name = "sctp_sf_discard_chunk"}, \
} /* TYPE_SCTP_ASCONF */
#define TYPE_SCTP_ASCONF_ACK { \
/* SCTP_STATE_EMPTY */ \
{.fn = sctp_sf_ootb, .name = "sctp_sf_ootb"}, \
/* SCTP_STATE_CLOSED */ \
{.fn = sctp_sf_discard_chunk, .name = "sctp_sf_discard_chunk"}, \
/* SCTP_STATE_COOKIE_WAIT */ \
{.fn = sctp_sf_discard_chunk, .name = "sctp_sf_discard_chunk"}, \
/* SCTP_STATE_COOKIE_ECHOED */ \
{.fn = sctp_sf_discard_chunk, .name = "sctp_sf_discard_chunk"}, \
/* SCTP_STATE_ESTABLISHED */ \
{.fn = sctp_sf_do_asconf_ack, .name = "sctp_sf_do_asconf_ack"}, \
/* SCTP_STATE_SHUTDOWN_PENDING */ \
{.fn = sctp_sf_discard_chunk, .name = "sctp_sf_discard_chunk"}, \
/* SCTP_STATE_SHUTDOWN_SENT */ \
{.fn = sctp_sf_discard_chunk, .name = "sctp_sf_discard_chunk"}, \
/* SCTP_STATE_SHUTDOWN_RECEIVED */ \
{.fn = sctp_sf_discard_chunk, .name = "sctp_sf_discard_chunk"}, \
/* SCTP_STATE_SHUTDOWN_ACK_SENT */ \
{.fn = sctp_sf_discard_chunk, .name = "sctp_sf_discard_chunk"}, \
} /* TYPE_SCTP_ASCONF_ACK */
/* The primary index for this table is the chunk type.
* The secondary index for this table is the state.
*/
const sctp_sm_table_entry_t addip_chunk_event_table[SCTP_NUM_ADDIP_CHUNK_TYPES][SCTP_STATE_NUM_STATES] = {
TYPE_SCTP_ASCONF,
TYPE_SCTP_ASCONF_ACK,
}; /* state_fn_t addip_chunk_event_table[][] */
static const sctp_sm_table_entry_t
chunk_event_table_unknown[SCTP_STATE_NUM_STATES] = {
......@@ -582,6 +631,26 @@ chunk_event_table_unknown[SCTP_STATE_NUM_STATES] = {
.name = "sctp_sf_do_prm_requestheartbeat"}, \
} /* TYPE_SCTP_PRIMITIVE_REQUESTHEARTBEAT */
#define TYPE_SCTP_PRIMITIVE_ASCONF { \
/* SCTP_STATE_EMPTY */ \
{.fn = sctp_sf_bug, .name = "sctp_sf_bug"}, \
/* SCTP_STATE_CLOSED */ \
{.fn = sctp_sf_error_closed, .name = "sctp_sf_error_closed"}, \
/* SCTP_STATE_COOKIE_WAIT */ \
{.fn = sctp_sf_error_closed, .name = "sctp_sf_error_closed"}, \
/* SCTP_STATE_COOKIE_ECHOED */ \
{.fn = sctp_sf_error_closed, .name = "sctp_sf_error_closed"}, \
/* SCTP_STATE_ESTABLISHED */ \
{.fn = sctp_sf_do_prm_asconf, .name = "sctp_sf_do_prm_asconf"}, \
/* SCTP_STATE_SHUTDOWN_PENDING */ \
{.fn = sctp_sf_error_shutdown, .name = "sctp_sf_error_shutdown"}, \
/* SCTP_STATE_SHUTDOWN_SENT */ \
{.fn = sctp_sf_error_shutdown, .name = "sctp_sf_error_shutdown"}, \
/* SCTP_STATE_SHUTDOWN_RECEIVED */ \
{.fn = sctp_sf_error_shutdown, .name = "sctp_sf_error_shutdown"}, \
/* SCTP_STATE_SHUTDOWN_ACK_SENT */ \
{.fn = sctp_sf_error_shutdown, .name = "sctp_sf_error_shutdown"}, \
} /* TYPE_SCTP_PRIMITIVE_ASCONF */
/* The primary index for this table is the primitive type.
* The secondary index for this table is the state.
......@@ -592,6 +661,7 @@ const sctp_sm_table_entry_t primitive_event_table[SCTP_NUM_PRIMITIVE_TYPES][SCTP
TYPE_SCTP_PRIMITIVE_ABORT,
TYPE_SCTP_PRIMITIVE_SEND,
TYPE_SCTP_PRIMITIVE_REQUESTHEARTBEAT,
TYPE_SCTP_PRIMITIVE_ASCONF,
};
#define TYPE_SCTP_OTHER_NO_PENDING_TSN { \
......@@ -726,6 +796,27 @@ const sctp_sm_table_entry_t other_event_table[SCTP_NUM_OTHER_TYPES][SCTP_STATE_N
{.fn = sctp_sf_timer_ignore, .name = "sctp_sf_timer_ignore"}, \
}
#define TYPE_SCTP_EVENT_TIMEOUT_T4_RTO { \
/* SCTP_STATE_EMPTY */ \
{.fn = sctp_sf_bug, .name = "sctp_sf_bug"}, \
/* SCTP_STATE_CLOSED */ \
{.fn = sctp_sf_timer_ignore, .name = "sctp_sf_timer_ignore"}, \
/* SCTP_STATE_COOKIE_WAIT */ \
{.fn = sctp_sf_timer_ignore, .name = "sctp_sf_timer_ignore"}, \
/* SCTP_STATE_COOKIE_ECHOED */ \
{.fn = sctp_sf_timer_ignore, .name = "sctp_sf_timer_ignore"}, \
/* SCTP_STATE_ESTABLISHED */ \
{.fn = sctp_sf_t4_timer_expire, .name = "sctp_sf_t4_timer_expire"}, \
/* SCTP_STATE_SHUTDOWN_PENDING */ \
{.fn = sctp_sf_timer_ignore, .name = "sctp_sf_timer_ignore"}, \
/* SCTP_STATE_SHUTDOWN_SENT */ \
{.fn = sctp_sf_timer_ignore, .name = "sctp_sf_timer_ignore"}, \
/* SCTP_STATE_SHUTDOWN_RECEIVED */ \
{.fn = sctp_sf_timer_ignore, .name = "sctp_sf_timer_ignore"}, \
/* SCTP_STATE_SHUTDOWN_ACK_SENT */ \
{.fn = sctp_sf_timer_ignore, .name = "sctp_sf_timer_ignore"}, \
}
#define TYPE_SCTP_EVENT_TIMEOUT_T5_SHUTDOWN_GUARD { \
/* SCTP_STATE_EMPTY */ \
{.fn = sctp_sf_bug, .name = "sctp_sf_bug"}, \
......@@ -817,6 +908,7 @@ const sctp_sm_table_entry_t timeout_event_table[SCTP_NUM_TIMEOUT_TYPES][SCTP_STA
TYPE_SCTP_EVENT_TIMEOUT_T1_INIT,
TYPE_SCTP_EVENT_TIMEOUT_T2_SHUTDOWN,
TYPE_SCTP_EVENT_TIMEOUT_T3_RTX,
TYPE_SCTP_EVENT_TIMEOUT_T4_RTO,
TYPE_SCTP_EVENT_TIMEOUT_T5_SHUTDOWN_GUARD,
TYPE_SCTP_EVENT_TIMEOUT_HEARTBEAT,
TYPE_SCTP_EVENT_TIMEOUT_SACK,
......@@ -833,5 +925,10 @@ const sctp_sm_table_entry_t *sctp_chunk_event_lookup(sctp_cid_t cid,
return &chunk_event_table[cid][state];
}
if (cid >= SCTP_CID_ADDIP_MIN && cid <= SCTP_CID_ADDIP_MAX) {
return &addip_chunk_event_table
[cid - SCTP_CID_ADDIP_MIN][state];
}
return &chunk_event_table_unknown[state];
}
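The lookup now falls through to the ADDIP table when the chunk ID lies in the ADDIP range, indexing it by the offset from SCTP_CID_ADDIP_MIN. A small standalone sketch of this range-offset dispatch (hypothetical constants and table contents, not the kernel's):

#include <stdio.h>

#define NUM_STATES     9
#define CID_BASE_MAX   14    /* hypothetical: last base chunk id       */
#define CID_ADDIP_MIN  0xC1  /* hypothetical ASCONF chunk id range     */
#define CID_ADDIP_MAX  0xC2

static const char *base_table[CID_BASE_MAX + 1][NUM_STATES];
static const char *addip_table[CID_ADDIP_MAX - CID_ADDIP_MIN + 1][NUM_STATES] = {
	[0][4] = "do_asconf",      /* ESTABLISHED handles ASCONF     */
	[1][4] = "do_asconf_ack",  /* ESTABLISHED handles ASCONF-ACK */
};

/* Dispatch by chunk id: base table first, then the ADDIP range. */
static const char *lookup(int cid, int state)
{
	if (cid >= 0 && cid <= CID_BASE_MAX)
		return base_table[cid][state];
	if (cid >= CID_ADDIP_MIN && cid <= CID_ADDIP_MAX)
		return addip_table[cid - CID_ADDIP_MIN][state];
	return "unknown_chunk";
}

int main(void)
{
	printf("%s\n", lookup(0xC1, 4)); /* prints do_asconf */
	return 0;
}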
......@@ -455,47 +455,6 @@ int sctp_bindx_add(struct sock *sk, struct sockaddr_storage *addrs, int addrcnt)
}
}
/* Notify the peer(s), assuming we have (an) association(s).
* FIXME: for UDP, we have a 1-1-many mapping amongst sk, ep and asoc,
* so we don't have to do much work on locating associations.
*
* However, when the separation of ep and asoc kicks in, especially
* for TCP style connection, it becomes n-1-n mapping. We will need
* to do more fine work. Until then, hold my peace.
* --xguo
*
* Really, I don't think that will be a problem. The bind()
* call on a socket will either know the endpoint
* (e.g. TCP-style listen()ing socket, or UDP-style socket),
* or exactly one association. The former case is EXACTLY
* what we have now. In the former case we know the
* association already. --piggy
*
* This code will be working on either a UDP style or a TCP style
* socket, or say either an endpoint or an association. The socket
 * type verification code needs to be added later before calling the
* ADDIP code.
* --daisy
*/
#ifdef CONFIG_IP_SCTP_ADDIP
/* Add these addresses to all associations on this endpoint. */
if (retval >= 0) {
struct list_head *pos;
struct sctp_endpoint *ep;
struct sctp_association *asoc;
ep = sctp_sk(sk)->ep;
list_for_each(pos, &ep->asocs) {
asoc = list_entry(pos, struct sctp_association, asocs);
sctp_addip_addr_config(asoc,
SCTP_PARAM_ADD_IP,
addrs, addrcnt);
}
}
#endif
return retval;
}
......@@ -591,28 +550,6 @@ int sctp_bindx_rem(struct sock *sk, struct sockaddr_storage *addrs, int addrcnt)
}
}
/*
* This code will be working on either a UDP style or a TCP style
 * socket, or say either an endpoint or an association. The socket
 * type verification code needs to be added later before calling the
* ADDIP code.
* --daisy
*/
#ifdef CONFIG_IP_SCTP_ADDIP
/* Remove these addresses from all associations on this endpoint. */
if (retval >= 0) {
struct list_head *pos;
struct sctp_endpoint *ep;
struct sctp_association *asoc;
ep = sctp_sk(sk)->ep;
list_for_each(pos, &ep->asocs) {
asoc = list_entry(pos, struct sctp_association, asocs);
sctp_addip_addr_config(asoc, SCTP_PARAM_DEL_IP,
addrs, addrcnt);
}
}
#endif
return retval;
}
......
......@@ -350,8 +350,11 @@ void x25_destroy_socket(struct sock *sk)
sk->sk_timer.function = x25_destroy_timer;
sk->sk_timer.data = (unsigned long)sk;
add_timer(&sk->sk_timer);
} else
sk_free(sk);
} else {
/* drop last reference so sock_put will free */
__sock_put(sk);
}
release_sock(sk);
sock_put(sk);
}
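Rather than freeing the sock directly, the destroy path now drops the extra reference with __sock_put() and lets the final sock_put() at the end of the function perform the actual free, keeping the refcount model consistent. A toy illustration of that drop-then-final-put pattern (hypothetical helpers, nothing from the kernel):

#include <stdio.h>
#include <stdlib.h>

/* Toy refcounted object standing in for a struct sock. */
struct obj { int refcnt; };

/* Like sock_put(): drops a reference and frees on the last one. */
static void put(struct obj *o)
{
	if (--o->refcnt == 0) {
		printf("freeing object\n");
		free(o);
	}
}

/* Like __sock_put(): drops a reference but never frees. */
static void put_noref(struct obj *o)
{
	--o->refcnt;
}

int main(void)
{
	struct obj *o = malloc(sizeof(*o));

	o->refcnt = 2;   /* caller holds one ref, protocol holds one   */
	put_noref(o);    /* drop the protocol's reference, no free yet */
	put(o);          /* caller's final put actually frees          */
	return 0;
}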
......@@ -553,7 +556,7 @@ static int x25_release(struct socket *sock)
case X25_STATE_2:
x25_disconnect(sk, 0, 0, 0);
x25_destroy_socket(sk);
break;
goto out;
case X25_STATE_1:
case X25_STATE_3:
......