Commit c5d40033 authored by Linus Torvalds

Merge bk://kernel.bkbits.net/davem/net-2.6

into ppc970.osdl.org:/home/torvalds/v2.6/linux
parents e7830385 f6ee1ee5
......@@ -432,4 +432,10 @@ enum {
#define TCA_ATM_MAX TCA_ATM_STATE
/* Delay section */
struct tc_dly_qopt
{
__u32 latency;
__u32 limit;
};
#endif
......@@ -77,7 +77,7 @@ struct flowi {
#define fl_icmp_type uli_u.icmpt.type
#define fl_icmp_code uli_u.icmpt.code
#define fl_ipsec_spi uli_u.spi
};
} __attribute__((__aligned__(BITS_PER_LONG/8)));
#define FLOW_DIR_IN 0
#define FLOW_DIR_OUT 1
......
......@@ -901,6 +901,7 @@ static struct xfrm_state * pfkey_msg2xfrm_state(struct sadb_msg *hdr,
struct sadb_sa *sa;
struct sadb_key *key;
uint16_t proto;
int err;
sa = (struct sadb_sa *) ext_hdrs[SADB_EXT_SA-1];
......@@ -922,6 +923,9 @@ static struct xfrm_state * pfkey_msg2xfrm_state(struct sadb_msg *hdr,
if (proto == 0)
return ERR_PTR(-EINVAL);
/* default error is no buffer space */
err = -ENOBUFS;
/* RFC2367:
Only SADB_SASTATE_MATURE SAs may be submitted in an SADB_ADD message.
......@@ -980,8 +984,10 @@ static struct xfrm_state * pfkey_msg2xfrm_state(struct sadb_msg *hdr,
if (sa->sadb_sa_auth) {
int keysize = 0;
struct xfrm_algo_desc *a = xfrm_aalg_get_byid(sa->sadb_sa_auth);
if (!a)
if (!a) {
err = -ENOSYS;
goto out;
}
if (key)
keysize = (key->sadb_key_bits + 7) / 8;
x->aalg = kmalloc(sizeof(*x->aalg) + keysize, GFP_KERNEL);
......@@ -999,8 +1005,10 @@ static struct xfrm_state * pfkey_msg2xfrm_state(struct sadb_msg *hdr,
if (sa->sadb_sa_encrypt) {
if (hdr->sadb_msg_satype == SADB_X_SATYPE_IPCOMP) {
struct xfrm_algo_desc *a = xfrm_calg_get_byid(sa->sadb_sa_encrypt);
if (!a)
if (!a) {
err = -ENOSYS;
goto out;
}
x->calg = kmalloc(sizeof(*x->calg), GFP_KERNEL);
if (!x->calg)
goto out;
......@@ -1009,8 +1017,10 @@ static struct xfrm_state * pfkey_msg2xfrm_state(struct sadb_msg *hdr,
} else {
int keysize = 0;
struct xfrm_algo_desc *a = xfrm_ealg_get_byid(sa->sadb_sa_encrypt);
if (!a)
if (!a) {
err = -ENOSYS;
goto out;
}
key = (struct sadb_key*) ext_hdrs[SADB_EXT_KEY_ENCRYPT-1];
if (key)
keysize = (key->sadb_key_bits + 7) / 8;
......@@ -1030,8 +1040,10 @@ static struct xfrm_state * pfkey_msg2xfrm_state(struct sadb_msg *hdr,
x->props.family = pfkey_sadb_addr2xfrm_addr((struct sadb_address *) ext_hdrs[SADB_EXT_ADDRESS_SRC-1],
&x->props.saddr);
if (!x->props.family)
if (!x->props.family) {
err = -EAFNOSUPPORT;
goto out;
}
pfkey_sadb_addr2xfrm_addr((struct sadb_address *) ext_hdrs[SADB_EXT_ADDRESS_DST-1],
&x->id.daddr);
......@@ -1076,10 +1088,14 @@ static struct xfrm_state * pfkey_msg2xfrm_state(struct sadb_msg *hdr,
}
x->type = xfrm_get_type(proto, x->props.family);
if (x->type == NULL)
if (x->type == NULL) {
err = -ENOPROTOOPT;
goto out;
if (x->type->init_state(x, NULL))
}
if (x->type->init_state(x, NULL)) {
err = -EINVAL;
goto out;
}
x->km.seq = hdr->sadb_msg_seq;
x->km.state = XFRM_STATE_VALID;
return x;
......@@ -1087,7 +1103,7 @@ static struct xfrm_state * pfkey_msg2xfrm_state(struct sadb_msg *hdr,
out:
x->km.state = XFRM_STATE_DEAD;
xfrm_state_put(x);
return ERR_PTR(-ENOBUFS);
return ERR_PTR(err);
}
static int pfkey_reserved(struct sock *sk, struct sk_buff *skb, struct sadb_msg *hdr, void **ext_hdrs)
......
......@@ -164,6 +164,17 @@ config NET_SCH_DSMARK
To compile this code as a module, choose M here: the
module will be called sch_dsmark.
config NET_SCH_DELAY
tristate "Delay simulator"
depends on NET_SCHED
help
Say Y if you want to delay packets by a fixed amount of
time. This is often useful to simulate network delay when
testing applications or protocols.
To compile this driver as a module, choose M here: the module
will be called sch_delay.
config NET_SCH_INGRESS
tristate "Ingress Qdisc"
depends on NET_SCHED && NETFILTER
......
......@@ -22,6 +22,7 @@ obj-$(CONFIG_NET_SCH_TBF) += sch_tbf.o
obj-$(CONFIG_NET_SCH_TEQL) += sch_teql.o
obj-$(CONFIG_NET_SCH_PRIO) += sch_prio.o
obj-$(CONFIG_NET_SCH_ATM) += sch_atm.o
obj-$(CONFIG_NET_SCH_DELAY) += sch_delay.o
obj-$(CONFIG_NET_CLS_U32) += cls_u32.o
obj-$(CONFIG_NET_CLS_ROUTE4) += cls_route.o
obj-$(CONFIG_NET_CLS_FW) += cls_fw.o
......
/*
* net/sched/sch_delay.c Simple constant delay
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* as published by the Free Software Foundation; either version
* 2 of the License, or (at your option) any later version.
*
* Authors: Stephen Hemminger <shemminger@osdl.org>
*/
#include <linux/config.h>
#include <linux/module.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/mm.h>
#include <linux/socket.h>
#include <linux/sockios.h>
#include <linux/in.h>
#include <linux/errno.h>
#include <linux/interrupt.h>
#include <linux/if_ether.h>
#include <linux/inet.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/notifier.h>
#include <net/ip.h>
#include <net/route.h>
#include <linux/skbuff.h>
#include <net/sock.h>
#include <net/pkt_sched.h>
/* Network delay simulator
   This scheduler adds a fixed delay to all packets.
   Similar to NISTnet and BSD Dummynet.
   It uses a byte fifo underneath, similar to TBF. */
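/* Worked example (hypothetical numbers, units are PSCHED clock ticks):
 * with latency = 100000, a packet stamped at enqueue time 5000 and examined
 * at dequeue time 60000 has waited 55000 ticks, so 45000 remain; dly_dequeue
 * requeues it, marks the qdisc throttled and arms the watchdog for
 * PSCHED_US2JIFFIE(45000) jiffies (at least one jiffy).  Once the wait
 * reaches the configured latency the packet is released immediately.
 */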
struct dly_sched_data {
u32 latency;
u32 limit;
struct timer_list timer;
struct Qdisc *qdisc;
};
/* Time stamp put into socket buffer control block */
struct dly_skb_cb {
psched_time_t queuetime;
};
/* Enqueue packets with underlying discipline (fifo)
* but mark them with current time first.
*/
static int dly_enqueue(struct sk_buff *skb, struct Qdisc *sch)
{
struct dly_sched_data *q = (struct dly_sched_data *)sch->data;
struct dly_skb_cb *cb = (struct dly_skb_cb *)skb->cb;
int ret;
PSCHED_GET_TIME(cb->queuetime);
/* Queue to underlying scheduler */
ret = q->qdisc->enqueue(skb, q->qdisc);
if (ret)
sch->stats.drops++;
else {
sch->q.qlen++;
sch->stats.bytes += skb->len;
sch->stats.packets++;
}
return ret;	/* propagate the child qdisc's verdict so callers see drops */
}
/* Requeue packets but don't change time stamp */
static int dly_requeue(struct sk_buff *skb, struct Qdisc *sch)
{
struct dly_sched_data *q = (struct dly_sched_data *)sch->data;
int ret;
ret = q->qdisc->ops->requeue(skb, q->qdisc);
if (ret == 0)
sch->q.qlen++;
return ret;
}
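/* Drop one packet from the underlying fifo and account for it here. */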
static unsigned int dly_drop(struct Qdisc *sch)
{
struct dly_sched_data *q = (struct dly_sched_data *)sch->data;
unsigned int len;
len = q->qdisc->ops->drop(q->qdisc);
if (len) {
sch->q.qlen--;
sch->stats.drops++;
}
return len;
}
/* Dequeue packet.
* If packet needs to be held up, then stop the
* queue and set timer to wakeup later.
*/
static struct sk_buff *dly_dequeue(struct Qdisc *sch)
{
struct dly_sched_data *q = (struct dly_sched_data *)sch->data;
struct sk_buff *skb = q->qdisc->dequeue(q->qdisc);
if (skb) {
struct dly_skb_cb *cb = (struct dly_skb_cb *)skb->cb;
psched_time_t now;
long diff;
PSCHED_GET_TIME(now);
diff = q->latency - PSCHED_TDIFF(now, cb->queuetime);
if (diff <= 0) {
sch->q.qlen--;
sch->flags &= ~TCQ_F_THROTTLED;
return skb;
}
if (!netif_queue_stopped(sch->dev)) {
long delay = PSCHED_US2JIFFIE(diff);
if (delay <= 0)
delay = 1;
mod_timer(&q->timer, jiffies+delay);
}
if (q->qdisc->ops->requeue(skb, q->qdisc) != NET_XMIT_SUCCESS) {
sch->q.qlen--;
sch->stats.drops++;
}
sch->flags |= TCQ_F_THROTTLED;
}
return NULL;
}
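/* Flush the underlying fifo, clear the throttled flag and stop the watchdog. */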
static void dly_reset(struct Qdisc *sch)
{
struct dly_sched_data *q = (struct dly_sched_data *)sch->data;
qdisc_reset(q->qdisc);
sch->q.qlen = 0;
sch->flags &= ~TCQ_F_THROTTLED;
del_timer(&q->timer);
}
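/* Watchdog: the head packet's delay has expired; clear the throttled flag
 * and reschedule the device so dequeue is tried again.
 */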
static void dly_timer(unsigned long arg)
{
struct Qdisc *sch = (struct Qdisc *)arg;
sch->flags &= ~TCQ_F_THROTTLED;
netif_schedule(sch->dev);
}
/* Tell the underlying fifo its new limit by handing its ->change()
 * a tc_fifo_qopt wrapped in an rtattr.
 */
static int change_limit(struct Qdisc *q, u32 limit)
{
struct rtattr *rta;
int ret;
rta = kmalloc(RTA_LENGTH(sizeof(struct tc_fifo_qopt)), GFP_KERNEL);
if (!rta)
return -ENOMEM;
rta->rta_type = RTM_NEWQDISC;
rta->rta_len = RTA_LENGTH(sizeof(struct tc_fifo_qopt));
((struct tc_fifo_qopt *)RTA_DATA(rta))->limit = limit;
ret = q->ops->change(q, rta);
kfree(rta);
return ret;
}
/* Setup underlying FIFO discipline */
static int dly_change(struct Qdisc *sch, struct rtattr *opt)
{
struct dly_sched_data *q = (struct dly_sched_data *)sch->data;
struct tc_dly_qopt *qopt = RTA_DATA(opt);
int err;
/* Reject truncated options before reading qopt. */
if (opt->rta_len < RTA_LENGTH(sizeof(*qopt)))
return -EINVAL;
if (q->qdisc == &noop_qdisc) {
struct Qdisc *child
= qdisc_create_dflt(sch->dev, &bfifo_qdisc_ops);
if (!child)
return -EINVAL;
q->qdisc = child;
}
err = change_limit(q->qdisc, qopt->limit);
if (err) {
qdisc_destroy(q->qdisc);
q->qdisc = &noop_qdisc;
} else {
q->latency = qopt->latency;
q->limit = qopt->limit;
}
return err;
}
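/* Set up the wakeup timer, start with the noop qdisc as child,
 * then apply the requested latency and limit via dly_change().
 */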
static int dly_init(struct Qdisc *sch, struct rtattr *opt)
{
struct dly_sched_data *q = (struct dly_sched_data *)sch->data;
if (!opt)
return -EINVAL;
init_timer(&q->timer);
q->timer.function = dly_timer;
q->timer.data = (unsigned long) sch;
q->qdisc = &noop_qdisc;
return dly_change(sch, opt);
}
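/* Stop the watchdog and destroy the child qdisc. */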
static void dly_destroy(struct Qdisc *sch)
{
struct dly_sched_data *q = (struct dly_sched_data *)sch->data;
del_timer(&q->timer);
qdisc_destroy(q->qdisc);
q->qdisc = &noop_qdisc;
}
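/* Report the configured latency and limit to userspace as TCA_OPTIONS. */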
static int dly_dump(struct Qdisc *sch, struct sk_buff *skb)
{
struct dly_sched_data *q = (struct dly_sched_data *)sch->data;
unsigned char *b = skb->tail;
struct tc_dly_qopt qopt;
qopt.latency = q->latency;
qopt.limit = q->limit;
RTA_PUT(skb, TCA_OPTIONS, sizeof(qopt), &qopt);
return skb->len;
rtattr_failure:
skb_trim(skb, b - skb->data);
return -1;
}
static struct Qdisc_ops dly_qdisc_ops = {
.id = "delay",
.priv_size = sizeof(struct dly_sched_data),
.enqueue = dly_enqueue,
.dequeue = dly_dequeue,
.requeue = dly_requeue,
.drop = dly_drop,
.init = dly_init,
.reset = dly_reset,
.destroy = dly_destroy,
.change = dly_change,
.dump = dly_dump,
.owner = THIS_MODULE,
};
static int __init dly_module_init(void)
{
return register_qdisc(&dly_qdisc_ops);
}
static void __exit dly_module_exit(void)
{
unregister_qdisc(&dly_qdisc_ops);
}
module_init(dly_module_init)
module_exit(dly_module_exit)
MODULE_LICENSE("GPL");
......@@ -180,14 +180,12 @@ struct hfsc_class
struct hfsc_sched
{
u16 defcls; /* default class id */
struct hfsc_class root; /* root class */
struct hfsc_class *last_xmit; /* class that transmitted last
packet (for requeueing) */
struct list_head clhash[HFSC_HSIZE]; /* class hash */
struct list_head eligible; /* eligible list */
struct list_head droplist; /* active leaf class list (for
dropping) */
struct sk_buff_head requeue; /* requeued packet */
struct timer_list wd_timer; /* watchdog timer */
};
......@@ -566,8 +564,7 @@ sc2isc(struct tc_service_curve *sc, struct internal_sc *isc)
* service curve starting at (x, y).
*/
static void
rtsc_init(struct runtime_sc *rtsc, struct internal_sc *isc, u64 x,
u64 y)
rtsc_init(struct runtime_sc *rtsc, struct internal_sc *isc, u64 x, u64 y)
{
rtsc->x = x;
rtsc->y = y;
......@@ -626,8 +623,7 @@ rtsc_x2y(struct runtime_sc *rtsc, u64 x)
* runtime service curve and the service curve starting at (x, y).
*/
static void
rtsc_min(struct runtime_sc *rtsc, struct internal_sc *isc, u64 x,
u64 y)
rtsc_min(struct runtime_sc *rtsc, struct internal_sc *isc, u64 x, u64 y)
{
u64 y1, y2, dx, dy;
u32 dsm;
......@@ -1231,9 +1227,6 @@ hfsc_delete_class(struct Qdisc *sch, unsigned long arg)
list_del(&cl->siblings);
hfsc_adjust_levels(cl->cl_parent);
hfsc_purge_queue(sch, cl);
if (q->last_xmit == cl)
q->last_xmit = NULL;
if (--cl->refcnt == 0)
hfsc_destroy_class(sch, cl);
......@@ -1541,6 +1534,7 @@ hfsc_init_qdisc(struct Qdisc *sch, struct rtattr *opt)
INIT_LIST_HEAD(&q->clhash[i]);
INIT_LIST_HEAD(&q->eligible);
INIT_LIST_HEAD(&q->droplist);
skb_queue_head_init(&q->requeue);
q->root.refcnt = 1;
q->root.classid = sch->handle;
......@@ -1619,10 +1613,9 @@ hfsc_reset_qdisc(struct Qdisc *sch)
list_for_each_entry(cl, &q->clhash[i], hlist)
hfsc_reset_class(cl);
}
__skb_queue_purge(&q->requeue);
INIT_LIST_HEAD(&q->eligible);
INIT_LIST_HEAD(&q->droplist);
q->last_xmit = NULL;
del_timer(&q->wd_timer);
sch->flags &= ~TCQ_F_THROTTLED;
sch->q.qlen = 0;
......@@ -1639,7 +1632,7 @@ hfsc_destroy_qdisc(struct Qdisc *sch)
list_for_each_entry_safe(cl, next, &q->clhash[i], hlist)
hfsc_destroy_class(sch, cl);
}
__skb_queue_purge(&q->requeue);
del_timer(&q->wd_timer);
}
......@@ -1708,6 +1701,8 @@ hfsc_dequeue(struct Qdisc *sch)
if (sch->q.qlen == 0)
return NULL;
if ((skb = __skb_dequeue(&q->requeue)))
goto out;
PSCHED_GET_TIME(cur_time);
......@@ -1757,7 +1752,7 @@ hfsc_dequeue(struct Qdisc *sch)
set_passive(cl);
}
q->last_xmit = cl;
out:
sch->flags &= ~TCQ_F_THROTTLED;
sch->q.qlen--;
......@@ -1768,28 +1763,10 @@ static int
hfsc_requeue(struct sk_buff *skb, struct Qdisc *sch)
{
struct hfsc_sched *q = (struct hfsc_sched *)sch->data;
struct hfsc_class *cl = q->last_xmit;
unsigned int len = skb->len;
int ret;
if (cl == NULL) {
kfree_skb(skb);
sch->stats.drops++;
return NET_XMIT_DROP;
}
ret = cl->qdisc->ops->requeue(skb, cl->qdisc);
if (ret == NET_XMIT_SUCCESS) {
if (cl->qdisc->q.qlen == 1)
set_active(cl, len);
__skb_queue_head(&q->requeue, skb);
sch->q.qlen++;
} else {
cl->stats.drops++;
sch->stats.drops++;
}
q->last_xmit = NULL;
return ret;
return NET_XMIT_SUCCESS;
}
static unsigned int
......