Commit 0e91e70f authored by David S. Miller

Merge bk://212.42.230.204/net-2.6-sched

into nuts.davemloft.net:/disk1/BK/net-2.6
parents e18b2b9a 9b5d073a
......
@@ -239,8 +239,10 @@ static int tc_ctl_tfilter(struct sk_buff *skb, struct nlmsghdr *n, void *arg)
                  * replay the request. We indicate this using
                  * -EAGAIN.
                  */
-                if (tp_ops != NULL)
+                if (tp_ops != NULL) {
                     module_put(tp_ops->owner);
+                    err = -EAGAIN;
+                }
             }
 #endif
         kfree(tp);
......
......
@@ -125,20 +125,20 @@ static __inline__ int route4_hash_wild(void)
     return 32;
 }
 
-#define ROUTE4_APPLY_RESULT() \
-do { \
-    *res = f->res; \
-    if (tcf_exts_is_available(&f->exts)) { \
-        int r = tcf_exts_exec(skb, &f->exts, res); \
-        if (r < 0) { \
-            dont_cache = 1; \
-            continue; \
-        } \
-        return r; \
-    } else if (!dont_cache) \
-        route4_set_fastmap(head, id, iif, f); \
-    return 0; \
-} while(0)
+#define ROUTE4_APPLY_RESULT() \
+{ \
+    *res = f->res; \
+    if (tcf_exts_is_available(&f->exts)) { \
+        int r = tcf_exts_exec(skb, &f->exts, res); \
+        if (r < 0) { \
+            dont_cache = 1; \
+            continue; \
+        } \
+        return r; \
+    } else if (!dont_cache) \
+        route4_set_fastmap(head, id, iif, f); \
+    return 0; \
+}
 
 static int route4_classify(struct sk_buff *skb, struct tcf_proto *tp,
                            struct tcf_result *res)
......
@@ -384,9 +384,9 @@ static int route4_set_parms(struct tcf_proto *tp, unsigned long base,
             id = *(u32*)RTA_DATA(tb[TCA_ROUTE4_IIF-1]);
             if (id > 0x7FFF)
                 goto errout;
-            nhandle = (id | 0x8000) << 16;
+            nhandle |= (id | 0x8000) << 16;
         } else
-            nhandle = 0xFFFF << 16;
+            nhandle |= 0xFFFF << 16;
 
         if (handle && new) {
             nhandle |= handle & 0x7F00;
......
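ROUTE4_APPLY_RESULT() above and RSVP_APPLY_RESULT() below are switched from the usual do { ... } while (0) wrapper to a bare block because both macros contain a continue that is meant to advance the enclosing loop in route4_classify() / rsvp_classify(). Inside do { ... } while (0) that continue only jumps to the always-false loop condition and falls out of the macro, so the caller's loop is never actually continued. A minimal user-space sketch of the difference, with hypothetical macro names (this is not kernel code):

#include <stdio.h>

/* Wrapped in do { } while (0): "continue" restarts the inner do-while,
 * whose condition is false, so it merely falls out of the macro body. */
#define SKIP_ODD_DOWHILE(x)     \
    do {                        \
        if ((x) % 2)            \
            continue;           \
    } while (0)

/* Bare block: the same "continue" now belongs to the caller's loop. */
#define SKIP_ODD_BLOCK(x)       \
    {                           \
        if ((x) % 2)            \
            continue;           \
    }

int main(void)
{
    int i;

    for (i = 0; i < 4; i++) {
        SKIP_ODD_DOWHILE(i);
        printf("do-while variant saw %d\n", i);  /* prints 0, 1, 2, 3 */
    }
    for (i = 0; i < 4; i++) {
        SKIP_ODD_BLOCK(i);
        printf("block variant saw %d\n", i);     /* prints 0, 2 */
    }
    return 0;
}

The usual drawback of a bare-block macro, that a stray semicolon after it can break an unbraced if/else, is accepted here because the macro has to be able to drive the caller's loop control.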
......
@@ -123,14 +123,14 @@ static struct tcf_ext_map rsvp_ext_map = {
     .action = TCA_RSVP_ACT
 };
 
-#define RSVP_APPLY_RESULT() \
-do { \
-    int r = tcf_exts_exec(skb, &f->exts, res); \
-    if (r < 0) \
-        continue; \
-    else if (r > 0) \
-        return r; \
-} while(0)
+#define RSVP_APPLY_RESULT() \
+{ \
+    int r = tcf_exts_exec(skb, &f->exts, res); \
+    if (r < 0) \
+        continue; \
+    else if (r > 0) \
+        return r; \
+}
 
 static int rsvp_classify(struct sk_buff *skb, struct tcf_proto *tp,
                          struct tcf_result *res)
......
......
@@ -241,7 +241,7 @@ cbq_reclassify(struct sk_buff *skb, struct cbq_class *this)
  */
 
 static struct cbq_class *
-cbq_classify(struct sk_buff *skb, struct Qdisc *sch, int *qres)
+cbq_classify(struct sk_buff *skb, struct Qdisc *sch, int *qerr)
 {
     struct cbq_sched_data *q = qdisc_priv(sch);
     struct cbq_class *head = &q->link;
......
@@ -255,13 +255,11 @@ cbq_classify(struct sk_buff *skb, struct Qdisc *sch, int *qres)
      */
     if (TC_H_MAJ(prio^sch->handle) == 0 &&
         (cl = cbq_class_lookup(q, prio)) != NULL)
-            return cl;
+        return cl;
 
+    *qerr = NET_XMIT_DROP;
     for (;;) {
         int result = 0;
-#ifdef CONFIG_NET_CLS_ACT
-        int terminal = 0;
-#endif
         defmap = head->defaults;
 
         /*
......
@@ -282,27 +280,13 @@ cbq_classify(struct sk_buff *skb, struct Qdisc *sch, int *qres)
 #ifdef CONFIG_NET_CLS_ACT
         switch (result) {
-        case TC_ACT_SHOT: /* Stop and kfree */
-            *qres = NET_XMIT_DROP;
-            terminal = 1;
-            break;
         case TC_ACT_QUEUED:
         case TC_ACT_STOLEN:
-            terminal = 1;
-            break;
-        case TC_ACT_RECLASSIFY: /* Things look good */
-        case TC_ACT_OK:
-        case TC_ACT_UNSPEC:
-        default:
-            break;
-        }
-
-        if (terminal) {
-            kfree_skb(skb);
+            *qerr = NET_XMIT_SUCCESS;
+        case TC_ACT_SHOT:
             return NULL;
         }
-#else
-#ifdef CONFIG_NET_CLS_POLICE
+#elif defined(CONFIG_NET_CLS_POLICE)
         switch (result) {
         case TC_POLICE_RECLASSIFY:
             return cbq_reclassify(skb, cl);
......
@@ -311,7 +295,6 @@ cbq_classify(struct sk_buff *skb, struct Qdisc *sch, int *qres)
         default:
             break;
         }
-#endif
 #endif
         if (cl->level == 0)
             return cl;
......
@@ -423,45 +406,35 @@ cbq_enqueue(struct sk_buff *skb, struct Qdisc *sch)
 {
     struct cbq_sched_data *q = qdisc_priv(sch);
     int len = skb->len;
-    int ret = NET_XMIT_SUCCESS;
-    struct cbq_class *cl = cbq_classify(skb, sch,&ret);
+    int ret;
+    struct cbq_class *cl = cbq_classify(skb, sch, &ret);
 
 #ifdef CONFIG_NET_CLS_POLICE
     q->rx_class = cl;
 #endif
-    if (cl) {
-#ifdef CONFIG_NET_CLS_POLICE
-        cl->q->__parent = sch;
-#endif
-        if ((ret = cl->q->enqueue(skb, cl->q)) == NET_XMIT_SUCCESS) {
-            sch->q.qlen++;
-            sch->bstats.packets++;
-            sch->bstats.bytes+=len;
-            cbq_mark_toplevel(q, cl);
-            if (!cl->next_alive)
-                cbq_activate_class(cl);
-            return ret;
-        }
-    }
-
-#ifndef CONFIG_NET_CLS_ACT
-    sch->qstats.drops++;
-    if (cl == NULL)
+    if (cl == NULL) {
+        if (ret == NET_XMIT_DROP)
+            sch->qstats.drops++;
         kfree_skb(skb);
-    else {
-        cbq_mark_toplevel(q, cl);
-        cl->qstats.drops++;
-    }
-#else
-    if ( NET_XMIT_DROP == ret) {
-        sch->qstats.drops++;
         return ret;
     }
 
-    if (cl != NULL) {
+#ifdef CONFIG_NET_CLS_POLICE
+    cl->q->__parent = sch;
+#endif
+    if ((ret = cl->q->enqueue(skb, cl->q)) == NET_XMIT_SUCCESS) {
+        sch->q.qlen++;
+        sch->bstats.packets++;
+        sch->bstats.bytes+=len;
         cbq_mark_toplevel(q, cl);
-        cl->qstats.drops++;
+        if (!cl->next_alive)
+            cbq_activate_class(cl);
+        return ret;
     }
-#endif
+
+    sch->qstats.drops++;
+    cbq_mark_toplevel(q, cl);
+    cl->qstats.drops++;
     return ret;
 }
......
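The CBQ hunks above, and the HFSC, HTB and PRIO hunks below, convert the classify helpers from the *qres out-parameter to *qerr: the classifier presets *qerr to NET_XMIT_DROP before running the filters, overwrites it with NET_XMIT_SUCCESS only when an action has already queued or stolen the packet (and then falls through to the TC_ACT_SHOT case, returning NULL), and the enqueue function frees the skb itself, bumping the drop counter only when the verdict is still NET_XMIT_DROP. A self-contained user-space sketch of that control flow, using invented types and names rather than the kernel API:

#include <stdio.h>
#include <stdlib.h>

/* Invented stand-ins for the kernel's verdict values and packet type. */
enum { XMIT_SUCCESS = 0, XMIT_DROP = 1 };
enum { ACT_OK, ACT_SHOT, ACT_STOLEN, ACT_QUEUED };

struct pkt { int prio; };
struct filter { int verdict; };         /* what the "action" decided */

/* Returns a class id, or -1 when the packet must not be enqueued;
 * *qerr then says whether that counts as a drop. */
static int classify(struct pkt *p, struct filter *f, int *qerr)
{
    *qerr = XMIT_DROP;                  /* default: no class found == drop */

    switch (f->verdict) {
    case ACT_QUEUED:
    case ACT_STOLEN:
        *qerr = XMIT_SUCCESS;           /* consumed by an action: not a drop */
        /* fall through */
    case ACT_SHOT:
        return -1;
    }
    return p->prio;                     /* normal case: priority picks the class */
}

static int enqueue(struct pkt *p, struct filter *f, int *drops)
{
    int err;
    int cl = classify(p, f, &err);

    if (cl < 0) {
        if (err == XMIT_DROP)
            (*drops)++;                 /* only real drops hit the counter */
        free(p);                        /* the caller owns and frees the packet */
        return err;
    }
    printf("enqueued on class %d\n", cl);
    return XMIT_SUCCESS;
}

int main(void)
{
    int drops = 0;
    struct filter shot = { ACT_SHOT }, stolen = { ACT_STOLEN }, ok = { ACT_OK };
    struct pkt *a = malloc(sizeof(*a)), *b = malloc(sizeof(*b)), *c = malloc(sizeof(*c));

    a->prio = 1; b->prio = 2; c->prio = 3;
    enqueue(a, &shot, &drops);          /* shot: freed and counted as a drop */
    enqueue(b, &stolen, &drops);        /* stolen: freed but not counted */
    enqueue(c, &ok, &drops);            /* classified normally and "enqueued" */
    printf("drops = %d\n", drops);      /* prints: drops = 1 */
    free(c);
    return 0;
}

Defaulting the verdict to "drop" and downgrading it to "success" for stolen or already-queued packets keeps those packets out of the qdisc's drop statistics even though the caller never enqueues them.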
......
@@ -1214,7 +1214,7 @@ hfsc_delete_class(struct Qdisc *sch, unsigned long arg)
 }
 
 static struct hfsc_class *
-hfsc_classify(struct sk_buff *skb, struct Qdisc *sch, int *qres)
+hfsc_classify(struct sk_buff *skb, struct Qdisc *sch, int *qerr)
 {
     struct hfsc_sched *q = qdisc_priv(sch);
     struct hfsc_class *cl;
......
@@ -1227,35 +1227,20 @@ hfsc_classify(struct sk_buff *skb, struct Qdisc *sch, int *qres)
         if (cl->level == 0)
             return cl;
 
+    *qerr = NET_XMIT_DROP;
     tcf = q->root.filter_list;
     while (tcf && (result = tc_classify(skb, tcf, &res)) >= 0) {
 #ifdef CONFIG_NET_CLS_ACT
-        int terminal = 0;
-
         switch (result) {
-        case TC_ACT_SHOT:
-            *qres = NET_XMIT_DROP;
-            terminal = 1;
-            break;
         case TC_ACT_QUEUED:
         case TC_ACT_STOLEN:
-            terminal = 1;
-            break;
-        case TC_ACT_RECLASSIFY:
-        case TC_ACT_OK:
-        case TC_ACT_UNSPEC:
-        default:
-            break;
-        }
-
-        if (terminal) {
-            kfree_skb(skb);
+            *qerr = NET_XMIT_SUCCESS;
+        case TC_ACT_SHOT:
             return NULL;
         }
-#else
-#ifdef CONFIG_NET_CLS_POLICE
+#elif defined(CONFIG_NET_CLS_POLICE)
         if (result == TC_POLICE_SHOT)
             return NULL;
 #endif
-#endif
         if ((cl = (struct hfsc_class *)res.class) == NULL) {
             if ((cl = hfsc_find_class(res.classid, sch)) == NULL)
......
@@ -1652,27 +1637,19 @@ hfsc_dump_qdisc(struct Qdisc *sch, struct sk_buff *skb)
 static int
 hfsc_enqueue(struct sk_buff *skb, struct Qdisc *sch)
 {
-    int ret = NET_XMIT_SUCCESS;
-    struct hfsc_class *cl = hfsc_classify(skb, sch, &ret);
-    unsigned int len = skb->len;
+    struct hfsc_class *cl;
+    unsigned int len;
     int err;
 
-#ifdef CONFIG_NET_CLS_ACT
+    cl = hfsc_classify(skb, sch, &err);
     if (cl == NULL) {
-        if (NET_XMIT_DROP == ret) {
+        if (err == NET_XMIT_DROP)
             sch->qstats.drops++;
-        }
-        return ret;
-    }
-#else
-    if (cl == NULL) {
         kfree_skb(skb);
-        sch->qstats.drops++;
-        return NET_XMIT_DROP;
+        return err;
     }
-#endif
 
+    len = skb->len;
     err = cl->qdisc->enqueue(skb, cl->qdisc);
     if (unlikely(err != NET_XMIT_SUCCESS)) {
         cl->qstats.drops++;
......
......
@@ -305,7 +305,7 @@ static inline u32 htb_classid(struct htb_class *cl)
     return (cl && cl != HTB_DIRECT) ? cl->classid : TC_H_UNSPEC;
 }
 
-static struct htb_class *htb_classify(struct sk_buff *skb, struct Qdisc *sch, int *qres)
+static struct htb_class *htb_classify(struct sk_buff *skb, struct Qdisc *sch, int *qerr)
 {
     struct htb_sched *q = qdisc_priv(sch);
     struct htb_class *cl;
......
@@ -321,35 +321,20 @@ static struct htb_class *htb_classify(struct sk_buff *skb, struct Qdisc *sch, int *qres)
     if ((cl = htb_find(skb->priority,sch)) != NULL && cl->level == 0)
         return cl;
 
+    *qerr = NET_XMIT_DROP;
     tcf = q->filter_list;
     while (tcf && (result = tc_classify(skb, tcf, &res)) >= 0) {
 #ifdef CONFIG_NET_CLS_ACT
-        int terminal = 0;
-
         switch (result) {
-        case TC_ACT_SHOT: /* Stop and kfree */
-            *qres = NET_XMIT_DROP;
-            terminal = 1;
-            break;
         case TC_ACT_QUEUED:
         case TC_ACT_STOLEN:
-            terminal = 1;
-            break;
-        case TC_ACT_RECLASSIFY: /* Things look good */
-        case TC_ACT_OK:
-        case TC_ACT_UNSPEC:
-        default:
-            break;
-        }
-
-        if (terminal) {
-            kfree_skb(skb);
+            *qerr = NET_XMIT_SUCCESS;
+        case TC_ACT_SHOT:
             return NULL;
         }
-#else
-#ifdef CONFIG_NET_CLS_POLICE
+#elif defined(CONFIG_NET_CLS_POLICE)
         if (result == TC_POLICE_SHOT)
-            return NULL;
-#endif
+            return HTB_DIRECT;
 #endif
         if ((cl = (void*)res.class) == NULL) {
             if (res.classid == sch->handle)
......
@@ -723,37 +708,24 @@ htb_deactivate(struct htb_sched *q,struct htb_class *cl)
 static int htb_enqueue(struct sk_buff *skb, struct Qdisc *sch)
 {
-    int ret = NET_XMIT_SUCCESS;
+    int ret;
     struct htb_sched *q = qdisc_priv(sch);
     struct htb_class *cl = htb_classify(skb,sch,&ret);
 
-#ifdef CONFIG_NET_CLS_ACT
-    if (cl == HTB_DIRECT ) {
-        if (q->direct_queue.qlen < q->direct_qlen ) {
-            __skb_queue_tail(&q->direct_queue, skb);
-            q->direct_pkts++;
-        }
-    } else if (!cl) {
-        if (NET_XMIT_DROP == ret) {
-            sch->qstats.drops++;
-        }
-        return ret;
-    }
-#else
-    if (cl == HTB_DIRECT || !cl) {
+    if (cl == HTB_DIRECT) {
         /* enqueue to helper queue */
-        if (q->direct_queue.qlen < q->direct_qlen && cl) {
+        if (q->direct_queue.qlen < q->direct_qlen) {
             __skb_queue_tail(&q->direct_queue, skb);
             q->direct_pkts++;
         } else {
             kfree_skb (skb);
             sch->qstats.drops++;
             return NET_XMIT_DROP;
         }
-    }
+#ifdef CONFIG_NET_CLS_ACT
+    } else if (!cl) {
+        if (ret == NET_XMIT_DROP)
+            sch->qstats.drops++;
+        kfree_skb (skb);
+        return ret;
 #endif
-    else if (cl->un.leaf.q->enqueue(skb, cl->un.leaf.q) != NET_XMIT_SUCCESS) {
+    } else if (cl->un.leaf.q->enqueue(skb, cl->un.leaf.q) != NET_XMIT_SUCCESS) {
         sch->qstats.drops++;
         cl->qstats.drops++;
         return NET_XMIT_DROP;
......
......
@@ -47,37 +47,23 @@ struct prio_sched_data
 };
 
-static struct Qdisc *prio_classify(struct sk_buff *skb,
-                                   struct Qdisc *sch, int *r)
+static struct Qdisc *
+prio_classify(struct sk_buff *skb, struct Qdisc *sch, int *qerr)
 {
     struct prio_sched_data *q = qdisc_priv(sch);
     u32 band = skb->priority;
     struct tcf_result res;
 
+    *qerr = NET_XMIT_DROP;
     if (TC_H_MAJ(skb->priority) != sch->handle) {
 #ifdef CONFIG_NET_CLS_ACT
-        int result = 0, terminal = 0;
-        result = tc_classify(skb, q->filter_list, &res);
-
-        switch (result) {
-            case TC_ACT_SHOT:
-                *r = NET_XMIT_DROP;
-                terminal = 1;
-                break;
-            case TC_ACT_STOLEN:
-            case TC_ACT_QUEUED:
-                terminal = 1;
-                break;
-            case TC_ACT_RECLASSIFY:
-            case TC_ACT_OK:
-            case TC_ACT_UNSPEC:
-            default:
-                break;
-        };
-
-        if (terminal) {
-            kfree_skb(skb);
+        switch (tc_classify(skb, q->filter_list, &res)) {
+        case TC_ACT_STOLEN:
+        case TC_ACT_QUEUED:
+            *qerr = NET_XMIT_SUCCESS;
+        case TC_ACT_SHOT:
             return NULL;
-        }
+        };
 
         if (!q->filter_list ) {
 #else
......
@@ -97,15 +83,20 @@ static struct Qdisc *prio_classify(struct sk_buff *skb,
 }
 
 static int
-prio_enqueue(struct sk_buff *skb, struct Qdisc* sch)
+prio_enqueue(struct sk_buff *skb, struct Qdisc *sch)
 {
     struct Qdisc *qdisc;
-    int ret = NET_XMIT_SUCCESS;
+    int ret;
 
     qdisc = prio_classify(skb, sch, &ret);
-    if (NULL == qdisc)
-        goto dropped;
+#ifdef CONFIG_NET_CLS_ACT
+    if (qdisc == NULL) {
+        if (ret == NET_XMIT_DROP)
+            sch->qstats.drops++;
+        kfree_skb(skb);
+        return ret;
+    }
+#endif
 
     if ((ret = qdisc->enqueue(skb, qdisc)) == NET_XMIT_SUCCESS) {
         sch->bstats.bytes += skb->len;
......
@@ -113,17 +104,7 @@ prio_enqueue(struct sk_buff *skb, struct Qdisc* sch)
         sch->q.qlen++;
         return NET_XMIT_SUCCESS;
     }
 
-dropped:
-#ifdef CONFIG_NET_CLS_ACT
-    if (NET_XMIT_DROP == ret) {
-#endif
-        sch->qstats.drops++;
-#ifdef CONFIG_NET_CLS_ACT
-    } else {
-        sch->qstats.overlimits++; /* abuse, but noone uses it */
-    }
-#endif
+    sch->qstats.drops++;
     return ret;
 }
......
@@ -132,18 +113,23 @@ static int
 prio_requeue(struct sk_buff *skb, struct Qdisc* sch)
 {
     struct Qdisc *qdisc;
-    int ret = NET_XMIT_DROP;
+    int ret;
 
     qdisc = prio_classify(skb, sch, &ret);
-    if (qdisc == NULL)
-        goto dropped;
+#ifdef CONFIG_NET_CLS_ACT
+    if (qdisc == NULL) {
+        if (ret == NET_XMIT_DROP)
+            sch->qstats.drops++;
+        kfree_skb(skb);
+        return ret;
+    }
+#endif
 
-    if ((ret = qdisc->ops->requeue(skb, qdisc)) == 0) {
+    if ((ret = qdisc->ops->requeue(skb, qdisc)) == NET_XMIT_SUCCESS) {
         sch->q.qlen++;
         sch->qstats.requeues++;
         return 0;
     }
-dropped:
+
     sch->qstats.drops++;
     return NET_XMIT_DROP;
 }
......
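Several of the hunks above also collapse a nested #else / #ifdef CONFIG_NET_CLS_POLICE / double #endif into a single #elif defined(CONFIG_NET_CLS_POLICE), which selects the same branch with one less level of nesting. A trivial stand-alone illustration of the equivalence (the FEATURE_* macros are placeholders):

#include <stdio.h>

#define FEATURE_B   /* pretend only the fallback feature is configured */

int main(void)
{
    /* Old style: nested conditionals and two #endif lines. */
#ifdef FEATURE_A
    puts("A path");
#else
#ifdef FEATURE_B
    puts("B path (nested form)");
#endif
#endif

    /* New style: one #elif, same preprocessor result. */
#ifdef FEATURE_A
    puts("A path");
#elif defined(FEATURE_B)
    puts("B path (elif form)");
#endif
    return 0;
}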