Commit 85e2d2e7 authored by Linus Torvalds

Merge bk://kernel.bkbits.net/davem/net-2.6

into ppc970.osdl.org:/home/torvalds/v2.6/linux
parents 80aca3e6 39ed6207
@@ -1978,7 +1978,8 @@ int __init atyfb_do_init(void)
         /*
          * Map memory-mapped registers.
          */
-        default_par->ati_regbase = addr + 0x7ffc00UL;
+        default_par->ati_regbase = (void __iomem *)
+                                   (addr + 0x7ffc00UL);
         info->fix.mmio_start = addr + 0x7ffc00UL;
         /*
@@ -2289,7 +2290,8 @@ int __init atyfb_do_init(void)
                 default_par->mmap_map[0].voff +
                 info->fix.smem_len;
         default_par->mmap_map[1].poff =
-                default_par->ati_regbase & PAGE_MASK;
+                ((unsigned long) default_par->ati_regbase &
+                 PAGE_MASK);
         default_par->mmap_map[1].size = PAGE_SIZE;
         default_par->mmap_map[1].prot_mask = _PAGE_CACHE;
         default_par->mmap_map[1].prot_flag = _PAGE_E;
...
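Note on the aty hunks: annotating ati_regbase as void __iomem * lets sparse check that the MMIO cookie is only dereferenced through the io accessors, so any plain integer arithmetic on it (such as the PAGE_MASK masking above) then needs an explicit cast back to unsigned long. A minimal sketch of the same pattern, with hypothetical names and an ioremap()ed region rather than the driver's own mapping:

#include <asm/io.h>      /* ioremap, iounmap, readl */
#include <asm/page.h>    /* PAGE_MASK, PAGE_SIZE */

/* hypothetical example structure, not the atyfb one */
struct demo_par {
        void __iomem *regbase;   /* MMIO cookie: only touched via readl()/writel() */
};

static unsigned long demo_map_regs(struct demo_par *par, unsigned long phys)
{
        par->regbase = ioremap(phys, PAGE_SIZE);
        if (!par->regbase)
                return 0;

        /* sparse treats __iomem as a separate address space, so doing
         * integer arithmetic on the cookie (as the mmap_map[].poff hunk
         * does) requires an explicit cast back to unsigned long */
        return (unsigned long)par->regbase & PAGE_MASK;
}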
@@ -121,7 +121,6 @@ __dsthash_find(const struct ipt_hashlimit_htable *ht, struct dsthash_dst *dst)
 {
         struct dsthash_ent *ent;
         u_int32_t hash = hash_dst(ht, dst);
-        MUST_BE_LOCKED(&ht->lock);
         ent = LIST_FIND(&ht->hash[hash], dst_cmp, struct dsthash_ent *, dst);
         return ent;
 }
@@ -170,8 +169,6 @@ __dsthash_alloc_init(struct ipt_hashlimit_htable *ht, struct dsthash_dst *dst)
 static inline void
 __dsthash_free(struct ipt_hashlimit_htable *ht, struct dsthash_ent *ent)
 {
-        MUST_BE_LOCKED(&ht->lock);
         list_del(&ent->list);
         kmem_cache_free(hashlimit_cachep, ent);
         atomic_dec(&ht->count);
@@ -258,7 +255,7 @@ static void htable_selective_cleanup(struct ipt_hashlimit_htable *ht,
         IP_NF_ASSERT(ht->cfg.size && ht->cfg.max);
         /* lock hash table and iterate over it */
-        LOCK_BH(&ht->lock);
+        spin_lock_bh(&ht->lock);
         for (i = 0; i < ht->cfg.size; i++) {
                 struct dsthash_ent *dh, *n;
                 list_for_each_entry_safe(dh, n, &ht->hash[i], list) {
@@ -266,7 +263,7 @@ static void htable_selective_cleanup(struct ipt_hashlimit_htable *ht,
                         __dsthash_free(ht, dh);
                 }
         }
-        UNLOCK_BH(&ht->lock);
+        spin_unlock_bh(&ht->lock);
 }
 /* hash table garbage collector, run by timer */
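Reviewer note on the locking changes in these hunks: the module-private MUST_BE_LOCKED()/LOCK_BH()/UNLOCK_BH() wrappers are dropped in favour of the stock spin_lock_bh()/spin_unlock_bh() primitives. A minimal sketch of that pattern, with a hypothetical structure standing in for the hashlimit table (entries are assumed to be owned and freed elsewhere):

#include <linux/spinlock.h>
#include <linux/list.h>

/* hypothetical stand-in for the per-table state */
struct demo_table {
        spinlock_t lock;
        struct list_head entries;
};

static void demo_table_init(struct demo_table *t)
{
        spin_lock_init(&t->lock);
        INIT_LIST_HEAD(&t->entries);
}

static void demo_table_flush(struct demo_table *t)
{
        /* _bh variant: the same table is touched from softirq context
         * (the netfilter match and the timer-driven GC), so bottom halves
         * must stay disabled while the lock is held on this CPU */
        spin_lock_bh(&t->lock);
        while (!list_empty(&t->entries))
                list_del(t->entries.next);
        spin_unlock_bh(&t->lock);
}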
@@ -457,7 +454,7 @@ hashlimit_match(const struct sk_buff *skb,
                 dst.dst_port = ports[1];
         }
-        LOCK_BH(&hinfo->lock);
+        spin_lock_bh(&hinfo->lock);
         dh = __dsthash_find(hinfo, &dst);
         if (!dh) {
                 dh = __dsthash_alloc_init(hinfo, &dst);
@@ -466,7 +463,7 @@ hashlimit_match(const struct sk_buff *skb,
                         /* enomem... don't match == DROP */
                         if (net_ratelimit())
                                 printk(KERN_ERR "%s: ENOMEM\n", __FUNCTION__);
-                        UNLOCK_BH(&hinfo->lock);
+                        spin_unlock_bh(&hinfo->lock);
                         return 0;
                 }
@@ -479,7 +476,7 @@ hashlimit_match(const struct sk_buff *skb,
                                            hinfo->cfg.burst);
                 dh->rateinfo.cost = user2credits(hinfo->cfg.avg);
-                UNLOCK_BH(&hinfo->lock);
+                spin_unlock_bh(&hinfo->lock);
                 return 1;
         }
@@ -490,11 +487,11 @@ hashlimit_match(const struct sk_buff *skb,
         if (dh->rateinfo.credit >= dh->rateinfo.cost) {
                 /* We're underlimit. */
                 dh->rateinfo.credit -= dh->rateinfo.cost;
-                UNLOCK_BH(&hinfo->lock);
+                spin_unlock_bh(&hinfo->lock);
                 return 1;
         }
-        UNLOCK_BH(&hinfo->lock);
+        spin_unlock_bh(&hinfo->lock);
         /* default case: we're overlimit, thus don't match */
         return 0;
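The credit/cost test above is a token bucket: the entry's credit is topped up over time (capped at a burst-sized maximum) and a packet matches only while credit >= cost, consuming one cost per match. A self-contained userspace sketch of that accounting, with illustrative names and units rather than the module's user2credits()/rateinfo_recalc() internals:

#include <stdio.h>

/* hypothetical token bucket: all names and units are illustrative */
struct bucket {
        long credit;      /* tokens currently available */
        long credit_cap;  /* burst ceiling */
        long cost;        /* tokens charged per matching packet */
        long rate;        /* tokens gained per elapsed tick */
};

/* refill first, then try to charge one packet */
static int bucket_match(struct bucket *b, long ticks_since_last)
{
        b->credit += b->rate * ticks_since_last;
        if (b->credit > b->credit_cap)
                b->credit = b->credit_cap;

        if (b->credit >= b->cost) {     /* under limit: consume and match */
                b->credit -= b->cost;
                return 1;
        }
        return 0;                       /* over limit: don't match */
}

int main(void)
{
        struct bucket b = { .credit = 3, .credit_cap = 3, .cost = 1, .rate = 1 };
        int i;

        /* 6 back-to-back packets in the same tick: the burst of 3 matches,
         * the rest are over limit until credit is replenished */
        for (i = 0; i < 6; i++)
                printf("packet %d: %s\n", i,
                       bucket_match(&b, 0) ? "match" : "over limit");
        return 0;
}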
...@@ -569,7 +566,7 @@ static void *dl_seq_start(struct seq_file *s, loff_t *pos) ...@@ -569,7 +566,7 @@ static void *dl_seq_start(struct seq_file *s, loff_t *pos)
struct ipt_hashlimit_htable *htable = pde->data; struct ipt_hashlimit_htable *htable = pde->data;
unsigned int *bucket; unsigned int *bucket;
LOCK_BH(&htable->lock); spin_lock_bh(&htable->lock);
if (*pos >= htable->cfg.size) if (*pos >= htable->cfg.size)
return NULL; return NULL;
...@@ -603,7 +600,7 @@ static void dl_seq_stop(struct seq_file *s, void *v) ...@@ -603,7 +600,7 @@ static void dl_seq_stop(struct seq_file *s, void *v)
kfree(bucket); kfree(bucket);
UNLOCK_BH(&htable->lock); spin_unlock_bh(&htable->lock);
} }
static inline int dl_seq_real_show(struct dsthash_ent *ent, struct seq_file *s) static inline int dl_seq_real_show(struct dsthash_ent *ent, struct seq_file *s)
......
@@ -588,7 +588,8 @@ int tcp_trim_head(struct sock *sk, struct sk_buff *skb, u32 len)
         /* Any change of skb->len requires recalculation of tso
          * factor and mss.
          */
-        tcp_set_skb_tso_segs(skb, tcp_skb_mss(skb));
+        if (tcp_skb_mss(skb))
+                tcp_set_skb_tso_segs(skb, tcp_skb_mss(skb));
         return 0;
 }
...
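The tcp_trim_head() change only recomputes the TSO factor when the skb actually carries a cached MSS; deriving a segment count from len and an MSS of zero would otherwise risk a divide by zero. A small standalone sketch of the guard, with hypothetical stand-ins rather than the real tcp_skb_mss()/tcp_set_skb_tso_segs():

#include <stdio.h>

/* hypothetical stand-ins for the skb/TSO bookkeeping in tcp_trim_head() */
struct fake_skb {
        unsigned int len;        /* payload length after trimming */
        unsigned int mss;        /* cached MSS; 0 means "not a TSO frame" */
        unsigned int tso_segs;   /* segment count derived from len and mss */
};

static void set_tso_segs(struct fake_skb *skb, unsigned int mss)
{
        /* round up len/mss; with mss == 0 this would divide by zero */
        skb->tso_segs = (skb->len + mss - 1) / mss;
}

static void trim_head(struct fake_skb *skb, unsigned int trimmed)
{
        skb->len -= trimmed;

        /* the fix above: only recompute the factor when an MSS is cached */
        if (skb->mss)
                set_tso_segs(skb, skb->mss);
}

int main(void)
{
        struct fake_skb a = { .len = 3000, .mss = 1460, .tso_segs = 3 };
        struct fake_skb b = { .len = 500,  .mss = 0,    .tso_segs = 1 };

        trim_head(&a, 1000);     /* 2000/1460 rounded up -> 2 segments */
        trim_head(&b, 100);      /* mss 0: guard skips the recalculation */
        printf("a: %u segs, b: %u segs\n", a.tso_segs, b.tso_segs);
        return 0;
}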
...@@ -88,7 +88,6 @@ struct tc_u_hnode ...@@ -88,7 +88,6 @@ struct tc_u_hnode
struct tc_u_common *tp_c; struct tc_u_common *tp_c;
int refcnt; int refcnt;
unsigned divisor; unsigned divisor;
u32 hgenerator;
struct tc_u_knode *ht[1]; struct tc_u_knode *ht[1];
}; };
......
@@ -142,19 +142,23 @@ static int delay_skb(struct Qdisc *sch, struct sk_buff *skb)
 {
         struct netem_sched_data *q = qdisc_priv(sch);
         struct netem_skb_cb *cb = (struct netem_skb_cb *)skb->cb;
+        psched_tdiff_t td;
         psched_time_t now;
         PSCHED_GET_TIME(now);
-        PSCHED_TADD2(now, tabledist(q->latency, q->jitter,
-                                    &q->delay_cor, q->delay_dist),
-                     cb->time_to_send);
+        td = tabledist(q->latency, q->jitter, &q->delay_cor, q->delay_dist);
+        PSCHED_TADD2(now, td, cb->time_to_send);
         /* Always queue at tail to keep packets in order */
         if (likely(q->delayed.qlen < q->limit)) {
                 __skb_queue_tail(&q->delayed, skb);
+                sch->q.qlen++;
                 sch->bstats.bytes += skb->len;
                 sch->bstats.packets++;
+                if (!timer_pending(&q->timer)) {
+                        q->timer.expires = jiffies + PSCHED_US2JIFFIE(td);
+                        add_timer(&q->timer);
+                }
                 return NET_XMIT_SUCCESS;
         }
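In delay_skb() the random delay now lands in a local td, and if the watchdog timer is not already pending it is armed for the moment this packet becomes sendable, rather than waiting for a later dequeue to notice. A minimal sketch of the arm-only-if-idle idiom against the 2.6-era timer API (everything except the timer calls is hypothetical):

#include <linux/timer.h>
#include <linux/jiffies.h>

struct demo_delayer {                   /* hypothetical holder for the timer */
        struct timer_list timer;
        unsigned long pending_items;
};

static void demo_fire(unsigned long data)
{
        struct demo_delayer *d = (struct demo_delayer *)data;

        d->pending_items = 0;           /* release whatever was being held */
}

static void demo_init(struct demo_delayer *d)
{
        init_timer(&d->timer);
        d->timer.function = demo_fire;
        d->timer.data = (unsigned long)d;
        d->pending_items = 0;
}

static void demo_hold(struct demo_delayer *d, unsigned long delay_jiffies)
{
        d->pending_items++;

        /* same idiom as delay_skb(): arm the timer only when it is not
         * already pending, so an earlier (sooner) expiry is not pushed back */
        if (!timer_pending(&d->timer)) {
                d->timer.expires = jiffies + delay_jiffies;
                add_timer(&d->timer);
        }
}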
@@ -243,15 +247,31 @@ static struct sk_buff *netem_dequeue(struct Qdisc *sch)
 {
         struct netem_sched_data *q = qdisc_priv(sch);
         struct sk_buff *skb;
+        skb = q->qdisc->dequeue(q->qdisc);
+        if (skb)
+                sch->q.qlen--;
+        return skb;
+}
+
+static void netem_watchdog(unsigned long arg)
+{
+        struct Qdisc *sch = (struct Qdisc *)arg;
+        struct netem_sched_data *q = qdisc_priv(sch);
+        struct sk_buff *skb;
         psched_time_t now;
+        pr_debug("netem_watchdog: fired @%lu\n", jiffies);
+        spin_lock_bh(&sch->dev->queue_lock);
         PSCHED_GET_TIME(now);
         while ((skb = skb_peek(&q->delayed)) != NULL) {
                 const struct netem_skb_cb *cb
                         = (const struct netem_skb_cb *)skb->cb;
                 long delay
                         = PSCHED_US2JIFFIE(PSCHED_TDIFF(cb->time_to_send, now));
-                pr_debug("netem_dequeue: delay queue %p@%lu %ld\n",
+                pr_debug("netem_watchdog: skb %p@%lu %ld\n",
                          skb, jiffies, delay);
                 /* if more time remaining? */
@@ -263,20 +283,10 @@ static struct sk_buff *netem_dequeue(struct Qdisc *sch)
                 if (q->qdisc->enqueue(skb, q->qdisc))
                         sch->qstats.drops++;
-                else
-                        sch->q.qlen++;
         }
-        skb = q->qdisc->dequeue(q->qdisc);
-        if (skb)
-                sch->q.qlen--;
-        return skb;
-}
-
-static void netem_watchdog(unsigned long arg)
-{
-        struct Qdisc *sch = (struct Qdisc *)arg;
-        pr_debug("netem_watchdog: fired @%lu\n", jiffies);
-        netif_schedule(sch->dev);
+        spin_unlock_bh(&sch->dev->queue_lock);
 }
 static void netem_reset(struct Qdisc *sch)
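After this restructuring netem_dequeue() only pulls from the inner qdisc; moving overdue packets out of the delayed queue, and re-arming the timer via mod_timer() when the head packet is still in the future, is now the watchdog's job under dev->queue_lock. A rough sketch of that drain-or-rearm loop, with hypothetical types and the locking omitted:

#include <linux/timer.h>
#include <linux/jiffies.h>
#include <linux/list.h>

/* hypothetical deferred-work item stamped with a due time */
struct demo_item {
        struct list_head list;
        unsigned long due;                     /* jiffies value when it may go out */
};

struct demo_queue {
        struct list_head held;                 /* delayed items, earliest due first */
        struct timer_list timer;
        void (*release)(struct demo_item *);   /* hand an item onward, like the inner qdisc */
};

/* same shape as the new netem_watchdog(): drain everything that is due,
 * and re-arm the timer for the first item that is still in the future */
static void demo_watchdog(unsigned long data)
{
        struct demo_queue *q = (struct demo_queue *)data;

        while (!list_empty(&q->held)) {
                struct demo_item *it = list_entry(q->held.next, struct demo_item, list);

                if (time_after(it->due, jiffies)) {
                        mod_timer(&q->timer, it->due);   /* head not due yet: wake up then */
                        break;
                }
                list_del(&it->list);
                q->release(it);                          /* overdue: pass it along now */
        }
}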
@@ -493,7 +503,7 @@ static int netem_graft(struct Qdisc *sch, unsigned long arg, struct Qdisc *new,
         sch_tree_lock(sch);
         *old = xchg(&q->qdisc, new);
         qdisc_reset(*old);
-        sch->q.qlen = q->delayed.qlen;
+        sch->q.qlen = 0;
         sch_tree_unlock(sch);
         return 0;
...