Commit eb086ec5 authored by Ed Cashin, committed by Linus Torvalds

aoe: use a kernel thread for transmissions

The dev_queue_xmit function needs to be called with interrupts enabled, so the
simplest way to get the locking right while still meeting that requirement is
to use a kernel thread that calls dev_queue_xmit serially over queued
transmissions.
Signed-off-by: Ed Cashin <ecashin@coraid.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
parent 69cf2d85
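
For reference, the consumer side that these pieces plug into is the driver's generic ktstate kernel-thread loop in aoecmd.c (only its static qualifiers change in this patch). The loop takes k->lock with interrupts disabled, runs k->fn (the new tx() below) until it reports no more work, then sleeps on k->waitq until aoenet_xmit wakes it. The struct layout and loop body shown here are a condensed paraphrase of the driver from this period, not text from this diff, so treat them as illustrative:

/*
 * Illustrative paraphrase of the aoe driver's generic ktstate runner
 * (aoecmd.c), not the verbatim committed code.  One thread per ktstate:
 * it calls k->fn() under k->lock with local interrupts off, and sleeps
 * on k->waitq when fn() reports that the queue is empty.
 */
#include <linux/kthread.h>
#include <linux/completion.h>
#include <linux/spinlock.h>
#include <linux/wait.h>
#include <linux/sched.h>

struct ktstate {
        struct completion rendez;       /* start/stop handshake */
        struct task_struct *task;
        wait_queue_head_t *waitq;       /* e.g. txwq */
        int (*fn)(void);                /* e.g. tx(), called with *lock held */
        char *name;                     /* e.g. "aoe_tx" */
        spinlock_t *lock;               /* e.g. txlock */
};

static int
kthread(void *vp)
{
        struct ktstate *k = vp;
        DECLARE_WAITQUEUE(wait, current);
        int more;

        complete(&k->rendez);           /* let aoe_ktstart return */
        do {
                spin_lock_irq(k->lock);
                more = k->fn();         /* drain work; fn may drop the lock */
                if (!more) {
                        add_wait_queue(k->waitq, &wait);
                        __set_current_state(TASK_INTERRUPTIBLE);
                }
                spin_unlock_irq(k->lock);
                if (!more) {
                        schedule();     /* wait for wake_up(&txwq) */
                        remove_wait_queue(k->waitq, &wait);
                } else
                        cond_resched();
        } while (!kthread_should_stop());
        complete(&k->rendez);           /* let aoe_ktstop finish */
        return 0;
}

aoe_ktstart() spawns this loop with kthread_run() and waits on k->rendez before returning, which is why the aoe_ktstop() shown in the diff below simply pairs kthread_stop() with wait_for_completion().
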
@@ -210,6 +210,8 @@ struct sk_buff *aoecmd_ata_id(struct aoedev *);
 void aoe_freetframe(struct frame *);
 void aoe_flush_iocq(void);
 void aoe_end_request(struct aoedev *, struct request *, int);
+int aoe_ktstart(struct ktstate *k);
+void aoe_ktstop(struct ktstate *k);
 
 int aoedev_init(void);
 void aoedev_exit(void);
...
@@ -1110,14 +1110,14 @@ kthread(void *vp)
         return 0;
 }
 
-static void
+void
 aoe_ktstop(struct ktstate *k)
 {
         kthread_stop(k->task);
         wait_for_completion(&k->rendez);
 }
 
-static int
+int
 aoe_ktstart(struct ktstate *k)
 {
         struct task_struct *task;
...
@@ -33,6 +33,9 @@ static char aoe_iflist[IFLISTSZ];
 module_param_string(aoe_iflist, aoe_iflist, IFLISTSZ, 0600);
 MODULE_PARM_DESC(aoe_iflist, "aoe_iflist=\"dev1 [dev2 ...]\"");
 
+static wait_queue_head_t txwq;
+static struct ktstate kts;
+
 #ifndef MODULE
 static int __init aoe_iflist_setup(char *str)
 {
@@ -44,6 +47,23 @@ static int __init aoe_iflist_setup(char *str)
 __setup("aoe_iflist=", aoe_iflist_setup);
 #endif
 
+static spinlock_t txlock;
+static struct sk_buff_head skbtxq;
+
+/* enters with txlock held */
+static int
+tx(void)
+{
+        struct sk_buff *skb;
+
+        while ((skb = skb_dequeue(&skbtxq))) {
+                spin_unlock_irq(&txlock);
+                dev_queue_xmit(skb);
+                spin_lock_irq(&txlock);
+        }
+        return 0;
+}
+
 int
 is_aoe_netif(struct net_device *ifp)
 {
@@ -88,10 +108,14 @@ void
 aoenet_xmit(struct sk_buff_head *queue)
 {
         struct sk_buff *skb, *tmp;
+        ulong flags;
 
         skb_queue_walk_safe(queue, skb, tmp) {
                 __skb_unlink(skb, queue);
-                dev_queue_xmit(skb);
+                spin_lock_irqsave(&txlock, flags);
+                skb_queue_tail(&skbtxq, skb);
+                spin_unlock_irqrestore(&txlock, flags);
+                wake_up(&txwq);
         }
 }
 
@@ -169,6 +193,15 @@ static struct packet_type aoe_pt __read_mostly = {
 int __init
 aoenet_init(void)
 {
+        skb_queue_head_init(&skbtxq);
+        init_waitqueue_head(&txwq);
+        spin_lock_init(&txlock);
+        kts.lock = &txlock;
+        kts.fn = tx;
+        kts.waitq = &txwq;
+        kts.name = "aoe_tx";
+        if (aoe_ktstart(&kts))
+                return -EAGAIN;
         dev_add_pack(&aoe_pt);
         return 0;
 }
@@ -176,6 +209,8 @@ aoenet_init(void)
 
 void
 aoenet_exit(void)
 {
+        aoe_ktstop(&kts);
+        skb_queue_purge(&skbtxq);
         dev_remove_pack(&aoe_pt);
 }