Commit 68e0d42f authored by Ed L. Cashin, committed by Linus Torvalds

aoe: handle multiple network paths to AoE device

A remote AoE device is something that can process ATA commands and is
identified by an AoE shelf number and an AoE slot number.  Such a device
might have more than one network interface, and it might be reachable by
more than one local network interface.  This patch tracks the network paths
available to each AoE device, allowing them to be used more efficiently.
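
The layering this introduces can be summarized as follows.  This is only a
sketch, abbreviated from the struct definitions in the diff below:

    struct aoeif {                  /* one usable local network interface */
            struct net_device *nd;  /* local interface for this path */
            ushort maxbcnt;
    };

    struct aoetgt {                 /* one remote MAC address of the device */
            unsigned char addr[6];
            struct aoeif ifs[NAOEIFS];      /* paths reaching this target */
            struct aoeif *ifp;              /* path currently in use */
    };

    struct aoedev {                 /* one AoE shelf.slot device */
            struct aoetgt *targets[NTARGETS];
            struct aoetgt **tgt;            /* target currently in use */
            /* ... */
    };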

Andrew Morton asked about the call to msleep_interruptible in the revalidate
function.  Yes, if a signal is pending, then msleep_interruptible will not
return 0.  That means we will not loop but will call aoenet_xmit with a NULL
skb, which is a noop.  If the system is too low on memory or the aoe driver is
too low on frames, then the user can hit control-C to interrupt the attempt to
do a revalidate.  I have added a comment to the code summarizing that.
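
In other words, the key test in revalidate (shown in full in the diff below)
reads roughly like this:

    /* Retry only when the ATA identify skb could not be allocated and
     * msleep_interruptible(200) returned 0, meaning we slept the full
     * 200 ms with no signal pending.  Otherwise fall through; passing
     * a NULL skb to aoenet_xmit is a noop.
     */
    if (!skb && !msleep_interruptible(200)) {
            spin_lock_irqsave(&d->lock, flags);
            goto loop;
    }
    aoenet_xmit(skb);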

Andrew Morton asked whether the allocation performed inside addtgt could use a
more relaxed allocation like GFP_KERNEL, but addtgt is called when the aoedev
lock has been locked with spin_lock_irqsave.  It would be nice to allocate the
memory under fewer restrictions, but targets are only added when the device is
being discovered, and if the target can't be added right now, we can try again
in a minute when the next AoE config query broadcast goes out.
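
As a rough sketch of the constraint (the addtgt body itself is not shown in
this diff, so the names below are illustrative only):

    spin_lock_irqsave(&d->lock, flags);     /* interrupts are now disabled */
    ...
    /* GFP_KERNEL may sleep, which is not allowed while holding a spinlock
     * with interrupts disabled, so the new target is allocated atomically;
     * if that fails, the next config query broadcast gives another chance.
     */
    t = kcalloc(1, sizeof *t, GFP_ATOMIC);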

Andrew Morton pointed out that the "too many targets" message could be printed
for failing GFP_ATOMIC allocations.  The last patch in this series makes the
messages more specific.

Signed-off-by: Ed L. Cashin <ecashin@coraid.com>
Cc: Greg KH <greg@kroah.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
parent 8911ef4d
@@ -76,10 +76,8 @@ enum {
DEVFL_EXT = (1<<2), /* device accepts lba48 commands */
DEVFL_CLOSEWAIT = (1<<3), /* device is waiting for all closes to revalidate */
DEVFL_GDALLOC = (1<<4), /* need to alloc gendisk */
DEVFL_PAUSE = (1<<5),
DEVFL_KICKME = (1<<5), /* slow polling network card catch */
DEVFL_NEWSIZE = (1<<6), /* need to update dev size in block layer */
DEVFL_MAXBCNT = (1<<7), /* d->maxbcnt is not changeable */
DEVFL_KICKME = (1<<8),
BUFFL_FAIL = 1,
};
@@ -88,17 +86,24 @@ enum {
DEFAULTBCNT = 2 * 512, /* 2 sectors */
NPERSHELF = 16, /* number of slots per shelf address */
FREETAG = -1,
MIN_BUFS = 8,
MIN_BUFS = 16,
NTARGETS = 8,
NAOEIFS = 8,
TIMERTICK = HZ / 10,
MINTIMER = HZ >> 2,
MAXTIMER = HZ << 1,
HELPWAIT = 20,
};
struct buf {
struct list_head bufs;
ulong start_time; /* for disk stats */
ulong stime; /* for disk stats */
ulong flags;
ulong nframesout;
char *bufaddr;
ulong resid;
ulong bv_resid;
ulong bv_off;
sector_t sector;
struct bio *bio;
struct bio_vec *bv;
@@ -114,19 +119,37 @@ struct frame {
struct sk_buff *skb;
};
struct aoeif {
struct net_device *nd;
unsigned char lost;
unsigned char lostjumbo;
ushort maxbcnt;
};
struct aoetgt {
unsigned char addr[6];
ushort nframes;
struct frame *frames;
struct aoeif ifs[NAOEIFS];
struct aoeif *ifp; /* current aoeif in use */
ushort nout;
ushort maxout;
u16 lasttag; /* last tag sent */
u16 useme;
ulong lastwadj; /* last window adjustment */
int wpkts, rpkts;
};
struct aoedev {
struct aoedev *next;
unsigned char addr[6]; /* remote mac addr */
ushort flags;
ulong sysminor;
ulong aoemajor;
ulong aoeminor;
u16 aoeminor;
u16 flags;
u16 nopen; /* (bd_openers isn't available without sleeping) */
u16 lasttag; /* last tag sent */
u16 rttavg; /* round trip average of requests/responses */
u16 mintimer;
u16 fw_ver; /* version of blade's firmware */
u16 maxbcnt;
struct work_struct work;/* disk create work struct */
struct gendisk *gd;
struct request_queue blkq;
@@ -134,15 +157,14 @@ struct aoedev {
sector_t ssize;
struct timer_list timer;
spinlock_t lock;
struct net_device *ifp; /* interface ed is attached to */
struct sk_buff *sendq_hd; /* packets needing to be sent, list head */
struct sk_buff *sendq_tl;
mempool_t *bufpool; /* for deadlock-free Buf allocation */
struct list_head bufq; /* queue of bios to work on */
struct buf *inprocess; /* the one we're currently working on */
ushort lostjumbo;
ushort nframes; /* number of frames below */
struct frame *frames;
struct aoetgt *targets[NTARGETS];
struct aoetgt **tgt; /* target in use when working */
struct aoetgt **htgt; /* target needing rexmit assistance */
};
@@ -160,12 +182,13 @@ void aoecmd_cfg(ushort aoemajor, unsigned char aoeminor);
void aoecmd_ata_rsp(struct sk_buff *);
void aoecmd_cfg_rsp(struct sk_buff *);
void aoecmd_sleepwork(struct work_struct *);
struct sk_buff *new_skb(ulong);
void aoecmd_cleanslate(struct aoedev *);
struct sk_buff *aoecmd_ata_id(struct aoedev *);
int aoedev_init(void);
void aoedev_exit(void);
struct aoedev *aoedev_by_aoeaddr(int maj, int min);
struct aoedev *aoedev_by_sysminor_m(ulong sysminor, ulong bufcnt);
struct aoedev *aoedev_by_sysminor_m(ulong sysminor);
void aoedev_downdev(struct aoedev *d);
int aoedev_isbusy(struct aoedev *d);
@@ -24,7 +24,7 @@ static ssize_t aoedisk_show_state(struct device *dev,
return snprintf(page, PAGE_SIZE,
"%s%s\n",
(d->flags & DEVFL_UP) ? "up" : "down",
(d->flags & DEVFL_PAUSE) ? ",paused" :
(d->flags & DEVFL_KICKME) ? ",kickme" :
(d->nopen && !(d->flags & DEVFL_UP)) ? ",closewait" : "");
/* I'd rather see nopen exported so we can ditch closewait */
}
@@ -33,17 +33,49 @@ static ssize_t aoedisk_show_mac(struct device *dev,
{
struct gendisk *disk = dev_to_disk(dev);
struct aoedev *d = disk->private_data;
struct aoetgt *t = d->targets[0];
if (t == NULL)
return snprintf(page, PAGE_SIZE, "none\n");
return snprintf(page, PAGE_SIZE, "%012llx\n",
(unsigned long long)mac_addr(d->addr));
(unsigned long long)mac_addr(t->addr));
}
static ssize_t aoedisk_show_netif(struct device *dev,
struct device_attribute *attr, char *page)
{
struct gendisk *disk = dev_to_disk(dev);
struct aoedev *d = disk->private_data;
struct net_device *nds[8], **nd, **nnd, **ne;
struct aoetgt **t, **te;
struct aoeif *ifp, *e;
char *p;
memset(nds, 0, sizeof nds);
nd = nds;
ne = nd + ARRAY_SIZE(nds);
t = d->targets;
te = t + NTARGETS;
for (; t < te && *t; t++) {
ifp = (*t)->ifs;
e = ifp + NAOEIFS;
for (; ifp < e && ifp->nd; ifp++) {
for (nnd = nds; nnd < nd; nnd++)
if (*nnd == ifp->nd)
break;
if (nnd == nd && nd != ne)
*nd++ = ifp->nd;
}
}
return snprintf(page, PAGE_SIZE, "%s\n", d->ifp->name);
ne = nd;
nd = nds;
if (*nd == NULL)
return snprintf(page, PAGE_SIZE, "none\n");
for (p = page; nd < ne; nd++)
p += snprintf(p, PAGE_SIZE - (p-page), "%s%s",
p == page ? "" : ",", (*nd)->name);
p += snprintf(p, PAGE_SIZE - (p-page), "\n");
return p-page;
}
/* firmware version */
static ssize_t aoedisk_show_fwver(struct device *dev,
@@ -134,7 +166,23 @@ aoeblk_make_request(struct request_queue *q, struct bio *bio)
blk_queue_bounce(q, &bio);
if (bio == NULL) {
printk(KERN_ERR "aoe: bio is NULL\n");
BUG();
return 0;
}
d = bio->bi_bdev->bd_disk->private_data;
if (d == NULL) {
printk(KERN_ERR "aoe: bd_disk->private_data is NULL\n");
BUG();
bio_endio(bio, -ENXIO);
return 0;
} else if (bio->bi_io_vec == NULL) {
printk(KERN_ERR "aoe: bi_io_vec is NULL\n");
BUG();
bio_endio(bio, -ENXIO);
return 0;
}
buf = mempool_alloc(d->bufpool, GFP_NOIO);
if (buf == NULL) {
printk(KERN_INFO "aoe: buf allocation failure\n");
@@ -143,14 +191,14 @@ aoeblk_make_request(struct request_queue *q, struct bio *bio)
}
memset(buf, 0, sizeof(*buf));
INIT_LIST_HEAD(&buf->bufs);
buf->start_time = jiffies;
buf->stime = jiffies;
buf->bio = bio;
buf->resid = bio->bi_size;
buf->sector = bio->bi_sector;
buf->bv = &bio->bi_io_vec[bio->bi_idx];
WARN_ON(buf->bv->bv_len == 0);
buf->bv_resid = buf->bv->bv_len;
buf->bufaddr = page_address(buf->bv->bv_page) + buf->bv->bv_offset;
WARN_ON(buf->bv_resid == 0);
buf->bv_off = buf->bv->bv_offset;
spin_lock_irqsave(&d->lock, flags);
@@ -229,7 +277,7 @@ aoeblk_gdalloc(void *vp)
gd->fops = &aoe_bdops;
gd->private_data = d;
gd->capacity = d->ssize;
snprintf(gd->disk_name, sizeof gd->disk_name, "etherd/e%ld.%ld",
snprintf(gd->disk_name, sizeof gd->disk_name, "etherd/e%ld.%d",
d->aoemajor, d->aoeminor);
gd->queue = &d->blkq;
@@ -6,6 +6,7 @@
#include <linux/hdreg.h>
#include <linux/blkdev.h>
#include <linux/delay.h>
#include "aoe.h"
enum {
@@ -68,6 +69,7 @@ revalidate(const char __user *str, size_t size)
int major, minor, n;
ulong flags;
struct aoedev *d;
struct sk_buff *skb;
char buf[16];
if (size >= sizeof buf)
@@ -85,13 +87,20 @@ revalidate(const char __user *str, size_t size)
d = aoedev_by_aoeaddr(major, minor);
if (!d)
return -EINVAL;
spin_lock_irqsave(&d->lock, flags);
d->flags &= ~DEVFL_MAXBCNT;
d->flags |= DEVFL_PAUSE;
aoecmd_cleanslate(d);
loop:
skb = aoecmd_ata_id(d);
spin_unlock_irqrestore(&d->lock, flags);
/* try again if we are able to sleep a bit,
* otherwise give up this revalidation
*/
if (!skb && !msleep_interruptible(200)) {
spin_lock_irqsave(&d->lock, flags);
goto loop;
}
aoenet_xmit(skb);
aoecmd_cfg(major, minor);
return 0;
}
This diff is collapsed.
@@ -15,15 +15,18 @@ static spinlock_t devlist_lock;
int
aoedev_isbusy(struct aoedev *d)
{
struct aoetgt **t, **te;
struct frame *f, *e;
f = d->frames;
e = f + d->nframes;
do {
if (f->tag != FREETAG)
return 1;
} while (++f < e);
t = d->targets;
te = t + NTARGETS;
for (; t < te && *t; t++) {
f = (*t)->frames;
e = f + (*t)->nframes;
for (; f < e; f++)
if (f->tag != FREETAG)
return 1;
}
return 0;
}
@@ -55,75 +58,41 @@ dummy_timer(ulong vp)
add_timer(&d->timer);
}
/* called with devlist lock held */
static struct aoedev *
aoedev_newdev(ulong nframes)
{
struct aoedev *d;
struct frame *f, *e;
d = kzalloc(sizeof *d, GFP_ATOMIC);
f = kcalloc(nframes, sizeof *f, GFP_ATOMIC);
switch (!d || !f) {
case 0:
d->nframes = nframes;
d->frames = f;
e = f + nframes;
for (; f<e; f++) {
f->tag = FREETAG;
f->skb = new_skb(ETH_ZLEN);
if (!f->skb)
break;
}
if (f == e)
break;
while (f > d->frames) {
f--;
dev_kfree_skb(f->skb);
}
default:
if (f)
kfree(f);
if (d)
kfree(d);
return NULL;
}
INIT_WORK(&d->work, aoecmd_sleepwork);
spin_lock_init(&d->lock);
init_timer(&d->timer);
d->timer.data = (ulong) d;
d->timer.function = dummy_timer;
d->timer.expires = jiffies + HZ;
add_timer(&d->timer);
d->bufpool = NULL; /* defer to aoeblk_gdalloc */
INIT_LIST_HEAD(&d->bufq);
d->next = devlist;
devlist = d;
return d;
}
void
aoedev_downdev(struct aoedev *d)
{
struct aoetgt **t, **te;
struct frame *f, *e;
struct buf *buf;
struct bio *bio;
f = d->frames;
e = f + d->nframes;
for (; f<e; f->tag = FREETAG, f->buf = NULL, f++) {
if (f->tag == FREETAG || f->buf == NULL)
continue;
buf = f->buf;
bio = buf->bio;
if (--buf->nframesout == 0) {
mempool_free(buf, d->bufpool);
bio_endio(bio, -EIO);
t = d->targets;
te = t + NTARGETS;
for (; t < te && *t; t++) {
f = (*t)->frames;
e = f + (*t)->nframes;
for (; f < e; f->tag = FREETAG, f->buf = NULL, f++) {
if (f->tag == FREETAG || f->buf == NULL)
continue;
buf = f->buf;
bio = buf->bio;
if (--buf->nframesout == 0
&& buf != d->inprocess) {
mempool_free(buf, d->bufpool);
bio_endio(bio, -EIO);
}
}
skb_shinfo(f->skb)->nr_frags = f->skb->data_len = 0;
(*t)->maxout = (*t)->nframes;
(*t)->nout = 0;
}
buf = d->inprocess;
if (buf) {
bio = buf->bio;
mempool_free(buf, d->bufpool);
bio_endio(bio, -EIO);
}
d->inprocess = NULL;
d->htgt = NULL;
while (!list_empty(&d->bufq)) {
buf = container_of(d->bufq.next, struct buf, bufs);
@@ -136,12 +105,12 @@ aoedev_downdev(struct aoedev *d)
if (d->gd)
d->gd->capacity = 0;
d->flags &= ~(DEVFL_UP | DEVFL_PAUSE);
d->flags &= ~DEVFL_UP;
}
/* find it or malloc it */
struct aoedev *
aoedev_by_sysminor_m(ulong sysminor, ulong bufcnt)
aoedev_by_sysminor_m(ulong sysminor)
{
struct aoedev *d;
ulong flags;
@@ -151,40 +120,61 @@ aoedev_by_sysminor_m(ulong sysminor, ulong bufcnt)
for (d=devlist; d; d=d->next)
if (d->sysminor == sysminor)
break;
if (d == NULL) {
d = aoedev_newdev(bufcnt);
if (d == NULL) {
spin_unlock_irqrestore(&devlist_lock, flags);
printk(KERN_INFO "aoe: aoedev_newdev failure.\n");
return NULL;
}
d->sysminor = sysminor;
d->aoemajor = AOEMAJOR(sysminor);
d->aoeminor = AOEMINOR(sysminor);
}
if (d)
goto out;
d = kcalloc(1, sizeof *d, GFP_ATOMIC);
if (!d)
goto out;
INIT_WORK(&d->work, aoecmd_sleepwork);
spin_lock_init(&d->lock);
init_timer(&d->timer);
d->timer.data = (ulong) d;
d->timer.function = dummy_timer;
d->timer.expires = jiffies + HZ;
add_timer(&d->timer);
d->bufpool = NULL; /* defer to aoeblk_gdalloc */
d->tgt = d->targets;
INIT_LIST_HEAD(&d->bufq);
d->sysminor = sysminor;
d->aoemajor = AOEMAJOR(sysminor);
d->aoeminor = AOEMINOR(sysminor);
d->mintimer = MINTIMER;
d->next = devlist;
devlist = d;
out:
spin_unlock_irqrestore(&devlist_lock, flags);
return d;
}
static void
aoedev_freedev(struct aoedev *d)
freetgt(struct aoetgt *t)
{
struct frame *f, *e;
f = t->frames;
e = f + t->nframes;
for (; f < e; f++) {
skb_shinfo(f->skb)->nr_frags = 0;
dev_kfree_skb(f->skb);
}
kfree(t->frames);
kfree(t);
}
static void
aoedev_freedev(struct aoedev *d)
{
struct aoetgt **t, **e;
if (d->gd) {
aoedisk_rm_sysfs(d);
del_gendisk(d->gd);
put_disk(d->gd);
}
f = d->frames;
e = f + d->nframes;
for (; f<e; f++) {
skb_shinfo(f->skb)->nr_frags = 0;
dev_kfree_skb(f->skb);
}
kfree(d->frames);
t = d->targets;
e = t + NTARGETS;
for (; t < e && *t; t++)
freetgt(*t);
if (d->bufpool)
mempool_destroy(d->bufpool);
kfree(d);
@@ -137,9 +137,12 @@ aoenet_rcv(struct sk_buff *skb, struct net_device *ifp, struct packet_type *pt,
if (n > NECODES)
n = 0;
if (net_ratelimit())
printk(KERN_ERR "aoe: error packet from %d.%d; ecode=%d '%s'\n",
be16_to_cpu(get_unaligned(&h->major)), h->minor,
h->err, aoe_errlist[n]);
printk(KERN_ERR
"%s%d.%d@%s; ecode=%d '%s'\n",
"aoe: error packet from ",
be16_to_cpu(get_unaligned(&h->major)),
h->minor, skb->dev->name,
h->err, aoe_errlist[n]);
goto exit;
}