Commit 736a2dd2 authored by Linus Torvalds

Merge tag 'virtio-next-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/rusty/linux

Pull virtio & lguest updates from Rusty Russell:
 "Lots of virtio work which wasn't quite ready for last merge window.

  Plus I dived into lguest again, reworking the pagetable code so we can
  move the switcher page: our fixmaps sometimes take more than 2MB now..."

Ugh.  Annoying conflicts with the tcm_vhost -> vhost_scsi rename.
Hopefully correctly resolved.

* tag 'virtio-next-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/rusty/linux: (57 commits)
  caif_virtio: Remove bouncing email addresses
  lguest: improve code readability in lg_cpu_start.
  virtio-net: fill only rx queues which are being used
  lguest: map Switcher below fixmap.
  lguest: cache last cpu we ran on.
  lguest: map Switcher text whenever we allocate a new pagetable.
  lguest: don't share Switcher PTE pages between guests.
  lguest: expost switcher_pages array (as lg_switcher_pages).
  lguest: extract shadow PTE walking / allocating.
  lguest: make check_gpte et. al return bool.
  lguest: assume Switcher text is a single page.
  lguest: rename switcher_page to switcher_pages.
  lguest: remove RESERVE_MEM constant.
  lguest: check vaddr not pgd for Switcher protection.
  lguest: prepare to make SWITCHER_ADDR a variable.
  virtio: console: replace EMFILE with EBUSY for already-open port
  virtio-scsi: reset virtqueue affinity when doing cpu hotplug
  virtio-scsi: introduce multiqueue support
  virtio-scsi: push vq lock/unlock into virtscsi_vq_done
  virtio-scsi: pass struct virtio_scsi to virtqueue completion function
  ...
parents 0b2e3b6b 01d779a1
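One API change threads through most of the diffs below: virtqueue_add_buf() took a single scatterlist array holding the device-readable entries followed by the device-writable ones, while the new calls either make the direction explicit (virtqueue_add_outbuf()/virtqueue_add_inbuf()) or take an array of scatterlist groups (virtqueue_add_sgs()). A minimal sketch using the signatures declared in the include/linux/virtio.h hunk further down; the virtqueue and buffers here are illustrative, not from any one driver:

	#include <linux/scatterlist.h>
	#include <linux/virtio.h>

	/* Sketch only: queue one device-readable and one device-writable
	 * buffer with the new grouped API (one sg group per direction). */
	static int example_queue_pair(struct virtqueue *vq, void *out_buf,
				      void *in_buf, unsigned int len)
	{
		struct scatterlist out_sg, in_sg;
		struct scatterlist *sgs[2];

		sg_init_one(&out_sg, out_buf, len);
		sg_init_one(&in_sg, in_buf, len);
		sgs[0] = &out_sg;	/* read by the device */
		sgs[1] = &in_sg;	/* written by the device */

		/* Old equivalent (single array, counts give the split):
		 *   virtqueue_add_buf(vq, sg_array, 1, 1, out_buf, GFP_KERNEL);
		 */
		return virtqueue_add_sgs(vq, sgs, 1, 1, out_buf, GFP_KERNEL);
	}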
@@ -6,6 +6,3 @@ kvm/
 	- Kernel Virtual Machine. See also http://linux-kvm.org
 uml/
 	- User Mode Linux, builds/runs Linux kernel as a userspace program.
-virtio.txt
-	- Text version of draft virtio spec.
-	  See http://ozlabs.org/~rusty/virtio-spec
This source diff could not be displayed because it is too large.
@@ -8743,6 +8743,7 @@ F:	drivers/virtio/
 F:	drivers/net/virtio_net.c
 F:	drivers/block/virtio_blk.c
 F:	include/linux/virtio_*.h
+F:	include/uapi/linux/virtio_*.h

 VIRTIO HOST (VHOST)
 M:	"Michael S. Tsirkin" <mst@redhat.com>
@@ -11,18 +11,11 @@

 #define GUEST_PL 1

-/* Every guest maps the core switcher code. */
-#define SHARED_SWITCHER_PAGES \
-	DIV_ROUND_UP(end_switcher_text - start_switcher_text, PAGE_SIZE)
-/* Pages for switcher itself, then two pages per cpu */
-#define TOTAL_SWITCHER_PAGES (SHARED_SWITCHER_PAGES + 2 * nr_cpu_ids)
+/* Page for Switcher text itself, then two pages per cpu */
+#define TOTAL_SWITCHER_PAGES (1 + 2 * nr_cpu_ids)

-/* We map at -4M (-2M for PAE) for ease of mapping (one PTE page). */
-#ifdef CONFIG_X86_PAE
-#define SWITCHER_ADDR 0xFFE00000
-#else
-#define SWITCHER_ADDR 0xFFC00000
-#endif
+/* Where we map the Switcher, in both Host and Guest. */
+extern unsigned long switcher_addr;

 /* Found in switcher.S */
 extern unsigned long default_idt_entries[];
@@ -110,7 +110,7 @@ int blk_rq_map_integrity_sg(struct request_queue *q, struct bio *bio,
 		if (!sg)
 			sg = sglist;
 		else {
-			sg->page_link &= ~0x02;
+			sg_unmark_end(sg);
 			sg = sg_next(sg);
 		}
@@ -143,7 +143,7 @@ __blk_segment_map_sg(struct request_queue *q, struct bio_vec *bvec,
 			 * termination bit to avoid doing a full
 			 * sg_init_table() in drivers for each command.
 			 */
-			(*sg)->page_link &= ~0x02;
+			sg_unmark_end(*sg);
 			*sg = sg_next(*sg);
 		}
@@ -124,7 +124,7 @@ obj-$(CONFIG_PPC_PS3)	+= ps3/
 obj-$(CONFIG_OF)	+= of/
 obj-$(CONFIG_SSB)	+= ssb/
 obj-$(CONFIG_BCMA)	+= bcma/
-obj-$(CONFIG_VHOST_NET)	+= vhost/
+obj-$(CONFIG_VHOST_RING) += vhost/
 obj-$(CONFIG_VLYNQ)	+= vlynq/
 obj-$(CONFIG_STAGING)	+= staging/
 obj-y			+= platform/
@@ -100,96 +100,103 @@ static inline struct virtblk_req *virtblk_alloc_req(struct virtio_blk *vblk,
 	return vbr;
 }

-static void virtblk_add_buf_wait(struct virtio_blk *vblk,
-				 struct virtblk_req *vbr,
-				 unsigned long out,
-				 unsigned long in)
+static int __virtblk_add_req(struct virtqueue *vq,
+			     struct virtblk_req *vbr,
+			     struct scatterlist *data_sg,
+			     bool have_data)
 {
-	DEFINE_WAIT(wait);
+	struct scatterlist hdr, status, cmd, sense, inhdr, *sgs[6];
+	unsigned int num_out = 0, num_in = 0;
+	int type = vbr->out_hdr.type & ~VIRTIO_BLK_T_OUT;

-	for (;;) {
-		prepare_to_wait_exclusive(&vblk->queue_wait, &wait,
-					  TASK_UNINTERRUPTIBLE);
+	sg_init_one(&hdr, &vbr->out_hdr, sizeof(vbr->out_hdr));
+	sgs[num_out++] = &hdr;

-		spin_lock_irq(vblk->disk->queue->queue_lock);
-		if (virtqueue_add_buf(vblk->vq, vbr->sg, out, in, vbr,
-				      GFP_ATOMIC) < 0) {
-			spin_unlock_irq(vblk->disk->queue->queue_lock);
-			io_schedule();
-		} else {
-			virtqueue_kick(vblk->vq);
-			spin_unlock_irq(vblk->disk->queue->queue_lock);
-			break;
-		}
+	/*
+	 * If this is a packet command we need a couple of additional headers.
+	 * Behind the normal outhdr we put a segment with the scsi command
+	 * block, and before the normal inhdr we put the sense data and the
+	 * inhdr with additional status information.
+	 */
+	if (type == VIRTIO_BLK_T_SCSI_CMD) {
+		sg_init_one(&cmd, vbr->req->cmd, vbr->req->cmd_len);
+		sgs[num_out++] = &cmd;
+	}

+	if (have_data) {
+		if (vbr->out_hdr.type & VIRTIO_BLK_T_OUT)
+			sgs[num_out++] = data_sg;
+		else
+			sgs[num_out + num_in++] = data_sg;
 	}

-	finish_wait(&vblk->queue_wait, &wait);
+	if (type == VIRTIO_BLK_T_SCSI_CMD) {
+		sg_init_one(&sense, vbr->req->sense, SCSI_SENSE_BUFFERSIZE);
+		sgs[num_out + num_in++] = &sense;
+		sg_init_one(&inhdr, &vbr->in_hdr, sizeof(vbr->in_hdr));
+		sgs[num_out + num_in++] = &inhdr;
+	}
+
+	sg_init_one(&status, &vbr->status, sizeof(vbr->status));
+	sgs[num_out + num_in++] = &status;
+
+	return virtqueue_add_sgs(vq, sgs, num_out, num_in, vbr, GFP_ATOMIC);
 }

-static inline void virtblk_add_req(struct virtblk_req *vbr,
-				   unsigned int out, unsigned int in)
+static void virtblk_add_req(struct virtblk_req *vbr, bool have_data)
 {
 	struct virtio_blk *vblk = vbr->vblk;
+	DEFINE_WAIT(wait);
+	int ret;

 	spin_lock_irq(vblk->disk->queue->queue_lock);
-	if (unlikely(virtqueue_add_buf(vblk->vq, vbr->sg, out, in, vbr,
-					GFP_ATOMIC) < 0)) {
+	while (unlikely((ret = __virtblk_add_req(vblk->vq, vbr, vbr->sg,
+						 have_data)) < 0)) {
+		prepare_to_wait_exclusive(&vblk->queue_wait, &wait,
+					  TASK_UNINTERRUPTIBLE);
+
 		spin_unlock_irq(vblk->disk->queue->queue_lock);
-		virtblk_add_buf_wait(vblk, vbr, out, in);
-		return;
+		io_schedule();
+		spin_lock_irq(vblk->disk->queue->queue_lock);
+
+		finish_wait(&vblk->queue_wait, &wait);
 	}
+
 	virtqueue_kick(vblk->vq);
 	spin_unlock_irq(vblk->disk->queue->queue_lock);
 }

-static int virtblk_bio_send_flush(struct virtblk_req *vbr)
+static void virtblk_bio_send_flush(struct virtblk_req *vbr)
 {
-	unsigned int out = 0, in = 0;
-
 	vbr->flags |= VBLK_IS_FLUSH;
 	vbr->out_hdr.type = VIRTIO_BLK_T_FLUSH;
 	vbr->out_hdr.sector = 0;
 	vbr->out_hdr.ioprio = 0;
-	sg_set_buf(&vbr->sg[out++], &vbr->out_hdr, sizeof(vbr->out_hdr));
-	sg_set_buf(&vbr->sg[out + in++], &vbr->status, sizeof(vbr->status));
-
-	virtblk_add_req(vbr, out, in);

-	return 0;
+	virtblk_add_req(vbr, false);
 }

-static int virtblk_bio_send_data(struct virtblk_req *vbr)
+static void virtblk_bio_send_data(struct virtblk_req *vbr)
 {
 	struct virtio_blk *vblk = vbr->vblk;
-	unsigned int num, out = 0, in = 0;
 	struct bio *bio = vbr->bio;
+	bool have_data;

 	vbr->flags &= ~VBLK_IS_FLUSH;
 	vbr->out_hdr.type = 0;
 	vbr->out_hdr.sector = bio->bi_sector;
 	vbr->out_hdr.ioprio = bio_prio(bio);

-	sg_set_buf(&vbr->sg[out++], &vbr->out_hdr, sizeof(vbr->out_hdr));
-
-	num = blk_bio_map_sg(vblk->disk->queue, bio, vbr->sg + out);
-
-	sg_set_buf(&vbr->sg[num + out + in++], &vbr->status,
-		   sizeof(vbr->status));
-
-	if (num) {
-		if (bio->bi_rw & REQ_WRITE) {
+	if (blk_bio_map_sg(vblk->disk->queue, bio, vbr->sg)) {
+		have_data = true;
+		if (bio->bi_rw & REQ_WRITE)
 			vbr->out_hdr.type |= VIRTIO_BLK_T_OUT;
-			out += num;
-		} else {
+		else
 			vbr->out_hdr.type |= VIRTIO_BLK_T_IN;
-			in += num;
-		}
-	}
+	} else
+		have_data = false;

-	virtblk_add_req(vbr, out, in);
-
-	return 0;
+	virtblk_add_req(vbr, have_data);
 }

 static void virtblk_bio_send_data_work(struct work_struct *work)
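Read together, the hunk above centralizes the buffer layout for all request types in __virtblk_add_req(); a summary of the sg group order it produces (the SCSI segments appear only for packet commands, which is why sgs[] holds at most six entries):

	/*
	 * Descriptor order built by __virtblk_add_req() above:
	 *
	 *   device-readable: out_hdr, [scsi cmd],      [data, for writes]
	 *   device-writable: [data, for reads], [sense, in_hdr], status
	 */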
@@ -298,7 +305,7 @@ static void virtblk_done(struct virtqueue *vq)
 static bool do_req(struct request_queue *q, struct virtio_blk *vblk,
 		   struct request *req)
 {
-	unsigned long num, out = 0, in = 0;
+	unsigned int num;
 	struct virtblk_req *vbr;

 	vbr = virtblk_alloc_req(vblk, GFP_ATOMIC);
@@ -335,40 +342,15 @@ static bool do_req(struct request_queue *q, struct virtio_blk *vblk,
 		}
 	}

-	sg_set_buf(&vblk->sg[out++], &vbr->out_hdr, sizeof(vbr->out_hdr));
-
-	/*
-	 * If this is a packet command we need a couple of additional headers.
-	 * Behind the normal outhdr we put a segment with the scsi command
-	 * block, and before the normal inhdr we put the sense data and the
-	 * inhdr with additional status information before the normal inhdr.
-	 */
-	if (vbr->req->cmd_type == REQ_TYPE_BLOCK_PC)
-		sg_set_buf(&vblk->sg[out++], vbr->req->cmd, vbr->req->cmd_len);
-
-	num = blk_rq_map_sg(q, vbr->req, vblk->sg + out);
-
-	if (vbr->req->cmd_type == REQ_TYPE_BLOCK_PC) {
-		sg_set_buf(&vblk->sg[num + out + in++], vbr->req->sense, SCSI_SENSE_BUFFERSIZE);
-		sg_set_buf(&vblk->sg[num + out + in++], &vbr->in_hdr,
-			   sizeof(vbr->in_hdr));
-	}
-
-	sg_set_buf(&vblk->sg[num + out + in++], &vbr->status,
-		   sizeof(vbr->status));
+	num = blk_rq_map_sg(q, vbr->req, vblk->sg);

 	if (num) {
-		if (rq_data_dir(vbr->req) == WRITE) {
+		if (rq_data_dir(vbr->req) == WRITE)
 			vbr->out_hdr.type |= VIRTIO_BLK_T_OUT;
-			out += num;
-		} else {
+		else
 			vbr->out_hdr.type |= VIRTIO_BLK_T_IN;
-			in += num;
-		}
 	}

-	if (virtqueue_add_buf(vblk->vq, vblk->sg, out, in, vbr,
-			      GFP_ATOMIC) < 0) {
+	if (__virtblk_add_req(vblk->vq, vbr, vblk->sg, num) < 0) {
 		mempool_free(vbr, vblk->pool);
 		return false;
 	}
@@ -539,6 +521,7 @@ static void virtblk_config_changed_work(struct work_struct *work)
 	struct virtio_device *vdev = vblk->vdev;
 	struct request_queue *q = vblk->disk->queue;
 	char cap_str_2[10], cap_str_10[10];
+	char *envp[] = { "RESIZE=1", NULL };
 	u64 capacity, size;

 	mutex_lock(&vblk->config_lock);
@@ -568,6 +551,7 @@ static void virtblk_config_changed_work(struct work_struct *work)

 	set_capacity(vblk->disk, capacity);
 	revalidate_disk(vblk->disk);
+	kobject_uevent_env(&disk_to_dev(vblk->disk)->kobj, KOBJ_CHANGE, envp);

 done:
 	mutex_unlock(&vblk->config_lock);
 }
@@ -47,7 +47,7 @@ static void register_buffer(u8 *buf, size_t size)
 	sg_init_one(&sg, buf, size);

 	/* There should always be room for one buffer. */
-	if (virtqueue_add_buf(vq, &sg, 0, 1, buf, GFP_KERNEL) < 0)
+	if (virtqueue_add_inbuf(vq, &sg, 1, buf, GFP_KERNEL) < 0)
 		BUG();

 	virtqueue_kick(vq);
@@ -78,8 +78,8 @@ struct ports_driver_data {
 };
 static struct ports_driver_data pdrvdata;

-DEFINE_SPINLOCK(pdrvdata_lock);
-DECLARE_COMPLETION(early_console_added);
+static DEFINE_SPINLOCK(pdrvdata_lock);
+static DECLARE_COMPLETION(early_console_added);

 /* This struct holds information that's relevant only for console ports */
 struct console {
@@ -503,7 +503,7 @@ static int add_inbuf(struct virtqueue *vq, struct port_buffer *buf)
 	sg_init_one(sg, buf->buf, buf->size);

-	ret = virtqueue_add_buf(vq, sg, 0, 1, buf, GFP_ATOMIC);
+	ret = virtqueue_add_inbuf(vq, sg, 1, buf, GFP_ATOMIC);
 	virtqueue_kick(vq);
 	if (!ret)
 		ret = vq->num_free;
@@ -572,7 +572,7 @@ static ssize_t __send_control_msg(struct ports_device *portdev, u32 port_id,
 	sg_init_one(sg, &cpkt, sizeof(cpkt));

 	spin_lock(&portdev->c_ovq_lock);
-	if (virtqueue_add_buf(vq, sg, 1, 0, &cpkt, GFP_ATOMIC) == 0) {
+	if (virtqueue_add_outbuf(vq, sg, 1, &cpkt, GFP_ATOMIC) == 0) {
 		virtqueue_kick(vq);
 		while (!virtqueue_get_buf(vq, &len))
 			cpu_relax();
@@ -622,7 +622,7 @@ static ssize_t __send_to_port(struct port *port, struct scatterlist *sg,
 	reclaim_consumed_buffers(port);

-	err = virtqueue_add_buf(out_vq, sg, nents, 0, data, GFP_ATOMIC);
+	err = virtqueue_add_outbuf(out_vq, sg, nents, data, GFP_ATOMIC);

 	/* Tell Host to go! */
 	virtqueue_kick(out_vq);
@@ -1040,7 +1040,7 @@ static int port_fops_open(struct inode *inode, struct file *filp)
 	spin_lock_irq(&port->inbuf_lock);
 	if (port->guest_connected) {
 		spin_unlock_irq(&port->inbuf_lock);
-		ret = -EMFILE;
+		ret = -EBUSY;
 		goto out;
 	}
@@ -1202,7 +1202,7 @@ int __init virtio_cons_early_init(int (*put_chars)(u32, const char *, int))
 	return hvc_instantiate(0, 0, &hv_ops);
 }

-int init_port_console(struct port *port)
+static int init_port_console(struct port *port)
 {
 	int ret;
@@ -5,10 +5,9 @@ config LGUEST
 	---help---
 	  This is a very simple module which allows you to run
 	  multiple instances of the same Linux kernel, using the
-	  "lguest" command found in the Documentation/virtual/lguest
-	  directory.
+	  "lguest" command found in the tools/lguest directory.

 	  Note that "lguest" is pronounced to rhyme with "fell quest",
-	  not "rustyvisor". See Documentation/virtual/lguest/lguest.txt.
+	  not "rustyvisor". See tools/lguest/lguest.txt.

 	  If unsure, say N. If curious, say M. If masochistic, say Y.
@@ -20,9 +20,9 @@
 #include <asm/asm-offsets.h>
 #include "lg.h"

+unsigned long switcher_addr;
+struct page **lg_switcher_pages;
 static struct vm_struct *switcher_vma;
-static struct page **switcher_page;

 /* This One Big lock protects all inter-guest data structures. */
 DEFINE_MUTEX(lguest_lock);
@@ -52,13 +52,21 @@ static __init int map_switcher(void)
 	 * easy.
 	 */

+	/* We assume Switcher text fits into a single page. */
+	if (end_switcher_text - start_switcher_text > PAGE_SIZE) {
+		printk(KERN_ERR "lguest: switcher text too large (%zu)\n",
+		       end_switcher_text - start_switcher_text);
+		return -EINVAL;
+	}
+
 	/*
 	 * We allocate an array of struct page pointers. map_vm_area() wants
 	 * this, rather than just an array of pages.
 	 */
-	switcher_page = kmalloc(sizeof(switcher_page[0])*TOTAL_SWITCHER_PAGES,
-				GFP_KERNEL);
-	if (!switcher_page) {
+	lg_switcher_pages = kmalloc(sizeof(lg_switcher_pages[0])
+				    * TOTAL_SWITCHER_PAGES,
+				    GFP_KERNEL);
+	if (!lg_switcher_pages) {
 		err = -ENOMEM;
 		goto out;
 	}
@@ -68,32 +76,29 @@ static __init int map_switcher(void)
 	 * so we make sure they're zeroed.
 	 */
 	for (i = 0; i < TOTAL_SWITCHER_PAGES; i++) {
-		switcher_page[i] = alloc_page(GFP_KERNEL|__GFP_ZERO);
-		if (!switcher_page[i]) {
+		lg_switcher_pages[i] = alloc_page(GFP_KERNEL|__GFP_ZERO);
+		if (!lg_switcher_pages[i]) {
 			err = -ENOMEM;
 			goto free_some_pages;
 		}
 	}

 	/*
-	 * First we check that the Switcher won't overlap the fixmap area at
-	 * the top of memory.  It's currently nowhere near, but it could have
-	 * very strange effects if it ever happened.
+	 * We place the Switcher underneath the fixmap area, which is the
+	 * highest virtual address we can get.  This is important, since we
+	 * tell the Guest it can't access this memory, so we want its ceiling
+	 * as high as possible.
 	 */
-	if (SWITCHER_ADDR + (TOTAL_SWITCHER_PAGES+1)*PAGE_SIZE > FIXADDR_START){
-		err = -ENOMEM;
-		printk("lguest: mapping switcher would thwack fixmap\n");
-		goto free_pages;
-	}
+	switcher_addr = FIXADDR_START - (TOTAL_SWITCHER_PAGES+1)*PAGE_SIZE;

 	/*
-	 * Now we reserve the "virtual memory area" we want: 0xFFC00000
-	 * (SWITCHER_ADDR).  We might not get it in theory, but in practice
-	 * it's worked so far.  The end address needs +1 because __get_vm_area
-	 * allocates an extra guard page, so we need space for that.
+	 * Now we reserve the "virtual memory area" we want.  We might
+	 * not get it in theory, but in practice it's worked so far.
+	 * The end address needs +1 because __get_vm_area allocates an
+	 * extra guard page, so we need space for that.
 	 */
 	switcher_vma = __get_vm_area(TOTAL_SWITCHER_PAGES * PAGE_SIZE,
-				     VM_ALLOC, SWITCHER_ADDR, SWITCHER_ADDR
+				     VM_ALLOC, switcher_addr, switcher_addr
 				     + (TOTAL_SWITCHER_PAGES+1) * PAGE_SIZE);
 	if (!switcher_vma) {
 		err = -ENOMEM;
@@ -103,12 +108,12 @@ static __init int map_switcher(void)
 	/*
 	 * This code actually sets up the pages we've allocated to appear at
-	 * SWITCHER_ADDR. map_vm_area() takes the vma we allocated above, the
+	 * switcher_addr. map_vm_area() takes the vma we allocated above, the
 	 * kind of pages we're mapping (kernel pages), and a pointer to our
 	 * array of struct pages. It increments that pointer, but we don't
 	 * care.
 	 */
-	pagep = switcher_page;
+	pagep = lg_switcher_pages;
 	err = map_vm_area(switcher_vma, PAGE_KERNEL_EXEC, &pagep);
 	if (err) {
 		printk("lguest: map_vm_area failed: %i\n", err);
@@ -133,8 +138,8 @@ static __init int map_switcher(void)
 	i = TOTAL_SWITCHER_PAGES;
 free_some_pages:
 	for (--i; i >= 0; i--)
-		__free_pages(switcher_page[i], 0);
-	kfree(switcher_page);
+		__free_pages(lg_switcher_pages[i], 0);
+	kfree(lg_switcher_pages);
 out:
 	return err;
 }
@@ -149,8 +154,8 @@ static void unmap_switcher(void)
 	vunmap(switcher_vma->addr);
 	/* Now we just need to free the pages we copied the switcher into */
 	for (i = 0; i < TOTAL_SWITCHER_PAGES; i++)
-		__free_pages(switcher_page[i], 0);
-	kfree(switcher_page);
+		__free_pages(lg_switcher_pages[i], 0);
+	kfree(lg_switcher_pages);
 }

 /*H:032
@@ -323,15 +328,10 @@ static int __init init(void)
 	if (err)
 		goto out;

-	/* Now we set up the pagetable implementation for the Guests. */
-	err = init_pagetables(switcher_page, SHARED_SWITCHER_PAGES);
-	if (err)
-		goto unmap;
-
 	/* We might need to reserve an interrupt vector. */
 	err = init_interrupts();
 	if (err)
-		goto free_pgtables;
+		goto unmap;

 	/* /dev/lguest needs to be registered. */
 	err = lguest_device_init();
@@ -346,8 +346,6 @@ static int __init init(void)
 free_interrupts:
 	free_interrupts();
-free_pgtables:
-	free_pagetables();
 unmap:
 	unmap_switcher();
 out:
@@ -359,7 +357,6 @@ static void __exit fini(void)
 {
 	lguest_device_remove();
 	free_interrupts();
-	free_pagetables();
 	unmap_switcher();
 	lguest_arch_host_fini();
@@ -14,11 +14,10 @@

 #include <asm/lguest.h>

-void free_pagetables(void);
-int init_pagetables(struct page **switcher_page, unsigned int pages);
-
 struct pgdir {
 	unsigned long gpgdir;
+	bool switcher_mapped;
+	int last_host_cpu;
 	pgd_t *pgdir;
 };
@@ -124,6 +123,7 @@ bool lguest_address_ok(const struct lguest *lg,
 		       unsigned long addr, unsigned long len);
 void __lgread(struct lg_cpu *, void *, unsigned long, unsigned);
 void __lgwrite(struct lg_cpu *, unsigned long, const void *, unsigned);
+extern struct page **lg_switcher_pages;

 /*H:035
  * Using memory-copy operations like that is usually inconvient, so we
@@ -250,13 +250,13 @@ static ssize_t read(struct file *file, char __user *user, size_t size,loff_t*o)
  */
 static int lg_cpu_start(struct lg_cpu *cpu, unsigned id, unsigned long start_ip)
 {
-	/* We have a limited number the number of CPUs in the lguest struct. */
+	/* We have a limited number of CPUs in the lguest struct. */
 	if (id >= ARRAY_SIZE(cpu->lg->cpus))
 		return -EINVAL;

 	/* Set up this CPU's id, and pointer back to the lguest struct. */
 	cpu->id = id;
-	cpu->lg = container_of((cpu - id), struct lguest, cpus[0]);
+	cpu->lg = container_of(cpu, struct lguest, cpus[id]);
 	cpu->lg->nr_cpus++;

 	/* Each CPU has a timer it can set. */
@@ -270,7 +270,7 @@ static int lg_cpu_start(struct lg_cpu *cpu, unsigned id, unsigned long start_ip)
 	if (!cpu->regs_page)
 		return -ENOMEM;

-	/* We actually put the registers at the bottom of the page. */
+	/* We actually put the registers at the end of the page. */
 	cpu->regs = (void *)cpu->regs_page + PAGE_SIZE - sizeof(*cpu->regs);

 	/*
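The container_of() change above is behaviour-preserving: the old code stepped the pointer back to cpus[0] by hand, while the new form lets offsetof() account for the array index. A standalone check of the equivalence (plain C; the types and simplified container_of macro are illustrative, not the kernel's):

	#include <assert.h>
	#include <stddef.h>

	#define container_of(ptr, type, member) \
		((type *)((char *)(ptr) - offsetof(type, member)))

	struct lguest_like {
		int nr_cpus;
		struct { unsigned id; } cpus[4];
	};

	int main(void)
	{
		struct lguest_like lg;
		unsigned id = 2;

		/* Old form: walk back to &cpus[0], subtract its offset. */
		assert(container_of(&lg.cpus[id] - id, struct lguest_like,
				    cpus[0]) == &lg);
		/* New form: subtract the offset of cpus[id] directly. */
		assert(container_of(&lg.cpus[id], struct lguest_like,
				    cpus[id]) == &lg);
		return 0;
	}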
This diff is collapsed.
@@ -59,14 +59,13 @@ static struct {
 /* Offset from where switcher.S was compiled to where we've copied it */
 static unsigned long switcher_offset(void)
 {
-	return SWITCHER_ADDR - (unsigned long)start_switcher_text;
+	return switcher_addr - (unsigned long)start_switcher_text;
 }

-/* This cpu's struct lguest_pages. */
+/* This cpu's struct lguest_pages (after the Switcher text page) */
 static struct lguest_pages *lguest_pages(unsigned int cpu)
 {
-	return &(((struct lguest_pages *)
-		(SWITCHER_ADDR + SHARED_SWITCHER_PAGES*PAGE_SIZE))[cpu]);
+	return &(((struct lguest_pages *)(switcher_addr + PAGE_SIZE))[cpu]);
 }

 static DEFINE_PER_CPU(struct lg_cpu *, lg_last_cpu);
@@ -40,3 +40,17 @@ config CAIF_HSI
 	  The caif low level driver for CAIF over HSI.
 	  Be aware that if you enable this then you also need to
 	  enable a low-level HSI driver.
+
+config CAIF_VIRTIO
+	tristate "CAIF virtio transport driver"
+	depends on CAIF
+	select VHOST_RING
+	select VIRTIO
+	select GENERIC_ALLOCATOR
+	default n
+	---help---
+	  The caif driver for CAIF over Virtio.
+
+if CAIF_VIRTIO
+source "drivers/vhost/Kconfig"
+endif
@@ -9,3 +9,6 @@ obj-$(CONFIG_CAIF_SPI_SLAVE) += cfspi_slave.o

 # HSI interface
 obj-$(CONFIG_CAIF_HSI) += caif_hsi.o
+
+# Virtio interface
+obj-$(CONFIG_CAIF_VIRTIO) += caif_virtio.o
This diff is collapsed.
@@ -39,7 +39,6 @@ module_param(gso, bool, 0444);
 #define MAX_PACKET_LEN (ETH_HLEN + VLAN_HLEN + ETH_DATA_LEN)
 #define GOOD_COPY_LEN	128

-#define VIRTNET_SEND_COMMAND_SG_MAX    2
 #define VIRTNET_DRIVER_VERSION "1.0.0"

 struct virtnet_stats {
@@ -444,7 +443,7 @@ static int add_recvbuf_small(struct receive_queue *rq, gfp_t gfp)

 	skb_to_sgvec(skb, rq->sg + 1, 0, skb->len);

-	err = virtqueue_add_buf(rq->vq, rq->sg, 0, 2, skb, gfp);
+	err = virtqueue_add_inbuf(rq->vq, rq->sg, 2, skb, gfp);
 	if (err < 0)
 		dev_kfree_skb(skb);
@@ -489,8 +488,8 @@ static int add_recvbuf_big(struct receive_queue *rq, gfp_t gfp)
 	/* chain first in list head */
 	first->private = (unsigned long)list;
-	err = virtqueue_add_buf(rq->vq, rq->sg, 0, MAX_SKB_FRAGS + 2,
-				first, gfp);
+	err = virtqueue_add_inbuf(rq->vq, rq->sg, MAX_SKB_FRAGS + 2,
+				  first, gfp);
 	if (err < 0)
 		give_pages(rq, first);
@@ -508,7 +507,7 @@ static int add_recvbuf_mergeable(struct receive_queue *rq, gfp_t gfp)
 	sg_init_one(rq->sg, page_address(page), PAGE_SIZE);

-	err = virtqueue_add_buf(rq->vq, rq->sg, 0, 1, page, gfp);
+	err = virtqueue_add_inbuf(rq->vq, rq->sg, 1, page, gfp);
 	if (err < 0)
 		give_pages(rq, page);
@@ -582,7 +581,7 @@ static void refill_work(struct work_struct *work)
 	bool still_empty;
 	int i;

-	for (i = 0; i < vi->max_queue_pairs; i++) {
+	for (i = 0; i < vi->curr_queue_pairs; i++) {
 		struct receive_queue *rq = &vi->rq[i];

 		napi_disable(&rq->napi);
@@ -637,7 +636,7 @@ static int virtnet_open(struct net_device *dev)
 	struct virtnet_info *vi = netdev_priv(dev);
 	int i;

-	for (i = 0; i < vi->max_queue_pairs; i++) {
+	for (i = 0; i < vi->curr_queue_pairs; i++) {
 		/* Make sure we have some buffers: if oom use wq. */
 		if (!try_fill_recv(&vi->rq[i], GFP_KERNEL))
 			schedule_delayed_work(&vi->refill, 0);
@@ -711,8 +710,7 @@ static int xmit_skb(struct send_queue *sq, struct sk_buff *skb)
 	sg_set_buf(sq->sg, &hdr->hdr, sizeof hdr->hdr);

 	num_sg = skb_to_sgvec(skb, sq->sg + 1, 0, skb->len) + 1;
-	return virtqueue_add_buf(sq->vq, sq->sg, num_sg,
-				 0, skb, GFP_ATOMIC);
+	return virtqueue_add_outbuf(sq->vq, sq->sg, num_sg, skb, GFP_ATOMIC);
 }

 static netdev_tx_t start_xmit(struct sk_buff *skb, struct net_device *dev)
@@ -767,32 +765,35 @@ static netdev_tx_t start_xmit(struct sk_buff *skb, struct net_device *dev)
  * never fail unless improperly formated.
  */
 static bool virtnet_send_command(struct virtnet_info *vi, u8 class, u8 cmd,
-				 struct scatterlist *data, int out, int in)
+				 struct scatterlist *out,
+				 struct scatterlist *in)
 {
-	struct scatterlist *s, sg[VIRTNET_SEND_COMMAND_SG_MAX + 2];
+	struct scatterlist *sgs[4], hdr, stat;
 	struct virtio_net_ctrl_hdr ctrl;
 	virtio_net_ctrl_ack status = ~0;
-	unsigned int tmp;
-	int i;
+	unsigned out_num = 0, in_num = 0, tmp;

 	/* Caller should know better */
-	BUG_ON(!virtio_has_feature(vi->vdev, VIRTIO_NET_F_CTRL_VQ) ||
-	       (out + in > VIRTNET_SEND_COMMAND_SG_MAX));
-
-	out++; /* Add header */
-	in++; /* Add return status */
+	BUG_ON(!virtio_has_feature(vi->vdev, VIRTIO_NET_F_CTRL_VQ));

 	ctrl.class = class;
 	ctrl.cmd = cmd;
+	/* Add header */
+	sg_init_one(&hdr, &ctrl, sizeof(ctrl));
+	sgs[out_num++] = &hdr;

-	sg_init_table(sg, out + in);
+	if (out)
+		sgs[out_num++] = out;
+	if (in)
+		sgs[out_num + in_num++] = in;

-	sg_set_buf(&sg[0], &ctrl, sizeof(ctrl));
-	for_each_sg(data, s, out + in - 2, i)
-		sg_set_buf(&sg[i + 1], sg_virt(s), s->length);
-	sg_set_buf(&sg[out + in - 1], &status, sizeof(status));
+	/* Add return status. */
+	sg_init_one(&stat, &status, sizeof(status));
+	sgs[out_num + in_num++] = &stat;

-	BUG_ON(virtqueue_add_buf(vi->cvq, sg, out, in, vi, GFP_ATOMIC) < 0);
+	BUG_ON(out_num + in_num > ARRAY_SIZE(sgs));
+	BUG_ON(virtqueue_add_sgs(vi->cvq, sgs, out_num, in_num, vi, GFP_ATOMIC)
+	       < 0);

 	virtqueue_kick(vi->cvq);
@@ -821,7 +822,7 @@ static int virtnet_set_mac_address(struct net_device *dev, void *p)
 		sg_init_one(&sg, addr->sa_data, dev->addr_len);
 		if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_MAC,
 					  VIRTIO_NET_CTRL_MAC_ADDR_SET,
-					  &sg, 1, 0)) {
+					  &sg, NULL)) {
 			dev_warn(&vdev->dev,
 				 "Failed to set mac address by vq command.\n");
 			return -EINVAL;
@@ -889,8 +890,7 @@ static void virtnet_ack_link_announce(struct virtnet_info *vi)
 {
 	rtnl_lock();
 	if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_ANNOUNCE,
-				  VIRTIO_NET_CTRL_ANNOUNCE_ACK, NULL,
-				  0, 0))
+				  VIRTIO_NET_CTRL_ANNOUNCE_ACK, NULL, NULL))
 		dev_warn(&vi->dev->dev, "Failed to ack link announce.\n");
 	rtnl_unlock();
 }
@@ -900,6 +900,7 @@ static int virtnet_set_queues(struct virtnet_info *vi, u16 queue_pairs)
 	struct scatterlist sg;
 	struct virtio_net_ctrl_mq s;
 	struct net_device *dev = vi->dev;
+	int i;

 	if (!vi->has_cvq || !virtio_has_feature(vi->vdev, VIRTIO_NET_F_MQ))
 		return 0;
@@ -908,12 +909,16 @@ static int virtnet_set_queues(struct virtnet_info *vi, u16 queue_pairs)
 	sg_init_one(&sg, &s, sizeof(s));

 	if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_MQ,
-				  VIRTIO_NET_CTRL_MQ_VQ_PAIRS_SET, &sg, 1, 0)){
+				  VIRTIO_NET_CTRL_MQ_VQ_PAIRS_SET, &sg, NULL)) {
 		dev_warn(&dev->dev, "Fail to set num of queue pairs to %d\n",
 			 queue_pairs);
 		return -EINVAL;
-	} else
+	} else {
+		for (i = vi->curr_queue_pairs; i < queue_pairs; i++)
+			if (!try_fill_recv(&vi->rq[i], GFP_KERNEL))
+				schedule_delayed_work(&vi->refill, 0);
 		vi->curr_queue_pairs = queue_pairs;
+	}

 	return 0;
 }
@@ -955,7 +960,7 @@ static void virtnet_set_rx_mode(struct net_device *dev)
 	if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_RX,
 				  VIRTIO_NET_CTRL_RX_PROMISC,
-				  sg, 1, 0))
+				  sg, NULL))
 		dev_warn(&dev->dev, "Failed to %sable promisc mode.\n",
 			 promisc ? "en" : "dis");
@@ -963,7 +968,7 @@ static void virtnet_set_rx_mode(struct net_device *dev)
 	if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_RX,
 				  VIRTIO_NET_CTRL_RX_ALLMULTI,
-				  sg, 1, 0))
+				  sg, NULL))
 		dev_warn(&dev->dev, "Failed to %sable allmulti mode.\n",
 			 allmulti ? "en" : "dis");
@@ -1000,7 +1005,7 @@ static void virtnet_set_rx_mode(struct net_device *dev)
 	if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_MAC,
 				  VIRTIO_NET_CTRL_MAC_TABLE_SET,
-				  sg, 2, 0))
+				  sg, NULL))
 		dev_warn(&dev->dev, "Failed to set MAC fitler table.\n");

 	kfree(buf);
@@ -1015,7 +1020,7 @@ static int virtnet_vlan_rx_add_vid(struct net_device *dev,
 	sg_init_one(&sg, &vid, sizeof(vid));

 	if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_VLAN,
-				  VIRTIO_NET_CTRL_VLAN_ADD, &sg, 1, 0))
+				  VIRTIO_NET_CTRL_VLAN_ADD, &sg, NULL))
 		dev_warn(&dev->dev, "Failed to add VLAN ID %d.\n", vid);
 	return 0;
 }
@@ -1029,7 +1034,7 @@ static int virtnet_vlan_rx_kill_vid(struct net_device *dev,
 	sg_init_one(&sg, &vid, sizeof(vid));

 	if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_VLAN,
-				  VIRTIO_NET_CTRL_VLAN_DEL, &sg, 1, 0))
+				  VIRTIO_NET_CTRL_VLAN_DEL, &sg, NULL))
 		dev_warn(&dev->dev, "Failed to kill VLAN ID %d.\n", vid);
 	return 0;
 }
@@ -1570,7 +1575,7 @@ static int virtnet_probe(struct virtio_device *vdev)
 	}

 	/* Last of all, set up some receive buffers. */
-	for (i = 0; i < vi->max_queue_pairs; i++) {
+	for (i = 0; i < vi->curr_queue_pairs; i++) {
 		try_fill_recv(&vi->rq[i], GFP_KERNEL);

 		/* If we didn't even get one input buffer, we're useless. */
@@ -1694,7 +1699,7 @@ static int virtnet_restore(struct virtio_device *vdev)
 	netif_device_attach(vi->dev);

-	for (i = 0; i < vi->max_queue_pairs; i++)
+	for (i = 0; i < vi->curr_queue_pairs; i++)
 		if (!try_fill_recv(&vi->rq[i], GFP_KERNEL))
 			schedule_delayed_work(&vi->refill, 0);
@@ -757,14 +757,14 @@ int rpmsg_send_offchannel_raw(struct rpmsg_channel *rpdev, u32 src, u32 dst,
 	mutex_lock(&vrp->tx_lock);

 	/* add message to the remote processor's virtqueue */
-	err = virtqueue_add_buf(vrp->svq, &sg, 1, 0, msg, GFP_KERNEL);
+	err = virtqueue_add_outbuf(vrp->svq, &sg, 1, msg, GFP_KERNEL);
 	if (err) {
 		/*
 		 * need to reclaim the buffer here, otherwise it's lost
 		 * (memory won't leak, but rpmsg won't use it again for TX).
 		 * this will wait for a buffer management overhaul.
 		 */
-		dev_err(dev, "virtqueue_add_buf failed: %d\n", err);
+		dev_err(dev, "virtqueue_add_outbuf failed: %d\n", err);
 		goto out;
 	}
@@ -839,7 +839,7 @@ static void rpmsg_recv_done(struct virtqueue *rvq)
 	sg_init_one(&sg, msg, RPMSG_BUF_SIZE);

 	/* add the buffer back to the remote processor's virtqueue */
-	err = virtqueue_add_buf(vrp->rvq, &sg, 0, 1, msg, GFP_KERNEL);
+	err = virtqueue_add_inbuf(vrp->rvq, &sg, 1, msg, GFP_KERNEL);
 	if (err < 0) {
 		dev_err(dev, "failed to add a virtqueue buffer: %d\n", err);
 		return;
@@ -972,7 +972,7 @@ static int rpmsg_probe(struct virtio_device *vdev)
 		sg_init_one(&sg, cpu_addr, RPMSG_BUF_SIZE);

-		err = virtqueue_add_buf(vrp->rvq, &sg, 0, 1, cpu_addr,
-					GFP_KERNEL);
+		err = virtqueue_add_inbuf(vrp->rvq, &sg, 1, cpu_addr,
+					  GFP_KERNEL);
 		WARN_ON(err); /* sanity check; this can't really happen */
 	}
This diff is collapsed.
 config VHOST_NET
 	tristate "Host kernel accelerator for virtio net"
 	depends on NET && EVENTFD && (TUN || !TUN) && (MACVTAP || !MACVTAP)
+	select VHOST_RING
 	---help---
 	  This kernel module can be loaded in host kernel to accelerate
 	  guest networking with virtio_net. Not to be confused with virtio_net
@@ -12,7 +13,14 @@ config VHOST_NET
 config VHOST_SCSI
 	tristate "VHOST_SCSI TCM fabric driver"
 	depends on TARGET_CORE && EVENTFD && m
+	select VHOST_RING
 	default n
 	---help---
 	Say M here to enable the vhost_scsi TCM fabric module
 	for use with virtio-scsi guests
+
+config VHOST_RING
+	tristate
+	---help---
+	  This option is selected by any driver which needs to access
+	  the host side of a virtio ring.
@@ -3,3 +3,5 @@ vhost_net-y := vhost.o net.o

 obj-$(CONFIG_VHOST_SCSI) += vhost_scsi.o
 vhost_scsi-y := scsi.o
+
+obj-$(CONFIG_VHOST_RING) += vringh.o
@@ -282,7 +282,9 @@ static long vhost_test_ioctl(struct file *f, unsigned int ioctl,
 		return vhost_test_reset_owner(n);
 	default:
 		mutex_lock(&n->dev.mutex);
-		r = vhost_dev_ioctl(&n->dev, ioctl, arg);
+		r = vhost_dev_ioctl(&n->dev, ioctl, argp);
+		if (r == -ENOIOCTLCMD)
+			r = vhost_vring_ioctl(&n->dev, ioctl, argp);
 		vhost_test_flush(n);
 		mutex_unlock(&n->dev.mutex);
 		return r;
This diff is collapsed.
@@ -108,7 +108,7 @@ static void tell_host(struct virtio_balloon *vb, struct virtqueue *vq)
 	sg_init_one(&sg, vb->pfns, sizeof(vb->pfns[0]) * vb->num_pfns);

 	/* We should always be able to add one buffer to an empty queue. */
-	if (virtqueue_add_buf(vq, &sg, 1, 0, vb, GFP_KERNEL) < 0)
+	if (virtqueue_add_outbuf(vq, &sg, 1, vb, GFP_KERNEL) < 0)
 		BUG();
 	virtqueue_kick(vq);
@@ -256,7 +256,7 @@ static void stats_handle_request(struct virtio_balloon *vb)
 	if (!virtqueue_get_buf(vq, &len))
 		return;
 	sg_init_one(&sg, vb->stats, sizeof(vb->stats));
-	if (virtqueue_add_buf(vq, &sg, 1, 0, vb, GFP_KERNEL) < 0)
+	if (virtqueue_add_outbuf(vq, &sg, 1, vb, GFP_KERNEL) < 0)
 		BUG();
 	virtqueue_kick(vq);
 }
@@ -341,7 +341,7 @@ static int init_vqs(struct virtio_balloon *vb)
 		 * use it to signal us later.
 		 */
 		sg_init_one(&sg, vb->stats, sizeof vb->stats);
-		if (virtqueue_add_buf(vb->stats_vq, &sg, 1, 0, vb, GFP_KERNEL)
+		if (virtqueue_add_outbuf(vb->stats_vq, &sg, 1, vb, GFP_KERNEL)
 		    < 0)
 			BUG();
 		virtqueue_kick(vb->stats_vq);
This diff is collapsed.
@@ -171,6 +171,22 @@ static inline void sg_mark_end(struct scatterlist *sg)
 	sg->page_link &= ~0x01;
 }

+/**
+ * sg_unmark_end - Undo setting the end of the scatterlist
+ * @sg:		 SG entry
+ *
+ * Description:
+ *   Removes the termination marker from the given entry of the scatterlist.
+ *
+ **/
+static inline void sg_unmark_end(struct scatterlist *sg)
+{
+#ifdef CONFIG_DEBUG_SG
+	BUG_ON(sg->sg_magic != SG_MAGIC);
+#endif
+	sg->page_link &= ~0x02;
+}
+
 /**
  * sg_phys - Return physical address of an sg entry
  * @sg:	     SG entry
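sg_unmark_end() pairs with sg_mark_end() when an sg array is refilled or extended across calls, which is exactly how the block-layer and net/9p hunks in this commit use it. A minimal sketch of the pattern (the helper name here is illustrative):

	#include <linux/scatterlist.h>

	/* Append one buffer to a reused sg array, keeping exactly one
	 * termination marker, mirroring pack_sg_list() in net/9p below. */
	static int sg_append_buf(struct scatterlist *sg, int index,
				 void *data, unsigned int len)
	{
		/* An earlier caller may have marked this slot as the end. */
		sg_unmark_end(&sg[index]);
		sg_set_buf(&sg[index++], data, len);
		/* Terminate at the last entry actually in use. */
		sg_mark_end(&sg[index - 1]);
		return index;
	}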
@@ -8,6 +8,7 @@
 #include <linux/device.h>
 #include <linux/mod_devicetable.h>
 #include <linux/gfp.h>
+#include <linux/vringh.h>

 /**
  * virtqueue - a queue to register buffers for sending or receiving.
@@ -40,6 +41,23 @@ int virtqueue_add_buf(struct virtqueue *vq,
 		      void *data,
 		      gfp_t gfp);

+int virtqueue_add_outbuf(struct virtqueue *vq,
+			 struct scatterlist sg[], unsigned int num,
+			 void *data,
+			 gfp_t gfp);
+
+int virtqueue_add_inbuf(struct virtqueue *vq,
+			struct scatterlist sg[], unsigned int num,
+			void *data,
+			gfp_t gfp);
+
+int virtqueue_add_sgs(struct virtqueue *vq,
+		      struct scatterlist *sgs[],
+		      unsigned int out_sgs,
+		      unsigned int in_sgs,
+		      void *data,
+		      gfp_t gfp);
+
 void virtqueue_kick(struct virtqueue *vq);

 bool virtqueue_kick_prepare(struct virtqueue *vq);
@@ -64,6 +82,7 @@ unsigned int virtqueue_get_vring_size(struct virtqueue *vq);
 * @dev: underlying device.
 * @id: the device type identification (used to match it with a driver).
 * @config: the configuration ops for this device.
+ * @vringh_config: configuration ops for host vrings.
 * @vqs: the list of virtqueues for this device.
 * @features: the features supported by both driver and device.
 * @priv: private pointer for the driver's use.
@@ -73,6 +92,7 @@ struct virtio_device {
 	struct device dev;
 	struct virtio_device_id id;
 	const struct virtio_config_ops *config;
+	const struct vringh_config_ops *vringh_config;
 	struct list_head vqs;
 	/* Note that this is a Linux set_bit-style bitmap. */
 	unsigned long features[1];
/*
* Copyright (C) ST-Ericsson AB 2012
* Author: Sjur Brændeland <sjur.brandeland@stericsson.com>
*
* This header is BSD licensed so
* anyone can use the definitions to implement compatible remote processors
*/
#ifndef VIRTIO_CAIF_H
#define VIRTIO_CAIF_H
#include <linux/types.h>
struct virtio_caif_transf_config {
u16 headroom;
u16 tailroom;
u32 mtu;
u8 reserved[4];
};
struct virtio_caif_config {
struct virtio_caif_transf_config uplink, downlink;
u8 reserved[8];
};
#endif
@@ -4,6 +4,63 @@
 #include <linux/irqreturn.h>
 #include <uapi/linux/virtio_ring.h>

+/*
+ * Barriers in virtio are tricky.  Non-SMP virtio guests can't assume
+ * they're not on an SMP host system, so they need to assume real
+ * barriers.  Non-SMP virtio hosts could skip the barriers, but does
+ * anyone care?
+ *
+ * For virtio_pci on SMP, we don't need to order with respect to MMIO
+ * accesses through relaxed memory I/O windows, so smp_mb() et al are
+ * sufficient.
+ *
+ * For using virtio to talk to real devices (eg. other heterogeneous
+ * CPUs) we do need real barriers.  In theory, we could be using both
+ * kinds of virtio, so it's a runtime decision, and the branch is
+ * actually quite cheap.
+ */
+
+#ifdef CONFIG_SMP
+static inline void virtio_mb(bool weak_barriers)
+{
+	if (weak_barriers)
+		smp_mb();
+	else
+		mb();
+}
+
+static inline void virtio_rmb(bool weak_barriers)
+{
+	if (weak_barriers)
+		smp_rmb();
+	else
+		rmb();
+}
+
+static inline void virtio_wmb(bool weak_barriers)
+{
+	if (weak_barriers)
+		smp_wmb();
+	else
+		wmb();
+}
+#else
+static inline void virtio_mb(bool weak_barriers)
+{
+	mb();
+}
+
+static inline void virtio_rmb(bool weak_barriers)
+{
+	rmb();
+}
+
+static inline void virtio_wmb(bool weak_barriers)
+{
+	wmb();
+}
+#endif
+
 struct virtio_device;
 struct virtqueue;
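Callers pass a per-device weak_barriers flag at each ordering point, so the SMP-guest and real-device cases share one call site. A sketch of the publish-then-notify pattern these helpers support (the ring state and helper name here are illustrative):

	#include <linux/virtio_ring.h>

	/* Make new descriptors visible before publishing the index that
	 * exposes them to the other side. */
	static inline void publish_avail_idx(struct vring *vr, u16 new_idx,
					     bool weak_barriers)
	{
		virtio_wmb(weak_barriers);
		vr->avail->idx = new_idx;
	}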
This diff is collapsed.
@@ -52,8 +52,8 @@ struct virtio_balloon_config
 #define VIRTIO_BALLOON_S_NR       6

 struct virtio_balloon_stat {
-	u16 tag;
-	u64 val;
+	__u16 tag;
+	__u64 val;
 } __attribute__((packed));

 #endif /* _LINUX_VIRTIO_BALLOON_H */
@@ -38,5 +38,6 @@
 #define VIRTIO_ID_SCSI		8 /* virtio scsi */
 #define VIRTIO_ID_9P		9 /* 9p virtio console */
 #define VIRTIO_ID_RPROC_SERIAL	11 /* virtio remoteproc serial link */
+#define VIRTIO_ID_CAIF		12 /* Virtio caif */

 #endif /* _LINUX_VIRTIO_IDS_H */
@@ -194,11 +194,14 @@ static int pack_sg_list(struct scatterlist *sg, int start,
 		if (s > count)
 			s = count;
 		BUG_ON(index > limit);
+		/* Make sure we don't terminate early. */
+		sg_unmark_end(&sg[index]);
 		sg_set_buf(&sg[index++], data, s);
 		count -= s;
 		data += s;
 	}
+	if (index-start)
+		sg_mark_end(&sg[index - 1]);
 	return index-start;
 }
@@ -236,12 +239,17 @@ pack_sg_list_p(struct scatterlist *sg, int start, int limit,
 		s = rest_of_page(data);
 		if (s > count)
 			s = count;
+		/* Make sure we don't terminate early. */
+		sg_unmark_end(&sg[index]);
 		sg_set_page(&sg[index++], pdata[i++], s, data_off);
 		data_off = 0;
 		data += s;
 		count -= s;
 		nr_pages--;
 	}
+
+	if (index-start)
+		sg_mark_end(&sg[index - 1]);
 	return index - start;
 }
@@ -256,9 +264,10 @@ static int
 p9_virtio_request(struct p9_client *client, struct p9_req_t *req)
 {
 	int err;
-	int in, out;
+	int in, out, out_sgs, in_sgs;
 	unsigned long flags;
 	struct virtio_chan *chan = client->trans;
+	struct scatterlist *sgs[2];

 	p9_debug(P9_DEBUG_TRANS, "9p debug: virtio request\n");
@@ -266,14 +275,19 @@ p9_virtio_request(struct p9_client *client, struct p9_req_t *req)
 req_retry:
 	spin_lock_irqsave(&chan->lock, flags);

+	out_sgs = in_sgs = 0;
 	/* Handle out VirtIO ring buffers */
 	out = pack_sg_list(chan->sg, 0,
 			   VIRTQUEUE_NUM, req->tc->sdata, req->tc->size);
+	if (out)
+		sgs[out_sgs++] = chan->sg;

 	in = pack_sg_list(chan->sg, out,
 			  VIRTQUEUE_NUM, req->rc->sdata, req->rc->capacity);
+	if (in)
+		sgs[out_sgs + in_sgs++] = chan->sg + out;

-	err = virtqueue_add_buf(chan->vq, chan->sg, out, in, req->tc,
-				GFP_ATOMIC);
+	err = virtqueue_add_sgs(chan->vq, sgs, out_sgs, in_sgs, req->tc,
+				GFP_ATOMIC);
 	if (err < 0) {
 		if (err == -ENOSPC) {
@@ -289,7 +303,7 @@ p9_virtio_request(struct p9_client *client, struct p9_req_t *req)
 		} else {
 			spin_unlock_irqrestore(&chan->lock, flags);
 			p9_debug(P9_DEBUG_TRANS,
-				 "virtio rpc add_buf returned failure\n");
+				 "virtio rpc add_sgs returned failure\n");
 			return -EIO;
 		}
 	}
@@ -351,11 +365,12 @@ p9_virtio_zc_request(struct p9_client *client, struct p9_req_t *req,
 		     char *uidata, char *uodata, int inlen,
 		     int outlen, int in_hdr_len, int kern_buf)
 {
-	int in, out, err;
+	int in, out, err, out_sgs, in_sgs;
 	unsigned long flags;
 	int in_nr_pages = 0, out_nr_pages = 0;
 	struct page **in_pages = NULL, **out_pages = NULL;
 	struct virtio_chan *chan = client->trans;
+	struct scatterlist *sgs[4];

 	p9_debug(P9_DEBUG_TRANS, "virtio request\n");
@@ -396,13 +411,22 @@ p9_virtio_zc_request(struct p9_client *client, struct p9_req_t *req,
 	req->status = REQ_STATUS_SENT;
 req_retry_pinned:
 	spin_lock_irqsave(&chan->lock, flags);
+
+	out_sgs = in_sgs = 0;
+
 	/* out data */
 	out = pack_sg_list(chan->sg, 0,
 			   VIRTQUEUE_NUM, req->tc->sdata, req->tc->size);

-	if (out_pages)
+	if (out)
+		sgs[out_sgs++] = chan->sg;
+
+	if (out_pages) {
+		sgs[out_sgs++] = chan->sg + out;
 		out += pack_sg_list_p(chan->sg, out, VIRTQUEUE_NUM,
 				      out_pages, out_nr_pages, uodata, outlen);
+	}

 	/*
 	 * Take care of in data
 	 * For example TREAD have 11.
@@ -412,11 +436,17 @@ p9_virtio_zc_request(struct p9_client *client, struct p9_req_t *req,
 	 */
 	in = pack_sg_list(chan->sg, out,
 			  VIRTQUEUE_NUM, req->rc->sdata, in_hdr_len);
-	if (in_pages)
+	if (in)
+		sgs[out_sgs + in_sgs++] = chan->sg + out;
+
+	if (in_pages) {
+		sgs[out_sgs + in_sgs++] = chan->sg + out + in;
 		in += pack_sg_list_p(chan->sg, out + in, VIRTQUEUE_NUM,
 				     in_pages, in_nr_pages, uidata, inlen);
+	}

-	err = virtqueue_add_buf(chan->vq, chan->sg, out, in, req->tc,
+	BUG_ON(out_sgs + in_sgs > ARRAY_SIZE(sgs));
+	err = virtqueue_add_sgs(chan->vq, sgs, out_sgs, in_sgs, req->tc,
 				GFP_ATOMIC);
 	if (err < 0) {
 		if (err == -ENOSPC) {
@@ -432,7 +462,7 @@ p9_virtio_zc_request(struct p9_client *client, struct p9_req_t *req,
 		} else {
 			spin_unlock_irqrestore(&chan->lock, flags);
 			p9_debug(P9_DEBUG_TRANS,
-				 "virtio rpc add_buf returned failure\n");
+				 "virtio rpc add_sgs returned failure\n");
 			err = -EIO;
 			goto err_out;
 		}
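The 9p conversion also shows why pack_sg_list() now terminates each run with sg_mark_end(): the ring code walks each group handed over in sgs[] via sg_next() until it hits the end marker, so every run must be a properly terminated list. The porting recipe for a virtqueue_add_buf() caller, condensed from the hunks above (not new code, just the pattern in one place):

	/* Pack out and in runs into one shared array, then hand each
	 * run to virtqueue_add_sgs() as its own group. */
	out = pack_sg_list(chan->sg, 0, VIRTQUEUE_NUM, req->tc->sdata,
			   req->tc->size);
	if (out)
		sgs[out_sgs++] = chan->sg;		/* out run at sg[0] */
	in = pack_sg_list(chan->sg, out, VIRTQUEUE_NUM, req->rc->sdata,
			  req->rc->capacity);
	if (in)
		sgs[out_sgs + in_sgs++] = chan->sg + out; /* in run follows */
	err = virtqueue_add_sgs(chan->vq, sgs, out_sgs, in_sgs, req->tc,
				GFP_ATOMIC);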
@@ -70,7 +70,7 @@ Running Lguest:

 - Run an lguest as root:

-      Documentation/virtual/lguest/lguest 64 vmlinux --tunnet=192.168.19.1 \
+      tools/lguest/lguest 64 vmlinux --tunnet=192.168.19.1 \
         --block=rootfile root=/dev/vda

 Explanation:
 all: test mod
-test: virtio_test
+test: virtio_test vringh_test
 virtio_test: virtio_ring.o virtio_test.o
+vringh_test: vringh_test.o vringh.o virtio_ring.o

-CFLAGS += -g -O2 -Wall -I. -I ../../usr/include/ -Wno-pointer-sign -fno-strict-overflow -MMD
-vpath %.c ../../drivers/virtio
+CFLAGS += -g -O2 -Wall -I. -I ../../usr/include/ -Wno-pointer-sign -fno-strict-overflow -fno-strict-aliasing -fno-common -MMD -U_FORTIFY_SOURCE
+vpath %.c ../../drivers/virtio ../../drivers/vhost
 mod:
 	${MAKE} -C `pwd`/../.. M=`pwd`/vhost_test
 .PHONY: all test mod clean
 clean:
-	${RM} *.o vhost_test/*.o vhost_test/.*.cmd \
+	${RM} *.o vringh_test virtio_test vhost_test/*.o vhost_test/.*.cmd \
 	      vhost_test/Module.symvers vhost_test/modules.order *.d
 -include *.d
#if defined(__i386__) || defined(__x86_64__)
#define barrier() asm volatile("" ::: "memory")
#define mb() __sync_synchronize()
#define smp_mb() mb()
# define smp_rmb() barrier()
# define smp_wmb() barrier()
/* Weak barriers should be used. If not - it's a bug */
# define rmb() abort()
# define wmb() abort()
#else
#error Please fill in barrier macros
#endif
#ifndef BUG_H
#define BUG_H
#define BUG_ON(__BUG_ON_cond) assert(!(__BUG_ON_cond))
#define BUILD_BUG_ON(x)
#define BUG() abort()
#endif /* BUG_H */
This diff is collapsed.
#define EXPORT_SYMBOL(sym)
#define EXPORT_SYMBOL_GPL(sym)
#define EXPORT_SYMBOL_GPL_FUTURE(sym)
#define EXPORT_UNUSED_SYMBOL(sym)
#define EXPORT_UNUSED_SYMBOL_GPL(sym)
#include "../../../include/linux/irqreturn.h"
This diff is collapsed.
#include "../../../include/linux/kern_levels.h"
#define printk printf
#define vprintk vprintf
#define DEFINE_RATELIMIT_STATE(name, interval_init, burst_init) int name = 0
#define __ratelimit(x) (*(x))
This diff is collapsed.
#include "../../../include/linux/virtio_ring.h"
#include "../../../include/linux/vringh.h"
This diff is collapsed.