Commit 736a2dd2 authored by Linus Torvalds

Merge tag 'virtio-next-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/rusty/linux

Pull virtio & lguest updates from Rusty Russell:
 "Lots of virtio work which wasn't quite ready for last merge window.

  Plus I dived into lguest again, reworking the pagetable code so we can
  move the switcher page: our fixmaps sometimes take more than 2MB now..."

Ugh.  Annoying conflicts with the tcm_vhost -> vhost_scsi rename.
Hopefully correctly resolved.

* tag 'virtio-next-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/rusty/linux: (57 commits)
  caif_virtio: Remove bouncing email addresses
  lguest: improve code readability in lg_cpu_start.
  virtio-net: fill only rx queues which are being used
  lguest: map Switcher below fixmap.
  lguest: cache last cpu we ran on.
  lguest: map Switcher text whenever we allocate a new pagetable.
  lguest: don't share Switcher PTE pages between guests.
  lguest: expost switcher_pages array (as lg_switcher_pages).
  lguest: extract shadow PTE walking / allocating.
  lguest: make check_gpte et. al return bool.
  lguest: assume Switcher text is a single page.
  lguest: rename switcher_page to switcher_pages.
  lguest: remove RESERVE_MEM constant.
  lguest: check vaddr not pgd for Switcher protection.
  lguest: prepare to make SWITCHER_ADDR a variable.
  virtio: console: replace EMFILE with EBUSY for already-open port
  virtio-scsi: reset virtqueue affinity when doing cpu hotplug
  virtio-scsi: introduce multiqueue support
  virtio-scsi: push vq lock/unlock into virtscsi_vq_done
  virtio-scsi: pass struct virtio_scsi to virtqueue completion function
  ...
parents 0b2e3b6b 01d779a1
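The common thread in the driver conversions below is a new virtqueue API: virtqueue_add_buf(), which took a single scatterlist array split by out/in counts, gives way to virtqueue_add_sgs(), which takes an array of scatterlists (device-readable lists first, device-writable lists after), plus virtqueue_add_outbuf()/virtqueue_add_inbuf() helpers for the single-list cases. A minimal sketch of the new calling convention, assuming a hypothetical request structure (sketch_req and its fields are illustrative, not from any one driver):

#include <linux/scatterlist.h>
#include <linux/virtio.h>

/* Hypothetical request: a readable header and payload, a writable status. */
struct sketch_req {
	u32 hdr;		/* stands in for a real request header */
	void *buf;
	unsigned int len;
	u8 status;
};

static int queue_sketch_req(struct virtqueue *vq, struct sketch_req *req)
{
	struct scatterlist hdr, payload, status;
	struct scatterlist *sgs[3];
	unsigned int num_out = 0, num_in = 0;

	sg_init_one(&hdr, &req->hdr, sizeof(req->hdr));
	sgs[num_out++] = &hdr;			/* device reads this */
	sg_init_one(&payload, req->buf, req->len);
	sgs[num_out++] = &payload;		/* device reads this */
	sg_init_one(&status, &req->status, sizeof(req->status));
	sgs[num_out + num_in++] = &status;	/* device writes this */

	/* Readable lists first (num_out), then writable ones (num_in). */
	return virtqueue_add_sgs(vq, sgs, num_out, num_in, req, GFP_ATOMIC);
}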
@@ -6,6 +6,3 @@ kvm/
- Kernel Virtual Machine. See also http://linux-kvm.org
uml/
- User Mode Linux, builds/runs Linux kernel as a userspace program.
-virtio.txt
-	- Text version of draft virtio spec.
-	  See http://ozlabs.org/~rusty/virtio-spec
@@ -8743,6 +8743,7 @@ F: drivers/virtio/
F: drivers/net/virtio_net.c
F: drivers/block/virtio_blk.c
F: include/linux/virtio_*.h
+F:	include/uapi/linux/virtio_*.h
VIRTIO HOST (VHOST)
M: "Michael S. Tsirkin" <mst@redhat.com>
......
@@ -11,18 +11,11 @@
#define GUEST_PL 1
-/* Every guest maps the core switcher code. */
-#define SHARED_SWITCHER_PAGES \
-	DIV_ROUND_UP(end_switcher_text - start_switcher_text, PAGE_SIZE)
-/* Pages for switcher itself, then two pages per cpu */
-#define TOTAL_SWITCHER_PAGES (SHARED_SWITCHER_PAGES + 2 * nr_cpu_ids)
-/* We map at -4M (-2M for PAE) for ease of mapping (one PTE page). */
-#ifdef CONFIG_X86_PAE
-#define SWITCHER_ADDR 0xFFE00000
-#else
-#define SWITCHER_ADDR 0xFFC00000
-#endif
+/* Page for Switcher text itself, then two pages per cpu */
+#define TOTAL_SWITCHER_PAGES (1 + 2 * nr_cpu_ids)
+/* Where we map the Switcher, in both Host and Guest. */
+extern unsigned long switcher_addr;
/* Found in switcher.S */
extern unsigned long default_idt_entries[];
......
@@ -110,7 +110,7 @@ int blk_rq_map_integrity_sg(struct request_queue *q, struct bio *bio,
if (!sg)
sg = sglist;
else {
-		sg->page_link &= ~0x02;
+		sg_unmark_end(sg);
sg = sg_next(sg);
}
......
@@ -143,7 +143,7 @@ __blk_segment_map_sg(struct request_queue *q, struct bio_vec *bvec,
* termination bit to avoid doing a full
* sg_init_table() in drivers for each command.
*/
-		(*sg)->page_link &= ~0x02;
+		sg_unmark_end(*sg);
*sg = sg_next(*sg);
}
......
@@ -124,7 +124,7 @@ obj-$(CONFIG_PPC_PS3)	+= ps3/
obj-$(CONFIG_OF) += of/
obj-$(CONFIG_SSB) += ssb/
obj-$(CONFIG_BCMA) += bcma/
-obj-$(CONFIG_VHOST_NET)	+= vhost/
+obj-$(CONFIG_VHOST_RING)	+= vhost/
obj-$(CONFIG_VLYNQ) += vlynq/
obj-$(CONFIG_STAGING) += staging/
obj-y += platform/
......
@@ -100,96 +100,103 @@ static inline struct virtblk_req *virtblk_alloc_req(struct virtio_blk *vblk,
	return vbr;
}
-static void virtblk_add_buf_wait(struct virtio_blk *vblk,
+static int __virtblk_add_req(struct virtqueue *vq,
			     struct virtblk_req *vbr,
-			     unsigned long out,
-			     unsigned long in)
+			     struct scatterlist *data_sg,
+			     bool have_data)
{
-	DEFINE_WAIT(wait);
+	struct scatterlist hdr, status, cmd, sense, inhdr, *sgs[6];
+	unsigned int num_out = 0, num_in = 0;
+	int type = vbr->out_hdr.type & ~VIRTIO_BLK_T_OUT;
-	for (;;) {
-		prepare_to_wait_exclusive(&vblk->queue_wait, &wait,
-					  TASK_UNINTERRUPTIBLE);
+	sg_init_one(&hdr, &vbr->out_hdr, sizeof(vbr->out_hdr));
+	sgs[num_out++] = &hdr;
-		spin_lock_irq(vblk->disk->queue->queue_lock);
-		if (virtqueue_add_buf(vblk->vq, vbr->sg, out, in, vbr,
-				      GFP_ATOMIC) < 0) {
-			spin_unlock_irq(vblk->disk->queue->queue_lock);
-			io_schedule();
-		} else {
-			virtqueue_kick(vblk->vq);
-			spin_unlock_irq(vblk->disk->queue->queue_lock);
-			break;
+	/*
+	 * If this is a packet command we need a couple of additional headers.
+	 * Behind the normal outhdr we put a segment with the scsi command
+	 * block, and before the normal inhdr we put the sense data and the
+	 * inhdr with additional status information.
+	 */
+	if (type == VIRTIO_BLK_T_SCSI_CMD) {
+		sg_init_one(&cmd, vbr->req->cmd, vbr->req->cmd_len);
+		sgs[num_out++] = &cmd;
	}
+	if (have_data) {
+		if (vbr->out_hdr.type & VIRTIO_BLK_T_OUT)
+			sgs[num_out++] = data_sg;
+		else
+			sgs[num_out + num_in++] = data_sg;
	}
-	finish_wait(&vblk->queue_wait, &wait);
+	if (type == VIRTIO_BLK_T_SCSI_CMD) {
+		sg_init_one(&sense, vbr->req->sense, SCSI_SENSE_BUFFERSIZE);
+		sgs[num_out + num_in++] = &sense;
+		sg_init_one(&inhdr, &vbr->in_hdr, sizeof(vbr->in_hdr));
+		sgs[num_out + num_in++] = &inhdr;
+	}
+	sg_init_one(&status, &vbr->status, sizeof(vbr->status));
+	sgs[num_out + num_in++] = &status;
+	return virtqueue_add_sgs(vq, sgs, num_out, num_in, vbr, GFP_ATOMIC);
}
-static inline void virtblk_add_req(struct virtblk_req *vbr,
-				   unsigned int out, unsigned int in)
+static void virtblk_add_req(struct virtblk_req *vbr, bool have_data)
{
	struct virtio_blk *vblk = vbr->vblk;
+	DEFINE_WAIT(wait);
+	int ret;
	spin_lock_irq(vblk->disk->queue->queue_lock);
-	if (unlikely(virtqueue_add_buf(vblk->vq, vbr->sg, out, in, vbr,
-					GFP_ATOMIC) < 0)) {
+	while (unlikely((ret = __virtblk_add_req(vblk->vq, vbr, vbr->sg,
+						 have_data)) < 0)) {
+		prepare_to_wait_exclusive(&vblk->queue_wait, &wait,
+					  TASK_UNINTERRUPTIBLE);
		spin_unlock_irq(vblk->disk->queue->queue_lock);
-		virtblk_add_buf_wait(vblk, vbr, out, in);
-		return;
+		io_schedule();
+		spin_lock_irq(vblk->disk->queue->queue_lock);
+		finish_wait(&vblk->queue_wait, &wait);
	}
	virtqueue_kick(vblk->vq);
	spin_unlock_irq(vblk->disk->queue->queue_lock);
}
-static int virtblk_bio_send_flush(struct virtblk_req *vbr)
+static void virtblk_bio_send_flush(struct virtblk_req *vbr)
{
-	unsigned int out = 0, in = 0;
	vbr->flags |= VBLK_IS_FLUSH;
	vbr->out_hdr.type = VIRTIO_BLK_T_FLUSH;
	vbr->out_hdr.sector = 0;
	vbr->out_hdr.ioprio = 0;
-	sg_set_buf(&vbr->sg[out++], &vbr->out_hdr, sizeof(vbr->out_hdr));
-	sg_set_buf(&vbr->sg[out + in++], &vbr->status, sizeof(vbr->status));
-	virtblk_add_req(vbr, out, in);
-	return 0;
+	virtblk_add_req(vbr, false);
}
-static int virtblk_bio_send_data(struct virtblk_req *vbr)
+static void virtblk_bio_send_data(struct virtblk_req *vbr)
{
	struct virtio_blk *vblk = vbr->vblk;
-	unsigned int num, out = 0, in = 0;
	struct bio *bio = vbr->bio;
+	bool have_data;
	vbr->flags &= ~VBLK_IS_FLUSH;
	vbr->out_hdr.type = 0;
	vbr->out_hdr.sector = bio->bi_sector;
	vbr->out_hdr.ioprio = bio_prio(bio);
-	sg_set_buf(&vbr->sg[out++], &vbr->out_hdr, sizeof(vbr->out_hdr));
-	num = blk_bio_map_sg(vblk->disk->queue, bio, vbr->sg + out);
-	sg_set_buf(&vbr->sg[num + out + in++], &vbr->status,
-		   sizeof(vbr->status));
-	if (num) {
-		if (bio->bi_rw & REQ_WRITE) {
+	if (blk_bio_map_sg(vblk->disk->queue, bio, vbr->sg)) {
+		have_data = true;
+		if (bio->bi_rw & REQ_WRITE)
			vbr->out_hdr.type |= VIRTIO_BLK_T_OUT;
-			out += num;
-		} else {
+		else
			vbr->out_hdr.type |= VIRTIO_BLK_T_IN;
-			in += num;
-		}
-	}
-	virtblk_add_req(vbr, out, in);
+	} else
+		have_data = false;
-	return 0;
+	virtblk_add_req(vbr, have_data);
}
static void virtblk_bio_send_data_work(struct work_struct *work)
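As a reading aid for the rework above: __virtblk_add_req() lays out at most six scatterlists in a fixed order, device-readable entries first, device-writable after, which is exactly the ordering virtqueue_add_sgs() expects:

/*
 * sgs[] slots as filled by __virtblk_add_req(), in order:
 *
 *   readable:  out_hdr, [scsi cmd], [data_sg if write]
 *   writable:  [data_sg if read], [sense], [in_hdr], status
 *
 * The bracketed entries exist only for SCSI packet commands or when
 * have_data is set, so data_sg occupies one slot at most and six slots
 * always suffice.
 */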
@@ -298,7 +305,7 @@ static void virtblk_done(struct virtqueue *vq)
static bool do_req(struct request_queue *q, struct virtio_blk *vblk,
		   struct request *req)
{
-	unsigned long num, out = 0, in = 0;
+	unsigned int num;
	struct virtblk_req *vbr;
struct virtblk_req *vbr;
vbr = virtblk_alloc_req(vblk, GFP_ATOMIC);
@@ -335,40 +342,15 @@ static bool do_req(struct request_queue *q, struct virtio_blk *vblk,
	}
	}
-	sg_set_buf(&vblk->sg[out++], &vbr->out_hdr, sizeof(vbr->out_hdr));
-	/*
-	 * If this is a packet command we need a couple of additional headers.
-	 * Behind the normal outhdr we put a segment with the scsi command
-	 * block, and before the normal inhdr we put the sense data and the
-	 * inhdr with additional status information before the normal inhdr.
-	 */
-	if (vbr->req->cmd_type == REQ_TYPE_BLOCK_PC)
-		sg_set_buf(&vblk->sg[out++], vbr->req->cmd, vbr->req->cmd_len);
-	num = blk_rq_map_sg(q, vbr->req, vblk->sg + out);
-	if (vbr->req->cmd_type == REQ_TYPE_BLOCK_PC) {
-		sg_set_buf(&vblk->sg[num + out + in++], vbr->req->sense, SCSI_SENSE_BUFFERSIZE);
-		sg_set_buf(&vblk->sg[num + out + in++], &vbr->in_hdr,
-			   sizeof(vbr->in_hdr));
-	}
-	sg_set_buf(&vblk->sg[num + out + in++], &vbr->status,
-		   sizeof(vbr->status));
+	num = blk_rq_map_sg(q, vbr->req, vblk->sg);
	if (num) {
-		if (rq_data_dir(vbr->req) == WRITE) {
+		if (rq_data_dir(vbr->req) == WRITE)
			vbr->out_hdr.type |= VIRTIO_BLK_T_OUT;
-			out += num;
-		} else {
+		else
			vbr->out_hdr.type |= VIRTIO_BLK_T_IN;
-			in += num;
-		}
	}
-	if (virtqueue_add_buf(vblk->vq, vblk->sg, out, in, vbr,
-			      GFP_ATOMIC) < 0) {
+	if (__virtblk_add_req(vblk->vq, vbr, vblk->sg, num) < 0) {
mempool_free(vbr, vblk->pool);
return false;
}
@@ -539,6 +521,7 @@ static void virtblk_config_changed_work(struct work_struct *work)
	struct virtio_device *vdev = vblk->vdev;
	struct request_queue *q = vblk->disk->queue;
	char cap_str_2[10], cap_str_10[10];
+	char *envp[] = { "RESIZE=1", NULL };
u64 capacity, size;
mutex_lock(&vblk->config_lock);
@@ -568,6 +551,7 @@ static void virtblk_config_changed_work(struct work_struct *work)
	set_capacity(vblk->disk, capacity);
	revalidate_disk(vblk->disk);
+	kobject_uevent_env(&disk_to_dev(vblk->disk)->kobj, KOBJ_CHANGE, envp);
done:
mutex_unlock(&vblk->config_lock);
}
......
@@ -47,7 +47,7 @@ static void register_buffer(u8 *buf, size_t size)
	sg_init_one(&sg, buf, size);
	/* There should always be room for one buffer. */
-	if (virtqueue_add_buf(vq, &sg, 0, 1, buf, GFP_KERNEL) < 0)
+	if (virtqueue_add_inbuf(vq, &sg, 1, buf, GFP_KERNEL) < 0)
BUG();
virtqueue_kick(vq);
......
@@ -78,8 +78,8 @@ struct ports_driver_data {
};
static struct ports_driver_data pdrvdata;
-DEFINE_SPINLOCK(pdrvdata_lock);
-DECLARE_COMPLETION(early_console_added);
+static DEFINE_SPINLOCK(pdrvdata_lock);
+static DECLARE_COMPLETION(early_console_added);
/* This struct holds information that's relevant only for console ports */
struct console {
@@ -503,7 +503,7 @@ static int add_inbuf(struct virtqueue *vq, struct port_buffer *buf)
	sg_init_one(sg, buf->buf, buf->size);
-	ret = virtqueue_add_buf(vq, sg, 0, 1, buf, GFP_ATOMIC);
+	ret = virtqueue_add_inbuf(vq, sg, 1, buf, GFP_ATOMIC);
virtqueue_kick(vq);
if (!ret)
ret = vq->num_free;
@@ -572,7 +572,7 @@ static ssize_t __send_control_msg(struct ports_device *portdev, u32 port_id,
	sg_init_one(sg, &cpkt, sizeof(cpkt));
	spin_lock(&portdev->c_ovq_lock);
-	if (virtqueue_add_buf(vq, sg, 1, 0, &cpkt, GFP_ATOMIC) == 0) {
+	if (virtqueue_add_outbuf(vq, sg, 1, &cpkt, GFP_ATOMIC) == 0) {
virtqueue_kick(vq);
while (!virtqueue_get_buf(vq, &len))
cpu_relax();
@@ -622,7 +622,7 @@ static ssize_t __send_to_port(struct port *port, struct scatterlist *sg,
	reclaim_consumed_buffers(port);
-	err = virtqueue_add_buf(out_vq, sg, nents, 0, data, GFP_ATOMIC);
+	err = virtqueue_add_outbuf(out_vq, sg, nents, data, GFP_ATOMIC);
/* Tell Host to go! */
virtqueue_kick(out_vq);
@@ -1040,7 +1040,7 @@ static int port_fops_open(struct inode *inode, struct file *filp)
	spin_lock_irq(&port->inbuf_lock);
	if (port->guest_connected) {
		spin_unlock_irq(&port->inbuf_lock);
-		ret = -EMFILE;
+		ret = -EBUSY;
goto out;
}
@@ -1202,7 +1202,7 @@ int __init virtio_cons_early_init(int (*put_chars)(u32, const char *, int))
	return hvc_instantiate(0, 0, &hv_ops);
}
-int init_port_console(struct port *port)
+static int init_port_console(struct port *port)
{
int ret;
......
@@ -5,10 +5,9 @@ config LGUEST
	---help---
	  This is a very simple module which allows you to run
	  multiple instances of the same Linux kernel, using the
-	  "lguest" command found in the Documentation/virtual/lguest
-	  directory.
+	  "lguest" command found in the tools/lguest directory.
	  Note that "lguest" is pronounced to rhyme with "fell quest",
-	  not "rustyvisor". See Documentation/virtual/lguest/lguest.txt.
+	  not "rustyvisor". See tools/lguest/lguest.txt.
If unsure, say N. If curious, say M. If masochistic, say Y.
@@ -20,9 +20,9 @@
#include <asm/asm-offsets.h>
#include "lg.h"
+unsigned long switcher_addr;
+struct page **lg_switcher_pages;
static struct vm_struct *switcher_vma;
-static struct page **switcher_page;
/* This One Big lock protects all inter-guest data structures. */
DEFINE_MUTEX(lguest_lock);
@@ -52,13 +52,21 @@ static __init int map_switcher(void)
	 * easy.
	 */
+	/* We assume Switcher text fits into a single page. */
+	if (end_switcher_text - start_switcher_text > PAGE_SIZE) {
+		printk(KERN_ERR "lguest: switcher text too large (%zu)\n",
+		       end_switcher_text - start_switcher_text);
+		return -EINVAL;
+	}
	/*
	 * We allocate an array of struct page pointers. map_vm_area() wants
	 * this, rather than just an array of pages.
	 */
-	switcher_page = kmalloc(sizeof(switcher_page[0])*TOTAL_SWITCHER_PAGES,
+	lg_switcher_pages = kmalloc(sizeof(lg_switcher_pages[0])
+				    * TOTAL_SWITCHER_PAGES,
				    GFP_KERNEL);
-	if (!switcher_page) {
+	if (!lg_switcher_pages) {
err = -ENOMEM;
goto out;
}
@@ -68,32 +76,29 @@ static __init int map_switcher(void)
	 * so we make sure they're zeroed.
	 */
	for (i = 0; i < TOTAL_SWITCHER_PAGES; i++) {
-		switcher_page[i] = alloc_page(GFP_KERNEL|__GFP_ZERO);
-		if (!switcher_page[i]) {
+		lg_switcher_pages[i] = alloc_page(GFP_KERNEL|__GFP_ZERO);
+		if (!lg_switcher_pages[i]) {
			err = -ENOMEM;
			goto free_some_pages;
		}
	}
	/*
-	 * First we check that the Switcher won't overlap the fixmap area at
-	 * the top of memory. It's currently nowhere near, but it could have
-	 * very strange effects if it ever happened.
+	 * We place the Switcher underneath the fixmap area, which is the
+	 * highest virtual address we can get. This is important, since we
+	 * tell the Guest it can't access this memory, so we want its ceiling
+	 * as high as possible.
	 */
-	if (SWITCHER_ADDR + (TOTAL_SWITCHER_PAGES+1)*PAGE_SIZE > FIXADDR_START){
-		err = -ENOMEM;
-		printk("lguest: mapping switcher would thwack fixmap\n");
-		goto free_pages;
-	}
+	switcher_addr = FIXADDR_START - (TOTAL_SWITCHER_PAGES+1)*PAGE_SIZE;
	/*
-	 * Now we reserve the "virtual memory area" we want: 0xFFC00000
-	 * (SWITCHER_ADDR). We might not get it in theory, but in practice
-	 * it's worked so far. The end address needs +1 because __get_vm_area
-	 * allocates an extra guard page, so we need space for that.
+	 * Now we reserve the "virtual memory area" we want. We might
+	 * not get it in theory, but in practice it's worked so far.
+	 * The end address needs +1 because __get_vm_area allocates an
+	 * extra guard page, so we need space for that.
	 */
	switcher_vma = __get_vm_area(TOTAL_SWITCHER_PAGES * PAGE_SIZE,
-				     VM_ALLOC, SWITCHER_ADDR, SWITCHER_ADDR
+				     VM_ALLOC, switcher_addr, switcher_addr
				     + (TOTAL_SWITCHER_PAGES+1) * PAGE_SIZE);
if (!switcher_vma) {
err = -ENOMEM;
@@ -103,12 +108,12 @@ static __init int map_switcher(void)
	/*
	 * This code actually sets up the pages we've allocated to appear at
-	 * SWITCHER_ADDR. map_vm_area() takes the vma we allocated above, the
+	 * switcher_addr. map_vm_area() takes the vma we allocated above, the
	 * kind of pages we're mapping (kernel pages), and a pointer to our
	 * array of struct pages. It increments that pointer, but we don't
	 * care.
	 */
-	pagep = switcher_page;
+	pagep = lg_switcher_pages;
err = map_vm_area(switcher_vma, PAGE_KERNEL_EXEC, &pagep);
if (err) {
printk("lguest: map_vm_area failed: %i\n", err);
@@ -133,8 +138,8 @@ static __init int map_switcher(void)
	i = TOTAL_SWITCHER_PAGES;
free_some_pages:
	for (--i; i >= 0; i--)
-		__free_pages(switcher_page[i], 0);
-	kfree(switcher_page);
+		__free_pages(lg_switcher_pages[i], 0);
+	kfree(lg_switcher_pages);
out:
return err;
}
@@ -149,8 +154,8 @@ static void unmap_switcher(void)
	vunmap(switcher_vma->addr);
	/* Now we just need to free the pages we copied the switcher into */
	for (i = 0; i < TOTAL_SWITCHER_PAGES; i++)
-		__free_pages(switcher_page[i], 0);
-	kfree(switcher_page);
+		__free_pages(lg_switcher_pages[i], 0);
+	kfree(lg_switcher_pages);
}
/*H:032
@@ -323,15 +328,10 @@ static int __init init(void)
	if (err)
		goto out;
-	/* Now we set up the pagetable implementation for the Guests. */
-	err = init_pagetables(switcher_page, SHARED_SWITCHER_PAGES);
-	if (err)
-		goto unmap;
	/* We might need to reserve an interrupt vector. */
	err = init_interrupts();
	if (err)
-		goto free_pgtables;
+		goto unmap;
/* /dev/lguest needs to be registered. */
err = lguest_device_init();
@@ -346,8 +346,6 @@ static int __init init(void)
free_interrupts:
	free_interrupts();
-free_pgtables:
-	free_pagetables();
unmap:
unmap_switcher();
out:
@@ -359,7 +357,6 @@ static void __exit fini(void)
{
	lguest_device_remove();
	free_interrupts();
-	free_pagetables();
unmap_switcher();
lguest_arch_host_fini();
......
@@ -14,11 +14,10 @@
#include <asm/lguest.h>
-void free_pagetables(void);
-int init_pagetables(struct page **switcher_page, unsigned int pages);
struct pgdir {
unsigned long gpgdir;
+	bool switcher_mapped;
+	int last_host_cpu;
pgd_t *pgdir;
};
@@ -124,6 +123,7 @@ bool lguest_address_ok(const struct lguest *lg,
unsigned long addr, unsigned long len);
void __lgread(struct lg_cpu *, void *, unsigned long, unsigned);
void __lgwrite(struct lg_cpu *, unsigned long, const void *, unsigned);
+extern struct page **lg_switcher_pages;
/*H:035
 * Using memory-copy operations like that is usually inconvenient, so we
......
@@ -250,13 +250,13 @@ static ssize_t read(struct file *file, char __user *user, size_t size,loff_t*o)
	 */
static int lg_cpu_start(struct lg_cpu *cpu, unsigned id, unsigned long start_ip)
{
-	/* We have a limited number the number of CPUs in the lguest struct. */
+	/* We have a limited number of CPUs in the lguest struct. */
	if (id >= ARRAY_SIZE(cpu->lg->cpus))
		return -EINVAL;
	/* Set up this CPU's id, and pointer back to the lguest struct. */
	cpu->id = id;
-	cpu->lg = container_of((cpu - id), struct lguest, cpus[0]);
+	cpu->lg = container_of(cpu, struct lguest, cpus[id]);
cpu->lg->nr_cpus++;
/* Each CPU has a timer it can set. */
@@ -270,7 +270,7 @@ static int lg_cpu_start(struct lg_cpu *cpu, unsigned id, unsigned long start_ip)
	if (!cpu->regs_page)
		return -ENOMEM;
-	/* We actually put the registers at the bottom of the page. */
+	/* We actually put the registers at the end of the page. */
cpu->regs = (void *)cpu->regs_page + PAGE_SIZE - sizeof(*cpu->regs);
/*
......
@@ -59,14 +59,13 @@ static struct {
/* Offset from where switcher.S was compiled to where we've copied it */
static unsigned long switcher_offset(void)
{
-	return SWITCHER_ADDR - (unsigned long)start_switcher_text;
+	return switcher_addr - (unsigned long)start_switcher_text;
}
-/* This cpu's struct lguest_pages. */
+/* This cpu's struct lguest_pages (after the Switcher text page) */
static struct lguest_pages *lguest_pages(unsigned int cpu)
{
-	return &(((struct lguest_pages *)
-	(SWITCHER_ADDR + SHARED_SWITCHER_PAGES*PAGE_SIZE))[cpu]);
+	return &(((struct lguest_pages *)(switcher_addr + PAGE_SIZE))[cpu]);
}
+static DEFINE_PER_CPU(struct lg_cpu *, lg_last_cpu);
......
@@ -40,3 +40,17 @@ config CAIF_HSI
	  The caif low level driver for CAIF over HSI.
	  Be aware that if you enable this then you also need to
	  enable a low-level HSI driver.
+config CAIF_VIRTIO
+	tristate "CAIF virtio transport driver"
+	depends on CAIF
+	select VHOST_RING
+	select VIRTIO
+	select GENERIC_ALLOCATOR
+	default n
+	---help---
+	  The caif driver for CAIF over Virtio.
+if CAIF_VIRTIO
+source "drivers/vhost/Kconfig"
+endif
@@ -9,3 +9,6 @@ obj-$(CONFIG_CAIF_SPI_SLAVE) += cfspi_slave.o
# HSI interface
obj-$(CONFIG_CAIF_HSI) += caif_hsi.o
+# Virtio interface
+obj-$(CONFIG_CAIF_VIRTIO) += caif_virtio.o
@@ -39,7 +39,6 @@ module_param(gso, bool, 0444);
#define MAX_PACKET_LEN (ETH_HLEN + VLAN_HLEN + ETH_DATA_LEN)
#define GOOD_COPY_LEN	128
-#define VIRTNET_SEND_COMMAND_SG_MAX	2
#define VIRTNET_DRIVER_VERSION "1.0.0"
struct virtnet_stats {
@@ -444,7 +443,7 @@ static int add_recvbuf_small(struct receive_queue *rq, gfp_t gfp)
	skb_to_sgvec(skb, rq->sg + 1, 0, skb->len);
-	err = virtqueue_add_buf(rq->vq, rq->sg, 0, 2, skb, gfp);
+	err = virtqueue_add_inbuf(rq->vq, rq->sg, 2, skb, gfp);
if (err < 0)
dev_kfree_skb(skb);
@@ -489,7 +488,7 @@ static int add_recvbuf_big(struct receive_queue *rq, gfp_t gfp)
	/* chain first in list head */
	first->private = (unsigned long)list;
-	err = virtqueue_add_buf(rq->vq, rq->sg, 0, MAX_SKB_FRAGS + 2,
+	err = virtqueue_add_inbuf(rq->vq, rq->sg, MAX_SKB_FRAGS + 2,
				  first, gfp);
if (err < 0)
give_pages(rq, first);
@@ -508,7 +507,7 @@ static int add_recvbuf_mergeable(struct receive_queue *rq, gfp_t gfp)
	sg_init_one(rq->sg, page_address(page), PAGE_SIZE);
-	err = virtqueue_add_buf(rq->vq, rq->sg, 0, 1, page, gfp);
+	err = virtqueue_add_inbuf(rq->vq, rq->sg, 1, page, gfp);
if (err < 0)
give_pages(rq, page);
@@ -582,7 +581,7 @@ static void refill_work(struct work_struct *work)
	bool still_empty;
	int i;
-	for (i = 0; i < vi->max_queue_pairs; i++) {
+	for (i = 0; i < vi->curr_queue_pairs; i++) {
struct receive_queue *rq = &vi->rq[i];
napi_disable(&rq->napi);
@@ -637,7 +636,7 @@ static int virtnet_open(struct net_device *dev)
	struct virtnet_info *vi = netdev_priv(dev);
	int i;
-	for (i = 0; i < vi->max_queue_pairs; i++) {
+	for (i = 0; i < vi->curr_queue_pairs; i++) {
/* Make sure we have some buffers: if oom use wq. */
if (!try_fill_recv(&vi->rq[i], GFP_KERNEL))
schedule_delayed_work(&vi->refill, 0);
@@ -711,8 +710,7 @@ static int xmit_skb(struct send_queue *sq, struct sk_buff *skb)
	sg_set_buf(sq->sg, &hdr->hdr, sizeof hdr->hdr);
	num_sg = skb_to_sgvec(skb, sq->sg + 1, 0, skb->len) + 1;
-	return virtqueue_add_buf(sq->vq, sq->sg, num_sg,
-				 0, skb, GFP_ATOMIC);
+	return virtqueue_add_outbuf(sq->vq, sq->sg, num_sg, skb, GFP_ATOMIC);
}
static netdev_tx_t start_xmit(struct sk_buff *skb, struct net_device *dev)
@@ -767,32 +765,35 @@ static netdev_tx_t start_xmit(struct sk_buff *skb, struct net_device *dev)
 * never fail unless improperly formatted.
 */
static bool virtnet_send_command(struct virtnet_info *vi, u8 class, u8 cmd,
-				 struct scatterlist *data, int out, int in)
+				 struct scatterlist *out,
+				 struct scatterlist *in)
{
-	struct scatterlist *s, sg[VIRTNET_SEND_COMMAND_SG_MAX + 2];
+	struct scatterlist *sgs[4], hdr, stat;
	struct virtio_net_ctrl_hdr ctrl;
	virtio_net_ctrl_ack status = ~0;
-	unsigned int tmp;
-	int i;
+	unsigned out_num = 0, in_num = 0, tmp;
	/* Caller should know better */
-	BUG_ON(!virtio_has_feature(vi->vdev, VIRTIO_NET_F_CTRL_VQ) ||
-	       (out + in > VIRTNET_SEND_COMMAND_SG_MAX));
-	out++; /* Add header */
-	in++; /* Add return status */
+	BUG_ON(!virtio_has_feature(vi->vdev, VIRTIO_NET_F_CTRL_VQ));
	ctrl.class = class;
	ctrl.cmd = cmd;
+	/* Add header */
+	sg_init_one(&hdr, &ctrl, sizeof(ctrl));
+	sgs[out_num++] = &hdr;
-	sg_init_table(sg, out + in);
+	if (out)
+		sgs[out_num++] = out;
+	if (in)
+		sgs[out_num + in_num++] = in;
-	sg_set_buf(&sg[0], &ctrl, sizeof(ctrl));
-	for_each_sg(data, s, out + in - 2, i)
-		sg_set_buf(&sg[i + 1], sg_virt(s), s->length);
-	sg_set_buf(&sg[out + in - 1], &status, sizeof(status));
+	/* Add return status. */
+	sg_init_one(&stat, &status, sizeof(status));
+	sgs[out_num + in_num++] = &stat;
-	BUG_ON(virtqueue_add_buf(vi->cvq, sg, out, in, vi, GFP_ATOMIC) < 0);
+	BUG_ON(out_num + in_num > ARRAY_SIZE(sgs));
+	BUG_ON(virtqueue_add_sgs(vi->cvq, sgs, out_num, in_num, vi, GFP_ATOMIC)
+	       < 0);
virtqueue_kick(vi->cvq);
@@ -821,7 +822,7 @@ static int virtnet_set_mac_address(struct net_device *dev, void *p)
	sg_init_one(&sg, addr->sa_data, dev->addr_len);
	if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_MAC,
				  VIRTIO_NET_CTRL_MAC_ADDR_SET,
-				  &sg, 1, 0)) {
+				  &sg, NULL)) {
dev_warn(&vdev->dev,
"Failed to set mac address by vq command.\n");
return -EINVAL;
@@ -889,8 +890,7 @@ static void virtnet_ack_link_announce(struct virtnet_info *vi)
{
	rtnl_lock();
	if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_ANNOUNCE,
-				  VIRTIO_NET_CTRL_ANNOUNCE_ACK, NULL,
-				  0, 0))
+				  VIRTIO_NET_CTRL_ANNOUNCE_ACK, NULL, NULL))
dev_warn(&vi->dev->dev, "Failed to ack link announce.\n");
rtnl_unlock();
}
@@ -900,6 +900,7 @@ static int virtnet_set_queues(struct virtnet_info *vi, u16 queue_pairs)
	struct scatterlist sg;
	struct virtio_net_ctrl_mq s;
	struct net_device *dev = vi->dev;
+	int i;
if (!vi->has_cvq || !virtio_has_feature(vi->vdev, VIRTIO_NET_F_MQ))
return 0;
@@ -908,12 +909,16 @@ static int virtnet_set_queues(struct virtnet_info *vi, u16 queue_pairs)
	sg_init_one(&sg, &s, sizeof(s));
	if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_MQ,
-				  VIRTIO_NET_CTRL_MQ_VQ_PAIRS_SET, &sg, 1, 0)){
+				  VIRTIO_NET_CTRL_MQ_VQ_PAIRS_SET, &sg, NULL)) {
		dev_warn(&dev->dev, "Fail to set num of queue pairs to %d\n",
			 queue_pairs);
		return -EINVAL;
-	} else
+	} else {
+		for (i = vi->curr_queue_pairs; i < queue_pairs; i++)
+			if (!try_fill_recv(&vi->rq[i], GFP_KERNEL))
+				schedule_delayed_work(&vi->refill, 0);
		vi->curr_queue_pairs = queue_pairs;
+	}
return 0;
}
@@ -955,7 +960,7 @@ static void virtnet_set_rx_mode(struct net_device *dev)
	if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_RX,
				  VIRTIO_NET_CTRL_RX_PROMISC,
-				  sg, 1, 0))
+				  sg, NULL))
dev_warn(&dev->dev, "Failed to %sable promisc mode.\n",
promisc ? "en" : "dis");
@@ -963,7 +968,7 @@ static void virtnet_set_rx_mode(struct net_device *dev)
	if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_RX,
				  VIRTIO_NET_CTRL_RX_ALLMULTI,
-				  sg, 1, 0))
+				  sg, NULL))
dev_warn(&dev->dev, "Failed to %sable allmulti mode.\n",
allmulti ? "en" : "dis");
@@ -1000,7 +1005,7 @@ static void virtnet_set_rx_mode(struct net_device *dev)
	if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_MAC,
				  VIRTIO_NET_CTRL_MAC_TABLE_SET,
-				  sg, 2, 0))
+				  sg, NULL))
		dev_warn(&dev->dev, "Failed to set MAC filter table.\n");
kfree(buf);
@@ -1015,7 +1020,7 @@ static int virtnet_vlan_rx_add_vid(struct net_device *dev,
	sg_init_one(&sg, &vid, sizeof(vid));
	if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_VLAN,
-				  VIRTIO_NET_CTRL_VLAN_ADD, &sg, 1, 0))
+				  VIRTIO_NET_CTRL_VLAN_ADD, &sg, NULL))
dev_warn(&dev->dev, "Failed to add VLAN ID %d.\n", vid);
return 0;
}
@@ -1029,7 +1034,7 @@ static int virtnet_vlan_rx_kill_vid(struct net_device *dev,
	sg_init_one(&sg, &vid, sizeof(vid));
	if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_VLAN,
-				  VIRTIO_NET_CTRL_VLAN_DEL, &sg, 1, 0))
+				  VIRTIO_NET_CTRL_VLAN_DEL, &sg, NULL))
dev_warn(&dev->dev, "Failed to kill VLAN ID %d.\n", vid);
return 0;
}
@@ -1570,7 +1575,7 @@ static int virtnet_probe(struct virtio_device *vdev)
	}
	/* Last of all, set up some receive buffers. */
-	for (i = 0; i < vi->max_queue_pairs; i++) {
+	for (i = 0; i < vi->curr_queue_pairs; i++) {
try_fill_recv(&vi->rq[i], GFP_KERNEL);
/* If we didn't even get one input buffer, we're useless. */
@@ -1694,7 +1699,7 @@ static int virtnet_restore(struct virtio_device *vdev)
	netif_device_attach(vi->dev);
-	for (i = 0; i < vi->max_queue_pairs; i++)
+	for (i = 0; i < vi->curr_queue_pairs; i++)
if (!try_fill_recv(&vi->rq[i], GFP_KERNEL))
schedule_delayed_work(&vi->refill, 0);
......
@@ -757,14 +757,14 @@ int rpmsg_send_offchannel_raw(struct rpmsg_channel *rpdev, u32 src, u32 dst,
	mutex_lock(&vrp->tx_lock);
	/* add message to the remote processor's virtqueue */
-	err = virtqueue_add_buf(vrp->svq, &sg, 1, 0, msg, GFP_KERNEL);
+	err = virtqueue_add_outbuf(vrp->svq, &sg, 1, msg, GFP_KERNEL);
if (err) {
/*
* need to reclaim the buffer here, otherwise it's lost
* (memory won't leak, but rpmsg won't use it again for TX).
* this will wait for a buffer management overhaul.
*/
-		dev_err(dev, "virtqueue_add_buf failed: %d\n", err);
+		dev_err(dev, "virtqueue_add_outbuf failed: %d\n", err);
goto out;
}
@@ -839,7 +839,7 @@ static void rpmsg_recv_done(struct virtqueue *rvq)
	sg_init_one(&sg, msg, RPMSG_BUF_SIZE);
	/* add the buffer back to the remote processor's virtqueue */
-	err = virtqueue_add_buf(vrp->rvq, &sg, 0, 1, msg, GFP_KERNEL);
+	err = virtqueue_add_inbuf(vrp->rvq, &sg, 1, msg, GFP_KERNEL);
if (err < 0) {
dev_err(dev, "failed to add a virtqueue buffer: %d\n", err);
return;
@@ -972,7 +972,7 @@ static int rpmsg_probe(struct virtio_device *vdev)
	sg_init_one(&sg, cpu_addr, RPMSG_BUF_SIZE);
-	err = virtqueue_add_buf(vrp->rvq, &sg, 0, 1, cpu_addr,
+	err = virtqueue_add_inbuf(vrp->rvq, &sg, 1, cpu_addr,
				GFP_KERNEL);
WARN_ON(err); /* sanity check; this can't really happen */
}
......
config VHOST_NET
tristate "Host kernel accelerator for virtio net"
depends on NET && EVENTFD && (TUN || !TUN) && (MACVTAP || !MACVTAP)
+	select VHOST_RING
---help---
This kernel module can be loaded in host kernel to accelerate
guest networking with virtio_net. Not to be confused with virtio_net
@@ -12,7 +13,14 @@ config VHOST_NET
config VHOST_SCSI
	tristate "VHOST_SCSI TCM fabric driver"
	depends on TARGET_CORE && EVENTFD && m
+	select VHOST_RING
	default n
	---help---
	Say M here to enable the vhost_scsi TCM fabric module
	for use with virtio-scsi guests
+config VHOST_RING
+	tristate
+	---help---
+	  This option is selected by any driver which needs to access
+	  the host side of a virtio ring.
@@ -3,3 +3,5 @@ vhost_net-y := vhost.o net.o
obj-$(CONFIG_VHOST_SCSI) += vhost_scsi.o
vhost_scsi-y := scsi.o
+obj-$(CONFIG_VHOST_RING) += vringh.o
@@ -282,7 +282,9 @@ static long vhost_test_ioctl(struct file *f, unsigned int ioctl,
		return vhost_test_reset_owner(n);
	default:
		mutex_lock(&n->dev.mutex);
-		r = vhost_dev_ioctl(&n->dev, ioctl, arg);
+		r = vhost_dev_ioctl(&n->dev, ioctl, argp);
+		if (r == -ENOIOCTLCMD)
+			r = vhost_vring_ioctl(&n->dev, ioctl, argp);
vhost_test_flush(n);
mutex_unlock(&n->dev.mutex);
return r;
......
@@ -108,7 +108,7 @@ static void tell_host(struct virtio_balloon *vb, struct virtqueue *vq)
	sg_init_one(&sg, vb->pfns, sizeof(vb->pfns[0]) * vb->num_pfns);
	/* We should always be able to add one buffer to an empty queue. */
-	if (virtqueue_add_buf(vq, &sg, 1, 0, vb, GFP_KERNEL) < 0)
+	if (virtqueue_add_outbuf(vq, &sg, 1, vb, GFP_KERNEL) < 0)
BUG();
virtqueue_kick(vq);
@@ -256,7 +256,7 @@ static void stats_handle_request(struct virtio_balloon *vb)
	if (!virtqueue_get_buf(vq, &len))
		return;
	sg_init_one(&sg, vb->stats, sizeof(vb->stats));
-	if (virtqueue_add_buf(vq, &sg, 1, 0, vb, GFP_KERNEL) < 0)
+	if (virtqueue_add_outbuf(vq, &sg, 1, vb, GFP_KERNEL) < 0)
BUG();
virtqueue_kick(vq);
}
@@ -341,7 +341,7 @@ static int init_vqs(struct virtio_balloon *vb)
	 * use it to signal us later.
	 */
	sg_init_one(&sg, vb->stats, sizeof vb->stats);
-	if (virtqueue_add_buf(vb->stats_vq, &sg, 1, 0, vb, GFP_KERNEL)
+	if (virtqueue_add_outbuf(vb->stats_vq, &sg, 1, vb, GFP_KERNEL)
< 0)
BUG();
virtqueue_kick(vb->stats_vq);
......
@@ -171,6 +171,22 @@ static inline void sg_mark_end(struct scatterlist *sg)
	sg->page_link &= ~0x01;
}
+/**
+ * sg_unmark_end - Undo setting the end of the scatterlist
+ * @sg: SG entry
+ *
+ * Description:
+ *   Removes the termination marker from the given entry of the scatterlist.
+ *
+ **/
+static inline void sg_unmark_end(struct scatterlist *sg)
+{
+#ifdef CONFIG_DEBUG_SG
+	BUG_ON(sg->sg_magic != SG_MAGIC);
+#endif
+	sg->page_link &= ~0x02;
+}
/**
* sg_phys - Return physical address of an sg entry
* @sg: SG entry
......
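The new sg_unmark_end() above is what the two block-layer hunks near the top of this merge switch to, instead of clearing bit 0x02 of page_link by hand. A sketch of the append pattern it supports; extend_sg() is a hypothetical helper, not a kernel function:

#include <linux/scatterlist.h>

/* Append one entry to a table set up with sg_init_table(), where "sg"
 * currently carries the termination marker. */
static struct scatterlist *extend_sg(struct scatterlist *sg,
				     struct page *page, unsigned int len)
{
	sg_unmark_end(sg);		/* re-open the list... */
	sg = sg_next(sg);		/* ...so we may step past the old end */
	sg_set_page(sg, page, len, 0);
	sg_mark_end(sg);		/* terminate at the new last entry */
	return sg;
}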
@@ -8,6 +8,7 @@
#include <linux/device.h>
#include <linux/mod_devicetable.h>
#include <linux/gfp.h>
+#include <linux/vringh.h>
/**
* virtqueue - a queue to register buffers for sending or receiving.
@@ -40,6 +41,23 @@ int virtqueue_add_buf(struct virtqueue *vq,
		      void *data,
		      gfp_t gfp);
+int virtqueue_add_outbuf(struct virtqueue *vq,
+			 struct scatterlist sg[], unsigned int num,
+			 void *data,
+			 gfp_t gfp);
+int virtqueue_add_inbuf(struct virtqueue *vq,
+			struct scatterlist sg[], unsigned int num,
+			void *data,
+			gfp_t gfp);
+int virtqueue_add_sgs(struct virtqueue *vq,
+		      struct scatterlist *sgs[],
+		      unsigned int out_sgs,
+		      unsigned int in_sgs,
+		      void *data,
+		      gfp_t gfp);
void virtqueue_kick(struct virtqueue *vq);
bool virtqueue_kick_prepare(struct virtqueue *vq);
@@ -64,6 +82,7 @@ unsigned int virtqueue_get_vring_size(struct virtqueue *vq);
 * @dev: underlying device.
 * @id: the device type identification (used to match it with a driver).
 * @config: the configuration ops for this device.
+ * @vringh_config: configuration ops for host vrings.
* @vqs: the list of virtqueues for this device.
* @features: the features supported by both driver and device.
* @priv: private pointer for the driver's use.
@@ -73,6 +92,7 @@ struct virtio_device {
	struct device dev;
	struct virtio_device_id id;
	const struct virtio_config_ops *config;
+	const struct vringh_config_ops *vringh_config;
struct list_head vqs;
/* Note that this is a Linux set_bit-style bitmap. */
unsigned long features[1];
......
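From a caller's point of view, the two single-list helpers declared above behave like virtqueue_add_sgs() with one scatterlist. A sketch of that equivalence; this is not the in-tree implementation, and it assumes the sg list is terminated after its last used entry:

#include <linux/virtio.h>

static int add_outbuf_sketch(struct virtqueue *vq, struct scatterlist *sg,
			     void *data, gfp_t gfp)
{
	struct scatterlist *sgs[1] = { sg };
	return virtqueue_add_sgs(vq, sgs, 1, 0, data, gfp);	/* readable */
}

static int add_inbuf_sketch(struct virtqueue *vq, struct scatterlist *sg,
			    void *data, gfp_t gfp)
{
	struct scatterlist *sgs[1] = { sg };
	return virtqueue_add_sgs(vq, sgs, 0, 1, data, gfp);	/* writable */
}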
/*
* Copyright (C) ST-Ericsson AB 2012
* Author: Sjur Brændeland <sjur.brandeland@stericsson.com>
*
* This header is BSD licensed so
* anyone can use the definitions to implement compatible remote processors
*/
#ifndef VIRTIO_CAIF_H
#define VIRTIO_CAIF_H
#include <linux/types.h>
struct virtio_caif_transf_config {
u16 headroom;
u16 tailroom;
u32 mtu;
u8 reserved[4];
};
struct virtio_caif_config {
struct virtio_caif_transf_config uplink, downlink;
u8 reserved[8];
};
#endif
@@ -4,6 +4,63 @@
#include <linux/irqreturn.h>
#include <uapi/linux/virtio_ring.h>
+/*
+ * Barriers in virtio are tricky. Non-SMP virtio guests can't assume
+ * they're not on an SMP host system, so they need to assume real
+ * barriers. Non-SMP virtio hosts could skip the barriers, but does
+ * anyone care?
+ *
+ * For virtio_pci on SMP, we don't need to order with respect to MMIO
+ * accesses through relaxed memory I/O windows, so smp_mb() et al are
+ * sufficient.
+ *
+ * For using virtio to talk to real devices (eg. other heterogeneous
+ * CPUs) we do need real barriers. In theory, we could be using both
+ * kinds of virtio, so it's a runtime decision, and the branch is
+ * actually quite cheap.
+ */
+#ifdef CONFIG_SMP
+static inline void virtio_mb(bool weak_barriers)
+{
+	if (weak_barriers)
+		smp_mb();
+	else
+		mb();
+}
+static inline void virtio_rmb(bool weak_barriers)
+{
+	if (weak_barriers)
+		smp_rmb();
+	else
+		rmb();
+}
+static inline void virtio_wmb(bool weak_barriers)
+{
+	if (weak_barriers)
+		smp_wmb();
+	else
+		wmb();
+}
+#else
+static inline void virtio_mb(bool weak_barriers)
+{
+	mb();
+}
+static inline void virtio_rmb(bool weak_barriers)
+{
+	rmb();
+}
+static inline void virtio_wmb(bool weak_barriers)
+{
+	wmb();
+}
+#endif
struct virtio_device;
struct virtqueue;
......
/*
* Linux host-side vring helpers; for when the kernel needs to access
* someone else's vring.
*
* Copyright IBM Corporation, 2013.
* Parts taken from drivers/vhost/vhost.c Copyright 2009 Red Hat, Inc.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
*
* Written by: Rusty Russell <rusty@rustcorp.com.au>
*/
#ifndef _LINUX_VRINGH_H
#define _LINUX_VRINGH_H
#include <uapi/linux/virtio_ring.h>
#include <linux/uio.h>
#include <linux/slab.h>
#include <asm/barrier.h>
/* virtio_ring with information needed for host access. */
struct vringh {
/* Guest publishes used event idx (note: we always do). */
bool event_indices;
/* Can we get away with weak barriers? */
bool weak_barriers;
/* Last available index we saw (ie. where we're up to). */
u16 last_avail_idx;
/* Last index we used. */
u16 last_used_idx;
/* How many descriptors we've completed since last need_notify(). */
u32 completed;
/* The vring (note: it may contain user pointers!) */
struct vring vring;
/* The function to call to notify the guest about added buffers */
void (*notify)(struct vringh *);
};
/**
* struct vringh_config_ops - ops for creating a host vring from a virtio driver
* @find_vrhs: find the host vrings and instantiate them
* vdev: the virtio_device
* nhvrs: the number of host vrings to find
* hvrs: on success, includes new host vrings
* callbacks: array of driver callbacks, for each host vring
* include a NULL entry for vqs that do not need a callback
* Returns 0 on success or error status
* @del_vrhs: free the host vrings found by find_vrhs().
*/
struct virtio_device;
typedef void vrh_callback_t(struct virtio_device *, struct vringh *);
struct vringh_config_ops {
int (*find_vrhs)(struct virtio_device *vdev, unsigned nhvrs,
struct vringh *vrhs[], vrh_callback_t *callbacks[]);
void (*del_vrhs)(struct virtio_device *vdev);
};
/* The memory the vring can access, and what offset to apply. */
struct vringh_range {
u64 start, end_incl;
u64 offset;
};
/**
* struct vringh_iov - iovec mangler.
*
* Mangles iovec in place, and restores it.
* Remaining data is iov + i, of used - i elements.
*/
struct vringh_iov {
struct iovec *iov;
size_t consumed; /* Within iov[i] */
unsigned i, used, max_num;
};
/**
 * struct vringh_kiov - kvec mangler.
*
* Mangles kvec in place, and restores it.
* Remaining data is iov + i, of used - i elements.
*/
struct vringh_kiov {
struct kvec *iov;
size_t consumed; /* Within iov[i] */
unsigned i, used, max_num;
};
/* Flag on max_num to indicate we're kmalloced. */
#define VRINGH_IOV_ALLOCATED 0x8000000
/* Helpers for userspace vrings. */
int vringh_init_user(struct vringh *vrh, u32 features,
unsigned int num, bool weak_barriers,
struct vring_desc __user *desc,
struct vring_avail __user *avail,
struct vring_used __user *used);
static inline void vringh_iov_init(struct vringh_iov *iov,
struct iovec *iovec, unsigned num)
{
iov->used = iov->i = 0;
iov->consumed = 0;
iov->max_num = num;
iov->iov = iovec;
}
static inline void vringh_iov_reset(struct vringh_iov *iov)
{
iov->iov[iov->i].iov_len += iov->consumed;
iov->iov[iov->i].iov_base -= iov->consumed;
iov->consumed = 0;
iov->i = 0;
}
static inline void vringh_iov_cleanup(struct vringh_iov *iov)
{
if (iov->max_num & VRINGH_IOV_ALLOCATED)
kfree(iov->iov);
iov->max_num = iov->used = iov->i = iov->consumed = 0;
iov->iov = NULL;
}
/* Convert a descriptor into iovecs. */
int vringh_getdesc_user(struct vringh *vrh,
struct vringh_iov *riov,
struct vringh_iov *wiov,
bool (*getrange)(struct vringh *vrh,
u64 addr, struct vringh_range *r),
u16 *head);
/* Copy bytes from readable vsg, consuming it (and incrementing riov->i). */
ssize_t vringh_iov_pull_user(struct vringh_iov *riov, void *dst, size_t len);
/* Copy bytes into writable vsg, consuming it (and incrementing wiov->i). */
ssize_t vringh_iov_push_user(struct vringh_iov *wiov,
const void *src, size_t len);
/* Mark a descriptor as used. */
int vringh_complete_user(struct vringh *vrh, u16 head, u32 len);
int vringh_complete_multi_user(struct vringh *vrh,
const struct vring_used_elem used[],
unsigned num_used);
/* Pretend we've never seen descriptor (for easy error handling). */
void vringh_abandon_user(struct vringh *vrh, unsigned int num);
/* Do we need to fire the eventfd to notify the other side? */
int vringh_need_notify_user(struct vringh *vrh);
bool vringh_notify_enable_user(struct vringh *vrh);
void vringh_notify_disable_user(struct vringh *vrh);
/* Helpers for kernelspace vrings. */
int vringh_init_kern(struct vringh *vrh, u32 features,
unsigned int num, bool weak_barriers,
struct vring_desc *desc,
struct vring_avail *avail,
struct vring_used *used);
static inline void vringh_kiov_init(struct vringh_kiov *kiov,
struct kvec *kvec, unsigned num)
{
kiov->used = kiov->i = 0;
kiov->consumed = 0;
kiov->max_num = num;
kiov->iov = kvec;
}
static inline void vringh_kiov_reset(struct vringh_kiov *kiov)
{
kiov->iov[kiov->i].iov_len += kiov->consumed;
kiov->iov[kiov->i].iov_base -= kiov->consumed;
kiov->consumed = 0;
kiov->i = 0;
}
static inline void vringh_kiov_cleanup(struct vringh_kiov *kiov)
{
if (kiov->max_num & VRINGH_IOV_ALLOCATED)
kfree(kiov->iov);
kiov->max_num = kiov->used = kiov->i = kiov->consumed = 0;
kiov->iov = NULL;
}
int vringh_getdesc_kern(struct vringh *vrh,
struct vringh_kiov *riov,
struct vringh_kiov *wiov,
u16 *head,
gfp_t gfp);
ssize_t vringh_iov_pull_kern(struct vringh_kiov *riov, void *dst, size_t len);
ssize_t vringh_iov_push_kern(struct vringh_kiov *wiov,
const void *src, size_t len);
void vringh_abandon_kern(struct vringh *vrh, unsigned int num);
int vringh_complete_kern(struct vringh *vrh, u16 head, u32 len);
bool vringh_notify_enable_kern(struct vringh *vrh);
void vringh_notify_disable_kern(struct vringh *vrh);
int vringh_need_notify_kern(struct vringh *vrh);
/* Notify the guest about buffers added to the used ring */
static inline void vringh_notify(struct vringh *vrh)
{
if (vrh->notify)
vrh->notify(vrh);
}
#endif /* _LINUX_VRINGH_H */
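Putting the kernel-side half of this header together, a host-side service loop reduces to getdesc / pull or push / complete / notify. A hedged sketch, with error handling trimmed and the vringh assumed to have been set up beforehand via vringh_init_kern():

#include <linux/vringh.h>

/* Consume one pending descriptor chain and report how much we read. */
static void service_one(struct vringh *vrh)
{
	struct vringh_kiov riov, wiov;
	struct kvec rkvec[8], wkvec[8];
	char buf[64];
	ssize_t len;
	u16 head;

	vringh_kiov_init(&riov, rkvec, ARRAY_SIZE(rkvec));
	vringh_kiov_init(&wiov, wkvec, ARRAY_SIZE(wkvec));

	/* 1 means a descriptor was found; 0 means none pending. */
	if (vringh_getdesc_kern(vrh, &riov, &wiov, &head, GFP_KERNEL) != 1)
		return;

	len = vringh_iov_pull_kern(&riov, buf, sizeof(buf));

	/* Mark the chain used, then notify the guest if it wants that. */
	vringh_complete_kern(vrh, head, len > 0 ? len : 0);
	if (vringh_need_notify_kern(vrh) > 0)
		vringh_notify(vrh);
}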
@@ -52,8 +52,8 @@ struct virtio_balloon_config
#define VIRTIO_BALLOON_S_NR       6
struct virtio_balloon_stat {
-	u16 tag;
-	u64 val;
+	__u16 tag;
+	__u64 val;
} __attribute__((packed));
#endif /* _LINUX_VIRTIO_BALLOON_H */
@@ -38,5 +38,6 @@
#define VIRTIO_ID_SCSI		8 /* virtio scsi */
#define VIRTIO_ID_9P		9 /* 9p virtio console */
#define VIRTIO_ID_RPROC_SERIAL	11 /* virtio remoteproc serial link */
+#define VIRTIO_ID_CAIF		12 /* Virtio caif */
#endif /* _LINUX_VIRTIO_IDS_H */
@@ -70,7 +70,7 @@ Running Lguest:
- Run an lguest as root:
-      Documentation/virtual/lguest/lguest 64 vmlinux --tunnet=192.168.19.1 \
+      tools/lguest/lguest 64 vmlinux --tunnet=192.168.19.1 \
--block=rootfile root=/dev/vda
Explanation:
......
#ifndef BUG_H
#define BUG_H
#define BUG_ON(__BUG_ON_cond) assert(!(__BUG_ON_cond))
#define BUILD_BUG_ON(x)
#define BUG() abort()
#endif /* BUG_H */
#include "../../../include/linux/irqreturn.h"
#include "../../../include/linux/virtio_ring.h"