Commit ca995ce4 authored by Linus Torvalds

Merge tag 'for-linus-6.7-rc1-tag' of git://git.kernel.org/pub/scm/linux/kernel/git/xen/tip

Pull xen updates from Juergen Gross:

 - two small cleanup patches

 - a fix for PCI passthrough under Xen

 - a four-patch series speeding up virtio under Xen with user-space
   backends

* tag 'for-linus-6.7-rc1-tag' of git://git.kernel.org/pub/scm/linux/kernel/git/xen/tip:
  xen-pciback: Consider INTx disabled when MSI/MSI-X is enabled
  xen: privcmd: Add support for ioeventfd
  xen: evtchn: Allow shared registration of IRQ handlers
  xen: irqfd: Use _IOW instead of the internal _IOC() macro
  xen: Make struct privcmd_irqfd's layout architecture independent
  xen/xenbus: Add __counted_by for struct read_buffer and use struct_size()
  xenbus: fix error exit in xenbus_init()
parents 8999ad99 2c269f42
@@ -269,12 +269,12 @@ config XEN_PRIVCMD
disaggregated Xen setups this driver might be needed for other
domains, too.
config XEN_PRIVCMD_IRQFD
bool "Xen irqfd support"
config XEN_PRIVCMD_EVENTFD
bool "Xen Ioeventfd and irqfd support"
depends on XEN_PRIVCMD && XEN_VIRTIO && EVENTFD
help
Using the irqfd mechanism a virtio backend running in a daemon can
speed up interrupt injection into a guest.
Using the ioeventfd / irqfd mechanism, a virtio backend running in a
daemon can speed up interrupt delivery from / to a guest.
config XEN_ACPI_PROCESSOR
tristate "Xen ACPI processor"
......
@@ -1229,7 +1229,8 @@ static int bind_evtchn_to_irq_chip(evtchn_port_t evtchn, struct irq_chip *chip,
bind_evtchn_to_cpu(evtchn, 0, false);
} else {
struct irq_info *info = info_for_irq(irq);
WARN_ON(info == NULL || info->type != IRQT_EVTCHN);
if (!WARN_ON(!info || info->type != IRQT_EVTCHN))
info->refcnt++;
}
out:
......
@@ -397,7 +397,7 @@ static int evtchn_bind_to_user(struct per_user_data *u, evtchn_port_t port,
if (rc < 0)
goto err;
rc = bind_evtchn_to_irqhandler_lateeoi(port, evtchn_interrupt, 0,
rc = bind_evtchn_to_irqhandler_lateeoi(port, evtchn_interrupt, IRQF_SHARED,
u->name, evtchn);
if (rc < 0)
goto err;
......
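Taken together, the refcount change in events_base.c and the IRQF_SHARED flag in evtchn.c above let more than one consumer attach a handler to the same event channel. A minimal, hypothetical sketch of such shared registration (handler and context names are illustrative, not part of this series):

#include <linux/interrupt.h>
#include <xen/events.h>

/* Each handler must recognise its own events and return IRQ_NONE otherwise. */
static irqreturn_t consumer_a_handler(int irq, void *dev_id)
{
        return IRQ_NONE;
}

static irqreturn_t consumer_b_handler(int irq, void *dev_id)
{
        return IRQ_HANDLED;
}

static int bind_shared_evtchn(evtchn_port_t port, void *ctx_a, void *ctx_b)
{
        int irq_a, irq_b;

        irq_a = bind_evtchn_to_irqhandler_lateeoi(port, consumer_a_handler,
                        IRQF_SHARED, "consumer-a", ctx_a);
        if (irq_a < 0)
                return irq_a;

        /* A second bind of the same port now succeeds and bumps the refcount;
         * each registration needs a unique dev_id for unbind to tell them apart. */
        irq_b = bind_evtchn_to_irqhandler_lateeoi(port, consumer_b_handler,
                        IRQF_SHARED, "consumer-b", ctx_b);
        if (irq_b < 0) {
                unbind_from_irqhandler(irq_a, ctx_a);
                return irq_b;
        }

        return 0;
}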
@@ -29,15 +29,18 @@
#include <linux/seq_file.h>
#include <linux/miscdevice.h>
#include <linux/moduleparam.h>
#include <linux/virtio_mmio.h>
#include <asm/xen/hypervisor.h>
#include <asm/xen/hypercall.h>
#include <xen/xen.h>
#include <xen/events.h>
#include <xen/privcmd.h>
#include <xen/interface/xen.h>
#include <xen/interface/memory.h>
#include <xen/interface/hvm/dm_op.h>
#include <xen/interface/hvm/ioreq.h>
#include <xen/features.h>
#include <xen/page.h>
#include <xen/xen-ops.h>
@@ -782,6 +785,7 @@ static long privcmd_ioctl_mmap_resource(struct file *file,
goto out;
pages = vma->vm_private_data;
for (i = 0; i < kdata.num; i++) {
xen_pfn_t pfn =
page_to_xen_pfn(pages[i / XEN_PFN_PER_PAGE]);
@@ -838,7 +842,7 @@ static long privcmd_ioctl_mmap_resource(struct file *file,
return rc;
}
#ifdef CONFIG_XEN_PRIVCMD_IRQFD
#ifdef CONFIG_XEN_PRIVCMD_EVENTFD
/* Irqfd support */
static struct workqueue_struct *irqfd_cleanup_wq;
static DEFINE_MUTEX(irqfds_lock);
@@ -935,7 +939,7 @@ static int privcmd_irqfd_assign(struct privcmd_irqfd *irqfd)
return -ENOMEM;
dm_op = kirqfd + 1;
if (copy_from_user(dm_op, irqfd->dm_op, irqfd->size)) {
if (copy_from_user(dm_op, u64_to_user_ptr(irqfd->dm_op), irqfd->size)) {
ret = -EFAULT;
goto error_kfree;
}
@@ -1079,6 +1083,389 @@ static void privcmd_irqfd_exit(void)
destroy_workqueue(irqfd_cleanup_wq);
}
/* Ioeventfd Support */
#define QUEUE_NOTIFY_VQ_MASK 0xFFFF
static DEFINE_MUTEX(ioreq_lock);
static LIST_HEAD(ioreq_list);
/* per-eventfd structure */
struct privcmd_kernel_ioeventfd {
struct eventfd_ctx *eventfd;
struct list_head list;
u64 addr;
unsigned int addr_len;
unsigned int vq;
};
/* per-guest CPU / port structure */
struct ioreq_port {
int vcpu;
unsigned int port;
struct privcmd_kernel_ioreq *kioreq;
};
/* per-guest structure */
struct privcmd_kernel_ioreq {
domid_t dom;
unsigned int vcpus;
u64 uioreq;
struct ioreq *ioreq;
spinlock_t lock; /* Protects ioeventfds list */
struct list_head ioeventfds;
struct list_head list;
struct ioreq_port ports[0];
};
static irqreturn_t ioeventfd_interrupt(int irq, void *dev_id)
{
struct ioreq_port *port = dev_id;
struct privcmd_kernel_ioreq *kioreq = port->kioreq;
struct ioreq *ioreq = &kioreq->ioreq[port->vcpu];
struct privcmd_kernel_ioeventfd *kioeventfd;
unsigned int state = STATE_IOREQ_READY;
if (ioreq->state != STATE_IOREQ_READY ||
ioreq->type != IOREQ_TYPE_COPY || ioreq->dir != IOREQ_WRITE)
return IRQ_NONE;
/*
* We need a barrier, smp_mb(), here to ensure reads are finished before
* `state` is updated. Since the lock implementation adds an appropriate
* barrier anyway, we can avoid an explicit one here.
*
* Ideally we wouldn't need to update `state` under the lock, but we do
* so here to avoid an explicit barrier.
*/
spin_lock(&kioreq->lock);
ioreq->state = STATE_IOREQ_INPROCESS;
list_for_each_entry(kioeventfd, &kioreq->ioeventfds, list) {
if (ioreq->addr == kioeventfd->addr + VIRTIO_MMIO_QUEUE_NOTIFY &&
ioreq->size == kioeventfd->addr_len &&
(ioreq->data & QUEUE_NOTIFY_VQ_MASK) == kioeventfd->vq) {
eventfd_signal(kioeventfd->eventfd, 1);
state = STATE_IORESP_READY;
break;
}
}
spin_unlock(&kioreq->lock);
/*
* We need a barrier, smp_mb(), here to ensure writes are finished
* before `state` is updated. Since the lock implementation adds an
* appropriate barrier anyway, we can avoid an explicit one here.
*/
ioreq->state = state;
if (state == STATE_IORESP_READY) {
notify_remote_via_evtchn(port->port);
return IRQ_HANDLED;
}
return IRQ_NONE;
}
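For context, the write that the handler above matches is the virtio-mmio queue-notify doorbell: the guest stores the queue index at offset VIRTIO_MMIO_QUEUE_NOTIFY from the device's MMIO base, and the low 16 bits of the written value (QUEUE_NOTIFY_VQ_MASK) select the virtqueue. A hedged guest-side sketch, with mmio_base and queue_index as placeholders:

#include <linux/io.h>
#include <linux/virtio_mmio.h>

/* Guest-side doorbell write that the ioeventfd handler above would match:
 * addr == registered base + VIRTIO_MMIO_QUEUE_NOTIFY, data low bits == vq. */
static void notify_virtqueue(void __iomem *mmio_base, u16 queue_index)
{
        writel(queue_index, mmio_base + VIRTIO_MMIO_QUEUE_NOTIFY);
}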
static void ioreq_free(struct privcmd_kernel_ioreq *kioreq)
{
struct ioreq_port *ports = kioreq->ports;
int i;
lockdep_assert_held(&ioreq_lock);
list_del(&kioreq->list);
for (i = kioreq->vcpus - 1; i >= 0; i--)
unbind_from_irqhandler(irq_from_evtchn(ports[i].port), &ports[i]);
kfree(kioreq);
}
static
struct privcmd_kernel_ioreq *alloc_ioreq(struct privcmd_ioeventfd *ioeventfd)
{
struct privcmd_kernel_ioreq *kioreq;
struct mm_struct *mm = current->mm;
struct vm_area_struct *vma;
struct page **pages;
unsigned int *ports;
int ret, size, i;
lockdep_assert_held(&ioreq_lock);
size = struct_size(kioreq, ports, ioeventfd->vcpus);
kioreq = kzalloc(size, GFP_KERNEL);
if (!kioreq)
return ERR_PTR(-ENOMEM);
kioreq->dom = ioeventfd->dom;
kioreq->vcpus = ioeventfd->vcpus;
kioreq->uioreq = ioeventfd->ioreq;
spin_lock_init(&kioreq->lock);
INIT_LIST_HEAD(&kioreq->ioeventfds);
/* The memory for ioreq server must have been mapped earlier */
mmap_write_lock(mm);
vma = find_vma(mm, (unsigned long)ioeventfd->ioreq);
if (!vma) {
pr_err("Failed to find vma for ioreq page!\n");
mmap_write_unlock(mm);
ret = -EFAULT;
goto error_kfree;
}
pages = vma->vm_private_data;
kioreq->ioreq = (struct ioreq *)(page_to_virt(pages[0]));
mmap_write_unlock(mm);
size = sizeof(*ports) * kioreq->vcpus;
ports = kzalloc(size, GFP_KERNEL);
if (!ports) {
ret = -ENOMEM;
goto error_kfree;
}
if (copy_from_user(ports, u64_to_user_ptr(ioeventfd->ports), size)) {
ret = -EFAULT;
goto error_kfree_ports;
}
for (i = 0; i < kioreq->vcpus; i++) {
kioreq->ports[i].vcpu = i;
kioreq->ports[i].port = ports[i];
kioreq->ports[i].kioreq = kioreq;
ret = bind_evtchn_to_irqhandler_lateeoi(ports[i],
ioeventfd_interrupt, IRQF_SHARED, "ioeventfd",
&kioreq->ports[i]);
if (ret < 0)
goto error_unbind;
}
kfree(ports);
list_add_tail(&kioreq->list, &ioreq_list);
return kioreq;
error_unbind:
while (--i >= 0)
unbind_from_irqhandler(irq_from_evtchn(ports[i]), &kioreq->ports[i]);
error_kfree_ports:
kfree(ports);
error_kfree:
kfree(kioreq);
return ERR_PTR(ret);
}
static struct privcmd_kernel_ioreq *
get_ioreq(struct privcmd_ioeventfd *ioeventfd, struct eventfd_ctx *eventfd)
{
struct privcmd_kernel_ioreq *kioreq;
unsigned long flags;
list_for_each_entry(kioreq, &ioreq_list, list) {
struct privcmd_kernel_ioeventfd *kioeventfd;
/*
* kioreq fields can be accessed here without a lock as they are
* never updated after being added to the ioreq_list.
*/
if (kioreq->uioreq != ioeventfd->ioreq) {
continue;
} else if (kioreq->dom != ioeventfd->dom ||
kioreq->vcpus != ioeventfd->vcpus) {
pr_err("Invalid ioeventfd configuration mismatch, dom (%u vs %u), vcpus (%u vs %u)\n",
kioreq->dom, ioeventfd->dom, kioreq->vcpus,
ioeventfd->vcpus);
return ERR_PTR(-EINVAL);
}
/* Look for a duplicate eventfd for the same guest */
spin_lock_irqsave(&kioreq->lock, flags);
list_for_each_entry(kioeventfd, &kioreq->ioeventfds, list) {
if (eventfd == kioeventfd->eventfd) {
spin_unlock_irqrestore(&kioreq->lock, flags);
return ERR_PTR(-EBUSY);
}
}
spin_unlock_irqrestore(&kioreq->lock, flags);
return kioreq;
}
/* No matching kioreq found, allocate a new one */
return alloc_ioreq(ioeventfd);
}
static void ioeventfd_free(struct privcmd_kernel_ioeventfd *kioeventfd)
{
list_del(&kioeventfd->list);
eventfd_ctx_put(kioeventfd->eventfd);
kfree(kioeventfd);
}
static int privcmd_ioeventfd_assign(struct privcmd_ioeventfd *ioeventfd)
{
struct privcmd_kernel_ioeventfd *kioeventfd;
struct privcmd_kernel_ioreq *kioreq;
unsigned long flags;
struct fd f;
int ret;
/* Check for range overflow */
if (ioeventfd->addr + ioeventfd->addr_len < ioeventfd->addr)
return -EINVAL;
/* Vhost requires us to support length 1, 2, 4, and 8 */
if (!(ioeventfd->addr_len == 1 || ioeventfd->addr_len == 2 ||
ioeventfd->addr_len == 4 || ioeventfd->addr_len == 8))
return -EINVAL;
/* Is a limit of 4096 vcpus enough? */
if (!ioeventfd->vcpus || ioeventfd->vcpus > 4096)
return -EINVAL;
kioeventfd = kzalloc(sizeof(*kioeventfd), GFP_KERNEL);
if (!kioeventfd)
return -ENOMEM;
f = fdget(ioeventfd->event_fd);
if (!f.file) {
ret = -EBADF;
goto error_kfree;
}
kioeventfd->eventfd = eventfd_ctx_fileget(f.file);
fdput(f);
if (IS_ERR(kioeventfd->eventfd)) {
ret = PTR_ERR(kioeventfd->eventfd);
goto error_kfree;
}
kioeventfd->addr = ioeventfd->addr;
kioeventfd->addr_len = ioeventfd->addr_len;
kioeventfd->vq = ioeventfd->vq;
mutex_lock(&ioreq_lock);
kioreq = get_ioreq(ioeventfd, kioeventfd->eventfd);
if (IS_ERR(kioreq)) {
mutex_unlock(&ioreq_lock);
ret = PTR_ERR(kioreq);
goto error_eventfd;
}
spin_lock_irqsave(&kioreq->lock, flags);
list_add_tail(&kioeventfd->list, &kioreq->ioeventfds);
spin_unlock_irqrestore(&kioreq->lock, flags);
mutex_unlock(&ioreq_lock);
return 0;
error_eventfd:
eventfd_ctx_put(kioeventfd->eventfd);
error_kfree:
kfree(kioeventfd);
return ret;
}
static int privcmd_ioeventfd_deassign(struct privcmd_ioeventfd *ioeventfd)
{
struct privcmd_kernel_ioreq *kioreq, *tkioreq;
struct eventfd_ctx *eventfd;
unsigned long flags;
int ret = 0;
eventfd = eventfd_ctx_fdget(ioeventfd->event_fd);
if (IS_ERR(eventfd))
return PTR_ERR(eventfd);
mutex_lock(&ioreq_lock);
list_for_each_entry_safe(kioreq, tkioreq, &ioreq_list, list) {
struct privcmd_kernel_ioeventfd *kioeventfd, *tmp;
/*
* kioreq fields can be accessed here without a lock as they are
* never updated after being added to the ioreq_list.
*/
if (kioreq->dom != ioeventfd->dom ||
kioreq->uioreq != ioeventfd->ioreq ||
kioreq->vcpus != ioeventfd->vcpus)
continue;
spin_lock_irqsave(&kioreq->lock, flags);
list_for_each_entry_safe(kioeventfd, tmp, &kioreq->ioeventfds, list) {
if (eventfd == kioeventfd->eventfd) {
ioeventfd_free(kioeventfd);
spin_unlock_irqrestore(&kioreq->lock, flags);
if (list_empty(&kioreq->ioeventfds))
ioreq_free(kioreq);
goto unlock;
}
}
spin_unlock_irqrestore(&kioreq->lock, flags);
break;
}
pr_err("Ioeventfd isn't already assigned, dom: %u, addr: %llu\n",
ioeventfd->dom, ioeventfd->addr);
ret = -ENODEV;
unlock:
mutex_unlock(&ioreq_lock);
eventfd_ctx_put(eventfd);
return ret;
}
static long privcmd_ioctl_ioeventfd(struct file *file, void __user *udata)
{
struct privcmd_data *data = file->private_data;
struct privcmd_ioeventfd ioeventfd;
if (copy_from_user(&ioeventfd, udata, sizeof(ioeventfd)))
return -EFAULT;
/* No other flags should be set */
if (ioeventfd.flags & ~PRIVCMD_IOEVENTFD_FLAG_DEASSIGN)
return -EINVAL;
/* If restriction is in place, check the domid matches */
if (data->domid != DOMID_INVALID && data->domid != ioeventfd.dom)
return -EPERM;
if (ioeventfd.flags & PRIVCMD_IOEVENTFD_FLAG_DEASSIGN)
return privcmd_ioeventfd_deassign(&ioeventfd);
return privcmd_ioeventfd_assign(&ioeventfd);
}
static void privcmd_ioeventfd_exit(void)
{
struct privcmd_kernel_ioreq *kioreq, *tmp;
unsigned long flags;
mutex_lock(&ioreq_lock);
list_for_each_entry_safe(kioreq, tmp, &ioreq_list, list) {
struct privcmd_kernel_ioeventfd *kioeventfd, *tmp;
spin_lock_irqsave(&kioreq->lock, flags);
list_for_each_entry_safe(kioeventfd, tmp, &kioreq->ioeventfds, list)
ioeventfd_free(kioeventfd);
spin_unlock_irqrestore(&kioreq->lock, flags);
ioreq_free(kioreq);
}
mutex_unlock(&ioreq_lock);
}
#else
static inline long privcmd_ioctl_irqfd(struct file *file, void __user *udata)
{
@@ -1093,7 +1480,16 @@ static inline int privcmd_irqfd_init(void)
static inline void privcmd_irqfd_exit(void)
{
}
#endif /* CONFIG_XEN_PRIVCMD_IRQFD */
static inline long privcmd_ioctl_ioeventfd(struct file *file, void __user *udata)
{
return -EOPNOTSUPP;
}
static inline void privcmd_ioeventfd_exit(void)
{
}
#endif /* CONFIG_XEN_PRIVCMD_EVENTFD */
static long privcmd_ioctl(struct file *file,
unsigned int cmd, unsigned long data)
@@ -1134,6 +1530,10 @@ static long privcmd_ioctl(struct file *file,
ret = privcmd_ioctl_irqfd(file, udata);
break;
case IOCTL_PRIVCMD_IOEVENTFD:
ret = privcmd_ioctl_ioeventfd(file, udata);
break;
default:
break;
}
@@ -1278,6 +1678,7 @@ static int __init privcmd_init(void)
static void __exit privcmd_exit(void)
{
privcmd_ioeventfd_exit();
privcmd_irqfd_exit();
misc_deregister(&privcmd_dev);
misc_deregister(&xen_privcmdbuf_dev);
......
@@ -288,12 +288,6 @@ int xen_pcibk_get_interrupt_type(struct pci_dev *dev)
u16 val;
int ret = 0;
err = pci_read_config_word(dev, PCI_COMMAND, &val);
if (err)
return err;
if (!(val & PCI_COMMAND_INTX_DISABLE))
ret |= INTERRUPT_TYPE_INTX;
/*
* Do not trust dev->msi(x)_enabled here, as enabling could have been
* done by QEMU, bypassing the pci_*msi* functions.
@@ -316,6 +310,19 @@ int xen_pcibk_get_interrupt_type(struct pci_dev *dev)
if (val & PCI_MSIX_FLAGS_ENABLE)
ret |= INTERRUPT_TYPE_MSIX;
}
/*
* The PCIe spec says a device cannot use INTx if MSI/MSI-X is enabled,
* so check for INTx only when both are disabled.
*/
if (!ret) {
err = pci_read_config_word(dev, PCI_COMMAND, &val);
if (err)
return err;
if (!(val & PCI_COMMAND_INTX_DISABLE))
ret |= INTERRUPT_TYPE_INTX;
}
return ret ?: INTERRUPT_TYPE_NONE;
}
......
@@ -236,10 +236,16 @@ static int msi_msix_flags_write(struct pci_dev *dev, int offset, u16 new_value,
return PCIBIOS_SET_FAILED;
if (new_value & field_config->enable_bit) {
/* don't allow enabling together with other interrupt types */
/*
* Don't allow enabling together with other interrupt types, but do
* allow enabling MSI(-X) while INTx is still active, to accommodate
* Linux's MSI(-X) startup sequence. This is safe, as according to the
* PCI spec a device with MSI(-X) enabled shouldn't use INTx.
*/
int int_type = xen_pcibk_get_interrupt_type(dev);
if (int_type == INTERRUPT_TYPE_NONE ||
int_type == INTERRUPT_TYPE_INTX ||
int_type == field_config->int_type)
goto write;
return PCIBIOS_SET_FAILED;
......
@@ -104,24 +104,9 @@ static int command_write(struct pci_dev *dev, int offset, u16 value, void *data)
pci_clear_mwi(dev);
}
if (dev_data && dev_data->allow_interrupt_control) {
if ((cmd->val ^ value) & PCI_COMMAND_INTX_DISABLE) {
if (value & PCI_COMMAND_INTX_DISABLE) {
pci_intx(dev, 0);
} else {
/* Do not allow enabling INTx together with MSI or MSI-X. */
switch (xen_pcibk_get_interrupt_type(dev)) {
case INTERRUPT_TYPE_NONE:
pci_intx(dev, 1);
break;
case INTERRUPT_TYPE_INTX:
break;
default:
return PCIBIOS_SET_FAILED;
}
}
}
}
if (dev_data && dev_data->allow_interrupt_control &&
((cmd->val ^ value) & PCI_COMMAND_INTX_DISABLE))
pci_intx(dev, !(value & PCI_COMMAND_INTX_DISABLE));
cmd->val = value;
......
@@ -82,7 +82,7 @@ struct read_buffer {
struct list_head list;
unsigned int cons;
unsigned int len;
char msg[];
char msg[] __counted_by(len);
};
struct xenbus_file_priv {
@@ -195,7 +195,7 @@ static int queue_reply(struct list_head *queue, const void *data, size_t len)
if (len > XENSTORE_PAYLOAD_MAX)
return -EINVAL;
rb = kmalloc(sizeof(*rb) + len, GFP_KERNEL);
rb = kmalloc(struct_size(rb, msg, len), GFP_KERNEL);
if (rb == NULL)
return -ENOMEM;
......
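The read_buffer annotation and the queue_reply() allocation above follow the common flexible-array hardening pattern: tag the array with __counted_by() so the bounds checkers know its length, and size the allocation with struct_size() instead of open-coded arithmetic. A generic sketch of the pattern under made-up names (not xenbus code):

#include <linux/overflow.h>
#include <linux/slab.h>
#include <linux/string.h>

struct example_buf {
        unsigned int len;
        char msg[] __counted_by(len);   /* bound known to FORTIFY/UBSAN */
};

static struct example_buf *example_buf_alloc(const void *data, unsigned int len)
{
        /* struct_size() computes sizeof(*b) + len bytes, with overflow checking. */
        struct example_buf *b = kmalloc(struct_size(b, msg, len), GFP_KERNEL);

        if (!b)
                return NULL;

        b->len = len;   /* set the counter before writing into msg[] */
        memcpy(b->msg, data, len);

        return b;
}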
@@ -1025,7 +1025,7 @@ static int __init xenbus_init(void)
if (err < 0) {
pr_err("xenstore_late_init couldn't bind irq err=%d\n",
err);
return err;
goto out_error;
}
xs_init_irq = err;
......
@@ -102,7 +102,7 @@ struct privcmd_mmap_resource {
#define PRIVCMD_IRQFD_FLAG_DEASSIGN (1 << 0)
struct privcmd_irqfd {
void __user *dm_op;
__u64 dm_op;
__u32 size; /* Size of structure pointed by dm_op */
__u32 fd;
__u32 flags;
@@ -110,6 +110,22 @@ struct privcmd_irqfd {
__u8 pad[2];
};
/* For privcmd_ioeventfd::flags */
#define PRIVCMD_IOEVENTFD_FLAG_DEASSIGN (1 << 0)
struct privcmd_ioeventfd {
__u64 ioreq;
__u64 ports;
__u64 addr;
__u32 addr_len;
__u32 event_fd;
__u32 vcpus;
__u32 vq;
__u32 flags;
domid_t dom;
__u8 pad[2];
};
/*
* @cmd: IOCTL_PRIVCMD_HYPERCALL
* @arg: &privcmd_hypercall_t
@@ -138,6 +154,8 @@ struct privcmd_irqfd {
#define IOCTL_PRIVCMD_MMAP_RESOURCE \
_IOC(_IOC_NONE, 'P', 7, sizeof(struct privcmd_mmap_resource))
#define IOCTL_PRIVCMD_IRQFD \
_IOC(_IOC_NONE, 'P', 8, sizeof(struct privcmd_irqfd))
_IOW('P', 8, struct privcmd_irqfd)
#define IOCTL_PRIVCMD_IOEVENTFD \
_IOW('P', 9, struct privcmd_ioeventfd)
#endif /* __LINUX_PUBLIC_PRIVCMD_H__ */
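To illustrate the new UAPI, here is a hedged user-space sketch of how a backend daemon might register an eventfd doorbell with IOCTL_PRIVCMD_IOEVENTFD. It assumes the daemon has already mapped the ioreq server pages through privcmd and knows the per-vcpu event channel ports; privcmd_fd, ioreq_map, ports and mmio_base are hypothetical variables, and, like the dm_op field of privcmd_irqfd above, the pointers are carried as __u64 values.

#include <stdint.h>
#include <string.h>
#include <sys/eventfd.h>
#include <sys/ioctl.h>
#include <xen/privcmd.h>                /* assumed installed UAPI header path */

/* Register an eventfd that fires when the guest writes the queue-notify
 * doorbell of the virtio-mmio device at mmio_base (hypothetical setup). */
static int register_doorbell(int privcmd_fd, void *ioreq_map, uint32_t *ports,
                             uint32_t vcpus, uint16_t dom, uint64_t mmio_base,
                             uint32_t vq)
{
        struct privcmd_ioeventfd req;
        int efd = eventfd(0, EFD_NONBLOCK | EFD_CLOEXEC);

        if (efd < 0)
                return -1;

        memset(&req, 0, sizeof(req));
        req.ioreq = (uintptr_t)ioreq_map;   /* mapped ioreq server pages */
        req.ports = (uintptr_t)ports;       /* per-vcpu event channel ports */
        req.addr = mmio_base;               /* kernel adds VIRTIO_MMIO_QUEUE_NOTIFY */
        req.addr_len = 4;                   /* size of the guest's notify write */
        req.event_fd = efd;
        req.vcpus = vcpus;
        req.vq = vq;
        req.dom = dom;

        return ioctl(privcmd_fd, IOCTL_PRIVCMD_IOEVENTFD, &req);
}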
/* SPDX-License-Identifier: MIT */
/*
* ioreq.h: I/O request definitions for device models
* Copyright (c) 2004, Intel Corporation.
*/
#ifndef __XEN_PUBLIC_HVM_IOREQ_H__
#define __XEN_PUBLIC_HVM_IOREQ_H__
#define IOREQ_READ 1
#define IOREQ_WRITE 0
#define STATE_IOREQ_NONE 0
#define STATE_IOREQ_READY 1
#define STATE_IOREQ_INPROCESS 2
#define STATE_IORESP_READY 3
#define IOREQ_TYPE_PIO 0 /* pio */
#define IOREQ_TYPE_COPY 1 /* mmio ops */
#define IOREQ_TYPE_PCI_CONFIG 2
#define IOREQ_TYPE_TIMEOFFSET 7
#define IOREQ_TYPE_INVALIDATE 8 /* mapcache */
/*
* VMExit dispatcher should cooperate with instruction decoder to
* prepare this structure and notify service OS and DM by sending
* virq.
*
* For I/O type IOREQ_TYPE_PCI_CONFIG, the physical address is formatted
* as follows:
*
* 63....48|47..40|39..35|34..32|31........0
* SEGMENT |BUS |DEV |FN |OFFSET
*/
struct ioreq {
uint64_t addr; /* physical address */
uint64_t data; /* data (or paddr of data) */
uint32_t count; /* for rep prefixes */
uint32_t size; /* size in bytes */
uint32_t vp_eport; /* evtchn for notifications to/from device model */
uint16_t _pad0;
uint8_t state:4;
uint8_t data_is_ptr:1; /* if 1, data above is the guest paddr
* of the real data to use. */
uint8_t dir:1; /* 1=read, 0=write */
uint8_t df:1;
uint8_t _pad1:1;
uint8_t type; /* I/O type */
};
#endif /* __XEN_PUBLIC_HVM_IOREQ_H__ */
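As a small, hypothetical illustration of the IOREQ_TYPE_PCI_CONFIG address layout documented in the comment above, the fields could be unpacked as follows (helper name and struct are not part of the header):

#include <stdint.h>

/* Unpack the PCI_CONFIG ioreq address: 63..48 segment, 47..40 bus,
 * 39..35 device, 34..32 function, 31..0 config space offset. */
struct pci_cfg_addr {
        uint16_t segment;
        uint8_t  bus;
        uint8_t  dev;
        uint8_t  fn;
        uint32_t offset;
};

static struct pci_cfg_addr decode_pci_cfg_addr(uint64_t addr)
{
        struct pci_cfg_addr a = {
                .segment = (addr >> 48) & 0xffff,
                .bus     = (addr >> 40) & 0xff,
                .dev     = (addr >> 35) & 0x1f,
                .fn      = (addr >> 32) & 0x7,
                .offset  = addr & 0xffffffff,
        };

        return a;
}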