Commit 3a6d28b1 authored by Konrad Rzeszutek Wilk

Merge branch 'stable/xen-pciback-0.6.3' into stable/drivers

* stable/xen-pciback-0.6.3:
  xen/pciback: Have 'passthrough' option instead of XEN_PCIDEV_BACKEND_PASS and XEN_PCIDEV_BACKEND_VPCI
  xen/pciback: Remove the DEBUG option.
  xen/pciback: Drop two backends, squash and cleanup some code.
  xen/pciback: Print out the MSI/MSI-X (PIRQ) values
  xen/pciback: Don't setup an fake IRQ handler for SR-IOV devices.
  xen: rename pciback module to xen-pciback.
  xen/pciback: Fine-grain the spinlocks and fix BUG: scheduling while atomic cases.
  xen/pciback: Allocate IRQ handler for device that is shared with guest.
  xen/pciback: Disable MSI/MSI-X when reseting a device
  xen/pciback: guest SR-IOV support for PV guest
  xen/pciback: Register the owner (domain) of the PCI device.
  xen/pciback: Cleanup the driver based on checkpatch warnings and errors.
  xen/pciback: xen pci backend driver.

Conflicts:
	drivers/xen/Kconfig
parents 136d9ebf 2ebdc426
@@ -129,4 +129,26 @@ config XEN_TMEM
	  Shim to interface in-kernel Transcendent Memory hooks
	  (e.g. cleancache and frontswap) to Xen tmem hypercalls.

config XEN_PCIDEV_BACKEND
	tristate "Xen PCI-device backend driver"
	depends on PCI && X86 && XEN
	depends on XEN_BACKEND
	default m
	help
	  The PCI device backend driver allows the kernel to export arbitrary
	  PCI devices to other guests. If you select this to be a module, you
	  will need to make sure no other driver has bound to the device(s)
	  you want to make visible to other guests.

	  The parameter "passthrough" allows you to specify how you want the
	  PCI devices to appear in the guest. You can choose the default (0),
	  where the PCI topology starts at 00:00.0, or (1) for passthrough if
	  you want the PCI device topology to appear the same as in the host.

	  The "hide" parameter (only applicable if the backend driver is
	  compiled into the kernel) allows you to bind the PCI devices to this
	  module instead of the default device drivers. The argument is the
	  list of PCI BDFs: xen-pciback.hide=(03:00.0)(04:00.0)

	  If in doubt, say m.
endmenu
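For illustration, the two parameters described above combine as follows, assuming the module parameter names added by this series; the BDFs are examples only, in the same style as the help text:

	# Built into the kernel: seize the devices and pick the topology at boot
	xen-pciback.hide=(03:00.0)(04:00.0) xen-pciback.passthrough=1

	# Built as a module: pass the parameter at load time instead
	modprobe xen-pciback passthrough=1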
@@ -18,6 +18,7 @@ obj-$(CONFIG_XEN_PLATFORM_PCI) += xen-platform-pci.o
obj-$(CONFIG_XEN_TMEM) += tmem.o
obj-$(CONFIG_SWIOTLB_XEN) += swiotlb-xen.o
obj-$(CONFIG_XEN_DOM0) += pci.o
obj-$(CONFIG_XEN_PCIDEV_BACKEND) += xen-pciback/
xen-evtchn-y := evtchn.o
xen-gntdev-y := gntdev.o
obj-$(CONFIG_XEN_PCIDEV_BACKEND) += xen-pciback.o

xen-pciback-y := pci_stub.o pciback_ops.o xenbus.o
xen-pciback-y += conf_space.o conf_space_header.o \
		 conf_space_capability.o \
		 conf_space_quirks.o vpci.o \
		 passthrough.o
This diff is collapsed.
/*
* PCI Backend - Common data structures for overriding the configuration space
*
* Author: Ryan Wilson <hap9@epoch.ncsc.mil>
*/
#ifndef __XEN_PCIBACK_CONF_SPACE_H__
#define __XEN_PCIBACK_CONF_SPACE_H__
#include <linux/list.h>
#include <linux/err.h>
/* conf_field_init can return an errno in a ptr with ERR_PTR() */
typedef void *(*conf_field_init) (struct pci_dev *dev, int offset);
typedef void (*conf_field_reset) (struct pci_dev *dev, int offset, void *data);
typedef void (*conf_field_free) (struct pci_dev *dev, int offset, void *data);
typedef int (*conf_dword_write) (struct pci_dev *dev, int offset, u32 value,
void *data);
typedef int (*conf_word_write) (struct pci_dev *dev, int offset, u16 value,
void *data);
typedef int (*conf_byte_write) (struct pci_dev *dev, int offset, u8 value,
void *data);
typedef int (*conf_dword_read) (struct pci_dev *dev, int offset, u32 *value,
void *data);
typedef int (*conf_word_read) (struct pci_dev *dev, int offset, u16 *value,
void *data);
typedef int (*conf_byte_read) (struct pci_dev *dev, int offset, u8 *value,
void *data);
/* These are the fields within the configuration space whose reads and
* writes we intercept so that their values can be overridden.
*/
struct config_field {
unsigned int offset;
unsigned int size;
unsigned int mask;
conf_field_init init;
conf_field_reset reset;
conf_field_free release;
void (*clean) (struct config_field *field);
union {
struct {
conf_dword_write write;
conf_dword_read read;
} dw;
struct {
conf_word_write write;
conf_word_read read;
} w;
struct {
conf_byte_write write;
conf_byte_read read;
} b;
} u;
struct list_head list;
};
struct config_field_entry {
struct list_head list;
const struct config_field *field;
unsigned int base_offset;
void *data;
};
#define OFFSET(cfg_entry) ((cfg_entry)->base_offset+(cfg_entry)->field->offset)
/* Add fields to a device - the add_fields helpers expect a pointer to
* the first entry of an array whose end is marked by an entry with size == 0
*/
int xen_pcibk_config_add_field_offset(struct pci_dev *dev,
const struct config_field *field,
unsigned int offset);
static inline int xen_pcibk_config_add_field(struct pci_dev *dev,
const struct config_field *field)
{
return xen_pcibk_config_add_field_offset(dev, field, 0);
}
static inline int xen_pcibk_config_add_fields(struct pci_dev *dev,
const struct config_field *field)
{
int i, err = 0;
for (i = 0; field[i].size != 0; i++) {
err = xen_pcibk_config_add_field(dev, &field[i]);
if (err)
break;
}
return err;
}
static inline int xen_pcibk_config_add_fields_offset(struct pci_dev *dev,
const struct config_field *field,
unsigned int offset)
{
int i, err = 0;
for (i = 0; field[i].size != 0; i++) {
err = xen_pcibk_config_add_field_offset(dev, &field[i], offset);
if (err)
break;
}
return err;
}
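A minimal sketch of how a caller drives the helpers above; the field at offset 0x40 is purely hypothetical and not part of this commit:

static const struct config_field example_fields[] = {
	{
		.offset = 0x40,	/* hypothetical vendor-specific register */
		.size = 1,
		.u.b.read = xen_pcibk_read_config_byte,
		/* no .u.b.write handler: guest writes are not forwarded */
	},
	{}	/* size == 0 terminates the array */
};

static int example_add(struct pci_dev *dev)
{
	return xen_pcibk_config_add_fields(dev, example_fields);
}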
/* Read/Write the real configuration space */
int xen_pcibk_read_config_byte(struct pci_dev *dev, int offset, u8 *value,
void *data);
int xen_pcibk_read_config_word(struct pci_dev *dev, int offset, u16 *value,
void *data);
int xen_pcibk_read_config_dword(struct pci_dev *dev, int offset, u32 *value,
void *data);
int xen_pcibk_write_config_byte(struct pci_dev *dev, int offset, u8 value,
void *data);
int xen_pcibk_write_config_word(struct pci_dev *dev, int offset, u16 value,
void *data);
int xen_pcibk_write_config_dword(struct pci_dev *dev, int offset, u32 value,
void *data);
int xen_pcibk_config_capability_init(void);
int xen_pcibk_config_header_add_fields(struct pci_dev *dev);
int xen_pcibk_config_capability_add_fields(struct pci_dev *dev);
#endif /* __XEN_PCIBACK_CONF_SPACE_H__ */
/*
* PCI Backend - Handles the virtual fields found on the capability lists
* in the configuration space.
*
* Author: Ryan Wilson <hap9@epoch.ncsc.mil>
*/
#include <linux/kernel.h>
#include <linux/pci.h>
#include "pciback.h"
#include "conf_space.h"
static LIST_HEAD(capabilities);
struct xen_pcibk_config_capability {
struct list_head cap_list;
int capability;
/* If the device has the capability found above, add these fields */
const struct config_field *fields;
};
static const struct config_field caplist_header[] = {
{
.offset = PCI_CAP_LIST_ID,
.size = 2, /* encompass PCI_CAP_LIST_ID & PCI_CAP_LIST_NEXT */
.u.w.read = xen_pcibk_read_config_word,
.u.w.write = NULL,
},
{}
};
static inline void register_capability(struct xen_pcibk_config_capability *cap)
{
list_add_tail(&cap->cap_list, &capabilities);
}
int xen_pcibk_config_capability_add_fields(struct pci_dev *dev)
{
int err = 0;
struct xen_pcibk_config_capability *cap;
int cap_offset;
list_for_each_entry(cap, &capabilities, cap_list) {
cap_offset = pci_find_capability(dev, cap->capability);
if (cap_offset) {
dev_dbg(&dev->dev, "Found capability 0x%x at 0x%x\n",
cap->capability, cap_offset);
err = xen_pcibk_config_add_fields_offset(dev,
caplist_header,
cap_offset);
if (err)
goto out;
err = xen_pcibk_config_add_fields_offset(dev,
cap->fields,
cap_offset);
if (err)
goto out;
}
}
out:
return err;
}
static int vpd_address_write(struct pci_dev *dev, int offset, u16 value,
void *data)
{
/* Disallow writes to the vital product data */
if (value & PCI_VPD_ADDR_F)
return PCIBIOS_SET_FAILED;
else
return pci_write_config_word(dev, offset, value);
}
static const struct config_field caplist_vpd[] = {
{
.offset = PCI_VPD_ADDR,
.size = 2,
.u.w.read = xen_pcibk_read_config_word,
.u.w.write = vpd_address_write,
},
{
.offset = PCI_VPD_DATA,
.size = 4,
.u.dw.read = xen_pcibk_read_config_dword,
.u.dw.write = NULL,
},
{}
};
static int pm_caps_read(struct pci_dev *dev, int offset, u16 *value,
void *data)
{
int err;
u16 real_value;
err = pci_read_config_word(dev, offset, &real_value);
if (err)
goto out;
*value = real_value & ~PCI_PM_CAP_PME_MASK;
out:
return err;
}
/* PM_OK_BITS specifies the bits that the driver domain is allowed to change.
* Can't allow driver domain to enable PMEs - they're shared */
#define PM_OK_BITS (PCI_PM_CTRL_PME_STATUS|PCI_PM_CTRL_DATA_SEL_MASK)
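/* Worked example (register values from <linux/pci_regs.h>, for illustration):
* PCI_PM_CTRL_PME_STATUS is bit 15 (0x8000), PCI_PM_CTRL_DATA_SEL_MASK covers
* bits 9-12 (0x1e00) and PCI_PM_CTRL_PME_ENABLE is bit 8 (0x0100). In
* pm_ctrl_write() below, a guest write of 0x0100 is reduced by
* "new_value &= PM_OK_BITS" to 0x0000, so the enable bit never reaches the
* hardware, while a write of 0x8000 (PME status, write-one-to-clear) passes
* through unchanged.
*/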
static int pm_ctrl_write(struct pci_dev *dev, int offset, u16 new_value,
void *data)
{
int err;
u16 old_value;
pci_power_t new_state, old_state;
err = pci_read_config_word(dev, offset, &old_value);
if (err)
goto out;
old_state = (pci_power_t)(old_value & PCI_PM_CTRL_STATE_MASK);
new_state = (pci_power_t)(new_value & PCI_PM_CTRL_STATE_MASK);
new_value &= PM_OK_BITS;
if ((old_value & PM_OK_BITS) != new_value) {
new_value = (old_value & ~PM_OK_BITS) | new_value;
err = pci_write_config_word(dev, offset, new_value);
if (err)
goto out;
}
/* Let pci core handle the power management change */
dev_dbg(&dev->dev, "set power state to %x\n", new_state);
err = pci_set_power_state(dev, new_state);
if (err) {
err = PCIBIOS_SET_FAILED;
goto out;
}
out:
return err;
}
/* Ensure PMEs are disabled */
static void *pm_ctrl_init(struct pci_dev *dev, int offset)
{
int err;
u16 value;
err = pci_read_config_word(dev, offset, &value);
if (err)
goto out;
if (value & PCI_PM_CTRL_PME_ENABLE) {
value &= ~PCI_PM_CTRL_PME_ENABLE;
err = pci_write_config_word(dev, offset, value);
}
out:
return ERR_PTR(err);
}
static const struct config_field caplist_pm[] = {
{
.offset = PCI_PM_PMC,
.size = 2,
.u.w.read = pm_caps_read,
},
{
.offset = PCI_PM_CTRL,
.size = 2,
.init = pm_ctrl_init,
.u.w.read = xen_pcibk_read_config_word,
.u.w.write = pm_ctrl_write,
},
{
.offset = PCI_PM_PPB_EXTENSIONS,
.size = 1,
.u.b.read = xen_pcibk_read_config_byte,
},
{
.offset = PCI_PM_DATA_REGISTER,
.size = 1,
.u.b.read = xen_pcibk_read_config_byte,
},
{}
};
static struct xen_pcibk_config_capability xen_pcibk_config_capability_pm = {
.capability = PCI_CAP_ID_PM,
.fields = caplist_pm,
};
static struct xen_pcibk_config_capability xen_pcibk_config_capability_vpd = {
.capability = PCI_CAP_ID_VPD,
.fields = caplist_vpd,
};
int xen_pcibk_config_capability_init(void)
{
register_capability(&xen_pcibk_config_capability_vpd);
register_capability(&xen_pcibk_config_capability_pm);
return 0;
}
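A sketch, not part of this commit, of how one more capability overlay would be wired up: define the fields, wrap them with the capability ID, and add a register_capability() call to xen_pcibk_config_capability_init() above. The AGP capability ID and the offset are purely illustrative:

static const struct config_field caplist_example[] = {
	{
		.offset = 2,	/* first word after the capability header */
		.size = 2,
		.u.w.read = xen_pcibk_read_config_word,
	},
	{}
};

static struct xen_pcibk_config_capability example_cap = {
	.capability = PCI_CAP_ID_AGP,	/* illustrative capability ID */
	.fields = caplist_example,
};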
/*
* PCI Backend - Handles the virtual fields in the configuration space headers.
*
* Author: Ryan Wilson <hap9@epoch.ncsc.mil>
*/
#include <linux/kernel.h>
#include <linux/pci.h>
#include "pciback.h"
#include "conf_space.h"
struct pci_bar_info {
u32 val;
u32 len_val;
int which;
};
#define DRV_NAME "xen-pciback"
#define is_enable_cmd(value) ((value)&(PCI_COMMAND_MEMORY|PCI_COMMAND_IO))
#define is_master_cmd(value) ((value)&PCI_COMMAND_MASTER)
static int command_read(struct pci_dev *dev, int offset, u16 *value, void *data)
{
int i;
int ret;
ret = xen_pcibk_read_config_word(dev, offset, value, data);
if (!atomic_read(&dev->enable_cnt))
return ret;
for (i = 0; i < PCI_ROM_RESOURCE; i++) {
if (dev->resource[i].flags & IORESOURCE_IO)
*value |= PCI_COMMAND_IO;
if (dev->resource[i].flags & IORESOURCE_MEM)
*value |= PCI_COMMAND_MEMORY;
}
return ret;
}
static int command_write(struct pci_dev *dev, int offset, u16 value, void *data)
{
struct xen_pcibk_dev_data *dev_data;
int err;
dev_data = pci_get_drvdata(dev);
if (!pci_is_enabled(dev) && is_enable_cmd(value)) {
if (unlikely(verbose_request))
printk(KERN_DEBUG DRV_NAME ": %s: enable\n",
pci_name(dev));
err = pci_enable_device(dev);
if (err)
return err;
if (dev_data)
dev_data->enable_intx = 1;
} else if (pci_is_enabled(dev) && !is_enable_cmd(value)) {
if (unlikely(verbose_request))
printk(KERN_DEBUG DRV_NAME ": %s: disable\n",
pci_name(dev));
pci_disable_device(dev);
if (dev_data)
dev_data->enable_intx = 0;
}
if (!dev->is_busmaster && is_master_cmd(value)) {
if (unlikely(verbose_request))
printk(KERN_DEBUG DRV_NAME ": %s: set bus master\n",
pci_name(dev));
pci_set_master(dev);
}
if (value & PCI_COMMAND_INVALIDATE) {
if (unlikely(verbose_request))
printk(KERN_DEBUG
DRV_NAME ": %s: enable memory-write-invalidate\n",
pci_name(dev));
err = pci_set_mwi(dev);
if (err) {
printk(KERN_WARNING
DRV_NAME ": %s: cannot enable "
"memory-write-invalidate (%d)\n",
pci_name(dev), err);
value &= ~PCI_COMMAND_INVALIDATE;
}
}
return pci_write_config_word(dev, offset, value);
}
static int rom_write(struct pci_dev *dev, int offset, u32 value, void *data)
{
struct pci_bar_info *bar = data;
if (unlikely(!bar)) {
printk(KERN_WARNING DRV_NAME ": driver data not found for %s\n",
pci_name(dev));
return XEN_PCI_ERR_op_failed;
}
/* A write to obtain the length must happen as a 32-bit write.
* This does not (yet) support writing individual bytes
*/
if (value == ~PCI_ROM_ADDRESS_ENABLE)
bar->which = 1;
else {
u32 tmpval;
pci_read_config_dword(dev, offset, &tmpval);
if (tmpval != bar->val && value == bar->val) {
/* Allow restoration of bar value. */
pci_write_config_dword(dev, offset, bar->val);
}
bar->which = 0;
}
/* Do we need to support enabling/disabling the rom address here? */
return 0;
}
/* For the BARs, only allow writes which write ~0 or
* the correct resource information
* (Needed for when the driver probes the resource usage)
*/
static int bar_write(struct pci_dev *dev, int offset, u32 value, void *data)
{
struct pci_bar_info *bar = data;
if (unlikely(!bar)) {
printk(KERN_WARNING DRV_NAME ": driver data not found for %s\n",
pci_name(dev));
return XEN_PCI_ERR_op_failed;
}
/* A write to obtain the length must happen as a 32-bit write.
* This does not (yet) support writing individual bytes
*/
if (value == ~0)
bar->which = 1;
else {
u32 tmpval;
pci_read_config_dword(dev, offset, &tmpval);
if (tmpval != bar->val && value == bar->val) {
/* Allow restoration of bar value. */
pci_write_config_dword(dev, offset, bar->val);
}
bar->which = 0;
}
return 0;
}
static int bar_read(struct pci_dev *dev, int offset, u32 *value, void *data)
{
struct pci_bar_info *bar = data;
if (unlikely(!bar)) {
printk(KERN_WARNING DRV_NAME ": driver data not found for %s\n",
pci_name(dev));
return XEN_PCI_ERR_op_failed;
}
*value = bar->which ? bar->len_val : bar->val;
return 0;
}
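/* Taken together, the three handlers above implement the BAR sizing handshake
* from the guest's point of view: a write of ~0 flips bar->which so that the
* next read returns len_val instead of the live address; writing the saved
* address back restores it, and anything else is dropped. Note that this
* version reports the raw region length in len_val (see read_dev_bar() below)
* rather than the encoded size mask a physical BAR read would return.
*/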
static inline void read_dev_bar(struct pci_dev *dev,
struct pci_bar_info *bar_info, int offset,
u32 len_mask)
{
int pos;
struct resource *res = dev->resource;
if (offset == PCI_ROM_ADDRESS || offset == PCI_ROM_ADDRESS1)
pos = PCI_ROM_RESOURCE;
else {
pos = (offset - PCI_BASE_ADDRESS_0) / 4;
if (pos && ((res[pos - 1].flags & (PCI_BASE_ADDRESS_SPACE |
PCI_BASE_ADDRESS_MEM_TYPE_MASK)) ==
(PCI_BASE_ADDRESS_SPACE_MEMORY |
PCI_BASE_ADDRESS_MEM_TYPE_64))) {
bar_info->val = res[pos - 1].start >> 32;
bar_info->len_val = res[pos - 1].end >> 32;
return;
}
}
bar_info->val = res[pos].start |
(res[pos].flags & PCI_REGION_FLAG_MASK);
bar_info->len_val = res[pos].end - res[pos].start + 1;
}
static void *bar_init(struct pci_dev *dev, int offset)
{
struct pci_bar_info *bar = kmalloc(sizeof(*bar), GFP_KERNEL);
if (!bar)
return ERR_PTR(-ENOMEM);
read_dev_bar(dev, bar, offset, ~0);
bar->which = 0;
return bar;
}
static void *rom_init(struct pci_dev *dev, int offset)
{
struct pci_bar_info *bar = kmalloc(sizeof(*bar), GFP_KERNEL);
if (!bar)
return ERR_PTR(-ENOMEM);
read_dev_bar(dev, bar, offset, ~PCI_ROM_ADDRESS_ENABLE);
bar->which = 0;
return bar;
}
static void bar_reset(struct pci_dev *dev, int offset, void *data)
{
struct pci_bar_info *bar = data;
bar->which = 0;
}
static void bar_release(struct pci_dev *dev, int offset, void *data)
{
kfree(data);
}
static int xen_pcibk_read_vendor(struct pci_dev *dev, int offset,
u16 *value, void *data)
{
*value = dev->vendor;
return 0;
}
static int xen_pcibk_read_device(struct pci_dev *dev, int offset,
u16 *value, void *data)
{
*value = dev->device;
return 0;
}
static int interrupt_read(struct pci_dev *dev, int offset, u8 *value,
void *data)
{
*value = (u8) dev->irq;
return 0;
}
static int bist_write(struct pci_dev *dev, int offset, u8 value, void *data)
{
u8 cur_value;
int err;
err = pci_read_config_byte(dev, offset, &cur_value);
if (err)
goto out;
if ((cur_value & ~PCI_BIST_START) == (value & ~PCI_BIST_START)
|| value == PCI_BIST_START)
err = pci_write_config_byte(dev, offset, value);
out:
return err;
}
static const struct config_field header_common[] = {
{
.offset = PCI_VENDOR_ID,
.size = 2,
.u.w.read = xen_pcibk_read_vendor,
},
{
.offset = PCI_DEVICE_ID,
.size = 2,
.u.w.read = xen_pcibk_read_device,
},
{
.offset = PCI_COMMAND,
.size = 2,
.u.w.read = command_read,
.u.w.write = command_write,
},
{
.offset = PCI_INTERRUPT_LINE,
.size = 1,
.u.b.read = interrupt_read,
},
{
.offset = PCI_INTERRUPT_PIN,
.size = 1,
.u.b.read = xen_pcibk_read_config_byte,
},
{
/* Any side effects of letting driver domain control cache line? */
.offset = PCI_CACHE_LINE_SIZE,
.size = 1,
.u.b.read = xen_pcibk_read_config_byte,
.u.b.write = xen_pcibk_write_config_byte,
},
{
.offset = PCI_LATENCY_TIMER,
.size = 1,
.u.b.read = xen_pcibk_read_config_byte,
},
{
.offset = PCI_BIST,
.size = 1,
.u.b.read = xen_pcibk_read_config_byte,
.u.b.write = bist_write,
},
{}
};
#define CFG_FIELD_BAR(reg_offset) \
{ \
.offset = reg_offset, \
.size = 4, \
.init = bar_init, \
.reset = bar_reset, \
.release = bar_release, \
.u.dw.read = bar_read, \
.u.dw.write = bar_write, \
}
#define CFG_FIELD_ROM(reg_offset) \
{ \
.offset = reg_offset, \
.size = 4, \
.init = rom_init, \
.reset = bar_reset, \
.release = bar_release, \
.u.dw.read = bar_read, \
.u.dw.write = rom_write, \
}
static const struct config_field header_0[] = {
CFG_FIELD_BAR(PCI_BASE_ADDRESS_0),
CFG_FIELD_BAR(PCI_BASE_ADDRESS_1),
CFG_FIELD_BAR(PCI_BASE_ADDRESS_2),
CFG_FIELD_BAR(PCI_BASE_ADDRESS_3),
CFG_FIELD_BAR(PCI_BASE_ADDRESS_4),
CFG_FIELD_BAR(PCI_BASE_ADDRESS_5),
CFG_FIELD_ROM(PCI_ROM_ADDRESS),
{}
};
static const struct config_field header_1[] = {
CFG_FIELD_BAR(PCI_BASE_ADDRESS_0),
CFG_FIELD_BAR(PCI_BASE_ADDRESS_1),
CFG_FIELD_ROM(PCI_ROM_ADDRESS1),
{}
};
int xen_pcibk_config_header_add_fields(struct pci_dev *dev)
{
int err;
err = xen_pcibk_config_add_fields(dev, header_common);
if (err)
goto out;
switch (dev->hdr_type) {
case PCI_HEADER_TYPE_NORMAL:
err = xen_pcibk_config_add_fields(dev, header_0);
break;
case PCI_HEADER_TYPE_BRIDGE:
err = xen_pcibk_config_add_fields(dev, header_1);
break;
default:
err = -EINVAL;
printk(KERN_ERR DRV_NAME ": %s: Unsupported header type %d!\n",
pci_name(dev), dev->hdr_type);
break;
}
out:
return err;
}
/*
* PCI Backend - Handle special overlays for broken devices.
*
* Author: Ryan Wilson <hap9@epoch.ncsc.mil>
* Author: Chris Bookholt <hap10@epoch.ncsc.mil>
*/
#include <linux/kernel.h>
#include <linux/pci.h>
#include "pciback.h"
#include "conf_space.h"
#include "conf_space_quirks.h"
LIST_HEAD(xen_pcibk_quirks);
#define DRV_NAME "xen-pciback"
static inline const struct pci_device_id *
match_one_device(const struct pci_device_id *id, const struct pci_dev *dev)
{
if ((id->vendor == PCI_ANY_ID || id->vendor == dev->vendor) &&
(id->device == PCI_ANY_ID || id->device == dev->device) &&
(id->subvendor == PCI_ANY_ID ||
id->subvendor == dev->subsystem_vendor) &&
(id->subdevice == PCI_ANY_ID ||
id->subdevice == dev->subsystem_device) &&
!((id->class ^ dev->class) & id->class_mask))
return id;
return NULL;
}
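/* Note: this mirrors pci_match_one_device() from drivers/pci/pci.h, which is
* private to the PCI core and therefore duplicated here.
*/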
static struct xen_pcibk_config_quirk *xen_pcibk_find_quirk(struct pci_dev *dev)
{
struct xen_pcibk_config_quirk *tmp_quirk;
list_for_each_entry(tmp_quirk, &xen_pcibk_quirks, quirks_list)
if (match_one_device(&tmp_quirk->devid, dev) != NULL)
goto out;
tmp_quirk = NULL;
printk(KERN_DEBUG DRV_NAME
": quirk didn't match any device xen-pciback knows about\n");
out:
return tmp_quirk;
}
static inline void register_quirk(struct xen_pcibk_config_quirk *quirk)
{
list_add_tail(&quirk->quirks_list, &xen_pcibk_quirks);
}
int xen_pcibk_field_is_dup(struct pci_dev *dev, unsigned int reg)
{
int ret = 0;
struct xen_pcibk_dev_data *dev_data = pci_get_drvdata(dev);
struct config_field_entry *cfg_entry;
list_for_each_entry(cfg_entry, &dev_data->config_fields, list) {
if (OFFSET(cfg_entry) == reg) {
ret = 1;
break;
}
}
return ret;
}
int xen_pcibk_config_quirks_add_field(struct pci_dev *dev,
struct config_field *field)
{
int err = 0;
switch (field->size) {
case 1:
field->u.b.read = xen_pcibk_read_config_byte;
field->u.b.write = xen_pcibk_write_config_byte;
break;
case 2:
field->u.w.read = xen_pcibk_read_config_word;
field->u.w.write = xen_pcibk_write_config_word;
break;
case 4:
field->u.dw.read = xen_pcibk_read_config_dword;
field->u.dw.write = xen_pcibk_write_config_dword;
break;
default:
err = -EINVAL;
goto out;
}
err = xen_pcibk_config_add_field(dev, field);
out:
return err;
}
int xen_pcibk_config_quirks_init(struct pci_dev *dev)
{
struct xen_pcibk_config_quirk *quirk;
int ret = 0;
quirk = kzalloc(sizeof(*quirk), GFP_ATOMIC);
if (!quirk) {
ret = -ENOMEM;
goto out;
}
quirk->devid.vendor = dev->vendor;
quirk->devid.device = dev->device;
quirk->devid.subvendor = dev->subsystem_vendor;
quirk->devid.subdevice = dev->subsystem_device;
quirk->devid.class = 0;
quirk->devid.class_mask = 0;
quirk->devid.driver_data = 0UL;
quirk->pdev = dev;
register_quirk(quirk);
out:
return ret;
}
void xen_pcibk_config_field_free(struct config_field *field)
{
kfree(field);
}
int xen_pcibk_config_quirk_release(struct pci_dev *dev)
{
struct xen_pcibk_config_quirk *quirk;
int ret = 0;
quirk = xen_pcibk_find_quirk(dev);
if (!quirk) {
ret = -ENXIO;
goto out;
}
list_del(&quirk->quirks_list);
kfree(quirk);
out:
return ret;
}
/*
* PCI Backend - Data structures for special overlays for broken devices.
*
* Ryan Wilson <hap9@epoch.ncsc.mil>
* Chris Bookholt <hap10@epoch.ncsc.mil>
*/
#ifndef __XEN_PCIBACK_CONF_SPACE_QUIRKS_H__
#define __XEN_PCIBACK_CONF_SPACE_QUIRKS_H__
#include <linux/pci.h>
#include <linux/list.h>
struct xen_pcibk_config_quirk {
struct list_head quirks_list;
struct pci_device_id devid;
struct pci_dev *pdev;
};
int xen_pcibk_config_quirks_add_field(struct pci_dev *dev,
struct config_field *field);
int xen_pcibk_config_quirks_remove_field(struct pci_dev *dev, int reg);
int xen_pcibk_config_quirks_init(struct pci_dev *dev);
void xen_pcibk_config_field_free(struct config_field *field);
int xen_pcibk_config_quirk_release(struct pci_dev *dev);
int xen_pcibk_field_is_dup(struct pci_dev *dev, unsigned int reg);
#endif
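The quirk list declared above is populated at runtime through a sysfs attribute implemented in pci_stub.c (collapsed in this diff). Assuming the stock pcistub interface and its dddd:bb:dd.f-reg:size:mask string format, exposing a hypothetical two-byte register at offset 0xE0 of an assigned device would look roughly like:

	echo "0000:03:00.0-E0:2:FFFF" > /sys/bus/pci/drivers/pciback/quirks

(The sysfs directory is still named "pciback" rather than xen-pciback, since the pci_driver name was kept for tool compatibility when the module was renamed.)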
/*
* PCI Backend - Provides restricted access to the real PCI bus topology
* to the frontend
*
* Author: Ryan Wilson <hap9@epoch.ncsc.mil>
*/
#include <linux/list.h>
#include <linux/pci.h>
#include <linux/spinlock.h>
#include "pciback.h"
struct passthrough_dev_data {
/* Access to dev_list must be protected by lock */
struct list_head dev_list;
spinlock_t lock;
};
static struct pci_dev *__xen_pcibk_get_pci_dev(struct xen_pcibk_device *pdev,
unsigned int domain,
unsigned int bus,
unsigned int devfn)
{
struct passthrough_dev_data *dev_data = pdev->pci_dev_data;
struct pci_dev_entry *dev_entry;
struct pci_dev *dev = NULL;
unsigned long flags;
spin_lock_irqsave(&dev_data->lock, flags);
list_for_each_entry(dev_entry, &dev_data->dev_list, list) {
if (domain == (unsigned int)pci_domain_nr(dev_entry->dev->bus)
&& bus == (unsigned int)dev_entry->dev->bus->number
&& devfn == dev_entry->dev->devfn) {
dev = dev_entry->dev;
break;
}
}
spin_unlock_irqrestore(&dev_data->lock, flags);
return dev;
}
static int __xen_pcibk_add_pci_dev(struct xen_pcibk_device *pdev,
struct pci_dev *dev,
int devid, publish_pci_dev_cb publish_cb)
{
struct passthrough_dev_data *dev_data = pdev->pci_dev_data;
struct pci_dev_entry *dev_entry;
unsigned long flags;
unsigned int domain, bus, devfn;
int err;
dev_entry = kmalloc(sizeof(*dev_entry), GFP_KERNEL);
if (!dev_entry)
return -ENOMEM;
dev_entry->dev = dev;
spin_lock_irqsave(&dev_data->lock, flags);
list_add_tail(&dev_entry->list, &dev_data->dev_list);
spin_unlock_irqrestore(&dev_data->lock, flags);
/* Publish this device. */
domain = (unsigned int)pci_domain_nr(dev->bus);
bus = (unsigned int)dev->bus->number;
devfn = dev->devfn;
err = publish_cb(pdev, domain, bus, devfn, devid);
return err;
}
static void __xen_pcibk_release_pci_dev(struct xen_pcibk_device *pdev,
struct pci_dev *dev)
{
struct passthrough_dev_data *dev_data = pdev->pci_dev_data;
struct pci_dev_entry *dev_entry, *t;
struct pci_dev *found_dev = NULL;
unsigned long flags;
spin_lock_irqsave(&dev_data->lock, flags);
list_for_each_entry_safe(dev_entry, t, &dev_data->dev_list, list) {
if (dev_entry->dev == dev) {
list_del(&dev_entry->list);
found_dev = dev_entry->dev;
kfree(dev_entry);
}
}
spin_unlock_irqrestore(&dev_data->lock, flags);
if (found_dev)
pcistub_put_pci_dev(found_dev);
}
static int __xen_pcibk_init_devices(struct xen_pcibk_device *pdev)
{
struct passthrough_dev_data *dev_data;
dev_data = kmalloc(sizeof(*dev_data), GFP_KERNEL);
if (!dev_data)
return -ENOMEM;
spin_lock_init(&dev_data->lock);
INIT_LIST_HEAD(&dev_data->dev_list);
pdev->pci_dev_data = dev_data;
return 0;
}
static int __xen_pcibk_publish_pci_roots(struct xen_pcibk_device *pdev,
publish_pci_root_cb publish_root_cb)
{
int err = 0;
struct passthrough_dev_data *dev_data = pdev->pci_dev_data;
struct pci_dev_entry *dev_entry, *e, *tmp;
struct pci_dev *dev;
int found;
unsigned int domain, bus;
spin_lock(&dev_data->lock);
list_for_each_entry_safe(dev_entry, tmp, &dev_data->dev_list, list) {
/* Only publish this device as a root if none of its
* parent bridges are exported
*/
found = 0;
dev = dev_entry->dev->bus->self;
for (; !found && dev != NULL; dev = dev->bus->self) {
list_for_each_entry(e, &dev_data->dev_list, list) {
if (dev == e->dev) {
found = 1;
break;
}
}
}
domain = (unsigned int)pci_domain_nr(dev_entry->dev->bus);
bus = (unsigned int)dev_entry->dev->bus->number;
if (!found) {
spin_unlock(&dev_data->lock);
err = publish_root_cb(pdev, domain, bus);
if (err)
break;
spin_lock(&dev_data->lock);
}
}
if (!err)
spin_unlock(&dev_data->lock);
return err;
}
static void __xen_pcibk_release_devices(struct xen_pcibk_device *pdev)
{
struct passthrough_dev_data *dev_data = pdev->pci_dev_data;
struct pci_dev_entry *dev_entry, *t;
list_for_each_entry_safe(dev_entry, t, &dev_data->dev_list, list) {
list_del(&dev_entry->list);
pcistub_put_pci_dev(dev_entry->dev);
kfree(dev_entry);
}
kfree(dev_data);
pdev->pci_dev_data = NULL;
}
static int __xen_pcibk_get_pcifront_dev(struct pci_dev *pcidev,
struct xen_pcibk_device *pdev,
unsigned int *domain, unsigned int *bus,
unsigned int *devfn)
{
*domain = pci_domain_nr(pcidev->bus);
*bus = pcidev->bus->number;
*devfn = pcidev->devfn;
return 1;
}
struct xen_pcibk_backend xen_pcibk_passthrough_backend = {
.name = "passthrough",
.init = __xen_pcibk_init_devices,
.free = __xen_pcibk_release_devices,
.find = __xen_pcibk_get_pcifront_dev,
.publish = __xen_pcibk_publish_pci_roots,
.release = __xen_pcibk_release_pci_dev,
.add = __xen_pcibk_add_pci_dev,
.get = __xen_pcibk_get_pci_dev,
};
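/* Which backend is active is decided once at init: xenbus.c (collapsed in
* this diff) points xen_pcibk_backend at either this structure or
* xen_pcibk_vpci_backend, depending on the "passthrough" module parameter
* described in the Kconfig help above.
*/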
This diff is collapsed.
/*
* PCI Backend Common Data Structures & Function Declarations
*
* Author: Ryan Wilson <hap9@epoch.ncsc.mil>
*/
#ifndef __XEN_PCIBACK_H__
#define __XEN_PCIBACK_H__
#include <linux/pci.h>
#include <linux/interrupt.h>
#include <xen/xenbus.h>
#include <linux/list.h>
#include <linux/spinlock.h>
#include <linux/workqueue.h>
#include <linux/atomic.h>
#include <xen/interface/io/pciif.h>
struct pci_dev_entry {
struct list_head list;
struct pci_dev *dev;
};
#define _PDEVF_op_active (0)
#define PDEVF_op_active (1<<(_PDEVF_op_active))
#define _PCIB_op_pending (1)
#define PCIB_op_pending (1<<(_PCIB_op_pending))
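/* The pairs above are atomic bit numbers (leading underscore) and the
* corresponding masks for xen_pcibk_device->flags below. A sketch of the
* intended usage, with the real users in pciback_ops.c (collapsed in this
* diff):
*
*	if (!test_and_set_bit(_PDEVF_op_active, &pdev->flags)) {
*		... we own the slot: process the request ...
*		smp_mb__before_clear_bit();
*		clear_bit(_PDEVF_op_active, &pdev->flags);
*	}
*/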
struct xen_pcibk_device {
void *pci_dev_data;
spinlock_t dev_lock;
struct xenbus_device *xdev;
struct xenbus_watch be_watch;
u8 be_watching;
int evtchn_irq;
struct xen_pci_sharedinfo *sh_info;
unsigned long flags;
struct work_struct op_work;
};
struct xen_pcibk_dev_data {
struct list_head config_fields;
unsigned int permissive:1;
unsigned int warned_on_write:1;
unsigned int enable_intx:1;
unsigned int isr_on:1; /* Whether the IRQ handler is installed. */
unsigned int ack_intr:1; /* .. and ACK-ing */
unsigned long handled;
unsigned int irq; /* Saved in case device transitions to MSI/MSI-X */
char irq_name[0]; /* xen-pcibk[000:04:00.0] */
};
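/* irq_name is a zero-length (flexible) array, so the owner allocates the
* struct and the name in one go. Roughly what pci_stub.c (collapsed in this
* diff) does:
*
*	dev_data = kzalloc(sizeof(*dev_data) + strlen(DRV_NAME "[]")
*			   + strlen(pci_name(dev)) + 1, GFP_ATOMIC);
*	sprintf(dev_data->irq_name, DRV_NAME "[%s]", pci_name(dev));
*/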
/* Used by XenBus and xen_pcibk_ops.c */
extern wait_queue_head_t xen_pcibk_aer_wait_queue;
extern struct workqueue_struct *xen_pcibk_wq;
/* Used by pcistub.c and conf_space_quirks.c */
extern struct list_head xen_pcibk_quirks;
/* Get/Put PCI Devices that are hidden from the PCI Backend Domain */
struct pci_dev *pcistub_get_pci_dev_by_slot(struct xen_pcibk_device *pdev,
int domain, int bus,
int slot, int func);
struct pci_dev *pcistub_get_pci_dev(struct xen_pcibk_device *pdev,
struct pci_dev *dev);
void pcistub_put_pci_dev(struct pci_dev *dev);
/* Ensure a device is turned off or reset */
void xen_pcibk_reset_device(struct pci_dev *pdev);
/* Access a virtual configuration space for a PCI device */
int xen_pcibk_config_init(void);
int xen_pcibk_config_init_dev(struct pci_dev *dev);
void xen_pcibk_config_free_dyn_fields(struct pci_dev *dev);
void xen_pcibk_config_reset_dev(struct pci_dev *dev);
void xen_pcibk_config_free_dev(struct pci_dev *dev);
int xen_pcibk_config_read(struct pci_dev *dev, int offset, int size,
u32 *ret_val);
int xen_pcibk_config_write(struct pci_dev *dev, int offset, int size,
u32 value);
/* Handle requests for specific devices from the frontend */
typedef int (*publish_pci_dev_cb) (struct xen_pcibk_device *pdev,
unsigned int domain, unsigned int bus,
unsigned int devfn, unsigned int devid);
typedef int (*publish_pci_root_cb) (struct xen_pcibk_device *pdev,
unsigned int domain, unsigned int bus);
/* Backend registration for the two types of BDF representation:
* vpci - BDFs start at 00
* passthrough - BDFs are exactly like in the host.
*/
struct xen_pcibk_backend {
char *name;
int (*init)(struct xen_pcibk_device *pdev);
void (*free)(struct xen_pcibk_device *pdev);
int (*find)(struct pci_dev *pcidev, struct xen_pcibk_device *pdev,
unsigned int *domain, unsigned int *bus,
unsigned int *devfn);
int (*publish)(struct xen_pcibk_device *pdev, publish_pci_root_cb cb);
void (*release)(struct xen_pcibk_device *pdev, struct pci_dev *dev);
int (*add)(struct xen_pcibk_device *pdev, struct pci_dev *dev,
int devid, publish_pci_dev_cb publish_cb);
struct pci_dev *(*get)(struct xen_pcibk_device *pdev,
unsigned int domain, unsigned int bus,
unsigned int devfn);
};
extern struct xen_pcibk_backend xen_pcibk_vpci_backend;
extern struct xen_pcibk_backend xen_pcibk_passthrough_backend;
extern struct xen_pcibk_backend *xen_pcibk_backend;
static inline int xen_pcibk_add_pci_dev(struct xen_pcibk_device *pdev,
struct pci_dev *dev,
int devid,
publish_pci_dev_cb publish_cb)
{
if (xen_pcibk_backend && xen_pcibk_backend->add)
return xen_pcibk_backend->add(pdev, dev, devid, publish_cb);
return -1;
};
static inline void xen_pcibk_release_pci_dev(struct xen_pcibk_device *pdev,
struct pci_dev *dev)
{
if (xen_pcibk_backend && xen_pcibk_backend->release)
return xen_pcibk_backend->release(pdev, dev);
};
static inline struct pci_dev *
xen_pcibk_get_pci_dev(struct xen_pcibk_device *pdev, unsigned int domain,
unsigned int bus, unsigned int devfn)
{
if (xen_pcibk_backend && xen_pcibk_backend->get)
return xen_pcibk_backend->get(pdev, domain, bus, devfn);
return NULL;
};
/* Used by Dom0 PCIe AER handling: fetch the guest domain/bus/devfn of a
* device in xen_pcibk before sending an AER request to pcifront, so that the
* guest can identify the device and cooperate with xen_pcibk to finish the
* AER recovery job, if the device driver has that capability.
*/
static inline int xen_pcibk_get_pcifront_dev(struct pci_dev *pcidev,
struct xen_pcibk_device *pdev,
unsigned int *domain,
unsigned int *bus,
unsigned int *devfn)
{
if (xen_pcibk_backend && xen_pcibk_backend->find)
return xen_pcibk_backend->find(pcidev, pdev, domain, bus,
devfn);
return -1;
};
static inline int xen_pcibk_init_devices(struct xen_pcibk_device *pdev)
{
if (xen_pcibk_backend && xen_pcibk_backend->init)
return xen_pcibk_backend->init(pdev);
return -1;
};
static inline int xen_pcibk_publish_pci_roots(struct xen_pcibk_device *pdev,
publish_pci_root_cb cb)
{
if (xen_pcibk_backend && xen_pcibk_backend->publish)
return xen_pcibk_backend->publish(pdev, cb);
return -1;
};
static inline void xen_pcibk_release_devices(struct xen_pcibk_device *pdev)
{
if (xen_pcibk_backend && xen_pcibk_backend->free)
return xen_pcibk_backend->free(pdev);
};
/* Handles events from front-end */
irqreturn_t xen_pcibk_handle_event(int irq, void *dev_id);
void xen_pcibk_do_op(struct work_struct *data);
int xen_pcibk_xenbus_register(void);
void xen_pcibk_xenbus_unregister(void);
extern int verbose_request;
void xen_pcibk_test_and_schedule_op(struct xen_pcibk_device *pdev);
/* Handles shared IRQs that can be routed to both the device domain and the
* control domain. */
void xen_pcibk_irq_handler(struct pci_dev *dev, int reset);
#endif
This diff is collapsed.
/*
* PCI Backend - Provides a Virtual PCI bus (with real devices)
* to the frontend
*
* Author: Ryan Wilson <hap9@epoch.ncsc.mil>
*/
#include <linux/list.h>
#include <linux/slab.h>
#include <linux/pci.h>
#include <linux/spinlock.h>
#include "pciback.h"
#define PCI_SLOT_MAX 32
#define DRV_NAME "xen-pciback"
struct vpci_dev_data {
/* Access to dev_list must be protected by lock */
struct list_head dev_list[PCI_SLOT_MAX];
spinlock_t lock;
};
static inline struct list_head *list_first(struct list_head *head)
{
return head->next;
}
static struct pci_dev *__xen_pcibk_get_pci_dev(struct xen_pcibk_device *pdev,
unsigned int domain,
unsigned int bus,
unsigned int devfn)
{
struct pci_dev_entry *entry;
struct pci_dev *dev = NULL;
struct vpci_dev_data *vpci_dev = pdev->pci_dev_data;
unsigned long flags;
if (domain != 0 || bus != 0)
return NULL;
if (PCI_SLOT(devfn) < PCI_SLOT_MAX) {
spin_lock_irqsave(&vpci_dev->lock, flags);
list_for_each_entry(entry,
&vpci_dev->dev_list[PCI_SLOT(devfn)],
list) {
if (PCI_FUNC(entry->dev->devfn) == PCI_FUNC(devfn)) {
dev = entry->dev;
break;
}
}
spin_unlock_irqrestore(&vpci_dev->lock, flags);
}
return dev;
}
static inline int match_slot(struct pci_dev *l, struct pci_dev *r)
{
if (pci_domain_nr(l->bus) == pci_domain_nr(r->bus)
&& l->bus == r->bus && PCI_SLOT(l->devfn) == PCI_SLOT(r->devfn))
return 1;
return 0;
}
static int __xen_pcibk_add_pci_dev(struct xen_pcibk_device *pdev,
struct pci_dev *dev, int devid,
publish_pci_dev_cb publish_cb)
{
int err = 0, slot, func = -1;
struct pci_dev_entry *t, *dev_entry;
struct vpci_dev_data *vpci_dev = pdev->pci_dev_data;
unsigned long flags;
if ((dev->class >> 24) == PCI_BASE_CLASS_BRIDGE) {
err = -EFAULT;
xenbus_dev_fatal(pdev->xdev, err,
"Can't export bridges on the virtual PCI bus");
goto out;
}
dev_entry = kmalloc(sizeof(*dev_entry), GFP_KERNEL);
if (!dev_entry) {
err = -ENOMEM;
xenbus_dev_fatal(pdev->xdev, err,
"Error adding entry to virtual PCI bus");
goto out;
}
dev_entry->dev = dev;
spin_lock_irqsave(&vpci_dev->lock, flags);
/* Keep multi-function devices together on the virtual PCI bus */
for (slot = 0; slot < PCI_SLOT_MAX; slot++) {
if (!list_empty(&vpci_dev->dev_list[slot])) {
t = list_entry(list_first(&vpci_dev->dev_list[slot]),
struct pci_dev_entry, list);
if (match_slot(dev, t->dev)) {
pr_info(DRV_NAME ": vpci: %s: "
"assign to virtual slot %d func %d\n",
pci_name(dev), slot,
PCI_FUNC(dev->devfn));
list_add_tail(&dev_entry->list,
&vpci_dev->dev_list[slot]);
func = PCI_FUNC(dev->devfn);
goto unlock;
}
}
}
/* Assign to a new slot on the virtual PCI bus */
for (slot = 0; slot < PCI_SLOT_MAX; slot++) {
if (list_empty(&vpci_dev->dev_list[slot])) {
printk(KERN_INFO DRV_NAME
": vpci: %s: assign to virtual slot %d\n",
pci_name(dev), slot);
list_add_tail(&dev_entry->list,
&vpci_dev->dev_list[slot]);
func = PCI_FUNC(dev->devfn);
goto unlock;
}
}
err = -ENOMEM;
xenbus_dev_fatal(pdev->xdev, err,
"No more space on root virtual PCI bus");
unlock:
spin_unlock_irqrestore(&vpci_dev->lock, flags);
/* Publish this device. */
if (!err)
err = publish_cb(pdev, 0, 0, PCI_DEVFN(slot, func), devid);
out:
return err;
}
static void __xen_pcibk_release_pci_dev(struct xen_pcibk_device *pdev,
struct pci_dev *dev)
{
int slot;
struct vpci_dev_data *vpci_dev = pdev->pci_dev_data;
struct pci_dev *found_dev = NULL;
unsigned long flags;
spin_lock_irqsave(&vpci_dev->lock, flags);
for (slot = 0; slot < PCI_SLOT_MAX; slot++) {
struct pci_dev_entry *e, *tmp;
list_for_each_entry_safe(e, tmp, &vpci_dev->dev_list[slot],
list) {
if (e->dev == dev) {
list_del(&e->list);
found_dev = e->dev;
kfree(e);
goto out;
}
}
}
out:
spin_unlock_irqrestore(&vpci_dev->lock, flags);
if (found_dev)
pcistub_put_pci_dev(found_dev);
}
static int __xen_pcibk_init_devices(struct xen_pcibk_device *pdev)
{
int slot;
struct vpci_dev_data *vpci_dev;
vpci_dev = kmalloc(sizeof(*vpci_dev), GFP_KERNEL);
if (!vpci_dev)
return -ENOMEM;
spin_lock_init(&vpci_dev->lock);
for (slot = 0; slot < PCI_SLOT_MAX; slot++)
INIT_LIST_HEAD(&vpci_dev->dev_list[slot]);
pdev->pci_dev_data = vpci_dev;
return 0;
}
static int __xen_pcibk_publish_pci_roots(struct xen_pcibk_device *pdev,
publish_pci_root_cb publish_cb)
{
/* The Virtual PCI bus has only one root */
return publish_cb(pdev, 0, 0);
}
static void __xen_pcibk_release_devices(struct xen_pcibk_device *pdev)
{
int slot;
struct vpci_dev_data *vpci_dev = pdev->pci_dev_data;
for (slot = 0; slot < PCI_SLOT_MAX; slot++) {
struct pci_dev_entry *e, *tmp;
list_for_each_entry_safe(e, tmp, &vpci_dev->dev_list[slot],
list) {
list_del(&e->list);
pcistub_put_pci_dev(e->dev);
kfree(e);
}
}
kfree(vpci_dev);
pdev->pci_dev_data = NULL;
}
static int __xen_pcibk_get_pcifront_dev(struct pci_dev *pcidev,
struct xen_pcibk_device *pdev,
unsigned int *domain, unsigned int *bus,
unsigned int *devfn)
{
struct pci_dev_entry *entry;
struct pci_dev *dev = NULL;
struct vpci_dev_data *vpci_dev = pdev->pci_dev_data;
unsigned long flags;
int found = 0, slot;
spin_lock_irqsave(&vpci_dev->lock, flags);
for (slot = 0; slot < PCI_SLOT_MAX; slot++) {
list_for_each_entry(entry,
&vpci_dev->dev_list[slot],
list) {
dev = entry->dev;
if (dev && dev->bus->number == pcidev->bus->number
&& pci_domain_nr(dev->bus) ==
pci_domain_nr(pcidev->bus)
&& dev->devfn == pcidev->devfn) {
found = 1;
*domain = 0;
*bus = 0;
*devfn = PCI_DEVFN(slot,
PCI_FUNC(pcidev->devfn));
}
}
}
spin_unlock_irqrestore(&vpci_dev->lock, flags);
return found;
}
struct xen_pcibk_backend xen_pcibk_vpci_backend = {
.name = "vpci",
.init = __xen_pcibk_init_devices,
.free = __xen_pcibk_release_devices,
.find = __xen_pcibk_get_pcifront_dev,
.publish = __xen_pcibk_publish_pci_roots,
.release = __xen_pcibk_release_pci_dev,
.add = __xen_pcibk_add_pci_dev,
.get = __xen_pcibk_get_pci_dev,
};
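/* Worked example (illustrative BDFs): host functions 0000:03:00.0 and
* 0000:03:00.1 match match_slot() and share a virtual slot, so if 03:00.0 is
* assigned virtual slot 0, the sibling appears to the guest as 00:00.1; an
* unrelated device such as 0000:04:00.0 then takes the next free slot and
* shows up as 00:01.0.
*/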
This diff is collapsed.