Commit a92336a1 authored by Konrad Rzeszutek Wilk

xen/pciback: Drop two backends, squash and cleanup some code.

 - Remove the slot and controller backends as they
   are not used.
 - Document where the pciback_[read|write]_config_[byte|word|dword]
   helpers are created to make them easier to find (see the sketch
   after this list).
 - Collapse the code from conf_space_capability_msi into pciback_ops.c
 - Collapse conf_space_capability_[pm|vpd].c into conf_space_capability.c
   [and remove the conf_space_capability.h file].
 - Rename all visible functions from pciback to xen_pcibk.
 - Rename all the printk/pr_info, etc., calls that use "pciback" to say
   "xen-pciback".
 - Convert functions that are not referenced outside their file to
   static to avoid polluting the global namespace.
 - Do the same thing for structures that are internal to the driver.
 - Run checkpatch.pl after the renames, fix up its warnings, and fix
   any compile errors caused by the renames.
 - Clean up any structs that checkpatch.pl commented on or that just
   look odd.
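
   Illustrative sketch only (not part of this change): the
   DEFINE_PCI_CONFIG macro in conf_space.c now generates the renamed
   accessors. Assuming an invocation of DEFINE_PCI_CONFIG(read, byte,
   u8 *), which matches the xen_pcibk_read_config_byte prototype kept
   in conf_space.h, the macro expands to roughly:

	/* Generated accessor: forwards a one-byte read of the device's
	 * config space straight to the PCI core (<linux/pci.h>). */
	int xen_pcibk_read_config_byte(struct pci_dev *dev, int offset,
				       u8 *value, void *data)
	{
		return pci_read_config_byte(dev, offset, value);
	}
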
Signed-off-by: Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>
parent c288b67b
......@@ -3,14 +3,9 @@ obj-$(CONFIG_XEN_PCIDEV_BACKEND) += xen-pciback.o
xen-pciback-y := pci_stub.o pciback_ops.o xenbus.o
xen-pciback-y += conf_space.o conf_space_header.o \
conf_space_capability.o \
conf_space_capability_vpd.o \
conf_space_capability_pm.o \
conf_space_quirks.o
xen-pciback-$(CONFIG_PCI_MSI) += conf_space_capability_msi.o
xen-pciback-$(CONFIG_XEN_PCIDEV_BACKEND_VPCI) += vpci.o
xen-pciback-$(CONFIG_XEN_PCIDEV_BACKEND_SLOT) += slot.o
xen-pciback-$(CONFIG_XEN_PCIDEV_BACKEND_PASS) += passthrough.o
xen-pciback-$(CONFIG_XEN_PCIDEV_BACKEND_CONTROLLER) += controller.o
ifeq ($(CONFIG_XEN_PCIDEV_BE_DEBUG),y)
EXTRA_CFLAGS += -DDEBUG
......
......@@ -15,11 +15,14 @@
#include "conf_space.h"
#include "conf_space_quirks.h"
#define DRV_NAME "xen-pciback"
static int permissive;
module_param(permissive, bool, 0644);
/* This is where xen_pcibk_read_config_byte, xen_pcibk_read_config_word,
* xen_pcibk_write_config_word, and xen_pcibk_write_config_byte are created. */
#define DEFINE_PCI_CONFIG(op, size, type) \
int pciback_##op##_config_##size \
int xen_pcibk_##op##_config_##size \
(struct pci_dev *dev, int offset, type value, void *data) \
{ \
return pci_##op##_config_##size(dev, offset, value); \
......@@ -138,11 +141,11 @@ static int pcibios_err_to_errno(int err)
return err;
}
int pciback_config_read(struct pci_dev *dev, int offset, int size,
u32 *ret_val)
int xen_pcibk_config_read(struct pci_dev *dev, int offset, int size,
u32 *ret_val)
{
int err = 0;
struct pciback_dev_data *dev_data = pci_get_drvdata(dev);
struct xen_pcibk_dev_data *dev_data = pci_get_drvdata(dev);
const struct config_field_entry *cfg_entry;
const struct config_field *field;
int req_start, req_end, field_start, field_end;
......@@ -151,7 +154,7 @@ int pciback_config_read(struct pci_dev *dev, int offset, int size,
u32 value = 0, tmp_val;
if (unlikely(verbose_request))
printk(KERN_DEBUG "pciback: %s: read %d bytes at 0x%x\n",
printk(KERN_DEBUG DRV_NAME ": %s: read %d bytes at 0x%x\n",
pci_name(dev), size, offset);
if (!valid_request(offset, size)) {
......@@ -195,17 +198,17 @@ int pciback_config_read(struct pci_dev *dev, int offset, int size,
out:
if (unlikely(verbose_request))
printk(KERN_DEBUG "pciback: %s: read %d bytes at 0x%x = %x\n",
printk(KERN_DEBUG DRV_NAME ": %s: read %d bytes at 0x%x = %x\n",
pci_name(dev), size, offset, value);
*ret_val = value;
return pcibios_err_to_errno(err);
}
int pciback_config_write(struct pci_dev *dev, int offset, int size, u32 value)
int xen_pcibk_config_write(struct pci_dev *dev, int offset, int size, u32 value)
{
int err = 0, handled = 0;
struct pciback_dev_data *dev_data = pci_get_drvdata(dev);
struct xen_pcibk_dev_data *dev_data = pci_get_drvdata(dev);
const struct config_field_entry *cfg_entry;
const struct config_field *field;
u32 tmp_val;
......@@ -213,7 +216,7 @@ int pciback_config_write(struct pci_dev *dev, int offset, int size, u32 value)
if (unlikely(verbose_request))
printk(KERN_DEBUG
"pciback: %s: write request %d bytes at 0x%x = %x\n",
DRV_NAME ": %s: write request %d bytes at 0x%x = %x\n",
pci_name(dev), size, offset, value);
if (!valid_request(offset, size))
......@@ -231,7 +234,7 @@ int pciback_config_write(struct pci_dev *dev, int offset, int size, u32 value)
|| (req_end > field_start && req_end <= field_end)) {
tmp_val = 0;
err = pciback_config_read(dev, field_start,
err = xen_pcibk_config_read(dev, field_start,
field->size, &tmp_val);
if (err)
break;
......@@ -290,9 +293,9 @@ int pciback_config_write(struct pci_dev *dev, int offset, int size, u32 value)
return pcibios_err_to_errno(err);
}
void pciback_config_free_dyn_fields(struct pci_dev *dev)
void xen_pcibk_config_free_dyn_fields(struct pci_dev *dev)
{
struct pciback_dev_data *dev_data = pci_get_drvdata(dev);
struct xen_pcibk_dev_data *dev_data = pci_get_drvdata(dev);
struct config_field_entry *cfg_entry, *t;
const struct config_field *field;
......@@ -316,9 +319,9 @@ void pciback_config_free_dyn_fields(struct pci_dev *dev)
}
}
void pciback_config_reset_dev(struct pci_dev *dev)
void xen_pcibk_config_reset_dev(struct pci_dev *dev)
{
struct pciback_dev_data *dev_data = pci_get_drvdata(dev);
struct xen_pcibk_dev_data *dev_data = pci_get_drvdata(dev);
const struct config_field_entry *cfg_entry;
const struct config_field *field;
......@@ -334,9 +337,9 @@ void pciback_config_reset_dev(struct pci_dev *dev)
}
}
void pciback_config_free_dev(struct pci_dev *dev)
void xen_pcibk_config_free_dev(struct pci_dev *dev)
{
struct pciback_dev_data *dev_data = pci_get_drvdata(dev);
struct xen_pcibk_dev_data *dev_data = pci_get_drvdata(dev);
struct config_field_entry *cfg_entry, *t;
const struct config_field *field;
......@@ -356,12 +359,12 @@ void pciback_config_free_dev(struct pci_dev *dev)
}
}
int pciback_config_add_field_offset(struct pci_dev *dev,
int xen_pcibk_config_add_field_offset(struct pci_dev *dev,
const struct config_field *field,
unsigned int base_offset)
{
int err = 0;
struct pciback_dev_data *dev_data = pci_get_drvdata(dev);
struct xen_pcibk_dev_data *dev_data = pci_get_drvdata(dev);
struct config_field_entry *cfg_entry;
void *tmp;
......@@ -376,7 +379,7 @@ int pciback_config_add_field_offset(struct pci_dev *dev,
cfg_entry->base_offset = base_offset;
/* silently ignore duplicate fields */
err = pciback_field_is_dup(dev, OFFSET(cfg_entry));
err = xen_pcibk_field_is_dup(dev, OFFSET(cfg_entry));
if (err)
goto out;
......@@ -406,30 +409,30 @@ int pciback_config_add_field_offset(struct pci_dev *dev,
* certain registers (like the base address registers (BARs) so that we can
* keep the client from manipulating them directly.
*/
int pciback_config_init_dev(struct pci_dev *dev)
int xen_pcibk_config_init_dev(struct pci_dev *dev)
{
int err = 0;
struct pciback_dev_data *dev_data = pci_get_drvdata(dev);
struct xen_pcibk_dev_data *dev_data = pci_get_drvdata(dev);
dev_dbg(&dev->dev, "initializing virtual configuration space\n");
INIT_LIST_HEAD(&dev_data->config_fields);
err = pciback_config_header_add_fields(dev);
err = xen_pcibk_config_header_add_fields(dev);
if (err)
goto out;
err = pciback_config_capability_add_fields(dev);
err = xen_pcibk_config_capability_add_fields(dev);
if (err)
goto out;
err = pciback_config_quirks_init(dev);
err = xen_pcibk_config_quirks_init(dev);
out:
return err;
}
int pciback_config_init(void)
int xen_pcibk_config_init(void)
{
return pciback_config_capability_init();
return xen_pcibk_config_capability_init();
}
......@@ -69,35 +69,35 @@ struct config_field_entry {
/* Add fields to a device - the add_fields macro expects to get a pointer to
* the first entry in an array (of which the ending is marked by size==0)
*/
int pciback_config_add_field_offset(struct pci_dev *dev,
int xen_pcibk_config_add_field_offset(struct pci_dev *dev,
const struct config_field *field,
unsigned int offset);
static inline int pciback_config_add_field(struct pci_dev *dev,
static inline int xen_pcibk_config_add_field(struct pci_dev *dev,
const struct config_field *field)
{
return pciback_config_add_field_offset(dev, field, 0);
return xen_pcibk_config_add_field_offset(dev, field, 0);
}
static inline int pciback_config_add_fields(struct pci_dev *dev,
static inline int xen_pcibk_config_add_fields(struct pci_dev *dev,
const struct config_field *field)
{
int i, err = 0;
for (i = 0; field[i].size != 0; i++) {
err = pciback_config_add_field(dev, &field[i]);
err = xen_pcibk_config_add_field(dev, &field[i]);
if (err)
break;
}
return err;
}
static inline int pciback_config_add_fields_offset(struct pci_dev *dev,
static inline int xen_pcibk_config_add_fields_offset(struct pci_dev *dev,
const struct config_field *field,
unsigned int offset)
{
int i, err = 0;
for (i = 0; field[i].size != 0; i++) {
err = pciback_config_add_field_offset(dev, &field[i], offset);
err = xen_pcibk_config_add_field_offset(dev, &field[i], offset);
if (err)
break;
}
......@@ -105,22 +105,22 @@ static inline int pciback_config_add_fields_offset(struct pci_dev *dev,
}
/* Read/Write the real configuration space */
int pciback_read_config_byte(struct pci_dev *dev, int offset, u8 *value,
void *data);
int pciback_read_config_word(struct pci_dev *dev, int offset, u16 *value,
void *data);
int pciback_read_config_dword(struct pci_dev *dev, int offset, u32 *value,
void *data);
int pciback_write_config_byte(struct pci_dev *dev, int offset, u8 value,
void *data);
int pciback_write_config_word(struct pci_dev *dev, int offset, u16 value,
void *data);
int pciback_write_config_dword(struct pci_dev *dev, int offset, u32 value,
int xen_pcibk_read_config_byte(struct pci_dev *dev, int offset, u8 *value,
void *data);
int xen_pcibk_read_config_word(struct pci_dev *dev, int offset, u16 *value,
void *data);
int xen_pcibk_read_config_dword(struct pci_dev *dev, int offset, u32 *value,
void *data);
int xen_pcibk_write_config_byte(struct pci_dev *dev, int offset, u8 value,
void *data);
int xen_pcibk_write_config_word(struct pci_dev *dev, int offset, u16 value,
void *data);
int xen_pcibk_write_config_dword(struct pci_dev *dev, int offset, u32 value,
void *data);
int pciback_config_capability_init(void);
int xen_pcibk_config_capability_init(void);
int pciback_config_header_add_fields(struct pci_dev *dev);
int pciback_config_capability_add_fields(struct pci_dev *dev);
int xen_pcibk_config_header_add_fields(struct pci_dev *dev);
int xen_pcibk_config_capability_add_fields(struct pci_dev *dev);
#endif /* __XEN_PCIBACK_CONF_SPACE_H__ */
......@@ -9,29 +9,36 @@
#include <linux/pci.h>
#include "pciback.h"
#include "conf_space.h"
#include "conf_space_capability.h"
static LIST_HEAD(capabilities);
struct xen_pcibk_config_capability {
struct list_head cap_list;
int capability;
/* If the device has the capability found above, add these fields */
const struct config_field *fields;
};
static const struct config_field caplist_header[] = {
{
.offset = PCI_CAP_LIST_ID,
.size = 2, /* encompass PCI_CAP_LIST_ID & PCI_CAP_LIST_NEXT */
.u.w.read = pciback_read_config_word,
.u.w.read = xen_pcibk_read_config_word,
.u.w.write = NULL,
},
{}
};
static inline void register_capability(struct pciback_config_capability *cap)
static inline void register_capability(struct xen_pcibk_config_capability *cap)
{
list_add_tail(&cap->cap_list, &capabilities);
}
int pciback_config_capability_add_fields(struct pci_dev *dev)
int xen_pcibk_config_capability_add_fields(struct pci_dev *dev)
{
int err = 0;
struct pciback_config_capability *cap;
struct xen_pcibk_config_capability *cap;
int cap_offset;
list_for_each_entry(cap, &capabilities, cap_list) {
......@@ -40,12 +47,12 @@ int pciback_config_capability_add_fields(struct pci_dev *dev)
dev_dbg(&dev->dev, "Found capability 0x%x at 0x%x\n",
cap->capability, cap_offset);
err = pciback_config_add_fields_offset(dev,
err = xen_pcibk_config_add_fields_offset(dev,
caplist_header,
cap_offset);
if (err)
goto out;
err = pciback_config_add_fields_offset(dev,
err = xen_pcibk_config_add_fields_offset(dev,
cap->fields,
cap_offset);
if (err)
......@@ -57,10 +64,144 @@ int pciback_config_capability_add_fields(struct pci_dev *dev)
return err;
}
int pciback_config_capability_init(void)
static int vpd_address_write(struct pci_dev *dev, int offset, u16 value,
void *data)
{
/* Disallow writes to the vital product data */
if (value & PCI_VPD_ADDR_F)
return PCIBIOS_SET_FAILED;
else
return pci_write_config_word(dev, offset, value);
}
static const struct config_field caplist_vpd[] = {
{
.offset = PCI_VPD_ADDR,
.size = 2,
.u.w.read = xen_pcibk_read_config_word,
.u.w.write = vpd_address_write,
},
{
.offset = PCI_VPD_DATA,
.size = 4,
.u.dw.read = xen_pcibk_read_config_dword,
.u.dw.write = NULL,
},
{}
};
static int pm_caps_read(struct pci_dev *dev, int offset, u16 *value,
void *data)
{
int err;
u16 real_value;
err = pci_read_config_word(dev, offset, &real_value);
if (err)
goto out;
*value = real_value & ~PCI_PM_CAP_PME_MASK;
out:
return err;
}
/* PM_OK_BITS specifies the bits that the driver domain is allowed to change.
* Can't allow driver domain to enable PMEs - they're shared */
#define PM_OK_BITS (PCI_PM_CTRL_PME_STATUS|PCI_PM_CTRL_DATA_SEL_MASK)
static int pm_ctrl_write(struct pci_dev *dev, int offset, u16 new_value,
void *data)
{
int err;
u16 old_value;
pci_power_t new_state, old_state;
err = pci_read_config_word(dev, offset, &old_value);
if (err)
goto out;
old_state = (pci_power_t)(old_value & PCI_PM_CTRL_STATE_MASK);
new_state = (pci_power_t)(new_value & PCI_PM_CTRL_STATE_MASK);
new_value &= PM_OK_BITS;
if ((old_value & PM_OK_BITS) != new_value) {
new_value = (old_value & ~PM_OK_BITS) | new_value;
err = pci_write_config_word(dev, offset, new_value);
if (err)
goto out;
}
/* Let pci core handle the power management change */
dev_dbg(&dev->dev, "set power state to %x\n", new_state);
err = pci_set_power_state(dev, new_state);
if (err) {
err = PCIBIOS_SET_FAILED;
goto out;
}
out:
return err;
}
/* Ensure PMEs are disabled */
static void *pm_ctrl_init(struct pci_dev *dev, int offset)
{
int err;
u16 value;
err = pci_read_config_word(dev, offset, &value);
if (err)
goto out;
if (value & PCI_PM_CTRL_PME_ENABLE) {
value &= ~PCI_PM_CTRL_PME_ENABLE;
err = pci_write_config_word(dev, offset, value);
}
out:
return ERR_PTR(err);
}
static const struct config_field caplist_pm[] = {
{
.offset = PCI_PM_PMC,
.size = 2,
.u.w.read = pm_caps_read,
},
{
.offset = PCI_PM_CTRL,
.size = 2,
.init = pm_ctrl_init,
.u.w.read = xen_pcibk_read_config_word,
.u.w.write = pm_ctrl_write,
},
{
.offset = PCI_PM_PPB_EXTENSIONS,
.size = 1,
.u.b.read = xen_pcibk_read_config_byte,
},
{
.offset = PCI_PM_DATA_REGISTER,
.size = 1,
.u.b.read = xen_pcibk_read_config_byte,
},
{}
};
static struct xen_pcibk_config_capability xen_pcibk_config_capability_pm = {
.capability = PCI_CAP_ID_PM,
.fields = caplist_pm,
};
static struct xen_pcibk_config_capability xen_pcibk_config_capability_vpd = {
.capability = PCI_CAP_ID_VPD,
.fields = caplist_vpd,
};
int xen_pcibk_config_capability_init(void)
{
register_capability(&pciback_config_capability_vpd);
register_capability(&pciback_config_capability_pm);
register_capability(&xen_pcibk_config_capability_vpd);
register_capability(&xen_pcibk_config_capability_pm);
return 0;
}
/*
* PCI Backend - Data structures for special overlays for structures on
* the capability list.
*
* Author: Ryan Wilson <hap9@epoch.ncsc.mil>
*/
#ifndef __PCIBACK_CONFIG_CAPABILITY_H__
#define __PCIBACK_CONFIG_CAPABILITY_H__
#include <linux/pci.h>
#include <linux/list.h>
struct pciback_config_capability {
struct list_head cap_list;
int capability;
/* If the device has the capability found above, add these fields */
const struct config_field *fields;
};
extern struct pciback_config_capability pciback_config_capability_vpd;
extern struct pciback_config_capability pciback_config_capability_pm;
#endif
/*
* PCI Backend -- Configuration overlay for MSI capability
*/
#include <linux/pci.h>
#include <linux/slab.h>
#include "conf_space.h"
#include "conf_space_capability.h"
#include <xen/interface/io/pciif.h>
#include <xen/events.h>
#include "pciback.h"
int pciback_enable_msi(struct pciback_device *pdev,
struct pci_dev *dev, struct xen_pci_op *op)
{
struct pciback_dev_data *dev_data;
int otherend = pdev->xdev->otherend_id;
int status;
if (unlikely(verbose_request))
printk(KERN_DEBUG "pciback: %s: enable MSI\n", pci_name(dev));
status = pci_enable_msi(dev);
if (status) {
printk(KERN_ERR "error enable msi for guest %x status %x\n",
otherend, status);
op->value = 0;
return XEN_PCI_ERR_op_failed;
}
/* The value the guest needs is actually the IDT vector, not
* the local domain's IRQ number. */
op->value = dev->irq ? xen_pirq_from_irq(dev->irq) : 0;
if (unlikely(verbose_request))
printk(KERN_DEBUG "pciback: %s: MSI: %d\n", pci_name(dev),
op->value);
dev_data = pci_get_drvdata(dev);
if (dev_data)
dev_data->ack_intr = 0;
return 0;
}
int pciback_disable_msi(struct pciback_device *pdev,
struct pci_dev *dev, struct xen_pci_op *op)
{
struct pciback_dev_data *dev_data;
if (unlikely(verbose_request))
printk(KERN_DEBUG "pciback: %s: disable MSI\n", pci_name(dev));
pci_disable_msi(dev);
op->value = dev->irq ? xen_pirq_from_irq(dev->irq) : 0;
if (unlikely(verbose_request))
printk(KERN_DEBUG "pciback: %s: MSI: %d\n", pci_name(dev),
op->value);
dev_data = pci_get_drvdata(dev);
if (dev_data)
dev_data->ack_intr = 1;
return 0;
}
int pciback_enable_msix(struct pciback_device *pdev,
struct pci_dev *dev, struct xen_pci_op *op)
{
struct pciback_dev_data *dev_data;
int i, result;
struct msix_entry *entries;
if (unlikely(verbose_request))
printk(KERN_DEBUG "pciback: %s: enable MSI-X\n",
pci_name(dev));
if (op->value > SH_INFO_MAX_VEC)
return -EINVAL;
entries = kmalloc(op->value * sizeof(*entries), GFP_KERNEL);
if (entries == NULL)
return -ENOMEM;
for (i = 0; i < op->value; i++) {
entries[i].entry = op->msix_entries[i].entry;
entries[i].vector = op->msix_entries[i].vector;
}
result = pci_enable_msix(dev, entries, op->value);
if (result == 0) {
for (i = 0; i < op->value; i++) {
op->msix_entries[i].entry = entries[i].entry;
if (entries[i].vector)
op->msix_entries[i].vector =
xen_pirq_from_irq(entries[i].vector);
if (unlikely(verbose_request))
printk(KERN_DEBUG "pciback: %s: " \
"MSI-X[%d]: %d\n",
pci_name(dev), i,
op->msix_entries[i].vector);
}
} else {
printk(KERN_WARNING "pciback: %s: failed to enable MSI-X: err %d!\n",
pci_name(dev), result);
}
kfree(entries);
op->value = result;
dev_data = pci_get_drvdata(dev);
if (dev_data)
dev_data->ack_intr = 0;
return result;
}
int pciback_disable_msix(struct pciback_device *pdev,
struct pci_dev *dev, struct xen_pci_op *op)
{
struct pciback_dev_data *dev_data;
if (unlikely(verbose_request))
printk(KERN_DEBUG "pciback: %s: disable MSI-X\n",
pci_name(dev));
pci_disable_msix(dev);
/*
* SR-IOV devices (which don't have any legacy IRQ) have
* an undefined IRQ value of zero.
*/
op->value = dev->irq ? xen_pirq_from_irq(dev->irq) : 0;
if (unlikely(verbose_request))
printk(KERN_DEBUG "pciback: %s: MSI-X: %d\n", pci_name(dev),
op->value);
dev_data = pci_get_drvdata(dev);
if (dev_data)
dev_data->ack_intr = 1;
return 0;
}
/*
* PCI Backend - Configuration space overlay for power management
*
* Author: Ryan Wilson <hap9@epoch.ncsc.mil>
*/
#include <linux/pci.h>
#include "conf_space.h"
#include "conf_space_capability.h"
static int pm_caps_read(struct pci_dev *dev, int offset, u16 *value,
void *data)
{
int err;
u16 real_value;
err = pci_read_config_word(dev, offset, &real_value);
if (err)
goto out;
*value = real_value & ~PCI_PM_CAP_PME_MASK;
out:
return err;
}
/* PM_OK_BITS specifies the bits that the driver domain is allowed to change.
* Can't allow driver domain to enable PMEs - they're shared */
#define PM_OK_BITS (PCI_PM_CTRL_PME_STATUS|PCI_PM_CTRL_DATA_SEL_MASK)
static int pm_ctrl_write(struct pci_dev *dev, int offset, u16 new_value,
void *data)
{
int err;
u16 old_value;
pci_power_t new_state, old_state;
err = pci_read_config_word(dev, offset, &old_value);
if (err)
goto out;
old_state = (pci_power_t)(old_value & PCI_PM_CTRL_STATE_MASK);
new_state = (pci_power_t)(new_value & PCI_PM_CTRL_STATE_MASK);
new_value &= PM_OK_BITS;
if ((old_value & PM_OK_BITS) != new_value) {
new_value = (old_value & ~PM_OK_BITS) | new_value;
err = pci_write_config_word(dev, offset, new_value);
if (err)
goto out;
}
/* Let pci core handle the power management change */
dev_dbg(&dev->dev, "set power state to %x\n", new_state);
err = pci_set_power_state(dev, new_state);
if (err) {
err = PCIBIOS_SET_FAILED;
goto out;
}
out:
return err;
}
/* Ensure PMEs are disabled */
static void *pm_ctrl_init(struct pci_dev *dev, int offset)
{
int err;
u16 value;
err = pci_read_config_word(dev, offset, &value);
if (err)
goto out;
if (value & PCI_PM_CTRL_PME_ENABLE) {
value &= ~PCI_PM_CTRL_PME_ENABLE;
err = pci_write_config_word(dev, offset, value);
}
out:
return ERR_PTR(err);
}
static const struct config_field caplist_pm[] = {
{
.offset = PCI_PM_PMC,
.size = 2,
.u.w.read = pm_caps_read,
},
{
.offset = PCI_PM_CTRL,
.size = 2,
.init = pm_ctrl_init,
.u.w.read = pciback_read_config_word,
.u.w.write = pm_ctrl_write,
},
{
.offset = PCI_PM_PPB_EXTENSIONS,
.size = 1,
.u.b.read = pciback_read_config_byte,
},
{
.offset = PCI_PM_DATA_REGISTER,
.size = 1,
.u.b.read = pciback_read_config_byte,
},
{}
};
struct pciback_config_capability pciback_config_capability_pm = {
.capability = PCI_CAP_ID_PM,
.fields = caplist_pm,
};
/*
* PCI Backend - Configuration space overlay for Vital Product Data
*
* Author: Ryan Wilson <hap9@epoch.ncsc.mil>
*/
#include <linux/pci.h>
#include "conf_space.h"
#include "conf_space_capability.h"
static int vpd_address_write(struct pci_dev *dev, int offset, u16 value,
void *data)
{
/* Disallow writes to the vital product data */
if (value & PCI_VPD_ADDR_F)
return PCIBIOS_SET_FAILED;
else
return pci_write_config_word(dev, offset, value);
}
static const struct config_field caplist_vpd[] = {
{
.offset = PCI_VPD_ADDR,
.size = 2,
.u.w.read = pciback_read_config_word,
.u.w.write = vpd_address_write,
},
{
.offset = PCI_VPD_DATA,
.size = 4,
.u.dw.read = pciback_read_config_dword,
.u.dw.write = NULL,
},
{}
};
struct pciback_config_capability pciback_config_capability_vpd = {
.capability = PCI_CAP_ID_VPD,
.fields = caplist_vpd,
};
......@@ -15,6 +15,7 @@ struct pci_bar_info {
int which;
};
#define DRV_NAME "xen-pciback"
#define is_enable_cmd(value) ((value)&(PCI_COMMAND_MEMORY|PCI_COMMAND_IO))
#define is_master_cmd(value) ((value)&PCI_COMMAND_MASTER)
......@@ -23,7 +24,7 @@ static int command_read(struct pci_dev *dev, int offset, u16 *value, void *data)
int i;
int ret;
ret = pciback_read_config_word(dev, offset, value, data);
ret = xen_pcibk_read_config_word(dev, offset, value, data);
if (!atomic_read(&dev->enable_cnt))
return ret;
......@@ -39,13 +40,13 @@ static int command_read(struct pci_dev *dev, int offset, u16 *value, void *data)
static int command_write(struct pci_dev *dev, int offset, u16 value, void *data)
{
struct pciback_dev_data *dev_data;
struct xen_pcibk_dev_data *dev_data;
int err;
dev_data = pci_get_drvdata(dev);
if (!pci_is_enabled(dev) && is_enable_cmd(value)) {
if (unlikely(verbose_request))
printk(KERN_DEBUG "pciback: %s: enable\n",
printk(KERN_DEBUG DRV_NAME ": %s: enable\n",
pci_name(dev));
err = pci_enable_device(dev);
if (err)
......@@ -54,7 +55,7 @@ static int command_write(struct pci_dev *dev, int offset, u16 value, void *data)
dev_data->enable_intx = 1;
} else if (pci_is_enabled(dev) && !is_enable_cmd(value)) {
if (unlikely(verbose_request))
printk(KERN_DEBUG "pciback: %s: disable\n",
printk(KERN_DEBUG DRV_NAME ": %s: disable\n",
pci_name(dev));
pci_disable_device(dev);
if (dev_data)
......@@ -63,7 +64,7 @@ static int command_write(struct pci_dev *dev, int offset, u16 value, void *data)
if (!dev->is_busmaster && is_master_cmd(value)) {
if (unlikely(verbose_request))
printk(KERN_DEBUG "pciback: %s: set bus master\n",
printk(KERN_DEBUG DRV_NAME ": %s: set bus master\n",
pci_name(dev));
pci_set_master(dev);
}
......@@ -71,12 +72,12 @@ static int command_write(struct pci_dev *dev, int offset, u16 value, void *data)
if (value & PCI_COMMAND_INVALIDATE) {
if (unlikely(verbose_request))
printk(KERN_DEBUG
"pciback: %s: enable memory-write-invalidate\n",
DRV_NAME ": %s: enable memory-write-invalidate\n",
pci_name(dev));
err = pci_set_mwi(dev);
if (err) {
printk(KERN_WARNING
"pciback: %s: cannot enable "
DRV_NAME ": %s: cannot enable "
"memory-write-invalidate (%d)\n",
pci_name(dev), err);
value &= ~PCI_COMMAND_INVALIDATE;
......@@ -91,7 +92,7 @@ static int rom_write(struct pci_dev *dev, int offset, u32 value, void *data)
struct pci_bar_info *bar = data;
if (unlikely(!bar)) {
printk(KERN_WARNING "pciback: driver data not found for %s\n",
printk(KERN_WARNING DRV_NAME ": driver data not found for %s\n",
pci_name(dev));
return XEN_PCI_ERR_op_failed;
}
......@@ -125,7 +126,7 @@ static int bar_write(struct pci_dev *dev, int offset, u32 value, void *data)
struct pci_bar_info *bar = data;
if (unlikely(!bar)) {
printk(KERN_WARNING "pciback: driver data not found for %s\n",
printk(KERN_WARNING DRV_NAME ": driver data not found for %s\n",
pci_name(dev));
return XEN_PCI_ERR_op_failed;
}
......@@ -153,7 +154,7 @@ static int bar_read(struct pci_dev *dev, int offset, u32 * value, void *data)
struct pci_bar_info *bar = data;
if (unlikely(!bar)) {
printk(KERN_WARNING "pciback: driver data not found for %s\n",
printk(KERN_WARNING DRV_NAME ": driver data not found for %s\n",
pci_name(dev));
return XEN_PCI_ERR_op_failed;
}
......@@ -227,7 +228,7 @@ static void bar_release(struct pci_dev *dev, int offset, void *data)
kfree(data);
}
static int pciback_read_vendor(struct pci_dev *dev, int offset,
static int xen_pcibk_read_vendor(struct pci_dev *dev, int offset,
u16 *value, void *data)
{
*value = dev->vendor;
......@@ -235,7 +236,7 @@ static int pciback_read_vendor(struct pci_dev *dev, int offset,
return 0;
}
static int pciback_read_device(struct pci_dev *dev, int offset,
static int xen_pcibk_read_device(struct pci_dev *dev, int offset,
u16 *value, void *data)
{
*value = dev->device;
......@@ -272,12 +273,12 @@ static const struct config_field header_common[] = {
{
.offset = PCI_VENDOR_ID,
.size = 2,
.u.w.read = pciback_read_vendor,
.u.w.read = xen_pcibk_read_vendor,
},
{
.offset = PCI_DEVICE_ID,
.size = 2,
.u.w.read = pciback_read_device,
.u.w.read = xen_pcibk_read_device,
},
{
.offset = PCI_COMMAND,
......@@ -293,24 +294,24 @@ static const struct config_field header_common[] = {
{
.offset = PCI_INTERRUPT_PIN,
.size = 1,
.u.b.read = pciback_read_config_byte,
.u.b.read = xen_pcibk_read_config_byte,
},
{
/* Any side effects of letting driver domain control cache line? */
.offset = PCI_CACHE_LINE_SIZE,
.size = 1,
.u.b.read = pciback_read_config_byte,
.u.b.write = pciback_write_config_byte,
.u.b.read = xen_pcibk_read_config_byte,
.u.b.write = xen_pcibk_write_config_byte,
},
{
.offset = PCI_LATENCY_TIMER,
.size = 1,
.u.b.read = pciback_read_config_byte,
.u.b.read = xen_pcibk_read_config_byte,
},
{
.offset = PCI_BIST,
.size = 1,
.u.b.read = pciback_read_config_byte,
.u.b.read = xen_pcibk_read_config_byte,
.u.b.write = bist_write,
},
{}
......@@ -356,26 +357,26 @@ static const struct config_field header_1[] = {
{}
};
int pciback_config_header_add_fields(struct pci_dev *dev)
int xen_pcibk_config_header_add_fields(struct pci_dev *dev)
{
int err;
err = pciback_config_add_fields(dev, header_common);
err = xen_pcibk_config_add_fields(dev, header_common);
if (err)
goto out;
switch (dev->hdr_type) {
case PCI_HEADER_TYPE_NORMAL:
err = pciback_config_add_fields(dev, header_0);
err = xen_pcibk_config_add_fields(dev, header_0);
break;
case PCI_HEADER_TYPE_BRIDGE:
err = pciback_config_add_fields(dev, header_1);
err = xen_pcibk_config_add_fields(dev, header_1);
break;
default:
err = -EINVAL;
printk(KERN_ERR "pciback: %s: Unsupported header type %d!\n",
printk(KERN_ERR DRV_NAME ": %s: Unsupported header type %d!\n",
pci_name(dev), dev->hdr_type);
break;
}
......
......@@ -11,8 +11,8 @@
#include "conf_space.h"
#include "conf_space_quirks.h"
LIST_HEAD(pciback_quirks);
LIST_HEAD(xen_pcibk_quirks);
#define DRV_NAME "xen-pciback"
static inline const struct pci_device_id *
match_one_device(const struct pci_device_id *id, const struct pci_dev *dev)
{
......@@ -27,29 +27,29 @@ match_one_device(const struct pci_device_id *id, const struct pci_dev *dev)
return NULL;
}
struct pciback_config_quirk *pciback_find_quirk(struct pci_dev *dev)
static struct xen_pcibk_config_quirk *xen_pcibk_find_quirk(struct pci_dev *dev)
{
struct pciback_config_quirk *tmp_quirk;
struct xen_pcibk_config_quirk *tmp_quirk;
list_for_each_entry(tmp_quirk, &pciback_quirks, quirks_list)
list_for_each_entry(tmp_quirk, &xen_pcibk_quirks, quirks_list)
if (match_one_device(&tmp_quirk->devid, dev) != NULL)
goto out;
tmp_quirk = NULL;
printk(KERN_DEBUG
"quirk didn't match any device pciback knows about\n");
printk(KERN_DEBUG DRV_NAME
":quirk didn't match any device xen_pciback knows about\n");
out:
return tmp_quirk;
}
static inline void register_quirk(struct pciback_config_quirk *quirk)
static inline void register_quirk(struct xen_pcibk_config_quirk *quirk)
{
list_add_tail(&quirk->quirks_list, &pciback_quirks);
list_add_tail(&quirk->quirks_list, &xen_pcibk_quirks);
}
int pciback_field_is_dup(struct pci_dev *dev, unsigned int reg)
int xen_pcibk_field_is_dup(struct pci_dev *dev, unsigned int reg)
{
int ret = 0;
struct pciback_dev_data *dev_data = pci_get_drvdata(dev);
struct xen_pcibk_dev_data *dev_data = pci_get_drvdata(dev);
struct config_field_entry *cfg_entry;
list_for_each_entry(cfg_entry, &dev_data->config_fields, list) {
......@@ -61,38 +61,38 @@ int pciback_field_is_dup(struct pci_dev *dev, unsigned int reg)
return ret;
}
int pciback_config_quirks_add_field(struct pci_dev *dev, struct config_field
int xen_pcibk_config_quirks_add_field(struct pci_dev *dev, struct config_field
*field)
{
int err = 0;
switch (field->size) {
case 1:
field->u.b.read = pciback_read_config_byte;
field->u.b.write = pciback_write_config_byte;
field->u.b.read = xen_pcibk_read_config_byte;
field->u.b.write = xen_pcibk_write_config_byte;
break;
case 2:
field->u.w.read = pciback_read_config_word;
field->u.w.write = pciback_write_config_word;
field->u.w.read = xen_pcibk_read_config_word;
field->u.w.write = xen_pcibk_write_config_word;
break;
case 4:
field->u.dw.read = pciback_read_config_dword;
field->u.dw.write = pciback_write_config_dword;
field->u.dw.read = xen_pcibk_read_config_dword;
field->u.dw.write = xen_pcibk_write_config_dword;
break;
default:
err = -EINVAL;
goto out;
}
pciback_config_add_field(dev, field);
xen_pcibk_config_add_field(dev, field);
out:
return err;
}
int pciback_config_quirks_init(struct pci_dev *dev)
int xen_pcibk_config_quirks_init(struct pci_dev *dev)
{
struct pciback_config_quirk *quirk;
struct xen_pcibk_config_quirk *quirk;
int ret = 0;
quirk = kzalloc(sizeof(*quirk), GFP_ATOMIC);
......@@ -116,17 +116,17 @@ int pciback_config_quirks_init(struct pci_dev *dev)
return ret;
}
void pciback_config_field_free(struct config_field *field)
void xen_pcibk_config_field_free(struct config_field *field)
{
kfree(field);
}
int pciback_config_quirk_release(struct pci_dev *dev)
int xen_pcibk_config_quirk_release(struct pci_dev *dev)
{
struct pciback_config_quirk *quirk;
struct xen_pcibk_config_quirk *quirk;
int ret = 0;
quirk = pciback_find_quirk(dev);
quirk = xen_pcibk_find_quirk(dev);
if (!quirk) {
ret = -ENXIO;
goto out;
......
......@@ -11,25 +11,23 @@
#include <linux/pci.h>
#include <linux/list.h>
struct pciback_config_quirk {
struct xen_pcibk_config_quirk {
struct list_head quirks_list;
struct pci_device_id devid;
struct pci_dev *pdev;
};
struct pciback_config_quirk *pciback_find_quirk(struct pci_dev *dev);
int pciback_config_quirks_add_field(struct pci_dev *dev, struct config_field
int xen_pcibk_config_quirks_add_field(struct pci_dev *dev, struct config_field
*field);
int pciback_config_quirks_remove_field(struct pci_dev *dev, int reg);
int xen_pcibk_config_quirks_remove_field(struct pci_dev *dev, int reg);
int pciback_config_quirks_init(struct pci_dev *dev);
int xen_pcibk_config_quirks_init(struct pci_dev *dev);
void pciback_config_field_free(struct config_field *field);
void xen_pcibk_config_field_free(struct config_field *field);
int pciback_config_quirk_release(struct pci_dev *dev);
int xen_pcibk_config_quirk_release(struct pci_dev *dev);
int pciback_field_is_dup(struct pci_dev *dev, unsigned int reg);
int xen_pcibk_field_is_dup(struct pci_dev *dev, unsigned int reg);
#endif
/*
* Copyright (C) 2007 Hewlett-Packard Development Company, L.P.
* Alex Williamson <alex.williamson@hp.com>
*
* PCI "Controller" Backend - virtualize PCI bus topology based on PCI
* controllers. Devices under the same PCI controller are exposed on the
* same virtual domain:bus. Within a bus, device slots are virtualized
* to compact the bus.
*
* ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
* ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
*/
#include <linux/acpi.h>
#include <linux/list.h>
#include <linux/pci.h>
#include <linux/spinlock.h>
#include "pciback.h"
#define PCI_MAX_BUSSES 255
#define PCI_MAX_SLOTS 32
struct controller_dev_entry {
struct list_head list;
struct pci_dev *dev;
unsigned int devfn;
};
struct controller_list_entry {
struct list_head list;
struct pci_controller *controller;
unsigned int domain;
unsigned int bus;
unsigned int next_devfn;
struct list_head dev_list;
};
struct controller_dev_data {
struct list_head list;
unsigned int next_domain;
unsigned int next_bus;
spinlock_t lock;
};
struct walk_info {
struct pciback_device *pdev;
int resource_count;
int root_num;
};
struct pci_dev *pciback_get_pci_dev(struct pciback_device *pdev,
unsigned int domain, unsigned int bus,
unsigned int devfn)
{
struct controller_dev_data *dev_data = pdev->pci_dev_data;
struct controller_dev_entry *dev_entry;
struct controller_list_entry *cntrl_entry;
struct pci_dev *dev = NULL;
unsigned long flags;
spin_lock_irqsave(&dev_data->lock, flags);
list_for_each_entry(cntrl_entry, &dev_data->list, list) {
if (cntrl_entry->domain != domain ||
cntrl_entry->bus != bus)
continue;
list_for_each_entry(dev_entry, &cntrl_entry->dev_list, list) {
if (devfn == dev_entry->devfn) {
dev = dev_entry->dev;
goto found;
}
}
}
found:
spin_unlock_irqrestore(&dev_data->lock, flags);
return dev;
}
int pciback_add_pci_dev(struct pciback_device *pdev, struct pci_dev *dev,
int devid, publish_pci_dev_cb publish_cb)
{
struct controller_dev_data *dev_data = pdev->pci_dev_data;
struct controller_dev_entry *dev_entry;
struct controller_list_entry *cntrl_entry;
struct pci_controller *dev_controller = PCI_CONTROLLER(dev);
unsigned long flags;
int ret = 0, found = 0;
spin_lock_irqsave(&dev_data->lock, flags);
/* Look to see if we already have a domain:bus for this controller */
list_for_each_entry(cntrl_entry, &dev_data->list, list) {
if (cntrl_entry->controller == dev_controller) {
found = 1;
break;
}
}
if (!found) {
cntrl_entry = kmalloc(sizeof(*cntrl_entry), GFP_ATOMIC);
if (!cntrl_entry) {
ret = -ENOMEM;
goto out;
}
cntrl_entry->controller = dev_controller;
cntrl_entry->next_devfn = PCI_DEVFN(0, 0);
cntrl_entry->domain = dev_data->next_domain;
cntrl_entry->bus = dev_data->next_bus++;
if (dev_data->next_bus > PCI_MAX_BUSSES) {
dev_data->next_domain++;
dev_data->next_bus = 0;
}
INIT_LIST_HEAD(&cntrl_entry->dev_list);
list_add_tail(&cntrl_entry->list, &dev_data->list);
}
if (PCI_SLOT(cntrl_entry->next_devfn) > PCI_MAX_SLOTS) {
/*
* While it seems unlikely, this can actually happen if
* a controller has P2P bridges under it.
*/
xenbus_dev_fatal(pdev->xdev, -ENOSPC, "Virtual bus %04x:%02x "
"is full, no room to export %04x:%02x:%02x.%x",
cntrl_entry->domain, cntrl_entry->bus,
pci_domain_nr(dev->bus), dev->bus->number,
PCI_SLOT(dev->devfn), PCI_FUNC(dev->devfn));
ret = -ENOSPC;
goto out;
}
dev_entry = kmalloc(sizeof(*dev_entry), GFP_ATOMIC);
if (!dev_entry) {
if (list_empty(&cntrl_entry->dev_list)) {
list_del(&cntrl_entry->list);
kfree(cntrl_entry);
}
ret = -ENOMEM;
goto out;
}
dev_entry->dev = dev;
dev_entry->devfn = cntrl_entry->next_devfn;
list_add_tail(&dev_entry->list, &cntrl_entry->dev_list);
cntrl_entry->next_devfn += PCI_DEVFN(1, 0);
out:
spin_unlock_irqrestore(&dev_data->lock, flags);
/* TODO: Publish virtual domain:bus:slot.func here. */
return ret;
}
void pciback_release_pci_dev(struct pciback_device *pdev, struct pci_dev *dev)
{
struct controller_dev_data *dev_data = pdev->pci_dev_data;
struct controller_list_entry *cntrl_entry;
struct controller_dev_entry *dev_entry = NULL;
struct pci_dev *found_dev = NULL;
unsigned long flags;
spin_lock_irqsave(&dev_data->lock, flags);
list_for_each_entry(cntrl_entry, &dev_data->list, list) {
if (cntrl_entry->controller != PCI_CONTROLLER(dev))
continue;
list_for_each_entry(dev_entry, &cntrl_entry->dev_list, list) {
if (dev_entry->dev == dev) {
found_dev = dev_entry->dev;
break;
}
}
}
if (!found_dev) {
spin_unlock_irqrestore(&dev_data->lock, flags);
return;
}
list_del(&dev_entry->list);
kfree(dev_entry);
if (list_empty(&cntrl_entry->dev_list)) {
list_del(&cntrl_entry->list);
kfree(cntrl_entry);
}
spin_unlock_irqrestore(&dev_data->lock, flags);
pcistub_put_pci_dev(found_dev);
}
int pciback_init_devices(struct pciback_device *pdev)
{
struct controller_dev_data *dev_data;
dev_data = kmalloc(sizeof(*dev_data), GFP_KERNEL);
if (!dev_data)
return -ENOMEM;
spin_lock_init(&dev_data->lock);
INIT_LIST_HEAD(&dev_data->list);
/* Starting domain:bus numbers */
dev_data->next_domain = 0;
dev_data->next_bus = 0;
pdev->pci_dev_data = dev_data;
return 0;
}
static acpi_status write_xenbus_resource(struct acpi_resource *res, void *data)
{
struct walk_info *info = data;
struct acpi_resource_address64 addr;
acpi_status status;
int i, len, err;
char str[32], tmp[3];
unsigned char *ptr, *buf;
status = acpi_resource_to_address64(res, &addr);
/* Do we care about this range? Let's check. */
if (!ACPI_SUCCESS(status) ||
!(addr.resource_type == ACPI_MEMORY_RANGE ||
addr.resource_type == ACPI_IO_RANGE) ||
!addr.address_length || addr.producer_consumer != ACPI_PRODUCER)
return AE_OK;
/*
* Furthermore, we really only care to tell the guest about
* address ranges that require address translation of some sort.
*/
if (!(addr.resource_type == ACPI_MEMORY_RANGE &&
addr.info.mem.translation) &&
!(addr.resource_type == ACPI_IO_RANGE &&
addr.info.io.translation))
return AE_OK;
/* Store the resource in xenbus for the guest */
len = snprintf(str, sizeof(str), "root-%d-resource-%d",
info->root_num, info->resource_count);
if (unlikely(len >= (sizeof(str) - 1)))
return AE_OK;
buf = kzalloc((sizeof(*res) * 2) + 1, GFP_KERNEL);
if (!buf)
return AE_OK;
/* Clean out resource_source */
res->data.address64.resource_source.index = 0xFF;
res->data.address64.resource_source.string_length = 0;
res->data.address64.resource_source.string_ptr = NULL;
ptr = (unsigned char *)res;
/* Turn the acpi_resource into an ASCII byte stream */
for (i = 0; i < sizeof(*res); i++) {
snprintf(tmp, sizeof(tmp), "%02x", ptr[i]);
strncat(buf, tmp, 2);
}
err = xenbus_printf(XBT_NIL, info->pdev->xdev->nodename,
str, "%s", buf);
if (!err)
info->resource_count++;
kfree(buf);
return AE_OK;
}
int pciback_publish_pci_roots(struct pciback_device *pdev,
publish_pci_root_cb publish_root_cb)
{
struct controller_dev_data *dev_data = pdev->pci_dev_data;
struct controller_list_entry *cntrl_entry;
int i, root_num, len, err = 0;
unsigned int domain, bus;
char str[64];
struct walk_info info;
spin_lock(&dev_data->lock);
list_for_each_entry(cntrl_entry, &dev_data->list, list) {
/* First publish all the domain:bus info */
err = publish_root_cb(pdev, cntrl_entry->domain,
cntrl_entry->bus);
if (err)
goto out;
/*
* Now figure out which root-%d this belongs to
* so we can associate resources with it.
*/
err = xenbus_scanf(XBT_NIL, pdev->xdev->nodename,
"root_num", "%d", &root_num);
if (err != 1)
goto out;
for (i = 0; i < root_num; i++) {
len = snprintf(str, sizeof(str), "root-%d", i);
if (unlikely(len >= (sizeof(str) - 1))) {
err = -ENOMEM;
goto out;
}
err = xenbus_scanf(XBT_NIL, pdev->xdev->nodename,
str, "%x:%x", &domain, &bus);
if (err != 2)
goto out;
/* Is this the one we just published? */
if (domain == cntrl_entry->domain &&
bus == cntrl_entry->bus)
break;
}
if (i == root_num)
goto out;
info.pdev = pdev;
info.resource_count = 0;
info.root_num = i;
/* Let ACPI do the heavy lifting on decoding resources */
acpi_walk_resources(cntrl_entry->controller->acpi_handle,
METHOD_NAME__CRS, write_xenbus_resource,
&info);
/* No resources. OK. On to the next one */
if (!info.resource_count)
continue;
/* Store the number of resources we wrote for this root-%d */
len = snprintf(str, sizeof(str), "root-%d-resources", i);
if (unlikely(len >= (sizeof(str) - 1))) {
err = -ENOMEM;
goto out;
}
err = xenbus_printf(XBT_NIL, pdev->xdev->nodename, str,
"%d", info.resource_count);
if (err)
goto out;
}
/* Finally, write some magic to synchronize with the guest. */
len = snprintf(str, sizeof(str), "root-resource-magic");
if (unlikely(len >= (sizeof(str) - 1))) {
err = -ENOMEM;
goto out;
}
err = xenbus_printf(XBT_NIL, pdev->xdev->nodename, str,
"%lx", (sizeof(struct acpi_resource) * 2) + 1);
out:
spin_unlock(&dev_data->lock);
return err;
}
void pciback_release_devices(struct pciback_device *pdev)
{
struct controller_dev_data *dev_data = pdev->pci_dev_data;
struct controller_list_entry *cntrl_entry, *c;
struct controller_dev_entry *dev_entry, *d;
list_for_each_entry_safe(cntrl_entry, c, &dev_data->list, list) {
list_for_each_entry_safe(dev_entry, d,
&cntrl_entry->dev_list, list) {
list_del(&dev_entry->list);
pcistub_put_pci_dev(dev_entry->dev);
kfree(dev_entry);
}
list_del(&cntrl_entry->list);
kfree(cntrl_entry);
}
kfree(dev_data);
pdev->pci_dev_data = NULL;
}
int pciback_get_pcifront_dev(struct pci_dev *pcidev,
struct pciback_device *pdev,
unsigned int *domain, unsigned int *bus, unsigned int *devfn)
{
struct controller_dev_data *dev_data = pdev->pci_dev_data;
struct controller_dev_entry *dev_entry;
struct controller_list_entry *cntrl_entry;
unsigned long flags;
int found = 0;
spin_lock_irqsave(&dev_data->lock, flags);
list_for_each_entry(cntrl_entry, &dev_data->list, list) {
list_for_each_entry(dev_entry, &cntrl_entry->dev_list, list) {
if ((dev_entry->dev->bus->number ==
pcidev->bus->number) &&
(dev_entry->dev->devfn ==
pcidev->devfn) &&
(pci_domain_nr(dev_entry->dev->bus) ==
pci_domain_nr(pcidev->bus))) {
found = 1;
*domain = cntrl_entry->domain;
*bus = cntrl_entry->bus;
*devfn = dev_entry->devfn;
goto out;
}
}
}
out:
spin_unlock_irqrestore(&dev_data->lock, flags);
return found;
}
......@@ -16,9 +16,9 @@ struct passthrough_dev_data {
spinlock_t lock;
};
struct pci_dev *pciback_get_pci_dev(struct pciback_device *pdev,
unsigned int domain, unsigned int bus,
unsigned int devfn)
struct pci_dev *xen_pcibk_get_pci_dev(struct xen_pcibk_device *pdev,
unsigned int domain, unsigned int bus,
unsigned int devfn)
{
struct passthrough_dev_data *dev_data = pdev->pci_dev_data;
struct pci_dev_entry *dev_entry;
......@@ -41,8 +41,8 @@ struct pci_dev *pciback_get_pci_dev(struct pciback_device *pdev,
return dev;
}
int pciback_add_pci_dev(struct pciback_device *pdev, struct pci_dev *dev,
int devid, publish_pci_dev_cb publish_cb)
int xen_pcibk_add_pci_dev(struct xen_pcibk_device *pdev, struct pci_dev *dev,
int devid, publish_pci_dev_cb publish_cb)
{
struct passthrough_dev_data *dev_data = pdev->pci_dev_data;
struct pci_dev_entry *dev_entry;
......@@ -68,7 +68,8 @@ int pciback_add_pci_dev(struct pciback_device *pdev, struct pci_dev *dev,
return err;
}
void pciback_release_pci_dev(struct pciback_device *pdev, struct pci_dev *dev)
void xen_pcibk_release_pci_dev(struct xen_pcibk_device *pdev,
struct pci_dev *dev)
{
struct passthrough_dev_data *dev_data = pdev->pci_dev_data;
struct pci_dev_entry *dev_entry, *t;
......@@ -91,7 +92,7 @@ void pciback_release_pci_dev(struct pciback_device *pdev, struct pci_dev *dev)
pcistub_put_pci_dev(found_dev);
}
int pciback_init_devices(struct pciback_device *pdev)
int xen_pcibk_init_devices(struct xen_pcibk_device *pdev)
{
struct passthrough_dev_data *dev_data;
......@@ -108,8 +109,8 @@ int pciback_init_devices(struct pciback_device *pdev)
return 0;
}
int pciback_publish_pci_roots(struct pciback_device *pdev,
publish_pci_root_cb publish_root_cb)
int xen_pcibk_publish_pci_roots(struct xen_pcibk_device *pdev,
publish_pci_root_cb publish_root_cb)
{
int err = 0;
struct passthrough_dev_data *dev_data = pdev->pci_dev_data;
......@@ -153,7 +154,7 @@ int pciback_publish_pci_roots(struct pciback_device *pdev,
return err;
}
void pciback_release_devices(struct pciback_device *pdev)
void xen_pcibk_release_devices(struct xen_pcibk_device *pdev)
{
struct passthrough_dev_data *dev_data = pdev->pci_dev_data;
struct pci_dev_entry *dev_entry, *t;
......@@ -168,11 +169,10 @@ void pciback_release_devices(struct pciback_device *pdev)
pdev->pci_dev_data = NULL;
}
int pciback_get_pcifront_dev(struct pci_dev *pcidev,
struct pciback_device *pdev,
unsigned int *domain, unsigned int *bus,
unsigned int *devfn)
int xen_pcibk_get_pcifront_dev(struct pci_dev *pcidev,
struct xen_pcibk_device *pdev,
unsigned int *domain, unsigned int *bus,
unsigned int *devfn)
{
*domain = pci_domain_nr(pcidev->bus);
*bus = pcidev->bus->number;
......
......@@ -21,12 +21,12 @@
#include "conf_space.h"
#include "conf_space_quirks.h"
#define DRV_NAME "pciback"
#define DRV_NAME "xen-pciback"
static char *pci_devs_to_hide;
wait_queue_head_t aer_wait_queue;
/*Add sem for sync AER handling and pciback remove/reconfigue ops,
* We want to avoid in middle of AER ops, pciback devices is being removed
wait_queue_head_t xen_pcibk_aer_wait_queue;
/*Add sem for sync AER handling and xen_pcibk remove/reconfigue ops,
* We want to avoid in middle of AER ops, xen_pcibk devices is being removed
*/
static DECLARE_RWSEM(pcistub_sem);
module_param_named(hide, pci_devs_to_hide, charp, 0444);
......@@ -46,7 +46,7 @@ struct pcistub_device {
spinlock_t lock;
struct pci_dev *dev;
struct pciback_device *pdev;/* non-NULL if struct pci_dev is in use */
struct xen_pcibk_device *pdev;/* non-NULL if struct pci_dev is in use */
};
/* Access to pcistub_devices & seized_devices lists and the initialize_devices
......@@ -95,9 +95,9 @@ static void pcistub_device_release(struct kref *kref)
xen_unregister_device_domain_owner(psdev->dev);
/* Clean-up the device */
pciback_reset_device(psdev->dev);
pciback_config_free_dyn_fields(psdev->dev);
pciback_config_free_dev(psdev->dev);
xen_pcibk_reset_device(psdev->dev);
xen_pcibk_config_free_dyn_fields(psdev->dev);
xen_pcibk_config_free_dev(psdev->dev);
kfree(pci_get_drvdata(psdev->dev));
pci_set_drvdata(psdev->dev, NULL);
......@@ -142,7 +142,7 @@ static struct pcistub_device *pcistub_device_find(int domain, int bus,
return psdev;
}
static struct pci_dev *pcistub_device_get_pci_dev(struct pciback_device *pdev,
static struct pci_dev *pcistub_device_get_pci_dev(struct xen_pcibk_device *pdev,
struct pcistub_device *psdev)
{
struct pci_dev *pci_dev = NULL;
......@@ -163,7 +163,7 @@ static struct pci_dev *pcistub_device_get_pci_dev(struct pciback_device *pdev,
return pci_dev;
}
struct pci_dev *pcistub_get_pci_dev_by_slot(struct pciback_device *pdev,
struct pci_dev *pcistub_get_pci_dev_by_slot(struct xen_pcibk_device *pdev,
int domain, int bus,
int slot, int func)
{
......@@ -187,7 +187,7 @@ struct pci_dev *pcistub_get_pci_dev_by_slot(struct pciback_device *pdev,
return found_dev;
}
struct pci_dev *pcistub_get_pci_dev(struct pciback_device *pdev,
struct pci_dev *pcistub_get_pci_dev(struct xen_pcibk_device *pdev,
struct pci_dev *dev)
{
struct pcistub_device *psdev;
......@@ -224,15 +224,15 @@ void pcistub_put_pci_dev(struct pci_dev *dev)
spin_unlock_irqrestore(&pcistub_devices_lock, flags);
/*hold this lock for avoiding breaking link between
* pcistub and pciback when AER is in processing
* pcistub and xen_pcibk when AER is in processing
*/
down_write(&pcistub_sem);
/* Cleanup our device
* (so it's ready for the next domain)
*/
pciback_reset_device(found_psdev->dev);
pciback_config_free_dyn_fields(found_psdev->dev);
pciback_config_reset_dev(found_psdev->dev);
xen_pcibk_reset_device(found_psdev->dev);
xen_pcibk_config_free_dyn_fields(found_psdev->dev);
xen_pcibk_config_reset_dev(found_psdev->dev);
spin_lock_irqsave(&found_psdev->lock, flags);
found_psdev->pdev = NULL;
......@@ -282,13 +282,13 @@ static int __devinit pcistub_match(struct pci_dev *dev)
static int __devinit pcistub_init_device(struct pci_dev *dev)
{
struct pciback_dev_data *dev_data;
struct xen_pcibk_dev_data *dev_data;
int err = 0;
dev_dbg(&dev->dev, "initializing...\n");
/* The PCI backend is not intended to be a module (or to work with
* removable PCI devices (yet). If it were, pciback_config_free()
* removable PCI devices (yet). If it were, xen_pcibk_config_free()
* would need to be called somewhere to free the memory allocated
* here and then to call kfree(pci_get_drvdata(psdev->dev)).
*/
......@@ -308,8 +308,8 @@ static int __devinit pcistub_init_device(struct pci_dev *dev)
dev_dbg(&dev->dev, "initializing config\n");
init_waitqueue_head(&aer_wait_queue);
err = pciback_config_init_dev(dev);
init_waitqueue_head(&xen_pcibk_aer_wait_queue);
err = xen_pcibk_config_init_dev(dev);
if (err)
goto out;
......@@ -329,12 +329,12 @@ static int __devinit pcistub_init_device(struct pci_dev *dev)
* data is setup before we export)
*/
dev_dbg(&dev->dev, "reset device\n");
pciback_reset_device(dev);
xen_pcibk_reset_device(dev);
return 0;
config_release:
pciback_config_free_dev(dev);
xen_pcibk_config_free_dev(dev);
out:
pci_set_drvdata(dev, NULL);
......@@ -354,7 +354,7 @@ static int __init pcistub_init_devices_late(void)
unsigned long flags;
int err = 0;
pr_debug("pciback: pcistub_init_devices_late\n");
pr_debug(DRV_NAME ": pcistub_init_devices_late\n");
spin_lock_irqsave(&pcistub_devices_lock, flags);
......@@ -458,7 +458,7 @@ static void pcistub_remove(struct pci_dev *dev)
spin_lock_irqsave(&pcistub_devices_lock, flags);
pciback_config_quirk_release(dev);
xen_pcibk_config_quirk_release(dev);
list_for_each_entry(psdev, &pcistub_devices, dev_list) {
if (psdev->dev == dev) {
......@@ -474,17 +474,17 @@ static void pcistub_remove(struct pci_dev *dev)
found_psdev->pdev);
if (found_psdev->pdev) {
printk(KERN_WARNING "pciback: ****** removing device "
printk(KERN_WARNING DRV_NAME ": ****** removing device "
"%s while still in-use! ******\n",
pci_name(found_psdev->dev));
printk(KERN_WARNING "pciback: ****** driver domain may "
"still access this device's i/o resources!\n");
printk(KERN_WARNING "pciback: ****** shutdown driver "
printk(KERN_WARNING DRV_NAME ": ****** driver domain may"
" still access this device's i/o resources!\n");
printk(KERN_WARNING DRV_NAME ": ****** shutdown driver "
"domain before binding device\n");
printk(KERN_WARNING "pciback: ****** to other drivers "
printk(KERN_WARNING DRV_NAME ": ****** to other drivers "
"or domains\n");
pciback_release_pci_dev(found_psdev->pdev,
xen_pcibk_release_pci_dev(found_psdev->pdev,
found_psdev->dev);
}
......@@ -541,11 +541,12 @@ static void kill_domain_by_device(struct pcistub_device *psdev)
}
/* For each aer recovery step error_detected, mmio_enabled, etc, front_end and
* backend need to have cooperation. In pciback, those steps will do similar
* backend need to have cooperation. In xen_pcibk, those steps will do similar
* jobs: send service request and waiting for front_end response.
*/
static pci_ers_result_t common_process(struct pcistub_device *psdev,
pci_channel_state_t state, int aer_cmd, pci_ers_result_t result)
pci_channel_state_t state, int aer_cmd,
pci_ers_result_t result)
{
pci_ers_result_t res = result;
struct xen_pcie_aer_op *aer_op;
......@@ -557,21 +558,21 @@ static pci_ers_result_t common_process(struct pcistub_device *psdev,
/*useful for error_detected callback*/
aer_op->err = state;
/*pcifront_end BDF*/
ret = pciback_get_pcifront_dev(psdev->dev, psdev->pdev,
ret = xen_pcibk_get_pcifront_dev(psdev->dev, psdev->pdev,
&aer_op->domain, &aer_op->bus, &aer_op->devfn);
if (!ret) {
dev_err(&psdev->dev->dev,
"pciback: failed to get pcifront device\n");
DRV_NAME ": failed to get pcifront device\n");
return PCI_ERS_RESULT_NONE;
}
wmb();
dev_dbg(&psdev->dev->dev,
"pciback: aer_op %x dom %x bus %x devfn %x\n",
DRV_NAME ": aer_op %x dom %x bus %x devfn %x\n",
aer_cmd, aer_op->domain, aer_op->bus, aer_op->devfn);
/*local flag to mark there's aer request, pciback callback will use this
* flag to judge whether we need to check pci-front give aer service
* ack signal
/*local flag to mark there's aer request, xen_pcibk callback will use
* this flag to judge whether we need to check pci-front give aer
* service ack signal
*/
set_bit(_PCIB_op_pending, (unsigned long *)&psdev->pdev->flags);
......@@ -584,8 +585,9 @@ static pci_ers_result_t common_process(struct pcistub_device *psdev,
wmb();
notify_remote_via_irq(psdev->pdev->evtchn_irq);
ret = wait_event_timeout(aer_wait_queue, !(test_bit(_XEN_PCIB_active,
(unsigned long *)&psdev->pdev->sh_info->flags)), 300*HZ);
ret = wait_event_timeout(xen_pcibk_aer_wait_queue,
!(test_bit(_XEN_PCIB_active, (unsigned long *)
&psdev->pdev->sh_info->flags)), 300*HZ);
if (!ret) {
if (test_bit(_XEN_PCIB_active,
......@@ -603,8 +605,8 @@ static pci_ers_result_t common_process(struct pcistub_device *psdev,
if (test_bit(_XEN_PCIF_active,
(unsigned long *)&psdev->pdev->sh_info->flags)) {
dev_dbg(&psdev->dev->dev,
"schedule pci_conf service in pciback\n");
test_and_schedule_op(psdev->pdev);
"schedule pci_conf service in xen_pcibk\n");
xen_pcibk_test_and_schedule_op(psdev->pdev);
}
res = (pci_ers_result_t)aer_op->err;
......@@ -612,19 +614,19 @@ static pci_ers_result_t common_process(struct pcistub_device *psdev,
}
/*
* pciback_slot_reset: it will send the slot_reset request to pcifront in case
* xen_pcibk_slot_reset: it will send the slot_reset request to pcifront in case
* of the device driver could provide this service, and then wait for pcifront
* ack.
* @dev: pointer to PCI devices
* return value is used by aer_core do_recovery policy
*/
static pci_ers_result_t pciback_slot_reset(struct pci_dev *dev)
static pci_ers_result_t xen_pcibk_slot_reset(struct pci_dev *dev)
{
struct pcistub_device *psdev;
pci_ers_result_t result;
result = PCI_ERS_RESULT_RECOVERED;
dev_dbg(&dev->dev, "pciback_slot_reset(bus:%x,devfn:%x)\n",
dev_dbg(&dev->dev, "xen_pcibk_slot_reset(bus:%x,devfn:%x)\n",
dev->bus->number, dev->devfn);
down_write(&pcistub_sem);
......@@ -635,12 +637,12 @@ static pci_ers_result_t pciback_slot_reset(struct pci_dev *dev)
if (!psdev || !psdev->pdev) {
dev_err(&dev->dev,
"pciback device is not found/assigned\n");
DRV_NAME " device is not found/assigned\n");
goto end;
}
if (!psdev->pdev->sh_info) {
dev_err(&dev->dev, "pciback device is not connected or owned"
dev_err(&dev->dev, DRV_NAME " device is not connected or owned"
" by HVM, kill it\n");
kill_domain_by_device(psdev);
goto release;
......@@ -669,20 +671,20 @@ static pci_ers_result_t pciback_slot_reset(struct pci_dev *dev)
}
/*pciback_mmio_enabled: it will send the mmio_enabled request to pcifront
/*xen_pcibk_mmio_enabled: it will send the mmio_enabled request to pcifront
* in case of the device driver could provide this service, and then wait
* for pcifront ack
* @dev: pointer to PCI devices
* return value is used by aer_core do_recovery policy
*/
static pci_ers_result_t pciback_mmio_enabled(struct pci_dev *dev)
static pci_ers_result_t xen_pcibk_mmio_enabled(struct pci_dev *dev)
{
struct pcistub_device *psdev;
pci_ers_result_t result;
result = PCI_ERS_RESULT_RECOVERED;
dev_dbg(&dev->dev, "pciback_mmio_enabled(bus:%x,devfn:%x)\n",
dev_dbg(&dev->dev, "xen_pcibk_mmio_enabled(bus:%x,devfn:%x)\n",
dev->bus->number, dev->devfn);
down_write(&pcistub_sem);
......@@ -693,12 +695,12 @@ static pci_ers_result_t pciback_mmio_enabled(struct pci_dev *dev)
if (!psdev || !psdev->pdev) {
dev_err(&dev->dev,
"pciback device is not found/assigned\n");
DRV_NAME " device is not found/assigned\n");
goto end;
}
if (!psdev->pdev->sh_info) {
dev_err(&dev->dev, "pciback device is not connected or owned"
dev_err(&dev->dev, DRV_NAME " device is not connected or owned"
" by HVM, kill it\n");
kill_domain_by_device(psdev);
goto release;
......@@ -725,7 +727,7 @@ static pci_ers_result_t pciback_mmio_enabled(struct pci_dev *dev)
return result;
}
/*pciback_error_detected: it will send the error_detected request to pcifront
/*xen_pcibk_error_detected: it will send the error_detected request to pcifront
* in case the device driver can provide this service, and then wait
* for pcifront ack.
* @dev: pointer to PCI devices
......@@ -733,14 +735,14 @@ static pci_ers_result_t pciback_mmio_enabled(struct pci_dev *dev)
* return value is used by aer_core do_recovery policy
*/
static pci_ers_result_t pciback_error_detected(struct pci_dev *dev,
static pci_ers_result_t xen_pcibk_error_detected(struct pci_dev *dev,
pci_channel_state_t error)
{
struct pcistub_device *psdev;
pci_ers_result_t result;
result = PCI_ERS_RESULT_CAN_RECOVER;
dev_dbg(&dev->dev, "pciback_error_detected(bus:%x,devfn:%x)\n",
dev_dbg(&dev->dev, "xen_pcibk_error_detected(bus:%x,devfn:%x)\n",
dev->bus->number, dev->devfn);
down_write(&pcistub_sem);
......@@ -751,12 +753,12 @@ static pci_ers_result_t pciback_error_detected(struct pci_dev *dev,
if (!psdev || !psdev->pdev) {
dev_err(&dev->dev,
"pciback device is not found/assigned\n");
DRV_NAME " device is not found/assigned\n");
goto end;
}
if (!psdev->pdev->sh_info) {
dev_err(&dev->dev, "pciback device is not connected or owned"
dev_err(&dev->dev, DRV_NAME " device is not connected or owned"
" by HVM, kill it\n");
kill_domain_by_device(psdev);
goto release;
......@@ -784,17 +786,17 @@ static pci_ers_result_t pciback_error_detected(struct pci_dev *dev,
return result;
}
/*pciback_error_resume: it will send the error_resume request to pcifront
/*xen_pcibk_error_resume: it will send the error_resume request to pcifront
* in case the device driver can provide this service, and then wait
* for pcifront ack.
* @dev: pointer to PCI devices
*/
static void pciback_error_resume(struct pci_dev *dev)
static void xen_pcibk_error_resume(struct pci_dev *dev)
{
struct pcistub_device *psdev;
dev_dbg(&dev->dev, "pciback_error_resume(bus:%x,devfn:%x)\n",
dev_dbg(&dev->dev, "xen_pcibk_error_resume(bus:%x,devfn:%x)\n",
dev->bus->number, dev->devfn);
down_write(&pcistub_sem);
......@@ -805,12 +807,12 @@ static void pciback_error_resume(struct pci_dev *dev)
if (!psdev || !psdev->pdev) {
dev_err(&dev->dev,
"pciback device is not found/assigned\n");
DRV_NAME " device is not found/assigned\n");
goto end;
}
if (!psdev->pdev->sh_info) {
dev_err(&dev->dev, "pciback device is not connected or owned"
dev_err(&dev->dev, DRV_NAME " device is not connected or owned"
" by HVM, kill it\n");
kill_domain_by_device(psdev);
goto release;
......@@ -832,12 +834,12 @@ static void pciback_error_resume(struct pci_dev *dev)
return;
}
/*add pciback AER handling*/
static struct pci_error_handlers pciback_error_handler = {
.error_detected = pciback_error_detected,
.mmio_enabled = pciback_mmio_enabled,
.slot_reset = pciback_slot_reset,
.resume = pciback_error_resume,
/*add xen_pcibk AER handling*/
static struct pci_error_handlers xen_pcibk_error_handler = {
.error_detected = xen_pcibk_error_detected,
.mmio_enabled = xen_pcibk_mmio_enabled,
.slot_reset = xen_pcibk_slot_reset,
.resume = xen_pcibk_error_resume,
};
/*
......@@ -845,12 +847,14 @@ static struct pci_error_handlers pciback_error_handler = {
* for a normal device. I don't want it to be loaded automatically.
*/
static struct pci_driver pciback_pci_driver = {
.name = DRV_NAME,
static struct pci_driver xen_pcibk_pci_driver = {
/* The name should be xen_pciback, but until the tools are updated
* we will keep it as pciback. */
.name = "pciback",
.id_table = pcistub_ids,
.probe = pcistub_probe,
.remove = pcistub_remove,
.err_handler = &pciback_error_handler,
.err_handler = &xen_pcibk_error_handler,
};
static inline int str_to_slot(const char *buf, int *domain, int *bus,
......@@ -899,7 +903,7 @@ static int pcistub_device_id_add(int domain, int bus, int slot, int func)
pci_dev_id->bus = bus;
pci_dev_id->devfn = PCI_DEVFN(slot, func);
pr_debug("pciback: wants to seize %04x:%02x:%02x.%01x\n",
pr_debug(DRV_NAME ": wants to seize %04x:%02x:%02x.%01x\n",
domain, bus, slot, func);
spin_lock_irqsave(&device_ids_lock, flags);
......@@ -929,7 +933,7 @@ static int pcistub_device_id_remove(int domain, int bus, int slot, int func)
err = 0;
pr_debug("pciback: removed %04x:%02x:%02x.%01x from "
pr_debug(DRV_NAME ": removed %04x:%02x:%02x.%01x from "
"seize list\n", domain, bus, slot, func);
}
}
......@@ -965,9 +969,9 @@ static int pcistub_reg_add(int domain, int bus, int slot, int func, int reg,
field->init = NULL;
field->reset = NULL;
field->release = NULL;
field->clean = pciback_config_field_free;
field->clean = xen_pcibk_config_field_free;
err = pciback_config_quirks_add_field(dev, field);
err = xen_pcibk_config_quirks_add_field(dev, field);
if (err)
kfree(field);
out:
......@@ -1041,7 +1045,7 @@ DRIVER_ATTR(slots, S_IRUSR, pcistub_slot_show, NULL);
static ssize_t pcistub_irq_handler_show(struct device_driver *drv, char *buf)
{
struct pcistub_device *psdev;
struct pciback_dev_data *dev_data;
struct xen_pcibk_dev_data *dev_data;
size_t count = 0;
unsigned long flags;
......@@ -1073,7 +1077,7 @@ static ssize_t pcistub_irq_handler_switch(struct device_driver *drv,
size_t count)
{
struct pcistub_device *psdev;
struct pciback_dev_data *dev_data;
struct xen_pcibk_dev_data *dev_data;
int domain, bus, slot, func;
int err = -ENOENT;
......@@ -1127,13 +1131,13 @@ static ssize_t pcistub_quirk_show(struct device_driver *drv, char *buf)
{
int count = 0;
unsigned long flags;
struct pciback_config_quirk *quirk;
struct pciback_dev_data *dev_data;
struct xen_pcibk_config_quirk *quirk;
struct xen_pcibk_dev_data *dev_data;
const struct config_field *field;
const struct config_field_entry *cfg_entry;
spin_lock_irqsave(&device_ids_lock, flags);
list_for_each_entry(quirk, &pciback_quirks, quirks_list) {
list_for_each_entry(quirk, &xen_pcibk_quirks, quirks_list) {
if (count >= PAGE_SIZE)
goto out;
......@@ -1175,7 +1179,7 @@ static ssize_t permissive_add(struct device_driver *drv, const char *buf,
int domain, bus, slot, func;
int err;
struct pcistub_device *psdev;
struct pciback_dev_data *dev_data;
struct xen_pcibk_dev_data *dev_data;
err = str_to_slot(buf, &domain, &bus, &slot, &func);
if (err)
goto out;
......@@ -1213,7 +1217,7 @@ static ssize_t permissive_add(struct device_driver *drv, const char *buf,
static ssize_t permissive_show(struct device_driver *drv, char *buf)
{
struct pcistub_device *psdev;
struct pciback_dev_data *dev_data;
struct xen_pcibk_dev_data *dev_data;
size_t count = 0;
unsigned long flags;
spin_lock_irqsave(&pcistub_devices_lock, flags);
......@@ -1237,17 +1241,18 @@ DRIVER_ATTR(permissive, S_IRUSR | S_IWUSR, permissive_show, permissive_add);
static void pcistub_exit(void)
{
driver_remove_file(&pciback_pci_driver.driver, &driver_attr_new_slot);
driver_remove_file(&pciback_pci_driver.driver,
driver_remove_file(&xen_pcibk_pci_driver.driver, &driver_attr_new_slot);
driver_remove_file(&xen_pcibk_pci_driver.driver,
&driver_attr_remove_slot);
driver_remove_file(&pciback_pci_driver.driver, &driver_attr_slots);
driver_remove_file(&pciback_pci_driver.driver, &driver_attr_quirks);
driver_remove_file(&pciback_pci_driver.driver, &driver_attr_permissive);
driver_remove_file(&pciback_pci_driver.driver,
driver_remove_file(&xen_pcibk_pci_driver.driver, &driver_attr_slots);
driver_remove_file(&xen_pcibk_pci_driver.driver, &driver_attr_quirks);
driver_remove_file(&xen_pcibk_pci_driver.driver,
&driver_attr_permissive);
driver_remove_file(&xen_pcibk_pci_driver.driver,
&driver_attr_irq_handlers);
driver_remove_file(&pciback_pci_driver.driver,
driver_remove_file(&xen_pcibk_pci_driver.driver,
&driver_attr_irq_handler_state);
pci_unregister_driver(&pciback_pci_driver);
pci_unregister_driver(&xen_pcibk_pci_driver);
}
static int __init pcistub_init(void)
......@@ -1286,30 +1291,30 @@ static int __init pcistub_init(void)
* first one to get offered PCI devices as they become
* available (and thus we can be the first to grab them)
*/
err = pci_register_driver(&pciback_pci_driver);
err = pci_register_driver(&xen_pcibk_pci_driver);
if (err < 0)
goto out;
err = driver_create_file(&pciback_pci_driver.driver,
err = driver_create_file(&xen_pcibk_pci_driver.driver,
&driver_attr_new_slot);
if (!err)
err = driver_create_file(&pciback_pci_driver.driver,
err = driver_create_file(&xen_pcibk_pci_driver.driver,
&driver_attr_remove_slot);
if (!err)
err = driver_create_file(&pciback_pci_driver.driver,
err = driver_create_file(&xen_pcibk_pci_driver.driver,
&driver_attr_slots);
if (!err)
err = driver_create_file(&pciback_pci_driver.driver,
err = driver_create_file(&xen_pcibk_pci_driver.driver,
&driver_attr_quirks);
if (!err)
err = driver_create_file(&pciback_pci_driver.driver,
err = driver_create_file(&xen_pcibk_pci_driver.driver,
&driver_attr_permissive);
if (!err)
err = driver_create_file(&pciback_pci_driver.driver,
err = driver_create_file(&xen_pcibk_pci_driver.driver,
&driver_attr_irq_handlers);
if (!err)
err = driver_create_file(&pciback_pci_driver.driver,
err = driver_create_file(&xen_pcibk_pci_driver.driver,
&driver_attr_irq_handler_state);
if (err)
pcistub_exit();
......@@ -1318,7 +1323,7 @@ static int __init pcistub_init(void)
return err;
parse_error:
printk(KERN_ERR "pciback: Error parsing pci_devs_to_hide at \"%s\"\n",
printk(KERN_ERR DRV_NAME ": Error parsing pci_devs_to_hide at \"%s\"\n",
pci_devs_to_hide + pos);
return -EINVAL;
}
......@@ -1326,7 +1331,7 @@ static int __init pcistub_init(void)
#ifndef MODULE
/*
* fs_initcall happens before device_initcall
* so pciback *should* get called first (b/c we
* so xen_pcibk *should* get called first (b/c we
* want to suck up any device before other drivers
* get a chance by being the first pci device
* driver to register)
......@@ -1334,14 +1339,14 @@ static int __init pcistub_init(void)
fs_initcall(pcistub_init);
#endif
static int __init pciback_init(void)
static int __init xen_pcibk_init(void)
{
int err;
if (!xen_initial_domain())
return -ENODEV;
err = pciback_config_init();
err = xen_pcibk_config_init();
if (err)
return err;
......@@ -1352,20 +1357,20 @@ static int __init pciback_init(void)
#endif
pcistub_init_devices_late();
err = pciback_xenbus_register();
err = xen_pcibk_xenbus_register();
if (err)
pcistub_exit();
return err;
}
static void __exit pciback_cleanup(void)
static void __exit xen_pcibk_cleanup(void)
{
pciback_xenbus_unregister();
xen_pcibk_xenbus_unregister();
pcistub_exit();
}
module_init(pciback_init);
module_exit(pciback_cleanup);
module_init(xen_pcibk_init);
module_exit(xen_pcibk_cleanup);
MODULE_LICENSE("Dual BSD/GPL");
......@@ -25,25 +25,19 @@ struct pci_dev_entry {
#define _PCIB_op_pending (1)
#define PCIB_op_pending (1<<(_PCIB_op_pending))
struct pciback_device {
struct xen_pcibk_device {
void *pci_dev_data;
spinlock_t dev_lock;
struct xenbus_device *xdev;
struct xenbus_watch be_watch;
u8 be_watching;
int evtchn_irq;
struct xen_pci_sharedinfo *sh_info;
unsigned long flags;
struct work_struct op_work;
};
struct pciback_dev_data {
struct xen_pcibk_dev_data {
struct list_head config_fields;
unsigned int permissive:1;
unsigned int warned_on_write:1;
......@@ -52,91 +46,78 @@ struct pciback_dev_data {
unsigned int ack_intr:1; /* .. and ACK-ing */
unsigned long handled;
unsigned int irq; /* Saved in case device transitions to MSI/MSI-X */
char irq_name[0]; /* pciback[000:04:00.0] */
char irq_name[0]; /* xen-pcibk[000:04:00.0] */
};
/* Used by XenBus and pciback_ops.c */
extern wait_queue_head_t aer_wait_queue;
extern struct workqueue_struct *pciback_wq;
/* Used by XenBus and xen_pcibk_ops.c */
extern wait_queue_head_t xen_pcibk_aer_wait_queue;
extern struct workqueue_struct *xen_pcibk_wq;
/* Used by pcistub.c and conf_space_quirks.c */
extern struct list_head pciback_quirks;
extern struct list_head xen_pcibk_quirks;
/* Get/Put PCI Devices that are hidden from the PCI Backend Domain */
struct pci_dev *pcistub_get_pci_dev_by_slot(struct pciback_device *pdev,
struct pci_dev *pcistub_get_pci_dev_by_slot(struct xen_pcibk_device *pdev,
int domain, int bus,
int slot, int func);
struct pci_dev *pcistub_get_pci_dev(struct pciback_device *pdev,
struct pci_dev *pcistub_get_pci_dev(struct xen_pcibk_device *pdev,
struct pci_dev *dev);
void pcistub_put_pci_dev(struct pci_dev *dev);
/* Ensure a device is turned off or reset */
void pciback_reset_device(struct pci_dev *pdev);
void xen_pcibk_reset_device(struct pci_dev *pdev);
/* Access a virtual configuration space for a PCI device */
int pciback_config_init(void);
int pciback_config_init_dev(struct pci_dev *dev);
void pciback_config_free_dyn_fields(struct pci_dev *dev);
void pciback_config_reset_dev(struct pci_dev *dev);
void pciback_config_free_dev(struct pci_dev *dev);
int pciback_config_read(struct pci_dev *dev, int offset, int size,
u32 *ret_val);
int pciback_config_write(struct pci_dev *dev, int offset, int size, u32 value);
int xen_pcibk_config_init(void);
int xen_pcibk_config_init_dev(struct pci_dev *dev);
void xen_pcibk_config_free_dyn_fields(struct pci_dev *dev);
void xen_pcibk_config_reset_dev(struct pci_dev *dev);
void xen_pcibk_config_free_dev(struct pci_dev *dev);
int xen_pcibk_config_read(struct pci_dev *dev, int offset, int size,
u32 *ret_val);
int xen_pcibk_config_write(struct pci_dev *dev, int offset, int size,
u32 value);
/* Handle requests for specific devices from the frontend */
typedef int (*publish_pci_dev_cb) (struct pciback_device *pdev,
typedef int (*publish_pci_dev_cb) (struct xen_pcibk_device *pdev,
unsigned int domain, unsigned int bus,
unsigned int devfn, unsigned int devid);
typedef int (*publish_pci_root_cb) (struct pciback_device *pdev,
typedef int (*publish_pci_root_cb) (struct xen_pcibk_device *pdev,
unsigned int domain, unsigned int bus);
int pciback_add_pci_dev(struct pciback_device *pdev, struct pci_dev *dev,
int devid, publish_pci_dev_cb publish_cb);
void pciback_release_pci_dev(struct pciback_device *pdev, struct pci_dev *dev);
struct pci_dev *pciback_get_pci_dev(struct pciback_device *pdev,
unsigned int domain, unsigned int bus,
unsigned int devfn);
int xen_pcibk_add_pci_dev(struct xen_pcibk_device *pdev, struct pci_dev *dev,
int devid, publish_pci_dev_cb publish_cb);
void xen_pcibk_release_pci_dev(struct xen_pcibk_device *pdev,
struct pci_dev *dev);
struct pci_dev *xen_pcibk_get_pci_dev(struct xen_pcibk_device *pdev,
unsigned int domain, unsigned int bus,
unsigned int devfn);
/**
* Add for domain0 PCIE-AER handling. Get guest domain/bus/devfn in pciback
* Add for domain0 PCIE-AER handling. Get guest domain/bus/devfn in xen_pcibk
* before sending aer request to pcifront, so that guest could identify
* device, cooperate with pciback to finish aer recovery job if device driver
* device, cooperate with xen_pcibk to finish aer recovery job if device driver
* has the capability
*/
int pciback_get_pcifront_dev(struct pci_dev *pcidev,
struct pciback_device *pdev,
unsigned int *domain, unsigned int *bus,
unsigned int *devfn);
int pciback_init_devices(struct pciback_device *pdev);
int pciback_publish_pci_roots(struct pciback_device *pdev,
publish_pci_root_cb cb);
void pciback_release_devices(struct pciback_device *pdev);
int xen_pcibk_get_pcifront_dev(struct pci_dev *pcidev,
struct xen_pcibk_device *pdev,
unsigned int *domain, unsigned int *bus,
unsigned int *devfn);
int xen_pcibk_init_devices(struct xen_pcibk_device *pdev);
int xen_pcibk_publish_pci_roots(struct xen_pcibk_device *pdev,
publish_pci_root_cb cb);
void xen_pcibk_release_devices(struct xen_pcibk_device *pdev);
/* Handles events from front-end */
irqreturn_t pciback_handle_event(int irq, void *dev_id);
void pciback_do_op(struct work_struct *data);
irqreturn_t xen_pcibk_handle_event(int irq, void *dev_id);
void xen_pcibk_do_op(struct work_struct *data);
int pciback_xenbus_register(void);
void pciback_xenbus_unregister(void);
int xen_pcibk_xenbus_register(void);
void xen_pcibk_xenbus_unregister(void);
#ifdef CONFIG_PCI_MSI
int pciback_enable_msi(struct pciback_device *pdev,
struct pci_dev *dev, struct xen_pci_op *op);
int pciback_disable_msi(struct pciback_device *pdev,
struct pci_dev *dev, struct xen_pci_op *op);
int pciback_enable_msix(struct pciback_device *pdev,
struct pci_dev *dev, struct xen_pci_op *op);
int pciback_disable_msix(struct pciback_device *pdev,
struct pci_dev *dev, struct xen_pci_op *op);
#endif
extern int verbose_request;
void test_and_schedule_op(struct pciback_device *pdev);
void xen_pcibk_test_and_schedule_op(struct xen_pcibk_device *pdev);
#endif
/* Handles shared IRQs that can go to the device domain and the control domain. */
void pciback_irq_handler(struct pci_dev *dev, int reset);
irqreturn_t pciback_guest_interrupt(int irq, void *dev_id);
void xen_pcibk_irq_handler(struct pci_dev *dev, int reset);
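
The handlers declared above follow the standard Linux shared-interrupt pattern. A minimal sketch of that pattern, using hypothetical demo_* names and a cut-down per-device structure (request_irq(), IRQF_SHARED, IRQ_HANDLED and IRQ_NONE are the real kernel interfaces; everything else here is illustrative, not this driver's actual code):

#include <linux/interrupt.h>
#include <linux/pci.h>

/* Cut-down stand-in for the per-device bookkeeping (hypothetical). */
struct demo_dev_data {
	unsigned int isr_on:1;   /* fake handler is installed */
	unsigned int ack_intr:1; /* handler should claim the line */
	unsigned long handled;
	unsigned int irq;
	char irq_name[32];       /* e.g. "xen-pcibk[0000:04:00.0]" */
};

static irqreturn_t demo_guest_interrupt(int irq, void *dev_id)
{
	struct pci_dev *dev = dev_id;
	struct demo_dev_data *d = pci_get_drvdata(dev);

	if (d->isr_on && d->ack_intr) {
		d->handled++;
		return IRQ_HANDLED;  /* claim the shared interrupt */
	}
	return IRQ_NONE;             /* let other handlers on the line run */
}

static int demo_install_handler(struct pci_dev *dev, struct demo_dev_data *d)
{
	/* IRQF_SHARED: the same line may be serviced elsewhere as well;
	 * dev is the cookie handed back to the handler. */
	return request_irq(d->irq, demo_guest_interrupt, IRQF_SHARED,
			   d->irq_name, dev);
}
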
......@@ -10,16 +10,19 @@
#include <linux/sched.h>
#include "pciback.h"
#define DRV_NAME "xen-pciback"
int verbose_request;
module_param(verbose_request, int, 0644);
static irqreturn_t xen_pcibk_guest_interrupt(int irq, void *dev_id);
/* Ensure a device has the fake IRQ handler "turned on/off" and is
* ready to be exported. This MUST be run after pciback_reset_device
* ready to be exported. This MUST be run after xen_pcibk_reset_device
* which does the actual PCI device enable/disable.
*/
void pciback_control_isr(struct pci_dev *dev, int reset)
static void xen_pcibk_control_isr(struct pci_dev *dev, int reset)
{
struct pciback_dev_data *dev_data;
struct xen_pcibk_dev_data *dev_data;
int rc;
int enable = 0;
......@@ -66,7 +69,7 @@ void pciback_control_isr(struct pci_dev *dev, int reset)
if (enable) {
rc = request_irq(dev_data->irq,
pciback_guest_interrupt, IRQF_SHARED,
xen_pcibk_guest_interrupt, IRQF_SHARED,
dev_data->irq_name, dev);
if (rc) {
dev_err(&dev->dev, "%s: failed to install fake IRQ " \
......@@ -92,14 +95,14 @@ void pciback_control_isr(struct pci_dev *dev, int reset)
}
/* Ensure a device is "turned off" and ready to be exported.
* (Also see pciback_config_reset to ensure virtual configuration space is
* (Also see xen_pcibk_config_reset to ensure virtual configuration space is
* ready to be re-exported)
*/
void pciback_reset_device(struct pci_dev *dev)
void xen_pcibk_reset_device(struct pci_dev *dev)
{
u16 cmd;
pciback_control_isr(dev, 1 /* reset device */);
xen_pcibk_control_isr(dev, 1 /* reset device */);
/* Disable devices (but not bridges) */
if (dev->hdr_type == PCI_HEADER_TYPE_NORMAL) {
......@@ -126,43 +129,176 @@ void pciback_reset_device(struct pci_dev *dev)
}
}
}
#ifdef CONFIG_PCI_MSI
static
int xen_pcibk_enable_msi(struct xen_pcibk_device *pdev,
struct pci_dev *dev, struct xen_pci_op *op)
{
struct xen_pcibk_dev_data *dev_data;
int otherend = pdev->xdev->otherend_id;
int status;
if (unlikely(verbose_request))
printk(KERN_DEBUG DRV_NAME ": %s: enable MSI\n", pci_name(dev));
status = pci_enable_msi(dev);
if (status) {
printk(KERN_ERR "error enable msi for guest %x status %x\n",
otherend, status);
op->value = 0;
return XEN_PCI_ERR_op_failed;
}
/* The value the guest needs is actually the IDT vector, not the
* local domain's IRQ number. */
op->value = dev->irq ? xen_pirq_from_irq(dev->irq) : 0;
if (unlikely(verbose_request))
printk(KERN_DEBUG DRV_NAME ": %s: MSI: %d\n", pci_name(dev),
op->value);
dev_data = pci_get_drvdata(dev);
if (dev_data)
dev_data->ack_intr = 0;
return 0;
}
static
int xen_pcibk_disable_msi(struct xen_pcibk_device *pdev,
struct pci_dev *dev, struct xen_pci_op *op)
{
struct xen_pcibk_dev_data *dev_data;
if (unlikely(verbose_request))
printk(KERN_DEBUG DRV_NAME ": %s: disable MSI\n",
pci_name(dev));
pci_disable_msi(dev);
op->value = dev->irq ? xen_pirq_from_irq(dev->irq) : 0;
if (unlikely(verbose_request))
printk(KERN_DEBUG DRV_NAME ": %s: MSI: %d\n", pci_name(dev),
op->value);
dev_data = pci_get_drvdata(dev);
if (dev_data)
dev_data->ack_intr = 1;
return 0;
}
static
int xen_pcibk_enable_msix(struct xen_pcibk_device *pdev,
struct pci_dev *dev, struct xen_pci_op *op)
{
struct xen_pcibk_dev_data *dev_data;
int i, result;
struct msix_entry *entries;
if (unlikely(verbose_request))
printk(KERN_DEBUG DRV_NAME ": %s: enable MSI-X\n",
pci_name(dev));
if (op->value > SH_INFO_MAX_VEC)
return -EINVAL;
entries = kmalloc(op->value * sizeof(*entries), GFP_KERNEL);
if (entries == NULL)
return -ENOMEM;
for (i = 0; i < op->value; i++) {
entries[i].entry = op->msix_entries[i].entry;
entries[i].vector = op->msix_entries[i].vector;
}
result = pci_enable_msix(dev, entries, op->value);
if (result == 0) {
for (i = 0; i < op->value; i++) {
op->msix_entries[i].entry = entries[i].entry;
if (entries[i].vector)
op->msix_entries[i].vector =
xen_pirq_from_irq(entries[i].vector);
if (unlikely(verbose_request))
printk(KERN_DEBUG DRV_NAME ": %s: " \
"MSI-X[%d]: %d\n",
pci_name(dev), i,
op->msix_entries[i].vector);
}
} else {
printk(KERN_WARNING DRV_NAME ": %s: failed to enable MSI-X: err %d!\n",
pci_name(dev), result);
}
kfree(entries);
op->value = result;
dev_data = pci_get_drvdata(dev);
if (dev_data)
dev_data->ack_intr = 0;
return result;
}
static
int xen_pcibk_disable_msix(struct xen_pcibk_device *pdev,
struct pci_dev *dev, struct xen_pci_op *op)
{
struct xen_pcibk_dev_data *dev_data;
if (unlikely(verbose_request))
printk(KERN_DEBUG DRV_NAME ": %s: disable MSI-X\n",
pci_name(dev));
pci_disable_msix(dev);
/*
* SR-IOV devices (which don't have any legacy IRQ) have
* an undefined IRQ value of zero.
*/
op->value = dev->irq ? xen_pirq_from_irq(dev->irq) : 0;
if (unlikely(verbose_request))
printk(KERN_DEBUG DRV_NAME ": %s: MSI-X: %d\n", pci_name(dev),
op->value);
dev_data = pci_get_drvdata(dev);
if (dev_data)
dev_data->ack_intr = 1;
return 0;
}
#endif
/*
* Now the same evtchn is used for both pcifront conf_read_write request
* as well as pcie aer front end ack. We use a new work_queue to schedule
* pciback conf_read_write service to avoid conflict with the aer_core
* xen_pcibk conf_read_write service to avoid conflict with the aer_core
* do_recovery job which also uses the system default work_queue
*/
void test_and_schedule_op(struct pciback_device *pdev)
void xen_pcibk_test_and_schedule_op(struct xen_pcibk_device *pdev)
{
/* Check that frontend is requesting an operation and that we are not
* already processing a request */
if (test_bit(_XEN_PCIF_active, (unsigned long *)&pdev->sh_info->flags)
&& !test_and_set_bit(_PDEVF_op_active, &pdev->flags)) {
queue_work(pciback_wq, &pdev->op_work);
queue_work(xen_pcibk_wq, &pdev->op_work);
}
/*_XEN_PCIB_active should have been cleared by pcifront. And also make
sure pciback is waiting for ack by checking _PCIB_op_pending*/
sure xen_pcibk is waiting for ack by checking _PCIB_op_pending*/
if (!test_bit(_XEN_PCIB_active, (unsigned long *)&pdev->sh_info->flags)
&& test_bit(_PCIB_op_pending, &pdev->flags)) {
wake_up(&aer_wait_queue);
wake_up(&xen_pcibk_aer_wait_queue);
}
}
/* Performing the configuration space reads/writes must not be done in atomic
* context because some of the pci_* functions can sleep (mostly due to ACPI
* use of semaphores). This function is intended to be called from a work
* queue in process context taking a struct pciback_device as a parameter */
* queue in process context taking a struct xen_pcibk_device as a parameter */
void pciback_do_op(struct work_struct *data)
void xen_pcibk_do_op(struct work_struct *data)
{
struct pciback_device *pdev =
container_of(data, struct pciback_device, op_work);
struct xen_pcibk_device *pdev =
container_of(data, struct xen_pcibk_device, op_work);
struct pci_dev *dev;
struct pciback_dev_data *dev_data = NULL;
struct xen_pcibk_dev_data *dev_data = NULL;
struct xen_pci_op *op = &pdev->sh_info->op;
int test_intx = 0;
dev = pciback_get_pci_dev(pdev, op->domain, op->bus, op->devfn);
dev = xen_pcibk_get_pci_dev(pdev, op->domain, op->bus, op->devfn);
if (dev == NULL)
op->err = XEN_PCI_ERR_dev_not_found;
......@@ -172,25 +308,25 @@ void pciback_do_op(struct work_struct *data)
test_intx = dev_data->enable_intx;
switch (op->cmd) {
case XEN_PCI_OP_conf_read:
op->err = pciback_config_read(dev,
op->err = xen_pcibk_config_read(dev,
op->offset, op->size, &op->value);
break;
case XEN_PCI_OP_conf_write:
op->err = pciback_config_write(dev,
op->err = xen_pcibk_config_write(dev,
op->offset, op->size, op->value);
break;
#ifdef CONFIG_PCI_MSI
case XEN_PCI_OP_enable_msi:
op->err = pciback_enable_msi(pdev, dev, op);
op->err = xen_pcibk_enable_msi(pdev, dev, op);
break;
case XEN_PCI_OP_disable_msi:
op->err = pciback_disable_msi(pdev, dev, op);
op->err = xen_pcibk_disable_msi(pdev, dev, op);
break;
case XEN_PCI_OP_enable_msix:
op->err = pciback_enable_msix(pdev, dev, op);
op->err = xen_pcibk_enable_msix(pdev, dev, op);
break;
case XEN_PCI_OP_disable_msix:
op->err = pciback_disable_msix(pdev, dev, op);
op->err = xen_pcibk_disable_msix(pdev, dev, op);
break;
#endif
default:
......@@ -201,7 +337,7 @@ void pciback_do_op(struct work_struct *data)
if (!op->err && dev && dev_data) {
/* Transition detected */
if ((dev_data->enable_intx != test_intx))
pciback_control_isr(dev, 0 /* no reset */);
xen_pcibk_control_isr(dev, 0 /* no reset */);
}
/* Tell the driver domain that we're done. */
wmb();
......@@ -216,21 +352,21 @@ void pciback_do_op(struct work_struct *data)
/* Check to see if the driver domain tried to start another request in
* between clearing _XEN_PCIF_active and clearing _PDEVF_op_active.
*/
test_and_schedule_op(pdev);
xen_pcibk_test_and_schedule_op(pdev);
}
irqreturn_t pciback_handle_event(int irq, void *dev_id)
irqreturn_t xen_pcibk_handle_event(int irq, void *dev_id)
{
struct pciback_device *pdev = dev_id;
struct xen_pcibk_device *pdev = dev_id;
test_and_schedule_op(pdev);
xen_pcibk_test_and_schedule_op(pdev);
return IRQ_HANDLED;
}
irqreturn_t pciback_guest_interrupt(int irq, void *dev_id)
static irqreturn_t xen_pcibk_guest_interrupt(int irq, void *dev_id)
{
struct pci_dev *dev = (struct pci_dev *)dev_id;
struct pciback_dev_data *dev_data = pci_get_drvdata(dev);
struct xen_pcibk_dev_data *dev_data = pci_get_drvdata(dev);
if (dev_data->isr_on && dev_data->ack_intr) {
dev_data->handled++;
......
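
A minimal sketch of the deferral idiom the pciback_ops.c comments above describe: a dedicated workqueue plus an atomic bit guard, so a request is queued at most once and the possibly-sleeping configuration-space access runs in process context. The demo_* names are stand-ins; test_and_set_bit(), clear_bit(), queue_work(), create_workqueue() and container_of() are the real kernel primitives:

#include <linux/bitops.h>
#include <linux/kernel.h>
#include <linux/workqueue.h>

#define DEMO_OP_ACTIVE 0	/* bit: a request is currently being processed */

struct demo_backend {
	unsigned long flags;
	struct work_struct op_work;	/* INIT_WORK(&be->op_work, demo_do_op) at setup */
};

/* A dedicated queue (from create_workqueue("demo_wq")) keeps these jobs off
 * the system default workqueue used by the AER do_recovery path. */
static struct workqueue_struct *demo_wq;

static void demo_do_op(struct work_struct *work)
{
	struct demo_backend *be = container_of(work, struct demo_backend, op_work);

	/* The (possibly sleeping) configuration-space access happens here,
	 * safely in process context. */

	clear_bit(DEMO_OP_ACTIVE, &be->flags);	/* allow the next request */
}

static void demo_schedule_op(struct demo_backend *be)
{
	/* test_and_set_bit() makes the "already running?" check and the
	 * marking a single atomic step, so the work item is never queued
	 * twice for one outstanding request. */
	if (!test_and_set_bit(DEMO_OP_ACTIVE, &be->flags))
		queue_work(demo_wq, &be->op_work);
}
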
/*
* PCI Backend - Provides a Virtual PCI bus (with real devices)
* to the frontend
*
* Author: Ryan Wilson <hap9@epoch.ncsc.mil> (vpci.c)
* Author: Tristan Gingold <tristan.gingold@bull.net>, from vpci.c
*/
#include <linux/list.h>
#include <linux/slab.h>
#include <linux/pci.h>
#include <linux/spinlock.h>
#include "pciback.h"
/* There are at most 32 slots in a pci bus. */
#define PCI_SLOT_MAX 32
#define PCI_BUS_NBR 2
struct slot_dev_data {
/* Access to dev_list must be protected by lock */
struct pci_dev *slots[PCI_BUS_NBR][PCI_SLOT_MAX];
spinlock_t lock;
};
struct pci_dev *pciback_get_pci_dev(struct pciback_device *pdev,
unsigned int domain, unsigned int bus,
unsigned int devfn)
{
struct pci_dev *dev = NULL;
struct slot_dev_data *slot_dev = pdev->pci_dev_data;
unsigned long flags;
if (domain != 0 || PCI_FUNC(devfn) != 0)
return NULL;
if (PCI_SLOT(devfn) >= PCI_SLOT_MAX || bus >= PCI_BUS_NBR)
return NULL;
spin_lock_irqsave(&slot_dev->lock, flags);
dev = slot_dev->slots[bus][PCI_SLOT(devfn)];
spin_unlock_irqrestore(&slot_dev->lock, flags);
return dev;
}
int pciback_add_pci_dev(struct pciback_device *pdev, struct pci_dev *dev,
int devid, publish_pci_dev_cb publish_cb)
{
int err = 0, slot, bus;
struct slot_dev_data *slot_dev = pdev->pci_dev_data;
unsigned long flags;
if ((dev->class >> 24) == PCI_BASE_CLASS_BRIDGE) {
err = -EFAULT;
xenbus_dev_fatal(pdev->xdev, err,
"Can't export bridges on the virtual PCI bus");
goto out;
}
spin_lock_irqsave(&slot_dev->lock, flags);
/* Assign to a new slot on the virtual PCI bus */
for (bus = 0; bus < PCI_BUS_NBR; bus++)
for (slot = 0; slot < PCI_SLOT_MAX; slot++) {
if (slot_dev->slots[bus][slot] == NULL) {
printk(KERN_INFO
"pciback: slot: %s: assign to virtual "
"slot %d, bus %d\n",
pci_name(dev), slot, bus);
slot_dev->slots[bus][slot] = dev;
goto unlock;
}
}
err = -ENOMEM;
xenbus_dev_fatal(pdev->xdev, err,
"No more space on root virtual PCI bus");
unlock:
spin_unlock_irqrestore(&slot_dev->lock, flags);
/* Publish this device. */
if (!err)
err = publish_cb(pdev, 0, 0, PCI_DEVFN(slot, 0), devid);
out:
return err;
}
void pciback_release_pci_dev(struct pciback_device *pdev, struct pci_dev *dev)
{
int slot, bus;
struct slot_dev_data *slot_dev = pdev->pci_dev_data;
struct pci_dev *found_dev = NULL;
unsigned long flags;
spin_lock_irqsave(&slot_dev->lock, flags);
for (bus = 0; bus < PCI_BUS_NBR; bus++)
for (slot = 0; slot < PCI_SLOT_MAX; slot++) {
if (slot_dev->slots[bus][slot] == dev) {
slot_dev->slots[bus][slot] = NULL;
found_dev = dev;
goto out;
}
}
out:
spin_unlock_irqrestore(&slot_dev->lock, flags);
if (found_dev)
pcistub_put_pci_dev(found_dev);
}
int pciback_init_devices(struct pciback_device *pdev)
{
int slot, bus;
struct slot_dev_data *slot_dev;
slot_dev = kmalloc(sizeof(*slot_dev), GFP_KERNEL);
if (!slot_dev)
return -ENOMEM;
spin_lock_init(&slot_dev->lock);
for (bus = 0; bus < PCI_BUS_NBR; bus++)
for (slot = 0; slot < PCI_SLOT_MAX; slot++)
slot_dev->slots[bus][slot] = NULL;
pdev->pci_dev_data = slot_dev;
return 0;
}
int pciback_publish_pci_roots(struct pciback_device *pdev,
publish_pci_root_cb publish_cb)
{
/* The Virtual PCI bus has only one root */
return publish_cb(pdev, 0, 0);
}
void pciback_release_devices(struct pciback_device *pdev)
{
int slot, bus;
struct slot_dev_data *slot_dev = pdev->pci_dev_data;
struct pci_dev *dev;
for (bus = 0; bus < PCI_BUS_NBR; bus++)
for (slot = 0; slot < PCI_SLOT_MAX; slot++) {
dev = slot_dev->slots[bus][slot];
if (dev != NULL)
pcistub_put_pci_dev(dev);
}
kfree(slot_dev);
pdev->pci_dev_data = NULL;
}
int pciback_get_pcifront_dev(struct pci_dev *pcidev,
struct pciback_device *pdev,
unsigned int *domain, unsigned int *bus,
unsigned int *devfn)
{
int slot, busnr;
struct slot_dev_data *slot_dev = pdev->pci_dev_data;
struct pci_dev *dev;
int found = 0;
unsigned long flags;
spin_lock_irqsave(&slot_dev->lock, flags);
for (busnr = 0; busnr < PCI_BUS_NBR; busnr++)
for (slot = 0; slot < PCI_SLOT_MAX; slot++) {
dev = slot_dev->slots[busnr][slot];
if (dev && dev->bus->number == pcidev->bus->number
&& dev->devfn == pcidev->devfn
&& pci_domain_nr(dev->bus) ==
pci_domain_nr(pcidev->bus)) {
found = 1;
*domain = 0;
*bus = busnr;
*devfn = PCI_DEVFN(slot, 0);
goto out;
}
}
out:
spin_unlock_irqrestore(&slot_dev->lock, flags);
return found;
}
......@@ -12,6 +12,7 @@
#include "pciback.h"
#define PCI_SLOT_MAX 32
#define DRV_NAME "xen-pciback"
struct vpci_dev_data {
/* Access to dev_list must be protected by lock */
......@@ -24,9 +25,9 @@ static inline struct list_head *list_first(struct list_head *head)
return head->next;
}
struct pci_dev *pciback_get_pci_dev(struct pciback_device *pdev,
unsigned int domain, unsigned int bus,
unsigned int devfn)
struct pci_dev *xen_pcibk_get_pci_dev(struct xen_pcibk_device *pdev,
unsigned int domain, unsigned int bus,
unsigned int devfn)
{
struct pci_dev_entry *entry;
struct pci_dev *dev = NULL;
......@@ -62,8 +63,8 @@ static inline int match_slot(struct pci_dev *l, struct pci_dev *r)
return 0;
}
int pciback_add_pci_dev(struct pciback_device *pdev, struct pci_dev *dev,
int devid, publish_pci_dev_cb publish_cb)
int xen_pcibk_add_pci_dev(struct xen_pcibk_device *pdev, struct pci_dev *dev,
int devid, publish_pci_dev_cb publish_cb)
{
int err = 0, slot, func = -1;
struct pci_dev_entry *t, *dev_entry;
......@@ -96,7 +97,7 @@ int pciback_add_pci_dev(struct pciback_device *pdev, struct pci_dev *dev,
struct pci_dev_entry, list);
if (match_slot(dev, t->dev)) {
pr_info("pciback: vpci: %s: "
pr_info(DRV_NAME ": vpci: %s: "
"assign to virtual slot %d func %d\n",
pci_name(dev), slot,
PCI_FUNC(dev->devfn));
......@@ -111,8 +112,8 @@ int pciback_add_pci_dev(struct pciback_device *pdev, struct pci_dev *dev,
/* Assign to a new slot on the virtual PCI bus */
for (slot = 0; slot < PCI_SLOT_MAX; slot++) {
if (list_empty(&vpci_dev->dev_list[slot])) {
printk(KERN_INFO
"pciback: vpci: %s: assign to virtual slot %d\n",
printk(KERN_INFO DRV_NAME
": vpci: %s: assign to virtual slot %d\n",
pci_name(dev), slot);
list_add_tail(&dev_entry->list,
&vpci_dev->dev_list[slot]);
......@@ -136,7 +137,8 @@ int pciback_add_pci_dev(struct pciback_device *pdev, struct pci_dev *dev,
return err;
}
void pciback_release_pci_dev(struct pciback_device *pdev, struct pci_dev *dev)
void xen_pcibk_release_pci_dev(struct xen_pcibk_device *pdev,
struct pci_dev *dev)
{
int slot;
struct vpci_dev_data *vpci_dev = pdev->pci_dev_data;
......@@ -165,7 +167,7 @@ void pciback_release_pci_dev(struct pciback_device *pdev, struct pci_dev *dev)
pcistub_put_pci_dev(found_dev);
}
int pciback_init_devices(struct pciback_device *pdev)
int xen_pcibk_init_devices(struct xen_pcibk_device *pdev)
{
int slot;
struct vpci_dev_data *vpci_dev;
......@@ -184,14 +186,14 @@ int pciback_init_devices(struct pciback_device *pdev)
return 0;
}
int pciback_publish_pci_roots(struct pciback_device *pdev,
publish_pci_root_cb publish_cb)
int xen_pcibk_publish_pci_roots(struct xen_pcibk_device *pdev,
publish_pci_root_cb publish_cb)
{
/* The Virtual PCI bus has only one root */
return publish_cb(pdev, 0, 0);
}
void pciback_release_devices(struct pciback_device *pdev)
void xen_pcibk_release_devices(struct xen_pcibk_device *pdev)
{
int slot;
struct vpci_dev_data *vpci_dev = pdev->pci_dev_data;
......@@ -210,10 +212,10 @@ void pciback_release_devices(struct pciback_device *pdev)
pdev->pci_dev_data = NULL;
}
int pciback_get_pcifront_dev(struct pci_dev *pcidev,
struct pciback_device *pdev,
unsigned int *domain, unsigned int *bus,
unsigned int *devfn)
int xen_pcibk_get_pcifront_dev(struct pci_dev *pcidev,
struct xen_pcibk_device *pdev,
unsigned int *domain, unsigned int *bus,
unsigned int *devfn)
{
struct pci_dev_entry *entry;
struct pci_dev *dev = NULL;
......
......@@ -14,14 +14,15 @@
#include <linux/workqueue.h>
#include "pciback.h"
#define DRV_NAME "xen-pciback"
#define INVALID_EVTCHN_IRQ (-1)
struct workqueue_struct *pciback_wq;
struct workqueue_struct *xen_pcibk_wq;
static struct pciback_device *alloc_pdev(struct xenbus_device *xdev)
static struct xen_pcibk_device *alloc_pdev(struct xenbus_device *xdev)
{
struct pciback_device *pdev;
struct xen_pcibk_device *pdev;
pdev = kzalloc(sizeof(struct pciback_device), GFP_KERNEL);
pdev = kzalloc(sizeof(struct xen_pcibk_device), GFP_KERNEL);
if (pdev == NULL)
goto out;
dev_dbg(&xdev->dev, "allocated pdev @ 0x%p\n", pdev);
......@@ -35,9 +36,9 @@ static struct pciback_device *alloc_pdev(struct xenbus_device *xdev)
pdev->evtchn_irq = INVALID_EVTCHN_IRQ;
pdev->be_watching = 0;
INIT_WORK(&pdev->op_work, pciback_do_op);
INIT_WORK(&pdev->op_work, xen_pcibk_do_op);
if (pciback_init_devices(pdev)) {
if (xen_pcibk_init_devices(pdev)) {
kfree(pdev);
pdev = NULL;
}
......@@ -45,7 +46,7 @@ static struct pciback_device *alloc_pdev(struct xenbus_device *xdev)
return pdev;
}
static void pciback_disconnect(struct pciback_device *pdev)
static void xen_pcibk_disconnect(struct xen_pcibk_device *pdev)
{
spin_lock(&pdev->dev_lock);
......@@ -60,7 +61,7 @@ static void pciback_disconnect(struct pciback_device *pdev)
* before releasing the shared memory */
/* Note, the workqueue does not use spinlocks at all.*/
flush_workqueue(pciback_wq);
flush_workqueue(xen_pcibk_wq);
spin_lock(&pdev->dev_lock);
if (pdev->sh_info != NULL) {
......@@ -71,16 +72,16 @@ static void pciback_disconnect(struct pciback_device *pdev)
}
static void free_pdev(struct pciback_device *pdev)
static void free_pdev(struct xen_pcibk_device *pdev)
{
if (pdev->be_watching) {
unregister_xenbus_watch(&pdev->be_watch);
pdev->be_watching = 0;
}
pciback_disconnect(pdev);
xen_pcibk_disconnect(pdev);
pciback_release_devices(pdev);
xen_pcibk_release_devices(pdev);
dev_set_drvdata(&pdev->xdev->dev, NULL);
pdev->xdev = NULL;
......@@ -88,7 +89,7 @@ static void free_pdev(struct pciback_device *pdev)
kfree(pdev);
}
static int pciback_do_attach(struct pciback_device *pdev, int gnt_ref,
static int xen_pcibk_do_attach(struct xen_pcibk_device *pdev, int gnt_ref,
int remote_evtchn)
{
int err = 0;
......@@ -110,8 +111,8 @@ static int pciback_do_attach(struct pciback_device *pdev, int gnt_ref,
spin_unlock(&pdev->dev_lock);
err = bind_interdomain_evtchn_to_irqhandler(
pdev->xdev->otherend_id, remote_evtchn, pciback_handle_event,
0, "pciback", pdev);
pdev->xdev->otherend_id, remote_evtchn, xen_pcibk_handle_event,
0, DRV_NAME, pdev);
if (err < 0) {
xenbus_dev_fatal(pdev->xdev, err,
"Error binding event channel to IRQ");
......@@ -128,7 +129,7 @@ static int pciback_do_attach(struct pciback_device *pdev, int gnt_ref,
return err;
}
static int pciback_attach(struct pciback_device *pdev)
static int xen_pcibk_attach(struct xen_pcibk_device *pdev)
{
int err = 0;
int gnt_ref, remote_evtchn;
......@@ -161,12 +162,12 @@ static int pciback_attach(struct pciback_device *pdev)
if (magic == NULL || strcmp(magic, XEN_PCI_MAGIC) != 0) {
xenbus_dev_fatal(pdev->xdev, -EFAULT,
"version mismatch (%s/%s) with pcifront - "
"halting pciback",
"halting xen_pcibk",
magic, XEN_PCI_MAGIC);
goto out;
}
err = pciback_do_attach(pdev, gnt_ref, remote_evtchn);
err = xen_pcibk_do_attach(pdev, gnt_ref, remote_evtchn);
if (err)
goto out;
......@@ -185,7 +186,7 @@ static int pciback_attach(struct pciback_device *pdev)
return err;
}
static int pciback_publish_pci_dev(struct pciback_device *pdev,
static int xen_pcibk_publish_pci_dev(struct xen_pcibk_device *pdev,
unsigned int domain, unsigned int bus,
unsigned int devfn, unsigned int devid)
{
......@@ -207,7 +208,7 @@ static int pciback_publish_pci_dev(struct pciback_device *pdev,
return err;
}
static int pciback_export_device(struct pciback_device *pdev,
static int xen_pcibk_export_device(struct xen_pcibk_device *pdev,
int domain, int bus, int slot, int func,
int devid)
{
......@@ -228,7 +229,8 @@ static int pciback_export_device(struct pciback_device *pdev,
goto out;
}
err = pciback_add_pci_dev(pdev, dev, devid, pciback_publish_pci_dev);
err = xen_pcibk_add_pci_dev(pdev, dev, devid,
xen_pcibk_publish_pci_dev);
if (err)
goto out;
......@@ -253,7 +255,7 @@ static int pciback_export_device(struct pciback_device *pdev,
return err;
}
static int pciback_remove_device(struct pciback_device *pdev,
static int xen_pcibk_remove_device(struct xen_pcibk_device *pdev,
int domain, int bus, int slot, int func)
{
int err = 0;
......@@ -262,7 +264,7 @@ static int pciback_remove_device(struct pciback_device *pdev,
dev_dbg(&pdev->xdev->dev, "removing dom %x bus %x slot %x func %x\n",
domain, bus, slot, func);
dev = pciback_get_pci_dev(pdev, domain, bus, PCI_DEVFN(slot, func));
dev = xen_pcibk_get_pci_dev(pdev, domain, bus, PCI_DEVFN(slot, func));
if (!dev) {
err = -EINVAL;
dev_dbg(&pdev->xdev->dev, "Couldn't locate PCI device "
......@@ -274,13 +276,13 @@ static int pciback_remove_device(struct pciback_device *pdev,
dev_dbg(&dev->dev, "unregistering for %d\n", pdev->xdev->otherend_id);
xen_unregister_device_domain_owner(dev);
pciback_release_pci_dev(pdev, dev);
xen_pcibk_release_pci_dev(pdev, dev);
out:
return err;
}
static int pciback_publish_pci_root(struct pciback_device *pdev,
static int xen_pcibk_publish_pci_root(struct xen_pcibk_device *pdev,
unsigned int domain, unsigned int bus)
{
unsigned int d, b;
......@@ -340,7 +342,7 @@ static int pciback_publish_pci_root(struct pciback_device *pdev,
return err;
}
static int pciback_reconfigure(struct pciback_device *pdev)
static int xen_pcibk_reconfigure(struct xen_pcibk_device *pdev)
{
int err = 0;
int num_devs;
......@@ -411,14 +413,14 @@ static int pciback_reconfigure(struct pciback_device *pdev)
goto out;
}
err = pciback_export_device(pdev, domain, bus, slot,
err = xen_pcibk_export_device(pdev, domain, bus, slot,
func, i);
if (err)
goto out;
/* Publish pci roots. */
err = pciback_publish_pci_roots(pdev,
pciback_publish_pci_root);
err = xen_pcibk_publish_pci_roots(pdev,
xen_pcibk_publish_pci_root);
if (err) {
xenbus_dev_fatal(pdev->xdev, err,
"Error while publish PCI root"
......@@ -465,7 +467,7 @@ static int pciback_reconfigure(struct pciback_device *pdev)
goto out;
}
err = pciback_remove_device(pdev, domain, bus, slot,
err = xen_pcibk_remove_device(pdev, domain, bus, slot,
func);
if (err)
goto out;
......@@ -493,20 +495,20 @@ static int pciback_reconfigure(struct pciback_device *pdev)
return 0;
}
static void pciback_frontend_changed(struct xenbus_device *xdev,
static void xen_pcibk_frontend_changed(struct xenbus_device *xdev,
enum xenbus_state fe_state)
{
struct pciback_device *pdev = dev_get_drvdata(&xdev->dev);
struct xen_pcibk_device *pdev = dev_get_drvdata(&xdev->dev);
dev_dbg(&xdev->dev, "fe state changed %d\n", fe_state);
switch (fe_state) {
case XenbusStateInitialised:
pciback_attach(pdev);
xen_pcibk_attach(pdev);
break;
case XenbusStateReconfiguring:
pciback_reconfigure(pdev);
xen_pcibk_reconfigure(pdev);
break;
case XenbusStateConnected:
......@@ -517,12 +519,12 @@ static void pciback_frontend_changed(struct xenbus_device *xdev,
break;
case XenbusStateClosing:
pciback_disconnect(pdev);
xen_pcibk_disconnect(pdev);
xenbus_switch_state(xdev, XenbusStateClosing);
break;
case XenbusStateClosed:
pciback_disconnect(pdev);
xen_pcibk_disconnect(pdev);
xenbus_switch_state(xdev, XenbusStateClosed);
if (xenbus_dev_is_online(xdev))
break;
......@@ -537,7 +539,7 @@ static void pciback_frontend_changed(struct xenbus_device *xdev,
}
}
static int pciback_setup_backend(struct pciback_device *pdev)
static int xen_pcibk_setup_backend(struct xen_pcibk_device *pdev)
{
/* Get configuration from xend (if available now) */
int domain, bus, slot, func;
......@@ -590,7 +592,7 @@ static int pciback_setup_backend(struct pciback_device *pdev)
goto out;
}
err = pciback_export_device(pdev, domain, bus, slot, func, i);
err = xen_pcibk_export_device(pdev, domain, bus, slot, func, i);
if (err)
goto out;
......@@ -612,7 +614,7 @@ static int pciback_setup_backend(struct pciback_device *pdev)
}
}
err = pciback_publish_pci_roots(pdev, pciback_publish_pci_root);
err = xen_pcibk_publish_pci_roots(pdev, xen_pcibk_publish_pci_root);
if (err) {
xenbus_dev_fatal(pdev->xdev, err,
"Error while publish PCI root buses "
......@@ -628,20 +630,20 @@ static int pciback_setup_backend(struct pciback_device *pdev)
out:
if (!err)
/* see if pcifront is already configured (if not, we'll wait) */
pciback_attach(pdev);
xen_pcibk_attach(pdev);
return err;
}
static void pciback_be_watch(struct xenbus_watch *watch,
static void xen_pcibk_be_watch(struct xenbus_watch *watch,
const char **vec, unsigned int len)
{
struct pciback_device *pdev =
container_of(watch, struct pciback_device, be_watch);
struct xen_pcibk_device *pdev =
container_of(watch, struct xen_pcibk_device, be_watch);
switch (xenbus_read_driver_state(pdev->xdev->nodename)) {
case XenbusStateInitWait:
pciback_setup_backend(pdev);
xen_pcibk_setup_backend(pdev);
break;
default:
......@@ -649,16 +651,16 @@ static void pciback_be_watch(struct xenbus_watch *watch,
}
}
static int pciback_xenbus_probe(struct xenbus_device *dev,
static int xen_pcibk_xenbus_probe(struct xenbus_device *dev,
const struct xenbus_device_id *id)
{
int err = 0;
struct pciback_device *pdev = alloc_pdev(dev);
struct xen_pcibk_device *pdev = alloc_pdev(dev);
if (pdev == NULL) {
err = -ENOMEM;
xenbus_dev_fatal(dev, err,
"Error allocating pciback_device struct");
"Error allocating xen_pcibk_device struct");
goto out;
}
......@@ -669,7 +671,7 @@ static int pciback_xenbus_probe(struct xenbus_device *dev,
/* watch the backend node for backend configuration information */
err = xenbus_watch_path(dev, dev->nodename, &pdev->be_watch,
pciback_be_watch);
xen_pcibk_be_watch);
if (err)
goto out;
......@@ -678,15 +680,15 @@ static int pciback_xenbus_probe(struct xenbus_device *dev,
/* We need to force a call to our callback here in case
* xend already configured us!
*/
pciback_be_watch(&pdev->be_watch, NULL, 0);
xen_pcibk_be_watch(&pdev->be_watch, NULL, 0);
out:
return err;
}
static int pciback_xenbus_remove(struct xenbus_device *dev)
static int xen_pcibk_xenbus_remove(struct xenbus_device *dev)
{
struct pciback_device *pdev = dev_get_drvdata(&dev->dev);
struct xen_pcibk_device *pdev = dev_get_drvdata(&dev->dev);
if (pdev != NULL)
free_pdev(pdev);
......@@ -699,28 +701,28 @@ static const struct xenbus_device_id xenpci_ids[] = {
{""},
};
static struct xenbus_driver xenbus_pciback_driver = {
.name = "pciback",
static struct xenbus_driver xenbus_xen_pcibk_driver = {
.name = DRV_NAME,
.owner = THIS_MODULE,
.ids = xenpci_ids,
.probe = pciback_xenbus_probe,
.remove = pciback_xenbus_remove,
.otherend_changed = pciback_frontend_changed,
.probe = xen_pcibk_xenbus_probe,
.remove = xen_pcibk_xenbus_remove,
.otherend_changed = xen_pcibk_frontend_changed,
};
int __init pciback_xenbus_register(void)
int __init xen_pcibk_xenbus_register(void)
{
pciback_wq = create_workqueue("pciback_workqueue");
if (!pciback_wq) {
xen_pcibk_wq = create_workqueue("xen_pciback_workqueue");
if (!xen_pcibk_wq) {
printk(KERN_ERR "%s: create "
"pciback_workqueue failed\n", __func__);
"xen_pciback_workqueue failed\n", __func__);
return -EFAULT;
}
return xenbus_register_backend(&xenbus_pciback_driver);
return xenbus_register_backend(&xenbus_xen_pcibk_driver);
}
void __exit pciback_xenbus_unregister(void)
void __exit xen_pcibk_xenbus_unregister(void)
{
destroy_workqueue(pciback_wq);
xenbus_unregister_driver(&xenbus_pciback_driver);
destroy_workqueue(xen_pcibk_wq);
xenbus_unregister_driver(&xenbus_xen_pcibk_driver);
}