Commit a92336a1 authored by Konrad Rzeszutek Wilk

xen/pciback: Drop two backends, squash and cleanup some code.

 - Remove the slot and controller backends as they
   are not used.
 - Document the pciback_[read|write]_config_[byte|word|dword] helpers
   to make them easier to find.
 - Collapse the code from conf_space_capability_msi into pciback_ops.c
 - Collapse conf_space_capability_[pm|vpd].c in conf_space_capability.c
   [and remove the conf_space_capability.h file]
 - Rename all visible functions from pciback to xen_pcibk.
 - Rename all the printk/pr_info, etc. messages that use "pciback" to say
   "xen-pciback" (the prefix pattern is sketched just below the commit header).
 - Convert functions that are not referenced outside of the file they live in
   to static, to save on namespace.
 - Do the same thing for structures that are internal to the driver.
 - Run checkpatch.pl after the renames, fix up its warnings, and fix
   any compile errors caused by the renames.
 - Clean up any structs that checkpatch.pl commented on or that just
   look odd.
Signed-off-by: Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>
parent c288b67b
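A minimal sketch of the printk prefix change described in the commit message (illustrative only, not part of the commit): the old messages hard-coded the "pciback: " prefix, while the renamed code defines DRV_NAME per file and relies on C string-literal concatenation, so the prefix compiles to "xen-pciback: ..." without touching the format arguments.

#include <linux/kernel.h>
#include <linux/pci.h>

#define DRV_NAME "xen-pciback"

/* Before the rename: prefix spelled out by hand in every message. */
static void old_style_log(struct pci_dev *dev)
{
        printk(KERN_DEBUG "pciback: %s: enable\n", pci_name(dev));
}

/* After the rename: adjacent string literals are concatenated at compile
 * time, producing "xen-pciback: %s: enable\n". */
static void new_style_log(struct pci_dev *dev)
{
        printk(KERN_DEBUG DRV_NAME ": %s: enable\n", pci_name(dev));
}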
......@@ -3,14 +3,9 @@ obj-$(CONFIG_XEN_PCIDEV_BACKEND) += xen-pciback.o
xen-pciback-y := pci_stub.o pciback_ops.o xenbus.o
xen-pciback-y += conf_space.o conf_space_header.o \
conf_space_capability.o \
conf_space_capability_vpd.o \
conf_space_capability_pm.o \
conf_space_quirks.o
xen-pciback-$(CONFIG_PCI_MSI) += conf_space_capability_msi.o
xen-pciback-$(CONFIG_XEN_PCIDEV_BACKEND_VPCI) += vpci.o
xen-pciback-$(CONFIG_XEN_PCIDEV_BACKEND_SLOT) += slot.o
xen-pciback-$(CONFIG_XEN_PCIDEV_BACKEND_PASS) += passthrough.o
xen-pciback-$(CONFIG_XEN_PCIDEV_BACKEND_CONTROLLER) += controller.o
ifeq ($(CONFIG_XEN_PCIDEV_BE_DEBUG),y)
EXTRA_CFLAGS += -DDEBUG
......
......@@ -15,11 +15,14 @@
#include "conf_space.h"
#include "conf_space_quirks.h"
#define DRV_NAME "xen-pciback"
static int permissive;
module_param(permissive, bool, 0644);
/* This is where xen_pcibk_read_config_byte, xen_pcibk_read_config_word,
* xen_pcibk_write_config_word, and xen_pcibk_write_config_byte are created. */
#define DEFINE_PCI_CONFIG(op, size, type) \
int pciback_##op##_config_##size \
int xen_pcibk_##op##_config_##size \
(struct pci_dev *dev, int offset, type value, void *data) \
{ \
return pci_##op##_config_##size(dev, offset, value); \
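A hedged expansion sketch for the renamed DEFINE_PCI_CONFIG macro. The actual invocations sit outside this hunk, so the (read, byte, u8 *) instance below is an assumption based on the xen_pcibk_read_config_byte declaration in conf_space.h; substituting op=read, size=byte, type=u8 * into the macro yields:

/* Assumed instance: DEFINE_PCI_CONFIG(read, byte, u8 *) expands to */
int xen_pcibk_read_config_byte(struct pci_dev *dev, int offset, u8 *value,
                               void *data)
{
        /* The void *data argument is accepted for the config_field callback
         * signature but ignored; the access goes to the real config space. */
        return pci_read_config_byte(dev, offset, value);
}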
......@@ -138,11 +141,11 @@ static int pcibios_err_to_errno(int err)
return err;
}
int pciback_config_read(struct pci_dev *dev, int offset, int size,
int xen_pcibk_config_read(struct pci_dev *dev, int offset, int size,
u32 *ret_val)
{
int err = 0;
struct pciback_dev_data *dev_data = pci_get_drvdata(dev);
struct xen_pcibk_dev_data *dev_data = pci_get_drvdata(dev);
const struct config_field_entry *cfg_entry;
const struct config_field *field;
int req_start, req_end, field_start, field_end;
......@@ -151,7 +154,7 @@ int pciback_config_read(struct pci_dev *dev, int offset, int size,
u32 value = 0, tmp_val;
if (unlikely(verbose_request))
printk(KERN_DEBUG "pciback: %s: read %d bytes at 0x%x\n",
printk(KERN_DEBUG DRV_NAME ": %s: read %d bytes at 0x%x\n",
pci_name(dev), size, offset);
if (!valid_request(offset, size)) {
......@@ -195,17 +198,17 @@ int pciback_config_read(struct pci_dev *dev, int offset, int size,
out:
if (unlikely(verbose_request))
printk(KERN_DEBUG "pciback: %s: read %d bytes at 0x%x = %x\n",
printk(KERN_DEBUG DRV_NAME ": %s: read %d bytes at 0x%x = %x\n",
pci_name(dev), size, offset, value);
*ret_val = value;
return pcibios_err_to_errno(err);
}
int pciback_config_write(struct pci_dev *dev, int offset, int size, u32 value)
int xen_pcibk_config_write(struct pci_dev *dev, int offset, int size, u32 value)
{
int err = 0, handled = 0;
struct pciback_dev_data *dev_data = pci_get_drvdata(dev);
struct xen_pcibk_dev_data *dev_data = pci_get_drvdata(dev);
const struct config_field_entry *cfg_entry;
const struct config_field *field;
u32 tmp_val;
......@@ -213,7 +216,7 @@ int pciback_config_write(struct pci_dev *dev, int offset, int size, u32 value)
if (unlikely(verbose_request))
printk(KERN_DEBUG
"pciback: %s: write request %d bytes at 0x%x = %x\n",
DRV_NAME ": %s: write request %d bytes at 0x%x = %x\n",
pci_name(dev), size, offset, value);
if (!valid_request(offset, size))
......@@ -231,7 +234,7 @@ int pciback_config_write(struct pci_dev *dev, int offset, int size, u32 value)
|| (req_end > field_start && req_end <= field_end)) {
tmp_val = 0;
err = pciback_config_read(dev, field_start,
err = xen_pcibk_config_read(dev, field_start,
field->size, &tmp_val);
if (err)
break;
......@@ -290,9 +293,9 @@ int pciback_config_write(struct pci_dev *dev, int offset, int size, u32 value)
return pcibios_err_to_errno(err);
}
void pciback_config_free_dyn_fields(struct pci_dev *dev)
void xen_pcibk_config_free_dyn_fields(struct pci_dev *dev)
{
struct pciback_dev_data *dev_data = pci_get_drvdata(dev);
struct xen_pcibk_dev_data *dev_data = pci_get_drvdata(dev);
struct config_field_entry *cfg_entry, *t;
const struct config_field *field;
......@@ -316,9 +319,9 @@ void pciback_config_free_dyn_fields(struct pci_dev *dev)
}
}
void pciback_config_reset_dev(struct pci_dev *dev)
void xen_pcibk_config_reset_dev(struct pci_dev *dev)
{
struct pciback_dev_data *dev_data = pci_get_drvdata(dev);
struct xen_pcibk_dev_data *dev_data = pci_get_drvdata(dev);
const struct config_field_entry *cfg_entry;
const struct config_field *field;
......@@ -334,9 +337,9 @@ void pciback_config_reset_dev(struct pci_dev *dev)
}
}
void pciback_config_free_dev(struct pci_dev *dev)
void xen_pcibk_config_free_dev(struct pci_dev *dev)
{
struct pciback_dev_data *dev_data = pci_get_drvdata(dev);
struct xen_pcibk_dev_data *dev_data = pci_get_drvdata(dev);
struct config_field_entry *cfg_entry, *t;
const struct config_field *field;
......@@ -356,12 +359,12 @@ void pciback_config_free_dev(struct pci_dev *dev)
}
}
int pciback_config_add_field_offset(struct pci_dev *dev,
int xen_pcibk_config_add_field_offset(struct pci_dev *dev,
const struct config_field *field,
unsigned int base_offset)
{
int err = 0;
struct pciback_dev_data *dev_data = pci_get_drvdata(dev);
struct xen_pcibk_dev_data *dev_data = pci_get_drvdata(dev);
struct config_field_entry *cfg_entry;
void *tmp;
......@@ -376,7 +379,7 @@ int pciback_config_add_field_offset(struct pci_dev *dev,
cfg_entry->base_offset = base_offset;
/* silently ignore duplicate fields */
err = pciback_field_is_dup(dev, OFFSET(cfg_entry));
err = xen_pcibk_field_is_dup(dev, OFFSET(cfg_entry));
if (err)
goto out;
......@@ -406,30 +409,30 @@ int pciback_config_add_field_offset(struct pci_dev *dev,
* certain registers (like the base address registers (BARs) so that we can
* keep the client from manipulating them directly.
*/
int pciback_config_init_dev(struct pci_dev *dev)
int xen_pcibk_config_init_dev(struct pci_dev *dev)
{
int err = 0;
struct pciback_dev_data *dev_data = pci_get_drvdata(dev);
struct xen_pcibk_dev_data *dev_data = pci_get_drvdata(dev);
dev_dbg(&dev->dev, "initializing virtual configuration space\n");
INIT_LIST_HEAD(&dev_data->config_fields);
err = pciback_config_header_add_fields(dev);
err = xen_pcibk_config_header_add_fields(dev);
if (err)
goto out;
err = pciback_config_capability_add_fields(dev);
err = xen_pcibk_config_capability_add_fields(dev);
if (err)
goto out;
err = pciback_config_quirks_init(dev);
err = xen_pcibk_config_quirks_init(dev);
out:
return err;
}
int pciback_config_init(void)
int xen_pcibk_config_init(void)
{
return pciback_config_capability_init();
return xen_pcibk_config_capability_init();
}
......@@ -69,35 +69,35 @@ struct config_field_entry {
/* Add fields to a device - the add_fields macro expects to get a pointer to
* the first entry in an array (of which the ending is marked by size==0)
*/
int pciback_config_add_field_offset(struct pci_dev *dev,
int xen_pcibk_config_add_field_offset(struct pci_dev *dev,
const struct config_field *field,
unsigned int offset);
static inline int pciback_config_add_field(struct pci_dev *dev,
static inline int xen_pcibk_config_add_field(struct pci_dev *dev,
const struct config_field *field)
{
return pciback_config_add_field_offset(dev, field, 0);
return xen_pcibk_config_add_field_offset(dev, field, 0);
}
static inline int pciback_config_add_fields(struct pci_dev *dev,
static inline int xen_pcibk_config_add_fields(struct pci_dev *dev,
const struct config_field *field)
{
int i, err = 0;
for (i = 0; field[i].size != 0; i++) {
err = pciback_config_add_field(dev, &field[i]);
err = xen_pcibk_config_add_field(dev, &field[i]);
if (err)
break;
}
return err;
}
static inline int pciback_config_add_fields_offset(struct pci_dev *dev,
static inline int xen_pcibk_config_add_fields_offset(struct pci_dev *dev,
const struct config_field *field,
unsigned int offset)
{
int i, err = 0;
for (i = 0; field[i].size != 0; i++) {
err = pciback_config_add_field_offset(dev, &field[i], offset);
err = xen_pcibk_config_add_field_offset(dev, &field[i], offset);
if (err)
break;
}
......@@ -105,22 +105,22 @@ static inline int pciback_config_add_fields_offset(struct pci_dev *dev,
}
/* Read/Write the real configuration space */
int pciback_read_config_byte(struct pci_dev *dev, int offset, u8 *value,
int xen_pcibk_read_config_byte(struct pci_dev *dev, int offset, u8 *value,
void *data);
int pciback_read_config_word(struct pci_dev *dev, int offset, u16 *value,
int xen_pcibk_read_config_word(struct pci_dev *dev, int offset, u16 *value,
void *data);
int pciback_read_config_dword(struct pci_dev *dev, int offset, u32 *value,
int xen_pcibk_read_config_dword(struct pci_dev *dev, int offset, u32 *value,
void *data);
int pciback_write_config_byte(struct pci_dev *dev, int offset, u8 value,
int xen_pcibk_write_config_byte(struct pci_dev *dev, int offset, u8 value,
void *data);
int pciback_write_config_word(struct pci_dev *dev, int offset, u16 value,
int xen_pcibk_write_config_word(struct pci_dev *dev, int offset, u16 value,
void *data);
int pciback_write_config_dword(struct pci_dev *dev, int offset, u32 value,
int xen_pcibk_write_config_dword(struct pci_dev *dev, int offset, u32 value,
void *data);
int pciback_config_capability_init(void);
int xen_pcibk_config_capability_init(void);
int pciback_config_header_add_fields(struct pci_dev *dev);
int pciback_config_capability_add_fields(struct pci_dev *dev);
int xen_pcibk_config_header_add_fields(struct pci_dev *dev);
int xen_pcibk_config_capability_add_fields(struct pci_dev *dev);
#endif /* __XEN_PCIBACK_CONF_SPACE_H__ */
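As the comment near the top of this header's hunk notes, the add_fields helpers walk a config_field array whose end is marked by an entry with size == 0. A minimal usage sketch, with a hypothetical handler and field array (not from the commit), assuming the field layout shown in this header:

static int demo_status_read(struct pci_dev *dev, int offset, u16 *value,
                            void *data)
{
        /* Hypothetical handler: simply forward to the real config space. */
        return xen_pcibk_read_config_word(dev, offset, value, data);
}

static const struct config_field demo_fields[] = {
        {
                .offset   = PCI_STATUS,
                .size     = 2,
                .u.w.read = demo_status_read,
        },
        {}      /* size == 0 terminates the array */
};

/* During device setup one would then add the overlay with:
 *      err = xen_pcibk_config_add_fields(dev, demo_fields);
 */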
......@@ -9,29 +9,36 @@
#include <linux/pci.h>
#include "pciback.h"
#include "conf_space.h"
#include "conf_space_capability.h"
static LIST_HEAD(capabilities);
struct xen_pcibk_config_capability {
struct list_head cap_list;
int capability;
/* If the device has the capability found above, add these fields */
const struct config_field *fields;
};
static const struct config_field caplist_header[] = {
{
.offset = PCI_CAP_LIST_ID,
.size = 2, /* encompass PCI_CAP_LIST_ID & PCI_CAP_LIST_NEXT */
.u.w.read = pciback_read_config_word,
.u.w.read = xen_pcibk_read_config_word,
.u.w.write = NULL,
},
{}
};
static inline void register_capability(struct pciback_config_capability *cap)
static inline void register_capability(struct xen_pcibk_config_capability *cap)
{
list_add_tail(&cap->cap_list, &capabilities);
}
int pciback_config_capability_add_fields(struct pci_dev *dev)
int xen_pcibk_config_capability_add_fields(struct pci_dev *dev)
{
int err = 0;
struct pciback_config_capability *cap;
struct xen_pcibk_config_capability *cap;
int cap_offset;
list_for_each_entry(cap, &capabilities, cap_list) {
......@@ -40,12 +47,12 @@ int pciback_config_capability_add_fields(struct pci_dev *dev)
dev_dbg(&dev->dev, "Found capability 0x%x at 0x%x\n",
cap->capability, cap_offset);
err = pciback_config_add_fields_offset(dev,
err = xen_pcibk_config_add_fields_offset(dev,
caplist_header,
cap_offset);
if (err)
goto out;
err = pciback_config_add_fields_offset(dev,
err = xen_pcibk_config_add_fields_offset(dev,
cap->fields,
cap_offset);
if (err)
......@@ -57,10 +64,144 @@ int pciback_config_capability_add_fields(struct pci_dev *dev)
return err;
}
int pciback_config_capability_init(void)
static int vpd_address_write(struct pci_dev *dev, int offset, u16 value,
void *data)
{
/* Disallow writes to the vital product data */
if (value & PCI_VPD_ADDR_F)
return PCIBIOS_SET_FAILED;
else
return pci_write_config_word(dev, offset, value);
}
static const struct config_field caplist_vpd[] = {
{
.offset = PCI_VPD_ADDR,
.size = 2,
.u.w.read = xen_pcibk_read_config_word,
.u.w.write = vpd_address_write,
},
{
.offset = PCI_VPD_DATA,
.size = 4,
.u.dw.read = xen_pcibk_read_config_dword,
.u.dw.write = NULL,
},
{}
};
static int pm_caps_read(struct pci_dev *dev, int offset, u16 *value,
void *data)
{
int err;
u16 real_value;
err = pci_read_config_word(dev, offset, &real_value);
if (err)
goto out;
*value = real_value & ~PCI_PM_CAP_PME_MASK;
out:
return err;
}
/* PM_OK_BITS specifies the bits that the driver domain is allowed to change.
* Can't allow driver domain to enable PMEs - they're shared */
#define PM_OK_BITS (PCI_PM_CTRL_PME_STATUS|PCI_PM_CTRL_DATA_SEL_MASK)
static int pm_ctrl_write(struct pci_dev *dev, int offset, u16 new_value,
void *data)
{
int err;
u16 old_value;
pci_power_t new_state, old_state;
err = pci_read_config_word(dev, offset, &old_value);
if (err)
goto out;
old_state = (pci_power_t)(old_value & PCI_PM_CTRL_STATE_MASK);
new_state = (pci_power_t)(new_value & PCI_PM_CTRL_STATE_MASK);
new_value &= PM_OK_BITS;
if ((old_value & PM_OK_BITS) != new_value) {
new_value = (old_value & ~PM_OK_BITS) | new_value;
err = pci_write_config_word(dev, offset, new_value);
if (err)
goto out;
}
/* Let pci core handle the power management change */
dev_dbg(&dev->dev, "set power state to %x\n", new_state);
err = pci_set_power_state(dev, new_state);
if (err) {
err = PCIBIOS_SET_FAILED;
goto out;
}
out:
return err;
}
/* Ensure PMEs are disabled */
static void *pm_ctrl_init(struct pci_dev *dev, int offset)
{
int err;
u16 value;
err = pci_read_config_word(dev, offset, &value);
if (err)
goto out;
if (value & PCI_PM_CTRL_PME_ENABLE) {
value &= ~PCI_PM_CTRL_PME_ENABLE;
err = pci_write_config_word(dev, offset, value);
}
out:
return ERR_PTR(err);
}
static const struct config_field caplist_pm[] = {
{
.offset = PCI_PM_PMC,
.size = 2,
.u.w.read = pm_caps_read,
},
{
.offset = PCI_PM_CTRL,
.size = 2,
.init = pm_ctrl_init,
.u.w.read = xen_pcibk_read_config_word,
.u.w.write = pm_ctrl_write,
},
{
.offset = PCI_PM_PPB_EXTENSIONS,
.size = 1,
.u.b.read = xen_pcibk_read_config_byte,
},
{
.offset = PCI_PM_DATA_REGISTER,
.size = 1,
.u.b.read = xen_pcibk_read_config_byte,
},
{}
};
static struct xen_pcibk_config_capability xen_pcibk_config_capability_pm = {
.capability = PCI_CAP_ID_PM,
.fields = caplist_pm,
};
static struct xen_pcibk_config_capability xen_pcibk_config_capability_vpd = {
.capability = PCI_CAP_ID_VPD,
.fields = caplist_vpd,
};
int xen_pcibk_config_capability_init(void)
{
register_capability(&pciback_config_capability_vpd);
register_capability(&pciback_config_capability_pm);
register_capability(&xen_pcibk_config_capability_vpd);
register_capability(&xen_pcibk_config_capability_pm);
return 0;
}
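The consolidated conf_space_capability.c keeps a simple registry: each overlay is a static struct xen_pcibk_config_capability hooked onto the capabilities list at init time. A hypothetical sketch of adding one more overlay (names and capability choice are assumptions, not part of the commit):

/* Hypothetical extra overlay; PCI_CAP_ID_VNDR is the vendor-specific
 * capability ID from <linux/pci_regs.h>, and caplist_demo would be a
 * {}-terminated config_field array like caplist_vpd or caplist_pm. */
static struct xen_pcibk_config_capability xen_pcibk_config_capability_demo = {
        .capability = PCI_CAP_ID_VNDR,
        .fields     = caplist_demo,
};

/* It would then be registered next to the VPD and PM overlays inside
 * xen_pcibk_config_capability_init():
 *      register_capability(&xen_pcibk_config_capability_demo);
 */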
/*
* PCI Backend - Data structures for special overlays for structures on
* the capability list.
*
* Author: Ryan Wilson <hap9@epoch.ncsc.mil>
*/
#ifndef __PCIBACK_CONFIG_CAPABILITY_H__
#define __PCIBACK_CONFIG_CAPABILITY_H__
#include <linux/pci.h>
#include <linux/list.h>
struct pciback_config_capability {
struct list_head cap_list;
int capability;
/* If the device has the capability found above, add these fields */
const struct config_field *fields;
};
extern struct pciback_config_capability pciback_config_capability_vpd;
extern struct pciback_config_capability pciback_config_capability_pm;
#endif
/*
* PCI Backend -- Configuration overlay for MSI capability
*/
#include <linux/pci.h>
#include <linux/slab.h>
#include "conf_space.h"
#include "conf_space_capability.h"
#include <xen/interface/io/pciif.h>
#include <xen/events.h>
#include "pciback.h"
int pciback_enable_msi(struct pciback_device *pdev,
struct pci_dev *dev, struct xen_pci_op *op)
{
struct pciback_dev_data *dev_data;
int otherend = pdev->xdev->otherend_id;
int status;
if (unlikely(verbose_request))
printk(KERN_DEBUG "pciback: %s: enable MSI\n", pci_name(dev));
status = pci_enable_msi(dev);
if (status) {
printk(KERN_ERR "error enable msi for guest %x status %x\n",
otherend, status);
op->value = 0;
return XEN_PCI_ERR_op_failed;
}
/* The value the guest needs is actually the IDT vector, not the
* the local domain's IRQ number. */
op->value = dev->irq ? xen_pirq_from_irq(dev->irq) : 0;
if (unlikely(verbose_request))
printk(KERN_DEBUG "pciback: %s: MSI: %d\n", pci_name(dev),
op->value);
dev_data = pci_get_drvdata(dev);
if (dev_data)
dev_data->ack_intr = 0;
return 0;
}
int pciback_disable_msi(struct pciback_device *pdev,
struct pci_dev *dev, struct xen_pci_op *op)
{
struct pciback_dev_data *dev_data;
if (unlikely(verbose_request))
printk(KERN_DEBUG "pciback: %s: disable MSI\n", pci_name(dev));
pci_disable_msi(dev);
op->value = dev->irq ? xen_pirq_from_irq(dev->irq) : 0;
if (unlikely(verbose_request))
printk(KERN_DEBUG "pciback: %s: MSI: %d\n", pci_name(dev),
op->value);
dev_data = pci_get_drvdata(dev);
if (dev_data)
dev_data->ack_intr = 1;
return 0;
}
int pciback_enable_msix(struct pciback_device *pdev,
struct pci_dev *dev, struct xen_pci_op *op)
{
struct pciback_dev_data *dev_data;
int i, result;
struct msix_entry *entries;
if (unlikely(verbose_request))
printk(KERN_DEBUG "pciback: %s: enable MSI-X\n",
pci_name(dev));
if (op->value > SH_INFO_MAX_VEC)
return -EINVAL;
entries = kmalloc(op->value * sizeof(*entries), GFP_KERNEL);
if (entries == NULL)
return -ENOMEM;
for (i = 0; i < op->value; i++) {
entries[i].entry = op->msix_entries[i].entry;
entries[i].vector = op->msix_entries[i].vector;
}
result = pci_enable_msix(dev, entries, op->value);
if (result == 0) {
for (i = 0; i < op->value; i++) {
op->msix_entries[i].entry = entries[i].entry;
if (entries[i].vector)
op->msix_entries[i].vector =
xen_pirq_from_irq(entries[i].vector);
if (unlikely(verbose_request))
printk(KERN_DEBUG "pciback: %s: " \
"MSI-X[%d]: %d\n",
pci_name(dev), i,
op->msix_entries[i].vector);
}
} else {
printk(KERN_WARNING "pciback: %s: failed to enable MSI-X: err %d!\n",
pci_name(dev), result);
}
kfree(entries);
op->value = result;
dev_data = pci_get_drvdata(dev);
if (dev_data)
dev_data->ack_intr = 0;
return result;
}
int pciback_disable_msix(struct pciback_device *pdev,
struct pci_dev *dev, struct xen_pci_op *op)
{
struct pciback_dev_data *dev_data;
if (unlikely(verbose_request))
printk(KERN_DEBUG "pciback: %s: disable MSI-X\n",
pci_name(dev));
pci_disable_msix(dev);
/*
* SR-IOV devices (which don't have any legacy IRQ) have
* an undefined IRQ value of zero.
*/
op->value = dev->irq ? xen_pirq_from_irq(dev->irq) : 0;
if (unlikely(verbose_request))
printk(KERN_DEBUG "pciback: %s: MSI-X: %d\n", pci_name(dev),
op->value);
dev_data = pci_get_drvdata(dev);
if (dev_data)
dev_data->ack_intr = 1;
return 0;
}
/*
* PCI Backend - Configuration space overlay for power management
*
* Author: Ryan Wilson <hap9@epoch.ncsc.mil>
*/
#include <linux/pci.h>
#include "conf_space.h"
#include "conf_space_capability.h"
static int pm_caps_read(struct pci_dev *dev, int offset, u16 *value,
void *data)
{
int err;
u16 real_value;
err = pci_read_config_word(dev, offset, &real_value);
if (err)
goto out;
*value = real_value & ~PCI_PM_CAP_PME_MASK;
out:
return err;
}
/* PM_OK_BITS specifies the bits that the driver domain is allowed to change.
* Can't allow driver domain to enable PMEs - they're shared */
#define PM_OK_BITS (PCI_PM_CTRL_PME_STATUS|PCI_PM_CTRL_DATA_SEL_MASK)
static int pm_ctrl_write(struct pci_dev *dev, int offset, u16 new_value,
void *data)
{
int err;
u16 old_value;
pci_power_t new_state, old_state;
err = pci_read_config_word(dev, offset, &old_value);
if (err)
goto out;
old_state = (pci_power_t)(old_value & PCI_PM_CTRL_STATE_MASK);
new_state = (pci_power_t)(new_value & PCI_PM_CTRL_STATE_MASK);
new_value &= PM_OK_BITS;
if ((old_value & PM_OK_BITS) != new_value) {
new_value = (old_value & ~PM_OK_BITS) | new_value;
err = pci_write_config_word(dev, offset, new_value);
if (err)
goto out;
}
/* Let pci core handle the power management change */
dev_dbg(&dev->dev, "set power state to %x\n", new_state);
err = pci_set_power_state(dev, new_state);
if (err) {
err = PCIBIOS_SET_FAILED;
goto out;
}
out:
return err;
}
/* Ensure PMEs are disabled */
static void *pm_ctrl_init(struct pci_dev *dev, int offset)
{
int err;
u16 value;
err = pci_read_config_word(dev, offset, &value);
if (err)
goto out;
if (value & PCI_PM_CTRL_PME_ENABLE) {
value &= ~PCI_PM_CTRL_PME_ENABLE;
err = pci_write_config_word(dev, offset, value);
}
out:
return ERR_PTR(err);
}
static const struct config_field caplist_pm[] = {
{
.offset = PCI_PM_PMC,
.size = 2,
.u.w.read = pm_caps_read,
},
{
.offset = PCI_PM_CTRL,
.size = 2,
.init = pm_ctrl_init,
.u.w.read = pciback_read_config_word,
.u.w.write = pm_ctrl_write,
},
{
.offset = PCI_PM_PPB_EXTENSIONS,
.size = 1,
.u.b.read = pciback_read_config_byte,
},
{
.offset = PCI_PM_DATA_REGISTER,
.size = 1,
.u.b.read = pciback_read_config_byte,
},
{}
};
struct pciback_config_capability pciback_config_capability_pm = {
.capability = PCI_CAP_ID_PM,
.fields = caplist_pm,
};
/*
* PCI Backend - Configuration space overlay for Vital Product Data
*
* Author: Ryan Wilson <hap9@epoch.ncsc.mil>
*/
#include <linux/pci.h>
#include "conf_space.h"
#include "conf_space_capability.h"
static int vpd_address_write(struct pci_dev *dev, int offset, u16 value,
void *data)
{
/* Disallow writes to the vital product data */
if (value & PCI_VPD_ADDR_F)
return PCIBIOS_SET_FAILED;
else
return pci_write_config_word(dev, offset, value);
}
static const struct config_field caplist_vpd[] = {
{
.offset = PCI_VPD_ADDR,
.size = 2,
.u.w.read = pciback_read_config_word,
.u.w.write = vpd_address_write,
},
{
.offset = PCI_VPD_DATA,
.size = 4,
.u.dw.read = pciback_read_config_dword,
.u.dw.write = NULL,
},
{}
};
struct pciback_config_capability pciback_config_capability_vpd = {
.capability = PCI_CAP_ID_VPD,
.fields = caplist_vpd,
};
......@@ -15,6 +15,7 @@ struct pci_bar_info {
int which;
};
#define DRV_NAME "xen-pciback"
#define is_enable_cmd(value) ((value)&(PCI_COMMAND_MEMORY|PCI_COMMAND_IO))
#define is_master_cmd(value) ((value)&PCI_COMMAND_MASTER)
......@@ -23,7 +24,7 @@ static int command_read(struct pci_dev *dev, int offset, u16 *value, void *data)
int i;
int ret;
ret = pciback_read_config_word(dev, offset, value, data);
ret = xen_pcibk_read_config_word(dev, offset, value, data);
if (!atomic_read(&dev->enable_cnt))
return ret;
......@@ -39,13 +40,13 @@ static int command_read(struct pci_dev *dev, int offset, u16 *value, void *data)
static int command_write(struct pci_dev *dev, int offset, u16 value, void *data)
{
struct pciback_dev_data *dev_data;
struct xen_pcibk_dev_data *dev_data;
int err;
dev_data = pci_get_drvdata(dev);
if (!pci_is_enabled(dev) && is_enable_cmd(value)) {
if (unlikely(verbose_request))
printk(KERN_DEBUG "pciback: %s: enable\n",
printk(KERN_DEBUG DRV_NAME ": %s: enable\n",
pci_name(dev));
err = pci_enable_device(dev);
if (err)
......@@ -54,7 +55,7 @@ static int command_write(struct pci_dev *dev, int offset, u16 value, void *data)
dev_data->enable_intx = 1;
} else if (pci_is_enabled(dev) && !is_enable_cmd(value)) {
if (unlikely(verbose_request))
printk(KERN_DEBUG "pciback: %s: disable\n",
printk(KERN_DEBUG DRV_NAME ": %s: disable\n",
pci_name(dev));
pci_disable_device(dev);
if (dev_data)
......@@ -63,7 +64,7 @@ static int command_write(struct pci_dev *dev, int offset, u16 value, void *data)
if (!dev->is_busmaster && is_master_cmd(value)) {
if (unlikely(verbose_request))
printk(KERN_DEBUG "pciback: %s: set bus master\n",
printk(KERN_DEBUG DRV_NAME ": %s: set bus master\n",
pci_name(dev));
pci_set_master(dev);
}
......@@ -71,12 +72,12 @@ static int command_write(struct pci_dev *dev, int offset, u16 value, void *data)
if (value & PCI_COMMAND_INVALIDATE) {
if (unlikely(verbose_request))
printk(KERN_DEBUG
"pciback: %s: enable memory-write-invalidate\n",
DRV_NAME ": %s: enable memory-write-invalidate\n",
pci_name(dev));
err = pci_set_mwi(dev);
if (err) {
printk(KERN_WARNING
"pciback: %s: cannot enable "
DRV_NAME ": %s: cannot enable "
"memory-write-invalidate (%d)\n",
pci_name(dev), err);
value &= ~PCI_COMMAND_INVALIDATE;
......@@ -91,7 +92,7 @@ static int rom_write(struct pci_dev *dev, int offset, u32 value, void *data)
struct pci_bar_info *bar = data;
if (unlikely(!bar)) {
printk(KERN_WARNING "pciback: driver data not found for %s\n",
printk(KERN_WARNING DRV_NAME ": driver data not found for %s\n",
pci_name(dev));
return XEN_PCI_ERR_op_failed;
}
......@@ -125,7 +126,7 @@ static int bar_write(struct pci_dev *dev, int offset, u32 value, void *data)
struct pci_bar_info *bar = data;
if (unlikely(!bar)) {
printk(KERN_WARNING "pciback: driver data not found for %s\n",
printk(KERN_WARNING DRV_NAME ": driver data not found for %s\n",
pci_name(dev));
return XEN_PCI_ERR_op_failed;
}
......@@ -153,7 +154,7 @@ static int bar_read(struct pci_dev *dev, int offset, u32 * value, void *data)
struct pci_bar_info *bar = data;
if (unlikely(!bar)) {
printk(KERN_WARNING "pciback: driver data not found for %s\n",
printk(KERN_WARNING DRV_NAME ": driver data not found for %s\n",
pci_name(dev));
return XEN_PCI_ERR_op_failed;
}
......@@ -227,7 +228,7 @@ static void bar_release(struct pci_dev *dev, int offset, void *data)
kfree(data);
}
static int pciback_read_vendor(struct pci_dev *dev, int offset,
static int xen_pcibk_read_vendor(struct pci_dev *dev, int offset,
u16 *value, void *data)
{
*value = dev->vendor;
......@@ -235,7 +236,7 @@ static int pciback_read_vendor(struct pci_dev *dev, int offset,
return 0;
}
static int pciback_read_device(struct pci_dev *dev, int offset,
static int xen_pcibk_read_device(struct pci_dev *dev, int offset,
u16 *value, void *data)
{
*value = dev->device;
......@@ -272,12 +273,12 @@ static const struct config_field header_common[] = {
{
.offset = PCI_VENDOR_ID,
.size = 2,
.u.w.read = pciback_read_vendor,
.u.w.read = xen_pcibk_read_vendor,
},
{
.offset = PCI_DEVICE_ID,
.size = 2,
.u.w.read = pciback_read_device,
.u.w.read = xen_pcibk_read_device,
},
{
.offset = PCI_COMMAND,
......@@ -293,24 +294,24 @@ static const struct config_field header_common[] = {
{
.offset = PCI_INTERRUPT_PIN,
.size = 1,
.u.b.read = pciback_read_config_byte,
.u.b.read = xen_pcibk_read_config_byte,
},
{
/* Any side effects of letting driver domain control cache line? */
.offset = PCI_CACHE_LINE_SIZE,
.size = 1,
.u.b.read = pciback_read_config_byte,
.u.b.write = pciback_write_config_byte,
.u.b.read = xen_pcibk_read_config_byte,
.u.b.write = xen_pcibk_write_config_byte,
},
{
.offset = PCI_LATENCY_TIMER,
.size = 1,
.u.b.read = pciback_read_config_byte,
.u.b.read = xen_pcibk_read_config_byte,
},
{
.offset = PCI_BIST,
.size = 1,
.u.b.read = pciback_read_config_byte,
.u.b.read = xen_pcibk_read_config_byte,
.u.b.write = bist_write,
},
{}
......@@ -356,26 +357,26 @@ static const struct config_field header_1[] = {
{}
};
int pciback_config_header_add_fields(struct pci_dev *dev)
int xen_pcibk_config_header_add_fields(struct pci_dev *dev)
{
int err;
err = pciback_config_add_fields(dev, header_common);
err = xen_pcibk_config_add_fields(dev, header_common);
if (err)
goto out;
switch (dev->hdr_type) {
case PCI_HEADER_TYPE_NORMAL:
err = pciback_config_add_fields(dev, header_0);
err = xen_pcibk_config_add_fields(dev, header_0);
break;
case PCI_HEADER_TYPE_BRIDGE:
err = pciback_config_add_fields(dev, header_1);
err = xen_pcibk_config_add_fields(dev, header_1);
break;
default:
err = -EINVAL;
printk(KERN_ERR "pciback: %s: Unsupported header type %d!\n",
printk(KERN_ERR DRV_NAME ": %s: Unsupported header type %d!\n",
pci_name(dev), dev->hdr_type);
break;
}
......
......@@ -11,8 +11,8 @@
#include "conf_space.h"
#include "conf_space_quirks.h"
LIST_HEAD(pciback_quirks);
LIST_HEAD(xen_pcibk_quirks);
#define DRV_NAME "xen-pciback"
static inline const struct pci_device_id *
match_one_device(const struct pci_device_id *id, const struct pci_dev *dev)
{
......@@ -27,29 +27,29 @@ match_one_device(const struct pci_device_id *id, const struct pci_dev *dev)
return NULL;
}
struct pciback_config_quirk *pciback_find_quirk(struct pci_dev *dev)
static struct xen_pcibk_config_quirk *xen_pcibk_find_quirk(struct pci_dev *dev)
{
struct pciback_config_quirk *tmp_quirk;
struct xen_pcibk_config_quirk *tmp_quirk;
list_for_each_entry(tmp_quirk, &pciback_quirks, quirks_list)
list_for_each_entry(tmp_quirk, &xen_pcibk_quirks, quirks_list)
if (match_one_device(&tmp_quirk->devid, dev) != NULL)
goto out;
tmp_quirk = NULL;
printk(KERN_DEBUG
"quirk didn't match any device pciback knows about\n");
printk(KERN_DEBUG DRV_NAME
":quirk didn't match any device xen_pciback knows about\n");
out:
return tmp_quirk;
}
static inline void register_quirk(struct pciback_config_quirk *quirk)
static inline void register_quirk(struct xen_pcibk_config_quirk *quirk)
{
list_add_tail(&quirk->quirks_list, &pciback_quirks);
list_add_tail(&quirk->quirks_list, &xen_pcibk_quirks);
}
int pciback_field_is_dup(struct pci_dev *dev, unsigned int reg)
int xen_pcibk_field_is_dup(struct pci_dev *dev, unsigned int reg)
{
int ret = 0;
struct pciback_dev_data *dev_data = pci_get_drvdata(dev);
struct xen_pcibk_dev_data *dev_data = pci_get_drvdata(dev);
struct config_field_entry *cfg_entry;
list_for_each_entry(cfg_entry, &dev_data->config_fields, list) {
......@@ -61,38 +61,38 @@ int pciback_field_is_dup(struct pci_dev *dev, unsigned int reg)
return ret;
}
int pciback_config_quirks_add_field(struct pci_dev *dev, struct config_field
int xen_pcibk_config_quirks_add_field(struct pci_dev *dev, struct config_field
*field)
{
int err = 0;
switch (field->size) {
case 1:
field->u.b.read = pciback_read_config_byte;
field->u.b.write = pciback_write_config_byte;
field->u.b.read = xen_pcibk_read_config_byte;
field->u.b.write = xen_pcibk_write_config_byte;
break;
case 2:
field->u.w.read = pciback_read_config_word;
field->u.w.write = pciback_write_config_word;
field->u.w.read = xen_pcibk_read_config_word;
field->u.w.write = xen_pcibk_write_config_word;
break;
case 4:
field->u.dw.read = pciback_read_config_dword;
field->u.dw.write = pciback_write_config_dword;
field->u.dw.read = xen_pcibk_read_config_dword;
field->u.dw.write = xen_pcibk_write_config_dword;
break;
default:
err = -EINVAL;
goto out;
}
pciback_config_add_field(dev, field);
xen_pcibk_config_add_field(dev, field);
out:
return err;
}
int pciback_config_quirks_init(struct pci_dev *dev)
int xen_pcibk_config_quirks_init(struct pci_dev *dev)
{
struct pciback_config_quirk *quirk;
struct xen_pcibk_config_quirk *quirk;
int ret = 0;
quirk = kzalloc(sizeof(*quirk), GFP_ATOMIC);
......@@ -116,17 +116,17 @@ int pciback_config_quirks_init(struct pci_dev *dev)
return ret;
}
void pciback_config_field_free(struct config_field *field)
void xen_pcibk_config_field_free(struct config_field *field)
{
kfree(field);
}
int pciback_config_quirk_release(struct pci_dev *dev)
int xen_pcibk_config_quirk_release(struct pci_dev *dev)
{
struct pciback_config_quirk *quirk;
struct xen_pcibk_config_quirk *quirk;
int ret = 0;
quirk = pciback_find_quirk(dev);
quirk = xen_pcibk_find_quirk(dev);
if (!quirk) {
ret = -ENXIO;
goto out;
......
......@@ -11,25 +11,23 @@
#include <linux/pci.h>
#include <linux/list.h>
struct pciback_config_quirk {
struct xen_pcibk_config_quirk {
struct list_head quirks_list;
struct pci_device_id devid;
struct pci_dev *pdev;
};
struct pciback_config_quirk *pciback_find_quirk(struct pci_dev *dev);
int pciback_config_quirks_add_field(struct pci_dev *dev, struct config_field
int xen_pcibk_config_quirks_add_field(struct pci_dev *dev, struct config_field
*field);
int pciback_config_quirks_remove_field(struct pci_dev *dev, int reg);
int xen_pcibk_config_quirks_remove_field(struct pci_dev *dev, int reg);
int pciback_config_quirks_init(struct pci_dev *dev);
int xen_pcibk_config_quirks_init(struct pci_dev *dev);
void pciback_config_field_free(struct config_field *field);
void xen_pcibk_config_field_free(struct config_field *field);
int pciback_config_quirk_release(struct pci_dev *dev);
int xen_pcibk_config_quirk_release(struct pci_dev *dev);
int pciback_field_is_dup(struct pci_dev *dev, unsigned int reg);
int xen_pcibk_field_is_dup(struct pci_dev *dev, unsigned int reg);
#endif
......@@ -16,7 +16,7 @@ struct passthrough_dev_data {
spinlock_t lock;
};
struct pci_dev *pciback_get_pci_dev(struct pciback_device *pdev,
struct pci_dev *xen_pcibk_get_pci_dev(struct xen_pcibk_device *pdev,
unsigned int domain, unsigned int bus,
unsigned int devfn)
{
......@@ -41,7 +41,7 @@ struct pci_dev *pciback_get_pci_dev(struct pciback_device *pdev,
return dev;
}
int pciback_add_pci_dev(struct pciback_device *pdev, struct pci_dev *dev,
int xen_pcibk_add_pci_dev(struct xen_pcibk_device *pdev, struct pci_dev *dev,
int devid, publish_pci_dev_cb publish_cb)
{
struct passthrough_dev_data *dev_data = pdev->pci_dev_data;
......@@ -68,7 +68,8 @@ int pciback_add_pci_dev(struct pciback_device *pdev, struct pci_dev *dev,
return err;
}
void pciback_release_pci_dev(struct pciback_device *pdev, struct pci_dev *dev)
void xen_pcibk_release_pci_dev(struct xen_pcibk_device *pdev,
struct pci_dev *dev)
{
struct passthrough_dev_data *dev_data = pdev->pci_dev_data;
struct pci_dev_entry *dev_entry, *t;
......@@ -91,7 +92,7 @@ void pciback_release_pci_dev(struct pciback_device *pdev, struct pci_dev *dev)
pcistub_put_pci_dev(found_dev);
}
int pciback_init_devices(struct pciback_device *pdev)
int xen_pcibk_init_devices(struct xen_pcibk_device *pdev)
{
struct passthrough_dev_data *dev_data;
......@@ -108,7 +109,7 @@ int pciback_init_devices(struct pciback_device *pdev)
return 0;
}
int pciback_publish_pci_roots(struct pciback_device *pdev,
int xen_pcibk_publish_pci_roots(struct xen_pcibk_device *pdev,
publish_pci_root_cb publish_root_cb)
{
int err = 0;
......@@ -153,7 +154,7 @@ int pciback_publish_pci_roots(struct pciback_device *pdev,
return err;
}
void pciback_release_devices(struct pciback_device *pdev)
void xen_pcibk_release_devices(struct xen_pcibk_device *pdev)
{
struct passthrough_dev_data *dev_data = pdev->pci_dev_data;
struct pci_dev_entry *dev_entry, *t;
......@@ -168,11 +169,10 @@ void pciback_release_devices(struct pciback_device *pdev)
pdev->pci_dev_data = NULL;
}
int pciback_get_pcifront_dev(struct pci_dev *pcidev,
struct pciback_device *pdev,
int xen_pcibk_get_pcifront_dev(struct pci_dev *pcidev,
struct xen_pcibk_device *pdev,
unsigned int *domain, unsigned int *bus,
unsigned int *devfn)
{
*domain = pci_domain_nr(pcidev->bus);
*bus = pcidev->bus->number;
......
......@@ -25,25 +25,19 @@ struct pci_dev_entry {
#define _PCIB_op_pending (1)
#define PCIB_op_pending (1<<(_PCIB_op_pending))
struct pciback_device {
struct xen_pcibk_device {
void *pci_dev_data;
spinlock_t dev_lock;
struct xenbus_device *xdev;
struct xenbus_watch be_watch;
u8 be_watching;
int evtchn_irq;
struct xen_pci_sharedinfo *sh_info;
unsigned long flags;
struct work_struct op_work;
};
struct pciback_dev_data {
struct xen_pcibk_dev_data {
struct list_head config_fields;
unsigned int permissive:1;
unsigned int warned_on_write:1;
......@@ -52,91 +46,78 @@ struct pciback_dev_data {
unsigned int ack_intr:1; /* .. and ACK-ing */
unsigned long handled;
unsigned int irq; /* Saved in case device transitions to MSI/MSI-X */
char irq_name[0]; /* pciback[000:04:00.0] */
char irq_name[0]; /* xen-pcibk[000:04:00.0] */
};
/* Used by XenBus and pciback_ops.c */
extern wait_queue_head_t aer_wait_queue;
extern struct workqueue_struct *pciback_wq;
/* Used by XenBus and xen_pcibk_ops.c */
extern wait_queue_head_t xen_pcibk_aer_wait_queue;
extern struct workqueue_struct *xen_pcibk_wq;
/* Used by pcistub.c and conf_space_quirks.c */
extern struct list_head pciback_quirks;
extern struct list_head xen_pcibk_quirks;
/* Get/Put PCI Devices that are hidden from the PCI Backend Domain */
struct pci_dev *pcistub_get_pci_dev_by_slot(struct pciback_device *pdev,
struct pci_dev *pcistub_get_pci_dev_by_slot(struct xen_pcibk_device *pdev,
int domain, int bus,
int slot, int func);
struct pci_dev *pcistub_get_pci_dev(struct pciback_device *pdev,
struct pci_dev *pcistub_get_pci_dev(struct xen_pcibk_device *pdev,
struct pci_dev *dev);
void pcistub_put_pci_dev(struct pci_dev *dev);
/* Ensure a device is turned off or reset */
void pciback_reset_device(struct pci_dev *pdev);
void xen_pcibk_reset_device(struct pci_dev *pdev);
/* Access a virtual configuration space for a PCI device */
int pciback_config_init(void);
int pciback_config_init_dev(struct pci_dev *dev);
void pciback_config_free_dyn_fields(struct pci_dev *dev);
void pciback_config_reset_dev(struct pci_dev *dev);
void pciback_config_free_dev(struct pci_dev *dev);
int pciback_config_read(struct pci_dev *dev, int offset, int size,
int xen_pcibk_config_init(void);
int xen_pcibk_config_init_dev(struct pci_dev *dev);
void xen_pcibk_config_free_dyn_fields(struct pci_dev *dev);
void xen_pcibk_config_reset_dev(struct pci_dev *dev);
void xen_pcibk_config_free_dev(struct pci_dev *dev);
int xen_pcibk_config_read(struct pci_dev *dev, int offset, int size,
u32 *ret_val);
int pciback_config_write(struct pci_dev *dev, int offset, int size, u32 value);
int xen_pcibk_config_write(struct pci_dev *dev, int offset, int size,
u32 value);
/* Handle requests for specific devices from the frontend */
typedef int (*publish_pci_dev_cb) (struct pciback_device *pdev,
typedef int (*publish_pci_dev_cb) (struct xen_pcibk_device *pdev,
unsigned int domain, unsigned int bus,
unsigned int devfn, unsigned int devid);
typedef int (*publish_pci_root_cb) (struct pciback_device *pdev,
typedef int (*publish_pci_root_cb) (struct xen_pcibk_device *pdev,
unsigned int domain, unsigned int bus);
int pciback_add_pci_dev(struct pciback_device *pdev, struct pci_dev *dev,
int xen_pcibk_add_pci_dev(struct xen_pcibk_device *pdev, struct pci_dev *dev,
int devid, publish_pci_dev_cb publish_cb);
void pciback_release_pci_dev(struct pciback_device *pdev, struct pci_dev *dev);
struct pci_dev *pciback_get_pci_dev(struct pciback_device *pdev,
void xen_pcibk_release_pci_dev(struct xen_pcibk_device *pdev,
struct pci_dev *dev);
struct pci_dev *xen_pcibk_get_pci_dev(struct xen_pcibk_device *pdev,
unsigned int domain, unsigned int bus,
unsigned int devfn);
/**
* Add for domain0 PCIE-AER handling. Get guest domain/bus/devfn in pciback
* Add for domain0 PCIE-AER handling. Get guest domain/bus/devfn in xen_pcibk
* before sending aer request to pcifront, so that guest could identify
* device, coopearte with pciback to finish aer recovery job if device driver
* device, coopearte with xen_pcibk to finish aer recovery job if device driver
* has the capability
*/
int pciback_get_pcifront_dev(struct pci_dev *pcidev,
struct pciback_device *pdev,
int xen_pcibk_get_pcifront_dev(struct pci_dev *pcidev,
struct xen_pcibk_device *pdev,
unsigned int *domain, unsigned int *bus,
unsigned int *devfn);
int pciback_init_devices(struct pciback_device *pdev);
int pciback_publish_pci_roots(struct pciback_device *pdev,
int xen_pcibk_init_devices(struct xen_pcibk_device *pdev);
int xen_pcibk_publish_pci_roots(struct xen_pcibk_device *pdev,
publish_pci_root_cb cb);
void pciback_release_devices(struct pciback_device *pdev);
void xen_pcibk_release_devices(struct xen_pcibk_device *pdev);
/* Handles events from front-end */
irqreturn_t pciback_handle_event(int irq, void *dev_id);
void pciback_do_op(struct work_struct *data);
int pciback_xenbus_register(void);
void pciback_xenbus_unregister(void);
#ifdef CONFIG_PCI_MSI
int pciback_enable_msi(struct pciback_device *pdev,
struct pci_dev *dev, struct xen_pci_op *op);
irqreturn_t xen_pcibk_handle_event(int irq, void *dev_id);
void xen_pcibk_do_op(struct work_struct *data);
int pciback_disable_msi(struct pciback_device *pdev,
struct pci_dev *dev, struct xen_pci_op *op);
int xen_pcibk_xenbus_register(void);
void xen_pcibk_xenbus_unregister(void);
int pciback_enable_msix(struct pciback_device *pdev,
struct pci_dev *dev, struct xen_pci_op *op);
int pciback_disable_msix(struct pciback_device *pdev,
struct pci_dev *dev, struct xen_pci_op *op);
#endif
extern int verbose_request;
void test_and_schedule_op(struct pciback_device *pdev);
void xen_pcibk_test_and_schedule_op(struct xen_pcibk_device *pdev);
#endif
/* Handles shared IRQs that can to device domain and control domain. */
void pciback_irq_handler(struct pci_dev *dev, int reset);
irqreturn_t pciback_guest_interrupt(int irq, void *dev_id);
void xen_pcibk_irq_handler(struct pci_dev *dev, int reset);
......@@ -10,16 +10,19 @@
#include <linux/sched.h>
#include "pciback.h"
#define DRV_NAME "xen-pciback"
int verbose_request;
module_param(verbose_request, int, 0644);
static irqreturn_t xen_pcibk_guest_interrupt(int irq, void *dev_id);
/* Ensure a device is has the fake IRQ handler "turned on/off" and is
* ready to be exported. This MUST be run after pciback_reset_device
* ready to be exported. This MUST be run after xen_pcibk_reset_device
* which does the actual PCI device enable/disable.
*/
void pciback_control_isr(struct pci_dev *dev, int reset)
static void xen_pcibk_control_isr(struct pci_dev *dev, int reset)
{
struct pciback_dev_data *dev_data;
struct xen_pcibk_dev_data *dev_data;
int rc;
int enable = 0;
......@@ -66,7 +69,7 @@ void pciback_control_isr(struct pci_dev *dev, int reset)
if (enable) {
rc = request_irq(dev_data->irq,
pciback_guest_interrupt, IRQF_SHARED,
xen_pcibk_guest_interrupt, IRQF_SHARED,
dev_data->irq_name, dev);
if (rc) {
dev_err(&dev->dev, "%s: failed to install fake IRQ " \
......@@ -92,14 +95,14 @@ void pciback_control_isr(struct pci_dev *dev, int reset)
}
/* Ensure a device is "turned off" and ready to be exported.
* (Also see pciback_config_reset to ensure virtual configuration space is
* (Also see xen_pcibk_config_reset to ensure virtual configuration space is
* ready to be re-exported)
*/
void pciback_reset_device(struct pci_dev *dev)
void xen_pcibk_reset_device(struct pci_dev *dev)
{
u16 cmd;
pciback_control_isr(dev, 1 /* reset device */);
xen_pcibk_control_isr(dev, 1 /* reset device */);
/* Disable devices (but not bridges) */
if (dev->hdr_type == PCI_HEADER_TYPE_NORMAL) {
......@@ -126,43 +129,176 @@ void pciback_reset_device(struct pci_dev *dev)
}
}
}
#ifdef CONFIG_PCI_MSI
static
int xen_pcibk_enable_msi(struct xen_pcibk_device *pdev,
struct pci_dev *dev, struct xen_pci_op *op)
{
struct xen_pcibk_dev_data *dev_data;
int otherend = pdev->xdev->otherend_id;
int status;
if (unlikely(verbose_request))
printk(KERN_DEBUG DRV_NAME ": %s: enable MSI\n", pci_name(dev));
status = pci_enable_msi(dev);
if (status) {
printk(KERN_ERR "error enable msi for guest %x status %x\n",
otherend, status);
op->value = 0;
return XEN_PCI_ERR_op_failed;
}
/* The value the guest needs is actually the IDT vector, not the
* the local domain's IRQ number. */
op->value = dev->irq ? xen_pirq_from_irq(dev->irq) : 0;
if (unlikely(verbose_request))
printk(KERN_DEBUG DRV_NAME ": %s: MSI: %d\n", pci_name(dev),
op->value);
dev_data = pci_get_drvdata(dev);
if (dev_data)
dev_data->ack_intr = 0;
return 0;
}
static
int xen_pcibk_disable_msi(struct xen_pcibk_device *pdev,
struct pci_dev *dev, struct xen_pci_op *op)
{
struct xen_pcibk_dev_data *dev_data;
if (unlikely(verbose_request))
printk(KERN_DEBUG DRV_NAME ": %s: disable MSI\n",
pci_name(dev));
pci_disable_msi(dev);
op->value = dev->irq ? xen_pirq_from_irq(dev->irq) : 0;
if (unlikely(verbose_request))
printk(KERN_DEBUG DRV_NAME ": %s: MSI: %d\n", pci_name(dev),
op->value);
dev_data = pci_get_drvdata(dev);
if (dev_data)
dev_data->ack_intr = 1;
return 0;
}
static
int xen_pcibk_enable_msix(struct xen_pcibk_device *pdev,
struct pci_dev *dev, struct xen_pci_op *op)
{
struct xen_pcibk_dev_data *dev_data;
int i, result;
struct msix_entry *entries;
if (unlikely(verbose_request))
printk(KERN_DEBUG DRV_NAME ": %s: enable MSI-X\n",
pci_name(dev));
if (op->value > SH_INFO_MAX_VEC)
return -EINVAL;
entries = kmalloc(op->value * sizeof(*entries), GFP_KERNEL);
if (entries == NULL)
return -ENOMEM;
for (i = 0; i < op->value; i++) {
entries[i].entry = op->msix_entries[i].entry;
entries[i].vector = op->msix_entries[i].vector;
}
result = pci_enable_msix(dev, entries, op->value);
if (result == 0) {
for (i = 0; i < op->value; i++) {
op->msix_entries[i].entry = entries[i].entry;
if (entries[i].vector)
op->msix_entries[i].vector =
xen_pirq_from_irq(entries[i].vector);
if (unlikely(verbose_request))
printk(KERN_DEBUG DRV_NAME ": %s: " \
"MSI-X[%d]: %d\n",
pci_name(dev), i,
op->msix_entries[i].vector);
}
} else {
printk(KERN_WARNING DRV_NAME ": %s: failed to enable MSI-X: err %d!\n",
pci_name(dev), result);
}
kfree(entries);
op->value = result;
dev_data = pci_get_drvdata(dev);
if (dev_data)
dev_data->ack_intr = 0;
return result;
}
static
int xen_pcibk_disable_msix(struct xen_pcibk_device *pdev,
struct pci_dev *dev, struct xen_pci_op *op)
{
struct xen_pcibk_dev_data *dev_data;
if (unlikely(verbose_request))
printk(KERN_DEBUG DRV_NAME ": %s: disable MSI-X\n",
pci_name(dev));
pci_disable_msix(dev);
/*
* SR-IOV devices (which don't have any legacy IRQ) have
* an undefined IRQ value of zero.
*/
op->value = dev->irq ? xen_pirq_from_irq(dev->irq) : 0;
if (unlikely(verbose_request))
printk(KERN_DEBUG DRV_NAME ": %s: MSI-X: %d\n", pci_name(dev),
op->value);
dev_data = pci_get_drvdata(dev);
if (dev_data)
dev_data->ack_intr = 1;
return 0;
}
#endif
/*
* Now the same evtchn is used for both pcifront conf_read_write request
* as well as pcie aer front end ack. We use a new work_queue to schedule
* pciback conf_read_write service for avoiding confict with aer_core
* xen_pcibk conf_read_write service for avoiding confict with aer_core
* do_recovery job which also use the system default work_queue
*/
void test_and_schedule_op(struct pciback_device *pdev)
void xen_pcibk_test_and_schedule_op(struct xen_pcibk_device *pdev)
{
/* Check that frontend is requesting an operation and that we are not
* already processing a request */
if (test_bit(_XEN_PCIF_active, (unsigned long *)&pdev->sh_info->flags)
&& !test_and_set_bit(_PDEVF_op_active, &pdev->flags)) {
queue_work(pciback_wq, &pdev->op_work);
queue_work(xen_pcibk_wq, &pdev->op_work);
}
/*_XEN_PCIB_active should have been cleared by pcifront. And also make
sure pciback is waiting for ack by checking _PCIB_op_pending*/
sure xen_pcibk is waiting for ack by checking _PCIB_op_pending*/
if (!test_bit(_XEN_PCIB_active, (unsigned long *)&pdev->sh_info->flags)
&& test_bit(_PCIB_op_pending, &pdev->flags)) {
wake_up(&aer_wait_queue);
wake_up(&xen_pcibk_aer_wait_queue);
}
}
/* Performing the configuration space reads/writes must not be done in atomic
* context because some of the pci_* functions can sleep (mostly due to ACPI
* use of semaphores). This function is intended to be called from a work
* queue in process context taking a struct pciback_device as a parameter */
* queue in process context taking a struct xen_pcibk_device as a parameter */
void pciback_do_op(struct work_struct *data)
void xen_pcibk_do_op(struct work_struct *data)
{
struct pciback_device *pdev =
container_of(data, struct pciback_device, op_work);
struct xen_pcibk_device *pdev =
container_of(data, struct xen_pcibk_device, op_work);
struct pci_dev *dev;
struct pciback_dev_data *dev_data = NULL;
struct xen_pcibk_dev_data *dev_data = NULL;
struct xen_pci_op *op = &pdev->sh_info->op;
int test_intx = 0;
dev = pciback_get_pci_dev(pdev, op->domain, op->bus, op->devfn);
dev = xen_pcibk_get_pci_dev(pdev, op->domain, op->bus, op->devfn);
if (dev == NULL)
op->err = XEN_PCI_ERR_dev_not_found;
......@@ -172,25 +308,25 @@ void pciback_do_op(struct work_struct *data)
test_intx = dev_data->enable_intx;
switch (op->cmd) {
case XEN_PCI_OP_conf_read:
op->err = pciback_config_read(dev,
op->err = xen_pcibk_config_read(dev,
op->offset, op->size, &op->value);
break;
case XEN_PCI_OP_conf_write:
op->err = pciback_config_write(dev,
op->err = xen_pcibk_config_write(dev,
op->offset, op->size, op->value);
break;
#ifdef CONFIG_PCI_MSI
case XEN_PCI_OP_enable_msi:
op->err = pciback_enable_msi(pdev, dev, op);
op->err = xen_pcibk_enable_msi(pdev, dev, op);
break;
case XEN_PCI_OP_disable_msi:
op->err = pciback_disable_msi(pdev, dev, op);
op->err = xen_pcibk_disable_msi(pdev, dev, op);
break;
case XEN_PCI_OP_enable_msix:
op->err = pciback_enable_msix(pdev, dev, op);
op->err = xen_pcibk_enable_msix(pdev, dev, op);
break;
case XEN_PCI_OP_disable_msix:
op->err = pciback_disable_msix(pdev, dev, op);
op->err = xen_pcibk_disable_msix(pdev, dev, op);
break;
#endif
default:
......@@ -201,7 +337,7 @@ void pciback_do_op(struct work_struct *data)
if (!op->err && dev && dev_data) {
/* Transition detected */
if ((dev_data->enable_intx != test_intx))
pciback_control_isr(dev, 0 /* no reset */);
xen_pcibk_control_isr(dev, 0 /* no reset */);
}
/* Tell the driver domain that we're done. */
wmb();
......@@ -216,21 +352,21 @@ void pciback_do_op(struct work_struct *data)
/* Check to see if the driver domain tried to start another request in
* between clearing _XEN_PCIF_active and clearing _PDEVF_op_active.
*/
test_and_schedule_op(pdev);
xen_pcibk_test_and_schedule_op(pdev);
}
irqreturn_t pciback_handle_event(int irq, void *dev_id)
irqreturn_t xen_pcibk_handle_event(int irq, void *dev_id)
{
struct pciback_device *pdev = dev_id;
struct xen_pcibk_device *pdev = dev_id;
test_and_schedule_op(pdev);
xen_pcibk_test_and_schedule_op(pdev);
return IRQ_HANDLED;
}
irqreturn_t pciback_guest_interrupt(int irq, void *dev_id)
static irqreturn_t xen_pcibk_guest_interrupt(int irq, void *dev_id)
{
struct pci_dev *dev = (struct pci_dev *)dev_id;
struct pciback_dev_data *dev_data = pci_get_drvdata(dev);
struct xen_pcibk_dev_data *dev_data = pci_get_drvdata(dev);
if (dev_data->isr_on && dev_data->ack_intr) {
dev_data->handled++;
......
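To tie the two workqueue comments together: the interrupt path only schedules work, and the sleeping pci_* accesses run later from xen_pcibk_do_op() in process context on the driver's own queue. A minimal sketch of the presumed work-item setup; xenbus.c is not shown in this excerpt, so the initialization below is an assumption based on the declarations in pciback.h:

#include <linux/workqueue.h>
#include "pciback.h"

/* Assumed to happen once per backend device, e.g. from xenbus.c. */
static void demo_init_op_work(struct xen_pcibk_device *pdev)
{
        INIT_WORK(&pdev->op_work, xen_pcibk_do_op);
}

/* Flow as implemented above: xen_pcibk_handle_event() (IRQ context) calls
 * xen_pcibk_test_and_schedule_op(), which does
 * queue_work(xen_pcibk_wq, &pdev->op_work); xen_pcibk_do_op() then performs
 * the config-space and MSI/MSI-X operations where sleeping is allowed. */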
/*
* PCI Backend - Provides a Virtual PCI bus (with real devices)
* to the frontend
*
* Author: Ryan Wilson <hap9@epoch.ncsc.mil> (vpci.c)
* Author: Tristan Gingold <tristan.gingold@bull.net>, from vpci.c
*/
#include <linux/list.h>
#include <linux/slab.h>
#include <linux/pci.h>
#include <linux/spinlock.h>
#include "pciback.h"
/* There are at most 32 slots in a pci bus. */
#define PCI_SLOT_MAX 32
#define PCI_BUS_NBR 2
struct slot_dev_data {
/* Access to dev_list must be protected by lock */
struct pci_dev *slots[PCI_BUS_NBR][PCI_SLOT_MAX];
spinlock_t lock;
};
struct pci_dev *pciback_get_pci_dev(struct pciback_device *pdev,
unsigned int domain, unsigned int bus,
unsigned int devfn)
{
struct pci_dev *dev = NULL;
struct slot_dev_data *slot_dev = pdev->pci_dev_data;
unsigned long flags;
if (domain != 0 || PCI_FUNC(devfn) != 0)
return NULL;
if (PCI_SLOT(devfn) >= PCI_SLOT_MAX || bus >= PCI_BUS_NBR)
return NULL;
spin_lock_irqsave(&slot_dev->lock, flags);
dev = slot_dev->slots[bus][PCI_SLOT(devfn)];
spin_unlock_irqrestore(&slot_dev->lock, flags);
return dev;
}
int pciback_add_pci_dev(struct pciback_device *pdev, struct pci_dev *dev,
int devid, publish_pci_dev_cb publish_cb)
{
int err = 0, slot, bus;
struct slot_dev_data *slot_dev = pdev->pci_dev_data;
unsigned long flags;
if ((dev->class >> 24) == PCI_BASE_CLASS_BRIDGE) {
err = -EFAULT;
xenbus_dev_fatal(pdev->xdev, err,
"Can't export bridges on the virtual PCI bus");
goto out;
}
spin_lock_irqsave(&slot_dev->lock, flags);
/* Assign to a new slot on the virtual PCI bus */
for (bus = 0; bus < PCI_BUS_NBR; bus++)
for (slot = 0; slot < PCI_SLOT_MAX; slot++) {
if (slot_dev->slots[bus][slot] == NULL) {
printk(KERN_INFO
"pciback: slot: %s: assign to virtual "
"slot %d, bus %d\n",
pci_name(dev), slot, bus);
slot_dev->slots[bus][slot] = dev;
goto unlock;
}
}
err = -ENOMEM;
xenbus_dev_fatal(pdev->xdev, err,
"No more space on root virtual PCI bus");
unlock:
spin_unlock_irqrestore(&slot_dev->lock, flags);
/* Publish this device. */
if (!err)
err = publish_cb(pdev, 0, 0, PCI_DEVFN(slot, 0), devid);
out:
return err;
}
void pciback_release_pci_dev(struct pciback_device *pdev, struct pci_dev *dev)
{
int slot, bus;
struct slot_dev_data *slot_dev = pdev->pci_dev_data;
struct pci_dev *found_dev = NULL;
unsigned long flags;
spin_lock_irqsave(&slot_dev->lock, flags);
for (bus = 0; bus < PCI_BUS_NBR; bus++)
for (slot = 0; slot < PCI_SLOT_MAX; slot++) {
if (slot_dev->slots[bus][slot] == dev) {
slot_dev->slots[bus][slot] = NULL;
found_dev = dev;
goto out;
}
}
out:
spin_unlock_irqrestore(&slot_dev->lock, flags);
if (found_dev)
pcistub_put_pci_dev(found_dev);
}
int pciback_init_devices(struct pciback_device *pdev)
{
int slot, bus;
struct slot_dev_data *slot_dev;
slot_dev = kmalloc(sizeof(*slot_dev), GFP_KERNEL);
if (!slot_dev)
return -ENOMEM;
spin_lock_init(&slot_dev->lock);
for (bus = 0; bus < PCI_BUS_NBR; bus++)
for (slot = 0; slot < PCI_SLOT_MAX; slot++)
slot_dev->slots[bus][slot] = NULL;
pdev->pci_dev_data = slot_dev;
return 0;
}
int pciback_publish_pci_roots(struct pciback_device *pdev,
publish_pci_root_cb publish_cb)
{
/* The Virtual PCI bus has only one root */
return publish_cb(pdev, 0, 0);
}
void pciback_release_devices(struct pciback_device *pdev)
{
int slot, bus;
struct slot_dev_data *slot_dev = pdev->pci_dev_data;
struct pci_dev *dev;
for (bus = 0; bus < PCI_BUS_NBR; bus++)
for (slot = 0; slot < PCI_SLOT_MAX; slot++) {
dev = slot_dev->slots[bus][slot];
if (dev != NULL)
pcistub_put_pci_dev(dev);
}
kfree(slot_dev);
pdev->pci_dev_data = NULL;
}
int pciback_get_pcifront_dev(struct pci_dev *pcidev,
struct pciback_device *pdev,
unsigned int *domain, unsigned int *bus,
unsigned int *devfn)
{
int slot, busnr;
struct slot_dev_data *slot_dev = pdev->pci_dev_data;
struct pci_dev *dev;
int found = 0;
unsigned long flags;
spin_lock_irqsave(&slot_dev->lock, flags);
for (busnr = 0; busnr < PCI_BUS_NBR; bus++)
for (slot = 0; slot < PCI_SLOT_MAX; slot++) {
dev = slot_dev->slots[busnr][slot];
if (dev && dev->bus->number == pcidev->bus->number
&& dev->devfn == pcidev->devfn
&& pci_domain_nr(dev->bus) ==
pci_domain_nr(pcidev->bus)) {
found = 1;
*domain = 0;
*bus = busnr;
*devfn = PCI_DEVFN(slot, 0);
goto out;
}
}
out:
spin_unlock_irqrestore(&slot_dev->lock, flags);
return found;
}
......@@ -12,6 +12,7 @@
#include "pciback.h"
#define PCI_SLOT_MAX 32
#define DRV_NAME "xen-pciback"
struct vpci_dev_data {
/* Access to dev_list must be protected by lock */
......@@ -24,7 +25,7 @@ static inline struct list_head *list_first(struct list_head *head)
return head->next;
}
struct pci_dev *pciback_get_pci_dev(struct pciback_device *pdev,
struct pci_dev *xen_pcibk_get_pci_dev(struct xen_pcibk_device *pdev,
unsigned int domain, unsigned int bus,
unsigned int devfn)
{
......@@ -62,7 +63,7 @@ static inline int match_slot(struct pci_dev *l, struct pci_dev *r)
return 0;
}
int pciback_add_pci_dev(struct pciback_device *pdev, struct pci_dev *dev,
int xen_pcibk_add_pci_dev(struct xen_pcibk_device *pdev, struct pci_dev *dev,
int devid, publish_pci_dev_cb publish_cb)
{
int err = 0, slot, func = -1;
......@@ -96,7 +97,7 @@ int pciback_add_pci_dev(struct pciback_device *pdev, struct pci_dev *dev,
struct pci_dev_entry, list);
if (match_slot(dev, t->dev)) {
pr_info("pciback: vpci: %s: "
pr_info(DRV_NAME ": vpci: %s: "
"assign to virtual slot %d func %d\n",
pci_name(dev), slot,
PCI_FUNC(dev->devfn));
......@@ -111,8 +112,8 @@ int pciback_add_pci_dev(struct pciback_device *pdev, struct pci_dev *dev,
/* Assign to a new slot on the virtual PCI bus */
for (slot = 0; slot < PCI_SLOT_MAX; slot++) {
if (list_empty(&vpci_dev->dev_list[slot])) {
printk(KERN_INFO
"pciback: vpci: %s: assign to virtual slot %d\n",
printk(KERN_INFO DRV_NAME
": vpci: %s: assign to virtual slot %d\n",
pci_name(dev), slot);
list_add_tail(&dev_entry->list,
&vpci_dev->dev_list[slot]);
......@@ -136,7 +137,8 @@ int pciback_add_pci_dev(struct pciback_device *pdev, struct pci_dev *dev,
return err;
}
void pciback_release_pci_dev(struct pciback_device *pdev, struct pci_dev *dev)
void xen_pcibk_release_pci_dev(struct xen_pcibk_device *pdev,
struct pci_dev *dev)
{
int slot;
struct vpci_dev_data *vpci_dev = pdev->pci_dev_data;
......@@ -165,7 +167,7 @@ void pciback_release_pci_dev(struct pciback_device *pdev, struct pci_dev *dev)
pcistub_put_pci_dev(found_dev);
}
int pciback_init_devices(struct pciback_device *pdev)
int xen_pcibk_init_devices(struct xen_pcibk_device *pdev)
{
int slot;
struct vpci_dev_data *vpci_dev;
......@@ -184,14 +186,14 @@ int pciback_init_devices(struct pciback_device *pdev)
return 0;
}
int pciback_publish_pci_roots(struct pciback_device *pdev,
int xen_pcibk_publish_pci_roots(struct xen_pcibk_device *pdev,
publish_pci_root_cb publish_cb)
{
/* The Virtual PCI bus has only one root */
return publish_cb(pdev, 0, 0);
}
void pciback_release_devices(struct pciback_device *pdev)
void xen_pcibk_release_devices(struct xen_pcibk_device *pdev)
{
int slot;
struct vpci_dev_data *vpci_dev = pdev->pci_dev_data;
......@@ -210,8 +212,8 @@ void pciback_release_devices(struct pciback_device *pdev)
pdev->pci_dev_data = NULL;
}
int pciback_get_pcifront_dev(struct pci_dev *pcidev,
struct pciback_device *pdev,
int xen_pcibk_get_pcifront_dev(struct pci_dev *pcidev,
struct xen_pcibk_device *pdev,
unsigned int *domain, unsigned int *bus,
unsigned int *devfn)
{
......