Commit 692b8c66 authored by Linus Torvalds

Merge tag 'for-linus-4.5-rc5-tag' of git://git.kernel.org/pub/scm/linux/kernel/git/xen/tip

Pull xen bug fixes from David Vrabel:

 - Two scsiback fixes (resource leak and spurious warning).

 - Fix DMA mapping of compound pages on arm/arm64.

 - Fix some pciback regressions in MSI-X handling.

 - Fix a pcifront crash due to uninitialized state.

* tag 'for-linus-4.5-rc5-tag' of git://git.kernel.org/pub/scm/linux/kernel/git/xen/tip:
  xen/pcifront: Fix mysterious crashes when NUMA locality information was extracted.
  xen/pcifront: Report the errors better.
  xen/pciback: Save the number of MSI-X entries to be copied later.
  xen/pciback: Check PF instead of VF for PCI_COMMAND_MEMORY
  xen: fix potential integer overflow in queue_reply
  xen/arm: correctly handle DMA mapping of compound pages
  xen/scsiback: avoid warnings when adding multiple LUNs to a domain
  xen/scsiback: correct frontend counting
parents dea08e60 4d8c8bd6
@@ -35,14 +35,21 @@ static inline void xen_dma_map_page(struct device *hwdev, struct page *page,
              dma_addr_t dev_addr, unsigned long offset, size_t size,
              enum dma_data_direction dir, struct dma_attrs *attrs)
 {
-        bool local = XEN_PFN_DOWN(dev_addr) == page_to_xen_pfn(page);
+        unsigned long page_pfn = page_to_xen_pfn(page);
+        unsigned long dev_pfn = XEN_PFN_DOWN(dev_addr);
+        unsigned long compound_pages =
+                (1<<compound_order(page)) * XEN_PFN_PER_PAGE;
+        bool local = (page_pfn <= dev_pfn) &&
+                (dev_pfn - page_pfn < compound_pages);
+
         /*
-         * Dom0 is mapped 1:1, while the Linux page can be spanned accross
-         * multiple Xen page, it's not possible to have a mix of local and
-         * foreign Xen page. So if the first xen_pfn == mfn the page is local
-         * otherwise it's a foreign page grant-mapped in dom0. If the page is
-         * local we can safely call the native dma_ops function, otherwise we
-         * call the xen specific function.
+         * Dom0 is mapped 1:1, while the Linux page can span across
+         * multiple Xen pages, it's not possible for it to contain a
+         * mix of local and foreign Xen pages. So if the first xen_pfn
+         * == mfn the page is local otherwise it's a foreign page
+         * grant-mapped in dom0. If the page is local we can safely
+         * call the native dma_ops function, otherwise we call the xen
+         * specific function.
          */
         if (local)
                 __generic_dma_ops(hwdev)->map_page(hwdev, page, offset, size, dir, attrs);
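The rewritten locality test above is the heart of the arm/arm64 fix: a dom0 compound page is mapped 1:1, so dev_addr is local exactly when its Xen PFN falls anywhere inside the compound page, not only when it equals the first Xen PFN. As a rough illustration, here is a minimal user-space sketch of that range check; the fixed constants stand in for page_to_xen_pfn(), compound_order() and XEN_PFN_PER_PAGE and are assumptions for the example, not values from the patch:

    #include <stdbool.h>
    #include <stdio.h>

    /* Assumed stand-ins: a compound page of order 2 (four kernel pages),
     * 16 Xen pages per kernel page (64 KiB kernel page, 4 KiB Xen page). */
    #define XEN_PFN_PER_PAGE 16UL
    #define COMPOUND_ORDER   2UL

    static bool page_is_local(unsigned long page_pfn, unsigned long dev_pfn)
    {
        unsigned long compound_pages =
                (1UL << COMPOUND_ORDER) * XEN_PFN_PER_PAGE;

        /* The old test was dev_pfn == page_pfn, which matched only the
         * first 4 KiB subpage; every other subpage of the compound page
         * was wrongly treated as foreign. The fix accepts the range. */
        return page_pfn <= dev_pfn && dev_pfn - page_pfn < compound_pages;
    }

    int main(void)
    {
        unsigned long page_pfn = 0x1000;

        printf("%d\n", page_is_local(page_pfn, 0x1000)); /* 1: first subpage */
        printf("%d\n", page_is_local(page_pfn, 0x103f)); /* 1: last of 64 subpages */
        printf("%d\n", page_is_local(page_pfn, 0x1040)); /* 0: past the compound page */
        return 0;
    }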
@@ -57,7 +57,7 @@ static inline int xen_pci_frontend_enable_msi(struct pci_dev *dev,
 {
         if (xen_pci_frontend && xen_pci_frontend->enable_msi)
                 return xen_pci_frontend->enable_msi(dev, vectors);
-        return -ENODEV;
+        return -ENOSYS;
 }

 static inline void xen_pci_frontend_disable_msi(struct pci_dev *dev)
 {
@@ -69,7 +69,7 @@ static inline int xen_pci_frontend_enable_msix(struct pci_dev *dev,
 {
         if (xen_pci_frontend && xen_pci_frontend->enable_msix)
                 return xen_pci_frontend->enable_msix(dev, vectors, nvec);
-        return -ENODEV;
+        return -ENOSYS;
 }

 static inline void xen_pci_frontend_disable_msix(struct pci_dev *dev)
 {
@@ -196,7 +196,10 @@ static int xen_setup_msi_irqs(struct pci_dev *dev, int nvec, int type)
         return 0;

 error:
-        dev_err(&dev->dev, "Xen PCI frontend has not registered MSI/MSI-X support!\n");
+        if (ret == -ENOSYS)
+                dev_err(&dev->dev, "Xen PCI frontend has not registered MSI/MSI-X support!\n");
+        else if (ret)
+                dev_err(&dev->dev, "Xen PCI frontend error: %d!\n", ret);
 free:
         kfree(v);
         return ret;
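Taken together, the two files above change the contract of the frontend stubs: -ENOSYS now means "no Xen PCI frontend registered", so the error path in xen_setup_msi_irqs() can report a missing frontend and a genuine backend failure differently. A minimal user-space sketch of that disambiguation, with a hypothetical report() helper standing in for the dev_err() calls:

    #include <errno.h>
    #include <stdio.h>

    /* Hypothetical reporter mirroring the error: path above: -ENOSYS
     * means the capability was never registered; any other non-zero
     * value is a real failure that deserves its own message. */
    static void report(int ret)
    {
        if (ret == -ENOSYS)
            fprintf(stderr, "frontend has not registered MSI/MSI-X support\n");
        else if (ret)
            fprintf(stderr, "frontend error: %d\n", ret);
    }

    int main(void)
    {
        report(-ENOSYS); /* capability missing */
        report(-EINVAL); /* genuine failure */
        return 0;
    }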
@@ -53,7 +53,7 @@ struct pcifront_device {
 };

 struct pcifront_sd {
-        int domain;
+        struct pci_sysdata sd;
         struct pcifront_device *pdev;
 };

@@ -67,7 +67,9 @@ static inline void pcifront_init_sd(struct pcifront_sd *sd,
                                     unsigned int domain, unsigned int bus,
                                     struct pcifront_device *pdev)
 {
-        sd->domain = domain;
+        /* Because we do not expose that information via XenBus. */
+        sd->sd.node = first_online_node;
+        sd->sd.domain = domain;
         sd->pdev = pdev;
 }

@@ -468,8 +470,8 @@ static int pcifront_scan_root(struct pcifront_device *pdev,
         dev_info(&pdev->xdev->dev, "Creating PCI Frontend Bus %04x:%02x\n",
                  domain, bus);

-        bus_entry = kmalloc(sizeof(*bus_entry), GFP_KERNEL);
-        sd = kmalloc(sizeof(*sd), GFP_KERNEL);
+        bus_entry = kzalloc(sizeof(*bus_entry), GFP_KERNEL);
+        sd = kzalloc(sizeof(*sd), GFP_KERNEL);
         if (!bus_entry || !sd) {
                 err = -ENOMEM;
                 goto err_out;
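For context on the pcifront crash: generic x86 PCI code casts bus->sysdata to struct pci_sysdata and reads its node field for NUMA locality, so a driver-private sysdata must embed pci_sysdata as its first member and have it fully initialized; hence the switch to kzalloc and the first_online_node default above (XenBus does not expose NUMA information). A minimal user-space sketch of the embedding pattern, using a simplified two-field pci_sysdata as an assumption:

    #include <stdio.h>
    #include <stdlib.h>

    /* Simplified stand-in for the x86 struct pci_sysdata. */
    struct pci_sysdata { int domain; int node; };

    /* Driver-private sysdata: the common base must stay the first
     * member so generic code can cast the pointer safely. */
    struct pcifront_sd {
        struct pci_sysdata sd;
        void *pdev;
    };

    int main(void)
    {
        struct pcifront_sd *sd = calloc(1, sizeof(*sd)); /* zeroed, like kzalloc */
        if (!sd)
            return 1;

        sd->sd.domain = 0;
        sd->sd.node = 0; /* stand-in for first_online_node */

        /* Generic code only ever sees the base struct: */
        struct pci_sysdata *base = (struct pci_sysdata *)sd;
        printf("domain=%d node=%d\n", base->domain, base->node);

        free(sd);
        return 0;
    }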
@@ -227,8 +227,9 @@ int xen_pcibk_enable_msix(struct xen_pcibk_device *pdev,
         /*
          * PCI_COMMAND_MEMORY must be enabled, otherwise we may not be able
          * to access the BARs where the MSI-X entries reside.
+         * But VF devices are unique in that the PF needs to be checked.
          */
-        pci_read_config_word(dev, PCI_COMMAND, &cmd);
+        pci_read_config_word(pci_physfn(dev), PCI_COMMAND, &cmd);
         if (dev->msi_enabled || !(cmd & PCI_COMMAND_MEMORY))
                 return -ENXIO;

@@ -332,6 +333,9 @@ void xen_pcibk_do_op(struct work_struct *data)
         struct xen_pcibk_dev_data *dev_data = NULL;
         struct xen_pci_op *op = &pdev->op;
         int test_intx = 0;
+#ifdef CONFIG_PCI_MSI
+        unsigned int nr = 0;
+#endif

         *op = pdev->sh_info->op;
         barrier();
@@ -360,6 +364,7 @@ void xen_pcibk_do_op(struct work_struct *data)
                         op->err = xen_pcibk_disable_msi(pdev, dev, op);
                         break;
                 case XEN_PCI_OP_enable_msix:
+                        nr = op->value;
                         op->err = xen_pcibk_enable_msix(pdev, dev, op);
                         break;
                 case XEN_PCI_OP_disable_msix:
@@ -382,7 +387,7 @@ void xen_pcibk_do_op(struct work_struct *data)
         if (op->cmd == XEN_PCI_OP_enable_msix && op->err == 0) {
                 unsigned int i;

-                for (i = 0; i < op->value; i++)
+                for (i = 0; i < nr; i++)
                         pdev->sh_info->op.msix_entries[i].vector =
                                 op->msix_entries[i].vector;
         }
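The pciback change is subtle: xen_pcibk_enable_msix() reuses op->value for its own result, so the copy-back loop at the bottom must iterate over the count saved in nr before the call, which also bounds how many msix_entries are written back to the shared page. A minimal user-space sketch of the save-the-count-first pattern, with hypothetical simplified types:

    #include <stdio.h>

    #define MAX_ENTRIES 8

    /* Hypothetical op structure: "value" is the requested vector count
     * on input, but the enable helper reuses it for its result. */
    struct op { unsigned int value; int msix_vector[MAX_ENTRIES]; };

    static int enable_msix(struct op *op)
    {
        unsigned int i;

        for (i = 0; i < op->value && i < MAX_ENTRIES; i++)
            op->msix_vector[i] = 100 + i; /* pretend allocation */
        op->value = 0;                    /* the result overwrites the count */
        return 0;
    }

    int main(void)
    {
        struct op op = { .value = 4 };
        unsigned int nr = op.value;       /* snapshot taken BEFORE the call */

        if (enable_msix(&op) == 0) {
            /* Copy back nr entries, never the now-stale op.value. */
            for (unsigned int i = 0; i < nr; i++)
                printf("vector[%u] = %d\n", i, op.msix_vector[i]);
        }
        return 0;
    }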
@@ -848,6 +848,24 @@ static int scsiback_map(struct vscsibk_info *info)
         return scsiback_init_sring(info, ring_ref, evtchn);
 }

+/*
+  Check for a translation entry being present
+*/
+static struct v2p_entry *scsiback_chk_translation_entry(
+        struct vscsibk_info *info, struct ids_tuple *v)
+{
+        struct list_head *head = &(info->v2p_entry_lists);
+        struct v2p_entry *entry;
+
+        list_for_each_entry(entry, head, l)
+                if ((entry->v.chn == v->chn) &&
+                    (entry->v.tgt == v->tgt) &&
+                    (entry->v.lun == v->lun))
+                        return entry;
+
+        return NULL;
+}
+
 /*
   Add a new translation entry
 */
@@ -855,9 +873,7 @@ static int scsiback_add_translation_entry(struct vscsibk_info *info,
                         char *phy, struct ids_tuple *v)
 {
         int err = 0;
-        struct v2p_entry *entry;
         struct v2p_entry *new;
-        struct list_head *head = &(info->v2p_entry_lists);
         unsigned long flags;
         char *lunp;
         unsigned long long unpacked_lun;
@@ -917,15 +933,10 @@ static int scsiback_add_translation_entry(struct vscsibk_info *info,
         spin_lock_irqsave(&info->v2p_lock, flags);

         /* Check double assignment to identical virtual ID */
-        list_for_each_entry(entry, head, l) {
-                if ((entry->v.chn == v->chn) &&
-                    (entry->v.tgt == v->tgt) &&
-                    (entry->v.lun == v->lun)) {
-                        pr_warn("Virtual ID is already used. Assignment was not performed.\n");
-                        err = -EEXIST;
-                        goto out;
-                }
-        }
+        if (scsiback_chk_translation_entry(info, v)) {
+                pr_warn("Virtual ID is already used. Assignment was not performed.\n");
+                err = -EEXIST;
+                goto out;
+        }

         /* Create a new translation entry and add to the list */
@@ -933,18 +944,18 @@ static int scsiback_add_translation_entry(struct vscsibk_info *info,
         new->v = *v;
         new->tpg = tpg;
         new->lun = unpacked_lun;
-        list_add_tail(&new->l, head);
+        list_add_tail(&new->l, &info->v2p_entry_lists);

 out:
         spin_unlock_irqrestore(&info->v2p_lock, flags);

 out_free:
-        mutex_lock(&tpg->tv_tpg_mutex);
-        tpg->tv_tpg_fe_count--;
-        mutex_unlock(&tpg->tv_tpg_mutex);
-
-        if (err)
+        if (err) {
+                mutex_lock(&tpg->tv_tpg_mutex);
+                tpg->tv_tpg_fe_count--;
+                mutex_unlock(&tpg->tv_tpg_mutex);
                 kfree(new);
+        }

         return err;
 }
@@ -956,39 +967,40 @@ static void __scsiback_del_translation_entry(struct v2p_entry *entry)
 }

 /*
-  Delete the translation entry specfied
+  Delete the translation entry specified
 */
 static int scsiback_del_translation_entry(struct vscsibk_info *info,
                                           struct ids_tuple *v)
 {
         struct v2p_entry *entry;
-        struct list_head *head = &(info->v2p_entry_lists);
         unsigned long flags;
+        int ret = 0;

         spin_lock_irqsave(&info->v2p_lock, flags);
         /* Find out the translation entry specified */
-        list_for_each_entry(entry, head, l) {
-                if ((entry->v.chn == v->chn) &&
-                    (entry->v.tgt == v->tgt) &&
-                    (entry->v.lun == v->lun)) {
-                        goto found;
-                }
-        }
-
-        spin_unlock_irqrestore(&info->v2p_lock, flags);
-        return 1;
-
-found:
-        /* Delete the translation entry specfied */
-        __scsiback_del_translation_entry(entry);
+        entry = scsiback_chk_translation_entry(info, v);
+        if (entry)
+                __scsiback_del_translation_entry(entry);
+        else
+                ret = -ENOENT;

         spin_unlock_irqrestore(&info->v2p_lock, flags);
-        return 0;
+        return ret;
 }

 static void scsiback_do_add_lun(struct vscsibk_info *info, const char *state,
                                 char *phy, struct ids_tuple *vir, int try)
 {
+        struct v2p_entry *entry;
+        unsigned long flags;
+
+        if (try) {
+                spin_lock_irqsave(&info->v2p_lock, flags);
+                entry = scsiback_chk_translation_entry(info, vir);
+                spin_unlock_irqrestore(&info->v2p_lock, flags);
+                if (entry)
+                        return;
+        }
+
         if (!scsiback_add_translation_entry(info, phy, vir)) {
                 if (xenbus_printf(XBT_NIL, info->dev->nodename, state,
                                   "%d", XenbusStateInitialised)) {
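The scsiback refactor centralizes the (chn, tgt, lun) comparison in scsiback_chk_translation_entry(), called under v2p_lock by all three users, and lets scsiback_do_add_lun() probe for an existing entry on a "try" add instead of triggering the -EEXIST warning. A rough user-space sketch of the lookup-under-lock pattern, with pthread primitives and simplified types standing in for the kernel ones:

    #include <pthread.h>
    #include <stdbool.h>
    #include <stddef.h>

    struct ids_tuple { int chn, tgt, lun; };
    struct v2p_entry { struct ids_tuple v; struct v2p_entry *next; };

    static pthread_mutex_t v2p_lock = PTHREAD_MUTEX_INITIALIZER;
    static struct v2p_entry *v2p_list;

    /* Single comparison site; the caller must hold v2p_lock. */
    static struct v2p_entry *chk_translation_entry(const struct ids_tuple *v)
    {
        for (struct v2p_entry *e = v2p_list; e; e = e->next)
            if (e->v.chn == v->chn && e->v.tgt == v->tgt &&
                e->v.lun == v->lun)
                return e;
        return NULL;
    }

    /* "try" caller: silently skip when already present instead of
     * warning about a duplicate assignment. */
    static bool already_mapped(const struct ids_tuple *v)
    {
        pthread_mutex_lock(&v2p_lock);
        struct v2p_entry *e = chk_translation_entry(v);
        pthread_mutex_unlock(&v2p_lock);
        return e != NULL;
    }

    int main(void)
    {
        struct ids_tuple v = { 0, 1, 2 };
        return already_mapped(&v) ? 1 : 0;
    }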
@@ -188,6 +188,8 @@ static int queue_reply(struct list_head *queue, const void *data, size_t len)
         if (len == 0)
                 return 0;
+        if (len > XENSTORE_PAYLOAD_MAX)
+                return -EINVAL;

         rb = kmalloc(sizeof(*rb) + len, GFP_KERNEL);
         if (rb == NULL)
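The new bound closes an integer overflow: the allocation size is computed as sizeof(*rb) + len, so a huge caller-supplied len can wrap the addition, kmalloc then returns a small buffer, and the subsequent copy of len bytes overruns it. A minimal user-space sketch of the wrap, with a simplified read_buffer as an assumption:

    #include <stddef.h>
    #include <stdint.h>
    #include <stdio.h>

    /* Simplified stand-in for the kernel's read_buffer. */
    struct read_buffer { size_t cons, len; char msg[]; };

    int main(void)
    {
        size_t len = SIZE_MAX - 4;                       /* attacker-chosen */
        size_t alloc = sizeof(struct read_buffer) + len; /* wraps around */

        printf("header=%zu alloc=%zu len=%zu\n",
               sizeof(struct read_buffer), alloc, len);
        /* alloc is now tiny compared to len, so copying len bytes into
         * the buffer would overflow it; rejecting len above
         * XENSTORE_PAYLOAD_MAX closes the hole before the allocation. */
        return 0;
    }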