Commit b8277600 authored by Konrad Rzeszutek Wilk

xen/swiotlb: Use the swiotlb_late_init_with_tbl to init Xen-SWIOTLB late when PV PCI is used.

With this patch we provide the functionality to initialize Xen-SWIOTLB
late in the bootup cycle - specifically for the Xen PCI frontend. We
still work correctly if the user has supplied 'iommu=soft' on the Linux
command line.

Note: We cannot depend on after_bootmem to determine automatically
whether this is an early or a late call. When the PCI IOMMUs are
initialized, after_bootmem is already set but many "other" subsystems
are not yet up, so the caller passes an explicit 'early' flag instead.
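
For illustration only (not part of this patch): a minimal sketch of how a
late consumer such as the Xen PCI frontend might use the new
pci_xen_swiotlb_init_late() hook before its first DMA mapping. The probe
function and its name are hypothetical.

	#include <linux/pci.h>
	#include <asm/xen/swiotlb-xen.h>

	/*
	 * Hypothetical probe path of a PV PCI consumer: make sure a
	 * Xen-SWIOTLB bounce buffer exists before any DMA is mapped.
	 * If the early boot path already set one up (xen_swiotlb != 0),
	 * the call below simply returns 0.
	 */
	static int example_pvpci_probe(struct pci_dev *dev)
	{
		int rc = pci_xen_swiotlb_init_late();

		if (rc)
			return rc;	/* no machine-contiguous buffer available */

		return pci_enable_device(dev);
	}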

CC: FUJITA Tomonori <fujita.tomonori@lab.ntt.co.jp>
[v1: Fix smatch warnings]
[v2: Added check for xen_swiotlb]
[v3: Rebased with new xen-swiotlb changes]
[v4: squashed "xen/swiotlb: Depending on after_bootmem is not correct" in]
Reviewed-by: Stefano Stabellini <stefano.stabellini@eu.citrix.com>
Signed-off-by: Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>
parent 5bab7864
@@ -5,10 +5,12 @@
 extern int xen_swiotlb;
 extern int __init pci_xen_swiotlb_detect(void);
 extern void __init pci_xen_swiotlb_init(void);
+extern int pci_xen_swiotlb_init_late(void);
 #else
 #define xen_swiotlb (0)
 static inline int __init pci_xen_swiotlb_detect(void) { return 0; }
 static inline void __init pci_xen_swiotlb_init(void) { }
+static inline int pci_xen_swiotlb_init_late(void) { return -ENXIO; }
 #endif
 
 #endif /* _ASM_X86_SWIOTLB_XEN_H */
@@ -12,7 +12,7 @@
 #include <asm/iommu.h>
 #include <asm/dma.h>
 #endif
-
+#include <linux/export.h>
 int xen_swiotlb __read_mostly;
 
 static struct dma_map_ops xen_swiotlb_dma_ops = {
@@ -69,13 +69,33 @@ int __init pci_xen_swiotlb_detect(void)
 void __init pci_xen_swiotlb_init(void)
 {
 	if (xen_swiotlb) {
-		xen_swiotlb_init(1);
+		xen_swiotlb_init(1, true /* early */);
 		dma_ops = &xen_swiotlb_dma_ops;
 
 		/* Make sure ACS will be enabled */
 		pci_request_acs();
 	}
 }
+
+int pci_xen_swiotlb_init_late(void)
+{
+	int rc;
+
+	if (xen_swiotlb)
+		return 0;
+
+	rc = xen_swiotlb_init(1, false /* late */);
+	if (rc)
+		return rc;
+
+	dma_ops = &xen_swiotlb_dma_ops;
+	/* Make sure ACS will be enabled */
+	pci_request_acs();
+
+	return 0;
+}
+EXPORT_SYMBOL_GPL(pci_xen_swiotlb_init_late);
+
 IOMMU_INIT_FINISH(pci_xen_swiotlb_detect,
 		  0,
 		  pci_xen_swiotlb_init,
...
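
Design note on the hunk above: the early path is still driven by the
IOMMU_INIT_FINISH table once pci_xen_swiotlb_detect() selects Xen-SWIOTLB
during boot, while pci_xen_swiotlb_init_late() is exported via
EXPORT_SYMBOL_GPL so that the Xen PCI frontend can set up the bounce
buffer on demand after boot. If the early path already ran (xen_swiotlb
is set), the late call returns 0 without doing any work, so callers may
invoke it unconditionally before their first DMA mapping.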
@@ -176,9 +176,9 @@ static const char *xen_swiotlb_error(enum xen_swiotlb_err err)
 	}
 	return "";
 }
-void __init xen_swiotlb_init(int verbose)
+int __ref xen_swiotlb_init(int verbose, bool early)
 {
-	unsigned long bytes;
+	unsigned long bytes, order;
 	int rc = -ENOMEM;
 	enum xen_swiotlb_err m_ret = XEN_SWIOTLB_UNKNOWN;
 	unsigned int repeat = 3;
@@ -186,10 +186,28 @@ void __init xen_swiotlb_init(int verbose)
 	xen_io_tlb_nslabs = swiotlb_nr_tbl();
 retry:
 	bytes = xen_set_nslabs(xen_io_tlb_nslabs);
+	order = get_order(xen_io_tlb_nslabs << IO_TLB_SHIFT);
 	/*
 	 * Get IO TLB memory from any location.
 	 */
-	xen_io_tlb_start = alloc_bootmem_pages(PAGE_ALIGN(bytes));
+	if (early)
+		xen_io_tlb_start = alloc_bootmem_pages(PAGE_ALIGN(bytes));
+	else {
+#define SLABS_PER_PAGE (1 << (PAGE_SHIFT - IO_TLB_SHIFT))
+#define IO_TLB_MIN_SLABS ((1<<20) >> IO_TLB_SHIFT)
+		while ((SLABS_PER_PAGE << order) > IO_TLB_MIN_SLABS) {
+			xen_io_tlb_start = (void *)__get_free_pages(__GFP_NOWARN, order);
+			if (xen_io_tlb_start)
+				break;
+			order--;
+		}
+		if (order != get_order(bytes)) {
+			pr_warn("Warning: only able to allocate %ld MB "
+				"for software IO TLB\n", (PAGE_SIZE << order) >> 20);
+			xen_io_tlb_nslabs = SLABS_PER_PAGE << order;
+			bytes = xen_io_tlb_nslabs << IO_TLB_SHIFT;
+		}
+	}
 	if (!xen_io_tlb_start) {
 		m_ret = XEN_SWIOTLB_ENOMEM;
 		goto error;
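
A quick, self-contained illustration of the late-allocation fallback in
the hunk above, assuming the usual x86 constants PAGE_SHIFT = 12 and
IO_TLB_SHIFT = 11 (2 KB slabs), so SLABS_PER_PAGE is 2 and
IO_TLB_MIN_SLABS is 512: the loop steps the order down after each failed
__get_free_pages() attempt but never tries an allocation of 1 MB or less,
so the smallest attempt is 2 MB (order 9). The user-space sketch below
only prints the sizes that would be tried for a 64 MB table; it is not
kernel code.

	#include <stdio.h>

	/* Assumed typical x86 values; the kernel takes these from its own headers. */
	#define PAGE_SHIFT	12
	#define PAGE_SIZE	(1UL << PAGE_SHIFT)
	#define IO_TLB_SHIFT	11			/* 2 KB per slab */
	#define SLABS_PER_PAGE	(1UL << (PAGE_SHIFT - IO_TLB_SHIFT))
	#define IO_TLB_MIN_SLABS ((1UL << 20) >> IO_TLB_SHIFT)

	/* Mimics the kernel's get_order(): smallest order with PAGE_SIZE << order >= size. */
	static unsigned long get_order(unsigned long size)
	{
		unsigned long order = 0;

		while ((PAGE_SIZE << order) < size)
			order++;
		return order;
	}

	int main(void)
	{
		unsigned long nslabs = (64UL << 20) >> IO_TLB_SHIFT;	/* 64 MB table */
		unsigned long order = get_order(nslabs << IO_TLB_SHIFT);

		/*
		 * Mirrors the while loop in xen_swiotlb_init(): each failed
		 * __get_free_pages() attempt drops the order (halving the size),
		 * but no attempt is ever made at or below IO_TLB_MIN_SLABS.
		 */
		while ((SLABS_PER_PAGE << order) > IO_TLB_MIN_SLABS) {
			printf("would try order %2lu = %4lu MB\n",
			       order, (PAGE_SIZE << order) >> 20);
			order--;
		}
		return 0;
	}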
@@ -202,14 +220,21 @@ void __init xen_swiotlb_init(int verbose)
 			       bytes,
 			       xen_io_tlb_nslabs);
 	if (rc) {
-		free_bootmem(__pa(xen_io_tlb_start), PAGE_ALIGN(bytes));
+		if (early)
+			free_bootmem(__pa(xen_io_tlb_start), PAGE_ALIGN(bytes));
+		else {
+			free_pages((unsigned long)xen_io_tlb_start, order);
+			xen_io_tlb_start = NULL;
+		}
 		m_ret = XEN_SWIOTLB_EFIXUP;
 		goto error;
 	}
 	start_dma_addr = xen_virt_to_bus(xen_io_tlb_start);
-	swiotlb_init_with_tbl(xen_io_tlb_start, xen_io_tlb_nslabs, verbose);
-
-	return;
+	if (early)
+		swiotlb_init_with_tbl(xen_io_tlb_start, xen_io_tlb_nslabs, verbose);
+	else
+		rc = swiotlb_late_init_with_tbl(xen_io_tlb_start, xen_io_tlb_nslabs);
+	return rc;
 error:
 	if (repeat--) {
 		xen_io_tlb_nslabs = max(1024UL, /* Min is 2MB */
@@ -218,10 +243,13 @@ void __init xen_swiotlb_init(int verbose)
 		      (xen_io_tlb_nslabs << IO_TLB_SHIFT) >> 20);
 		goto retry;
 	}
-	xen_raw_printk("%s (rc:%d)", xen_swiotlb_error(m_ret), rc);
-	panic("%s (rc:%d)", xen_swiotlb_error(m_ret), rc);
+	pr_err("%s (rc:%d)", xen_swiotlb_error(m_ret), rc);
+	if (early)
+		panic("%s (rc:%d)", xen_swiotlb_error(m_ret), rc);
+	else
+		free_pages((unsigned long)xen_io_tlb_start, order);
+	return rc;
 }
 
 void *
 xen_swiotlb_alloc_coherent(struct device *hwdev, size_t size,
 			   dma_addr_t *dma_handle, gfp_t flags,
...
@@ -3,7 +3,7 @@
 
 #include <linux/swiotlb.h>
 
-extern void xen_swiotlb_init(int verbose);
+extern int xen_swiotlb_init(int verbose, bool early);
 
 extern void
 *xen_swiotlb_alloc_coherent(struct device *hwdev, size_t size,
...