Commit 75f1cdf1 authored by FUJITA Tomonori, committed by Ingo Molnar

x86: Handle HW IOMMU initialization failure gracefully

If HW IOMMU initialization fails (Intel VT-d often does, typically
due to BIOS bugs), we fall back to nommu. That doesn't work for most
systems, since machines nowadays have more than 4GB of memory and
must therefore use swiotlb instead of nommu.

The problem is that by the time HW IOMMU initialization fails, it is
too late to initialize swiotlb: its memory has to be allocated
earlier, from the bootmem allocator. Chris explained the issue in
detail:

  http://marc.info/?l=linux-kernel&m=125657444317079&w=2
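
In short, this is an allocator-lifetime problem. A minimal sketch of
the constraint (the helper name is hypothetical; the allocator call
mirrors the lib/swiotlb.c implementation of this era):

  /*
   * The swiotlb bounce buffer is a large (64MB by default),
   * physically contiguous, below-4GB allocation that only the
   * early bootmem allocator can satisfy.  By the time a HW IOMMU
   * driver notices at late init that its hardware is unusable,
   * bootmem has been torn down and the buffer can no longer be
   * allocated.
   */
  static void __init swiotlb_early_alloc_sketch(void)
  {
          unsigned long bytes = io_tlb_nslabs << IO_TLB_SHIFT;

          /* must run while bootmem is still alive */
          io_tlb_start = alloc_bootmem_low_pages(bytes);
  }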

The current x86 IOMMU initialization sequence is already too
complicated, and handling the above issue within it would only make
it hackier.

This patch changes the x86 IOMMU initialization sequence so that the
above issue is handled cleanly.

The new x86 IOMMU initialization sequence is as follows (a condensed
sketch of the resulting code appears after the list):

1. We initialize swiotlb (and set the swiotlb flag to 1) when
   (max_pfn > MAX_DMA32_PFN && !no_iommu) holds. dma_ops is set to
   swiotlb_dma_ops or nommu_dma_ops. If swiotlb usage is forced by
   the boot option, we finish here.

2. We call the detection functions of all the IOMMUs.

3. The detection function of the IOMMU that is present sets
   x86_init.iommu.iommu_init to that IOMMU's initialization function
   (so we avoid needlessly calling the initialization functions of
   all the IOMMUs).

4. If the IOMMU initialization function doesn't need swiotlb
   (e.g. because its initialization was successful), it sets the
   swiotlb flag back to zero.

5. If we then find that the swiotlb flag is zero, we free the
   swiotlb resources.
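
Condensed from the diff below (error paths and #ifdefs omitted), the
early half runs from pci_iommu_alloc() while bootmem is alive, and
the late half from the pci_iommu_init() fs_initcall:

  void __init pci_iommu_alloc(void)
  {
          int use_swiotlb = swiotlb;      /* set by "swiotlb=force" */

          pci_swiotlb_init();             /* step 1 */
          if (use_swiotlb)
                  return;                 /* forced: finish here */

          gart_iommu_hole_init();         /* steps 2 and 3: each      */
          detect_calgary();               /* detection routine may    */
          detect_intel_iommu();           /* point x86_init.iommu.    */
          amd_iommu_detect();             /* iommu_init at its init   */
  }

  static int __init pci_iommu_init(void)
  {
          x86_init.iommu.iommu_init();    /* step 4: on success, the
                                             HW IOMMU clears swiotlb */
          if (swiotlb)
                  swiotlb_print_info();   /* keep bounce buffering */
          else
                  swiotlb_free();         /* step 5: free the memory */
          return 0;
  }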
Signed-off-by: FUJITA Tomonori <fujita.tomonori@lab.ntt.co.jp>
Cc: chrisw@sous-sol.org
Cc: dwmw2@infradead.org
Cc: joerg.roedel@amd.com
Cc: muli@il.ibm.com
LKML-Reference: <1257849980-22640-10-git-send-email-fujita.tomonori@lab.ntt.co.jp>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
parent ad32e8cb
--- a/arch/x86/include/asm/iommu.h
+++ b/arch/x86/include/asm/iommu.h
@@ -2,7 +2,6 @@
 #define _ASM_X86_IOMMU_H

 static inline void iommu_shutdown_noop(void) {}
-extern void no_iommu_init(void);
 extern struct dma_map_ops nommu_dma_ops;
 extern int force_iommu, no_iommu;
 extern int iommu_detected;
--- a/arch/x86/kernel/amd_iommu.c
+++ b/arch/x86/kernel/amd_iommu.c
@@ -2110,8 +2110,8 @@ int __init amd_iommu_init_dma_ops(void)
        prealloc_protection_domains();

        iommu_detected = 1;
-       force_iommu = 1;
        bad_dma_address = 0;
+       swiotlb = 0;
 #ifdef CONFIG_GART_IOMMU
        gart_iommu_aperture_disabled = 1;
        gart_iommu_aperture = 0;
--- a/arch/x86/kernel/amd_iommu_init.c
+++ b/arch/x86/kernel/amd_iommu_init.c
@@ -1330,7 +1330,7 @@ static int __init early_amd_iommu_detect(struct acpi_table_header *table)
 void __init amd_iommu_detect(void)
 {
-       if (swiotlb || no_iommu || (iommu_detected && !gart_iommu_aperture))
+       if (no_iommu || (iommu_detected && !gart_iommu_aperture))
                return;

        if (acpi_table_parse("IVRS", early_amd_iommu_detect) == 0) {
--- a/arch/x86/kernel/aperture_64.c
+++ b/arch/x86/kernel/aperture_64.c
@@ -458,7 +458,7 @@ void __init gart_iommu_hole_init(void)
        if (aper_alloc) {
                /* Got the aperture from the AGP bridge */
-       } else if (swiotlb && !valid_agp) {
+       } else if (!valid_agp) {
                /* Do nothing */
        } else if ((!no_iommu && max_pfn > MAX_DMA32_PFN) ||
                   force_iommu ||
--- a/arch/x86/kernel/pci-calgary_64.c
+++ b/arch/x86/kernel/pci-calgary_64.c
@@ -1360,7 +1360,7 @@ void __init detect_calgary(void)
         * if the user specified iommu=off or iommu=soft or we found
         * another HW IOMMU already, bail out.
         */
-       if (swiotlb || no_iommu || iommu_detected)
+       if (no_iommu || iommu_detected)
                return;

        if (!use_calgary)
@@ -1445,10 +1445,6 @@ void __init detect_calgary(void)
                printk(KERN_INFO "PCI-DMA: Calgary TCE table spec is %d\n",
                       specified_table_size);

-               /* swiotlb for devices that aren't behind the Calgary. */
-               if (max_pfn > MAX_DMA32_PFN)
-                       swiotlb = 1;
-
                x86_init.iommu.iommu_init = calgary_iommu_init;
        }
        return;
@@ -1476,11 +1472,7 @@ int __init calgary_iommu_init(void)
                return ret;
        }

-       force_iommu = 1;
        bad_dma_address = 0x0;
-       /* dma_ops is set to swiotlb or nommu */
-       if (!dma_ops)
-               dma_ops = &nommu_dma_ops;

        return 0;
 }
--- a/arch/x86/kernel/pci-dma.c
+++ b/arch/x86/kernel/pci-dma.c
@@ -124,24 +124,24 @@ static void __init dma32_free_bootmem(void)

 void __init pci_iommu_alloc(void)
 {
+       /* swiotlb is forced by the boot option */
+       int use_swiotlb = swiotlb;
 #ifdef CONFIG_X86_64
        /* free the range so iommu could get some range less than 4G */
        dma32_free_bootmem();
 #endif
-
-       /*
-        * The order of these functions is important for
-        * fall-back/fail-over reasons
-        */
+       pci_swiotlb_init();
+       if (use_swiotlb)
+               return;
+
        gart_iommu_hole_init();

        detect_calgary();

        detect_intel_iommu();

        /* needs to be called after gart_iommu_hole_init */
        amd_iommu_detect();
-
-       pci_swiotlb_init();
 }

 void *dma_generic_alloc_coherent(struct device *dev, size_t size,
@@ -291,10 +291,15 @@ static int __init pci_iommu_init(void)
 #ifdef CONFIG_PCI
        dma_debug_add_bus(&pci_bus_type);
 #endif
        x86_init.iommu.iommu_init();

-       no_iommu_init();
+       if (swiotlb) {
+               printk(KERN_INFO "PCI-DMA: "
+                      "Using software bounce buffering for IO (SWIOTLB)\n");
+               swiotlb_print_info();
+       } else
+               swiotlb_free();
+
        return 0;
 }
--- a/arch/x86/kernel/pci-gart_64.c
+++ b/arch/x86/kernel/pci-gart_64.c
@@ -833,6 +833,7 @@ int __init gart_iommu_init(void)
        flush_gart();
        dma_ops = &gart_dma_ops;
        x86_platform.iommu_shutdown = gart_iommu_shutdown;
+       swiotlb = 0;

        return 0;
 }
--- a/arch/x86/kernel/pci-nommu.c
+++ b/arch/x86/kernel/pci-nommu.c
@@ -103,12 +103,3 @@ struct dma_map_ops nommu_dma_ops = {
        .sync_sg_for_device = nommu_sync_sg_for_device,
        .is_phys = 1,
 };
-
-void __init no_iommu_init(void)
-{
-       if (dma_ops)
-               return;
-
-       force_iommu = 0; /* no HW IOMMU */
-       dma_ops = &nommu_dma_ops;
-}
--- a/arch/x86/kernel/pci-swiotlb.c
+++ b/arch/x86/kernel/pci-swiotlb.c
@@ -46,13 +46,12 @@ void __init pci_swiotlb_init(void)
 {
        /* don't initialize swiotlb if iommu=off (no_iommu=1) */
 #ifdef CONFIG_X86_64
-       if ((!iommu_detected && !no_iommu && max_pfn > MAX_DMA32_PFN))
+       if (!no_iommu && max_pfn > MAX_DMA32_PFN)
                swiotlb = 1;
 #endif
-       if (swiotlb_force)
-               swiotlb = 1;
        if (swiotlb) {
                swiotlb_init(0);
                dma_ops = &swiotlb_dma_ops;
-       }
+       } else
+               dma_ops = &nommu_dma_ops;
 }
--- a/drivers/pci/dmar.c
+++ b/drivers/pci/dmar.c
@@ -613,8 +613,7 @@ void __init detect_intel_iommu(void)
                       "x2apic and Intr-remapping.\n");
 #endif
 #ifdef CONFIG_DMAR
-       if (ret && !no_iommu && !iommu_detected && !swiotlb &&
-           !dmar_disabled)
+       if (ret && !no_iommu && !iommu_detected && !dmar_disabled)
                iommu_detected = 1;
 #endif
 #ifdef CONFIG_X86
--- a/drivers/pci/intel-iommu.c
+++ b/drivers/pci/intel-iommu.c
@@ -3231,7 +3231,7 @@ int __init intel_iommu_init(void)
         * Check the need for DMA-remapping initialization now.
         * Above initialization will also be used by Interrupt-remapping.
         */
-       if (no_iommu || swiotlb || dmar_disabled)
+       if (no_iommu || dmar_disabled)
                return -ENODEV;

        iommu_init_mempool();
@@ -3252,7 +3252,9 @@ int __init intel_iommu_init(void)
               "PCI-DMA: Intel(R) Virtualization Technology for Directed I/O\n");

        init_timer(&unmap_timer);
-       force_iommu = 1;
+#ifdef CONFIG_SWIOTLB
+       swiotlb = 0;
+#endif
        dma_ops = &intel_dma_ops;

        init_iommu_sysfs();
--- a/lib/swiotlb.c
+++ b/lib/swiotlb.c
@@ -109,8 +109,10 @@ setup_io_tlb_npages(char *str)
        }
        if (*str == ',')
                ++str;
-       if (!strcmp(str, "force"))
+       if (!strcmp(str, "force")) {
                swiotlb_force = 1;
+               swiotlb = 1;
+       }
        return 1;
 }
 __setup("swiotlb=", setup_io_tlb_npages);
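Note the last hunk: with this change, booting with "swiotlb=force"
sets the swiotlb flag itself in addition to swiotlb_force, which is
what lets pci_iommu_alloc() capture it in use_swiotlb and skip HW
IOMMU detection entirely (the "we finish here" case of step 1).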