Commit 665d3e2a authored by Joerg Roedel, committed by H. Peter Anvin

x86, gart: Make sure GART does not map physmem above 1TB

The GART can only map physical memory below 1TB. Make sure
the gart driver in the kernel does not try to map memory
above 1TB.

Cc: <stable@kernel.org>
Signed-off-by: Joerg Roedel <joerg.roedel@amd.com>
Link: http://lkml.kernel.org/r/1303134346-5805-5-git-send-email-joerg.roedel@amd.com
Signed-off-by: H. Peter Anvin <hpa@zytor.com>
parent c34151a7
@@ -81,6 +81,9 @@ static u32 gart_unmapped_entry;
 #define AGPEXTERN
 #endif
 
+/* GART can only remap to physical addresses < 1TB */
+#define GART_MAX_PHYS_ADDR	(1ULL << 40)
+
 /* backdoor interface to AGP driver */
 AGPEXTERN int agp_memory_reserved;
 AGPEXTERN __u32 *agp_gatt_table;
@@ -212,9 +215,13 @@ static dma_addr_t dma_map_area(struct device *dev, dma_addr_t phys_mem,
 				size_t size, int dir, unsigned long align_mask)
 {
 	unsigned long npages = iommu_num_pages(phys_mem, size, PAGE_SIZE);
-	unsigned long iommu_page = alloc_iommu(dev, npages, align_mask);
+	unsigned long iommu_page;
 	int i;
 
+	if (unlikely(phys_mem + size > GART_MAX_PHYS_ADDR))
+		return bad_dma_addr;
+
+	iommu_page = alloc_iommu(dev, npages, align_mask);
 	if (iommu_page == -1) {
 		if (!nonforced_iommu(dev, phys_mem, size))
 			return phys_mem;
...
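
For illustration, a minimal user-space sketch of the bounds check added above. The helper gart_range_ok(), the sample addresses, and the printed output are purely illustrative and are not part of the kernel patch; only the 1TB limit and the rejection rule come from the change itself.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* GART can only remap to physical addresses < 1TB (2^40 bytes). */
#define GART_MAX_PHYS_ADDR (1ULL << 40)

/*
 * Mirrors the new check in dma_map_area(): a mapping is rejected when the
 * end of the [phys_mem, phys_mem + size) range lies above the 1TB limit.
 */
static bool gart_range_ok(uint64_t phys_mem, uint64_t size)
{
	return phys_mem + size <= GART_MAX_PHYS_ADDR;
}

int main(void)
{
	/* A 64KB buffer well below 1TB is mappable (prints 1). */
	printf("%d\n", gart_range_ok(0x10000000ULL, 0x10000));

	/* A buffer ending above 1TB is rejected (prints 0); in the kernel,
	 * dma_map_area() returns bad_dma_addr in this case so the caller
	 * sees a mapping failure. */
	printf("%d\n", gart_range_ok(GART_MAX_PHYS_ADDR - 0x1000, 0x10000));

	return 0;
}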