Commit 6876b3ba authored by Benjamin Herrenschmidt, committed by Dave Airlie

drm: fix for non-coherent DMA PowerPC

This patch fixes bits of the DRM to make the radeon DRI work on
non-cache-coherent PCI DMA variants of the PowerPC processors.

It moves the few places that need changes into wrappers so that
other architectures with similar issues can easily add their own
changes to those wrappers (a sketch of such a hook follows the
commit metadata below), at least until we have a more useful
generic kernel API.
Signed-off-by: Benjamin Herrenschmidt <benh@kernel.crashing.org>
Cc: "David S. Miller" <davem@davemloft.net>
Cc: "Luck, Tony" <tony.luck@intel.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Dave Airlie <airlied@linux.ie>
parent 2b46278b
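The heart of the change is a pair of small wrappers (drm_vmalloc_dma and drm_dma_prot, added in the hunks below). As a rough illustration of how another non-coherent architecture could hook in, the allocation wrapper would simply grow one more #elif arm instead of every call site being touched. The __noncoherent_arch__ branch here is a hypothetical placeholder, not part of this patch:

static inline void *drm_vmalloc_dma(unsigned long size)
{
#if defined(__powerpc__) && defined(CONFIG_NOT_COHERENT_CACHE)
	/* PowerPC with non-coherent DMA: map the pages uncached */
	return __vmalloc(size, GFP_KERNEL, PAGE_KERNEL | _PAGE_NO_CACHE);
#elif defined(__noncoherent_arch__)	/* hypothetical example branch */
	return __vmalloc(size, GFP_KERNEL, pgprot_noncached(PAGE_KERNEL));
#else
	return vmalloc_32(size);
#endif
}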
@@ -168,6 +168,12 @@ int drm_ati_pcigart_init(struct drm_device *dev, struct drm_ati_pcigart_info *ga
 		}
 	}
 
+	if (gart_info->gart_table_location == DRM_ATI_GART_MAIN)
+		dma_sync_single_for_device(&dev->pdev->dev,
+					   bus_address,
+					   max_pages * sizeof(u32),
+					   PCI_DMA_TODEVICE);
+
 	ret = 1;
 
 #if defined(__i386__) || defined(__x86_64__)
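The hunk above is the producer-side sync: the CPU fills the GART table, and on a non-coherent platform the dirty cache lines must be written back before the GPU reads the table over the bus. A minimal sketch of that pattern with the generic DMA API follows; the function name and parameters are illustrative, not taken from the patch:

#include <linux/dma-mapping.h>

/* Sketch: publish a CPU-written table to a DMA device.  On a cache
 * coherent platform the sync is a no-op; on a non-coherent one it
 * writes the dirty cache lines back so the device sees the entries. */
static void publish_table_to_device(struct device *dev, u32 *table,
				    dma_addr_t bus_addr, unsigned int n)
{
	unsigned int i;

	for (i = 0; i < n; i++)		/* CPU writes may sit in the cache */
		table[i] = 0;

	dma_sync_single_for_device(dev, bus_addr, n * sizeof(u32),
				   DMA_TO_DEVICE);
}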
@@ -36,6 +36,15 @@
 
 #define DEBUG_SCATTER 0
 
+static inline void *drm_vmalloc_dma(unsigned long size)
+{
+#if defined(__powerpc__) && defined(CONFIG_NOT_COHERENT_CACHE)
+	return __vmalloc(size, GFP_KERNEL, PAGE_KERNEL | _PAGE_NO_CACHE);
+#else
+	return vmalloc_32(size);
+#endif
+}
+
 void drm_sg_cleanup(struct drm_sg_mem * entry)
 {
 	struct page *page;
@@ -104,7 +113,7 @@ int drm_sg_alloc(struct drm_device *dev, struct drm_scatter_gather * request)
 	}
 	memset((void *)entry->busaddr, 0, pages * sizeof(*entry->busaddr));
 
-	entry->virtual = vmalloc_32(pages << PAGE_SHIFT);
+	entry->virtual = drm_vmalloc_dma(pages << PAGE_SHIFT);
 	if (!entry->virtual) {
 		drm_free(entry->busaddr,
 			 entry->pages * sizeof(*entry->busaddr), DRM_MEM_PAGES);
@@ -54,13 +54,24 @@ static pgprot_t drm_io_prot(uint32_t map_type, struct vm_area_struct *vma)
 		pgprot_val(tmp) |= _PAGE_NO_CACHE;
 		if (map_type == _DRM_REGISTERS)
 			pgprot_val(tmp) |= _PAGE_GUARDED;
-#endif
-#if defined(__ia64__)
+#elif defined(__ia64__)
 	if (efi_range_is_wc(vma->vm_start, vma->vm_end -
 			    vma->vm_start))
 		tmp = pgprot_writecombine(tmp);
 	else
 		tmp = pgprot_noncached(tmp);
+#elif defined(__sparc__)
+	tmp = pgprot_noncached(tmp);
 #endif
 	return tmp;
 }
+
+static pgprot_t drm_dma_prot(uint32_t map_type, struct vm_area_struct *vma)
+{
+	pgprot_t tmp = vm_get_page_prot(vma->vm_flags);
+
+#if defined(__powerpc__) && defined(CONFIG_NOT_COHERENT_CACHE)
+	tmp |= _PAGE_NO_CACHE;
+#endif
+	return tmp;
+}
@@ -603,9 +614,6 @@ static int drm_mmap_locked(struct file *filp, struct vm_area_struct *vma)
 		offset = dev->driver->get_reg_ofs(dev);
 		vma->vm_flags |= VM_IO;	/* not in core dump */
 		vma->vm_page_prot = drm_io_prot(map->type, vma);
-#ifdef __sparc__
-		vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
-#endif
 		if (io_remap_pfn_range(vma, vma->vm_start,
 				       (map->offset + offset) >> PAGE_SHIFT,
 				       vma->vm_end - vma->vm_start,
@@ -624,6 +632,7 @@ static int drm_mmap_locked(struct file *filp, struct vm_area_struct *vma)
 				    page_to_pfn(virt_to_page(map->handle)),
 				    vma->vm_end - vma->vm_start, vma->vm_page_prot))
 			return -EAGAIN;
+		vma->vm_page_prot = drm_dma_prot(map->type, vma);
 	/* fall through to _DRM_SHM */
 	case _DRM_SHM:
 		vma->vm_ops = &drm_vm_shm_ops;
@@ -631,6 +640,7 @@ static int drm_mmap_locked(struct file *filp, struct vm_area_struct *vma)
 		/* Don't let this area swap.  Change when
 		   DRM_KERNEL advisory is supported. */
 		vma->vm_flags |= VM_RESERVED;
+		vma->vm_page_prot = drm_dma_prot(map->type, vma);
 		break;
 	case _DRM_SCATTER_GATHER:
 		vma->vm_ops = &drm_vm_sg_ops;
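Taken together, the drm_mmap_locked() hunks split the page-protection choice between the two helpers: register and frame-buffer maps keep using drm_io_prot(), while kernel-allocated memory (the consistent map that falls through to _DRM_SHM, and _DRM_SHM itself) now goes through drm_dma_prot(), which is only non-trivial on non-coherent PowerPC. A condensed sketch of that dispatch, not literal kernel code (the real switch also sets vm_ops and performs the remapping):

static pgprot_t drm_map_prot_sketch(uint32_t map_type, struct vm_area_struct *vma)
{
	switch (map_type) {
	case _DRM_REGISTERS:
	case _DRM_FRAME_BUFFER:
		/* MMIO-style mappings: uncached (and guarded on powerpc) */
		return drm_io_prot(map_type, vma);
	case _DRM_CONSISTENT:
	case _DRM_SHM:
		/* kernel memory: uncached only when DMA is not coherent */
		return drm_dma_prot(map_type, vma);
	default:
		return vm_get_page_prot(vma->vm_flags);
	}
}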