Commit fd1496a0 authored by Alexandre Courbot, committed by Ben Skeggs

drm/nouveau: map pages using DMA API

The DMA API is the recommended way to map pages no matter what the
underlying bus is. Use the DMA functions for page mapping and remove
currently existing wrappers.
Signed-off-by: Alexandre Courbot <acourbot@nvidia.com>
Acked-by: Daniel Vetter <daniel.vetter@ffwll.ch>
Signed-off-by: Ben Skeggs <bskeggs@redhat.com>
parent 3967633d
...@@ -487,31 +487,6 @@ nv_device_resource_len(struct nouveau_device *device, unsigned int bar) ...@@ -487,31 +487,6 @@ nv_device_resource_len(struct nouveau_device *device, unsigned int bar)
} }
} }
/*
 * Map a single page for DMA by the GPU, abstracting over the bus type.
 * Returns the bus address on success, or 0 on mapping failure (0 serves
 * as the error sentinel for callers such as nv50_fb_ctor/nvc0_fb_ctor).
 * NOTE(review): this is the legacy wrapper this commit removes in favor
 * of calling dma_map_page() directly.
 */
dma_addr_t
nv_device_map_page(struct nouveau_device *device, struct page *page)
{
dma_addr_t ret;
if (nv_device_is_pci(device)) {
/* PCI device: map through the PCI DMA layer and check for errors. */
ret = pci_map_page(device->pdev, page, 0, PAGE_SIZE,
PCI_DMA_BIDIRECTIONAL);
if (pci_dma_mapping_error(device->pdev, ret))
ret = 0;
} else {
/* Non-PCI (platform) device: uses the physical address directly --
 * assumes no IOMMU translation is needed on this path. */
ret = page_to_phys(page);
}
return ret;
}
/*
 * Undo nv_device_map_page(). Only the PCI path created a real DMA
 * mapping, so the non-PCI case is intentionally a no-op.
 */
void
nv_device_unmap_page(struct nouveau_device *device, dma_addr_t addr)
{
if (nv_device_is_pci(device))
pci_unmap_page(device->pdev, addr, PAGE_SIZE,
PCI_DMA_BIDIRECTIONAL);
}
int int
nv_device_get_irq(struct nouveau_device *device, bool stall) nv_device_get_irq(struct nouveau_device *device, bool stall)
{ {
......
...@@ -174,12 +174,6 @@ nv_device_resource_start(struct nouveau_device *device, unsigned int bar); ...@@ -174,12 +174,6 @@ nv_device_resource_start(struct nouveau_device *device, unsigned int bar);
resource_size_t resource_size_t
nv_device_resource_len(struct nouveau_device *device, unsigned int bar); nv_device_resource_len(struct nouveau_device *device, unsigned int bar);
dma_addr_t
nv_device_map_page(struct nouveau_device *device, struct page *page);
void
nv_device_unmap_page(struct nouveau_device *device, dma_addr_t addr);
int int
nv_device_get_irq(struct nouveau_device *device, bool stall); nv_device_get_irq(struct nouveau_device *device, bool stall);
......
...@@ -250,7 +250,9 @@ nv50_fb_ctor(struct nouveau_object *parent, struct nouveau_object *engine, ...@@ -250,7 +250,9 @@ nv50_fb_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
priv->r100c08_page = alloc_page(GFP_KERNEL | __GFP_ZERO); priv->r100c08_page = alloc_page(GFP_KERNEL | __GFP_ZERO);
if (priv->r100c08_page) { if (priv->r100c08_page) {
priv->r100c08 = nv_device_map_page(device, priv->r100c08_page); priv->r100c08 = dma_map_page(nv_device_base(device),
priv->r100c08_page, 0, PAGE_SIZE,
DMA_BIDIRECTIONAL);
if (!priv->r100c08) if (!priv->r100c08)
nv_warn(priv, "failed 0x100c08 page map\n"); nv_warn(priv, "failed 0x100c08 page map\n");
} else { } else {
...@@ -268,7 +270,8 @@ nv50_fb_dtor(struct nouveau_object *object) ...@@ -268,7 +270,8 @@ nv50_fb_dtor(struct nouveau_object *object)
struct nv50_fb_priv *priv = (void *)object; struct nv50_fb_priv *priv = (void *)object;
if (priv->r100c08_page) { if (priv->r100c08_page) {
nv_device_unmap_page(device, priv->r100c08); dma_unmap_page(nv_device_base(device), priv->r100c08, PAGE_SIZE,
DMA_BIDIRECTIONAL);
__free_page(priv->r100c08_page); __free_page(priv->r100c08_page);
} }
......
...@@ -70,7 +70,8 @@ nvc0_fb_dtor(struct nouveau_object *object) ...@@ -70,7 +70,8 @@ nvc0_fb_dtor(struct nouveau_object *object)
struct nvc0_fb_priv *priv = (void *)object; struct nvc0_fb_priv *priv = (void *)object;
if (priv->r100c10_page) { if (priv->r100c10_page) {
nv_device_unmap_page(device, priv->r100c10); dma_unmap_page(nv_device_base(device), priv->r100c10, PAGE_SIZE,
DMA_BIDIRECTIONAL);
__free_page(priv->r100c10_page); __free_page(priv->r100c10_page);
} }
...@@ -93,7 +94,9 @@ nvc0_fb_ctor(struct nouveau_object *parent, struct nouveau_object *engine, ...@@ -93,7 +94,9 @@ nvc0_fb_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
priv->r100c10_page = alloc_page(GFP_KERNEL | __GFP_ZERO); priv->r100c10_page = alloc_page(GFP_KERNEL | __GFP_ZERO);
if (priv->r100c10_page) { if (priv->r100c10_page) {
priv->r100c10 = nv_device_map_page(device, priv->r100c10_page); priv->r100c10 = dma_map_page(nv_device_base(device),
priv->r100c10_page, 0, PAGE_SIZE,
DMA_BIDIRECTIONAL);
if (!priv->r100c10) if (!priv->r100c10)
return -EFAULT; return -EFAULT;
} }
......
...@@ -1340,6 +1340,7 @@ nouveau_ttm_tt_populate(struct ttm_tt *ttm) ...@@ -1340,6 +1340,7 @@ nouveau_ttm_tt_populate(struct ttm_tt *ttm)
struct nouveau_drm *drm; struct nouveau_drm *drm;
struct nouveau_device *device; struct nouveau_device *device;
struct drm_device *dev; struct drm_device *dev;
struct device *pdev;
unsigned i; unsigned i;
int r; int r;
bool slave = !!(ttm->page_flags & TTM_PAGE_FLAG_SG); bool slave = !!(ttm->page_flags & TTM_PAGE_FLAG_SG);
...@@ -1358,6 +1359,7 @@ nouveau_ttm_tt_populate(struct ttm_tt *ttm) ...@@ -1358,6 +1359,7 @@ nouveau_ttm_tt_populate(struct ttm_tt *ttm)
drm = nouveau_bdev(ttm->bdev); drm = nouveau_bdev(ttm->bdev);
device = nv_device(drm->device); device = nv_device(drm->device);
dev = drm->dev; dev = drm->dev;
pdev = nv_device_base(device);
#if __OS_HAS_AGP #if __OS_HAS_AGP
if (drm->agp.stat == ENABLED) { if (drm->agp.stat == ENABLED) {
...@@ -1377,17 +1379,22 @@ nouveau_ttm_tt_populate(struct ttm_tt *ttm) ...@@ -1377,17 +1379,22 @@ nouveau_ttm_tt_populate(struct ttm_tt *ttm)
} }
for (i = 0; i < ttm->num_pages; i++) { for (i = 0; i < ttm->num_pages; i++) {
ttm_dma->dma_address[i] = nv_device_map_page(device, dma_addr_t addr;
ttm->pages[i]);
if (!ttm_dma->dma_address[i]) { addr = dma_map_page(pdev, ttm->pages[i], 0, PAGE_SIZE,
DMA_BIDIRECTIONAL);
if (dma_mapping_error(pdev, addr)) {
while (--i) { while (--i) {
nv_device_unmap_page(device, dma_unmap_page(pdev, ttm_dma->dma_address[i],
ttm_dma->dma_address[i]); PAGE_SIZE, DMA_BIDIRECTIONAL);
ttm_dma->dma_address[i] = 0; ttm_dma->dma_address[i] = 0;
} }
ttm_pool_unpopulate(ttm); ttm_pool_unpopulate(ttm);
return -EFAULT; return -EFAULT;
} }
ttm_dma->dma_address[i] = addr;
} }
return 0; return 0;
} }
...@@ -1399,6 +1406,7 @@ nouveau_ttm_tt_unpopulate(struct ttm_tt *ttm) ...@@ -1399,6 +1406,7 @@ nouveau_ttm_tt_unpopulate(struct ttm_tt *ttm)
struct nouveau_drm *drm; struct nouveau_drm *drm;
struct nouveau_device *device; struct nouveau_device *device;
struct drm_device *dev; struct drm_device *dev;
struct device *pdev;
unsigned i; unsigned i;
bool slave = !!(ttm->page_flags & TTM_PAGE_FLAG_SG); bool slave = !!(ttm->page_flags & TTM_PAGE_FLAG_SG);
...@@ -1408,6 +1416,7 @@ nouveau_ttm_tt_unpopulate(struct ttm_tt *ttm) ...@@ -1408,6 +1416,7 @@ nouveau_ttm_tt_unpopulate(struct ttm_tt *ttm)
drm = nouveau_bdev(ttm->bdev); drm = nouveau_bdev(ttm->bdev);
device = nv_device(drm->device); device = nv_device(drm->device);
dev = drm->dev; dev = drm->dev;
pdev = nv_device_base(device);
#if __OS_HAS_AGP #if __OS_HAS_AGP
if (drm->agp.stat == ENABLED) { if (drm->agp.stat == ENABLED) {
...@@ -1425,7 +1434,8 @@ nouveau_ttm_tt_unpopulate(struct ttm_tt *ttm) ...@@ -1425,7 +1434,8 @@ nouveau_ttm_tt_unpopulate(struct ttm_tt *ttm)
for (i = 0; i < ttm->num_pages; i++) { for (i = 0; i < ttm->num_pages; i++) {
if (ttm_dma->dma_address[i]) { if (ttm_dma->dma_address[i]) {
nv_device_unmap_page(device, ttm_dma->dma_address[i]); dma_unmap_page(pdev, ttm_dma->dma_address[i], PAGE_SIZE,
DMA_BIDIRECTIONAL);
} }
} }
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment