Commit a0d9a8fe authored by Ben Skeggs

drm/nouveau: remove allocations from gart populate() hook

Since some somewhat questionable changes a while back, TTM provides a
completely empty array of struct dma_address that stays around for the
entire lifetime of the TTM object.

Let's use this array, *always*, rather than wasting yet more memory on
another array whose purpose is identical, as well as yet another bool array
of the same size saying *which* of the previous two arrays to use...

This change will also solve the high-order allocation failures seen by
some people while using nouveau.
Signed-off-by: Ben Skeggs <bskeggs@redhat.com>
parent a1484512
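In short: populate() now just records the dma_addr_t array that TTM already owns for the object's lifetime and maps pages into it, and clear() only has to undo the mappings. The fragment below is an illustrative, self-contained user-space sketch of that ownership change, not driver code: sgdma_be, sgdma_populate() and sgdma_clear() are simplified stand-ins for the nouveau structures, and the pci_map_page()/pci_unmap_page() calls are stubbed out.

#include <stdbool.h>
#include <stdint.h>

typedef uint64_t dma_addr_t;		/* stand-in for the kernel type */

/* Simplified stand-in for struct nouveau_sgdma_be after this change:
 * 'pages' borrows the array TTM allocated, instead of owning a copy. */
struct sgdma_be {
	dma_addr_t *pages;
	unsigned nr_pages;
	bool unmap_pages;		/* one flag replaces the per-page bool array */
};

/* populate(): record the caller-provided array and fill it in. */
static int sgdma_populate(struct sgdma_be *be, unsigned long num_pages,
			  dma_addr_t *dma_addrs)
{
	be->pages = dma_addrs;		/* no kmalloc of a duplicate array */
	be->nr_pages = num_pages;
	be->unmap_pages = true;

	for (unsigned long i = 0; i < num_pages; i++)
		be->pages[i] = i + 1;	/* pci_map_page() in the real driver */

	return 0;
}

/* clear(): undo the mappings; there is nothing left to kfree(). */
static void sgdma_clear(struct sgdma_be *be)
{
	if (!be->unmap_pages)
		return;
	while (be->nr_pages--)
		be->pages[be->nr_pages] = 0;	/* pci_unmap_page() in the real driver */
}

int main(void)
{
	/* TTM-owned array that lives as long as the TTM object itself. */
	dma_addr_t ttm_dma_addrs[4] = { 0 };
	struct sgdma_be be = { 0 };

	sgdma_populate(&be, 4, ttm_dma_addrs);
	sgdma_clear(&be);
	return 0;
}

The same shape is visible in the diff below: the kmalloc()/kfree() pairs and the ttm_alloced array disappear, and only the unmap_pages flag remains.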
@@ -12,8 +12,8 @@ struct nouveau_sgdma_be {
 	struct drm_device *dev;
 
 	dma_addr_t *pages;
-	bool *ttm_alloced;
 	unsigned nr_pages;
+	bool unmap_pages;
 
 	u64 offset;
 	bool bound;
@@ -26,43 +26,28 @@ nouveau_sgdma_populate(struct ttm_backend *be, unsigned long num_pages,
 {
 	struct nouveau_sgdma_be *nvbe = (struct nouveau_sgdma_be *)be;
 	struct drm_device *dev = nvbe->dev;
+	int i;
 
 	NV_DEBUG(nvbe->dev, "num_pages = %ld\n", num_pages);
 
-	if (nvbe->pages)
-		return -EINVAL;
-
-	nvbe->pages = kmalloc(sizeof(dma_addr_t) * num_pages, GFP_KERNEL);
-	if (!nvbe->pages)
-		return -ENOMEM;
+	nvbe->pages = dma_addrs;
+	nvbe->nr_pages = num_pages;
+	nvbe->unmap_pages = true;
 
-	nvbe->ttm_alloced = kmalloc(sizeof(bool) * num_pages, GFP_KERNEL);
-	if (!nvbe->ttm_alloced) {
-		kfree(nvbe->pages);
-		nvbe->pages = NULL;
-		return -ENOMEM;
+	/* this code path isn't called and is incorrect anyways */
+	if (0) { /* dma_addrs[0] != DMA_ERROR_CODE) { */
+		nvbe->unmap_pages = false;
+		return 0;
 	}
 
-	nvbe->nr_pages = 0;
-	while (num_pages--) {
-		/* this code path isn't called and is incorrect anyways */
-		if (0) { /*dma_addrs[nvbe->nr_pages] != DMA_ERROR_CODE)*/
-			nvbe->pages[nvbe->nr_pages] =
-					dma_addrs[nvbe->nr_pages];
-			nvbe->ttm_alloced[nvbe->nr_pages] = true;
-		} else {
-			nvbe->pages[nvbe->nr_pages] =
-				pci_map_page(dev->pdev, pages[nvbe->nr_pages], 0,
-					     PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
-			if (pci_dma_mapping_error(dev->pdev,
-						  nvbe->pages[nvbe->nr_pages])) {
-				be->func->clear(be);
-				return -EFAULT;
-			}
-			nvbe->ttm_alloced[nvbe->nr_pages] = false;
+	for (i = 0; i < num_pages; i++) {
+		nvbe->pages[i] = pci_map_page(dev->pdev, pages[i], 0,
+					      PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
+		if (pci_dma_mapping_error(dev->pdev, nvbe->pages[i])) {
+			nvbe->nr_pages = --i;
+			be->func->clear(be);
+			return -EFAULT;
 		}
-
-		nvbe->nr_pages++;
 	}
 
 	return 0;
@@ -72,25 +57,16 @@ static void
 nouveau_sgdma_clear(struct ttm_backend *be)
 {
 	struct nouveau_sgdma_be *nvbe = (struct nouveau_sgdma_be *)be;
-	struct drm_device *dev;
-
-	if (nvbe && nvbe->pages) {
-		dev = nvbe->dev;
-		NV_DEBUG(dev, "\n");
-
-		if (nvbe->bound)
-			be->func->unbind(be);
+	struct drm_device *dev = nvbe->dev;
+
+	if (nvbe->bound)
+		be->func->unbind(be);
 
+	if (nvbe->unmap_pages) {
 		while (nvbe->nr_pages--) {
-			if (!nvbe->ttm_alloced[nvbe->nr_pages])
-				pci_unmap_page(dev->pdev, nvbe->pages[nvbe->nr_pages],
+			pci_unmap_page(dev->pdev, nvbe->pages[nvbe->nr_pages],
 				       PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
 		}
-		kfree(nvbe->pages);
-		kfree(nvbe->ttm_alloced);
-		nvbe->pages = NULL;
-		nvbe->ttm_alloced = NULL;
-		nvbe->nr_pages = 0;
 	}
 }