Commit e34b8fee authored by Christian König

drm/ttm: merge ttm_dma_tt back into ttm_tt

It makes no difference to kmalloc if the structure
is 48 or 64 bytes in size.
Signed-off-by: Christian König <christian.koenig@amd.com>
Reviewed-by: Dave Airlie <airlied@redhat.com>
Link: https://patchwork.freedesktop.org/patch/396950/
parent 230c079f
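
For orientation before the per-file hunks, here is a simplified before/after sketch of the structure change. Field names are taken from the hunks below; the comments are editorial, not part of the patch.

	/* Before: drivers needing DMA addresses wrapped the base type. */
	struct ttm_dma_tt {
		struct ttm_tt ttm;		/* base ttm_tt struct */
		dma_addr_t *dma_address;	/* DMA (bus) addresses of the pages */
		struct list_head pages_list;	/* used by some page allocation backends */
	};

	/* After: the same fields live directly in struct ttm_tt. */
	struct ttm_tt {
		struct page **pages;
		uint32_t page_flags;
		uint32_t num_pages;
		struct sg_table *sg;
		dma_addr_t *dma_address;
		struct file *swap_storage;
		struct list_head pages_list;
		enum ttm_caching caching;
	};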
@@ -45,12 +45,10 @@ void amdgpu_gmc_get_pde_for_bo(struct amdgpu_bo *bo, int level,
 			       uint64_t *addr, uint64_t *flags)
 {
 	struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev);
-	struct ttm_dma_tt *ttm;
 
 	switch (bo->tbo.mem.mem_type) {
 	case TTM_PL_TT:
-		ttm = container_of(bo->tbo.ttm, struct ttm_dma_tt, ttm);
-		*addr = ttm->dma_address[0];
+		*addr = bo->tbo.ttm->dma_address[0];
 		break;
 	case TTM_PL_VRAM:
 		*addr = amdgpu_bo_gpu_offset(bo);
@@ -122,16 +120,14 @@ int amdgpu_gmc_set_pte_pde(struct amdgpu_device *adev, void *cpu_pt_addr,
 uint64_t amdgpu_gmc_agp_addr(struct ttm_buffer_object *bo)
 {
 	struct amdgpu_device *adev = amdgpu_ttm_adev(bo->bdev);
-	struct ttm_dma_tt *ttm;
 
 	if (bo->num_pages != 1 || bo->ttm->caching == ttm_cached)
 		return AMDGPU_BO_INVALID_OFFSET;
 
-	ttm = container_of(bo->ttm, struct ttm_dma_tt, ttm);
-	if (ttm->dma_address[0] + PAGE_SIZE >= adev->gmc.agp_size)
+	if (bo->ttm->dma_address[0] + PAGE_SIZE >= adev->gmc.agp_size)
 		return AMDGPU_BO_INVALID_OFFSET;
 
-	return adev->gmc.agp_start + ttm->dma_address[0];
+	return adev->gmc.agp_start + bo->ttm->dma_address[0];
 }
 
 /**
......
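
The driver-facing effect, restated from the amdgpu_gmc hunks above as a minimal sketch: the container_of() detour through ttm_dma_tt disappears and the DMA address is read straight off the ttm_tt.

	/* Before: reach the DMA addresses through the wrapper type. */
	struct ttm_dma_tt *ttm = container_of(bo->tbo.ttm, struct ttm_dma_tt, ttm);
	*addr = ttm->dma_address[0];

	/* After: dma_address is a member of struct ttm_tt itself. */
	*addr = bo->tbo.ttm->dma_address[0];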
@@ -294,11 +294,9 @@ static int amdgpu_ttm_map_buffer(struct ttm_buffer_object *bo,
 	cpu_addr = &job->ibs[0].ptr[num_dw];
 
 	if (mem->mem_type == TTM_PL_TT) {
-		struct ttm_dma_tt *dma;
 		dma_addr_t *dma_address;
 
-		dma = container_of(bo->ttm, struct ttm_dma_tt, ttm);
-		dma_address = &dma->dma_address[offset >> PAGE_SHIFT];
+		dma_address = &bo->ttm->dma_address[offset >> PAGE_SHIFT];
 		r = amdgpu_gart_map(adev, 0, num_pages, dma_address, flags,
 				    cpu_addr);
 		if (r)
@@ -841,7 +839,7 @@ uint64_t amdgpu_ttm_domain_start(struct amdgpu_device *adev, uint32_t type)
  * TTM backend functions.
  */
 struct amdgpu_ttm_tt {
-	struct ttm_dma_tt	ttm;
+	struct ttm_tt		ttm;
 	struct drm_gem_object	*gobj;
 	u64			offset;
 	uint64_t		userptr;
@@ -1292,7 +1290,7 @@ static void amdgpu_ttm_backend_unbind(struct ttm_bo_device *bdev,
 	r = amdgpu_gart_unbind(adev, gtt->offset, ttm->num_pages);
 	if (r)
 		DRM_ERROR("failed to unbind %u pages at 0x%08llX\n",
-			  gtt->ttm.ttm.num_pages, gtt->offset);
+			  gtt->ttm.num_pages, gtt->offset);
 	gtt->bound = false;
 }
@@ -1306,7 +1304,7 @@ static void amdgpu_ttm_backend_destroy(struct ttm_bo_device *bdev,
 	if (gtt->usertask)
 		put_task_struct(gtt->usertask);
 
-	ttm_dma_tt_fini(&gtt->ttm);
+	ttm_tt_fini(&gtt->ttm);
 	kfree(gtt);
 }
@@ -1340,7 +1338,7 @@ static struct ttm_tt *amdgpu_ttm_tt_create(struct ttm_buffer_object *bo,
 		kfree(gtt);
 		return NULL;
 	}
-	return &gtt->ttm.ttm;
+	return &gtt->ttm;
 }
 
 /**
@@ -1507,7 +1505,7 @@ bool amdgpu_ttm_tt_affect_userptr(struct ttm_tt *ttm, unsigned long start,
 	/* Return false if no part of the ttm_tt object lies within
 	 * the range
 	 */
-	size = (unsigned long)gtt->ttm.ttm.num_pages * PAGE_SIZE;
+	size = (unsigned long)gtt->ttm.num_pages * PAGE_SIZE;
 	if (gtt->userptr > end || gtt->userptr + size <= start)
 		return false;
......
@@ -1781,7 +1781,6 @@ int amdgpu_vm_bo_update(struct amdgpu_device *adev, struct amdgpu_bo_va *bo_va,
 		resv = vm->root.base.bo->tbo.base.resv;
 	} else {
 		struct drm_gem_object *obj = &bo->tbo.base;
-		struct ttm_dma_tt *ttm;
 
 		resv = bo->tbo.base.resv;
 		if (obj->import_attach && bo_va->is_xgmi) {
@@ -1794,10 +1793,8 @@ int amdgpu_vm_bo_update(struct amdgpu_device *adev, struct amdgpu_bo_va *bo_va,
 		}
 		mem = &bo->tbo.mem;
 		nodes = mem->mm_node;
-		if (mem->mem_type == TTM_PL_TT) {
-			ttm = container_of(bo->tbo.ttm, struct ttm_dma_tt, ttm);
-			pages_addr = ttm->dma_address;
-		}
+		if (mem->mem_type == TTM_PL_TT)
+			pages_addr = bo->tbo.ttm->dma_address;
 	}
 
 	if (bo) {
......
@@ -547,7 +547,7 @@ void
 nouveau_bo_sync_for_device(struct nouveau_bo *nvbo)
 {
 	struct nouveau_drm *drm = nouveau_bdev(nvbo->bo.bdev);
-	struct ttm_dma_tt *ttm_dma = (struct ttm_dma_tt *)nvbo->bo.ttm;
+	struct ttm_tt *ttm_dma = (struct ttm_tt *)nvbo->bo.ttm;
 	int i;
 
 	if (!ttm_dma)
@@ -557,7 +557,7 @@ nouveau_bo_sync_for_device(struct nouveau_bo *nvbo)
 	if (nvbo->force_coherent)
 		return;
 
-	for (i = 0; i < ttm_dma->ttm.num_pages; i++)
+	for (i = 0; i < ttm_dma->num_pages; i++)
 		dma_sync_single_for_device(drm->dev->dev,
 					   ttm_dma->dma_address[i],
 					   PAGE_SIZE, DMA_TO_DEVICE);
@@ -567,7 +567,7 @@ void
 nouveau_bo_sync_for_cpu(struct nouveau_bo *nvbo)
 {
 	struct nouveau_drm *drm = nouveau_bdev(nvbo->bo.bdev);
-	struct ttm_dma_tt *ttm_dma = (struct ttm_dma_tt *)nvbo->bo.ttm;
+	struct ttm_tt *ttm_dma = (struct ttm_tt *)nvbo->bo.ttm;
 	int i;
 
 	if (!ttm_dma)
@@ -577,7 +577,7 @@ nouveau_bo_sync_for_cpu(struct nouveau_bo *nvbo)
 	if (nvbo->force_coherent)
 		return;
 
-	for (i = 0; i < ttm_dma->ttm.num_pages; i++)
+	for (i = 0; i < ttm_dma->num_pages; i++)
 		dma_sync_single_for_cpu(drm->dev->dev, ttm_dma->dma_address[i],
 					PAGE_SIZE, DMA_FROM_DEVICE);
 }
@@ -1309,7 +1309,7 @@ static int
 nouveau_ttm_tt_populate(struct ttm_bo_device *bdev,
 			struct ttm_tt *ttm, struct ttm_operation_ctx *ctx)
 {
-	struct ttm_dma_tt *ttm_dma = (void *)ttm;
+	struct ttm_tt *ttm_dma = (void *)ttm;
 	struct nouveau_drm *drm;
 	struct device *dev;
 	bool slave = !!(ttm->page_flags & TTM_PAGE_FLAG_SG);
@@ -1345,7 +1345,7 @@ static void
 nouveau_ttm_tt_unpopulate(struct ttm_bo_device *bdev,
 			  struct ttm_tt *ttm)
 {
-	struct ttm_dma_tt *ttm_dma = (void *)ttm;
+	struct ttm_tt *ttm_dma = (void *)ttm;
 	struct nouveau_drm *drm;
 	struct device *dev;
 	bool slave = !!(ttm->page_flags & TTM_PAGE_FLAG_SG);
......
@@ -92,7 +92,7 @@ nouveau_mem_fini(struct nouveau_mem *mem)
 }
 
 int
-nouveau_mem_host(struct ttm_resource *reg, struct ttm_dma_tt *tt)
+nouveau_mem_host(struct ttm_resource *reg, struct ttm_tt *tt)
 {
 	struct nouveau_mem *mem = nouveau_mem(reg);
 	struct nouveau_cli *cli = mem->cli;
@@ -116,8 +116,10 @@ nouveau_mem_host(struct ttm_resource *reg, struct ttm_dma_tt *tt)
 		mem->comp = 0;
 	}
 
-	if (tt->ttm.sg) args.sgl = tt->ttm.sg->sgl;
-	else            args.dma = tt->dma_address;
+	if (tt->sg)
+		args.sgl = tt->sg->sgl;
+	else
+		args.dma = tt->dma_address;
 
 	mutex_lock(&drm->master.lock);
 	cli->base.super = true;
......
 #ifndef __NOUVEAU_MEM_H__
 #define __NOUVEAU_MEM_H__
 #include <drm/ttm/ttm_bo_api.h>
-struct ttm_dma_tt;
+struct ttm_tt;
 
 #include <nvif/mem.h>
 #include <nvif/vmm.h>
@@ -24,7 +24,7 @@ int nouveau_mem_new(struct nouveau_cli *, u8 kind, u8 comp,
 		    struct ttm_resource *);
 void nouveau_mem_del(struct ttm_resource *);
 int nouveau_mem_vram(struct ttm_resource *, bool contig, u8 page);
-int nouveau_mem_host(struct ttm_resource *, struct ttm_dma_tt *);
+int nouveau_mem_host(struct ttm_resource *, struct ttm_tt *);
 void nouveau_mem_fini(struct nouveau_mem *);
 int nouveau_mem_map(struct nouveau_mem *, struct nvif_vmm *, struct nvif_vma *);
 #endif
@@ -11,7 +11,7 @@ struct nouveau_sgdma_be {
 	/* this has to be the first field so populate/unpopulated in
	 * nouve_bo.c works properly, otherwise have to move them here
	 */
-	struct ttm_dma_tt ttm;
+	struct ttm_tt ttm;
 	struct nouveau_mem *mem;
 };
 
@@ -23,7 +23,7 @@ nouveau_sgdma_destroy(struct ttm_bo_device *bdev, struct ttm_tt *ttm)
 	if (ttm) {
 		nouveau_sgdma_unbind(bdev, ttm);
 		ttm_tt_destroy_common(bdev, ttm);
-		ttm_dma_tt_fini(&nvbe->ttm);
+		ttm_tt_fini(&nvbe->ttm);
 		kfree(nvbe);
 	}
 }
 
@@ -88,5 +88,5 @@ nouveau_sgdma_create_ttm(struct ttm_buffer_object *bo, uint32_t page_flags)
 		kfree(nvbe);
 		return NULL;
 	}
-	return &nvbe->ttm.ttm;
+	return &nvbe->ttm;
 }
@@ -116,7 +116,7 @@ static struct ttm_tt *qxl_ttm_tt_create(struct ttm_buffer_object *bo,
 	ttm = kzalloc(sizeof(struct ttm_tt), GFP_KERNEL);
 	if (ttm == NULL)
 		return NULL;
-	if (ttm_tt_init(ttm, bo, page_flags, ttm_cached)) {
+	if (ttm_dma_tt_init(ttm, bo, page_flags, ttm_cached)) {
 		kfree(ttm);
 		return NULL;
 	}
......
@@ -437,7 +437,7 @@ static int radeon_ttm_io_mem_reserve(struct ttm_bo_device *bdev, struct ttm_reso
  * TTM backend functions.
  */
 struct radeon_ttm_tt {
-	struct ttm_dma_tt		ttm;
+	struct ttm_tt			ttm;
 	u64				offset;
 	uint64_t			userptr;
@@ -602,7 +602,7 @@ static void radeon_ttm_backend_destroy(struct ttm_bo_device *bdev, struct ttm_tt
 	radeon_ttm_backend_unbind(bdev, ttm);
 	ttm_tt_destroy_common(bdev, ttm);
 
-	ttm_dma_tt_fini(&gtt->ttm);
+	ttm_tt_fini(&gtt->ttm);
 	kfree(gtt);
 }
@@ -640,7 +640,7 @@ static struct ttm_tt *radeon_ttm_tt_create(struct ttm_buffer_object *bo,
 		kfree(gtt);
 		return NULL;
 	}
-	return &gtt->ttm.ttm;
+	return &gtt->ttm;
 }
 
 static struct radeon_ttm_tt *radeon_ttm_tt_to_gtt(struct radeon_device *rdev,
@@ -653,7 +653,7 @@ static struct radeon_ttm_tt *radeon_ttm_tt_to_gtt(struct radeon_device *rdev,
 	if (!ttm)
 		return NULL;
 
-	return container_of(ttm, struct radeon_ttm_tt, ttm.ttm);
+	return container_of(ttm, struct radeon_ttm_tt, ttm);
 }
 
 static int radeon_ttm_tt_populate(struct ttm_bo_device *bdev,
......
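
The same simplification seen from a driver-private TT, restated from the radeon and amdgpu hunks above as a sketch: the embedded member is now a plain struct ttm_tt, so the extra ".ttm" hop drops out of both the return expression and the container_of() lookup.

	struct radeon_ttm_tt {
		struct ttm_tt		ttm;	/* was: struct ttm_dma_tt ttm; */
		u64			offset;
		/* ... */
	};

	/* was: return &gtt->ttm.ttm; */
	return &gtt->ttm;

	/* was: return container_of(ttm, struct radeon_ttm_tt, ttm.ttm); */
	return container_of(ttm, struct radeon_ttm_tt, ttm);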
@@ -1192,7 +1192,7 @@ size_t ttm_bo_dma_acc_size(struct ttm_bo_device *bdev,
 
 	size += ttm_round_pot(struct_size);
 	size += ttm_round_pot(npages * (2*sizeof(void *) + sizeof(dma_addr_t)));
-	size += ttm_round_pot(sizeof(struct ttm_dma_tt));
+	size += ttm_round_pot(sizeof(struct ttm_tt));
 	return size;
 }
 EXPORT_SYMBOL(ttm_bo_dma_acc_size);
......
@@ -1081,28 +1081,28 @@ void ttm_pool_unpopulate(struct ttm_tt *ttm)
 }
 EXPORT_SYMBOL(ttm_pool_unpopulate);
 
-int ttm_populate_and_map_pages(struct device *dev, struct ttm_dma_tt *tt,
+int ttm_populate_and_map_pages(struct device *dev, struct ttm_tt *tt,
 			       struct ttm_operation_ctx *ctx)
 {
 	unsigned i, j;
 	int r;
 
-	r = ttm_pool_populate(&tt->ttm, ctx);
+	r = ttm_pool_populate(tt, ctx);
 	if (r)
 		return r;
 
-	for (i = 0; i < tt->ttm.num_pages; ++i) {
-		struct page *p = tt->ttm.pages[i];
+	for (i = 0; i < tt->num_pages; ++i) {
+		struct page *p = tt->pages[i];
 		size_t num_pages = 1;
 
-		for (j = i + 1; j < tt->ttm.num_pages; ++j) {
-			if (++p != tt->ttm.pages[j])
+		for (j = i + 1; j < tt->num_pages; ++j) {
+			if (++p != tt->pages[j])
 				break;
 
 			++num_pages;
 		}
 
-		tt->dma_address[i] = dma_map_page(dev, tt->ttm.pages[i],
+		tt->dma_address[i] = dma_map_page(dev, tt->pages[i],
 						  0, num_pages * PAGE_SIZE,
 						  DMA_BIDIRECTIONAL);
 		if (dma_mapping_error(dev, tt->dma_address[i])) {
@@ -1111,7 +1111,7 @@ int ttm_populate_and_map_pages(struct device *dev, struct ttm_dma_tt *tt,
 					       PAGE_SIZE, DMA_BIDIRECTIONAL);
 				tt->dma_address[i] = 0;
 			}
-			ttm_pool_unpopulate(&tt->ttm);
+			ttm_pool_unpopulate(tt);
 			return -EFAULT;
 		}
@@ -1124,21 +1124,21 @@ int ttm_populate_and_map_pages(struct device *dev, struct ttm_dma_tt *tt,
 }
 EXPORT_SYMBOL(ttm_populate_and_map_pages);
-void ttm_unmap_and_unpopulate_pages(struct device *dev, struct ttm_dma_tt *tt)
+void ttm_unmap_and_unpopulate_pages(struct device *dev, struct ttm_tt *tt)
 {
 	unsigned i, j;
 
-	for (i = 0; i < tt->ttm.num_pages;) {
-		struct page *p = tt->ttm.pages[i];
+	for (i = 0; i < tt->num_pages;) {
+		struct page *p = tt->pages[i];
 		size_t num_pages = 1;
 
-		if (!tt->dma_address[i] || !tt->ttm.pages[i]) {
+		if (!tt->dma_address[i] || !tt->pages[i]) {
 			++i;
 			continue;
 		}
 
-		for (j = i + 1; j < tt->ttm.num_pages; ++j) {
-			if (++p != tt->ttm.pages[j])
+		for (j = i + 1; j < tt->num_pages; ++j) {
+			if (++p != tt->pages[j])
 				break;
 
 			++num_pages;
@@ -1149,7 +1149,7 @@ void ttm_unmap_and_unpopulate_pages(struct device *dev, struct ttm_dma_tt *tt)
 		i += num_pages;
 	}
-	ttm_pool_unpopulate(&tt->ttm);
+	ttm_pool_unpopulate(tt);
 }
 EXPORT_SYMBOL(ttm_unmap_and_unpopulate_pages);
......
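
As a usage illustration (not part of the patch): with the new signatures above, a driver's populate callback can hand its struct ttm_tt straight to the helper without any wrapper cast. The driver name and the mydrv_to_dev() helper below are hypothetical.

	/* Hypothetical driver populate callback using the merged type. */
	static int mydrv_ttm_populate(struct ttm_bo_device *bdev, struct ttm_tt *ttm,
				      struct ttm_operation_ctx *ctx)
	{
		struct device *dev = mydrv_to_dev(bdev);	/* hypothetical helper */

		return ttm_populate_and_map_pages(dev, ttm, ctx);
	}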
@@ -832,11 +832,10 @@ static int ttm_dma_page_pool_fill_locked(struct dma_pool *pool,
  * return dma_page pointer if success, otherwise NULL.
  */
 static struct dma_page *ttm_dma_pool_get_pages(struct dma_pool *pool,
-					       struct ttm_dma_tt *ttm_dma,
+					       struct ttm_tt *ttm,
 					       unsigned index)
 {
 	struct dma_page *d_page = NULL;
-	struct ttm_tt *ttm = &ttm_dma->ttm;
 	unsigned long irq_flags;
 	int count;
 
@@ -845,8 +844,8 @@ static struct dma_page *ttm_dma_pool_get_pages(struct dma_pool *pool,
 	if (count) {
 		d_page = list_first_entry(&pool->free_list, struct dma_page, page_list);
 		ttm->pages[index] = d_page->p;
-		ttm_dma->dma_address[index] = d_page->dma;
-		list_move_tail(&d_page->page_list, &ttm_dma->pages_list);
+		ttm->dma_address[index] = d_page->dma;
+		list_move_tail(&d_page->page_list, &ttm->pages_list);
 		pool->npages_in_use += 1;
 		pool->npages_free -= 1;
 	}
@@ -854,9 +853,8 @@ static struct dma_page *ttm_dma_pool_get_pages(struct dma_pool *pool,
 	return d_page;
 }
 
-static gfp_t ttm_dma_pool_gfp_flags(struct ttm_dma_tt *ttm_dma, bool huge)
+static gfp_t ttm_dma_pool_gfp_flags(struct ttm_tt *ttm, bool huge)
 {
-	struct ttm_tt *ttm = &ttm_dma->ttm;
 	gfp_t gfp_flags;
 
 	if (ttm->page_flags & TTM_PAGE_FLAG_DMA32)
@@ -883,11 +881,10 @@ static gfp_t ttm_dma_pool_gfp_flags(struct ttm_dma_tt *ttm_dma, bool huge)
  * On success pages list will hold count number of correctly
  * cached pages. On failure will hold the negative return value (-ENOMEM, etc).
  */
-int ttm_dma_populate(struct ttm_dma_tt *ttm_dma, struct device *dev,
+int ttm_dma_populate(struct ttm_tt *ttm, struct device *dev,
 		     struct ttm_operation_ctx *ctx)
 {
 	struct ttm_mem_global *mem_glob = &ttm_mem_glob;
-	struct ttm_tt *ttm = &ttm_dma->ttm;
 	unsigned long num_pages = ttm->num_pages;
 	struct dma_pool *pool;
 	struct dma_page *d_page;
@@ -901,7 +898,7 @@ int ttm_dma_populate(struct ttm_dma_tt *ttm_dma, struct device *dev,
 	if (ttm_check_under_lowerlimit(mem_glob, num_pages, ctx))
 		return -ENOMEM;
 
-	INIT_LIST_HEAD(&ttm_dma->pages_list);
+	INIT_LIST_HEAD(&ttm->pages_list);
 	i = 0;
 
 	type = ttm_to_type(ttm->page_flags, ttm->caching);
@@ -912,7 +909,7 @@ int ttm_dma_populate(struct ttm_dma_tt *ttm_dma, struct device *dev,
 	pool = ttm_dma_find_pool(dev, type | IS_HUGE);
 	if (!pool) {
-		gfp_t gfp_flags = ttm_dma_pool_gfp_flags(ttm_dma, true);
+		gfp_t gfp_flags = ttm_dma_pool_gfp_flags(ttm, true);
 
 		pool = ttm_dma_pool_init(dev, gfp_flags, type | IS_HUGE);
 		if (IS_ERR_OR_NULL(pool))
@@ -922,21 +919,21 @@ int ttm_dma_populate(struct ttm_dma_tt *ttm_dma, struct device *dev,
 	while (num_pages >= HPAGE_PMD_NR) {
 		unsigned j;
 
-		d_page = ttm_dma_pool_get_pages(pool, ttm_dma, i);
+		d_page = ttm_dma_pool_get_pages(pool, ttm, i);
 		if (!d_page)
 			break;
 
 		ret = ttm_mem_global_alloc_page(mem_glob, ttm->pages[i],
 						pool->size, ctx);
 		if (unlikely(ret != 0)) {
-			ttm_dma_unpopulate(ttm_dma, dev);
+			ttm_dma_unpopulate(ttm, dev);
 			return -ENOMEM;
 		}
 
 		d_page->vaddr |= VADDR_FLAG_UPDATED_COUNT;
 		for (j = i + 1; j < (i + HPAGE_PMD_NR); ++j) {
 			ttm->pages[j] = ttm->pages[j - 1] + 1;
-			ttm_dma->dma_address[j] = ttm_dma->dma_address[j - 1] +
+			ttm->dma_address[j] = ttm->dma_address[j - 1] +
 				PAGE_SIZE;
 		}
@@ -949,7 +946,7 @@ int ttm_dma_populate(struct ttm_dma_tt *ttm_dma, struct device *dev,
 	pool = ttm_dma_find_pool(dev, type);
 	if (!pool) {
-		gfp_t gfp_flags = ttm_dma_pool_gfp_flags(ttm_dma, false);
+		gfp_t gfp_flags = ttm_dma_pool_gfp_flags(ttm, false);
 
 		pool = ttm_dma_pool_init(dev, gfp_flags, type);
 		if (IS_ERR_OR_NULL(pool))
@@ -957,16 +954,16 @@ int ttm_dma_populate(struct ttm_dma_tt *ttm_dma, struct device *dev,
 	}
 
 	while (num_pages) {
-		d_page = ttm_dma_pool_get_pages(pool, ttm_dma, i);
+		d_page = ttm_dma_pool_get_pages(pool, ttm, i);
 		if (!d_page) {
-			ttm_dma_unpopulate(ttm_dma, dev);
+			ttm_dma_unpopulate(ttm, dev);
 			return -ENOMEM;
 		}
 
 		ret = ttm_mem_global_alloc_page(mem_glob, ttm->pages[i],
 						pool->size, ctx);
 		if (unlikely(ret != 0)) {
-			ttm_dma_unpopulate(ttm_dma, dev);
+			ttm_dma_unpopulate(ttm, dev);
 			return -ENOMEM;
 		}
@@ -980,10 +977,9 @@ int ttm_dma_populate(struct ttm_dma_tt *ttm_dma, struct device *dev,
 EXPORT_SYMBOL_GPL(ttm_dma_populate);
 
 /* Put all pages in pages list to correct pool to wait for reuse */
-void ttm_dma_unpopulate(struct ttm_dma_tt *ttm_dma, struct device *dev)
+void ttm_dma_unpopulate(struct ttm_tt *ttm, struct device *dev)
 {
 	struct ttm_mem_global *mem_glob = &ttm_mem_glob;
-	struct ttm_tt *ttm = &ttm_dma->ttm;
 	struct dma_pool *pool;
 	struct dma_page *d_page, *next;
 	enum pool_type type;
@@ -997,7 +993,7 @@ void ttm_dma_unpopulate(struct ttm_dma_tt *ttm_dma, struct device *dev)
 	pool = ttm_dma_find_pool(dev, type | IS_HUGE);
 	if (pool) {
 		count = 0;
-		list_for_each_entry_safe(d_page, next, &ttm_dma->pages_list,
+		list_for_each_entry_safe(d_page, next, &ttm->pages_list,
 					 page_list) {
 			if (!(d_page->vaddr & VADDR_FLAG_HUGE_POOL))
 				continue;
@@ -1027,7 +1023,7 @@ void ttm_dma_unpopulate(struct ttm_dma_tt *ttm_dma, struct device *dev)
 	/* make sure pages array match list and count number of pages */
 	count = 0;
-	list_for_each_entry_safe(d_page, next, &ttm_dma->pages_list,
+	list_for_each_entry_safe(d_page, next, &ttm->pages_list,
 				 page_list) {
 		ttm->pages[count] = d_page->p;
 		count++;
@@ -1048,7 +1044,7 @@ void ttm_dma_unpopulate(struct ttm_dma_tt *ttm_dma, struct device *dev)
 		pool->nfrees += count;
 	} else {
 		pool->npages_free += count;
-		list_splice(&ttm_dma->pages_list, &pool->free_list);
+		list_splice(&ttm->pages_list, &pool->free_list);
 		/*
 		 * Wait to have at at least NUM_PAGES_TO_ALLOC number of pages
 		 * to free in order to minimize calls to set_memory_wb().
@@ -1059,10 +1055,10 @@ void ttm_dma_unpopulate(struct ttm_dma_tt *ttm_dma, struct device *dev)
 	}
 	spin_unlock_irqrestore(&pool->lock, irq_flags);
 
-	INIT_LIST_HEAD(&ttm_dma->pages_list);
+	INIT_LIST_HEAD(&ttm->pages_list);
 	for (i = 0; i < ttm->num_pages; i++) {
 		ttm->pages[i] = NULL;
-		ttm_dma->dma_address[i] = 0;
+		ttm->dma_address[i] = 0;
 	}
 
 	/* shrink pool if necessary (only on !is_cached pools)*/
......
@@ -92,21 +92,22 @@ static int ttm_tt_alloc_page_directory(struct ttm_tt *ttm)
 	return 0;
 }
 
-static int ttm_dma_tt_alloc_page_directory(struct ttm_dma_tt *ttm)
+static int ttm_dma_tt_alloc_page_directory(struct ttm_tt *ttm)
 {
-	ttm->ttm.pages = kvmalloc_array(ttm->ttm.num_pages,
-					sizeof(*ttm->ttm.pages) +
-					sizeof(*ttm->dma_address),
-					GFP_KERNEL | __GFP_ZERO);
-	if (!ttm->ttm.pages)
+	ttm->pages = kvmalloc_array(ttm->num_pages,
+				    sizeof(*ttm->pages) +
+				    sizeof(*ttm->dma_address),
+				    GFP_KERNEL | __GFP_ZERO);
+	if (!ttm->pages)
 		return -ENOMEM;
-	ttm->dma_address = (void *) (ttm->ttm.pages + ttm->ttm.num_pages);
+
+	ttm->dma_address = (void *)(ttm->pages + ttm->num_pages);
 	return 0;
 }
 
-static int ttm_sg_tt_alloc_page_directory(struct ttm_dma_tt *ttm)
+static int ttm_sg_tt_alloc_page_directory(struct ttm_tt *ttm)
 {
-	ttm->dma_address = kvmalloc_array(ttm->ttm.num_pages,
+	ttm->dma_address = kvmalloc_array(ttm->num_pages,
 					  sizeof(*ttm->dma_address),
 					  GFP_KERNEL | __GFP_ZERO);
 	if (!ttm->dma_address)
@@ -138,8 +139,10 @@ static void ttm_tt_init_fields(struct ttm_tt *ttm,
 	ttm->num_pages = bo->num_pages;
 	ttm->caching = ttm_cached;
 	ttm->page_flags = page_flags;
+	ttm->dma_address = NULL;
 	ttm->swap_storage = NULL;
 	ttm->sg = bo->sg;
+	INIT_LIST_HEAD(&ttm->pages_list);
 	ttm->caching = caching;
 }
@@ -158,20 +161,21 @@ EXPORT_SYMBOL(ttm_tt_init);
 
 void ttm_tt_fini(struct ttm_tt *ttm)
 {
-	kvfree(ttm->pages);
+	if (ttm->pages)
+		kvfree(ttm->pages);
+	else
+		kvfree(ttm->dma_address);
 	ttm->pages = NULL;
+	ttm->dma_address = NULL;
 }
 EXPORT_SYMBOL(ttm_tt_fini);
-int ttm_dma_tt_init(struct ttm_dma_tt *ttm_dma, struct ttm_buffer_object *bo,
+int ttm_dma_tt_init(struct ttm_tt *ttm, struct ttm_buffer_object *bo,
 		    uint32_t page_flags, enum ttm_caching caching)
 {
-	struct ttm_tt *ttm = &ttm_dma->ttm;
-
 	ttm_tt_init_fields(ttm, bo, page_flags, caching);
 
-	INIT_LIST_HEAD(&ttm_dma->pages_list);
-	if (ttm_dma_tt_alloc_page_directory(ttm_dma)) {
+	if (ttm_dma_tt_alloc_page_directory(ttm)) {
 		pr_err("Failed allocating page table\n");
 		return -ENOMEM;
 	}
@@ -179,19 +183,17 @@ int ttm_dma_tt_init(struct ttm_dma_tt *ttm_dma, struct ttm_buffer_object *bo,
 }
 EXPORT_SYMBOL(ttm_dma_tt_init);
 
-int ttm_sg_tt_init(struct ttm_dma_tt *ttm_dma, struct ttm_buffer_object *bo,
+int ttm_sg_tt_init(struct ttm_tt *ttm, struct ttm_buffer_object *bo,
 		   uint32_t page_flags, enum ttm_caching caching)
 {
-	struct ttm_tt *ttm = &ttm_dma->ttm;
 	int ret;
 
 	ttm_tt_init_fields(ttm, bo, page_flags, caching);
 
-	INIT_LIST_HEAD(&ttm_dma->pages_list);
 	if (page_flags & TTM_PAGE_FLAG_SG)
-		ret = ttm_sg_tt_alloc_page_directory(ttm_dma);
+		ret = ttm_sg_tt_alloc_page_directory(ttm);
 	else
-		ret = ttm_dma_tt_alloc_page_directory(ttm_dma);
+		ret = ttm_dma_tt_alloc_page_directory(ttm);
 	if (ret) {
 		pr_err("Failed allocating page table\n");
 		return -ENOMEM;
@@ -200,19 +202,6 @@ int ttm_sg_tt_init(struct ttm_dma_tt *ttm_dma, struct ttm_buffer_object *bo,
 }
 EXPORT_SYMBOL(ttm_sg_tt_init);
 
-void ttm_dma_tt_fini(struct ttm_dma_tt *ttm_dma)
-{
-	struct ttm_tt *ttm = &ttm_dma->ttm;
-
-	if (ttm->pages)
-		kvfree(ttm->pages);
-	else
-		kvfree(ttm_dma->dma_address);
-	ttm->pages = NULL;
-	ttm_dma->dma_address = NULL;
-}
-EXPORT_SYMBOL(ttm_dma_tt_fini);
-
 int ttm_tt_swapin(struct ttm_tt *ttm)
 {
 	struct address_space *swap_space;
......
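
One detail worth noting from the ttm_tt.c hunk above: ttm_dma_tt_alloc_page_directory() still uses a single allocation for both arrays, so dma_address is not a separate kvmalloc; it points just past the page-pointer array. Annotated restatement, sketch only:

	/* One kvmalloc_array() sized for num_pages page pointers plus
	 * num_pages dma_addr_t entries, zero-initialized. */
	ttm->pages = kvmalloc_array(ttm->num_pages,
				    sizeof(*ttm->pages) + sizeof(*ttm->dma_address),
				    GFP_KERNEL | __GFP_ZERO);
	if (!ttm->pages)
		return -ENOMEM;

	/* dma_address aliases the tail of the same allocation, which is why
	 * ttm_tt_fini() frees either pages or dma_address, never both. */
	ttm->dma_address = (void *)(ttm->pages + ttm->num_pages);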
@@ -186,7 +186,7 @@ struct ttm_placement vmw_nonfixed_placement = {
 };
 
 struct vmw_ttm_tt {
-	struct ttm_dma_tt dma_ttm;
+	struct ttm_tt dma_ttm;
 	struct vmw_private *dev_priv;
 	int gmr_id;
 	struct vmw_mob *mob;
@@ -374,8 +374,8 @@ static int vmw_ttm_map_dma(struct vmw_ttm_tt *vmw_tt)
 		return 0;
 
 	vsgt->mode = dev_priv->map_mode;
-	vsgt->pages = vmw_tt->dma_ttm.ttm.pages;
-	vsgt->num_pages = vmw_tt->dma_ttm.ttm.num_pages;
+	vsgt->pages = vmw_tt->dma_ttm.pages;
+	vsgt->num_pages = vmw_tt->dma_ttm.num_pages;
 	vsgt->addrs = vmw_tt->dma_ttm.dma_address;
 	vsgt->sgt = &vmw_tt->sgt;
@@ -483,7 +483,7 @@ static void vmw_ttm_unmap_dma(struct vmw_ttm_tt *vmw_tt)
 const struct vmw_sg_table *vmw_bo_sg_table(struct ttm_buffer_object *bo)
 {
 	struct vmw_ttm_tt *vmw_tt =
-		container_of(bo->ttm, struct vmw_ttm_tt, dma_ttm.ttm);
+		container_of(bo->ttm, struct vmw_ttm_tt, dma_ttm);
 
 	return &vmw_tt->vsgt;
 }
@@ -493,7 +493,7 @@ static int vmw_ttm_bind(struct ttm_bo_device *bdev,
 			struct ttm_tt *ttm, struct ttm_resource *bo_mem)
 {
 	struct vmw_ttm_tt *vmw_be =
-		container_of(ttm, struct vmw_ttm_tt, dma_ttm.ttm);
+		container_of(ttm, struct vmw_ttm_tt, dma_ttm);
 	int ret = 0;
 
 	if (!bo_mem)
@@ -537,7 +537,7 @@ static void vmw_ttm_unbind(struct ttm_bo_device *bdev,
 			   struct ttm_tt *ttm)
 {
 	struct vmw_ttm_tt *vmw_be =
-		container_of(ttm, struct vmw_ttm_tt, dma_ttm.ttm);
+		container_of(ttm, struct vmw_ttm_tt, dma_ttm);
 
 	if (!vmw_be->bound)
 		return;
@@ -562,13 +562,13 @@ static void vmw_ttm_unbind(struct ttm_bo_device *bdev,
 static void vmw_ttm_destroy(struct ttm_bo_device *bdev, struct ttm_tt *ttm)
 {
 	struct vmw_ttm_tt *vmw_be =
-		container_of(ttm, struct vmw_ttm_tt, dma_ttm.ttm);
+		container_of(ttm, struct vmw_ttm_tt, dma_ttm);
 
 	vmw_ttm_unbind(bdev, ttm);
 	ttm_tt_destroy_common(bdev, ttm);
 	vmw_ttm_unmap_dma(vmw_be);
 	if (vmw_be->dev_priv->map_mode == vmw_dma_alloc_coherent)
-		ttm_dma_tt_fini(&vmw_be->dma_ttm);
+		ttm_tt_fini(&vmw_be->dma_ttm);
 	else
 		ttm_tt_fini(ttm);
@@ -583,7 +583,7 @@ static int vmw_ttm_populate(struct ttm_bo_device *bdev,
 			    struct ttm_tt *ttm, struct ttm_operation_ctx *ctx)
 {
 	struct vmw_ttm_tt *vmw_tt =
-		container_of(ttm, struct vmw_ttm_tt, dma_ttm.ttm);
+		container_of(ttm, struct vmw_ttm_tt, dma_ttm);
 	struct vmw_private *dev_priv = vmw_tt->dev_priv;
 	struct ttm_mem_global *glob = vmw_mem_glob(dev_priv);
 	int ret;
@@ -612,7 +612,7 @@ static void vmw_ttm_unpopulate(struct ttm_bo_device *bdev,
 			       struct ttm_tt *ttm)
 {
 	struct vmw_ttm_tt *vmw_tt = container_of(ttm, struct vmw_ttm_tt,
-						 dma_ttm.ttm);
+						 dma_ttm);
 	struct vmw_private *dev_priv = vmw_tt->dev_priv;
 	struct ttm_mem_global *glob = vmw_mem_glob(dev_priv);
@@ -650,12 +650,12 @@ static struct ttm_tt *vmw_ttm_tt_create(struct ttm_buffer_object *bo,
 		ret = ttm_dma_tt_init(&vmw_be->dma_ttm, bo, page_flags,
 				      ttm_cached);
 	else
-		ret = ttm_tt_init(&vmw_be->dma_ttm.ttm, bo, page_flags,
+		ret = ttm_tt_init(&vmw_be->dma_ttm, bo, page_flags,
 				  ttm_cached);
 	if (unlikely(ret != 0))
 		goto out_no_init;
 
-	return &vmw_be->dma_ttm.ttm;
+	return &vmw_be->dma_ttm;
 out_no_init:
 	kfree(vmw_be);
 	return NULL;
@@ -813,7 +813,7 @@ int vmw_bo_create_and_populate(struct vmw_private *dev_priv,
 	ret = vmw_ttm_populate(bo->bdev, bo->ttm, &ctx);
 	if (likely(ret == 0)) {
 		struct vmw_ttm_tt *vmw_tt =
-			container_of(bo->ttm, struct vmw_ttm_tt, dma_ttm.ttm);
+			container_of(bo->ttm, struct vmw_ttm_tt, dma_ttm);
 		ret = vmw_ttm_map_dma(vmw_tt);
 	}
......
@@ -61,13 +61,13 @@ void ttm_pool_unpopulate(struct ttm_tt *ttm);
 /**
  * Populates and DMA maps pages to fullfil a ttm_dma_populate() request
  */
-int ttm_populate_and_map_pages(struct device *dev, struct ttm_dma_tt *tt,
+int ttm_populate_and_map_pages(struct device *dev, struct ttm_tt *tt,
 			       struct ttm_operation_ctx *ctx);
 
 /**
  * Unpopulates and DMA unmaps pages as part of a
  * ttm_dma_unpopulate() request */
-void ttm_unmap_and_unpopulate_pages(struct device *dev, struct ttm_dma_tt *tt);
+void ttm_unmap_and_unpopulate_pages(struct device *dev, struct ttm_tt *tt);
 
 /**
  * Output the state of pools to debugfs file
@@ -90,9 +90,9 @@ void ttm_dma_page_alloc_fini(void);
  */
 int ttm_dma_page_alloc_debugfs(struct seq_file *m, void *data);
 
-int ttm_dma_populate(struct ttm_dma_tt *ttm_dma, struct device *dev,
+int ttm_dma_populate(struct ttm_tt *ttm_dma, struct device *dev,
 		     struct ttm_operation_ctx *ctx);
-void ttm_dma_unpopulate(struct ttm_dma_tt *ttm_dma, struct device *dev);
+void ttm_dma_unpopulate(struct ttm_tt *ttm_dma, struct device *dev);
 
 #else
 static inline int ttm_dma_page_alloc_init(struct ttm_mem_global *glob,
@@ -107,13 +107,13 @@ static inline int ttm_dma_page_alloc_debugfs(struct seq_file *m, void *data)
 {
 	return 0;
 }
-static inline int ttm_dma_populate(struct ttm_dma_tt *ttm_dma,
+static inline int ttm_dma_populate(struct ttm_tt *ttm_dma,
 				   struct device *dev,
 				   struct ttm_operation_ctx *ctx)
 {
 	return -ENOMEM;
 }
-static inline void ttm_dma_unpopulate(struct ttm_dma_tt *ttm_dma,
+static inline void ttm_dma_unpopulate(struct ttm_tt *ttm_dma,
 				      struct device *dev)
 {
 }
......
@@ -47,12 +47,13 @@ struct ttm_operation_ctx;
  * struct ttm_tt
  *
  * @pages: Array of pages backing the data.
+ * @page_flags: see TTM_PAGE_FLAG_*
  * @num_pages: Number of pages in the page array.
- * @bdev: Pointer to the current struct ttm_bo_device.
- * @be: Pointer to the ttm backend.
+ * @sg: for SG objects via dma-buf
+ * @dma_address: The DMA (bus) addresses of the pages
  * @swap_storage: Pointer to shmem struct file for swap storage.
- * @caching_state: The current caching state of the pages.
- * @state: The current binding state of the pages.
+ * @pages_list: used by some page allocation backend
+ * @caching: The current caching state of the pages.
  *
 * This is a structure holding the pages, caching- and aperture binding
 * status for a buffer object that isn't backed by fixed (VRAM / AGP)
@@ -62,8 +63,10 @@ struct ttm_tt {
 	struct page **pages;
 	uint32_t page_flags;
 	uint32_t num_pages;
-	struct sg_table *sg; /* for SG objects via dma-buf */
+	struct sg_table *sg;
+	dma_addr_t *dma_address;
 	struct file *swap_storage;
+	struct list_head pages_list;
 	enum ttm_caching caching;
 };
@@ -72,23 +75,6 @@ static inline bool ttm_tt_is_populated(struct ttm_tt *tt)
 	return tt->page_flags & TTM_PAGE_FLAG_PRIV_POPULATED;
 }
 
-/**
- * struct ttm_dma_tt
- *
- * @ttm: Base ttm_tt struct.
- * @dma_address: The DMA (bus) addresses of the pages
- * @pages_list: used by some page allocation backend
- *
- * This is a structure holding the pages, caching- and aperture binding
- * status for a buffer object that isn't backed by fixed (VRAM / AGP)
- * memory.
- */
-struct ttm_dma_tt {
-	struct ttm_tt ttm;
-	dma_addr_t *dma_address;
-	struct list_head pages_list;
-};
-
 /**
  * ttm_tt_create
  *
@@ -115,9 +101,9 @@ int ttm_tt_create(struct ttm_buffer_object *bo, bool zero_alloc);
  */
 int ttm_tt_init(struct ttm_tt *ttm, struct ttm_buffer_object *bo,
 		uint32_t page_flags, enum ttm_caching caching);
-int ttm_dma_tt_init(struct ttm_dma_tt *ttm_dma, struct ttm_buffer_object *bo,
+int ttm_dma_tt_init(struct ttm_tt *ttm_dma, struct ttm_buffer_object *bo,
 		    uint32_t page_flags, enum ttm_caching caching);
-int ttm_sg_tt_init(struct ttm_dma_tt *ttm_dma, struct ttm_buffer_object *bo,
+int ttm_sg_tt_init(struct ttm_tt *ttm_dma, struct ttm_buffer_object *bo,
 		   uint32_t page_flags, enum ttm_caching caching);
 
 /**
@@ -128,7 +114,6 @@ int ttm_sg_tt_init(struct ttm_dma_tt *ttm_dma, struct ttm_buffer_object *bo,
  * Free memory of ttm_tt structure
  */
 void ttm_tt_fini(struct ttm_tt *ttm);
-void ttm_dma_tt_fini(struct ttm_dma_tt *ttm_dma);
 
 /**
  * ttm_ttm_destroy:
......