Commit 43d46f0b authored by Matthew Auld, committed by Christian König

drm/ttm: s/FLAG_SG/FLAG_EXTERNAL/

The flag covers more than just ttm_bo_type_sg usage, for example dma-buf
imports: another existing user is userptr in amdgpu, and in the future there
might be more. Hence EXTERNAL is likely a more suitable name.

v2 (Christian):
  - Rename these to TTM_TT_FLAG_*
  - Fix up all the holes in the flag values
Suggested-by: Christian König <christian.koenig@amd.com>
Signed-off-by: Matthew Auld <matthew.auld@intel.com>
Cc: Thomas Hellström <thomas.hellstrom@linux.intel.com>
Cc: Christian König <christian.koenig@amd.com>
Acked-by: Christian König <christian.koenig@amd.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20210929132629.353541-1-matthew.auld@intel.com
Signed-off-by: Christian König <christian.koenig@amd.com>
parent d0f5d790
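
For quick reference, the rename and the repacked bit values, exactly as they
land in the include/drm/ttm/ttm_tt.h hunk at the end of this diff:

    /* Old names, with holes between the bit values: */
    #define TTM_PAGE_FLAG_SWAPPED        (1 << 4)
    #define TTM_PAGE_FLAG_ZERO_ALLOC     (1 << 6)
    #define TTM_PAGE_FLAG_SG             (1 << 8)
    #define TTM_PAGE_FLAG_PRIV_POPULATED (1 << 31)

    /* New names, moved to the TTM_TT_FLAG_* namespace and packed
     * contiguously per the v2 feedback: */
    #define TTM_TT_FLAG_SWAPPED          (1 << 0)
    #define TTM_TT_FLAG_ZERO_ALLOC       (1 << 1)
    #define TTM_TT_FLAG_EXTERNAL         (1 << 2)  /* was TTM_PAGE_FLAG_SG */
    #define TTM_TT_FLAG_PRIV_POPULATED   (1 << 31)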
@@ -894,7 +894,7 @@ static int amdgpu_ttm_backend_bind(struct ttm_device *bdev,
 			DRM_ERROR("failed to pin userptr\n");
 			return r;
 		}
-	} else if (ttm->page_flags & TTM_PAGE_FLAG_SG) {
+	} else if (ttm->page_flags & TTM_TT_FLAG_EXTERNAL) {
 		if (!ttm->sg) {
 			struct dma_buf_attachment *attach;
 			struct sg_table *sgt;
@@ -1130,7 +1130,7 @@ static int amdgpu_ttm_tt_populate(struct ttm_device *bdev,
 		return 0;
 	}

-	if (ttm->page_flags & TTM_PAGE_FLAG_SG)
+	if (ttm->page_flags & TTM_TT_FLAG_EXTERNAL)
 		return 0;

 	ret = ttm_pool_alloc(&adev->mman.bdev.pool, ttm, ctx);
@@ -1165,7 +1165,7 @@ static void amdgpu_ttm_tt_unpopulate(struct ttm_device *bdev,
 		return;
 	}

-	if (ttm->page_flags & TTM_PAGE_FLAG_SG)
+	if (ttm->page_flags & TTM_TT_FLAG_EXTERNAL)
 		return;

 	for (i = 0; i < ttm->num_pages; ++i)
@@ -1198,8 +1198,8 @@ int amdgpu_ttm_tt_set_userptr(struct ttm_buffer_object *bo,
 		return -ENOMEM;
 	}

-	/* Set TTM_PAGE_FLAG_SG before populate but after create. */
-	bo->ttm->page_flags |= TTM_PAGE_FLAG_SG;
+	/* Set TTM_TT_FLAG_EXTERNAL before populate but after create. */
+	bo->ttm->page_flags |= TTM_TT_FLAG_EXTERNAL;

 	gtt = (void *)bo->ttm;
 	gtt->userptr = addr;
...
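
The comment kept in amdgpu_ttm_tt_set_userptr() states the one ordering rule
attached to this flag: it must be set after the ttm_tt is created but before
it is populated, so the populate/unpopulate paths above see it and return
early. A minimal standalone sketch of that contract, using stub types rather
than the kernel API (tt_stub and populate_stub are illustrative names):

    #include <stdint.h>

    #define TTM_TT_FLAG_EXTERNAL (1 << 2)

    struct tt_stub { uint32_t page_flags; };

    /* Mirrors the early return added to amdgpu_ttm_tt_populate() above:
     * externally backed ttm_tts never allocate from the TTM pool. */
    static int populate_stub(struct tt_stub *ttm)
    {
            if (ttm->page_flags & TTM_TT_FLAG_EXTERNAL)
                    return 0;
            /* ... ttm_pool_alloc() would run here for normal ttm_tts ... */
            return 0;
    }

    int main(void)
    {
            struct tt_stub tt = { 0 };              /* after "create" ...  */
            tt.page_flags |= TTM_TT_FLAG_EXTERNAL;  /* ... before populate */
            return populate_stub(&tt);
    }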
@@ -182,7 +182,7 @@ static struct ttm_tt *i915_ttm_tt_create(struct ttm_buffer_object *bo,

 	if (obj->flags & I915_BO_ALLOC_CPU_CLEAR &&
 	    man->use_tt)
-		page_flags |= TTM_PAGE_FLAG_ZERO_ALLOC;
+		page_flags |= TTM_TT_FLAG_ZERO_ALLOC;

 	ret = ttm_tt_init(&i915_tt->ttm, bo, page_flags,
 			  i915_ttm_select_tt_caching(obj));
@@ -451,7 +451,7 @@ static int i915_ttm_accel_move(struct ttm_buffer_object *bo,
 		if (bo->type == ttm_bo_type_kernel)
 			return -EINVAL;

-		if (ttm && !(ttm->page_flags & TTM_PAGE_FLAG_ZERO_ALLOC))
+		if (ttm && !(ttm->page_flags & TTM_TT_FLAG_ZERO_ALLOC))
 			return 0;

 		intel_engine_pm_get(i915->gt.migrate.context->engine);
@@ -525,7 +525,7 @@ static int i915_ttm_move(struct ttm_buffer_object *bo, bool evict,

 	/* Populate ttm with pages if needed. Typically system memory. */
 	if (bo->ttm && (dst_man->use_tt ||
-			(bo->ttm->page_flags & TTM_PAGE_FLAG_SWAPPED))) {
+			(bo->ttm->page_flags & TTM_TT_FLAG_SWAPPED))) {
 		ret = ttm_tt_populate(bo->bdev, bo->ttm, ctx);
 		if (ret)
 			return ret;
...
@@ -1249,7 +1249,7 @@ nouveau_ttm_tt_populate(struct ttm_device *bdev,
 	struct ttm_tt *ttm_dma = (void *)ttm;
 	struct nouveau_drm *drm;
 	struct device *dev;
-	bool slave = !!(ttm->page_flags & TTM_PAGE_FLAG_SG);
+	bool slave = !!(ttm->page_flags & TTM_TT_FLAG_EXTERNAL);

 	if (ttm_tt_is_populated(ttm))
 		return 0;
@@ -1272,7 +1272,7 @@ nouveau_ttm_tt_unpopulate(struct ttm_device *bdev,
 {
 	struct nouveau_drm *drm;
 	struct device *dev;
-	bool slave = !!(ttm->page_flags & TTM_PAGE_FLAG_SG);
+	bool slave = !!(ttm->page_flags & TTM_TT_FLAG_EXTERNAL);

 	if (slave)
 		return;
...
@@ -545,14 +545,14 @@ static int radeon_ttm_tt_populate(struct ttm_device *bdev,
 {
 	struct radeon_device *rdev = radeon_get_rdev(bdev);
 	struct radeon_ttm_tt *gtt = radeon_ttm_tt_to_gtt(rdev, ttm);
-	bool slave = !!(ttm->page_flags & TTM_PAGE_FLAG_SG);
+	bool slave = !!(ttm->page_flags & TTM_TT_FLAG_EXTERNAL);

 	if (gtt && gtt->userptr) {
 		ttm->sg = kzalloc(sizeof(struct sg_table), GFP_KERNEL);
 		if (!ttm->sg)
 			return -ENOMEM;

-		ttm->page_flags |= TTM_PAGE_FLAG_SG;
+		ttm->page_flags |= TTM_TT_FLAG_EXTERNAL;
 		return 0;
 	}
@@ -569,13 +569,13 @@ static void radeon_ttm_tt_unpopulate(struct ttm_device *bdev, struct ttm_tt *ttm)
 {
 	struct radeon_device *rdev = radeon_get_rdev(bdev);
 	struct radeon_ttm_tt *gtt = radeon_ttm_tt_to_gtt(rdev, ttm);
-	bool slave = !!(ttm->page_flags & TTM_PAGE_FLAG_SG);
+	bool slave = !!(ttm->page_flags & TTM_TT_FLAG_EXTERNAL);

 	radeon_ttm_tt_unbind(bdev, ttm);

 	if (gtt && gtt->userptr) {
 		kfree(ttm->sg);
-		ttm->page_flags &= ~TTM_PAGE_FLAG_SG;
+		ttm->page_flags &= ~TTM_TT_FLAG_EXTERNAL;
 		return;
 	}
...
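
nouveau and radeon both reduce the flag to a local slave bool, meaning "the
pages are owned elsewhere". radeon additionally toggles the flag itself for
userptr objects: set in populate once the sg_table is allocated, cleared in
unpopulate after freeing it. A standalone sketch of that lifecycle with stub
types (tt_stub and the function names are illustrative; the real code uses
kzalloc/kfree and struct sg_table):

    #include <stdint.h>
    #include <stdlib.h>

    #define TTM_TT_FLAG_EXTERNAL (1 << 2)
    #define ENOMEM_STUB 12

    struct tt_stub {
            uint32_t page_flags;
            void *sg;               /* stands in for struct sg_table * */
    };

    /* Mirrors the userptr branch of radeon_ttm_tt_populate() above. */
    static int populate_userptr(struct tt_stub *ttm)
    {
            ttm->sg = calloc(1, 64); /* kzalloc(sizeof(struct sg_table)) */
            if (!ttm->sg)
                    return -ENOMEM_STUB;
            ttm->page_flags |= TTM_TT_FLAG_EXTERNAL;
            return 0;
    }

    /* Mirrors radeon_ttm_tt_unpopulate(): free the table, clear the flag. */
    static void unpopulate_userptr(struct tt_stub *ttm)
    {
            free(ttm->sg);
            ttm->sg = NULL;
            ttm->page_flags &= ~TTM_TT_FLAG_EXTERNAL;
    }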
@@ -1115,8 +1115,8 @@ int ttm_bo_swapout(struct ttm_buffer_object *bo, struct ttm_operation_ctx *ctx,
 		return -EBUSY;

 	if (!bo->ttm || !ttm_tt_is_populated(bo->ttm) ||
-	    bo->ttm->page_flags & TTM_PAGE_FLAG_SG ||
-	    bo->ttm->page_flags & TTM_PAGE_FLAG_SWAPPED ||
+	    bo->ttm->page_flags & TTM_TT_FLAG_EXTERNAL ||
+	    bo->ttm->page_flags & TTM_TT_FLAG_SWAPPED ||
 	    !ttm_bo_get_unless_zero(bo)) {
 		if (locked)
 			dma_resv_unlock(bo->base.resv);
...
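
In ttm_bo_swapout() the flag acts as a swap-eligibility filter: TTM does not
own externally backed pages, so it cannot write them out, and already swapped
or unpopulated ttm_tts are skipped as well. A condensed predicate sketch with
stub types (it omits the ttm_bo_get_unless_zero() refcount check from the
real code):

    #include <stdbool.h>
    #include <stdint.h>

    #define TTM_TT_FLAG_SWAPPED  (1 << 0)
    #define TTM_TT_FLAG_EXTERNAL (1 << 2)

    struct tt_stub { uint32_t page_flags; bool populated; };

    /* True when the hunk above would skip the buffer for swapout. */
    static bool swapout_skip(const struct tt_stub *ttm)
    {
            return !ttm || !ttm->populated ||
                   (ttm->page_flags & TTM_TT_FLAG_EXTERNAL) ||
                   (ttm->page_flags & TTM_TT_FLAG_SWAPPED);
    }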
@@ -103,7 +103,7 @@ void ttm_move_memcpy(struct ttm_buffer_object *bo,

 	/* Don't move nonexistent data. Clear destination instead. */
 	if (src_ops->maps_tt && (!ttm || !ttm_tt_is_populated(ttm))) {
-		if (ttm && !(ttm->page_flags & TTM_PAGE_FLAG_ZERO_ALLOC))
+		if (ttm && !(ttm->page_flags & TTM_TT_FLAG_ZERO_ALLOC))
 			return;

 		for (i = 0; i < num_pages; ++i) {
@@ -150,7 +150,7 @@ int ttm_bo_move_memcpy(struct ttm_buffer_object *bo,
 	struct ttm_kmap_iter *dst_iter, *src_iter;
 	int ret = 0;

-	if (ttm && ((ttm->page_flags & TTM_PAGE_FLAG_SWAPPED) ||
+	if (ttm && ((ttm->page_flags & TTM_TT_FLAG_SWAPPED) ||
 		    dst_man->use_tt)) {
 		ret = ttm_tt_populate(bdev, ttm, ctx);
 		if (ret)
...
@@ -162,7 +162,7 @@ vm_fault_t ttm_bo_vm_reserve(struct ttm_buffer_object *bo,
 	 * Refuse to fault imported pages. This should be handled
 	 * (if at all) by redirecting mmap to the exporter.
 	 */
-	if (bo->ttm && (bo->ttm->page_flags & TTM_PAGE_FLAG_SG)) {
+	if (bo->ttm && (bo->ttm->page_flags & TTM_TT_FLAG_EXTERNAL)) {
 		dma_resv_unlock(bo->base.resv);
 		return VM_FAULT_SIGBUS;
 	}
...
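
ttm_bo_vm_reserve() uses the same flag on the CPU fault path: faulting
imported pages is refused, since mmap of such objects should be redirected to
the exporter, so the handler unlocks and signals SIGBUS. A condensed sketch
with stub types (the enum stands in for the kernel's vm_fault_t):

    #include <stdint.h>

    #define TTM_TT_FLAG_EXTERNAL (1 << 2)

    enum fault_stub { FAULT_OK, FAULT_SIGBUS }; /* stand-in for vm_fault_t */

    struct tt_stub { uint32_t page_flags; };

    /* Mirrors the check in ttm_bo_vm_reserve() above. */
    static enum fault_stub reserve_stub(const struct tt_stub *ttm)
    {
            if (ttm && (ttm->page_flags & TTM_TT_FLAG_EXTERNAL))
                    return FAULT_SIGBUS; /* imported pages: exporter's job */
            return FAULT_OK;
    }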
@@ -371,7 +371,7 @@ int ttm_pool_alloc(struct ttm_pool *pool, struct ttm_tt *tt,
 	WARN_ON(!num_pages || ttm_tt_is_populated(tt));
 	WARN_ON(dma_addr && !pool->dev);

-	if (tt->page_flags & TTM_PAGE_FLAG_ZERO_ALLOC)
+	if (tt->page_flags & TTM_TT_FLAG_ZERO_ALLOC)
 		gfp_flags |= __GFP_ZERO;

 	if (ctx->gfp_retry_mayfail)
...
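
ttm_pool_alloc() is where TTM_TT_FLAG_ZERO_ALLOC takes effect: it is
translated into __GFP_ZERO so the page allocator returns zeroed pages. A
one-function sketch of the mapping (GFP_ZERO_STUB is a placeholder; the real
bit lives in the kernel's gfp headers):

    #include <stdint.h>

    #define TTM_TT_FLAG_ZERO_ALLOC (1 << 1)
    #define GFP_ZERO_STUB          (1u << 8) /* placeholder for __GFP_ZERO */

    /* Mirrors the flag translation in ttm_pool_alloc() above. */
    static uint32_t gfp_for_tt(uint32_t page_flags, uint32_t gfp_flags)
    {
            if (page_flags & TTM_TT_FLAG_ZERO_ALLOC)
                    gfp_flags |= GFP_ZERO_STUB;
            return gfp_flags;
    }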
@@ -68,12 +68,12 @@ int ttm_tt_create(struct ttm_buffer_object *bo, bool zero_alloc)
 	switch (bo->type) {
 	case ttm_bo_type_device:
 		if (zero_alloc)
-			page_flags |= TTM_PAGE_FLAG_ZERO_ALLOC;
+			page_flags |= TTM_TT_FLAG_ZERO_ALLOC;
 		break;
 	case ttm_bo_type_kernel:
 		break;
 	case ttm_bo_type_sg:
-		page_flags |= TTM_PAGE_FLAG_SG;
+		page_flags |= TTM_TT_FLAG_EXTERNAL;
 		break;
 	default:
 		pr_err("Illegal buffer object type\n");
@@ -156,7 +156,7 @@ EXPORT_SYMBOL(ttm_tt_init);

 void ttm_tt_fini(struct ttm_tt *ttm)
 {
-	WARN_ON(ttm->page_flags & TTM_PAGE_FLAG_PRIV_POPULATED);
+	WARN_ON(ttm->page_flags & TTM_TT_FLAG_PRIV_POPULATED);

 	if (ttm->swap_storage)
 		fput(ttm->swap_storage);
@@ -178,7 +178,7 @@ int ttm_sg_tt_init(struct ttm_tt *ttm, struct ttm_buffer_object *bo,
 	ttm_tt_init_fields(ttm, bo, page_flags, caching);

-	if (page_flags & TTM_PAGE_FLAG_SG)
+	if (page_flags & TTM_TT_FLAG_EXTERNAL)
 		ret = ttm_sg_tt_alloc_page_directory(ttm);
 	else
 		ret = ttm_dma_tt_alloc_page_directory(ttm);
@@ -224,7 +224,7 @@ int ttm_tt_swapin(struct ttm_tt *ttm)
 	fput(swap_storage);

 	ttm->swap_storage = NULL;
-	ttm->page_flags &= ~TTM_PAGE_FLAG_SWAPPED;
+	ttm->page_flags &= ~TTM_TT_FLAG_SWAPPED;

 	return 0;
@@ -279,7 +279,7 @@ int ttm_tt_swapout(struct ttm_device *bdev, struct ttm_tt *ttm,
 	ttm_tt_unpopulate(bdev, ttm);
 	ttm->swap_storage = swap_storage;
-	ttm->page_flags |= TTM_PAGE_FLAG_SWAPPED;
+	ttm->page_flags |= TTM_TT_FLAG_SWAPPED;

 	return ttm->num_pages;
@@ -300,7 +300,7 @@ int ttm_tt_populate(struct ttm_device *bdev,
 	if (ttm_tt_is_populated(ttm))
 		return 0;

-	if (!(ttm->page_flags & TTM_PAGE_FLAG_SG)) {
+	if (!(ttm->page_flags & TTM_TT_FLAG_EXTERNAL)) {
 		atomic_long_add(ttm->num_pages, &ttm_pages_allocated);
 		if (bdev->pool.use_dma32)
 			atomic_long_add(ttm->num_pages,
@@ -325,8 +325,8 @@ int ttm_tt_populate(struct ttm_device *bdev,
 	if (ret)
 		goto error;

-	ttm->page_flags |= TTM_PAGE_FLAG_PRIV_POPULATED;
-	if (unlikely(ttm->page_flags & TTM_PAGE_FLAG_SWAPPED)) {
+	ttm->page_flags |= TTM_TT_FLAG_PRIV_POPULATED;
+	if (unlikely(ttm->page_flags & TTM_TT_FLAG_SWAPPED)) {
 		ret = ttm_tt_swapin(ttm);
 		if (unlikely(ret != 0)) {
 			ttm_tt_unpopulate(bdev, ttm);
@@ -337,7 +337,7 @@ int ttm_tt_populate(struct ttm_device *bdev,
 	return 0;

 error:
-	if (!(ttm->page_flags & TTM_PAGE_FLAG_SG)) {
+	if (!(ttm->page_flags & TTM_TT_FLAG_EXTERNAL)) {
 		atomic_long_sub(ttm->num_pages, &ttm_pages_allocated);
 		if (bdev->pool.use_dma32)
 			atomic_long_sub(ttm->num_pages,
@@ -357,14 +357,14 @@ void ttm_tt_unpopulate(struct ttm_device *bdev, struct ttm_tt *ttm)
 	else
 		ttm_pool_free(&bdev->pool, ttm);

-	if (!(ttm->page_flags & TTM_PAGE_FLAG_SG)) {
+	if (!(ttm->page_flags & TTM_TT_FLAG_EXTERNAL)) {
 		atomic_long_sub(ttm->num_pages, &ttm_pages_allocated);
 		if (bdev->pool.use_dma32)
 			atomic_long_sub(ttm->num_pages,
 					&ttm_dma32_pages_allocated);
 	}

-	ttm->page_flags &= ~TTM_PAGE_FLAG_PRIV_POPULATED;
+	ttm->page_flags &= ~TTM_TT_FLAG_PRIV_POPULATED;
 }

 #ifdef CONFIG_DEBUG_FS
...
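
The ttm_tt.c hunks carry the core EXTERNAL semantics: ttm_tt_populate() and
ttm_tt_unpopulate() only charge pages against the global ttm_pages_allocated
counters when the flag is clear, because externally backed ttm_tts never draw
from TTM's pools, while PRIV_POPULATED tracks populate state either way. A
standalone sketch of that accounting rule (a plain long instead of the
kernel's atomic_long_t; names are illustrative):

    #include <stdint.h>

    #define TTM_TT_FLAG_EXTERNAL       (1 << 2)
    #define TTM_TT_FLAG_PRIV_POPULATED (1u << 31)

    static long pages_allocated;  /* stands in for ttm_pages_allocated */

    struct tt_stub { uint32_t page_flags; long num_pages; };

    static void populate_stub(struct tt_stub *ttm)
    {
            if (!(ttm->page_flags & TTM_TT_FLAG_EXTERNAL))
                    pages_allocated += ttm->num_pages; /* TTM-owned only */
            ttm->page_flags |= TTM_TT_FLAG_PRIV_POPULATED;
    }

    static void unpopulate_stub(struct tt_stub *ttm)
    {
            if (!(ttm->page_flags & TTM_TT_FLAG_EXTERNAL))
                    pages_allocated -= ttm->num_pages;
            ttm->page_flags &= ~TTM_TT_FLAG_PRIV_POPULATED;
    }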
@@ -65,7 +65,7 @@ struct ttm_device_funcs {
 	 * ttm_tt_create
 	 *
 	 * @bo: The buffer object to create the ttm for.
-	 * @page_flags: Page flags as identified by TTM_PAGE_FLAG_XX flags.
+	 * @page_flags: Page flags as identified by TTM_TT_FLAG_XX flags.
 	 *
 	 * Create a struct ttm_tt to back data with system memory pages.
 	 * No pages are actually allocated.
...
@@ -38,17 +38,17 @@ struct ttm_resource;
 struct ttm_buffer_object;
 struct ttm_operation_ctx;

-#define TTM_PAGE_FLAG_SWAPPED		(1 << 4)
-#define TTM_PAGE_FLAG_ZERO_ALLOC	(1 << 6)
-#define TTM_PAGE_FLAG_SG		(1 << 8)
-#define TTM_PAGE_FLAG_PRIV_POPULATED	(1 << 31)
+#define TTM_TT_FLAG_SWAPPED		(1 << 0)
+#define TTM_TT_FLAG_ZERO_ALLOC		(1 << 1)
+#define TTM_TT_FLAG_EXTERNAL		(1 << 2)
+#define TTM_TT_FLAG_PRIV_POPULATED	(1 << 31)

 /**
  * struct ttm_tt
  *
  * @pages: Array of pages backing the data.
- * @page_flags: see TTM_PAGE_FLAG_*
+ * @page_flags: see TTM_TT_FLAG_*
  * @num_pages: Number of pages in the page array.
  * @sg: for SG objects via dma-buf
  * @dma_address: The DMA (bus) addresses of the pages
@@ -84,7 +84,7 @@ struct ttm_kmap_iter_tt {

 static inline bool ttm_tt_is_populated(struct ttm_tt *tt)
 {
-	return tt->page_flags & TTM_PAGE_FLAG_PRIV_POPULATED;
+	return tt->page_flags & TTM_TT_FLAG_PRIV_POPULATED;
 }

 /**
@@ -103,7 +103,7 @@ int ttm_tt_create(struct ttm_buffer_object *bo, bool zero_alloc);
  *
  * @ttm: The struct ttm_tt.
  * @bo: The buffer object we create the ttm for.
- * @page_flags: Page flags as identified by TTM_PAGE_FLAG_XX flags.
+ * @page_flags: Page flags as identified by TTM_TT_FLAG_XX flags.
  * @caching: the desired caching state of the pages
  *
  * Create a struct ttm_tt to back data with system memory pages.
@@ -178,7 +178,7 @@ void ttm_tt_unpopulate(struct ttm_device *bdev, struct ttm_tt *ttm);
  */
 static inline void ttm_tt_mark_for_clear(struct ttm_tt *ttm)
 {
-	ttm->page_flags |= TTM_PAGE_FLAG_ZERO_ALLOC;
+	ttm->page_flags |= TTM_TT_FLAG_ZERO_ALLOC;
 }

 void ttm_tt_mgr_init(unsigned long num_pages, unsigned long num_dma32_pages);
@@ -194,7 +194,7 @@ struct ttm_kmap_iter *ttm_kmap_iter_tt_init(struct ttm_kmap_iter_tt *iter_tt,
  *
  * @bo: Buffer object we allocate the ttm for.
  * @bridge: The agp bridge this device is sitting on.
- * @page_flags: Page flags as identified by TTM_PAGE_FLAG_XX flags.
+ * @page_flags: Page flags as identified by TTM_TT_FLAG_XX flags.
  *
  *
  * Create a TTM backend that uses the indicated AGP bridge as an aperture
...