Commit 7547a917 authored by Dave Airlie

Merge branch 'drm-ttm-unmappable' into drm-core-next

* drm-ttm-unmappable:
  drm/radeon/kms: enable use of unmappable VRAM V2
  drm/ttm: remove io_ field from TTM V6
  drm/vmwgfx: add support for new TTM fault callback V5
  drm/nouveau/kms: add support for new TTM fault callback V5
  drm/radeon/kms: add support for new fault callback V7
  drm/ttm: ttm_fault callback to allow driver to handle bo placement V6
  drm/ttm: split no_wait argument in 2 GPU or reserve wait

Conflicts:
	drivers/gpu/drm/nouveau/nouveau_bo.c
parents a8089e84 6b8b1786
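
Note: taken together, the series replaces TTM's static per-memory-type io_offset/io_size/io_addr fields with per-buffer bus placement information that drivers fill in on demand. The ttm_bo_api.h hunk is not part of this excerpt, but from the mem->bus uses throughout the diff below the new state and driver hooks look roughly like the following sketch (comments are interpretive, not kernel source):

	/* Bus placement carried in each ttm_mem_reg (reconstructed sketch) */
	struct ttm_bus_placement {
		void		*addr;		/* CPU address if the driver pre-mapped the region */
		unsigned long	base;		/* bus base address, e.g. the PCI aperture start */
		unsigned long	size;		/* size of the region, in bytes */
		unsigned long	offset;		/* offset of this buffer within the region */
		bool		is_iomem;	/* true for io memory (VRAM, AGP aperture) */
		bool		io_reserved;	/* io_mem_reserve() currently held */
	};

	/* Callbacks added to struct ttm_bo_driver by this series: */
	int  (*io_mem_reserve)(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem);
	void (*io_mem_free)(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem);
	/* fault_reserve_notify now returns int instead of void, so a driver
	 * can migrate a buffer (or fail) before the CPU mapping is set up. */
	int  (*fault_reserve_notify)(struct ttm_buffer_object *bo);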
drivers/gpu/drm/nouveau/nouveau_bo.c
@@ -225,7 +225,7 @@ nouveau_bo_pin(struct nouveau_bo *nvbo, uint32_t memtype)
 	nouveau_bo_placement_set(nvbo, memtype, 0);
-	ret = ttm_bo_validate(bo, &nvbo->placement, false, false);
+	ret = ttm_bo_validate(bo, &nvbo->placement, false, false, false);
 	if (ret == 0) {
 		switch (bo->mem.mem_type) {
 		case TTM_PL_VRAM:
@@ -261,7 +261,7 @@ nouveau_bo_unpin(struct nouveau_bo *nvbo)
 	nouveau_bo_placement_set(nvbo, bo->mem.placement, 0);
-	ret = ttm_bo_validate(bo, &nvbo->placement, false, false);
+	ret = ttm_bo_validate(bo, &nvbo->placement, false, false, false);
 	if (ret == 0) {
 		switch (bo->mem.mem_type) {
 		case TTM_PL_VRAM:
@@ -391,25 +391,16 @@ nouveau_bo_init_mem_type(struct ttm_bo_device *bdev, uint32_t type,
 		break;
 	case TTM_PL_VRAM:
 		man->flags = TTM_MEMTYPE_FLAG_FIXED |
-			     TTM_MEMTYPE_FLAG_MAPPABLE |
-			     TTM_MEMTYPE_FLAG_NEEDS_IOREMAP;
+			     TTM_MEMTYPE_FLAG_MAPPABLE;
 		man->available_caching = TTM_PL_FLAG_UNCACHED |
 					 TTM_PL_FLAG_WC;
 		man->default_caching = TTM_PL_FLAG_WC;
-
-		man->io_addr = NULL;
-		man->io_offset = drm_get_resource_start(dev, 1);
-		man->io_size = drm_get_resource_len(dev, 1);
-		if (man->io_size > dev_priv->vram_size)
-			man->io_size = dev_priv->vram_size;
-
 		man->gpu_offset = dev_priv->vm_vram_base;
 		break;
 	case TTM_PL_TT:
 		switch (dev_priv->gart_info.type) {
 		case NOUVEAU_GART_AGP:
-			man->flags = TTM_MEMTYPE_FLAG_MAPPABLE |
-				     TTM_MEMTYPE_FLAG_NEEDS_IOREMAP;
+			man->flags = TTM_MEMTYPE_FLAG_MAPPABLE;
 			man->available_caching = TTM_PL_FLAG_UNCACHED;
 			man->default_caching = TTM_PL_FLAG_UNCACHED;
 			break;
@@ -424,10 +415,6 @@ nouveau_bo_init_mem_type(struct ttm_bo_device *bdev, uint32_t type,
 				 dev_priv->gart_info.type);
 			return -EINVAL;
 		}
-
-		man->io_offset = dev_priv->gart_info.aper_base;
-		man->io_size = dev_priv->gart_info.aper_size;
-		man->io_addr = NULL;
 		man->gpu_offset = dev_priv->vm_gart_base;
 		break;
 	default:
@@ -462,7 +449,8 @@ nouveau_bo_evict_flags(struct ttm_buffer_object *bo, struct ttm_placement *pl)
 static int
 nouveau_bo_move_accel_cleanup(struct nouveau_channel *chan,
-			      struct nouveau_bo *nvbo, bool evict, bool no_wait,
+			      struct nouveau_bo *nvbo, bool evict,
+			      bool no_wait_reserve, bool no_wait_gpu,
 			      struct ttm_mem_reg *new_mem)
 {
 	struct nouveau_fence *fence = NULL;
@@ -473,7 +461,7 @@ nouveau_bo_move_accel_cleanup(struct nouveau_channel *chan,
 		return ret;

 	ret = ttm_bo_move_accel_cleanup(&nvbo->bo, fence, NULL,
-					evict, no_wait, new_mem);
+					evict, no_wait_reserve, no_wait_gpu, new_mem);
 	if (nvbo->channel && nvbo->channel != chan)
 		ret = nouveau_fence_wait(fence, NULL, false, false);
 	nouveau_fence_unref((void *)&fence);
@@ -497,7 +485,8 @@ nouveau_bo_mem_ctxdma(struct nouveau_bo *nvbo, struct nouveau_channel *chan,
 static int
 nouveau_bo_move_m2mf(struct ttm_buffer_object *bo, int evict, bool intr,
-		     int no_wait, struct ttm_mem_reg *new_mem)
+		     bool no_wait_reserve, bool no_wait_gpu,
+		     struct ttm_mem_reg *new_mem)
 {
 	struct nouveau_bo *nvbo = nouveau_bo(bo);
 	struct drm_nouveau_private *dev_priv = nouveau_bdev(bo->bdev);
@@ -575,12 +564,13 @@ nouveau_bo_move_m2mf(struct ttm_buffer_object *bo, int evict, bool intr,
 		dst_offset += (PAGE_SIZE * line_count);
 	}

-	return nouveau_bo_move_accel_cleanup(chan, nvbo, evict, no_wait, new_mem);
+	return nouveau_bo_move_accel_cleanup(chan, nvbo, evict, no_wait_reserve, no_wait_gpu, new_mem);
 }

 static int
 nouveau_bo_move_flipd(struct ttm_buffer_object *bo, bool evict, bool intr,
-		      bool no_wait, struct ttm_mem_reg *new_mem)
+		      bool no_wait_reserve, bool no_wait_gpu,
+		      struct ttm_mem_reg *new_mem)
 {
 	u32 placement_memtype = TTM_PL_FLAG_TT | TTM_PL_MASK_CACHING;
 	struct ttm_placement placement;
@@ -593,7 +583,7 @@ nouveau_bo_move_flipd(struct ttm_buffer_object *bo, bool evict, bool intr,
 	tmp_mem = *new_mem;
 	tmp_mem.mm_node = NULL;
-	ret = ttm_bo_mem_space(bo, &placement, &tmp_mem, intr, no_wait);
+	ret = ttm_bo_mem_space(bo, &placement, &tmp_mem, intr, no_wait_reserve, no_wait_gpu);
 	if (ret)
 		return ret;
@@ -601,11 +591,11 @@ nouveau_bo_move_flipd(struct ttm_buffer_object *bo, bool evict, bool intr,
 	if (ret)
 		goto out;

-	ret = nouveau_bo_move_m2mf(bo, true, intr, no_wait, &tmp_mem);
+	ret = nouveau_bo_move_m2mf(bo, true, intr, no_wait_reserve, no_wait_gpu, &tmp_mem);
 	if (ret)
 		goto out;

-	ret = ttm_bo_move_ttm(bo, evict, no_wait, new_mem);
+	ret = ttm_bo_move_ttm(bo, evict, no_wait_reserve, no_wait_gpu, new_mem);
 out:
 	if (tmp_mem.mm_node) {
 		spin_lock(&bo->bdev->glob->lru_lock);
@@ -618,7 +608,8 @@ nouveau_bo_move_flipd(struct ttm_buffer_object *bo, bool evict, bool intr,
 static int
 nouveau_bo_move_flips(struct ttm_buffer_object *bo, bool evict, bool intr,
-		      bool no_wait, struct ttm_mem_reg *new_mem)
+		      bool no_wait_reserve, bool no_wait_gpu,
+		      struct ttm_mem_reg *new_mem)
 {
 	u32 placement_memtype = TTM_PL_FLAG_TT | TTM_PL_MASK_CACHING;
 	struct ttm_placement placement;
@@ -631,15 +622,15 @@ nouveau_bo_move_flips(struct ttm_buffer_object *bo, bool evict, bool intr,
 	tmp_mem = *new_mem;
 	tmp_mem.mm_node = NULL;
-	ret = ttm_bo_mem_space(bo, &placement, &tmp_mem, intr, no_wait);
+	ret = ttm_bo_mem_space(bo, &placement, &tmp_mem, intr, no_wait_reserve, no_wait_gpu);
 	if (ret)
 		return ret;

-	ret = ttm_bo_move_ttm(bo, evict, no_wait, &tmp_mem);
+	ret = ttm_bo_move_ttm(bo, evict, no_wait_reserve, no_wait_gpu, &tmp_mem);
 	if (ret)
 		goto out;

-	ret = nouveau_bo_move_m2mf(bo, evict, intr, no_wait, new_mem);
+	ret = nouveau_bo_move_m2mf(bo, evict, intr, no_wait_reserve, no_wait_gpu, new_mem);
 	if (ret)
 		goto out;
@@ -706,7 +697,8 @@ nouveau_bo_vm_cleanup(struct ttm_buffer_object *bo,
 static int
 nouveau_bo_move(struct ttm_buffer_object *bo, bool evict, bool intr,
-		bool no_wait, struct ttm_mem_reg *new_mem)
+		bool no_wait_reserve, bool no_wait_gpu,
+		struct ttm_mem_reg *new_mem)
 {
 	struct drm_nouveau_private *dev_priv = nouveau_bdev(bo->bdev);
 	struct nouveau_bo *nvbo = nouveau_bo(bo);
@@ -721,7 +713,7 @@ nouveau_bo_move(struct ttm_buffer_object *bo, bool evict, bool intr,
 	/* Software copy if the card isn't up and running yet. */
 	if (dev_priv->init_state != NOUVEAU_CARD_INIT_DONE ||
 	    !dev_priv->channel) {
-		ret = ttm_bo_move_memcpy(bo, evict, no_wait, new_mem);
+		ret = ttm_bo_move_memcpy(bo, evict, no_wait_reserve, no_wait_gpu, new_mem);
 		goto out;
 	}
@@ -735,17 +727,17 @@ nouveau_bo_move(struct ttm_buffer_object *bo, bool evict, bool intr,
 	/* Hardware assisted copy. */
 	if (new_mem->mem_type == TTM_PL_SYSTEM)
-		ret = nouveau_bo_move_flipd(bo, evict, intr, no_wait, new_mem);
+		ret = nouveau_bo_move_flipd(bo, evict, intr, no_wait_reserve, no_wait_gpu, new_mem);
 	else if (old_mem->mem_type == TTM_PL_SYSTEM)
-		ret = nouveau_bo_move_flips(bo, evict, intr, no_wait, new_mem);
+		ret = nouveau_bo_move_flips(bo, evict, intr, no_wait_reserve, no_wait_gpu, new_mem);
 	else
-		ret = nouveau_bo_move_m2mf(bo, evict, intr, no_wait, new_mem);
+		ret = nouveau_bo_move_m2mf(bo, evict, intr, no_wait_reserve, no_wait_gpu, new_mem);

 	if (!ret)
 		goto out;

 	/* Fallback to software copy. */
-	ret = ttm_bo_move_memcpy(bo, evict, no_wait, new_mem);
+	ret = ttm_bo_move_memcpy(bo, evict, no_wait_reserve, no_wait_gpu, new_mem);

 out:
 	if (ret)
@@ -762,6 +754,55 @@ nouveau_bo_verify_access(struct ttm_buffer_object *bo, struct file *filp)
 	return 0;
 }

+static int
+nouveau_ttm_io_mem_reserve(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem)
+{
+	struct ttm_mem_type_manager *man = &bdev->man[mem->mem_type];
+	struct drm_nouveau_private *dev_priv = nouveau_bdev(bdev);
+	struct drm_device *dev = dev_priv->dev;
+
+	mem->bus.addr = NULL;
+	mem->bus.offset = 0;
+	mem->bus.size = mem->num_pages << PAGE_SHIFT;
+	mem->bus.base = 0;
+	mem->bus.is_iomem = false;
+	if (!(man->flags & TTM_MEMTYPE_FLAG_MAPPABLE))
+		return -EINVAL;
+	switch (mem->mem_type) {
+	case TTM_PL_SYSTEM:
+		/* System memory */
+		return 0;
+	case TTM_PL_TT:
+#if __OS_HAS_AGP
+		if (dev_priv->gart_info.type == NOUVEAU_GART_AGP) {
+			mem->bus.offset = mem->mm_node->start << PAGE_SHIFT;
+			mem->bus.base = dev_priv->gart_info.aper_base;
+			mem->bus.is_iomem = true;
+		}
+#endif
+		break;
+	case TTM_PL_VRAM:
+		mem->bus.offset = mem->mm_node->start << PAGE_SHIFT;
+		mem->bus.base = drm_get_resource_start(dev, 1);
+		mem->bus.is_iomem = true;
+		break;
+	default:
+		return -EINVAL;
+	}
+	return 0;
+}
+
+static void
+nouveau_ttm_io_mem_free(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem)
+{
+}
+
+static int
+nouveau_ttm_fault_reserve_notify(struct ttm_buffer_object *bo)
+{
+	return 0;
+}
+
 struct ttm_bo_driver nouveau_bo_driver = {
 	.create_ttm_backend_entry = nouveau_bo_create_ttm_backend_entry,
 	.invalidate_caches = nouveau_bo_invalidate_caches,
@@ -774,5 +815,8 @@ struct ttm_bo_driver nouveau_bo_driver = {
 	.sync_obj_flush = nouveau_fence_flush,
 	.sync_obj_unref = nouveau_fence_unref,
 	.sync_obj_ref = nouveau_fence_ref,
+	.fault_reserve_notify = &nouveau_ttm_fault_reserve_notify,
+	.io_mem_reserve = &nouveau_ttm_io_mem_reserve,
+	.io_mem_free = &nouveau_ttm_io_mem_free,
 };
drivers/gpu/drm/nouveau/nouveau_gem.c
@@ -385,7 +385,7 @@ validate_list(struct nouveau_channel *chan, struct list_head *list,
 		nvbo->channel = chan;
 		ret = ttm_bo_validate(&nvbo->bo, &nvbo->placement,
-				      false, false);
+				      false, false, false);
 		nvbo->channel = NULL;
 		if (unlikely(ret)) {
 			NV_ERROR(dev, "fail ttm_validate\n");
......
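
Aside: every TTM entry point that used to take a single no_wait flag now takes two, as the nouveau hunks above show. A minimal before/after for a caller, with the argument roles spelled out (annotations are interpretive; behaviour per the hunks):

	/* Before: one flag meant "fail rather than block anywhere". */
	ret = ttm_bo_validate(&nvbo->bo, &nvbo->placement, false, false);

	/* After: blocking on buffer reservation and blocking on the GPU are
	 * controlled separately; false/false keeps the old behaviour. */
	ret = ttm_bo_validate(&nvbo->bo, &nvbo->placement,
			      false,	/* interruptible */
			      false,	/* no_wait_reserve */
			      false);	/* no_wait_gpu */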
drivers/gpu/drm/radeon/evergreen.c
@@ -1266,11 +1266,6 @@ int evergreen_mc_init(struct radeon_device *rdev)
 	rdev->mc.mc_vram_size = RREG32(CONFIG_MEMSIZE) * 1024 * 1024;
 	rdev->mc.real_vram_size = RREG32(CONFIG_MEMSIZE) * 1024 * 1024;
 	rdev->mc.visible_vram_size = rdev->mc.aper_size;
-	/* FIXME remove this once we support unmappable VRAM */
-	if (rdev->mc.mc_vram_size > rdev->mc.aper_size) {
-		rdev->mc.mc_vram_size = rdev->mc.aper_size;
-		rdev->mc.real_vram_size = rdev->mc.aper_size;
-	}
 	r600_vram_gtt_location(rdev, &rdev->mc);
 	radeon_update_bandwidth_info(rdev);
......
drivers/gpu/drm/radeon/r100.c
@@ -2036,11 +2036,6 @@ void r100_vram_init_sizes(struct radeon_device *rdev)
 		else
 			rdev->mc.mc_vram_size = rdev->mc.real_vram_size;
 	}
-	/* FIXME remove this once we support unmappable VRAM */
-	if (rdev->mc.mc_vram_size > rdev->mc.aper_size) {
-		rdev->mc.mc_vram_size = rdev->mc.aper_size;
-		rdev->mc.real_vram_size = rdev->mc.aper_size;
-	}
 }

 void r100_vga_set_state(struct radeon_device *rdev, bool state)
......
drivers/gpu/drm/radeon/r600.c
@@ -730,11 +730,6 @@ int r600_mc_init(struct radeon_device *rdev)
 	rdev->mc.mc_vram_size = RREG32(CONFIG_MEMSIZE);
 	rdev->mc.real_vram_size = RREG32(CONFIG_MEMSIZE);
 	rdev->mc.visible_vram_size = rdev->mc.aper_size;
-	/* FIXME remove this once we support unmappable VRAM */
-	if (rdev->mc.mc_vram_size > rdev->mc.aper_size) {
-		rdev->mc.mc_vram_size = rdev->mc.aper_size;
-		rdev->mc.real_vram_size = rdev->mc.aper_size;
-	}
 	r600_vram_gtt_location(rdev, &rdev->mc);
 	if (rdev->flags & RADEON_IS_IGP)
......
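
The same FIXME block falls out of all four radeon memory-controller init paths (evergreen and r100 above, r600 here, rv770 further down). What changes, schematically:

	/* Removed clamp: VRAM beyond the PCI aperture used to be hidden
	 * from the driver entirely. */
	if (rdev->mc.mc_vram_size > rdev->mc.aper_size) {
		rdev->mc.mc_vram_size = rdev->mc.aper_size;
		rdev->mc.real_vram_size = rdev->mc.aper_size;
	}

	/* Kept: only the CPU-visible size stays tied to the aperture; buffers
	 * placed beyond it are migrated at fault time (see
	 * radeon_bo_fault_reserve_notify below) instead of being unusable. */
	rdev->mc.visible_vram_size = rdev->mc.aper_size;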
drivers/gpu/drm/radeon/radeon_object.c
@@ -192,7 +192,7 @@ int radeon_bo_pin(struct radeon_bo *bo, u32 domain, u64 *gpu_addr)
 	}
 	for (i = 0; i < bo->placement.num_placement; i++)
 		bo->placements[i] |= TTM_PL_FLAG_NO_EVICT;
-	r = ttm_bo_validate(&bo->tbo, &bo->placement, false, false);
+	r = ttm_bo_validate(&bo->tbo, &bo->placement, false, false, false);
 	if (likely(r == 0)) {
 		bo->pin_count = 1;
 		if (gpu_addr != NULL)
@@ -216,7 +216,7 @@ int radeon_bo_unpin(struct radeon_bo *bo)
 		return 0;
 	for (i = 0; i < bo->placement.num_placement; i++)
 		bo->placements[i] &= ~TTM_PL_FLAG_NO_EVICT;
-	r = ttm_bo_validate(&bo->tbo, &bo->placement, false, false);
+	r = ttm_bo_validate(&bo->tbo, &bo->placement, false, false, false);
 	if (unlikely(r != 0))
 		dev_err(bo->rdev->dev, "%p validate failed for unpin\n", bo);
 	return r;
@@ -331,7 +331,7 @@ int radeon_bo_list_validate(struct list_head *head)
 							lobj->rdomain);
 		}
 		r = ttm_bo_validate(&bo->tbo, &bo->placement,
-					true, false);
+					true, false, false);
 		if (unlikely(r))
 			return r;
 	}
@@ -499,11 +499,33 @@ void radeon_bo_move_notify(struct ttm_buffer_object *bo,
 	radeon_bo_check_tiling(rbo, 0, 1);
 }

-void radeon_bo_fault_reserve_notify(struct ttm_buffer_object *bo)
+int radeon_bo_fault_reserve_notify(struct ttm_buffer_object *bo)
 {
+	struct radeon_device *rdev;
 	struct radeon_bo *rbo;
+	unsigned long offset, size;
+	int r;
+
 	if (!radeon_ttm_bo_is_radeon_bo(bo))
-		return;
+		return 0;
 	rbo = container_of(bo, struct radeon_bo, tbo);
 	radeon_bo_check_tiling(rbo, 0, 0);
+	rdev = rbo->rdev;
+	if (bo->mem.mem_type == TTM_PL_VRAM) {
+		size = bo->mem.num_pages << PAGE_SHIFT;
+		offset = bo->mem.mm_node->start << PAGE_SHIFT;
+		if ((offset + size) > rdev->mc.visible_vram_size) {
+			/* hurrah the memory is not visible ! */
+			radeon_ttm_placement_from_domain(rbo, RADEON_GEM_DOMAIN_VRAM);
+			rbo->placement.lpfn = rdev->mc.visible_vram_size >> PAGE_SHIFT;
+			r = ttm_bo_validate(bo, &rbo->placement, false, true, false);
+			if (unlikely(r != 0))
+				return r;
+			offset = bo->mem.mm_node->start << PAGE_SHIFT;
+			/* this should not happen */
+			if ((offset + size) > rdev->mc.visible_vram_size)
+				return -EINVAL;
+		}
+	}
+	return 0;
 }
drivers/gpu/drm/radeon/radeon_object.h
@@ -168,6 +168,6 @@ extern int radeon_bo_check_tiling(struct radeon_bo *bo, bool has_moved,
 				bool force_drop);
 extern void radeon_bo_move_notify(struct ttm_buffer_object *bo,
 					struct ttm_mem_reg *mem);
-extern void radeon_bo_fault_reserve_notify(struct ttm_buffer_object *bo);
+extern int radeon_bo_fault_reserve_notify(struct ttm_buffer_object *bo);
 extern int radeon_bo_get_surface_reg(struct radeon_bo *bo);
 #endif
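
With unmappable VRAM enabled a buffer can legitimately live above mc.visible_vram_size, so CPU access has to move it first. A commented sketch of the check radeon's new fault callback performs (same logic as radeon_bo_fault_reserve_notify() above; comments are interpretive, not from the commit):

	static int clamp_bo_to_visible_vram(struct ttm_buffer_object *bo,
					    struct radeon_bo *rbo,
					    struct radeon_device *rdev)
	{
		unsigned long size = bo->mem.num_pages << PAGE_SHIFT;
		unsigned long offset = bo->mem.mm_node->start << PAGE_SHIFT;

		if (bo->mem.mem_type != TTM_PL_VRAM ||
		    (offset + size) <= rdev->mc.visible_vram_size)
			return 0;	/* already CPU-reachable, nothing to do */

		/* Restrict the allowed placement to the visible part of VRAM
		 * and revalidate. no_wait_reserve is true here, presumably so
		 * the fault path never blocks waiting to reserve other
		 * buffers during eviction. */
		radeon_ttm_placement_from_domain(rbo, RADEON_GEM_DOMAIN_VRAM);
		rbo->placement.lpfn = rdev->mc.visible_vram_size >> PAGE_SHIFT;
		return ttm_bo_validate(bo, &rbo->placement, false, true, false);
	}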
drivers/gpu/drm/radeon/radeon_ttm.c
@@ -163,34 +163,21 @@ static int radeon_init_mem_type(struct ttm_bo_device *bdev, uint32_t type,
 					  (unsigned)type);
 				return -EINVAL;
 			}
-			man->io_offset = rdev->mc.agp_base;
-			man->io_size = rdev->mc.gtt_size;
-			man->io_addr = NULL;
 			if (!rdev->ddev->agp->cant_use_aperture)
-				man->flags = TTM_MEMTYPE_FLAG_NEEDS_IOREMAP |
-					     TTM_MEMTYPE_FLAG_MAPPABLE;
+				man->flags = TTM_MEMTYPE_FLAG_MAPPABLE;
 			man->available_caching = TTM_PL_FLAG_UNCACHED |
 						 TTM_PL_FLAG_WC;
 			man->default_caching = TTM_PL_FLAG_WC;
-		} else
-#endif
-		{
-			man->io_offset = 0;
-			man->io_size = 0;
-			man->io_addr = NULL;
 		}
+#endif
 		break;
 	case TTM_PL_VRAM:
 		/* "On-card" video ram */
 		man->gpu_offset = rdev->mc.vram_start;
 		man->flags = TTM_MEMTYPE_FLAG_FIXED |
-			     TTM_MEMTYPE_FLAG_NEEDS_IOREMAP |
 			     TTM_MEMTYPE_FLAG_MAPPABLE;
 		man->available_caching = TTM_PL_FLAG_UNCACHED | TTM_PL_FLAG_WC;
 		man->default_caching = TTM_PL_FLAG_WC;
-		man->io_addr = NULL;
-		man->io_offset = rdev->mc.aper_base;
-		man->io_size = rdev->mc.aper_size;
 		break;
 	default:
 		DRM_ERROR("Unsupported memory type %u\n", (unsigned)type);
@@ -245,9 +232,9 @@ static void radeon_move_null(struct ttm_buffer_object *bo,
 }

 static int radeon_move_blit(struct ttm_buffer_object *bo,
-			bool evict, int no_wait,
+			bool evict, int no_wait_reserve, bool no_wait_gpu,
 			struct ttm_mem_reg *new_mem,
 			struct ttm_mem_reg *old_mem)
 {
 	struct radeon_device *rdev;
 	uint64_t old_start, new_start;
@@ -291,13 +278,14 @@ static int radeon_move_blit(struct ttm_buffer_object *bo,
 	r = radeon_copy(rdev, old_start, new_start, new_mem->num_pages, fence);
 	/* FIXME: handle copy error */
 	r = ttm_bo_move_accel_cleanup(bo, (void *)fence, NULL,
-				      evict, no_wait, new_mem);
+				      evict, no_wait_reserve, no_wait_gpu, new_mem);
 	radeon_fence_unref(&fence);
 	return r;
 }

 static int radeon_move_vram_ram(struct ttm_buffer_object *bo,
-				bool evict, bool interruptible, bool no_wait,
+				bool evict, bool interruptible,
+				bool no_wait_reserve, bool no_wait_gpu,
 				struct ttm_mem_reg *new_mem)
 {
 	struct radeon_device *rdev;
@@ -318,7 +306,7 @@ static int radeon_move_vram_ram(struct ttm_buffer_object *bo,
 	placement.busy_placement = &placements;
 	placements = TTM_PL_MASK_CACHING | TTM_PL_FLAG_TT;
 	r = ttm_bo_mem_space(bo, &placement, &tmp_mem,
-			     interruptible, no_wait);
+			     interruptible, no_wait_reserve, no_wait_gpu);
 	if (unlikely(r)) {
 		return r;
 	}
@@ -332,11 +320,11 @@ static int radeon_move_vram_ram(struct ttm_buffer_object *bo,
 	if (unlikely(r)) {
 		goto out_cleanup;
 	}
-	r = radeon_move_blit(bo, true, no_wait, &tmp_mem, old_mem);
+	r = radeon_move_blit(bo, true, no_wait_reserve, no_wait_gpu, &tmp_mem, old_mem);
 	if (unlikely(r)) {
 		goto out_cleanup;
 	}
-	r = ttm_bo_move_ttm(bo, true, no_wait, new_mem);
+	r = ttm_bo_move_ttm(bo, true, no_wait_reserve, no_wait_gpu, new_mem);
 out_cleanup:
 	if (tmp_mem.mm_node) {
 		struct ttm_bo_global *glob = rdev->mman.bdev.glob;
@@ -350,7 +338,8 @@ static int radeon_move_vram_ram(struct ttm_buffer_object *bo,
 }

 static int radeon_move_ram_vram(struct ttm_buffer_object *bo,
-				bool evict, bool interruptible, bool no_wait,
+				bool evict, bool interruptible,
+				bool no_wait_reserve, bool no_wait_gpu,
 				struct ttm_mem_reg *new_mem)
 {
 	struct radeon_device *rdev;
@@ -370,15 +359,15 @@ static int radeon_move_ram_vram(struct ttm_buffer_object *bo,
 	placement.num_busy_placement = 1;
 	placement.busy_placement = &placements;
 	placements = TTM_PL_MASK_CACHING | TTM_PL_FLAG_TT;
-	r = ttm_bo_mem_space(bo, &placement, &tmp_mem, interruptible, no_wait);
+	r = ttm_bo_mem_space(bo, &placement, &tmp_mem, interruptible, no_wait_reserve, no_wait_gpu);
 	if (unlikely(r)) {
 		return r;
 	}
-	r = ttm_bo_move_ttm(bo, true, no_wait, &tmp_mem);
+	r = ttm_bo_move_ttm(bo, true, no_wait_reserve, no_wait_gpu, &tmp_mem);
 	if (unlikely(r)) {
 		goto out_cleanup;
 	}
-	r = radeon_move_blit(bo, true, no_wait, new_mem, old_mem);
+	r = radeon_move_blit(bo, true, no_wait_reserve, no_wait_gpu, new_mem, old_mem);
 	if (unlikely(r)) {
 		goto out_cleanup;
 	}
@@ -395,8 +384,9 @@ static int radeon_move_ram_vram(struct ttm_buffer_object *bo,
 }

 static int radeon_bo_move(struct ttm_buffer_object *bo,
-			bool evict, bool interruptible, bool no_wait,
-			struct ttm_mem_reg *new_mem)
+			bool evict, bool interruptible,
+			bool no_wait_reserve, bool no_wait_gpu,
+			struct ttm_mem_reg *new_mem)
 {
 	struct radeon_device *rdev;
 	struct ttm_mem_reg *old_mem = &bo->mem;
@@ -423,23 +413,66 @@ static int radeon_bo_move(struct ttm_buffer_object *bo,
 	if (old_mem->mem_type == TTM_PL_VRAM &&
 	    new_mem->mem_type == TTM_PL_SYSTEM) {
 		r = radeon_move_vram_ram(bo, evict, interruptible,
-					no_wait, new_mem);
+					no_wait_reserve, no_wait_gpu, new_mem);
 	} else if (old_mem->mem_type == TTM_PL_SYSTEM &&
 		   new_mem->mem_type == TTM_PL_VRAM) {
 		r = radeon_move_ram_vram(bo, evict, interruptible,
-					no_wait, new_mem);
+					no_wait_reserve, no_wait_gpu, new_mem);
 	} else {
-		r = radeon_move_blit(bo, evict, no_wait, new_mem, old_mem);
+		r = radeon_move_blit(bo, evict, no_wait_reserve, no_wait_gpu, new_mem, old_mem);
 	}

 	if (r) {
 memcpy:
-		r = ttm_bo_move_memcpy(bo, evict, no_wait, new_mem);
+		r = ttm_bo_move_memcpy(bo, evict, no_wait_reserve, no_wait_gpu, new_mem);
 	}
 	return r;
 }

+static int radeon_ttm_io_mem_reserve(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem)
+{
+	struct ttm_mem_type_manager *man = &bdev->man[mem->mem_type];
+	struct radeon_device *rdev = radeon_get_rdev(bdev);
+
+	mem->bus.addr = NULL;
+	mem->bus.offset = 0;
+	mem->bus.size = mem->num_pages << PAGE_SHIFT;
+	mem->bus.base = 0;
+	mem->bus.is_iomem = false;
+	if (!(man->flags & TTM_MEMTYPE_FLAG_MAPPABLE))
+		return -EINVAL;
+	switch (mem->mem_type) {
+	case TTM_PL_SYSTEM:
+		/* system memory */
+		return 0;
+	case TTM_PL_TT:
+#if __OS_HAS_AGP
+		if (rdev->flags & RADEON_IS_AGP) {
+			/* RADEON_IS_AGP is set only if AGP is active */
+			mem->bus.offset = mem->mm_node->start << PAGE_SHIFT;
+			mem->bus.base = rdev->mc.agp_base;
+			mem->bus.is_iomem = true;
+		}
+#endif
+		break;
+	case TTM_PL_VRAM:
+		mem->bus.offset = mem->mm_node->start << PAGE_SHIFT;
+		/* check if it's visible */
+		if ((mem->bus.offset + mem->bus.size) > rdev->mc.visible_vram_size)
+			return -EINVAL;
+		mem->bus.base = rdev->mc.aper_base;
+		mem->bus.is_iomem = true;
+		break;
+	default:
+		return -EINVAL;
+	}
+	return 0;
+}
+
+static void radeon_ttm_io_mem_free(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem)
+{
+}
+
 static int radeon_sync_obj_wait(void *sync_obj, void *sync_arg,
 				bool lazy, bool interruptible)
 {
@@ -480,6 +513,8 @@ static struct ttm_bo_driver radeon_bo_driver = {
 	.sync_obj_ref = &radeon_sync_obj_ref,
 	.move_notify = &radeon_bo_move_notify,
 	.fault_reserve_notify = &radeon_bo_fault_reserve_notify,
+	.io_mem_reserve = &radeon_ttm_io_mem_reserve,
+	.io_mem_free = &radeon_ttm_io_mem_free,
 };

 int radeon_ttm_init(struct radeon_device *rdev)
......
drivers/gpu/drm/radeon/rv770.c
@@ -910,11 +910,6 @@ int rv770_mc_init(struct radeon_device *rdev)
 	rdev->mc.mc_vram_size = RREG32(CONFIG_MEMSIZE);
 	rdev->mc.real_vram_size = RREG32(CONFIG_MEMSIZE);
 	rdev->mc.visible_vram_size = rdev->mc.aper_size;
-	/* FIXME remove this once we support unmappable VRAM */
-	if (rdev->mc.mc_vram_size > rdev->mc.aper_size) {
-		rdev->mc.mc_vram_size = rdev->mc.aper_size;
-		rdev->mc.real_vram_size = rdev->mc.aper_size;
-	}
 	r600_vram_gtt_location(rdev, &rdev->mc);
 	radeon_update_bandwidth_info(rdev);
......
drivers/gpu/drm/ttm/ttm_bo.c
@@ -79,8 +79,6 @@ static void ttm_mem_type_debug(struct ttm_bo_device *bdev, int mem_type)
 	printk(KERN_ERR TTM_PFX "    use_type: %d\n", man->use_type);
 	printk(KERN_ERR TTM_PFX "    flags: 0x%08X\n", man->flags);
 	printk(KERN_ERR TTM_PFX "    gpu_offset: 0x%08lX\n", man->gpu_offset);
-	printk(KERN_ERR TTM_PFX "    io_offset: 0x%08lX\n", man->io_offset);
-	printk(KERN_ERR TTM_PFX "    io_size: %ld\n", man->io_size);
 	printk(KERN_ERR TTM_PFX "    size: %llu\n", man->size);
 	printk(KERN_ERR TTM_PFX "    available_caching: 0x%08X\n",
 		man->available_caching);
@@ -357,7 +355,8 @@ static int ttm_bo_add_ttm(struct ttm_buffer_object *bo, bool zero_alloc)
 static int ttm_bo_handle_move_mem(struct ttm_buffer_object *bo,
 				  struct ttm_mem_reg *mem,
-				  bool evict, bool interruptible, bool no_wait)
+				  bool evict, bool interruptible,
+				  bool no_wait_reserve, bool no_wait_gpu)
 {
 	struct ttm_bo_device *bdev = bo->bdev;
 	bool old_is_pci = ttm_mem_reg_is_pci(bdev, &bo->mem);
@@ -402,12 +401,12 @@ static int ttm_bo_handle_move_mem(struct ttm_buffer_object *bo,
 	if (!(old_man->flags & TTM_MEMTYPE_FLAG_FIXED) &&
 	    !(new_man->flags & TTM_MEMTYPE_FLAG_FIXED))
-		ret = ttm_bo_move_ttm(bo, evict, no_wait, mem);
+		ret = ttm_bo_move_ttm(bo, evict, no_wait_reserve, no_wait_gpu, mem);
 	else if (bdev->driver->move)
 		ret = bdev->driver->move(bo, evict, interruptible,
-					 no_wait, mem);
+					 no_wait_reserve, no_wait_gpu, mem);
 	else
-		ret = ttm_bo_move_memcpy(bo, evict, no_wait, mem);
+		ret = ttm_bo_move_memcpy(bo, evict, no_wait_reserve, no_wait_gpu, mem);

 	if (ret)
 		goto out_err;
@@ -606,7 +605,7 @@ void ttm_bo_unref(struct ttm_buffer_object **p_bo)
 EXPORT_SYMBOL(ttm_bo_unref);

 static int ttm_bo_evict(struct ttm_buffer_object *bo, bool interruptible,
-			bool no_wait)
+			bool no_wait_reserve, bool no_wait_gpu)
 {
 	struct ttm_bo_device *bdev = bo->bdev;
 	struct ttm_bo_global *glob = bo->glob;
@@ -615,7 +614,7 @@ static int ttm_bo_evict(struct ttm_buffer_object *bo, bool interruptible,
 	int ret = 0;

 	spin_lock(&bo->lock);
-	ret = ttm_bo_wait(bo, false, interruptible, no_wait);
+	ret = ttm_bo_wait(bo, false, interruptible, no_wait_gpu);
 	spin_unlock(&bo->lock);

 	if (unlikely(ret != 0)) {
@@ -631,6 +630,7 @@ static int ttm_bo_evict(struct ttm_buffer_object *bo, bool interruptible,
 	evict_mem = bo->mem;
 	evict_mem.mm_node = NULL;
+	evict_mem.bus.io_reserved = false;

 	placement.fpfn = 0;
 	placement.lpfn = 0;
@@ -638,7 +638,7 @@ static int ttm_bo_evict(struct ttm_buffer_object *bo, bool interruptible,
 	placement.num_busy_placement = 0;
 	bdev->driver->evict_flags(bo, &placement);
 	ret = ttm_bo_mem_space(bo, &placement, &evict_mem, interruptible,
-				no_wait);
+				no_wait_reserve, no_wait_gpu);
 	if (ret) {
 		if (ret != -ERESTARTSYS) {
 			printk(KERN_ERR TTM_PFX
@@ -650,7 +650,7 @@ static int ttm_bo_evict(struct ttm_buffer_object *bo, bool interruptible,
 	}

 	ret = ttm_bo_handle_move_mem(bo, &evict_mem, true, interruptible,
-				     no_wait);
+				     no_wait_reserve, no_wait_gpu);
 	if (ret) {
 		if (ret != -ERESTARTSYS)
 			printk(KERN_ERR TTM_PFX "Buffer eviction failed\n");
@@ -670,7 +670,8 @@ static int ttm_bo_evict(struct ttm_buffer_object *bo, bool interruptible,

 static int ttm_mem_evict_first(struct ttm_bo_device *bdev,
 				uint32_t mem_type,
-				bool interruptible, bool no_wait)
+				bool interruptible, bool no_wait_reserve,
+				bool no_wait_gpu)
 {
 	struct ttm_bo_global *glob = bdev->glob;
 	struct ttm_mem_type_manager *man = &bdev->man[mem_type];
@@ -687,11 +688,11 @@ static int ttm_mem_evict_first(struct ttm_bo_device *bdev,
 	bo = list_first_entry(&man->lru, struct ttm_buffer_object, lru);
 	kref_get(&bo->list_kref);

-	ret = ttm_bo_reserve_locked(bo, false, true, false, 0);
+	ret = ttm_bo_reserve_locked(bo, false, no_wait_reserve, false, 0);

 	if (unlikely(ret == -EBUSY)) {
 		spin_unlock(&glob->lru_lock);
-		if (likely(!no_wait))
+		if (likely(!no_wait_gpu))
 			ret = ttm_bo_wait_unreserved(bo, interruptible);

 		kref_put(&bo->list_kref, ttm_bo_release_list);
@@ -713,7 +714,7 @@ static int ttm_mem_evict_first(struct ttm_bo_device *bdev,
 	while (put_count--)
 		kref_put(&bo->list_kref, ttm_bo_ref_bug);

-	ret = ttm_bo_evict(bo, interruptible, no_wait);
+	ret = ttm_bo_evict(bo, interruptible, no_wait_reserve, no_wait_gpu);
 	ttm_bo_unreserve(bo);

 	kref_put(&bo->list_kref, ttm_bo_release_list);
@@ -764,7 +765,9 @@ static int ttm_bo_mem_force_space(struct ttm_buffer_object *bo,
 					uint32_t mem_type,
 					struct ttm_placement *placement,
 					struct ttm_mem_reg *mem,
-					bool interruptible, bool no_wait)
+					bool interruptible,
+					bool no_wait_reserve,
+					bool no_wait_gpu)
 {
 	struct ttm_bo_device *bdev = bo->bdev;
 	struct ttm_bo_global *glob = bdev->glob;
@@ -785,7 +788,7 @@ static int ttm_bo_mem_force_space(struct ttm_buffer_object *bo,
 		}
 		spin_unlock(&glob->lru_lock);
 		ret = ttm_mem_evict_first(bdev, mem_type, interruptible,
-						no_wait);
+						no_wait_reserve, no_wait_gpu);
 		if (unlikely(ret != 0))
 			return ret;
 	} while (1);
@@ -855,7 +858,8 @@ static bool ttm_bo_mt_compatible(struct ttm_mem_type_manager *man,
 int ttm_bo_mem_space(struct ttm_buffer_object *bo,
 			struct ttm_placement *placement,
 			struct ttm_mem_reg *mem,
-			bool interruptible, bool no_wait)
+			bool interruptible, bool no_wait_reserve,
+			bool no_wait_gpu)
 {
 	struct ttm_bo_device *bdev = bo->bdev;
 	struct ttm_mem_type_manager *man;
@@ -952,7 +956,7 @@ int ttm_bo_mem_space(struct ttm_buffer_object *bo,
 	}

 	ret = ttm_bo_mem_force_space(bo, mem_type, placement, mem,
-					interruptible, no_wait);
+					interruptible, no_wait_reserve, no_wait_gpu);
 	if (ret == 0 && mem->mm_node) {
 		mem->placement = cur_flags;
 		mem->mm_node->private = bo;
@@ -978,7 +982,8 @@ EXPORT_SYMBOL(ttm_bo_wait_cpu);

 int ttm_bo_move_buffer(struct ttm_buffer_object *bo,
 			struct ttm_placement *placement,
-			bool interruptible, bool no_wait)
+			bool interruptible, bool no_wait_reserve,
+			bool no_wait_gpu)
 {
 	struct ttm_bo_global *glob = bo->glob;
 	int ret = 0;
@@ -992,20 +997,21 @@ int ttm_bo_move_buffer(struct ttm_buffer_object *bo,
 	 * instead of doing it here.
 	 */
 	spin_lock(&bo->lock);
-	ret = ttm_bo_wait(bo, false, interruptible, no_wait);
+	ret = ttm_bo_wait(bo, false, interruptible, no_wait_gpu);
 	spin_unlock(&bo->lock);
 	if (ret)
 		return ret;
 	mem.num_pages = bo->num_pages;
 	mem.size = mem.num_pages << PAGE_SHIFT;
 	mem.page_alignment = bo->mem.page_alignment;
+	mem.bus.io_reserved = false;
 	/*
 	 * Determine where to move the buffer.
 	 */
-	ret = ttm_bo_mem_space(bo, placement, &mem, interruptible, no_wait);
+	ret = ttm_bo_mem_space(bo, placement, &mem, interruptible, no_wait_reserve, no_wait_gpu);
 	if (ret)
 		goto out_unlock;
-	ret = ttm_bo_handle_move_mem(bo, &mem, false, interruptible, no_wait);
+	ret = ttm_bo_handle_move_mem(bo, &mem, false, interruptible, no_wait_reserve, no_wait_gpu);
 out_unlock:
 	if (ret && mem.mm_node) {
 		spin_lock(&glob->lru_lock);
@@ -1039,7 +1045,8 @@ static int ttm_bo_mem_compat(struct ttm_placement *placement,

 int ttm_bo_validate(struct ttm_buffer_object *bo,
 			struct ttm_placement *placement,
-			bool interruptible, bool no_wait)
+			bool interruptible, bool no_wait_reserve,
+			bool no_wait_gpu)
 {
 	int ret;
@@ -1054,7 +1061,7 @@ int ttm_bo_validate(struct ttm_buffer_object *bo,
 	 */
 	ret = ttm_bo_mem_compat(placement, &bo->mem);
 	if (ret < 0) {
-		ret = ttm_bo_move_buffer(bo, placement, interruptible, no_wait);
+		ret = ttm_bo_move_buffer(bo, placement, interruptible, no_wait_reserve, no_wait_gpu);
 		if (ret)
 			return ret;
 	} else {
@@ -1153,6 +1160,7 @@ int ttm_bo_init(struct ttm_bo_device *bdev,
 	bo->mem.num_pages = bo->num_pages;
 	bo->mem.mm_node = NULL;
 	bo->mem.page_alignment = page_alignment;
+	bo->mem.bus.io_reserved = false;
 	bo->buffer_start = buffer_start & PAGE_MASK;
 	bo->priv_flags = 0;
 	bo->mem.placement = (TTM_PL_FLAG_SYSTEM | TTM_PL_FLAG_CACHED);
@@ -1175,7 +1183,7 @@ int ttm_bo_init(struct ttm_bo_device *bdev,
 		goto out_err;
 	}

-	ret = ttm_bo_validate(bo, placement, interruptible, false);
+	ret = ttm_bo_validate(bo, placement, interruptible, false, false);
 	if (ret)
 		goto out_err;
@@ -1249,7 +1257,7 @@ static int ttm_bo_force_list_clean(struct ttm_bo_device *bdev,
 	spin_lock(&glob->lru_lock);
 	while (!list_empty(&man->lru)) {
 		spin_unlock(&glob->lru_lock);
-		ret = ttm_mem_evict_first(bdev, mem_type, false, false);
+		ret = ttm_mem_evict_first(bdev, mem_type, false, false, false);
 		if (ret) {
 			if (allow_errors) {
 				return ret;
@@ -1553,26 +1561,6 @@ bool ttm_mem_reg_is_pci(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem)
 	return true;
 }

-int ttm_bo_pci_offset(struct ttm_bo_device *bdev,
-			struct ttm_mem_reg *mem,
-			unsigned long *bus_base,
-			unsigned long *bus_offset, unsigned long *bus_size)
-{
-	struct ttm_mem_type_manager *man = &bdev->man[mem->mem_type];
-
-	*bus_size = 0;
-	if (!(man->flags & TTM_MEMTYPE_FLAG_MAPPABLE))
-		return -EINVAL;
-
-	if (ttm_mem_reg_is_pci(bdev, mem)) {
-		*bus_offset = mem->mm_node->start << PAGE_SHIFT;
-		*bus_size = mem->num_pages << PAGE_SHIFT;
-		*bus_base = man->io_offset;
-	}
-
-	return 0;
-}
-
 void ttm_bo_unmap_virtual(struct ttm_buffer_object *bo)
 {
 	struct ttm_bo_device *bdev = bo->bdev;
@@ -1581,8 +1569,8 @@ void ttm_bo_unmap_virtual(struct ttm_buffer_object *bo)
 	if (!bdev->dev_mapping)
 		return;
-
 	unmap_mapping_range(bdev->dev_mapping, offset, holelen, 1);
+	ttm_mem_io_free(bdev, &bo->mem);
 }
 EXPORT_SYMBOL(ttm_bo_unmap_virtual);
@@ -1839,7 +1827,7 @@ static int ttm_bo_swapout(struct ttm_mem_shrink *shrink)
 		evict_mem.mem_type = TTM_PL_SYSTEM;

 		ret = ttm_bo_handle_move_mem(bo, &evict_mem, true,
-					     false, false);
+					     false, false, false);
 		if (unlikely(ret != 0))
 			goto out;
 	}
......
drivers/gpu/drm/ttm/ttm_bo_util.c
@@ -50,7 +50,8 @@ void ttm_bo_free_old_node(struct ttm_buffer_object *bo)
 }

 int ttm_bo_move_ttm(struct ttm_buffer_object *bo,
-		    bool evict, bool no_wait, struct ttm_mem_reg *new_mem)
+		    bool evict, bool no_wait_reserve,
+		    bool no_wait_gpu, struct ttm_mem_reg *new_mem)
 {
 	struct ttm_tt *ttm = bo->ttm;
 	struct ttm_mem_reg *old_mem = &bo->mem;
@@ -81,30 +82,51 @@ int ttm_bo_move_ttm(struct ttm_buffer_object *bo,
 }
 EXPORT_SYMBOL(ttm_bo_move_ttm);

+int ttm_mem_io_reserve(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem)
+{
+	int ret;
+
+	if (!mem->bus.io_reserved) {
+		mem->bus.io_reserved = true;
+		ret = bdev->driver->io_mem_reserve(bdev, mem);
+		if (unlikely(ret != 0))
+			return ret;
+	}
+	return 0;
+}
+
+void ttm_mem_io_free(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem)
+{
+	if (bdev->driver->io_mem_reserve) {
+		if (mem->bus.io_reserved) {
+			mem->bus.io_reserved = false;
+			bdev->driver->io_mem_free(bdev, mem);
+		}
+	}
+}
+
 int ttm_mem_reg_ioremap(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem,
 			void **virtual)
 {
-	struct ttm_mem_type_manager *man = &bdev->man[mem->mem_type];
-	unsigned long bus_offset;
-	unsigned long bus_size;
-	unsigned long bus_base;
 	int ret;
 	void *addr;

 	*virtual = NULL;
-	ret = ttm_bo_pci_offset(bdev, mem, &bus_base, &bus_offset, &bus_size);
-	if (ret || bus_size == 0)
+	ret = ttm_mem_io_reserve(bdev, mem);
+	if (ret)
 		return ret;

-	if (!(man->flags & TTM_MEMTYPE_FLAG_NEEDS_IOREMAP))
-		addr = (void *)(((u8 *) man->io_addr) + bus_offset);
-	else {
+	if (mem->bus.addr) {
+		addr = mem->bus.addr;
+	} else {
 		if (mem->placement & TTM_PL_FLAG_WC)
-			addr = ioremap_wc(bus_base + bus_offset, bus_size);
+			addr = ioremap_wc(mem->bus.base + mem->bus.offset, mem->bus.size);
 		else
-			addr = ioremap_nocache(bus_base + bus_offset, bus_size);
-		if (!addr)
+			addr = ioremap_nocache(mem->bus.base + mem->bus.offset, mem->bus.size);
+		if (!addr) {
+			ttm_mem_io_free(bdev, mem);
 			return -ENOMEM;
+		}
 	}
 	*virtual = addr;
 	return 0;
@@ -117,8 +139,9 @@ void ttm_mem_reg_iounmap(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem,

 	man = &bdev->man[mem->mem_type];

-	if (virtual && (man->flags & TTM_MEMTYPE_FLAG_NEEDS_IOREMAP))
+	if (virtual && mem->bus.addr == NULL)
 		iounmap(virtual);
+	ttm_mem_io_free(bdev, mem);
 }

 static int ttm_copy_io_page(void *dst, void *src, unsigned long page)
@@ -208,7 +231,8 @@ static int ttm_copy_ttm_io_page(struct ttm_tt *ttm, void *dst,
 }

 int ttm_bo_move_memcpy(struct ttm_buffer_object *bo,
-		       bool evict, bool no_wait, struct ttm_mem_reg *new_mem)
+		       bool evict, bool no_wait_reserve, bool no_wait_gpu,
+		       struct ttm_mem_reg *new_mem)
 {
 	struct ttm_bo_device *bdev = bo->bdev;
 	struct ttm_mem_type_manager *man = &bdev->man[new_mem->mem_type];
@@ -369,26 +393,23 @@ pgprot_t ttm_io_prot(uint32_t caching_flags, pgprot_t tmp)
 EXPORT_SYMBOL(ttm_io_prot);

 static int ttm_bo_ioremap(struct ttm_buffer_object *bo,
-			  unsigned long bus_base,
-			  unsigned long bus_offset,
-			  unsigned long bus_size,
+			  unsigned long offset,
+			  unsigned long size,
 			  struct ttm_bo_kmap_obj *map)
 {
-	struct ttm_bo_device *bdev = bo->bdev;
 	struct ttm_mem_reg *mem = &bo->mem;
-	struct ttm_mem_type_manager *man = &bdev->man[mem->mem_type];

-	if (!(man->flags & TTM_MEMTYPE_FLAG_NEEDS_IOREMAP)) {
+	if (bo->mem.bus.addr) {
 		map->bo_kmap_type = ttm_bo_map_premapped;
-		map->virtual = (void *)(((u8 *) man->io_addr) + bus_offset);
+		map->virtual = (void *)(((u8 *)bo->mem.bus.addr) + offset);
 	} else {
 		map->bo_kmap_type = ttm_bo_map_iomap;
 		if (mem->placement & TTM_PL_FLAG_WC)
-			map->virtual = ioremap_wc(bus_base + bus_offset,
-						  bus_size);
+			map->virtual = ioremap_wc(bo->mem.bus.base + bo->mem.bus.offset + offset,
+						  size);
 		else
-			map->virtual = ioremap_nocache(bus_base + bus_offset,
-						       bus_size);
+			map->virtual = ioremap_nocache(bo->mem.bus.base + bo->mem.bus.offset + offset,
+						       size);
 	}
 	return (!map->virtual) ? -ENOMEM : 0;
 }
@@ -441,13 +462,12 @@ int ttm_bo_kmap(struct ttm_buffer_object *bo,
 		unsigned long start_page, unsigned long num_pages,
 		struct ttm_bo_kmap_obj *map)
 {
+	unsigned long offset, size;
 	int ret;
-	unsigned long bus_base;
-	unsigned long bus_offset;
-	unsigned long bus_size;

 	BUG_ON(!list_empty(&bo->swap));
 	map->virtual = NULL;
+	map->bo = bo;
 	if (num_pages > bo->num_pages)
 		return -EINVAL;
 	if (start_page > bo->num_pages)
@@ -456,16 +476,15 @@ int ttm_bo_kmap(struct ttm_buffer_object *bo,
 	if (num_pages > 1 && !DRM_SUSER(DRM_CURPROC))
 		return -EPERM;
 #endif
-	ret = ttm_bo_pci_offset(bo->bdev, &bo->mem, &bus_base,
-				&bus_offset, &bus_size);
+	ret = ttm_mem_io_reserve(bo->bdev, &bo->mem);
 	if (ret)
 		return ret;
-	if (bus_size == 0) {
+	if (!bo->mem.bus.is_iomem) {
 		return ttm_bo_kmap_ttm(bo, start_page, num_pages, map);
 	} else {
-		bus_offset += start_page << PAGE_SHIFT;
-		bus_size = num_pages << PAGE_SHIFT;
-		return ttm_bo_ioremap(bo, bus_base, bus_offset, bus_size, map);
+		offset = start_page << PAGE_SHIFT;
+		size = num_pages << PAGE_SHIFT;
+		return ttm_bo_ioremap(bo, offset, size, map);
 	}
 }
 EXPORT_SYMBOL(ttm_bo_kmap);
@@ -477,6 +496,7 @@ void ttm_bo_kunmap(struct ttm_bo_kmap_obj *map)
 	switch (map->bo_kmap_type) {
 	case ttm_bo_map_iomap:
 		iounmap(map->virtual);
+		ttm_mem_io_free(map->bo->bdev, &map->bo->mem);
 		break;
 	case ttm_bo_map_vmap:
 		vunmap(map->virtual);
@@ -494,39 +514,11 @@ void ttm_bo_kunmap(struct ttm_bo_kmap_obj *map)
 }
 EXPORT_SYMBOL(ttm_bo_kunmap);

-int ttm_bo_pfn_prot(struct ttm_buffer_object *bo,
-		    unsigned long dst_offset,
-		    unsigned long *pfn, pgprot_t *prot)
-{
-	struct ttm_mem_reg *mem = &bo->mem;
-	struct ttm_bo_device *bdev = bo->bdev;
-	unsigned long bus_offset;
-	unsigned long bus_size;
-	unsigned long bus_base;
-	int ret;
-	ret = ttm_bo_pci_offset(bdev, mem, &bus_base, &bus_offset,
-			&bus_size);
-	if (ret)
-		return -EINVAL;
-	if (bus_size != 0)
-		*pfn = (bus_base + bus_offset + dst_offset) >> PAGE_SHIFT;
-	else
-		if (!bo->ttm)
-			return -EINVAL;
-		else
-			*pfn = page_to_pfn(ttm_tt_get_page(bo->ttm,
-							   dst_offset >>
-							   PAGE_SHIFT));
-	*prot = (mem->placement & TTM_PL_FLAG_CACHED) ?
-		PAGE_KERNEL : ttm_io_prot(mem->placement, PAGE_KERNEL);
-
-	return 0;
-}
-
 int ttm_bo_move_accel_cleanup(struct ttm_buffer_object *bo,
 			      void *sync_obj,
 			      void *sync_obj_arg,
-			      bool evict, bool no_wait,
+			      bool evict, bool no_wait_reserve,
+			      bool no_wait_gpu,
 			      struct ttm_mem_reg *new_mem)
 {
 	struct ttm_bo_device *bdev = bo->bdev;
......
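
The reserve/free pair introduced above is what makes the rest of the series safe: any path that creates a CPU mapping first calls ttm_mem_io_reserve() (idempotent, guarded by mem->bus.io_reserved) and drops the reservation when the mapping goes away. A hedged usage sketch of that pairing as seen from a kmap user, per the ttm_bo_kmap()/ttm_bo_kunmap() hunks above (error handling trimmed):

	int peek_first_page(struct ttm_buffer_object *bo)
	{
		struct ttm_bo_kmap_obj map;
		int ret;

		ret = ttm_bo_kmap(bo, 0, 1, &map);	/* calls ttm_mem_io_reserve() */
		if (ret)
			return ret;
		/* ... access the page via ttm_kmap_obj_virtual(&map, &is_iomem) ... */
		ttm_bo_kunmap(&map);			/* calls ttm_mem_io_free() for io maps */
		return 0;
	}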
drivers/gpu/drm/ttm/ttm_bo_vm.c
@@ -74,9 +74,6 @@ static int ttm_bo_vm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
 	struct ttm_buffer_object *bo = (struct ttm_buffer_object *)
 	    vma->vm_private_data;
 	struct ttm_bo_device *bdev = bo->bdev;
-	unsigned long bus_base;
-	unsigned long bus_offset;
-	unsigned long bus_size;
 	unsigned long page_offset;
 	unsigned long page_last;
 	unsigned long pfn;
@@ -84,7 +81,6 @@ static int ttm_bo_vm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
 	struct page *page;
 	int ret;
 	int i;
-	bool is_iomem;
 	unsigned long address = (unsigned long)vmf->virtual_address;
 	int retval = VM_FAULT_NOPAGE;
@@ -101,8 +97,21 @@ static int ttm_bo_vm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
 		return VM_FAULT_NOPAGE;
 	}

-	if (bdev->driver->fault_reserve_notify)
-		bdev->driver->fault_reserve_notify(bo);
+	if (bdev->driver->fault_reserve_notify) {
+		ret = bdev->driver->fault_reserve_notify(bo);
+		switch (ret) {
+		case 0:
+			break;
+		case -EBUSY:
+			set_need_resched();
+		case -ERESTARTSYS:
+			retval = VM_FAULT_NOPAGE;
+			goto out_unlock;
+		default:
+			retval = VM_FAULT_SIGBUS;
+			goto out_unlock;
+		}
+	}

 	/*
 	 * Wait for buffer data in transit, due to a pipelined
@@ -122,15 +131,12 @@ static int ttm_bo_vm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
 	spin_unlock(&bo->lock);

-	ret = ttm_bo_pci_offset(bdev, &bo->mem, &bus_base, &bus_offset,
-				&bus_size);
-	if (unlikely(ret != 0)) {
+	ret = ttm_mem_io_reserve(bdev, &bo->mem);
+	if (ret) {
 		retval = VM_FAULT_SIGBUS;
 		goto out_unlock;
 	}

-	is_iomem = (bus_size != 0);
-
 	page_offset = ((address - vma->vm_start) >> PAGE_SHIFT) +
 	    bo->vm_node->start - vma->vm_pgoff;
 	page_last = ((vma->vm_end - vma->vm_start) >> PAGE_SHIFT) +
@@ -154,8 +160,7 @@ static int ttm_bo_vm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
 	 * vma->vm_page_prot when the object changes caching policy, with
 	 * the correct locks held.
 	 */
-
-	if (is_iomem) {
+	if (bo->mem.bus.is_iomem) {
 		vma->vm_page_prot = ttm_io_prot(bo->mem.placement,
 						vma->vm_page_prot);
 	} else {
@@ -171,10 +176,8 @@ static int ttm_bo_vm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
 	 */

 	for (i = 0; i < TTM_BO_VM_NUM_PREFAULT; ++i) {
-		if (is_iomem)
-			pfn = ((bus_base + bus_offset) >> PAGE_SHIFT) +
-			    page_offset;
+		if (bo->mem.bus.is_iomem)
+			pfn = ((bo->mem.bus.base + bo->mem.bus.offset) >> PAGE_SHIFT) + page_offset;
 		else {
 			page = ttm_tt_get_page(ttm, page_offset);
 			if (unlikely(!page && i == 0)) {
@@ -198,7 +201,6 @@ static int ttm_bo_vm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
 				retval =
 				    (ret == -ENOMEM) ? VM_FAULT_OOM : VM_FAULT_SIGBUS;
 				goto out_unlock;
-
 			}

 			address += PAGE_SIZE;
@@ -221,8 +223,7 @@ static void ttm_bo_vm_open(struct vm_area_struct *vma)

 static void ttm_bo_vm_close(struct vm_area_struct *vma)
 {
-	struct ttm_buffer_object *bo =
-	    (struct ttm_buffer_object *)vma->vm_private_data;
+	struct ttm_buffer_object *bo = (struct ttm_buffer_object *)vma->vm_private_data;

 	ttm_bo_unref(&bo);
 	vma->vm_private_data = NULL;
......
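The fault path now asks the driver before filling the fault: 0 proceeds, -EBUSY and -ERESTARTSYS retry the fault (with a reschedule nudge for -EBUSY), anything else raises SIGBUS. A hedged sketch of what a driver can do with that hook, in the spirit of the radeon patch in this series (my_* names are illustrative):

/* Hedged sketch of a driver fault_reserve_notify(). If the BO sits in
 * CPU-invisible memory, migrate it somewhere mappable before the fault
 * handler computes the pfn from bo->mem.bus. */
static int my_fault_reserve_notify(struct ttm_buffer_object *bo)
{
	if (my_bo_is_cpu_visible(bo))	/* hypothetical check */
		return 0;

	/* BO is reserved here; no_wait_reserve = true, wait on the GPU */
	return ttm_bo_validate(bo, &my_gtt_placement, false, true, false);
}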
...@@ -137,9 +137,6 @@ int vmw_invalidate_caches(struct ttm_bo_device *bdev, uint32_t flags)
int vmw_init_mem_type(struct ttm_bo_device *bdev, uint32_t type,
		      struct ttm_mem_type_manager *man)
{
-	struct vmw_private *dev_priv =
-	    container_of(bdev, struct vmw_private, bdev);
	switch (type) {
	case TTM_PL_SYSTEM:
		/* System memory */
...@@ -151,11 +148,7 @@ int vmw_init_mem_type(struct ttm_bo_device *bdev, uint32_t type,
	case TTM_PL_VRAM:
		/* "On-card" video ram */
		man->gpu_offset = 0;
-		man->io_offset = dev_priv->vram_start;
-		man->io_size = dev_priv->vram_size;
-		man->flags = TTM_MEMTYPE_FLAG_FIXED |
-		    TTM_MEMTYPE_FLAG_NEEDS_IOREMAP | TTM_MEMTYPE_FLAG_MAPPABLE;
-		man->io_addr = NULL;
+		man->flags = TTM_MEMTYPE_FLAG_FIXED | TTM_MEMTYPE_FLAG_MAPPABLE;
		man->available_caching = TTM_PL_MASK_CACHING;
		man->default_caching = TTM_PL_FLAG_WC;
		break;
...@@ -193,6 +186,42 @@ static void vmw_swap_notify(struct ttm_buffer_object *bo)
	vmw_dmabuf_gmr_unbind(bo);
}

+static int vmw_ttm_io_mem_reserve(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem)
+{
+	struct ttm_mem_type_manager *man = &bdev->man[mem->mem_type];
+	struct vmw_private *dev_priv = container_of(bdev, struct vmw_private, bdev);
+
+	mem->bus.addr = NULL;
+	mem->bus.is_iomem = false;
+	mem->bus.offset = 0;
+	mem->bus.size = mem->num_pages << PAGE_SHIFT;
+	mem->bus.base = 0;
+	if (!(man->flags & TTM_MEMTYPE_FLAG_MAPPABLE))
+		return -EINVAL;
+	switch (mem->mem_type) {
+	case TTM_PL_SYSTEM:
+		/* System memory */
+		return 0;
+	case TTM_PL_VRAM:
+		mem->bus.offset = mem->mm_node->start << PAGE_SHIFT;
+		mem->bus.base = dev_priv->vram_start;
+		mem->bus.is_iomem = true;
+		break;
+	default:
+		return -EINVAL;
+	}
+	return 0;
+}
+
+static void vmw_ttm_io_mem_free(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem)
+{
+}
+
+static int vmw_ttm_fault_reserve_notify(struct ttm_buffer_object *bo)
+{
+	return 0;
+}
+
/**
 * FIXME: We're using the old vmware polling method to sync.
 * Do this with fences instead.
...@@ -248,5 +277,8 @@ struct ttm_bo_driver vmw_bo_driver = {
	.sync_obj_unref = vmw_sync_obj_unref,
	.sync_obj_ref = vmw_sync_obj_ref,
	.move_notify = vmw_move_notify,
-	.swap_notify = vmw_swap_notify
+	.swap_notify = vmw_swap_notify,
+	.fault_reserve_notify = &vmw_ttm_fault_reserve_notify,
+	.io_mem_reserve = &vmw_ttm_io_mem_reserve,
+	.io_mem_free = &vmw_ttm_io_mem_free,
};
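Instead of publishing io_offset/io_size once at memory-type init, vmwgfx now describes each region on demand by filling the new ttm_mem_reg.bus fields. On the core side these callbacks are wrapped by the ttm_mem_io_reserve()/ttm_mem_io_free() pair declared later in this series; a sketch of how that wrapper plausibly behaves (not the verbatim helper), with bus.io_reserved keeping the calls balanced:

/* Hedged sketch of the core-side wrapper around io_mem_reserve. */
int ttm_mem_io_reserve(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem)
{
	int ret;

	if (bdev->driver->io_mem_reserve && !mem->bus.io_reserved) {
		ret = bdev->driver->io_mem_reserve(bdev, mem);
		if (unlikely(ret != 0))
			return ret;
		mem->bus.io_reserved = true;	/* io_mem_free will clear it */
	}
	return 0;
}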
...@@ -570,7 +570,7 @@ static int vmw_validate_single_buffer(struct vmw_private *dev_priv,
	 * Put BO in VRAM, only if there is space.
	 */
-	ret = ttm_bo_validate(bo, &vmw_vram_sys_placement, true, false);
+	ret = ttm_bo_validate(bo, &vmw_vram_sys_placement, true, false, false);
	if (unlikely(ret == -ERESTARTSYS))
		return ret;
...@@ -590,7 +590,7 @@ static int vmw_validate_single_buffer(struct vmw_private *dev_priv,
	 * previous contents.
	 */
-	ret = ttm_bo_validate(bo, &vmw_vram_placement, true, false);
+	ret = ttm_bo_validate(bo, &vmw_vram_placement, true, false, false);
	return ret;
}
...
...@@ -628,7 +628,7 @@ int vmw_dmabuf_from_vram(struct vmw_private *vmw_priv,
	if (unlikely(ret != 0))
		return ret;

-	ret = ttm_bo_validate(bo, &vmw_sys_placement, false, false);
+	ret = ttm_bo_validate(bo, &vmw_sys_placement, false, false, false);
	ttm_bo_unreserve(bo);

	return ret;
...@@ -652,7 +652,7 @@ int vmw_dmabuf_to_start_of_vram(struct vmw_private *vmw_priv,
	if (unlikely(ret != 0))
		goto err_unlock;

-	ret = ttm_bo_validate(bo, &ne_placement, false, false);
+	ret = ttm_bo_validate(bo, &ne_placement, false, false, false);
	ttm_bo_unreserve(bo);
err_unlock:
	ttm_write_unlock(&vmw_priv->active_master->lock);
...
...@@ -118,7 +118,7 @@ static int vmw_dmabuf_pin_in_vram(struct vmw_private *dev_priv,
	if (pin)
		overlay_placement = &vmw_vram_ne_placement;

-	ret = ttm_bo_validate(bo, overlay_placement, interruptible, false);
+	ret = ttm_bo_validate(bo, overlay_placement, interruptible, false, false);
	ttm_bo_unreserve(bo);
...
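Every call site above simply passes false for both new flags, keeping the old blocking behaviour. The gain is that a caller can now refuse either wait individually; a minimal sketch under that assumption (try_pin_placement is a hypothetical wrapper, not from this series):

/* Hedged sketch: opportunistic, non-blocking validate. */
static int try_pin_placement(struct ttm_buffer_object *bo,
			     struct ttm_placement *placement)
{
	/* interruptible = false, no_wait_reserve = true, no_wait_gpu = true:
	 * return -EBUSY instead of sleeping on a reservation or a fence */
	return ttm_bo_validate(bo, placement, false, true, true);
}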
...@@ -66,6 +66,26 @@ struct ttm_placement {
	const uint32_t *busy_placement;
};

+/**
+ * struct ttm_bus_placement
+ *
+ * @addr:	 mapped virtual address
+ * @base:	 bus base address
+ * @is_iomem:	 is this io memory?
+ * @size:	 size in bytes
+ * @offset:	 offset from the base address
+ * @io_reserved: is the io resource currently reserved?
+ *
+ * Structure indicating the bus placement of an object.
+ */
+struct ttm_bus_placement {
+	void		*addr;
+	unsigned long	base;
+	unsigned long	size;
+	unsigned long	offset;
+	bool		is_iomem;
+	bool		io_reserved;
+};
+
/**
 * struct ttm_mem_reg
...@@ -75,6 +95,7 @@ struct ttm_placement {
 * @num_pages: Actual size of memory region in pages.
 * @page_alignment: Page alignment.
 * @placement: Placement flags.
+ * @bus: Placement on io bus accessible to the CPU
 *
 * Structure indicating the placement and space resources used by a
 * buffer object.
...@@ -87,6 +108,7 @@ struct ttm_mem_reg {
	uint32_t page_alignment;
	uint32_t mem_type;
	uint32_t placement;
+	struct ttm_bus_placement bus;
};

/**
...@@ -274,6 +296,7 @@ struct ttm_bo_kmap_obj {
		ttm_bo_map_kmap = 3,
		ttm_bo_map_premapped = 4 | TTM_BO_MAP_IOMEM_MASK,
	} bo_kmap_type;
+	struct ttm_buffer_object *bo;
};

/**
...@@ -313,7 +336,8 @@ extern int ttm_bo_wait(struct ttm_buffer_object *bo, bool lazy,
 * @bo: The buffer object.
 * @placement: Proposed placement for the buffer object.
 * @interruptible: Sleep interruptible if sleeping.
- * @no_wait: Return immediately if the buffer is busy.
+ * @no_wait_reserve: Return immediately if other buffers are busy.
+ * @no_wait_gpu: Return immediately if the GPU is busy.
 *
 * Changes placement and caching policy of the buffer object
 * according to the proposed placement.
...@@ -325,7 +349,8 @@ extern int ttm_bo_wait(struct ttm_buffer_object *bo, bool lazy,
 */
extern int ttm_bo_validate(struct ttm_buffer_object *bo,
			   struct ttm_placement *placement,
-			   bool interruptible, bool no_wait);
+			   bool interruptible, bool no_wait_reserve,
+			   bool no_wait_gpu);

/**
 * ttm_bo_unref
...
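The new bo back-pointer in ttm_bo_kmap_obj is what lets ttm_bo_kunmap() drop the io reservation taken when the mapping was set up. A hedged usage sketch (my_cpu_fill is a hypothetical helper; ttm_bo_kmap, ttm_kmap_obj_virtual and ttm_bo_kunmap are the existing API):

/* Hedged sketch: CPU access through a kernel mapping of a BO. */
static int my_cpu_fill(struct ttm_buffer_object *bo, u8 value)
{
	struct ttm_bo_kmap_obj map;
	bool is_iomem;
	void *virt;
	int ret;

	ret = ttm_bo_kmap(bo, 0, bo->num_pages, &map);
	if (ret)
		return ret;
	virt = ttm_kmap_obj_virtual(&map, &is_iomem);
	if (is_iomem)
		memset_io((void __iomem *)virt, value, bo->num_pages << PAGE_SHIFT);
	else
		memset(virt, value, bo->num_pages << PAGE_SHIFT);
	ttm_bo_kunmap(&map);	/* balances the io reservation taken by kmap */
	return 0;
}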
...@@ -176,8 +176,6 @@ struct ttm_tt {
#define TTM_MEMTYPE_FLAG_FIXED         (1 << 0)	/* Fixed (on-card) PCI memory */
#define TTM_MEMTYPE_FLAG_MAPPABLE      (1 << 1)	/* Memory mappable */
-#define TTM_MEMTYPE_FLAG_NEEDS_IOREMAP (1 << 2)	/* Fixed memory needs ioremap
-						   before kernel access. */
#define TTM_MEMTYPE_FLAG_CMA           (1 << 3)	/* Can't map aperture */

/**
...@@ -189,13 +187,6 @@ struct ttm_tt {
 * managed by this memory type.
 * @gpu_offset: If used, the GPU offset of the first managed page of
 * fixed memory or the first managed location in an aperture.
- * @io_offset: The io_offset of the first managed page of IO memory or
- * the first managed location in an aperture. For TTM_MEMTYPE_FLAG_CMA
- * memory, this should be set to NULL.
- * @io_size: The size of a managed IO region (fixed memory or aperture).
- * @io_addr: Virtual kernel address if the io region is pre-mapped. For
- * TTM_MEMTYPE_FLAG_NEEDS_IOREMAP there is no pre-mapped io map and
- * @io_addr should be set to NULL.
 * @size: Size of the managed region.
 * @available_caching: A mask of available caching types, TTM_PL_FLAG_XX,
 * as defined in ttm_placement_common.h
...@@ -221,9 +212,6 @@ struct ttm_mem_type_manager {
	bool use_type;
	uint32_t flags;
	unsigned long gpu_offset;
-	unsigned long io_offset;
-	unsigned long io_size;
-	void *io_addr;
	uint64_t size;
	uint32_t available_caching;
	uint32_t default_caching;
...@@ -311,7 +299,8 @@ struct ttm_bo_driver {
	 */
	int (*move) (struct ttm_buffer_object *bo,
		     bool evict, bool interruptible,
-		     bool no_wait, struct ttm_mem_reg *new_mem);
+		     bool no_wait_reserve, bool no_wait_gpu,
+		     struct ttm_mem_reg *new_mem);

	/**
	 * struct ttm_bo_driver_member verify_access
...@@ -351,12 +340,21 @@ struct ttm_bo_driver {
		     struct ttm_mem_reg *new_mem);

	/* notify the driver we are taking a fault on this BO
	 * and have reserved it */
-	void (*fault_reserve_notify)(struct ttm_buffer_object *bo);
+	int (*fault_reserve_notify)(struct ttm_buffer_object *bo);

	/**
	 * notify the driver that we're about to swap out this bo
	 */
	void (*swap_notify) (struct ttm_buffer_object *bo);

+	/**
+	 * Driver callback invoked when mapping io memory (for bo_move_memcpy,
+	 * for instance). TTM will take care to call io_mem_free whenever
+	 * the mapping is no longer used; io_mem_reserve and io_mem_free
+	 * calls are balanced.
+	 */
+	int (*io_mem_reserve)(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem);
+	void (*io_mem_free)(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem);
};
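With NEEDS_IOREMAP and io_addr gone, a driver that wants a pre-mapped aperture now maps it per reservation: reserve maps, free unmaps. A sketch under that assumption (my_* names are illustrative; note vmwgfx above needs neither step, and it assumes bus.base/offset/size were filled in first):

/* Hedged sketch: an io_mem_reserve/io_mem_free pair that ioremaps. */
static int my_io_mem_reserve(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem)
{
	mem->bus.addr = (void __force *)
		ioremap_wc(mem->bus.base + mem->bus.offset, mem->bus.size);
	return mem->bus.addr ? 0 : -ENOMEM;
}

static void my_io_mem_free(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem)
{
	iounmap((void __iomem *)mem->bus.addr);
	mem->bus.addr = NULL;
}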
/**
...@@ -633,7 +631,8 @@ extern bool ttm_mem_reg_is_pci(struct ttm_bo_device *bdev,
 * @proposed_placement: Proposed new placement for the buffer object.
 * @mem: A struct ttm_mem_reg.
 * @interruptible: Sleep interruptible when sleeping.
- * @no_wait: Don't sleep waiting for space to become available.
+ * @no_wait_reserve: Return immediately if other buffers are busy.
+ * @no_wait_gpu: Return immediately if the GPU is busy.
 *
 * Allocate memory space for the buffer object pointed to by @bo, using
 * the placement flags in @mem, potentially evicting other idle buffer objects.
...@@ -647,7 +646,8 @@ extern bool ttm_mem_reg_is_pci(struct ttm_bo_device *bdev,
extern int ttm_bo_mem_space(struct ttm_buffer_object *bo,
			    struct ttm_placement *placement,
			    struct ttm_mem_reg *mem,
-			    bool interruptible, bool no_wait);
+			    bool interruptible,
+			    bool no_wait_reserve, bool no_wait_gpu);

/**
 * ttm_bo_wait_for_cpu
 *
...@@ -682,6 +682,11 @@ extern int ttm_bo_pci_offset(struct ttm_bo_device *bdev,
			     unsigned long *bus_offset,
			     unsigned long *bus_size);

+extern int ttm_mem_io_reserve(struct ttm_bo_device *bdev,
+			      struct ttm_mem_reg *mem);
+extern void ttm_mem_io_free(struct ttm_bo_device *bdev,
+			    struct ttm_mem_reg *mem);
+
extern void ttm_bo_global_release(struct ttm_global_reference *ref);
extern int ttm_bo_global_init(struct ttm_global_reference *ref);
...@@ -826,7 +831,8 @@ extern void ttm_bo_unblock_reservation(struct ttm_buffer_object *bo);
 *
 * @bo: A pointer to a struct ttm_buffer_object.
 * @evict: 1: This is an eviction. Don't try to pipeline.
- * @no_wait: Never sleep, but rather return with -EBUSY.
+ * @no_wait_reserve: Return immediately if other buffers are busy.
+ * @no_wait_gpu: Return immediately if the GPU is busy.
 * @new_mem: struct ttm_mem_reg indicating where to move.
 *
 * Optimized move function for a buffer object with both old and
...@@ -840,15 +846,16 @@ extern void ttm_bo_unblock_reservation(struct ttm_buffer_object *bo);
 */
extern int ttm_bo_move_ttm(struct ttm_buffer_object *bo,
-			   bool evict, bool no_wait,
-			   struct ttm_mem_reg *new_mem);
+			   bool evict, bool no_wait_reserve,
+			   bool no_wait_gpu, struct ttm_mem_reg *new_mem);

/**
 * ttm_bo_move_memcpy
 *
 * @bo: A pointer to a struct ttm_buffer_object.
 * @evict: 1: This is an eviction. Don't try to pipeline.
- * @no_wait: Never sleep, but rather return with -EBUSY.
+ * @no_wait_reserve: Return immediately if other buffers are busy.
+ * @no_wait_gpu: Return immediately if the GPU is busy.
 * @new_mem: struct ttm_mem_reg indicating where to move.
 *
 * Fallback move function for a mappable buffer object in mappable memory.
...@@ -862,8 +869,8 @@ extern int ttm_bo_move_ttm(struct ttm_buffer_object *bo,
 */
extern int ttm_bo_move_memcpy(struct ttm_buffer_object *bo,
-			      bool evict,
-			      bool no_wait, struct ttm_mem_reg *new_mem);
+			      bool evict, bool no_wait_reserve,
+			      bool no_wait_gpu, struct ttm_mem_reg *new_mem);

/**
 * ttm_bo_free_old_node
...@@ -882,7 +889,8 @@ extern void ttm_bo_free_old_node(struct ttm_buffer_object *bo);
 * @sync_obj_arg: An argument to pass to the sync object idle / wait
 * functions.
 * @evict: This is an evict move. Don't return until the buffer is idle.
- * @no_wait: Never sleep, but rather return with -EBUSY.
+ * @no_wait_reserve: Return immediately if other buffers are busy.
+ * @no_wait_gpu: Return immediately if the GPU is busy.
 * @new_mem: struct ttm_mem_reg indicating where to move.
 *
 * Accelerated move function to be called when an accelerated move
...@@ -896,7 +904,8 @@ extern void ttm_bo_free_old_node(struct ttm_buffer_object *bo);
extern int ttm_bo_move_accel_cleanup(struct ttm_buffer_object *bo,
				     void *sync_obj,
				     void *sync_obj_arg,
-				     bool evict, bool no_wait,
+				     bool evict, bool no_wait_reserve,
+				     bool no_wait_gpu,
				     struct ttm_mem_reg *new_mem);

/**
 * ttm_io_prot
...
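For reference, the caching decision that the deleted ttm_bo_pfn_prot() used to make now lives with the callers of ttm_io_prot(), as the fault handler hunk above shows. The pattern, as a small sketch (my_bo_prot is a hypothetical helper; the logic is taken from the removed function):

/* Hedged sketch: derive kernel page protection from placement flags. */
static pgprot_t my_bo_prot(struct ttm_mem_reg *mem)
{
	if (mem->placement & TTM_PL_FLAG_CACHED)
		return PAGE_KERNEL;
	return ttm_io_prot(mem->placement, PAGE_KERNEL);
}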