Commit fe662d84 authored by Christian König, committed by Christian König

drm/ttm: remove io_reserve_lru handling v3

That is not used any more.

v2: keep the NULL checks in TTM.
v3: remove unused variable
Signed-off-by: Christian König <christian.koenig@amd.com>
Acked-by: Daniel Vetter <daniel.vetter@ffwll.ch>
Reviewed-by: Ben Skeggs <bskeggs@redhat.com>
Link: https://patchwork.freedesktop.org/patch/388646/
parent 141b15e5
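At its core the change replaces the per-manager io_reserve bookkeeping (mutex, LRU list and io_reserved_count refcount) with state derived from the bus placement itself: a resource counts as reserved when any of bus.base, bus.offset or bus.addr is non-zero, which makes reserve and free idempotent, and the old -ENOSPC/evict/retry loop disappears along with the LRU it walked. Extracted from the ttm_bo_util.c hunks below, with the diff churn stripped away, the two helpers end up as:

/* The reworked helpers, extracted from the ttm_bo_util.c hunks below.
 * Reservation state now lives in the bus placement itself: any non-zero
 * field means "already reserved", so both calls are idempotent and no
 * io_reserve mutex, LRU or refcount is needed any more.
 */
int ttm_mem_io_reserve(struct ttm_bo_device *bdev, struct ttm_resource *mem)
{
	if (mem->bus.base || mem->bus.offset || mem->bus.addr)
		return 0;		/* already reserved */

	mem->bus.is_iomem = false;
	if (!bdev->driver->io_mem_reserve)
		return 0;		/* nothing to reserve for this driver */

	return bdev->driver->io_mem_reserve(bdev, mem);
}

void ttm_mem_io_free(struct ttm_bo_device *bdev, struct ttm_resource *mem)
{
	if (!mem->bus.base && !mem->bus.offset && !mem->bus.addr)
		return;			/* nothing was reserved */

	if (bdev->driver->io_mem_free)
		bdev->driver->io_mem_free(bdev, mem);

	mem->bus.base = 0;		/* mark as unreserved again */
	mem->bus.offset = 0;
	mem->bus.addr = NULL;
}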
--- a/drivers/gpu/drm/ttm/ttm_bo.c
+++ b/drivers/gpu/drm/ttm/ttm_bo.c
@@ -263,11 +263,7 @@ static int ttm_bo_handle_move_mem(struct ttm_buffer_object *bo,
 	struct ttm_resource_manager *new_man = ttm_manager_type(bdev, mem->mem_type);
 	int ret;
 
-	ret = ttm_mem_io_lock(old_man, true);
-	if (unlikely(ret != 0))
-		goto out_err;
-	ttm_bo_unmap_virtual_locked(bo);
-	ttm_mem_io_unlock(old_man);
+	ttm_bo_unmap_virtual(bo);
 
 	/*
 	 * Create and bind a ttm if required.
@@ -538,7 +534,6 @@ static void ttm_bo_release(struct kref *kref)
 	struct ttm_buffer_object *bo =
 	    container_of(kref, struct ttm_buffer_object, kref);
 	struct ttm_bo_device *bdev = bo->bdev;
-	struct ttm_resource_manager *man = ttm_manager_type(bdev, bo->mem.mem_type);
 	size_t acc_size = bo->acc_size;
 	int ret;
 
@@ -556,9 +551,7 @@ static void ttm_bo_release(struct kref *kref)
 			bo->bdev->driver->release_notify(bo);
 
 		drm_vma_offset_remove(bdev->vma_manager, &bo->base.vma_node);
-		ttm_mem_io_lock(man, false);
-		ttm_mem_io_free_vm(bo);
-		ttm_mem_io_unlock(man);
+		ttm_mem_io_free(bdev, &bo->mem);
 	}
 
 	if (!dma_resv_test_signaled_rcu(bo->base.resv, true) ||
@@ -648,8 +641,6 @@ static int ttm_bo_evict(struct ttm_buffer_object *bo,
 	evict_mem = bo->mem;
 	evict_mem.mm_node = NULL;
-	evict_mem.bus.io_reserved_vm = false;
-	evict_mem.bus.io_reserved_count = 0;
 	evict_mem.bus.base = 0;
 	evict_mem.bus.offset = 0;
 	evict_mem.bus.addr = NULL;
@@ -1085,8 +1076,6 @@ static int ttm_bo_move_buffer(struct ttm_buffer_object *bo,
 	mem.num_pages = bo->num_pages;
 	mem.size = mem.num_pages << PAGE_SHIFT;
 	mem.page_alignment = bo->mem.page_alignment;
-	mem.bus.io_reserved_vm = false;
-	mem.bus.io_reserved_count = 0;
 	mem.bus.base = 0;
 	mem.bus.offset = 0;
 	mem.bus.addr = NULL;
@@ -1238,7 +1227,6 @@ int ttm_bo_init_reserved(struct ttm_bo_device *bdev,
 	INIT_LIST_HEAD(&bo->lru);
 	INIT_LIST_HEAD(&bo->ddestroy);
 	INIT_LIST_HEAD(&bo->swap);
-	INIT_LIST_HEAD(&bo->io_reserve_lru);
 	bo->bdev = bdev;
 	bo->type = type;
 	bo->num_pages = num_pages;
@@ -1247,8 +1235,6 @@ int ttm_bo_init_reserved(struct ttm_bo_device *bdev,
 	bo->mem.num_pages = bo->num_pages;
 	bo->mem.mm_node = NULL;
 	bo->mem.page_alignment = page_alignment;
-	bo->mem.bus.io_reserved_vm = false;
-	bo->mem.bus.io_reserved_count = 0;
 	bo->mem.bus.base = 0;
 	bo->mem.bus.offset = 0;
 	bo->mem.bus.addr = NULL;
@@ -1554,25 +1540,13 @@ EXPORT_SYMBOL(ttm_bo_device_init);
  * buffer object vm functions.
  */
 
-void ttm_bo_unmap_virtual_locked(struct ttm_buffer_object *bo)
-{
-	struct ttm_bo_device *bdev = bo->bdev;
-
-	drm_vma_node_unmap(&bo->base.vma_node, bdev->dev_mapping);
-	ttm_mem_io_free_vm(bo);
-}
-
 void ttm_bo_unmap_virtual(struct ttm_buffer_object *bo)
 {
 	struct ttm_bo_device *bdev = bo->bdev;
-	struct ttm_resource_manager *man = ttm_manager_type(bdev, bo->mem.mem_type);
 
-	ttm_mem_io_lock(man, false);
-	ttm_bo_unmap_virtual_locked(bo);
-	ttm_mem_io_unlock(man);
+	drm_vma_node_unmap(&bo->base.vma_node, bdev->dev_mapping);
+	ttm_mem_io_free(bdev, &bo->mem);
 }
 EXPORT_SYMBOL(ttm_bo_unmap_virtual);
 
 int ttm_bo_wait(struct ttm_buffer_object *bo,
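With the lock gone, ttm_bo_unmap_virtual absorbs what ttm_bo_unmap_virtual_locked used to do. Pulled out of the last hunk above, the surviving function is simply:

void ttm_bo_unmap_virtual(struct ttm_buffer_object *bo)
{
	struct ttm_bo_device *bdev = bo->bdev;

	/* Kill the CPU mappings, then drop the io reservation; no
	 * manager lock is required any more.
	 */
	drm_vma_node_unmap(&bo->base.vma_node, bdev->dev_mapping);
	ttm_mem_io_free(bdev, &bo->mem);
}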
--- a/drivers/gpu/drm/ttm/ttm_bo_util.c
+++ b/drivers/gpu/drm/ttm/ttm_bo_util.c
@@ -91,122 +91,42 @@ int ttm_bo_move_ttm(struct ttm_buffer_object *bo,
 }
 EXPORT_SYMBOL(ttm_bo_move_ttm);
 
-int ttm_mem_io_lock(struct ttm_resource_manager *man, bool interruptible)
-{
-	if (likely(!man->use_io_reserve_lru))
-		return 0;
-
-	if (interruptible)
-		return mutex_lock_interruptible(&man->io_reserve_mutex);
-
-	mutex_lock(&man->io_reserve_mutex);
-	return 0;
-}
-
-void ttm_mem_io_unlock(struct ttm_resource_manager *man)
-{
-	if (likely(!man->use_io_reserve_lru))
-		return;
-
-	mutex_unlock(&man->io_reserve_mutex);
-}
-
-static int ttm_mem_io_evict(struct ttm_resource_manager *man)
-{
-	struct ttm_buffer_object *bo;
-
-	bo = list_first_entry_or_null(&man->io_reserve_lru,
-				      struct ttm_buffer_object,
-				      io_reserve_lru);
-	if (!bo)
-		return -ENOSPC;
-
-	list_del_init(&bo->io_reserve_lru);
-	ttm_bo_unmap_virtual_locked(bo);
-	return 0;
-}
-
 int ttm_mem_io_reserve(struct ttm_bo_device *bdev,
 		       struct ttm_resource *mem)
 {
-	struct ttm_resource_manager *man = ttm_manager_type(bdev, mem->mem_type);
-	int ret;
-
-	if (mem->bus.io_reserved_count++)
+	if (mem->bus.base || mem->bus.offset || mem->bus.addr)
 		return 0;
 
+	mem->bus.is_iomem = false;
 	if (!bdev->driver->io_mem_reserve)
 		return 0;
 
-	mem->bus.addr = NULL;
-	mem->bus.offset = 0;
-	mem->bus.base = 0;
-	mem->bus.is_iomem = false;
-retry:
-	ret = bdev->driver->io_mem_reserve(bdev, mem);
-	if (ret == -ENOSPC) {
-		ret = ttm_mem_io_evict(man);
-		if (ret == 0)
-			goto retry;
-	}
-	return ret;
+	return bdev->driver->io_mem_reserve(bdev, mem);
 }
 
 void ttm_mem_io_free(struct ttm_bo_device *bdev,
 		     struct ttm_resource *mem)
 {
-	if (--mem->bus.io_reserved_count)
+	if (!mem->bus.base && !mem->bus.offset && !mem->bus.addr)
 		return;
 
-	if (!bdev->driver->io_mem_free)
-		return;
+	if (bdev->driver->io_mem_free)
+		bdev->driver->io_mem_free(bdev, mem);
 
-	bdev->driver->io_mem_free(bdev, mem);
-}
-
-int ttm_mem_io_reserve_vm(struct ttm_buffer_object *bo)
-{
-	struct ttm_resource_manager *man = ttm_manager_type(bo->bdev, bo->mem.mem_type);
-	struct ttm_resource *mem = &bo->mem;
-	int ret;
-
-	if (mem->bus.io_reserved_vm)
-		return 0;
-
-	ret = ttm_mem_io_reserve(bo->bdev, mem);
-	if (unlikely(ret != 0))
-		return ret;
-	mem->bus.io_reserved_vm = true;
-	if (man->use_io_reserve_lru)
-		list_add_tail(&bo->io_reserve_lru,
-			      &man->io_reserve_lru);
-	return 0;
-}
-
-void ttm_mem_io_free_vm(struct ttm_buffer_object *bo)
-{
-	struct ttm_resource *mem = &bo->mem;
-
-	if (!mem->bus.io_reserved_vm)
-		return;
-
-	mem->bus.io_reserved_vm = false;
-	list_del_init(&bo->io_reserve_lru);
-	ttm_mem_io_free(bo->bdev, mem);
+	mem->bus.base = 0;
+	mem->bus.offset = 0;
+	mem->bus.addr = NULL;
 }
 
 static int ttm_resource_ioremap(struct ttm_bo_device *bdev,
 				struct ttm_resource *mem,
 				void **virtual)
 {
-	struct ttm_resource_manager *man = ttm_manager_type(bdev, mem->mem_type);
 	int ret;
 	void *addr;
 
 	*virtual = NULL;
-	(void) ttm_mem_io_lock(man, false);
 	ret = ttm_mem_io_reserve(bdev, mem);
-	ttm_mem_io_unlock(man);
 	if (ret || !mem->bus.is_iomem)
 		return ret;
@@ -222,9 +142,7 @@ static int ttm_resource_ioremap(struct ttm_bo_device *bdev,
 		addr = ioremap(mem->bus.base + mem->bus.offset,
 			       bus_size);
 		if (!addr) {
-			(void) ttm_mem_io_lock(man, false);
 			ttm_mem_io_free(bdev, mem);
-			ttm_mem_io_unlock(man);
 			return -ENOMEM;
 		}
 	}
@@ -236,15 +154,9 @@ static void ttm_resource_iounmap(struct ttm_bo_device *bdev,
 				 struct ttm_resource *mem,
 				 void *virtual)
 {
-	struct ttm_resource_manager *man;
-
-	man = ttm_manager_type(bdev, mem->mem_type);
 	if (virtual && mem->bus.addr == NULL)
 		iounmap(virtual);
-	(void) ttm_mem_io_lock(man, false);
 	ttm_mem_io_free(bdev, mem);
-	ttm_mem_io_unlock(man);
 }
 
 static int ttm_copy_io_page(void *dst, void *src, unsigned long page)
@@ -458,7 +370,6 @@ static int ttm_buffer_object_transfer(struct ttm_buffer_object *bo,
 	INIT_LIST_HEAD(&fbo->base.ddestroy);
 	INIT_LIST_HEAD(&fbo->base.lru);
 	INIT_LIST_HEAD(&fbo->base.swap);
-	INIT_LIST_HEAD(&fbo->base.io_reserve_lru);
 	fbo->base.moving = NULL;
 	drm_vma_node_reset(&fbo->base.base.vma_node);
@@ -573,8 +484,6 @@ int ttm_bo_kmap(struct ttm_buffer_object *bo,
 		unsigned long start_page, unsigned long num_pages,
 		struct ttm_bo_kmap_obj *map)
 {
-	struct ttm_resource_manager *man =
-		ttm_manager_type(bo->bdev, bo->mem.mem_type);
 	unsigned long offset, size;
 	int ret;
@@ -585,9 +494,7 @@ int ttm_bo_kmap(struct ttm_buffer_object *bo,
 	if (start_page > bo->num_pages)
 		return -EINVAL;
 
-	(void) ttm_mem_io_lock(man, false);
 	ret = ttm_mem_io_reserve(bo->bdev, &bo->mem);
-	ttm_mem_io_unlock(man);
 	if (ret)
 		return ret;
 	if (!bo->mem.bus.is_iomem) {
@@ -602,10 +509,6 @@ EXPORT_SYMBOL(ttm_bo_kmap);
 
 void ttm_bo_kunmap(struct ttm_bo_kmap_obj *map)
 {
-	struct ttm_buffer_object *bo = map->bo;
-	struct ttm_resource_manager *man =
-		ttm_manager_type(bo->bdev, bo->mem.mem_type);
-
 	if (!map->virtual)
 		return;
 	switch (map->bo_kmap_type) {
@@ -623,9 +526,7 @@ void ttm_bo_kunmap(struct ttm_bo_kmap_obj *map)
 	default:
 		BUG();
 	}
-	(void) ttm_mem_io_lock(man, false);
 	ttm_mem_io_free(map->bo->bdev, &map->bo->mem);
-	ttm_mem_io_unlock(man);
 	map->virtual = NULL;
 	map->page = NULL;
 }
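One consequence for drivers: ttm_mem_io_reserve no longer evicts LRU entries and retries on -ENOSPC, so an io_mem_reserve callback is now expected to either fill in the bus placement or fail outright. A hypothetical callback under the new contract is sketched below; the mydrv_* names and the aperture layout are invented for illustration, though real drivers of this era (radeon, for instance) follow a very similar pattern.

/* Hypothetical io_mem_reserve under the new contract; the mydrv_*
 * names and aperture layout are invented for illustration.
 */
static int mydrv_io_mem_reserve(struct ttm_bo_device *bdev,
				struct ttm_resource *mem)
{
	struct mydrv_device *mdev = container_of(bdev, struct mydrv_device, ttm);

	switch (mem->mem_type) {
	case TTM_PL_SYSTEM:
		return 0;	/* system RAM, nothing to reserve */
	case TTM_PL_VRAM:
		/* Non-zero bus fields now double as the "reserved" marker. */
		mem->bus.base = mdev->vram_aper_base;	/* PCI aperture base */
		mem->bus.offset = mem->start << PAGE_SHIFT;
		mem->bus.is_iomem = true;
		return 0;
	default:
		return -EINVAL;
	}
}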
--- a/drivers/gpu/drm/ttm/ttm_bo_vm.c
+++ b/drivers/gpu/drm/ttm/ttm_bo_vm.c
@@ -281,8 +281,6 @@ vm_fault_t ttm_bo_vm_fault_reserved(struct vm_fault *vmf,
 	pgoff_t i;
 	vm_fault_t ret = VM_FAULT_NOPAGE;
 	unsigned long address = vmf->address;
-	struct ttm_resource_manager *man =
-		ttm_manager_type(bdev, bo->mem.mem_type);
 
 	/*
 	 * Refuse to fault imported pages. This should be handled
@@ -321,24 +319,17 @@ vm_fault_t ttm_bo_vm_fault_reserved(struct vm_fault *vmf,
 	if (unlikely(ret != 0))
 		return ret;
 
-	err = ttm_mem_io_lock(man, true);
+	err = ttm_mem_io_reserve(bdev, &bo->mem);
 	if (unlikely(err != 0))
-		return VM_FAULT_NOPAGE;
-	err = ttm_mem_io_reserve_vm(bo);
-	if (unlikely(err != 0)) {
-		ret = VM_FAULT_SIGBUS;
-		goto out_io_unlock;
-	}
+		return VM_FAULT_SIGBUS;
 
 	page_offset = ((address - vma->vm_start) >> PAGE_SHIFT) +
 		vma->vm_pgoff - drm_vma_node_start(&bo->base.vma_node);
 	page_last = vma_pages(vma) + vma->vm_pgoff -
 		drm_vma_node_start(&bo->base.vma_node);
 
-	if (unlikely(page_offset >= bo->num_pages)) {
-		ret = VM_FAULT_SIGBUS;
-		goto out_io_unlock;
-	}
+	if (unlikely(page_offset >= bo->num_pages))
+		return VM_FAULT_SIGBUS;
 
 	prot = ttm_io_prot(bo->mem.placement, prot);
 	if (!bo->mem.bus.is_iomem) {
@@ -350,21 +341,17 @@ vm_fault_t ttm_bo_vm_fault_reserved(struct vm_fault *vmf,
 		};
 
 		ttm = bo->ttm;
-		if (ttm_tt_populate(bo->ttm, &ctx)) {
-			ret = VM_FAULT_OOM;
-			goto out_io_unlock;
-		}
+		if (ttm_tt_populate(bo->ttm, &ctx))
+			return VM_FAULT_OOM;
 	} else {
 		/* Iomem should not be marked encrypted */
 		prot = pgprot_decrypted(prot);
 	}
 
 	/* We don't prefault on huge faults. Yet. */
-	if (IS_ENABLED(CONFIG_TRANSPARENT_HUGEPAGE) && fault_page_size != 1) {
-		ret = ttm_bo_vm_insert_huge(vmf, bo, page_offset,
-					    fault_page_size, prot);
-		goto out_io_unlock;
-	}
+	if (IS_ENABLED(CONFIG_TRANSPARENT_HUGEPAGE) && fault_page_size != 1)
+		return ttm_bo_vm_insert_huge(vmf, bo, page_offset,
+					     fault_page_size, prot);
 
 	/*
 	 * Speculatively prefault a number of pages. Only error on
@@ -376,8 +363,7 @@ vm_fault_t ttm_bo_vm_fault_reserved(struct vm_fault *vmf,
 		} else {
 			page = ttm->pages[page_offset];
 			if (unlikely(!page && i == 0)) {
-				ret = VM_FAULT_OOM;
-				goto out_io_unlock;
+				return VM_FAULT_OOM;
 			} else if (unlikely(!page)) {
 				break;
 			}
@@ -404,7 +390,7 @@ vm_fault_t ttm_bo_vm_fault_reserved(struct vm_fault *vmf,
 		/* Never error on prefaulted PTEs */
 		if (unlikely((ret & VM_FAULT_ERROR))) {
 			if (i == 0)
-				goto out_io_unlock;
+				return VM_FAULT_NOPAGE;
 			else
 				break;
 		}
@@ -413,9 +399,6 @@ vm_fault_t ttm_bo_vm_fault_reserved(struct vm_fault *vmf,
 		if (unlikely(++page_offset >= page_last))
 			break;
 	}
-	ret = VM_FAULT_NOPAGE;
-out_io_unlock:
-	ttm_mem_io_unlock(man);
 	return ret;
 }
 EXPORT_SYMBOL(ttm_bo_vm_fault_reserved);
--- a/drivers/gpu/drm/ttm/ttm_resource.c
+++ b/drivers/gpu/drm/ttm/ttm_resource.c
@@ -65,10 +65,7 @@ void ttm_resource_manager_init(struct ttm_resource_manager *man,
 {
 	unsigned i;
 
-	man->use_io_reserve_lru = false;
-	mutex_init(&man->io_reserve_mutex);
 	spin_lock_init(&man->move_lock);
-	INIT_LIST_HEAD(&man->io_reserve_lru);
 	man->size = p_size;
 
 	for (i = 0; i < TTM_MAX_BO_PRIORITY; ++i)
--- a/include/drm/ttm/ttm_bo_api.h
+++ b/include/drm/ttm/ttm_bo_api.h
@@ -151,7 +151,6 @@ struct ttm_buffer_object {
 	struct list_head lru;
 	struct list_head ddestroy;
 	struct list_head swap;
-	struct list_head io_reserve_lru;
 
 	/**
 	 * Members protected by a bo reservation.
--- a/include/drm/ttm/ttm_bo_driver.h
+++ b/include/drm/ttm/ttm_bo_driver.h
@@ -441,11 +441,6 @@ void ttm_bo_unmap_virtual(struct ttm_buffer_object *bo);
  */
 void ttm_bo_unmap_virtual_locked(struct ttm_buffer_object *bo);
 
-int ttm_mem_io_reserve_vm(struct ttm_buffer_object *bo);
-void ttm_mem_io_free_vm(struct ttm_buffer_object *bo);
-int ttm_mem_io_lock(struct ttm_resource_manager *man, bool interruptible);
-void ttm_mem_io_unlock(struct ttm_resource_manager *man);
-
 /**
  * ttm_bo_reserve:
  *
--- a/include/drm/ttm/ttm_resource.h
+++ b/include/drm/ttm/ttm_resource.h
@@ -113,10 +113,6 @@ struct ttm_resource_manager_func {
  * @default_caching: The default caching policy used for a buffer object
  * placed in this memory type if the user doesn't provide one.
  * @func: structure pointer implementing the range manager. See above
- * @io_reserve_mutex: Mutex optionally protecting shared io_reserve structures
- * @use_io_reserve_lru: Use an lru list to try to unreserve io_mem_regions
- * reserved by the TTM vm system.
- * @io_reserve_lru: Optional lru list for unreserving io mem regions.
  * @move_lock: lock for move fence
  * static information. bdev::driver::io_mem_free is never used.
  * @lru: The lru list for this memory type.
@@ -134,16 +130,8 @@ struct ttm_resource_manager {
 	uint32_t available_caching;
 	uint32_t default_caching;
 	const struct ttm_resource_manager_func *func;
-	struct mutex io_reserve_mutex;
-	bool use_io_reserve_lru;
 	spinlock_t move_lock;
 
-	/*
-	 * Protected by @io_reserve_mutex:
-	 */
-	struct list_head io_reserve_lru;
-
 	/*
 	 * Protected by the global->lru_lock.
 	 */
@@ -163,8 +151,6 @@ struct ttm_resource_manager {
 * @base: bus base address
 * @is_iomem: is this io memory ?
 * @offset: offset from the base address
- * @io_reserved_vm: The VM system has a refcount in @io_reserved_count
- * @io_reserved_count: Refcounting the numbers of callers to ttm_mem_io_reserve
 *
 * Structure indicating the bus placement of an object.
 */
@@ -173,8 +159,6 @@ struct ttm_bus_placement {
 	phys_addr_t base;
 	unsigned long offset;
 	bool is_iomem;
-	bool io_reserved_vm;
-	uint64_t io_reserved_count;
 };
 
 /**
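After the removals, ttm_bus_placement shrinks to the bus description itself. Reconstructed from the hunks above, the resulting struct should look roughly as follows; the addr member is the pre-existing mapped-memory pointer that reserve/free now test and clear, and the field comments are paraphrased rather than quoted.

/* Resulting ttm_bus_placement, reconstructed from the hunks above. */
struct ttm_bus_placement {
	void		*addr;		/* mapped ioremapped memory */
	phys_addr_t	base;		/* bus base address */
	unsigned long	offset;		/* offset from the base address */
	bool		is_iomem;	/* is this io memory? */
};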