Commit 212c444b authored by Dave Airlie

Merge branch 'ttm-next-3.13' of git://people.freedesktop.org/~thomash/linux into drm-next

- A couple of fixes that never made it into fixes-3.12
- Make NO_EVICT bos available to shrinkers once they are on the delayed-delete list
- Allow retrying page faults that need to wait for the GPU.

* 'ttm-next-3.13' of git://people.freedesktop.org/~thomash/linux:
  drm/ttm: Fix memory type compatibility check
  drm/ttm: Fix ttm_bo_move_memcpy
  drm/ttm: Handle in-memory region copies
  drm/ttm: Make NO_EVICT bos available to shrinkers pending destruction
  drm/ttm: Allow vm fault retries
parents 1e95ab58 59c8e663
@@ -429,8 +429,20 @@ static void ttm_bo_cleanup_refs_or_queue(struct ttm_buffer_object *bo)
 		sync_obj = driver->sync_obj_ref(bo->sync_obj);
 	spin_unlock(&bdev->fence_lock);
 
-	if (!ret)
+	if (!ret) {
+
+		/*
+		 * Make NO_EVICT bos immediately available to
+		 * shrinkers, now that they are queued for
+		 * destruction.
+		 */
+		if (bo->mem.placement & TTM_PL_FLAG_NO_EVICT) {
+			bo->mem.placement &= ~TTM_PL_FLAG_NO_EVICT;
+			ttm_bo_add_to_lru(bo);
+		}
+
 		ww_mutex_unlock(&bo->resv->lock);
+	}
 
 	kref_get(&bo->list_kref);
 	list_add_tail(&bo->ddestroy, &bdev->ddestroy);
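Note (illustrative, not part of the patch): the effect of the hunk above is that a pinned (NO_EVICT) buffer drops its pin the moment it is queued for delayed destruction, so LRU walkers such as shrinkers and eviction can consider it. A minimal user-space model of that idea, using invented types and flag values rather than the real TTM structures, might look like:

#include <stdio.h>
#include <stdint.h>

#define PL_FLAG_NO_EVICT (1u << 21)	/* illustrative value, not the real TTM_PL_FLAG_NO_EVICT */

struct fake_bo {
	uint32_t placement;
	int on_lru;			/* stands in for the bo being linked on an LRU list */
};

/* Model of the new behaviour: once a bo is queued for delayed destruction,
 * clear its NO_EVICT pin and put it back on the LRU so shrinkers can see it. */
static void queue_for_destruction(struct fake_bo *bo)
{
	if (bo->placement & PL_FLAG_NO_EVICT) {
		bo->placement &= ~PL_FLAG_NO_EVICT;
		bo->on_lru = 1;
	}
}

int main(void)
{
	struct fake_bo bo = { .placement = PL_FLAG_NO_EVICT, .on_lru = 0 };

	queue_for_destruction(&bo);
	printf("no_evict=%d on_lru=%d\n",
	       !!(bo.placement & PL_FLAG_NO_EVICT), bo.on_lru);
	return 0;
}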
@@ -986,24 +998,32 @@ int ttm_bo_move_buffer(struct ttm_buffer_object *bo,
 	return ret;
 }
 
-static int ttm_bo_mem_compat(struct ttm_placement *placement,
-			     struct ttm_mem_reg *mem)
+static bool ttm_bo_mem_compat(struct ttm_placement *placement,
+			      struct ttm_mem_reg *mem,
+			      uint32_t *new_flags)
 {
 	int i;
 
 	if (mem->mm_node && placement->lpfn != 0 &&
 	    (mem->start < placement->fpfn ||
 	     mem->start + mem->num_pages > placement->lpfn))
-		return -1;
+		return false;
 
 	for (i = 0; i < placement->num_placement; i++) {
-		if ((placement->placement[i] & mem->placement &
-			TTM_PL_MASK_CACHING) &&
-			(placement->placement[i] & mem->placement &
-			TTM_PL_MASK_MEM))
-			return i;
+		*new_flags = placement->placement[i];
+		if ((*new_flags & mem->placement & TTM_PL_MASK_CACHING) &&
+		    (*new_flags & mem->placement & TTM_PL_MASK_MEM))
+			return true;
 	}
-	return -1;
+
+	for (i = 0; i < placement->num_busy_placement; i++) {
+		*new_flags = placement->busy_placement[i];
+		if ((*new_flags & mem->placement & TTM_PL_MASK_CACHING) &&
+		    (*new_flags & mem->placement & TTM_PL_MASK_MEM))
+			return true;
+	}
+
+	return false;
 }
 
 int ttm_bo_validate(struct ttm_buffer_object *bo,
@@ -1012,6 +1032,7 @@ int ttm_bo_validate(struct ttm_buffer_object *bo,
 		    bool no_wait_gpu)
 {
 	int ret;
+	uint32_t new_flags;
 
 	lockdep_assert_held(&bo->resv->lock.base);
 	/* Check that range is valid */
@@ -1022,8 +1043,7 @@ int ttm_bo_validate(struct ttm_buffer_object *bo,
 	/*
 	 * Check whether we need to move buffer.
 	 */
-	ret = ttm_bo_mem_compat(placement, &bo->mem);
-	if (ret < 0) {
+	if (!ttm_bo_mem_compat(placement, &bo->mem, &new_flags)) {
 		ret = ttm_bo_move_buffer(bo, placement, interruptible,
 					 no_wait_gpu);
 		if (ret)
@@ -1033,7 +1053,7 @@ int ttm_bo_validate(struct ttm_buffer_object *bo,
 		 * Use the access and other non-mapping-related flag bits from
 		 * the compatible memory placement flags to the active flags
 		 */
-		ttm_flag_masked(&bo->mem.placement, placement->placement[ret],
+		ttm_flag_masked(&bo->mem.placement, new_flags,
 				~TTM_PL_MASK_MEMTYPE);
 	}
 	/*
......
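Note (illustrative, not part of the patch): the reworked ttm_bo_mem_compat() above only succeeds if the current placement shares both a caching flag and a memory-type flag with one of the requested placements, now also considering the busy_placement list, and it reports the matching flags through an out-parameter instead of an index. A standalone C sketch of that matching logic, using invented flag values rather than the real TTM_PL_* constants, might read:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Invented bit layout, for illustration only. */
#define PL_SYSTEM	(1u << 0)	/* memory-type bits */
#define PL_VRAM		(1u << 2)
#define PL_MASK_MEM	0x0000ffffu
#define PL_CACHED	(1u << 16)	/* caching bits */
#define PL_WC		(1u << 17)
#define PL_MASK_CACHING	0xffff0000u

/* Returns true and fills *new_flags if 'current' is compatible with any
 * entry in 'wanted' (both a caching bit and a memory-type bit overlap). */
static bool mem_compat(const uint32_t *wanted, int n,
		       uint32_t current, uint32_t *new_flags)
{
	for (int i = 0; i < n; i++) {
		*new_flags = wanted[i];
		if ((*new_flags & current & PL_MASK_CACHING) &&
		    (*new_flags & current & PL_MASK_MEM))
			return true;
	}
	return false;
}

int main(void)
{
	uint32_t wanted[] = { PL_VRAM | PL_WC, PL_SYSTEM | PL_CACHED };
	uint32_t current = PL_SYSTEM | PL_CACHED;
	uint32_t new_flags = 0;

	if (mem_compat(wanted, 2, current, &new_flags))
		printf("compatible, new_flags=0x%x\n", new_flags);
	else
		printf("needs a move\n");
	return 0;
}

In the real code the same helper is run over placement->placement and then placement->busy_placement, which is what the second loop in the hunk adds.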
@@ -343,19 +343,25 @@ int ttm_bo_move_memcpy(struct ttm_buffer_object *bo,
 	if (ret)
 		goto out;
 
+	/*
+	 * Single TTM move. NOP.
+	 */
 	if (old_iomap == NULL && new_iomap == NULL)
 		goto out2;
+
+	/*
+	 * Move nonexistent data. NOP.
+	 */
 	if (old_iomap == NULL && ttm == NULL)
 		goto out2;
 
-	if (ttm->state == tt_unpopulated) {
+	/*
+	 * TTM might be null for moves within the same region.
+	 */
+	if (ttm && ttm->state == tt_unpopulated) {
 		ret = ttm->bdev->driver->ttm_tt_populate(ttm);
-		if (ret) {
-			/* if we fail here don't nuke the mm node
-			 * as the bo still owns it */
-			old_copy.mm_node = NULL;
+		if (ret)
 			goto out1;
-		}
 	}
 
 	add = 0;
@@ -381,11 +387,8 @@ int ttm_bo_move_memcpy(struct ttm_buffer_object *bo,
 					   prot);
 		} else
 			ret = ttm_copy_io_page(new_iomap, old_iomap, page);
-		if (ret) {
-			/* failing here, means keep old copy as-is */
-			old_copy.mm_node = NULL;
+		if (ret)
 			goto out1;
-		}
 	}
 	mb();
 out2:
@@ -403,7 +406,12 @@ int ttm_bo_move_memcpy(struct ttm_buffer_object *bo,
 	ttm_mem_reg_iounmap(bdev, old_mem, new_iomap);
 out:
 	ttm_mem_reg_iounmap(bdev, &old_copy, old_iomap);
-	ttm_bo_mem_put(bo, &old_copy);
+
+	/*
+	 * On error, keep the mm node!
+	 */
+	if (!ret)
+		ttm_bo_mem_put(bo, &old_copy);
 	return ret;
 }
 EXPORT_SYMBOL(ttm_bo_move_memcpy);
......
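Note (illustrative, not part of the patch): the ttm_bo_move_memcpy() hunks change the error path so that a failed copy no longer releases the old memory node, since the buffer still owns it and can fall back to its old placement. A rough user-space model of that "release only on success" cleanup pattern, with placeholder types rather than the real ttm_mem_reg/ttm_bo_mem_put, could be:

#include <stdio.h>
#include <stdlib.h>

struct mem_node { int id; };

static void mem_put(struct mem_node **node)
{
	free(*node);
	*node = NULL;
}

/* Model of the fixed cleanup: keep the old node on failure so the
 * caller, which still owns it, can keep using the old placement. */
static int move_memcpy(struct mem_node **old_copy, int simulate_error)
{
	int ret = simulate_error ? -1 : 0;

	/* ... the page copy loop would run here ... */

	if (!ret)
		mem_put(old_copy);	/* success: old backing store is gone */
	return ret;			/* failure: *old_copy left intact */
}

int main(void)
{
	struct mem_node *old = malloc(sizeof(*old));

	if (move_memcpy(&old, 1) != 0)
		printf("copy failed, old node still owned: %p\n", (void *)old);
	mem_put(&old);
	return 0;
}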
@@ -41,6 +41,51 @@
 #define TTM_BO_VM_NUM_PREFAULT 16
 
+static int ttm_bo_vm_fault_idle(struct ttm_buffer_object *bo,
+				struct vm_area_struct *vma,
+				struct vm_fault *vmf)
+{
+	struct ttm_bo_device *bdev = bo->bdev;
+	int ret = 0;
+
+	spin_lock(&bdev->fence_lock);
+	if (likely(!test_bit(TTM_BO_PRIV_FLAG_MOVING, &bo->priv_flags)))
+		goto out_unlock;
+
+	/*
+	 * Quick non-stalling check for idle.
+	 */
+	ret = ttm_bo_wait(bo, false, false, true);
+	if (likely(ret == 0))
+		goto out_unlock;
+
+	/*
+	 * If possible, avoid waiting for GPU with mmap_sem
+	 * held.
+	 */
+	if (vmf->flags & FAULT_FLAG_ALLOW_RETRY) {
+		ret = VM_FAULT_RETRY;
+		if (vmf->flags & FAULT_FLAG_RETRY_NOWAIT)
+			goto out_unlock;
+
+		up_read(&vma->vm_mm->mmap_sem);
+		(void) ttm_bo_wait(bo, false, true, false);
+		goto out_unlock;
+	}
+
+	/*
+	 * Ordinary wait.
+	 */
+	ret = ttm_bo_wait(bo, false, true, false);
+	if (unlikely(ret != 0))
+		ret = (ret != -ERESTARTSYS) ? VM_FAULT_SIGBUS :
+			VM_FAULT_NOPAGE;
+
+out_unlock:
+	spin_unlock(&bdev->fence_lock);
+	return ret;
+}
+
 static int ttm_bo_vm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
 {
 	struct ttm_buffer_object *bo = (struct ttm_buffer_object *)
@@ -91,18 +136,11 @@ static int ttm_bo_vm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
 	 * Wait for buffer data in transit, due to a pipelined
 	 * move.
 	 */
-
-	spin_lock(&bdev->fence_lock);
-	if (test_bit(TTM_BO_PRIV_FLAG_MOVING, &bo->priv_flags)) {
-		ret = ttm_bo_wait(bo, false, true, false);
-		spin_unlock(&bdev->fence_lock);
-		if (unlikely(ret != 0)) {
-			retval = (ret != -ERESTARTSYS) ?
-			    VM_FAULT_SIGBUS : VM_FAULT_NOPAGE;
-			goto out_unlock;
-		}
-	} else
-		spin_unlock(&bdev->fence_lock);
+	ret = ttm_bo_vm_fault_idle(bo, vma, vmf);
+	if (unlikely(ret != 0)) {
+		retval = ret;
+		goto out_unlock;
+	}
 
 	ret = ttm_mem_io_lock(man, true);
 	if (unlikely(ret != 0)) {
......
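Note (illustrative, not part of the patch): the new ttm_bo_vm_fault_idle() lets the fault handler drop mmap_sem and ask the core fault code to retry, instead of stalling the whole address space while waiting for the GPU. A simplified user-space model of just the retry decision, where the flag names mirror the kernel's FAULT_FLAG_* but the values, types, and return codes are invented, could read:

#include <stdbool.h>
#include <stdio.h>

/* Invented stand-ins for the kernel's fault flags and return codes. */
#define FLAG_ALLOW_RETRY	(1u << 0)
#define FLAG_RETRY_NOWAIT	(1u << 1)

enum fault_result { FAULT_DONE, FAULT_RETRY, FAULT_SIGBUS };

/* Model: if the buffer is still moving and the caller allows retries,
 * return FAULT_RETRY (after releasing mmap_sem in the real code) rather
 * than blocking on a GPU wait with the lock held. */
static enum fault_result fault_idle(bool bo_is_moving, bool gpu_wait_fails,
				    unsigned int flags)
{
	if (!bo_is_moving)
		return FAULT_DONE;

	if (flags & FLAG_ALLOW_RETRY) {
		if (!(flags & FLAG_RETRY_NOWAIT)) {
			/* real code: up_read(&mm->mmap_sem); ttm_bo_wait(...); */
		}
		return FAULT_RETRY;
	}

	/* Ordinary blocking wait with mmap_sem held. */
	return gpu_wait_fails ? FAULT_SIGBUS : FAULT_DONE;
}

int main(void)
{
	printf("%d\n", fault_idle(true, false, FLAG_ALLOW_RETRY));	/* 1 = retry */
	printf("%d\n", fault_idle(true, false, 0));			/* 0 = waited */
	return 0;
}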