Commit 91cb91be authored by Jerome Glisse's avatar Jerome Glisse Committed by Dave Airlie

drm/radeon/kms: fix indirect buffer management V2

There are 3 distinct states for an indirect buffer (IB):
  1- free with no fence
  2- free with a fence
  3- non free (fence doesn't matter)
Previous code mixed cases 2 & 3 into a single one, leading to possible
catastrophic failure. This patch reworks the handling and properly
separates each case. So when you get an IB we set the IB as non free and
the fence status doesn't matter. The fence becomes active (i.e. has a
meaning for the IB code) once the IB is scheduled or freed. This patch also
gets rid of the alloc bitmap, as it was overkill; we now go through the
IB pool list like a ring buffer, since the oldest IB is the first
one that will be freed.

Fix :
https://bugs.freedesktop.org/show_bug.cgi?id=26438
and likely other bugs.

V2: removed the scheduled list (it's useless now) and fixed free-IB scanning.
Signed-off-by: default avatarJerome Glisse <jglisse@redhat.com>
Signed-off-by: default avatarDave Airlie <airlied@redhat.com>
parent b58db2c6
...@@ -543,9 +543,6 @@ int r600_vb_ib_get(struct radeon_device *rdev) ...@@ -543,9 +543,6 @@ int r600_vb_ib_get(struct radeon_device *rdev)
void r600_vb_ib_put(struct radeon_device *rdev) void r600_vb_ib_put(struct radeon_device *rdev)
{ {
radeon_fence_emit(rdev, rdev->r600_blit.vb_ib->fence); radeon_fence_emit(rdev, rdev->r600_blit.vb_ib->fence);
mutex_lock(&rdev->ib_pool.mutex);
list_add_tail(&rdev->r600_blit.vb_ib->list, &rdev->ib_pool.scheduled_ibs);
mutex_unlock(&rdev->ib_pool.mutex);
radeon_ib_free(rdev, &rdev->r600_blit.vb_ib); radeon_ib_free(rdev, &rdev->r600_blit.vb_ib);
} }
......
...@@ -96,6 +96,7 @@ extern int radeon_audio; ...@@ -96,6 +96,7 @@ extern int radeon_audio;
* symbol; * symbol;
*/ */
#define RADEON_MAX_USEC_TIMEOUT 100000 /* 100 ms */ #define RADEON_MAX_USEC_TIMEOUT 100000 /* 100 ms */
/* RADEON_IB_POOL_SIZE must be a power of 2 */
#define RADEON_IB_POOL_SIZE 16 #define RADEON_IB_POOL_SIZE 16
#define RADEON_DEBUGFS_MAX_NUM_FILES 32 #define RADEON_DEBUGFS_MAX_NUM_FILES 32
#define RADEONFB_CONN_LIMIT 4 #define RADEONFB_CONN_LIMIT 4
...@@ -363,11 +364,12 @@ void radeon_irq_kms_sw_irq_put(struct radeon_device *rdev); ...@@ -363,11 +364,12 @@ void radeon_irq_kms_sw_irq_put(struct radeon_device *rdev);
*/ */
struct radeon_ib { struct radeon_ib {
struct list_head list; struct list_head list;
unsigned long idx; unsigned idx;
uint64_t gpu_addr; uint64_t gpu_addr;
struct radeon_fence *fence; struct radeon_fence *fence;
uint32_t *ptr; uint32_t *ptr;
uint32_t length_dw; uint32_t length_dw;
bool free;
}; };
/* /*
...@@ -377,10 +379,9 @@ struct radeon_ib { ...@@ -377,10 +379,9 @@ struct radeon_ib {
struct radeon_ib_pool { struct radeon_ib_pool {
struct mutex mutex; struct mutex mutex;
struct radeon_bo *robj; struct radeon_bo *robj;
struct list_head scheduled_ibs;
struct radeon_ib ibs[RADEON_IB_POOL_SIZE]; struct radeon_ib ibs[RADEON_IB_POOL_SIZE];
bool ready; bool ready;
DECLARE_BITMAP(alloc_bm, RADEON_IB_POOL_SIZE); unsigned head_id;
}; };
struct radeon_cp { struct radeon_cp {
......
...@@ -41,68 +41,55 @@ int radeon_ib_get(struct radeon_device *rdev, struct radeon_ib **ib) ...@@ -41,68 +41,55 @@ int radeon_ib_get(struct radeon_device *rdev, struct radeon_ib **ib)
{ {
struct radeon_fence *fence; struct radeon_fence *fence;
struct radeon_ib *nib; struct radeon_ib *nib;
unsigned long i; int r = 0, i, c;
int r = 0;
*ib = NULL; *ib = NULL;
r = radeon_fence_create(rdev, &fence); r = radeon_fence_create(rdev, &fence);
if (r) { if (r) {
DRM_ERROR("failed to create fence for new IB\n"); dev_err(rdev->dev, "failed to create fence for new IB\n");
return r; return r;
} }
mutex_lock(&rdev->ib_pool.mutex); mutex_lock(&rdev->ib_pool.mutex);
i = find_first_zero_bit(rdev->ib_pool.alloc_bm, RADEON_IB_POOL_SIZE); for (i = rdev->ib_pool.head_id, c = 0, nib = NULL; c < RADEON_IB_POOL_SIZE; c++, i++) {
if (i < RADEON_IB_POOL_SIZE) { i &= (RADEON_IB_POOL_SIZE - 1);
set_bit(i, rdev->ib_pool.alloc_bm); if (rdev->ib_pool.ibs[i].free) {
rdev->ib_pool.ibs[i].length_dw = 0; nib = &rdev->ib_pool.ibs[i];
*ib = &rdev->ib_pool.ibs[i]; break;
mutex_unlock(&rdev->ib_pool.mutex); }
goto out;
} }
if (list_empty(&rdev->ib_pool.scheduled_ibs)) { if (nib == NULL) {
/* we go do nothings here */ /* This should never happen, it means we allocated all
* IB and haven't scheduled one yet, return EBUSY to
* userspace hoping that on ioctl recall we get better
* luck
*/
dev_err(rdev->dev, "no free indirect buffer !\n");
mutex_unlock(&rdev->ib_pool.mutex); mutex_unlock(&rdev->ib_pool.mutex);
DRM_ERROR("all IB allocated none scheduled.\n"); radeon_fence_unref(&fence);
r = -EINVAL; return -EBUSY;
goto out;
} }
/* get the first ib on the scheduled list */ rdev->ib_pool.head_id = (nib->idx + 1) & (RADEON_IB_POOL_SIZE - 1);
nib = list_entry(rdev->ib_pool.scheduled_ibs.next, nib->free = false;
struct radeon_ib, list); if (nib->fence) {
if (nib->fence == NULL) {
/* we go do nothings here */
mutex_unlock(&rdev->ib_pool.mutex); mutex_unlock(&rdev->ib_pool.mutex);
DRM_ERROR("IB %lu scheduled without a fence.\n", nib->idx); r = radeon_fence_wait(nib->fence, false);
r = -EINVAL; if (r) {
goto out; dev_err(rdev->dev, "error waiting fence of IB(%u:0x%016lX:%u)\n",
} nib->idx, (unsigned long)nib->gpu_addr, nib->length_dw);
mutex_unlock(&rdev->ib_pool.mutex); mutex_lock(&rdev->ib_pool.mutex);
nib->free = true;
r = radeon_fence_wait(nib->fence, false); mutex_unlock(&rdev->ib_pool.mutex);
if (r) { radeon_fence_unref(&fence);
DRM_ERROR("radeon: IB(%lu:0x%016lX:%u)\n", nib->idx, return r;
(unsigned long)nib->gpu_addr, nib->length_dw); }
DRM_ERROR("radeon: GPU lockup detected, fail to get a IB\n"); mutex_lock(&rdev->ib_pool.mutex);
goto out;
} }
radeon_fence_unref(&nib->fence); radeon_fence_unref(&nib->fence);
nib->fence = fence;
nib->length_dw = 0; nib->length_dw = 0;
/* scheduled list is accessed here */
mutex_lock(&rdev->ib_pool.mutex);
list_del(&nib->list);
INIT_LIST_HEAD(&nib->list);
mutex_unlock(&rdev->ib_pool.mutex); mutex_unlock(&rdev->ib_pool.mutex);
*ib = nib; *ib = nib;
out: return 0;
if (r) {
radeon_fence_unref(&fence);
} else {
(*ib)->fence = fence;
}
return r;
} }
void radeon_ib_free(struct radeon_device *rdev, struct radeon_ib **ib) void radeon_ib_free(struct radeon_device *rdev, struct radeon_ib **ib)
...@@ -114,18 +101,7 @@ void radeon_ib_free(struct radeon_device *rdev, struct radeon_ib **ib) ...@@ -114,18 +101,7 @@ void radeon_ib_free(struct radeon_device *rdev, struct radeon_ib **ib)
return; return;
} }
mutex_lock(&rdev->ib_pool.mutex); mutex_lock(&rdev->ib_pool.mutex);
if (!list_empty(&tmp->list) && !radeon_fence_signaled(tmp->fence)) { tmp->free = true;
/* IB is scheduled & not signaled don't do anythings */
mutex_unlock(&rdev->ib_pool.mutex);
return;
}
list_del(&tmp->list);
INIT_LIST_HEAD(&tmp->list);
if (tmp->fence)
radeon_fence_unref(&tmp->fence);
tmp->length_dw = 0;
clear_bit(tmp->idx, rdev->ib_pool.alloc_bm);
mutex_unlock(&rdev->ib_pool.mutex); mutex_unlock(&rdev->ib_pool.mutex);
} }
...@@ -135,7 +111,7 @@ int radeon_ib_schedule(struct radeon_device *rdev, struct radeon_ib *ib) ...@@ -135,7 +111,7 @@ int radeon_ib_schedule(struct radeon_device *rdev, struct radeon_ib *ib)
if (!ib->length_dw || !rdev->cp.ready) { if (!ib->length_dw || !rdev->cp.ready) {
/* TODO: Nothings in the ib we should report. */ /* TODO: Nothings in the ib we should report. */
DRM_ERROR("radeon: couldn't schedule IB(%lu).\n", ib->idx); DRM_ERROR("radeon: couldn't schedule IB(%u).\n", ib->idx);
return -EINVAL; return -EINVAL;
} }
...@@ -148,7 +124,8 @@ int radeon_ib_schedule(struct radeon_device *rdev, struct radeon_ib *ib) ...@@ -148,7 +124,8 @@ int radeon_ib_schedule(struct radeon_device *rdev, struct radeon_ib *ib)
radeon_ring_ib_execute(rdev, ib); radeon_ring_ib_execute(rdev, ib);
radeon_fence_emit(rdev, ib->fence); radeon_fence_emit(rdev, ib->fence);
mutex_lock(&rdev->ib_pool.mutex); mutex_lock(&rdev->ib_pool.mutex);
list_add_tail(&ib->list, &rdev->ib_pool.scheduled_ibs); /* once scheduled IB is considered free and protected by the fence */
ib->free = true;
mutex_unlock(&rdev->ib_pool.mutex); mutex_unlock(&rdev->ib_pool.mutex);
radeon_ring_unlock_commit(rdev); radeon_ring_unlock_commit(rdev);
return 0; return 0;
...@@ -164,7 +141,6 @@ int radeon_ib_pool_init(struct radeon_device *rdev) ...@@ -164,7 +141,6 @@ int radeon_ib_pool_init(struct radeon_device *rdev)
if (rdev->ib_pool.robj) if (rdev->ib_pool.robj)
return 0; return 0;
/* Allocate 1M object buffer */ /* Allocate 1M object buffer */
INIT_LIST_HEAD(&rdev->ib_pool.scheduled_ibs);
r = radeon_bo_create(rdev, NULL, RADEON_IB_POOL_SIZE*64*1024, r = radeon_bo_create(rdev, NULL, RADEON_IB_POOL_SIZE*64*1024,
true, RADEON_GEM_DOMAIN_GTT, true, RADEON_GEM_DOMAIN_GTT,
&rdev->ib_pool.robj); &rdev->ib_pool.robj);
...@@ -195,9 +171,9 @@ int radeon_ib_pool_init(struct radeon_device *rdev) ...@@ -195,9 +171,9 @@ int radeon_ib_pool_init(struct radeon_device *rdev)
rdev->ib_pool.ibs[i].ptr = ptr + offset; rdev->ib_pool.ibs[i].ptr = ptr + offset;
rdev->ib_pool.ibs[i].idx = i; rdev->ib_pool.ibs[i].idx = i;
rdev->ib_pool.ibs[i].length_dw = 0; rdev->ib_pool.ibs[i].length_dw = 0;
INIT_LIST_HEAD(&rdev->ib_pool.ibs[i].list); rdev->ib_pool.ibs[i].free = true;
} }
bitmap_zero(rdev->ib_pool.alloc_bm, RADEON_IB_POOL_SIZE); rdev->ib_pool.head_id = 0;
rdev->ib_pool.ready = true; rdev->ib_pool.ready = true;
DRM_INFO("radeon: ib pool ready.\n"); DRM_INFO("radeon: ib pool ready.\n");
if (radeon_debugfs_ib_init(rdev)) { if (radeon_debugfs_ib_init(rdev)) {
...@@ -214,7 +190,6 @@ void radeon_ib_pool_fini(struct radeon_device *rdev) ...@@ -214,7 +190,6 @@ void radeon_ib_pool_fini(struct radeon_device *rdev)
return; return;
} }
mutex_lock(&rdev->ib_pool.mutex); mutex_lock(&rdev->ib_pool.mutex);
bitmap_zero(rdev->ib_pool.alloc_bm, RADEON_IB_POOL_SIZE);
if (rdev->ib_pool.robj) { if (rdev->ib_pool.robj) {
r = radeon_bo_reserve(rdev->ib_pool.robj, false); r = radeon_bo_reserve(rdev->ib_pool.robj, false);
if (likely(r == 0)) { if (likely(r == 0)) {
...@@ -363,7 +338,7 @@ static int radeon_debugfs_ib_info(struct seq_file *m, void *data) ...@@ -363,7 +338,7 @@ static int radeon_debugfs_ib_info(struct seq_file *m, void *data)
if (ib == NULL) { if (ib == NULL) {
return 0; return 0;
} }
seq_printf(m, "IB %04lu\n", ib->idx); seq_printf(m, "IB %04u\n", ib->idx);
seq_printf(m, "IB fence %p\n", ib->fence); seq_printf(m, "IB fence %p\n", ib->fence);
seq_printf(m, "IB size %05u dwords\n", ib->length_dw); seq_printf(m, "IB size %05u dwords\n", ib->length_dw);
for (i = 0; i < ib->length_dw; i++) { for (i = 0; i < ib->length_dw; i++) {
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment