Commit b68d8379 authored by Chris Wilson, committed by Sumit Semwal

dma-buf: Restart reservation_object_test_signaled_rcu() after writes

In order to be completely generic, we have to double check the read
seqlock after acquiring a reference to the fence. If the driver is
allocating fences from a SLAB_DESTROY_BY_RCU, or similar freelist, then
within an RCU grace period a fence may be freed and reallocated. The RCU
read-side critical section does not prevent this reallocation; instead,
we have to inspect the reservation's seqlock to double-check whether the
fences were reassigned while we were acquiring our reference.
Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Cc: Daniel Vetter <daniel.vetter@ffwll.ch>
Cc: Maarten Lankhorst <maarten.lankhorst@linux.intel.com>
Cc: Christian König <christian.koenig@amd.com>
Cc: Alex Deucher <alexander.deucher@amd.com>
Cc: Sumit Semwal <sumit.semwal@linaro.org>
Cc: linux-media@vger.kernel.org
Cc: dri-devel@lists.freedesktop.org
Cc: linaro-mm-sig@lists.linaro.org
Reviewed-by: Daniel Vetter <daniel.vetter@ffwll.ch>
Signed-off-by: Sumit Semwal <sumit.semwal@linaro.org>
Link: http://patchwork.freedesktop.org/patch/msgid/20160829070834.22296-9-chris@chris-wilson.co.uk
parent 1cec20f0
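
As a rough illustration of the pattern the commit message describes (not part of the patch itself), the sketch below uses a hypothetical helper, get_excl_fence_stable(), built on the 4.8-era fence API (struct fence, fence_get_rcu(), fence_put()): sample the reservation's seqcount, acquire a reference under RCU, then re-read the seqcount before trusting the pointer.

#include <linux/fence.h>
#include <linux/rcupdate.h>
#include <linux/reservation.h>

/*
 * Illustration only -- a made-up helper, not part of the patch.
 * Shows the double-check pattern from the commit message.
 */
static struct fence *get_excl_fence_stable(struct reservation_object *obj)
{
	struct fence *fence;
	unsigned seq;

	rcu_read_lock();
retry:
	seq = read_seqcount_begin(&obj->seq);
	fence = rcu_dereference(obj->fence_excl);
	if (fence) {
		/*
		 * With SLAB_DESTROY_BY_RCU the fence may be freed and
		 * reused within the grace period, so the kref may already
		 * have dropped to zero and fence_get_rcu() can fail.
		 */
		fence = fence_get_rcu(fence);
		if (!fence)
			goto retry;

		/*
		 * The slot may have been rewritten while the reference was
		 * being taken; only the seqcount can tell us that.
		 */
		if (read_seqcount_retry(&obj->seq, seq)) {
			fence_put(fence);
			goto retry;
		}
	}
	rcu_read_unlock();
	return fence;
}

The patch applies the same idea to reservation_object_test_signaled_rcu(): the RCU read lock is held across retries, ret is reset at the retry label, and the seqcount is re-checked after the fences have been tested, so a concurrent writer simply forces another pass rather than producing a stale answer.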
@@ -474,12 +474,13 @@ bool reservation_object_test_signaled_rcu(struct reservation_object *obj,
 					  bool test_all)
 {
 	unsigned seq, shared_count;
-	int ret = true;
+	int ret;
 
+	rcu_read_lock();
 retry:
+	ret = true;
 	shared_count = 0;
 	seq = read_seqcount_begin(&obj->seq);
-	rcu_read_lock();
 
 	if (test_all) {
 		unsigned i;
@@ -490,46 +491,35 @@ bool reservation_object_test_signaled_rcu(struct reservation_object *obj,
 		if (fobj)
 			shared_count = fobj->shared_count;
 
-		if (read_seqcount_retry(&obj->seq, seq))
-			goto unlock_retry;
-
 		for (i = 0; i < shared_count; ++i) {
 			struct fence *fence = rcu_dereference(fobj->shared[i]);
 
 			ret = reservation_object_test_signaled_single(fence);
 			if (ret < 0)
-				goto unlock_retry;
+				goto retry;
 			else if (!ret)
 				break;
 		}
 
-		/*
-		 * There could be a read_seqcount_retry here, but nothing cares
-		 * about whether it's the old or newer fence pointers that are
-		 * signaled. That race could still have happened after checking
-		 * read_seqcount_retry. If you care, use ww_mutex_lock.
-		 */
+		if (read_seqcount_retry(&obj->seq, seq))
+			goto retry;
 	}
 
 	if (!shared_count) {
 		struct fence *fence_excl = rcu_dereference(obj->fence_excl);
 
-		if (read_seqcount_retry(&obj->seq, seq))
-			goto unlock_retry;
-
 		if (fence_excl) {
 			ret = reservation_object_test_signaled_single(
 								fence_excl);
 			if (ret < 0)
-				goto unlock_retry;
+				goto retry;
+
+			if (read_seqcount_retry(&obj->seq, seq))
+				goto retry;
 		}
 	}
 
 	rcu_read_unlock();
 	return ret;
-
-unlock_retry:
-	rcu_read_unlock();
-	goto retry;
 }
 EXPORT_SYMBOL_GPL(reservation_object_test_signaled_rcu);
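
For completeness, a hedged caller-side sketch (the wrapper name buffer_is_idle() is invented for illustration): the reworked helper remains a lockless query that needs no ww_mutex, with test_all selecting whether the shared fences are tested as well, falling back to the exclusive fence when there are none.

#include <linux/reservation.h>

/* Illustration only: lockless check of whether the buffer's fences have signaled. */
static bool buffer_is_idle(struct reservation_object *resv)
{
	return reservation_object_test_signaled_rcu(resv, true);
}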