Commit 8f94eda3 authored by Christian König

dma-buf: drop seq count based update

This should be possible now since we don't have the distinction
between exclusive and shared fences any more.

The only possible pitfall is that a dma_fence would be reused during the
RCU grace period, but even that could be handled with a single extra check.
Signed-off-by: Christian König <christian.koenig@amd.com>
Reviewed-by: Daniel Vetter <daniel.vetter@ffwll.ch>
Link: https://patchwork.freedesktop.org/patch/msgid/20220407085946.744568-15-christian.koenig@amd.com
parent 8bb31587
...@@ -133,7 +133,6 @@ static void dma_resv_list_free(struct dma_resv_list *list) ...@@ -133,7 +133,6 @@ static void dma_resv_list_free(struct dma_resv_list *list)
void dma_resv_init(struct dma_resv *obj) void dma_resv_init(struct dma_resv *obj)
{ {
ww_mutex_init(&obj->lock, &reservation_ww_class); ww_mutex_init(&obj->lock, &reservation_ww_class);
seqcount_ww_mutex_init(&obj->seq, &obj->lock);
RCU_INIT_POINTER(obj->fences, NULL); RCU_INIT_POINTER(obj->fences, NULL);
} }
...@@ -292,28 +291,24 @@ void dma_resv_add_fence(struct dma_resv *obj, struct dma_fence *fence, ...@@ -292,28 +291,24 @@ void dma_resv_add_fence(struct dma_resv *obj, struct dma_fence *fence,
fobj = dma_resv_fences_list(obj); fobj = dma_resv_fences_list(obj);
count = fobj->num_fences; count = fobj->num_fences;
write_seqcount_begin(&obj->seq);
for (i = 0; i < count; ++i) { for (i = 0; i < count; ++i) {
enum dma_resv_usage old_usage; enum dma_resv_usage old_usage;
dma_resv_list_entry(fobj, i, obj, &old, &old_usage); dma_resv_list_entry(fobj, i, obj, &old, &old_usage);
if ((old->context == fence->context && old_usage >= usage) || if ((old->context == fence->context && old_usage >= usage) ||
dma_fence_is_signaled(old)) dma_fence_is_signaled(old)) {
goto replace; dma_resv_list_set(fobj, i, fence, usage);
dma_fence_put(old);
return;
}
} }
BUG_ON(fobj->num_fences >= fobj->max_fences); BUG_ON(fobj->num_fences >= fobj->max_fences);
old = NULL;
count++; count++;
replace:
dma_resv_list_set(fobj, i, fence, usage); dma_resv_list_set(fobj, i, fence, usage);
/* pointer update must be visible before we extend the num_fences */ /* pointer update must be visible before we extend the num_fences */
smp_store_mb(fobj->num_fences, count); smp_store_mb(fobj->num_fences, count);
write_seqcount_end(&obj->seq);
dma_fence_put(old);
} }
EXPORT_SYMBOL(dma_resv_add_fence); EXPORT_SYMBOL(dma_resv_add_fence);
...@@ -341,7 +336,6 @@ void dma_resv_replace_fences(struct dma_resv *obj, uint64_t context, ...@@ -341,7 +336,6 @@ void dma_resv_replace_fences(struct dma_resv *obj, uint64_t context,
dma_resv_assert_held(obj); dma_resv_assert_held(obj);
list = dma_resv_fences_list(obj); list = dma_resv_fences_list(obj);
write_seqcount_begin(&obj->seq);
for (i = 0; list && i < list->num_fences; ++i) { for (i = 0; list && i < list->num_fences; ++i) {
struct dma_fence *old; struct dma_fence *old;
...@@ -352,14 +346,12 @@ void dma_resv_replace_fences(struct dma_resv *obj, uint64_t context, ...@@ -352,14 +346,12 @@ void dma_resv_replace_fences(struct dma_resv *obj, uint64_t context,
dma_resv_list_set(list, i, replacement, usage); dma_resv_list_set(list, i, replacement, usage);
dma_fence_put(old); dma_fence_put(old);
} }
write_seqcount_end(&obj->seq);
} }
EXPORT_SYMBOL(dma_resv_replace_fences); EXPORT_SYMBOL(dma_resv_replace_fences);
/* Restart the unlocked iteration by initializing the cursor object. */ /* Restart the unlocked iteration by initializing the cursor object. */
static void dma_resv_iter_restart_unlocked(struct dma_resv_iter *cursor) static void dma_resv_iter_restart_unlocked(struct dma_resv_iter *cursor)
{ {
cursor->seq = read_seqcount_begin(&cursor->obj->seq);
cursor->index = 0; cursor->index = 0;
cursor->num_fences = 0; cursor->num_fences = 0;
cursor->fences = dma_resv_fences_list(cursor->obj); cursor->fences = dma_resv_fences_list(cursor->obj);
...@@ -388,8 +380,10 @@ static void dma_resv_iter_walk_unlocked(struct dma_resv_iter *cursor) ...@@ -388,8 +380,10 @@ static void dma_resv_iter_walk_unlocked(struct dma_resv_iter *cursor)
cursor->obj, &cursor->fence, cursor->obj, &cursor->fence,
&cursor->fence_usage); &cursor->fence_usage);
cursor->fence = dma_fence_get_rcu(cursor->fence); cursor->fence = dma_fence_get_rcu(cursor->fence);
if (!cursor->fence) if (!cursor->fence) {
break; dma_resv_iter_restart_unlocked(cursor);
continue;
}
if (!dma_fence_is_signaled(cursor->fence) && if (!dma_fence_is_signaled(cursor->fence) &&
cursor->usage >= cursor->fence_usage) cursor->usage >= cursor->fence_usage)
...@@ -415,7 +409,7 @@ struct dma_fence *dma_resv_iter_first_unlocked(struct dma_resv_iter *cursor) ...@@ -415,7 +409,7 @@ struct dma_fence *dma_resv_iter_first_unlocked(struct dma_resv_iter *cursor)
do { do {
dma_resv_iter_restart_unlocked(cursor); dma_resv_iter_restart_unlocked(cursor);
dma_resv_iter_walk_unlocked(cursor); dma_resv_iter_walk_unlocked(cursor);
} while (read_seqcount_retry(&cursor->obj->seq, cursor->seq)); } while (dma_resv_fences_list(cursor->obj) != cursor->fences);
rcu_read_unlock(); rcu_read_unlock();
return cursor->fence; return cursor->fence;
...@@ -438,13 +432,13 @@ struct dma_fence *dma_resv_iter_next_unlocked(struct dma_resv_iter *cursor) ...@@ -438,13 +432,13 @@ struct dma_fence *dma_resv_iter_next_unlocked(struct dma_resv_iter *cursor)
rcu_read_lock(); rcu_read_lock();
cursor->is_restarted = false; cursor->is_restarted = false;
restart = read_seqcount_retry(&cursor->obj->seq, cursor->seq); restart = dma_resv_fences_list(cursor->obj) != cursor->fences;
do { do {
if (restart) if (restart)
dma_resv_iter_restart_unlocked(cursor); dma_resv_iter_restart_unlocked(cursor);
dma_resv_iter_walk_unlocked(cursor); dma_resv_iter_walk_unlocked(cursor);
restart = true; restart = true;
} while (read_seqcount_retry(&cursor->obj->seq, cursor->seq)); } while (dma_resv_fences_list(cursor->obj) != cursor->fences);
rcu_read_unlock(); rcu_read_unlock();
return cursor->fence; return cursor->fence;
...@@ -540,10 +534,7 @@ int dma_resv_copy_fences(struct dma_resv *dst, struct dma_resv *src) ...@@ -540,10 +534,7 @@ int dma_resv_copy_fences(struct dma_resv *dst, struct dma_resv *src)
} }
dma_resv_iter_end(&cursor); dma_resv_iter_end(&cursor);
write_seqcount_begin(&dst->seq);
list = rcu_replace_pointer(dst->fences, list, dma_resv_held(dst)); list = rcu_replace_pointer(dst->fences, list, dma_resv_held(dst));
write_seqcount_end(&dst->seq);
dma_resv_list_free(list); dma_resv_list_free(list);
return 0; return 0;
} }
......
...@@ -217,7 +217,7 @@ static int test_for_each_unlocked(void *arg) ...@@ -217,7 +217,7 @@ static int test_for_each_unlocked(void *arg)
if (r == -ENOENT) { if (r == -ENOENT) {
r = -EINVAL; r = -EINVAL;
/* That should trigger an restart */ /* That should trigger an restart */
cursor.seq--; cursor.fences = (void*)~0;
} else if (r == -EINVAL) { } else if (r == -EINVAL) {
r = 0; r = 0;
} }
......
...@@ -155,15 +155,6 @@ struct dma_resv { ...@@ -155,15 +155,6 @@ struct dma_resv {
*/ */
struct ww_mutex lock; struct ww_mutex lock;
/**
* @seq:
*
* Sequence count for managing RCU read-side synchronization, allows
* read-only access to @fences while ensuring we take a consistent
* snapshot.
*/
seqcount_ww_mutex_t seq;
/** /**
* @fences: * @fences:
* *
...@@ -202,9 +193,6 @@ struct dma_resv_iter { ...@@ -202,9 +193,6 @@ struct dma_resv_iter {
/** @fence_usage: the usage of the current fence */ /** @fence_usage: the usage of the current fence */
enum dma_resv_usage fence_usage; enum dma_resv_usage fence_usage;
/** @seq: sequence number to check for modifications */
unsigned int seq;
/** @index: index into the shared fences */ /** @index: index into the shared fences */
unsigned int index; unsigned int index;
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment