Commit 047a1b87 authored by Christian König

dma-buf & drm/amdgpu: remove dma_resv workaround

Rework the internals of the dma_resv object to allow adding more than one
write fence and to remember, for each fence, what purpose it had.

This allows removing the workaround from amdgpu, which used a dma_fence_chain
container for this instead.
Signed-off-by: Christian König <christian.koenig@amd.com>
Reviewed-by: Daniel Vetter <daniel.vetter@ffwll.ch>
Cc: amd-gfx@lists.freedesktop.org
Link: https://patchwork.freedesktop.org/patch/msgid/20220407085946.744568-4-christian.koenig@amd.com
parent 73511edf
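
For context, a minimal sketch of how a driver attaches a submission fence under the reworked API this series converges on, i.e. dma_resv_reserve_fences() plus dma_resv_add_fence() with an explicit usage. The helper name publish_write_fence() is made up for illustration, and locking and error handling are reduced to the essentials:

#include <linux/dma-resv.h>
#include <linux/dma-fence.h>

/* Hypothetical helper: attach @fence to @resv as a write fence.
 * The caller is expected to hold the reservation lock (dma_resv_lock()).
 */
static int publish_write_fence(struct dma_resv *resv, struct dma_fence *fence)
{
        int r;

        /* Reserve a slot first; adding the fence itself cannot fail. */
        r = dma_resv_reserve_fences(resv, 1);
        if (r)
                return r;

        /* The fence is remembered together with the purpose it had. */
        dma_resv_add_fence(resv, fence, DMA_RESV_USAGE_WRITE);
        return 0;
}
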
[The diff for one of the changed files is collapsed and is not shown below.]
@@ -34,7 +34,6 @@ struct amdgpu_fpriv;
 struct amdgpu_bo_list_entry {
         struct ttm_validate_buffer tv;
         struct amdgpu_bo_va *bo_va;
-        struct dma_fence_chain *chain;
         uint32_t priority;
         struct page **user_pages;
         bool user_invalidated;
...
@@ -574,14 +574,6 @@ static int amdgpu_cs_parser_bos(struct amdgpu_cs_parser *p,
                 struct amdgpu_bo *bo = ttm_to_amdgpu_bo(e->tv.bo);
 
                 e->bo_va = amdgpu_vm_bo_find(vm, bo);
-
-                if (bo->tbo.base.dma_buf && !amdgpu_bo_explicit_sync(bo)) {
-                        e->chain = dma_fence_chain_alloc();
-                        if (!e->chain) {
-                                r = -ENOMEM;
-                                goto error_validate;
-                        }
-                }
         }
 
         /* Move fence waiting after getting reservation lock of
@@ -642,13 +634,8 @@ static int amdgpu_cs_parser_bos(struct amdgpu_cs_parser *p,
         }
 
 error_validate:
-        if (r) {
-                amdgpu_bo_list_for_each_entry(e, p->bo_list) {
-                        dma_fence_chain_free(e->chain);
-                        e->chain = NULL;
-                }
+        if (r)
                 ttm_eu_backoff_reservation(&p->ticket, &p->validated);
-        }
 out:
         return r;
 }
@@ -688,17 +675,9 @@ static void amdgpu_cs_parser_fini(struct amdgpu_cs_parser *parser, int error,
 {
         unsigned i;
 
-        if (error && backoff) {
-                struct amdgpu_bo_list_entry *e;
-
-                amdgpu_bo_list_for_each_entry(e, parser->bo_list) {
-                        dma_fence_chain_free(e->chain);
-                        e->chain = NULL;
-                }
-
+        if (error && backoff)
                 ttm_eu_backoff_reservation(&parser->ticket,
                                            &parser->validated);
-        }
 
         for (i = 0; i < parser->num_post_deps; i++) {
                 drm_syncobj_put(parser->post_deps[i].syncobj);
@@ -1272,31 +1251,9 @@ static int amdgpu_cs_submit(struct amdgpu_cs_parser *p,
         amdgpu_vm_move_to_lru_tail(p->adev, &fpriv->vm);
 
-        amdgpu_bo_list_for_each_entry(e, p->bo_list) {
-                struct dma_resv *resv = e->tv.bo->base.resv;
-                struct dma_fence_chain *chain = e->chain;
-                struct dma_resv_iter cursor;
-                struct dma_fence *fence;
-
-                if (!chain)
-                        continue;
-
-                /*
-                 * Temporary workaround dma_resv shortcommings by wrapping up
-                 * the submission in a dma_fence_chain and add it as exclusive
-                 * fence.
-                 *
-                 * TODO: Remove together with dma_resv rework.
-                 */
-                dma_resv_for_each_fence(&cursor, resv,
-                                        DMA_RESV_USAGE_WRITE,
-                                        fence) {
-                        break;
-                }
-                dma_fence_chain_init(chain, fence, dma_fence_get(p->fence), 1);
-                rcu_assign_pointer(resv->fence_excl, &chain->base);
-                e->chain = NULL;
-        }
+        /* Make sure all BOs are remembered as writers */
+        amdgpu_bo_list_for_each_entry(e, p->bo_list)
+                e->tv.num_shared = 0;
 
         ttm_eu_fence_buffer_objects(&p->ticket, &p->validated, p->fence);
         mutex_unlock(&p->adev->notifier_lock);
...
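
The replacement in amdgpu_cs_submit() above only clears tv.num_shared; the actual bookkeeping now happens in the common TTM helper. Roughly, and assuming the ttm_eu_fence_buffer_objects() behaviour introduced earlier in this series, the helper records the scheduler fence for every validated BO along these lines (a sketch only; locking and ww_acquire bookkeeping are omitted, and the function name fence_validated_buffers() is invented for illustration):

#include <linux/dma-resv.h>
#include <drm/ttm/ttm_execbuf_util.h>

static void fence_validated_buffers(struct list_head *list,
                                    struct dma_fence *fence)
{
        struct ttm_validate_buffer *entry;

        list_for_each_entry(entry, list, head) {
                struct ttm_buffer_object *bo = entry->bo;

                /* num_shared == 0 marks the fence as a write access,
                 * anything else as a read access.
                 */
                dma_resv_add_fence(bo->base.resv, fence,
                                   entry->num_shared ? DMA_RESV_USAGE_READ :
                                                       DMA_RESV_USAGE_WRITE);
        }
}
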
@@ -99,8 +99,8 @@ static inline enum dma_resv_usage dma_resv_usage_rw(bool write)
 /**
  * struct dma_resv - a reservation object manages fences for a buffer
  *
- * There are multiple uses for this, with sometimes slightly different rules in
- * how the fence slots are used.
+ * This is a container for dma_fence objects which needs to handle multiple use
+ * cases.
  *
  * One use is to synchronize cross-driver access to a struct dma_buf, either for
  * dynamic buffer management or just to handle implicit synchronization between
@@ -130,47 +130,22 @@ struct dma_resv {
          * @seq:
          *
          * Sequence count for managing RCU read-side synchronization, allows
-         * read-only access to @fence_excl and @fence while ensuring we take a
-         * consistent snapshot.
+         * read-only access to @fences while ensuring we take a consistent
+         * snapshot.
          */
         seqcount_ww_mutex_t seq;
 
         /**
-         * @fence_excl:
+         * @fences:
          *
-         * The exclusive fence, if there is one currently.
+         * Array of fences which where added to the dma_resv object
          *
-         * To guarantee that no fences are lost, this new fence must signal
-         * only after the previous exclusive fence has signalled. If
-         * semantically only a new access is added without actually treating the
-         * previous one as a dependency the exclusive fences can be strung
-         * together using struct dma_fence_chain.
-         *
-         * Note that actual semantics of what an exclusive or shared fence mean
-         * is defined by the user, for reservation objects shared across drivers
-         * see &dma_buf.resv.
-         */
-        struct dma_fence __rcu *fence_excl;
-
-        /**
-         * @fence:
-         *
-         * List of current shared fences.
-         *
-         * There are no ordering constraints of shared fences against the
-         * exclusive fence slot. If a waiter needs to wait for all access, it
-         * has to wait for both sets of fences to signal.
-         *
-         * A new fence is added by calling dma_resv_add_shared_fence(). Since
-         * this often needs to be done past the point of no return in command
+         * A new fence is added by calling dma_resv_add_fence(). Since this
+         * often needs to be done past the point of no return in command
          * submission it cannot fail, and therefore sufficient slots need to be
          * reserved by calling dma_resv_reserve_fences().
          */
-        struct dma_resv_list __rcu *fence;
+        struct dma_resv_list __rcu *fences;
 };
 
 /**
@@ -207,8 +182,8 @@ struct dma_resv_iter {
         /** @fences: the shared fences; private, *MUST* not dereference */
         struct dma_resv_list *fences;
 
-        /** @shared_count: number of shared fences */
-        unsigned int shared_count;
+        /** @num_fences: number of fences */
+        unsigned int num_fences;
 
         /** @is_restarted: true if this is the first returned fence */
         bool is_restarted;
...
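
On the read side, the dma_resv_iter changes above keep the existing iteration helpers working; a reader simply asks for the usage level it cares about. A minimal sketch, assuming the dma_resv_usage_rw() helper visible in the hunk context above and a caller that already holds the reservation lock (wait_for_access() is an invented name):

#include <linux/dma-resv.h>
#include <linux/dma-fence.h>

/* Wait for every fence that a read (write == false) or write
 * (write == true) access must respect. Caller holds the dma_resv lock.
 */
static long wait_for_access(struct dma_resv *resv, bool write)
{
        struct dma_resv_iter cursor;
        struct dma_fence *fence;
        long r = 0;

        /* dma_resv_usage_rw() maps "do I write?" to the usage level
         * that must be waited for.
         */
        dma_resv_for_each_fence(&cursor, resv, dma_resv_usage_rw(write),
                                fence) {
                r = dma_fence_wait(fence, true);
                if (r)
                        break;
        }
        return r;
}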