Commit 047a1b87 authored by Christian König

dma-buf & drm/amdgpu: remove dma_resv workaround

Rework the internals of the dma_resv object to allow adding more than one
write fence and to remember, for each fence, what purpose it had.

This allows removing the workaround from amdgpu, which used a dma_fence_chain
container for this instead (see the usage sketch after the commit metadata).
Signed-off-by: Christian König <christian.koenig@amd.com>
Reviewed-by: Daniel Vetter <daniel.vetter@ffwll.ch>
Cc: amd-gfx@lists.freedesktop.org
Link: https://patchwork.freedesktop.org/patch/msgid/20220407085946.744568-4-christian.koenig@amd.com
parent 73511edf
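For readers unfamiliar with the reworked API, here is a minimal, hedged sketch of what attaching write fences looks like after this series; the wrapper and fence names are made up for illustration and are not part of the patch. Slots are reserved first because adding a fence must not fail late in command submission, and the per-fence usage flag replaces the old exclusive/shared split:

#include <linux/dma-resv.h>

/* Illustrative sketch only: attach two write fences to one reservation
 * object, something the single exclusive slot could not express before.
 * "resv", "render_fence" and "copy_fence" are placeholder names.
 */
static int attach_write_fences(struct dma_resv *resv,
                               struct dma_fence *render_fence,
                               struct dma_fence *copy_fence)
{
        int ret;

        ret = dma_resv_lock(resv, NULL);
        if (ret)
                return ret;

        /* Reserve slots up front; dma_resv_add_fence() itself cannot fail. */
        ret = dma_resv_reserve_fences(resv, 2);
        if (ret)
                goto unlock;

        /* Each fence carries its purpose instead of going into one slot. */
        dma_resv_add_fence(resv, render_fence, DMA_RESV_USAGE_WRITE);
        dma_resv_add_fence(resv, copy_fence, DMA_RESV_USAGE_WRITE);

unlock:
        dma_resv_unlock(resv);
        return ret;
}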
[Part of the diff is collapsed and is not shown below.]
@@ -34,7 +34,6 @@ struct amdgpu_fpriv;
 struct amdgpu_bo_list_entry {
         struct ttm_validate_buffer      tv;
         struct amdgpu_bo_va             *bo_va;
-        struct dma_fence_chain          *chain;
         uint32_t                        priority;
         struct page                     **user_pages;
         bool                            user_invalidated;
@@ -574,14 +574,6 @@ static int amdgpu_cs_parser_bos(struct amdgpu_cs_parser *p,
                 struct amdgpu_bo *bo = ttm_to_amdgpu_bo(e->tv.bo);
 
                 e->bo_va = amdgpu_vm_bo_find(vm, bo);
-
-                if (bo->tbo.base.dma_buf && !amdgpu_bo_explicit_sync(bo)) {
-                        e->chain = dma_fence_chain_alloc();
-                        if (!e->chain) {
-                                r = -ENOMEM;
-                                goto error_validate;
-                        }
-                }
         }
 
         /* Move fence waiting after getting reservation lock of
@@ -642,13 +634,8 @@ static int amdgpu_cs_parser_bos(struct amdgpu_cs_parser *p,
         }
 
 error_validate:
-        if (r) {
-                amdgpu_bo_list_for_each_entry(e, p->bo_list) {
-                        dma_fence_chain_free(e->chain);
-                        e->chain = NULL;
-                }
+        if (r)
                 ttm_eu_backoff_reservation(&p->ticket, &p->validated);
-        }
 out:
         return r;
 }
@@ -688,17 +675,9 @@ static void amdgpu_cs_parser_fini(struct amdgpu_cs_parser *parser, int error,
 {
         unsigned i;
 
-        if (error && backoff) {
-                struct amdgpu_bo_list_entry *e;
-
-                amdgpu_bo_list_for_each_entry(e, parser->bo_list) {
-                        dma_fence_chain_free(e->chain);
-                        e->chain = NULL;
-                }
-
+        if (error && backoff)
                 ttm_eu_backoff_reservation(&parser->ticket,
                                            &parser->validated);
-        }
 
         for (i = 0; i < parser->num_post_deps; i++) {
                 drm_syncobj_put(parser->post_deps[i].syncobj);
@@ -1272,31 +1251,9 @@ static int amdgpu_cs_submit(struct amdgpu_cs_parser *p,
 
         amdgpu_vm_move_to_lru_tail(p->adev, &fpriv->vm);
 
-        amdgpu_bo_list_for_each_entry(e, p->bo_list) {
-                struct dma_resv *resv = e->tv.bo->base.resv;
-                struct dma_fence_chain *chain = e->chain;
-                struct dma_resv_iter cursor;
-                struct dma_fence *fence;
-
-                if (!chain)
-                        continue;
-
-                /*
-                 * Temporary workaround dma_resv shortcommings by wrapping up
-                 * the submission in a dma_fence_chain and add it as exclusive
-                 * fence.
-                 *
-                 * TODO: Remove together with dma_resv rework.
-                 */
-                dma_resv_for_each_fence(&cursor, resv,
-                                        DMA_RESV_USAGE_WRITE,
-                                        fence) {
-                        break;
-                }
-                dma_fence_chain_init(chain, fence, dma_fence_get(p->fence), 1);
-                rcu_assign_pointer(resv->fence_excl, &chain->base);
-                e->chain = NULL;
-        }
+        /* Make sure all BOs are remembered as writers */
+        amdgpu_bo_list_for_each_entry(e, p->bo_list)
+                e->tv.num_shared = 0;
 
         ttm_eu_fence_buffer_objects(&p->ticket, &p->validated, p->fence);
         mutex_unlock(&p->adev->notifier_lock);
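The hunk above drops the chain juggling entirely: with the rework the write-vs-read decision travels as a usage flag when the TTM helper attaches the submission fence, so forcing num_shared to 0 is enough to record every BO as written. Roughly, the per-BO effect is the call below (a hedged sketch of the behaviour, not the exact code of ttm_eu_fence_buffer_objects()):

/* Sketch of the per-BO effect of ttm_eu_fence_buffer_objects() here:
 * with e->tv.num_shared forced to 0, the CS fence is recorded as a
 * write fence instead of being wrapped into a dma_fence_chain.
 */
dma_resv_add_fence(bo->base.resv, p->fence,
                   e->tv.num_shared ? DMA_RESV_USAGE_READ :
                                      DMA_RESV_USAGE_WRITE);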
@@ -99,8 +99,8 @@ static inline enum dma_resv_usage dma_resv_usage_rw(bool write)
 /**
  * struct dma_resv - a reservation object manages fences for a buffer
  *
- * There are multiple uses for this, with sometimes slightly different rules in
- * how the fence slots are used.
+ * This is a container for dma_fence objects which needs to handle multiple use
+ * cases.
  *
  * One use is to synchronize cross-driver access to a struct dma_buf, either for
  * dynamic buffer management or just to handle implicit synchronization between
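The use case mentioned in this kernel-doc is implicit synchronization between drivers sharing a dma_buf. As a hedged illustration (not part of this patch; the wrapper name is made up), a consumer typically waits on the fences relevant to its access mode, which dma_resv_usage_rw() selects:

#include <linux/dma-resv.h>
#include <linux/sched.h>

/* Sketch: wait for implicit synchronization before accessing a shared
 * buffer; "write" states whether our own access will modify it.
 * Writers wait for all fences, readers only for the write fences.
 */
static int wait_for_implicit_sync(struct dma_resv *resv, bool write)
{
        long ret;

        ret = dma_resv_wait_timeout(resv, dma_resv_usage_rw(write), true,
                                    MAX_SCHEDULE_TIMEOUT);
        return ret < 0 ? ret : 0;
}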
@@ -130,47 +130,22 @@ struct dma_resv {
          * @seq:
          *
          * Sequence count for managing RCU read-side synchronization, allows
-         * read-only access to @fence_excl and @fence while ensuring we take a
-         * consistent snapshot.
+         * read-only access to @fences while ensuring we take a consistent
+         * snapshot.
          */
         seqcount_ww_mutex_t seq;
 
         /**
-         * @fence_excl:
+         * @fences:
          *
-         * The exclusive fence, if there is one currently.
+         * Array of fences which where added to the dma_resv object
          *
-         * To guarantee that no fences are lost, this new fence must signal
-         * only after the previous exclusive fence has signalled. If
-         * semantically only a new access is added without actually treating the
-         * previous one as a dependency the exclusive fences can be strung
-         * together using struct dma_fence_chain.
-         *
-         * Note that actual semantics of what an exclusive or shared fence mean
-         * is defined by the user, for reservation objects shared across drivers
-         * see &dma_buf.resv.
-         */
-        struct dma_fence __rcu *fence_excl;
-
-        /**
-         * @fence:
-         *
-         * List of current shared fences.
-         *
-         * There are no ordering constraints of shared fences against the
-         * exclusive fence slot. If a waiter needs to wait for all access, it
-         * has to wait for both sets of fences to signal.
-         *
-         * A new fence is added by calling dma_resv_add_shared_fence(). Since
-         * this often needs to be done past the point of no return in command
+         * A new fence is added by calling dma_resv_add_fence(). Since this
+         * often needs to be done past the point of no return in command
          * submission it cannot fail, and therefore sufficient slots need to be
          * reserved by calling dma_resv_reserve_fences().
-         *
-         * Note that actual semantics of what an exclusive or shared fence mean
-         * is defined by the user, for reservation objects shared across drivers
-         * see &dma_buf.resv.
          */
-        struct dma_resv_list __rcu *fence;
+        struct dma_resv_list __rcu *fences;
 };
 
 /**
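The @seq count referenced above is what lets readers take a consistent snapshot of @fences without holding the reservation lock. A hedged sketch of the consumer side, using dma_resv_get_fences(); the caller owns the returned references and the array, and the wrapper name is made up:

#include <linux/dma-resv.h>
#include <linux/slab.h>

/* Sketch: grab an unlocked but consistent snapshot of all fences. */
static int snapshot_all_fences(struct dma_resv *resv)
{
        struct dma_fence **fences;
        unsigned int i, count;
        int ret;

        ret = dma_resv_get_fences(resv, DMA_RESV_USAGE_READ, &count, &fences);
        if (ret)
                return ret;

        for (i = 0; i < count; i++) {
                /* ... inspect fences[i] ... */
                dma_fence_put(fences[i]);
        }
        kfree(fences);
        return 0;
}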
@@ -207,8 +182,8 @@ struct dma_resv_iter {
         /** @fences: the shared fences; private, *MUST* not dereference */
         struct dma_resv_list *fences;
 
-        /** @shared_count: number of shared fences */
-        unsigned int shared_count;
+        /** @num_fences: number of fences */
+        unsigned int num_fences;
 
         /** @is_restarted: true if this is the first returned fence */
         bool is_restarted;
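num_fences, like the other iterator members, is private; users walk the object through the iterator macros instead. A minimal, hedged sketch (wrapper name made up) for the locked case:

#include <linux/dma-resv.h>
#include <linux/printk.h>

/* Sketch: walk the write fences of an already locked dma_resv object.
 * The iterator keeps a reference while "fence" is in scope.
 */
static void print_write_fences(struct dma_resv *resv)
{
        struct dma_resv_iter cursor;
        struct dma_fence *fence;

        dma_resv_for_each_fence(&cursor, resv, DMA_RESV_USAGE_WRITE, fence)
                pr_info("fence context %llu, seqno %llu\n",
                        fence->context, fence->seqno);
}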