Commit 92581f9f authored by Chris Wilson

drm/i915: Immediately execute the fenced work

If the caller allows and we do not have to wait for any signals,
immediately execute the work within the caller's process. By doing so we
avoid the overhead of scheduling a new task, and the latency in
executing it, at the cost of pulling that work back into the immediate
context. (Sometimes we still prefer to offload the task to another cpu,
especially if we plan on executing many such tasks in parallel for this
client.)
Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Reviewed-by: Tvrtko Ursulin <tvrtko.ursulin@intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20200325120227.8044-2-chris@chris-wilson.co.uk
parent 6670b413
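
The behaviour described above is easiest to see from a caller's perspective. The sketch below is illustrative only and not part of the patch: run_fenced() and my_ops are hypothetical names, and only the dma_fence_work_*()/dma_fence_*() calls are the real interface from i915_sw_fence_work.h touched by the hunks that follow.

/*
 * Usage sketch only - not from this patch. run_fenced() and my_ops are
 * hypothetical; the dma_fence_work_*() calls are the interface declared
 * in i915_sw_fence_work.h.
 */
#include <linux/dma-fence.h>
#include <linux/slab.h>

#include "i915_sw_fence_work.h"

static int run_fenced(const struct dma_fence_work_ops *my_ops,
		      struct dma_fence *prereq)
{
	struct dma_fence_work *work;
	int err = 0;

	/* Ownership passes to the fence; freed when the last ref is dropped. */
	work = kzalloc(sizeof(*work), GFP_KERNEL);
	if (!work)
		return -ENOMEM;

	dma_fence_work_init(work, my_ops);

	if (prereq) {
		/* Each unsignaled dependency raises work->chain.pending. */
		err = dma_fence_work_chain(work, prereq);
		if (err < 0) {
			/* Completes the fence without running ops->work(). */
			dma_fence_set_error(&work->dma, err);
		}
	}

	/*
	 * If nothing is left to wait on (pending <= 1), ops->work() runs
	 * right here in the caller's thread; otherwise this behaves exactly
	 * like dma_fence_work_commit() and the callback is queued to
	 * system_unbound_wq once the chained dependencies signal.
	 */
	dma_fence_work_commit_imm(work);

	return err < 0 ? err : 0;
}
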
@@ -1822,7 +1822,7 @@ static int eb_parse_pipeline(struct i915_execbuffer *eb,
 	dma_resv_add_excl_fence(shadow->resv, &pw->base.dma);
 	dma_resv_unlock(shadow->resv);

-	dma_fence_work_commit(&pw->base);
+	dma_fence_work_commit_imm(&pw->base);
 	return 0;

 err_batch_unlock:
@@ -38,6 +38,9 @@ fence_notify(struct i915_sw_fence *fence, enum i915_sw_fence_notify state)
 		if (!f->dma.error) {
 			dma_fence_get(&f->dma);
-			queue_work(system_unbound_wq, &f->work);
+			if (test_bit(DMA_FENCE_WORK_IMM, &f->dma.flags))
+				fence_work(&f->work);
+			else
+				queue_work(system_unbound_wq, &f->work);
 		} else {
 			fence_complete(f);
@@ -32,6 +32,10 @@ struct dma_fence_work {
 	const struct dma_fence_work_ops *ops;
 };

+enum {
+	DMA_FENCE_WORK_IMM = DMA_FENCE_FLAG_USER_BITS,
+};
+
 void dma_fence_work_init(struct dma_fence_work *f,
 			 const struct dma_fence_work_ops *ops);
 int dma_fence_work_chain(struct dma_fence_work *f, struct dma_fence *signal);
@@ -41,4 +45,23 @@ static inline void dma_fence_work_commit(struct dma_fence_work *f)
 	i915_sw_fence_commit(&f->chain);
 }

+/**
+ * dma_fence_work_commit_imm: Commit the fence, and if possible execute locally.
+ * @f: the fenced worker
+ *
+ * Instead of always scheduling a worker to execute the callback (see
+ * dma_fence_work_commit()), we try to execute the callback immediately in
+ * the local context. It is required that the fence be committed before it
+ * is published, and that no other threads try to tamper with the number
+ * of asynchronous waits on the fence (or else the callback will be
+ * executed in the wrong context, i.e. not the callers).
+ */
+static inline void dma_fence_work_commit_imm(struct dma_fence_work *f)
+{
+	if (atomic_read(&f->chain.pending) <= 1)
+		__set_bit(DMA_FENCE_WORK_IMM, &f->dma.flags);
+
+	dma_fence_work_commit(f);
+}
+
 #endif /* I915_SW_FENCE_WORK_H */
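
To illustrate the requirement spelled out in the kerneldoc above: the pending count read by dma_fence_work_commit_imm() is only meaningful if the committing thread is the last one allowed to add dependencies. The helper below is a hypothetical sketch, not code from the driver.

/* Sketch only: owner_commit() is a hypothetical helper. */
static void owner_commit(struct dma_fence_work *f, struct dma_fence *dep)
{
	int err;

	/* The last dependency is chained by the owning thread... */
	err = dma_fence_work_chain(f, dep);
	if (err < 0)
		dma_fence_set_error(&f->dma, err);

	/*
	 * ...and only then do we commit, so the atomic_read() of
	 * chain.pending inside dma_fence_work_commit_imm() cannot race with
	 * a new dependency. If another thread could still chain a wait here,
	 * DMA_FENCE_WORK_IMM might already be set while the final signal
	 * arrives from that thread (or from interrupt context), and
	 * fence_notify() would then run ops->work() there rather than in
	 * this caller or on system_unbound_wq.
	 */
	dma_fence_work_commit_imm(f);
}
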
@@ -980,7 +980,7 @@ int i915_vma_pin(struct i915_vma *vma, u64 size, u64 alignment, u64 flags)
 	mutex_unlock(&vma->vm->mutex);
 err_fence:
 	if (work)
-		dma_fence_work_commit(&work->base);
+		dma_fence_work_commit_imm(&work->base);
 	if (wakeref)
 		intel_runtime_pm_put(&vma->vm->i915->runtime_pm, wakeref);
 err_pages: