Commit 89ff76bf authored by Chris Wilson

drm/i915/gem: Utilize rcu iteration of context engines

Now that we can peek at GEM->engines[] and obtain a reference to them
using RCU, do so for instances where we can safely iterate the
potentially old copy of the engines. For setting, we can do this when we
know the engine properties are copied over before swapping, so we know
the new engines already have the global property and we update the old
before they are discarded. For reading, we only need to be safe; as we
do so on behalf of the user, their races are their own problem.
Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Reviewed-by: Mika Kuoppala <mika.kuoppala@linux.intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20200402124218.6375-1-chris@chris-wilson.co.uk
parent 4c977837
@@ -757,21 +757,46 @@ __create_context(struct drm_i915_private *i915)
 	return ERR_PTR(err);
 }
 
+static inline struct i915_gem_engines *
+__context_engines_await(const struct i915_gem_context *ctx)
+{
+	struct i915_gem_engines *engines;
+
+	rcu_read_lock();
+	do {
+		engines = rcu_dereference(ctx->engines);
+		GEM_BUG_ON(!engines);
+
+		if (unlikely(!i915_sw_fence_await(&engines->fence)))
+			continue;
+
+		if (likely(engines == rcu_access_pointer(ctx->engines)))
+			break;
+
+		i915_sw_fence_complete(&engines->fence);
+	} while (1);
+	rcu_read_unlock();
+
+	return engines;
+}
+
 static int
 context_apply_all(struct i915_gem_context *ctx,
 		  int (*fn)(struct intel_context *ce, void *data),
 		  void *data)
 {
 	struct i915_gem_engines_iter it;
+	struct i915_gem_engines *e;
 	struct intel_context *ce;
 	int err = 0;
 
-	for_each_gem_engine(ce, i915_gem_context_lock_engines(ctx), it) {
+	e = __context_engines_await(ctx);
+	for_each_gem_engine(ce, e, it) {
 		err = fn(ce, data);
 		if (err)
 			break;
 	}
-	i915_gem_context_unlock_engines(ctx);
+	i915_sw_fence_complete(&e->fence);
 
 	return err;
 }
@@ -786,11 +811,13 @@ static int __apply_ppgtt(struct intel_context *ce, void *vm)
 static struct i915_address_space *
 __set_ppgtt(struct i915_gem_context *ctx, struct i915_address_space *vm)
 {
-	struct i915_address_space *old = i915_gem_context_vm(ctx);
+	struct i915_address_space *old;
 
+	old = rcu_replace_pointer(ctx->vm,
+				  i915_vm_open(vm),
+				  lockdep_is_held(&ctx->mutex));
 	GEM_BUG_ON(old && i915_vm_is_4lvl(vm) != i915_vm_is_4lvl(old));
 
-	rcu_assign_pointer(ctx->vm, i915_vm_open(vm));
 	context_apply_all(ctx, __apply_ppgtt, vm);
 
 	return old;
@@ -1069,30 +1096,6 @@ static void cb_retire(struct i915_active *base)
 	kfree(cb);
 }
 
-static inline struct i915_gem_engines *
-__context_engines_await(const struct i915_gem_context *ctx)
-{
-	struct i915_gem_engines *engines;
-
-	rcu_read_lock();
-	do {
-		engines = rcu_dereference(ctx->engines);
-		if (unlikely(!engines))
-			break;
-
-		if (unlikely(!i915_sw_fence_await(&engines->fence)))
-			continue;
-
-		if (likely(engines == rcu_access_pointer(ctx->engines)))
-			break;
-
-		i915_sw_fence_complete(&engines->fence);
-	} while (1);
-	rcu_read_unlock();
-
-	return engines;
-}
-
 I915_SELFTEST_DECLARE(static intel_engine_mask_t context_barrier_inject_fault);
 
 static int context_barrier_task(struct i915_gem_context *ctx,
 				intel_engine_mask_t engines,
...
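The __context_engines_await() helper, moved above context_apply_all() in the first hunk, uses the usual acquire-then-recheck idiom: take a reference on the currently published engines, then verify the pointer has not been swapped in the meantime; if it has, drop the reference and retry. Below is a rough userspace analogue, purely for illustration: plain C11 atomics and a refcount stand in for RCU and the i915_sw_fence, and every name in the sketch is invented here rather than taken from the driver.

/*
 * Illustrative userspace analogue of the acquire-then-recheck pattern in
 * __context_engines_await(); not kernel code.  A C11 atomic refcount stands
 * in for engines->fence, and atomic loads stand in for RCU dereferences.
 */
#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

struct engines {
        atomic_int ref;         /* stands in for the i915_sw_fence */
        int id;
};

static _Atomic(struct engines *) published;     /* stands in for ctx->engines */

/* Take a reference, unless the object is already being torn down (ref == 0). */
static bool engines_tryget(struct engines *e)
{
        int old = atomic_load(&e->ref);

        do {
                if (old == 0)
                        return false;
        } while (!atomic_compare_exchange_weak(&e->ref, &old, old + 1));

        return true;
}

static void engines_put(struct engines *e)
{
        atomic_fetch_sub(&e->ref, 1);
}

/* Acquire a stable reference to whatever engines are currently published. */
static struct engines *engines_await(void)
{
        struct engines *e;

        do {
                e = atomic_load(&published);

                /* Raced with teardown of this set: retry with the new one. */
                if (!engines_tryget(e))
                        continue;

                /* Still the published set?  Then our reference is good. */
                if (e == atomic_load(&published))
                        break;

                /* Swapped while we took the reference: undo and retry. */
                engines_put(e);
        } while (1);

        return e;
}

int main(void)
{
        struct engines set = { .ref = 1, .id = 0 };
        struct engines *e;

        atomic_store(&published, &set);

        e = engines_await();
        printf("holding engines set %d\n", e->id);
        engines_put(e);

        return 0;
}

The recheck against the published pointer is the key step: if the set was replaced between the initial load and the successful reference acquisition, the loop drops that reference and retries, so the caller ends up holding a reference on the currently published set rather than a just-retired one.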