Commit d92f77de authored by Chris Wilson's avatar Chris Wilson

Revert "drm/i915: use a separate context for gpu relocs"

Since commit c45e788d ("drm/i915/tgl: Suspend pre-parser across GTT
invalidations"), we now disable the advanced preparser on Tigerlake for the
invalidation phase at the start of the batch, so we no longer need to emit
the GPU relocations from a second context as they are now flushed inline.

References: 8a9a9827 ("drm/i915: use a separate context for gpu relocs")
References: c45e788d ("drm/i915/tgl: Suspend pre-parser across GTT invalidations")
Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Cc: Mika Kuoppala <mika.kuoppala@linux.intel.com>
Cc: Daniele Ceraolo Spurio <daniele.ceraolospurio@intel.com>
Reviewed-by: Mika Kuoppala <mika.kuoppala@linux.intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20191129124846.949100-1-chris@chris-wilson.co.uk
parent 0cb7da10
...@@ -253,7 +253,6 @@ struct i915_execbuffer { ...@@ -253,7 +253,6 @@ struct i915_execbuffer {
bool has_fence : 1; bool has_fence : 1;
bool needs_unfenced : 1; bool needs_unfenced : 1;
struct intel_context *ce;
struct i915_request *rq; struct i915_request *rq;
u32 *rq_cmd; u32 *rq_cmd;
unsigned int rq_size; unsigned int rq_size;
...@@ -886,9 +885,6 @@ static void eb_destroy(const struct i915_execbuffer *eb) ...@@ -886,9 +885,6 @@ static void eb_destroy(const struct i915_execbuffer *eb)
{ {
GEM_BUG_ON(eb->reloc_cache.rq); GEM_BUG_ON(eb->reloc_cache.rq);
if (eb->reloc_cache.ce)
intel_context_put(eb->reloc_cache.ce);
if (eb->lut_size > 0) if (eb->lut_size > 0)
kfree(eb->buckets); kfree(eb->buckets);
} }
...@@ -912,7 +908,6 @@ static void reloc_cache_init(struct reloc_cache *cache, ...@@ -912,7 +908,6 @@ static void reloc_cache_init(struct reloc_cache *cache,
cache->has_fence = cache->gen < 4; cache->has_fence = cache->gen < 4;
cache->needs_unfenced = INTEL_INFO(i915)->unfenced_needs_alignment; cache->needs_unfenced = INTEL_INFO(i915)->unfenced_needs_alignment;
cache->node.flags = 0; cache->node.flags = 0;
cache->ce = NULL;
cache->rq = NULL; cache->rq = NULL;
cache->rq_size = 0; cache->rq_size = 0;
} }
...@@ -1182,7 +1177,7 @@ static int __reloc_gpu_alloc(struct i915_execbuffer *eb, ...@@ -1182,7 +1177,7 @@ static int __reloc_gpu_alloc(struct i915_execbuffer *eb,
if (err) if (err)
goto err_unmap; goto err_unmap;
rq = intel_context_create_request(cache->ce); rq = i915_request_create(eb->context);
if (IS_ERR(rq)) { if (IS_ERR(rq)) {
err = PTR_ERR(rq); err = PTR_ERR(rq);
goto err_unpin; goto err_unpin;
...@@ -1253,29 +1248,6 @@ static u32 *reloc_gpu(struct i915_execbuffer *eb, ...@@ -1253,29 +1248,6 @@ static u32 *reloc_gpu(struct i915_execbuffer *eb,
if (!intel_engine_can_store_dword(eb->engine)) if (!intel_engine_can_store_dword(eb->engine))
return ERR_PTR(-ENODEV); return ERR_PTR(-ENODEV);
if (!cache->ce) {
struct intel_context *ce;
/*
* The CS pre-parser can pre-fetch commands across
* memory sync points and starting gen12 it is able to
* pre-fetch across BB_START and BB_END boundaries
* (within the same context). We therefore use a
* separate context gen12+ to guarantee that the reloc
* writes land before the parser gets to the target
* memory location.
*/
if (cache->gen >= 12)
ce = intel_context_create(eb->context->gem_context,
eb->engine);
else
ce = intel_context_get(eb->context);
if (IS_ERR(ce))
return ERR_CAST(ce);
cache->ce = ce;
}
err = __reloc_gpu_alloc(eb, vma, len); err = __reloc_gpu_alloc(eb, vma, len);
if (unlikely(err)) if (unlikely(err))
return ERR_PTR(err); return ERR_PTR(err);
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment