Commit a3cfdca9 authored by fred gao, committed by Zhenyu Wang

drm/i915/gvt: Add error handling for intel_gvt_scan_and_shadow_workload

When an error occurs after shadow_indirect_ctx, this patch does the
proper cleanup and rolls the shadowed indirect context back to its
original state before the workload is abandoned (the general unwinding
pattern is sketched below the commit metadata).

v2:
- split the mixed several error paths for better review. (Zhenyu)

v3:
- no return check for clean up functions. (Changbin)

v4:
- expose and reuse the existing release_shadow_wa_ctx. (Zhenyu)

v5:
- move the release function to scheduler.c file. (Zhenyu)

v6:
- move error handling code of intel_gvt_scan_and_shadow_workload
  to here. (Zhenyu)
Signed-off-by: fred gao <fred.gao@intel.com>
Signed-off-by: Zhenyu Wang <zhenyuw@linux.intel.com>
parent 0a53bc07
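
The fix follows the kernel's layered goto-unwind idiom: each failure jumps
to a label that releases exactly what has been acquired so far, and the
labels fall through in reverse acquisition order. Below is a minimal,
self-contained sketch of that pattern, not code from this patch; the
helper names (acquire_a, release_a, and so on) are hypothetical, while the
real labels in the diff are err_scan, err_shadow and err_unpin.

/*
 * Illustrative sketch only -- not code from this patch. Labels are named
 * after the cleanup they begin, and fall through so that a later failure
 * also releases every earlier resource.
 */
#include <stdio.h>
#include <errno.h>

static int acquire_a(void)  { return 0; }     /* like the scan/shadow step */
static int acquire_b(void)  { return 0; }     /* like pinning the context  */
static int do_work(void)    { return -EIO; }  /* fails here on purpose     */
static void release_b(void) { puts("release b"); }
static void release_a(void) { puts("release a"); }

static int setup(void)
{
	int ret;

	ret = acquire_a();
	if (ret)
		goto err_out;          /* nothing acquired yet */

	ret = acquire_b();
	if (ret)
		goto err_release_a;    /* undo a only */

	ret = do_work();
	if (ret)
		goto err_release_b;    /* undo b, then fall through to a */

	return 0;

err_release_b:
	release_b();
err_release_a:
	release_a();
err_out:
	return ret;
}

int main(void)
{
	return setup() ? 1 : 0;
}

In the patch itself, a failure after the shadow context is pinned takes
err_unpin, which unpins the context and then falls through to err_shadow
to release the shadowed indirect context: exactly the rollback described
in the message above.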
diff --git a/drivers/gpu/drm/i915/gvt/execlist.c b/drivers/gpu/drm/i915/gvt/execlist.c
@@ -489,15 +489,6 @@ static void release_shadow_batch_buffer(struct intel_vgpu_workload *workload)
 	}
 }
 
-static void release_shadow_wa_ctx(struct intel_shadow_wa_ctx *wa_ctx)
-{
-	if (!wa_ctx->indirect_ctx.obj)
-		return;
-
-	i915_gem_object_unpin_map(wa_ctx->indirect_ctx.obj);
-	i915_gem_object_put(wa_ctx->indirect_ctx.obj);
-}
-
 static int complete_execlist_workload(struct intel_vgpu_workload *workload)
 {
 	struct intel_vgpu *vgpu = workload->vgpu;
diff --git a/drivers/gpu/drm/i915/gvt/scheduler.c b/drivers/gpu/drm/i915/gvt/scheduler.c
@@ -229,6 +229,15 @@ static int copy_workload_to_ring_buffer(struct intel_vgpu_workload *workload)
 	return 0;
 }
 
+void release_shadow_wa_ctx(struct intel_shadow_wa_ctx *wa_ctx)
+{
+	if (!wa_ctx->indirect_ctx.obj)
+		return;
+
+	i915_gem_object_unpin_map(wa_ctx->indirect_ctx.obj);
+	i915_gem_object_put(wa_ctx->indirect_ctx.obj);
+}
+
 /**
  * intel_gvt_scan_and_shadow_workload - audit the workload by scanning and
  * shadow it as well, include ringbuffer,wa_ctx and ctx.
@@ -263,13 +272,13 @@ int intel_gvt_scan_and_shadow_workload(struct intel_vgpu_workload *workload)
 
 	ret = intel_gvt_scan_and_shadow_ringbuffer(workload);
 	if (ret)
-		goto out;
+		goto err_scan;
 
 	if ((workload->ring_id == RCS) &&
 	    (workload->wa_ctx.indirect_ctx.size != 0)) {
 		ret = intel_gvt_scan_and_shadow_wa_ctx(&workload->wa_ctx);
 		if (ret)
-			goto out;
+			goto err_scan;
 	}
 
 	/* pin shadow context by gvt even the shadow context will be pinned
@@ -283,18 +292,18 @@ int intel_gvt_scan_and_shadow_workload(struct intel_vgpu_workload *workload)
 	if (IS_ERR(ring)) {
 		ret = PTR_ERR(ring);
 		gvt_vgpu_err("fail to pin shadow context\n");
-		goto out;
+		goto err_shadow;
 	}
 
 	ret = populate_shadow_context(workload);
 	if (ret)
-		goto out;
+		goto err_unpin;
 
 	rq = i915_gem_request_alloc(dev_priv->engine[ring_id], shadow_ctx);
 	if (IS_ERR(rq)) {
 		gvt_vgpu_err("fail to allocate gem request\n");
 		ret = PTR_ERR(rq);
-		goto out;
+		goto err_unpin;
 	}
 
 	gvt_dbg_sched("ring id %d get i915 gem request %p\n", ring_id, rq);
@@ -302,10 +311,15 @@ int intel_gvt_scan_and_shadow_workload(struct intel_vgpu_workload *workload)
 	workload->req = i915_gem_request_get(rq);
 
 	ret = copy_workload_to_ring_buffer(workload);
 	if (ret)
-		goto out;
+		goto err_unpin;
 	workload->shadowed = true;
+	return 0;
 
-out:
+err_unpin:
+	engine->context_unpin(engine, shadow_ctx);
+err_shadow:
+	release_shadow_wa_ctx(&workload->wa_ctx);
+err_scan:
 	return ret;
 }
diff --git a/drivers/gpu/drm/i915/gvt/scheduler.h b/drivers/gpu/drm/i915/gvt/scheduler.h
@@ -140,4 +140,5 @@
 int intel_vgpu_init_gvt_context(struct intel_vgpu *vgpu);
 void intel_vgpu_clean_gvt_context(struct intel_vgpu *vgpu);
 
+void release_shadow_wa_ctx(struct intel_shadow_wa_ctx *wa_ctx);
 #endif