Commit 6c2f73e2 authored by Yan Zhao, committed by Zhenyu Wang

drm/i915/gvt: access shadow ctx via its virtual address directly

As the shadow context is pinned in intel_vgpu_setup_submission() and
unpinned in intel_vgpu_clean_submission(), its base virtual address can
safely be derived from lrc_reg_state; there is no need to call
kmap()/kunmap() repeatedly.
Signed-off-by: Yan Zhao <yan.y.zhao@intel.com>
Reviewed-by: Zhenyu Wang <zhenyuw@linux.intel.com>
Signed-off-by: Zhenyu Wang <zhenyuw@linux.intel.com>
Link: http://patchwork.freedesktop.org/patch/msgid/20200415035728.26424-1-yan.y.zhao@intel.com
parent 07f2fee9
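
The pointer arithmetic the patch relies on can be sketched in isolation: because the shadow context stays pinned, lrc_reg_state is a live kernel mapping of the LRC_STATE_PN page, so stepping back LRC_STATE_PN pages gives the base of the whole context image, and any page i is then reachable at base + (i << I915_GTT_PAGE_SHIFT). The stand-alone C sketch below mirrors that arithmetic against an ordinary heap buffer; the constants (LRC_STATE_PN = 1, 4 KiB GTT pages) and the 20-page context size are illustrative assumptions, not values taken from the i915 headers.

/*
 * Stand-alone sketch of the address arithmetic used by the patch; compile
 * with gcc (the void * arithmetic is the same GCC extension the kernel
 * itself uses). All constants and the malloc()ed buffer are stand-ins.
 */
#include <stdio.h>
#include <stdlib.h>

#define I915_GTT_PAGE_SHIFT	12	/* assumed 4 KiB GTT pages */
#define I915_GTT_PAGE_SIZE	(1UL << I915_GTT_PAGE_SHIFT)
#define LRC_STATE_PN		1	/* assumed ring-context page index */
#define CTX_PAGES		20	/* stand-in for context_page_num */

int main(void)
{
	/* Stand-in for the pinned shadow context backing store. */
	void *ctx_mapping = malloc(CTX_PAGES * I915_GTT_PAGE_SIZE);

	if (!ctx_mapping)
		return 1;

	/* lrc_reg_state points at the LRC_STATE_PN page of that mapping... */
	void *lrc_reg_state = ctx_mapping + (LRC_STATE_PN << I915_GTT_PAGE_SHIFT);

	/* ...so stepping back LRC_STATE_PN pages recovers the base address. */
	void *context_base = lrc_reg_state - (LRC_STATE_PN << I915_GTT_PAGE_SHIFT);

	/* Every context page is then addressable without kmap()/kunmap(). */
	for (unsigned long i = 2; i < CTX_PAGES; i++) {
		void *dst = context_base + (i << I915_GTT_PAGE_SHIFT);

		printf("page %2lu -> offset %#lx\n", i,
		       (unsigned long)(dst - ctx_mapping));
	}

	free(ctx_mapping);
	return 0;
}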
@@ -128,16 +128,19 @@ static int populate_shadow_context(struct intel_vgpu_workload *workload)
 {
 	struct intel_vgpu *vgpu = workload->vgpu;
 	struct intel_gvt *gvt = vgpu->gvt;
-	struct drm_i915_gem_object *ctx_obj =
-		workload->req->context->state->obj;
+	struct intel_context *ctx = workload->req->context;
 	struct execlist_ring_context *shadow_ring_context;
-	struct page *page;
 	void *dst;
+	void *context_base;
 	unsigned long context_gpa, context_page_num;
 	int i;
 
-	page = i915_gem_object_get_page(ctx_obj, LRC_STATE_PN);
-	shadow_ring_context = kmap(page);
+	GEM_BUG_ON(!intel_context_is_pinned(ctx));
+
+	context_base = (void *) ctx->lrc_reg_state -
+			(LRC_STATE_PN << I915_GTT_PAGE_SHIFT);
+
+	shadow_ring_context = (void *) ctx->lrc_reg_state;
 
 	sr_oa_regs(workload, (u32 *)shadow_ring_context, true);
 #define COPY_REG(name) \
@@ -169,7 +172,6 @@ static int populate_shadow_context(struct intel_vgpu_workload *workload)
 		I915_GTT_PAGE_SIZE - sizeof(*shadow_ring_context));
 
 	sr_oa_regs(workload, (u32 *)shadow_ring_context, false);
-	kunmap(page);
 
 	if (IS_RESTORE_INHIBIT(shadow_ring_context->ctx_ctrl.val))
 		return 0;
@@ -194,11 +196,9 @@ static int populate_shadow_context(struct intel_vgpu_workload *workload)
 			return -EFAULT;
 		}
 
-		page = i915_gem_object_get_page(ctx_obj, i);
-		dst = kmap(page);
+		dst = context_base + (i << I915_GTT_PAGE_SHIFT);
 		intel_gvt_hypervisor_read_gpa(vgpu, context_gpa, dst,
 				I915_GTT_PAGE_SIZE);
-		kunmap(page);
 		i++;
 	}
 	return 0;
@@ -784,9 +784,9 @@ static void update_guest_context(struct intel_vgpu_workload *workload)
 {
 	struct i915_request *rq = workload->req;
 	struct intel_vgpu *vgpu = workload->vgpu;
-	struct drm_i915_gem_object *ctx_obj = rq->context->state->obj;
 	struct execlist_ring_context *shadow_ring_context;
-	struct page *page;
+	struct intel_context *ctx = workload->req->context;
+	void *context_base;
 	void *src;
 	unsigned long context_gpa, context_page_num;
 	int i;
@@ -797,6 +797,8 @@ static void update_guest_context(struct intel_vgpu_workload *workload)
 	gvt_dbg_sched("ring id %d workload lrca %x\n", rq->engine->id,
 		      workload->ctx_desc.lrca);
 
+	GEM_BUG_ON(!intel_context_is_pinned(ctx));
+
 	head = workload->rb_head;
 	tail = workload->rb_tail;
 	wrap_count = workload->guest_rb_head >> RB_HEAD_WRAP_CNT_OFF;
@@ -821,6 +823,8 @@ static void update_guest_context(struct intel_vgpu_workload *workload)
 		context_page_num = 19;
 
 	i = 2;
+	context_base = (void *) ctx->lrc_reg_state -
+			(LRC_STATE_PN << I915_GTT_PAGE_SHIFT);
 
 	while (i < context_page_num) {
 		context_gpa = intel_vgpu_gma_to_gpa(vgpu->gtt.ggtt_mm,
@@ -831,19 +835,16 @@ static void update_guest_context(struct intel_vgpu_workload *workload)
 			return;
 		}
 
-		page = i915_gem_object_get_page(ctx_obj, i);
-		src = kmap(page);
+		src = context_base + (i << I915_GTT_PAGE_SHIFT);
 		intel_gvt_hypervisor_write_gpa(vgpu, context_gpa, src,
 				I915_GTT_PAGE_SIZE);
-		kunmap(page);
 		i++;
 	}
 
 	intel_gvt_hypervisor_write_gpa(vgpu, workload->ring_context_gpa +
 		RING_CTX_OFF(ring_header.val), &workload->rb_tail, 4);
 
-	page = i915_gem_object_get_page(ctx_obj, LRC_STATE_PN);
-	shadow_ring_context = kmap(page);
+	shadow_ring_context = (void *) ctx->lrc_reg_state;
 
 #define COPY_REG(name) \
 	intel_gvt_hypervisor_write_gpa(vgpu, workload->ring_context_gpa + \
@@ -860,8 +861,6 @@ static void update_guest_context(struct intel_vgpu_workload *workload)
 		(void *)shadow_ring_context +
 		sizeof(*shadow_ring_context),
 		I915_GTT_PAGE_SIZE - sizeof(*shadow_ring_context));
-
-	kunmap(page);
 }
 
 void intel_vgpu_clean_workloads(struct intel_vgpu *vgpu,