Commit ef75c685 authored by fred gao, committed by Zhenyu Wang

drm/i915/gvt: Correct the privilege shadow batch buffer address

Once the ring buffer has been copied to ring_scan_buffer and scanned,
the shadow batch buffer start address is recorded only against
ring_scan_buffer, not against the real ring buffer that is allocated
through intel_ring_begin later in copy_workload_to_ring_buffer.

This patch only fixes the shadow batch buffer address for the ring
buffer case; it does not cover shadow_wa_ctx.
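
In outline, the fix records where the batch-buffer start command sits
relative to the scan buffer, then rebases the pointer once the real
ring exists. A condensed sketch of that pattern (the exact hunks are
in the diff below):

	/* Scan time (perform_bb_shadow): ip_va points into
	 * ring_scan_buffer, which is discarded after the scan, so only
	 * the offset is remembered.
	 */
	if (s->buf_type == BATCH_BUFFER_INSTRUCTION && !s->is_ctx_wa)
		bb->bb_offset = s->ip_va - s->rb_va;

	/* Submit time (prepare_shadow_batch_buffer): shadow_ring_buffer_va
	 * now points at the real ring allocated by intel_ring_begin, so
	 * the saved offset yields a valid pointer again.
	 */
	if (bb->bb_offset)
		bb->bb_start_cmd_va = workload->shadow_ring_buffer_va
			+ bb->bb_offset;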

v2:
- refine some comments. (Zhenyu)
v3:
- fix typo in title. (Zhenyu)
v4:
- remove the unnecessary comments. (Zhenyu)
- add comments in bb_start_cmd_va update. (Zhenyu)

Fixes: 0a53bc07 ("drm/i915/gvt: Separate cmd scan from request allocation")
Cc: stable@vger.kernel.org  # v4.15
Cc: Zhenyu Wang <zhenyuw@linux.intel.com>
Cc: Yulei Zhang <yulei.zhang@intel.com>
Signed-off-by: fred gao <fred.gao@intel.com>
Signed-off-by: Zhenyu Wang <zhenyuw@linux.intel.com>
parent fa3dd623
--- a/drivers/gpu/drm/i915/gvt/cmd_parser.c
+++ b/drivers/gpu/drm/i915/gvt/cmd_parser.c
@@ -471,6 +471,7 @@ struct parser_exec_state {
 	 * used when ret from 2nd level batch buffer
 	 */
 	int saved_buf_addr_type;
+	bool is_ctx_wa;
 
 	struct cmd_info *info;
@@ -1715,6 +1716,11 @@ static int perform_bb_shadow(struct parser_exec_state *s)
 	bb->accessing = true;
 	bb->bb_start_cmd_va = s->ip_va;
 
+	if ((s->buf_type == BATCH_BUFFER_INSTRUCTION) && (!s->is_ctx_wa))
+		bb->bb_offset = s->ip_va - s->rb_va;
+	else
+		bb->bb_offset = 0;
+
 	/*
 	 * ip_va saves the virtual address of the shadow batch buffer, while
 	 * ip_gma saves the graphics address of the original batch buffer.
@@ -2571,6 +2577,7 @@ static int scan_workload(struct intel_vgpu_workload *workload)
 	s.ring_tail = gma_tail;
 	s.rb_va = workload->shadow_ring_buffer_va;
 	s.workload = workload;
+	s.is_ctx_wa = false;
 
 	if ((bypass_scan_mask & (1 << workload->ring_id)) ||
 			gma_head == gma_tail)
@@ -2624,6 +2631,7 @@ static int scan_wa_ctx(struct intel_shadow_wa_ctx *wa_ctx)
 	s.ring_tail = gma_tail;
 	s.rb_va = wa_ctx->indirect_ctx.shadow_va;
 	s.workload = workload;
+	s.is_ctx_wa = true;
 
 	if (!intel_gvt_ggtt_validate_range(s.vgpu, s.ring_start, s.ring_size)) {
 		ret = -EINVAL;
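Condensing the two entry points above makes the asymmetry visible;
this is just the relevant assignments from the hunks pulled together,
with comments restating the commit's rationale:

	/* scan_workload(): rb_va points at the temporary ring_scan_buffer
	 * copy, so perform_bb_shadow() records bb_offset for later rebasing.
	 */
	s.rb_va = workload->shadow_ring_buffer_va;
	s.is_ctx_wa = false;

	/* scan_wa_ctx(): rb_va points at the indirect context's own shadow
	 * buffer, which is not re-copied into the ring, so bb_offset stays
	 * 0 and no rebasing happens (this path is deliberately left out of
	 * the fix, per the commit message).
	 */
	s.rb_va = wa_ctx->indirect_ctx.shadow_va;
	s.is_ctx_wa = true;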
--- a/drivers/gpu/drm/i915/gvt/scheduler.c
+++ b/drivers/gpu/drm/i915/gvt/scheduler.c
@@ -426,6 +426,17 @@ static int prepare_shadow_batch_buffer(struct intel_vgpu_workload *workload)
 			goto err;
 		}
 
+		/* For a privileged batch buffer that is not wa_ctx, the
+		 * bb_start_cmd_va is only updated into ring_scan_buffer, not
+		 * the real ring address allocated in the later
+		 * copy_workload_to_ring_buffer. Please note that
+		 * shadow_ring_buffer_va now points to the real ring buffer va
+		 * in copy_workload_to_ring_buffer.
+		 */
+		if (bb->bb_offset)
+			bb->bb_start_cmd_va = workload->shadow_ring_buffer_va
+				+ bb->bb_offset;
+
 		/* relocate shadow batch buffer */
 		bb->bb_start_cmd_va[1] = i915_ggtt_offset(bb->vma);
 		if (gmadr_bytes == 8)
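The rebase has to happen before the relocation above writes through
the pointer. As a reminder of what is being patched, here is a sketch
of the MI_BATCH_BUFFER_START command as it sits in the ring; the hunk
above is truncated after the gmadr_bytes check, so the dword-2 write
is reconstructed from context and should be treated as an assumption:

	/* dword 0: MI_BATCH_BUFFER_START opcode and flags
	 * dword 1: batch buffer address, low 32 bits
	 * dword 2: address high bits (present when gmadr_bytes == 8)
	 * bb_start_cmd_va must point into the real ring here; otherwise
	 * the relocation patches the already-discarded scan buffer.
	 */
	bb->bb_start_cmd_va[1] = i915_ggtt_offset(bb->vma);
	if (gmadr_bytes == 8)
		bb->bb_start_cmd_va[2] = 0;	/* assumed continuation */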
--- a/drivers/gpu/drm/i915/gvt/scheduler.h
+++ b/drivers/gpu/drm/i915/gvt/scheduler.h
@@ -124,6 +124,7 @@ struct intel_vgpu_shadow_bb {
 	u32 *bb_start_cmd_va;
 	unsigned int clflush;
 	bool accessing;
+	unsigned long bb_offset;
 };
 
 #define workload_q_head(vgpu, ring_id) \