Commit 1be8f347 authored by Joonas Lahtinen

Merge tag 'gvt-next-2020-05-12' of https://github.com/intel/gvt-linux into drm-intel-next-queued

gvt-next-2020-05-12

- Support PPGTT update via LRI cmd (Zhenyu)
- Remove extra kmap for shadow ctx update (Zhenyu)
- Move workload cleanup out of execlist handling code (Zhenyu)
Signed-off-by: Joonas Lahtinen <joonas.lahtinen@linux.intel.com>
From: Zhenyu Wang <zhenyuw@linux.intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20200512094017.GX18545@zhen-hp.sh.intel.com
parents 7a00e68b 47e51832
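
The first change ("Support PPGTT update via LRI cmd") handles guests that switch PPGTT root pointers with an MI_LOAD_REGISTER_IMM in the ring buffer instead of only through the context image. As a rough, standalone illustration of that payload (plain userspace C, not GVT code; the mmio base and address values are invented, and the register offsets assume the usual 0x270-based PDP pair that the hunk below whitelists with F_CMD_ACCESS), the sketch shows why the parser reads the upper half of the root pointer at command dword 2 and the lower half at dword 4:

/* Standalone sketch of the guest LRI payload (not kernel code). */
#include <stdint.h>
#include <stdio.h>

/* MI_LOAD_REGISTER_IMM header: opcode 0x22, length field = 2*pairs - 1 */
#define MI_LOAD_REGISTER_IMM(n)	((0x22u << 23) | (2u * (n) - 1))
/* PDPn lower/upper dwords live at mmio_base + 0x270 + n*8 (+4 for upper) */
#define RING_PDP_LDW(base, n)	((base) + 0x270 + (n) * 8)
#define RING_PDP_UDW(base, n)	((base) + 0x270 + (n) * 8 + 4)

int main(void)
{
	uint32_t mmio_base = 0x2000;		/* hypothetical engine mmio base */
	uint64_t guest_pml4 = 0x1a2b3c000ull;	/* hypothetical guest root pointer */
	uint32_t cmd[5];

	cmd[0] = MI_LOAD_REGISTER_IMM(2);	/* two offset/value pairs follow */
	cmd[1] = RING_PDP_UDW(mmio_base, 0);	/* offset matched by is_cmd_update_pdps() */
	cmd[2] = (uint32_t)(guest_pml4 >> 32);	/* read back as cmd_val(s, 2) */
	cmd[3] = RING_PDP_LDW(mmio_base, 0);
	cmd[4] = (uint32_t)guest_pml4;		/* read back as cmd_val(s, 4) */

	for (int i = 0; i < 5; i++)
		printf("dw[%d] = 0x%08x\n", i, (unsigned int)cmd[i]);
	return 0;
}
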
@@ -882,6 +882,47 @@ static int mocs_cmd_reg_handler(struct parser_exec_state *s,
 	return 0;
 }
 
+static int is_cmd_update_pdps(unsigned int offset,
+			      struct parser_exec_state *s)
+{
+	u32 base = s->workload->engine->mmio_base;
+
+	return i915_mmio_reg_equal(_MMIO(offset), GEN8_RING_PDP_UDW(base, 0));
+}
+
+static int cmd_pdp_mmio_update_handler(struct parser_exec_state *s,
+		unsigned int offset, unsigned int index)
+{
+	struct intel_vgpu *vgpu = s->vgpu;
+	struct intel_vgpu_mm *shadow_mm = s->workload->shadow_mm;
+	struct intel_vgpu_mm *mm;
+	u64 pdps[GEN8_3LVL_PDPES];
+
+	if (shadow_mm->ppgtt_mm.root_entry_type ==
+	    GTT_TYPE_PPGTT_ROOT_L4_ENTRY) {
+		pdps[0] = (u64)cmd_val(s, 2) << 32;
+		pdps[0] |= cmd_val(s, 4);
+
+		mm = intel_vgpu_find_ppgtt_mm(vgpu, pdps);
+		if (!mm) {
+			gvt_vgpu_err("failed to get the 4-level shadow vm\n");
+			return -EINVAL;
+		}
+
+		intel_vgpu_mm_get(mm);
+		list_add_tail(&mm->ppgtt_mm.link,
+			      &s->workload->lri_shadow_mm);
+		*cmd_ptr(s, 2) = upper_32_bits(mm->ppgtt_mm.shadow_pdps[0]);
+		*cmd_ptr(s, 4) = lower_32_bits(mm->ppgtt_mm.shadow_pdps[0]);
+	} else {
+		/* Currently all guests use PML4 table and now can't
+		 * have a guest with 3-level table but uses LRI for
+		 * PPGTT update. So this is simply un-testable. */
+		GEM_BUG_ON(1);
+		gvt_vgpu_err("invalid shared shadow vm type\n");
+		return -EINVAL;
+	}
+	return 0;
+}
+
 static int cmd_reg_handler(struct parser_exec_state *s,
 		unsigned int offset, unsigned int index, char *cmd)
 {
@@ -920,6 +961,10 @@ static int cmd_reg_handler(struct parser_exec_state *s,
 		patch_value(s, cmd_ptr(s, index), VGT_PVINFO_PAGE);
 	}
 
+	if (is_cmd_update_pdps(offset, s) &&
+	    cmd_pdp_mmio_update_handler(s, offset, index))
+		return -EINVAL;
+
 	/* TODO
 	 * In order to let workload with inhibit context to generate
 	 * correct image data into memory, vregs values will be loaded to
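
Once is_cmd_update_pdps() matches the PDP0_UDW offset, the handler above reassembles the guest PML4 root from dwords 2 and 4, looks up the PPGTT shadow that GVT already built for it, and patches the shadow root back into the same dwords, so the command stream actually submitted to hardware points at shadow page tables. Below is a simplified, self-contained model of that patch step; the table, struct and values are invented stand-ins for intel_vgpu_find_ppgtt_mm() and the shadow_pdps bookkeeping, not the real data structures:

/* Simplified model of the in-place LRI patch (not GVT code). */
#include <stdint.h>
#include <stdio.h>

struct ppgtt_mm_model {		/* stand-in for struct intel_vgpu_mm */
	uint64_t guest_pdp0;	/* root pointer the guest wrote */
	uint64_t shadow_pdp0;	/* root pointer GVT built for it */
};

/* stand-in for intel_vgpu_find_ppgtt_mm(): match on the guest root */
static struct ppgtt_mm_model *find_mm(struct ppgtt_mm_model *tbl, int n,
				      uint64_t guest_pdp0)
{
	for (int i = 0; i < n; i++)
		if (tbl[i].guest_pdp0 == guest_pdp0)
			return &tbl[i];
	return NULL;
}

/* Patch dwords 2 and 4 of the LRI so the HW sees the shadow root. */
static int patch_pdp_lri(uint32_t cmd[5], struct ppgtt_mm_model *tbl, int n)
{
	uint64_t guest = ((uint64_t)cmd[2] << 32) | cmd[4];
	struct ppgtt_mm_model *mm = find_mm(tbl, n, guest);

	if (!mm)
		return -1;	/* no shadow built for this guest PPGTT */
	cmd[2] = (uint32_t)(mm->shadow_pdp0 >> 32);
	cmd[4] = (uint32_t)mm->shadow_pdp0;
	return 0;
}

int main(void)
{
	struct ppgtt_mm_model tbl[] = {
		{ .guest_pdp0 = 0x1a2b3c000ull, .shadow_pdp0 = 0x7f000000ull },
	};
	/* dwords 2 and 4 carry the guest root 0x1a2b3c000 */
	uint32_t cmd[5] = { 0, 0, 0x1, 0, 0xa2b3c000u };

	if (!patch_pdp_lri(cmd, tbl, 1))
		printf("patched: dw2=0x%08x dw4=0x%08x\n",
		       (unsigned int)cmd[2], (unsigned int)cmd[4]);
	return 0;
}
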
@@ -424,8 +424,6 @@ static int complete_execlist_workload(struct intel_vgpu_workload *workload)
 	ret = emulate_execlist_ctx_schedule_out(execlist, &workload->ctx_desc);
 out:
-	intel_vgpu_unpin_mm(workload->shadow_mm);
-	intel_vgpu_destroy_workload(workload);
 	return ret;
 }
@@ -1900,6 +1900,7 @@ struct intel_vgpu_mm *intel_vgpu_create_ppgtt_mm(struct intel_vgpu *vgpu,
 	INIT_LIST_HEAD(&mm->ppgtt_mm.list);
 	INIT_LIST_HEAD(&mm->ppgtt_mm.lru_list);
+	INIT_LIST_HEAD(&mm->ppgtt_mm.link);
 
 	if (root_entry_type == GTT_TYPE_PPGTT_ROOT_L4_ENTRY)
 		mm->ppgtt_mm.guest_pdps[0] = pdps[0];
@@ -160,6 +160,7 @@ struct intel_vgpu_mm {
 			struct list_head list;
 			struct list_head lru_list;
+			struct list_head link; /* possible LRI shadow mm list */
 		} ppgtt_mm;
 		struct {
 			void *virtual_ggtt;
@@ -2812,7 +2812,7 @@ static int init_bdw_mmio_info(struct intel_gvt *gvt)
 	MMIO_D(GAMTARBMODE, D_BDW_PLUS);
 
 #define RING_REG(base) _MMIO((base) + 0x270)
-	MMIO_RING_F(RING_REG, 32, 0, 0, 0, D_BDW_PLUS, NULL, NULL);
+	MMIO_RING_F(RING_REG, 32, F_CMD_ACCESS, 0, 0, D_BDW_PLUS, NULL, NULL);
 #undef RING_REG
 
 	MMIO_RING_GM_RDR(RING_HWS_PGA, D_BDW_PLUS, NULL, hws_pga_write);
@@ -58,10 +58,8 @@ static void set_context_pdp_root_pointer(
 static void update_shadow_pdps(struct intel_vgpu_workload *workload)
 {
-	struct drm_i915_gem_object *ctx_obj =
-		workload->req->context->state->obj;
 	struct execlist_ring_context *shadow_ring_context;
-	struct page *page;
+	struct intel_context *ctx = workload->req->context;
 
 	if (WARN_ON(!workload->shadow_mm))
 		return;
@@ -69,11 +67,9 @@ static void update_shadow_pdps(struct intel_vgpu_workload *workload)
 	if (WARN_ON(!atomic_read(&workload->shadow_mm->pincount)))
 		return;
 
-	page = i915_gem_object_get_page(ctx_obj, LRC_STATE_PN);
-	shadow_ring_context = kmap(page);
+	shadow_ring_context = (struct execlist_ring_context *)ctx->lrc_reg_state;
 	set_context_pdp_root_pointer(shadow_ring_context,
 			(void *)workload->shadow_mm->ppgtt_mm.shadow_pdps);
-	kunmap(page);
 }
 
 /*
@@ -646,10 +642,11 @@ static void release_shadow_batch_buffer(struct intel_vgpu_workload *workload)
 	}
 }
 
-static int prepare_workload(struct intel_vgpu_workload *workload)
+static int
+intel_vgpu_shadow_mm_pin(struct intel_vgpu_workload *workload)
 {
 	struct intel_vgpu *vgpu = workload->vgpu;
-	struct intel_vgpu_submission *s = &vgpu->submission;
+	struct intel_vgpu_mm *m;
 	int ret = 0;
 
 	ret = intel_vgpu_pin_mm(workload->shadow_mm);
@@ -664,6 +661,52 @@ static int prepare_workload(struct intel_vgpu_workload *workload)
 		return -EINVAL;
 	}
 
+	if (!list_empty(&workload->lri_shadow_mm)) {
+		list_for_each_entry(m, &workload->lri_shadow_mm,
+				    ppgtt_mm.link) {
+			ret = intel_vgpu_pin_mm(m);
+			if (ret) {
+				list_for_each_entry_from_reverse(m,
+						&workload->lri_shadow_mm,
+						ppgtt_mm.link)
+					intel_vgpu_unpin_mm(m);
+				gvt_vgpu_err("LRI shadow ppgtt fail to pin\n");
+				break;
+			}
+		}
+	}
+
+	if (ret)
+		intel_vgpu_unpin_mm(workload->shadow_mm);
+
+	return ret;
+}
+
+static void
+intel_vgpu_shadow_mm_unpin(struct intel_vgpu_workload *workload)
+{
+	struct intel_vgpu_mm *m;
+
+	if (!list_empty(&workload->lri_shadow_mm)) {
+		list_for_each_entry(m, &workload->lri_shadow_mm,
+				    ppgtt_mm.link)
+			intel_vgpu_unpin_mm(m);
+	}
+	intel_vgpu_unpin_mm(workload->shadow_mm);
+}
+
+static int prepare_workload(struct intel_vgpu_workload *workload)
+{
+	struct intel_vgpu *vgpu = workload->vgpu;
+	struct intel_vgpu_submission *s = &vgpu->submission;
+	int ret = 0;
+
+	ret = intel_vgpu_shadow_mm_pin(workload);
+	if (ret) {
+		gvt_vgpu_err("fail to pin shadow mm\n");
+		return ret;
+	}
+
 	update_shadow_pdps(workload);
 
 	set_context_ppgtt_from_shadow(workload, s->shadow[workload->engine->id]);
@@ -710,7 +753,7 @@ static int prepare_workload(struct intel_vgpu_workload *workload)
 err_shadow_batch:
 	release_shadow_batch_buffer(workload);
 err_unpin_mm:
-	intel_vgpu_unpin_mm(workload->shadow_mm);
+	intel_vgpu_shadow_mm_unpin(workload);
 	return ret;
 }
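
intel_vgpu_shadow_mm_pin() above pins each mm on the lri_shadow_mm list in order and, if any pin fails, walks back over the entries already visited to unpin them before also unpinning the main shadow_mm. The intent of that rollback can be modelled with a plain array instead of the kernel list API; the sketch below is standalone and the fail-injection on entry 2 is invented purely for illustration:

/* Standalone model of pin-all-or-roll-back (not the kernel list API). */
#include <stdio.h>

#define N 3

static int pincount[N];

/* Invented fail-injection: entry 2 refuses to pin. */
static int pin(int i)
{
	if (i == 2)
		return -1;
	pincount[i]++;
	return 0;
}

static void unpin(int i)
{
	if (pincount[i] > 0)
		pincount[i]--;
}

static int pin_all(void)
{
	int ret = 0;

	for (int i = 0; i < N; i++) {
		ret = pin(i);
		if (ret) {
			/* roll back everything pinned so far */
			for (int j = i - 1; j >= 0; j--)
				unpin(j);
			break;
		}
	}
	return ret;
}

int main(void)
{
	int ret = pin_all();

	printf("pin_all returned %d\n", ret);
	for (int i = 0; i < N; i++)
		printf("entry %d pincount %d\n", i, pincount[i]);
	return 0;
}
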
@@ -820,6 +863,37 @@ pick_next_workload(struct intel_gvt *gvt, struct intel_engine_cs *engine)
 	return workload;
 }
 
+static void update_guest_pdps(struct intel_vgpu *vgpu,
+			      u64 ring_context_gpa, u32 pdp[8])
+{
+	u64 gpa;
+	int i;
+
+	gpa = ring_context_gpa + RING_CTX_OFF(pdps[0].val);
+
+	for (i = 0; i < 8; i++)
+		intel_gvt_hypervisor_write_gpa(vgpu,
+				gpa + i * 8, &pdp[7 - i], 4);
+}
+
+static bool
+check_shadow_context_ppgtt(struct execlist_ring_context *c, struct intel_vgpu_mm *m)
+{
+	if (m->ppgtt_mm.root_entry_type == GTT_TYPE_PPGTT_ROOT_L4_ENTRY) {
+		u64 shadow_pdp = c->pdps[7].val | (u64) c->pdps[6].val << 32;
+
+		if (shadow_pdp != m->ppgtt_mm.shadow_pdps[0]) {
+			gvt_dbg_mm("4-level context ppgtt not match LRI command\n");
+			return false;
+		}
+		return true;
+	} else {
+		/* see comment in LRI handler in cmd_parser.c */
+		gvt_dbg_mm("invalid shadow mm type\n");
+		return false;
+	}
+}
+
 static void update_guest_context(struct intel_vgpu_workload *workload)
 {
 	struct i915_request *rq = workload->req;
@@ -905,6 +979,15 @@ static void update_guest_context(struct intel_vgpu_workload *workload)
 	shadow_ring_context = (void *) ctx->lrc_reg_state;
 
+	if (!list_empty(&workload->lri_shadow_mm)) {
+		struct intel_vgpu_mm *m = list_last_entry(&workload->lri_shadow_mm,
+							  struct intel_vgpu_mm,
+							  ppgtt_mm.link);
+		GEM_BUG_ON(!check_shadow_context_ppgtt(shadow_ring_context, m));
+		update_guest_pdps(vgpu, workload->ring_context_gpa,
+				  (void *)m->ppgtt_mm.guest_pdps);
+	}
+
 #define COPY_REG(name) \
 	intel_gvt_hypervisor_write_gpa(vgpu, workload->ring_context_gpa + \
 		RING_CTX_OFF(name.val), &shadow_ring_context->name.val, 4)
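
The reversal in update_guest_pdps() and the pdps[6]/pdps[7] pairing in check_shadow_context_ppgtt() both follow from the ring-context layout: the context image stores the PDP registers as offset/value pairs ordered PDP3_UDW, PDP3_LDW, ..., PDP0_UDW, PDP0_LDW, so the 64-bit PDP0 (the PML4 root in 4-level mode) sits in slots 6 and 7, and consecutive slots are 8 bytes apart even though only the 4-byte value half is written. A standalone sketch of that index mapping follows; little-endian is assumed (as on the hardware), and the struct is a trimmed stand-in for execlist_ring_context, not the real layout:

/* Trimmed stand-in for the ring-context PDP block (not the real struct). */
#include <stdint.h>
#include <stdio.h>

struct mmio_pair {	/* models an offset/value slot in the context image */
	uint32_t addr;
	uint32_t val;
};

int main(void)
{
	/* guest_pdps as GVT stores them: u64[4], PDP0 first */
	uint64_t guest_pdps[4] = { 0x1a2b3c000ull, 0, 0, 0 };
	/* same u64[4] -> u32[8] view the driver passes as (void *)guest_pdps */
	uint32_t *pdp = (uint32_t *)guest_pdps;
	struct mmio_pair ctx_pdps[8];	/* image order: PDP3_UDW ... PDP0_LDW */

	/* mirror of the pdp[7 - i] reversal in update_guest_pdps() */
	for (int i = 0; i < 8; i++)
		ctx_pdps[i].val = pdp[7 - i];

	/* PDP0 is recovered from slots 6/7, as check_shadow_context_ppgtt() does */
	uint64_t pdp0 = ctx_pdps[7].val | (uint64_t)ctx_pdps[6].val << 32;

	printf("PDP0 = 0x%llx\n", (unsigned long long)pdp0);
	return pdp0 == guest_pdps[0] ? 0 : 1;
}
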
@@ -1013,6 +1096,9 @@ static void complete_current_workload(struct intel_gvt *gvt, int ring_id)
 	workload->complete(workload);
 
+	intel_vgpu_shadow_mm_unpin(workload);
+	intel_vgpu_destroy_workload(workload);
+
 	atomic_dec(&s->running_workload_num);
 	wake_up(&scheduler->workload_complete_wq);
@@ -1406,6 +1492,16 @@ void intel_vgpu_destroy_workload(struct intel_vgpu_workload *workload)
 	release_shadow_batch_buffer(workload);
 	release_shadow_wa_ctx(&workload->wa_ctx);
 
+	if (!list_empty(&workload->lri_shadow_mm)) {
+		struct intel_vgpu_mm *m, *mm;
+		list_for_each_entry_safe(m, mm, &workload->lri_shadow_mm,
+					 ppgtt_mm.link) {
+			list_del(&m->ppgtt_mm.link);
+			intel_vgpu_mm_put(m);
+		}
+	}
+
+	GEM_BUG_ON(!list_empty(&workload->lri_shadow_mm));
 	if (workload->shadow_mm)
 		intel_vgpu_mm_put(workload->shadow_mm);
@@ -1424,6 +1520,7 @@ alloc_workload(struct intel_vgpu *vgpu)
 	INIT_LIST_HEAD(&workload->list);
 	INIT_LIST_HEAD(&workload->shadow_bb);
+	INIT_LIST_HEAD(&workload->lri_shadow_mm);
 
 	init_waitqueue_head(&workload->shadow_ctx_status_wq);
 	atomic_set(&workload->shadow_ctx_active, 0);
@@ -87,6 +87,7 @@ struct intel_vgpu_workload {
 	int status;
 
 	struct intel_vgpu_mm *shadow_mm;
+	struct list_head lri_shadow_mm; /* For PPGTT load cmd */
 
 	/* different submission model may need different handler */
 	int (*prepare)(struct intel_vgpu_workload *);
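
Taken together, each PPGTT referenced by an LRI follows a balanced pairing across the files touched here: intel_vgpu_mm_get() when the command is parsed (cmd_parser.c), intel_vgpu_pin_mm() via intel_vgpu_shadow_mm_pin() before submission, intel_vgpu_shadow_mm_unpin() from complete_current_workload(), and intel_vgpu_mm_put() in intel_vgpu_destroy_workload(). A compact standalone model of that ordering is below; the counters and helper names are invented stand-ins used only to show the pairing:

/* Invented stand-in counters; only the balanced ordering matters. */
#include <assert.h>
#include <stdio.h>

struct mm_model {
	int refcount;	/* lifetime: get at LRI parse, put at workload destroy */
	int pincount;	/* residency: pin at prepare, unpin at complete */
};

static void mm_get(struct mm_model *m)   { m->refcount++; }
static void mm_put(struct mm_model *m)   { m->refcount--; }
static void mm_pin(struct mm_model *m)   { m->pincount++; }
static void mm_unpin(struct mm_model *m) { m->pincount--; }

int main(void)
{
	struct mm_model lri_mm = { .refcount = 1, .pincount = 0 };

	mm_get(&lri_mm);	/* cmd_pdp_mmio_update_handler(): added to lri_shadow_mm */
	mm_pin(&lri_mm);	/* intel_vgpu_shadow_mm_pin(): before submission */
	/* ... workload runs ... */
	mm_unpin(&lri_mm);	/* intel_vgpu_shadow_mm_unpin(): on completion */
	mm_put(&lri_mm);	/* intel_vgpu_destroy_workload(): drop list reference */

	assert(lri_mm.pincount == 0);
	printf("refcount back to %d, pincount %d\n", lri_mm.refcount, lri_mm.pincount);
	return 0;
}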