Commit baa68f6e authored by Daniel Vetter

Merge tag 'gvt-next-2017-08-15' of https://github.com/01org/gvt-linux into drm-intel-next-queued

gvt-next-2017-08-15

gvt update for 4.14
- MMIO save/restore optimization (Changbin)
- Split workload scan vs. dispatch for more parallel exec (Ping)
- vGPU full 48bit ppgtt support (Joonas, Tina)
- vGPU hw id expose for perf (Zhenyu)
- other misc cleanup and fixes
Signed-off-by: Daniel Vetter <daniel.vetter@ffwll.ch>
Link: https://patchwork.freedesktop.org/patch/msgid/20170815023940.skhjfcsyrao7axqi@zhen-hp.sh.intel.com
parents d59814a5 6b3816d6
@@ -285,8 +285,8 @@ static int alloc_resource(struct intel_vgpu *vgpu,
    return 0;
no_enough_resource:
-   gvt_vgpu_err("fail to allocate resource %s\n", item);
-   gvt_vgpu_err("request %luMB avail %luMB max %luMB taken %luMB\n",
+   gvt_err("fail to allocate resource %s\n", item);
+   gvt_err("request %luMB avail %luMB max %luMB taken %luMB\n",
        BYTES_TO_MB(request), BYTES_TO_MB(avail),
        BYTES_TO_MB(max), BYTES_TO_MB(taken));
    return -ENOSPC;
......
@@ -1382,13 +1382,13 @@ static inline int cmd_address_audit(struct parser_exec_state *s,
            ret = -EINVAL;
            goto err;
        }
-   } else if ((!vgpu_gmadr_is_valid(s->vgpu, guest_gma)) ||
-           (!vgpu_gmadr_is_valid(s->vgpu,
-               guest_gma + op_size - 1))) {
+   } else if (!intel_gvt_ggtt_validate_range(vgpu, guest_gma, op_size)) {
        ret = -EINVAL;
        goto err;
    }
    return 0;
err:
    gvt_vgpu_err("cmd_parser: Malicious %s detected, addr=0x%lx, len=%d!\n",
            s->info->name, guest_gma, op_size);
@@ -2647,7 +2647,7 @@ static int shadow_workload_ring_buffer(struct intel_vgpu_workload *workload)
    return 0;
}
-int intel_gvt_scan_and_shadow_workload(struct intel_vgpu_workload *workload)
+int intel_gvt_scan_and_shadow_ringbuffer(struct intel_vgpu_workload *workload)
{
    int ret;
    struct intel_vgpu *vgpu = workload->vgpu;
......
@@ -42,7 +42,7 @@ void intel_gvt_clean_cmd_parser(struct intel_gvt *gvt);
int intel_gvt_init_cmd_parser(struct intel_gvt *gvt);
-int intel_gvt_scan_and_shadow_workload(struct intel_vgpu_workload *workload);
+int intel_gvt_scan_and_shadow_ringbuffer(struct intel_vgpu_workload *workload);
int intel_gvt_scan_and_shadow_wa_ctx(struct intel_shadow_wa_ctx *wa_ctx);
......
@@ -605,6 +605,7 @@ static int submit_context(struct intel_vgpu *vgpu, int ring_id,
    struct list_head *q = workload_q_head(vgpu, ring_id);
    struct intel_vgpu_workload *last_workload = get_last_workload(q);
    struct intel_vgpu_workload *workload = NULL;
+   struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv;
    u64 ring_context_gpa;
    u32 head, tail, start, ctl, ctx_ctl, per_ctx, indirect_ctx;
    int ret;
@@ -668,6 +669,7 @@ static int submit_context(struct intel_vgpu *vgpu, int ring_id,
    workload->complete = complete_execlist_workload;
    workload->status = -EINPROGRESS;
    workload->emulate_schedule_in = emulate_schedule_in;
+   workload->shadowed = false;
    if (ring_id == RCS) {
        intel_gvt_hypervisor_read_gpa(vgpu, ring_context_gpa +
@@ -701,6 +703,17 @@ static int submit_context(struct intel_vgpu *vgpu, int ring_id,
        return ret;
    }
+   /* Only scan and shadow the first workload in the queue
+    * as there is only one pre-allocated buf-obj for shadow.
+    */
+   if (list_empty(workload_q_head(vgpu, ring_id))) {
+       intel_runtime_pm_get(dev_priv);
+       mutex_lock(&dev_priv->drm.struct_mutex);
+       intel_gvt_scan_and_shadow_workload(workload);
+       mutex_unlock(&dev_priv->drm.struct_mutex);
+       intel_runtime_pm_put(dev_priv);
+   }
    queue_workload(workload);
    return 0;
}
@@ -783,6 +796,8 @@ static void clean_workloads(struct intel_vgpu *vgpu, unsigned long engine_mask)
            list_del_init(&pos->list);
            free_workload(pos);
        }
+       clear_bit(engine->id, vgpu->shadow_ctx_desc_updated);
    }
}
......
@@ -49,14 +49,18 @@ struct intel_gvt_gtt_entry {
};
struct intel_gvt_gtt_pte_ops {
-   struct intel_gvt_gtt_entry *(*get_entry)(void *pt,
-       struct intel_gvt_gtt_entry *e,
-       unsigned long index, bool hypervisor_access, unsigned long gpa,
-       struct intel_vgpu *vgpu);
-   struct intel_gvt_gtt_entry *(*set_entry)(void *pt,
-       struct intel_gvt_gtt_entry *e,
-       unsigned long index, bool hypervisor_access, unsigned long gpa,
-       struct intel_vgpu *vgpu);
+   int (*get_entry)(void *pt,
+           struct intel_gvt_gtt_entry *e,
+           unsigned long index,
+           bool hypervisor_access,
+           unsigned long gpa,
+           struct intel_vgpu *vgpu);
+   int (*set_entry)(void *pt,
+           struct intel_gvt_gtt_entry *e,
+           unsigned long index,
+           bool hypervisor_access,
+           unsigned long gpa,
+           struct intel_vgpu *vgpu);
    bool (*test_present)(struct intel_gvt_gtt_entry *e);
    void (*clear_present)(struct intel_gvt_gtt_entry *e);
    bool (*test_pse)(struct intel_gvt_gtt_entry *e);
@@ -143,12 +147,12 @@ struct intel_vgpu_mm {
    struct intel_vgpu *vgpu;
};
-extern struct intel_gvt_gtt_entry *intel_vgpu_mm_get_entry(
+extern int intel_vgpu_mm_get_entry(
        struct intel_vgpu_mm *mm,
        void *page_table, struct intel_gvt_gtt_entry *e,
        unsigned long index);
-extern struct intel_gvt_gtt_entry *intel_vgpu_mm_set_entry(
+extern int intel_vgpu_mm_set_entry(
        struct intel_vgpu_mm *mm,
        void *page_table, struct intel_gvt_gtt_entry *e,
        unsigned long index);
@@ -208,7 +212,7 @@ extern void intel_vgpu_clean_gtt(struct intel_vgpu *vgpu);
void intel_vgpu_reset_ggtt(struct intel_vgpu *vgpu);
extern int intel_gvt_init_gtt(struct intel_gvt *gvt);
-extern void intel_vgpu_reset_gtt(struct intel_vgpu *vgpu, bool dmlr);
+void intel_vgpu_reset_gtt(struct intel_vgpu *vgpu);
extern void intel_gvt_clean_gtt(struct intel_gvt *gvt);
extern struct intel_vgpu_mm *intel_gvt_find_ppgtt_mm(struct intel_vgpu *vgpu,
......
@@ -167,6 +167,7 @@ struct intel_vgpu {
    atomic_t running_workload_num;
    DECLARE_BITMAP(tlb_handle_pending, I915_NUM_ENGINES);
    struct i915_gem_context *shadow_ctx;
+   DECLARE_BITMAP(shadow_ctx_desc_updated, I915_NUM_ENGINES);
#if IS_ENABLED(CONFIG_DRM_I915_GVT_KVMGT)
    struct {
@@ -470,6 +471,8 @@ int intel_vgpu_init_opregion(struct intel_vgpu *vgpu, u32 gpa);
int intel_vgpu_emulate_opregion_request(struct intel_vgpu *vgpu, u32 swsci);
void populate_pvinfo_page(struct intel_vgpu *vgpu);
+int intel_gvt_scan_and_shadow_workload(struct intel_vgpu_workload *workload);
struct intel_gvt_ops {
    int (*emulate_cfg_read)(struct intel_vgpu *, unsigned int, void *,
            unsigned int);
......
@@ -113,9 +113,17 @@ static int new_mmio_info(struct intel_gvt *gvt,
        info->offset = i;
        p = find_mmio_info(gvt, info->offset);
-       if (p)
-           gvt_err("dup mmio definition offset %x\n",
+       if (p) {
+           WARN(1, "dup mmio definition offset %x\n",
                info->offset);
+           kfree(info);
+           /* We return -EEXIST here to make GVT-g load fail.
+            * So duplicated MMIO can be found as soon as
+            * possible.
+            */
+           return -EEXIST;
+       }
        info->ro_mask = ro_mask;
        info->device = device;
@@ -2583,7 +2591,6 @@ static int init_broadwell_mmio_info(struct intel_gvt *gvt)
    MMIO_F(0x24d0, 48, F_CMD_ACCESS, 0, 0, D_BDW_PLUS,
        NULL, force_nonpriv_write);
-   MMIO_D(0x22040, D_BDW_PLUS);
    MMIO_D(0x44484, D_BDW_PLUS);
    MMIO_D(0x4448c, D_BDW_PLUS);
@@ -2641,7 +2648,6 @@ static int init_skl_mmio_info(struct intel_gvt *gvt)
    MMIO_D(HSW_PWR_WELL_BIOS, D_SKL_PLUS);
    MMIO_DH(HSW_PWR_WELL_DRIVER, D_SKL_PLUS, NULL,
        skl_power_well_ctl_write);
-   MMIO_DH(GEN6_PCODE_MAILBOX, D_SKL_PLUS, NULL, mailbox_write);
    MMIO_D(0xa210, D_SKL_PLUS);
    MMIO_D(GEN9_MEDIA_PG_IDLE_HYSTERESIS, D_SKL_PLUS);
@@ -2833,7 +2839,6 @@ static int init_skl_mmio_info(struct intel_gvt *gvt)
    MMIO_D(0x320f0, D_SKL | D_KBL);
    MMIO_DFH(_REG_VCS2_EXCC, D_SKL_PLUS, F_CMD_ACCESS, NULL, NULL);
-   MMIO_DFH(_REG_VECS_EXCC, D_SKL_PLUS, F_CMD_ACCESS, NULL, NULL);
    MMIO_D(0x70034, D_SKL_PLUS);
    MMIO_D(0x71034, D_SKL_PLUS);
    MMIO_D(0x72034, D_SKL_PLUS);
@@ -2851,10 +2856,7 @@ static int init_skl_mmio_info(struct intel_gvt *gvt)
        NULL, NULL);
    MMIO_D(0x4ab8, D_KBL);
-   MMIO_D(0x940c, D_SKL_PLUS);
    MMIO_D(0x2248, D_SKL_PLUS | D_KBL);
-   MMIO_D(0x4ab0, D_SKL | D_KBL);
-   MMIO_D(0x20d4, D_SKL | D_KBL);
    return 0;
}
......
@@ -1170,10 +1170,27 @@ vgpu_id_show(struct device *dev, struct device_attribute *attr,
    return sprintf(buf, "\n");
}
+static ssize_t
+hw_id_show(struct device *dev, struct device_attribute *attr,
+       char *buf)
+{
+   struct mdev_device *mdev = mdev_from_dev(dev);
+   if (mdev) {
+       struct intel_vgpu *vgpu = (struct intel_vgpu *)
+           mdev_get_drvdata(mdev);
+       return sprintf(buf, "%u\n",
+              vgpu->shadow_ctx->hw_id);
+   }
+   return sprintf(buf, "\n");
+}
static DEVICE_ATTR_RO(vgpu_id);
+static DEVICE_ATTR_RO(hw_id);
static struct attribute *intel_vgpu_attrs[] = {
    &dev_attr_vgpu_id.attr,
+   &dev_attr_hw_id.attr,
    NULL
};
......
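Note on the hunk above: the new hw_id attribute exposes the vGPU shadow context's hardware context ID so that perf tooling can attribute GPU activity to a specific vGPU (the "vGPU hw id expose for perf" item in the tag summary). A minimal userspace sketch of reading it follows; the sysfs location of the attribute (an mdev attribute group under /sys/bus/mdev/devices/<uuid>/) is an assumption for illustration and is not shown in this diff.

/* Hypothetical usage sketch: print a vGPU's hw_id read from sysfs.
 * The caller passes the attribute path, e.g. something like
 * /sys/bus/mdev/devices/<uuid>/intel_vgpu/hw_id (the exact path is an
 * assumption, not taken from this diff).
 */
#include <stdio.h>

int main(int argc, char **argv)
{
    FILE *f;
    char buf[32];

    if (argc != 2) {
        fprintf(stderr, "usage: %s <path-to-hw_id-attribute>\n", argv[0]);
        return 1;
    }

    f = fopen(argv[1], "r");
    if (!f) {
        perror("fopen");
        return 1;
    }

    if (fgets(buf, sizeof(buf), f))
        printf("vGPU hw_id: %s", buf);  /* value produced by hw_id_show() */

    fclose(f);
    return 0;
}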
@@ -207,18 +207,16 @@ static void load_mocs(struct intel_vgpu *vgpu, int ring_id)
    offset.reg = regs[ring_id];
    for (i = 0; i < 64; i++) {
-       gen9_render_mocs[ring_id][i] = I915_READ(offset);
+       gen9_render_mocs[ring_id][i] = I915_READ_FW(offset);
        I915_WRITE(offset, vgpu_vreg(vgpu, offset));
-       POSTING_READ(offset);
        offset.reg += 4;
    }
    if (ring_id == RCS) {
        l3_offset.reg = 0xb020;
        for (i = 0; i < 32; i++) {
-           gen9_render_mocs_L3[i] = I915_READ(l3_offset);
-           I915_WRITE(l3_offset, vgpu_vreg(vgpu, l3_offset));
-           POSTING_READ(l3_offset);
+           gen9_render_mocs_L3[i] = I915_READ_FW(l3_offset);
+           I915_WRITE_FW(l3_offset, vgpu_vreg(vgpu, l3_offset));
            l3_offset.reg += 4;
        }
    }
@@ -242,18 +240,16 @@ static void restore_mocs(struct intel_vgpu *vgpu, int ring_id)
    offset.reg = regs[ring_id];
    for (i = 0; i < 64; i++) {
-       vgpu_vreg(vgpu, offset) = I915_READ(offset);
-       I915_WRITE(offset, gen9_render_mocs[ring_id][i]);
-       POSTING_READ(offset);
+       vgpu_vreg(vgpu, offset) = I915_READ_FW(offset);
+       I915_WRITE_FW(offset, gen9_render_mocs[ring_id][i]);
        offset.reg += 4;
    }
    if (ring_id == RCS) {
        l3_offset.reg = 0xb020;
        for (i = 0; i < 32; i++) {
-           vgpu_vreg(vgpu, l3_offset) = I915_READ(l3_offset);
-           I915_WRITE(l3_offset, gen9_render_mocs_L3[i]);
-           POSTING_READ(l3_offset);
+           vgpu_vreg(vgpu, l3_offset) = I915_READ_FW(l3_offset);
+           I915_WRITE_FW(l3_offset, gen9_render_mocs_L3[i]);
            l3_offset.reg += 4;
        }
    }
@@ -272,6 +268,7 @@ static void switch_mmio_to_vgpu(struct intel_vgpu *vgpu, int ring_id)
    u32 ctx_ctrl = reg_state[CTX_CONTEXT_CONTROL_VAL];
    u32 inhibit_mask =
        _MASKED_BIT_ENABLE(CTX_CTRL_ENGINE_CTX_RESTORE_INHIBIT);
+   i915_reg_t last_reg = _MMIO(0);
    if (IS_SKYLAKE(vgpu->gvt->dev_priv)
        || IS_KABYLAKE(vgpu->gvt->dev_priv)) {
@@ -287,7 +284,7 @@ static void switch_mmio_to_vgpu(struct intel_vgpu *vgpu, int ring_id)
        if (mmio->ring_id != ring_id)
            continue;
-       mmio->value = I915_READ(mmio->reg);
+       mmio->value = I915_READ_FW(mmio->reg);
        /*
         * if it is an inhibit context, load in_context mmio
@@ -304,13 +301,18 @@ static void switch_mmio_to_vgpu(struct intel_vgpu *vgpu, int ring_id)
        else
            v = vgpu_vreg(vgpu, mmio->reg);
-       I915_WRITE(mmio->reg, v);
-       POSTING_READ(mmio->reg);
+       I915_WRITE_FW(mmio->reg, v);
+       last_reg = mmio->reg;
        trace_render_mmio(vgpu->id, "load",
                  i915_mmio_reg_offset(mmio->reg),
                  mmio->value, v);
    }
+   /* Make sure the swiched MMIOs has taken effect. */
+   if (likely(INTEL_GVT_MMIO_OFFSET(last_reg)))
+       I915_READ_FW(last_reg);
    handle_tlb_pending_event(vgpu, ring_id);
}
@@ -319,6 +321,7 @@ static void switch_mmio_to_host(struct intel_vgpu *vgpu, int ring_id)
{
    struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv;
    struct render_mmio *mmio;
+   i915_reg_t last_reg = _MMIO(0);
    u32 v;
    int i, array_size;
@@ -335,7 +338,7 @@ static void switch_mmio_to_host(struct intel_vgpu *vgpu, int ring_id)
        if (mmio->ring_id != ring_id)
            continue;
-       vgpu_vreg(vgpu, mmio->reg) = I915_READ(mmio->reg);
+       vgpu_vreg(vgpu, mmio->reg) = I915_READ_FW(mmio->reg);
        if (mmio->mask) {
            vgpu_vreg(vgpu, mmio->reg) &= ~(mmio->mask << 16);
@@ -346,13 +349,17 @@ static void switch_mmio_to_host(struct intel_vgpu *vgpu, int ring_id)
        if (mmio->in_context)
            continue;
-       I915_WRITE(mmio->reg, v);
-       POSTING_READ(mmio->reg);
+       I915_WRITE_FW(mmio->reg, v);
+       last_reg = mmio->reg;
        trace_render_mmio(vgpu->id, "restore",
                  i915_mmio_reg_offset(mmio->reg),
                  mmio->value, v);
    }
+   /* Make sure the swiched MMIOs has taken effect. */
+   if (likely(INTEL_GVT_MMIO_OFFSET(last_reg)))
+       I915_READ_FW(last_reg);
}
/**
@@ -367,12 +374,23 @@ static void switch_mmio_to_host(struct intel_vgpu *vgpu, int ring_id)
void intel_gvt_switch_mmio(struct intel_vgpu *pre,
               struct intel_vgpu *next, int ring_id)
{
+   struct drm_i915_private *dev_priv;
    if (WARN_ON(!pre && !next))
        return;
    gvt_dbg_render("switch ring %d from %s to %s\n", ring_id,
               pre ? "vGPU" : "host", next ? "vGPU" : "HOST");
+   dev_priv = pre ? pre->gvt->dev_priv : next->gvt->dev_priv;
+   /**
+    * We are using raw mmio access wrapper to improve the
+    * performace for batch mmio read/write, so we need
+    * handle forcewake mannually.
+    */
+   intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL);
    /**
     * TODO: Optimize for vGPU to vGPU switch by merging
     * switch_mmio_to_host() and switch_mmio_to_vgpu().
@@ -382,4 +400,6 @@ void intel_gvt_switch_mmio(struct intel_vgpu *pre,
    if (next)
        switch_mmio_to_vgpu(next, ring_id);
+   intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);
}
@@ -184,41 +184,52 @@ static int shadow_context_status_change(struct notifier_block *nb,
    return NOTIFY_OK;
}
-static int dispatch_workload(struct intel_vgpu_workload *workload)
+static void shadow_context_descriptor_update(struct i915_gem_context *ctx,
+       struct intel_engine_cs *engine)
+{
+   struct intel_context *ce = &ctx->engine[engine->id];
+   u64 desc = 0;
+   desc = ce->lrc_desc;
+   /* Update bits 0-11 of the context descriptor which includes flags
+    * like GEN8_CTX_* cached in desc_template
+    */
+   desc &= U64_MAX << 12;
+   desc |= ctx->desc_template & ((1ULL << 12) - 1);
+   ce->lrc_desc = desc;
+}
+/**
+ * intel_gvt_scan_and_shadow_workload - audit the workload by scanning and
+ * shadow it as well, include ringbuffer,wa_ctx and ctx.
+ * @workload: an abstract entity for each execlist submission.
+ *
+ * This function is called before the workload submitting to i915, to make
+ * sure the content of the workload is valid.
+ */
+int intel_gvt_scan_and_shadow_workload(struct intel_vgpu_workload *workload)
{
    int ring_id = workload->ring_id;
    struct i915_gem_context *shadow_ctx = workload->vgpu->shadow_ctx;
    struct drm_i915_private *dev_priv = workload->vgpu->gvt->dev_priv;
-   struct intel_engine_cs *engine = dev_priv->engine[ring_id];
    struct drm_i915_gem_request *rq;
    struct intel_vgpu *vgpu = workload->vgpu;
-   struct intel_ring *ring;
    int ret;
-   gvt_dbg_sched("ring id %d prepare to dispatch workload %p\n",
-           ring_id, workload);
+   lockdep_assert_held(&dev_priv->drm.struct_mutex);
+   if (workload->shadowed)
+       return 0;
    shadow_ctx->desc_template &= ~(0x3 << GEN8_CTX_ADDRESSING_MODE_SHIFT);
    shadow_ctx->desc_template |= workload->ctx_desc.addressing_mode <<
                     GEN8_CTX_ADDRESSING_MODE_SHIFT;
-   mutex_lock(&dev_priv->drm.struct_mutex);
-   /* pin shadow context by gvt even the shadow context will be pinned
-    * when i915 alloc request. That is because gvt will update the guest
-    * context from shadow context when workload is completed, and at that
-    * moment, i915 may already unpined the shadow context to make the
-    * shadow_ctx pages invalid. So gvt need to pin itself. After update
-    * the guest context, gvt can unpin the shadow_ctx safely.
-    */
-   ring = engine->context_pin(engine, shadow_ctx);
-   if (IS_ERR(ring)) {
-       ret = PTR_ERR(ring);
-       gvt_vgpu_err("fail to pin shadow context\n");
-       workload->status = ret;
-       mutex_unlock(&dev_priv->drm.struct_mutex);
-       return ret;
-   }
+   if (!test_and_set_bit(ring_id, vgpu->shadow_ctx_desc_updated))
+       shadow_context_descriptor_update(shadow_ctx,
+                   dev_priv->engine[ring_id]);
    rq = i915_gem_request_alloc(dev_priv->engine[ring_id], shadow_ctx);
    if (IS_ERR(rq)) {
@@ -231,7 +242,7 @@ static int dispatch_workload(struct intel_vgpu_workload *workload)
    workload->req = i915_gem_request_get(rq);
-   ret = intel_gvt_scan_and_shadow_workload(workload);
+   ret = intel_gvt_scan_and_shadow_ringbuffer(workload);
    if (ret)
        goto out;
@@ -246,25 +257,61 @@ static int dispatch_workload(struct intel_vgpu_workload *workload)
    if (ret)
        goto out;
+   workload->shadowed = true;
+out:
+   return ret;
+}
+static int dispatch_workload(struct intel_vgpu_workload *workload)
+{
+   int ring_id = workload->ring_id;
+   struct i915_gem_context *shadow_ctx = workload->vgpu->shadow_ctx;
+   struct drm_i915_private *dev_priv = workload->vgpu->gvt->dev_priv;
+   struct intel_engine_cs *engine = dev_priv->engine[ring_id];
+   struct intel_vgpu *vgpu = workload->vgpu;
+   struct intel_ring *ring;
+   int ret = 0;
+   gvt_dbg_sched("ring id %d prepare to dispatch workload %p\n",
+           ring_id, workload);
+   mutex_lock(&dev_priv->drm.struct_mutex);
+   ret = intel_gvt_scan_and_shadow_workload(workload);
+   if (ret)
+       goto out;
    if (workload->prepare) {
        ret = workload->prepare(workload);
        if (ret)
            goto out;
    }
-   gvt_dbg_sched("ring id %d submit workload to i915 %p\n",
-           ring_id, workload->req);
-   ret = 0;
-   workload->dispatched = true;
+   /* pin shadow context by gvt even the shadow context will be pinned
+    * when i915 alloc request. That is because gvt will update the guest
+    * context from shadow context when workload is completed, and at that
+    * moment, i915 may already unpined the shadow context to make the
+    * shadow_ctx pages invalid. So gvt need to pin itself. After update
+    * the guest context, gvt can unpin the shadow_ctx safely.
+    */
+   ring = engine->context_pin(engine, shadow_ctx);
+   if (IS_ERR(ring)) {
+       ret = PTR_ERR(ring);
+       gvt_vgpu_err("fail to pin shadow context\n");
+       goto out;
+   }
out:
    if (ret)
        workload->status = ret;
-   if (!IS_ERR_OR_NULL(rq))
-       i915_add_request(rq);
-   else
-       engine->context_unpin(engine, shadow_ctx);
+   if (!IS_ERR_OR_NULL(workload->req)) {
+       gvt_dbg_sched("ring id %d submit workload to i915 %p\n",
+               ring_id, workload->req);
+       i915_add_request(workload->req);
+       workload->dispatched = true;
+   }
    mutex_unlock(&dev_priv->drm.struct_mutex);
    return ret;
@@ -630,5 +677,7 @@ int intel_vgpu_init_gvt_context(struct intel_vgpu *vgpu)
    vgpu->shadow_ctx->engine[RCS].initialised = true;
+   bitmap_zero(vgpu->shadow_ctx_desc_updated, I915_NUM_ENGINES);
    return 0;
}
@@ -82,6 +82,7 @@ struct intel_vgpu_workload {
    struct drm_i915_gem_request *req;
    /* if this workload has been dispatched to i915? */
    bool dispatched;
+   bool shadowed;
    int status;
    struct intel_vgpu_mm *shadow_mm;
......
@@ -43,6 +43,7 @@ void populate_pvinfo_page(struct intel_vgpu *vgpu)
    vgpu_vreg(vgpu, vgtif_reg(version_minor)) = 0;
    vgpu_vreg(vgpu, vgtif_reg(display_ready)) = 0;
    vgpu_vreg(vgpu, vgtif_reg(vgt_id)) = vgpu->id;
+   vgpu_vreg(vgpu, vgtif_reg(vgt_caps)) = VGT_CAPS_FULL_48BIT_PPGTT;
    vgpu_vreg(vgpu, vgtif_reg(avail_rs.mappable_gmadr.base)) =
        vgpu_aperture_gmadr_base(vgpu);
    vgpu_vreg(vgpu, vgtif_reg(avail_rs.mappable_gmadr.size)) =
@@ -502,11 +503,11 @@ void intel_gvt_reset_vgpu_locked(struct intel_vgpu *vgpu, bool dmlr,
    /* full GPU reset or device model level reset */
    if (engine_mask == ALL_ENGINES || dmlr) {
-       intel_vgpu_reset_gtt(vgpu, dmlr);
        /*fence will not be reset during virtual reset */
-       if (dmlr)
+       if (dmlr) {
+           intel_vgpu_reset_gtt(vgpu);
            intel_vgpu_reset_resource(vgpu);
+       }
        intel_vgpu_reset_mmio(vgpu, dmlr);
        populate_pvinfo_page(vgpu);
......
@@ -1905,6 +1905,7 @@ struct i915_workarounds {
struct i915_virtual_gpu {
    bool active;
+   u32 caps;
};
/* used in computing the new watermarks state */
......
@@ -144,9 +144,9 @@ int intel_sanitize_enable_ppgtt(struct drm_i915_private *dev_priv,
    has_full_48bit_ppgtt = dev_priv->info.has_full_48bit_ppgtt;
    if (intel_vgpu_active(dev_priv)) {
-       /* emulation is too hard */
+       /* GVT-g has no support for 32bit ppgtt */
        has_full_ppgtt = false;
-       has_full_48bit_ppgtt = false;
+       has_full_48bit_ppgtt = intel_vgpu_has_full_48bit_ppgtt(dev_priv);
    }
    if (!has_aliasing_ppgtt)
@@ -180,10 +180,15 @@ int intel_sanitize_enable_ppgtt(struct drm_i915_private *dev_priv,
        return 0;
    }
-   if (INTEL_GEN(dev_priv) >= 8 && i915.enable_execlists && has_full_ppgtt)
-       return has_full_48bit_ppgtt ? 3 : 2;
-   else
-       return has_aliasing_ppgtt ? 1 : 0;
+   if (INTEL_GEN(dev_priv) >= 8 && i915.enable_execlists) {
+       if (has_full_48bit_ppgtt)
+           return 3;
+       if (has_full_ppgtt)
+           return 2;
+   }
+   return has_aliasing_ppgtt ? 1 : 0;
}
static int ppgtt_bind_vma(struct i915_vma *vma,
......
@@ -49,12 +49,18 @@ enum vgt_g2v_type {
    VGT_G2V_MAX,
};
+/*
+ * VGT capabilities type
+ */
+#define VGT_CAPS_FULL_48BIT_PPGTT BIT(2)
struct vgt_if {
    u64 magic; /* VGT_MAGIC */
    u16 version_major;
    u16 version_minor;
    u32 vgt_id; /* ID of vGT instance */
-   u32 rsv1[12]; /* pad to offset 0x40 */
+   u32 vgt_caps; /* VGT capabilities */
+   u32 rsv1[11]; /* pad to offset 0x40 */
    /*
     * Data structure to describe the balooning info of resources.
     * Each VM can only have one portion of continuous area for now.
......
@@ -75,10 +75,17 @@ void i915_check_vgpu(struct drm_i915_private *dev_priv)
        return;
    }
+   dev_priv->vgpu.caps = __raw_i915_read32(dev_priv, vgtif_reg(vgt_caps));
    dev_priv->vgpu.active = true;
    DRM_INFO("Virtual GPU for Intel GVT-g detected.\n");
}
+bool intel_vgpu_has_full_48bit_ppgtt(struct drm_i915_private *dev_priv)
+{
+   return dev_priv->vgpu.caps & VGT_CAPS_FULL_48BIT_PPGTT;
+}
struct _balloon_info_ {
    /*
     * There are up to 2 regions per mappable/unmappable graphic
......
@@ -27,6 +27,9 @@
#include "i915_pvinfo.h"
void i915_check_vgpu(struct drm_i915_private *dev_priv);
+bool intel_vgpu_has_full_48bit_ppgtt(struct drm_i915_private *dev_priv);
int intel_vgt_balloon(struct drm_i915_private *dev_priv);
void intel_vgt_deballoon(struct drm_i915_private *dev_priv);
......