Commit 09d2da31 authored by Rodrigo Vivi

Merge tag 'gvt-fixes-2018-07-11' of https://github.com/intel/gvt-linux into drm-intel-fixes

gvt-fixes-2018-07-11

- Fix KBL virtual register update from LRI for GPU hang (Henry)

Signed-off-by: Rodrigo Vivi <rodrigo.vivi@intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20180711024056.GV1267@zhen-hp.sh.intel.com
parents 96a85cc5 6cef21a1
--- a/drivers/gpu/drm/i915/gvt/cmd_parser.c
+++ b/drivers/gpu/drm/i915/gvt/cmd_parser.c
@@ -862,6 +862,7 @@ static int cmd_reg_handler(struct parser_exec_state *s,
 {
 	struct intel_vgpu *vgpu = s->vgpu;
 	struct intel_gvt *gvt = vgpu->gvt;
+	u32 ctx_sr_ctl;
 
 	if (offset + 4 > gvt->device_info.mmio_size) {
 		gvt_vgpu_err("%s access to (%x) outside of MMIO range\n",
@@ -894,6 +895,28 @@ static int cmd_reg_handler(struct parser_exec_state *s,
 		patch_value(s, cmd_ptr(s, index), VGT_PVINFO_PAGE);
 	}
 
+	/* TODO
+	 * Right now only scan LRI command on KBL and in inhibit context.
+	 * It's good enough to support initializing mmio by lri command in
+	 * vgpu inhibit context on KBL.
+	 */
+	if (IS_KABYLAKE(s->vgpu->gvt->dev_priv) &&
+			intel_gvt_mmio_is_in_ctx(gvt, offset) &&
+			!strncmp(cmd, "lri", 3)) {
+		intel_gvt_hypervisor_read_gpa(s->vgpu,
+			s->workload->ring_context_gpa + 12, &ctx_sr_ctl, 4);
+		/* check inhibit context */
+		if (ctx_sr_ctl & 1) {
+			u32 data = cmd_val(s, index + 1);
+
+			if (intel_gvt_mmio_has_mode_mask(s->vgpu->gvt, offset))
+				intel_vgpu_mask_mmio_write(vgpu,
+							offset, &data, 4);
+			else
+				vgpu_vreg(vgpu, offset) = data;
+		}
+	}
+
 	/* TODO: Update the global mask if this MMIO is a masked-MMIO */
 	intel_gvt_mmio_set_cmd_accessed(gvt, offset);
 	return 0;
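An aside on the block above: an MI_LOAD_REGISTER_IMM (LRI) command carries (register offset, immediate value) dword pairs after its header, which is why cmd_val(s, index + 1) is the value for the register at index; the dword read at ring_context_gpa + 12 is the CTX_CONTROL value in the guest's ring context, whose bit 0 flags a restore-inhibited ("inhibit") context. A minimal user-space sketch of that pair walk, with a hypothetical shadow_write() callback standing in for the vreg update (illustrative only, not the kernel parser):

#include <stddef.h>
#include <stdint.h>

/* Toy LRI payload walk: the payload after the MI_LOAD_REGISTER_IMM
 * header is (register offset, immediate value) dword pairs. */
typedef void (*shadow_write_fn)(uint32_t offset, uint32_t value);

static void walk_lri_payload(const uint32_t *payload, size_t npairs,
			     shadow_write_fn shadow_write)
{
	for (size_t i = 0; i < npairs; i++) {
		uint32_t offset = payload[2 * i] & ~0x3u; /* dword-aligned reg */
		uint32_t value  = payload[2 * i + 1];
		shadow_write(offset, value); /* mirror into the shadow register */
	}
}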
--- a/drivers/gpu/drm/i915/gvt/gvt.h
+++ b/drivers/gpu/drm/i915/gvt/gvt.h
@@ -268,6 +268,8 @@ struct intel_gvt_mmio {
 #define F_CMD_ACCESSED	(1 << 5)
 /* This reg could be accessed by unaligned address */
 #define F_UNALIGN	(1 << 6)
+/* This reg is saved/restored in context */
+#define F_IN_CTX	(1 << 7)
 
 	struct gvt_mmio_block *mmio_block;
 	unsigned int num_mmio_block;
@@ -639,6 +641,33 @@ static inline bool intel_gvt_mmio_has_mode_mask(
 	return gvt->mmio.mmio_attribute[offset >> 2] & F_MODE_MASK;
 }
 
+/**
+ * intel_gvt_mmio_is_in_ctx - check if a MMIO is saved/restored in context
+ * @gvt: a GVT device
+ * @offset: register offset
+ *
+ * Returns:
+ * True if the MMIO is marked in-context, false otherwise.
+ *
+ */
+static inline bool intel_gvt_mmio_is_in_ctx(
+			struct intel_gvt *gvt, unsigned int offset)
+{
+	return gvt->mmio.mmio_attribute[offset >> 2] & F_IN_CTX;
+}
+
+/**
+ * intel_gvt_mmio_set_in_ctx - mark a MMIO as saved/restored in context
+ * @gvt: a GVT device
+ * @offset: register offset
+ *
+ */
+static inline void intel_gvt_mmio_set_in_ctx(
+			struct intel_gvt *gvt, unsigned int offset)
+{
+	gvt->mmio.mmio_attribute[offset >> 2] |= F_IN_CTX;
+}
+
 int intel_gvt_debugfs_add_vgpu(struct intel_vgpu *vgpu);
 void intel_gvt_debugfs_remove_vgpu(struct intel_vgpu *vgpu);
 int intel_gvt_debugfs_init(struct intel_gvt *gvt);
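These helpers follow the attribute scheme already used by F_MODE_MASK above: one byte of flag bits per 4-byte register, indexed by offset >> 2. A self-contained toy model of that lookup, with illustrative names (the kernel's table is gvt->mmio.mmio_attribute):

#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>

#define F_IN_CTX (1 << 7) /* same bit as the new flag above */

/* Toy attribute table: one u8 of flag bits per 4-byte MMIO register. */
struct attr_table {
	uint8_t *attr;  /* mmio_size / 4 entries */
	size_t nregs;
};

static void set_in_ctx(struct attr_table *t, unsigned int offset)
{
	t->attr[offset >> 2] |= F_IN_CTX;
}

static bool is_in_ctx(const struct attr_table *t, unsigned int offset)
{
	return t->attr[offset >> 2] & F_IN_CTX;
}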
--- a/drivers/gpu/drm/i915/gvt/handlers.c
+++ b/drivers/gpu/drm/i915/gvt/handlers.c
@@ -3045,6 +3045,30 @@ int intel_vgpu_default_mmio_write(struct intel_vgpu *vgpu, unsigned int offset,
 	return 0;
 }
 
+/**
+ * intel_vgpu_mask_mmio_write - write a masked virtual register
+ * @vgpu: a vGPU
+ * @offset: access offset
+ * @p_data: write data buffer
+ * @bytes: access data length
+ *
+ * Returns:
+ * Zero on success, negative error code if failed.
+ */
+int intel_vgpu_mask_mmio_write(struct intel_vgpu *vgpu, unsigned int offset,
+		void *p_data, unsigned int bytes)
+{
+	u32 mask, old_vreg;
+
+	old_vreg = vgpu_vreg(vgpu, offset);
+	write_vreg(vgpu, offset, p_data, bytes);
+	mask = vgpu_vreg(vgpu, offset) >> 16;
+	vgpu_vreg(vgpu, offset) = (old_vreg & ~mask) |
+				(vgpu_vreg(vgpu, offset) & mask);
+	return 0;
+}
+
 /**
  * intel_gvt_in_force_nonpriv_whitelist - if a mmio is in whitelist to be
  * force-nopriv register
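intel_vgpu_mask_mmio_write() applies the masked-register convention common on Intel GPU registers: the upper 16 bits of the written dword select which of the lower 16 bits take effect, and unselected bits keep their previous value. A runnable sketch of just that merge step, with toy values (not the kernel helper):

#include <stdint.h>
#include <stdio.h>

/* Masked-register merge: bits whose mask bit (upper half of the
 * written dword) is set take the new value; the rest keep the old. */
static uint32_t mask_merge(uint32_t old_vreg, uint32_t wdata)
{
	uint32_t mask = wdata >> 16;
	return (old_vreg & ~mask) | (wdata & mask);
}

int main(void)
{
	uint32_t vreg = 0x0000abcd;
	/* Set bit 1 and clear bit 0: mask = 0x0003, value = 0x0002. */
	vreg = mask_merge(vreg, 0x00030002);
	printf("vreg = 0x%08x\n", vreg); /* prints 0x0000abce */
	return 0;
}

This merge is why the command parser above routes registers that have a mode mask through this helper instead of overwriting the whole virtual register.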
--- a/drivers/gpu/drm/i915/gvt/mmio.h
+++ b/drivers/gpu/drm/i915/gvt/mmio.h
@@ -98,4 +98,6 @@ bool intel_gvt_in_force_nonpriv_whitelist(struct intel_gvt *gvt,
 int intel_vgpu_mmio_reg_rw(struct intel_vgpu *vgpu, unsigned int offset,
 			   void *pdata, unsigned int bytes, bool is_read);
 
+int intel_vgpu_mask_mmio_write(struct intel_vgpu *vgpu, unsigned int offset,
+				  void *p_data, unsigned int bytes);
 #endif
--- a/drivers/gpu/drm/i915/gvt/mmio_context.c
+++ b/drivers/gpu/drm/i915/gvt/mmio_context.c
@@ -581,7 +581,9 @@ void intel_gvt_init_engine_mmio_context(struct intel_gvt *gvt)
 	for (mmio = gvt->engine_mmio_list.mmio;
 	     i915_mmio_reg_valid(mmio->reg); mmio++) {
-		if (mmio->in_context)
+		if (mmio->in_context) {
 			gvt->engine_mmio_list.ctx_mmio_count[mmio->ring_id]++;
+			intel_gvt_mmio_set_in_ctx(gvt, mmio->reg.reg);
+		}
 	}
 }
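Taken together: intel_gvt_init_engine_mmio_context() now tags every in-context engine register with F_IN_CTX at init time, and the command parser uses that tag to catch LRI writes to such registers in a KBL inhibit context, mirroring each immediate value into the vGPU's virtual registers (via intel_vgpu_mask_mmio_write() when the register has a mode mask). Presumably it was stale virtual-register state, left behind when these LRI writes went unmirrored, that produced the GPU hang the commit message refers to.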