Commit afb286bc authored by Joonas Lahtinen

Merge tag 'gvt-fixes-2019-05-30' of https://github.com/intel/gvt-linux into drm-intel-fixes

gvt-fixes-2019-05-30

- Fix gtt entry update with sane initialization (Tina)
- Fix force-to-nonpriv warning from recent guest (Colin)
- Fix GFX_MODE and CSFE_CHICKEN1_REG handler for host only control (Colin)
- GGTT range validation enforced (Xiong)
- Fix cmd length for VEB_DI_IECP (Fred)
Signed-off-by: Joonas Lahtinen <joonas.lahtinen@linux.intel.com>
From: Zhenyu Wang <zhenyuw@linux.intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20190530034137.GE3211@zhen-hp.sh.intel.com
parents a6315005 3035e8cd
--- a/drivers/gpu/drm/i915/gvt/cmd_parser.c
+++ b/drivers/gpu/drm/i915/gvt/cmd_parser.c
@@ -2530,7 +2530,7 @@ static const struct cmd_info cmd_info[] = {
                 0, 12, NULL},
 
         {"VEB_DI_IECP", OP_VEB_DNDI_IECP_STATE, F_LEN_VAR, R_VECS, D_BDW_PLUS,
-                0, 20, NULL},
+                0, 12, NULL},
 };
 
 static void add_cmd_entry(struct intel_gvt *gvt, struct cmd_entry *e)
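For context on the trailing numbers in a cmd_info entry: for a variable-length (F_LEN_VAR) command the table records the bit-width of the DWord-Length field in the command header, and the parser masks the header with that width to learn how many dwords to skip. Below is a minimal, self-contained sketch of that decode; the struct, the "+ 2" bias and the example header value are assumptions for illustration, not a quote of the GVT parser.

#include <stdint.h>
#include <stdio.h>

/* Hypothetical mirror of a cmd_info-style entry: for variable-length
 * commands, "len" is assumed to be the width in bits of the DWord-Length
 * field in the command header (the 12 vs. 20 being fixed above). */
struct cmd_len_info {
        const char *name;
        unsigned int len;       /* bit-width of the length field */
};

/* Decode the total command length (in dwords) from the first dword of
 * the command. The "+ 2" bias is an assumption for this sketch: many
 * GPU commands encode "total length - 2" in the header. */
static uint32_t cmd_length(const struct cmd_len_info *info, uint32_t header)
{
        return (header & ((1U << info->len) - 1)) + 2;
}

int main(void)
{
        struct cmd_len_info veb_di_iecp = { "VEB_DI_IECP", 12 };
        uint32_t header = 0x7390001e;   /* made-up header, low bits = 0x1e */

        /* With a mis-sized length field the parser would fold stray header
         * bits into the length and skip the wrong number of dwords. */
        printf("%s length: %u dwords\n", veb_di_iecp.name,
               cmd_length(&veb_di_iecp, header));
        return 0;
}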
--- a/drivers/gpu/drm/i915/gvt/gtt.c
+++ b/drivers/gpu/drm/i915/gvt/gtt.c
@@ -53,13 +53,19 @@ static int preallocated_oos_pages = 8192;
  */
 bool intel_gvt_ggtt_validate_range(struct intel_vgpu *vgpu, u64 addr, u32 size)
 {
-        if ((!vgpu_gmadr_is_valid(vgpu, addr)) || (size
-                        && !vgpu_gmadr_is_valid(vgpu, addr + size - 1))) {
-                gvt_vgpu_err("invalid range gmadr 0x%llx size 0x%x\n",
-                                addr, size);
-                return false;
-        }
-        return true;
+        if (size == 0)
+                return vgpu_gmadr_is_valid(vgpu, addr);
+
+        if (vgpu_gmadr_is_aperture(vgpu, addr) &&
+            vgpu_gmadr_is_aperture(vgpu, addr + size - 1))
+                return true;
+        else if (vgpu_gmadr_is_hidden(vgpu, addr) &&
+                 vgpu_gmadr_is_hidden(vgpu, addr + size - 1))
+                return true;
+
+        gvt_dbg_mm("Invalid ggtt range at 0x%llx, size: 0x%x\n",
+                   addr, size);
+        return false;
 }
 
 /* translate a guest gmadr to host gmadr */
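The rewritten intel_gvt_ggtt_validate_range() above no longer just checks the two endpoints for "validity": it requires the whole range to sit inside a single partition of the vGPU's GGTT space, either the low (aperture) window or the high (hidden) window, so a range can no longer straddle the gap between the two. A minimal user-space sketch of the same idea follows; the vgpu_ggtt_layout struct and the window bounds are made up for illustration.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Hypothetical per-vGPU GGTT layout: a low "aperture" window and a high
 * "hidden" window. Real bounds come from the vGPU's resource allocation. */
struct vgpu_ggtt_layout {
        uint64_t aperture_start, aperture_end;  /* [start, end) */
        uint64_t hidden_start, hidden_end;      /* [start, end) */
};

static bool in_window(uint64_t addr, uint64_t start, uint64_t end)
{
        return addr >= start && addr < end;
}

/* Mirror of the fixed logic: an empty range degenerates to a single
 * address check; otherwise the first and last byte must fall in the
 * *same* window, so a range cannot cross a partition boundary. */
static bool ggtt_range_is_valid(const struct vgpu_ggtt_layout *l,
                                uint64_t addr, uint32_t size)
{
        uint64_t last = addr + size - 1;

        if (size == 0)
                return in_window(addr, l->aperture_start, l->aperture_end) ||
                       in_window(addr, l->hidden_start, l->hidden_end);

        if (in_window(addr, l->aperture_start, l->aperture_end) &&
            in_window(last, l->aperture_start, l->aperture_end))
                return true;

        if (in_window(addr, l->hidden_start, l->hidden_end) &&
            in_window(last, l->hidden_start, l->hidden_end))
                return true;

        return false;
}

int main(void)
{
        /* Example windows: 64 MiB of aperture, 384 MiB of hidden space. */
        struct vgpu_ggtt_layout l = {
                .aperture_start = 0x00000000, .aperture_end = 0x04000000,
                .hidden_start   = 0x10000000, .hidden_end   = 0x28000000,
        };

        printf("%d\n", ggtt_range_is_valid(&l, 0x03fff000, 0x2000)); /* 0: runs past the aperture end */
        printf("%d\n", ggtt_range_is_valid(&l, 0x10000000, 0x1000)); /* 1: entirely inside the hidden window */
        return 0;
}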
@@ -2183,7 +2189,8 @@ static int emulate_ggtt_mmio_write(struct intel_vgpu *vgpu, unsigned int off,
         struct intel_gvt_gtt_pte_ops *ops = gvt->gtt.pte_ops;
         unsigned long g_gtt_index = off >> info->gtt_entry_size_shift;
         unsigned long gma, gfn;
-        struct intel_gvt_gtt_entry e, m;
+        struct intel_gvt_gtt_entry e = {.val64 = 0, .type = GTT_TYPE_GGTT_PTE};
+        struct intel_gvt_gtt_entry m = {.val64 = 0, .type = GTT_TYPE_GGTT_PTE};
         dma_addr_t dma_addr;
         int ret;
         struct intel_gvt_partial_pte *partial_pte, *pos, *n;
@@ -2250,7 +2257,8 @@ static int emulate_ggtt_mmio_write(struct intel_vgpu *vgpu, unsigned int off,
 
         if (!partial_update && (ops->test_present(&e))) {
                 gfn = ops->get_pfn(&e);
-                m = e;
+                m.val64 = e.val64;
+                m.type = e.type;
 
                 /* one PTE update may be issued in multiple writes and the
                  * first write may not construct a valid gfn
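The two gtt.c hunks above are the "sane initialization" fix: e and m used to be plain stack variables, so a path that bails out before fully decoding the entry (for example a partial PTE write) could leave .type and .val64 holding stack garbage, and the later "m = e;" copy propagated it. Designated initializers give every field a known starting value. A small stand-alone illustration follows, using a made-up entry type rather than the kernel's intel_gvt_gtt_entry.

#include <stdint.h>
#include <stdio.h>

/* Hypothetical stand-in for a GTT entry descriptor. */
struct gtt_entry {
        uint64_t val64;
        int type;       /* e.g. 1 = GGTT PTE, as in the hunk above */
};

/* Pretend decode step that may bail out before touching *e, the way a
 * partial PTE write can leave the entry unpopulated. */
static int decode_entry(struct gtt_entry *e, int have_full_pte)
{
        if (!have_full_pte)
                return -1;      /* early exit: *e was never written */
        e->val64 = 0xdeadbeef000ULL;
        e->type = 1;
        return 0;
}

int main(void)
{
        /* Before the fix: uninitialized locals. Whatever happens to be on
         * the stack is what a later "m = e;" copy would propagate. */
        struct gtt_entry e_bad, m_bad;

        /* After the fix: every field starts from a known value, so even an
         * early-exit path leaves a fully defined entry (type 1, val64 0)
         * rather than stack garbage. */
        struct gtt_entry e = { .val64 = 0, .type = 1 };
        struct gtt_entry m = { .val64 = 0, .type = 1 };

        if (decode_entry(&e, 0) < 0)
                printf("early exit, e is still well-defined: type=%d val64=0x%llx\n",
                       e.type, (unsigned long long)e.val64);

        (void)e_bad; (void)m_bad; (void)m;
        return 0;
}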
--- a/drivers/gpu/drm/i915/gvt/handlers.c
+++ b/drivers/gpu/drm/i915/gvt/handlers.c
@@ -464,6 +464,8 @@ static i915_reg_t force_nonpriv_white_list[] = {
         _MMIO(0x2690),
         _MMIO(0x2694),
         _MMIO(0x2698),
+        _MMIO(0x2754),
+        _MMIO(0x28a0),
         _MMIO(0x4de0),
         _MMIO(0x4de4),
         _MMIO(0x4dfc),
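This hunk adds two more register offsets (0x2754 and 0x28a0) that recent guest drivers load into the force-to-nonpriv registers; values not on the list are what trigger the warning mentioned in the commit summary. Below is a sketch of the kind of linear lookup such a small static whitelist needs; the array contents are a truncated example, not the full kernel list, and the low-bit masking is an assumption for this sketch.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Truncated example of a force-to-nonpriv whitelist: register offsets a
 * guest is allowed to program. Offsets shown are a subset of the hunk
 * above, for illustration only. */
static const uint32_t force_nonpriv_white_list[] = {
        0x2690, 0x2694, 0x2698,
        0x2754, 0x28a0,         /* the two offsets added by this fix */
        0x4de0, 0x4de4, 0x4dfc,
};

#define ARRAY_SIZE(a) (sizeof(a) / sizeof((a)[0]))

/* Linear scan is fine: the table is tiny and consulted only on the rare
 * force-to-nonpriv writes. Masking off the two low bits (assumed here to
 * carry access-control flags) leaves just the register offset. */
static bool in_whitelist(uint32_t value)
{
        uint32_t offset = value & ~0x3u;
        size_t i;

        for (i = 0; i < ARRAY_SIZE(force_nonpriv_white_list); i++)
                if (force_nonpriv_white_list[i] == offset)
                        return true;
        return false;
}

int main(void)
{
        printf("0x2754 allowed: %d\n", in_whitelist(0x2754));
        printf("0x27a4 allowed: %d\n", in_whitelist(0x27a4));
        return 0;
}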
@@ -1690,8 +1692,22 @@ static int ring_mode_mmio_write(struct intel_vgpu *vgpu, unsigned int offset,
         bool enable_execlist;
         int ret;
 
+        (*(u32 *)p_data) &= ~_MASKED_BIT_ENABLE(1);
+        if (IS_COFFEELAKE(vgpu->gvt->dev_priv))
+                (*(u32 *)p_data) &= ~_MASKED_BIT_ENABLE(2);
         write_vreg(vgpu, offset, p_data, bytes);
 
+        if (data & _MASKED_BIT_ENABLE(1)) {
+                enter_failsafe_mode(vgpu, GVT_FAILSAFE_UNSUPPORTED_GUEST);
+                return 0;
+        }
+
+        if (IS_COFFEELAKE(vgpu->gvt->dev_priv) &&
+            data & _MASKED_BIT_ENABLE(2)) {
+                enter_failsafe_mode(vgpu, GVT_FAILSAFE_UNSUPPORTED_GUEST);
+                return 0;
+        }
+
         /* when PPGTT mode enabled, we will check if guest has called
          * pvinfo, if not, we will treat this guest as non-gvtg-aware
          * guest, and stop emulating its cfg space, mmio, gtt, etc.
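This handler (and the csfe_chicken1 handler added further down) leans on i915's masked-register convention: the upper 16 bits of the written value select which of the lower 16 bits actually change, and _MASKED_BIT_ENABLE(m) replicates the mask into both halves. The hunk therefore clears the host-only enable bits from p_data before mirroring the write into the vreg, and if the guest really asked for the unsupported mode it drops the vGPU into failsafe instead. A small sketch of the masked-write rule itself, independent of GVT, follows.

#include <stdint.h>
#include <stdio.h>

/* i915-style masked register helpers: writing (mask << 16) | bits only
 * touches the bits selected in the upper half; everything else in the
 * register keeps its previous value. */
#define MASKED_BIT_ENABLE(m)    (((uint32_t)(m) << 16) | (uint32_t)(m))
#define MASKED_BIT_DISABLE(m)   ((uint32_t)(m) << 16)

/* Emulate what the hardware does with a masked write: for each of the
 * low 16 bits, take the new value only where the mask bit is set. */
static uint32_t masked_write(uint32_t old, uint32_t wr)
{
        uint32_t mask = wr >> 16;
        uint32_t val = wr & 0xffff;

        return (old & ~mask) | (val & mask);
}

int main(void)
{
        uint32_t reg = 0x0004;                  /* bit 2 already set */
        uint32_t wr = MASKED_BIT_ENABLE(0x1);   /* guest tries to enable bit 0 */

        /* A vGPU handler can veto a host-only bit simply by clearing it
         * from the write before emulating it, as the hunk above does with
         * "(*(u32 *)p_data) &= ~_MASKED_BIT_ENABLE(1);". */
        uint32_t vetoed = wr & ~MASKED_BIT_ENABLE(0x1);

        printf("raw write applied:    0x%04x\n", masked_write(reg, wr));
        printf("vetoed write applied: 0x%04x\n", masked_write(reg, vetoed));
        return 0;
}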
@@ -1773,6 +1789,21 @@ static int ring_reset_ctl_write(struct intel_vgpu *vgpu,
         return 0;
 }
 
+static int csfe_chicken1_mmio_write(struct intel_vgpu *vgpu,
+                                    unsigned int offset, void *p_data,
+                                    unsigned int bytes)
+{
+        u32 data = *(u32 *)p_data;
+
+        (*(u32 *)p_data) &= ~_MASKED_BIT_ENABLE(0x18);
+        write_vreg(vgpu, offset, p_data, bytes);
+
+        if (data & _MASKED_BIT_ENABLE(0x10) || data & _MASKED_BIT_ENABLE(0x8))
+                enter_failsafe_mode(vgpu, GVT_FAILSAFE_UNSUPPORTED_GUEST);
+
+        return 0;
+}
+
 #define MMIO_F(reg, s, f, am, rm, d, r, w) do { \
         ret = new_mmio_info(gvt, i915_mmio_reg_offset(reg), \
                 f, s, am, rm, d, r, w); \
@@ -3059,7 +3090,10 @@ static int init_skl_mmio_info(struct intel_gvt *gvt)
         MMIO_D(_MMIO(_PLANE_KEYMSK_1(PIPE_C)), D_SKL_PLUS);
         MMIO_D(_MMIO(0x44500), D_SKL_PLUS);
 
-        MMIO_DFH(GEN9_CSFE_CHICKEN1_RCS, D_SKL_PLUS, F_CMD_ACCESS, NULL, NULL);
+#define CSFE_CHICKEN1_REG(base) _MMIO((base) + 0xD4)
+        MMIO_RING_DFH(CSFE_CHICKEN1_REG, D_SKL_PLUS, F_MODE_MASK | F_CMD_ACCESS,
+                 NULL, csfe_chicken1_mmio_write);
+#undef CSFE_CHICKEN1_REG
         MMIO_DFH(GEN8_HDC_CHICKEN1, D_SKL_PLUS, F_MODE_MASK | F_CMD_ACCESS,
                  NULL, NULL);
         MMIO_DFH(GEN9_WM_CHICKEN3, D_SKL_PLUS, F_MODE_MASK | F_CMD_ACCESS,
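The last hunk swaps a single render-engine registration (GEN9_CSFE_CHICKEN1_RCS) for an MMIO_RING_DFH registration built around the CSFE_CHICKEN1_REG(base) macro, so the same csfe_chicken1_mmio_write handler covers the register at offset base + 0xD4 on every engine, not just RCS. The sketch below only shows the offset arithmetic; the engine MMIO base addresses listed are the conventional gen9 values and should be treated as illustrative rather than authoritative.

#include <stdint.h>
#include <stdio.h>

/* Illustrative gen9 engine MMIO bases (render, blitter, two video
 * engines, video enhancement). Treat the exact values as assumptions
 * made for this sketch. */
static const struct {
        const char *name;
        uint32_t mmio_base;
} engines[] = {
        { "rcs",  0x02000 },
        { "bcs",  0x22000 },
        { "vcs",  0x12000 },
        { "vcs2", 0x1c000 },
        { "vecs", 0x1a000 },
};

/* Per-engine register: same layout on every engine, only the base
 * differs. This mirrors the CSFE_CHICKEN1_REG(base) = base + 0xD4
 * definition in the hunk above. */
static uint32_t csfe_chicken1(uint32_t mmio_base)
{
        return mmio_base + 0xD4;
}

int main(void)
{
        /* A ring-wide registration loop conceptually does this: attach the
         * same write handler to base + 0xD4 for each engine. */
        for (unsigned int i = 0; i < sizeof(engines) / sizeof(engines[0]); i++)
                printf("%-5s CSFE_CHICKEN1 at 0x%05x\n",
                       engines[i].name, csfe_chicken1(engines[i].mmio_base));
        return 0;
}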