Commit 9a1054c3 authored by Dave Airlie

Merge tag 'drm-intel-fixes-2021-01-28' of git://anongit.freedesktop.org/drm/drm-intel into drm-fixes

drm/i915 fixes for v5.11-rc6:
- Fix ICL MG PHY vswing
- Fix subplatform handling
- Fix selftest memleak
- Clear CACHE_MODE prior to clearing residuals
- Always flush the active worker before returning from the wait
- Always try to reserve GGTT address 0x0
Signed-off-by: Dave Airlie <airlied@redhat.com>
From: Jani Nikula <jani.nikula@intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/87y2gdi3mp.fsf@intel.com
parents fb62b7b9 489140b5
@@ -2755,12 +2755,11 @@ static void icl_mg_phy_ddi_vswing_sequence(struct intel_encoder *encoder,
 	u32 val;
 
 	ddi_translations = icl_get_mg_buf_trans(encoder, crtc_state, &n_entries);
-	/* The table does not have values for level 3 and level 9. */
-	if (level >= n_entries || level == 3 || level == 9) {
+	if (level >= n_entries) {
 		drm_dbg_kms(&dev_priv->drm,
 			    "DDI translation not found for level %d. Using %d instead.",
-			    level, n_entries - 2);
-		level = n_entries - 2;
+			    level, n_entries - 1);
+		level = n_entries - 1;
 	}
 
 	/* Set MG_TX_LINK_PARAMS cri_use_fs32 to 0. */
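In the hunk above (icl_mg_phy_ddi_vswing_sequence), the special-casing of levels 3 and 9 is dropped, presumably because the MG PHY buffer-translation tables now carry entries for every level; the only remaining fallback is a plain clamp to the last table row. A minimal sketch of that clamp, with a hypothetical helper name:

/* Hypothetical helper: fall back to the last valid table entry. */
static int clamp_to_table(int level, int n_entries)
{
	return level < n_entries ? level : n_entries - 1;
}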
@@ -390,6 +390,16 @@ static void emit_batch(struct i915_vma * const vma,
 						     &cb_kernel_ivb,
 						     desc_count);
 
+	/* Reset inherited context registers */
+	gen7_emit_pipeline_invalidate(&cmds);
+	batch_add(&cmds, MI_LOAD_REGISTER_IMM(2));
+	batch_add(&cmds, i915_mmio_reg_offset(CACHE_MODE_0_GEN7));
+	batch_add(&cmds, 0xffff0000);
+	batch_add(&cmds, i915_mmio_reg_offset(CACHE_MODE_1));
+	batch_add(&cmds, 0xffff0000 | PIXEL_SUBSPAN_COLLECT_OPT_DISABLE);
+	gen7_emit_pipeline_flush(&cmds);
+
+	/* Switch to the media pipeline and our base address */
 	gen7_emit_pipeline_invalidate(&cmds);
 	batch_add(&cmds, PIPELINE_SELECT | PIPELINE_SELECT_MEDIA);
 	batch_add(&cmds, MI_NOOP);
@@ -399,9 +409,11 @@ static void emit_batch(struct i915_vma * const vma,
 	gen7_emit_state_base_address(&cmds, descriptors);
 	gen7_emit_pipeline_invalidate(&cmds);
 
+	/* Set the clear-residual kernel state */
 	gen7_emit_vfe_state(&cmds, bv, urb_size - 1, 0, 0);
 	gen7_emit_interface_descriptor_load(&cmds, descriptors, desc_count);
 
+	/* Execute the kernel on all HW threads */
 	for (i = 0; i < num_primitives(bv); i++)
 		gen7_emit_media_object(&cmds, i);
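The MI_LOAD_REGISTER_IMM(2) sequence above relies on CACHE_MODE_0/CACHE_MODE_1 being masked registers: the upper 16 bits of each written value select which of the lower 16 bits take effect. Writing 0xffff0000 unmasks all 16 bits and sets them to zero, wiping whatever a previous context left behind, while OR-ing in PIXEL_SUBSPAN_COLLECT_OPT_DISABLE re-enables that single workaround bit. A small illustrative sketch of the encoding (helper names hypothetical, mirroring i915's _MASKED_BIT_ENABLE/_MASKED_BIT_DISABLE macros):

#include <stdint.h>

/* Upper 16 bits: write-enable mask; lower 16 bits: new bit values. */
static uint32_t masked_bit_enable(uint32_t bit)
{
	return (bit << 16) | bit;	/* unmask the bit and set it to 1 */
}

static uint32_t masked_bit_disable(uint32_t bit)
{
	return bit << 16;		/* unmask the bit and set it to 0 */
}

Under this encoding, 0xffff0000 is simply masked_bit_disable() applied to all 16 bits at once.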
@@ -526,16 +526,39 @@ static int init_ggtt(struct i915_ggtt *ggtt)
 	mutex_init(&ggtt->error_mutex);
 
 	if (ggtt->mappable_end) {
-		/* Reserve a mappable slot for our lockless error capture */
-		ret = drm_mm_insert_node_in_range(&ggtt->vm.mm,
-						  &ggtt->error_capture,
-						  PAGE_SIZE, 0,
-						  I915_COLOR_UNEVICTABLE,
-						  0, ggtt->mappable_end,
-						  DRM_MM_INSERT_LOW);
-		if (ret)
-			return ret;
+		/*
+		 * Reserve a mappable slot for our lockless error capture.
+		 *
+		 * We strongly prefer taking address 0x0 in order to protect
+		 * other critical buffers against accidental overwrites,
+		 * as writing to address 0 is a very common mistake.
+		 *
+		 * Since 0 may already be in use by the system (e.g. the BIOS
+		 * framebuffer), we let the reservation fail quietly and hope
+		 * 0 remains reserved always.
+		 *
+		 * If we fail to reserve 0, and then fail to find any space
+		 * for an error-capture, remain silent. We can afford not
+		 * to reserve an error_capture node as we have fallback
+		 * paths, and we trust that 0 will remain reserved. However,
+		 * the only likely reason for failure to insert is a driver
+		 * bug, which we expect to cause other failures...
+		 */
+		ggtt->error_capture.size = I915_GTT_PAGE_SIZE;
+		ggtt->error_capture.color = I915_COLOR_UNEVICTABLE;
+		if (drm_mm_reserve_node(&ggtt->vm.mm, &ggtt->error_capture))
+			drm_mm_insert_node_in_range(&ggtt->vm.mm,
+						    &ggtt->error_capture,
+						    ggtt->error_capture.size, 0,
+						    ggtt->error_capture.color,
+						    0, ggtt->mappable_end,
+						    DRM_MM_INSERT_LOW);
 	}
+	if (drm_mm_node_allocated(&ggtt->error_capture))
+		drm_dbg(&ggtt->vm.i915->drm,
+			"Reserved GGTT:[%llx, %llx] for use by error capture\n",
+			ggtt->error_capture.start,
+			ggtt->error_capture.start + ggtt->error_capture.size);
 
 	/*
 	 * The upper portion of the GuC address space has a sizeable hole
@@ -548,9 +571,9 @@ static int init_ggtt(struct i915_ggtt *ggtt)
 
 	/* Clear any non-preallocated blocks */
 	drm_mm_for_each_hole(entry, &ggtt->vm.mm, hole_start, hole_end) {
-		drm_dbg_kms(&ggtt->vm.i915->drm,
-			    "clearing unused GTT space: [%lx, %lx]\n",
-			    hole_start, hole_end);
+		drm_dbg(&ggtt->vm.i915->drm,
+			"clearing unused GTT space: [%lx, %lx]\n",
+			hole_start, hole_end);
 		ggtt->vm.clear_range(&ggtt->vm, hole_start,
 				     hole_end - hole_start);
 	}
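The rewritten reservation works because drm_mm_reserve_node() claims the exact range already described by the node, and ggtt->error_capture sits in zeroed storage, so its start of 0 requests GGTT address 0x0; only when that precise slot is occupied does the code fall back to drm_mm_insert_node_in_range(), which searches the mappable aperture for any free page. A condensed sketch of the try-exact-then-fallback pattern (function name hypothetical):

#include <linux/sizes.h>
#include <drm/drm_mm.h>

/* Try to pin 'node' at its preset start (0 for a zeroed node); on
 * contention, settle for any free low address below 'end'.
 */
static void reserve_low(struct drm_mm *mm, struct drm_mm_node *node, u64 end)
{
	node->size = SZ_4K;
	if (drm_mm_reserve_node(mm, node))
		drm_mm_insert_node_in_range(mm, node, node->size, 0,
					    node->color, 0, end,
					    DRM_MM_INSERT_LOW);
}

Note that failure is deliberately tolerated, matching the comment in the hunk: error capture has fallback paths, so an unreserved node is not fatal.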
@@ -631,24 +631,26 @@ static int flush_lazy_signals(struct i915_active *ref)
 
 int __i915_active_wait(struct i915_active *ref, int state)
 {
-	int err;
-
 	might_sleep();
 
-	if (!i915_active_acquire_if_busy(ref))
-		return 0;
-
 	/* Any fence added after the wait begins will not be auto-signaled */
-	err = flush_lazy_signals(ref);
-	i915_active_release(ref);
-	if (err)
-		return err;
+	if (i915_active_acquire_if_busy(ref)) {
+		int err;
 
-	if (!i915_active_is_idle(ref) &&
-	    ___wait_var_event(ref, i915_active_is_idle(ref),
-			      state, 0, 0, schedule()))
-		return -EINTR;
+		err = flush_lazy_signals(ref);
+		i915_active_release(ref);
+		if (err)
+			return err;
 
+		if (___wait_var_event(ref, i915_active_is_idle(ref),
+				      state, 0, 0, schedule()))
+			return -EINTR;
+	}
+
+	/*
+	 * After the wait is complete, the caller may free the active.
+	 * We have to flush any concurrent retirement before returning.
+	 */
 	flush_work(&ref->work);
 	return 0;
 }
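The restructure closes a use-after-free window: once __i915_active_wait() returns, the caller may free the i915_active immediately, yet the retirement worker queued on ref->work could still be dereferencing it. Flushing the worker unconditionally, even when the active was never busy, guarantees nothing still touches the object. A minimal sketch of the rule (struct and names hypothetical):

#include <linux/workqueue.h>

/* Hypothetical object whose retirement runs in a worker. */
struct demo_active {
	struct work_struct retire;
};

static int demo_active_wait(struct demo_active *ref)
{
	/*
	 * The caller may free 'ref' the moment we return, so wait for
	 * any in-flight retirement worker before handing back control.
	 */
	flush_work(&ref->retire);
	return 0;
}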
@@ -1346,7 +1346,7 @@ intel_subplatform(const struct intel_runtime_info *info, enum intel_platform p)
 {
 	const unsigned int pi = __platform_mask_index(info, p);
 
-	return info->platform_mask[pi] & INTEL_SUBPLATFORM_BITS;
+	return info->platform_mask[pi] & ((1 << INTEL_SUBPLATFORM_BITS) - 1);
 }
 
 static __always_inline bool
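The one-line change above fixes a count-versus-mask mixup: INTEL_SUBPLATFORM_BITS is the number of bits reserved for subplatform flags, not a bitmask, so masking with it directly silently dropped the highest subplatform bit. A standalone illustration, assuming a bit count of 3:

#include <assert.h>

#define SUBPLATFORM_BITS 3			/* a count of bits, not a mask */

int main(void)
{
	unsigned int platform_mask = 0x7;	/* all three subplatform bits set */

	/* Bug: masking with the count (binary 011) loses bit 2. */
	assert((platform_mask & SUBPLATFORM_BITS) == 0x3);

	/* Fix: expand the count into a mask covering every bit. */
	assert((platform_mask & ((1 << SUBPLATFORM_BITS) - 1)) == 0x7);
	return 0;
}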
@@ -1880,7 +1880,7 @@ static int igt_cs_tlb(void *arg)
 		vma = i915_vma_instance(out, vm, NULL);
 		if (IS_ERR(vma)) {
 			err = PTR_ERR(vma);
-			goto out_put_batch;
+			goto out_put_out;
 		}
 
 		err = i915_vma_pin(vma, 0, 0,
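The selftest leak came from unwinding through the wrong label: once the 'out' object exists, an error must jump to out_put_out so that 'out' is released along with the batch, whereas the old goto out_put_batch skipped that put. A schematic of the kernel's goto-ladder idiom (all helpers are hypothetical stubs):

#include <stdio.h>

/* Stand-ins for object creation and i915_gem_object_put(). */
static int acquire(const char *what) { printf("acquire %s\n", what); return 0; }
static void release(const char *what) { printf("release %s\n", what); }

static int demo(void)
{
	int err;

	err = acquire("batch");
	if (err)
		goto out;
	err = acquire("out");
	if (err)
		goto out_put_batch;
	err = acquire("vma");
	if (err)
		goto out_put_out;	/* unwind 'out' too, not just 'batch' */
	err = 0;
out_put_out:
	release("out");
out_put_batch:
	release("batch");
out:
	return err;
}

int main(void)
{
	return demo();
}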