Commit ac6c0433 authored by Linus Torvalds

Merge tag 'drm-fixes-2023-04-06' of git://anongit.freedesktop.org/drm/drm

Pull drm fixes from Daniel Vetter:
 "Mostly i915 fixes: dp mst for compression/dsc, perf ioctl uaf, ctx rpm
  accounting, gt reset vs huc loading.

  And a few individual driver fixes: ivpu dma fence&suspend, panfrost
  mmap, nouveau color depth"

* tag 'drm-fixes-2023-04-06' of git://anongit.freedesktop.org/drm/drm:
  accel/ivpu: Fix S3 system suspend when not idle
  accel/ivpu: Add dma fence to command buffers only
  drm/i915: Fix context runtime accounting
  drm/i915: fix race condition UAF in i915_perf_add_config_ioctl
  drm/i915: Use compressed bpp when calculating m/n value for DP MST DSC
  drm/i915/huc: Cancel HuC delayed load timer on reset.
  drm/i915/ttm: fix sparse warning
  drm/panfrost: Fix the panfrost_mmu_map_fault_addr() error path
  drm/nouveau/disp: Support more modes by checking with lower bpc
parents 2a28a8b3 3dfa8926
@@ -461,26 +461,22 @@ ivpu_job_prepare_bos_for_submit(struct drm_file *file, struct ivpu_job *job, u32
     job->cmd_buf_vpu_addr = bo->vpu_addr + commands_offset;

-    ret = drm_gem_lock_reservations((struct drm_gem_object **)job->bos, buf_count,
-                                    &acquire_ctx);
+    ret = drm_gem_lock_reservations((struct drm_gem_object **)job->bos, 1, &acquire_ctx);
     if (ret) {
         ivpu_warn(vdev, "Failed to lock reservations: %d\n", ret);
         return ret;
     }

-    for (i = 0; i < buf_count; i++) {
-        ret = dma_resv_reserve_fences(job->bos[i]->base.resv, 1);
-        if (ret) {
-            ivpu_warn(vdev, "Failed to reserve fences: %d\n", ret);
-            goto unlock_reservations;
-        }
+    ret = dma_resv_reserve_fences(bo->base.resv, 1);
+    if (ret) {
+        ivpu_warn(vdev, "Failed to reserve fences: %d\n", ret);
+        goto unlock_reservations;
     }

-    for (i = 0; i < buf_count; i++)
-        dma_resv_add_fence(job->bos[i]->base.resv, job->done_fence, DMA_RESV_USAGE_WRITE);
+    dma_resv_add_fence(bo->base.resv, job->done_fence, DMA_RESV_USAGE_WRITE);

 unlock_reservations:
-    drm_gem_unlock_reservations((struct drm_gem_object **)job->bos, buf_count, &acquire_ctx);
+    drm_gem_unlock_reservations((struct drm_gem_object **)job->bos, 1, &acquire_ctx);

     wmb(); /* Flush write combining buffers */
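The change above narrows the fence bookkeeping to the command buffer: the job's done_fence is now attached only to the command buffer's reservation (bo, rather than every entry in job->bos), so only one reservation needs to be locked and given a fence slot. The two-step dance exists because dma_resv_add_fence() must not fail, so dma_resv_reserve_fences() performs the fallible allocation first, while the reservation is locked. A minimal userspace sketch of that reserve-then-add discipline (the types and helpers here are illustrative, not kernel API):

#include <stdio.h>
#include <stdlib.h>

struct fence_list {
    void **slots;
    size_t count, capacity;
};

/* The only step allowed to fail: grow capacity up front. */
static int reserve_fences(struct fence_list *fl, size_t extra)
{
    void **tmp = realloc(fl->slots, (fl->capacity + extra) * sizeof(*tmp));

    if (!tmp)
        return -1;
    fl->slots = tmp;
    fl->capacity += extra;
    return 0;
}

/* Must not fail: a slot was reserved earlier. */
static void add_fence(struct fence_list *fl, void *fence)
{
    fl->slots[fl->count++] = fence;
}

int main(void)
{
    struct fence_list fl = { 0 };
    int done_fence = 42;

    if (reserve_fences(&fl, 1))
        return 1;
    add_fence(&fl, &done_fence);
    printf("fences attached: %zu\n", fl.count);
    free(fl.slots);
    return 0;
}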
@@ -140,32 +140,28 @@ int ivpu_pm_suspend_cb(struct device *dev)
 {
     struct drm_device *drm = dev_get_drvdata(dev);
     struct ivpu_device *vdev = to_ivpu_device(drm);
-    int ret;
+    unsigned long timeout;

     ivpu_dbg(vdev, PM, "Suspend..\n");

-    ret = ivpu_suspend(vdev);
-    if (ret && vdev->pm->suspend_reschedule_counter) {
-        ivpu_dbg(vdev, PM, "Failed to enter idle, rescheduling suspend, retries left %d\n",
-                 vdev->pm->suspend_reschedule_counter);
-        pm_schedule_suspend(dev, vdev->timeout.reschedule_suspend);
-        vdev->pm->suspend_reschedule_counter--;
-        return -EBUSY;
-    } else if (!vdev->pm->suspend_reschedule_counter) {
-        ivpu_warn(vdev, "Failed to enter idle, force suspend\n");
-        ivpu_pm_prepare_cold_boot(vdev);
-    } else {
-        ivpu_pm_prepare_warm_boot(vdev);
+    timeout = jiffies + msecs_to_jiffies(vdev->timeout.tdr);
+    while (!ivpu_hw_is_idle(vdev)) {
+        cond_resched();
+        if (time_after_eq(jiffies, timeout)) {
+            ivpu_err(vdev, "Failed to enter idle on system suspend\n");
+            return -EBUSY;
+        }
     }

-    vdev->pm->suspend_reschedule_counter = PM_RESCHEDULE_LIMIT;
+    ivpu_suspend(vdev);
+    ivpu_pm_prepare_warm_boot(vdev);

     pci_save_state(to_pci_dev(dev));
     pci_set_power_state(to_pci_dev(dev), PCI_D3hot);

     ivpu_dbg(vdev, PM, "Suspend done.\n");

-    return ret;
+    return 0;
 }

 int ivpu_pm_resume_cb(struct device *dev)
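The suspend path no longer reschedules itself with a retry counter; it now waits for the hardware to go idle, bounded by the TDR timeout, and fails the system suspend with -EBUSY if idle is never reached. A rough userspace sketch of that bounded poll loop (hw_is_idle() is a stub standing in for ivpu_hw_is_idle(), and the 2-second budget is illustrative):

#include <sched.h>
#include <stdio.h>
#include <time.h>

/* Stub for ivpu_hw_is_idle(): pretend the device idles after a few polls. */
static int hw_is_idle(void)
{
    static int polls;
    return ++polls > 3;
}

static long long now_ms(void)
{
    struct timespec ts;

    clock_gettime(CLOCK_MONOTONIC, &ts);
    return ts.tv_sec * 1000LL + ts.tv_nsec / 1000000;
}

int main(void)
{
    long long deadline = now_ms() + 2000;   /* ~2s budget, like timeout.tdr */

    while (!hw_is_idle()) {
        sched_yield();                      /* the driver uses cond_resched() */
        if (now_ms() >= deadline) {
            fprintf(stderr, "failed to enter idle\n");
            return 1;                       /* the driver returns -EBUSY */
        }
    }
    puts("idle, safe to suspend");
    return 0;
}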
@@ -232,7 +232,7 @@ static int intel_dp_dsc_mst_compute_link_config(struct intel_encoder *encoder,
         return slots;
     }

-    intel_link_compute_m_n(crtc_state->pipe_bpp,
+    intel_link_compute_m_n(crtc_state->dsc.compressed_bpp,
                            crtc_state->lane_count,
                            adjusted_mode->crtc_clock,
                            crtc_state->port_clock,
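The DP M/N values program the ratio between stream bandwidth and link bandwidth. With DSC enabled the link carries dsc.compressed_bpp bits per pixel rather than the uncompressed pipe_bpp, so deriving M/N from pipe_bpp overstated the stream rate. A small standalone illustration of the difference, assuming the usual data_m = bpp * pixel_clock, data_n = 8 * lanes * link_clock shape (the clock and link figures are made up):

#include <stdio.h>

int main(void)
{
    unsigned long pixel_clock = 594000;     /* kHz, a 4K@60 mode */
    unsigned long link_clock = 810000;      /* kHz per lane, HBR3 */
    unsigned int lanes = 4;
    unsigned int pipe_bpp = 30;             /* 10 bpc x 3 channels */
    unsigned int compressed_bpp = 12;       /* a typical DSC target */

    unsigned long long link = 8ULL * lanes * link_clock;

    printf("m/n from pipe bpp:       %.4f\n",
           (double)pipe_bpp * pixel_clock / link);
    printf("m/n from compressed bpp: %.4f\n",
           (double)compressed_bpp * pixel_clock / link);
    return 0;
}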
@@ -1067,11 +1067,12 @@ static vm_fault_t vm_fault_ttm(struct vm_fault *vmf)
             .interruptible = true,
             .no_wait_gpu = true, /* should be idle already */
         };
+        int err;

         GEM_BUG_ON(!bo->ttm || !(bo->ttm->page_flags & TTM_TT_FLAG_SWAPPED));

-        ret = ttm_bo_validate(bo, i915_ttm_sys_placement(), &ctx);
-        if (ret) {
+        err = ttm_bo_validate(bo, i915_ttm_sys_placement(), &ctx);
+        if (err) {
             dma_resv_unlock(bo->base.resv);
             return VM_FAULT_SIGBUS;
         }
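The sparse fix is a type cleanup rather than a behavioural change: ttm_bo_validate() returns a plain int, while the ret already in scope in vm_fault_ttm() is a vm_fault_t, which sparse treats as a distinct bitwise type. Routing the int return value through the new local err keeps the two types apart and silences the warning.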
@@ -2018,6 +2018,8 @@ process_csb(struct intel_engine_cs *engine, struct i915_request **inactive)
      * inspecting the queue to see if we need to resumbit.
      */
     if (*prev != *execlists->active) { /* elide lite-restores */
+        struct intel_context *prev_ce = NULL, *active_ce = NULL;
+
         /*
          * Note the inherent discrepancy between the HW runtime,
          * recorded as part of the context switch, and the CPU
@@ -2029,9 +2031,15 @@ process_csb(struct intel_engine_cs *engine, struct i915_request **inactive)
          * and correct overselves later when updating from HW.
          */
         if (*prev)
-            lrc_runtime_stop((*prev)->context);
+            prev_ce = (*prev)->context;
         if (*execlists->active)
-            lrc_runtime_start((*execlists->active)->context);
+            active_ce = (*execlists->active)->context;
+
+        if (prev_ce != active_ce) {
+            if (prev_ce)
+                lrc_runtime_stop(prev_ce);
+            if (active_ce)
+                lrc_runtime_start(active_ce);
+        }

         new_timeslice(execlists);
     }
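The subtlety here is that *prev and *execlists->active can name different requests while still pointing at the same intel_context, and in that case stopping and immediately restarting the context's runtime clock misattributes the switch overhead. The fix only cycles the clock when the context itself changes. A toy model of that accounting, with a fake clock and illustrative names:

#include <stdio.h>

struct ctx {
    long long total, started_at;
};

static long long fake_now;  /* stand-in for a monotonic timestamp */

static void runtime_start(struct ctx *ce) { ce->started_at = fake_now; }
static void runtime_stop(struct ctx *ce)  { ce->total += fake_now - ce->started_at; }

static void csb_switch(struct ctx *prev, struct ctx *active)
{
    if (prev != active) {       /* only cycle the clock on a real switch */
        if (prev)
            runtime_stop(prev);
        if (active)
            runtime_start(active);
    }
}

int main(void)
{
    struct ctx a = { 0 }, b = { 0 };

    csb_switch(NULL, &a);
    fake_now = 10;
    csb_switch(&a, &a);     /* same context resubmitted: clock untouched */
    fake_now = 25;
    csb_switch(&a, &b);     /* real switch: a accrues 25 */
    fake_now = 30;
    csb_switch(&b, NULL);
    printf("a=%lld b=%lld\n", a.total, b.total);    /* a=25 b=5 */
    return 0;
}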
@@ -235,6 +235,13 @@ static void delayed_huc_load_fini(struct intel_huc *huc)
     i915_sw_fence_fini(&huc->delayed_load.fence);
 }

+int intel_huc_sanitize(struct intel_huc *huc)
+{
+    delayed_huc_load_complete(huc);
+    intel_uc_fw_sanitize(&huc->fw);
+    return 0;
+}
+
 static bool vcs_supported(struct intel_gt *gt)
 {
     intel_engine_mask_t mask = gt->info.engine_mask;
@@ -41,6 +41,7 @@ struct intel_huc {
     } delayed_load;
 };

+int intel_huc_sanitize(struct intel_huc *huc);
 void intel_huc_init_early(struct intel_huc *huc);
 int intel_huc_init(struct intel_huc *huc);
 void intel_huc_fini(struct intel_huc *huc);
@@ -54,12 +55,6 @@ bool intel_huc_is_authenticated(struct intel_huc *huc);
 void intel_huc_register_gsc_notifier(struct intel_huc *huc, struct bus_type *bus);
 void intel_huc_unregister_gsc_notifier(struct intel_huc *huc, struct bus_type *bus);

-static inline int intel_huc_sanitize(struct intel_huc *huc)
-{
-    intel_uc_fw_sanitize(&huc->fw);
-    return 0;
-}
-
 static inline bool intel_huc_is_supported(struct intel_huc *huc)
 {
     return intel_uc_fw_is_supported(&huc->fw);
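These two HuC hunks go together: intel_huc_sanitize() was a static inline in intel_huc.h that only sanitized the firmware state, which meant a GT reset could leave the delayed-load fence and timer armed. Moving it into intel_huc.c lets it call delayed_huc_load_complete() first, cancelling the pending delayed load before the firmware state is sanitized, which is exactly the "Cancel HuC delayed load timer on reset" fix from the shortlog.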
@@ -4638,13 +4638,13 @@ int i915_perf_add_config_ioctl(struct drm_device *dev, void *data,
         err = oa_config->id;
         goto sysfs_err;
     }
-
-    mutex_unlock(&perf->metrics_lock);
+    id = oa_config->id;

     drm_dbg(&perf->i915->drm,
             "Added config %s id=%i\n", oa_config->uuid, oa_config->id);
+    mutex_unlock(&perf->metrics_lock);

-    return oa_config->id;
+    return id;

 sysfs_err:
     mutex_unlock(&perf->metrics_lock);
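The race: once perf->metrics_lock is dropped, a concurrent remove-config ioctl can free oa_config, so the old return oa_config->id could read freed memory. The fix snapshots the id (and does the debug print) while the lock is still held, then returns the copy. The same pattern in a self-contained userspace form (illustrative only, not the i915 code):

#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

struct config {
    int id;
};

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;

static int add_config(struct config *cfg)
{
    int id;

    pthread_mutex_lock(&lock);
    /* ... publish cfg to a shared table here ... */
    id = cfg->id;               /* snapshot while the lock is held */
    pthread_mutex_unlock(&lock);

    /* After unlock, a concurrent remove may free cfg, so returning
     * cfg->id here would be the use-after-free. */
    return id;
}

int main(void)
{
    struct config *cfg = malloc(sizeof(*cfg));

    cfg->id = 7;
    printf("added config id=%d\n", add_config(cfg));
    free(cfg);
    return 0;
}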
@@ -363,6 +363,35 @@ nv50_outp_atomic_check_view(struct drm_encoder *encoder,
     return 0;
 }

+static void
+nv50_outp_atomic_fix_depth(struct drm_encoder *encoder, struct drm_crtc_state *crtc_state)
+{
+    struct nv50_head_atom *asyh = nv50_head_atom(crtc_state);
+    struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
+    struct drm_display_mode *mode = &asyh->state.adjusted_mode;
+    unsigned int max_rate, mode_rate;
+
+    switch (nv_encoder->dcb->type) {
+    case DCB_OUTPUT_DP:
+        max_rate = nv_encoder->dp.link_nr * nv_encoder->dp.link_bw;
+
+        /* we don't support more than 10 anyway */
+        asyh->or.bpc = min_t(u8, asyh->or.bpc, 10);
+
+        /* reduce the bpc until it works out */
+        while (asyh->or.bpc > 6) {
+            mode_rate = DIV_ROUND_UP(mode->clock * asyh->or.bpc * 3, 8);
+            if (mode_rate <= max_rate)
+                break;
+
+            asyh->or.bpc -= 2;
+        }
+        break;
+    default:
+        break;
+    }
+}
+
 static int
 nv50_outp_atomic_check(struct drm_encoder *encoder,
                        struct drm_crtc_state *crtc_state,
@@ -381,6 +410,9 @@ nv50_outp_atomic_check(struct drm_encoder *encoder,
     if (crtc_state->mode_changed || crtc_state->connectors_changed)
         asyh->or.bpc = connector->display_info.bpc;

+    /* We might have to reduce the bpc */
+    nv50_outp_atomic_fix_depth(encoder, crtc_state);
+
     return 0;
 }
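nv50_outp_atomic_fix_depth() keeps the mode but lowers the colour depth: the required bandwidth scales as mode clock x (bpc x 3) bits per pixel / 8, and the loop steps bpc down 10 -> 8 -> 6 until the mode fits the DP link. A worked standalone version of the same arithmetic (the link figure, 2 lanes of HBR3, is illustrative):

#include <stdio.h>

#define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

int main(void)
{
    unsigned int clock = 533250;        /* kHz, a 4K@60 reduced-blanking mode */
    unsigned int max_rate = 2 * 810000; /* link_nr * link_bw: 2 lanes of HBR3 */
    unsigned int bpc = 10;              /* already capped at 10 */

    while (bpc > 6) {
        unsigned int mode_rate = DIV_ROUND_UP(clock * bpc * 3, 8);

        if (mode_rate <= max_rate)
            break;
        bpc -= 2;   /* reduce the depth until the mode fits */
    }
    printf("final bpc: %u\n", bpc);     /* prints 8 for these figures */
    return 0;
}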
@@ -263,8 +263,6 @@ nouveau_dp_irq(struct work_struct *work)
 }

 /* TODO:
- * - Use the minimum possible BPC here, once we add support for the max bpc
- *   property.
  * - Validate against the DP caps advertised by the GPU (we don't check these
  *   yet)
  */
@@ -276,7 +274,11 @@ nv50_dp_mode_valid(struct drm_connector *connector,
 {
     const unsigned int min_clock = 25000;
     unsigned int max_rate, mode_rate, ds_max_dotclock, clock = mode->clock;
-    const u8 bpp = connector->display_info.bpc * 3;
+    /* Check with the minmum bpc always, so we can advertise better modes.
+     * In particlar not doing this causes modes to be dropped on HDR
+     * displays as we might check with a bpc of 16 even.
+     */
+    const u8 bpp = 6 * 3;

     if (mode->flags & DRM_MODE_FLAG_INTERLACE && !outp->caps.dp_interlace)
         return MODE_NO_INTERLACE;
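This pairs with nv50_outp_atomic_fix_depth() above: mode_valid runs before the atomic check gets a chance to lower the depth, so it should only reject modes that cannot work at any supported depth. Validating at the minimum of 6 bpc (18 bpp) keeps modes that were previously dropped on HDR panels, where display_info.bpc can be as high as 16.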
@@ -504,6 +504,7 @@ static int panfrost_mmu_map_fault_addr(struct panfrost_device *pfdev, int as,
             if (IS_ERR(pages[i])) {
                 mutex_unlock(&bo->base.pages_lock);
                 ret = PTR_ERR(pages[i]);
+                pages[i] = NULL;
                 goto err_pages;
             }
         }
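After the failure, pages[i] holds an ERR_PTR-encoded error rather than a page pointer, and the err_pages teardown that walks the array would otherwise hand that error value to the page-release code as if it were a valid struct page. Clearing the slot keeps every entry either NULL or a real page.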