Commit 77dd1143 authored by Linus Torvalds

Merge tag 'drm-fixes-2021-08-27' of git://anongit.freedesktop.org/drm/drm

Pull drm fixes from Dave Airlie:
 "Last set of fixes for 5.14, nothing major a couple of i915, couple of
  imx and a few amdgpu. All pretty small.

  i915:
   - Fix syncmap memory leak
   - Drop redundant display port debug print

  amdgpu:
   - Fix for pinning display buffers multiple times
   - Fix delayed work handling for GFXOFF
   - Fix build when CONFIG_SUSPEND is not set

  imx:
   - fix planar offset calculations
   - fix accidental partial revert"

* tag 'drm-fixes-2021-08-27' of git://anongit.freedesktop.org/drm/drm:
  drm/i915/dp: Drop redundant debug print
  drm/i915: Fix syncmap memory leak
  drm/amdgpu: Fix build with missing pm_suspend_target_state module export
  drm/amdgpu: Cancel delayed work when GFXOFF is disabled
  drm/amdgpu: use the preferred pin domain after the check
  drm/imx: ipuv3-plane: fix accidental partial revert of 8 pixel alignment fix
  gpu: ipu-v3: Fix i.MX IPU-v3 offset calculations for (semi)planar U/V formats
parents 73367f05 9fe4f5a2
...@@ -1040,7 +1040,7 @@ void amdgpu_acpi_detect(void) ...@@ -1040,7 +1040,7 @@ void amdgpu_acpi_detect(void)
*/ */
bool amdgpu_acpi_is_s0ix_supported(struct amdgpu_device *adev) bool amdgpu_acpi_is_s0ix_supported(struct amdgpu_device *adev)
{ {
#if IS_ENABLED(CONFIG_AMD_PMC) && IS_ENABLED(CONFIG_PM_SLEEP) #if IS_ENABLED(CONFIG_AMD_PMC) && IS_ENABLED(CONFIG_SUSPEND)
if (acpi_gbl_FADT.flags & ACPI_FADT_LOW_POWER_S0) { if (acpi_gbl_FADT.flags & ACPI_FADT_LOW_POWER_S0) {
if (adev->flags & AMD_IS_APU) if (adev->flags & AMD_IS_APU)
return pm_suspend_target_state == PM_SUSPEND_TO_IDLE; return pm_suspend_target_state == PM_SUSPEND_TO_IDLE;
......
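The guard change above works because pm_suspend_target_state is only provided when CONFIG_SUSPEND is set, while CONFIG_PM_SLEEP can be enabled by hibernation alone, so the #if has to match the config symbol that actually defines the variable. The standalone sketch below illustrates that guard discipline with made-up names (FEATURE_SUSPEND, suspend_target_state); it is not kernel code, just the same pattern in miniature:

    /* Minimal sketch: reference a conditionally built symbol only under the
     * same guard that defines it, otherwise the build breaks when the
     * feature is configured out. FEATURE_SUSPEND is a stand-in config flag. */
    #include <stdbool.h>
    #include <stdio.h>

    #define FEATURE_SUSPEND 1               /* comment out to "configure out" */

    #ifdef FEATURE_SUSPEND
    static int suspend_target_state = 1;    /* stand-in for pm_suspend_target_state */
    #endif

    static bool s0ix_supported(void)
    {
    #ifdef FEATURE_SUSPEND
        /* Guard matches the definition above, so this compiles either way. */
        return suspend_target_state == 1;
    #else
        return false;
    #endif
    }

    int main(void)
    {
        printf("s0ix supported: %d\n", s0ix_supported());
        return 0;
    }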
@@ -2777,12 +2777,11 @@ static void amdgpu_device_delay_enable_gfx_off(struct work_struct *work)
     struct amdgpu_device *adev =
         container_of(work, struct amdgpu_device, gfx.gfx_off_delay_work.work);
 
-    mutex_lock(&adev->gfx.gfx_off_mutex);
-    if (!adev->gfx.gfx_off_state && !adev->gfx.gfx_off_req_count) {
-        if (!amdgpu_dpm_set_powergating_by_smu(adev, AMD_IP_BLOCK_TYPE_GFX, true))
-            adev->gfx.gfx_off_state = true;
-    }
-    mutex_unlock(&adev->gfx.gfx_off_mutex);
+    WARN_ON_ONCE(adev->gfx.gfx_off_state);
+    WARN_ON_ONCE(adev->gfx.gfx_off_req_count);
+
+    if (!amdgpu_dpm_set_powergating_by_smu(adev, AMD_IP_BLOCK_TYPE_GFX, true))
+        adev->gfx.gfx_off_state = true;
 }
 
 /**
...
@@ -563,24 +563,38 @@ void amdgpu_gfx_off_ctrl(struct amdgpu_device *adev, bool enable)
     mutex_lock(&adev->gfx.gfx_off_mutex);
 
-    if (!enable)
-        adev->gfx.gfx_off_req_count++;
-    else if (adev->gfx.gfx_off_req_count > 0)
-        adev->gfx.gfx_off_req_count--;
-
-    if (enable && !adev->gfx.gfx_off_state && !adev->gfx.gfx_off_req_count) {
-        schedule_delayed_work(&adev->gfx.gfx_off_delay_work, GFX_OFF_DELAY_ENABLE);
-    } else if (!enable && adev->gfx.gfx_off_state) {
-        if (!amdgpu_dpm_set_powergating_by_smu(adev, AMD_IP_BLOCK_TYPE_GFX, false)) {
-            adev->gfx.gfx_off_state = false;
-
-            if (adev->gfx.funcs->init_spm_golden) {
-                dev_dbg(adev->dev, "GFXOFF is disabled, re-init SPM golden settings\n");
-                amdgpu_gfx_init_spm_golden(adev);
+    if (enable) {
+        /* If the count is already 0, it means there's an imbalance bug somewhere.
+         * Note that the bug may be in a different caller than the one which triggers the
+         * WARN_ON_ONCE.
+         */
+        if (WARN_ON_ONCE(adev->gfx.gfx_off_req_count == 0))
+            goto unlock;
+
+        adev->gfx.gfx_off_req_count--;
+
+        if (adev->gfx.gfx_off_req_count == 0 && !adev->gfx.gfx_off_state)
+            schedule_delayed_work(&adev->gfx.gfx_off_delay_work, GFX_OFF_DELAY_ENABLE);
+    } else {
+        if (adev->gfx.gfx_off_req_count == 0) {
+            cancel_delayed_work_sync(&adev->gfx.gfx_off_delay_work);
+
+            if (adev->gfx.gfx_off_state &&
+                !amdgpu_dpm_set_powergating_by_smu(adev, AMD_IP_BLOCK_TYPE_GFX, false)) {
+                adev->gfx.gfx_off_state = false;
+
+                if (adev->gfx.funcs->init_spm_golden) {
+                    dev_dbg(adev->dev,
+                        "GFXOFF is disabled, re-init SPM golden settings\n");
+                    amdgpu_gfx_init_spm_golden(adev);
+                }
             }
         }
+
+        adev->gfx.gfx_off_req_count++;
     }
 
+unlock:
     mutex_unlock(&adev->gfx.gfx_off_mutex);
 }
...
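Functionally, the rewritten amdgpu_gfx_off_ctrl() treats GFXOFF-disable requests as a reference count: the delayed enable is only scheduled when the count drops back to zero, the pending delayed work is cancelled synchronously on the first disable request, and an enable call with a zero count is flagged as a caller imbalance. The self-contained userspace sketch below mirrors that control flow under a pthread mutex; the printf calls stand in for the delayed work and SMU calls, and all names are illustrative rather than the real amdgpu API:

    #include <pthread.h>
    #include <stdbool.h>
    #include <stdio.h>

    static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
    static unsigned int off_req_count = 1;  /* like gfx_off_req_count, starts at 1 */
    static bool off_state;                  /* like gfx_off_state */

    static void gfx_off_ctrl(bool enable)
    {
        pthread_mutex_lock(&lock);

        if (enable) {
            /* Enable with a zero count means the callers are unbalanced. */
            if (off_req_count == 0) {
                fprintf(stderr, "imbalance: enable without matching disable\n");
                goto unlock;
            }

            if (--off_req_count == 0 && !off_state)
                printf("schedule delayed GFXOFF enable\n");
        } else {
            if (off_req_count == 0) {
                /* First disable request: stop any pending delayed enable,
                 * then leave the powered-off state if we are in it. */
                printf("cancel pending delayed enable, ungate GFX\n");
                off_state = false;
            }
            off_req_count++;
        }

    unlock:
        pthread_mutex_unlock(&lock);
    }

    int main(void)
    {
        gfx_off_ctrl(false); /* disable request: count 1 -> 2 */
        gfx_off_ctrl(true);  /* matching enable: count 2 -> 1 */
        gfx_off_ctrl(true);  /* releases the initial reference: count 1 -> 0 */
        return 0;
    }

The key behavioural change mirrored here is that the first disable request cancels the pending delayed enable instead of racing with it.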
@@ -920,11 +920,6 @@ int amdgpu_bo_pin_restricted(struct amdgpu_bo *bo, u32 domain,
         return -EINVAL;
     }
 
-    /* This assumes only APU display buffers are pinned with (VRAM|GTT).
-     * See function amdgpu_display_supported_domains()
-     */
-    domain = amdgpu_bo_get_preferred_pin_domain(adev, domain);
-
     if (bo->tbo.pin_count) {
         uint32_t mem_type = bo->tbo.resource->mem_type;
         uint32_t mem_flags = bo->tbo.resource->placement;
@@ -949,6 +944,11 @@ int amdgpu_bo_pin_restricted(struct amdgpu_bo *bo, u32 domain,
         return 0;
     }
 
+    /* This assumes only APU display buffers are pinned with (VRAM|GTT).
+     * See function amdgpu_display_supported_domains()
+     */
+    domain = amdgpu_bo_get_preferred_pin_domain(adev, domain);
+
     if (bo->tbo.base.import_attach)
         dma_buf_pin(bo->tbo.base.import_attach);
...
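The effect of moving the amdgpu_bo_get_preferred_pin_domain() call is ordering: when the BO is already pinned, the compatibility check now runs against the domain the caller actually requested, and the widening to the preferred pin domain (e.g. VRAM|GTT for APU display buffers) is applied only when the buffer is pinned for the first time. A toy illustration of that ordering follows; the names and domain flags are invented for the sketch and are not the amdgpu API:

    #include <stdio.h>

    #define DOMAIN_VRAM 0x1
    #define DOMAIN_GTT  0x2

    struct bo {
        int pin_count;
        unsigned int cur_domain;
    };

    static unsigned int preferred_pin_domain(unsigned int domain)
    {
        /* e.g. display buffers may be widened to VRAM|GTT on some systems */
        return domain == DOMAIN_VRAM ? (DOMAIN_VRAM | DOMAIN_GTT) : domain;
    }

    static int bo_pin(struct bo *bo, unsigned int domain)
    {
        if (bo->pin_count) {
            /* Check the caller's request, not the widened preference. */
            if (!(bo->cur_domain & domain))
                return -1;
            bo->pin_count++;
            return 0;
        }

        /* Widen only on the first pin, after the already-pinned check. */
        domain = preferred_pin_domain(domain);
        bo->cur_domain = domain;
        bo->pin_count = 1;
        return 0;
    }

    int main(void)
    {
        struct bo bo = { 0 };

        bo_pin(&bo, DOMAIN_VRAM);                            /* first pin, may widen */
        printf("repin GTT: %d\n", bo_pin(&bo, DOMAIN_GTT));  /* checked against current placement */
        return 0;
    }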
@@ -3850,23 +3850,18 @@ static void intel_dp_check_device_service_irq(struct intel_dp *intel_dp)
 
 static void intel_dp_check_link_service_irq(struct intel_dp *intel_dp)
 {
-    struct drm_i915_private *i915 = dp_to_i915(intel_dp);
     u8 val;
 
     if (intel_dp->dpcd[DP_DPCD_REV] < 0x11)
         return;
 
     if (drm_dp_dpcd_readb(&intel_dp->aux,
-                  DP_LINK_SERVICE_IRQ_VECTOR_ESI0, &val) != 1 || !val) {
-        drm_dbg_kms(&i915->drm, "Error in reading link service irq vector\n");
+                  DP_LINK_SERVICE_IRQ_VECTOR_ESI0, &val) != 1 || !val)
         return;
-    }
 
     if (drm_dp_dpcd_writeb(&intel_dp->aux,
-                   DP_LINK_SERVICE_IRQ_VECTOR_ESI0, val) != 1) {
-        drm_dbg_kms(&i915->drm, "Error in writing link service irq vector\n");
+                   DP_LINK_SERVICE_IRQ_VECTOR_ESI0, val) != 1)
         return;
-    }
 
     if (val & HDMI_LINK_STATUS_CHANGED)
         intel_dp_handle_hdmi_link_status_change(intel_dp);
...
@@ -127,6 +127,15 @@ static void intel_timeline_fini(struct rcu_head *rcu)
     i915_vma_put(timeline->hwsp_ggtt);
     i915_active_fini(&timeline->active);
+
+    /*
+     * A small race exists between intel_gt_retire_requests_timeout and
+     * intel_timeline_exit which could result in the syncmap not getting
+     * free'd. Rather than work to hard to seal this race, simply cleanup
+     * the syncmap on fini.
+     */
+    i915_syncmap_free(&timeline->sync);
+
     kfree(timeline);
 }
...
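The added i915_syncmap_free() call is a backstop for the race described in the comment: if the normal exit path did free the syncmap, freeing it again from fini must be harmless, which holds as long as the free helper resets the root pointer so a repeat call is a no-op (an assumption of this sketch, which uses a toy structure rather than the real i915 syncmap):

    /* Sketch of an idempotent free used as a cleanup backstop: the helper
     * takes a pointer to the root and resets it, so calling it from both the
     * exit path and fini is safe. Toy structure, illustrative names only. */
    #include <stdlib.h>

    struct node { struct node *next; };

    static void syncmap_free(struct node **root)
    {
        struct node *n = *root;

        while (n) {
            struct node *next = n->next;
            free(n);
            n = next;
        }
        *root = NULL;   /* a second call becomes a no-op */
    }

    int main(void)
    {
        struct node *map = calloc(1, sizeof(*map));

        syncmap_free(&map);  /* e.g. from the exit path */
        syncmap_free(&map);  /* backstop call from fini: harmless */
        return 0;
    }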
@@ -683,7 +683,7 @@ static void ipu_plane_atomic_update(struct drm_plane *plane,
         break;
     }
 
-    ipu_dmfc_config_wait4eot(ipu_plane->dmfc, drm_rect_width(dst));
+    ipu_dmfc_config_wait4eot(ipu_plane->dmfc, ALIGN(drm_rect_width(dst), 8));
 
     width = ipu_src_rect_width(new_state);
     height = drm_rect_height(&new_state->src) >> 16;
...
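ALIGN(drm_rect_width(dst), 8) rounds the destination width up to the next multiple of 8 pixels before it is handed to ipu_dmfc_config_wait4eot(). The tiny standalone example below shows that rounding with a local ALIGN_UP macro, which behaves like the kernel's ALIGN for power-of-two alignments:

    #include <stdio.h>

    /* Round x up to the next multiple of a (a must be a power of two). */
    #define ALIGN_UP(x, a)  (((x) + (a) - 1) & ~((a) - 1))

    int main(void)
    {
        printf("%d -> %d\n", 1366, ALIGN_UP(1366, 8));  /* 1366 -> 1368 */
        printf("%d -> %d\n", 1920, ALIGN_UP(1920, 8));  /* already aligned: 1920 */
        return 0;
    }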
@@ -585,21 +585,21 @@ static const struct ipu_rgb def_bgra_16 = {
     .bits_per_pixel = 16,
 };
 
-#define Y_OFFSET(pix, x, y)    ((x) + pix->width * (y))
-#define U_OFFSET(pix, x, y)    ((pix->width * pix->height) +       \
-                 (pix->width * ((y) / 2) / 2) + (x) / 2)
-#define V_OFFSET(pix, x, y)    ((pix->width * pix->height) +       \
-                 (pix->width * pix->height / 4) +  \
-                 (pix->width * ((y) / 2) / 2) + (x) / 2)
-#define U2_OFFSET(pix, x, y)   ((pix->width * pix->height) +       \
-                 (pix->width * (y) / 2) + (x) / 2)
-#define V2_OFFSET(pix, x, y)   ((pix->width * pix->height) +       \
-                 (pix->width * pix->height / 2) +  \
-                 (pix->width * (y) / 2) + (x) / 2)
-#define UV_OFFSET(pix, x, y)   ((pix->width * pix->height) +       \
-                 (pix->width * ((y) / 2)) + (x))
-#define UV2_OFFSET(pix, x, y)  ((pix->width * pix->height) +       \
-                 (pix->width * y) + (x))
+#define Y_OFFSET(pix, x, y)    ((x) + pix->bytesperline * (y))
+#define U_OFFSET(pix, x, y)    ((pix->bytesperline * pix->height) +    \
+                 (pix->bytesperline * ((y) / 2) / 2) + (x) / 2)
+#define V_OFFSET(pix, x, y)    ((pix->bytesperline * pix->height) +    \
+                 (pix->bytesperline * pix->height / 4) +   \
+                 (pix->bytesperline * ((y) / 2) / 2) + (x) / 2)
+#define U2_OFFSET(pix, x, y)   ((pix->bytesperline * pix->height) +    \
+                 (pix->bytesperline * (y) / 2) + (x) / 2)
+#define V2_OFFSET(pix, x, y)   ((pix->bytesperline * pix->height) +    \
+                 (pix->bytesperline * pix->height / 2) +   \
+                 (pix->bytesperline * (y) / 2) + (x) / 2)
+#define UV_OFFSET(pix, x, y)   ((pix->bytesperline * pix->height) +    \
+                 (pix->bytesperline * ((y) / 2)) + (x))
+#define UV2_OFFSET(pix, x, y)  ((pix->bytesperline * pix->height) +    \
+                 (pix->bytesperline * y) + (x))
 
 #define NUM_ALPHA_CHANNELS 7
...
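The switch from width to bytesperline matters whenever the frame is stored with a padded line stride: the chroma planes start at bytesperline * height, not width * height, and each row advances by the stride, so the two formulas diverge as soon as bytesperline != width. The small standalone example below (illustrative struct, only the U offset shown) makes the difference concrete:

    #include <stdio.h>

    struct pix {
        unsigned int width;
        unsigned int height;
        unsigned int bytesperline;
    };

    /* Same shape as the U_OFFSET macro above, once with width, once with stride. */
    #define U_OFFSET_W(p, x, y)  (((p)->width * (p)->height) + \
                                  ((p)->width * ((y) / 2) / 2) + (x) / 2)
    #define U_OFFSET_S(p, x, y)  (((p)->bytesperline * (p)->height) + \
                                  ((p)->bytesperline * ((y) / 2) / 2) + (x) / 2)

    int main(void)
    {
        /* 1366x768 I420 frame stored with a line stride padded to 1376 bytes. */
        struct pix p = { .width = 1366, .height = 768, .bytesperline = 1376 };

        printf("U offset via width:  %u\n", U_OFFSET_W(&p, 0, 0));  /* 1049088 */
        printf("U offset via stride: %u\n", U_OFFSET_S(&p, 0, 0));  /* 1056768 */
        return 0;
    }

With this 1366-pixel-wide frame padded to a 1376-byte stride, the width-based formula lands 7680 bytes short of where the U plane actually begins.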