Commit f9915b96 authored by Linus Torvalds

Merge tag 'drm-next-2020-10-19' of git://anongit.freedesktop.org/drm/drm

Pull drm fixes from Dave Airlie:
 "Some fixes queued up already for i915 and amdgpu, I've also included
  the fix for the clang warning you've seen.

  i915:
   - set all unused color plane offsets to ~0xfff again (Ville)
   - fix TGL DKL PHY DP vswing handling (Ville)

  amdgpu:
   - DCN clang warning fix
   - eDP fix
   - BACO fix
   - kernel documentation fixes
   - SMU7 mclk fix
   - VCN1 hw bug workaround

  amdkfd:
   - kvfree vs kfree fix"

* tag 'drm-next-2020-10-19' of git://anongit.freedesktop.org/drm/drm:
  drm/amd/display: Fix incorrect dsc force enable logic
  drm/amdkfd: Use kvfree in destroy_crat_image
  drm/amdgpu: vcn and jpeg ring synchronization
  drm/amd/pm: increase mclk switch threshold to 200 us
  docs: amdgpu: fix a warning when building the documentation
  drm/amd/display: kernel-doc: document force_timing_sync
  drm/amdgpu/swsmu: init the baco mutex in early_init
  drm/amd/display: Fix module load hangs when connected to an eDP
  drm/i915: Set all unused color plane offsets to ~0xfff again
  drm/i915: Fix TGL DKL PHY DP vswing handling
parents 5c7e3f3f 40b99050
......@@ -206,8 +206,8 @@ pp_power_profile_mode
.. kernel-doc:: drivers/gpu/drm/amd/pm/amdgpu_pm.c
:doc: pp_power_profile_mode
*_busy_percent
~~~~~~~~~~~~~~
\*_busy_percent
~~~~~~~~~~~~~~~
.. kernel-doc:: drivers/gpu/drm/amd/pm/amdgpu_pm.c
:doc: gpu_busy_percent
......
......@@ -68,6 +68,7 @@ int amdgpu_vcn_sw_init(struct amdgpu_device *adev)
INIT_DELAYED_WORK(&adev->vcn.idle_work, amdgpu_vcn_idle_work_handler);
mutex_init(&adev->vcn.vcn_pg_lock);
mutex_init(&adev->vcn.vcn1_jpeg1_workaround);
atomic_set(&adev->vcn.total_submission_cnt, 0);
for (i = 0; i < adev->vcn.num_vcn_inst; i++)
atomic_set(&adev->vcn.inst[i].dpg_enc_submission_cnt, 0);
......@@ -237,6 +238,7 @@ int amdgpu_vcn_sw_fini(struct amdgpu_device *adev)
}
release_firmware(adev->vcn.fw);
mutex_destroy(&adev->vcn.vcn1_jpeg1_workaround);
mutex_destroy(&adev->vcn.vcn_pg_lock);
return 0;
......
......@@ -220,6 +220,7 @@ struct amdgpu_vcn {
struct amdgpu_vcn_inst inst[AMDGPU_MAX_VCN_INSTANCES];
struct amdgpu_vcn_reg internal;
struct mutex vcn_pg_lock;
struct mutex vcn1_jpeg1_workaround;
atomic_t total_submission_cnt;
unsigned harvest_config;
......
......@@ -33,6 +33,7 @@
static void jpeg_v1_0_set_dec_ring_funcs(struct amdgpu_device *adev);
static void jpeg_v1_0_set_irq_funcs(struct amdgpu_device *adev);
static void jpeg_v1_0_ring_begin_use(struct amdgpu_ring *ring);
static void jpeg_v1_0_decode_ring_patch_wreg(struct amdgpu_ring *ring, uint32_t *ptr, uint32_t reg_offset, uint32_t val)
{
......@@ -564,8 +565,8 @@ static const struct amdgpu_ring_funcs jpeg_v1_0_decode_ring_vm_funcs = {
.insert_start = jpeg_v1_0_decode_ring_insert_start,
.insert_end = jpeg_v1_0_decode_ring_insert_end,
.pad_ib = amdgpu_ring_generic_pad_ib,
.begin_use = vcn_v1_0_ring_begin_use,
.end_use = amdgpu_vcn_ring_end_use,
.begin_use = jpeg_v1_0_ring_begin_use,
.end_use = vcn_v1_0_ring_end_use,
.emit_wreg = jpeg_v1_0_decode_ring_emit_wreg,
.emit_reg_wait = jpeg_v1_0_decode_ring_emit_reg_wait,
.emit_reg_write_reg_wait = amdgpu_ring_emit_reg_write_reg_wait_helper,
......@@ -586,3 +587,22 @@ static void jpeg_v1_0_set_irq_funcs(struct amdgpu_device *adev)
{
adev->jpeg.inst->irq.funcs = &jpeg_v1_0_irq_funcs;
}
static void jpeg_v1_0_ring_begin_use(struct amdgpu_ring *ring)
{
struct amdgpu_device *adev = ring->adev;
bool set_clocks = !cancel_delayed_work_sync(&adev->vcn.idle_work);
int cnt = 0;
mutex_lock(&adev->vcn.vcn1_jpeg1_workaround);
if (amdgpu_fence_wait_empty(&adev->vcn.inst->ring_dec))
DRM_ERROR("JPEG dec: vcn dec ring may not be empty\n");
for (cnt = 0; cnt < adev->vcn.num_enc_rings; cnt++) {
if (amdgpu_fence_wait_empty(&adev->vcn.inst->ring_enc[cnt]))
DRM_ERROR("JPEG dec: vcn enc ring[%d] may not be empty\n", cnt);
}
vcn_v1_0_set_pg_for_begin_use(ring, set_clocks);
}
......@@ -54,6 +54,7 @@ static int vcn_v1_0_pause_dpg_mode(struct amdgpu_device *adev,
int inst_idx, struct dpg_pause_state *new_state);
static void vcn_v1_0_idle_work_handler(struct work_struct *work);
static void vcn_v1_0_ring_begin_use(struct amdgpu_ring *ring);
/**
* vcn_v1_0_early_init - set function pointers
......@@ -1804,11 +1805,24 @@ static void vcn_v1_0_idle_work_handler(struct work_struct *work)
}
}
void vcn_v1_0_ring_begin_use(struct amdgpu_ring *ring)
static void vcn_v1_0_ring_begin_use(struct amdgpu_ring *ring)
{
struct amdgpu_device *adev = ring->adev;
bool set_clocks = !cancel_delayed_work_sync(&adev->vcn.idle_work);
mutex_lock(&adev->vcn.vcn1_jpeg1_workaround);
if (amdgpu_fence_wait_empty(&ring->adev->jpeg.inst->ring_dec))
DRM_ERROR("VCN dec: jpeg dec ring may not be empty\n");
vcn_v1_0_set_pg_for_begin_use(ring, set_clocks);
}
void vcn_v1_0_set_pg_for_begin_use(struct amdgpu_ring *ring, bool set_clocks)
{
struct amdgpu_device *adev = ring->adev;
if (set_clocks) {
amdgpu_gfx_off_ctrl(adev, false);
if (adev->pm.dpm_enabled)
......@@ -1844,6 +1858,12 @@ void vcn_v1_0_ring_begin_use(struct amdgpu_ring *ring)
}
}
void vcn_v1_0_ring_end_use(struct amdgpu_ring *ring)
{
schedule_delayed_work(&ring->adev->vcn.idle_work, VCN_IDLE_TIMEOUT);
mutex_unlock(&ring->adev->vcn.vcn1_jpeg1_workaround);
}
static const struct amd_ip_funcs vcn_v1_0_ip_funcs = {
.name = "vcn_v1_0",
.early_init = vcn_v1_0_early_init,
......@@ -1891,7 +1911,7 @@ static const struct amdgpu_ring_funcs vcn_v1_0_dec_ring_vm_funcs = {
.insert_end = vcn_v1_0_dec_ring_insert_end,
.pad_ib = amdgpu_ring_generic_pad_ib,
.begin_use = vcn_v1_0_ring_begin_use,
.end_use = amdgpu_vcn_ring_end_use,
.end_use = vcn_v1_0_ring_end_use,
.emit_wreg = vcn_v1_0_dec_ring_emit_wreg,
.emit_reg_wait = vcn_v1_0_dec_ring_emit_reg_wait,
.emit_reg_write_reg_wait = amdgpu_ring_emit_reg_write_reg_wait_helper,
......@@ -1923,7 +1943,7 @@ static const struct amdgpu_ring_funcs vcn_v1_0_enc_ring_vm_funcs = {
.insert_end = vcn_v1_0_enc_ring_insert_end,
.pad_ib = amdgpu_ring_generic_pad_ib,
.begin_use = vcn_v1_0_ring_begin_use,
.end_use = amdgpu_vcn_ring_end_use,
.end_use = vcn_v1_0_ring_end_use,
.emit_wreg = vcn_v1_0_enc_ring_emit_wreg,
.emit_reg_wait = vcn_v1_0_enc_ring_emit_reg_wait,
.emit_reg_write_reg_wait = amdgpu_ring_emit_reg_write_reg_wait_helper,
......
......@@ -24,7 +24,8 @@
#ifndef __VCN_V1_0_H__
#define __VCN_V1_0_H__
void vcn_v1_0_ring_begin_use(struct amdgpu_ring *ring);
void vcn_v1_0_ring_end_use(struct amdgpu_ring *ring);
void vcn_v1_0_set_pg_for_begin_use(struct amdgpu_ring *ring, bool set_clocks);
extern const struct amdgpu_ip_block_version vcn_v1_0_ip_block;
......
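Taken together, the amdgpu_vcn, jpeg_v1_0 and vcn_v1_0 hunks above implement the "VCN1 hw bug workaround" from the merge summary: each engine's begin_use callback takes the shared vcn1_jpeg1_workaround mutex and drains the other engine's outstanding fences, and end_use releases the mutex after re-arming the idle work. Below is a small user-space analogue of that locking pattern, illustrative only and not part of the commit; the engine names and the wait_until_idle() helper are hypothetical stand-ins, not amdgpu API.

/* User-space sketch of the serialization pattern above (not part of the
 * commit).  One mutex is held across any submission on either engine, so
 * decode work on the two blocks can never be in flight at the same time. */
#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t vcn1_jpeg1_workaround = PTHREAD_MUTEX_INITIALIZER;

static void wait_until_idle(const char *other_engine)
{
	/* The real code waits for the other engine's fences to signal
	 * (amdgpu_fence_wait_empty); here it is only a placeholder. */
	printf("draining %s\n", other_engine);
}

static void ring_begin_use(const char *self, const char *other)
{
	pthread_mutex_lock(&vcn1_jpeg1_workaround);  /* exclude the other engine  */
	wait_until_idle(other);                      /* make sure it is quiescent */
	printf("%s: submitting\n", self);
}

static void ring_end_use(const char *self)
{
	printf("%s: done\n", self);
	pthread_mutex_unlock(&vcn1_jpeg1_workaround); /* allow the other engine   */
}

int main(void)
{
	ring_begin_use("vcn_dec", "jpeg_dec");
	ring_end_use("vcn_dec");
	ring_begin_use("jpeg_dec", "vcn_dec");
	ring_end_use("jpeg_dec");
	return 0;
}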
......@@ -1426,5 +1426,5 @@ int kfd_create_crat_image_virtual(void **crat_image, size_t *size,
*/
void kfd_destroy_crat_image(void *crat_image)
{
kfree(crat_image);
kvfree(crat_image);
}
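The kfree() to kvfree() change above matters because the CRAT image may come from a kvmalloc-style allocation, which can silently fall back to vmalloc memory that plain kfree() must never be handed; kvfree() frees either kind correctly. A minimal kernel-flavored sketch of the pairing rule follows; it is an illustration under that assumption, not the actual CRAT allocation path, and make_table()/destroy_table() are hypothetical names.

#include <linux/mm.h>    /* kvmalloc(), kvfree() */
#include <linux/slab.h>  /* kmalloc(), kfree()   */

/* Hypothetical example of the allocate/free pairing (not kfd code). */
static void *make_table(size_t size)
{
	/* kvmalloc() tries kmalloc() first and falls back to vmalloc() for
	 * large or fragmented allocations, so the caller cannot know which
	 * allocator actually backed the buffer. */
	return kvmalloc(size, GFP_KERNEL);
}

static void destroy_table(void *table)
{
	/* kvfree() handles both kmalloc- and vmalloc-backed memory; a plain
	 * kfree() here would be wrong if the vmalloc fallback was taken. */
	kvfree(table);
}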
......@@ -149,6 +149,8 @@ struct amdgpu_dm_backlight_caps {
* @cached_state: Caches device atomic state for suspend/resume
* @cached_dc_state: Cached state of content streams
* @compressor: Frame buffer compression buffer. See &struct dm_comressor_info
* @force_timing_sync: set via debugfs. When set, indicates that all connected
* displays will be forced to synchronize.
*/
struct amdgpu_display_manager {
......
......@@ -647,7 +647,7 @@ static void try_disable_dsc(struct drm_atomic_state *state,
for (i = 0; i < count; i++) {
if (vars[i].dsc_enabled
&& vars[i].bpp_x16 == params[i].bw_range.max_target_bpp_x16
&& !params[i].clock_force_enable == DSC_CLK_FORCE_DEFAULT) {
&& params[i].clock_force_enable == DSC_CLK_FORCE_DEFAULT) {
kbps_increase[i] = params[i].bw_range.stream_kbps - params[i].bw_range.max_kbps;
tried[i] = false;
remaining_to_try += 1;
......
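The one-character change above is a precedence fix, and the pattern is exactly what clang's -Wlogical-not-parentheses warning catches (the "DCN clang warning fix" in the merge summary): ! binds tighter than ==, so the old condition compared the negated flag against DSC_CLK_FORCE_DEFAULT instead of testing the flag itself. A tiny standalone illustration, assuming DSC_CLK_FORCE_DEFAULT is the zero value and redeclaring the enum locally for the example:

#include <assert.h>

/* Local stand-in for the DC enum, assumed to start at 0. */
enum dsc_clock_force { DSC_CLK_FORCE_DEFAULT = 0, DSC_CLK_FORCE_ENABLE = 1 };

int main(void)
{
	enum dsc_clock_force force = DSC_CLK_FORCE_ENABLE;

	/* "!force == DSC_CLK_FORCE_DEFAULT" parses as "(!force) == 0", which
	 * is true whenever the flag is non-zero -- the opposite of the intent. */
	assert((!force == DSC_CLK_FORCE_DEFAULT) == 1);

	/* The fixed form tests the flag directly. */
	assert((force == DSC_CLK_FORCE_DEFAULT) == 0);
	return 0;
}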
......@@ -848,7 +848,7 @@ static void disable_vbios_mode_if_required(
struct dc *dc,
struct dc_state *context)
{
unsigned int i;
unsigned int i, j;
/* check if timing_changed, disable stream*/
for (i = 0; i < dc->res_pool->pipe_count; i++) {
......@@ -872,10 +872,10 @@ static void disable_vbios_mode_if_required(
enc_inst = link->link_enc->funcs->get_dig_frontend(link->link_enc);
if (enc_inst != ENGINE_ID_UNKNOWN) {
for (i = 0; i < dc->res_pool->stream_enc_count; i++) {
if (dc->res_pool->stream_enc[i]->id == enc_inst) {
tg_inst = dc->res_pool->stream_enc[i]->funcs->dig_source_otg(
dc->res_pool->stream_enc[i]);
for (j = 0; j < dc->res_pool->stream_enc_count; j++) {
if (dc->res_pool->stream_enc[j]->id == enc_inst) {
tg_inst = dc->res_pool->stream_enc[j]->funcs->dig_source_otg(
dc->res_pool->stream_enc[j]);
break;
}
}
......
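The i to j rename above fixes inner-loop index reuse in disable_vbios_mode_if_required(): the nested search over stream encoders was clobbering the outer pipe-iteration counter, so the outer loop could skip or repeat pipes after the first match. A small self-contained illustration of the pitfall, with invented data:

#include <stdio.h>

int main(void)
{
	const int pipes[] = { 10, 20, 30, 40 };
	const int encoders[] = { 7, 20, 9 };
	int matches = 0;
	int i, j;

	for (i = 0; i < 4; i++) {
		/* Reusing "i" for this inner search (as the old code did) would
		 * overwrite the outer counter and derail the pipe iteration;
		 * a separate index keeps the two loops independent. */
		for (j = 0; j < 3; j++) {
			if (encoders[j] == pipes[i]) {
				matches++;
				break;
			}
		}
	}
	printf("matches: %d\n", matches); /* prints "matches: 1" */
	return 0;
}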
......@@ -2883,7 +2883,7 @@ static int smu7_vblank_too_short(struct pp_hwmgr *hwmgr,
if (hwmgr->is_kicker)
switch_limit_us = data->is_memory_gddr5 ? 450 : 150;
else
switch_limit_us = data->is_memory_gddr5 ? 190 : 150;
switch_limit_us = data->is_memory_gddr5 ? 200 : 150;
break;
case CHIP_VEGAM:
switch_limit_us = 30;
......
......@@ -417,6 +417,9 @@ static int smu_early_init(void *handle)
smu->pm_enabled = !!amdgpu_dpm;
smu->is_apu = false;
mutex_init(&smu->mutex);
mutex_init(&smu->smu_baco.mutex);
smu->smu_baco.state = SMU_BACO_STATE_EXIT;
smu->smu_baco.platform_support = false;
return smu_set_funcs(adev);
}
......@@ -795,10 +798,6 @@ static int smu_sw_init(void *handle)
bitmap_zero(smu->smu_feature.enabled, SMU_FEATURE_MAX);
bitmap_zero(smu->smu_feature.allowed, SMU_FEATURE_MAX);
mutex_init(&smu->smu_baco.mutex);
smu->smu_baco.state = SMU_BACO_STATE_EXIT;
smu->smu_baco.platform_support = false;
mutex_init(&smu->sensor_lock);
mutex_init(&smu->metrics_lock);
mutex_init(&smu->message_lock);
......
......@@ -2742,7 +2742,7 @@ tgl_dkl_phy_ddi_vswing_sequence(struct intel_encoder *encoder, int link_clock,
u32 n_entries, val, ln, dpcnt_mask, dpcnt_val;
int rate = 0;
if (type == INTEL_OUTPUT_HDMI) {
if (type != INTEL_OUTPUT_HDMI) {
struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
rate = intel_dp->link_rate;
......
......@@ -4093,8 +4093,7 @@ static int skl_check_ccs_aux_surface(struct intel_plane_state *plane_state)
int skl_check_plane_surface(struct intel_plane_state *plane_state)
{
const struct drm_framebuffer *fb = plane_state->hw.fb;
int ret;
bool needs_aux = false;
int ret, i;
ret = intel_plane_compute_gtt(plane_state);
if (ret)
......@@ -4108,7 +4107,6 @@ int skl_check_plane_surface(struct intel_plane_state *plane_state)
* it.
*/
if (is_ccs_modifier(fb->modifier)) {
needs_aux = true;
ret = skl_check_ccs_aux_surface(plane_state);
if (ret)
return ret;
......@@ -4116,21 +4114,16 @@ int skl_check_plane_surface(struct intel_plane_state *plane_state)
if (intel_format_info_is_yuv_semiplanar(fb->format,
fb->modifier)) {
needs_aux = true;
ret = skl_check_nv12_aux_surface(plane_state);
if (ret)
return ret;
}
if (!needs_aux) {
int i;
for (i = 1; i < fb->format->num_planes; i++) {
for (i = fb->format->num_planes; i < ARRAY_SIZE(plane_state->color_plane); i++) {
plane_state->color_plane[i].offset = ~0xfff;
plane_state->color_plane[i].x = 0;
plane_state->color_plane[i].y = 0;
}
}
ret = skl_check_main_surface(plane_state);
if (ret)
......