Commit 0844708a authored by Dave Airlie

Merge tag 'amd-drm-fixes-5.13-2021-05-05' of https://gitlab.freedesktop.org/agd5f/linux into drm-next

amd-drm-fixes-5.13-2021-05-05:

amdgpu:
- MPO hang workaround
- Fix for concurrent VM flushes on vega/navi
- dcefclk is not adjustable on navi1x and newer
- MST HPD debugfs fix
- Suspend/resume fixes
- Register VGA clients late in case driver fails to load
- Fix GEM leak in user framebuffer create
- Add support for polaris12 with 32 bit memory interface
- Fix duplicate cursor issue when using overlay
- Fix corruption with tiled surfaces on VCN3
- Add BO size and stride check to fix BO size verification

radeon:
- Fix off-by-one in power state parsing
- Fix possible memory leak in power state parsing
Signed-off-by: Dave Airlie <airlied@redhat.com>
From: Alex Deucher <alexander.deucher@amd.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20210506033929.3875-1-alexander.deucher@amd.com
parents 59e528c5 234055fd
@@ -3410,19 +3410,6 @@ int amdgpu_device_init(struct amdgpu_device *adev,
 	/* doorbell bar mapping and doorbell index init*/
 	amdgpu_device_doorbell_init(adev);
 
-	/* if we have > 1 VGA cards, then disable the amdgpu VGA resources */
-	/* this will fail for cards that aren't VGA class devices, just
-	 * ignore it */
-	if ((adev->pdev->class >> 8) == PCI_CLASS_DISPLAY_VGA)
-		vga_client_register(adev->pdev, adev, NULL, amdgpu_device_vga_set_decode);
-
-	if (amdgpu_device_supports_px(ddev)) {
-		px = true;
-		vga_switcheroo_register_client(adev->pdev,
-					       &amdgpu_switcheroo_ops, px);
-		vga_switcheroo_init_domain_pm_ops(adev->dev, &adev->vga_pm_domain);
-	}
-
 	if (amdgpu_emu_mode == 1) {
 		/* post the asic on emulation mode */
 		emu_soc_asic_init(adev);
@@ -3619,6 +3606,19 @@ int amdgpu_device_init(struct amdgpu_device *adev,
 	if (amdgpu_device_cache_pci_state(adev->pdev))
 		pci_restore_state(pdev);
 
+	/* if we have > 1 VGA cards, then disable the amdgpu VGA resources */
+	/* this will fail for cards that aren't VGA class devices, just
+	 * ignore it */
+	if ((adev->pdev->class >> 8) == PCI_CLASS_DISPLAY_VGA)
+		vga_client_register(adev->pdev, adev, NULL, amdgpu_device_vga_set_decode);
+
+	if (amdgpu_device_supports_px(ddev)) {
+		px = true;
+		vga_switcheroo_register_client(adev->pdev,
+					       &amdgpu_switcheroo_ops, px);
+		vga_switcheroo_init_domain_pm_ops(adev->dev, &adev->vga_pm_domain);
+	}
+
 	if (adev->gmc.xgmi.pending_reset)
 		queue_delayed_work(system_wq, &mgpu_info.delayed_reset_work,
 				   msecs_to_jiffies(AMDGPU_RESUME_MS));
@@ -3630,8 +3630,6 @@ int amdgpu_device_init(struct amdgpu_device *adev,
 
 failed:
 	amdgpu_vf_error_trans_all(adev);
-	if (px)
-		vga_switcheroo_fini_domain_pm_ops(adev->dev);
-
 failed_unmap:
 	iounmap(adev->rmmio);
......
@@ -837,6 +837,174 @@ static int convert_tiling_flags_to_modifier(struct amdgpu_framebuffer *afb)
 	return 0;
 }
 
+static void get_block_dimensions(unsigned int block_log2, unsigned int cpp,
+				 unsigned int *width, unsigned int *height)
+{
+	unsigned int cpp_log2 = ilog2(cpp);
+	unsigned int pixel_log2 = block_log2 - cpp_log2;
+	unsigned int width_log2 = (pixel_log2 + 1) / 2;
+	unsigned int height_log2 = pixel_log2 - width_log2;
+
+	*width = 1 << width_log2;
+	*height = 1 << height_log2;
+}
+
+static unsigned int get_dcc_block_size(uint64_t modifier, bool rb_aligned,
+				       bool pipe_aligned)
+{
+	unsigned int ver = AMD_FMT_MOD_GET(TILE_VERSION, modifier);
+
+	switch (ver) {
+	case AMD_FMT_MOD_TILE_VER_GFX9: {
+		/*
+		 * TODO: for pipe aligned we may need to check the alignment of the
+		 * total size of the surface, which may need to be bigger than the
+		 * natural alignment due to some HW workarounds
+		 */
+		return max(10 + (rb_aligned ? (int)AMD_FMT_MOD_GET(RB, modifier) : 0), 12);
+	}
+	case AMD_FMT_MOD_TILE_VER_GFX10:
+	case AMD_FMT_MOD_TILE_VER_GFX10_RBPLUS: {
+		int pipes_log2 = AMD_FMT_MOD_GET(PIPE_XOR_BITS, modifier);
+
+		if (ver == AMD_FMT_MOD_TILE_VER_GFX10_RBPLUS && pipes_log2 > 1 &&
+		    AMD_FMT_MOD_GET(PACKERS, modifier) == pipes_log2)
+			++pipes_log2;
+
+		return max(8 + (pipe_aligned ? pipes_log2 : 0), 12);
+	}
+	default:
+		return 0;
+	}
+}
+
+static int amdgpu_display_verify_plane(struct amdgpu_framebuffer *rfb, int plane,
+				       const struct drm_format_info *format,
+				       unsigned int block_width, unsigned int block_height,
+				       unsigned int block_size_log2)
+{
+	unsigned int width = rfb->base.width /
+		((plane && plane < format->num_planes) ? format->hsub : 1);
+	unsigned int height = rfb->base.height /
+		((plane && plane < format->num_planes) ? format->vsub : 1);
+	unsigned int cpp = plane < format->num_planes ? format->cpp[plane] : 1;
+	unsigned int block_pitch = block_width * cpp;
+	unsigned int min_pitch = ALIGN(width * cpp, block_pitch);
+	unsigned int block_size = 1 << block_size_log2;
+	uint64_t size;
+
+	if (rfb->base.pitches[plane] % block_pitch) {
+		drm_dbg_kms(rfb->base.dev,
+			    "pitch %d for plane %d is not a multiple of block pitch %d\n",
+			    rfb->base.pitches[plane], plane, block_pitch);
+		return -EINVAL;
+	}
+	if (rfb->base.pitches[plane] < min_pitch) {
+		drm_dbg_kms(rfb->base.dev,
+			    "pitch %d for plane %d is less than minimum pitch %d\n",
+			    rfb->base.pitches[plane], plane, min_pitch);
+		return -EINVAL;
+	}
+
+	/* Force at least natural alignment. */
+	if (rfb->base.offsets[plane] % block_size) {
+		drm_dbg_kms(rfb->base.dev,
+			    "offset 0x%x for plane %d is not a multiple of block pitch 0x%x\n",
+			    rfb->base.offsets[plane], plane, block_size);
+		return -EINVAL;
+	}
+
+	size = rfb->base.offsets[plane] +
+		(uint64_t)rfb->base.pitches[plane] / block_pitch *
+		block_size * DIV_ROUND_UP(height, block_height);
+
+	if (rfb->base.obj[0]->size < size) {
+		drm_dbg_kms(rfb->base.dev,
+			    "BO size 0x%zx is less than 0x%llx required for plane %d\n",
+			    rfb->base.obj[0]->size, size, plane);
+		return -EINVAL;
+	}
+
+	return 0;
+}
+
+static int amdgpu_display_verify_sizes(struct amdgpu_framebuffer *rfb)
+{
+	const struct drm_format_info *format_info = drm_format_info(rfb->base.format->format);
+	uint64_t modifier = rfb->base.modifier;
+	int ret;
+	unsigned int i, block_width, block_height, block_size_log2;
+
+	if (!rfb->base.dev->mode_config.allow_fb_modifiers)
+		return 0;
+
+	for (i = 0; i < format_info->num_planes; ++i) {
+		if (modifier == DRM_FORMAT_MOD_LINEAR) {
+			block_width = 256 / format_info->cpp[i];
+			block_height = 1;
+			block_size_log2 = 8;
+		} else {
+			int swizzle = AMD_FMT_MOD_GET(TILE, modifier);
+
+			switch ((swizzle & ~3) + 1) {
+			case DC_SW_256B_S:
+				block_size_log2 = 8;
+				break;
+			case DC_SW_4KB_S:
+			case DC_SW_4KB_S_X:
+				block_size_log2 = 12;
+				break;
+			case DC_SW_64KB_S:
+			case DC_SW_64KB_S_T:
+			case DC_SW_64KB_S_X:
+				block_size_log2 = 16;
+				break;
+			default:
+				drm_dbg_kms(rfb->base.dev,
+					    "Swizzle mode with unknown block size: %d\n", swizzle);
+				return -EINVAL;
+			}
+
+			get_block_dimensions(block_size_log2, format_info->cpp[i],
+					     &block_width, &block_height);
+		}
+
+		ret = amdgpu_display_verify_plane(rfb, i, format_info,
+						  block_width, block_height, block_size_log2);
+		if (ret)
+			return ret;
+	}
+
+	if (AMD_FMT_MOD_GET(DCC, modifier)) {
+		if (AMD_FMT_MOD_GET(DCC_RETILE, modifier)) {
+			block_size_log2 = get_dcc_block_size(modifier, false, false);
+			get_block_dimensions(block_size_log2 + 8, format_info->cpp[0],
+					     &block_width, &block_height);
+			ret = amdgpu_display_verify_plane(rfb, i, format_info,
+							  block_width, block_height,
+							  block_size_log2);
+			if (ret)
+				return ret;
+
+			++i;
+			block_size_log2 = get_dcc_block_size(modifier, true, true);
+		} else {
+			bool pipe_aligned = AMD_FMT_MOD_GET(DCC_PIPE_ALIGN, modifier);
+
+			block_size_log2 = get_dcc_block_size(modifier, true, pipe_aligned);
+		}
+		get_block_dimensions(block_size_log2 + 8, format_info->cpp[0],
+				     &block_width, &block_height);
+		ret = amdgpu_display_verify_plane(rfb, i, format_info,
+						  block_width, block_height, block_size_log2);
+		if (ret)
+			return ret;
+	}
+
+	return 0;
+}
+
 static int amdgpu_display_get_fb_info(const struct amdgpu_framebuffer *amdgpu_fb,
 				      uint64_t *tiling_flags, bool *tmz_surface)
 {
@@ -902,10 +1070,8 @@ int amdgpu_display_gem_fb_verify_and_init(
 	int ret;
 
 	rfb->base.obj[0] = obj;
-	/* Verify that bo size can fit the fb size. */
-	ret = drm_gem_fb_init_with_funcs(dev, &rfb->base, file_priv, mode_cmd,
-					 &amdgpu_fb_funcs);
+	drm_helper_mode_fill_fb_struct(dev, &rfb->base, mode_cmd);
+	ret = drm_framebuffer_init(dev, &rfb->base, &amdgpu_fb_funcs);
 	if (ret)
 		goto err;
 	/* Verify that the modifier is supported. */
@@ -967,9 +1133,12 @@ int amdgpu_display_framebuffer_init(struct drm_device *dev,
 		}
 	}
 
-	for (i = 1; i < rfb->base.format->num_planes; ++i) {
+	ret = amdgpu_display_verify_sizes(rfb);
+	if (ret)
+		return ret;
+
+	for (i = 0; i < rfb->base.format->num_planes; ++i) {
 		drm_gem_object_get(rfb->base.obj[0]);
-		drm_gem_object_put(rfb->base.obj[i]);
 		rfb->base.obj[i] = rfb->base.obj[0];
 	}
@@ -999,6 +1168,7 @@ amdgpu_display_user_framebuffer_create(struct drm_device *dev,
 	domains = amdgpu_display_supported_domains(drm_to_adev(dev), bo->flags);
 	if (obj->import_attach && !(domains & AMDGPU_GEM_DOMAIN_GTT)) {
 		drm_dbg_kms(dev, "Cannot create framebuffer from imported dma_buf\n");
+		drm_gem_object_put(obj);
 		return ERR_PTR(-EINVAL);
 	}
@@ -1412,7 +1582,7 @@ int amdgpu_display_suspend_helper(struct amdgpu_device *adev)
 			}
 		}
 	}
-	return r;
+	return 0;
 }
 
 int amdgpu_display_resume_helper(struct amdgpu_device *adev)
......
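As context for the new BO size verification above, here is a minimal standalone sketch (userspace C, not driver code) of the block-dimension arithmetic in get_block_dimensions() and the minimum pitch derived from it. ilog2() is approximated with __builtin_ctz(), which is equivalent for the power-of-two cpp values used here; the sample numbers (64KB block, 32bpp, 1920-pixel width) are illustrative only.

```c
#include <stdio.h>

/* Standalone restatement of get_block_dimensions(): split a block's
 * pixel capacity (block bytes / bytes per pixel) into a power-of-two
 * width x height, biasing the extra bit toward width. */
static void get_block_dimensions(unsigned int block_log2, unsigned int cpp,
                                 unsigned int *width, unsigned int *height)
{
	unsigned int cpp_log2 = __builtin_ctz(cpp);	/* ilog2() for power-of-two cpp */
	unsigned int pixel_log2 = block_log2 - cpp_log2;
	unsigned int width_log2 = (pixel_log2 + 1) / 2;
	unsigned int height_log2 = pixel_log2 - width_log2;

	*width = 1u << width_log2;
	*height = 1u << height_log2;
}

int main(void)
{
	unsigned int w, h;

	/* 64KB swizzle block (DC_SW_64KB_S*, block_size_log2 == 16), 32bpp */
	get_block_dimensions(16, 4, &w, &h);
	printf("block: %ux%u pixels\n", w, h);		/* 128x128 */

	/* Minimum pitch for a 1920-pixel-wide plane: round the byte width
	 * up to a whole number of blocks, as amdgpu_display_verify_plane()
	 * does with ALIGN(width * cpp, block_pitch). */
	unsigned int block_pitch = w * 4;		/* 512 bytes */
	unsigned int min_pitch = (1920 * 4 + block_pitch - 1) / block_pitch * block_pitch;
	printf("min pitch: %u bytes\n", min_pitch);	/* 7680 */
	return 0;
}
```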
@@ -1573,6 +1573,9 @@ static int amdgpu_pmops_runtime_resume(struct device *dev)
 		amdgpu_device_baco_exit(drm_dev);
 	}
 	ret = amdgpu_device_resume(drm_dev, false);
+	if (ret)
+		return ret;
+
 	if (amdgpu_device_supports_px(drm_dev))
 		drm_dev->switch_power_state = DRM_SWITCH_POWER_ON;
 	adev->in_runpm = false;
......
@@ -215,7 +215,11 @@ static int amdgpu_vmid_grab_idle(struct amdgpu_vm *vm,
 	/* Check if we have an idle VMID */
 	i = 0;
 	list_for_each_entry((*idle), &id_mgr->ids_lru, list) {
-		fences[i] = amdgpu_sync_peek_fence(&(*idle)->active, ring);
+		/* Don't use per engine and per process VMID at the same time */
+		struct amdgpu_ring *r = adev->vm_manager.concurrent_flush ?
+			NULL : ring;
+
+		fences[i] = amdgpu_sync_peek_fence(&(*idle)->active, r);
 		if (!fences[i])
 			break;
 		++i;
@@ -281,7 +285,7 @@ static int amdgpu_vmid_grab_reserved(struct amdgpu_vm *vm,
 	if (updates && (*id)->flushed_updates &&
 	    updates->context == (*id)->flushed_updates->context &&
 	    !dma_fence_is_later(updates, (*id)->flushed_updates))
-	    updates = NULL;
+		updates = NULL;
 
 	if ((*id)->owner != vm->immediate.fence_context ||
 	    job->vm_pd_addr != (*id)->pd_gpu_addr ||
@@ -290,6 +294,10 @@ static int amdgpu_vmid_grab_reserved(struct amdgpu_vm *vm,
 	    !dma_fence_is_signaled((*id)->last_flush))) {
 		struct dma_fence *tmp;
 
+		/* Don't use per engine and per process VMID at the same time */
+		if (adev->vm_manager.concurrent_flush)
+			ring = NULL;
+
 		/* to prevent one context starved by another context */
 		(*id)->pd_gpu_addr = 0;
 		tmp = amdgpu_sync_peek_fence(&(*id)->active, ring);
@@ -365,12 +373,7 @@ static int amdgpu_vmid_grab_used(struct amdgpu_vm *vm,
 		if (updates && (!flushed || dma_fence_is_later(updates, flushed)))
 			needs_flush = true;
 
-		/* Concurrent flushes are only possible starting with Vega10 and
-		 * are broken on Navi10 and Navi14.
-		 */
-		if (needs_flush && (adev->asic_type < CHIP_VEGA10 ||
-				    adev->asic_type == CHIP_NAVI10 ||
-				    adev->asic_type == CHIP_NAVI14))
+		if (needs_flush && !adev->vm_manager.concurrent_flush)
 			continue;
 
 		/* Good, we can use this VMID. Remember this submission as
......
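The Vega/Navi fix above replaces per-call-site ASIC checks with a single adev->vm_manager.concurrent_flush flag, computed once at init (see the amdgpu_vm.c hunk below). A standalone sketch of the resulting decision logic; the enum here is a hypothetical stand-in for the driver's CHIP_* values, kept in the same relative order so the comparisons behave the same way.

```c
#include <stdbool.h>
#include <stdio.h>

/* Hypothetical stand-in for the driver's CHIP_* enum; only the
 * relative ordering used in the comparisons below matters. */
enum chip {
	CHIP_POLARIS12,
	CHIP_VEGA10,
	CHIP_NAVI10,
	CHIP_NAVI12,
	CHIP_NAVI14,
};

/* Mirrors the flag now computed once in amdgpu_vm_manager_init():
 * concurrent flushes are only possible starting with Vega10 and
 * are broken on Navi10 and Navi14. */
static bool concurrent_flush(enum chip asic_type)
{
	return !(asic_type < CHIP_VEGA10 ||
		 asic_type == CHIP_NAVI10 ||
		 asic_type == CHIP_NAVI14);
}

int main(void)
{
	/* A used VMID that would need a flush may only be grabbed when
	 * the ASIC can flush concurrently; otherwise the grab loop
	 * moves on to the next candidate. */
	bool needs_flush = true;

	if (needs_flush && !concurrent_flush(CHIP_NAVI10))
		puts("Navi10: skip this VMID");
	if (needs_flush && concurrent_flush(CHIP_NAVI12))
		puts("Navi12: reuse VMID, flush concurrently");
	return 0;
}
```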
@@ -3148,6 +3148,12 @@ void amdgpu_vm_manager_init(struct amdgpu_device *adev)
 {
 	unsigned i;
 
+	/* Concurrent flushes are only possible starting with Vega10 and
+	 * are broken on Navi10 and Navi14.
+	 */
+	adev->vm_manager.concurrent_flush = !(adev->asic_type < CHIP_VEGA10 ||
+					      adev->asic_type == CHIP_NAVI10 ||
+					      adev->asic_type == CHIP_NAVI14);
+
 	amdgpu_vmid_mgr_init(adev);
 
 	adev->vm_manager.fence_context =
......
@@ -331,6 +331,7 @@ struct amdgpu_vm_manager {
 	/* Handling of VMIDs */
 	struct amdgpu_vmid_mgr id_mgr[AMDGPU_MAX_VMHUBS];
 	unsigned int first_kfd_vmid;
+	bool concurrent_flush;
 
 	/* Handling of VM fences */
 	u64 fence_context;
......
@@ -59,6 +59,7 @@ MODULE_FIRMWARE("amdgpu/tonga_mc.bin");
 MODULE_FIRMWARE("amdgpu/polaris11_mc.bin");
 MODULE_FIRMWARE("amdgpu/polaris10_mc.bin");
 MODULE_FIRMWARE("amdgpu/polaris12_mc.bin");
+MODULE_FIRMWARE("amdgpu/polaris12_32_mc.bin");
 MODULE_FIRMWARE("amdgpu/polaris11_k_mc.bin");
 MODULE_FIRMWARE("amdgpu/polaris10_k_mc.bin");
 MODULE_FIRMWARE("amdgpu/polaris12_k_mc.bin");
@@ -243,10 +244,16 @@ static int gmc_v8_0_init_microcode(struct amdgpu_device *adev)
 		chip_name = "polaris10";
 		break;
 	case CHIP_POLARIS12:
-		if (ASICID_IS_P23(adev->pdev->device, adev->pdev->revision))
+		if (ASICID_IS_P23(adev->pdev->device, adev->pdev->revision)) {
 			chip_name = "polaris12_k";
-		else
-			chip_name = "polaris12";
+		} else {
+			WREG32(mmMC_SEQ_IO_DEBUG_INDEX, ixMC_IO_DEBUG_UP_159);
+			/* Polaris12 32bit ASIC needs a special MC firmware */
+			if (RREG32(mmMC_SEQ_IO_DEBUG_DATA) == 0x05b4dc40)
+				chip_name = "polaris12_32";
+			else
+				chip_name = "polaris12";
+		}
 		break;
 	case CHIP_FIJI:
 	case CHIP_CARRIZO:
......
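For the Polaris12 change above, a rough userspace sketch of how the chosen chip_name becomes a firmware path of the form the MODULE_FIRMWARE lines declare; in the driver this name feeds a request_firmware() lookup. The MC_SEQ_IO_DEBUG register probe is modeled here as a plain boolean parameter, so this is an assumption-laden illustration rather than driver code.

```c
#include <stdbool.h>
#include <stdio.h>

/* The hardware probe (WREG32/RREG32 pair checking for 0x05b4dc40)
 * is modeled as the has_32bit_mc flag. */
static const char *polaris12_chip_name(bool is_p23, bool has_32bit_mc)
{
	if (is_p23)
		return "polaris12_k";
	return has_32bit_mc ? "polaris12_32" : "polaris12";
}

int main(void)
{
	char fw_name[64];

	/* Build the firmware path in the "amdgpu/<chip>_mc.bin" form
	 * seen in the MODULE_FIRMWARE declarations above. */
	snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_mc.bin",
		 polaris12_chip_name(false, true));
	printf("%s\n", fw_name);	/* amdgpu/polaris12_32_mc.bin */
	return 0;
}
```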
@@ -589,6 +589,10 @@ static void vcn_v3_0_mc_resume_dpg_mode(struct amdgpu_device *adev, int inst_idx
 	WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
 			VCN, inst_idx, mmUVD_VCPU_NONCACHE_SIZE0),
 			AMDGPU_GPU_PAGE_ALIGN(sizeof(struct amdgpu_fw_shared)), 0, indirect);
+
+	/* VCN global tiling registers */
+	WREG32_SOC15_DPG_MODE(0, SOC15_DPG_MODE_OFFSET(
+		UVD, 0, mmUVD_GFX10_ADDR_CONFIG), adev->gfx.config.gb_addr_config, 0, indirect);
 }
 
 static void vcn_v3_0_disable_static_power_gating(struct amdgpu_device *adev, int inst)
......
@@ -4015,6 +4015,23 @@ static int fill_dc_scaling_info(const struct drm_plane_state *state,
 	scaling_info->src_rect.x = state->src_x >> 16;
 	scaling_info->src_rect.y = state->src_y >> 16;
 
+	/*
+	 * For reasons we don't (yet) fully understand a non-zero
+	 * src_y coordinate into an NV12 buffer can cause a
+	 * system hang. To avoid hangs (and maybe be overly cautious)
+	 * let's reject both non-zero src_x and src_y.
+	 *
+	 * We currently know of only one use-case to reproduce a
+	 * scenario with non-zero src_x and src_y for NV12, which
+	 * is to gesture the YouTube Android app into full screen
+	 * on ChromeOS.
+	 */
+	if (state->fb &&
+	    state->fb->format->format == DRM_FORMAT_NV12 &&
+	    (scaling_info->src_rect.x != 0 ||
+	     scaling_info->src_rect.y != 0))
+		return -EINVAL;
+
 	scaling_info->src_rect.width = state->src_w >> 16;
 	if (scaling_info->src_rect.width == 0)
 		return -EINVAL;
@@ -9869,6 +9886,53 @@ static int add_affected_mst_dsc_crtcs(struct drm_atomic_state *state, struct drm
 }
 #endif
 
+static int validate_overlay(struct drm_atomic_state *state)
+{
+	int i;
+	struct drm_plane *plane;
+	struct drm_plane_state *old_plane_state, *new_plane_state;
+	struct drm_plane_state *primary_state, *overlay_state = NULL;
+
+	/* Check if primary plane is contained inside overlay */
+	for_each_oldnew_plane_in_state_reverse(state, plane, old_plane_state, new_plane_state, i) {
+		if (plane->type == DRM_PLANE_TYPE_OVERLAY) {
+			if (drm_atomic_plane_disabling(plane->state, new_plane_state))
+				return 0;
+
+			overlay_state = new_plane_state;
+			continue;
+		}
+	}
+
+	/* check if we're making changes to the overlay plane */
+	if (!overlay_state)
+		return 0;
+
+	/* check if overlay plane is enabled */
+	if (!overlay_state->crtc)
+		return 0;
+
+	/* find the primary plane for the CRTC that the overlay is enabled on */
+	primary_state = drm_atomic_get_plane_state(state, overlay_state->crtc->primary);
+	if (IS_ERR(primary_state))
+		return PTR_ERR(primary_state);
+
+	/* check if primary plane is enabled */
+	if (!primary_state->crtc)
+		return 0;
+
+	/* Perform the bounds check to ensure the overlay plane covers the primary */
+	if (primary_state->crtc_x < overlay_state->crtc_x ||
+	    primary_state->crtc_y < overlay_state->crtc_y ||
+	    primary_state->crtc_x + primary_state->crtc_w > overlay_state->crtc_x + overlay_state->crtc_w ||
+	    primary_state->crtc_y + primary_state->crtc_h > overlay_state->crtc_y + overlay_state->crtc_h) {
+		DRM_DEBUG_ATOMIC("Overlay plane is enabled with hardware cursor but does not fully cover primary plane\n");
+		return -EINVAL;
+	}
+
+	return 0;
+}
+
 /**
  * amdgpu_dm_atomic_check() - Atomic check implementation for AMDgpu DM.
  * @dev: The DRM device
@@ -10043,6 +10107,10 @@ static int amdgpu_dm_atomic_check(struct drm_device *dev,
 			goto fail;
 		}
 
+		ret = validate_overlay(state);
+		if (ret)
+			goto fail;
+
 		/* Add new/modified planes */
 		for_each_oldnew_plane_in_state_reverse(state, plane, old_plane_state, new_plane_state, i) {
 			ret = dm_update_plane_state(dc, state, plane,
......
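The duplicate-cursor fix rejects atomic states where the overlay plane does not fully cover the primary plane. A self-contained sketch of the same containment test validate_overlay() applies to the plane crtc_x/crtc_y/crtc_w/crtc_h values, exercised with some sample geometry:

```c
#include <stdbool.h>
#include <stdio.h>

struct rect { int x, y, w, h; };

/* Primary must lie entirely inside the overlay for the hardware
 * cursor (which only draws on the top plane) to behave. */
static bool overlay_covers_primary(struct rect p, struct rect o)
{
	return p.x >= o.x && p.y >= o.y &&
	       p.x + p.w <= o.x + o.w &&
	       p.y + p.h <= o.y + o.h;
}

int main(void)
{
	struct rect primary = { 0, 0, 1920, 1080 };
	struct rect full    = { 0, 0, 1920, 1080 };
	struct rect partial = { 100, 100, 800, 600 };

	printf("full-screen overlay: %s\n",
	       overlay_covers_primary(primary, full) ? "ok" : "-EINVAL");
	printf("partial overlay:     %s\n",
	       overlay_covers_primary(primary, partial) ? "ok" : "-EINVAL");
	return 0;
}
```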
@@ -3012,7 +3012,7 @@ static int trigger_hpd_mst_set(void *data, u64 val)
 			if (!aconnector->dc_link)
 				continue;
 
-			if (!(aconnector->port && &aconnector->mst_port->mst_mgr))
+			if (!aconnector->mst_port)
 				continue;
 
 			link = aconnector->dc_link;
......
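The MST HPD debugfs fix replaces a condition that could never catch the broken case: taking the address of a member through a pointer yields a non-NULL value even when the pointer itself is NULL, so the old `&aconnector->mst_port->mst_mgr` term was always truthy and only `port` was really checked. A small illustration (pedantically, `&p->member` on a NULL `p` is undefined behavior in C, but compilers evaluate it as base-plus-offset, which is exactly why the old check always passed):

```c
#include <stdio.h>

struct mst_mgr { int dummy; };
struct mst_port { int flags; struct mst_mgr mst_mgr; };

int main(void)
{
	struct mst_port *mst_port = NULL;

	/* No dereference occurs here: the address is computed as
	 * base + offsetof(struct mst_port, mst_mgr), so it is
	 * non-NULL even though mst_port is NULL. */
	struct mst_mgr *m = &mst_port->mst_mgr;

	printf("&mst_port->mst_mgr = %p (never NULL)\n", (void *)m);
	printf("mst_port           = %p (the real check)\n", (void *)mst_port);
	return 0;
}
```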
@@ -451,7 +451,7 @@ static ssize_t amdgpu_get_pp_cur_state(struct device *dev,
 	struct drm_device *ddev = dev_get_drvdata(dev);
 	struct amdgpu_device *adev = drm_to_adev(ddev);
 	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
-	struct pp_states_info data;
+	struct pp_states_info data = {0};
 	enum amd_pm_state_type pm = 0;
 	int i = 0, ret = 0;
 
@@ -1893,6 +1893,14 @@ static int default_attr_update(struct amdgpu_device *adev, struct amdgpu_device_
 		}
 	}
 
+	if (DEVICE_ATTR_IS(pp_dpm_dcefclk)) {
+		/* SMU MP1 does not support dcefclk level setting */
+		if (asic_type >= CHIP_NAVI10) {
+			dev_attr->attr.mode &= ~S_IWUGO;
+			dev_attr->store = NULL;
+		}
+	}
+
 #undef DEVICE_ATTR_IS
 
 	return 0;
......
@@ -1443,7 +1443,6 @@ static int navi10_force_clk_levels(struct smu_context *smu,
 	case SMU_SOCCLK:
 	case SMU_MCLK:
 	case SMU_UCLK:
-	case SMU_DCEFCLK:
 	case SMU_FCLK:
 		/* There is only 2 levels for fine grained DPM */
 		if (navi10_is_support_fine_grained_dpm(smu, clk_type)) {
@@ -1463,6 +1462,10 @@ static int navi10_force_clk_levels(struct smu_context *smu,
 		if (ret)
 			return size;
 		break;
+	case SMU_DCEFCLK:
+		dev_info(smu->adev->dev,"Setting DCEFCLK min/max dpm level is not supported!\n");
+		break;
+
 	default:
 		break;
 	}
......
@@ -1127,7 +1127,6 @@ static int sienna_cichlid_force_clk_levels(struct smu_context *smu,
 	case SMU_SOCCLK:
 	case SMU_MCLK:
 	case SMU_UCLK:
-	case SMU_DCEFCLK:
 	case SMU_FCLK:
 		/* There is only 2 levels for fine grained DPM */
 		if (sienna_cichlid_is_support_fine_grained_dpm(smu, clk_type)) {
@@ -1147,6 +1146,9 @@ static int sienna_cichlid_force_clk_levels(struct smu_context *smu,
 		if (ret)
 			goto forec_level_out;
 		break;
+	case SMU_DCEFCLK:
+		dev_info(smu->adev->dev,"Setting DCEFCLK min/max dpm level is not supported!\n");
+		break;
 	default:
 		break;
 	}
......
@@ -2120,11 +2120,14 @@ static int radeon_atombios_parse_power_table_1_3(struct radeon_device *rdev)
 		return state_index;
 	/* last mode is usually default, array is low to high */
 	for (i = 0; i < num_modes; i++) {
-		rdev->pm.power_state[state_index].clock_info =
-			kcalloc(1, sizeof(struct radeon_pm_clock_info),
-				GFP_KERNEL);
+		/* avoid memory leaks from invalid modes or unknown frev. */
+		if (!rdev->pm.power_state[state_index].clock_info) {
+			rdev->pm.power_state[state_index].clock_info =
+				kzalloc(sizeof(struct radeon_pm_clock_info),
+					GFP_KERNEL);
+		}
 		if (!rdev->pm.power_state[state_index].clock_info)
-			return state_index;
+			goto out;
 		rdev->pm.power_state[state_index].num_clock_modes = 1;
 		rdev->pm.power_state[state_index].clock_info[0].voltage.type = VOLTAGE_NONE;
 		switch (frev) {
@@ -2243,17 +2246,24 @@ static int radeon_atombios_parse_power_table_1_3(struct radeon_device *rdev)
 			break;
 		}
 	}
+out:
+	/* free any unused clock_info allocation. */
+	if (state_index && state_index < num_modes) {
+		kfree(rdev->pm.power_state[state_index].clock_info);
+		rdev->pm.power_state[state_index].clock_info = NULL;
+	}
+
 	/* last mode is usually default */
-	if (rdev->pm.default_power_state_index == -1) {
+	if (state_index && rdev->pm.default_power_state_index == -1) {
 		rdev->pm.power_state[state_index - 1].type =
 			POWER_STATE_TYPE_DEFAULT;
 		rdev->pm.default_power_state_index = state_index - 1;
 		rdev->pm.power_state[state_index - 1].default_clock_mode =
 			&rdev->pm.power_state[state_index - 1].clock_info[0];
-		rdev->pm.power_state[state_index].flags &=
+		rdev->pm.power_state[state_index - 1].flags &=
 			~RADEON_PM_STATE_SINGLE_DISPLAY_ONLY;
-		rdev->pm.power_state[state_index].misc = 0;
-		rdev->pm.power_state[state_index].misc2 = 0;
+		rdev->pm.power_state[state_index - 1].misc = 0;
+		rdev->pm.power_state[state_index - 1].misc2 = 0;
 	}
 	return state_index;
 }
......
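A toy illustration of the radeon off-by-one being fixed above: after the parsing loop, state_index counts the filled entries, so the "last mode is usually default" bookkeeping must index state_index - 1; indexing state_index writes one slot past the last parsed state, which is a heap overwrite when the array is full. The array size and loop count here are arbitrary.

```c
#include <stdio.h>

struct power_state { unsigned int type, flags, misc, misc2; };

int main(void)
{
	struct power_state power_state[4] = { 0 };
	int state_index = 0;

	/* parse three modes; state_index ends up counting filled slots */
	for (int i = 0; i < 3; i++)
		power_state[state_index++].flags = 1;

	/* before the fix: touches the slot *after* the last parsed state;
	 * with a full array this would write past the allocation */
	power_state[state_index].misc = 0;

	/* after the fix: adjusts the state that was actually parsed last */
	power_state[state_index - 1].misc = 0;

	printf("last parsed state: index %d\n", state_index - 1);
	return 0;
}
```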