Commit 185c3cfa authored by Dave Airlie

Merge branch 'drm-fixes-4.19' of git://people.freedesktop.org/~agd5f/linux into drm-fixes

Fixes for 4.19:
- SR-IOV fixes
- Kasan and page fault fix on device removal
- S3 stability fix for CZ/ST
- VCE regression fixes for CIK parts
- Avoid holding the mn_lock when allocating memory
- DC memory leak fix
- BO eviction fix
Signed-off-by: Dave Airlie <airlied@redhat.com>
From: Alex Deucher <alexdeucher@gmail.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20180829202555.2653-1-alexander.deucher@amd.com
parents 852fde0a 6ddd9769
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
@@ -1012,13 +1012,9 @@ static int amdgpu_cs_ib_fill(struct amdgpu_device *adev,
 		if (r)
 			return r;
 
-		if (chunk_ib->flags & AMDGPU_IB_FLAG_PREAMBLE) {
-			parser->job->preamble_status |= AMDGPU_PREAMBLE_IB_PRESENT;
-			if (!parser->ctx->preamble_presented) {
-				parser->job->preamble_status |= AMDGPU_PREAMBLE_IB_PRESENT_FIRST;
-				parser->ctx->preamble_presented = true;
-			}
-		}
+		if (chunk_ib->flags & AMDGPU_IB_FLAG_PREAMBLE)
+			parser->job->preamble_status |=
+				AMDGPU_PREAMBLE_IB_PRESENT;
 
 		if (parser->ring && parser->ring != ring)
 			return -EINVAL;
@@ -1207,26 +1203,24 @@ static int amdgpu_cs_submit(struct amdgpu_cs_parser *p,
 	int r;
 
+	job = p->job;
+	p->job = NULL;
+
+	r = drm_sched_job_init(&job->base, entity, p->filp);
+	if (r)
+		goto error_unlock;
+
+	/* No memory allocation is allowed while holding the mn lock */
 	amdgpu_mn_lock(p->mn);
 	amdgpu_bo_list_for_each_userptr_entry(e, p->bo_list) {
 		struct amdgpu_bo *bo = e->robj;
 
 		if (amdgpu_ttm_tt_userptr_needs_pages(bo->tbo.ttm)) {
-			amdgpu_mn_unlock(p->mn);
-			return -ERESTARTSYS;
+			r = -ERESTARTSYS;
+			goto error_abort;
 		}
 	}
 
-	job = p->job;
-	p->job = NULL;
-
-	r = drm_sched_job_init(&job->base, entity, p->filp);
-	if (r) {
-		amdgpu_job_free(job);
-		amdgpu_mn_unlock(p->mn);
-		return r;
-	}
-
 	job->owner = p->filp;
 	p->fence = dma_fence_get(&job->base.s_fence->finished);
@@ -1241,6 +1235,12 @@ static int amdgpu_cs_submit(struct amdgpu_cs_parser *p,
 	amdgpu_cs_post_dependencies(p);
 
+	if ((job->preamble_status & AMDGPU_PREAMBLE_IB_PRESENT) &&
+	    !p->ctx->preamble_presented) {
+		job->preamble_status |= AMDGPU_PREAMBLE_IB_PRESENT_FIRST;
+		p->ctx->preamble_presented = true;
+	}
+
 	cs->out.handle = seq;
 	job->uf_sequence = seq;
@@ -1258,6 +1258,15 @@ static int amdgpu_cs_submit(struct amdgpu_cs_parser *p,
 	amdgpu_mn_unlock(p->mn);
 
 	return 0;
+
+error_abort:
+	dma_fence_put(&job->base.s_fence->finished);
+	job->base.s_fence = NULL;
+
+error_unlock:
+	amdgpu_job_free(job);
+	amdgpu_mn_unlock(p->mn);
+	return r;
 }
 
 int amdgpu_cs_ioctl(struct drm_device *dev, void *data, struct drm_file *filp)
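The amdgpu_cs_submit() rework above is the "avoid holding the mn_lock when allocating memory" fix from the summary: drm_sched_job_init() can allocate, and an allocation may recurse into memory reclaim, which must never happen while the MMU-notifier lock is held. The job is therefore initialized before amdgpu_mn_lock() is taken, with the error paths unwinding in the opposite order. A minimal userspace sketch of the same allocate-then-lock pattern, using a pthread mutex in place of the kernel lock (the names and error codes below are illustrative, not kernel API):

#include <errno.h>
#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

static pthread_mutex_t mn_lock = PTHREAD_MUTEX_INITIALIZER;

struct job { int owner; };

static int submit(int owner, int userptr_invalid)
{
	struct job *job;
	int r;

	/* 1. do all work that may allocate while unlocked */
	job = malloc(sizeof(*job));
	if (!job)
		return -ENOMEM;

	/* 2. only allocation-free work under the lock */
	pthread_mutex_lock(&mn_lock);
	if (userptr_invalid) {
		r = -EAGAIN;	/* stands in for -ERESTARTSYS */
		goto error_abort;
	}
	job->owner = owner;
	pthread_mutex_unlock(&mn_lock);
	return 0;

error_abort:
	/* unwind in reverse order: drop the lock, then free */
	pthread_mutex_unlock(&mn_lock);
	free(job);
	return r;
}

int main(void)
{
	printf("ok path: %d\n", submit(1, 0));
	printf("abort path: %d\n", submit(1, 1));
	return 0;
}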
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c
@@ -164,8 +164,10 @@ int amdgpu_ib_schedule(struct amdgpu_ring *ring, unsigned num_ibs,
 		return r;
 	}
 
+	need_ctx_switch = ring->current_ctx != fence_ctx;
 	if (ring->funcs->emit_pipeline_sync && job &&
 	    ((tmp = amdgpu_sync_get_fence(&job->sched_sync, NULL)) ||
+	     (amdgpu_sriov_vf(adev) && need_ctx_switch) ||
 	     amdgpu_vm_need_pipeline_sync(ring, job))) {
 		need_pipe_sync = true;
 		dma_fence_put(tmp);
@@ -196,7 +198,6 @@ int amdgpu_ib_schedule(struct amdgpu_ring *ring, unsigned num_ibs,
 	}
 
 	skip_preamble = ring->current_ctx == fence_ctx;
-	need_ctx_switch = ring->current_ctx != fence_ctx;
 	if (job && ring->funcs->emit_cntxcntl) {
 		if (need_ctx_switch)
 			status |= AMDGPU_HAVE_CTX_SWITCH;
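need_ctx_switch is now computed before the pipeline-sync decision rather than after it, so the new condition can force a pipeline sync on SR-IOV virtual functions whenever the submission comes from a different context than the one currently on the ring; the later, now-redundant recomputation is dropped.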
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c
@@ -1932,14 +1932,6 @@ void amdgpu_pm_compute_clocks(struct amdgpu_device *adev)
 			amdgpu_fence_wait_empty(ring);
 	}
 
-	mutex_lock(&adev->pm.mutex);
-	/* update battery/ac status */
-	if (power_supply_is_system_supplied() > 0)
-		adev->pm.ac_power = true;
-	else
-		adev->pm.ac_power = false;
-	mutex_unlock(&adev->pm.mutex);
-
 	if (adev->powerplay.pp_funcs->dispatch_tasks) {
 		if (!amdgpu_device_has_dc_support(adev)) {
 			mutex_lock(&adev->pm.mutex);
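The battery/AC poll is dropped from amdgpu_pm_compute_clocks(), so ac_power is no longer re-read from the power supply on every clock recalculation; it is presumably kept current by the driver's ACPI AC-adapter event notifier instead, which makes the poll here redundant.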
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
@@ -172,6 +172,7 @@ static void amdgpu_vm_bo_base_init(struct amdgpu_vm_bo_base *base,
 		 * is validated on next vm use to avoid fault.
 		 * */
 		list_move_tail(&base->vm_status, &vm->evicted);
+		base->moved = true;
 	}
 }
 
 /**
@@ -369,7 +370,6 @@ static int amdgpu_vm_clear_bo(struct amdgpu_device *adev,
 	uint64_t addr;
 	int r;
 
-	addr = amdgpu_bo_gpu_offset(bo);
 	entries = amdgpu_bo_size(bo) / 8;
 
 	if (pte_support_ats) {
@@ -401,6 +401,7 @@ static int amdgpu_vm_clear_bo(struct amdgpu_device *adev,
 	if (r)
 		goto error;
 
+	addr = amdgpu_bo_gpu_offset(bo);
 	if (ats_entries) {
 		uint64_t ats_value;
 
@@ -2483,28 +2484,52 @@ static uint32_t amdgpu_vm_get_block_size(uint64_t vm_size)
  * amdgpu_vm_adjust_size - adjust vm size, block size and fragment size
  *
  * @adev: amdgpu_device pointer
- * @vm_size: the default vm size if it's set auto
+ * @min_vm_size: the minimum vm size in GB if it's set auto
  * @fragment_size_default: Default PTE fragment size
  * @max_level: max VMPT level
  * @max_bits: max address space size in bits
  *
  */
-void amdgpu_vm_adjust_size(struct amdgpu_device *adev, uint32_t vm_size,
+void amdgpu_vm_adjust_size(struct amdgpu_device *adev, uint32_t min_vm_size,
 			   uint32_t fragment_size_default, unsigned max_level,
 			   unsigned max_bits)
 {
+	unsigned int max_size = 1 << (max_bits - 30);
+	unsigned int vm_size;
 	uint64_t tmp;
 
 	/* adjust vm size first */
 	if (amdgpu_vm_size != -1) {
-		unsigned max_size = 1 << (max_bits - 30);
-
 		vm_size = amdgpu_vm_size;
 		if (vm_size > max_size) {
 			dev_warn(adev->dev, "VM size (%d) too large, max is %u GB\n",
 				 amdgpu_vm_size, max_size);
 			vm_size = max_size;
 		}
+	} else {
+		struct sysinfo si;
+		unsigned int phys_ram_gb;
+
+		/* Optimal VM size depends on the amount of physical
+		 * RAM available. Underlying requirements and
+		 * assumptions:
+		 *
+		 *  - Need to map system memory and VRAM from all GPUs
+		 *     - VRAM from other GPUs not known here
+		 *     - Assume VRAM <= system memory
+		 *  - On GFX8 and older, VM space can be segmented for
+		 *    different MTYPEs
+		 *  - Need to allow room for fragmentation, guard pages etc.
+		 *
+		 * This adds up to a rough guess of system memory x3.
+		 * Round up to power of two to maximize the available
+		 * VM size with the given page table size.
+		 */
+		si_meminfo(&si);
+		phys_ram_gb = ((uint64_t)si.totalram * si.mem_unit +
+			       (1 << 30) - 1) >> 30;
+		vm_size = roundup_pow_of_two(
+			min(max(phys_ram_gb * 3, min_vm_size), max_size));
 	}
 
 	adev->vm_manager.max_pfn = (uint64_t)vm_size << 18;
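With amdgpu_vm_size left at its default of -1, the VM size is now derived from physical RAM: roughly three times system memory (to cover system pages, VRAM from all GPUs, MTYPE segmentation on GFX8 and older, and fragmentation slack), clamped between min_vm_size and max_size, then rounded up to a power of two. A standalone sketch of the arithmetic, assuming 16 GB of RAM, a hypothetical min_vm_size of 32 GB and max_bits = 48 (so max_size = 2^18 GB); it prints vm_size = 64 GB:

#include <stdio.h>

/* userspace stand-in for the kernel's roundup_pow_of_two() */
static unsigned int roundup_pow_of_two_u32(unsigned int x)
{
	unsigned int p = 1;

	while (p < x)
		p <<= 1;
	return p;
}

int main(void)
{
	unsigned int phys_ram_gb = 16, min_vm_size = 32, max_bits = 48;
	unsigned int max_size = 1u << (max_bits - 30);	/* 262144 GB */
	unsigned int guess = phys_ram_gb * 3;		/* 48 GB */
	unsigned int vm_size;

	if (guess < min_vm_size)
		guess = min_vm_size;
	if (guess > max_size)
		guess = max_size;
	vm_size = roundup_pow_of_two_u32(guess);	/* 64 GB */

	printf("vm_size = %u GB (max_pfn = %llu)\n",
	       vm_size, (unsigned long long)vm_size << 18);
	return 0;
}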
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h
@@ -321,7 +321,7 @@ struct amdgpu_bo_va_mapping *amdgpu_vm_bo_lookup_mapping(struct amdgpu_vm *vm,
 void amdgpu_vm_bo_trace_cs(struct amdgpu_vm *vm, struct ww_acquire_ctx *ticket);
 void amdgpu_vm_bo_rmv(struct amdgpu_device *adev,
 		      struct amdgpu_bo_va *bo_va);
-void amdgpu_vm_adjust_size(struct amdgpu_device *adev, uint32_t vm_size,
+void amdgpu_vm_adjust_size(struct amdgpu_device *adev, uint32_t min_vm_size,
 			   uint32_t fragment_size_default, unsigned max_level,
 			   unsigned max_bits);
 int amdgpu_vm_ioctl(struct drm_device *dev, void *data, struct drm_file *filp);
diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c
@@ -5664,6 +5664,11 @@ static int gfx_v8_0_set_powergating_state(void *handle,
 	if (amdgpu_sriov_vf(adev))
 		return 0;
 
+	if (adev->pg_flags & (AMD_PG_SUPPORT_GFX_SMG |
+				AMD_PG_SUPPORT_RLC_SMU_HS |
+				AMD_PG_SUPPORT_CP |
+				AMD_PG_SUPPORT_GFX_DMG))
+		adev->gfx.rlc.funcs->enter_safe_mode(adev);
 	switch (adev->asic_type) {
 	case CHIP_CARRIZO:
 	case CHIP_STONEY:
@@ -5713,7 +5718,11 @@ static int gfx_v8_0_set_powergating_state(void *handle,
 	default:
 		break;
 	}
-
+	if (adev->pg_flags & (AMD_PG_SUPPORT_GFX_SMG |
+				AMD_PG_SUPPORT_RLC_SMU_HS |
+				AMD_PG_SUPPORT_CP |
+				AMD_PG_SUPPORT_GFX_DMG))
+		adev->gfx.rlc.funcs->exit_safe_mode(adev);
 	return 0;
 }
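The per-ASIC powergating programming in gfx_v8_0_set_powergating_state() is now bracketed by RLC enter_safe_mode/exit_safe_mode whenever any of the listed PG features is enabled, keeping the RLC quiescent while powergating state is reprogrammed; Carrizo and Stoney set these pg_flags, which lines up with the "S3 stability fix for CZ/ST" item in the summary.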
diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v6_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v6_0.c
@@ -632,12 +632,6 @@ static void gmc_v6_0_gart_disable(struct amdgpu_device *adev)
 	amdgpu_gart_table_vram_unpin(adev);
 }
 
-static void gmc_v6_0_gart_fini(struct amdgpu_device *adev)
-{
-	amdgpu_gart_table_vram_free(adev);
-	amdgpu_gart_fini(adev);
-}
-
 static void gmc_v6_0_vm_decode_fault(struct amdgpu_device *adev,
 				     u32 status, u32 addr, u32 mc_client)
 {
@@ -935,8 +929,9 @@ static int gmc_v6_0_sw_fini(void *handle)
 	amdgpu_gem_force_release(adev);
 	amdgpu_vm_manager_fini(adev);
-	gmc_v6_0_gart_fini(adev);
+	amdgpu_gart_table_vram_free(adev);
 	amdgpu_bo_fini(adev);
+	amdgpu_gart_fini(adev);
 	release_firmware(adev->gmc.fw);
 	adev->gmc.fw = NULL;
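The gmc_v6_0_gart_fini() helper is folded into sw_fini so the teardown order can be corrected: the GART page table lives in a VRAM buffer object and must be freed before amdgpu_bo_fini() shuts down the buffer manager, while amdgpu_gart_fini() (which releases the GART dummy page) is deferred until after it, presumably because TTM can still reference that page during teardown. The same reordering is applied to gmc v7, v8 and v9 below.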
diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c
@@ -746,19 +746,6 @@ static void gmc_v7_0_gart_disable(struct amdgpu_device *adev)
 	amdgpu_gart_table_vram_unpin(adev);
 }
 
-/**
- * gmc_v7_0_gart_fini - vm fini callback
- *
- * @adev: amdgpu_device pointer
- *
- * Tears down the driver GART/VM setup (CIK).
- */
-static void gmc_v7_0_gart_fini(struct amdgpu_device *adev)
-{
-	amdgpu_gart_table_vram_free(adev);
-	amdgpu_gart_fini(adev);
-}
-
 /**
  * gmc_v7_0_vm_decode_fault - print human readable fault info
  *
@@ -1095,8 +1082,9 @@ static int gmc_v7_0_sw_fini(void *handle)
 	amdgpu_gem_force_release(adev);
 	amdgpu_vm_manager_fini(adev);
 	kfree(adev->gmc.vm_fault_info);
-	gmc_v7_0_gart_fini(adev);
+	amdgpu_gart_table_vram_free(adev);
 	amdgpu_bo_fini(adev);
+	amdgpu_gart_fini(adev);
 	release_firmware(adev->gmc.fw);
 	adev->gmc.fw = NULL;
diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c
@@ -968,19 +968,6 @@ static void gmc_v8_0_gart_disable(struct amdgpu_device *adev)
 	amdgpu_gart_table_vram_unpin(adev);
 }
 
-/**
- * gmc_v8_0_gart_fini - vm fini callback
- *
- * @adev: amdgpu_device pointer
- *
- * Tears down the driver GART/VM setup (CIK).
- */
-static void gmc_v8_0_gart_fini(struct amdgpu_device *adev)
-{
-	amdgpu_gart_table_vram_free(adev);
-	amdgpu_gart_fini(adev);
-}
-
 /**
  * gmc_v8_0_vm_decode_fault - print human readable fault info
  *
@@ -1199,8 +1186,9 @@ static int gmc_v8_0_sw_fini(void *handle)
 	amdgpu_gem_force_release(adev);
 	amdgpu_vm_manager_fini(adev);
 	kfree(adev->gmc.vm_fault_info);
-	gmc_v8_0_gart_fini(adev);
+	amdgpu_gart_table_vram_free(adev);
 	amdgpu_bo_fini(adev);
+	amdgpu_gart_fini(adev);
 	release_firmware(adev->gmc.fw);
 	adev->gmc.fw = NULL;
diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c
@@ -942,26 +942,12 @@ static int gmc_v9_0_sw_init(void *handle)
 	return 0;
 }
 
-/**
- * gmc_v9_0_gart_fini - vm fini callback
- *
- * @adev: amdgpu_device pointer
- *
- * Tears down the driver GART/VM setup (CIK).
- */
-static void gmc_v9_0_gart_fini(struct amdgpu_device *adev)
-{
-	amdgpu_gart_table_vram_free(adev);
-	amdgpu_gart_fini(adev);
-}
-
 static int gmc_v9_0_sw_fini(void *handle)
 {
 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
 
 	amdgpu_gem_force_release(adev);
 	amdgpu_vm_manager_fini(adev);
-	gmc_v9_0_gart_fini(adev);
 
 	/*
 	 * TODO:
@@ -974,7 +960,9 @@ static int gmc_v9_0_sw_fini(void *handle)
 	 */
 	amdgpu_bo_free_kernel(&adev->stolen_vga_memory, NULL, NULL);
 
+	amdgpu_gart_table_vram_free(adev);
 	amdgpu_bo_fini(adev);
+	amdgpu_gart_fini(adev);
 
 	return 0;
 }
diff --git a/drivers/gpu/drm/amd/amdgpu/kv_dpm.c b/drivers/gpu/drm/amd/amdgpu/kv_dpm.c
@@ -65,8 +65,6 @@ static int kv_set_thermal_temperature_range(struct amdgpu_device *adev,
 					    int min_temp, int max_temp);
 static int kv_init_fps_limits(struct amdgpu_device *adev);
 
-static void kv_dpm_powergate_uvd(void *handle, bool gate);
-static void kv_dpm_powergate_vce(struct amdgpu_device *adev, bool gate);
 static void kv_dpm_powergate_samu(struct amdgpu_device *adev, bool gate);
 static void kv_dpm_powergate_acp(struct amdgpu_device *adev, bool gate);
@@ -1354,8 +1352,6 @@ static int kv_dpm_enable(struct amdgpu_device *adev)
 		return ret;
 	}
 
-	kv_update_current_ps(adev, adev->pm.dpm.boot_ps);
-
 	if (adev->irq.installed &&
 	    amdgpu_is_internal_thermal_sensor(adev->pm.int_thermal_type)) {
 		ret = kv_set_thermal_temperature_range(adev, KV_TEMP_RANGE_MIN, KV_TEMP_RANGE_MAX);
@@ -1374,6 +1370,8 @@
 static void kv_dpm_disable(struct amdgpu_device *adev)
 {
+	struct kv_power_info *pi = kv_get_pi(adev);
+
 	amdgpu_irq_put(adev, &adev->pm.dpm.thermal.irq,
 		       AMDGPU_THERMAL_IRQ_LOW_TO_HIGH);
 	amdgpu_irq_put(adev, &adev->pm.dpm.thermal.irq,
@@ -1387,8 +1385,10 @@ static void kv_dpm_disable(struct amdgpu_device *adev)
 	/* powerup blocks */
 	kv_dpm_powergate_acp(adev, false);
 	kv_dpm_powergate_samu(adev, false);
-	kv_dpm_powergate_vce(adev, false);
-	kv_dpm_powergate_uvd(adev, false);
+	if (pi->caps_vce_pg) /* power on the VCE block */
+		amdgpu_kv_notify_message_to_smu(adev, PPSMC_MSG_VCEPowerON);
+	if (pi->caps_uvd_pg) /* power on the UVD block */
+		amdgpu_kv_notify_message_to_smu(adev, PPSMC_MSG_UVDPowerON);
 
 	kv_enable_smc_cac(adev, false);
 	kv_enable_didt(adev, false);
@@ -1551,7 +1551,6 @@ static int kv_update_vce_dpm(struct amdgpu_device *adev,
 	int ret;
 
 	if (amdgpu_new_state->evclk > 0 && amdgpu_current_state->evclk == 0) {
-		kv_dpm_powergate_vce(adev, false);
 		if (pi->caps_stable_p_state)
 			pi->vce_boot_level = table->count - 1;
 		else
@@ -1573,7 +1572,6 @@ static int kv_update_vce_dpm(struct amdgpu_device *adev,
 		kv_enable_vce_dpm(adev, true);
 	} else if (amdgpu_new_state->evclk == 0 && amdgpu_current_state->evclk > 0) {
 		kv_enable_vce_dpm(adev, false);
-		kv_dpm_powergate_vce(adev, true);
 	}
 
 	return 0;
@@ -1702,24 +1700,32 @@ static void kv_dpm_powergate_uvd(void *handle, bool gate)
 	}
 }
 
-static void kv_dpm_powergate_vce(struct amdgpu_device *adev, bool gate)
+static void kv_dpm_powergate_vce(void *handle, bool gate)
 {
+	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
 	struct kv_power_info *pi = kv_get_pi(adev);
-
-	if (pi->vce_power_gated == gate)
-		return;
+	int ret;
 
 	pi->vce_power_gated = gate;
 
-	if (!pi->caps_vce_pg)
-		return;
-
-	if (gate)
-		amdgpu_kv_notify_message_to_smu(adev, PPSMC_MSG_VCEPowerOFF);
-	else
-		amdgpu_kv_notify_message_to_smu(adev, PPSMC_MSG_VCEPowerON);
+	if (gate) {
+		/* stop the VCE block */
+		ret = amdgpu_device_ip_set_powergating_state(adev, AMD_IP_BLOCK_TYPE_VCE,
+							     AMD_PG_STATE_GATE);
+		kv_enable_vce_dpm(adev, false);
+		if (pi->caps_vce_pg) /* power off the VCE block */
+			amdgpu_kv_notify_message_to_smu(adev, PPSMC_MSG_VCEPowerOFF);
+	} else {
+		if (pi->caps_vce_pg) /* power on the VCE block */
+			amdgpu_kv_notify_message_to_smu(adev, PPSMC_MSG_VCEPowerON);
+		kv_enable_vce_dpm(adev, true);
+		/* re-init the VCE block */
+		ret = amdgpu_device_ip_set_powergating_state(adev, AMD_IP_BLOCK_TYPE_VCE,
+							     AMD_PG_STATE_UNGATE);
+	}
 }
 
 static void kv_dpm_powergate_samu(struct amdgpu_device *adev, bool gate)
 {
 	struct kv_power_info *pi = kv_get_pi(adev);
@@ -3061,7 +3067,7 @@ static int kv_dpm_hw_init(void *handle)
 	else
 		adev->pm.dpm_enabled = true;
 	mutex_unlock(&adev->pm.mutex);
-
+	amdgpu_pm_compute_clocks(adev);
 	return ret;
 }
@@ -3313,6 +3319,9 @@ static int kv_set_powergating_by_smu(void *handle,
 	case AMD_IP_BLOCK_TYPE_UVD:
 		kv_dpm_powergate_uvd(handle, gate);
 		break;
+	case AMD_IP_BLOCK_TYPE_VCE:
+		kv_dpm_powergate_vce(handle, gate);
+		break;
 	default:
 		break;
 	}
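Taken together, the kv_dpm changes route VCE powergating on CIK APUs through the standard IP-block interface: kv_set_powergating_by_smu() now dispatches AMD_IP_BLOCK_TYPE_VCE to kv_dpm_powergate_vce(), which gates or ungates the block via amdgpu_device_ip_set_powergating_state() plus VCE DPM enablement, while kv_update_vce_dpm() no longer toggles powergating directly. kv_dpm_disable() powers the VCE/UVD blocks back on with direct SMU messages, kv_dpm_enable() no longer force-resets the current power state to the boot state, and hw_init ends with amdgpu_pm_compute_clocks() so clocks are consistent once DPM is up. This appears to be the "VCE regression fixes for CIK parts" item from the summary.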
diff --git a/drivers/gpu/drm/amd/amdgpu/si_dpm.c b/drivers/gpu/drm/amd/amdgpu/si_dpm.c
@@ -6887,7 +6887,6 @@ static int si_dpm_enable(struct amdgpu_device *adev)
 	si_enable_auto_throttle_source(adev, AMDGPU_DPM_AUTO_THROTTLE_SRC_THERMAL, true);
 	si_thermal_start_thermal_controller(adev);
 
-	ni_update_current_ps(adev, boot_ps);
 
 	return 0;
 }
@@ -7763,7 +7762,7 @@ static int si_dpm_hw_init(void *handle)
 	else
 		adev->pm.dpm_enabled = true;
 	mutex_unlock(&adev->pm.mutex);
-
+	amdgpu_pm_compute_clocks(adev);
 	return ret;
 }
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_pp_smu.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_pp_smu.c
@@ -480,12 +480,20 @@ void pp_rv_set_display_requirement(struct pp_smu *pp,
 {
 	struct dc_context *ctx = pp->ctx;
 	struct amdgpu_device *adev = ctx->driver_context;
+	void *pp_handle = adev->powerplay.pp_handle;
 	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
+	struct pp_display_clock_request clock = {0};
 
-	if (!pp_funcs || !pp_funcs->display_configuration_changed)
+	if (!pp_funcs || !pp_funcs->display_clock_voltage_request)
 		return;
 
-	amdgpu_dpm_display_configuration_changed(adev);
+	clock.clock_type = amd_pp_dcf_clock;
+	clock.clock_freq_in_khz = req->hard_min_dcefclk_khz;
+	pp_funcs->display_clock_voltage_request(pp_handle, &clock);
+
+	clock.clock_type = amd_pp_f_clock;
+	clock.clock_freq_in_khz = req->hard_min_fclk_khz;
+	pp_funcs->display_clock_voltage_request(pp_handle, &clock);
 }
 
 void pp_rv_set_wm_ranges(struct pp_smu *pp,
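Instead of a blanket display_configuration_changed notification, pp_rv_set_display_requirement() now forwards the display's hard minimum clocks to powerplay as two explicit display_clock_voltage_request() calls, one for DCEFCLK and one for FCLK.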
diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_link.c b/drivers/gpu/drm/amd/display/dc/core/dc_link.c
@@ -754,8 +754,12 @@ bool dc_link_detect(struct dc_link *link, enum dc_detect_reason reason)
 			 * fail-safe mode
 			 */
 			if (dc_is_hdmi_signal(link->connector_signal) ||
-			    dc_is_dvi_signal(link->connector_signal))
+			    dc_is_dvi_signal(link->connector_signal)) {
+				if (prev_sink != NULL)
+					dc_sink_release(prev_sink);
 				return false;
+			}
 		default:
 			break;
 		}
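The early return on an HDMI/DVI EDID-read failure previously leaked the reference held on prev_sink; the added dc_sink_release() balances it, which is the "DC memory leak fix" from the summary. A tiny standalone sketch of the pattern (hypothetical names, not the DC API): every early return from a function that holds a reference must drop it first.

#include <stdio.h>
#include <stdlib.h>

struct sink { int refcount; };

static void sink_release(struct sink *s)
{
	/* drop one reference; free on the last one */
	if (s && --s->refcount == 0)
		free(s);
}

static int detect(struct sink *prev_sink, int fail_safe)
{
	if (fail_safe) {
		/* the fix: release before bailing out early */
		sink_release(prev_sink);
		return 0;
	}
	/* normal path releases prev_sink as well, just later */
	sink_release(prev_sink);
	return 1;
}

int main(void)
{
	struct sink *s = calloc(1, sizeof(*s));

	s->refcount = 1;
	printf("fail-safe path returned %d\n", detect(s, 1));
	return 0;
}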