Commit 60a10650 authored by Dave Airlie

Merge tag 'drm-msm-fixes-2020-08-24' of https://gitlab.freedesktop.org/drm/msm into drm-fixes

Some fixes for v5.9, plus the one opp/bandwidth scaling patch ("drm:
msm: a6xx: use dev_pm_opp_set_bw to scale DDR") that was not included
in the initial pull due to a dependency on a patch landing through the OPP tree.

Signed-off-by: Dave Airlie <airlied@redhat.com>
From: Rob Clark <robdclark@gmail.com>
Link: https://patchwork.freedesktop.org/patch/msgid/CAF6AEGt45A4ObyhEdC5Ga4f4cAf-NBSVRECu7df3Gh6-X4G3tQ@mail.gmail.com
parents 6284a418 5e0c22d4
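The headline patch in this pull replaces the driver's fixed interconnect votes (icc_set_bw() with hard-coded bandwidth values) with votes derived from the GPU's OPP table via dev_pm_opp_set_bw(). A minimal sketch of that pattern on a v5.9-era kernel, assuming a device whose OPP table carries bandwidth (opp-peak-kBps) entries; the function name here is illustrative, not the driver's actual code:

    #include <linux/device.h>
    #include <linux/pm_opp.h>

    /*
     * Sketch: vote DDR bandwidth for a given core clock by looking up the
     * matching OPP and letting the OPP core apply the interconnect
     * bandwidth associated with it.
     */
    static void example_set_bw_for_freq(struct device *dev, unsigned long freq)
    {
        struct dev_pm_opp *opp;

        opp = dev_pm_opp_find_freq_exact(dev, freq, true);
        if (IS_ERR(opp))
            return;

        dev_pm_opp_set_bw(dev, opp);
        dev_pm_opp_put(opp);
    }

Passing a NULL opp to dev_pm_opp_set_bw() drops the vote again, which is exactly what the a6xx_gmu_stop() hunk below does. (Later kernels folded this helper into dev_pm_opp_set_opp().)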

drivers/gpu/drm/msm/adreno/a6xx_gmu.c:

@@ -133,7 +133,7 @@ void a6xx_gmu_set_freq(struct msm_gpu *gpu, struct dev_pm_opp *opp)
 	if (!gmu->legacy) {
 		a6xx_hfi_set_freq(gmu, perf_index);
-		icc_set_bw(gpu->icc_path, 0, MBps_to_icc(7216));
+		dev_pm_opp_set_bw(&gpu->pdev->dev, opp);
 		pm_runtime_put(gmu->dev);
 		return;
 	}

@@ -157,11 +157,7 @@ void a6xx_gmu_set_freq(struct msm_gpu *gpu, struct dev_pm_opp *opp)
 	if (ret)
 		dev_err(gmu->dev, "GMU set GPU frequency error: %d\n", ret);

-	/*
-	 * Eventually we will want to scale the path vote with the frequency but
-	 * for now leave it at max so that the performance is nominal.
-	 */
-	icc_set_bw(gpu->icc_path, 0, MBps_to_icc(7216));
+	dev_pm_opp_set_bw(&gpu->pdev->dev, opp);

 	pm_runtime_put(gmu->dev);
 }

@@ -204,6 +200,16 @@ static int a6xx_gmu_start(struct a6xx_gmu *gmu)
 {
 	int ret;
 	u32 val;
+	u32 mask, reset_val;
+
+	val = gmu_read(gmu, REG_A6XX_GMU_CM3_DTCM_START + 0xff8);
+	if (val <= 0x20010004) {
+		mask = 0xffffffff;
+		reset_val = 0xbabeface;
+	} else {
+		mask = 0x1ff;
+		reset_val = 0x100;
+	}

 	gmu_write(gmu, REG_A6XX_GMU_CM3_SYSRESET, 1);

@@ -215,7 +221,7 @@ static int a6xx_gmu_start(struct a6xx_gmu *gmu)
 	gmu_write(gmu, REG_A6XX_GMU_CM3_SYSRESET, 0);

 	ret = gmu_poll_timeout(gmu, REG_A6XX_GMU_CM3_FW_INIT_RESULT, val,
-		val == 0xbabeface, 100, 10000);
+		(val & mask) == reset_val, 100, 10000);

 	if (ret)
 		DRM_DEV_ERROR(gmu->dev, "GMU firmware initialization timed out\n");
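
Note: the poll now derives its expected handshake from the GMU firmware version, which, judging by the read of REG_A6XX_GMU_CM3_DTCM_START + 0xff8 above, appears to be stashed in the last word of the CM3 DTCM. Legacy firmware still reports the full 0xbabeface magic; newer firmware only guarantees the low nine bits and reports 0x100.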

@@ -845,10 +851,24 @@ static void a6xx_gmu_set_initial_freq(struct msm_gpu *gpu, struct a6xx_gmu *gmu)
 	if (IS_ERR_OR_NULL(gpu_opp))
 		return;

+	gmu->freq = 0; /* so a6xx_gmu_set_freq() doesn't exit early */
 	a6xx_gmu_set_freq(gpu, gpu_opp);
 	dev_pm_opp_put(gpu_opp);
 }

+static void a6xx_gmu_set_initial_bw(struct msm_gpu *gpu, struct a6xx_gmu *gmu)
+{
+	struct dev_pm_opp *gpu_opp;
+	unsigned long gpu_freq = gmu->gpu_freqs[gmu->current_perf_index];
+
+	gpu_opp = dev_pm_opp_find_freq_exact(&gpu->pdev->dev, gpu_freq, true);
+	if (IS_ERR_OR_NULL(gpu_opp))
+		return;
+
+	dev_pm_opp_set_bw(&gpu->pdev->dev, gpu_opp);
+	dev_pm_opp_put(gpu_opp);
+}
+
 int a6xx_gmu_resume(struct a6xx_gpu *a6xx_gpu)
 {
 	struct adreno_gpu *adreno_gpu = &a6xx_gpu->base;

@@ -882,7 +902,7 @@ int a6xx_gmu_resume(struct a6xx_gpu *a6xx_gpu)
 	}

 	/* Set the bus quota to a reasonable value for boot */
-	icc_set_bw(gpu->icc_path, 0, MBps_to_icc(3072));
+	a6xx_gmu_set_initial_bw(gpu, gmu);

 	/* Enable the GMU interrupt */
 	gmu_write(gmu, REG_A6XX_GMU_AO_HOST_INTERRUPT_CLR, ~0);

@@ -1051,7 +1071,7 @@ int a6xx_gmu_stop(struct a6xx_gpu *a6xx_gpu)
 	a6xx_gmu_shutdown(gmu);

 	/* Remove the bus vote */
-	icc_set_bw(gpu->icc_path, 0, 0);
+	dev_pm_opp_set_bw(&gpu->pdev->dev, NULL);

 	/*
 	 * Make sure the GX domain is off before turning off the GMU (CX)

drivers/gpu/drm/msm/adreno/a6xx_gpu_state.c:

@@ -938,7 +938,8 @@ struct msm_gpu_state *a6xx_gpu_state_get(struct msm_gpu *gpu)
 		msm_gem_kernel_put(dumper.bo, gpu->aspace, true);
 	}

-	a6xx_get_debugbus(gpu, a6xx_state);
+	if (snapshot_debugbus)
+		a6xx_get_debugbus(gpu, a6xx_state);

 	return &a6xx_state->base;
 }

@@ -372,7 +372,7 @@ static const struct a6xx_indexed_registers {
 	u32 data;
 	u32 count;
 } a6xx_indexed_reglist[] = {
-	{ "CP_SEQ_STAT", REG_A6XX_CP_SQE_STAT_ADDR,
+	{ "CP_SQE_STAT", REG_A6XX_CP_SQE_STAT_ADDR,
 		REG_A6XX_CP_SQE_STAT_DATA, 0x33 },
 	{ "CP_DRAW_STATE", REG_A6XX_CP_DRAW_STATE_ADDR,
 		REG_A6XX_CP_DRAW_STATE_DATA, 0x100 },

drivers/gpu/drm/msm/adreno/adreno_device.c:

@@ -14,6 +14,10 @@ bool hang_debug = false;
 MODULE_PARM_DESC(hang_debug, "Dump registers when hang is detected (can be slow!)");
 module_param_named(hang_debug, hang_debug, bool, 0600);

+bool snapshot_debugbus = false;
+MODULE_PARM_DESC(snapshot_debugbus, "Include debugbus sections in GPU devcoredump (if not fused off)");
+module_param_named(snapshot_debugbus, snapshot_debugbus, bool, 0600);
+
 static const struct adreno_info gpulist[] = {
 	{
 		.rev = ADRENO_REV(2, 0, 0, 0),
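
Since the new parameter is registered with mode 0600 under the msm module, it should be settable at load time (for example, modprobe msm snapshot_debugbus=1) or flipped at runtime via /sys/module/msm/parameters/snapshot_debugbus; treat those exact paths as illustrative, since they depend on how the driver is built.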

drivers/gpu/drm/msm/adreno/adreno_gpu.c:

@@ -396,7 +396,7 @@ int adreno_hw_init(struct msm_gpu *gpu)
 		ring->next = ring->start;

 		/* reset completed fence seqno: */
-		ring->memptrs->fence = ring->seqno;
+		ring->memptrs->fence = ring->fctx->completed_fence;
 		ring->memptrs->rptr = 0;
 	}
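
This fix appears aimed at GPU recovery: ring->seqno tracks the last fence seqno handed out for a submit, while ring->fctx->completed_fence tracks the last one actually retired, so seeding memptrs->fence with the former could mark still-pending submits as complete after a hang.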

drivers/gpu/drm/msm/adreno/adreno_gpu.h:

@@ -21,6 +21,8 @@
 #define REG_SKIP ~0
 #define REG_ADRENO_SKIP(_offset) [_offset] = REG_SKIP

+extern bool snapshot_debugbus;
+
 /**
  * adreno_regs: List of registers that are used in across all
  * 3D devices. Each device type has different offset value for the same

drivers/gpu/drm/msm/disp/dpu1/dpu_crtc.c:

@@ -827,7 +827,7 @@ static void dpu_crtc_enable(struct drm_crtc *crtc,
 {
 	struct dpu_crtc *dpu_crtc;
 	struct drm_encoder *encoder;
-	bool request_bandwidth;
+	bool request_bandwidth = false;

 	if (!crtc) {
 		DPU_ERROR("invalid crtc\n");

drivers/gpu/drm/msm/disp/dpu1/dpu_encoder.c:

@@ -599,7 +599,10 @@ static int dpu_encoder_virt_atomic_check(
 	dpu_kms = to_dpu_kms(priv->kms);
 	mode = &crtc_state->mode;
 	adj_mode = &crtc_state->adjusted_mode;
-	global_state = dpu_kms_get_existing_global_state(dpu_kms);
+	global_state = dpu_kms_get_global_state(crtc_state->state);
+	if (IS_ERR(global_state))
+		return PTR_ERR(global_state);
+
 	trace_dpu_enc_atomic_check(DRMID(drm_enc));

 	/* perform atomic check on the first physical encoder (master) */

@@ -625,12 +628,15 @@ static int dpu_encoder_virt_atomic_check(
 	/* Reserve dynamic resources now. */
 	if (!ret) {
 		/*
-		 * Avoid reserving resources when mode set is pending. Topology
-		 * info may not be available to complete reservation.
+		 * Release and Allocate resources on every modeset
+		 * Dont allocate when active is false.
 		 */
 		if (drm_atomic_crtc_needs_modeset(crtc_state)) {
-			ret = dpu_rm_reserve(&dpu_kms->rm, global_state,
-					drm_enc, crtc_state, topology);
+			dpu_rm_release(global_state, drm_enc);
+
+			if (!crtc_state->active_changed || crtc_state->active)
+				ret = dpu_rm_reserve(&dpu_kms->rm, global_state,
+						drm_enc, crtc_state, topology);
 		}
 	}

@@ -1181,7 +1187,6 @@ static void dpu_encoder_virt_disable(struct drm_encoder *drm_enc)
 	struct dpu_encoder_virt *dpu_enc = NULL;
 	struct msm_drm_private *priv;
 	struct dpu_kms *dpu_kms;
-	struct dpu_global_state *global_state;
 	int i = 0;

 	if (!drm_enc) {

@@ -1200,7 +1205,6 @@ static void dpu_encoder_virt_disable(struct drm_encoder *drm_enc)
 	priv = drm_enc->dev->dev_private;
 	dpu_kms = to_dpu_kms(priv->kms);
-	global_state = dpu_kms_get_existing_global_state(dpu_kms);

 	trace_dpu_enc_disable(DRMID(drm_enc));

@@ -1230,8 +1234,6 @@ static void dpu_encoder_virt_disable(struct drm_encoder *drm_enc)
 	DPU_DEBUG_ENC(dpu_enc, "encoder disabled\n");

-	dpu_rm_release(global_state, drm_enc);
-
 	mutex_unlock(&dpu_enc->enc_lock);
 }

drivers/gpu/drm/msm/disp/dpu1/dpu_plane.c:

@@ -866,9 +866,9 @@ static int dpu_plane_atomic_check(struct drm_plane *plane,
 	crtc_state = drm_atomic_get_new_crtc_state(state->state,
 			state->crtc);

-	min_scale = FRAC_16_16(1, pdpu->pipe_sblk->maxdwnscale);
+	min_scale = FRAC_16_16(1, pdpu->pipe_sblk->maxupscale);
 	ret = drm_atomic_helper_check_plane_state(state, crtc_state, min_scale,
-					  pdpu->pipe_sblk->maxupscale << 16,
+					  pdpu->pipe_sblk->maxdwnscale << 16,
 					  true, true);
 	if (ret) {
 		DPU_DEBUG_PLANE(pdpu, "Check plane state failed (%d)\n", ret);
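
The two scale limits had been swapped. drm_atomic_helper_check_plane_state() takes its limits as src:dst ratios in 16.16 fixed point, so the minimum (strongest upscale) must come from maxupscale and the maximum (strongest downscale) from maxdwnscale. As a worked example with a hypothetical maxupscale of 8: FRAC_16_16(1, 8) = 65536 / 8 = 8192, which is 0.125 in 16.16 and permits up to an 8x upscale, while a maxdwnscale of 4 shifted left by 16 encodes a 4x downscale ceiling.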

drivers/gpu/drm/msm/msm_atomic.c:

@@ -27,6 +27,34 @@ int msm_atomic_prepare_fb(struct drm_plane *plane,
 	return msm_framebuffer_prepare(new_state->fb, kms->aspace);
 }

+/*
+ * Helpers to control vblanks while we flush.. basically just to ensure
+ * that vblank accounting is switched on, so we get valid seqn/timestamp
+ * on pageflip events (if requested)
+ */
+
+static void vblank_get(struct msm_kms *kms, unsigned crtc_mask)
+{
+	struct drm_crtc *crtc;
+
+	for_each_crtc_mask(kms->dev, crtc, crtc_mask) {
+		if (!crtc->state->active)
+			continue;
+		drm_crtc_vblank_get(crtc);
+	}
+}
+
+static void vblank_put(struct msm_kms *kms, unsigned crtc_mask)
+{
+	struct drm_crtc *crtc;
+
+	for_each_crtc_mask(kms->dev, crtc, crtc_mask) {
+		if (!crtc->state->active)
+			continue;
+		drm_crtc_vblank_put(crtc);
+	}
+}
+
 static void msm_atomic_async_commit(struct msm_kms *kms, int crtc_idx)
 {
 	unsigned crtc_mask = BIT(crtc_idx);

@@ -44,6 +72,8 @@ static void msm_atomic_async_commit(struct msm_kms *kms, int crtc_idx)
 	kms->funcs->enable_commit(kms);

+	vblank_get(kms, crtc_mask);
+
 	/*
 	 * Flush hardware updates:
 	 */

@@ -58,6 +88,8 @@ static void msm_atomic_async_commit(struct msm_kms *kms, int crtc_idx)
 	kms->funcs->wait_flush(kms, crtc_mask);
 	trace_msm_atomic_wait_flush_finish(crtc_mask);

+	vblank_put(kms, crtc_mask);
+
 	mutex_lock(&kms->commit_lock);
 	kms->funcs->complete_commit(kms, crtc_mask);
 	mutex_unlock(&kms->commit_lock);

@@ -221,6 +253,8 @@ void msm_atomic_commit_tail(struct drm_atomic_state *state)
 	 */
 	kms->pending_crtc_mask &= ~crtc_mask;

+	vblank_get(kms, crtc_mask);
+
 	/*
 	 * Flush hardware updates:
 	 */

@@ -235,6 +269,8 @@ void msm_atomic_commit_tail(struct drm_atomic_state *state)
 	kms->funcs->wait_flush(kms, crtc_mask);
 	trace_msm_atomic_wait_flush_finish(crtc_mask);

+	vblank_put(kms, crtc_mask);
+
 	mutex_lock(&kms->commit_lock);
 	kms->funcs->complete_commit(kms, crtc_mask);
 	mutex_unlock(&kms->commit_lock);

drivers/gpu/drm/msm/msm_drv.c:

@@ -1320,6 +1320,13 @@ static int msm_pdev_remove(struct platform_device *pdev)
 	return 0;
 }

+static void msm_pdev_shutdown(struct platform_device *pdev)
+{
+	struct drm_device *drm = platform_get_drvdata(pdev);
+
+	drm_atomic_helper_shutdown(drm);
+}
+
 static const struct of_device_id dt_match[] = {
 	{ .compatible = "qcom,mdp4", .data = (void *)KMS_MDP4 },
 	{ .compatible = "qcom,mdss", .data = (void *)KMS_MDP5 },

@@ -1332,6 +1339,7 @@ MODULE_DEVICE_TABLE(of, dt_match);
 static struct platform_driver msm_platform_driver = {
 	.probe = msm_pdev_probe,
 	.remove = msm_pdev_remove,
+	.shutdown = msm_pdev_shutdown,
 	.driver = {
 		.name = "msm",
 		.of_match_table = dt_match,
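
The shutdown hook presumably exists so the display pipeline is quiesced on reboot: drm_atomic_helper_shutdown() disables all outputs, keeping scanout from touching memory after the rest of the system (the SMMU, for instance) has already shut down.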

drivers/gpu/drm/msm/msm_ringbuffer.c:

@@ -27,7 +27,8 @@ struct msm_ringbuffer *msm_ringbuffer_new(struct msm_gpu *gpu, int id,
 	ring->id = id;

 	ring->start = msm_gem_kernel_new(gpu->dev, MSM_GPU_RINGBUFFER_SZ,
-		MSM_BO_WC, gpu->aspace, &ring->bo, &ring->iova);
+		MSM_BO_WC | MSM_BO_GPU_READONLY, gpu->aspace, &ring->bo,
+		&ring->iova);
 	if (IS_ERR(ring->start)) {
 		ret = PTR_ERR(ring->start);
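
Mapping the ringbuffer with MSM_BO_GPU_READONLY reads as a hardening measure: the kernel still writes commands through its CPU-side mapping, but the GPU itself can no longer overwrite its own ringbuffer contents.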