Commit 8b233a83 authored by Dave Airlie's avatar Dave Airlie

Merge tag 'drm-msm-fixes-2021-11-28' of https://gitlab.freedesktop.org/drm/msm into drm-fixes

msm misc fixes, build, display
Signed-off-by: Dave Airlie <airlied@redhat.com>
From: Rob Clark <robdclark@gmail.com>
Link: https://patchwork.freedesktop.org/patch/msgid/CAF6AEGsV-ntO_u323XMKuD6bgbgvXporwi1sbyXwNDAuA52Afw@mail.gmail.com
parents d58071a8 afece15a
...@@ -4,8 +4,8 @@ config DRM_MSM ...@@ -4,8 +4,8 @@ config DRM_MSM
tristate "MSM DRM" tristate "MSM DRM"
depends on DRM depends on DRM
depends on ARCH_QCOM || SOC_IMX5 || COMPILE_TEST depends on ARCH_QCOM || SOC_IMX5 || COMPILE_TEST
depends on COMMON_CLK
depends on IOMMU_SUPPORT depends on IOMMU_SUPPORT
depends on (OF && COMMON_CLK) || COMPILE_TEST
depends on QCOM_OCMEM || QCOM_OCMEM=n depends on QCOM_OCMEM || QCOM_OCMEM=n
depends on QCOM_LLCC || QCOM_LLCC=n depends on QCOM_LLCC || QCOM_LLCC=n
depends on QCOM_COMMAND_DB || QCOM_COMMAND_DB=n depends on QCOM_COMMAND_DB || QCOM_COMMAND_DB=n
......
...@@ -23,8 +23,10 @@ msm-y := \ ...@@ -23,8 +23,10 @@ msm-y := \
hdmi/hdmi_i2c.o \ hdmi/hdmi_i2c.o \
hdmi/hdmi_phy.o \ hdmi/hdmi_phy.o \
hdmi/hdmi_phy_8960.o \ hdmi/hdmi_phy_8960.o \
hdmi/hdmi_phy_8996.o \
hdmi/hdmi_phy_8x60.o \ hdmi/hdmi_phy_8x60.o \
hdmi/hdmi_phy_8x74.o \ hdmi/hdmi_phy_8x74.o \
hdmi/hdmi_pll_8960.o \
edp/edp.o \ edp/edp.o \
edp/edp_aux.o \ edp/edp_aux.o \
edp/edp_bridge.o \ edp/edp_bridge.o \
...@@ -37,6 +39,7 @@ msm-y := \ ...@@ -37,6 +39,7 @@ msm-y := \
disp/mdp4/mdp4_dtv_encoder.o \ disp/mdp4/mdp4_dtv_encoder.o \
disp/mdp4/mdp4_lcdc_encoder.o \ disp/mdp4/mdp4_lcdc_encoder.o \
disp/mdp4/mdp4_lvds_connector.o \ disp/mdp4/mdp4_lvds_connector.o \
disp/mdp4/mdp4_lvds_pll.o \
disp/mdp4/mdp4_irq.o \ disp/mdp4/mdp4_irq.o \
disp/mdp4/mdp4_kms.o \ disp/mdp4/mdp4_kms.o \
disp/mdp4/mdp4_plane.o \ disp/mdp4/mdp4_plane.o \
...@@ -116,9 +119,6 @@ msm-$(CONFIG_DRM_MSM_DP)+= dp/dp_aux.o \ ...@@ -116,9 +119,6 @@ msm-$(CONFIG_DRM_MSM_DP)+= dp/dp_aux.o \
dp/dp_audio.o dp/dp_audio.o
msm-$(CONFIG_DRM_FBDEV_EMULATION) += msm_fbdev.o msm-$(CONFIG_DRM_FBDEV_EMULATION) += msm_fbdev.o
msm-$(CONFIG_COMMON_CLK) += disp/mdp4/mdp4_lvds_pll.o
msm-$(CONFIG_COMMON_CLK) += hdmi/hdmi_pll_8960.o
msm-$(CONFIG_COMMON_CLK) += hdmi/hdmi_phy_8996.o
msm-$(CONFIG_DRM_MSM_HDMI_HDCP) += hdmi/hdmi_hdcp.o msm-$(CONFIG_DRM_MSM_HDMI_HDCP) += hdmi/hdmi_hdcp.o
......
...@@ -1424,17 +1424,24 @@ static void a6xx_llc_activate(struct a6xx_gpu *a6xx_gpu) ...@@ -1424,17 +1424,24 @@ static void a6xx_llc_activate(struct a6xx_gpu *a6xx_gpu)
{ {
struct adreno_gpu *adreno_gpu = &a6xx_gpu->base; struct adreno_gpu *adreno_gpu = &a6xx_gpu->base;
struct msm_gpu *gpu = &adreno_gpu->base; struct msm_gpu *gpu = &adreno_gpu->base;
u32 gpu_scid, cntl1_regval = 0; u32 cntl1_regval = 0;
if (IS_ERR(a6xx_gpu->llc_mmio)) if (IS_ERR(a6xx_gpu->llc_mmio))
return; return;
if (!llcc_slice_activate(a6xx_gpu->llc_slice)) { if (!llcc_slice_activate(a6xx_gpu->llc_slice)) {
gpu_scid = llcc_get_slice_id(a6xx_gpu->llc_slice); u32 gpu_scid = llcc_get_slice_id(a6xx_gpu->llc_slice);
gpu_scid &= 0x1f; gpu_scid &= 0x1f;
cntl1_regval = (gpu_scid << 0) | (gpu_scid << 5) | (gpu_scid << 10) | cntl1_regval = (gpu_scid << 0) | (gpu_scid << 5) | (gpu_scid << 10) |
(gpu_scid << 15) | (gpu_scid << 20); (gpu_scid << 15) | (gpu_scid << 20);
/* On A660, the SCID programming for UCHE traffic is done in
* A6XX_GBIF_SCACHE_CNTL0[14:10]
*/
if (adreno_is_a660_family(adreno_gpu))
gpu_rmw(gpu, REG_A6XX_GBIF_SCACHE_CNTL0, (0x1f << 10) |
(1 << 8), (gpu_scid << 10) | (1 << 8));
} }
/* /*
...@@ -1471,13 +1478,6 @@ static void a6xx_llc_activate(struct a6xx_gpu *a6xx_gpu) ...@@ -1471,13 +1478,6 @@ static void a6xx_llc_activate(struct a6xx_gpu *a6xx_gpu)
} }
gpu_rmw(gpu, REG_A6XX_GBIF_SCACHE_CNTL1, GENMASK(24, 0), cntl1_regval); gpu_rmw(gpu, REG_A6XX_GBIF_SCACHE_CNTL1, GENMASK(24, 0), cntl1_regval);
/* On A660, the SCID programming for UCHE traffic is done in
* A6XX_GBIF_SCACHE_CNTL0[14:10]
*/
if (adreno_is_a660_family(adreno_gpu))
gpu_rmw(gpu, REG_A6XX_GBIF_SCACHE_CNTL0, (0x1f << 10) |
(1 << 8), (gpu_scid << 10) | (1 << 8));
} }
static void a6xx_llc_slices_destroy(struct a6xx_gpu *a6xx_gpu) static void a6xx_llc_slices_destroy(struct a6xx_gpu *a6xx_gpu)
...@@ -1640,7 +1640,7 @@ static unsigned long a6xx_gpu_busy(struct msm_gpu *gpu) ...@@ -1640,7 +1640,7 @@ static unsigned long a6xx_gpu_busy(struct msm_gpu *gpu)
return (unsigned long)busy_time; return (unsigned long)busy_time;
} }
void a6xx_gpu_set_freq(struct msm_gpu *gpu, struct dev_pm_opp *opp) static void a6xx_gpu_set_freq(struct msm_gpu *gpu, struct dev_pm_opp *opp)
{ {
struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu); struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
struct a6xx_gpu *a6xx_gpu = to_a6xx_gpu(adreno_gpu); struct a6xx_gpu *a6xx_gpu = to_a6xx_gpu(adreno_gpu);
......
...@@ -777,12 +777,12 @@ static void a6xx_get_gmu_registers(struct msm_gpu *gpu, ...@@ -777,12 +777,12 @@ static void a6xx_get_gmu_registers(struct msm_gpu *gpu,
struct a6xx_gpu *a6xx_gpu = to_a6xx_gpu(adreno_gpu); struct a6xx_gpu *a6xx_gpu = to_a6xx_gpu(adreno_gpu);
a6xx_state->gmu_registers = state_kcalloc(a6xx_state, a6xx_state->gmu_registers = state_kcalloc(a6xx_state,
2, sizeof(*a6xx_state->gmu_registers)); 3, sizeof(*a6xx_state->gmu_registers));
if (!a6xx_state->gmu_registers) if (!a6xx_state->gmu_registers)
return; return;
a6xx_state->nr_gmu_registers = 2; a6xx_state->nr_gmu_registers = 3;
/* Get the CX GMU registers from AHB */ /* Get the CX GMU registers from AHB */
_a6xx_get_gmu_registers(gpu, a6xx_state, &a6xx_gmu_reglist[0], _a6xx_get_gmu_registers(gpu, a6xx_state, &a6xx_gmu_reglist[0],
......
...@@ -33,6 +33,7 @@ struct dp_aux_private { ...@@ -33,6 +33,7 @@ struct dp_aux_private {
bool read; bool read;
bool no_send_addr; bool no_send_addr;
bool no_send_stop; bool no_send_stop;
bool initted;
u32 offset; u32 offset;
u32 segment; u32 segment;
...@@ -331,6 +332,10 @@ static ssize_t dp_aux_transfer(struct drm_dp_aux *dp_aux, ...@@ -331,6 +332,10 @@ static ssize_t dp_aux_transfer(struct drm_dp_aux *dp_aux,
} }
mutex_lock(&aux->mutex); mutex_lock(&aux->mutex);
if (!aux->initted) {
ret = -EIO;
goto exit;
}
dp_aux_update_offset_and_segment(aux, msg); dp_aux_update_offset_and_segment(aux, msg);
dp_aux_transfer_helper(aux, msg, true); dp_aux_transfer_helper(aux, msg, true);
...@@ -380,6 +385,8 @@ static ssize_t dp_aux_transfer(struct drm_dp_aux *dp_aux, ...@@ -380,6 +385,8 @@ static ssize_t dp_aux_transfer(struct drm_dp_aux *dp_aux,
} }
aux->cmd_busy = false; aux->cmd_busy = false;
exit:
mutex_unlock(&aux->mutex); mutex_unlock(&aux->mutex);
return ret; return ret;
...@@ -431,8 +438,13 @@ void dp_aux_init(struct drm_dp_aux *dp_aux) ...@@ -431,8 +438,13 @@ void dp_aux_init(struct drm_dp_aux *dp_aux)
aux = container_of(dp_aux, struct dp_aux_private, dp_aux); aux = container_of(dp_aux, struct dp_aux_private, dp_aux);
mutex_lock(&aux->mutex);
dp_catalog_aux_enable(aux->catalog, true); dp_catalog_aux_enable(aux->catalog, true);
aux->retry_cnt = 0; aux->retry_cnt = 0;
aux->initted = true;
mutex_unlock(&aux->mutex);
} }
void dp_aux_deinit(struct drm_dp_aux *dp_aux) void dp_aux_deinit(struct drm_dp_aux *dp_aux)
...@@ -441,7 +453,12 @@ void dp_aux_deinit(struct drm_dp_aux *dp_aux) ...@@ -441,7 +453,12 @@ void dp_aux_deinit(struct drm_dp_aux *dp_aux)
aux = container_of(dp_aux, struct dp_aux_private, dp_aux); aux = container_of(dp_aux, struct dp_aux_private, dp_aux);
mutex_lock(&aux->mutex);
aux->initted = false;
dp_catalog_aux_enable(aux->catalog, false); dp_catalog_aux_enable(aux->catalog, false);
mutex_unlock(&aux->mutex);
} }
int dp_aux_register(struct drm_dp_aux *dp_aux) int dp_aux_register(struct drm_dp_aux *dp_aux)
......
...@@ -1658,6 +1658,8 @@ static int dsi_host_parse_lane_data(struct msm_dsi_host *msm_host, ...@@ -1658,6 +1658,8 @@ static int dsi_host_parse_lane_data(struct msm_dsi_host *msm_host,
if (!prop) { if (!prop) {
DRM_DEV_DEBUG(dev, DRM_DEV_DEBUG(dev,
"failed to find data lane mapping, using default\n"); "failed to find data lane mapping, using default\n");
/* Set the number of date lanes to 4 by default. */
msm_host->num_data_lanes = 4;
return 0; return 0;
} }
......
...@@ -77,6 +77,7 @@ static int msm_gpu_open(struct inode *inode, struct file *file) ...@@ -77,6 +77,7 @@ static int msm_gpu_open(struct inode *inode, struct file *file)
goto free_priv; goto free_priv;
pm_runtime_get_sync(&gpu->pdev->dev); pm_runtime_get_sync(&gpu->pdev->dev);
msm_gpu_hw_init(gpu);
show_priv->state = gpu->funcs->gpu_state_get(gpu); show_priv->state = gpu->funcs->gpu_state_get(gpu);
pm_runtime_put_sync(&gpu->pdev->dev); pm_runtime_put_sync(&gpu->pdev->dev);
......
...@@ -967,29 +967,18 @@ static int msm_ioctl_gem_info(struct drm_device *dev, void *data, ...@@ -967,29 +967,18 @@ static int msm_ioctl_gem_info(struct drm_device *dev, void *data,
return ret; return ret;
} }
static int msm_ioctl_wait_fence(struct drm_device *dev, void *data, static int wait_fence(struct msm_gpu_submitqueue *queue, uint32_t fence_id,
struct drm_file *file) ktime_t timeout)
{ {
struct msm_drm_private *priv = dev->dev_private;
struct drm_msm_wait_fence *args = data;
ktime_t timeout = to_ktime(args->timeout);
struct msm_gpu_submitqueue *queue;
struct msm_gpu *gpu = priv->gpu;
struct dma_fence *fence; struct dma_fence *fence;
int ret; int ret;
if (args->pad) { if (fence_id > queue->last_fence) {
DRM_ERROR("invalid pad: %08x\n", args->pad); DRM_ERROR_RATELIMITED("waiting on invalid fence: %u (of %u)\n",
fence_id, queue->last_fence);
return -EINVAL; return -EINVAL;
} }
if (!gpu)
return 0;
queue = msm_submitqueue_get(file->driver_priv, args->queueid);
if (!queue)
return -ENOENT;
/* /*
* Map submitqueue scoped "seqno" (which is actually an idr key) * Map submitqueue scoped "seqno" (which is actually an idr key)
* back to underlying dma-fence * back to underlying dma-fence
...@@ -1001,7 +990,7 @@ static int msm_ioctl_wait_fence(struct drm_device *dev, void *data, ...@@ -1001,7 +990,7 @@ static int msm_ioctl_wait_fence(struct drm_device *dev, void *data,
ret = mutex_lock_interruptible(&queue->lock); ret = mutex_lock_interruptible(&queue->lock);
if (ret) if (ret)
return ret; return ret;
fence = idr_find(&queue->fence_idr, args->fence); fence = idr_find(&queue->fence_idr, fence_id);
if (fence) if (fence)
fence = dma_fence_get_rcu(fence); fence = dma_fence_get_rcu(fence);
mutex_unlock(&queue->lock); mutex_unlock(&queue->lock);
...@@ -1017,6 +1006,32 @@ static int msm_ioctl_wait_fence(struct drm_device *dev, void *data, ...@@ -1017,6 +1006,32 @@ static int msm_ioctl_wait_fence(struct drm_device *dev, void *data,
} }
dma_fence_put(fence); dma_fence_put(fence);
return ret;
}
static int msm_ioctl_wait_fence(struct drm_device *dev, void *data,
struct drm_file *file)
{
struct msm_drm_private *priv = dev->dev_private;
struct drm_msm_wait_fence *args = data;
struct msm_gpu_submitqueue *queue;
int ret;
if (args->pad) {
DRM_ERROR("invalid pad: %08x\n", args->pad);
return -EINVAL;
}
if (!priv->gpu)
return 0;
queue = msm_submitqueue_get(file->driver_priv, args->queueid);
if (!queue)
return -ENOENT;
ret = wait_fence(queue, args->fence, to_ktime(args->timeout));
msm_submitqueue_put(queue); msm_submitqueue_put(queue);
return ret; return ret;
......
...@@ -1056,8 +1056,7 @@ static int msm_gem_object_mmap(struct drm_gem_object *obj, struct vm_area_struct ...@@ -1056,8 +1056,7 @@ static int msm_gem_object_mmap(struct drm_gem_object *obj, struct vm_area_struct
{ {
struct msm_gem_object *msm_obj = to_msm_bo(obj); struct msm_gem_object *msm_obj = to_msm_bo(obj);
vma->vm_flags &= ~VM_PFNMAP; vma->vm_flags |= VM_IO | VM_MIXEDMAP | VM_DONTEXPAND | VM_DONTDUMP;
vma->vm_flags |= VM_MIXEDMAP | VM_DONTEXPAND;
vma->vm_page_prot = msm_gem_pgprot(msm_obj, vm_get_page_prot(vma->vm_flags)); vma->vm_page_prot = msm_gem_pgprot(msm_obj, vm_get_page_prot(vma->vm_flags));
return 0; return 0;
...@@ -1121,7 +1120,7 @@ static int msm_gem_new_impl(struct drm_device *dev, ...@@ -1121,7 +1120,7 @@ static int msm_gem_new_impl(struct drm_device *dev,
break; break;
fallthrough; fallthrough;
default: default:
DRM_DEV_ERROR(dev->dev, "invalid cache flag: %x\n", DRM_DEV_DEBUG(dev->dev, "invalid cache flag: %x\n",
(flags & MSM_BO_CACHE_MASK)); (flags & MSM_BO_CACHE_MASK));
return -EINVAL; return -EINVAL;
} }
......
...@@ -772,6 +772,7 @@ int msm_ioctl_gem_submit(struct drm_device *dev, void *data, ...@@ -772,6 +772,7 @@ int msm_ioctl_gem_submit(struct drm_device *dev, void *data,
args->nr_cmds); args->nr_cmds);
if (IS_ERR(submit)) { if (IS_ERR(submit)) {
ret = PTR_ERR(submit); ret = PTR_ERR(submit);
submit = NULL;
goto out_unlock; goto out_unlock;
} }
...@@ -904,6 +905,7 @@ int msm_ioctl_gem_submit(struct drm_device *dev, void *data, ...@@ -904,6 +905,7 @@ int msm_ioctl_gem_submit(struct drm_device *dev, void *data,
drm_sched_entity_push_job(&submit->base); drm_sched_entity_push_job(&submit->base);
args->fence = submit->fence_id; args->fence = submit->fence_id;
queue->last_fence = submit->fence_id;
msm_reset_syncobjs(syncobjs_to_reset, args->nr_in_syncobjs); msm_reset_syncobjs(syncobjs_to_reset, args->nr_in_syncobjs);
msm_process_post_deps(post_deps, args->nr_out_syncobjs, msm_process_post_deps(post_deps, args->nr_out_syncobjs,
......
...@@ -359,6 +359,8 @@ static inline int msm_gpu_convert_priority(struct msm_gpu *gpu, int prio, ...@@ -359,6 +359,8 @@ static inline int msm_gpu_convert_priority(struct msm_gpu *gpu, int prio,
* @ring_nr: the ringbuffer used by this submitqueue, which is determined * @ring_nr: the ringbuffer used by this submitqueue, which is determined
* by the submitqueue's priority * by the submitqueue's priority
* @faults: the number of GPU hangs associated with this submitqueue * @faults: the number of GPU hangs associated with this submitqueue
* @last_fence: the sequence number of the last allocated fence (for error
* checking)
* @ctx: the per-drm_file context associated with the submitqueue (ie. * @ctx: the per-drm_file context associated with the submitqueue (ie.
* which set of pgtables do submits jobs associated with the * which set of pgtables do submits jobs associated with the
* submitqueue use) * submitqueue use)
...@@ -374,6 +376,7 @@ struct msm_gpu_submitqueue { ...@@ -374,6 +376,7 @@ struct msm_gpu_submitqueue {
u32 flags; u32 flags;
u32 ring_nr; u32 ring_nr;
int faults; int faults;
uint32_t last_fence;
struct msm_file_private *ctx; struct msm_file_private *ctx;
struct list_head node; struct list_head node;
struct idr fence_idr; struct idr fence_idr;
......
...@@ -20,6 +20,10 @@ static int msm_devfreq_target(struct device *dev, unsigned long *freq, ...@@ -20,6 +20,10 @@ static int msm_devfreq_target(struct device *dev, unsigned long *freq,
struct msm_gpu *gpu = dev_to_gpu(dev); struct msm_gpu *gpu = dev_to_gpu(dev);
struct dev_pm_opp *opp; struct dev_pm_opp *opp;
/*
* Note that devfreq_recommended_opp() can modify the freq
* to something that actually is in the opp table:
*/
opp = devfreq_recommended_opp(dev, freq, flags); opp = devfreq_recommended_opp(dev, freq, flags);
/* /*
...@@ -28,6 +32,7 @@ static int msm_devfreq_target(struct device *dev, unsigned long *freq, ...@@ -28,6 +32,7 @@ static int msm_devfreq_target(struct device *dev, unsigned long *freq,
*/ */
if (gpu->devfreq.idle_freq) { if (gpu->devfreq.idle_freq) {
gpu->devfreq.idle_freq = *freq; gpu->devfreq.idle_freq = *freq;
dev_pm_opp_put(opp);
return 0; return 0;
} }
...@@ -203,9 +208,6 @@ static void msm_devfreq_idle_work(struct kthread_work *work) ...@@ -203,9 +208,6 @@ static void msm_devfreq_idle_work(struct kthread_work *work)
struct msm_gpu *gpu = container_of(df, struct msm_gpu, devfreq); struct msm_gpu *gpu = container_of(df, struct msm_gpu, devfreq);
unsigned long idle_freq, target_freq = 0; unsigned long idle_freq, target_freq = 0;
if (!df->devfreq)
return;
/* /*
* Hold devfreq lock to synchronize with get_dev_status()/ * Hold devfreq lock to synchronize with get_dev_status()/
* target() callbacks * target() callbacks
...@@ -227,6 +229,9 @@ void msm_devfreq_idle(struct msm_gpu *gpu) ...@@ -227,6 +229,9 @@ void msm_devfreq_idle(struct msm_gpu *gpu)
{ {
struct msm_gpu_devfreq *df = &gpu->devfreq; struct msm_gpu_devfreq *df = &gpu->devfreq;
if (!df->devfreq)
return;
msm_hrtimer_queue_work(&df->idle_work, ms_to_ktime(1), msm_hrtimer_queue_work(&df->idle_work, ms_to_ktime(1),
HRTIMER_MODE_ABS); HRTIMER_MODE_REL);
} }
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment