Commit 397ab98e authored by Dave Airlie

Merge tag 'drm-msm-next-2021-08-12' of https://gitlab.freedesktop.org/drm/msm into drm-next

This is the main pull for v5.15, after the early pull request with
drm/scheduler conversion:

* New a6xx GPU support: a680 and 7c3
* dsi: 7nm phy, sc7280 support, test pattern generator support
* mdp4 fixes for older hw like the nexus7
* displayport fixes
Signed-off-by: Dave Airlie <airlied@redhat.com>
From: Rob Clark <robdclark@gmail.com>
Link: https://patchwork.freedesktop.org/patch/msgid/CAF6AEGs_tyanTeDGMH1X+Uf4wdyy7jYj-CinGXXVETiYOESahw@mail.gmail.com
parents f97a1b65 cb0927ab
...@@ -64,6 +64,18 @@ properties: ...@@ -64,6 +64,18 @@ properties:
Indicates if the DSI controller is driving a panel which needs Indicates if the DSI controller is driving a panel which needs
2 DSI links. 2 DSI links.
assigned-clocks:
minItems: 2
maxItems: 2
description: |
Parents of "byte" and "pixel" for the given platform.
assigned-clock-parents:
minItems: 2
maxItems: 2
description: |
The Byte clock and Pixel clock PLL outputs provided by a DSI PHY block.
power-domains: power-domains:
maxItems: 1 maxItems: 1
...@@ -119,6 +131,8 @@ required: ...@@ -119,6 +131,8 @@ required:
- clock-names - clock-names
- phys - phys
- phy-names - phy-names
- assigned-clocks
- assigned-clock-parents
- power-domains - power-domains
- operating-points-v2 - operating-points-v2
- ports - ports
...@@ -159,6 +173,9 @@ examples: ...@@ -159,6 +173,9 @@ examples:
phys = <&dsi0_phy>; phys = <&dsi0_phy>;
phy-names = "dsi"; phy-names = "dsi";
assigned-clocks = <&dispcc DISP_CC_MDSS_BYTE0_CLK_SRC>, <&dispcc DISP_CC_MDSS_PCLK0_CLK_SRC>;
assigned-clock-parents = <&dsi_phy 0>, <&dsi_phy 1>;
power-domains = <&rpmhpd SC7180_CX>; power-domains = <&rpmhpd SC7180_CX>;
operating-points-v2 = <&dsi_opp_table>; operating-points-v2 = <&dsi_opp_table>;
......
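For reference, a minimal device tree sketch of the two newly required properties, modelled on the sc7180 example in this hunk; it assumes the PHY's clock indices 0 and 1 are its byte and pixel PLL outputs, as they are in that example:

    dsi@ae94000 {
        /* ... the other required properties stay as before ... */

        /* reparent the byte/pixel clock sources to the DSI PHY PLL outputs */
        assigned-clocks = <&dispcc DISP_CC_MDSS_BYTE0_CLK_SRC>,
                          <&dispcc DISP_CC_MDSS_PCLK0_CLK_SRC>;
        assigned-clock-parents = <&dsi_phy 0>, <&dsi_phy 1>;
    };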
# SPDX-License-Identifier: GPL-2.0-only or BSD-2-Clause
%YAML 1.2
---
$id: http://devicetree.org/schemas/display/msm/dsi-phy-7nm.yaml#
$schema: http://devicetree.org/meta-schemas/core.yaml#
title: Qualcomm Display DSI 7nm PHY
maintainers:
- Jonathan Marek <jonathan@marek.ca>
allOf:
- $ref: dsi-phy-common.yaml#
properties:
compatible:
oneOf:
- const: qcom,dsi-phy-7nm
- const: qcom,dsi-phy-7nm-8150
- const: qcom,sc7280-dsi-phy-7nm
reg:
items:
- description: dsi phy register set
- description: dsi phy lane register set
- description: dsi pll register set
reg-names:
items:
- const: dsi_phy
- const: dsi_phy_lane
- const: dsi_pll
vdds-supply:
description: |
Connected to VDD_A_DSI_PLL_0P9 pin (or VDDA_DSI{0,1}_PLL_0P9 for sm8150)
phy-type:
description: D-PHY (default) or C-PHY mode
enum: [ 10, 11 ]
default: 10
required:
- compatible
- reg
- reg-names
- vdds-supply
unevaluatedProperties: false
examples:
- |
#include <dt-bindings/clock/qcom,dispcc-sm8250.h>
#include <dt-bindings/clock/qcom,rpmh.h>
dsi-phy@ae94400 {
compatible = "qcom,dsi-phy-7nm";
reg = <0x0ae94400 0x200>,
<0x0ae94600 0x280>,
<0x0ae94900 0x260>;
reg-names = "dsi_phy",
"dsi_phy_lane",
"dsi_pll";
#clock-cells = <1>;
#phy-cells = <0>;
vdds-supply = <&vreg_l5a_0p88>;
clocks = <&dispcc DISP_CC_MDSS_AHB_CLK>,
<&rpmhcc RPMH_CXO_CLK>;
clock-names = "iface", "ref";
};
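The phy-type property above selects between D-PHY (the default) and C-PHY operation. A hedged sketch of how a board DTS would request C-PHY mode; the enum values 10 and 11 are assumed to correspond to the generic PHY_TYPE_DPHY/PHY_TYPE_CPHY constants from dt-bindings/phy/phy.h:

    #include <dt-bindings/phy/phy.h>

    &dsi0_phy {
        /* run the PHY in C-PHY mode instead of the default D-PHY */
        phy-type = <PHY_TYPE_CPHY>;
    };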
...@@ -116,9 +116,9 @@ config DRM_MSM_DSI_10NM_PHY ...@@ -116,9 +116,9 @@ config DRM_MSM_DSI_10NM_PHY
Choose this option if DSI PHY on SDM845 is used on the platform. Choose this option if DSI PHY on SDM845 is used on the platform.
config DRM_MSM_DSI_7NM_PHY config DRM_MSM_DSI_7NM_PHY
bool "Enable DSI 7nm PHY driver in MSM DRM (used by SM8150/SM8250)" bool "Enable DSI 7nm PHY driver in MSM DRM"
depends on DRM_MSM_DSI depends on DRM_MSM_DSI
default y default y
help help
Choose this option if DSI PHY on SM8150/SM8250 is used on the Choose this option if DSI PHY on SM8150/SM8250/SC7280 is used on
platform. the platform.
...@@ -18,6 +18,18 @@ static void a5xx_dump(struct msm_gpu *gpu); ...@@ -18,6 +18,18 @@ static void a5xx_dump(struct msm_gpu *gpu);
#define GPU_PAS_ID 13 #define GPU_PAS_ID 13
static void update_shadow_rptr(struct msm_gpu *gpu, struct msm_ringbuffer *ring)
{
struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
struct a5xx_gpu *a5xx_gpu = to_a5xx_gpu(adreno_gpu);
if (a5xx_gpu->has_whereami) {
OUT_PKT7(ring, CP_WHERE_AM_I, 2);
OUT_RING(ring, lower_32_bits(shadowptr(a5xx_gpu, ring)));
OUT_RING(ring, upper_32_bits(shadowptr(a5xx_gpu, ring)));
}
}
void a5xx_flush(struct msm_gpu *gpu, struct msm_ringbuffer *ring, void a5xx_flush(struct msm_gpu *gpu, struct msm_ringbuffer *ring,
bool sync) bool sync)
{ {
...@@ -30,11 +42,8 @@ void a5xx_flush(struct msm_gpu *gpu, struct msm_ringbuffer *ring, ...@@ -30,11 +42,8 @@ void a5xx_flush(struct msm_gpu *gpu, struct msm_ringbuffer *ring,
* Most flush operations need to issue a WHERE_AM_I opcode to sync up * Most flush operations need to issue a WHERE_AM_I opcode to sync up
* the rptr shadow * the rptr shadow
*/ */
if (a5xx_gpu->has_whereami && sync) { if (sync)
OUT_PKT7(ring, CP_WHERE_AM_I, 2); update_shadow_rptr(gpu, ring);
OUT_RING(ring, lower_32_bits(shadowptr(a5xx_gpu, ring)));
OUT_RING(ring, upper_32_bits(shadowptr(a5xx_gpu, ring)));
}
spin_lock_irqsave(&ring->preempt_lock, flags); spin_lock_irqsave(&ring->preempt_lock, flags);
...@@ -168,6 +177,16 @@ static void a5xx_submit(struct msm_gpu *gpu, struct msm_gem_submit *submit) ...@@ -168,6 +177,16 @@ static void a5xx_submit(struct msm_gpu *gpu, struct msm_gem_submit *submit)
ibs++; ibs++;
break; break;
} }
/*
* Periodically update shadow-wptr if needed, so that we
* can see partial progress of submits with large # of
* cmds.. otherwise we could needlessly stall waiting for
* ringbuffer state, simply due to looking at a shadow
* rptr value that has not been updated
*/
if ((ibs % 32) == 0)
update_shadow_rptr(gpu, ring);
} }
/* /*
......
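The hunk above factors the CP_WHERE_AM_I emission into update_shadow_rptr() and, new in this series, also calls it every 32 IBs inside the submit loop. A minimal standalone sketch of the pattern (hypothetical names, not the driver's structures): the CPU-side ring-space check reads the shadow rptr, so without intermediate write-backs a long submit looks like zero progress until it fully retires.

    /* Sketch only: refresh the CPU-visible read pointer every N commands. */
    #include <stdint.h>

    #define RPTR_REFRESH_INTERVAL 32

    struct ring { uint32_t shadow_rptr; };                    /* CPU-visible copy */

    static void emit_one(struct ring *r, uint32_t cmd)  { (void)r; (void)cmd; } /* stand-in */
    static void request_rptr_writeback(struct ring *r)  { (void)r; }            /* stand-in */

    static void emit_commands(struct ring *r, const uint32_t *cmds, unsigned int nr)
    {
        for (unsigned int i = 0; i < nr; i++) {
            emit_one(r, cmds[i]);

            /* let waiters see partial progress instead of a stale rptr */
            if (i && (i % RPTR_REFRESH_INTERVAL) == 0)
                request_rptr_writeback(r);
        }
    }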
...@@ -519,9 +519,9 @@ static void a6xx_gmu_rpmh_init(struct a6xx_gmu *gmu) ...@@ -519,9 +519,9 @@ static void a6xx_gmu_rpmh_init(struct a6xx_gmu *gmu)
if (!pdcptr) if (!pdcptr)
goto err; goto err;
if (adreno_is_a650(adreno_gpu) || adreno_is_a660(adreno_gpu)) if (adreno_is_a650(adreno_gpu) || adreno_is_a660_family(adreno_gpu))
pdc_in_aop = true; pdc_in_aop = true;
else if (adreno_is_a618(adreno_gpu) || adreno_is_a640(adreno_gpu)) else if (adreno_is_a618(adreno_gpu) || adreno_is_a640_family(adreno_gpu))
pdc_address_offset = 0x30090; pdc_address_offset = 0x30090;
else else
pdc_address_offset = 0x30080; pdc_address_offset = 0x30080;
...@@ -933,6 +933,7 @@ int a6xx_gmu_resume(struct a6xx_gpu *a6xx_gpu) ...@@ -933,6 +933,7 @@ int a6xx_gmu_resume(struct a6xx_gpu *a6xx_gpu)
/* Use a known rate to bring up the GMU */ /* Use a known rate to bring up the GMU */
clk_set_rate(gmu->core_clk, 200000000); clk_set_rate(gmu->core_clk, 200000000);
clk_set_rate(gmu->hub_clk, 150000000);
ret = clk_bulk_prepare_enable(gmu->nr_clocks, gmu->clocks); ret = clk_bulk_prepare_enable(gmu->nr_clocks, gmu->clocks);
if (ret) { if (ret) {
pm_runtime_put(gmu->gxpd); pm_runtime_put(gmu->gxpd);
...@@ -1393,6 +1394,9 @@ static int a6xx_gmu_clocks_probe(struct a6xx_gmu *gmu) ...@@ -1393,6 +1394,9 @@ static int a6xx_gmu_clocks_probe(struct a6xx_gmu *gmu)
gmu->core_clk = msm_clk_bulk_get_clock(gmu->clocks, gmu->core_clk = msm_clk_bulk_get_clock(gmu->clocks,
gmu->nr_clocks, "gmu"); gmu->nr_clocks, "gmu");
gmu->hub_clk = msm_clk_bulk_get_clock(gmu->clocks,
gmu->nr_clocks, "hub");
return 0; return 0;
} }
...@@ -1504,7 +1508,7 @@ int a6xx_gmu_init(struct a6xx_gpu *a6xx_gpu, struct device_node *node) ...@@ -1504,7 +1508,7 @@ int a6xx_gmu_init(struct a6xx_gpu *a6xx_gpu, struct device_node *node)
* are otherwise unused by a660. * are otherwise unused by a660.
*/ */
gmu->dummy.size = SZ_4K; gmu->dummy.size = SZ_4K;
if (adreno_is_a660(adreno_gpu)) { if (adreno_is_a660_family(adreno_gpu)) {
ret = a6xx_gmu_memory_alloc(gmu, &gmu->debug, SZ_4K * 7, 0x60400000); ret = a6xx_gmu_memory_alloc(gmu, &gmu->debug, SZ_4K * 7, 0x60400000);
if (ret) if (ret)
goto err_memory; goto err_memory;
...@@ -1522,7 +1526,7 @@ int a6xx_gmu_init(struct a6xx_gpu *a6xx_gpu, struct device_node *node) ...@@ -1522,7 +1526,7 @@ int a6xx_gmu_init(struct a6xx_gpu *a6xx_gpu, struct device_node *node)
SZ_16M - SZ_16K, 0x04000); SZ_16M - SZ_16K, 0x04000);
if (ret) if (ret)
goto err_memory; goto err_memory;
} else if (adreno_is_a640(adreno_gpu)) { } else if (adreno_is_a640_family(adreno_gpu)) {
ret = a6xx_gmu_memory_alloc(gmu, &gmu->icache, ret = a6xx_gmu_memory_alloc(gmu, &gmu->icache,
SZ_256K - SZ_16K, 0x04000); SZ_256K - SZ_16K, 0x04000);
if (ret) if (ret)
......
...@@ -66,6 +66,7 @@ struct a6xx_gmu { ...@@ -66,6 +66,7 @@ struct a6xx_gmu {
int nr_clocks; int nr_clocks;
struct clk_bulk_data *clocks; struct clk_bulk_data *clocks;
struct clk *core_clk; struct clk *core_clk;
struct clk *hub_clk;
/* current performance index set externally */ /* current performance index set externally */
int current_perf_index; int current_perf_index;
......
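a6xx_gmu_resume() now also sets a rate on the new "hub" clock, which is picked out of the already-probed clk_bulk array by name via msm_clk_bulk_get_clock(). A simplified sketch of what such a by-name lookup amounts to (the real msm helper may differ in details such as name suffix handling):

    #include <linux/clk.h>
    #include <linux/string.h>

    /* Sketch: return the named clock from a clk_bulk_data array, or NULL. */
    static struct clk *bulk_find_clock(struct clk_bulk_data *bulk, int count,
                                       const char *name)
    {
        int i;

        for (i = 0; i < count; i++)
            if (bulk[i].id && !strcmp(bulk[i].id, name))
                return bulk[i].clk;

        return NULL;
    }

Returning NULL for platforms without a "hub" clock is harmless in the resume path, since clk_set_rate() treats a NULL clock as a no-op in the common clock framework.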
[diff for this file is collapsed in the web view and not shown]
...@@ -382,6 +382,36 @@ static void a660_build_bw_table(struct a6xx_hfi_msg_bw_table *msg) ...@@ -382,6 +382,36 @@ static void a660_build_bw_table(struct a6xx_hfi_msg_bw_table *msg)
msg->cnoc_cmds_data[1][0] = 0x60000001; msg->cnoc_cmds_data[1][0] = 0x60000001;
} }
static void adreno_7c3_build_bw_table(struct a6xx_hfi_msg_bw_table *msg)
{
/*
* Send a single "off" entry just to get things running
* TODO: bus scaling
*/
msg->bw_level_num = 1;
msg->ddr_cmds_num = 3;
msg->ddr_wait_bitmask = 0x07;
msg->ddr_cmds_addrs[0] = 0x50004;
msg->ddr_cmds_addrs[1] = 0x50000;
msg->ddr_cmds_addrs[2] = 0x50088;
msg->ddr_cmds_data[0][0] = 0x40000000;
msg->ddr_cmds_data[0][1] = 0x40000000;
msg->ddr_cmds_data[0][2] = 0x40000000;
/*
* These are the CX (CNOC) votes - these are used by the GMU but the
* votes are known and fixed for the target
*/
msg->cnoc_cmds_num = 1;
msg->cnoc_wait_bitmask = 0x01;
msg->cnoc_cmds_addrs[0] = 0x5006c;
msg->cnoc_cmds_data[0][0] = 0x40000000;
msg->cnoc_cmds_data[1][0] = 0x60000001;
}
static void a6xx_build_bw_table(struct a6xx_hfi_msg_bw_table *msg) static void a6xx_build_bw_table(struct a6xx_hfi_msg_bw_table *msg)
{ {
/* Send a single "off" entry since the 630 GMU doesn't do bus scaling */ /* Send a single "off" entry since the 630 GMU doesn't do bus scaling */
...@@ -428,10 +458,12 @@ static int a6xx_hfi_send_bw_table(struct a6xx_gmu *gmu) ...@@ -428,10 +458,12 @@ static int a6xx_hfi_send_bw_table(struct a6xx_gmu *gmu)
if (adreno_is_a618(adreno_gpu)) if (adreno_is_a618(adreno_gpu))
a618_build_bw_table(&msg); a618_build_bw_table(&msg);
else if (adreno_is_a640(adreno_gpu)) else if (adreno_is_a640_family(adreno_gpu))
a640_build_bw_table(&msg); a640_build_bw_table(&msg);
else if (adreno_is_a650(adreno_gpu)) else if (adreno_is_a650(adreno_gpu))
a650_build_bw_table(&msg); a650_build_bw_table(&msg);
else if (adreno_is_7c3(adreno_gpu))
adreno_7c3_build_bw_table(&msg);
else if (adreno_is_a660(adreno_gpu)) else if (adreno_is_a660(adreno_gpu))
a660_build_bw_table(&msg); a660_build_bw_table(&msg);
else else
......
...@@ -8,8 +8,6 @@ ...@@ -8,8 +8,6 @@
#include "adreno_gpu.h" #include "adreno_gpu.h"
#define ANY_ID 0xff
bool hang_debug = false; bool hang_debug = false;
MODULE_PARM_DESC(hang_debug, "Dump registers when hang is detected (can be slow!)"); MODULE_PARM_DESC(hang_debug, "Dump registers when hang is detected (can be slow!)");
module_param_named(hang_debug, hang_debug, bool, 0600); module_param_named(hang_debug, hang_debug, bool, 0600);
...@@ -300,6 +298,30 @@ static const struct adreno_info gpulist[] = { ...@@ -300,6 +298,30 @@ static const struct adreno_info gpulist[] = {
.init = a6xx_gpu_init, .init = a6xx_gpu_init,
.zapfw = "a660_zap.mdt", .zapfw = "a660_zap.mdt",
.hwcg = a660_hwcg, .hwcg = a660_hwcg,
}, {
.rev = ADRENO_REV(6, 3, 5, ANY_ID),
.name = "Adreno 7c Gen 3",
.fw = {
[ADRENO_FW_SQE] = "a660_sqe.fw",
[ADRENO_FW_GMU] = "a660_gmu.bin",
},
.gmem = SZ_512K,
.inactive_period = DRM_MSM_INACTIVE_PERIOD,
.init = a6xx_gpu_init,
.hwcg = a660_hwcg,
}, {
.rev = ADRENO_REV(6, 8, 0, ANY_ID),
.revn = 680,
.name = "A680",
.fw = {
[ADRENO_FW_SQE] = "a630_sqe.fw",
[ADRENO_FW_GMU] = "a640_gmu.bin",
},
.gmem = SZ_2M,
.inactive_period = DRM_MSM_INACTIVE_PERIOD,
.init = a6xx_gpu_init,
.zapfw = "a640_zap.mdt",
.hwcg = a640_hwcg,
}, },
}; };
...@@ -325,6 +347,15 @@ static inline bool _rev_match(uint8_t entry, uint8_t id) ...@@ -325,6 +347,15 @@ static inline bool _rev_match(uint8_t entry, uint8_t id)
return (entry == ANY_ID) || (entry == id); return (entry == ANY_ID) || (entry == id);
} }
bool adreno_cmp_rev(struct adreno_rev rev1, struct adreno_rev rev2)
{
return _rev_match(rev1.core, rev2.core) &&
_rev_match(rev1.major, rev2.major) &&
_rev_match(rev1.minor, rev2.minor) &&
_rev_match(rev1.patchid, rev2.patchid);
}
const struct adreno_info *adreno_info(struct adreno_rev rev) const struct adreno_info *adreno_info(struct adreno_rev rev)
{ {
int i; int i;
...@@ -332,10 +363,7 @@ const struct adreno_info *adreno_info(struct adreno_rev rev) ...@@ -332,10 +363,7 @@ const struct adreno_info *adreno_info(struct adreno_rev rev)
/* identify gpu: */ /* identify gpu: */
for (i = 0; i < ARRAY_SIZE(gpulist); i++) { for (i = 0; i < ARRAY_SIZE(gpulist); i++) {
const struct adreno_info *info = &gpulist[i]; const struct adreno_info *info = &gpulist[i];
if (_rev_match(info->rev.core, rev.core) && if (adreno_cmp_rev(info->rev, rev))
_rev_match(info->rev.major, rev.major) &&
_rev_match(info->rev.minor, rev.minor) &&
_rev_match(info->rev.patchid, rev.patchid))
return info; return info;
} }
......
...@@ -42,6 +42,8 @@ struct adreno_rev { ...@@ -42,6 +42,8 @@ struct adreno_rev {
uint8_t patchid; uint8_t patchid;
}; };
#define ANY_ID 0xff
#define ADRENO_REV(core, major, minor, patchid) \ #define ADRENO_REV(core, major, minor, patchid) \
((struct adreno_rev){ core, major, minor, patchid }) ((struct adreno_rev){ core, major, minor, patchid })
...@@ -141,6 +143,8 @@ struct adreno_platform_config { ...@@ -141,6 +143,8 @@ struct adreno_platform_config {
__ret; \ __ret; \
}) })
bool adreno_cmp_rev(struct adreno_rev rev1, struct adreno_rev rev2);
static inline bool adreno_is_a2xx(struct adreno_gpu *gpu) static inline bool adreno_is_a2xx(struct adreno_gpu *gpu)
{ {
return (gpu->revn < 300); return (gpu->revn < 300);
...@@ -237,9 +241,9 @@ static inline int adreno_is_a630(struct adreno_gpu *gpu) ...@@ -237,9 +241,9 @@ static inline int adreno_is_a630(struct adreno_gpu *gpu)
return gpu->revn == 630; return gpu->revn == 630;
} }
static inline int adreno_is_a640(struct adreno_gpu *gpu) static inline int adreno_is_a640_family(struct adreno_gpu *gpu)
{ {
return gpu->revn == 640; return (gpu->revn == 640) || (gpu->revn == 680);
} }
static inline int adreno_is_a650(struct adreno_gpu *gpu) static inline int adreno_is_a650(struct adreno_gpu *gpu)
...@@ -247,15 +251,27 @@ static inline int adreno_is_a650(struct adreno_gpu *gpu) ...@@ -247,15 +251,27 @@ static inline int adreno_is_a650(struct adreno_gpu *gpu)
return gpu->revn == 650; return gpu->revn == 650;
} }
static inline int adreno_is_7c3(struct adreno_gpu *gpu)
{
/* The order of args is important here to handle ANY_ID correctly */
return adreno_cmp_rev(ADRENO_REV(6, 3, 5, ANY_ID), gpu->rev);
}
static inline int adreno_is_a660(struct adreno_gpu *gpu) static inline int adreno_is_a660(struct adreno_gpu *gpu)
{ {
return gpu->revn == 660; return gpu->revn == 660;
} }
static inline int adreno_is_a660_family(struct adreno_gpu *gpu)
{
return adreno_is_a660(gpu) || adreno_is_7c3(gpu);
}
/* check for a650, a660, or any derivatives */ /* check for a650, a660, or any derivatives */
static inline int adreno_is_a650_family(struct adreno_gpu *gpu) static inline int adreno_is_a650_family(struct adreno_gpu *gpu)
{ {
return gpu->revn == 650 || gpu->revn == 620 || gpu->revn == 660; return gpu->revn == 650 || gpu->revn == 620 ||
adreno_is_a660_family(gpu);
} }
int adreno_get_param(struct msm_gpu *gpu, uint32_t param, uint64_t *value); int adreno_get_param(struct msm_gpu *gpu, uint32_t param, uint64_t *value);
......
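The comment in adreno_is_7c3() about argument order deserves a concrete illustration: _rev_match(entry, id) only treats ANY_ID as a wildcard on its first ("entry") argument, so the wildcarded table pattern has to be the first argument to adreno_cmp_rev(). A small example, illustration only, assuming the adreno_gpu.h definitions above:

    /* Illustration only (would sit alongside adreno_gpu.h). */
    static void any_id_order_example(void)
    {
        struct adreno_rev pattern = ADRENO_REV(6, 3, 5, ANY_ID); /* gpulist-style entry          */
        struct adreno_rev chip = ADRENO_REV(6, 3, 5, 2);         /* hypothetical 7c3 patch level */

        adreno_cmp_rev(pattern, chip); /* true:  ANY_ID on the pattern side matches patchid 2 */
        adreno_cmp_rev(chip, pattern); /* false: 2 is not ANY_ID and does not equal 0xff      */
    }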
...@@ -30,12 +30,6 @@ ...@@ -30,12 +30,6 @@
#include "dpu_core_perf.h" #include "dpu_core_perf.h"
#include "dpu_trace.h" #include "dpu_trace.h"
#define DPU_DRM_BLEND_OP_NOT_DEFINED 0
#define DPU_DRM_BLEND_OP_OPAQUE 1
#define DPU_DRM_BLEND_OP_PREMULTIPLIED 2
#define DPU_DRM_BLEND_OP_COVERAGE 3
#define DPU_DRM_BLEND_OP_MAX 4
/* layer mixer index on dpu_crtc */ /* layer mixer index on dpu_crtc */
#define LEFT_MIXER 0 #define LEFT_MIXER 0
#define RIGHT_MIXER 1 #define RIGHT_MIXER 1
...@@ -146,20 +140,43 @@ static void _dpu_crtc_setup_blend_cfg(struct dpu_crtc_mixer *mixer, ...@@ -146,20 +140,43 @@ static void _dpu_crtc_setup_blend_cfg(struct dpu_crtc_mixer *mixer,
{ {
struct dpu_hw_mixer *lm = mixer->hw_lm; struct dpu_hw_mixer *lm = mixer->hw_lm;
uint32_t blend_op; uint32_t blend_op;
uint32_t fg_alpha, bg_alpha;
/* default to opaque blending */ fg_alpha = pstate->base.alpha >> 8;
blend_op = DPU_BLEND_FG_ALPHA_FG_CONST | bg_alpha = 0xff - fg_alpha;
DPU_BLEND_BG_ALPHA_BG_CONST;
if (format->alpha_enable) { /* default to opaque blending */
if (pstate->base.pixel_blend_mode == DRM_MODE_BLEND_PIXEL_NONE ||
!format->alpha_enable) {
blend_op = DPU_BLEND_FG_ALPHA_FG_CONST |
DPU_BLEND_BG_ALPHA_BG_CONST;
} else if (pstate->base.pixel_blend_mode == DRM_MODE_BLEND_PREMULTI) {
blend_op = DPU_BLEND_FG_ALPHA_FG_CONST |
DPU_BLEND_BG_ALPHA_FG_PIXEL;
if (fg_alpha != 0xff) {
bg_alpha = fg_alpha;
blend_op |= DPU_BLEND_BG_MOD_ALPHA |
DPU_BLEND_BG_INV_MOD_ALPHA;
} else {
blend_op |= DPU_BLEND_BG_INV_ALPHA;
}
} else {
/* coverage blending */ /* coverage blending */
blend_op = DPU_BLEND_FG_ALPHA_FG_PIXEL | blend_op = DPU_BLEND_FG_ALPHA_FG_PIXEL |
DPU_BLEND_BG_ALPHA_FG_PIXEL | DPU_BLEND_BG_ALPHA_FG_PIXEL;
DPU_BLEND_BG_INV_ALPHA; if (fg_alpha != 0xff) {
bg_alpha = fg_alpha;
blend_op |= DPU_BLEND_FG_MOD_ALPHA |
DPU_BLEND_FG_INV_MOD_ALPHA |
DPU_BLEND_BG_MOD_ALPHA |
DPU_BLEND_BG_INV_MOD_ALPHA;
} else {
blend_op |= DPU_BLEND_BG_INV_ALPHA;
}
} }
lm->ops.setup_blend_config(lm, pstate->stage, lm->ops.setup_blend_config(lm, pstate->stage,
0xFF, 0, blend_op); fg_alpha, bg_alpha, blend_op);
DRM_DEBUG_ATOMIC("format:%p4cc, alpha_en:%u blend_op:0x%x\n", DRM_DEBUG_ATOMIC("format:%p4cc, alpha_en:%u blend_op:0x%x\n",
&format->base.pixel_format, format->alpha_enable, blend_op); &format->base.pixel_format, format->alpha_enable, blend_op);
......
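For reference, the per-pixel math the three modes correspond to, following the formulas documented for drm_plane_create_blend_mode_property(); this is a standalone sketch with hypothetical types, not the DPU register programming above.

    /* Normalized 0..1 reference blend math (sketch, hypothetical types). */
    struct px { float r, g, b, a; };

    enum blend_mode { BLEND_PIXEL_NONE, BLEND_PREMULTI, BLEND_COVERAGE };

    static struct px blend(struct px fg, struct px bg, float plane_alpha,
                           enum blend_mode mode)
    {
        float fw = plane_alpha;               /* weight applied to fg.rgb */
        float bw = 1.0f - plane_alpha;        /* weight applied to bg.rgb */

        switch (mode) {
        case BLEND_PIXEL_NONE:                /* per-pixel alpha ignored */
            break;
        case BLEND_PREMULTI:                  /* fg.rgb is already premultiplied by fg.a */
            bw = 1.0f - plane_alpha * fg.a;
            break;
        case BLEND_COVERAGE:                  /* straight (non-premultiplied) alpha */
            fw = plane_alpha * fg.a;
            bw = 1.0f - plane_alpha * fg.a;
            break;
        }

        return (struct px){ fw * fg.r + bw * bg.r,
                            fw * fg.g + bw * bg.g,
                            fw * fg.b + bw * bg.b,
                            1.0f };
    }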
...@@ -274,20 +274,20 @@ int dpu_encoder_helper_wait_for_irq(struct dpu_encoder_phys *phys_enc, ...@@ -274,20 +274,20 @@ int dpu_encoder_helper_wait_for_irq(struct dpu_encoder_phys *phys_enc,
/* return EWOULDBLOCK since we know the wait isn't necessary */ /* return EWOULDBLOCK since we know the wait isn't necessary */
if (phys_enc->enable_state == DPU_ENC_DISABLED) { if (phys_enc->enable_state == DPU_ENC_DISABLED) {
DRM_ERROR("encoder is disabled id=%u, intr=%d, irq=%d", DRM_ERROR("encoder is disabled id=%u, intr=%d, irq=%d\n",
DRMID(phys_enc->parent), intr_idx, DRMID(phys_enc->parent), intr_idx,
irq->irq_idx); irq->irq_idx);
return -EWOULDBLOCK; return -EWOULDBLOCK;
} }
if (irq->irq_idx < 0) { if (irq->irq_idx < 0) {
DRM_DEBUG_KMS("skip irq wait id=%u, intr=%d, irq=%s", DRM_DEBUG_KMS("skip irq wait id=%u, intr=%d, irq=%s\n",
DRMID(phys_enc->parent), intr_idx, DRMID(phys_enc->parent), intr_idx,
irq->name); irq->name);
return 0; return 0;
} }
DRM_DEBUG_KMS("id=%u, intr=%d, irq=%d, pp=%d, pending_cnt=%d", DRM_DEBUG_KMS("id=%u, intr=%d, irq=%d, pp=%d, pending_cnt=%d\n",
DRMID(phys_enc->parent), intr_idx, DRMID(phys_enc->parent), intr_idx,
irq->irq_idx, phys_enc->hw_pp->idx - PINGPONG_0, irq->irq_idx, phys_enc->hw_pp->idx - PINGPONG_0,
atomic_read(wait_info->atomic_cnt)); atomic_read(wait_info->atomic_cnt));
...@@ -303,8 +303,7 @@ int dpu_encoder_helper_wait_for_irq(struct dpu_encoder_phys *phys_enc, ...@@ -303,8 +303,7 @@ int dpu_encoder_helper_wait_for_irq(struct dpu_encoder_phys *phys_enc,
if (irq_status) { if (irq_status) {
unsigned long flags; unsigned long flags;
DRM_DEBUG_KMS("irq not triggered id=%u, intr=%d, " DRM_DEBUG_KMS("irq not triggered id=%u, intr=%d, irq=%d, pp=%d, atomic_cnt=%d\n",
"irq=%d, pp=%d, atomic_cnt=%d",
DRMID(phys_enc->parent), intr_idx, DRMID(phys_enc->parent), intr_idx,
irq->irq_idx, irq->irq_idx,
phys_enc->hw_pp->idx - PINGPONG_0, phys_enc->hw_pp->idx - PINGPONG_0,
...@@ -315,8 +314,7 @@ int dpu_encoder_helper_wait_for_irq(struct dpu_encoder_phys *phys_enc, ...@@ -315,8 +314,7 @@ int dpu_encoder_helper_wait_for_irq(struct dpu_encoder_phys *phys_enc,
ret = 0; ret = 0;
} else { } else {
ret = -ETIMEDOUT; ret = -ETIMEDOUT;
DRM_DEBUG_KMS("irq timeout id=%u, intr=%d, " DRM_DEBUG_KMS("irq timeout id=%u, intr=%d, irq=%d, pp=%d, atomic_cnt=%d\n",
"irq=%d, pp=%d, atomic_cnt=%d",
DRMID(phys_enc->parent), intr_idx, DRMID(phys_enc->parent), intr_idx,
irq->irq_idx, irq->irq_idx,
phys_enc->hw_pp->idx - PINGPONG_0, phys_enc->hw_pp->idx - PINGPONG_0,
......
...@@ -974,6 +974,7 @@ static const struct dpu_perf_cfg sdm845_perf_data = { ...@@ -974,6 +974,7 @@ static const struct dpu_perf_cfg sdm845_perf_data = {
.amortizable_threshold = 25, .amortizable_threshold = 25,
.min_prefill_lines = 24, .min_prefill_lines = 24,
.danger_lut_tbl = {0xf, 0xffff, 0x0}, .danger_lut_tbl = {0xf, 0xffff, 0x0},
.safe_lut_tbl = {0xfff0, 0xf000, 0xffff},
.qos_lut_tbl = { .qos_lut_tbl = {
{.nentry = ARRAY_SIZE(sdm845_qos_linear), {.nentry = ARRAY_SIZE(sdm845_qos_linear),
.entries = sdm845_qos_linear .entries = sdm845_qos_linear
...@@ -1001,6 +1002,7 @@ static const struct dpu_perf_cfg sc7180_perf_data = { ...@@ -1001,6 +1002,7 @@ static const struct dpu_perf_cfg sc7180_perf_data = {
.min_dram_ib = 1600000, .min_dram_ib = 1600000,
.min_prefill_lines = 24, .min_prefill_lines = 24,
.danger_lut_tbl = {0xff, 0xffff, 0x0}, .danger_lut_tbl = {0xff, 0xffff, 0x0},
.safe_lut_tbl = {0xfff0, 0xff00, 0xffff},
.qos_lut_tbl = { .qos_lut_tbl = {
{.nentry = ARRAY_SIZE(sc7180_qos_linear), {.nentry = ARRAY_SIZE(sc7180_qos_linear),
.entries = sc7180_qos_linear .entries = sc7180_qos_linear
...@@ -1028,6 +1030,7 @@ static const struct dpu_perf_cfg sm8150_perf_data = { ...@@ -1028,6 +1030,7 @@ static const struct dpu_perf_cfg sm8150_perf_data = {
.min_dram_ib = 800000, .min_dram_ib = 800000,
.min_prefill_lines = 24, .min_prefill_lines = 24,
.danger_lut_tbl = {0xf, 0xffff, 0x0}, .danger_lut_tbl = {0xf, 0xffff, 0x0},
.safe_lut_tbl = {0xfff8, 0xf000, 0xffff},
.qos_lut_tbl = { .qos_lut_tbl = {
{.nentry = ARRAY_SIZE(sm8150_qos_linear), {.nentry = ARRAY_SIZE(sm8150_qos_linear),
.entries = sm8150_qos_linear .entries = sm8150_qos_linear
...@@ -1056,6 +1059,7 @@ static const struct dpu_perf_cfg sm8250_perf_data = { ...@@ -1056,6 +1059,7 @@ static const struct dpu_perf_cfg sm8250_perf_data = {
.min_dram_ib = 800000, .min_dram_ib = 800000,
.min_prefill_lines = 35, .min_prefill_lines = 35,
.danger_lut_tbl = {0xf, 0xffff, 0x0}, .danger_lut_tbl = {0xf, 0xffff, 0x0},
.safe_lut_tbl = {0xfff0, 0xff00, 0xffff},
.qos_lut_tbl = { .qos_lut_tbl = {
{.nentry = ARRAY_SIZE(sc7180_qos_linear), {.nentry = ARRAY_SIZE(sc7180_qos_linear),
.entries = sc7180_qos_linear .entries = sc7180_qos_linear
...@@ -1084,6 +1088,7 @@ static const struct dpu_perf_cfg sc7280_perf_data = { ...@@ -1084,6 +1088,7 @@ static const struct dpu_perf_cfg sc7280_perf_data = {
.min_dram_ib = 1600000, .min_dram_ib = 1600000,
.min_prefill_lines = 24, .min_prefill_lines = 24,
.danger_lut_tbl = {0xffff, 0xffff, 0x0}, .danger_lut_tbl = {0xffff, 0xffff, 0x0},
.safe_lut_tbl = {0xff00, 0xff00, 0xffff},
.qos_lut_tbl = { .qos_lut_tbl = {
{.nentry = ARRAY_SIZE(sc7180_qos_macrotile), {.nentry = ARRAY_SIZE(sc7180_qos_macrotile),
.entries = sc7180_qos_macrotile .entries = sc7180_qos_macrotile
......
...@@ -345,10 +345,12 @@ static void dpu_hw_ctl_clear_all_blendstages(struct dpu_hw_ctl *ctx) ...@@ -345,10 +345,12 @@ static void dpu_hw_ctl_clear_all_blendstages(struct dpu_hw_ctl *ctx)
int i; int i;
for (i = 0; i < ctx->mixer_count; i++) { for (i = 0; i < ctx->mixer_count; i++) {
DPU_REG_WRITE(c, CTL_LAYER(LM_0 + i), 0); enum dpu_lm mixer_id = ctx->mixer_hw_caps[i].id;
DPU_REG_WRITE(c, CTL_LAYER_EXT(LM_0 + i), 0);
DPU_REG_WRITE(c, CTL_LAYER_EXT2(LM_0 + i), 0); DPU_REG_WRITE(c, CTL_LAYER(mixer_id), 0);
DPU_REG_WRITE(c, CTL_LAYER_EXT3(LM_0 + i), 0); DPU_REG_WRITE(c, CTL_LAYER_EXT(mixer_id), 0);
DPU_REG_WRITE(c, CTL_LAYER_EXT2(mixer_id), 0);
DPU_REG_WRITE(c, CTL_LAYER_EXT3(mixer_id), 0);
} }
DPU_REG_WRITE(c, CTL_FETCH_PIPE_ACTIVE, 0); DPU_REG_WRITE(c, CTL_FETCH_PIPE_ACTIVE, 0);
......
...@@ -471,30 +471,68 @@ static int _dpu_kms_initialize_dsi(struct drm_device *dev, ...@@ -471,30 +471,68 @@ static int _dpu_kms_initialize_dsi(struct drm_device *dev,
struct dpu_kms *dpu_kms) struct dpu_kms *dpu_kms)
{ {
struct drm_encoder *encoder = NULL; struct drm_encoder *encoder = NULL;
struct msm_display_info info;
int i, rc = 0; int i, rc = 0;
if (!(priv->dsi[0] || priv->dsi[1])) if (!(priv->dsi[0] || priv->dsi[1]))
return rc; return rc;
/*TODO: Support two independent DSI connectors */ /*
encoder = dpu_encoder_init(dev, DRM_MODE_ENCODER_DSI); * We support following confiurations:
if (IS_ERR(encoder)) { * - Single DSI host (dsi0 or dsi1)
DPU_ERROR("encoder init failed for dsi display\n"); * - Two independent DSI hosts
return PTR_ERR(encoder); * - Bonded DSI0 and DSI1 hosts
} *
* TODO: Support swapping DSI0 and DSI1 in the bonded setup.
priv->encoders[priv->num_encoders++] = encoder; */
for (i = 0; i < ARRAY_SIZE(priv->dsi); i++) { for (i = 0; i < ARRAY_SIZE(priv->dsi); i++) {
int other = (i + 1) % 2;
if (!priv->dsi[i]) if (!priv->dsi[i])
continue; continue;
if (msm_dsi_is_bonded_dsi(priv->dsi[i]) &&
!msm_dsi_is_master_dsi(priv->dsi[i]))
continue;
encoder = dpu_encoder_init(dev, DRM_MODE_ENCODER_DSI);
if (IS_ERR(encoder)) {
DPU_ERROR("encoder init failed for dsi display\n");
return PTR_ERR(encoder);
}
priv->encoders[priv->num_encoders++] = encoder;
memset(&info, 0, sizeof(info));
info.intf_type = encoder->encoder_type;
rc = msm_dsi_modeset_init(priv->dsi[i], dev, encoder); rc = msm_dsi_modeset_init(priv->dsi[i], dev, encoder);
if (rc) { if (rc) {
DPU_ERROR("modeset_init failed for dsi[%d], rc = %d\n", DPU_ERROR("modeset_init failed for dsi[%d], rc = %d\n",
i, rc); i, rc);
break; break;
} }
info.h_tile_instance[info.num_of_h_tiles++] = i;
info.capabilities = msm_dsi_is_cmd_mode(priv->dsi[i]) ?
MSM_DISPLAY_CAP_CMD_MODE :
MSM_DISPLAY_CAP_VID_MODE;
if (msm_dsi_is_bonded_dsi(priv->dsi[i]) && priv->dsi[other]) {
rc = msm_dsi_modeset_init(priv->dsi[other], dev, encoder);
if (rc) {
DPU_ERROR("modeset_init failed for dsi[%d], rc = %d\n",
other, rc);
break;
}
info.h_tile_instance[info.num_of_h_tiles++] = other;
}
rc = dpu_encoder_setup(dev, encoder, &info);
if (rc)
DPU_ERROR("failed to setup DPU encoder %d: rc:%d\n",
encoder->base.id, rc);
} }
return rc; return rc;
...@@ -505,6 +543,7 @@ static int _dpu_kms_initialize_displayport(struct drm_device *dev, ...@@ -505,6 +543,7 @@ static int _dpu_kms_initialize_displayport(struct drm_device *dev,
struct dpu_kms *dpu_kms) struct dpu_kms *dpu_kms)
{ {
struct drm_encoder *encoder = NULL; struct drm_encoder *encoder = NULL;
struct msm_display_info info;
int rc = 0; int rc = 0;
if (!priv->dp) if (!priv->dp)
...@@ -516,6 +555,7 @@ static int _dpu_kms_initialize_displayport(struct drm_device *dev, ...@@ -516,6 +555,7 @@ static int _dpu_kms_initialize_displayport(struct drm_device *dev,
return PTR_ERR(encoder); return PTR_ERR(encoder);
} }
memset(&info, 0, sizeof(info));
rc = msm_dp_modeset_init(priv->dp, dev, encoder); rc = msm_dp_modeset_init(priv->dp, dev, encoder);
if (rc) { if (rc) {
DPU_ERROR("modeset_init failed for DP, rc = %d\n", rc); DPU_ERROR("modeset_init failed for DP, rc = %d\n", rc);
...@@ -524,6 +564,14 @@ static int _dpu_kms_initialize_displayport(struct drm_device *dev, ...@@ -524,6 +564,14 @@ static int _dpu_kms_initialize_displayport(struct drm_device *dev,
} }
priv->encoders[priv->num_encoders++] = encoder; priv->encoders[priv->num_encoders++] = encoder;
info.num_of_h_tiles = 1;
info.capabilities = MSM_DISPLAY_CAP_VID_MODE;
info.intf_type = encoder->encoder_type;
rc = dpu_encoder_setup(dev, encoder, &info);
if (rc)
DPU_ERROR("failed to setup DPU encoder %d: rc:%d\n",
encoder->base.id, rc);
return rc; return rc;
} }
...@@ -726,41 +774,6 @@ static void dpu_kms_destroy(struct msm_kms *kms) ...@@ -726,41 +774,6 @@ static void dpu_kms_destroy(struct msm_kms *kms)
msm_kms_destroy(&dpu_kms->base); msm_kms_destroy(&dpu_kms->base);
} }
static void _dpu_kms_set_encoder_mode(struct msm_kms *kms,
struct drm_encoder *encoder,
bool cmd_mode)
{
struct msm_display_info info;
struct msm_drm_private *priv = encoder->dev->dev_private;
int i, rc = 0;
memset(&info, 0, sizeof(info));
info.intf_type = encoder->encoder_type;
info.capabilities = cmd_mode ? MSM_DISPLAY_CAP_CMD_MODE :
MSM_DISPLAY_CAP_VID_MODE;
switch (info.intf_type) {
case DRM_MODE_ENCODER_DSI:
/* TODO: No support for DSI swap */
for (i = 0; i < ARRAY_SIZE(priv->dsi); i++) {
if (priv->dsi[i]) {
info.h_tile_instance[info.num_of_h_tiles] = i;
info.num_of_h_tiles++;
}
}
break;
case DRM_MODE_ENCODER_TMDS:
info.num_of_h_tiles = 1;
break;
}
rc = dpu_encoder_setup(encoder->dev, encoder, &info);
if (rc)
DPU_ERROR("failed to setup DPU encoder %d: rc:%d\n",
encoder->base.id, rc);
}
static irqreturn_t dpu_irq(struct msm_kms *kms) static irqreturn_t dpu_irq(struct msm_kms *kms)
{ {
struct dpu_kms *dpu_kms = to_dpu_kms(kms); struct dpu_kms *dpu_kms = to_dpu_kms(kms);
...@@ -863,7 +876,6 @@ static const struct msm_kms_funcs kms_funcs = { ...@@ -863,7 +876,6 @@ static const struct msm_kms_funcs kms_funcs = {
.get_format = dpu_get_msm_format, .get_format = dpu_get_msm_format,
.round_pixclk = dpu_kms_round_pixclk, .round_pixclk = dpu_kms_round_pixclk,
.destroy = dpu_kms_destroy, .destroy = dpu_kms_destroy,
.set_encoder_mode = _dpu_kms_set_encoder_mode,
.snapshot = dpu_kms_mdp_snapshot, .snapshot = dpu_kms_mdp_snapshot,
#ifdef CONFIG_DEBUG_FS #ifdef CONFIG_DEBUG_FS
.debugfs_init = dpu_kms_debugfs_init, .debugfs_init = dpu_kms_debugfs_init,
......
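To make the new flow concrete, for a bonded dsi0 (master) plus dsi1 (slave) video-mode panel the loop above ends up handing dpu_encoder_setup() roughly the following msm_display_info; an illustration only, and the fixed master/slave ordering is exactly the limitation the TODO about swapping refers to:

    struct msm_display_info info = {
        .intf_type       = DRM_MODE_ENCODER_DSI,
        .capabilities    = MSM_DISPLAY_CAP_VID_MODE,  /* CMD_MODE for command-mode panels */
        .num_of_h_tiles  = 2,
        .h_tile_instance = { 0, 1 },                  /* dsi0 is the master, dsi1 the slave */
    };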
...@@ -1339,9 +1339,7 @@ static void dpu_plane_reset(struct drm_plane *plane) ...@@ -1339,9 +1339,7 @@ static void dpu_plane_reset(struct drm_plane *plane)
return; return;
} }
pstate->base.plane = plane; __drm_atomic_helper_plane_reset(plane, &pstate->base);
plane->state = &pstate->base;
} }
#ifdef CONFIG_DEBUG_FS #ifdef CONFIG_DEBUG_FS
...@@ -1647,6 +1645,12 @@ struct drm_plane *dpu_plane_init(struct drm_device *dev, ...@@ -1647,6 +1645,12 @@ struct drm_plane *dpu_plane_init(struct drm_device *dev,
if (ret) if (ret)
DPU_ERROR("failed to install zpos property, rc = %d\n", ret); DPU_ERROR("failed to install zpos property, rc = %d\n", ret);
drm_plane_create_alpha_property(plane);
drm_plane_create_blend_mode_property(plane,
BIT(DRM_MODE_BLEND_PIXEL_NONE) |
BIT(DRM_MODE_BLEND_PREMULTI) |
BIT(DRM_MODE_BLEND_COVERAGE));
drm_plane_create_rotation_property(plane, drm_plane_create_rotation_property(plane,
DRM_MODE_ROTATE_0, DRM_MODE_ROTATE_0,
DRM_MODE_ROTATE_0 | DRM_MODE_ROTATE_0 |
......
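With the alpha and pixel-blend-mode properties now registered on DPU planes, userspace drives them through the standard atomic API. A hedged libdrm sketch follows: property IDs are looked up by name, and setting "pixel blend mode" would additionally require resolving the enum value for "None"/"Pre-multiplied"/"Coverage" from the property's enum list, which is omitted here for brevity.

    #include <stdint.h>
    #include <string.h>
    #include <xf86drm.h>
    #include <xf86drmMode.h>

    /* Find a plane property ID by name ("alpha", "pixel blend mode", ...). */
    static uint32_t plane_prop_id(int fd, uint32_t plane_id, const char *name)
    {
        drmModeObjectProperties *props =
            drmModeObjectGetProperties(fd, plane_id, DRM_MODE_OBJECT_PLANE);
        uint32_t id = 0;
        uint32_t i;

        for (i = 0; props && i < props->count_props; i++) {
            drmModePropertyRes *prop = drmModeGetProperty(fd, props->props[i]);

            if (prop && !strcmp(prop->name, name))
                id = prop->prop_id;
            drmModeFreeProperty(prop);
        }
        drmModeFreeObjectProperties(props);
        return id;
    }

    /* Request 50% constant plane alpha (0xffff is fully opaque). */
    static int set_plane_alpha_half(int fd, uint32_t plane_id)
    {
        drmModeAtomicReq *req = drmModeAtomicAlloc();
        int ret;

        drmModeAtomicAddProperty(req, plane_id,
                                 plane_prop_id(fd, plane_id, "alpha"), 0x8000);
        ret = drmModeAtomicCommit(fd, req, DRM_MODE_ATOMIC_TEST_ONLY, NULL);
        drmModeAtomicFree(req);
        return ret;
    }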
...@@ -19,30 +19,12 @@ static int mdp4_hw_init(struct msm_kms *kms) ...@@ -19,30 +19,12 @@ static int mdp4_hw_init(struct msm_kms *kms)
{ {
struct mdp4_kms *mdp4_kms = to_mdp4_kms(to_mdp_kms(kms)); struct mdp4_kms *mdp4_kms = to_mdp4_kms(to_mdp_kms(kms));
struct drm_device *dev = mdp4_kms->dev; struct drm_device *dev = mdp4_kms->dev;
uint32_t version, major, minor, dmap_cfg, vg_cfg; u32 dmap_cfg, vg_cfg;
unsigned long clk; unsigned long clk;
int ret = 0; int ret = 0;
pm_runtime_get_sync(dev->dev); pm_runtime_get_sync(dev->dev);
mdp4_enable(mdp4_kms);
version = mdp4_read(mdp4_kms, REG_MDP4_VERSION);
mdp4_disable(mdp4_kms);
major = FIELD(version, MDP4_VERSION_MAJOR);
minor = FIELD(version, MDP4_VERSION_MINOR);
DBG("found MDP4 version v%d.%d", major, minor);
if (major != 4) {
DRM_DEV_ERROR(dev->dev, "unexpected MDP version: v%d.%d\n",
major, minor);
ret = -ENXIO;
goto out;
}
mdp4_kms->rev = minor;
if (mdp4_kms->rev > 1) { if (mdp4_kms->rev > 1) {
mdp4_write(mdp4_kms, REG_MDP4_CS_CONTROLLER0, 0x0707ffff); mdp4_write(mdp4_kms, REG_MDP4_CS_CONTROLLER0, 0x0707ffff);
mdp4_write(mdp4_kms, REG_MDP4_CS_CONTROLLER1, 0x03073f3f); mdp4_write(mdp4_kms, REG_MDP4_CS_CONTROLLER1, 0x03073f3f);
...@@ -88,7 +70,6 @@ static int mdp4_hw_init(struct msm_kms *kms) ...@@ -88,7 +70,6 @@ static int mdp4_hw_init(struct msm_kms *kms)
if (mdp4_kms->rev > 1) if (mdp4_kms->rev > 1)
mdp4_write(mdp4_kms, REG_MDP4_RESET_STATUS, 1); mdp4_write(mdp4_kms, REG_MDP4_RESET_STATUS, 1);
out:
pm_runtime_put_sync(dev->dev); pm_runtime_put_sync(dev->dev);
return ret; return ret;
...@@ -108,13 +89,6 @@ static void mdp4_disable_commit(struct msm_kms *kms) ...@@ -108,13 +89,6 @@ static void mdp4_disable_commit(struct msm_kms *kms)
static void mdp4_prepare_commit(struct msm_kms *kms, struct drm_atomic_state *state) static void mdp4_prepare_commit(struct msm_kms *kms, struct drm_atomic_state *state)
{ {
int i;
struct drm_crtc *crtc;
struct drm_crtc_state *crtc_state;
/* see 119ecb7fd */
for_each_new_crtc_in_state(state, crtc, crtc_state, i)
drm_crtc_vblank_get(crtc);
} }
static void mdp4_flush_commit(struct msm_kms *kms, unsigned crtc_mask) static void mdp4_flush_commit(struct msm_kms *kms, unsigned crtc_mask)
...@@ -133,12 +107,6 @@ static void mdp4_wait_flush(struct msm_kms *kms, unsigned crtc_mask) ...@@ -133,12 +107,6 @@ static void mdp4_wait_flush(struct msm_kms *kms, unsigned crtc_mask)
static void mdp4_complete_commit(struct msm_kms *kms, unsigned crtc_mask) static void mdp4_complete_commit(struct msm_kms *kms, unsigned crtc_mask)
{ {
struct mdp4_kms *mdp4_kms = to_mdp4_kms(to_mdp_kms(kms));
struct drm_crtc *crtc;
/* see 119ecb7fd */
for_each_crtc_mask(mdp4_kms->dev, crtc, crtc_mask)
drm_crtc_vblank_put(crtc);
} }
static long mdp4_round_pixclk(struct msm_kms *kms, unsigned long rate, static long mdp4_round_pixclk(struct msm_kms *kms, unsigned long rate,
...@@ -411,14 +379,32 @@ static int modeset_init(struct mdp4_kms *mdp4_kms) ...@@ -411,14 +379,32 @@ static int modeset_init(struct mdp4_kms *mdp4_kms)
return ret; return ret;
} }
static void read_mdp_hw_revision(struct mdp4_kms *mdp4_kms,
u32 *major, u32 *minor)
{
struct drm_device *dev = mdp4_kms->dev;
u32 version;
mdp4_enable(mdp4_kms);
version = mdp4_read(mdp4_kms, REG_MDP4_VERSION);
mdp4_disable(mdp4_kms);
*major = FIELD(version, MDP4_VERSION_MAJOR);
*minor = FIELD(version, MDP4_VERSION_MINOR);
DRM_DEV_INFO(dev->dev, "MDP4 version v%d.%d", *major, *minor);
}
struct msm_kms *mdp4_kms_init(struct drm_device *dev) struct msm_kms *mdp4_kms_init(struct drm_device *dev)
{ {
struct platform_device *pdev = to_platform_device(dev->dev); struct platform_device *pdev = to_platform_device(dev->dev);
struct mdp4_platform_config *config = mdp4_get_config(pdev); struct mdp4_platform_config *config = mdp4_get_config(pdev);
struct msm_drm_private *priv = dev->dev_private;
struct mdp4_kms *mdp4_kms; struct mdp4_kms *mdp4_kms;
struct msm_kms *kms = NULL; struct msm_kms *kms = NULL;
struct msm_gem_address_space *aspace; struct msm_gem_address_space *aspace;
int irq, ret; int irq, ret;
u32 major, minor;
mdp4_kms = kzalloc(sizeof(*mdp4_kms), GFP_KERNEL); mdp4_kms = kzalloc(sizeof(*mdp4_kms), GFP_KERNEL);
if (!mdp4_kms) { if (!mdp4_kms) {
...@@ -433,7 +419,8 @@ struct msm_kms *mdp4_kms_init(struct drm_device *dev) ...@@ -433,7 +419,8 @@ struct msm_kms *mdp4_kms_init(struct drm_device *dev)
goto fail; goto fail;
} }
kms = &mdp4_kms->base.base; priv->kms = &mdp4_kms->base.base;
kms = priv->kms;
mdp4_kms->dev = dev; mdp4_kms->dev = dev;
...@@ -479,15 +466,6 @@ struct msm_kms *mdp4_kms_init(struct drm_device *dev) ...@@ -479,15 +466,6 @@ struct msm_kms *mdp4_kms_init(struct drm_device *dev)
if (IS_ERR(mdp4_kms->pclk)) if (IS_ERR(mdp4_kms->pclk))
mdp4_kms->pclk = NULL; mdp4_kms->pclk = NULL;
if (mdp4_kms->rev >= 2) {
mdp4_kms->lut_clk = devm_clk_get(&pdev->dev, "lut_clk");
if (IS_ERR(mdp4_kms->lut_clk)) {
DRM_DEV_ERROR(dev->dev, "failed to get lut_clk\n");
ret = PTR_ERR(mdp4_kms->lut_clk);
goto fail;
}
}
mdp4_kms->axi_clk = devm_clk_get(&pdev->dev, "bus_clk"); mdp4_kms->axi_clk = devm_clk_get(&pdev->dev, "bus_clk");
if (IS_ERR(mdp4_kms->axi_clk)) { if (IS_ERR(mdp4_kms->axi_clk)) {
DRM_DEV_ERROR(dev->dev, "failed to get axi_clk\n"); DRM_DEV_ERROR(dev->dev, "failed to get axi_clk\n");
...@@ -496,8 +474,27 @@ struct msm_kms *mdp4_kms_init(struct drm_device *dev) ...@@ -496,8 +474,27 @@ struct msm_kms *mdp4_kms_init(struct drm_device *dev)
} }
clk_set_rate(mdp4_kms->clk, config->max_clk); clk_set_rate(mdp4_kms->clk, config->max_clk);
if (mdp4_kms->lut_clk)
read_mdp_hw_revision(mdp4_kms, &major, &minor);
if (major != 4) {
DRM_DEV_ERROR(dev->dev, "unexpected MDP version: v%d.%d\n",
major, minor);
ret = -ENXIO;
goto fail;
}
mdp4_kms->rev = minor;
if (mdp4_kms->rev >= 2) {
mdp4_kms->lut_clk = devm_clk_get(&pdev->dev, "lut_clk");
if (IS_ERR(mdp4_kms->lut_clk)) {
DRM_DEV_ERROR(dev->dev, "failed to get lut_clk\n");
ret = PTR_ERR(mdp4_kms->lut_clk);
goto fail;
}
clk_set_rate(mdp4_kms->lut_clk, config->max_clk); clk_set_rate(mdp4_kms->lut_clk, config->max_clk);
}
pm_runtime_enable(dev->dev); pm_runtime_enable(dev->dev);
mdp4_kms->rpm_enabled = true; mdp4_kms->rpm_enabled = true;
......
...@@ -737,7 +737,7 @@ struct mdp5_ctl_manager *mdp5_ctlm_init(struct drm_device *dev, ...@@ -737,7 +737,7 @@ struct mdp5_ctl_manager *mdp5_ctlm_init(struct drm_device *dev,
} }
/* /*
* In Dual DSI case, CTL0 and CTL1 are always assigned to two DSI * In bonded DSI case, CTL0 and CTL1 are always assigned to two DSI
* interfaces to support single FLUSH feature (Flush CTL0 and CTL1 when * interfaces to support single FLUSH feature (Flush CTL0 and CTL1 when
* only write into CTL0's FLUSH register) to keep two DSI pipes in sync. * only write into CTL0's FLUSH register) to keep two DSI pipes in sync.
* Single FLUSH is supported from hw rev v3.0. * Single FLUSH is supported from hw rev v3.0.
......
...@@ -209,13 +209,6 @@ static int mdp5_set_split_display(struct msm_kms *kms, ...@@ -209,13 +209,6 @@ static int mdp5_set_split_display(struct msm_kms *kms,
slave_encoder); slave_encoder);
} }
static void mdp5_set_encoder_mode(struct msm_kms *kms,
struct drm_encoder *encoder,
bool cmd_mode)
{
mdp5_encoder_set_intf_mode(encoder, cmd_mode);
}
static void mdp5_kms_destroy(struct msm_kms *kms) static void mdp5_kms_destroy(struct msm_kms *kms)
{ {
struct mdp5_kms *mdp5_kms = to_mdp5_kms(to_mdp_kms(kms)); struct mdp5_kms *mdp5_kms = to_mdp5_kms(to_mdp_kms(kms));
...@@ -287,7 +280,6 @@ static const struct mdp_kms_funcs kms_funcs = { ...@@ -287,7 +280,6 @@ static const struct mdp_kms_funcs kms_funcs = {
.get_format = mdp_get_format, .get_format = mdp_get_format,
.round_pixclk = mdp5_round_pixclk, .round_pixclk = mdp5_round_pixclk,
.set_split_display = mdp5_set_split_display, .set_split_display = mdp5_set_split_display,
.set_encoder_mode = mdp5_set_encoder_mode,
.destroy = mdp5_kms_destroy, .destroy = mdp5_kms_destroy,
#ifdef CONFIG_DEBUG_FS #ifdef CONFIG_DEBUG_FS
.debugfs_init = mdp5_kms_debugfs_init, .debugfs_init = mdp5_kms_debugfs_init,
...@@ -448,6 +440,9 @@ static int modeset_init_intf(struct mdp5_kms *mdp5_kms, ...@@ -448,6 +440,9 @@ static int modeset_init_intf(struct mdp5_kms *mdp5_kms,
} }
ret = msm_dsi_modeset_init(priv->dsi[dsi_id], dev, encoder); ret = msm_dsi_modeset_init(priv->dsi[dsi_id], dev, encoder);
if (!ret)
mdp5_encoder_set_intf_mode(encoder, msm_dsi_is_cmd_mode(priv->dsi[dsi_id]));
break; break;
} }
default: default:
......
...@@ -16,7 +16,6 @@ ...@@ -16,7 +16,6 @@
#include <linux/delay.h> #include <linux/delay.h>
#include <linux/spinlock.h> #include <linux/spinlock.h>
#include <linux/ktime.h> #include <linux/ktime.h>
#include <linux/debugfs.h>
#include <linux/uaccess.h> #include <linux/uaccess.h>
#include <linux/dma-buf.h> #include <linux/dma-buf.h>
#include <linux/slab.h> #include <linux/slab.h>
......
...@@ -353,6 +353,9 @@ static ssize_t dp_aux_transfer(struct drm_dp_aux *dp_aux, ...@@ -353,6 +353,9 @@ static ssize_t dp_aux_transfer(struct drm_dp_aux *dp_aux,
if (!(aux->retry_cnt % MAX_AUX_RETRIES)) if (!(aux->retry_cnt % MAX_AUX_RETRIES))
dp_catalog_aux_update_cfg(aux->catalog); dp_catalog_aux_update_cfg(aux->catalog);
} }
/* reset aux if link is in connected state */
if (dp_catalog_link_is_connected(aux->catalog))
dp_catalog_aux_reset(aux->catalog);
} else { } else {
aux->retry_cnt = 0; aux->retry_cnt = 0;
switch (aux->aux_error_num) { switch (aux->aux_error_num) {
......
...@@ -372,6 +372,7 @@ void dp_catalog_ctrl_mainlink_ctrl(struct dp_catalog *dp_catalog, ...@@ -372,6 +372,7 @@ void dp_catalog_ctrl_mainlink_ctrl(struct dp_catalog *dp_catalog,
struct dp_catalog_private *catalog = container_of(dp_catalog, struct dp_catalog_private *catalog = container_of(dp_catalog,
struct dp_catalog_private, dp_catalog); struct dp_catalog_private, dp_catalog);
DRM_DEBUG_DP("enable=%d\n", enable);
if (enable) { if (enable) {
/* /*
* To make sure link reg writes happens before other operation, * To make sure link reg writes happens before other operation,
...@@ -580,6 +581,7 @@ void dp_catalog_hpd_config_intr(struct dp_catalog *dp_catalog, ...@@ -580,6 +581,7 @@ void dp_catalog_hpd_config_intr(struct dp_catalog *dp_catalog,
config = (en ? config | intr_mask : config & ~intr_mask); config = (en ? config | intr_mask : config & ~intr_mask);
DRM_DEBUG_DP("intr_mask=%#x config=%#x\n", intr_mask, config);
dp_write_aux(catalog, REG_DP_DP_HPD_INT_MASK, dp_write_aux(catalog, REG_DP_DP_HPD_INT_MASK,
config & DP_DP_HPD_INT_MASK); config & DP_DP_HPD_INT_MASK);
} }
...@@ -610,6 +612,7 @@ u32 dp_catalog_link_is_connected(struct dp_catalog *dp_catalog) ...@@ -610,6 +612,7 @@ u32 dp_catalog_link_is_connected(struct dp_catalog *dp_catalog)
u32 status; u32 status;
status = dp_read_aux(catalog, REG_DP_DP_HPD_INT_STATUS); status = dp_read_aux(catalog, REG_DP_DP_HPD_INT_STATUS);
DRM_DEBUG_DP("aux status: %#x\n", status);
status >>= DP_DP_HPD_STATE_STATUS_BITS_SHIFT; status >>= DP_DP_HPD_STATE_STATUS_BITS_SHIFT;
status &= DP_DP_HPD_STATE_STATUS_BITS_MASK; status &= DP_DP_HPD_STATE_STATUS_BITS_MASK;
...@@ -685,6 +688,7 @@ void dp_catalog_ctrl_send_phy_pattern(struct dp_catalog *dp_catalog, ...@@ -685,6 +688,7 @@ void dp_catalog_ctrl_send_phy_pattern(struct dp_catalog *dp_catalog,
/* Make sure to clear the current pattern before starting a new one */ /* Make sure to clear the current pattern before starting a new one */
dp_write_link(catalog, REG_DP_STATE_CTRL, 0x0); dp_write_link(catalog, REG_DP_STATE_CTRL, 0x0);
DRM_DEBUG_DP("pattern: %#x\n", pattern);
switch (pattern) { switch (pattern) {
case DP_PHY_TEST_PATTERN_D10_2: case DP_PHY_TEST_PATTERN_D10_2:
dp_write_link(catalog, REG_DP_STATE_CTRL, dp_write_link(catalog, REG_DP_STATE_CTRL,
...@@ -745,7 +749,7 @@ void dp_catalog_ctrl_send_phy_pattern(struct dp_catalog *dp_catalog, ...@@ -745,7 +749,7 @@ void dp_catalog_ctrl_send_phy_pattern(struct dp_catalog *dp_catalog,
DP_STATE_CTRL_LINK_TRAINING_PATTERN4); DP_STATE_CTRL_LINK_TRAINING_PATTERN4);
break; break;
default: default:
DRM_DEBUG_DP("No valid test pattern requested:0x%x\n", pattern); DRM_DEBUG_DP("No valid test pattern requested: %#x\n", pattern);
break; break;
} }
} }
...@@ -928,7 +932,7 @@ void dp_catalog_audio_config_acr(struct dp_catalog *dp_catalog) ...@@ -928,7 +932,7 @@ void dp_catalog_audio_config_acr(struct dp_catalog *dp_catalog)
select = dp_catalog->audio_data; select = dp_catalog->audio_data;
acr_ctrl = select << 4 | BIT(31) | BIT(8) | BIT(14); acr_ctrl = select << 4 | BIT(31) | BIT(8) | BIT(14);
DRM_DEBUG_DP("select = 0x%x, acr_ctrl = 0x%x\n", select, acr_ctrl); DRM_DEBUG_DP("select: %#x, acr_ctrl: %#x\n", select, acr_ctrl);
dp_write_link(catalog, MMSS_DP_AUDIO_ACR_CTRL, acr_ctrl); dp_write_link(catalog, MMSS_DP_AUDIO_ACR_CTRL, acr_ctrl);
} }
......
[diff for this file is collapsed in the web view and not shown]
...@@ -55,7 +55,6 @@ enum { ...@@ -55,7 +55,6 @@ enum {
EV_HPD_INIT_SETUP, EV_HPD_INIT_SETUP,
EV_HPD_PLUG_INT, EV_HPD_PLUG_INT,
EV_IRQ_HPD_INT, EV_IRQ_HPD_INT,
EV_HPD_REPLUG_INT,
EV_HPD_UNPLUG_INT, EV_HPD_UNPLUG_INT,
EV_USER_NOTIFICATION, EV_USER_NOTIFICATION,
EV_CONNECT_PENDING_TIMEOUT, EV_CONNECT_PENDING_TIMEOUT,
...@@ -102,8 +101,6 @@ struct dp_display_private { ...@@ -102,8 +101,6 @@ struct dp_display_private {
struct dp_display_mode dp_mode; struct dp_display_mode dp_mode;
struct msm_dp dp_display; struct msm_dp dp_display;
bool encoder_mode_set;
/* wait for audio signaling */ /* wait for audio signaling */
struct completion audio_comp; struct completion audio_comp;
...@@ -267,6 +264,8 @@ static bool dp_display_is_ds_bridge(struct dp_panel *panel) ...@@ -267,6 +264,8 @@ static bool dp_display_is_ds_bridge(struct dp_panel *panel)
static bool dp_display_is_sink_count_zero(struct dp_display_private *dp) static bool dp_display_is_sink_count_zero(struct dp_display_private *dp)
{ {
DRM_DEBUG_DP("present=%#x sink_count=%d\n", dp->panel->dpcd[DP_DOWNSTREAMPORT_PRESENT],
dp->link->sink_count);
return dp_display_is_ds_bridge(dp->panel) && return dp_display_is_ds_bridge(dp->panel) &&
(dp->link->sink_count == 0); (dp->link->sink_count == 0);
} }
...@@ -283,20 +282,6 @@ static void dp_display_send_hpd_event(struct msm_dp *dp_display) ...@@ -283,20 +282,6 @@ static void dp_display_send_hpd_event(struct msm_dp *dp_display)
} }
static void dp_display_set_encoder_mode(struct dp_display_private *dp)
{
struct msm_drm_private *priv = dp->dp_display.drm_dev->dev_private;
struct msm_kms *kms = priv->kms;
if (!dp->encoder_mode_set && dp->dp_display.encoder &&
kms->funcs->set_encoder_mode) {
kms->funcs->set_encoder_mode(kms,
dp->dp_display.encoder, false);
dp->encoder_mode_set = true;
}
}
static int dp_display_send_hpd_notification(struct dp_display_private *dp, static int dp_display_send_hpd_notification(struct dp_display_private *dp,
bool hpd) bool hpd)
{ {
...@@ -312,6 +297,7 @@ static int dp_display_send_hpd_notification(struct dp_display_private *dp, ...@@ -312,6 +297,7 @@ static int dp_display_send_hpd_notification(struct dp_display_private *dp,
dp->dp_display.is_connected = hpd; dp->dp_display.is_connected = hpd;
DRM_DEBUG_DP("hpd=%d\n", hpd);
dp_display_send_hpd_event(&dp->dp_display); dp_display_send_hpd_event(&dp->dp_display);
return 0; return 0;
...@@ -361,6 +347,7 @@ static void dp_display_host_init(struct dp_display_private *dp, int reset) ...@@ -361,6 +347,7 @@ static void dp_display_host_init(struct dp_display_private *dp, int reset)
{ {
bool flip = false; bool flip = false;
DRM_DEBUG_DP("core_initialized=%d\n", dp->core_initialized);
if (dp->core_initialized) { if (dp->core_initialized) {
DRM_DEBUG_DP("DP core already initialized\n"); DRM_DEBUG_DP("DP core already initialized\n");
return; return;
...@@ -369,8 +356,6 @@ static void dp_display_host_init(struct dp_display_private *dp, int reset) ...@@ -369,8 +356,6 @@ static void dp_display_host_init(struct dp_display_private *dp, int reset)
if (dp->usbpd->orientation == ORIENTATION_CC2) if (dp->usbpd->orientation == ORIENTATION_CC2)
flip = true; flip = true;
dp_display_set_encoder_mode(dp);
dp_power_init(dp->power, flip); dp_power_init(dp->power, flip);
dp_ctrl_host_init(dp->ctrl, flip, reset); dp_ctrl_host_init(dp->ctrl, flip, reset);
dp_aux_init(dp->aux); dp_aux_init(dp->aux);
...@@ -465,8 +450,10 @@ static int dp_display_handle_irq_hpd(struct dp_display_private *dp) ...@@ -465,8 +450,10 @@ static int dp_display_handle_irq_hpd(struct dp_display_private *dp)
{ {
u32 sink_request = dp->link->sink_request; u32 sink_request = dp->link->sink_request;
DRM_DEBUG_DP("%d\n", sink_request);
if (dp->hpd_state == ST_DISCONNECTED) { if (dp->hpd_state == ST_DISCONNECTED) {
if (sink_request & DP_LINK_STATUS_UPDATED) { if (sink_request & DP_LINK_STATUS_UPDATED) {
DRM_DEBUG_DP("Disconnected sink_request: %d\n", sink_request);
DRM_ERROR("Disconnected, no DP_LINK_STATUS_UPDATED\n"); DRM_ERROR("Disconnected, no DP_LINK_STATUS_UPDATED\n");
return -EINVAL; return -EINVAL;
} }
...@@ -498,6 +485,7 @@ static int dp_display_usbpd_attention_cb(struct device *dev) ...@@ -498,6 +485,7 @@ static int dp_display_usbpd_attention_cb(struct device *dev)
rc = dp_link_process_request(dp->link); rc = dp_link_process_request(dp->link);
if (!rc) { if (!rc) {
sink_request = dp->link->sink_request; sink_request = dp->link->sink_request;
DRM_DEBUG_DP("hpd_state=%d sink_request=%d\n", dp->hpd_state, sink_request);
if (sink_request & DS_PORT_STATUS_CHANGED) if (sink_request & DS_PORT_STATUS_CHANGED)
rc = dp_display_handle_port_ststus_changed(dp); rc = dp_display_handle_port_ststus_changed(dp);
else else
...@@ -520,6 +508,7 @@ static int dp_hpd_plug_handle(struct dp_display_private *dp, u32 data) ...@@ -520,6 +508,7 @@ static int dp_hpd_plug_handle(struct dp_display_private *dp, u32 data)
mutex_lock(&dp->event_mutex); mutex_lock(&dp->event_mutex);
state = dp->hpd_state; state = dp->hpd_state;
DRM_DEBUG_DP("hpd_state=%d\n", state);
if (state == ST_DISPLAY_OFF || state == ST_SUSPENDED) { if (state == ST_DISPLAY_OFF || state == ST_SUSPENDED) {
mutex_unlock(&dp->event_mutex); mutex_unlock(&dp->event_mutex);
return 0; return 0;
...@@ -655,6 +644,7 @@ static int dp_hpd_unplug_handle(struct dp_display_private *dp, u32 data) ...@@ -655,6 +644,7 @@ static int dp_hpd_unplug_handle(struct dp_display_private *dp, u32 data)
/* start sentinel checking in case of missing uevent */ /* start sentinel checking in case of missing uevent */
dp_add_event(dp, EV_DISCONNECT_PENDING_TIMEOUT, 0, DP_TIMEOUT_5_SECOND); dp_add_event(dp, EV_DISCONNECT_PENDING_TIMEOUT, 0, DP_TIMEOUT_5_SECOND);
DRM_DEBUG_DP("hpd_state=%d\n", state);
/* signal the disconnect event early to ensure proper teardown */ /* signal the disconnect event early to ensure proper teardown */
dp_display_handle_plugged_change(g_dp_display, false); dp_display_handle_plugged_change(g_dp_display, false);
...@@ -713,6 +703,7 @@ static int dp_irq_hpd_handle(struct dp_display_private *dp, u32 data) ...@@ -713,6 +703,7 @@ static int dp_irq_hpd_handle(struct dp_display_private *dp, u32 data)
if (ret == -ECONNRESET) { /* cable unplugged */ if (ret == -ECONNRESET) { /* cable unplugged */
dp->core_initialized = false; dp->core_initialized = false;
} }
DRM_DEBUG_DP("hpd_state=%d\n", state);
mutex_unlock(&dp->event_mutex); mutex_unlock(&dp->event_mutex);
...@@ -854,6 +845,7 @@ static int dp_display_enable(struct dp_display_private *dp, u32 data) ...@@ -854,6 +845,7 @@ static int dp_display_enable(struct dp_display_private *dp, u32 data)
dp_display = g_dp_display; dp_display = g_dp_display;
DRM_DEBUG_DP("sink_count=%d\n", dp->link->sink_count);
if (dp_display->power_on) { if (dp_display->power_on) {
DRM_DEBUG_DP("Link already setup, return\n"); DRM_DEBUG_DP("Link already setup, return\n");
return 0; return 0;
...@@ -915,6 +907,7 @@ static int dp_display_disable(struct dp_display_private *dp, u32 data) ...@@ -915,6 +907,7 @@ static int dp_display_disable(struct dp_display_private *dp, u32 data)
dp_display->power_on = false; dp_display->power_on = false;
DRM_DEBUG_DP("sink count: %d\n", dp->link->sink_count);
return 0; return 0;
} }
...@@ -1014,10 +1007,8 @@ int dp_display_get_test_bpp(struct msm_dp *dp) ...@@ -1014,10 +1007,8 @@ int dp_display_get_test_bpp(struct msm_dp *dp)
void msm_dp_snapshot(struct msm_disp_state *disp_state, struct msm_dp *dp) void msm_dp_snapshot(struct msm_disp_state *disp_state, struct msm_dp *dp)
{ {
struct dp_display_private *dp_display; struct dp_display_private *dp_display;
struct drm_device *drm;
dp_display = container_of(dp, struct dp_display_private, dp_display); dp_display = container_of(dp, struct dp_display_private, dp_display);
drm = dp->drm_dev;
/* /*
* if we are reading registers we need the link clocks to be on * if we are reading registers we need the link clocks to be on
...@@ -1118,9 +1109,6 @@ static int hpd_event_thread(void *data) ...@@ -1118,9 +1109,6 @@ static int hpd_event_thread(void *data)
case EV_IRQ_HPD_INT: case EV_IRQ_HPD_INT:
dp_irq_hpd_handle(dp_priv, todo->data); dp_irq_hpd_handle(dp_priv, todo->data);
break; break;
case EV_HPD_REPLUG_INT:
/* do nothing */
break;
case EV_USER_NOTIFICATION: case EV_USER_NOTIFICATION:
dp_display_send_hpd_notification(dp_priv, dp_display_send_hpd_notification(dp_priv,
todo->data); todo->data);
...@@ -1162,12 +1150,11 @@ static irqreturn_t dp_display_irq_handler(int irq, void *dev_id) ...@@ -1162,12 +1150,11 @@ static irqreturn_t dp_display_irq_handler(int irq, void *dev_id)
hpd_isr_status = dp_catalog_hpd_get_intr_status(dp->catalog); hpd_isr_status = dp_catalog_hpd_get_intr_status(dp->catalog);
DRM_DEBUG_DP("hpd isr status=%#x\n", hpd_isr_status);
if (hpd_isr_status & 0x0F) { if (hpd_isr_status & 0x0F) {
/* hpd related interrupts */ /* hpd related interrupts */
if (hpd_isr_status & DP_DP_HPD_PLUG_INT_MASK || if (hpd_isr_status & DP_DP_HPD_PLUG_INT_MASK)
hpd_isr_status & DP_DP_HPD_REPLUG_INT_MASK) {
dp_add_event(dp, EV_HPD_PLUG_INT, 0, 0); dp_add_event(dp, EV_HPD_PLUG_INT, 0, 0);
}
if (hpd_isr_status & DP_DP_IRQ_HPD_INT_MASK) { if (hpd_isr_status & DP_DP_IRQ_HPD_INT_MASK) {
/* stop sentinel connect pending checking */ /* stop sentinel connect pending checking */
...@@ -1175,8 +1162,10 @@ static irqreturn_t dp_display_irq_handler(int irq, void *dev_id) ...@@ -1175,8 +1162,10 @@ static irqreturn_t dp_display_irq_handler(int irq, void *dev_id)
dp_add_event(dp, EV_IRQ_HPD_INT, 0, 0); dp_add_event(dp, EV_IRQ_HPD_INT, 0, 0);
} }
if (hpd_isr_status & DP_DP_HPD_REPLUG_INT_MASK) if (hpd_isr_status & DP_DP_HPD_REPLUG_INT_MASK) {
dp_add_event(dp, EV_HPD_REPLUG_INT, 0, 0); dp_add_event(dp, EV_HPD_UNPLUG_INT, 0, 0);
dp_add_event(dp, EV_HPD_PLUG_INT, 0, 3);
}
if (hpd_isr_status & DP_DP_HPD_UNPLUG_INT_MASK) if (hpd_isr_status & DP_DP_HPD_UNPLUG_INT_MASK)
dp_add_event(dp, EV_HPD_UNPLUG_INT, 0, 0); dp_add_event(dp, EV_HPD_UNPLUG_INT, 0, 0);
...@@ -1285,12 +1274,15 @@ static int dp_pm_resume(struct device *dev) ...@@ -1285,12 +1274,15 @@ static int dp_pm_resume(struct device *dev)
struct platform_device *pdev = to_platform_device(dev); struct platform_device *pdev = to_platform_device(dev);
struct msm_dp *dp_display = platform_get_drvdata(pdev); struct msm_dp *dp_display = platform_get_drvdata(pdev);
struct dp_display_private *dp; struct dp_display_private *dp;
u32 status; int sink_count = 0;
dp = container_of(dp_display, struct dp_display_private, dp_display); dp = container_of(dp_display, struct dp_display_private, dp_display);
mutex_lock(&dp->event_mutex); mutex_lock(&dp->event_mutex);
DRM_DEBUG_DP("Before, core_inited=%d power_on=%d\n",
dp->core_initialized, dp_display->power_on);
/* start from disconnected state */ /* start from disconnected state */
dp->hpd_state = ST_DISCONNECTED; dp->hpd_state = ST_DISCONNECTED;
...@@ -1299,18 +1291,33 @@ static int dp_pm_resume(struct device *dev) ...@@ -1299,18 +1291,33 @@ static int dp_pm_resume(struct device *dev)
dp_catalog_ctrl_hpd_config(dp->catalog); dp_catalog_ctrl_hpd_config(dp->catalog);
status = dp_catalog_link_is_connected(dp->catalog); /*
* set sink to normal operation mode -- D0
* before dpcd read
*/
dp_link_psm_config(dp->link, &dp->panel->link_info, false);
if (dp_catalog_link_is_connected(dp->catalog)) {
sink_count = drm_dp_read_sink_count(dp->aux);
if (sink_count < 0)
sink_count = 0;
}
dp->link->sink_count = sink_count;
/* /*
* can not declared display is connected unless * can not declared display is connected unless
* HDMI cable is plugged in and sink_count of * HDMI cable is plugged in and sink_count of
* dongle become 1 * dongle become 1
*/ */
if (status && dp->link->sink_count) if (dp->link->sink_count)
dp->dp_display.is_connected = true; dp->dp_display.is_connected = true;
else else
dp->dp_display.is_connected = false; dp->dp_display.is_connected = false;
DRM_DEBUG_DP("After, sink_count=%d is_connected=%d core_inited=%d power_on=%d\n",
dp->link->sink_count, dp->dp_display.is_connected,
dp->core_initialized, dp_display->power_on);
mutex_unlock(&dp->event_mutex); mutex_unlock(&dp->event_mutex);
return 0; return 0;
...@@ -1326,6 +1333,9 @@ static int dp_pm_suspend(struct device *dev) ...@@ -1326,6 +1333,9 @@ static int dp_pm_suspend(struct device *dev)
mutex_lock(&dp->event_mutex); mutex_lock(&dp->event_mutex);
DRM_DEBUG_DP("Before, core_inited=%d power_on=%d\n",
dp->core_initialized, dp_display->power_on);
if (dp->core_initialized == true) { if (dp->core_initialized == true) {
/* mainlink enabled */ /* mainlink enabled */
if (dp_power_clk_status(dp->power, DP_CTRL_PM)) if (dp_power_clk_status(dp->power, DP_CTRL_PM))
...@@ -1339,6 +1349,9 @@ static int dp_pm_suspend(struct device *dev) ...@@ -1339,6 +1349,9 @@ static int dp_pm_suspend(struct device *dev)
/* host_init will be called at pm_resume */ /* host_init will be called at pm_resume */
dp->core_initialized = false; dp->core_initialized = false;
DRM_DEBUG_DP("After, core_inited=%d power_on=%d\n",
dp->core_initialized, dp_display->power_on);
mutex_unlock(&dp->event_mutex); mutex_unlock(&dp->event_mutex);
return 0; return 0;
......
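The resume path above now wakes the sink before trusting any DPCD state: it programs the sink back to normal operation (D0) via dp_link_psm_config() and only then reads the sink count. A hedged sketch of the same ordering expressed with the generic DRM DP helpers rather than the driver's wrappers:

    #include <drm/drm_dp_helper.h>

    static int resume_sink_count(struct drm_dp_aux *aux, bool hpd_connected)
    {
        int count = 0;

        if (hpd_connected) {
            /* leave power-save (D3) before any DPCD read is meaningful */
            drm_dp_dpcd_writeb(aux, DP_SET_POWER, DP_SET_POWER_D0);

            count = drm_dp_read_sink_count(aux);
            if (count < 0)          /* AUX error: behave as if nothing is attached */
                count = 0;
        }

        return count;
    }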
...@@ -1027,43 +1027,29 @@ int dp_link_process_request(struct dp_link *dp_link) ...@@ -1027,43 +1027,29 @@ int dp_link_process_request(struct dp_link *dp_link)
 	if (link->request.test_requested == DP_TEST_LINK_EDID_READ) {
 		dp_link->sink_request |= DP_TEST_LINK_EDID_READ;
-		return ret;
-	}
-	ret = dp_link_process_ds_port_status_change(link);
-	if (!ret) {
+	} else if (!dp_link_process_ds_port_status_change(link)) {
 		dp_link->sink_request |= DS_PORT_STATUS_CHANGED;
-		return ret;
-	}
-	ret = dp_link_process_link_training_request(link);
-	if (!ret) {
+	} else if (!dp_link_process_link_training_request(link)) {
 		dp_link->sink_request |= DP_TEST_LINK_TRAINING;
-		return ret;
-	}
-	ret = dp_link_process_phy_test_pattern_request(link);
-	if (!ret) {
+	} else if (!dp_link_process_phy_test_pattern_request(link)) {
 		dp_link->sink_request |= DP_TEST_LINK_PHY_TEST_PATTERN;
-		return ret;
-	}
-	ret = dp_link_process_link_status_update(link);
-	if (!ret) {
-		dp_link->sink_request |= DP_LINK_STATUS_UPDATED;
-		return ret;
-	}
-	if (dp_link_is_video_pattern_requested(link)) {
-		ret = 0;
-		dp_link->sink_request |= DP_TEST_LINK_VIDEO_PATTERN;
-	}
-	if (dp_link_is_audio_pattern_requested(link)) {
-		dp_link->sink_request |= DP_TEST_LINK_AUDIO_PATTERN;
-		return -EINVAL;
+	} else {
+		ret = dp_link_process_link_status_update(link);
+		if (!ret) {
+			dp_link->sink_request |= DP_LINK_STATUS_UPDATED;
+		} else {
+			if (dp_link_is_video_pattern_requested(link)) {
+				ret = 0;
+				dp_link->sink_request |= DP_TEST_LINK_VIDEO_PATTERN;
+			}
+			if (dp_link_is_audio_pattern_requested(link)) {
+				dp_link->sink_request |= DP_TEST_LINK_AUDIO_PATTERN;
+				ret = -EINVAL;
+			}
+		}
 	}
+	DRM_DEBUG_DP("sink request=%#x", dp_link->sink_request);
 	return ret;
 }
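Note: the restructured dp_link_process_request() above replaces a series of early returns with one if/else-if chain, so exactly one request category is latched per pass and the result is logged once at the end. A self-contained C sketch of the same prioritization pattern, with made-up request names:

    #include <stdio.h>

    #define REQ_EDID      0x1
    #define REQ_DS_PORT   0x2
    #define REQ_TRAINING  0x4

    /* Illustrative checks; in the driver these parse DPCD IRQ vectors. */
    static int want_edid(int irq)     { return irq & 1; }
    static int want_ds_port(int irq)  { return irq & 2; }
    static int want_training(int irq) { return irq & 4; }

    static unsigned int classify_request(int irq)
    {
            unsigned int sink_request = 0;

            if (want_edid(irq))
                    sink_request |= REQ_EDID;
            else if (want_ds_port(irq))
                    sink_request |= REQ_DS_PORT;
            else if (want_training(irq))
                    sink_request |= REQ_TRAINING;

            printf("sink request=%#x\n", sink_request);
            return sink_request;
    }

    int main(void)
    {
            classify_request(6); /* DS-port wins over training: prints 0x2 */
            return 0;
    }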
......
...@@ -271,7 +271,7 @@ static u8 dp_panel_get_edid_checksum(struct edid *edid) ...@@ -271,7 +271,7 @@ static u8 dp_panel_get_edid_checksum(struct edid *edid)
{ {
struct edid *last_block; struct edid *last_block;
u8 *raw_edid; u8 *raw_edid;
-	bool is_edid_corrupt;
+	bool is_edid_corrupt = false;
if (!edid) { if (!edid) {
DRM_ERROR("invalid edid input\n"); DRM_ERROR("invalid edid input\n");
...@@ -303,7 +303,12 @@ void dp_panel_handle_sink_request(struct dp_panel *dp_panel) ...@@ -303,7 +303,12 @@ void dp_panel_handle_sink_request(struct dp_panel *dp_panel)
panel = container_of(dp_panel, struct dp_panel_private, dp_panel); panel = container_of(dp_panel, struct dp_panel_private, dp_panel);
if (panel->link->sink_request & DP_TEST_LINK_EDID_READ) { if (panel->link->sink_request & DP_TEST_LINK_EDID_READ) {
-		u8 checksum = dp_panel_get_edid_checksum(dp_panel->edid);
+		u8 checksum;
+
+		if (dp_panel->edid)
+			checksum = dp_panel_get_edid_checksum(dp_panel->edid);
+		else
+			checksum = dp_panel->connector->real_edid_checksum;
dp_link_send_edid_checksum(panel->link, checksum); dp_link_send_edid_checksum(panel->link, checksum);
dp_link_send_test_response(panel->link); dp_link_send_test_response(panel->link);
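Note: the change above falls back to connector->real_edid_checksum when no EDID is cached. For background, an EDID block checksum is simply the byte that makes all 128 bytes of the block sum to 0 modulo 256; a standalone sketch (not the driver's helper):

    #include <stdint.h>
    #include <stdio.h>
    #include <string.h>

    #define EDID_BLOCK_SIZE 128

    /* Returns the checksum byte that belongs at offset 127 of an EDID block. */
    static uint8_t edid_block_checksum(const uint8_t *block)
    {
            uint8_t sum = 0;
            int i;

            for (i = 0; i < EDID_BLOCK_SIZE - 1; i++)
                    sum += block[i];

            return (uint8_t)(256 - sum); /* full 128-byte sum becomes 0 (mod 256) */
    }

    int main(void)
    {
            uint8_t block[EDID_BLOCK_SIZE];

            memset(block, 0xA5, sizeof(block));
            block[127] = edid_block_checksum(block);
            printf("checksum byte: 0x%02x\n", block[127]);
            return 0;
    }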
......
...@@ -208,6 +208,9 @@ static int dp_power_clk_set_rate(struct dp_power_private *power, ...@@ -208,6 +208,9 @@ static int dp_power_clk_set_rate(struct dp_power_private *power,
int dp_power_clk_status(struct dp_power *dp_power, enum dp_pm_type pm_type) int dp_power_clk_status(struct dp_power *dp_power, enum dp_pm_type pm_type)
{ {
DRM_DEBUG_DP("core_clk_on=%d link_clk_on=%d stream_clk_on=%d\n",
dp_power->core_clks_on, dp_power->link_clks_on, dp_power->stream_clks_on);
if (pm_type == DP_CORE_PM) if (pm_type == DP_CORE_PM)
return dp_power->core_clks_on; return dp_power->core_clks_on;
......
...@@ -13,6 +13,13 @@ struct drm_encoder *msm_dsi_get_encoder(struct msm_dsi *msm_dsi) ...@@ -13,6 +13,13 @@ struct drm_encoder *msm_dsi_get_encoder(struct msm_dsi *msm_dsi)
return msm_dsi->encoder; return msm_dsi->encoder;
} }
bool msm_dsi_is_cmd_mode(struct msm_dsi *msm_dsi)
{
unsigned long host_flags = msm_dsi_host_get_mode_flags(msm_dsi->host);
return !(host_flags & MIPI_DSI_MODE_VIDEO);
}
static int dsi_get_phy(struct msm_dsi *msm_dsi) static int dsi_get_phy(struct msm_dsi *msm_dsi)
{ {
struct platform_device *pdev = msm_dsi->pdev; struct platform_device *pdev = msm_dsi->pdev;
...@@ -26,8 +33,10 @@ static int dsi_get_phy(struct msm_dsi *msm_dsi) ...@@ -26,8 +33,10 @@ static int dsi_get_phy(struct msm_dsi *msm_dsi)
} }
phy_pdev = of_find_device_by_node(phy_node); phy_pdev = of_find_device_by_node(phy_node);
-	if (phy_pdev)
+	if (phy_pdev) {
 		msm_dsi->phy = platform_get_drvdata(phy_pdev);
+		msm_dsi->phy_dev = &phy_pdev->dev;
+	}
of_node_put(phy_node); of_node_put(phy_node);
...@@ -36,8 +45,6 @@ static int dsi_get_phy(struct msm_dsi *msm_dsi) ...@@ -36,8 +45,6 @@ static int dsi_get_phy(struct msm_dsi *msm_dsi)
return -EPROBE_DEFER; return -EPROBE_DEFER;
} }
msm_dsi->phy_dev = get_device(&phy_pdev->dev);
return 0; return 0;
} }
...@@ -244,8 +251,6 @@ int msm_dsi_modeset_init(struct msm_dsi *msm_dsi, struct drm_device *dev, ...@@ -244,8 +251,6 @@ int msm_dsi_modeset_init(struct msm_dsi *msm_dsi, struct drm_device *dev,
goto fail; goto fail;
} }
msm_dsi_manager_setup_encoder(msm_dsi->id);
priv->bridges[priv->num_bridges++] = msm_dsi->bridge; priv->bridges[priv->num_bridges++] = msm_dsi->bridge;
priv->connectors[priv->num_connectors++] = msm_dsi->connector; priv->connectors[priv->num_connectors++] = msm_dsi->connector;
......
...@@ -80,10 +80,10 @@ struct drm_connector *msm_dsi_manager_connector_init(u8 id); ...@@ -80,10 +80,10 @@ struct drm_connector *msm_dsi_manager_connector_init(u8 id);
struct drm_connector *msm_dsi_manager_ext_bridge_init(u8 id); struct drm_connector *msm_dsi_manager_ext_bridge_init(u8 id);
int msm_dsi_manager_cmd_xfer(int id, const struct mipi_dsi_msg *msg); int msm_dsi_manager_cmd_xfer(int id, const struct mipi_dsi_msg *msg);
bool msm_dsi_manager_cmd_xfer_trigger(int id, u32 dma_base, u32 len); bool msm_dsi_manager_cmd_xfer_trigger(int id, u32 dma_base, u32 len);
void msm_dsi_manager_setup_encoder(int id);
int msm_dsi_manager_register(struct msm_dsi *msm_dsi); int msm_dsi_manager_register(struct msm_dsi *msm_dsi);
void msm_dsi_manager_unregister(struct msm_dsi *msm_dsi); void msm_dsi_manager_unregister(struct msm_dsi *msm_dsi);
bool msm_dsi_manager_validate_current_config(u8 id); bool msm_dsi_manager_validate_current_config(u8 id);
void msm_dsi_manager_tpg_enable(void);
/* msm dsi */ /* msm dsi */
static inline bool msm_dsi_device_connected(struct msm_dsi *msm_dsi) static inline bool msm_dsi_device_connected(struct msm_dsi *msm_dsi)
...@@ -109,7 +109,7 @@ int msm_dsi_host_enable(struct mipi_dsi_host *host); ...@@ -109,7 +109,7 @@ int msm_dsi_host_enable(struct mipi_dsi_host *host);
int msm_dsi_host_disable(struct mipi_dsi_host *host); int msm_dsi_host_disable(struct mipi_dsi_host *host);
int msm_dsi_host_power_on(struct mipi_dsi_host *host, int msm_dsi_host_power_on(struct mipi_dsi_host *host,
struct msm_dsi_phy_shared_timings *phy_shared_timings, struct msm_dsi_phy_shared_timings *phy_shared_timings,
-			bool is_dual_dsi);
+			bool is_bonded_dsi, struct msm_dsi_phy *phy);
int msm_dsi_host_power_off(struct mipi_dsi_host *host); int msm_dsi_host_power_off(struct mipi_dsi_host *host);
int msm_dsi_host_set_display_mode(struct mipi_dsi_host *host, int msm_dsi_host_set_display_mode(struct mipi_dsi_host *host,
const struct drm_display_mode *mode); const struct drm_display_mode *mode);
...@@ -123,7 +123,7 @@ int msm_dsi_host_set_src_pll(struct mipi_dsi_host *host, ...@@ -123,7 +123,7 @@ int msm_dsi_host_set_src_pll(struct mipi_dsi_host *host,
void msm_dsi_host_reset_phy(struct mipi_dsi_host *host); void msm_dsi_host_reset_phy(struct mipi_dsi_host *host);
void msm_dsi_host_get_phy_clk_req(struct mipi_dsi_host *host, void msm_dsi_host_get_phy_clk_req(struct mipi_dsi_host *host,
struct msm_dsi_phy_clk_request *clk_req, struct msm_dsi_phy_clk_request *clk_req,
-			bool is_dual_dsi);
+			bool is_bonded_dsi);
void msm_dsi_host_destroy(struct mipi_dsi_host *host); void msm_dsi_host_destroy(struct mipi_dsi_host *host);
int msm_dsi_host_modeset_init(struct mipi_dsi_host *host, int msm_dsi_host_modeset_init(struct mipi_dsi_host *host,
struct drm_device *dev); struct drm_device *dev);
...@@ -145,9 +145,11 @@ int dsi_dma_base_get_6g(struct msm_dsi_host *msm_host, uint64_t *iova); ...@@ -145,9 +145,11 @@ int dsi_dma_base_get_6g(struct msm_dsi_host *msm_host, uint64_t *iova);
int dsi_dma_base_get_v2(struct msm_dsi_host *msm_host, uint64_t *iova); int dsi_dma_base_get_v2(struct msm_dsi_host *msm_host, uint64_t *iova);
int dsi_clk_init_v2(struct msm_dsi_host *msm_host); int dsi_clk_init_v2(struct msm_dsi_host *msm_host);
int dsi_clk_init_6g_v2(struct msm_dsi_host *msm_host); int dsi_clk_init_6g_v2(struct msm_dsi_host *msm_host);
-int dsi_calc_clk_rate_v2(struct msm_dsi_host *msm_host, bool is_dual_dsi);
-int dsi_calc_clk_rate_6g(struct msm_dsi_host *msm_host, bool is_dual_dsi);
+int dsi_calc_clk_rate_v2(struct msm_dsi_host *msm_host, bool is_bonded_dsi);
+int dsi_calc_clk_rate_6g(struct msm_dsi_host *msm_host, bool is_bonded_dsi);
void msm_dsi_host_snapshot(struct msm_disp_state *disp_state, struct mipi_dsi_host *host); void msm_dsi_host_snapshot(struct msm_disp_state *disp_state, struct mipi_dsi_host *host);
void msm_dsi_host_test_pattern_en(struct mipi_dsi_host *host);
/* dsi phy */ /* dsi phy */
struct msm_dsi_phy; struct msm_dsi_phy;
struct msm_dsi_phy_shared_timings { struct msm_dsi_phy_shared_timings {
...@@ -164,10 +166,9 @@ struct msm_dsi_phy_clk_request { ...@@ -164,10 +166,9 @@ struct msm_dsi_phy_clk_request {
void msm_dsi_phy_driver_register(void); void msm_dsi_phy_driver_register(void);
void msm_dsi_phy_driver_unregister(void); void msm_dsi_phy_driver_unregister(void);
 int msm_dsi_phy_enable(struct msm_dsi_phy *phy,
-			struct msm_dsi_phy_clk_request *clk_req);
+			struct msm_dsi_phy_clk_request *clk_req,
+			struct msm_dsi_phy_shared_timings *shared_timings);
void msm_dsi_phy_disable(struct msm_dsi_phy *phy); void msm_dsi_phy_disable(struct msm_dsi_phy *phy);
void msm_dsi_phy_get_shared_timings(struct msm_dsi_phy *phy,
struct msm_dsi_phy_shared_timings *shared_timing);
void msm_dsi_phy_set_usecase(struct msm_dsi_phy *phy, void msm_dsi_phy_set_usecase(struct msm_dsi_phy *phy,
enum msm_dsi_phy_usecase uc); enum msm_dsi_phy_usecase uc);
int msm_dsi_phy_get_clk_provider(struct msm_dsi_phy *phy, int msm_dsi_phy_get_clk_provider(struct msm_dsi_phy *phy,
...@@ -175,6 +176,7 @@ int msm_dsi_phy_get_clk_provider(struct msm_dsi_phy *phy, ...@@ -175,6 +176,7 @@ int msm_dsi_phy_get_clk_provider(struct msm_dsi_phy *phy,
void msm_dsi_phy_pll_save_state(struct msm_dsi_phy *phy); void msm_dsi_phy_pll_save_state(struct msm_dsi_phy *phy);
int msm_dsi_phy_pll_restore_state(struct msm_dsi_phy *phy); int msm_dsi_phy_pll_restore_state(struct msm_dsi_phy *phy);
void msm_dsi_phy_snapshot(struct msm_disp_state *disp_state, struct msm_dsi_phy *phy); void msm_dsi_phy_snapshot(struct msm_disp_state *disp_state, struct msm_dsi_phy *phy);
bool msm_dsi_phy_set_continuous_clock(struct msm_dsi_phy *phy, bool enable);
#endif /* __DSI_CONNECTOR_H__ */ #endif /* __DSI_CONNECTOR_H__ */
...@@ -105,6 +105,32 @@ enum dsi_lane_swap { ...@@ -105,6 +105,32 @@ enum dsi_lane_swap {
LANE_SWAP_3210 = 7, LANE_SWAP_3210 = 7,
}; };
enum video_config_bpp {
VIDEO_CONFIG_18BPP = 0,
VIDEO_CONFIG_24BPP = 1,
};
enum video_pattern_sel {
VID_PRBS = 0,
VID_INCREMENTAL = 1,
VID_FIXED = 2,
VID_MDSS_GENERAL_PATTERN = 3,
};
enum cmd_mdp_stream0_pattern_sel {
CMD_MDP_PRBS = 0,
CMD_MDP_INCREMENTAL = 1,
CMD_MDP_FIXED = 2,
CMD_MDP_MDSS_GENERAL_PATTERN = 3,
};
enum cmd_dma_pattern_sel {
CMD_DMA_PRBS = 0,
CMD_DMA_INCREMENTAL = 1,
CMD_DMA_FIXED = 2,
CMD_DMA_CUSTOM_PATTERN_DMA_FIFO = 3,
};
#define DSI_IRQ_CMD_DMA_DONE 0x00000001 #define DSI_IRQ_CMD_DMA_DONE 0x00000001
#define DSI_IRQ_MASK_CMD_DMA_DONE 0x00000002 #define DSI_IRQ_MASK_CMD_DMA_DONE 0x00000002
#define DSI_IRQ_CMD_MDP_DONE 0x00000100 #define DSI_IRQ_CMD_MDP_DONE 0x00000100
...@@ -518,6 +544,7 @@ static inline uint32_t DSI_CLKOUT_TIMING_CTRL_T_CLK_POST(uint32_t val) ...@@ -518,6 +544,7 @@ static inline uint32_t DSI_CLKOUT_TIMING_CTRL_T_CLK_POST(uint32_t val)
#define DSI_LANE_STATUS_DLN0_DIRECTION 0x00010000 #define DSI_LANE_STATUS_DLN0_DIRECTION 0x00010000
#define REG_DSI_LANE_CTRL 0x000000a8 #define REG_DSI_LANE_CTRL 0x000000a8
#define DSI_LANE_CTRL_HS_REQ_SEL_PHY 0x01000000
#define DSI_LANE_CTRL_CLKLN_HS_FORCE_REQUEST 0x10000000 #define DSI_LANE_CTRL_CLKLN_HS_FORCE_REQUEST 0x10000000
#define REG_DSI_LANE_SWAP_CTRL 0x000000ac #define REG_DSI_LANE_SWAP_CTRL 0x000000ac
...@@ -564,6 +591,53 @@ static inline uint32_t DSI_LANE_SWAP_CTRL_DLN_SWAP_SEL(enum dsi_lane_swap val) ...@@ -564,6 +591,53 @@ static inline uint32_t DSI_LANE_SWAP_CTRL_DLN_SWAP_SEL(enum dsi_lane_swap val)
#define REG_DSI_PHY_RESET 0x00000128 #define REG_DSI_PHY_RESET 0x00000128
#define DSI_PHY_RESET_RESET 0x00000001 #define DSI_PHY_RESET_RESET 0x00000001
#define REG_DSI_TEST_PATTERN_GEN_VIDEO_INIT_VAL 0x00000160
#define REG_DSI_TPG_MAIN_CONTROL 0x00000198
#define DSI_TPG_MAIN_CONTROL_CHECKERED_RECTANGLE_PATTERN 0x00000100
#define REG_DSI_TPG_VIDEO_CONFIG 0x000001a0
#define DSI_TPG_VIDEO_CONFIG_BPP__MASK 0x00000003
#define DSI_TPG_VIDEO_CONFIG_BPP__SHIFT 0
static inline uint32_t DSI_TPG_VIDEO_CONFIG_BPP(enum video_config_bpp val)
{
return ((val) << DSI_TPG_VIDEO_CONFIG_BPP__SHIFT) & DSI_TPG_VIDEO_CONFIG_BPP__MASK;
}
#define DSI_TPG_VIDEO_CONFIG_RGB 0x00000004
#define REG_DSI_TEST_PATTERN_GEN_CTRL 0x00000158
#define DSI_TEST_PATTERN_GEN_CTRL_CMD_DMA_PATTERN_SEL__MASK 0x00030000
#define DSI_TEST_PATTERN_GEN_CTRL_CMD_DMA_PATTERN_SEL__SHIFT 16
static inline uint32_t DSI_TEST_PATTERN_GEN_CTRL_CMD_DMA_PATTERN_SEL(enum cmd_dma_pattern_sel val)
{
return ((val) << DSI_TEST_PATTERN_GEN_CTRL_CMD_DMA_PATTERN_SEL__SHIFT) & DSI_TEST_PATTERN_GEN_CTRL_CMD_DMA_PATTERN_SEL__MASK;
}
#define DSI_TEST_PATTERN_GEN_CTRL_CMD_MDP_STREAM0_PATTERN_SEL__MASK 0x00000300
#define DSI_TEST_PATTERN_GEN_CTRL_CMD_MDP_STREAM0_PATTERN_SEL__SHIFT 8
static inline uint32_t DSI_TEST_PATTERN_GEN_CTRL_CMD_MDP_STREAM0_PATTERN_SEL(enum cmd_mdp_stream0_pattern_sel val)
{
return ((val) << DSI_TEST_PATTERN_GEN_CTRL_CMD_MDP_STREAM0_PATTERN_SEL__SHIFT) & DSI_TEST_PATTERN_GEN_CTRL_CMD_MDP_STREAM0_PATTERN_SEL__MASK;
}
#define DSI_TEST_PATTERN_GEN_CTRL_VIDEO_PATTERN_SEL__MASK 0x00000030
#define DSI_TEST_PATTERN_GEN_CTRL_VIDEO_PATTERN_SEL__SHIFT 4
static inline uint32_t DSI_TEST_PATTERN_GEN_CTRL_VIDEO_PATTERN_SEL(enum video_pattern_sel val)
{
return ((val) << DSI_TEST_PATTERN_GEN_CTRL_VIDEO_PATTERN_SEL__SHIFT) & DSI_TEST_PATTERN_GEN_CTRL_VIDEO_PATTERN_SEL__MASK;
}
#define DSI_TEST_PATTERN_GEN_CTRL_TPG_DMA_FIFO_MODE 0x00000004
#define DSI_TEST_PATTERN_GEN_CTRL_CMD_DMA_TPG_EN 0x00000002
#define DSI_TEST_PATTERN_GEN_CTRL_EN 0x00000001
#define REG_DSI_TEST_PATTERN_GEN_CMD_MDP_INIT_VAL0 0x00000168
#define REG_DSI_TEST_PATTERN_GEN_CMD_STREAM0_TRIGGER 0x00000180
#define DSI_TEST_PATTERN_GEN_CMD_STREAM0_TRIGGER_SW_TRIGGER 0x00000001
#define REG_DSI_TPG_MAIN_CONTROL2 0x0000019c
#define DSI_TPG_MAIN_CONTROL2_CMD_MDP0_CHECKERED_RECTANGLE_PATTERN 0x00000080
#define DSI_TPG_MAIN_CONTROL2_CMD_MDP1_CHECKERED_RECTANGLE_PATTERN 0x00010000
#define DSI_TPG_MAIN_CONTROL2_CMD_MDP2_CHECKERED_RECTANGLE_PATTERN 0x02000000
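Note: the new test-pattern-generator defines above follow the generated-header idiom of a __MASK, a __SHIFT and an inline packer. A tiny standalone sketch of that shift-and-mask idiom; the register and field below are invented for illustration, not real DSI registers:

    #include <stdint.h>
    #include <stdio.h>

    /* Hypothetical 2-bit field at bits [5:4] of some control register. */
    #define DEMO_CTRL_PATTERN_SEL__MASK  0x00000030u
    #define DEMO_CTRL_PATTERN_SEL__SHIFT 4

    static inline uint32_t DEMO_CTRL_PATTERN_SEL(uint32_t val)
    {
            /* shift into place, then mask so an out-of-range value cannot
             * spill into neighbouring fields */
            return (val << DEMO_CTRL_PATTERN_SEL__SHIFT) & DEMO_CTRL_PATTERN_SEL__MASK;
    }

    int main(void)
    {
            uint32_t reg = 0;

            reg |= DEMO_CTRL_PATTERN_SEL(2); /* e.g. "fixed pattern" */
            reg |= 0x1;                      /* e.g. an enable bit */
            printf("ctrl = 0x%08x\n", reg);  /* prints 0x00000021 */
            return 0;
    }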
#define REG_DSI_T_CLK_PRE_EXTEND 0x0000017c #define REG_DSI_T_CLK_PRE_EXTEND 0x0000017c
#define DSI_T_CLK_PRE_EXTEND_INC_BY_2_BYTECLK 0x00000001 #define DSI_T_CLK_PRE_EXTEND_INC_BY_2_BYTECLK 0x00000001
......
...@@ -32,9 +32,8 @@ static const char * const dsi_6g_bus_clk_names[] = { ...@@ -32,9 +32,8 @@ static const char * const dsi_6g_bus_clk_names[] = {
static const struct msm_dsi_config msm8974_apq8084_dsi_cfg = { static const struct msm_dsi_config msm8974_apq8084_dsi_cfg = {
.io_offset = DSI_6G_REG_SHIFT, .io_offset = DSI_6G_REG_SHIFT,
.reg_cfg = { .reg_cfg = {
-		.num = 4,
+		.num = 3,
.regs = { .regs = {
{"gdsc", -1, -1},
{"vdd", 150000, 100}, /* 3.0 V */ {"vdd", 150000, 100}, /* 3.0 V */
{"vdda", 100000, 100}, /* 1.2 V */ {"vdda", 100000, 100}, /* 1.2 V */
{"vddio", 100000, 100}, /* 1.8 V */ {"vddio", 100000, 100}, /* 1.8 V */
...@@ -53,9 +52,8 @@ static const char * const dsi_8916_bus_clk_names[] = { ...@@ -53,9 +52,8 @@ static const char * const dsi_8916_bus_clk_names[] = {
static const struct msm_dsi_config msm8916_dsi_cfg = { static const struct msm_dsi_config msm8916_dsi_cfg = {
.io_offset = DSI_6G_REG_SHIFT, .io_offset = DSI_6G_REG_SHIFT,
.reg_cfg = { .reg_cfg = {
-		.num = 3,
+		.num = 2,
.regs = { .regs = {
{"gdsc", -1, -1},
{"vdda", 100000, 100}, /* 1.2 V */ {"vdda", 100000, 100}, /* 1.2 V */
{"vddio", 100000, 100}, /* 1.8 V */ {"vddio", 100000, 100}, /* 1.8 V */
}, },
...@@ -73,9 +71,8 @@ static const char * const dsi_8976_bus_clk_names[] = { ...@@ -73,9 +71,8 @@ static const char * const dsi_8976_bus_clk_names[] = {
static const struct msm_dsi_config msm8976_dsi_cfg = { static const struct msm_dsi_config msm8976_dsi_cfg = {
.io_offset = DSI_6G_REG_SHIFT, .io_offset = DSI_6G_REG_SHIFT,
.reg_cfg = { .reg_cfg = {
-		.num = 3,
+		.num = 2,
.regs = { .regs = {
{"gdsc", -1, -1},
{"vdda", 100000, 100}, /* 1.2 V */ {"vdda", 100000, 100}, /* 1.2 V */
{"vddio", 100000, 100}, /* 1.8 V */ {"vddio", 100000, 100}, /* 1.8 V */
}, },
...@@ -89,9 +86,8 @@ static const struct msm_dsi_config msm8976_dsi_cfg = { ...@@ -89,9 +86,8 @@ static const struct msm_dsi_config msm8976_dsi_cfg = {
static const struct msm_dsi_config msm8994_dsi_cfg = { static const struct msm_dsi_config msm8994_dsi_cfg = {
.io_offset = DSI_6G_REG_SHIFT, .io_offset = DSI_6G_REG_SHIFT,
.reg_cfg = { .reg_cfg = {
-		.num = 7,
+		.num = 6,
.regs = { .regs = {
{"gdsc", -1, -1},
{"vdda", 100000, 100}, /* 1.25 V */ {"vdda", 100000, 100}, /* 1.25 V */
{"vddio", 100000, 100}, /* 1.8 V */ {"vddio", 100000, 100}, /* 1.8 V */
{"vcca", 10000, 100}, /* 1.0 V */ {"vcca", 10000, 100}, /* 1.0 V */
...@@ -154,7 +150,6 @@ static const struct msm_dsi_config sdm660_dsi_cfg = { ...@@ -154,7 +150,6 @@ static const struct msm_dsi_config sdm660_dsi_cfg = {
.reg_cfg = { .reg_cfg = {
.num = 2, .num = 2,
.regs = { .regs = {
{"vdd", 73400, 32 }, /* 0.9 V */
{"vdda", 12560, 4 }, /* 1.2 V */ {"vdda", 12560, 4 }, /* 1.2 V */
}, },
}, },
...@@ -200,6 +195,24 @@ static const struct msm_dsi_config sc7180_dsi_cfg = { ...@@ -200,6 +195,24 @@ static const struct msm_dsi_config sc7180_dsi_cfg = {
.num_dsi = 1, .num_dsi = 1,
}; };
static const char * const dsi_sc7280_bus_clk_names[] = {
"iface", "bus",
};
static const struct msm_dsi_config sc7280_dsi_cfg = {
.io_offset = DSI_6G_REG_SHIFT,
.reg_cfg = {
.num = 1,
.regs = {
{"vdda", 8350, 0 }, /* 1.2 V */
},
},
.bus_clk_names = dsi_sc7280_bus_clk_names,
.num_bus_clks = ARRAY_SIZE(dsi_sc7280_bus_clk_names),
.io_start = { 0xae94000 },
.num_dsi = 1,
};
static const struct msm_dsi_host_cfg_ops msm_dsi_v2_host_ops = { static const struct msm_dsi_host_cfg_ops msm_dsi_v2_host_ops = {
.link_clk_set_rate = dsi_link_clk_set_rate_v2, .link_clk_set_rate = dsi_link_clk_set_rate_v2,
.link_clk_enable = dsi_link_clk_enable_v2, .link_clk_enable = dsi_link_clk_enable_v2,
...@@ -267,6 +280,8 @@ static const struct msm_dsi_cfg_handler dsi_cfg_handlers[] = { ...@@ -267,6 +280,8 @@ static const struct msm_dsi_cfg_handler dsi_cfg_handlers[] = {
&sdm845_dsi_cfg, &msm_dsi_6g_v2_host_ops}, &sdm845_dsi_cfg, &msm_dsi_6g_v2_host_ops},
{MSM_DSI_VER_MAJOR_6G, MSM_DSI_6G_VER_MINOR_V2_4_1, {MSM_DSI_VER_MAJOR_6G, MSM_DSI_6G_VER_MINOR_V2_4_1,
&sc7180_dsi_cfg, &msm_dsi_6g_v2_host_ops}, &sc7180_dsi_cfg, &msm_dsi_6g_v2_host_ops},
{MSM_DSI_VER_MAJOR_6G, MSM_DSI_6G_VER_MINOR_V2_5_0,
&sc7280_dsi_cfg, &msm_dsi_6g_v2_host_ops},
}; };
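Note: SC7280 support is wired up by adding one (major, minor) entry to dsi_cfg_handlers[], which msm_dsi_cfg_get() scans. A self-contained sketch of that version-keyed lookup; the struct names and major numbers here are illustrative, only the minor values are taken from the diff:

    #include <stdint.h>
    #include <stdio.h>
    #include <stddef.h>

    struct demo_cfg { const char *name; };

    struct demo_cfg_handler {
            uint32_t major, minor;
            const struct demo_cfg *cfg;
    };

    static const struct demo_cfg sc7180_cfg = { "sc7180" };
    static const struct demo_cfg sc7280_cfg = { "sc7280" };

    static const struct demo_cfg_handler handlers[] = {
            { 6, 0x20040001, &sc7180_cfg }, /* e.g. V2_4_1 */
            { 6, 0x20050000, &sc7280_cfg }, /* e.g. V2_5_0 */
    };

    static const struct demo_cfg *cfg_get(uint32_t major, uint32_t minor)
    {
            size_t i;

            for (i = 0; i < sizeof(handlers) / sizeof(handlers[0]); i++)
                    if (handlers[i].major == major && handlers[i].minor == minor)
                            return handlers[i].cfg;

            return NULL; /* unknown controller revision */
    }

    int main(void)
    {
            const struct demo_cfg *cfg = cfg_get(6, 0x20050000);

            printf("%s\n", cfg ? cfg->name : "unsupported");
            return 0;
    }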
const struct msm_dsi_cfg_handler *msm_dsi_cfg_get(u32 major, u32 minor) const struct msm_dsi_cfg_handler *msm_dsi_cfg_get(u32 major, u32 minor)
......
...@@ -24,6 +24,7 @@ ...@@ -24,6 +24,7 @@
#define MSM_DSI_6G_VER_MINOR_V2_3_0 0x20030000 #define MSM_DSI_6G_VER_MINOR_V2_3_0 0x20030000
#define MSM_DSI_6G_VER_MINOR_V2_4_0 0x20040000 #define MSM_DSI_6G_VER_MINOR_V2_4_0 0x20040000
#define MSM_DSI_6G_VER_MINOR_V2_4_1 0x20040001 #define MSM_DSI_6G_VER_MINOR_V2_4_1 0x20040001
#define MSM_DSI_6G_VER_MINOR_V2_5_0 0x20050000
#define MSM_DSI_V2_VER_MINOR_8064 0x0 #define MSM_DSI_V2_VER_MINOR_8064 0x0
...@@ -47,7 +48,7 @@ struct msm_dsi_host_cfg_ops { ...@@ -47,7 +48,7 @@ struct msm_dsi_host_cfg_ops {
void* (*tx_buf_get)(struct msm_dsi_host *msm_host); void* (*tx_buf_get)(struct msm_dsi_host *msm_host);
void (*tx_buf_put)(struct msm_dsi_host *msm_host); void (*tx_buf_put)(struct msm_dsi_host *msm_host);
int (*dma_base_get)(struct msm_dsi_host *msm_host, uint64_t *iova); int (*dma_base_get)(struct msm_dsi_host *msm_host, uint64_t *iova);
-	int (*calc_clk_rate)(struct msm_dsi_host *msm_host, bool is_dual_dsi);
+	int (*calc_clk_rate)(struct msm_dsi_host *msm_host, bool is_bonded_dsi);
}; };
struct msm_dsi_cfg_handler { struct msm_dsi_cfg_handler {
......
This diff is collapsed.
This diff is collapsed.
...@@ -5,6 +5,7 @@ ...@@ -5,6 +5,7 @@
#include <linux/clk-provider.h> #include <linux/clk-provider.h>
#include <linux/platform_device.h> #include <linux/platform_device.h>
#include <dt-bindings/phy/phy.h>
#include "dsi_phy.h" #include "dsi_phy.h"
...@@ -461,6 +462,51 @@ int msm_dsi_dphy_timing_calc_v4(struct msm_dsi_dphy_timing *timing, ...@@ -461,6 +462,51 @@ int msm_dsi_dphy_timing_calc_v4(struct msm_dsi_dphy_timing *timing,
return 0; return 0;
} }
int msm_dsi_cphy_timing_calc_v4(struct msm_dsi_dphy_timing *timing,
struct msm_dsi_phy_clk_request *clk_req)
{
const unsigned long bit_rate = clk_req->bitclk_rate;
const unsigned long esc_rate = clk_req->escclk_rate;
s32 ui, ui_x7;
s32 tmax, tmin;
s32 coeff = 1000; /* Precision, should avoid overflow */
s32 temp;
if (!bit_rate || !esc_rate)
return -EINVAL;
ui = mult_frac(NSEC_PER_MSEC, coeff, bit_rate / 1000);
ui_x7 = ui * 7;
temp = S_DIV_ROUND_UP(38 * coeff, ui_x7);
tmin = max_t(s32, temp, 0);
temp = (95 * coeff) / ui_x7;
tmax = max_t(s32, temp, 0);
timing->clk_prepare = linear_inter(tmax, tmin, 50, 0, false);
tmin = DIV_ROUND_UP(50 * coeff, ui_x7);
tmax = 255;
timing->hs_rqst = linear_inter(tmax, tmin, 1, 0, false);
tmin = DIV_ROUND_UP(100 * coeff, ui_x7) - 1;
tmax = 255;
timing->hs_exit = linear_inter(tmax, tmin, 10, 0, false);
tmin = 1;
tmax = 32;
timing->shared_timings.clk_post = linear_inter(tmax, tmin, 80, 0, false);
tmin = min_t(s32, 64, S_DIV_ROUND_UP(262 * coeff, ui_x7) - 1);
tmax = 64;
timing->shared_timings.clk_pre = linear_inter(tmax, tmin, 20, 0, false);
DBG("%d, %d, %d, %d, %d",
timing->shared_timings.clk_pre, timing->shared_timings.clk_post,
timing->clk_prepare, timing->hs_exit, timing->hs_rqst);
return 0;
}
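Note: msm_dsi_cphy_timing_calc_v4() above scales the C-PHY spec limits by the unit interval and then picks a point inside each [tmin, tmax] window. A rough standalone sketch, assuming the driver's linear_inter(tmax, tmin, percent, ...) returns a value roughly percent of the way from tmin toward tmax; that is an assumption about the helper, not a quote of it:

    #include <stdio.h>

    /*
     * Assumed behaviour of linear_inter(): pick a point `percent` of the way
     * from tmin toward tmax, clamped to the window.
     */
    static int linear_inter_sketch(int tmax, int tmin, int percent)
    {
            int v = tmin + ((tmax - tmin) * percent) / 100;

            if (v < tmin)
                    return tmin;
            if (v > tmax)
                    return tmax;
            return v;
    }

    int main(void)
    {
            /* e.g. a prepare-style field: 50% into an illustrative window */
            int tmin = 10, tmax = 40;

            printf("clk_prepare ~ %d\n", linear_inter_sketch(tmax, tmin, 50)); /* 25 */
            return 0;
    }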
static int dsi_phy_regulator_init(struct msm_dsi_phy *phy) static int dsi_phy_regulator_init(struct msm_dsi_phy *phy)
{ {
struct regulator_bulk_data *s = phy->supplies; struct regulator_bulk_data *s = phy->supplies;
...@@ -593,6 +639,8 @@ static const struct of_device_id dsi_phy_dt_match[] = { ...@@ -593,6 +639,8 @@ static const struct of_device_id dsi_phy_dt_match[] = {
.data = &dsi_phy_7nm_cfgs }, .data = &dsi_phy_7nm_cfgs },
{ .compatible = "qcom,dsi-phy-7nm-8150", { .compatible = "qcom,dsi-phy-7nm-8150",
.data = &dsi_phy_7nm_8150_cfgs }, .data = &dsi_phy_7nm_8150_cfgs },
{ .compatible = "qcom,sc7280-dsi-phy-7nm",
.data = &dsi_phy_7nm_7280_cfgs },
#endif #endif
{} {}
}; };
...@@ -625,17 +673,13 @@ static int dsi_phy_driver_probe(struct platform_device *pdev) ...@@ -625,17 +673,13 @@ static int dsi_phy_driver_probe(struct platform_device *pdev)
{ {
struct msm_dsi_phy *phy; struct msm_dsi_phy *phy;
struct device *dev = &pdev->dev; struct device *dev = &pdev->dev;
-	const struct of_device_id *match;
+	u32 phy_type;
int ret; int ret;
phy = devm_kzalloc(dev, sizeof(*phy), GFP_KERNEL); phy = devm_kzalloc(dev, sizeof(*phy), GFP_KERNEL);
if (!phy) if (!phy)
return -ENOMEM; return -ENOMEM;
match = of_match_node(dsi_phy_dt_match, dev->of_node);
if (!match)
return -ENODEV;
phy->provided_clocks = devm_kzalloc(dev, phy->provided_clocks = devm_kzalloc(dev,
struct_size(phy->provided_clocks, hws, NUM_PROVIDED_CLKS), struct_size(phy->provided_clocks, hws, NUM_PROVIDED_CLKS),
GFP_KERNEL); GFP_KERNEL);
...@@ -644,7 +688,10 @@ static int dsi_phy_driver_probe(struct platform_device *pdev) ...@@ -644,7 +688,10 @@ static int dsi_phy_driver_probe(struct platform_device *pdev)
phy->provided_clocks->num = NUM_PROVIDED_CLKS; phy->provided_clocks->num = NUM_PROVIDED_CLKS;
-	phy->cfg = match->data;
+	phy->cfg = of_device_get_match_data(&pdev->dev);
+	if (!phy->cfg)
+		return -ENODEV;
phy->pdev = pdev; phy->pdev = pdev;
phy->id = dsi_phy_get_id(phy); phy->id = dsi_phy_get_id(phy);
...@@ -657,6 +704,8 @@ static int dsi_phy_driver_probe(struct platform_device *pdev) ...@@ -657,6 +704,8 @@ static int dsi_phy_driver_probe(struct platform_device *pdev)
phy->regulator_ldo_mode = of_property_read_bool(dev->of_node, phy->regulator_ldo_mode = of_property_read_bool(dev->of_node,
"qcom,dsi-phy-regulator-ldo-mode"); "qcom,dsi-phy-regulator-ldo-mode");
if (!of_property_read_u32(dev->of_node, "phy-type", &phy_type))
phy->cphy_mode = (phy_type == PHY_TYPE_CPHY);
phy->base = msm_ioremap_size(pdev, "dsi_phy", "DSI_PHY", &phy->base_size); phy->base = msm_ioremap_size(pdev, "dsi_phy", "DSI_PHY", &phy->base_size);
if (IS_ERR(phy->base)) { if (IS_ERR(phy->base)) {
...@@ -754,7 +803,8 @@ void __exit msm_dsi_phy_driver_unregister(void) ...@@ -754,7 +803,8 @@ void __exit msm_dsi_phy_driver_unregister(void)
} }
 int msm_dsi_phy_enable(struct msm_dsi_phy *phy,
-			struct msm_dsi_phy_clk_request *clk_req)
+			struct msm_dsi_phy_clk_request *clk_req,
+			struct msm_dsi_phy_shared_timings *shared_timings)
{ {
struct device *dev = &phy->pdev->dev; struct device *dev = &phy->pdev->dev;
int ret; int ret;
...@@ -782,6 +832,9 @@ int msm_dsi_phy_enable(struct msm_dsi_phy *phy, ...@@ -782,6 +832,9 @@ int msm_dsi_phy_enable(struct msm_dsi_phy *phy,
goto phy_en_fail; goto phy_en_fail;
} }
memcpy(shared_timings, &phy->timing.shared_timings,
sizeof(*shared_timings));
/* /*
* Resetting DSI PHY silently changes its PLL registers to reset status, * Resetting DSI PHY silently changes its PLL registers to reset status,
* which will confuse clock driver and result in wrong output rate of * which will confuse clock driver and result in wrong output rate of
...@@ -821,13 +874,6 @@ void msm_dsi_phy_disable(struct msm_dsi_phy *phy) ...@@ -821,13 +874,6 @@ void msm_dsi_phy_disable(struct msm_dsi_phy *phy)
dsi_phy_disable_resource(phy); dsi_phy_disable_resource(phy);
} }
void msm_dsi_phy_get_shared_timings(struct msm_dsi_phy *phy,
struct msm_dsi_phy_shared_timings *shared_timings)
{
memcpy(shared_timings, &phy->timing.shared_timings,
sizeof(*shared_timings));
}
void msm_dsi_phy_set_usecase(struct msm_dsi_phy *phy, void msm_dsi_phy_set_usecase(struct msm_dsi_phy *phy,
enum msm_dsi_phy_usecase uc) enum msm_dsi_phy_usecase uc)
{ {
...@@ -835,6 +881,15 @@ void msm_dsi_phy_set_usecase(struct msm_dsi_phy *phy, ...@@ -835,6 +881,15 @@ void msm_dsi_phy_set_usecase(struct msm_dsi_phy *phy,
phy->usecase = uc; phy->usecase = uc;
} }
/* Returns true if we have to clear DSI_LANE_CTRL.HS_REQ_SEL_PHY */
bool msm_dsi_phy_set_continuous_clock(struct msm_dsi_phy *phy, bool enable)
{
if (!phy || !phy->cfg->ops.set_continuous_clock)
return false;
return phy->cfg->ops.set_continuous_clock(phy, enable);
}
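Note: msm_dsi_phy_set_continuous_clock() above is a thin wrapper that tolerates PHYs without a set_continuous_clock op and reports back whether the host must clear HS_REQ_SEL_PHY. A minimal standalone sketch of that optional-callback pattern; the types are illustrative:

    #include <stdbool.h>
    #include <stdio.h>

    struct demo_phy_ops {
            /* optional: not every PHY generation implements it */
            bool (*set_continuous_clock)(void *phy, bool enable);
    };

    struct demo_phy {
            const struct demo_phy_ops *ops;
    };

    static bool demo_set_continuous_clock(struct demo_phy *phy, bool enable)
    {
            /* fall back to "not supported" when the op is absent */
            if (!phy || !phy->ops || !phy->ops->set_continuous_clock)
                    return false;

            return phy->ops->set_continuous_clock(phy, enable);
    }

    static bool cphy_like_impl(void *phy, bool enable)
    {
            (void)phy;
            return enable; /* pretend the hardware accepted the request */
    }

    int main(void)
    {
            const struct demo_phy_ops ops = { .set_continuous_clock = cphy_like_impl };
            struct demo_phy with_op = { &ops }, without_op = { NULL };

            printf("%d %d\n",
                   demo_set_continuous_clock(&with_op, true),
                   demo_set_continuous_clock(&without_op, true));
            return 0;
    }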
int msm_dsi_phy_get_clk_provider(struct msm_dsi_phy *phy, int msm_dsi_phy_get_clk_provider(struct msm_dsi_phy *phy,
struct clk **byte_clk_provider, struct clk **pixel_clk_provider) struct clk **byte_clk_provider, struct clk **pixel_clk_provider)
{ {
......
...@@ -24,6 +24,7 @@ struct msm_dsi_phy_ops { ...@@ -24,6 +24,7 @@ struct msm_dsi_phy_ops {
void (*disable)(struct msm_dsi_phy *phy); void (*disable)(struct msm_dsi_phy *phy);
void (*save_pll_state)(struct msm_dsi_phy *phy); void (*save_pll_state)(struct msm_dsi_phy *phy);
int (*restore_pll_state)(struct msm_dsi_phy *phy); int (*restore_pll_state)(struct msm_dsi_phy *phy);
bool (*set_continuous_clock)(struct msm_dsi_phy *phy, bool enable);
}; };
struct msm_dsi_phy_cfg { struct msm_dsi_phy_cfg {
...@@ -51,6 +52,7 @@ extern const struct msm_dsi_phy_cfg dsi_phy_10nm_cfgs; ...@@ -51,6 +52,7 @@ extern const struct msm_dsi_phy_cfg dsi_phy_10nm_cfgs;
extern const struct msm_dsi_phy_cfg dsi_phy_10nm_8998_cfgs; extern const struct msm_dsi_phy_cfg dsi_phy_10nm_8998_cfgs;
extern const struct msm_dsi_phy_cfg dsi_phy_7nm_cfgs; extern const struct msm_dsi_phy_cfg dsi_phy_7nm_cfgs;
extern const struct msm_dsi_phy_cfg dsi_phy_7nm_8150_cfgs; extern const struct msm_dsi_phy_cfg dsi_phy_7nm_8150_cfgs;
extern const struct msm_dsi_phy_cfg dsi_phy_7nm_7280_cfgs;
struct msm_dsi_dphy_timing { struct msm_dsi_dphy_timing {
u32 clk_zero; u32 clk_zero;
...@@ -99,6 +101,7 @@ struct msm_dsi_phy { ...@@ -99,6 +101,7 @@ struct msm_dsi_phy {
enum msm_dsi_phy_usecase usecase; enum msm_dsi_phy_usecase usecase;
bool regulator_ldo_mode; bool regulator_ldo_mode;
bool cphy_mode;
struct clk_hw *vco_hw; struct clk_hw *vco_hw;
bool pll_on; bool pll_on;
...@@ -119,5 +122,7 @@ int msm_dsi_dphy_timing_calc_v3(struct msm_dsi_dphy_timing *timing, ...@@ -119,5 +122,7 @@ int msm_dsi_dphy_timing_calc_v3(struct msm_dsi_dphy_timing *timing,
struct msm_dsi_phy_clk_request *clk_req); struct msm_dsi_phy_clk_request *clk_req);
int msm_dsi_dphy_timing_calc_v4(struct msm_dsi_dphy_timing *timing, int msm_dsi_dphy_timing_calc_v4(struct msm_dsi_dphy_timing *timing,
struct msm_dsi_phy_clk_request *clk_req); struct msm_dsi_phy_clk_request *clk_req);
int msm_dsi_cphy_timing_calc_v4(struct msm_dsi_dphy_timing *timing,
struct msm_dsi_phy_clk_request *clk_req);
#endif /* __DSI_PHY_H__ */ #endif /* __DSI_PHY_H__ */
...@@ -84,7 +84,7 @@ struct dsi_pll_10nm { ...@@ -84,7 +84,7 @@ struct dsi_pll_10nm {
#define to_pll_10nm(x) container_of(x, struct dsi_pll_10nm, clk_hw) #define to_pll_10nm(x) container_of(x, struct dsi_pll_10nm, clk_hw)
/* /*
- * Global list of private DSI PLL struct pointers. We need this for Dual DSI
+ * Global list of private DSI PLL struct pointers. We need this for bonded DSI
* mode, where the master PLL's clk_ops needs access the slave's private data * mode, where the master PLL's clk_ops needs access the slave's private data
*/ */
static struct dsi_pll_10nm *pll_10nm_list[DSI_MAX]; static struct dsi_pll_10nm *pll_10nm_list[DSI_MAX];
......
...@@ -86,7 +86,7 @@ struct dsi_pll_14nm { ...@@ -86,7 +86,7 @@ struct dsi_pll_14nm {
/* /*
* Private struct for N1/N2 post-divider clocks. These clocks are similar to * Private struct for N1/N2 post-divider clocks. These clocks are similar to
* the generic clk_divider class of clocks. The only difference is that it * the generic clk_divider class of clocks. The only difference is that it
- * also sets the slave DSI PLL's post-dividers if in Dual DSI mode
+ * also sets the slave DSI PLL's post-dividers if in bonded DSI mode
*/ */
struct dsi_pll_14nm_postdiv { struct dsi_pll_14nm_postdiv {
struct clk_hw hw; struct clk_hw hw;
...@@ -102,7 +102,7 @@ struct dsi_pll_14nm_postdiv { ...@@ -102,7 +102,7 @@ struct dsi_pll_14nm_postdiv {
#define to_pll_14nm_postdiv(_hw) container_of(_hw, struct dsi_pll_14nm_postdiv, hw) #define to_pll_14nm_postdiv(_hw) container_of(_hw, struct dsi_pll_14nm_postdiv, hw)
/* /*
- * Global list of private DSI PLL struct pointers. We need this for Dual DSI
+ * Global list of private DSI PLL struct pointers. We need this for bonded DSI
* mode, where the master PLL's clk_ops needs access the slave's private data * mode, where the master PLL's clk_ops needs access the slave's private data
*/ */
static struct dsi_pll_14nm *pll_14nm_list[DSI_MAX]; static struct dsi_pll_14nm *pll_14nm_list[DSI_MAX];
...@@ -658,7 +658,7 @@ static int dsi_pll_14nm_postdiv_set_rate(struct clk_hw *hw, unsigned long rate, ...@@ -658,7 +658,7 @@ static int dsi_pll_14nm_postdiv_set_rate(struct clk_hw *hw, unsigned long rate,
val |= value << shift; val |= value << shift;
dsi_phy_write(base + REG_DSI_14nm_PHY_CMN_CLK_CFG0, val); dsi_phy_write(base + REG_DSI_14nm_PHY_CMN_CLK_CFG0, val);
-	/* If we're master in dual DSI mode, then the slave PLL's post-dividers
+	/* If we're master in bonded DSI mode, then the slave PLL's post-dividers
* follow the master's post dividers * follow the master's post dividers
*/ */
if (pll_14nm->phy->usecase == MSM_DSI_PHY_MASTER) { if (pll_14nm->phy->usecase == MSM_DSI_PHY_MASTER) {
...@@ -1050,7 +1050,7 @@ const struct msm_dsi_phy_cfg dsi_phy_14nm_660_cfgs = { ...@@ -1050,7 +1050,7 @@ const struct msm_dsi_phy_cfg dsi_phy_14nm_660_cfgs = {
.reg_cfg = { .reg_cfg = {
.num = 1, .num = 1,
.regs = { .regs = {
{"vcca", 17000, 32}, {"vcca", 73400, 32},
}, },
}, },
.ops = { .ops = {
......
This diff is collapsed.
...@@ -603,6 +603,7 @@ static int msm_drm_init(struct device *dev, const struct drm_driver *drv) ...@@ -603,6 +603,7 @@ static int msm_drm_init(struct device *dev, const struct drm_driver *drv)
if (IS_ERR(priv->event_thread[i].worker)) { if (IS_ERR(priv->event_thread[i].worker)) {
ret = PTR_ERR(priv->event_thread[i].worker); ret = PTR_ERR(priv->event_thread[i].worker);
DRM_DEV_ERROR(dev, "failed to create crtc_event kthread\n"); DRM_DEV_ERROR(dev, "failed to create crtc_event kthread\n");
ret = PTR_ERR(priv->event_thread[i].worker);
goto err_msm_uninit; goto err_msm_uninit;
} }
...@@ -1057,17 +1058,7 @@ static const struct drm_ioctl_desc msm_ioctls[] = { ...@@ -1057,17 +1058,7 @@ static const struct drm_ioctl_desc msm_ioctls[] = {
DRM_IOCTL_DEF_DRV(MSM_SUBMITQUEUE_QUERY, msm_ioctl_submitqueue_query, DRM_RENDER_ALLOW), DRM_IOCTL_DEF_DRV(MSM_SUBMITQUEUE_QUERY, msm_ioctl_submitqueue_query, DRM_RENDER_ALLOW),
}; };
-static const struct file_operations fops = {
-	.owner = THIS_MODULE,
-	.open = drm_open,
-	.release = drm_release,
-	.unlocked_ioctl = drm_ioctl,
-	.compat_ioctl = drm_compat_ioctl,
-	.poll = drm_poll,
-	.read = drm_read,
-	.llseek = no_llseek,
-	.mmap = msm_gem_mmap,
-};
+DEFINE_DRM_GEM_FOPS(fops);
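Note: DEFINE_DRM_GEM_FOPS() swaps the hand-rolled file_operations for the common DRM helper. To the best of my knowledge it expands to roughly the table below, the key difference from the removed code being the generic drm_gem_mmap() (which dispatches to the object's .mmap callback) instead of the driver-private msm_gem_mmap(); this is a sketch of the macro, not a verbatim copy of drm_gem.h:

    /* Approximate expansion of DEFINE_DRM_GEM_FOPS(fops) */
    static const struct file_operations fops = {
            .owner          = THIS_MODULE,
            .open           = drm_open,
            .release        = drm_release,
            .unlocked_ioctl = drm_ioctl,
            .compat_ioctl   = drm_compat_ioctl,
            .poll           = drm_poll,
            .read           = drm_read,
            .llseek         = noop_llseek,
            .mmap           = drm_gem_mmap,
    };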
static const struct drm_driver msm_driver = { static const struct drm_driver msm_driver = {
.driver_features = DRIVER_GEM | .driver_features = DRIVER_GEM |
...@@ -1083,7 +1074,7 @@ static const struct drm_driver msm_driver = { ...@@ -1083,7 +1074,7 @@ static const struct drm_driver msm_driver = {
.prime_handle_to_fd = drm_gem_prime_handle_to_fd, .prime_handle_to_fd = drm_gem_prime_handle_to_fd,
.prime_fd_to_handle = drm_gem_prime_fd_to_handle, .prime_fd_to_handle = drm_gem_prime_fd_to_handle,
.gem_prime_import_sg_table = msm_gem_prime_import_sg_table, .gem_prime_import_sg_table = msm_gem_prime_import_sg_table,
-	.gem_prime_mmap = msm_gem_prime_mmap,
+	.gem_prime_mmap = drm_gem_prime_mmap,
#ifdef CONFIG_DEBUG_FS #ifdef CONFIG_DEBUG_FS
.debugfs_init = msm_debugfs_init, .debugfs_init = msm_debugfs_init,
#endif #endif
......
...@@ -309,7 +309,6 @@ void msm_gem_shrinker_cleanup(struct drm_device *dev); ...@@ -309,7 +309,6 @@ void msm_gem_shrinker_cleanup(struct drm_device *dev);
struct sg_table *msm_gem_prime_get_sg_table(struct drm_gem_object *obj); struct sg_table *msm_gem_prime_get_sg_table(struct drm_gem_object *obj);
int msm_gem_prime_vmap(struct drm_gem_object *obj, struct dma_buf_map *map); int msm_gem_prime_vmap(struct drm_gem_object *obj, struct dma_buf_map *map);
void msm_gem_prime_vunmap(struct drm_gem_object *obj, struct dma_buf_map *map); void msm_gem_prime_vunmap(struct drm_gem_object *obj, struct dma_buf_map *map);
int msm_gem_prime_mmap(struct drm_gem_object *obj, struct vm_area_struct *vma);
struct drm_gem_object *msm_gem_prime_import_sg_table(struct drm_device *dev, struct drm_gem_object *msm_gem_prime_import_sg_table(struct drm_device *dev,
struct dma_buf_attachment *attach, struct sg_table *sg); struct dma_buf_attachment *attach, struct sg_table *sg);
int msm_gem_prime_pin(struct drm_gem_object *obj); int msm_gem_prime_pin(struct drm_gem_object *obj);
...@@ -350,7 +349,9 @@ void __exit msm_dsi_unregister(void); ...@@ -350,7 +349,9 @@ void __exit msm_dsi_unregister(void);
int msm_dsi_modeset_init(struct msm_dsi *msm_dsi, struct drm_device *dev, int msm_dsi_modeset_init(struct msm_dsi *msm_dsi, struct drm_device *dev,
struct drm_encoder *encoder); struct drm_encoder *encoder);
void msm_dsi_snapshot(struct msm_disp_state *disp_state, struct msm_dsi *msm_dsi); void msm_dsi_snapshot(struct msm_disp_state *disp_state, struct msm_dsi *msm_dsi);
bool msm_dsi_is_cmd_mode(struct msm_dsi *msm_dsi);
bool msm_dsi_is_bonded_dsi(struct msm_dsi *msm_dsi);
bool msm_dsi_is_master_dsi(struct msm_dsi *msm_dsi);
#else #else
static inline void __init msm_dsi_register(void) static inline void __init msm_dsi_register(void)
{ {
...@@ -367,7 +368,18 @@ static inline int msm_dsi_modeset_init(struct msm_dsi *msm_dsi, ...@@ -367,7 +368,18 @@ static inline int msm_dsi_modeset_init(struct msm_dsi *msm_dsi,
static inline void msm_dsi_snapshot(struct msm_disp_state *disp_state, struct msm_dsi *msm_dsi) static inline void msm_dsi_snapshot(struct msm_disp_state *disp_state, struct msm_dsi *msm_dsi)
{ {
} }
static inline bool msm_dsi_is_cmd_mode(struct msm_dsi *msm_dsi)
{
return false;
}
static inline bool msm_dsi_is_bonded_dsi(struct msm_dsi *msm_dsi)
{
return false;
}
static inline bool msm_dsi_is_master_dsi(struct msm_dsi *msm_dsi)
{
return false;
}
#endif #endif
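Note: the msm_dsi_is_cmd_mode()/msm_dsi_is_bonded_dsi()/msm_dsi_is_master_dsi() stubs above keep callers free of #ifdef CONFIG_DRM_MSM_DSI checks when DSI support is compiled out. A standalone sketch of that compiled-out-stub pattern; DEMO_HAVE_DSI is a made-up switch:

    #include <stdio.h>

    /* Toggle to mimic the DSI config option being enabled or not. */
    #define DEMO_HAVE_DSI 0

    #if DEMO_HAVE_DSI
    static int demo_dsi_is_cmd_mode(void) { return 1; }
    #else
    /* Stub: callers never need their own #ifdefs around the call site. */
    static inline int demo_dsi_is_cmd_mode(void) { return 0; }
    #endif

    int main(void)
    {
            printf("cmd mode: %d\n", demo_dsi_is_cmd_mode());
            return 0;
    }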
#ifdef CONFIG_DRM_MSM_DP #ifdef CONFIG_DRM_MSM_DP
......
...@@ -8,13 +8,12 @@ ...@@ -8,13 +8,12 @@
#include <drm/drm_crtc.h> #include <drm/drm_crtc.h>
#include <drm/drm_fb_helper.h> #include <drm/drm_fb_helper.h>
#include <drm/drm_fourcc.h> #include <drm/drm_fourcc.h>
#include <drm/drm_prime.h>
#include "msm_drv.h" #include "msm_drv.h"
#include "msm_gem.h" #include "msm_gem.h"
#include "msm_kms.h" #include "msm_kms.h"
extern int msm_gem_mmap_obj(struct drm_gem_object *obj,
struct vm_area_struct *vma);
static int msm_fbdev_mmap(struct fb_info *info, struct vm_area_struct *vma); static int msm_fbdev_mmap(struct fb_info *info, struct vm_area_struct *vma);
/* /*
...@@ -48,15 +47,8 @@ static int msm_fbdev_mmap(struct fb_info *info, struct vm_area_struct *vma) ...@@ -48,15 +47,8 @@ static int msm_fbdev_mmap(struct fb_info *info, struct vm_area_struct *vma)
struct drm_fb_helper *helper = (struct drm_fb_helper *)info->par; struct drm_fb_helper *helper = (struct drm_fb_helper *)info->par;
struct msm_fbdev *fbdev = to_msm_fbdev(helper); struct msm_fbdev *fbdev = to_msm_fbdev(helper);
struct drm_gem_object *bo = msm_framebuffer_bo(fbdev->fb, 0); struct drm_gem_object *bo = msm_framebuffer_bo(fbdev->fb, 0);
-	int ret = 0;
-	ret = drm_gem_mmap_obj(bo, bo->size, vma);
-	if (ret) {
-		pr_err("%s:drm_gem_mmap_obj fail\n", __func__);
-		return ret;
-	}
-	return msm_gem_mmap_obj(bo, vma);
+	return drm_gem_prime_mmap(bo, vma);
} }
static int msm_fbdev_create(struct drm_fb_helper *helper, static int msm_fbdev_create(struct drm_fb_helper *helper,
......
...@@ -217,31 +217,6 @@ static pgprot_t msm_gem_pgprot(struct msm_gem_object *msm_obj, pgprot_t prot) ...@@ -217,31 +217,6 @@ static pgprot_t msm_gem_pgprot(struct msm_gem_object *msm_obj, pgprot_t prot)
return prot; return prot;
} }
int msm_gem_mmap_obj(struct drm_gem_object *obj,
struct vm_area_struct *vma)
{
struct msm_gem_object *msm_obj = to_msm_bo(obj);
vma->vm_flags &= ~VM_PFNMAP;
vma->vm_flags |= VM_MIXEDMAP;
vma->vm_page_prot = msm_gem_pgprot(msm_obj, vm_get_page_prot(vma->vm_flags));
return 0;
}
int msm_gem_mmap(struct file *filp, struct vm_area_struct *vma)
{
int ret;
ret = drm_gem_mmap(filp, vma);
if (ret) {
DBG("mmap failed: %d", ret);
return ret;
}
return msm_gem_mmap_obj(vma->vm_private_data, vma);
}
static vm_fault_t msm_gem_fault(struct vm_fault *vmf) static vm_fault_t msm_gem_fault(struct vm_fault *vmf)
{ {
struct vm_area_struct *vma = vmf->vma; struct vm_area_struct *vma = vmf->vma;
...@@ -817,8 +792,7 @@ void msm_gem_active_get(struct drm_gem_object *obj, struct msm_gpu *gpu) ...@@ -817,8 +792,7 @@ void msm_gem_active_get(struct drm_gem_object *obj, struct msm_gpu *gpu)
mutex_lock(&priv->mm_lock); mutex_lock(&priv->mm_lock);
if (msm_obj->evictable) if (msm_obj->evictable)
mark_unevictable(msm_obj); mark_unevictable(msm_obj);
-			list_del(&msm_obj->mm_list);
-			list_add_tail(&msm_obj->mm_list, &gpu->active_list);
+			list_move_tail(&msm_obj->mm_list, &gpu->active_list);
mutex_unlock(&priv->mm_lock); mutex_unlock(&priv->mm_lock);
} }
} }
...@@ -1077,6 +1051,17 @@ void msm_gem_free_object(struct drm_gem_object *obj) ...@@ -1077,6 +1051,17 @@ void msm_gem_free_object(struct drm_gem_object *obj)
kfree(msm_obj); kfree(msm_obj);
} }
static int msm_gem_object_mmap(struct drm_gem_object *obj, struct vm_area_struct *vma)
{
struct msm_gem_object *msm_obj = to_msm_bo(obj);
vma->vm_flags &= ~VM_PFNMAP;
vma->vm_flags |= VM_MIXEDMAP | VM_DONTEXPAND;
vma->vm_page_prot = msm_gem_pgprot(msm_obj, vm_get_page_prot(vma->vm_flags));
return 0;
}
/* convenience method to construct a GEM buffer object, and userspace handle */ /* convenience method to construct a GEM buffer object, and userspace handle */
int msm_gem_new_handle(struct drm_device *dev, struct drm_file *file, int msm_gem_new_handle(struct drm_device *dev, struct drm_file *file,
uint32_t size, uint32_t flags, uint32_t *handle, uint32_t size, uint32_t flags, uint32_t *handle,
...@@ -1114,6 +1099,7 @@ static const struct drm_gem_object_funcs msm_gem_object_funcs = { ...@@ -1114,6 +1099,7 @@ static const struct drm_gem_object_funcs msm_gem_object_funcs = {
.get_sg_table = msm_gem_prime_get_sg_table, .get_sg_table = msm_gem_prime_get_sg_table,
.vmap = msm_gem_prime_vmap, .vmap = msm_gem_prime_vmap,
.vunmap = msm_gem_prime_vunmap, .vunmap = msm_gem_prime_vunmap,
.mmap = msm_gem_object_mmap,
.vm_ops = &vm_ops, .vm_ops = &vm_ops,
}; };
......
...@@ -106,9 +106,6 @@ struct msm_gem_object { ...@@ -106,9 +106,6 @@ struct msm_gem_object {
}; };
#define to_msm_bo(x) container_of(x, struct msm_gem_object, base) #define to_msm_bo(x) container_of(x, struct msm_gem_object, base)
int msm_gem_mmap_obj(struct drm_gem_object *obj,
struct vm_area_struct *vma);
int msm_gem_mmap(struct file *filp, struct vm_area_struct *vma);
uint64_t msm_gem_mmap_offset(struct drm_gem_object *obj); uint64_t msm_gem_mmap_offset(struct drm_gem_object *obj);
int msm_gem_get_iova(struct drm_gem_object *obj, int msm_gem_get_iova(struct drm_gem_object *obj,
struct msm_gem_address_space *aspace, uint64_t *iova); struct msm_gem_address_space *aspace, uint64_t *iova);
......
...@@ -39,17 +39,6 @@ void msm_gem_prime_vunmap(struct drm_gem_object *obj, struct dma_buf_map *map) ...@@ -39,17 +39,6 @@ void msm_gem_prime_vunmap(struct drm_gem_object *obj, struct dma_buf_map *map)
msm_gem_put_vaddr(obj); msm_gem_put_vaddr(obj);
} }
int msm_gem_prime_mmap(struct drm_gem_object *obj, struct vm_area_struct *vma)
{
int ret;
ret = drm_gem_mmap_obj(obj, obj->size, vma);
if (ret < 0)
return ret;
return msm_gem_mmap_obj(vma->vm_private_data, vma);
}
struct drm_gem_object *msm_gem_prime_import_sg_table(struct drm_device *dev, struct drm_gem_object *msm_gem_prime_import_sg_table(struct drm_device *dev,
struct dma_buf_attachment *attach, struct sg_table *sg) struct dma_buf_attachment *attach, struct sg_table *sg)
{ {
......
...@@ -117,9 +117,6 @@ struct msm_kms_funcs { ...@@ -117,9 +117,6 @@ struct msm_kms_funcs {
struct drm_encoder *encoder, struct drm_encoder *encoder,
struct drm_encoder *slave_encoder, struct drm_encoder *slave_encoder,
bool is_cmd_mode); bool is_cmd_mode);
void (*set_encoder_mode)(struct msm_kms *kms,
struct drm_encoder *encoder,
bool cmd_mode);
/* cleanup: */ /* cleanup: */
void (*destroy)(struct msm_kms *kms); void (*destroy)(struct msm_kms *kms);
......
...@@ -20,5 +20,7 @@ ...@@ -20,5 +20,7 @@
#define PHY_TYPE_XPCS 7 #define PHY_TYPE_XPCS 7
#define PHY_TYPE_SGMII 8 #define PHY_TYPE_SGMII 8
#define PHY_TYPE_QSGMII 9 #define PHY_TYPE_QSGMII 9
#define PHY_TYPE_DPHY 10
#define PHY_TYPE_CPHY 11
#endif /* _DT_BINDINGS_PHY */ #endif /* _DT_BINDINGS_PHY */