Commit 17180961 authored by Dmitry Baryshkov's avatar Dmitry Baryshkov

Merge branches 'msm-next-lumag-core', 'msm-next-lumag-dpu',...

Merge branches 'msm-next-lumag-core', 'msm-next-lumag-dpu', 'msm-next-lumag-dp', 'msm-next-lumag-dsi', 'msm-next-lumag-hdmi' and 'msm-next-lumag-mdp5' into msm-next-lumag
...@@ -21,6 +21,7 @@ properties:
          - qcom,sc7280-edp
          - qcom,sc8180x-dp
          - qcom,sc8180x-edp
          - qcom,sm8350-dp
  reg:
    items:
......
# SPDX-License-Identifier: GPL-2.0-only or BSD-2-Clause
%YAML 1.2
---
$id: http://devicetree.org/schemas/display/msm/dpu-msm8998.yaml#
$schema: http://devicetree.org/meta-schemas/core.yaml#
title: Qualcomm Display DPU dt properties for MSM8998 target
maintainers:
- AngeloGioacchino Del Regno <angelogioacchino.delregno@somainline.org>
description: |
Device tree bindings for the MSM Mobile Display Subsystem (MDSS) that encapsulates
sub-blocks like the DPU display controller, DSI and DP interfaces, etc. The device
tree bindings of MDSS and DPU described here are for the MSM8998 target.
properties:
compatible:
items:
- const: qcom,msm8998-mdss
reg:
maxItems: 1
reg-names:
const: mdss
power-domains:
maxItems: 1
clocks:
items:
- description: Display AHB clock
- description: Display AXI clock
- description: Display core clock
clock-names:
items:
- const: iface
- const: bus
- const: core
interrupts:
maxItems: 1
interrupt-controller: true
"#address-cells": true
"#size-cells": true
"#interrupt-cells":
const: 1
iommus:
items:
- description: Phandle to apps_smmu node with SID mask for Hard-Fail port0
ranges: true
patternProperties:
"^display-controller@[0-9a-f]+$":
type: object
description: Node containing the properties of DPU.
properties:
compatible:
items:
- const: qcom,msm8998-dpu
reg:
items:
- description: Address offset and size for mdp register set
- description: Address offset and size for regdma register set
- description: Address offset and size for vbif register set
- description: Address offset and size for non-realtime vbif register set
reg-names:
items:
- const: mdp
- const: regdma
- const: vbif
- const: vbif_nrt
clocks:
items:
- description: Display ahb clock
- description: Display axi clock
- description: Display mem-noc clock
- description: Display core clock
- description: Display vsync clock
clock-names:
items:
- const: iface
- const: bus
- const: mnoc
- const: core
- const: vsync
interrupts:
maxItems: 1
power-domains:
maxItems: 1
operating-points-v2: true
ports:
$ref: /schemas/graph.yaml#/properties/ports
description: |
Contains the list of output ports from DPU device. These ports
connect to interfaces that are external to the DPU hardware,
such as DSI, DP etc. Each output port contains an endpoint that
describes how it is connected to an external interface.
properties:
port@0:
$ref: /schemas/graph.yaml#/properties/port
description: DPU_INTF1 (DSI1)
port@1:
$ref: /schemas/graph.yaml#/properties/port
description: DPU_INTF2 (DSI2)
required:
- port@0
- port@1
required:
- compatible
- reg
- reg-names
- clocks
- interrupts
- power-domains
- operating-points-v2
- ports
required:
- compatible
- reg
- reg-names
- power-domains
- clocks
- interrupts
- interrupt-controller
- iommus
- ranges
additionalProperties: false
examples:
- |
#include <dt-bindings/clock/qcom,mmcc-msm8998.h>
#include <dt-bindings/interrupt-controller/arm-gic.h>
#include <dt-bindings/power/qcom-rpmpd.h>
display-subsystem@c900000 {
compatible = "qcom,msm8998-mdss";
reg = <0x0c900000 0x1000>;
reg-names = "mdss";
clocks = <&mmcc MDSS_AHB_CLK>,
<&mmcc MDSS_AXI_CLK>,
<&mmcc MDSS_MDP_CLK>;
clock-names = "iface", "bus", "core";
#address-cells = <1>;
#interrupt-cells = <1>;
#size-cells = <1>;
interrupts = <GIC_SPI 83 IRQ_TYPE_LEVEL_HIGH>;
interrupt-controller;
iommus = <&mmss_smmu 0>;
power-domains = <&mmcc MDSS_GDSC>;
ranges;
display-controller@c901000 {
compatible = "qcom,msm8998-dpu";
reg = <0x0c901000 0x8f000>,
<0x0c9a8e00 0xf0>,
<0x0c9b0000 0x2008>,
<0x0c9b8000 0x1040>;
reg-names = "mdp", "regdma", "vbif", "vbif_nrt";
clocks = <&mmcc MDSS_AHB_CLK>,
<&mmcc MDSS_AXI_CLK>,
<&mmcc MNOC_AHB_CLK>,
<&mmcc MDSS_MDP_CLK>,
<&mmcc MDSS_VSYNC_CLK>;
clock-names = "iface", "bus", "mnoc", "core", "vsync";
interrupt-parent = <&mdss>;
interrupts = <0 IRQ_TYPE_LEVEL_HIGH>;
operating-points-v2 = <&mdp_opp_table>;
power-domains = <&rpmpd MSM8998_VDDMX>;
ports {
#address-cells = <1>;
#size-cells = <0>;
port@0 {
reg = <0>;
dpu_intf1_out: endpoint {
remote-endpoint = <&dsi0_in>;
};
};
port@1 {
reg = <1>;
dpu_intf2_out: endpoint {
remote-endpoint = <&dsi1_in>;
};
};
};
};
};
...
...@@ -35,6 +35,38 @@ properties:
Connected to DSI0_MIPI_DSI_PLL_VDDA0P9 pin for sc7180 target and
connected to VDDA_MIPI_DSI_0_PLL_0P9 pin for sdm845 target
qcom,phy-rescode-offset-top:
$ref: /schemas/types.yaml#/definitions/int8-array
minItems: 5
maxItems: 5
description:
Integer array of offsets for the pull-up legs rescode for all five lanes.
Offsets the drive strength from the calibrated value in an increasing
manner; -32 is the weakest and +31 is the strongest.
items:
minimum: -32
maximum: 31
qcom,phy-rescode-offset-bot:
$ref: /schemas/types.yaml#/definitions/int8-array
minItems: 5
maxItems: 5
description:
Integer array of offsets for the pull-down legs rescode for all five lanes.
Offsets the drive strength from the calibrated value in a decreasing
manner; -32 is the weakest and +31 is the strongest.
items:
minimum: -32
maximum: 31
qcom,phy-drive-ldo-level:
$ref: "/schemas/types.yaml#/definitions/uint32"
description:
The PHY LDO has an amplitude tuning feature to adjust the LDO output
for the HSTX drive. Use supported levels (mV) to offset the drive level
from the default value.
enum: [ 375, 400, 425, 450, 475, 500 ]
required:
- compatible
- reg
...@@ -64,5 +96,9 @@ examples:
clocks = <&dispcc DISP_CC_MDSS_AHB_CLK>,
<&rpmhcc RPMH_CXO_CLK>;
clock-names = "iface", "ref";
qcom,phy-rescode-offset-top = /bits/ 8 <0 0 0 0 0>;
qcom,phy-rescode-offset-bot = /bits/ 8 <0 0 0 0 0>;
qcom,phy-drive-ldo-level = <400>;
};
...
...@@ -66,7 +66,6 @@ msm-y := \
disp/dpu1/dpu_hw_top.o \
disp/dpu1/dpu_hw_util.o \
disp/dpu1/dpu_hw_vbif.o \
disp/dpu1/dpu_io_util.o \
disp/dpu1/dpu_kms.o \
disp/dpu1/dpu_mdss.o \
disp/dpu1/dpu_plane.o \
...@@ -103,6 +102,7 @@ msm-$(CONFIG_DRM_MSM_GPU_STATE) += adreno/a6xx_gpu_state.o
msm-$(CONFIG_DRM_MSM_DP)+= dp/dp_aux.o \
dp/dp_catalog.o \
dp/dp_clk_util.o \
dp/dp_ctrl.o \
dp/dp_display.o \
dp/dp_drm.o \
......
...@@ -284,17 +284,6 @@ void dpu_core_perf_crtc_release_bw(struct drm_crtc *crtc)
}
}
static int _dpu_core_perf_set_core_clk_rate(struct dpu_kms *kms, u64 rate)
{
struct dss_clk *core_clk = kms->perf.core_clk;
if (core_clk->max_rate && (rate > core_clk->max_rate))
rate = core_clk->max_rate;
core_clk->rate = rate;
return dev_pm_opp_set_rate(&kms->pdev->dev, core_clk->rate);
}
static u64 _dpu_core_perf_get_core_clk_rate(struct dpu_kms *kms)
{
u64 clk_rate = kms->perf.perf_tune.min_core_clk;
...@@ -306,7 +295,7 @@ static u64 _dpu_core_perf_get_core_clk_rate(struct dpu_kms *kms)
dpu_cstate = to_dpu_crtc_state(crtc->state);
clk_rate = max(dpu_cstate->new_perf.core_clk_rate,
clk_rate);
clk_rate = clk_round_rate(kms->perf.core_clk->clk,
clk_rate = clk_round_rate(kms->perf.core_clk,
clk_rate);
}
}
...@@ -405,10 +394,10 @@ int dpu_core_perf_crtc_update(struct drm_crtc *crtc,
trace_dpu_core_perf_update_clk(kms->dev, stop_req, clk_rate);
ret = _dpu_core_perf_set_core_clk_rate(kms, clk_rate);
clk_rate = min(clk_rate, kms->perf.max_core_clk_rate);
ret = dev_pm_opp_set_rate(&kms->pdev->dev, clk_rate);
if (ret) {
DPU_ERROR("failed to set %s clock rate %llu\n",
kms->perf.core_clk->clk_name, clk_rate);
DPU_ERROR("failed to set core clock rate %llu\n", clk_rate);
return ret;
}
...@@ -529,13 +518,13 @@ void dpu_core_perf_destroy(struct dpu_core_perf *perf)
int dpu_core_perf_init(struct dpu_core_perf *perf,
struct drm_device *dev,
struct dpu_mdss_cfg *catalog,
struct dss_clk *core_clk)
struct clk *core_clk)
{
perf->dev = dev;
perf->catalog = catalog;
perf->core_clk = core_clk;
perf->max_core_clk_rate = core_clk->max_rate;
perf->max_core_clk_rate = clk_get_rate(core_clk);
if (!perf->max_core_clk_rate) {
DPU_DEBUG("optional max core clk rate, use default\n");
perf->max_core_clk_rate = DPU_PERF_DEFAULT_MAX_CORE_CLK_RATE;
......
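The hunks above drop the driver-private dss_clk wrapper in favour of the common clk and OPP APIs. As a rough, illustrative sketch (not the driver code itself; the function name and the max_rate parameter are made up for this note), the resulting pattern of clamping, rounding and handing the rate to the OPP core looks like this:

#include <linux/clk.h>
#include <linux/minmax.h>
#include <linux/pm_opp.h>

/* Illustrative only: clamp to a caller-supplied maximum, round to a rate the
 * clock can actually provide, then let the OPP core program the clock and
 * the matching performance vote.
 */
static int example_set_core_clk_rate(struct device *dev, struct clk *core_clk,
				     unsigned long rate, unsigned long max_rate)
{
	long rounded;

	rate = min(rate, max_rate);
	rounded = clk_round_rate(core_clk, rate);
	if (rounded < 0)
		return rounded;

	return dev_pm_opp_set_rate(dev, rounded);
}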
...@@ -56,7 +56,7 @@ struct dpu_core_perf_tune {
* @dev: Pointer to drm device
* @debugfs_root: top level debug folder
* @catalog: Pointer to catalog configuration
* @core_clk: Pointer to core clock structure
* @core_clk: Pointer to the core clock
* @core_clk_rate: current core clock rate
* @max_core_clk_rate: maximum allowable core clock rate
* @perf_tune: debug control for performance tuning
...@@ -69,7 +69,7 @@ struct dpu_core_perf {
struct drm_device *dev;
struct dentry *debugfs_root;
struct dpu_mdss_cfg *catalog;
struct dss_clk *core_clk;
struct clk *core_clk;
u64 core_clk_rate;
u64 max_core_clk_rate;
struct dpu_core_perf_tune perf_tune;
...@@ -120,7 +120,7 @@ void dpu_core_perf_destroy(struct dpu_core_perf *perf);
int dpu_core_perf_init(struct dpu_core_perf *perf,
struct drm_device *dev,
struct dpu_mdss_cfg *catalog,
struct dss_clk *core_clk);
struct clk *core_clk);
struct dpu_kms;
......
...@@ -408,7 +408,7 @@ static void _dpu_crtc_blend_setup_mixer(struct drm_crtc *crtc,
if (ctl->ops.set_active_pipes)
ctl->ops.set_active_pipes(ctl, fetch_active);
_dpu_crtc_program_lm_output_roi(crtc);
}
/**
......
...@@ -127,7 +127,6 @@ enum dpu_enc_rc_states {
* Virtual encoder registers itself with the DRM Framework as the encoder.
* @base: drm_encoder base class for registration with DRM
* @enc_spinlock: Virtual-Encoder-Wide Spin Lock for IRQ purposes
* @bus_scaling_client: Client handle to the bus scaling interface
* @enabled: True if the encoder is active, protected by enc_lock
* @num_phys_encs: Actual number of physical encoders contained.
* @phys_encs: Container of physical encoders managed.
...@@ -144,6 +143,7 @@ enum dpu_enc_rc_states {
* link between encoder/crtc. However in this case we need
* to track crtc in the disable() hook which is called
* _after_ encoder_mask is cleared.
* @connector: If a mode is set, cached pointer to the active connector
* @crtc_kickoff_cb: Callback into CRTC that will flush & start
* all CTL paths
* @crtc_kickoff_cb_data: Opaque user data given to crtc_kickoff_cb
...@@ -168,12 +168,10 @@ enum dpu_enc_rc_states {
* @vsync_event_work: worker to handle vsync event for autorefresh
* @topology: topology of the display
* @idle_timeout: idle timeout duration in milliseconds
* @dp: msm_dp pointer, for DP encoders
*/
struct dpu_encoder_virt {
struct drm_encoder base;
spinlock_t enc_spinlock;
uint32_t bus_scaling_client;
bool enabled;
...@@ -186,6 +184,7 @@ struct dpu_encoder_virt {
bool intfs_swapped;
struct drm_crtc *crtc;
struct drm_connector *connector;
struct dentry *debugfs_root;
struct mutex enc_lock;
...@@ -207,8 +206,6 @@ struct dpu_encoder_virt {
struct msm_display_topology topology;
u32 idle_timeout;
struct msm_dp *dp;
};
#define to_dpu_encoder_virt(x) container_of(x, struct dpu_encoder_virt, base)
...@@ -420,26 +417,6 @@ int dpu_encoder_get_linecount(struct drm_encoder *drm_enc)
return linecount;
}
void dpu_encoder_get_hw_resources(struct drm_encoder *drm_enc,
struct dpu_encoder_hw_resources *hw_res)
{
struct dpu_encoder_virt *dpu_enc = NULL;
int i = 0;
dpu_enc = to_dpu_encoder_virt(drm_enc);
DPU_DEBUG_ENC(dpu_enc, "\n");
/* Query resources used by phys encs, expected to be without overlap */
memset(hw_res, 0, sizeof(*hw_res));
for (i = 0; i < dpu_enc->num_phys_encs; i++) {
struct dpu_encoder_phys *phys = dpu_enc->phys_encs[i];
if (phys->ops.get_hw_resources)
phys->ops.get_hw_resources(phys, hw_res);
}
}
static void dpu_encoder_destroy(struct drm_encoder *drm_enc)
{
struct dpu_encoder_virt *dpu_enc = NULL;
...@@ -607,10 +584,6 @@ static int dpu_encoder_virt_atomic_check(
if (phys->ops.atomic_check)
ret = phys->ops.atomic_check(phys, crtc_state,
conn_state);
else if (phys->ops.mode_fixup)
if (!phys->ops.mode_fixup(phys, mode, adj_mode))
ret = -EINVAL;
if (ret) {
DPU_ERROR_ENC(dpu_enc,
"mode unsupported, phys idx %d\n", i);
...@@ -956,16 +929,13 @@ static int dpu_encoder_resource_control(struct drm_encoder *drm_enc,
return 0;
}
static void dpu_encoder_virt_mode_set(struct drm_encoder *drm_enc,
struct drm_display_mode *mode,
struct drm_display_mode *adj_mode)
static void dpu_encoder_virt_atomic_mode_set(struct drm_encoder *drm_enc,
struct drm_crtc_state *crtc_state,
struct drm_connector_state *conn_state)
{
struct dpu_encoder_virt *dpu_enc;
struct msm_drm_private *priv;
struct dpu_kms *dpu_kms;
struct list_head *connector_list;
struct drm_connector *conn = NULL, *conn_iter;
struct drm_crtc *drm_crtc;
struct dpu_crtc_state *cstate;
struct dpu_global_state *global_state;
struct dpu_hw_blk *hw_pp[MAX_CHANNELS_PER_ENC];
...@@ -973,7 +943,7 @@ static void dpu_encoder_virt_mode_set(struct drm_encoder *drm_enc,
struct dpu_hw_blk *hw_lm[MAX_CHANNELS_PER_ENC];
struct dpu_hw_blk *hw_dspp[MAX_CHANNELS_PER_ENC] = { NULL };
int num_lm, num_ctl, num_pp;
int i, j;
int i;
if (!drm_enc) {
DPU_ERROR("invalid encoder\n");
...@@ -985,7 +955,6 @@ static void dpu_encoder_virt_mode_set(struct drm_encoder *drm_enc,
priv = drm_enc->dev->dev_private;
dpu_kms = to_dpu_kms(priv->kms);
connector_list = &dpu_kms->dev->mode_config.connector_list;
global_state = dpu_kms_get_existing_global_state(dpu_kms);
if (IS_ERR_OR_NULL(global_state)) {
...@@ -995,22 +964,6 @@ static void dpu_encoder_virt_mode_set(struct drm_encoder *drm_enc,
trace_dpu_enc_mode_set(DRMID(drm_enc));
list_for_each_entry(conn_iter, connector_list, head)
if (conn_iter->encoder == drm_enc)
conn = conn_iter;
if (!conn) {
DPU_ERROR_ENC(dpu_enc, "failed to find attached connector\n");
return;
} else if (!conn->state) {
DPU_ERROR_ENC(dpu_enc, "invalid connector state\n");
return;
}
drm_for_each_crtc(drm_crtc, drm_enc->dev)
if (drm_crtc->state->encoder_mask & drm_encoder_mask(drm_enc))
break;
/* Query resource that have been reserved in atomic check step. */
num_pp = dpu_rm_get_assigned_resources(&dpu_kms->rm, global_state,
drm_enc->base.id, DPU_HW_BLK_PINGPONG, hw_pp,
...@@ -1027,7 +980,7 @@ static void dpu_encoder_virt_mode_set(struct drm_encoder *drm_enc,
dpu_enc->hw_pp[i] = i < num_pp ? to_dpu_hw_pingpong(hw_pp[i])
: NULL;
cstate = to_dpu_crtc_state(drm_crtc->state);
cstate = to_dpu_crtc_state(crtc_state);
for (i = 0; i < num_lm; i++) {
int ctl_idx = (i < num_ctl) ? i : (num_ctl-1);
...@@ -1039,9 +992,9 @@ static void dpu_encoder_virt_mode_set(struct drm_encoder *drm_enc,
cstate->num_mixers = num_lm;
dpu_enc->connector = conn_state->connector;
for (i = 0; i < dpu_enc->num_phys_encs; i++) {
int num_blk;
struct dpu_hw_blk *hw_blk[MAX_CHANNELS_PER_ENC];
struct dpu_encoder_phys *phys = dpu_enc->phys_encs[i];
if (!dpu_enc->hw_pp[i]) {
...@@ -1059,16 +1012,8 @@ static void dpu_encoder_virt_mode_set(struct drm_encoder *drm_enc,
phys->hw_pp = dpu_enc->hw_pp[i];
phys->hw_ctl = to_dpu_hw_ctl(hw_ctl[i]);
num_blk = dpu_rm_get_assigned_resources(&dpu_kms->rm,
global_state, drm_enc->base.id, DPU_HW_BLK_INTF,
hw_blk, ARRAY_SIZE(hw_blk));
for (j = 0; j < num_blk; j++) {
struct dpu_hw_intf *hw_intf;
hw_intf = to_dpu_hw_intf(hw_blk[i]);
if (hw_intf->idx == phys->intf_idx)
phys->hw_intf = hw_intf;
}
if (phys->intf_idx >= INTF_0 && phys->intf_idx < INTF_MAX)
phys->hw_intf = dpu_rm_get_intf(&dpu_kms->rm, phys->intf_idx);
if (!phys->hw_intf) {
DPU_ERROR_ENC(dpu_enc,
...@@ -1076,9 +1021,9 @@ static void dpu_encoder_virt_mode_set(struct drm_encoder *drm_enc,
return;
}
phys->connector = conn->state->connector;
if (phys->ops.mode_set)
phys->ops.mode_set(phys, mode, adj_mode);
phys->cached_mode = crtc_state->adjusted_mode;
if (phys->ops.atomic_mode_set)
phys->ops.atomic_mode_set(phys, crtc_state, conn_state);
}
}
...@@ -1099,7 +1044,7 @@ static void _dpu_encoder_virt_enable_helper(struct drm_encoder *drm_enc)
}
if (dpu_enc->disp_info.intf_type == DRM_MODE_CONNECTOR_DisplayPort &&
if (dpu_enc->disp_info.intf_type == DRM_MODE_ENCODER_TMDS &&
dpu_enc->cur_master->hw_mdptop &&
dpu_enc->cur_master->hw_mdptop->ops.intf_audio_select)
dpu_enc->cur_master->hw_mdptop->ops.intf_audio_select(
...@@ -1109,7 +1054,7 @@ static void _dpu_encoder_virt_enable_helper(struct drm_encoder *drm_enc)
if (dpu_enc->disp_info.intf_type == DRM_MODE_ENCODER_DSI &&
!WARN_ON(dpu_enc->num_phys_encs == 0)) {
unsigned bpc = dpu_enc->phys_encs[0]->connector->display_info.bpc;
unsigned bpc = dpu_enc->connector->display_info.bpc;
for (i = 0; i < MAX_CHANNELS_PER_ENC; i++) {
if (!dpu_enc->hw_pp[i])
continue;
...@@ -1142,14 +1087,12 @@ static void dpu_encoder_virt_enable(struct drm_encoder *drm_enc)
{
struct dpu_encoder_virt *dpu_enc = NULL;
int ret = 0;
struct msm_drm_private *priv;
struct drm_display_mode *cur_mode = NULL;
dpu_enc = to_dpu_encoder_virt(drm_enc);
mutex_lock(&dpu_enc->enc_lock);
cur_mode = &dpu_enc->base.crtc->state->adjusted_mode;
priv = drm_enc->dev->dev_private;
trace_dpu_enc_enable(DRMID(drm_enc), cur_mode->hdisplay,
cur_mode->vdisplay);
...@@ -1179,7 +1122,6 @@ static void dpu_encoder_virt_enable(struct drm_encoder *drm_enc)
static void dpu_encoder_virt_disable(struct drm_encoder *drm_enc)
{
struct dpu_encoder_virt *dpu_enc = NULL;
struct msm_drm_private *priv;
int i = 0;
dpu_enc = to_dpu_encoder_virt(drm_enc);
...@@ -1188,8 +1130,6 @@ static void dpu_encoder_virt_disable(struct drm_encoder *drm_enc)
mutex_lock(&dpu_enc->enc_lock);
dpu_enc->enabled = false;
priv = drm_enc->dev->dev_private;
trace_dpu_enc_disable(DRMID(drm_enc));
/* wait for idle */
...@@ -1213,9 +1153,7 @@ static void dpu_encoder_virt_disable(struct drm_encoder *drm_enc)
dpu_encoder_resource_control(drm_enc, DPU_ENC_RC_EVENT_STOP);
for (i = 0; i < dpu_enc->num_phys_encs; i++) {
dpu_enc->phys_encs[i]->connector = NULL;
}
dpu_enc->connector = NULL;
DPU_DEBUG_ENC(dpu_enc, "encoder disabled\n");
...@@ -2093,7 +2031,7 @@ static void dpu_encoder_frame_done_timeout(struct timer_list *t)
}
static const struct drm_encoder_helper_funcs dpu_encoder_helper_funcs = {
.mode_set = dpu_encoder_virt_mode_set,
.atomic_mode_set = dpu_encoder_virt_atomic_mode_set,
.disable = dpu_encoder_virt_disable,
.enable = dpu_encoder_virt_enable,
.atomic_check = dpu_encoder_virt_atomic_check,
...@@ -2128,8 +2066,6 @@ int dpu_encoder_setup(struct drm_device *dev, struct drm_encoder *enc,
timer_setup(&dpu_enc->vsync_event_timer,
dpu_encoder_vsync_event_handler,
0);
else if (disp_info->intf_type == DRM_MODE_ENCODER_TMDS)
dpu_enc->dp = priv->dp[disp_info->h_tile_instance[0]];
INIT_DELAYED_WORK(&dpu_enc->delayed_off_work,
dpu_encoder_off_work);
......
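The conversion above moves the virtual encoder from the legacy mode_set hook to atomic_mode_set, where the DRM atomic helpers hand over the committed crtc_state and conn_state directly, so the connector-list and CRTC scans can be dropped. A minimal, hypothetical sketch of that callback shape (example_encoder and the helper-funcs table below are illustrative, not the driver's actual types):

#include <drm/drm_atomic.h>
#include <drm/drm_encoder.h>
#include <drm/drm_modeset_helper_vtables.h>

/* Illustrative only: cache the connector and adjusted mode straight from the
 * atomic state instead of walking dev->mode_config.connector_list.
 */
struct example_encoder {
	struct drm_encoder base;
	struct drm_connector *connector;
	struct drm_display_mode cached_mode;
};

static void example_atomic_mode_set(struct drm_encoder *encoder,
				    struct drm_crtc_state *crtc_state,
				    struct drm_connector_state *conn_state)
{
	struct example_encoder *enc =
		container_of(encoder, struct example_encoder, base);

	enc->connector = conn_state->connector;
	enc->cached_mode = crtc_state->adjusted_mode;
}

static const struct drm_encoder_helper_funcs example_helper_funcs = {
	.atomic_mode_set = example_atomic_mode_set,
};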
...@@ -19,21 +19,23 @@
#define IDLE_TIMEOUT (66 - 16/2)
/**
* Encoder functions and data types
* @intfs: Interfaces this encoder is using, INTF_MODE_NONE if unused
*/
struct dpu_encoder_hw_resources {
enum dpu_intf_mode intfs[INTF_MAX];
};
/**
* struct msm_display_info - defines display properties
* @intf_type: DRM_MODE_ENCODER_ type
* @capabilities: Bitmask of display flags
* @num_of_h_tiles: Number of horizontal tiles in case of split interface
* @h_tile_instance: Controller instance used per tile. Number of elements is
* based on num_of_h_tiles
* @is_te_using_watchdog_timer: Boolean to indicate watchdog TE is
* used instead of panel TE in cmd mode panels
*/
struct msm_display_info {
int intf_type;
uint32_t capabilities;
uint32_t num_of_h_tiles;
uint32_t h_tile_instance[MAX_H_TILES_PER_DISPLAY];
bool is_te_using_watchdog_timer;
};
/**
* dpu_encoder_get_hw_resources - Populate table of required hardware resources
* @encoder: encoder pointer
* @hw_res: resource table to populate with encoder required resources
*/
void dpu_encoder_get_hw_resources(struct drm_encoder *encoder,
struct dpu_encoder_hw_resources *hw_res);
/**
* dpu_encoder_assign_crtc - Link the encoder to the crtc it's assigned to
* @encoder: encoder pointer
......
...@@ -84,16 +84,12 @@ struct dpu_encoder_virt_ops {
* @is_master: Whether this phys_enc is the current master
* encoder. Can be switched at enable time. Based
* on split_role and current mode (CMD/VID).
* @mode_fixup: DRM Call. Fixup a DRM mode.
* @mode_set: DRM Call. Set a DRM mode.
* @atomic_mode_set: DRM Call. Set a DRM mode.
* This likely caches the mode, for use at enable.
* @enable: DRM Call. Enable a DRM mode.
* @disable: DRM Call. Disable mode.
* @atomic_check: DRM Call. Atomic check new DRM state.
* @destroy: DRM Call. Destroy and release resources.
* @get_hw_resources: Populate the structure with the hardware
* resources that this phys_enc is using.
* Expect no overlap between phys_encs.
* @control_vblank_irq Register/Deregister for VBLANK IRQ
* @wait_for_commit_done: Wait for hardware to have flushed the
* current pending frames to hardware
...@@ -117,20 +113,15 @@ struct dpu_encoder_phys_ops {
struct dentry *debugfs_root);
void (*prepare_commit)(struct dpu_encoder_phys *encoder);
bool (*is_master)(struct dpu_encoder_phys *encoder);
bool (*mode_fixup)(struct dpu_encoder_phys *encoder,
const struct drm_display_mode *mode,
struct drm_display_mode *adjusted_mode);
void (*mode_set)(struct dpu_encoder_phys *encoder,
struct drm_display_mode *mode,
struct drm_display_mode *adjusted_mode);
void (*atomic_mode_set)(struct dpu_encoder_phys *encoder,
struct drm_crtc_state *crtc_state,
struct drm_connector_state *conn_state);
void (*enable)(struct dpu_encoder_phys *encoder);
void (*disable)(struct dpu_encoder_phys *encoder);
int (*atomic_check)(struct dpu_encoder_phys *encoder,
struct drm_crtc_state *crtc_state,
struct drm_connector_state *conn_state);
void (*destroy)(struct dpu_encoder_phys *encoder);
void (*get_hw_resources)(struct dpu_encoder_phys *encoder,
struct dpu_encoder_hw_resources *hw_res);
int (*control_vblank_irq)(struct dpu_encoder_phys *enc, bool enable);
int (*wait_for_commit_done)(struct dpu_encoder_phys *phys_enc);
int (*wait_for_tx_complete)(struct dpu_encoder_phys *phys_enc);
...@@ -182,7 +173,6 @@ struct dpu_encoder_irq {
* tied to a specific panel / sub-panel. Abstract type, sub-classed by
* phys_vid or phys_cmd for video mode or command mode encs respectively.
* @parent: Pointer to the containing virtual encoder
* @connector: If a mode is set, cached pointer to the active connector
* @ops: Operations exposed to the virtual encoder
* @parent_ops: Callbacks exposed by the parent to the phys_enc
* @hw_mdptop: Hardware interface to the top registers
...@@ -211,7 +201,6 @@ struct dpu_encoder_irq {
*/
struct dpu_encoder_phys {
struct drm_encoder *parent;
struct drm_connector *connector;
struct dpu_encoder_phys_ops ops;
const struct dpu_encoder_virt_ops *parent_ops;
struct dpu_hw_mdp *hw_mdptop;
......
...@@ -45,15 +45,6 @@ static bool dpu_encoder_phys_cmd_is_master(struct dpu_encoder_phys *phys_enc)
return (phys_enc->split_role != ENC_ROLE_SLAVE);
}
static bool dpu_encoder_phys_cmd_mode_fixup(
struct dpu_encoder_phys *phys_enc,
const struct drm_display_mode *mode,
struct drm_display_mode *adj_mode)
{
DPU_DEBUG_CMDENC(to_dpu_encoder_phys_cmd(phys_enc), "\n");
return true;
}
static void _dpu_encoder_phys_cmd_update_intf_cfg(
struct dpu_encoder_phys *phys_enc)
{
...@@ -144,23 +135,13 @@ static void dpu_encoder_phys_cmd_underrun_irq(void *arg, int irq_idx)
phys_enc);
}
static void dpu_encoder_phys_cmd_mode_set(
struct dpu_encoder_phys *phys_enc,
struct drm_display_mode *mode,
struct drm_display_mode *adj_mode)
static void dpu_encoder_phys_cmd_atomic_mode_set(
struct dpu_encoder_phys *phys_enc,
struct drm_crtc_state *crtc_state,
struct drm_connector_state *conn_state)
{
struct dpu_encoder_phys_cmd *cmd_enc =
to_dpu_encoder_phys_cmd(phys_enc);
struct dpu_encoder_irq *irq;
if (!mode || !adj_mode) {
DPU_ERROR("invalid args\n");
return;
}
phys_enc->cached_mode = *adj_mode;
DPU_DEBUG_CMDENC(cmd_enc, "caching mode:\n");
drm_mode_debug_printmodeline(adj_mode);
irq = &phys_enc->irq[INTR_IDX_CTL_START];
irq->irq_idx = phys_enc->hw_ctl->caps->intr_start;
...@@ -534,13 +515,6 @@ static void dpu_encoder_phys_cmd_destroy(struct dpu_encoder_phys *phys_enc)
kfree(cmd_enc);
}
static void dpu_encoder_phys_cmd_get_hw_resources(
struct dpu_encoder_phys *phys_enc,
struct dpu_encoder_hw_resources *hw_res)
{
hw_res->intfs[phys_enc->intf_idx - INTF_0] = INTF_MODE_CMD;
}
static void dpu_encoder_phys_cmd_prepare_for_kickoff(
struct dpu_encoder_phys *phys_enc)
{
...@@ -682,6 +656,9 @@ static int dpu_encoder_phys_cmd_wait_for_commit_done(
if (!dpu_encoder_phys_cmd_is_master(phys_enc))
return 0;
if (phys_enc->hw_ctl->ops.is_started(phys_enc->hw_ctl))
return dpu_encoder_phys_cmd_wait_for_tx_complete(phys_enc);
return _dpu_encoder_phys_cmd_wait_for_ctl_start(phys_enc);
}
...@@ -731,12 +708,10 @@ static void dpu_encoder_phys_cmd_init_ops(
{
ops->prepare_commit = dpu_encoder_phys_cmd_prepare_commit;
ops->is_master = dpu_encoder_phys_cmd_is_master;
ops->mode_set = dpu_encoder_phys_cmd_mode_set;
ops->mode_fixup = dpu_encoder_phys_cmd_mode_fixup;
ops->atomic_mode_set = dpu_encoder_phys_cmd_atomic_mode_set;
ops->enable = dpu_encoder_phys_cmd_enable;
ops->disable = dpu_encoder_phys_cmd_disable;
ops->destroy = dpu_encoder_phys_cmd_destroy;
ops->get_hw_resources = dpu_encoder_phys_cmd_get_hw_resources;
ops->control_vblank_irq = dpu_encoder_phys_cmd_control_vblank_irq;
ops->wait_for_commit_done = dpu_encoder_phys_cmd_wait_for_commit_done;
ops->prepare_for_kickoff = dpu_encoder_phys_cmd_prepare_for_kickoff;
......
...@@ -225,19 +225,6 @@ static void programmable_fetch_config(struct dpu_encoder_phys *phys_enc,
spin_unlock_irqrestore(phys_enc->enc_spinlock, lock_flags);
}
static bool dpu_encoder_phys_vid_mode_fixup(
struct dpu_encoder_phys *phys_enc,
const struct drm_display_mode *mode,
struct drm_display_mode *adj_mode)
{
DPU_DEBUG_VIDENC(phys_enc, "\n");
/*
* Modifying mode has consequences when the mode comes back to us
*/
return true;
}
static void dpu_encoder_phys_vid_setup_timing_engine(
struct dpu_encoder_phys *phys_enc)
{
...@@ -361,19 +348,13 @@ static bool dpu_encoder_phys_vid_needs_single_flush(
return phys_enc->split_role != ENC_ROLE_SOLO;
}
static void dpu_encoder_phys_vid_mode_set(
struct dpu_encoder_phys *phys_enc,
struct drm_display_mode *mode,
struct drm_display_mode *adj_mode)
static void dpu_encoder_phys_vid_atomic_mode_set(
struct dpu_encoder_phys *phys_enc,
struct drm_crtc_state *crtc_state,
struct drm_connector_state *conn_state)
{
struct dpu_encoder_irq *irq;
if (adj_mode) {
phys_enc->cached_mode = *adj_mode;
drm_mode_debug_printmodeline(adj_mode);
DPU_DEBUG_VIDENC(phys_enc, "caching mode:\n");
}
irq = &phys_enc->irq[INTR_IDX_VSYNC];
irq->irq_idx = phys_enc->hw_intf->cap->intr_vsync;
...@@ -465,13 +446,6 @@ static void dpu_encoder_phys_vid_destroy(struct dpu_encoder_phys *phys_enc)
kfree(phys_enc);
}
static void dpu_encoder_phys_vid_get_hw_resources(
struct dpu_encoder_phys *phys_enc,
struct dpu_encoder_hw_resources *hw_res)
{
hw_res->intfs[phys_enc->intf_idx - INTF_0] = INTF_MODE_VIDEO;
}
static int dpu_encoder_phys_vid_wait_for_vblank(
struct dpu_encoder_phys *phys_enc)
{
...@@ -675,12 +649,10 @@ static int dpu_encoder_phys_vid_get_frame_count(
static void dpu_encoder_phys_vid_init_ops(struct dpu_encoder_phys_ops *ops)
{
ops->is_master = dpu_encoder_phys_vid_is_master;
ops->mode_set = dpu_encoder_phys_vid_mode_set;
ops->mode_fixup = dpu_encoder_phys_vid_mode_fixup;
ops->atomic_mode_set = dpu_encoder_phys_vid_atomic_mode_set;
ops->enable = dpu_encoder_phys_vid_enable;
ops->disable = dpu_encoder_phys_vid_disable;
ops->destroy = dpu_encoder_phys_vid_destroy;
ops->get_hw_resources = dpu_encoder_phys_vid_get_hw_resources;
ops->control_vblank_irq = dpu_encoder_phys_vid_control_vblank_irq;
ops->wait_for_commit_done = dpu_encoder_phys_vid_wait_for_commit_done;
ops->wait_for_vblank = dpu_encoder_phys_vid_wait_for_vblank;
......
...@@ -39,6 +39,7 @@
#define DPU_HW_VER_410 DPU_HW_VER(4, 1, 0) /* sdm670 v1.0 */
#define DPU_HW_VER_500 DPU_HW_VER(5, 0, 0) /* sm8150 v1.0 */
#define DPU_HW_VER_501 DPU_HW_VER(5, 0, 1) /* sm8150 v2.0 */
#define DPU_HW_VER_510 DPU_HW_VER(5, 1, 1) /* sc8180 */
#define DPU_HW_VER_600 DPU_HW_VER(6, 0, 0) /* sm8250 */
#define DPU_HW_VER_620 DPU_HW_VER(6, 2, 0) /* sc7180 v1.0 */
#define DPU_HW_VER_720 DPU_HW_VER(7, 2, 0) /* sc7280 */
...@@ -87,6 +88,7 @@ enum {
DPU_MDP_BWC,
DPU_MDP_UBWC_1_0,
DPU_MDP_UBWC_1_5,
DPU_MDP_AUDIO_SELECT,
DPU_MDP_MAX
};
...@@ -435,6 +437,8 @@ enum dpu_clk_ctrl_type {
DPU_CLK_CTRL_RGB3,
DPU_CLK_CTRL_DMA0,
DPU_CLK_CTRL_DMA1,
DPU_CLK_CTRL_DMA2,
DPU_CLK_CTRL_DMA3,
DPU_CLK_CTRL_CURSOR0,
DPU_CLK_CTRL_CURSOR1,
DPU_CLK_CTRL_INLINE_ROT0_SSPP,
...@@ -781,22 +785,6 @@ struct dpu_mdss_hw_cfg_handler {
void (*cfg_init)(struct dpu_mdss_cfg *dpu_cfg);
};
/*
* Access Macros
*/
#define BLK_MDP(s) ((s)->mdp)
#define BLK_CTL(s) ((s)->ctl)
#define BLK_VIG(s) ((s)->vig)
#define BLK_RGB(s) ((s)->rgb)
#define BLK_DMA(s) ((s)->dma)
#define BLK_CURSOR(s) ((s)->cursor)
#define BLK_MIXER(s) ((s)->mixer)
#define BLK_PINGPONG(s) ((s)->pingpong)
#define BLK_INTF(s) ((s)->intf)
#define BLK_AD(s) ((s)->ad)
#define BLK_DSPP(s) ((s)->dspp)
#define BLK_MERGE3d(s) ((s)->merge_3d)
/**
* dpu_hw_catalog_init - dpu hardware catalog init API retrieves
* hardcoded target specific catalog information in config structure
......
...@@ -92,6 +92,11 @@ static inline void dpu_hw_ctl_trigger_start(struct dpu_hw_ctl *ctx)
DPU_REG_WRITE(&ctx->hw, CTL_START, 0x1);
}
static inline bool dpu_hw_ctl_is_started(struct dpu_hw_ctl *ctx)
{
return !!(DPU_REG_READ(&ctx->hw, CTL_START) & BIT(0));
}
static inline void dpu_hw_ctl_trigger_pending(struct dpu_hw_ctl *ctx)
{
trace_dpu_hw_ctl_trigger_prepare(ctx->pending_flush_mask,
...@@ -587,6 +592,7 @@ static void _setup_ctl_ops(struct dpu_hw_ctl_ops *ops,
ops->get_pending_flush = dpu_hw_ctl_get_pending_flush;
ops->get_flush_register = dpu_hw_ctl_get_flush_register;
ops->trigger_start = dpu_hw_ctl_trigger_start;
ops->is_started = dpu_hw_ctl_is_started;
ops->trigger_pending = dpu_hw_ctl_trigger_pending;
ops->reset = dpu_hw_ctl_reset_control;
ops->wait_reset_status = dpu_hw_ctl_wait_reset_status;
......
...@@ -61,6 +61,13 @@ struct dpu_hw_ctl_ops {
*/
void (*trigger_start)(struct dpu_hw_ctl *ctx);
/**
* check if the ctl is started
* @ctx : ctl path ctx pointer
* @Return: true if started, false if stopped
*/
bool (*is_started)(struct dpu_hw_ctl *ctx);
/**
* kickoff prepare is in progress hw operation for sw
* controlled interfaces: DSI cmd mode and WB interface
......
...@@ -13,7 +13,7 @@
#include "dpu_hw_mdss.h"
#include "dpu_trace.h"
/**
/*
* Register offsets in MDSS register file for the interrupt registers
* w.r.t. to the MDP base
*/
...@@ -23,6 +23,7 @@
#define MDP_INTF_2_OFF 0x6B000
#define MDP_INTF_3_OFF 0x6B800
#define MDP_INTF_4_OFF 0x6C000
#define MDP_INTF_5_OFF 0x6C800
#define MDP_AD4_0_OFF 0x7C000
#define MDP_AD4_1_OFF 0x7D000
#define MDP_AD4_INTR_EN_OFF 0x41c
...@@ -93,6 +94,11 @@ static const struct dpu_intr_reg dpu_intr_set[] = {
MDP_INTF_4_OFF+INTF_INTR_EN,
MDP_INTF_4_OFF+INTF_INTR_STATUS
},
{
MDP_INTF_5_OFF+INTF_INTR_CLEAR,
MDP_INTF_5_OFF+INTF_INTR_EN,
MDP_INTF_5_OFF+INTF_INTR_STATUS
},
{
MDP_AD4_0_OFF + MDP_AD4_INTR_CLEAR_OFF,
MDP_AD4_0_OFF + MDP_AD4_INTR_EN_OFF,
...@@ -140,7 +146,7 @@ static const struct dpu_intr_reg dpu_intr_set[] = {
/**
* dpu_core_irq_callback_handler - dispatch core interrupts
* @arg: private data of callback handler
* @dpu_kms: Pointer to DPU's KMS structure
* @irq_idx: interrupt index
*/
static void dpu_core_irq_callback_handler(struct dpu_kms *dpu_kms, int irq_idx)
......
...@@ -22,6 +22,7 @@ enum dpu_hw_intr_reg {
MDP_INTF2_INTR,
MDP_INTF3_INTR,
MDP_INTF4_INTR,
MDP_INTF5_INTR,
MDP_AD4_0_INTR,
MDP_AD4_1_INTR,
MDP_INTF0_7xxx_INTR,
......
...@@ -78,7 +78,6 @@ struct dpu_hw_intf_ops {
};
struct dpu_hw_intf {
struct dpu_hw_blk base;
struct dpu_hw_blk_reg_map hw;
/* intf */
...@@ -90,16 +89,6 @@ struct dpu_hw_intf {
struct dpu_hw_intf_ops ops;
};
/**
* to_dpu_hw_intf - convert base object dpu_hw_base to container
* @hw: Pointer to base hardware block
* return: Pointer to hardware block container
*/
static inline struct dpu_hw_intf *to_dpu_hw_intf(struct dpu_hw_blk *hw)
{
return container_of(hw, struct dpu_hw_intf, base);
}
/**
* dpu_hw_intf_init(): Initializes the intf driver for the passed
* interface idx.
......
...@@ -268,7 +268,9 @@ static void _setup_mdp_ops(struct dpu_hw_mdp_ops *ops,
ops->get_danger_status = dpu_hw_get_danger_status;
ops->setup_vsync_source = dpu_hw_setup_vsync_source;
ops->get_safe_status = dpu_hw_get_safe_status;
ops->intf_audio_select = dpu_hw_intf_audio_select;
if (cap & BIT(DPU_MDP_AUDIO_SELECT))
ops->intf_audio_select = dpu_hw_intf_audio_select;
}
static const struct dpu_mdp_cfg *_top_offset(enum dpu_mdp mdp,
......
...@@ -271,6 +271,10 @@ static int dpu_kms_debugfs_init(struct msm_kms *kms, struct drm_minor *minor)
if (!p)
return -EINVAL;
/* Only create a set of debugfs for the primary node, ignore render nodes */
if (minor->type != DRM_MINOR_PRIMARY)
return 0;
dev = dpu_kms->dev;
priv = dev->dev_private;
...@@ -991,29 +995,15 @@ static int _dpu_kms_mmu_init(struct dpu_kms *dpu_kms)
return 0;
}
static struct dss_clk *_dpu_kms_get_clk(struct dpu_kms *dpu_kms,
char *clock_name)
{
struct dss_module_power *mp = &dpu_kms->mp;
int i;
for (i = 0; i < mp->num_clk; i++) {
if (!strcmp(mp->clk_config[i].clk_name, clock_name))
return &mp->clk_config[i];
}
return NULL;
}
u64 dpu_kms_get_clk_rate(struct dpu_kms *dpu_kms, char *clock_name)
{
struct dss_clk *clk;
struct clk *clk;
clk = _dpu_kms_get_clk(dpu_kms, clock_name);
clk = msm_clk_bulk_get_clock(dpu_kms->clocks, dpu_kms->num_clocks, clock_name);
if (!clk)
return -EINVAL;
return clk_get_rate(clk->clk);
return clk_get_rate(clk);
}
static int dpu_kms_hw_init(struct msm_kms *kms)
...@@ -1125,7 +1115,7 @@ static int dpu_kms_hw_init(struct msm_kms *kms)
}
rc = dpu_core_perf_init(&dpu_kms->perf, dev, dpu_kms->catalog,
_dpu_kms_get_clk(dpu_kms, "core"));
msm_clk_bulk_get_clock(dpu_kms->clocks, dpu_kms->num_clocks, "core"));
if (rc) {
DPU_ERROR("failed to init perf %d\n", rc);
goto perf_err;
...@@ -1212,7 +1202,6 @@ static int dpu_bind(struct device *dev, struct device *master, void *data)
struct platform_device *pdev = to_platform_device(dev);
struct drm_device *ddev = priv->dev;
struct dpu_kms *dpu_kms;
struct dss_module_power *mp;
int ret = 0;
dpu_kms = devm_kzalloc(&pdev->dev, sizeof(*dpu_kms), GFP_KERNEL);
...@@ -1229,12 +1218,12 @@ static int dpu_bind(struct device *dev, struct device *master, void *data)
return ret;
}
mp = &dpu_kms->mp;
ret = msm_dss_parse_clock(pdev, mp);
if (ret) {
ret = devm_clk_bulk_get_all(&pdev->dev, &dpu_kms->clocks);
if (ret < 0) {
DPU_ERROR("failed to parse clocks, ret=%d\n", ret);
return ret;
}
dpu_kms->num_clocks = ret;
platform_set_drvdata(pdev, dpu_kms);
...@@ -1258,11 +1247,6 @@ static void dpu_unbind(struct device *dev, struct device *master, void *data)
{
struct platform_device *pdev = to_platform_device(dev);
struct dpu_kms *dpu_kms = platform_get_drvdata(pdev);
struct dss_module_power *mp = &dpu_kms->mp;
msm_dss_put_clk(mp->clk_config, mp->num_clk);
devm_kfree(&pdev->dev, mp->clk_config);
mp->num_clk = 0;
if (dpu_kms->rpm_enabled)
pm_runtime_disable(&pdev->dev);
...@@ -1286,21 +1270,18 @@ static int dpu_dev_remove(struct platform_device *pdev)
static int __maybe_unused dpu_runtime_suspend(struct device *dev)
{
int i, rc = -1;
int i;
struct platform_device *pdev = to_platform_device(dev);
struct dpu_kms *dpu_kms = platform_get_drvdata(pdev);
struct dss_module_power *mp = &dpu_kms->mp;
/* Drop the performance state vote */
dev_pm_opp_set_rate(dev, 0);
rc = msm_dss_enable_clk(mp->clk_config, mp->num_clk, false);
if (rc)
DPU_ERROR("clock disable failed rc:%d\n", rc);
clk_bulk_disable_unprepare(dpu_kms->num_clocks, dpu_kms->clocks);
for (i = 0; i < dpu_kms->num_paths; i++)
icc_set_bw(dpu_kms->path[i], 0, 0);
return rc;
return 0;
}
static int __maybe_unused dpu_runtime_resume(struct device *dev)
...@@ -1310,7 +1291,6 @@ static int __maybe_unused dpu_runtime_resume(struct device *dev)
struct dpu_kms *dpu_kms = platform_get_drvdata(pdev);
struct drm_encoder *encoder;
struct drm_device *ddev;
struct dss_module_power *mp = &dpu_kms->mp;
int i;
ddev = dpu_kms->dev;
...@@ -1320,7 +1300,7 @@ static int __maybe_unused dpu_runtime_resume(struct device *dev)
for (i = 0; i < dpu_kms->num_paths; i++)
icc_set_bw(dpu_kms->path[i], 0, Bps_to_icc(MIN_IB_BW));
rc = msm_dss_enable_clk(mp->clk_config, mp->num_clk, true);
rc = clk_bulk_prepare_enable(dpu_kms->num_clocks, dpu_kms->clocks);
if (rc) {
DPU_ERROR("clock enable failed rc:%d\n", rc);
return rc;
...@@ -1341,9 +1321,11 @@ static const struct dev_pm_ops dpu_pm_ops = {
};
const struct of_device_id dpu_dt_match[] = {
{ .compatible = "qcom,msm8998-dpu", },
{ .compatible = "qcom,sdm845-dpu", },
{ .compatible = "qcom,sc7180-dpu", },
{ .compatible = "qcom,sc7280-dpu", },
{ .compatible = "qcom,sc8180x-dpu", },
{ .compatible = "qcom,sm8150-dpu", },
{ .compatible = "qcom,sm8250-dpu", },
{}
......
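The dpu_kms.c changes above switch clock handling to the generic clk_bulk helpers. A small, hypothetical sketch of that pattern (example_kms and the function names are made up; the driver's real structures differ):

#include <linux/clk.h>
#include <linux/device.h>

struct example_kms {
	struct clk_bulk_data *clocks;
	size_t num_clocks;
};

/* Illustrative only: devm_clk_bulk_get_all() returns how many clocks were
 * found in DT; the whole set is then gated/ungated with a single call.
 */
static int example_init_clocks(struct device *dev, struct example_kms *kms)
{
	int ret;

	ret = devm_clk_bulk_get_all(dev, &kms->clocks);
	if (ret < 0)
		return ret;
	kms->num_clocks = ret;

	return clk_bulk_prepare_enable(kms->num_clocks, kms->clocks);
}

static void example_fini_clocks(struct example_kms *kms)
{
	clk_bulk_disable_unprepare(kms->num_clocks, kms->clocks);
}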
...@@ -21,7 +21,6 @@
#include "dpu_hw_lm.h"
#include "dpu_hw_interrupts.h"
#include "dpu_hw_top.h"
#include "dpu_io_util.h"
#include "dpu_rm.h"
#include "dpu_core_perf.h"
...@@ -113,7 +112,8 @@ struct dpu_kms {
struct platform_device *pdev;
bool rpm_enabled;
struct dss_module_power mp;
struct clk_bulk_data *clocks;
size_t num_clocks;
/* reference count bandwidth requests, so we know when we can
* release bandwidth. Each atomic update increments, and frame-
...@@ -144,7 +144,6 @@ struct dpu_global_state {
uint32_t pingpong_to_enc_id[PINGPONG_MAX - PINGPONG_0];
uint32_t mixer_to_enc_id[LM_MAX - LM_0];
uint32_t ctl_to_enc_id[CTL_MAX - CTL_0];
uint32_t intf_to_enc_id[INTF_MAX - INTF_0];
uint32_t dspp_to_enc_id[DSPP_MAX - DSPP_0];
};
......
...@@ -29,7 +29,8 @@ struct dpu_irq_controller { ...@@ -29,7 +29,8 @@ struct dpu_irq_controller {
struct dpu_mdss { struct dpu_mdss {
struct msm_mdss base; struct msm_mdss base;
void __iomem *mmio; void __iomem *mmio;
struct dss_module_power mp; struct clk_bulk_data *clocks;
size_t num_clocks;
struct dpu_irq_controller irq_controller; struct dpu_irq_controller irq_controller;
}; };
...@@ -136,10 +137,9 @@ static void _dpu_mdss_irq_domain_fini(struct dpu_mdss *dpu_mdss) ...@@ -136,10 +137,9 @@ static void _dpu_mdss_irq_domain_fini(struct dpu_mdss *dpu_mdss)
static int dpu_mdss_enable(struct msm_mdss *mdss) static int dpu_mdss_enable(struct msm_mdss *mdss)
{ {
struct dpu_mdss *dpu_mdss = to_dpu_mdss(mdss); struct dpu_mdss *dpu_mdss = to_dpu_mdss(mdss);
struct dss_module_power *mp = &dpu_mdss->mp;
int ret; int ret;
ret = msm_dss_enable_clk(mp->clk_config, mp->num_clk, true); ret = clk_bulk_prepare_enable(dpu_mdss->num_clocks, dpu_mdss->clocks);
if (ret) { if (ret) {
DPU_ERROR("clock enable failed, ret:%d\n", ret); DPU_ERROR("clock enable failed, ret:%d\n", ret);
return ret; return ret;
...@@ -174,21 +174,16 @@ static int dpu_mdss_enable(struct msm_mdss *mdss) ...@@ -174,21 +174,16 @@ static int dpu_mdss_enable(struct msm_mdss *mdss)
static int dpu_mdss_disable(struct msm_mdss *mdss) static int dpu_mdss_disable(struct msm_mdss *mdss)
{ {
struct dpu_mdss *dpu_mdss = to_dpu_mdss(mdss); struct dpu_mdss *dpu_mdss = to_dpu_mdss(mdss);
struct dss_module_power *mp = &dpu_mdss->mp;
int ret;
ret = msm_dss_enable_clk(mp->clk_config, mp->num_clk, false); clk_bulk_disable_unprepare(dpu_mdss->num_clocks, dpu_mdss->clocks);
if (ret)
DPU_ERROR("clock disable failed, ret:%d\n", ret);
return ret; return 0;
} }
static void dpu_mdss_destroy(struct msm_mdss *mdss) static void dpu_mdss_destroy(struct msm_mdss *mdss)
{ {
struct platform_device *pdev = to_platform_device(mdss->dev); struct platform_device *pdev = to_platform_device(mdss->dev);
struct dpu_mdss *dpu_mdss = to_dpu_mdss(mdss); struct dpu_mdss *dpu_mdss = to_dpu_mdss(mdss);
struct dss_module_power *mp = &dpu_mdss->mp;
int irq; int irq;
pm_runtime_suspend(mdss->dev); pm_runtime_suspend(mdss->dev);
...@@ -196,8 +191,6 @@ static void dpu_mdss_destroy(struct msm_mdss *mdss) ...@@ -196,8 +191,6 @@ static void dpu_mdss_destroy(struct msm_mdss *mdss)
_dpu_mdss_irq_domain_fini(dpu_mdss); _dpu_mdss_irq_domain_fini(dpu_mdss);
irq = platform_get_irq(pdev, 0); irq = platform_get_irq(pdev, 0);
irq_set_chained_handler_and_data(irq, NULL, NULL); irq_set_chained_handler_and_data(irq, NULL, NULL);
msm_dss_put_clk(mp->clk_config, mp->num_clk);
devm_kfree(&pdev->dev, mp->clk_config);
if (dpu_mdss->mmio) if (dpu_mdss->mmio)
devm_iounmap(&pdev->dev, dpu_mdss->mmio); devm_iounmap(&pdev->dev, dpu_mdss->mmio);
...@@ -214,7 +207,6 @@ int dpu_mdss_init(struct platform_device *pdev) ...@@ -214,7 +207,6 @@ int dpu_mdss_init(struct platform_device *pdev)
{ {
struct msm_drm_private *priv = platform_get_drvdata(pdev); struct msm_drm_private *priv = platform_get_drvdata(pdev);
struct dpu_mdss *dpu_mdss; struct dpu_mdss *dpu_mdss;
struct dss_module_power *mp;
int ret; int ret;
int irq; int irq;
...@@ -228,12 +220,12 @@ int dpu_mdss_init(struct platform_device *pdev) ...@@ -228,12 +220,12 @@ int dpu_mdss_init(struct platform_device *pdev)
DRM_DEBUG("mapped mdss address space @%pK\n", dpu_mdss->mmio); DRM_DEBUG("mapped mdss address space @%pK\n", dpu_mdss->mmio);
mp = &dpu_mdss->mp; ret = devm_clk_bulk_get_all(&pdev->dev, &dpu_mdss->clocks);
ret = msm_dss_parse_clock(pdev, mp); if (ret < 0) {
if (ret) {
DPU_ERROR("failed to parse clocks, ret=%d\n", ret); DPU_ERROR("failed to parse clocks, ret=%d\n", ret);
goto clk_parse_err; goto clk_parse_err;
} }
dpu_mdss->num_clocks = ret;
dpu_mdss->base.dev = &pdev->dev; dpu_mdss->base.dev = &pdev->dev;
dpu_mdss->base.funcs = &mdss_funcs; dpu_mdss->base.funcs = &mdss_funcs;
...@@ -260,9 +252,7 @@ int dpu_mdss_init(struct platform_device *pdev) ...@@ -260,9 +252,7 @@ int dpu_mdss_init(struct platform_device *pdev)
irq_error: irq_error:
_dpu_mdss_irq_domain_fini(dpu_mdss); _dpu_mdss_irq_domain_fini(dpu_mdss);
irq_domain_error: irq_domain_error:
msm_dss_put_clk(mp->clk_config, mp->num_clk);
clk_parse_err: clk_parse_err:
devm_kfree(&pdev->dev, mp->clk_config);
if (dpu_mdss->mmio) if (dpu_mdss->mmio)
devm_iounmap(&pdev->dev, dpu_mdss->mmio); devm_iounmap(&pdev->dev, dpu_mdss->mmio);
dpu_mdss->mmio = NULL; dpu_mdss->mmio = NULL;
......
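For dpu_mdss the hand-rolled msm_dss_parse_clock() is dropped in favour of devm_clk_bulk_get_all(), which fetches every clock listed in the node's clock-names property and returns how many it found. A short probe-time sketch, assuming the dpu_mdss fields introduced above:

#include <linux/clk.h>
#include <linux/platform_device.h>

static int dpu_mdss_get_clocks_sketch(struct platform_device *pdev,
                                      struct dpu_mdss *dpu_mdss)
{
        int ret;

        /* allocates the clk_bulk_data array; negative errno on failure */
        ret = devm_clk_bulk_get_all(&pdev->dev, &dpu_mdss->clocks);
        if (ret < 0)
                return ret;

        dpu_mdss->num_clocks = ret;
        return 0;
}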
...@@ -28,13 +28,20 @@ static inline bool reserved_by_other(uint32_t *res_map, int idx, ...@@ -28,13 +28,20 @@ static inline bool reserved_by_other(uint32_t *res_map, int idx,
*/ */
struct dpu_rm_requirements { struct dpu_rm_requirements {
struct msm_display_topology topology; struct msm_display_topology topology;
struct dpu_encoder_hw_resources hw_res;
}; };
int dpu_rm_destroy(struct dpu_rm *rm) int dpu_rm_destroy(struct dpu_rm *rm)
{ {
int i; int i;
for (i = 0; i < ARRAY_SIZE(rm->dspp_blks); i++) {
struct dpu_hw_dspp *hw;
if (rm->dspp_blks[i]) {
hw = to_dpu_hw_dspp(rm->dspp_blks[i]);
dpu_hw_dspp_destroy(hw);
}
}
for (i = 0; i < ARRAY_SIZE(rm->pingpong_blks); i++) { for (i = 0; i < ARRAY_SIZE(rm->pingpong_blks); i++) {
struct dpu_hw_pingpong *hw; struct dpu_hw_pingpong *hw;
...@@ -67,14 +74,8 @@ int dpu_rm_destroy(struct dpu_rm *rm) ...@@ -67,14 +74,8 @@ int dpu_rm_destroy(struct dpu_rm *rm)
dpu_hw_ctl_destroy(hw); dpu_hw_ctl_destroy(hw);
} }
} }
for (i = 0; i < ARRAY_SIZE(rm->intf_blks); i++) { for (i = 0; i < ARRAY_SIZE(rm->hw_intf); i++)
struct dpu_hw_intf *hw; dpu_hw_intf_destroy(rm->hw_intf[i]);
if (rm->intf_blks[i]) {
hw = to_dpu_hw_intf(rm->intf_blks[i]);
dpu_hw_intf_destroy(hw);
}
}
return 0; return 0;
} }
...@@ -108,24 +109,12 @@ int dpu_rm_init(struct dpu_rm *rm, ...@@ -108,24 +109,12 @@ int dpu_rm_init(struct dpu_rm *rm,
continue; continue;
} }
hw = dpu_hw_lm_init(lm->id, mmio, cat); hw = dpu_hw_lm_init(lm->id, mmio, cat);
if (IS_ERR_OR_NULL(hw)) { if (IS_ERR(hw)) {
rc = PTR_ERR(hw); rc = PTR_ERR(hw);
DPU_ERROR("failed lm object creation: err %d\n", rc); DPU_ERROR("failed lm object creation: err %d\n", rc);
goto fail; goto fail;
} }
rm->mixer_blks[lm->id - LM_0] = &hw->base; rm->mixer_blks[lm->id - LM_0] = &hw->base;
if (!rm->lm_max_width) {
rm->lm_max_width = lm->sblk->maxwidth;
} else if (rm->lm_max_width != lm->sblk->maxwidth) {
/*
* Don't expect to have hw where lm max widths differ.
* If found, take the min.
*/
DPU_ERROR("unsupported: lm maxwidth differs\n");
if (rm->lm_max_width > lm->sblk->maxwidth)
rm->lm_max_width = lm->sblk->maxwidth;
}
} }
for (i = 0; i < cat->merge_3d_count; i++) { for (i = 0; i < cat->merge_3d_count; i++) {
...@@ -137,7 +126,7 @@ int dpu_rm_init(struct dpu_rm *rm, ...@@ -137,7 +126,7 @@ int dpu_rm_init(struct dpu_rm *rm,
continue; continue;
} }
hw = dpu_hw_merge_3d_init(merge_3d->id, mmio, cat); hw = dpu_hw_merge_3d_init(merge_3d->id, mmio, cat);
if (IS_ERR_OR_NULL(hw)) { if (IS_ERR(hw)) {
rc = PTR_ERR(hw); rc = PTR_ERR(hw);
DPU_ERROR("failed merge_3d object creation: err %d\n", DPU_ERROR("failed merge_3d object creation: err %d\n",
rc); rc);
...@@ -155,7 +144,7 @@ int dpu_rm_init(struct dpu_rm *rm, ...@@ -155,7 +144,7 @@ int dpu_rm_init(struct dpu_rm *rm,
continue; continue;
} }
hw = dpu_hw_pingpong_init(pp->id, mmio, cat); hw = dpu_hw_pingpong_init(pp->id, mmio, cat);
if (IS_ERR_OR_NULL(hw)) { if (IS_ERR(hw)) {
rc = PTR_ERR(hw); rc = PTR_ERR(hw);
DPU_ERROR("failed pingpong object creation: err %d\n", DPU_ERROR("failed pingpong object creation: err %d\n",
rc); rc);
...@@ -179,12 +168,12 @@ int dpu_rm_init(struct dpu_rm *rm, ...@@ -179,12 +168,12 @@ int dpu_rm_init(struct dpu_rm *rm,
continue; continue;
} }
hw = dpu_hw_intf_init(intf->id, mmio, cat); hw = dpu_hw_intf_init(intf->id, mmio, cat);
if (IS_ERR_OR_NULL(hw)) { if (IS_ERR(hw)) {
rc = PTR_ERR(hw); rc = PTR_ERR(hw);
DPU_ERROR("failed intf object creation: err %d\n", rc); DPU_ERROR("failed intf object creation: err %d\n", rc);
goto fail; goto fail;
} }
rm->intf_blks[intf->id - INTF_0] = &hw->base; rm->hw_intf[intf->id - INTF_0] = hw;
} }
for (i = 0; i < cat->ctl_count; i++) { for (i = 0; i < cat->ctl_count; i++) {
...@@ -196,7 +185,7 @@ int dpu_rm_init(struct dpu_rm *rm, ...@@ -196,7 +185,7 @@ int dpu_rm_init(struct dpu_rm *rm,
continue; continue;
} }
hw = dpu_hw_ctl_init(ctl->id, mmio, cat); hw = dpu_hw_ctl_init(ctl->id, mmio, cat);
if (IS_ERR_OR_NULL(hw)) { if (IS_ERR(hw)) {
rc = PTR_ERR(hw); rc = PTR_ERR(hw);
DPU_ERROR("failed ctl object creation: err %d\n", rc); DPU_ERROR("failed ctl object creation: err %d\n", rc);
goto fail; goto fail;
...@@ -213,7 +202,7 @@ int dpu_rm_init(struct dpu_rm *rm, ...@@ -213,7 +202,7 @@ int dpu_rm_init(struct dpu_rm *rm,
continue; continue;
} }
hw = dpu_hw_dspp_init(dspp->id, mmio, cat); hw = dpu_hw_dspp_init(dspp->id, mmio, cat);
if (IS_ERR_OR_NULL(hw)) { if (IS_ERR(hw)) {
rc = PTR_ERR(hw); rc = PTR_ERR(hw);
DPU_ERROR("failed dspp object creation: err %d\n", rc); DPU_ERROR("failed dspp object creation: err %d\n", rc);
goto fail; goto fail;
...@@ -452,54 +441,6 @@ static int _dpu_rm_reserve_ctls( ...@@ -452,54 +441,6 @@ static int _dpu_rm_reserve_ctls(
return 0; return 0;
} }
static int _dpu_rm_reserve_intf(
struct dpu_rm *rm,
struct dpu_global_state *global_state,
uint32_t enc_id,
uint32_t id)
{
int idx = id - INTF_0;
if (idx < 0 || idx >= ARRAY_SIZE(rm->intf_blks)) {
DPU_ERROR("invalid intf id: %d", id);
return -EINVAL;
}
if (!rm->intf_blks[idx]) {
DPU_ERROR("couldn't find intf id %d\n", id);
return -EINVAL;
}
if (reserved_by_other(global_state->intf_to_enc_id, idx, enc_id)) {
DPU_ERROR("intf id %d already reserved\n", id);
return -ENAVAIL;
}
global_state->intf_to_enc_id[idx] = enc_id;
return 0;
}
static int _dpu_rm_reserve_intf_related_hw(
struct dpu_rm *rm,
struct dpu_global_state *global_state,
uint32_t enc_id,
struct dpu_encoder_hw_resources *hw_res)
{
int i, ret = 0;
u32 id;
for (i = 0; i < ARRAY_SIZE(hw_res->intfs); i++) {
if (hw_res->intfs[i] == INTF_MODE_NONE)
continue;
id = i + INTF_0;
ret = _dpu_rm_reserve_intf(rm, global_state, enc_id, id);
if (ret)
return ret;
}
return ret;
}
static int _dpu_rm_make_reservation( static int _dpu_rm_make_reservation(
struct dpu_rm *rm, struct dpu_rm *rm,
struct dpu_global_state *global_state, struct dpu_global_state *global_state,
...@@ -521,11 +462,6 @@ static int _dpu_rm_make_reservation( ...@@ -521,11 +462,6 @@ static int _dpu_rm_make_reservation(
return ret; return ret;
} }
ret = _dpu_rm_reserve_intf_related_hw(rm, global_state, enc->base.id,
&reqs->hw_res);
if (ret)
return ret;
return ret; return ret;
} }
...@@ -534,8 +470,6 @@ static int _dpu_rm_populate_requirements( ...@@ -534,8 +470,6 @@ static int _dpu_rm_populate_requirements(
struct dpu_rm_requirements *reqs, struct dpu_rm_requirements *reqs,
struct msm_display_topology req_topology) struct msm_display_topology req_topology)
{ {
dpu_encoder_get_hw_resources(enc, &reqs->hw_res);
reqs->topology = req_topology; reqs->topology = req_topology;
DRM_DEBUG_KMS("num_lm: %d num_enc: %d num_intf: %d\n", DRM_DEBUG_KMS("num_lm: %d num_enc: %d num_intf: %d\n",
...@@ -565,8 +499,6 @@ void dpu_rm_release(struct dpu_global_state *global_state, ...@@ -565,8 +499,6 @@ void dpu_rm_release(struct dpu_global_state *global_state,
ARRAY_SIZE(global_state->mixer_to_enc_id), enc->base.id); ARRAY_SIZE(global_state->mixer_to_enc_id), enc->base.id);
_dpu_rm_clear_mapping(global_state->ctl_to_enc_id, _dpu_rm_clear_mapping(global_state->ctl_to_enc_id,
ARRAY_SIZE(global_state->ctl_to_enc_id), enc->base.id); ARRAY_SIZE(global_state->ctl_to_enc_id), enc->base.id);
_dpu_rm_clear_mapping(global_state->intf_to_enc_id,
ARRAY_SIZE(global_state->intf_to_enc_id), enc->base.id);
} }
int dpu_rm_reserve( int dpu_rm_reserve(
...@@ -630,11 +562,6 @@ int dpu_rm_get_assigned_resources(struct dpu_rm *rm, ...@@ -630,11 +562,6 @@ int dpu_rm_get_assigned_resources(struct dpu_rm *rm,
hw_to_enc_id = global_state->ctl_to_enc_id; hw_to_enc_id = global_state->ctl_to_enc_id;
max_blks = ARRAY_SIZE(rm->ctl_blks); max_blks = ARRAY_SIZE(rm->ctl_blks);
break; break;
case DPU_HW_BLK_INTF:
hw_blks = rm->intf_blks;
hw_to_enc_id = global_state->intf_to_enc_id;
max_blks = ARRAY_SIZE(rm->intf_blks);
break;
case DPU_HW_BLK_DSPP: case DPU_HW_BLK_DSPP:
hw_blks = rm->dspp_blks; hw_blks = rm->dspp_blks;
hw_to_enc_id = global_state->dspp_to_enc_id; hw_to_enc_id = global_state->dspp_to_enc_id;
......
...@@ -18,20 +18,16 @@ struct dpu_global_state; ...@@ -18,20 +18,16 @@ struct dpu_global_state;
* @pingpong_blks: array of pingpong hardware resources * @pingpong_blks: array of pingpong hardware resources
* @mixer_blks: array of layer mixer hardware resources * @mixer_blks: array of layer mixer hardware resources
* @ctl_blks: array of ctl hardware resources * @ctl_blks: array of ctl hardware resources
* @intf_blks: array of intf hardware resources * @hw_intf: array of intf hardware resources
* @dspp_blks: array of dspp hardware resources * @dspp_blks: array of dspp hardware resources
* @lm_max_width: cached layer mixer maximum width
* @rm_lock: resource manager mutex
*/ */
struct dpu_rm { struct dpu_rm {
struct dpu_hw_blk *pingpong_blks[PINGPONG_MAX - PINGPONG_0]; struct dpu_hw_blk *pingpong_blks[PINGPONG_MAX - PINGPONG_0];
struct dpu_hw_blk *mixer_blks[LM_MAX - LM_0]; struct dpu_hw_blk *mixer_blks[LM_MAX - LM_0];
struct dpu_hw_blk *ctl_blks[CTL_MAX - CTL_0]; struct dpu_hw_blk *ctl_blks[CTL_MAX - CTL_0];
struct dpu_hw_blk *intf_blks[INTF_MAX - INTF_0]; struct dpu_hw_intf *hw_intf[INTF_MAX - INTF_0];
struct dpu_hw_blk *dspp_blks[DSPP_MAX - DSPP_0]; struct dpu_hw_blk *dspp_blks[DSPP_MAX - DSPP_0];
struct dpu_hw_blk *merge_3d_blks[MERGE_3D_MAX - MERGE_3D_0]; struct dpu_hw_blk *merge_3d_blks[MERGE_3D_MAX - MERGE_3D_0];
uint32_t lm_max_width;
}; };
/** /**
...@@ -88,5 +84,16 @@ void dpu_rm_release(struct dpu_global_state *global_state, ...@@ -88,5 +84,16 @@ void dpu_rm_release(struct dpu_global_state *global_state,
int dpu_rm_get_assigned_resources(struct dpu_rm *rm, int dpu_rm_get_assigned_resources(struct dpu_rm *rm,
struct dpu_global_state *global_state, uint32_t enc_id, struct dpu_global_state *global_state, uint32_t enc_id,
enum dpu_hw_blk_type type, struct dpu_hw_blk **blks, int blks_size); enum dpu_hw_blk_type type, struct dpu_hw_blk **blks, int blks_size);
/**
* dpu_rm_get_intf - Return a struct dpu_hw_intf instance given its index.
* @rm: DPU Resource Manager handle
* @intf_idx: INTF's index
*/
static inline struct dpu_hw_intf *dpu_rm_get_intf(struct dpu_rm *rm, enum dpu_intf intf_idx)
{
return rm->hw_intf[intf_idx - INTF_0];
}
#endif /* __DPU_RM_H__ */ #endif /* __DPU_RM_H__ */
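With the intf blocks now held as typed dpu_hw_intf pointers, encoder code can look one up directly instead of going through the reservation machinery. A hedged usage sketch; the caller and the INTF index are illustrative:

static int dpu_encoder_get_intf_sketch(struct dpu_kms *dpu_kms)
{
        /* hypothetical caller: fetch the hardware block for INTF_1 */
        struct dpu_hw_intf *hw_intf = dpu_rm_get_intf(&dpu_kms->rm, INTF_1);

        if (!hw_intf)
                return -EINVAL; /* INTF_1 is not present in this catalog */

        return 0;
}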
...@@ -68,7 +68,7 @@ static int smp_request_block(struct mdp5_smp *smp, ...@@ -68,7 +68,7 @@ static int smp_request_block(struct mdp5_smp *smp,
uint8_t reserved; uint8_t reserved;
/* we shouldn't be requesting blocks for an in-use client: */ /* we shouldn't be requesting blocks for an in-use client: */
WARN_ON(bitmap_weight(cs, cnt) > 0); WARN_ON(!bitmap_empty(cs, cnt));
reserved = smp->reserved[cid]; reserved = smp->reserved[cid];
......
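The WARN_ON change in mdp5_smp is behaviour-preserving: bitmap_weight(cs, cnt) > 0 and !bitmap_empty(cs, cnt) test the same condition, but bitmap_empty() can stop at the first non-zero word. A tiny illustration, assuming a 64-bit client map:

#include <linux/bitmap.h>
#include <linux/bug.h>

static void smp_bitmap_check_demo(void)
{
        DECLARE_BITMAP(cs, 64);

        bitmap_zero(cs, 64);
        WARN_ON(!bitmap_empty(cs, 64));   /* empty map: no warning */

        set_bit(3, cs);
        WARN_ON(!bitmap_empty(cs, 64));   /* block 3 in use: the warning fires */
}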
...@@ -456,19 +456,19 @@ void dp_catalog_ctrl_config_msa(struct dp_catalog *dp_catalog, ...@@ -456,19 +456,19 @@ void dp_catalog_ctrl_config_msa(struct dp_catalog *dp_catalog,
dp_write_p0(catalog, MMSS_DP_DSC_DTO, 0x0); dp_write_p0(catalog, MMSS_DP_DSC_DTO, 0x0);
} }
int dp_catalog_ctrl_set_pattern(struct dp_catalog *dp_catalog, int dp_catalog_ctrl_set_pattern_state_bit(struct dp_catalog *dp_catalog,
u32 pattern) u32 state_bit)
{ {
int bit, ret; int bit, ret;
u32 data; u32 data;
struct dp_catalog_private *catalog = container_of(dp_catalog, struct dp_catalog_private *catalog = container_of(dp_catalog,
struct dp_catalog_private, dp_catalog); struct dp_catalog_private, dp_catalog);
bit = BIT(pattern - 1); bit = BIT(state_bit - 1);
DRM_DEBUG_DP("hw: bit=%d train=%d\n", bit, pattern); DRM_DEBUG_DP("hw: bit=%d train=%d\n", bit, state_bit);
dp_catalog_ctrl_state_ctrl(dp_catalog, bit); dp_catalog_ctrl_state_ctrl(dp_catalog, bit);
bit = BIT(pattern - 1) << DP_MAINLINK_READY_LINK_TRAINING_SHIFT; bit = BIT(state_bit - 1) << DP_MAINLINK_READY_LINK_TRAINING_SHIFT;
/* Poll for mainlink ready status */ /* Poll for mainlink ready status */
ret = readx_poll_timeout(readl, catalog->io->dp_controller.link.base + ret = readx_poll_timeout(readl, catalog->io->dp_controller.link.base +
...@@ -476,7 +476,7 @@ int dp_catalog_ctrl_set_pattern(struct dp_catalog *dp_catalog, ...@@ -476,7 +476,7 @@ int dp_catalog_ctrl_set_pattern(struct dp_catalog *dp_catalog,
data, data & bit, data, data & bit,
POLLING_SLEEP_US, POLLING_TIMEOUT_US); POLLING_SLEEP_US, POLLING_TIMEOUT_US);
if (ret < 0) { if (ret < 0) {
DRM_ERROR("set pattern for link_train=%d failed\n", pattern); DRM_ERROR("set state_bit for link_train=%d failed\n", state_bit);
return ret; return ret;
} }
return 0; return 0;
......
...@@ -94,7 +94,7 @@ void dp_catalog_ctrl_mainlink_ctrl(struct dp_catalog *dp_catalog, bool enable); ...@@ -94,7 +94,7 @@ void dp_catalog_ctrl_mainlink_ctrl(struct dp_catalog *dp_catalog, bool enable);
void dp_catalog_ctrl_config_misc(struct dp_catalog *dp_catalog, u32 cc, u32 tb); void dp_catalog_ctrl_config_misc(struct dp_catalog *dp_catalog, u32 cc, u32 tb);
void dp_catalog_ctrl_config_msa(struct dp_catalog *dp_catalog, u32 rate, void dp_catalog_ctrl_config_msa(struct dp_catalog *dp_catalog, u32 rate,
u32 stream_rate_khz, bool fixed_nvid); u32 stream_rate_khz, bool fixed_nvid);
int dp_catalog_ctrl_set_pattern(struct dp_catalog *dp_catalog, u32 pattern); int dp_catalog_ctrl_set_pattern_state_bit(struct dp_catalog *dp_catalog, u32 pattern);
void dp_catalog_ctrl_reset(struct dp_catalog *dp_catalog); void dp_catalog_ctrl_reset(struct dp_catalog *dp_catalog);
bool dp_catalog_ctrl_mainlink_ready(struct dp_catalog *dp_catalog); bool dp_catalog_ctrl_mainlink_ready(struct dp_catalog *dp_catalog);
void dp_catalog_ctrl_enable_irq(struct dp_catalog *dp_catalog, bool enable); void dp_catalog_ctrl_enable_irq(struct dp_catalog *dp_catalog, bool enable);
......
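Under the new name the catalog helper takes the MMSS_DP_STATE_CTRL bit index (1..4) rather than the DPCD training-pattern code, which matters because DP_TRAINING_PATTERN_4 is 0x7 in DPCD terms and would not map onto the register bit by value. A hedged sketch of the mapping the link-training code below relies on:

#include <drm/drm_dp_helper.h>

/* sketch: translate a DPCD pattern code into the state-control bit index */
static u32 dp_pattern_to_state_bit_sketch(u8 pattern)
{
        switch (pattern) {
        case DP_TRAINING_PATTERN_1: return 1;
        case DP_TRAINING_PATTERN_2: return 2;
        case DP_TRAINING_PATTERN_3: return 3;
        case DP_TRAINING_PATTERN_4: return 4; /* DPCD value is 0x7 */
        default:                    return 0; /* invalid */
        }
}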
...@@ -11,7 +11,7 @@ ...@@ -11,7 +11,7 @@
#include <drm/drm_print.h> #include <drm/drm_print.h>
#include "dpu_io_util.h" #include "dp_clk_util.h"
void msm_dss_put_clk(struct dss_clk *clk_arry, int num_clk) void msm_dss_put_clk(struct dss_clk *clk_arry, int num_clk)
{ {
...@@ -118,70 +118,3 @@ int msm_dss_enable_clk(struct dss_clk *clk_arry, int num_clk, int enable) ...@@ -118,70 +118,3 @@ int msm_dss_enable_clk(struct dss_clk *clk_arry, int num_clk, int enable)
return rc; return rc;
} }
int msm_dss_parse_clock(struct platform_device *pdev,
struct dss_module_power *mp)
{
u32 i, rc = 0;
const char *clock_name;
int num_clk = 0;
if (!pdev || !mp)
return -EINVAL;
mp->num_clk = 0;
num_clk = of_property_count_strings(pdev->dev.of_node, "clock-names");
if (num_clk <= 0) {
pr_debug("clocks are not defined\n");
return 0;
}
mp->clk_config = devm_kcalloc(&pdev->dev,
num_clk, sizeof(struct dss_clk),
GFP_KERNEL);
if (!mp->clk_config)
return -ENOMEM;
for (i = 0; i < num_clk; i++) {
rc = of_property_read_string_index(pdev->dev.of_node,
"clock-names", i,
&clock_name);
if (rc) {
DRM_DEV_ERROR(&pdev->dev, "Failed to get clock name for %d\n",
i);
break;
}
strlcpy(mp->clk_config[i].clk_name, clock_name,
sizeof(mp->clk_config[i].clk_name));
mp->clk_config[i].type = DSS_CLK_AHB;
}
rc = msm_dss_get_clk(&pdev->dev, mp->clk_config, num_clk);
if (rc) {
DRM_DEV_ERROR(&pdev->dev, "Failed to get clock refs %d\n", rc);
goto err;
}
rc = of_clk_set_defaults(pdev->dev.of_node, false);
if (rc) {
DRM_DEV_ERROR(&pdev->dev, "Failed to set clock defaults %d\n", rc);
goto err;
}
for (i = 0; i < num_clk; i++) {
u32 rate = clk_get_rate(mp->clk_config[i].clk);
if (!rate)
continue;
mp->clk_config[i].rate = rate;
mp->clk_config[i].type = DSS_CLK_PCLK;
mp->clk_config[i].max_rate = rate;
}
mp->num_clk = num_clk;
return 0;
err:
msm_dss_put_clk(mp->clk_config, num_clk);
return rc;
}
...@@ -2,8 +2,8 @@ ...@@ -2,8 +2,8 @@
/* Copyright (c) 2012, 2017-2018, The Linux Foundation. All rights reserved. /* Copyright (c) 2012, 2017-2018, The Linux Foundation. All rights reserved.
*/ */
#ifndef __DPU_IO_UTIL_H__ #ifndef __DP_CLK_UTIL_H__
#define __DPU_IO_UTIL_H__ #define __DP_CLK_UTIL_H__
#include <linux/platform_device.h> #include <linux/platform_device.h>
#include <linux/types.h> #include <linux/types.h>
...@@ -35,6 +35,4 @@ int msm_dss_get_clk(struct device *dev, struct dss_clk *clk_arry, int num_clk); ...@@ -35,6 +35,4 @@ int msm_dss_get_clk(struct device *dev, struct dss_clk *clk_arry, int num_clk);
void msm_dss_put_clk(struct dss_clk *clk_arry, int num_clk); void msm_dss_put_clk(struct dss_clk *clk_arry, int num_clk);
int msm_dss_clk_set_rate(struct dss_clk *clk_arry, int num_clk); int msm_dss_clk_set_rate(struct dss_clk *clk_arry, int num_clk);
int msm_dss_enable_clk(struct dss_clk *clk_arry, int num_clk, int enable); int msm_dss_enable_clk(struct dss_clk *clk_arry, int num_clk, int enable);
int msm_dss_parse_clock(struct platform_device *pdev, #endif /* __DP_CLK_UTIL_H__ */
struct dss_module_power *mp);
#endif /* __DPU_IO_UTIL_H__ */
...@@ -1083,7 +1083,7 @@ static int dp_ctrl_link_train_1(struct dp_ctrl_private *ctrl, ...@@ -1083,7 +1083,7 @@ static int dp_ctrl_link_train_1(struct dp_ctrl_private *ctrl,
*training_step = DP_TRAINING_1; *training_step = DP_TRAINING_1;
ret = dp_catalog_ctrl_set_pattern(ctrl->catalog, DP_TRAINING_PATTERN_1); ret = dp_catalog_ctrl_set_pattern_state_bit(ctrl->catalog, 1);
if (ret) if (ret)
return ret; return ret;
dp_ctrl_train_pattern_set(ctrl, DP_TRAINING_PATTERN_1 | dp_ctrl_train_pattern_set(ctrl, DP_TRAINING_PATTERN_1 |
...@@ -1181,7 +1181,8 @@ static int dp_ctrl_link_train_2(struct dp_ctrl_private *ctrl, ...@@ -1181,7 +1181,8 @@ static int dp_ctrl_link_train_2(struct dp_ctrl_private *ctrl,
int *training_step) int *training_step)
{ {
int tries = 0, ret = 0; int tries = 0, ret = 0;
char pattern; u8 pattern;
u32 state_ctrl_bit;
int const maximum_retries = 5; int const maximum_retries = 5;
u8 link_status[DP_LINK_STATUS_SIZE]; u8 link_status[DP_LINK_STATUS_SIZE];
...@@ -1189,12 +1190,18 @@ static int dp_ctrl_link_train_2(struct dp_ctrl_private *ctrl, ...@@ -1189,12 +1190,18 @@ static int dp_ctrl_link_train_2(struct dp_ctrl_private *ctrl,
*training_step = DP_TRAINING_2; *training_step = DP_TRAINING_2;
if (drm_dp_tps3_supported(ctrl->panel->dpcd)) if (drm_dp_tps4_supported(ctrl->panel->dpcd)) {
pattern = DP_TRAINING_PATTERN_4;
state_ctrl_bit = 4;
} else if (drm_dp_tps3_supported(ctrl->panel->dpcd)) {
pattern = DP_TRAINING_PATTERN_3; pattern = DP_TRAINING_PATTERN_3;
else state_ctrl_bit = 3;
} else {
pattern = DP_TRAINING_PATTERN_2; pattern = DP_TRAINING_PATTERN_2;
state_ctrl_bit = 2;
}
ret = dp_catalog_ctrl_set_pattern(ctrl->catalog, pattern); ret = dp_catalog_ctrl_set_pattern_state_bit(ctrl->catalog, state_ctrl_bit);
if (ret) if (ret)
return ret; return ret;
...@@ -1365,60 +1372,48 @@ static int dp_ctrl_enable_stream_clocks(struct dp_ctrl_private *ctrl) ...@@ -1365,60 +1372,48 @@ static int dp_ctrl_enable_stream_clocks(struct dp_ctrl_private *ctrl)
return ret; return ret;
} }
int dp_ctrl_host_init(struct dp_ctrl *dp_ctrl, bool flip, bool reset) void dp_ctrl_reset_irq_ctrl(struct dp_ctrl *dp_ctrl, bool enable)
{
struct dp_ctrl_private *ctrl;
ctrl = container_of(dp_ctrl, struct dp_ctrl_private, dp_ctrl);
dp_catalog_ctrl_reset(ctrl->catalog);
if (enable)
dp_catalog_ctrl_enable_irq(ctrl->catalog, enable);
}
void dp_ctrl_phy_init(struct dp_ctrl *dp_ctrl)
{ {
struct dp_ctrl_private *ctrl; struct dp_ctrl_private *ctrl;
struct dp_io *dp_io; struct dp_io *dp_io;
struct phy *phy; struct phy *phy;
if (!dp_ctrl) {
DRM_ERROR("Invalid input data\n");
return -EINVAL;
}
ctrl = container_of(dp_ctrl, struct dp_ctrl_private, dp_ctrl); ctrl = container_of(dp_ctrl, struct dp_ctrl_private, dp_ctrl);
dp_io = &ctrl->parser->io; dp_io = &ctrl->parser->io;
phy = dp_io->phy; phy = dp_io->phy;
ctrl->dp_ctrl.orientation = flip;
if (reset)
dp_catalog_ctrl_reset(ctrl->catalog);
DRM_DEBUG_DP("flip=%d\n", flip);
dp_catalog_ctrl_phy_reset(ctrl->catalog); dp_catalog_ctrl_phy_reset(ctrl->catalog);
phy_init(phy); phy_init(phy);
dp_catalog_ctrl_enable_irq(ctrl->catalog, true); DRM_DEBUG_DP("phy=%p init=%d power_on=%d\n",
phy, phy->init_count, phy->power_count);
return 0;
} }
/** void dp_ctrl_phy_exit(struct dp_ctrl *dp_ctrl)
* dp_ctrl_host_deinit() - Uninitialize DP controller
* @dp_ctrl: Display Port Driver data
*
* Perform required steps to uninitialize DP controller
* and its resources.
*/
void dp_ctrl_host_deinit(struct dp_ctrl *dp_ctrl)
{ {
struct dp_ctrl_private *ctrl; struct dp_ctrl_private *ctrl;
struct dp_io *dp_io; struct dp_io *dp_io;
struct phy *phy; struct phy *phy;
if (!dp_ctrl) {
DRM_ERROR("Invalid input data\n");
return;
}
ctrl = container_of(dp_ctrl, struct dp_ctrl_private, dp_ctrl); ctrl = container_of(dp_ctrl, struct dp_ctrl_private, dp_ctrl);
dp_io = &ctrl->parser->io; dp_io = &ctrl->parser->io;
phy = dp_io->phy; phy = dp_io->phy;
dp_catalog_ctrl_enable_irq(ctrl->catalog, false); dp_catalog_ctrl_phy_reset(ctrl->catalog);
phy_exit(phy); phy_exit(phy);
DRM_DEBUG_DP("phy=%p init=%d power_on=%d\n",
DRM_DEBUG_DP("Host deinitialized successfully\n"); phy, phy->init_count, phy->power_count);
} }
static bool dp_ctrl_use_fixed_nvid(struct dp_ctrl_private *ctrl) static bool dp_ctrl_use_fixed_nvid(struct dp_ctrl_private *ctrl)
...@@ -1488,8 +1483,13 @@ static int dp_ctrl_deinitialize_mainlink(struct dp_ctrl_private *ctrl) ...@@ -1488,8 +1483,13 @@ static int dp_ctrl_deinitialize_mainlink(struct dp_ctrl_private *ctrl)
} }
phy_power_off(phy); phy_power_off(phy);
/* aux channel down, reinit phy */
phy_exit(phy); phy_exit(phy);
phy_init(phy);
DRM_DEBUG_DP("phy=%p init=%d power_on=%d\n",
phy, phy->init_count, phy->power_count);
return 0; return 0;
} }
...@@ -1761,6 +1761,9 @@ int dp_ctrl_on_link(struct dp_ctrl *dp_ctrl) ...@@ -1761,6 +1761,9 @@ int dp_ctrl_on_link(struct dp_ctrl *dp_ctrl)
/* end with failure */ /* end with failure */
break; /* lane == 1 already */ break; /* lane == 1 already */
} }
/* stop link training before starting retraining */
dp_ctrl_clear_training_pattern(ctrl);
} }
} }
...@@ -1893,33 +1896,20 @@ int dp_ctrl_off_link_stream(struct dp_ctrl *dp_ctrl) ...@@ -1893,33 +1896,20 @@ int dp_ctrl_off_link_stream(struct dp_ctrl *dp_ctrl)
return ret; return ret;
} }
DRM_DEBUG_DP("Before, phy=%x init_count=%d power_on=%d\n",
(u32)(uintptr_t)phy, phy->init_count, phy->power_count);
phy_power_off(phy); phy_power_off(phy);
/* aux channel down, reinit phy */ /* aux channel down, reinit phy */
phy_exit(phy); phy_exit(phy);
phy_init(phy); phy_init(phy);
DRM_DEBUG_DP("DP off link/stream done\n"); DRM_DEBUG_DP("phy=%p init=%d power_on=%d\n",
phy, phy->init_count, phy->power_count);
return ret; return ret;
} }
void dp_ctrl_off_phy(struct dp_ctrl *dp_ctrl)
{
struct dp_ctrl_private *ctrl;
struct dp_io *dp_io;
struct phy *phy;
ctrl = container_of(dp_ctrl, struct dp_ctrl_private, dp_ctrl);
dp_io = &ctrl->parser->io;
phy = dp_io->phy;
dp_catalog_ctrl_reset(ctrl->catalog);
phy_exit(phy);
DRM_DEBUG_DP("DP off phy done\n");
}
int dp_ctrl_off(struct dp_ctrl *dp_ctrl) int dp_ctrl_off(struct dp_ctrl *dp_ctrl)
{ {
struct dp_ctrl_private *ctrl; struct dp_ctrl_private *ctrl;
...@@ -1948,9 +1938,9 @@ int dp_ctrl_off(struct dp_ctrl *dp_ctrl) ...@@ -1948,9 +1938,9 @@ int dp_ctrl_off(struct dp_ctrl *dp_ctrl)
} }
phy_power_off(phy); phy_power_off(phy);
phy_exit(phy); DRM_DEBUG_DP("phy=%p init=%d power_on=%d\n",
phy, phy->init_count, phy->power_count);
DRM_DEBUG_DP("DP off done\n");
return ret; return ret;
} }
......
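dp_ctrl_host_init()/dp_ctrl_host_deinit() are broken up: dp_ctrl_reset_irq_ctrl() covers the controller reset (and, when asked, the IRQ enable), while dp_ctrl_phy_init()/dp_ctrl_phy_exit() own the PHY lifetime. A hedged sketch of how a dp_display caller might sequence them; the wrapper names are illustrative:

/* bring-up: reset the controller, enable its IRQs, then init the PHY */
static void dp_display_host_init_sketch(struct dp_ctrl *ctrl)
{
        dp_ctrl_reset_irq_ctrl(ctrl, true);
        dp_ctrl_phy_init(ctrl);
}

/* tear-down: reset again (IRQs untouched with enable=false), release the PHY */
static void dp_display_host_deinit_sketch(struct dp_ctrl *ctrl)
{
        dp_ctrl_reset_irq_ctrl(ctrl, false);
        dp_ctrl_phy_exit(ctrl);
}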
...@@ -19,12 +19,9 @@ struct dp_ctrl { ...@@ -19,12 +19,9 @@ struct dp_ctrl {
u32 pixel_rate; u32 pixel_rate;
}; };
int dp_ctrl_host_init(struct dp_ctrl *dp_ctrl, bool flip, bool reset);
void dp_ctrl_host_deinit(struct dp_ctrl *dp_ctrl);
int dp_ctrl_on_link(struct dp_ctrl *dp_ctrl); int dp_ctrl_on_link(struct dp_ctrl *dp_ctrl);
int dp_ctrl_on_stream(struct dp_ctrl *dp_ctrl); int dp_ctrl_on_stream(struct dp_ctrl *dp_ctrl);
int dp_ctrl_off_link_stream(struct dp_ctrl *dp_ctrl); int dp_ctrl_off_link_stream(struct dp_ctrl *dp_ctrl);
void dp_ctrl_off_phy(struct dp_ctrl *dp_ctrl);
int dp_ctrl_off(struct dp_ctrl *dp_ctrl); int dp_ctrl_off(struct dp_ctrl *dp_ctrl);
void dp_ctrl_push_idle(struct dp_ctrl *dp_ctrl); void dp_ctrl_push_idle(struct dp_ctrl *dp_ctrl);
void dp_ctrl_isr(struct dp_ctrl *dp_ctrl); void dp_ctrl_isr(struct dp_ctrl *dp_ctrl);
...@@ -34,4 +31,9 @@ struct dp_ctrl *dp_ctrl_get(struct device *dev, struct dp_link *link, ...@@ -34,4 +31,9 @@ struct dp_ctrl *dp_ctrl_get(struct device *dev, struct dp_link *link,
struct dp_power *power, struct dp_catalog *catalog, struct dp_power *power, struct dp_catalog *catalog,
struct dp_parser *parser); struct dp_parser *parser);
void dp_ctrl_reset_irq_ctrl(struct dp_ctrl *dp_ctrl, bool enable);
void dp_ctrl_phy_init(struct dp_ctrl *dp_ctrl);
void dp_ctrl_phy_exit(struct dp_ctrl *dp_ctrl);
void dp_ctrl_irq_phy_exit(struct dp_ctrl *dp_ctrl);
#endif /* _DP_CTRL_H_ */ #endif /* _DP_CTRL_H_ */
...@@ -207,39 +207,39 @@ static const struct file_operations test_active_fops = { ...@@ -207,39 +207,39 @@ static const struct file_operations test_active_fops = {
.write = dp_test_active_write .write = dp_test_active_write
}; };
static int dp_debug_init(struct dp_debug *dp_debug, struct drm_minor *minor) static void dp_debug_init(struct dp_debug *dp_debug, struct drm_minor *minor)
{ {
int rc = 0; char path[64];
struct dp_debug_private *debug = container_of(dp_debug, struct dp_debug_private *debug = container_of(dp_debug,
struct dp_debug_private, dp_debug); struct dp_debug_private, dp_debug);
debugfs_create_file("dp_debug", 0444, minor->debugfs_root, snprintf(path, sizeof(path), "msm_dp-%s", debug->connector->name);
debug->root = debugfs_create_dir(path, minor->debugfs_root);
debugfs_create_file("dp_debug", 0444, debug->root,
debug, &dp_debug_fops); debug, &dp_debug_fops);
debugfs_create_file("msm_dp_test_active", 0444, debugfs_create_file("msm_dp_test_active", 0444,
minor->debugfs_root, debug->root,
debug, &test_active_fops); debug, &test_active_fops);
debugfs_create_file("msm_dp_test_data", 0444, debugfs_create_file("msm_dp_test_data", 0444,
minor->debugfs_root, debug->root,
debug, &dp_test_data_fops); debug, &dp_test_data_fops);
debugfs_create_file("msm_dp_test_type", 0444, debugfs_create_file("msm_dp_test_type", 0444,
minor->debugfs_root, debug->root,
debug, &dp_test_type_fops); debug, &dp_test_type_fops);
debug->root = minor->debugfs_root;
return rc;
} }
struct dp_debug *dp_debug_get(struct device *dev, struct dp_panel *panel, struct dp_debug *dp_debug_get(struct device *dev, struct dp_panel *panel,
struct dp_usbpd *usbpd, struct dp_link *link, struct dp_usbpd *usbpd, struct dp_link *link,
struct drm_connector *connector, struct drm_minor *minor) struct drm_connector *connector, struct drm_minor *minor)
{ {
int rc = 0;
struct dp_debug_private *debug; struct dp_debug_private *debug;
struct dp_debug *dp_debug; struct dp_debug *dp_debug;
int rc;
if (!dev || !panel || !usbpd || !link) { if (!dev || !panel || !usbpd || !link) {
DRM_ERROR("invalid input\n"); DRM_ERROR("invalid input\n");
...@@ -266,11 +266,7 @@ struct dp_debug *dp_debug_get(struct device *dev, struct dp_panel *panel, ...@@ -266,11 +266,7 @@ struct dp_debug *dp_debug_get(struct device *dev, struct dp_panel *panel,
dp_debug->hdisplay = 0; dp_debug->hdisplay = 0;
dp_debug->vrefresh = 0; dp_debug->vrefresh = 0;
rc = dp_debug_init(dp_debug, minor); dp_debug_init(dp_debug, minor);
if (rc) {
devm_kfree(dev, debug);
goto error;
}
return dp_debug; return dp_debug;
error: error:
......
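dp_debug_init() no longer returns an error because the debugfs_create_*() helpers are meant to be fire-and-forget; the entries simply move into a per-connector directory. A minimal sketch of the naming scheme, assuming a connector name such as "DP-1":

#include <linux/debugfs.h>

/* e.g. connector name "DP-1" yields a "msm_dp-DP-1" debugfs directory */
static struct dentry *dp_debugfs_dir_sketch(struct dentry *debugfs_root,
                                            const char *connector_name)
{
        char path[64];

        snprintf(path, sizeof(path), "msm_dp-%s", connector_name);
        return debugfs_create_dir(path, debugfs_root);
}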
...@@ -16,7 +16,7 @@ struct msm_dp { ...@@ -16,7 +16,7 @@ struct msm_dp {
struct drm_bridge *bridge; struct drm_bridge *bridge;
struct drm_connector *connector; struct drm_connector *connector;
struct drm_encoder *encoder; struct drm_encoder *encoder;
struct drm_bridge *panel_bridge; struct drm_bridge *next_bridge;
bool is_connected; bool is_connected;
bool audio_enabled; bool audio_enabled;
bool power_on; bool power_on;
......
...@@ -169,16 +169,6 @@ struct drm_connector *dp_drm_connector_init(struct msm_dp *dp_display) ...@@ -169,16 +169,6 @@ struct drm_connector *dp_drm_connector_init(struct msm_dp *dp_display)
drm_connector_attach_encoder(connector, dp_display->encoder); drm_connector_attach_encoder(connector, dp_display->encoder);
if (dp_display->panel_bridge) {
ret = drm_bridge_attach(dp_display->encoder,
dp_display->panel_bridge, NULL,
DRM_BRIDGE_ATTACH_NO_CONNECTOR);
if (ret < 0) {
DRM_ERROR("failed to attach panel bridge: %d\n", ret);
return ERR_PTR(ret);
}
}
return connector; return connector;
} }
...@@ -246,5 +236,16 @@ struct drm_bridge *msm_dp_bridge_init(struct msm_dp *dp_display, struct drm_devi ...@@ -246,5 +236,16 @@ struct drm_bridge *msm_dp_bridge_init(struct msm_dp *dp_display, struct drm_devi
return ERR_PTR(rc); return ERR_PTR(rc);
} }
if (dp_display->next_bridge) {
rc = drm_bridge_attach(dp_display->encoder,
dp_display->next_bridge, bridge,
DRM_BRIDGE_ATTACH_NO_CONNECTOR);
if (rc < 0) {
DRM_ERROR("failed to attach panel bridge: %d\n", rc);
drm_bridge_remove(bridge);
return ERR_PTR(rc);
}
}
return bridge; return bridge;
} }
...@@ -212,6 +212,11 @@ int dp_panel_read_sink_caps(struct dp_panel *dp_panel, ...@@ -212,6 +212,11 @@ int dp_panel_read_sink_caps(struct dp_panel *dp_panel,
if (drm_add_modes_noedid(connector, 640, 480)) if (drm_add_modes_noedid(connector, 640, 480))
drm_set_preferred_mode(connector, 640, 480); drm_set_preferred_mode(connector, 640, 480);
mutex_unlock(&connector->dev->mode_config.mutex); mutex_unlock(&connector->dev->mode_config.mutex);
} else {
/* always add fail-safe mode as backup mode */
mutex_lock(&connector->dev->mode_config.mutex);
drm_add_modes_noedid(connector, 640, 480);
mutex_unlock(&connector->dev->mode_config.mutex);
} }
if (panel->aux_cfg_update_done) { if (panel->aux_cfg_update_done) {
......
...@@ -265,23 +265,16 @@ static int dp_parser_clock(struct dp_parser *parser) ...@@ -265,23 +265,16 @@ static int dp_parser_clock(struct dp_parser *parser)
return 0; return 0;
} }
static int dp_parser_find_panel(struct dp_parser *parser) static int dp_parser_find_next_bridge(struct dp_parser *parser)
{ {
struct device *dev = &parser->pdev->dev; struct device *dev = &parser->pdev->dev;
struct drm_panel *panel; struct drm_bridge *bridge;
int rc;
rc = drm_of_find_panel_or_bridge(dev->of_node, 1, 0, &panel, NULL); bridge = devm_drm_of_get_bridge(dev, dev->of_node, 1, 0);
if (rc) { if (IS_ERR(bridge))
DRM_ERROR("failed to acquire DRM panel: %d\n", rc); return PTR_ERR(bridge);
return rc;
}
parser->panel_bridge = devm_drm_panel_bridge_add(dev, panel); parser->next_bridge = bridge;
if (IS_ERR(parser->panel_bridge)) {
DRM_ERROR("failed to create panel bridge\n");
return PTR_ERR(parser->panel_bridge);
}
return 0; return 0;
} }
...@@ -307,10 +300,23 @@ static int dp_parser_parse(struct dp_parser *parser, int connector_type) ...@@ -307,10 +300,23 @@ static int dp_parser_parse(struct dp_parser *parser, int connector_type)
if (rc) if (rc)
return rc; return rc;
if (connector_type == DRM_MODE_CONNECTOR_eDP) { /*
rc = dp_parser_find_panel(parser); * External bridges are mandatory for eDP interfaces: one has to
if (rc) * provide at least an eDP panel (which gets wrapped into panel-bridge).
*
* For DisplayPort interfaces external bridges are optional, so
* silently ignore an error if one is not present (-ENODEV).
*/
rc = dp_parser_find_next_bridge(parser);
if (rc == -ENODEV) {
if (connector_type == DRM_MODE_CONNECTOR_eDP) {
DRM_ERROR("eDP: next bridge is not present\n");
return rc; return rc;
}
} else if (rc) {
if (rc != -EPROBE_DEFER)
DRM_ERROR("DP: error parsing next bridge: %d\n", rc);
return rc;
} }
/* Map the corresponding regulator information according to /* Map the corresponding regulator information according to
......
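devm_drm_of_get_bridge() looks up port 1, endpoint 0 of the controller's OF graph and wraps a bare panel in a panel-bridge automatically, so the drm_of_find_panel_or_bridge() + devm_drm_panel_bridge_add() pair goes away. A hedged sketch of the optional-bridge policy described in the comment above (the wrapper name and is_edp flag are illustrative):

#include <linux/err.h>
#include <drm/drm_bridge.h>

static int dp_get_next_bridge_sketch(struct device *dev,
                                     struct dp_parser *parser, bool is_edp)
{
        struct drm_bridge *bridge;

        bridge = devm_drm_of_get_bridge(dev, dev->of_node, 1, 0);
        if (IS_ERR(bridge)) {
                int rc = PTR_ERR(bridge);

                /* -ENODEV only means "no bridge in DT": fatal just for eDP */
                if (rc == -ENODEV && !is_edp)
                        return 0;
                return rc;
        }

        parser->next_bridge = bridge;
        return 0;
}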
...@@ -10,7 +10,7 @@ ...@@ -10,7 +10,7 @@
#include <linux/phy/phy.h> #include <linux/phy/phy.h>
#include <linux/phy/phy-dp.h> #include <linux/phy/phy-dp.h>
#include "dpu_io_util.h" #include "dp_clk_util.h"
#include "msm_drv.h" #include "msm_drv.h"
#define DP_LABEL "MDSS DP DISPLAY" #define DP_LABEL "MDSS DP DISPLAY"
...@@ -123,7 +123,7 @@ struct dp_parser { ...@@ -123,7 +123,7 @@ struct dp_parser {
struct dp_display_data disp_data; struct dp_display_data disp_data;
const struct dp_regulator_cfg *regulator_cfg; const struct dp_regulator_cfg *regulator_cfg;
u32 max_dp_lanes; u32 max_dp_lanes;
struct drm_bridge *panel_bridge; struct drm_bridge *next_bridge;
int (*parse)(struct dp_parser *parser, int connector_type); int (*parse)(struct dp_parser *parser, int connector_type);
}; };
......
...@@ -1877,7 +1877,7 @@ int msm_dsi_host_init(struct msm_dsi *msm_dsi) ...@@ -1877,7 +1877,7 @@ int msm_dsi_host_init(struct msm_dsi *msm_dsi)
/* do not autoenable, will be enabled later */ /* do not autoenable, will be enabled later */
ret = devm_request_irq(&pdev->dev, msm_host->irq, dsi_host_irq, ret = devm_request_irq(&pdev->dev, msm_host->irq, dsi_host_irq,
IRQF_TRIGGER_HIGH | IRQF_ONESHOT | IRQF_NO_AUTOEN, IRQF_TRIGGER_HIGH | IRQF_NO_AUTOEN,
"dsi_isr", msm_host); "dsi_isr", msm_host);
if (ret < 0) { if (ret < 0) {
dev_err(&pdev->dev, "failed to request IRQ%u: %d\n", dev_err(&pdev->dev, "failed to request IRQ%u: %d\n",
......
...@@ -315,13 +315,12 @@ dsi_mgr_connector_best_encoder(struct drm_connector *connector) ...@@ -315,13 +315,12 @@ dsi_mgr_connector_best_encoder(struct drm_connector *connector)
return msm_dsi_get_encoder(msm_dsi); return msm_dsi_get_encoder(msm_dsi);
} }
static void dsi_mgr_bridge_pre_enable(struct drm_bridge *bridge) static void dsi_mgr_bridge_power_on(struct drm_bridge *bridge)
{ {
int id = dsi_mgr_bridge_get_id(bridge); int id = dsi_mgr_bridge_get_id(bridge);
struct msm_dsi *msm_dsi = dsi_mgr_get_dsi(id); struct msm_dsi *msm_dsi = dsi_mgr_get_dsi(id);
struct msm_dsi *msm_dsi1 = dsi_mgr_get_dsi(DSI_1); struct msm_dsi *msm_dsi1 = dsi_mgr_get_dsi(DSI_1);
struct mipi_dsi_host *host = msm_dsi->host; struct mipi_dsi_host *host = msm_dsi->host;
struct drm_panel *panel = msm_dsi->panel;
struct msm_dsi_phy_shared_timings phy_shared_timings[DSI_MAX]; struct msm_dsi_phy_shared_timings phy_shared_timings[DSI_MAX];
bool is_bonded_dsi = IS_BONDED_DSI(); bool is_bonded_dsi = IS_BONDED_DSI();
int ret; int ret;
...@@ -362,6 +361,34 @@ static void dsi_mgr_bridge_pre_enable(struct drm_bridge *bridge) ...@@ -362,6 +361,34 @@ static void dsi_mgr_bridge_pre_enable(struct drm_bridge *bridge)
if (is_bonded_dsi && msm_dsi1) if (is_bonded_dsi && msm_dsi1)
msm_dsi_host_enable_irq(msm_dsi1->host); msm_dsi_host_enable_irq(msm_dsi1->host);
return;
host1_on_fail:
msm_dsi_host_power_off(host);
host_on_fail:
dsi_mgr_phy_disable(id);
phy_en_fail:
return;
}
static void dsi_mgr_bridge_pre_enable(struct drm_bridge *bridge)
{
int id = dsi_mgr_bridge_get_id(bridge);
struct msm_dsi *msm_dsi = dsi_mgr_get_dsi(id);
struct msm_dsi *msm_dsi1 = dsi_mgr_get_dsi(DSI_1);
struct mipi_dsi_host *host = msm_dsi->host;
struct drm_panel *panel = msm_dsi->panel;
bool is_bonded_dsi = IS_BONDED_DSI();
int ret;
DBG("id=%d", id);
if (!msm_dsi_device_connected(msm_dsi))
return;
/* Do nothing with the host if it is slave-DSI in case of bonded DSI */
if (is_bonded_dsi && !IS_MASTER_DSI_LINK(id))
return;
/* Always call panel functions once, because even for dual panels, /* Always call panel functions once, because even for dual panels,
* there is only one drm_panel instance. * there is only one drm_panel instance.
*/ */
...@@ -396,17 +423,7 @@ static void dsi_mgr_bridge_pre_enable(struct drm_bridge *bridge) ...@@ -396,17 +423,7 @@ static void dsi_mgr_bridge_pre_enable(struct drm_bridge *bridge)
if (panel) if (panel)
drm_panel_unprepare(panel); drm_panel_unprepare(panel);
panel_prep_fail: panel_prep_fail:
msm_dsi_host_disable_irq(host);
if (is_bonded_dsi && msm_dsi1)
msm_dsi_host_disable_irq(msm_dsi1->host);
if (is_bonded_dsi && msm_dsi1)
msm_dsi_host_power_off(msm_dsi1->host);
host1_on_fail:
msm_dsi_host_power_off(host);
host_on_fail:
dsi_mgr_phy_disable(id);
phy_en_fail:
return; return;
} }
...@@ -552,6 +569,8 @@ static void dsi_mgr_bridge_mode_set(struct drm_bridge *bridge, ...@@ -552,6 +569,8 @@ static void dsi_mgr_bridge_mode_set(struct drm_bridge *bridge,
msm_dsi_host_set_display_mode(host, adjusted_mode); msm_dsi_host_set_display_mode(host, adjusted_mode);
if (is_bonded_dsi && other_dsi) if (is_bonded_dsi && other_dsi)
msm_dsi_host_set_display_mode(other_dsi->host, adjusted_mode); msm_dsi_host_set_display_mode(other_dsi->host, adjusted_mode);
dsi_mgr_bridge_power_on(bridge);
} }
static const struct drm_connector_funcs dsi_mgr_connector_funcs = { static const struct drm_connector_funcs dsi_mgr_connector_funcs = {
......
...@@ -741,6 +741,12 @@ static int dsi_phy_driver_probe(struct platform_device *pdev) ...@@ -741,6 +741,12 @@ static int dsi_phy_driver_probe(struct platform_device *pdev)
} }
} }
if (phy->cfg->ops.parse_dt_properties) {
ret = phy->cfg->ops.parse_dt_properties(phy);
if (ret)
goto fail;
}
ret = dsi_phy_regulator_init(phy); ret = dsi_phy_regulator_init(phy);
if (ret) if (ret)
goto fail; goto fail;
......
...@@ -25,6 +25,7 @@ struct msm_dsi_phy_ops { ...@@ -25,6 +25,7 @@ struct msm_dsi_phy_ops {
void (*save_pll_state)(struct msm_dsi_phy *phy); void (*save_pll_state)(struct msm_dsi_phy *phy);
int (*restore_pll_state)(struct msm_dsi_phy *phy); int (*restore_pll_state)(struct msm_dsi_phy *phy);
bool (*set_continuous_clock)(struct msm_dsi_phy *phy, bool enable); bool (*set_continuous_clock)(struct msm_dsi_phy *phy, bool enable);
int (*parse_dt_properties)(struct msm_dsi_phy *phy);
}; };
struct msm_dsi_phy_cfg { struct msm_dsi_phy_cfg {
...@@ -82,6 +83,8 @@ struct msm_dsi_dphy_timing { ...@@ -82,6 +83,8 @@ struct msm_dsi_dphy_timing {
#define DSI_PIXEL_PLL_CLK 1 #define DSI_PIXEL_PLL_CLK 1
#define NUM_PROVIDED_CLKS 2 #define NUM_PROVIDED_CLKS 2
#define DSI_LANE_MAX 5
struct msm_dsi_phy { struct msm_dsi_phy {
struct platform_device *pdev; struct platform_device *pdev;
void __iomem *base; void __iomem *base;
...@@ -99,6 +102,7 @@ struct msm_dsi_phy { ...@@ -99,6 +102,7 @@ struct msm_dsi_phy {
struct msm_dsi_dphy_timing timing; struct msm_dsi_dphy_timing timing;
const struct msm_dsi_phy_cfg *cfg; const struct msm_dsi_phy_cfg *cfg;
void *tuning_cfg;
enum msm_dsi_phy_usecase usecase; enum msm_dsi_phy_usecase usecase;
bool regulator_ldo_mode; bool regulator_ldo_mode;
......
...@@ -83,6 +83,18 @@ struct dsi_pll_10nm { ...@@ -83,6 +83,18 @@ struct dsi_pll_10nm {
#define to_pll_10nm(x) container_of(x, struct dsi_pll_10nm, clk_hw) #define to_pll_10nm(x) container_of(x, struct dsi_pll_10nm, clk_hw)
/**
* struct dsi_phy_10nm_tuning_cfg - Holds 10nm PHY tuning config parameters.
* @rescode_offset_top: Offset for pull-up legs rescode.
* @rescode_offset_bot: Offset for pull-down legs rescode.
* @vreg_ctrl: vreg ctrl to drive LDO level
*/
struct dsi_phy_10nm_tuning_cfg {
u8 rescode_offset_top[DSI_LANE_MAX];
u8 rescode_offset_bot[DSI_LANE_MAX];
u8 vreg_ctrl;
};
/* /*
* Global list of private DSI PLL struct pointers. We need this for bonded DSI * Global list of private DSI PLL struct pointers. We need this for bonded DSI
* mode, where the master PLL's clk_ops needs access the slave's private data * mode, where the master PLL's clk_ops needs access the slave's private data
...@@ -562,7 +574,9 @@ static int pll_10nm_register(struct dsi_pll_10nm *pll_10nm, struct clk_hw **prov ...@@ -562,7 +574,9 @@ static int pll_10nm_register(struct dsi_pll_10nm *pll_10nm, struct clk_hw **prov
char clk_name[32], parent[32], vco_name[32]; char clk_name[32], parent[32], vco_name[32];
char parent2[32], parent3[32], parent4[32]; char parent2[32], parent3[32], parent4[32];
struct clk_init_data vco_init = { struct clk_init_data vco_init = {
.parent_names = (const char *[]){ "xo" }, .parent_data = &(const struct clk_parent_data) {
.fw_name = "ref",
},
.num_parents = 1, .num_parents = 1,
.name = vco_name, .name = vco_name,
.flags = CLK_IGNORE_UNUSED, .flags = CLK_IGNORE_UNUSED,
...@@ -747,6 +761,7 @@ static void dsi_phy_hw_v3_0_lane_settings(struct msm_dsi_phy *phy) ...@@ -747,6 +761,7 @@ static void dsi_phy_hw_v3_0_lane_settings(struct msm_dsi_phy *phy)
int i; int i;
u8 tx_dctrl[] = { 0x00, 0x00, 0x00, 0x04, 0x01 }; u8 tx_dctrl[] = { 0x00, 0x00, 0x00, 0x04, 0x01 };
void __iomem *lane_base = phy->lane_base; void __iomem *lane_base = phy->lane_base;
struct dsi_phy_10nm_tuning_cfg *tuning_cfg = phy->tuning_cfg;
if (phy->cfg->quirks & DSI_PHY_10NM_QUIRK_OLD_TIMINGS) if (phy->cfg->quirks & DSI_PHY_10NM_QUIRK_OLD_TIMINGS)
tx_dctrl[3] = 0x02; tx_dctrl[3] = 0x02;
...@@ -775,10 +790,13 @@ static void dsi_phy_hw_v3_0_lane_settings(struct msm_dsi_phy *phy) ...@@ -775,10 +790,13 @@ static void dsi_phy_hw_v3_0_lane_settings(struct msm_dsi_phy *phy)
dsi_phy_write(lane_base + REG_DSI_10nm_PHY_LN_CFG2(i), 0x0); dsi_phy_write(lane_base + REG_DSI_10nm_PHY_LN_CFG2(i), 0x0);
dsi_phy_write(lane_base + REG_DSI_10nm_PHY_LN_CFG3(i), dsi_phy_write(lane_base + REG_DSI_10nm_PHY_LN_CFG3(i),
i == 4 ? 0x80 : 0x0); i == 4 ? 0x80 : 0x0);
dsi_phy_write(lane_base +
REG_DSI_10nm_PHY_LN_OFFSET_TOP_CTRL(i), 0x0); /* platform specific dsi phy drive strength adjustment */
dsi_phy_write(lane_base + dsi_phy_write(lane_base + REG_DSI_10nm_PHY_LN_OFFSET_TOP_CTRL(i),
REG_DSI_10nm_PHY_LN_OFFSET_BOT_CTRL(i), 0x0); tuning_cfg->rescode_offset_top[i]);
dsi_phy_write(lane_base + REG_DSI_10nm_PHY_LN_OFFSET_BOT_CTRL(i),
tuning_cfg->rescode_offset_bot[i]);
dsi_phy_write(lane_base + REG_DSI_10nm_PHY_LN_TX_DCTRL(i), dsi_phy_write(lane_base + REG_DSI_10nm_PHY_LN_TX_DCTRL(i),
tx_dctrl[i]); tx_dctrl[i]);
} }
...@@ -799,6 +817,7 @@ static int dsi_10nm_phy_enable(struct msm_dsi_phy *phy, ...@@ -799,6 +817,7 @@ static int dsi_10nm_phy_enable(struct msm_dsi_phy *phy,
u32 const timeout_us = 1000; u32 const timeout_us = 1000;
struct msm_dsi_dphy_timing *timing = &phy->timing; struct msm_dsi_dphy_timing *timing = &phy->timing;
void __iomem *base = phy->base; void __iomem *base = phy->base;
struct dsi_phy_10nm_tuning_cfg *tuning_cfg = phy->tuning_cfg;
u32 data; u32 data;
DBG(""); DBG("");
...@@ -834,8 +853,9 @@ static int dsi_10nm_phy_enable(struct msm_dsi_phy *phy, ...@@ -834,8 +853,9 @@ static int dsi_10nm_phy_enable(struct msm_dsi_phy *phy,
/* Select MS1 byte-clk */ /* Select MS1 byte-clk */
dsi_phy_write(base + REG_DSI_10nm_PHY_CMN_GLBL_CTRL, 0x10); dsi_phy_write(base + REG_DSI_10nm_PHY_CMN_GLBL_CTRL, 0x10);
/* Enable LDO */ /* Enable LDO with platform specific drive level/amplitude adjustment */
dsi_phy_write(base + REG_DSI_10nm_PHY_CMN_VREG_CTRL, 0x59); dsi_phy_write(base + REG_DSI_10nm_PHY_CMN_VREG_CTRL,
tuning_cfg->vreg_ctrl);
/* Configure PHY lane swap (TODO: we need to calculate this) */ /* Configure PHY lane swap (TODO: we need to calculate this) */
dsi_phy_write(base + REG_DSI_10nm_PHY_CMN_LANE_CFG0, 0x21); dsi_phy_write(base + REG_DSI_10nm_PHY_CMN_LANE_CFG0, 0x21);
...@@ -922,6 +942,92 @@ static void dsi_10nm_phy_disable(struct msm_dsi_phy *phy) ...@@ -922,6 +942,92 @@ static void dsi_10nm_phy_disable(struct msm_dsi_phy *phy)
DBG("DSI%d PHY disabled", phy->id); DBG("DSI%d PHY disabled", phy->id);
} }
static int dsi_10nm_phy_parse_dt(struct msm_dsi_phy *phy)
{
struct device *dev = &phy->pdev->dev;
struct dsi_phy_10nm_tuning_cfg *tuning_cfg;
s8 offset_top[DSI_LANE_MAX] = { 0 }; /* No offset */
s8 offset_bot[DSI_LANE_MAX] = { 0 }; /* No offset */
u32 ldo_level = 400; /* 400mV */
u8 level;
int ret, i;
tuning_cfg = devm_kzalloc(dev, sizeof(*tuning_cfg), GFP_KERNEL);
if (!tuning_cfg)
return -ENOMEM;
/* Drive strength adjustment parameters */
ret = of_property_read_u8_array(dev->of_node, "qcom,phy-rescode-offset-top",
offset_top, DSI_LANE_MAX);
if (ret && ret != -EINVAL) {
DRM_DEV_ERROR(dev, "failed to parse qcom,phy-rescode-offset-top, %d\n", ret);
return ret;
}
for (i = 0; i < DSI_LANE_MAX; i++) {
if (offset_top[i] < -32 || offset_top[i] > 31) {
DRM_DEV_ERROR(dev,
"qcom,phy-rescode-offset-top value %d is not in range [-32..31]\n",
offset_top[i]);
return -EINVAL;
}
tuning_cfg->rescode_offset_top[i] = 0x3f & offset_top[i];
}
ret = of_property_read_u8_array(dev->of_node, "qcom,phy-rescode-offset-bot",
offset_bot, DSI_LANE_MAX);
if (ret && ret != -EINVAL) {
DRM_DEV_ERROR(dev, "failed to parse qcom,phy-rescode-offset-bot, %d\n", ret);
return ret;
}
for (i = 0; i < DSI_LANE_MAX; i++) {
if (offset_bot[i] < -32 || offset_bot[i] > 31) {
DRM_DEV_ERROR(dev,
"qcom,phy-rescode-offset-bot value %d is not in range [-32..31]\n",
offset_bot[i]);
return -EINVAL;
}
tuning_cfg->rescode_offset_bot[i] = 0x3f & offset_bot[i];
}
/* Drive level/amplitude adjustment parameters */
ret = of_property_read_u32(dev->of_node, "qcom,phy-drive-ldo-level", &ldo_level);
if (ret && ret != -EINVAL) {
DRM_DEV_ERROR(dev, "failed to parse qcom,phy-drive-ldo-level, %d\n", ret);
return ret;
}
switch (ldo_level) {
case 375:
level = 0;
break;
case 400:
level = 1;
break;
case 425:
level = 2;
break;
case 450:
level = 3;
break;
case 475:
level = 4;
break;
case 500:
level = 5;
break;
default:
DRM_DEV_ERROR(dev, "qcom,phy-drive-ldo-level %d is not supported\n", ldo_level);
return -EINVAL;
}
tuning_cfg->vreg_ctrl = 0x58 | (0x7 & level);
phy->tuning_cfg = tuning_cfg;
return 0;
}
const struct msm_dsi_phy_cfg dsi_phy_10nm_cfgs = { const struct msm_dsi_phy_cfg dsi_phy_10nm_cfgs = {
.has_phy_lane = true, .has_phy_lane = true,
.reg_cfg = { .reg_cfg = {
...@@ -936,6 +1042,7 @@ const struct msm_dsi_phy_cfg dsi_phy_10nm_cfgs = { ...@@ -936,6 +1042,7 @@ const struct msm_dsi_phy_cfg dsi_phy_10nm_cfgs = {
.pll_init = dsi_pll_10nm_init, .pll_init = dsi_pll_10nm_init,
.save_pll_state = dsi_10nm_pll_save_state, .save_pll_state = dsi_10nm_pll_save_state,
.restore_pll_state = dsi_10nm_pll_restore_state, .restore_pll_state = dsi_10nm_pll_restore_state,
.parse_dt_properties = dsi_10nm_phy_parse_dt,
}, },
.min_pll_rate = 1000000000UL, .min_pll_rate = 1000000000UL,
.max_pll_rate = 3500000000UL, .max_pll_rate = 3500000000UL,
...@@ -957,6 +1064,7 @@ const struct msm_dsi_phy_cfg dsi_phy_10nm_8998_cfgs = { ...@@ -957,6 +1064,7 @@ const struct msm_dsi_phy_cfg dsi_phy_10nm_8998_cfgs = {
.pll_init = dsi_pll_10nm_init, .pll_init = dsi_pll_10nm_init,
.save_pll_state = dsi_10nm_pll_save_state, .save_pll_state = dsi_10nm_pll_save_state,
.restore_pll_state = dsi_10nm_pll_restore_state, .restore_pll_state = dsi_10nm_pll_restore_state,
.parse_dt_properties = dsi_10nm_phy_parse_dt,
}, },
.min_pll_rate = 1000000000UL, .min_pll_rate = 1000000000UL,
.max_pll_rate = 3500000000UL, .max_pll_rate = 3500000000UL,
......
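The DT-supplied rescode offsets are signed values squeezed into 6-bit register fields, and the LDO level is encoded into the low three bits of VREG_CTRL on top of a 0x58 base. A small worked example under those assumptions (values are illustrative, not board data):

#include <linux/types.h>

static void dsi_10nm_tuning_encoding_demo(void)
{
        /* an offset of -2 becomes 0x3e once masked to 6 bits (two's complement) */
        u8 top = 0x3f & (s8)-2;

        /* the 400 mV default maps to encoding 1, i.e. VREG_CTRL = 0x59,
         * which is exactly the value the driver used to hard-code */
        u8 vreg_ctrl = 0x58 | (0x7 & 1);

        (void)top;
        (void)vreg_ctrl;
}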
...@@ -802,7 +802,9 @@ static int pll_14nm_register(struct dsi_pll_14nm *pll_14nm, struct clk_hw **prov ...@@ -802,7 +802,9 @@ static int pll_14nm_register(struct dsi_pll_14nm *pll_14nm, struct clk_hw **prov
{ {
char clk_name[32], parent[32], vco_name[32]; char clk_name[32], parent[32], vco_name[32];
struct clk_init_data vco_init = { struct clk_init_data vco_init = {
.parent_names = (const char *[]){ "xo" }, .parent_data = &(const struct clk_parent_data) {
.fw_name = "ref",
},
.num_parents = 1, .num_parents = 1,
.name = vco_name, .name = vco_name,
.flags = CLK_IGNORE_UNUSED, .flags = CLK_IGNORE_UNUSED,
......
...@@ -521,7 +521,9 @@ static int pll_28nm_register(struct dsi_pll_28nm *pll_28nm, struct clk_hw **prov ...@@ -521,7 +521,9 @@ static int pll_28nm_register(struct dsi_pll_28nm *pll_28nm, struct clk_hw **prov
{ {
char clk_name[32], parent1[32], parent2[32], vco_name[32]; char clk_name[32], parent1[32], parent2[32], vco_name[32];
struct clk_init_data vco_init = { struct clk_init_data vco_init = {
.parent_names = (const char *[]){ "xo" }, .parent_data = &(const struct clk_parent_data) {
.fw_name = "ref", .name = "xo",
},
.num_parents = 1, .num_parents = 1,
.name = vco_name, .name = vco_name,
.flags = CLK_IGNORE_UNUSED, .flags = CLK_IGNORE_UNUSED,
......
...@@ -385,7 +385,9 @@ static int pll_28nm_register(struct dsi_pll_28nm *pll_28nm, struct clk_hw **prov ...@@ -385,7 +385,9 @@ static int pll_28nm_register(struct dsi_pll_28nm *pll_28nm, struct clk_hw **prov
{ {
char *clk_name, *parent_name, *vco_name; char *clk_name, *parent_name, *vco_name;
struct clk_init_data vco_init = { struct clk_init_data vco_init = {
.parent_names = (const char *[]){ "pxo" }, .parent_data = &(const struct clk_parent_data) {
.fw_name = "ref",
},
.num_parents = 1, .num_parents = 1,
.flags = CLK_IGNORE_UNUSED, .flags = CLK_IGNORE_UNUSED,
.ops = &clk_ops_dsi_pll_28nm_vco, .ops = &clk_ops_dsi_pll_28nm_vco,
......
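The PLL registration hunks above drop the hard-coded global parent names ("xo", "pxo") in favour of clk_parent_data with .fw_name = "ref", so the reference clock is resolved through the PHY node's clocks/clock-names properties; the 28nm PLL additionally keeps .name = "xo" as a legacy fallback, and the 7nm PLL below gets the same conversion. A minimal sketch of the pattern, with an illustrative clock name:

#include <linux/clk-provider.h>

/* sketch: the VCO parent is the "ref" clock named in this device's DT node */
static const struct clk_parent_data dsi_vco_parent_sketch = {
        .fw_name = "ref",
        .name = "xo",           /* optional fallback by global clock name */
};

static const struct clk_init_data dsi_vco_init_sketch = {
        .name = "dsi_vco_clk_sketch",
        .parent_data = &dsi_vco_parent_sketch,
        .num_parents = 1,
        .flags = CLK_IGNORE_UNUSED,
        /* .ops would point at the PLL's VCO clk_ops in a real driver */
};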
...@@ -588,7 +588,9 @@ static int pll_7nm_register(struct dsi_pll_7nm *pll_7nm, struct clk_hw **provide ...@@ -588,7 +588,9 @@ static int pll_7nm_register(struct dsi_pll_7nm *pll_7nm, struct clk_hw **provide
char clk_name[32], parent[32], vco_name[32]; char clk_name[32], parent[32], vco_name[32];
char parent2[32], parent3[32], parent4[32]; char parent2[32], parent3[32], parent4[32];
struct clk_init_data vco_init = { struct clk_init_data vco_init = {
.parent_names = (const char *[]){ "bi_tcxo" }, .parent_data = &(const struct clk_parent_data) {
.fw_name = "ref",
},
.num_parents = 1, .num_parents = 1,
.name = vco_name, .name = vco_name,
.flags = CLK_IGNORE_UNUSED, .flags = CLK_IGNORE_UNUSED,
...@@ -862,20 +864,26 @@ static int dsi_7nm_phy_enable(struct msm_dsi_phy *phy, ...@@ -862,20 +864,26 @@ static int dsi_7nm_phy_enable(struct msm_dsi_phy *phy,
/* Alter PHY configurations if data rate less than 1.5GHZ*/ /* Alter PHY configurations if data rate less than 1.5GHZ*/
less_than_1500_mhz = (clk_req->bitclk_rate <= 1500000000); less_than_1500_mhz = (clk_req->bitclk_rate <= 1500000000);
/* For C-PHY, no low power settings for lower clk rate */
if (phy->cphy_mode)
less_than_1500_mhz = false;
if (phy->cfg->quirks & DSI_PHY_7NM_QUIRK_V4_1) { if (phy->cfg->quirks & DSI_PHY_7NM_QUIRK_V4_1) {
vreg_ctrl_0 = less_than_1500_mhz ? 0x53 : 0x52; vreg_ctrl_0 = less_than_1500_mhz ? 0x53 : 0x52;
glbl_rescode_top_ctrl = less_than_1500_mhz ? 0x3d : 0x00; if (phy->cphy_mode) {
glbl_rescode_bot_ctrl = less_than_1500_mhz ? 0x39 : 0x3c; glbl_rescode_top_ctrl = 0x00;
glbl_rescode_bot_ctrl = 0x3c;
} else {
glbl_rescode_top_ctrl = less_than_1500_mhz ? 0x3d : 0x00;
glbl_rescode_bot_ctrl = less_than_1500_mhz ? 0x39 : 0x3c;
}
glbl_str_swi_cal_sel_ctrl = 0x00; glbl_str_swi_cal_sel_ctrl = 0x00;
glbl_hstx_str_ctrl_0 = 0x88; glbl_hstx_str_ctrl_0 = 0x88;
} else { } else {
vreg_ctrl_0 = less_than_1500_mhz ? 0x5B : 0x59; vreg_ctrl_0 = less_than_1500_mhz ? 0x5B : 0x59;
glbl_str_swi_cal_sel_ctrl = less_than_1500_mhz ? 0x03 : 0x00; if (phy->cphy_mode) {
glbl_hstx_str_ctrl_0 = less_than_1500_mhz ? 0x66 : 0x88; glbl_str_swi_cal_sel_ctrl = 0x03;
glbl_hstx_str_ctrl_0 = 0x66;
} else {
glbl_str_swi_cal_sel_ctrl = less_than_1500_mhz ? 0x03 : 0x00;
glbl_hstx_str_ctrl_0 = less_than_1500_mhz ? 0x66 : 0x88;
}
glbl_rescode_top_ctrl = 0x03; glbl_rescode_top_ctrl = 0x03;
glbl_rescode_bot_ctrl = 0x3c; glbl_rescode_bot_ctrl = 0x3c;
} }
......
...@@ -305,7 +305,7 @@ int msm_hdmi_modeset_init(struct hdmi *hdmi, ...@@ -305,7 +305,7 @@ int msm_hdmi_modeset_init(struct hdmi *hdmi,
} }
ret = devm_request_irq(&pdev->dev, hdmi->irq, ret = devm_request_irq(&pdev->dev, hdmi->irq,
msm_hdmi_irq, IRQF_TRIGGER_HIGH | IRQF_ONESHOT, msm_hdmi_irq, IRQF_TRIGGER_HIGH,
"hdmi_isr", hdmi); "hdmi_isr", hdmi);
if (ret < 0) { if (ret < 0) {
DRM_DEV_ERROR(dev->dev, "failed to request IRQ%u: %d\n", DRM_DEV_ERROR(dev->dev, "failed to request IRQ%u: %d\n",
......
...@@ -1281,9 +1281,11 @@ static void msm_pdev_shutdown(struct platform_device *pdev) ...@@ -1281,9 +1281,11 @@ static void msm_pdev_shutdown(struct platform_device *pdev)
static const struct of_device_id dt_match[] = { static const struct of_device_id dt_match[] = {
{ .compatible = "qcom,mdp4", .data = (void *)KMS_MDP4 }, { .compatible = "qcom,mdp4", .data = (void *)KMS_MDP4 },
{ .compatible = "qcom,mdss", .data = (void *)KMS_MDP5 }, { .compatible = "qcom,mdss", .data = (void *)KMS_MDP5 },
{ .compatible = "qcom,msm8998-mdss", .data = (void *)KMS_DPU },
{ .compatible = "qcom,sdm845-mdss", .data = (void *)KMS_DPU }, { .compatible = "qcom,sdm845-mdss", .data = (void *)KMS_DPU },
{ .compatible = "qcom,sc7180-mdss", .data = (void *)KMS_DPU }, { .compatible = "qcom,sc7180-mdss", .data = (void *)KMS_DPU },
{ .compatible = "qcom,sc7280-mdss", .data = (void *)KMS_DPU }, { .compatible = "qcom,sc7280-mdss", .data = (void *)KMS_DPU },
{ .compatible = "qcom,sc8180x-mdss", .data = (void *)KMS_DPU },
{ .compatible = "qcom,sm8150-mdss", .data = (void *)KMS_DPU }, { .compatible = "qcom,sm8150-mdss", .data = (void *)KMS_DPU },
{ .compatible = "qcom,sm8250-mdss", .data = (void *)KMS_DPU }, { .compatible = "qcom,sm8250-mdss", .data = (void *)KMS_DPU },
{} {}
......
...@@ -109,24 +109,6 @@ struct msm_display_topology { ...@@ -109,24 +109,6 @@ struct msm_display_topology {
u32 num_dspp; u32 num_dspp;
}; };
/**
* struct msm_display_info - defines display properties
* @intf_type: DRM_MODE_ENCODER_ type
* @capabilities: Bitmask of display flags
* @num_of_h_tiles: Number of horizontal tiles in case of split interface
* @h_tile_instance: Controller instance used per tile. Number of elements is
* based on num_of_h_tiles
* @is_te_using_watchdog_timer: Boolean to indicate watchdog TE is
* used instead of panel TE in cmd mode panels
*/
struct msm_display_info {
int intf_type;
uint32_t capabilities;
uint32_t num_of_h_tiles;
uint32_t h_tile_instance[MAX_H_TILES_PER_DISPLAY];
bool is_te_using_watchdog_timer;
};
/* Commit/Event thread specific structure */ /* Commit/Event thread specific structure */
struct msm_drm_thread { struct msm_drm_thread {
struct drm_device *dev; struct drm_device *dev;
......