Commit a5f2fafe authored by Dave Airlie's avatar Dave Airlie

Merge https://gitlab.freedesktop.org/drm/msm into drm-next

On the display side, cleanups and fixes to enabled modifiers
(QCOM_COMPRESSED).  And otherwise mostly misc fixes all around.
Signed-off-by: Dave Airlie <airlied@redhat.com>
From: Rob Clark <robdclark@gmail.com>
Link: https://patchwork.freedesktop.org/patch/msgid/CAF6AEGuZ5uBKpf=fHvKpTiD10nychuEY8rnE+HeRz0QMvtY5_A@mail.gmail.com
parents 71f4e45a 860433ed
Qualcomm adreno/snapdragon GMU (Graphics management unit)
The GMU is a programmable power controller for the GPU. The CPU controls the
GMU, which in turn handles power controls for the GPU.
Required properties:
- compatible: "qcom,adreno-gmu-XYZ.W", "qcom,adreno-gmu"
for example: "qcom,adreno-gmu-630.2", "qcom,adreno-gmu"
Note that you need to list the less specific "qcom,adreno-gmu"
for generic matches and the more specific identifier to identify
the specific device.
- reg: Physical base address and length of the GMU registers.
- reg-names: Matching names for the register regions
* "gmu"
* "gmu_pdc"
* "gmu_pdc_seq"
- interrupts: The interrupt signals from the GMU.
- interrupt-names: Matching names for the interrupts
* "hfi"
* "gmu"
- clocks: phandles to the device clocks
- clock-names: Matching names for the clocks
* "gmu"
* "cxo"
* "axi"
* "memnoc"
- power-domains: should be <&clock_gpucc GPU_CX_GDSC>
- iommus: phandle to the adreno iommu
- operating-points-v2: phandle to the OPP operating points
Example:
/ {
...
gmu: gmu@506a000 {
compatible = "qcom,adreno-gmu-630.2", "qcom,adreno-gmu";
reg = <0x506a000 0x30000>,
<0xb280000 0x10000>,
<0xb480000 0x10000>;
reg-names = "gmu", "gmu_pdc", "gmu_pdc_seq";
interrupts = <GIC_SPI 304 IRQ_TYPE_LEVEL_HIGH>,
<GIC_SPI 305 IRQ_TYPE_LEVEL_HIGH>;
interrupt-names = "hfi", "gmu";
clocks = <&gpucc GPU_CC_CX_GMU_CLK>,
<&gpucc GPU_CC_CXO_CLK>,
<&gcc GCC_DDRSS_GPU_AXI_CLK>,
<&gcc GCC_GPU_MEMNOC_GFX_CLK>;
clock-names = "gmu", "cxo", "axi", "memnoc";
power-domains = <&gpucc GPU_CX_GDSC>;
iommus = <&adreno_smmu 5>;
operating-points-v2 = <&gmu_opp_table>;
};
};
...@@ -10,14 +10,23 @@ Required properties: ...@@ -10,14 +10,23 @@ Required properties:
If "amd,imageon" is used, there should be no top level msm device. If "amd,imageon" is used, there should be no top level msm device.
- reg: Physical base address and length of the controller's registers. - reg: Physical base address and length of the controller's registers.
- interrupts: The interrupt signal from the gpu. - interrupts: The interrupt signal from the gpu.
- clocks: device clocks - clocks: device clocks (if applicable)
See ../clocks/clock-bindings.txt for details. See ../clocks/clock-bindings.txt for details.
- clock-names: the following clocks are required: - clock-names: the following clocks are required by a3xx, a4xx and a5xx
cores:
* "core" * "core"
* "iface" * "iface"
* "mem_iface" * "mem_iface"
For GMU attached devices the GPU clocks are not used and are not required. The
following devices should not list clocks:
- qcom,adreno-630.2
- iommus: optional phandle to an adreno iommu instance
- operating-points-v2: optional phandle to the OPP operating points
- qcom,gmu: For GMU attached devices a phandle to the GMU device that will
control the power for the GPU. Applicable targets:
- qcom,adreno-630.2
Example: Example 3xx/4xx/a5xx:
/ { / {
... ...
...@@ -37,3 +46,30 @@ Example: ...@@ -37,3 +46,30 @@ Example:
<&mmcc MMSS_IMEM_AHB_CLK>; <&mmcc MMSS_IMEM_AHB_CLK>;
}; };
}; };
Example a6xx (with GMU):
/ {
...
gpu@5000000 {
compatible = "qcom,adreno-630.2", "qcom,adreno";
#stream-id-cells = <16>;
reg = <0x5000000 0x40000>, <0x509e000 0x10>;
reg-names = "kgsl_3d0_reg_memory", "cx_mem";
/*
* Look ma, no clocks! The GPU clocks and power are
* controlled entirely by the GMU
*/
interrupts = <GIC_SPI 300 IRQ_TYPE_LEVEL_HIGH>;
iommus = <&adreno_smmu 0>;
operating-points-v2 = <&gpu_opp_table>;
qcom,gmu = <&gmu>;
};
};
...@@ -4851,10 +4851,11 @@ F: Documentation/devicetree/bindings/display/multi-inno,mi0283qt.txt ...@@ -4851,10 +4851,11 @@ F: Documentation/devicetree/bindings/display/multi-inno,mi0283qt.txt
DRM DRIVER FOR MSM ADRENO GPU DRM DRIVER FOR MSM ADRENO GPU
M: Rob Clark <robdclark@gmail.com> M: Rob Clark <robdclark@gmail.com>
M: Sean Paul <sean@poorly.run>
L: linux-arm-msm@vger.kernel.org L: linux-arm-msm@vger.kernel.org
L: dri-devel@lists.freedesktop.org L: dri-devel@lists.freedesktop.org
L: freedreno@lists.freedesktop.org L: freedreno@lists.freedesktop.org
T: git git://people.freedesktop.org/~robclark/linux T: git https://gitlab.freedesktop.org/drm/msm.git
S: Maintained S: Maintained
F: drivers/gpu/drm/msm/ F: drivers/gpu/drm/msm/
F: include/uapi/drm/msm_drm.h F: include/uapi/drm/msm_drm.h
......
...@@ -465,8 +465,6 @@ static void _dpu_crtc_setup_mixer_for_encoder( ...@@ -465,8 +465,6 @@ static void _dpu_crtc_setup_mixer_for_encoder(
return; return;
} }
mixer->encoder = enc;
cstate->num_mixers++; cstate->num_mixers++;
DPU_DEBUG("setup mixer %d: lm %d\n", DPU_DEBUG("setup mixer %d: lm %d\n",
i, mixer->hw_lm->idx - LM_0); i, mixer->hw_lm->idx - LM_0);
...@@ -718,11 +716,8 @@ void dpu_crtc_commit_kickoff(struct drm_crtc *crtc, bool async) ...@@ -718,11 +716,8 @@ void dpu_crtc_commit_kickoff(struct drm_crtc *crtc, bool async)
* may delay and flush at an irq event (e.g. ppdone) * may delay and flush at an irq event (e.g. ppdone)
*/ */
drm_for_each_encoder_mask(encoder, crtc->dev, drm_for_each_encoder_mask(encoder, crtc->dev,
crtc->state->encoder_mask) { crtc->state->encoder_mask)
struct dpu_encoder_kickoff_params params = { 0 }; dpu_encoder_prepare_for_kickoff(encoder, async);
dpu_encoder_prepare_for_kickoff(encoder, &params, async);
}
if (!async) { if (!async) {
/* wait for frame_event_done completion */ /* wait for frame_event_done completion */
......
...@@ -84,14 +84,12 @@ struct dpu_crtc_smmu_state_data { ...@@ -84,14 +84,12 @@ struct dpu_crtc_smmu_state_data {
* struct dpu_crtc_mixer: stores the map for each virtual pipeline in the CRTC * struct dpu_crtc_mixer: stores the map for each virtual pipeline in the CRTC
* @hw_lm: LM HW Driver context * @hw_lm: LM HW Driver context
* @lm_ctl: CTL Path HW driver context * @lm_ctl: CTL Path HW driver context
* @encoder: Encoder attached to this lm & ctl
* @mixer_op_mode: mixer blending operation mode * @mixer_op_mode: mixer blending operation mode
* @flush_mask: mixer flush mask for ctl, mixer and pipe * @flush_mask: mixer flush mask for ctl, mixer and pipe
*/ */
struct dpu_crtc_mixer { struct dpu_crtc_mixer {
struct dpu_hw_mixer *hw_lm; struct dpu_hw_mixer *hw_lm;
struct dpu_hw_ctl *lm_ctl; struct dpu_hw_ctl *lm_ctl;
struct drm_encoder *encoder;
u32 mixer_op_mode; u32 mixer_op_mode;
u32 flush_mask; u32 flush_mask;
}; };
......
...@@ -205,7 +205,7 @@ struct dpu_encoder_virt { ...@@ -205,7 +205,7 @@ struct dpu_encoder_virt {
bool idle_pc_supported; bool idle_pc_supported;
struct mutex rc_lock; struct mutex rc_lock;
enum dpu_enc_rc_states rc_state; enum dpu_enc_rc_states rc_state;
struct kthread_delayed_work delayed_off_work; struct delayed_work delayed_off_work;
struct kthread_work vsync_event_work; struct kthread_work vsync_event_work;
struct msm_display_topology topology; struct msm_display_topology topology;
bool mode_set_complete; bool mode_set_complete;
...@@ -742,7 +742,6 @@ static int dpu_encoder_resource_control(struct drm_encoder *drm_enc, ...@@ -742,7 +742,6 @@ static int dpu_encoder_resource_control(struct drm_encoder *drm_enc,
{ {
struct dpu_encoder_virt *dpu_enc; struct dpu_encoder_virt *dpu_enc;
struct msm_drm_private *priv; struct msm_drm_private *priv;
struct msm_drm_thread *disp_thread;
bool is_vid_mode = false; bool is_vid_mode = false;
if (!drm_enc || !drm_enc->dev || !drm_enc->dev->dev_private || if (!drm_enc || !drm_enc->dev || !drm_enc->dev->dev_private ||
...@@ -755,12 +754,6 @@ static int dpu_encoder_resource_control(struct drm_encoder *drm_enc, ...@@ -755,12 +754,6 @@ static int dpu_encoder_resource_control(struct drm_encoder *drm_enc,
is_vid_mode = dpu_enc->disp_info.capabilities & is_vid_mode = dpu_enc->disp_info.capabilities &
MSM_DISPLAY_CAP_VID_MODE; MSM_DISPLAY_CAP_VID_MODE;
if (drm_enc->crtc->index >= ARRAY_SIZE(priv->disp_thread)) {
DPU_ERROR("invalid crtc index\n");
return -EINVAL;
}
disp_thread = &priv->disp_thread[drm_enc->crtc->index];
/* /*
* when idle_pc is not supported, process only KICKOFF, STOP and MODESET * when idle_pc is not supported, process only KICKOFF, STOP and MODESET
* events and return early for other events (ie wb display). * events and return early for other events (ie wb display).
...@@ -777,8 +770,7 @@ static int dpu_encoder_resource_control(struct drm_encoder *drm_enc, ...@@ -777,8 +770,7 @@ static int dpu_encoder_resource_control(struct drm_encoder *drm_enc,
switch (sw_event) { switch (sw_event) {
case DPU_ENC_RC_EVENT_KICKOFF: case DPU_ENC_RC_EVENT_KICKOFF:
/* cancel delayed off work, if any */ /* cancel delayed off work, if any */
if (kthread_cancel_delayed_work_sync( if (cancel_delayed_work_sync(&dpu_enc->delayed_off_work))
&dpu_enc->delayed_off_work))
DPU_DEBUG_ENC(dpu_enc, "sw_event:%d, work cancelled\n", DPU_DEBUG_ENC(dpu_enc, "sw_event:%d, work cancelled\n",
sw_event); sw_event);
...@@ -837,10 +829,8 @@ static int dpu_encoder_resource_control(struct drm_encoder *drm_enc, ...@@ -837,10 +829,8 @@ static int dpu_encoder_resource_control(struct drm_encoder *drm_enc,
return 0; return 0;
} }
kthread_queue_delayed_work( queue_delayed_work(priv->wq, &dpu_enc->delayed_off_work,
&disp_thread->worker, msecs_to_jiffies(dpu_enc->idle_timeout));
&dpu_enc->delayed_off_work,
msecs_to_jiffies(dpu_enc->idle_timeout));
trace_dpu_enc_rc(DRMID(drm_enc), sw_event, trace_dpu_enc_rc(DRMID(drm_enc), sw_event,
dpu_enc->idle_pc_supported, dpu_enc->rc_state, dpu_enc->idle_pc_supported, dpu_enc->rc_state,
...@@ -849,8 +839,7 @@ static int dpu_encoder_resource_control(struct drm_encoder *drm_enc, ...@@ -849,8 +839,7 @@ static int dpu_encoder_resource_control(struct drm_encoder *drm_enc,
case DPU_ENC_RC_EVENT_PRE_STOP: case DPU_ENC_RC_EVENT_PRE_STOP:
/* cancel delayed off work, if any */ /* cancel delayed off work, if any */
if (kthread_cancel_delayed_work_sync( if (cancel_delayed_work_sync(&dpu_enc->delayed_off_work))
&dpu_enc->delayed_off_work))
DPU_DEBUG_ENC(dpu_enc, "sw_event:%d, work cancelled\n", DPU_DEBUG_ENC(dpu_enc, "sw_event:%d, work cancelled\n",
sw_event); sw_event);
...@@ -1368,7 +1357,7 @@ static void dpu_encoder_frame_done_callback( ...@@ -1368,7 +1357,7 @@ static void dpu_encoder_frame_done_callback(
} }
} }
static void dpu_encoder_off_work(struct kthread_work *work) static void dpu_encoder_off_work(struct work_struct *work)
{ {
struct dpu_encoder_virt *dpu_enc = container_of(work, struct dpu_encoder_virt *dpu_enc = container_of(work,
struct dpu_encoder_virt, delayed_off_work.work); struct dpu_encoder_virt, delayed_off_work.work);
...@@ -1756,15 +1745,14 @@ static void dpu_encoder_vsync_event_work_handler(struct kthread_work *work) ...@@ -1756,15 +1745,14 @@ static void dpu_encoder_vsync_event_work_handler(struct kthread_work *work)
nsecs_to_jiffies(ktime_to_ns(wakeup_time))); nsecs_to_jiffies(ktime_to_ns(wakeup_time)));
} }
void dpu_encoder_prepare_for_kickoff(struct drm_encoder *drm_enc, void dpu_encoder_prepare_for_kickoff(struct drm_encoder *drm_enc, bool async)
struct dpu_encoder_kickoff_params *params, bool async)
{ {
struct dpu_encoder_virt *dpu_enc; struct dpu_encoder_virt *dpu_enc;
struct dpu_encoder_phys *phys; struct dpu_encoder_phys *phys;
bool needs_hw_reset = false; bool needs_hw_reset = false;
unsigned int i; unsigned int i;
if (!drm_enc || !params) { if (!drm_enc) {
DPU_ERROR("invalid args\n"); DPU_ERROR("invalid args\n");
return; return;
} }
...@@ -1778,7 +1766,7 @@ void dpu_encoder_prepare_for_kickoff(struct drm_encoder *drm_enc, ...@@ -1778,7 +1766,7 @@ void dpu_encoder_prepare_for_kickoff(struct drm_encoder *drm_enc,
phys = dpu_enc->phys_encs[i]; phys = dpu_enc->phys_encs[i];
if (phys) { if (phys) {
if (phys->ops.prepare_for_kickoff) if (phys->ops.prepare_for_kickoff)
phys->ops.prepare_for_kickoff(phys, params); phys->ops.prepare_for_kickoff(phys);
if (phys->enable_state == DPU_ENC_ERR_NEEDS_HW_RESET) if (phys->enable_state == DPU_ENC_ERR_NEEDS_HW_RESET)
needs_hw_reset = true; needs_hw_reset = true;
} }
...@@ -2193,7 +2181,7 @@ int dpu_encoder_setup(struct drm_device *dev, struct drm_encoder *enc, ...@@ -2193,7 +2181,7 @@ int dpu_encoder_setup(struct drm_device *dev, struct drm_encoder *enc,
mutex_init(&dpu_enc->rc_lock); mutex_init(&dpu_enc->rc_lock);
kthread_init_delayed_work(&dpu_enc->delayed_off_work, INIT_DELAYED_WORK(&dpu_enc->delayed_off_work,
dpu_encoder_off_work); dpu_encoder_off_work);
dpu_enc->idle_timeout = IDLE_TIMEOUT; dpu_enc->idle_timeout = IDLE_TIMEOUT;
......
...@@ -37,15 +37,6 @@ struct dpu_encoder_hw_resources { ...@@ -37,15 +37,6 @@ struct dpu_encoder_hw_resources {
enum dpu_intf_mode intfs[INTF_MAX]; enum dpu_intf_mode intfs[INTF_MAX];
}; };
/**
* dpu_encoder_kickoff_params - info encoder requires at kickoff
* @affected_displays: bitmask, bit set means the ROI of the commit lies within
* the bounds of the physical display at the bit index
*/
struct dpu_encoder_kickoff_params {
unsigned long affected_displays;
};
/** /**
* dpu_encoder_get_hw_resources - Populate table of required hardware resources * dpu_encoder_get_hw_resources - Populate table of required hardware resources
* @encoder: encoder pointer * @encoder: encoder pointer
...@@ -88,11 +79,9 @@ void dpu_encoder_register_frame_event_callback(struct drm_encoder *encoder, ...@@ -88,11 +79,9 @@ void dpu_encoder_register_frame_event_callback(struct drm_encoder *encoder,
* Immediately: if no previous commit is outstanding. * Immediately: if no previous commit is outstanding.
* Delayed: Block until next trigger can be issued. * Delayed: Block until next trigger can be issued.
* @encoder: encoder pointer * @encoder: encoder pointer
* @params: kickoff time parameters
* @async: true if this is an asynchronous commit * @async: true if this is an asynchronous commit
*/ */
void dpu_encoder_prepare_for_kickoff(struct drm_encoder *encoder, void dpu_encoder_prepare_for_kickoff(struct drm_encoder *encoder, bool async);
struct dpu_encoder_kickoff_params *params, bool async);
/** /**
* dpu_encoder_trigger_kickoff_pending - Clear the flush bits from previous * dpu_encoder_trigger_kickoff_pending - Clear the flush bits from previous
......
...@@ -144,8 +144,7 @@ struct dpu_encoder_phys_ops { ...@@ -144,8 +144,7 @@ struct dpu_encoder_phys_ops {
int (*wait_for_commit_done)(struct dpu_encoder_phys *phys_enc); int (*wait_for_commit_done)(struct dpu_encoder_phys *phys_enc);
int (*wait_for_tx_complete)(struct dpu_encoder_phys *phys_enc); int (*wait_for_tx_complete)(struct dpu_encoder_phys *phys_enc);
int (*wait_for_vblank)(struct dpu_encoder_phys *phys_enc); int (*wait_for_vblank)(struct dpu_encoder_phys *phys_enc);
void (*prepare_for_kickoff)(struct dpu_encoder_phys *phys_enc, void (*prepare_for_kickoff)(struct dpu_encoder_phys *phys_enc);
struct dpu_encoder_kickoff_params *params);
void (*handle_post_kickoff)(struct dpu_encoder_phys *phys_enc); void (*handle_post_kickoff)(struct dpu_encoder_phys *phys_enc);
void (*trigger_start)(struct dpu_encoder_phys *phys_enc); void (*trigger_start)(struct dpu_encoder_phys *phys_enc);
bool (*needs_single_flush)(struct dpu_encoder_phys *phys_enc); bool (*needs_single_flush)(struct dpu_encoder_phys *phys_enc);
......
...@@ -594,8 +594,7 @@ static void dpu_encoder_phys_cmd_get_hw_resources( ...@@ -594,8 +594,7 @@ static void dpu_encoder_phys_cmd_get_hw_resources(
} }
static void dpu_encoder_phys_cmd_prepare_for_kickoff( static void dpu_encoder_phys_cmd_prepare_for_kickoff(
struct dpu_encoder_phys *phys_enc, struct dpu_encoder_phys *phys_enc)
struct dpu_encoder_kickoff_params *params)
{ {
struct dpu_encoder_phys_cmd *cmd_enc = struct dpu_encoder_phys_cmd *cmd_enc =
to_dpu_encoder_phys_cmd(phys_enc); to_dpu_encoder_phys_cmd(phys_enc);
...@@ -693,7 +692,7 @@ static int dpu_encoder_phys_cmd_wait_for_commit_done( ...@@ -693,7 +692,7 @@ static int dpu_encoder_phys_cmd_wait_for_commit_done(
/* required for both controllers */ /* required for both controllers */
if (!rc && cmd_enc->serialize_wait4pp) if (!rc && cmd_enc->serialize_wait4pp)
dpu_encoder_phys_cmd_prepare_for_kickoff(phys_enc, NULL); dpu_encoder_phys_cmd_prepare_for_kickoff(phys_enc);
return rc; return rc;
} }
......
...@@ -587,14 +587,13 @@ static int dpu_encoder_phys_vid_wait_for_vblank( ...@@ -587,14 +587,13 @@ static int dpu_encoder_phys_vid_wait_for_vblank(
} }
static void dpu_encoder_phys_vid_prepare_for_kickoff( static void dpu_encoder_phys_vid_prepare_for_kickoff(
struct dpu_encoder_phys *phys_enc, struct dpu_encoder_phys *phys_enc)
struct dpu_encoder_kickoff_params *params)
{ {
struct dpu_encoder_phys_vid *vid_enc; struct dpu_encoder_phys_vid *vid_enc;
struct dpu_hw_ctl *ctl; struct dpu_hw_ctl *ctl;
int rc; int rc;
if (!phys_enc || !params) { if (!phys_enc) {
DPU_ERROR("invalid encoder/parameters\n"); DPU_ERROR("invalid encoder/parameters\n");
return; return;
} }
......
...@@ -263,13 +263,13 @@ static const struct dpu_format dpu_format_map[] = { ...@@ -263,13 +263,13 @@ static const struct dpu_format dpu_format_map[] = {
INTERLEAVED_RGB_FMT(RGB565, INTERLEAVED_RGB_FMT(RGB565,
0, COLOR_5BIT, COLOR_6BIT, COLOR_5BIT, 0, COLOR_5BIT, COLOR_6BIT, COLOR_5BIT,
C1_B_Cb, C0_G_Y, C2_R_Cr, 0, 3, C2_R_Cr, C0_G_Y, C1_B_Cb, 0, 3,
false, 2, 0, false, 2, 0,
DPU_FETCH_LINEAR, 1), DPU_FETCH_LINEAR, 1),
INTERLEAVED_RGB_FMT(BGR565, INTERLEAVED_RGB_FMT(BGR565,
0, COLOR_5BIT, COLOR_6BIT, COLOR_5BIT, 0, COLOR_5BIT, COLOR_6BIT, COLOR_5BIT,
C2_R_Cr, C0_G_Y, C1_B_Cb, 0, 3, C1_B_Cb, C0_G_Y, C2_R_Cr, 0, 3,
false, 2, 0, false, 2, 0,
DPU_FETCH_LINEAR, 1), DPU_FETCH_LINEAR, 1),
...@@ -1137,36 +1137,3 @@ const struct msm_format *dpu_get_msm_format( ...@@ -1137,36 +1137,3 @@ const struct msm_format *dpu_get_msm_format(
return &fmt->base; return &fmt->base;
return NULL; return NULL;
} }
uint32_t dpu_populate_formats(
const struct dpu_format_extended *format_list,
uint32_t *pixel_formats,
uint64_t *pixel_modifiers,
uint32_t pixel_formats_max)
{
uint32_t i, fourcc_format;
if (!format_list || !pixel_formats)
return 0;
for (i = 0, fourcc_format = 0;
format_list->fourcc_format && i < pixel_formats_max;
++format_list) {
/* verify if listed format is in dpu_format_map? */
/* optionally return modified formats */
if (pixel_modifiers) {
/* assume same modifier for all fb planes */
pixel_formats[i] = format_list->fourcc_format;
pixel_modifiers[i++] = format_list->modifier;
} else {
/* assume base formats grouped together */
if (fourcc_format != format_list->fourcc_format) {
fourcc_format = format_list->fourcc_format;
pixel_formats[i++] = fourcc_format;
}
}
}
return i;
}
...@@ -40,20 +40,6 @@ const struct msm_format *dpu_get_msm_format( ...@@ -40,20 +40,6 @@ const struct msm_format *dpu_get_msm_format(
const uint32_t format, const uint32_t format,
const uint64_t modifiers); const uint64_t modifiers);
/**
* dpu_populate_formats - populate the given array with fourcc codes supported
* @format_list: pointer to list of possible formats
* @pixel_formats: array to populate with fourcc codes
* @pixel_modifiers: array to populate with drm modifiers, can be NULL
* @pixel_formats_max: length of pixel formats array
* Return: number of elements populated
*/
uint32_t dpu_populate_formats(
const struct dpu_format_extended *format_list,
uint32_t *pixel_formats,
uint64_t *pixel_modifiers,
uint32_t pixel_formats_max);
/** /**
* dpu_format_check_modified_format - validate format and buffers for * dpu_format_check_modified_format - validate format and buffers for
* dpu non-standard, i.e. modified format * dpu non-standard, i.e. modified format
......
...@@ -151,7 +151,9 @@ static const struct dpu_sspp_blks_common sdm845_sspp_common = { ...@@ -151,7 +151,9 @@ static const struct dpu_sspp_blks_common sdm845_sspp_common = {
.id = DPU_SSPP_CSC_10BIT, \ .id = DPU_SSPP_CSC_10BIT, \
.base = 0x1a00, .len = 0x100,}, \ .base = 0x1a00, .len = 0x100,}, \
.format_list = plane_formats_yuv, \ .format_list = plane_formats_yuv, \
.num_formats = ARRAY_SIZE(plane_formats_yuv), \
.virt_format_list = plane_formats, \ .virt_format_list = plane_formats, \
.virt_num_formats = ARRAY_SIZE(plane_formats), \
} }
#define _DMA_SBLK(num, sdma_pri) \ #define _DMA_SBLK(num, sdma_pri) \
...@@ -163,7 +165,9 @@ static const struct dpu_sspp_blks_common sdm845_sspp_common = { ...@@ -163,7 +165,9 @@ static const struct dpu_sspp_blks_common sdm845_sspp_common = {
.src_blk = {.name = STRCAT("sspp_src_", num), \ .src_blk = {.name = STRCAT("sspp_src_", num), \
.id = DPU_SSPP_SRC, .base = 0x00, .len = 0x150,}, \ .id = DPU_SSPP_SRC, .base = 0x00, .len = 0x150,}, \
.format_list = plane_formats, \ .format_list = plane_formats, \
.num_formats = ARRAY_SIZE(plane_formats), \
.virt_format_list = plane_formats, \ .virt_format_list = plane_formats, \
.virt_num_formats = ARRAY_SIZE(plane_formats), \
} }
static const struct dpu_sspp_sub_blks sdm845_vig_sblk_0 = _VIG_SBLK("0", 5); static const struct dpu_sspp_sub_blks sdm845_vig_sblk_0 = _VIG_SBLK("0", 5);
......
...@@ -251,17 +251,6 @@ struct dpu_pp_blk { ...@@ -251,17 +251,6 @@ struct dpu_pp_blk {
u32 version; u32 version;
}; };
/**
* struct dpu_format_extended - define dpu specific pixel format+modifier
* @fourcc_format: Base FOURCC pixel format code
* @modifier: 64-bit drm format modifier, same modifier must be applied to all
* framebuffer planes
*/
struct dpu_format_extended {
uint32_t fourcc_format;
uint64_t modifier;
};
/** /**
* enum dpu_qos_lut_usage - define QoS LUT use cases * enum dpu_qos_lut_usage - define QoS LUT use cases
*/ */
...@@ -348,7 +337,9 @@ struct dpu_sspp_blks_common { ...@@ -348,7 +337,9 @@ struct dpu_sspp_blks_common {
* @pcc_blk: * @pcc_blk:
* @igc_blk: * @igc_blk:
* @format_list: Pointer to list of supported formats * @format_list: Pointer to list of supported formats
* @num_formats: Number of supported formats
* @virt_format_list: Pointer to list of supported formats for virtual planes * @virt_format_list: Pointer to list of supported formats for virtual planes
* @virt_num_formats: Number of supported formats for virtual planes
*/ */
struct dpu_sspp_sub_blks { struct dpu_sspp_sub_blks {
const struct dpu_sspp_blks_common *common; const struct dpu_sspp_blks_common *common;
...@@ -366,8 +357,10 @@ struct dpu_sspp_sub_blks { ...@@ -366,8 +357,10 @@ struct dpu_sspp_sub_blks {
struct dpu_pp_blk pcc_blk; struct dpu_pp_blk pcc_blk;
struct dpu_pp_blk igc_blk; struct dpu_pp_blk igc_blk;
const struct dpu_format_extended *format_list; const u32 *format_list;
const struct dpu_format_extended *virt_format_list; u32 num_formats;
const u32 *virt_format_list;
u32 virt_num_formats;
}; };
/** /**
......
...@@ -12,157 +12,81 @@ ...@@ -12,157 +12,81 @@
#include "dpu_hw_mdss.h" #include "dpu_hw_mdss.h"
static const struct dpu_format_extended plane_formats[] = { static const uint32_t qcom_compressed_supported_formats[] = {
{DRM_FORMAT_ARGB8888, 0}, DRM_FORMAT_ABGR8888,
{DRM_FORMAT_ABGR8888, 0}, DRM_FORMAT_XBGR8888,
{DRM_FORMAT_RGBA8888, 0}, DRM_FORMAT_BGR565,
{DRM_FORMAT_ABGR8888, DRM_FORMAT_MOD_QCOM_COMPRESSED},
{DRM_FORMAT_BGRA8888, 0},
{DRM_FORMAT_XRGB8888, 0},
{DRM_FORMAT_RGBX8888, 0},
{DRM_FORMAT_BGRX8888, 0},
{DRM_FORMAT_XBGR8888, 0},
{DRM_FORMAT_XBGR8888, DRM_FORMAT_MOD_QCOM_COMPRESSED},
{DRM_FORMAT_RGB888, 0},
{DRM_FORMAT_BGR888, 0},
{DRM_FORMAT_RGB565, 0},
{DRM_FORMAT_BGR565, DRM_FORMAT_MOD_QCOM_COMPRESSED},
{DRM_FORMAT_BGR565, 0},
{DRM_FORMAT_ARGB1555, 0},
{DRM_FORMAT_ABGR1555, 0},
{DRM_FORMAT_RGBA5551, 0},
{DRM_FORMAT_BGRA5551, 0},
{DRM_FORMAT_XRGB1555, 0},
{DRM_FORMAT_XBGR1555, 0},
{DRM_FORMAT_RGBX5551, 0},
{DRM_FORMAT_BGRX5551, 0},
{DRM_FORMAT_ARGB4444, 0},
{DRM_FORMAT_ABGR4444, 0},
{DRM_FORMAT_RGBA4444, 0},
{DRM_FORMAT_BGRA4444, 0},
{DRM_FORMAT_XRGB4444, 0},
{DRM_FORMAT_XBGR4444, 0},
{DRM_FORMAT_RGBX4444, 0},
{DRM_FORMAT_BGRX4444, 0},
{0, 0},
}; };
static const struct dpu_format_extended plane_formats_yuv[] = { static const uint32_t plane_formats[] = {
{DRM_FORMAT_ARGB8888, 0}, DRM_FORMAT_ARGB8888,
{DRM_FORMAT_ABGR8888, 0}, DRM_FORMAT_ABGR8888,
{DRM_FORMAT_RGBA8888, 0}, DRM_FORMAT_RGBA8888,
{DRM_FORMAT_BGRX8888, 0}, DRM_FORMAT_BGRA8888,
{DRM_FORMAT_ABGR8888, DRM_FORMAT_MOD_QCOM_COMPRESSED}, DRM_FORMAT_XRGB8888,
{DRM_FORMAT_BGRA8888, 0}, DRM_FORMAT_RGBX8888,
{DRM_FORMAT_XRGB8888, 0}, DRM_FORMAT_BGRX8888,
{DRM_FORMAT_XBGR8888, 0}, DRM_FORMAT_XBGR8888,
{DRM_FORMAT_RGBX8888, 0}, DRM_FORMAT_RGB888,
{DRM_FORMAT_XBGR8888, DRM_FORMAT_MOD_QCOM_COMPRESSED}, DRM_FORMAT_BGR888,
{DRM_FORMAT_RGB888, 0}, DRM_FORMAT_RGB565,
{DRM_FORMAT_BGR888, 0}, DRM_FORMAT_BGR565,
{DRM_FORMAT_RGB565, 0}, DRM_FORMAT_ARGB1555,
{DRM_FORMAT_BGR565, DRM_FORMAT_MOD_QCOM_COMPRESSED}, DRM_FORMAT_ABGR1555,
{DRM_FORMAT_BGR565, 0}, DRM_FORMAT_RGBA5551,
{DRM_FORMAT_ARGB1555, 0}, DRM_FORMAT_BGRA5551,
{DRM_FORMAT_ABGR1555, 0}, DRM_FORMAT_XRGB1555,
{DRM_FORMAT_RGBA5551, 0}, DRM_FORMAT_XBGR1555,
{DRM_FORMAT_BGRA5551, 0}, DRM_FORMAT_RGBX5551,
{DRM_FORMAT_XRGB1555, 0}, DRM_FORMAT_BGRX5551,
{DRM_FORMAT_XBGR1555, 0}, DRM_FORMAT_ARGB4444,
{DRM_FORMAT_RGBX5551, 0}, DRM_FORMAT_ABGR4444,
{DRM_FORMAT_BGRX5551, 0}, DRM_FORMAT_RGBA4444,
{DRM_FORMAT_ARGB4444, 0}, DRM_FORMAT_BGRA4444,
{DRM_FORMAT_ABGR4444, 0}, DRM_FORMAT_XRGB4444,
{DRM_FORMAT_RGBA4444, 0}, DRM_FORMAT_XBGR4444,
{DRM_FORMAT_BGRA4444, 0}, DRM_FORMAT_RGBX4444,
{DRM_FORMAT_XRGB4444, 0}, DRM_FORMAT_BGRX4444,
{DRM_FORMAT_XBGR4444, 0},
{DRM_FORMAT_RGBX4444, 0},
{DRM_FORMAT_BGRX4444, 0},
{DRM_FORMAT_NV12, 0},
{DRM_FORMAT_NV12, DRM_FORMAT_MOD_QCOM_COMPRESSED},
{DRM_FORMAT_NV21, 0},
{DRM_FORMAT_NV16, 0},
{DRM_FORMAT_NV61, 0},
{DRM_FORMAT_VYUY, 0},
{DRM_FORMAT_UYVY, 0},
{DRM_FORMAT_YUYV, 0},
{DRM_FORMAT_YVYU, 0},
{DRM_FORMAT_YUV420, 0},
{DRM_FORMAT_YVU420, 0},
{0, 0},
};
static const struct dpu_format_extended cursor_formats[] = {
{DRM_FORMAT_ARGB8888, 0},
{DRM_FORMAT_ABGR8888, 0},
{DRM_FORMAT_RGBA8888, 0},
{DRM_FORMAT_BGRA8888, 0},
{DRM_FORMAT_XRGB8888, 0},
{DRM_FORMAT_ARGB1555, 0},
{DRM_FORMAT_ABGR1555, 0},
{DRM_FORMAT_RGBA5551, 0},
{DRM_FORMAT_BGRA5551, 0},
{DRM_FORMAT_ARGB4444, 0},
{DRM_FORMAT_ABGR4444, 0},
{DRM_FORMAT_RGBA4444, 0},
{DRM_FORMAT_BGRA4444, 0},
{0, 0},
}; };
static const struct dpu_format_extended wb2_formats[] = { static const uint32_t plane_formats_yuv[] = {
{DRM_FORMAT_RGB565, 0}, DRM_FORMAT_ARGB8888,
{DRM_FORMAT_BGR565, DRM_FORMAT_MOD_QCOM_COMPRESSED}, DRM_FORMAT_ABGR8888,
{DRM_FORMAT_RGB888, 0}, DRM_FORMAT_RGBA8888,
{DRM_FORMAT_ARGB8888, 0}, DRM_FORMAT_BGRX8888,
{DRM_FORMAT_RGBA8888, 0}, DRM_FORMAT_BGRA8888,
{DRM_FORMAT_ABGR8888, DRM_FORMAT_MOD_QCOM_COMPRESSED}, DRM_FORMAT_XRGB8888,
{DRM_FORMAT_XRGB8888, 0}, DRM_FORMAT_XBGR8888,
{DRM_FORMAT_RGBX8888, 0}, DRM_FORMAT_RGBX8888,
{DRM_FORMAT_XBGR8888, DRM_FORMAT_MOD_QCOM_COMPRESSED}, DRM_FORMAT_RGB888,
{DRM_FORMAT_ARGB1555, 0}, DRM_FORMAT_BGR888,
{DRM_FORMAT_RGBA5551, 0}, DRM_FORMAT_RGB565,
{DRM_FORMAT_XRGB1555, 0}, DRM_FORMAT_BGR565,
{DRM_FORMAT_RGBX5551, 0}, DRM_FORMAT_ARGB1555,
{DRM_FORMAT_ARGB4444, 0}, DRM_FORMAT_ABGR1555,
{DRM_FORMAT_RGBA4444, 0}, DRM_FORMAT_RGBA5551,
{DRM_FORMAT_RGBX4444, 0}, DRM_FORMAT_BGRA5551,
{DRM_FORMAT_XRGB4444, 0}, DRM_FORMAT_XRGB1555,
DRM_FORMAT_XBGR1555,
{DRM_FORMAT_BGR565, 0}, DRM_FORMAT_RGBX5551,
{DRM_FORMAT_BGR888, 0}, DRM_FORMAT_BGRX5551,
{DRM_FORMAT_ABGR8888, 0}, DRM_FORMAT_ARGB4444,
{DRM_FORMAT_BGRA8888, 0}, DRM_FORMAT_ABGR4444,
{DRM_FORMAT_BGRX8888, 0}, DRM_FORMAT_RGBA4444,
{DRM_FORMAT_XBGR8888, 0}, DRM_FORMAT_BGRA4444,
{DRM_FORMAT_ABGR1555, 0}, DRM_FORMAT_XRGB4444,
{DRM_FORMAT_BGRA5551, 0}, DRM_FORMAT_XBGR4444,
{DRM_FORMAT_XBGR1555, 0}, DRM_FORMAT_RGBX4444,
{DRM_FORMAT_BGRX5551, 0}, DRM_FORMAT_BGRX4444,
{DRM_FORMAT_ABGR4444, 0},
{DRM_FORMAT_BGRA4444, 0},
{DRM_FORMAT_BGRX4444, 0},
{DRM_FORMAT_XBGR4444, 0},
{DRM_FORMAT_YUV420, 0},
{DRM_FORMAT_NV12, 0},
{DRM_FORMAT_NV12, DRM_FORMAT_MOD_QCOM_COMPRESSED},
{DRM_FORMAT_NV16, 0},
{DRM_FORMAT_YUYV, 0},
{0, 0},
};
static const struct dpu_format_extended rgb_10bit_formats[] = { DRM_FORMAT_NV12,
{DRM_FORMAT_BGRA1010102, 0}, DRM_FORMAT_NV21,
{DRM_FORMAT_BGRX1010102, 0}, DRM_FORMAT_NV16,
{DRM_FORMAT_RGBA1010102, 0}, DRM_FORMAT_NV61,
{DRM_FORMAT_RGBX1010102, 0}, DRM_FORMAT_VYUY,
{DRM_FORMAT_ABGR2101010, 0}, DRM_FORMAT_UYVY,
{DRM_FORMAT_ABGR2101010, DRM_FORMAT_MOD_QCOM_COMPRESSED}, DRM_FORMAT_YUYV,
{DRM_FORMAT_XBGR2101010, 0}, DRM_FORMAT_YVYU,
{DRM_FORMAT_XBGR2101010, DRM_FORMAT_MOD_QCOM_COMPRESSED}, DRM_FORMAT_YUV420,
{DRM_FORMAT_ARGB2101010, 0}, DRM_FORMAT_YVU420,
{DRM_FORMAT_XRGB2101010, 0},
}; };
...@@ -170,10 +170,6 @@ ...@@ -170,10 +170,6 @@
/** /**
* AD4 interrupt status bit definitions * AD4 interrupt status bit definitions
*/ */
#define DPU_INTR_BRIGHTPR_UPDATED BIT(4)
#define DPU_INTR_DARKENH_UPDATED BIT(3)
#define DPU_INTR_STREN_OUTROI_UPDATED BIT(2)
#define DPU_INTR_STREN_INROI_UPDATED BIT(1)
#define DPU_INTR_BACKLIGHT_UPDATED BIT(0) #define DPU_INTR_BACKLIGHT_UPDATED BIT(0)
/** /**
* struct dpu_intr_reg - array of DPU register sets * struct dpu_intr_reg - array of DPU register sets
...@@ -782,18 +778,6 @@ static int dpu_hw_intr_irqidx_lookup(enum dpu_intr_type intr_type, ...@@ -782,18 +778,6 @@ static int dpu_hw_intr_irqidx_lookup(enum dpu_intr_type intr_type,
return -EINVAL; return -EINVAL;
} }
static void dpu_hw_intr_set_mask(struct dpu_hw_intr *intr, uint32_t reg_off,
uint32_t mask)
{
if (!intr)
return;
DPU_REG_WRITE(&intr->hw, reg_off, mask);
/* ensure register writes go through */
wmb();
}
static void dpu_hw_intr_dispatch_irq(struct dpu_hw_intr *intr, static void dpu_hw_intr_dispatch_irq(struct dpu_hw_intr *intr,
void (*cbfunc)(void *, int), void (*cbfunc)(void *, int),
void *arg) void *arg)
...@@ -1004,18 +988,6 @@ static int dpu_hw_intr_disable_irqs(struct dpu_hw_intr *intr) ...@@ -1004,18 +988,6 @@ static int dpu_hw_intr_disable_irqs(struct dpu_hw_intr *intr)
return 0; return 0;
} }
static int dpu_hw_intr_get_valid_interrupts(struct dpu_hw_intr *intr,
uint32_t *mask)
{
if (!intr || !mask)
return -EINVAL;
*mask = IRQ_SOURCE_MDP | IRQ_SOURCE_DSI0 | IRQ_SOURCE_DSI1
| IRQ_SOURCE_HDMI | IRQ_SOURCE_EDP;
return 0;
}
static void dpu_hw_intr_get_interrupt_statuses(struct dpu_hw_intr *intr) static void dpu_hw_intr_get_interrupt_statuses(struct dpu_hw_intr *intr)
{ {
int i; int i;
...@@ -1065,19 +1037,6 @@ static void dpu_hw_intr_clear_intr_status_nolock(struct dpu_hw_intr *intr, ...@@ -1065,19 +1037,6 @@ static void dpu_hw_intr_clear_intr_status_nolock(struct dpu_hw_intr *intr,
wmb(); wmb();
} }
static void dpu_hw_intr_clear_interrupt_status(struct dpu_hw_intr *intr,
int irq_idx)
{
unsigned long irq_flags;
if (!intr)
return;
spin_lock_irqsave(&intr->irq_lock, irq_flags);
dpu_hw_intr_clear_intr_status_nolock(intr, irq_idx);
spin_unlock_irqrestore(&intr->irq_lock, irq_flags);
}
static u32 dpu_hw_intr_get_interrupt_status(struct dpu_hw_intr *intr, static u32 dpu_hw_intr_get_interrupt_status(struct dpu_hw_intr *intr,
int irq_idx, bool clear) int irq_idx, bool clear)
{ {
...@@ -1113,16 +1072,13 @@ static u32 dpu_hw_intr_get_interrupt_status(struct dpu_hw_intr *intr, ...@@ -1113,16 +1072,13 @@ static u32 dpu_hw_intr_get_interrupt_status(struct dpu_hw_intr *intr,
static void __setup_intr_ops(struct dpu_hw_intr_ops *ops) static void __setup_intr_ops(struct dpu_hw_intr_ops *ops)
{ {
ops->set_mask = dpu_hw_intr_set_mask;
ops->irq_idx_lookup = dpu_hw_intr_irqidx_lookup; ops->irq_idx_lookup = dpu_hw_intr_irqidx_lookup;
ops->enable_irq = dpu_hw_intr_enable_irq; ops->enable_irq = dpu_hw_intr_enable_irq;
ops->disable_irq = dpu_hw_intr_disable_irq; ops->disable_irq = dpu_hw_intr_disable_irq;
ops->dispatch_irqs = dpu_hw_intr_dispatch_irq; ops->dispatch_irqs = dpu_hw_intr_dispatch_irq;
ops->clear_all_irqs = dpu_hw_intr_clear_irqs; ops->clear_all_irqs = dpu_hw_intr_clear_irqs;
ops->disable_all_irqs = dpu_hw_intr_disable_irqs; ops->disable_all_irqs = dpu_hw_intr_disable_irqs;
ops->get_valid_interrupts = dpu_hw_intr_get_valid_interrupts;
ops->get_interrupt_statuses = dpu_hw_intr_get_interrupt_statuses; ops->get_interrupt_statuses = dpu_hw_intr_get_interrupt_statuses;
ops->clear_interrupt_status = dpu_hw_intr_clear_interrupt_status;
ops->clear_intr_status_nolock = dpu_hw_intr_clear_intr_status_nolock; ops->clear_intr_status_nolock = dpu_hw_intr_clear_intr_status_nolock;
ops->get_interrupt_status = dpu_hw_intr_get_interrupt_status; ops->get_interrupt_status = dpu_hw_intr_get_interrupt_status;
} }
......
...@@ -20,13 +20,6 @@ ...@@ -20,13 +20,6 @@
#include "dpu_hw_util.h" #include "dpu_hw_util.h"
#include "dpu_hw_mdss.h" #include "dpu_hw_mdss.h"
#define IRQ_SOURCE_MDP BIT(0)
#define IRQ_SOURCE_DSI0 BIT(4)
#define IRQ_SOURCE_DSI1 BIT(5)
#define IRQ_SOURCE_HDMI BIT(8)
#define IRQ_SOURCE_EDP BIT(12)
#define IRQ_SOURCE_MHL BIT(16)
/** /**
* dpu_intr_type - HW Interrupt Type * dpu_intr_type - HW Interrupt Type
* @DPU_IRQ_TYPE_WB_ROT_COMP: WB rotator done * @DPU_IRQ_TYPE_WB_ROT_COMP: WB rotator done
...@@ -95,18 +88,6 @@ struct dpu_hw_intr; ...@@ -95,18 +88,6 @@ struct dpu_hw_intr;
* Interrupt operations. * Interrupt operations.
*/ */
struct dpu_hw_intr_ops { struct dpu_hw_intr_ops {
/**
* set_mask - Programs the given interrupt register with the
* given interrupt mask. Register value will get overwritten.
* @intr: HW interrupt handle
* @reg_off: MDSS HW register offset
* @irqmask: IRQ mask value
*/
void (*set_mask)(
struct dpu_hw_intr *intr,
uint32_t reg,
uint32_t irqmask);
/** /**
* irq_idx_lookup - Lookup IRQ index on the HW interrupt type * irq_idx_lookup - Lookup IRQ index on the HW interrupt type
* Used for all irq related ops * Used for all irq related ops
...@@ -176,16 +157,6 @@ struct dpu_hw_intr_ops { ...@@ -176,16 +157,6 @@ struct dpu_hw_intr_ops {
void (*get_interrupt_statuses)( void (*get_interrupt_statuses)(
struct dpu_hw_intr *intr); struct dpu_hw_intr *intr);
/**
* clear_interrupt_status - Clears HW interrupt status based on given
* lookup IRQ index.
* @intr: HW interrupt handle
* @irq_idx: Lookup irq index return from irq_idx_lookup
*/
void (*clear_interrupt_status)(
struct dpu_hw_intr *intr,
int irq_idx);
/** /**
* clear_intr_status_nolock() - clears the HW interrupts without lock * clear_intr_status_nolock() - clears the HW interrupts without lock
* @intr: HW interrupt handle * @intr: HW interrupt handle
...@@ -206,21 +177,6 @@ struct dpu_hw_intr_ops { ...@@ -206,21 +177,6 @@ struct dpu_hw_intr_ops {
struct dpu_hw_intr *intr, struct dpu_hw_intr *intr,
int irq_idx, int irq_idx,
bool clear); bool clear);
/**
* get_valid_interrupts - Gets a mask of all valid interrupt sources
* within DPU. These are actually status bits
* within interrupt registers that specify the
* source of the interrupt in IRQs. For example,
* valid interrupt sources can be MDP, DSI,
* HDMI etc.
* @intr: HW interrupt handle
* @mask: Returning the interrupt source MASK
* @return: 0 for success, otherwise failure
*/
int (*get_valid_interrupts)(
struct dpu_hw_intr *intr,
uint32_t *mask);
}; };
/** /**
......
...@@ -258,12 +258,6 @@ enum dpu_vbif { ...@@ -258,12 +258,6 @@ enum dpu_vbif {
VBIF_NRT = VBIF_1 VBIF_NRT = VBIF_1
}; };
enum dpu_iommu_domain {
DPU_IOMMU_DOMAIN_UNSECURE,
DPU_IOMMU_DOMAIN_SECURE,
DPU_IOMMU_DOMAIN_MAX
};
/** /**
* DPU HW,Component order color map * DPU HW,Component order color map
*/ */
...@@ -358,7 +352,6 @@ enum dpu_3d_blend_mode { ...@@ -358,7 +352,6 @@ enum dpu_3d_blend_mode {
* @alpha_enable: whether the format has an alpha channel * @alpha_enable: whether the format has an alpha channel
* @num_planes: number of planes (including meta data planes) * @num_planes: number of planes (including meta data planes)
* @fetch_mode: linear, tiled, or ubwc hw fetch behavior * @fetch_mode: linear, tiled, or ubwc hw fetch behavior
* @is_yuv: is format a yuv variant
* @flag: usage bit flags * @flag: usage bit flags
* @tile_width: format tile width * @tile_width: format tile width
* @tile_height: format tile height * @tile_height: format tile height
......
...@@ -18,7 +18,6 @@ ...@@ -18,7 +18,6 @@
#include "dpu_hw_mdss.h" #include "dpu_hw_mdss.h"
#define REG_MASK(n) ((BIT(n)) - 1) #define REG_MASK(n) ((BIT(n)) - 1)
struct dpu_format_extended;
/* /*
* This is the common struct maintained by each sub block * This is the common struct maintained by each sub block
......
...@@ -405,35 +405,38 @@ static void dpu_kms_wait_for_commit_done(struct msm_kms *kms, ...@@ -405,35 +405,38 @@ static void dpu_kms_wait_for_commit_done(struct msm_kms *kms,
} }
} }
static void _dpu_kms_initialize_dsi(struct drm_device *dev, static int _dpu_kms_initialize_dsi(struct drm_device *dev,
struct msm_drm_private *priv, struct msm_drm_private *priv,
struct dpu_kms *dpu_kms) struct dpu_kms *dpu_kms)
{ {
struct drm_encoder *encoder = NULL; struct drm_encoder *encoder = NULL;
int i, rc; int i, rc = 0;
if (!(priv->dsi[0] || priv->dsi[1]))
return rc;
/*TODO: Support two independent DSI connectors */ /*TODO: Support two independent DSI connectors */
encoder = dpu_encoder_init(dev, DRM_MODE_ENCODER_DSI); encoder = dpu_encoder_init(dev, DRM_MODE_ENCODER_DSI);
if (IS_ERR_OR_NULL(encoder)) { if (IS_ERR(encoder)) {
DPU_ERROR("encoder init failed for dsi display\n"); DPU_ERROR("encoder init failed for dsi display\n");
return; return PTR_ERR(encoder);
} }
priv->encoders[priv->num_encoders++] = encoder; priv->encoders[priv->num_encoders++] = encoder;
for (i = 0; i < ARRAY_SIZE(priv->dsi); i++) { for (i = 0; i < ARRAY_SIZE(priv->dsi); i++) {
if (!priv->dsi[i]) { if (!priv->dsi[i])
DPU_DEBUG("invalid msm_dsi for ctrl %d\n", i); continue;
return;
}
rc = msm_dsi_modeset_init(priv->dsi[i], dev, encoder); rc = msm_dsi_modeset_init(priv->dsi[i], dev, encoder);
if (rc) { if (rc) {
DPU_ERROR("modeset_init failed for dsi[%d], rc = %d\n", DPU_ERROR("modeset_init failed for dsi[%d], rc = %d\n",
i, rc); i, rc);
continue; break;
} }
} }
return rc;
} }
/** /**
...@@ -444,16 +447,16 @@ static void _dpu_kms_initialize_dsi(struct drm_device *dev, ...@@ -444,16 +447,16 @@ static void _dpu_kms_initialize_dsi(struct drm_device *dev,
* @dpu_kms: Pointer to dpu kms structure * @dpu_kms: Pointer to dpu kms structure
* Returns: Zero on success * Returns: Zero on success
*/ */
static void _dpu_kms_setup_displays(struct drm_device *dev, static int _dpu_kms_setup_displays(struct drm_device *dev,
struct msm_drm_private *priv, struct msm_drm_private *priv,
struct dpu_kms *dpu_kms) struct dpu_kms *dpu_kms)
{ {
_dpu_kms_initialize_dsi(dev, priv, dpu_kms);
/** /**
* Extend this function to initialize other * Extend this function to initialize other
* types of displays * types of displays
*/ */
return _dpu_kms_initialize_dsi(dev, priv, dpu_kms);
} }
static void _dpu_kms_drm_obj_destroy(struct dpu_kms *dpu_kms) static void _dpu_kms_drm_obj_destroy(struct dpu_kms *dpu_kms)
...@@ -516,7 +519,9 @@ static int _dpu_kms_drm_obj_init(struct dpu_kms *dpu_kms) ...@@ -516,7 +519,9 @@ static int _dpu_kms_drm_obj_init(struct dpu_kms *dpu_kms)
* Create encoder and query display drivers to create * Create encoder and query display drivers to create
* bridges and connectors * bridges and connectors
*/ */
_dpu_kms_setup_displays(dev, priv, dpu_kms); ret = _dpu_kms_setup_displays(dev, priv, dpu_kms);
if (ret)
goto fail;
max_crtc_count = min(catalog->mixer_count, priv->num_encoders); max_crtc_count = min(catalog->mixer_count, priv->num_encoders);
...@@ -627,6 +632,10 @@ static void _dpu_kms_hw_destroy(struct dpu_kms *dpu_kms) ...@@ -627,6 +632,10 @@ static void _dpu_kms_hw_destroy(struct dpu_kms *dpu_kms)
devm_iounmap(&dpu_kms->pdev->dev, dpu_kms->vbif[VBIF_RT]); devm_iounmap(&dpu_kms->pdev->dev, dpu_kms->vbif[VBIF_RT]);
dpu_kms->vbif[VBIF_RT] = NULL; dpu_kms->vbif[VBIF_RT] = NULL;
if (dpu_kms->hw_mdp)
dpu_hw_mdp_destroy(dpu_kms->hw_mdp);
dpu_kms->hw_mdp = NULL;
if (dpu_kms->mmio) if (dpu_kms->mmio)
devm_iounmap(&dpu_kms->pdev->dev, dpu_kms->mmio); devm_iounmap(&dpu_kms->pdev->dev, dpu_kms->mmio);
dpu_kms->mmio = NULL; dpu_kms->mmio = NULL;
...@@ -877,8 +886,7 @@ static int dpu_kms_hw_init(struct msm_kms *kms) ...@@ -877,8 +886,7 @@ static int dpu_kms_hw_init(struct msm_kms *kms)
goto power_error; goto power_error;
} }
rc = dpu_rm_init(&dpu_kms->rm, dpu_kms->catalog, dpu_kms->mmio, rc = dpu_rm_init(&dpu_kms->rm, dpu_kms->catalog, dpu_kms->mmio);
dpu_kms->dev);
if (rc) { if (rc) {
DPU_ERROR("rm init failed: %d\n", rc); DPU_ERROR("rm init failed: %d\n", rc);
goto power_error; goto power_error;
...@@ -886,11 +894,10 @@ static int dpu_kms_hw_init(struct msm_kms *kms) ...@@ -886,11 +894,10 @@ static int dpu_kms_hw_init(struct msm_kms *kms)
dpu_kms->rm_init = true; dpu_kms->rm_init = true;
dpu_kms->hw_mdp = dpu_rm_get_mdp(&dpu_kms->rm); dpu_kms->hw_mdp = dpu_hw_mdptop_init(MDP_TOP, dpu_kms->mmio,
if (IS_ERR_OR_NULL(dpu_kms->hw_mdp)) { dpu_kms->catalog);
if (IS_ERR(dpu_kms->hw_mdp)) {
rc = PTR_ERR(dpu_kms->hw_mdp); rc = PTR_ERR(dpu_kms->hw_mdp);
if (!dpu_kms->hw_mdp)
rc = -EINVAL;
DPU_ERROR("failed to get hw_mdp: %d\n", rc); DPU_ERROR("failed to get hw_mdp: %d\n", rc);
dpu_kms->hw_mdp = NULL; dpu_kms->hw_mdp = NULL;
goto power_error; goto power_error;
...@@ -926,16 +933,6 @@ static int dpu_kms_hw_init(struct msm_kms *kms) ...@@ -926,16 +933,6 @@ static int dpu_kms_hw_init(struct msm_kms *kms)
goto hw_intr_init_err; goto hw_intr_init_err;
} }
/*
* _dpu_kms_drm_obj_init should create the DRM related objects
* i.e. CRTCs, planes, encoders, connectors and so forth
*/
rc = _dpu_kms_drm_obj_init(dpu_kms);
if (rc) {
DPU_ERROR("modeset init failed: %d\n", rc);
goto drm_obj_init_err;
}
dev->mode_config.min_width = 0; dev->mode_config.min_width = 0;
dev->mode_config.min_height = 0; dev->mode_config.min_height = 0;
...@@ -952,6 +949,16 @@ static int dpu_kms_hw_init(struct msm_kms *kms) ...@@ -952,6 +949,16 @@ static int dpu_kms_hw_init(struct msm_kms *kms)
*/ */
dev->mode_config.allow_fb_modifiers = true; dev->mode_config.allow_fb_modifiers = true;
/*
* _dpu_kms_drm_obj_init should create the DRM related objects
* i.e. CRTCs, planes, encoders, connectors and so forth
*/
rc = _dpu_kms_drm_obj_init(dpu_kms);
if (rc) {
DPU_ERROR("modeset init failed: %d\n", rc);
goto drm_obj_init_err;
}
dpu_vbif_init_memtypes(dpu_kms); dpu_vbif_init_memtypes(dpu_kms);
pm_runtime_put_sync(&dpu_kms->pdev->dev); pm_runtime_put_sync(&dpu_kms->pdev->dev);
......
...@@ -23,11 +23,14 @@ struct dpu_mdss { ...@@ -23,11 +23,14 @@ struct dpu_mdss {
struct dpu_irq_controller irq_controller; struct dpu_irq_controller irq_controller;
}; };
static irqreturn_t dpu_mdss_irq(int irq, void *arg) static void dpu_mdss_irq(struct irq_desc *desc)
{ {
struct dpu_mdss *dpu_mdss = arg; struct dpu_mdss *dpu_mdss = irq_desc_get_handler_data(desc);
struct irq_chip *chip = irq_desc_get_chip(desc);
u32 interrupts; u32 interrupts;
chained_irq_enter(chip, desc);
interrupts = readl_relaxed(dpu_mdss->mmio + HW_INTR_STATUS); interrupts = readl_relaxed(dpu_mdss->mmio + HW_INTR_STATUS);
while (interrupts) { while (interrupts) {
...@@ -39,20 +42,20 @@ static irqreturn_t dpu_mdss_irq(int irq, void *arg) ...@@ -39,20 +42,20 @@ static irqreturn_t dpu_mdss_irq(int irq, void *arg)
hwirq); hwirq);
if (mapping == 0) { if (mapping == 0) {
DRM_ERROR("couldn't find irq mapping for %lu\n", hwirq); DRM_ERROR("couldn't find irq mapping for %lu\n", hwirq);
return IRQ_NONE; break;
} }
rc = generic_handle_irq(mapping); rc = generic_handle_irq(mapping);
if (rc < 0) { if (rc < 0) {
DRM_ERROR("handle irq fail: irq=%lu mapping=%u rc=%d\n", DRM_ERROR("handle irq fail: irq=%lu mapping=%u rc=%d\n",
hwirq, mapping, rc); hwirq, mapping, rc);
return IRQ_NONE; break;
} }
interrupts &= ~(1 << hwirq); interrupts &= ~(1 << hwirq);
} }
return IRQ_HANDLED; chained_irq_exit(chip, desc);
} }
static void dpu_mdss_irq_mask(struct irq_data *irqd) static void dpu_mdss_irq_mask(struct irq_data *irqd)
...@@ -83,16 +86,16 @@ static struct irq_chip dpu_mdss_irq_chip = { ...@@ -83,16 +86,16 @@ static struct irq_chip dpu_mdss_irq_chip = {
.irq_unmask = dpu_mdss_irq_unmask, .irq_unmask = dpu_mdss_irq_unmask,
}; };
static struct lock_class_key dpu_mdss_lock_key, dpu_mdss_request_key;
static int dpu_mdss_irqdomain_map(struct irq_domain *domain, static int dpu_mdss_irqdomain_map(struct irq_domain *domain,
unsigned int irq, irq_hw_number_t hwirq) unsigned int irq, irq_hw_number_t hwirq)
{ {
struct dpu_mdss *dpu_mdss = domain->host_data; struct dpu_mdss *dpu_mdss = domain->host_data;
int ret;
irq_set_lockdep_class(irq, &dpu_mdss_lock_key, &dpu_mdss_request_key);
irq_set_chip_and_handler(irq, &dpu_mdss_irq_chip, handle_level_irq); irq_set_chip_and_handler(irq, &dpu_mdss_irq_chip, handle_level_irq);
ret = irq_set_chip_data(irq, dpu_mdss); return irq_set_chip_data(irq, dpu_mdss);
return ret;
} }
static const struct irq_domain_ops dpu_mdss_irqdomain_ops = { static const struct irq_domain_ops dpu_mdss_irqdomain_ops = {
...@@ -159,11 +162,13 @@ static void dpu_mdss_destroy(struct drm_device *dev) ...@@ -159,11 +162,13 @@ static void dpu_mdss_destroy(struct drm_device *dev)
struct msm_drm_private *priv = dev->dev_private; struct msm_drm_private *priv = dev->dev_private;
struct dpu_mdss *dpu_mdss = to_dpu_mdss(priv->mdss); struct dpu_mdss *dpu_mdss = to_dpu_mdss(priv->mdss);
struct dss_module_power *mp = &dpu_mdss->mp; struct dss_module_power *mp = &dpu_mdss->mp;
int irq;
pm_runtime_suspend(dev->dev); pm_runtime_suspend(dev->dev);
pm_runtime_disable(dev->dev); pm_runtime_disable(dev->dev);
_dpu_mdss_irq_domain_fini(dpu_mdss); _dpu_mdss_irq_domain_fini(dpu_mdss);
free_irq(platform_get_irq(pdev, 0), dpu_mdss); irq = platform_get_irq(pdev, 0);
irq_set_chained_handler_and_data(irq, NULL, NULL);
msm_dss_put_clk(mp->clk_config, mp->num_clk); msm_dss_put_clk(mp->clk_config, mp->num_clk);
devm_kfree(&pdev->dev, mp->clk_config); devm_kfree(&pdev->dev, mp->clk_config);
...@@ -187,6 +192,7 @@ int dpu_mdss_init(struct drm_device *dev) ...@@ -187,6 +192,7 @@ int dpu_mdss_init(struct drm_device *dev)
struct dpu_mdss *dpu_mdss; struct dpu_mdss *dpu_mdss;
struct dss_module_power *mp; struct dss_module_power *mp;
int ret = 0; int ret = 0;
int irq;
dpu_mdss = devm_kzalloc(dev->dev, sizeof(*dpu_mdss), GFP_KERNEL); dpu_mdss = devm_kzalloc(dev->dev, sizeof(*dpu_mdss), GFP_KERNEL);
if (!dpu_mdss) if (!dpu_mdss)
...@@ -219,12 +225,12 @@ int dpu_mdss_init(struct drm_device *dev) ...@@ -219,12 +225,12 @@ int dpu_mdss_init(struct drm_device *dev)
if (ret) if (ret)
goto irq_domain_error; goto irq_domain_error;
ret = request_irq(platform_get_irq(pdev, 0), irq = platform_get_irq(pdev, 0);
dpu_mdss_irq, 0, "dpu_mdss_isr", dpu_mdss); if (irq < 0)
if (ret) {
DPU_ERROR("failed to init irq: %d\n", ret);
goto irq_error; goto irq_error;
}
irq_set_chained_handler_and_data(irq, dpu_mdss_irq,
dpu_mdss);
pm_runtime_enable(dev->dev); pm_runtime_enable(dev->dev);
......
...@@ -95,8 +95,6 @@ struct dpu_plane { ...@@ -95,8 +95,6 @@ struct dpu_plane {
enum dpu_sspp pipe; enum dpu_sspp pipe;
uint32_t features; /* capabilities from catalog */ uint32_t features; /* capabilities from catalog */
uint32_t nformats;
uint32_t formats[64];
struct dpu_hw_pipe *pipe_hw; struct dpu_hw_pipe *pipe_hw;
struct dpu_hw_pipe_cfg pipe_cfg; struct dpu_hw_pipe_cfg pipe_cfg;
...@@ -121,6 +119,12 @@ struct dpu_plane { ...@@ -121,6 +119,12 @@ struct dpu_plane {
bool debugfs_default_scale; bool debugfs_default_scale;
}; };
static const uint64_t supported_format_modifiers[] = {
DRM_FORMAT_MOD_QCOM_COMPRESSED,
DRM_FORMAT_MOD_LINEAR,
DRM_FORMAT_MOD_INVALID
};
#define to_dpu_plane(x) container_of(x, struct dpu_plane, base) #define to_dpu_plane(x) container_of(x, struct dpu_plane, base)
static struct dpu_kms *_dpu_plane_get_kms(struct drm_plane *plane) static struct dpu_kms *_dpu_plane_get_kms(struct drm_plane *plane)
...@@ -1410,6 +1414,23 @@ static void dpu_plane_early_unregister(struct drm_plane *plane) ...@@ -1410,6 +1414,23 @@ static void dpu_plane_early_unregister(struct drm_plane *plane)
debugfs_remove_recursive(pdpu->debugfs_root); debugfs_remove_recursive(pdpu->debugfs_root);
} }
static bool dpu_plane_format_mod_supported(struct drm_plane *plane,
uint32_t format, uint64_t modifier)
{
if (modifier == DRM_FORMAT_MOD_LINEAR)
return true;
if (modifier == DRM_FORMAT_MOD_QCOM_COMPRESSED) {
int i;
for (i = 0; i < ARRAY_SIZE(qcom_compressed_supported_formats); i++) {
if (format == qcom_compressed_supported_formats[i])
return true;
}
}
return false;
}
static const struct drm_plane_funcs dpu_plane_funcs = { static const struct drm_plane_funcs dpu_plane_funcs = {
.update_plane = drm_atomic_helper_update_plane, .update_plane = drm_atomic_helper_update_plane,
.disable_plane = drm_atomic_helper_disable_plane, .disable_plane = drm_atomic_helper_disable_plane,
...@@ -1419,6 +1440,7 @@ static const struct drm_plane_funcs dpu_plane_funcs = { ...@@ -1419,6 +1440,7 @@ static const struct drm_plane_funcs dpu_plane_funcs = {
.atomic_destroy_state = dpu_plane_destroy_state, .atomic_destroy_state = dpu_plane_destroy_state,
.late_register = dpu_plane_late_register, .late_register = dpu_plane_late_register,
.early_unregister = dpu_plane_early_unregister, .early_unregister = dpu_plane_early_unregister,
.format_mod_supported = dpu_plane_format_mod_supported,
}; };
static const struct drm_plane_helper_funcs dpu_plane_helper_funcs = { static const struct drm_plane_helper_funcs dpu_plane_helper_funcs = {
...@@ -1444,11 +1466,12 @@ struct drm_plane *dpu_plane_init(struct drm_device *dev, ...@@ -1444,11 +1466,12 @@ struct drm_plane *dpu_plane_init(struct drm_device *dev,
unsigned long possible_crtcs, u32 master_plane_id) unsigned long possible_crtcs, u32 master_plane_id)
{ {
struct drm_plane *plane = NULL, *master_plane = NULL; struct drm_plane *plane = NULL, *master_plane = NULL;
const struct dpu_format_extended *format_list; const uint32_t *format_list;
struct dpu_plane *pdpu; struct dpu_plane *pdpu;
struct msm_drm_private *priv = dev->dev_private; struct msm_drm_private *priv = dev->dev_private;
struct dpu_kms *kms = to_dpu_kms(priv->kms); struct dpu_kms *kms = to_dpu_kms(priv->kms);
int zpos_max = DPU_ZPOS_MAX; int zpos_max = DPU_ZPOS_MAX;
uint32_t num_formats;
int ret = -EINVAL; int ret = -EINVAL;
/* create and zero local structure */ /* create and zero local structure */
...@@ -1491,24 +1514,18 @@ struct drm_plane *dpu_plane_init(struct drm_device *dev, ...@@ -1491,24 +1514,18 @@ struct drm_plane *dpu_plane_init(struct drm_device *dev,
goto clean_sspp; goto clean_sspp;
} }
if (!master_plane_id) if (pdpu->is_virtual) {
format_list = pdpu->pipe_sblk->format_list;
else
format_list = pdpu->pipe_sblk->virt_format_list; format_list = pdpu->pipe_sblk->virt_format_list;
num_formats = pdpu->pipe_sblk->virt_num_formats;
pdpu->nformats = dpu_populate_formats(format_list, }
pdpu->formats, else {
0, format_list = pdpu->pipe_sblk->format_list;
ARRAY_SIZE(pdpu->formats)); num_formats = pdpu->pipe_sblk->num_formats;
if (!pdpu->nformats) {
DPU_ERROR("[%u]no valid formats for plane\n", pipe);
goto clean_sspp;
} }
ret = drm_universal_plane_init(dev, plane, 0xff, &dpu_plane_funcs, ret = drm_universal_plane_init(dev, plane, 0xff, &dpu_plane_funcs,
pdpu->formats, pdpu->nformats, format_list, num_formats,
NULL, type, NULL); supported_format_modifiers, type, NULL);
if (ret) if (ret)
goto clean_sspp; goto clean_sspp;
......
...@@ -28,23 +28,18 @@ ...@@ -28,23 +28,18 @@
/** /**
* struct dpu_plane_state: Define dpu extension of drm plane state object * struct dpu_plane_state: Define dpu extension of drm plane state object
* @base: base drm plane state object * @base: base drm plane state object
* @property_state: Local storage for msm_prop properties
* @property_values: cached plane property values
* @aspace: pointer to address space for input/output buffers * @aspace: pointer to address space for input/output buffers
* @input_fence: dereferenced input fence pointer
* @stage: assigned by crtc blender * @stage: assigned by crtc blender
* @multirect_index: index of the rectangle of SSPP * @multirect_index: index of the rectangle of SSPP
* @multirect_mode: parallel or time multiplex multirect mode * @multirect_mode: parallel or time multiplex multirect mode
* @pending: whether the current update is still pending * @pending: whether the current update is still pending
* @scaler3_cfg: configuration data for scaler3 * @scaler3_cfg: configuration data for scaler3
* @pixel_ext: configuration data for pixel extensions * @pixel_ext: configuration data for pixel extensions
* @scaler_check_state: indicates status of user provided pixel extension data
* @cdp_cfg: CDP configuration * @cdp_cfg: CDP configuration
*/ */
struct dpu_plane_state { struct dpu_plane_state {
struct drm_plane_state base; struct drm_plane_state base;
struct msm_gem_address_space *aspace; struct msm_gem_address_space *aspace;
void *input_fence;
enum dpu_stage stage; enum dpu_stage stage;
uint32_t multirect_index; uint32_t multirect_index;
uint32_t multirect_mode; uint32_t multirect_mode;
...@@ -106,12 +101,6 @@ void dpu_plane_restore(struct drm_plane *plane); ...@@ -106,12 +101,6 @@ void dpu_plane_restore(struct drm_plane *plane);
*/ */
void dpu_plane_flush(struct drm_plane *plane); void dpu_plane_flush(struct drm_plane *plane);
/**
* dpu_plane_kickoff - final plane operations before commit kickoff
* @plane: Pointer to drm plane structure
*/
void dpu_plane_kickoff(struct drm_plane *plane);
/** /**
* dpu_plane_set_error: enable/disable error condition * dpu_plane_set_error: enable/disable error condition
* @plane: pointer to drm_plane structure * @plane: pointer to drm_plane structure
...@@ -146,14 +135,6 @@ int dpu_plane_validate_multirect_v2(struct dpu_multirect_plane_states *plane); ...@@ -146,14 +135,6 @@ int dpu_plane_validate_multirect_v2(struct dpu_multirect_plane_states *plane);
*/ */
void dpu_plane_clear_multirect(const struct drm_plane_state *drm_state); void dpu_plane_clear_multirect(const struct drm_plane_state *drm_state);
/**
* dpu_plane_wait_input_fence - wait for input fence object
* @plane: Pointer to DRM plane object
* @wait_ms: Wait timeout value
* Returns: Zero on success
*/
int dpu_plane_wait_input_fence(struct drm_plane *plane, uint32_t wait_ms);
/** /**
* dpu_plane_color_fill - enables color fill on plane * dpu_plane_color_fill - enables color fill on plane
* @plane: Pointer to DRM plane object * @plane: Pointer to DRM plane object
...@@ -164,12 +145,4 @@ int dpu_plane_wait_input_fence(struct drm_plane *plane, uint32_t wait_ms); ...@@ -164,12 +145,4 @@ int dpu_plane_wait_input_fence(struct drm_plane *plane, uint32_t wait_ms);
int dpu_plane_color_fill(struct drm_plane *plane, int dpu_plane_color_fill(struct drm_plane *plane,
uint32_t color, uint32_t alpha); uint32_t color, uint32_t alpha);
/**
* dpu_plane_set_revalidate - sets revalidate flag which forces a full
* validation of the plane properties in the next atomic check
* @plane: Pointer to DRM plane object
* @enable: Boolean to set/unset the flag
*/
void dpu_plane_set_revalidate(struct drm_plane *plane, bool enable);
#endif /* _DPU_PLANE_H_ */ #endif /* _DPU_PLANE_H_ */
...@@ -21,8 +21,8 @@ ...@@ -21,8 +21,8 @@
#include "dpu_encoder.h" #include "dpu_encoder.h"
#include "dpu_trace.h" #include "dpu_trace.h"
#define RESERVED_BY_OTHER(h, r) \ #define RESERVED_BY_OTHER(h, r) \
((h)->rsvp && ((h)->rsvp->enc_id != (r)->enc_id)) ((h)->enc_id && (h)->enc_id != r)
/** /**
* struct dpu_rm_requirements - Reservation requirements parameter bundle * struct dpu_rm_requirements - Reservation requirements parameter bundle
...@@ -34,90 +34,21 @@ struct dpu_rm_requirements { ...@@ -34,90 +34,21 @@ struct dpu_rm_requirements {
struct dpu_encoder_hw_resources hw_res; struct dpu_encoder_hw_resources hw_res;
}; };
/**
* struct dpu_rm_rsvp - Use Case Reservation tagging structure
* Used to tag HW blocks as reserved by a CRTC->Encoder->Connector chain
* By using as a tag, rather than lists of pointers to HW blocks used
* we can avoid some list management since we don't know how many blocks
* of each type a given use case may require.
* @list: List head for list of all reservations
* @seq: Global RSVP sequence number for debugging, especially for
* differentiating differenct allocations for same encoder.
* @enc_id: Reservations are tracked by Encoder DRM object ID.
* CRTCs may be connected to multiple Encoders.
* An encoder or connector id identifies the display path.
*/
struct dpu_rm_rsvp {
struct list_head list;
uint32_t seq;
uint32_t enc_id;
};
/** /**
* struct dpu_rm_hw_blk - hardware block tracking list member * struct dpu_rm_hw_blk - hardware block tracking list member
* @list: List head for list of all hardware blocks tracking items * @list: List head for list of all hardware blocks tracking items
* @rsvp: Pointer to use case reservation if reserved by a client
* @rsvp_nxt: Temporary pointer used during reservation to the incoming
* request. Will be swapped into rsvp if proposal is accepted
* @type: Type of hardware block this structure tracks
* @id: Hardware ID number, within it's own space, ie. LM_X * @id: Hardware ID number, within it's own space, ie. LM_X
* @catalog: Pointer to the hardware catalog entry for this block * @enc_id: Encoder id to which this blk is binded
* @hw: Pointer to the hardware register access object for this block * @hw: Pointer to the hardware register access object for this block
*/ */
struct dpu_rm_hw_blk { struct dpu_rm_hw_blk {
struct list_head list; struct list_head list;
struct dpu_rm_rsvp *rsvp;
struct dpu_rm_rsvp *rsvp_nxt;
enum dpu_hw_blk_type type;
uint32_t id; uint32_t id;
uint32_t enc_id;
struct dpu_hw_blk *hw; struct dpu_hw_blk *hw;
}; };
/**
* dpu_rm_dbg_rsvp_stage - enum of steps in making reservation for event logging
*/
enum dpu_rm_dbg_rsvp_stage {
DPU_RM_STAGE_BEGIN,
DPU_RM_STAGE_AFTER_CLEAR,
DPU_RM_STAGE_AFTER_RSVPNEXT,
DPU_RM_STAGE_FINAL
};
static void _dpu_rm_print_rsvps(
struct dpu_rm *rm,
enum dpu_rm_dbg_rsvp_stage stage)
{
struct dpu_rm_rsvp *rsvp;
struct dpu_rm_hw_blk *blk;
enum dpu_hw_blk_type type;
DPU_DEBUG("%d\n", stage);
list_for_each_entry(rsvp, &rm->rsvps, list) {
DRM_DEBUG_KMS("%d rsvp[s%ue%u]\n", stage, rsvp->seq,
rsvp->enc_id);
}
for (type = 0; type < DPU_HW_BLK_MAX; type++) {
list_for_each_entry(blk, &rm->hw_blks[type], list) {
if (!blk->rsvp && !blk->rsvp_nxt)
continue;
DRM_DEBUG_KMS("%d rsvp[s%ue%u->s%ue%u] %d %d\n", stage,
(blk->rsvp) ? blk->rsvp->seq : 0,
(blk->rsvp) ? blk->rsvp->enc_id : 0,
(blk->rsvp_nxt) ? blk->rsvp_nxt->seq : 0,
(blk->rsvp_nxt) ? blk->rsvp_nxt->enc_id : 0,
blk->type, blk->id);
}
}
}
struct dpu_hw_mdp *dpu_rm_get_mdp(struct dpu_rm *rm)
{
return rm->hw_mdp;
}
void dpu_rm_init_hw_iter( void dpu_rm_init_hw_iter(
struct dpu_rm_hw_iter *iter, struct dpu_rm_hw_iter *iter,
uint32_t enc_id, uint32_t enc_id,
...@@ -148,15 +79,7 @@ static bool _dpu_rm_get_hw_locked(struct dpu_rm *rm, struct dpu_rm_hw_iter *i) ...@@ -148,15 +79,7 @@ static bool _dpu_rm_get_hw_locked(struct dpu_rm *rm, struct dpu_rm_hw_iter *i)
i->blk = list_prepare_entry(i->blk, blk_list, list); i->blk = list_prepare_entry(i->blk, blk_list, list);
list_for_each_entry_continue(i->blk, blk_list, list) { list_for_each_entry_continue(i->blk, blk_list, list) {
struct dpu_rm_rsvp *rsvp = i->blk->rsvp; if (i->enc_id == i->blk->enc_id) {
if (i->blk->type != i->type) {
DPU_ERROR("found incorrect block type %d on %d list\n",
i->blk->type, i->type);
return false;
}
if ((i->enc_id == 0) || (rsvp && rsvp->enc_id == i->enc_id)) {
i->hw = i->blk->hw; i->hw = i->blk->hw;
DPU_DEBUG("found type %d id %d for enc %d\n", DPU_DEBUG("found type %d id %d for enc %d\n",
i->type, i->blk->id, i->enc_id); i->type, i->blk->id, i->enc_id);
...@@ -208,34 +131,18 @@ static void _dpu_rm_hw_destroy(enum dpu_hw_blk_type type, void *hw) ...@@ -208,34 +131,18 @@ static void _dpu_rm_hw_destroy(enum dpu_hw_blk_type type, void *hw)
int dpu_rm_destroy(struct dpu_rm *rm) int dpu_rm_destroy(struct dpu_rm *rm)
{ {
struct dpu_rm_rsvp *rsvp_cur, *rsvp_nxt;
struct dpu_rm_hw_blk *hw_cur, *hw_nxt; struct dpu_rm_hw_blk *hw_cur, *hw_nxt;
enum dpu_hw_blk_type type; enum dpu_hw_blk_type type;
if (!rm) {
DPU_ERROR("invalid rm\n");
return -EINVAL;
}
list_for_each_entry_safe(rsvp_cur, rsvp_nxt, &rm->rsvps, list) {
list_del(&rsvp_cur->list);
kfree(rsvp_cur);
}
for (type = 0; type < DPU_HW_BLK_MAX; type++) { for (type = 0; type < DPU_HW_BLK_MAX; type++) {
list_for_each_entry_safe(hw_cur, hw_nxt, &rm->hw_blks[type], list_for_each_entry_safe(hw_cur, hw_nxt, &rm->hw_blks[type],
list) { list) {
list_del(&hw_cur->list); list_del(&hw_cur->list);
_dpu_rm_hw_destroy(hw_cur->type, hw_cur->hw); _dpu_rm_hw_destroy(type, hw_cur->hw);
kfree(hw_cur); kfree(hw_cur);
} }
} }
dpu_hw_mdp_destroy(rm->hw_mdp);
rm->hw_mdp = NULL;
mutex_destroy(&rm->rm_lock); mutex_destroy(&rm->rm_lock);
return 0; return 0;
...@@ -250,11 +157,8 @@ static int _dpu_rm_hw_blk_create( ...@@ -250,11 +157,8 @@ static int _dpu_rm_hw_blk_create(
void *hw_catalog_info) void *hw_catalog_info)
{ {
struct dpu_rm_hw_blk *blk; struct dpu_rm_hw_blk *blk;
struct dpu_hw_mdp *hw_mdp;
void *hw; void *hw;
hw_mdp = rm->hw_mdp;
switch (type) { switch (type) {
case DPU_HW_BLK_LM: case DPU_HW_BLK_LM:
hw = dpu_hw_lm_init(id, mmio, cat); hw = dpu_hw_lm_init(id, mmio, cat);
...@@ -290,9 +194,9 @@ static int _dpu_rm_hw_blk_create( ...@@ -290,9 +194,9 @@ static int _dpu_rm_hw_blk_create(
return -ENOMEM; return -ENOMEM;
} }
blk->type = type;
blk->id = id; blk->id = id;
blk->hw = hw; blk->hw = hw;
blk->enc_id = 0;
list_add_tail(&blk->list, &rm->hw_blks[type]); list_add_tail(&blk->list, &rm->hw_blks[type]);
return 0; return 0;
...@@ -300,13 +204,12 @@ static int _dpu_rm_hw_blk_create( ...@@ -300,13 +204,12 @@ static int _dpu_rm_hw_blk_create(
int dpu_rm_init(struct dpu_rm *rm, int dpu_rm_init(struct dpu_rm *rm,
struct dpu_mdss_cfg *cat, struct dpu_mdss_cfg *cat,
void __iomem *mmio, void __iomem *mmio)
struct drm_device *dev)
{ {
int rc, i; int rc, i;
enum dpu_hw_blk_type type; enum dpu_hw_blk_type type;
if (!rm || !cat || !mmio || !dev) { if (!rm || !cat || !mmio) {
DPU_ERROR("invalid kms\n"); DPU_ERROR("invalid kms\n");
return -EINVAL; return -EINVAL;
} }
...@@ -316,21 +219,9 @@ int dpu_rm_init(struct dpu_rm *rm, ...@@ -316,21 +219,9 @@ int dpu_rm_init(struct dpu_rm *rm,
mutex_init(&rm->rm_lock); mutex_init(&rm->rm_lock);
INIT_LIST_HEAD(&rm->rsvps);
for (type = 0; type < DPU_HW_BLK_MAX; type++) for (type = 0; type < DPU_HW_BLK_MAX; type++)
INIT_LIST_HEAD(&rm->hw_blks[type]); INIT_LIST_HEAD(&rm->hw_blks[type]);
rm->dev = dev;
/* Some of the sub-blocks require an mdptop to be created */
rm->hw_mdp = dpu_hw_mdptop_init(MDP_TOP, mmio, cat);
if (IS_ERR_OR_NULL(rm->hw_mdp)) {
rc = PTR_ERR(rm->hw_mdp);
rm->hw_mdp = NULL;
DPU_ERROR("failed: mdp hw not available\n");
goto fail;
}
/* Interrogate HW catalog and create tracking items for hw blocks */ /* Interrogate HW catalog and create tracking items for hw blocks */
for (i = 0; i < cat->mixer_count; i++) { for (i = 0; i < cat->mixer_count; i++) {
struct dpu_lm_cfg *lm = &cat->mixer[i]; struct dpu_lm_cfg *lm = &cat->mixer[i];
...@@ -410,7 +301,7 @@ static bool _dpu_rm_needs_split_display(const struct msm_display_topology *top) ...@@ -410,7 +301,7 @@ static bool _dpu_rm_needs_split_display(const struct msm_display_topology *top)
* proposed use case requirements, incl. hardwired dependent blocks like * proposed use case requirements, incl. hardwired dependent blocks like
* pingpong * pingpong
* @rm: dpu resource manager handle * @rm: dpu resource manager handle
* @rsvp: reservation currently being created * @enc_id: encoder id requesting for allocation
* @reqs: proposed use case requirements * @reqs: proposed use case requirements
* @lm: proposed layer mixer, function checks if lm, and all other hardwired * @lm: proposed layer mixer, function checks if lm, and all other hardwired
* blocks connected to the lm (pp) is available and appropriate * blocks connected to the lm (pp) is available and appropriate
...@@ -422,7 +313,7 @@ static bool _dpu_rm_needs_split_display(const struct msm_display_topology *top) ...@@ -422,7 +313,7 @@ static bool _dpu_rm_needs_split_display(const struct msm_display_topology *top)
*/ */
static bool _dpu_rm_check_lm_and_get_connected_blks( static bool _dpu_rm_check_lm_and_get_connected_blks(
struct dpu_rm *rm, struct dpu_rm *rm,
struct dpu_rm_rsvp *rsvp, uint32_t enc_id,
struct dpu_rm_requirements *reqs, struct dpu_rm_requirements *reqs,
struct dpu_rm_hw_blk *lm, struct dpu_rm_hw_blk *lm,
struct dpu_rm_hw_blk **pp, struct dpu_rm_hw_blk **pp,
...@@ -449,7 +340,7 @@ static bool _dpu_rm_check_lm_and_get_connected_blks( ...@@ -449,7 +340,7 @@ static bool _dpu_rm_check_lm_and_get_connected_blks(
} }
/* Already reserved? */ /* Already reserved? */
if (RESERVED_BY_OTHER(lm, rsvp)) { if (RESERVED_BY_OTHER(lm, enc_id)) {
DPU_DEBUG("lm %d already reserved\n", lm_cfg->id); DPU_DEBUG("lm %d already reserved\n", lm_cfg->id);
return false; return false;
} }
...@@ -467,7 +358,7 @@ static bool _dpu_rm_check_lm_and_get_connected_blks( ...@@ -467,7 +358,7 @@ static bool _dpu_rm_check_lm_and_get_connected_blks(
return false; return false;
} }
if (RESERVED_BY_OTHER(*pp, rsvp)) { if (RESERVED_BY_OTHER(*pp, enc_id)) {
DPU_DEBUG("lm %d pp %d already reserved\n", lm->id, DPU_DEBUG("lm %d pp %d already reserved\n", lm->id,
(*pp)->id); (*pp)->id);
return false; return false;
...@@ -476,10 +367,8 @@ static bool _dpu_rm_check_lm_and_get_connected_blks( ...@@ -476,10 +367,8 @@ static bool _dpu_rm_check_lm_and_get_connected_blks(
return true; return true;
} }
static int _dpu_rm_reserve_lms( static int _dpu_rm_reserve_lms(struct dpu_rm *rm, uint32_t enc_id,
struct dpu_rm *rm, struct dpu_rm_requirements *reqs)
struct dpu_rm_rsvp *rsvp,
struct dpu_rm_requirements *reqs)
{ {
struct dpu_rm_hw_blk *lm[MAX_BLOCKS]; struct dpu_rm_hw_blk *lm[MAX_BLOCKS];
...@@ -504,7 +393,7 @@ static int _dpu_rm_reserve_lms( ...@@ -504,7 +393,7 @@ static int _dpu_rm_reserve_lms(
lm[lm_count] = iter_i.blk; lm[lm_count] = iter_i.blk;
if (!_dpu_rm_check_lm_and_get_connected_blks( if (!_dpu_rm_check_lm_and_get_connected_blks(
rm, rsvp, reqs, lm[lm_count], rm, enc_id, reqs, lm[lm_count],
&pp[lm_count], NULL)) &pp[lm_count], NULL))
continue; continue;
...@@ -519,7 +408,7 @@ static int _dpu_rm_reserve_lms( ...@@ -519,7 +408,7 @@ static int _dpu_rm_reserve_lms(
continue; continue;
if (!_dpu_rm_check_lm_and_get_connected_blks( if (!_dpu_rm_check_lm_and_get_connected_blks(
rm, rsvp, reqs, iter_j.blk, rm, enc_id, reqs, iter_j.blk,
&pp[lm_count], iter_i.blk)) &pp[lm_count], iter_i.blk))
continue; continue;
...@@ -537,11 +426,10 @@ static int _dpu_rm_reserve_lms( ...@@ -537,11 +426,10 @@ static int _dpu_rm_reserve_lms(
if (!lm[i]) if (!lm[i])
break; break;
lm[i]->rsvp_nxt = rsvp; lm[i]->enc_id = enc_id;
pp[i]->rsvp_nxt = rsvp; pp[i]->enc_id = enc_id;
trace_dpu_rm_reserve_lms(lm[i]->id, lm[i]->type, rsvp->enc_id, trace_dpu_rm_reserve_lms(lm[i]->id, enc_id, pp[i]->id);
pp[i]->id);
} }
return rc; return rc;
...@@ -549,7 +437,7 @@ static int _dpu_rm_reserve_lms( ...@@ -549,7 +437,7 @@ static int _dpu_rm_reserve_lms(
static int _dpu_rm_reserve_ctls( static int _dpu_rm_reserve_ctls(
struct dpu_rm *rm, struct dpu_rm *rm,
struct dpu_rm_rsvp *rsvp, uint32_t enc_id,
const struct msm_display_topology *top) const struct msm_display_topology *top)
{ {
struct dpu_rm_hw_blk *ctls[MAX_BLOCKS]; struct dpu_rm_hw_blk *ctls[MAX_BLOCKS];
...@@ -570,7 +458,7 @@ static int _dpu_rm_reserve_ctls( ...@@ -570,7 +458,7 @@ static int _dpu_rm_reserve_ctls(
unsigned long features = ctl->caps->features; unsigned long features = ctl->caps->features;
bool has_split_display; bool has_split_display;
if (RESERVED_BY_OTHER(iter.blk, rsvp)) if (RESERVED_BY_OTHER(iter.blk, enc_id))
continue; continue;
has_split_display = BIT(DPU_CTL_SPLIT_DISPLAY) & features; has_split_display = BIT(DPU_CTL_SPLIT_DISPLAY) & features;
...@@ -591,9 +479,8 @@ static int _dpu_rm_reserve_ctls( ...@@ -591,9 +479,8 @@ static int _dpu_rm_reserve_ctls(
return -ENAVAIL; return -ENAVAIL;
for (i = 0; i < ARRAY_SIZE(ctls) && i < num_ctls; i++) { for (i = 0; i < ARRAY_SIZE(ctls) && i < num_ctls; i++) {
ctls[i]->rsvp_nxt = rsvp; ctls[i]->enc_id = enc_id;
trace_dpu_rm_reserve_ctls(ctls[i]->id, ctls[i]->type, trace_dpu_rm_reserve_ctls(ctls[i]->id, enc_id);
rsvp->enc_id);
} }
return 0; return 0;
...@@ -601,7 +488,7 @@ static int _dpu_rm_reserve_ctls( ...@@ -601,7 +488,7 @@ static int _dpu_rm_reserve_ctls(
static int _dpu_rm_reserve_intf( static int _dpu_rm_reserve_intf(
struct dpu_rm *rm, struct dpu_rm *rm,
struct dpu_rm_rsvp *rsvp, uint32_t enc_id,
uint32_t id, uint32_t id,
enum dpu_hw_blk_type type) enum dpu_hw_blk_type type)
{ {
...@@ -614,14 +501,13 @@ static int _dpu_rm_reserve_intf( ...@@ -614,14 +501,13 @@ static int _dpu_rm_reserve_intf(
if (iter.blk->id != id) if (iter.blk->id != id)
continue; continue;
if (RESERVED_BY_OTHER(iter.blk, rsvp)) { if (RESERVED_BY_OTHER(iter.blk, enc_id)) {
DPU_ERROR("type %d id %d already reserved\n", type, id); DPU_ERROR("type %d id %d already reserved\n", type, id);
return -ENAVAIL; return -ENAVAIL;
} }
iter.blk->rsvp_nxt = rsvp; iter.blk->enc_id = enc_id;
trace_dpu_rm_reserve_intf(iter.blk->id, iter.blk->type, trace_dpu_rm_reserve_intf(iter.blk->id, enc_id);
rsvp->enc_id);
break; break;
} }
...@@ -636,7 +522,7 @@ static int _dpu_rm_reserve_intf( ...@@ -636,7 +522,7 @@ static int _dpu_rm_reserve_intf(
static int _dpu_rm_reserve_intf_related_hw( static int _dpu_rm_reserve_intf_related_hw(
struct dpu_rm *rm, struct dpu_rm *rm,
struct dpu_rm_rsvp *rsvp, uint32_t enc_id,
struct dpu_encoder_hw_resources *hw_res) struct dpu_encoder_hw_resources *hw_res)
{ {
int i, ret = 0; int i, ret = 0;
...@@ -646,7 +532,7 @@ static int _dpu_rm_reserve_intf_related_hw( ...@@ -646,7 +532,7 @@ static int _dpu_rm_reserve_intf_related_hw(
if (hw_res->intfs[i] == INTF_MODE_NONE) if (hw_res->intfs[i] == INTF_MODE_NONE)
continue; continue;
id = i + INTF_0; id = i + INTF_0;
ret = _dpu_rm_reserve_intf(rm, rsvp, id, ret = _dpu_rm_reserve_intf(rm, enc_id, id,
DPU_HW_BLK_INTF); DPU_HW_BLK_INTF);
if (ret) if (ret)
return ret; return ret;
...@@ -655,33 +541,27 @@ static int _dpu_rm_reserve_intf_related_hw( ...@@ -655,33 +541,27 @@ static int _dpu_rm_reserve_intf_related_hw(
return ret; return ret;
} }
static int _dpu_rm_make_next_rsvp( static int _dpu_rm_make_reservation(
struct dpu_rm *rm, struct dpu_rm *rm,
struct drm_encoder *enc, struct drm_encoder *enc,
struct drm_crtc_state *crtc_state, struct drm_crtc_state *crtc_state,
struct dpu_rm_rsvp *rsvp,
struct dpu_rm_requirements *reqs) struct dpu_rm_requirements *reqs)
{ {
int ret; int ret;
/* Create reservation info, tag reserved blocks with it as we go */ ret = _dpu_rm_reserve_lms(rm, enc->base.id, reqs);
rsvp->seq = ++rm->rsvp_next_seq;
rsvp->enc_id = enc->base.id;
list_add_tail(&rsvp->list, &rm->rsvps);
ret = _dpu_rm_reserve_lms(rm, rsvp, reqs);
if (ret) { if (ret) {
DPU_ERROR("unable to find appropriate mixers\n"); DPU_ERROR("unable to find appropriate mixers\n");
return ret; return ret;
} }
ret = _dpu_rm_reserve_ctls(rm, rsvp, &reqs->topology); ret = _dpu_rm_reserve_ctls(rm, enc->base.id, &reqs->topology);
if (ret) { if (ret) {
DPU_ERROR("unable to find appropriate CTL\n"); DPU_ERROR("unable to find appropriate CTL\n");
return ret; return ret;
} }
ret = _dpu_rm_reserve_intf_related_hw(rm, rsvp, &reqs->hw_res); ret = _dpu_rm_reserve_intf_related_hw(rm, enc->base.id, &reqs->hw_res);
if (ret) if (ret)
return ret; return ret;
...@@ -706,108 +586,31 @@ static int _dpu_rm_populate_requirements( ...@@ -706,108 +586,31 @@ static int _dpu_rm_populate_requirements(
return 0; return 0;
} }
static struct dpu_rm_rsvp *_dpu_rm_get_rsvp( static void _dpu_rm_release_reservation(struct dpu_rm *rm, uint32_t enc_id)
struct dpu_rm *rm,
struct drm_encoder *enc)
{ {
struct dpu_rm_rsvp *i;
if (!rm || !enc) {
DPU_ERROR("invalid params\n");
return NULL;
}
if (list_empty(&rm->rsvps))
return NULL;
list_for_each_entry(i, &rm->rsvps, list)
if (i->enc_id == enc->base.id)
return i;
return NULL;
}
/**
* _dpu_rm_release_rsvp - release resources and release a reservation
* @rm: KMS handle
* @rsvp: RSVP pointer to release and release resources for
*/
static void _dpu_rm_release_rsvp(struct dpu_rm *rm, struct dpu_rm_rsvp *rsvp)
{
struct dpu_rm_rsvp *rsvp_c, *rsvp_n;
struct dpu_rm_hw_blk *blk; struct dpu_rm_hw_blk *blk;
enum dpu_hw_blk_type type; enum dpu_hw_blk_type type;
if (!rsvp)
return;
DPU_DEBUG("rel rsvp %d enc %d\n", rsvp->seq, rsvp->enc_id);
list_for_each_entry_safe(rsvp_c, rsvp_n, &rm->rsvps, list) {
if (rsvp == rsvp_c) {
list_del(&rsvp_c->list);
break;
}
}
for (type = 0; type < DPU_HW_BLK_MAX; type++) { for (type = 0; type < DPU_HW_BLK_MAX; type++) {
list_for_each_entry(blk, &rm->hw_blks[type], list) { list_for_each_entry(blk, &rm->hw_blks[type], list) {
if (blk->rsvp == rsvp) { if (blk->enc_id == enc_id) {
blk->rsvp = NULL; blk->enc_id = 0;
DPU_DEBUG("rel rsvp %d enc %d %d %d\n", DPU_DEBUG("rel enc %d %d %d\n", enc_id,
rsvp->seq, rsvp->enc_id, type, blk->id);
blk->type, blk->id);
}
if (blk->rsvp_nxt == rsvp) {
blk->rsvp_nxt = NULL;
DPU_DEBUG("rel rsvp_nxt %d enc %d %d %d\n",
rsvp->seq, rsvp->enc_id,
blk->type, blk->id);
} }
} }
} }
kfree(rsvp);
} }
void dpu_rm_release(struct dpu_rm *rm, struct drm_encoder *enc) void dpu_rm_release(struct dpu_rm *rm, struct drm_encoder *enc)
{ {
struct dpu_rm_rsvp *rsvp;
if (!rm || !enc) {
DPU_ERROR("invalid params\n");
return;
}
mutex_lock(&rm->rm_lock); mutex_lock(&rm->rm_lock);
rsvp = _dpu_rm_get_rsvp(rm, enc); _dpu_rm_release_reservation(rm, enc->base.id);
if (!rsvp) {
DPU_ERROR("failed to find rsvp for enc %d\n", enc->base.id);
goto end;
}
_dpu_rm_release_rsvp(rm, rsvp);
end:
mutex_unlock(&rm->rm_lock); mutex_unlock(&rm->rm_lock);
} }
static void _dpu_rm_commit_rsvp(struct dpu_rm *rm, struct dpu_rm_rsvp *rsvp)
{
struct dpu_rm_hw_blk *blk;
enum dpu_hw_blk_type type;
/* Swap next rsvp to be the active */
for (type = 0; type < DPU_HW_BLK_MAX; type++) {
list_for_each_entry(blk, &rm->hw_blks[type], list) {
if (blk->rsvp_nxt) {
blk->rsvp = blk->rsvp_nxt;
blk->rsvp_nxt = NULL;
}
}
}
}
int dpu_rm_reserve( int dpu_rm_reserve(
struct dpu_rm *rm, struct dpu_rm *rm,
struct drm_encoder *enc, struct drm_encoder *enc,
...@@ -815,7 +618,6 @@ int dpu_rm_reserve( ...@@ -815,7 +618,6 @@ int dpu_rm_reserve(
struct msm_display_topology topology, struct msm_display_topology topology,
bool test_only) bool test_only)
{ {
struct dpu_rm_rsvp *rsvp_cur, *rsvp_nxt;
struct dpu_rm_requirements reqs; struct dpu_rm_requirements reqs;
int ret; int ret;
...@@ -828,8 +630,6 @@ int dpu_rm_reserve( ...@@ -828,8 +630,6 @@ int dpu_rm_reserve(
mutex_lock(&rm->rm_lock); mutex_lock(&rm->rm_lock);
_dpu_rm_print_rsvps(rm, DPU_RM_STAGE_BEGIN);
ret = _dpu_rm_populate_requirements(rm, enc, crtc_state, &reqs, ret = _dpu_rm_populate_requirements(rm, enc, crtc_state, &reqs,
topology); topology);
if (ret) { if (ret) {
...@@ -837,50 +637,17 @@ int dpu_rm_reserve( ...@@ -837,50 +637,17 @@ int dpu_rm_reserve(
goto end; goto end;
} }
/* ret = _dpu_rm_make_reservation(rm, enc, crtc_state, &reqs);
* We only support one active reservation per-hw-block. But to implement
* transactional semantics for test-only, and for allowing failure while
* modifying your existing reservation, over the course of this
* function we can have two reservations:
* Current: Existing reservation
* Next: Proposed reservation. The proposed reservation may fail, or may
* be discarded if in test-only mode.
* If reservation is successful, and we're not in test-only, then we
* replace the current with the next.
*/
rsvp_nxt = kzalloc(sizeof(*rsvp_nxt), GFP_KERNEL);
if (!rsvp_nxt) {
ret = -ENOMEM;
goto end;
}
rsvp_cur = _dpu_rm_get_rsvp(rm, enc);
/* Check the proposed reservation, store it in hw's "next" field */
ret = _dpu_rm_make_next_rsvp(rm, enc, crtc_state, rsvp_nxt, &reqs);
_dpu_rm_print_rsvps(rm, DPU_RM_STAGE_AFTER_RSVPNEXT);
if (ret) { if (ret) {
DPU_ERROR("failed to reserve hw resources: %d\n", ret); DPU_ERROR("failed to reserve hw resources: %d\n", ret);
_dpu_rm_release_rsvp(rm, rsvp_nxt); _dpu_rm_release_reservation(rm, enc->base.id);
} else if (test_only) { } else if (test_only) {
/* /* test_only: test the reservation and then undo */
* Normally, if test_only, test the reservation and then undo DPU_DEBUG("test_only: discard test [enc: %d]\n",
* However, if the user requests LOCK, then keep the reservation enc->base.id);
* made during the atomic_check phase. _dpu_rm_release_reservation(rm, enc->base.id);
*/
DPU_DEBUG("test_only: discard test rsvp[s%de%d]\n",
rsvp_nxt->seq, rsvp_nxt->enc_id);
_dpu_rm_release_rsvp(rm, rsvp_nxt);
} else {
_dpu_rm_release_rsvp(rm, rsvp_cur);
_dpu_rm_commit_rsvp(rm, rsvp_nxt);
} }
_dpu_rm_print_rsvps(rm, DPU_RM_STAGE_FINAL);
end: end:
mutex_unlock(&rm->rm_lock); mutex_unlock(&rm->rm_lock);
......
...@@ -22,22 +22,14 @@ ...@@ -22,22 +22,14 @@
/** /**
* struct dpu_rm - DPU dynamic hardware resource manager * struct dpu_rm - DPU dynamic hardware resource manager
* @dev: device handle for event logging purposes
* @rsvps: list of hardware reservations by each crtc->encoder->connector
* @hw_blks: array of lists of hardware resources present in the system, one * @hw_blks: array of lists of hardware resources present in the system, one
* list per type of hardware block * list per type of hardware block
* @hw_mdp: hardware object for mdp_top
* @lm_max_width: cached layer mixer maximum width * @lm_max_width: cached layer mixer maximum width
* @rsvp_next_seq: sequence number for next reservation for debugging purposes
* @rm_lock: resource manager mutex * @rm_lock: resource manager mutex
*/ */
struct dpu_rm { struct dpu_rm {
struct drm_device *dev;
struct list_head rsvps;
struct list_head hw_blks[DPU_HW_BLK_MAX]; struct list_head hw_blks[DPU_HW_BLK_MAX];
struct dpu_hw_mdp *hw_mdp;
uint32_t lm_max_width; uint32_t lm_max_width;
uint32_t rsvp_next_seq;
struct mutex rm_lock; struct mutex rm_lock;
}; };
...@@ -67,13 +59,11 @@ struct dpu_rm_hw_iter { ...@@ -67,13 +59,11 @@ struct dpu_rm_hw_iter {
* @rm: DPU Resource Manager handle * @rm: DPU Resource Manager handle
* @cat: Pointer to hardware catalog * @cat: Pointer to hardware catalog
* @mmio: mapped register io address of MDP * @mmio: mapped register io address of MDP
* @dev: device handle for event logging purposes
* @Return: 0 on Success otherwise -ERROR * @Return: 0 on Success otherwise -ERROR
*/ */
int dpu_rm_init(struct dpu_rm *rm, int dpu_rm_init(struct dpu_rm *rm,
struct dpu_mdss_cfg *cat, struct dpu_mdss_cfg *cat,
void __iomem *mmio, void __iomem *mmio);
struct drm_device *dev);
/** /**
* dpu_rm_destroy - Free all memory allocated by dpu_rm_init * dpu_rm_destroy - Free all memory allocated by dpu_rm_init
...@@ -111,14 +101,6 @@ int dpu_rm_reserve(struct dpu_rm *rm, ...@@ -111,14 +101,6 @@ int dpu_rm_reserve(struct dpu_rm *rm,
*/ */
void dpu_rm_release(struct dpu_rm *rm, struct drm_encoder *enc); void dpu_rm_release(struct dpu_rm *rm, struct drm_encoder *enc);
/**
* dpu_rm_get_mdp - Retrieve HW block for MDP TOP.
* This is never reserved, and is usable by any display.
* @rm: DPU Resource Manager handle
* @Return: Pointer to hw block or NULL
*/
struct dpu_hw_mdp *dpu_rm_get_mdp(struct dpu_rm *rm);
/** /**
* dpu_rm_init_hw_iter - setup given iterator for new iteration over hw list * dpu_rm_init_hw_iter - setup given iterator for new iteration over hw list
* using dpu_rm_get_hw * using dpu_rm_get_hw
...@@ -144,12 +126,4 @@ void dpu_rm_init_hw_iter( ...@@ -144,12 +126,4 @@ void dpu_rm_init_hw_iter(
* @Return: true on match found, false on no match found * @Return: true on match found, false on no match found
*/ */
bool dpu_rm_get_hw(struct dpu_rm *rm, struct dpu_rm_hw_iter *iter); bool dpu_rm_get_hw(struct dpu_rm *rm, struct dpu_rm_hw_iter *iter);
/**
* dpu_rm_check_property_topctl - validate property bitmask before it is set
* @val: user's proposed topology control bitmask
* @Return: 0 on success or error
*/
int dpu_rm_check_property_topctl(uint64_t val);
#endif /* __DPU_RM_H__ */ #endif /* __DPU_RM_H__ */
...@@ -831,48 +831,42 @@ TRACE_EVENT(dpu_plane_disable, ...@@ -831,48 +831,42 @@ TRACE_EVENT(dpu_plane_disable,
); );
DECLARE_EVENT_CLASS(dpu_rm_iter_template, DECLARE_EVENT_CLASS(dpu_rm_iter_template,
TP_PROTO(uint32_t id, enum dpu_hw_blk_type type, uint32_t enc_id), TP_PROTO(uint32_t id, uint32_t enc_id),
TP_ARGS(id, type, enc_id), TP_ARGS(id, enc_id),
TP_STRUCT__entry( TP_STRUCT__entry(
__field( uint32_t, id ) __field( uint32_t, id )
__field( enum dpu_hw_blk_type, type )
__field( uint32_t, enc_id ) __field( uint32_t, enc_id )
), ),
TP_fast_assign( TP_fast_assign(
__entry->id = id; __entry->id = id;
__entry->type = type;
__entry->enc_id = enc_id; __entry->enc_id = enc_id;
), ),
TP_printk("id:%d type:%d enc_id:%u", __entry->id, __entry->type, TP_printk("id:%d enc_id:%u", __entry->id, __entry->enc_id)
__entry->enc_id)
); );
DEFINE_EVENT(dpu_rm_iter_template, dpu_rm_reserve_intf, DEFINE_EVENT(dpu_rm_iter_template, dpu_rm_reserve_intf,
TP_PROTO(uint32_t id, enum dpu_hw_blk_type type, uint32_t enc_id), TP_PROTO(uint32_t id, uint32_t enc_id),
TP_ARGS(id, type, enc_id) TP_ARGS(id, enc_id)
); );
DEFINE_EVENT(dpu_rm_iter_template, dpu_rm_reserve_ctls, DEFINE_EVENT(dpu_rm_iter_template, dpu_rm_reserve_ctls,
TP_PROTO(uint32_t id, enum dpu_hw_blk_type type, uint32_t enc_id), TP_PROTO(uint32_t id, uint32_t enc_id),
TP_ARGS(id, type, enc_id) TP_ARGS(id, enc_id)
); );
TRACE_EVENT(dpu_rm_reserve_lms, TRACE_EVENT(dpu_rm_reserve_lms,
TP_PROTO(uint32_t id, enum dpu_hw_blk_type type, uint32_t enc_id, TP_PROTO(uint32_t id, uint32_t enc_id, uint32_t pp_id),
uint32_t pp_id), TP_ARGS(id, enc_id, pp_id),
TP_ARGS(id, type, enc_id, pp_id),
TP_STRUCT__entry( TP_STRUCT__entry(
__field( uint32_t, id ) __field( uint32_t, id )
__field( enum dpu_hw_blk_type, type )
__field( uint32_t, enc_id ) __field( uint32_t, enc_id )
__field( uint32_t, pp_id ) __field( uint32_t, pp_id )
), ),
TP_fast_assign( TP_fast_assign(
__entry->id = id; __entry->id = id;
__entry->type = type;
__entry->enc_id = enc_id; __entry->enc_id = enc_id;
__entry->pp_id = pp_id; __entry->pp_id = pp_id;
), ),
TP_printk("id:%d type:%d enc_id:%u pp_id:%u", __entry->id, TP_printk("id:%d enc_id:%u pp_id:%u", __entry->id,
__entry->type, __entry->enc_id, __entry->pp_id) __entry->enc_id, __entry->pp_id)
); );
TRACE_EVENT(dpu_vbif_wait_xin_halt_fail, TRACE_EVENT(dpu_vbif_wait_xin_halt_fail,
......
...@@ -207,62 +207,44 @@ u32 msm_readl(const void __iomem *addr) ...@@ -207,62 +207,44 @@ u32 msm_readl(const void __iomem *addr)
return val; return val;
} }
struct vblank_event { struct msm_vblank_work {
struct list_head node; struct work_struct work;
int crtc_id; int crtc_id;
bool enable; bool enable;
struct msm_drm_private *priv;
}; };
static void vblank_ctrl_worker(struct kthread_work *work) static void vblank_ctrl_worker(struct work_struct *work)
{ {
struct msm_vblank_ctrl *vbl_ctrl = container_of(work, struct msm_vblank_work *vbl_work = container_of(work,
struct msm_vblank_ctrl, work); struct msm_vblank_work, work);
struct msm_drm_private *priv = container_of(vbl_ctrl, struct msm_drm_private *priv = vbl_work->priv;
struct msm_drm_private, vblank_ctrl);
struct msm_kms *kms = priv->kms; struct msm_kms *kms = priv->kms;
struct vblank_event *vbl_ev, *tmp;
unsigned long flags;
spin_lock_irqsave(&vbl_ctrl->lock, flags);
list_for_each_entry_safe(vbl_ev, tmp, &vbl_ctrl->event_list, node) {
list_del(&vbl_ev->node);
spin_unlock_irqrestore(&vbl_ctrl->lock, flags);
if (vbl_ev->enable)
kms->funcs->enable_vblank(kms,
priv->crtcs[vbl_ev->crtc_id]);
else
kms->funcs->disable_vblank(kms,
priv->crtcs[vbl_ev->crtc_id]);
kfree(vbl_ev);
spin_lock_irqsave(&vbl_ctrl->lock, flags); if (vbl_work->enable)
} kms->funcs->enable_vblank(kms, priv->crtcs[vbl_work->crtc_id]);
else
kms->funcs->disable_vblank(kms, priv->crtcs[vbl_work->crtc_id]);
spin_unlock_irqrestore(&vbl_ctrl->lock, flags); kfree(vbl_work);
} }
static int vblank_ctrl_queue_work(struct msm_drm_private *priv, static int vblank_ctrl_queue_work(struct msm_drm_private *priv,
int crtc_id, bool enable) int crtc_id, bool enable)
{ {
struct msm_vblank_ctrl *vbl_ctrl = &priv->vblank_ctrl; struct msm_vblank_work *vbl_work;
struct vblank_event *vbl_ev;
unsigned long flags;
vbl_ev = kzalloc(sizeof(*vbl_ev), GFP_ATOMIC); vbl_work = kzalloc(sizeof(*vbl_work), GFP_ATOMIC);
if (!vbl_ev) if (!vbl_work)
return -ENOMEM; return -ENOMEM;
vbl_ev->crtc_id = crtc_id; INIT_WORK(&vbl_work->work, vblank_ctrl_worker);
vbl_ev->enable = enable;
spin_lock_irqsave(&vbl_ctrl->lock, flags); vbl_work->crtc_id = crtc_id;
list_add_tail(&vbl_ev->node, &vbl_ctrl->event_list); vbl_work->enable = enable;
spin_unlock_irqrestore(&vbl_ctrl->lock, flags); vbl_work->priv = priv;
kthread_queue_work(&priv->disp_thread[crtc_id].worker, queue_work(priv->wq, &vbl_work->work);
&vbl_ctrl->work);
return 0; return 0;
} }
...@@ -274,31 +256,20 @@ static int msm_drm_uninit(struct device *dev) ...@@ -274,31 +256,20 @@ static int msm_drm_uninit(struct device *dev)
struct msm_drm_private *priv = ddev->dev_private; struct msm_drm_private *priv = ddev->dev_private;
struct msm_kms *kms = priv->kms; struct msm_kms *kms = priv->kms;
struct msm_mdss *mdss = priv->mdss; struct msm_mdss *mdss = priv->mdss;
struct msm_vblank_ctrl *vbl_ctrl = &priv->vblank_ctrl;
struct vblank_event *vbl_ev, *tmp;
int i; int i;
/* We must cancel and cleanup any pending vblank enable/disable /* We must cancel and cleanup any pending vblank enable/disable
* work before drm_irq_uninstall() to avoid work re-enabling an * work before drm_irq_uninstall() to avoid work re-enabling an
* irq after uninstall has disabled it. * irq after uninstall has disabled it.
*/ */
kthread_flush_work(&vbl_ctrl->work);
list_for_each_entry_safe(vbl_ev, tmp, &vbl_ctrl->event_list, node) {
list_del(&vbl_ev->node);
kfree(vbl_ev);
}
/* clean up display commit/event worker threads */ flush_workqueue(priv->wq);
for (i = 0; i < priv->num_crtcs; i++) { destroy_workqueue(priv->wq);
if (priv->disp_thread[i].thread) {
kthread_flush_worker(&priv->disp_thread[i].worker);
kthread_stop(priv->disp_thread[i].thread);
priv->disp_thread[i].thread = NULL;
}
/* clean up event worker threads */
for (i = 0; i < priv->num_crtcs; i++) {
if (priv->event_thread[i].thread) { if (priv->event_thread[i].thread) {
kthread_flush_worker(&priv->event_thread[i].worker); kthread_destroy_worker(&priv->event_thread[i].worker);
kthread_stop(priv->event_thread[i].thread);
priv->event_thread[i].thread = NULL; priv->event_thread[i].thread = NULL;
} }
} }
...@@ -323,9 +294,6 @@ static int msm_drm_uninit(struct device *dev) ...@@ -323,9 +294,6 @@ static int msm_drm_uninit(struct device *dev)
drm_irq_uninstall(ddev); drm_irq_uninstall(ddev);
pm_runtime_put_sync(dev); pm_runtime_put_sync(dev);
flush_workqueue(priv->wq);
destroy_workqueue(priv->wq);
if (kms && kms->funcs) if (kms && kms->funcs)
kms->funcs->destroy(kms); kms->funcs->destroy(kms);
...@@ -490,9 +458,6 @@ static int msm_drm_init(struct device *dev, struct drm_driver *drv) ...@@ -490,9 +458,6 @@ static int msm_drm_init(struct device *dev, struct drm_driver *drv)
priv->wq = alloc_ordered_workqueue("msm", 0); priv->wq = alloc_ordered_workqueue("msm", 0);
INIT_LIST_HEAD(&priv->inactive_list); INIT_LIST_HEAD(&priv->inactive_list);
INIT_LIST_HEAD(&priv->vblank_ctrl.event_list);
kthread_init_work(&priv->vblank_ctrl.work, vblank_ctrl_worker);
spin_lock_init(&priv->vblank_ctrl.lock);
drm_mode_config_init(ddev); drm_mode_config_init(ddev);
...@@ -554,27 +519,6 @@ static int msm_drm_init(struct device *dev, struct drm_driver *drv) ...@@ -554,27 +519,6 @@ static int msm_drm_init(struct device *dev, struct drm_driver *drv)
*/ */
param.sched_priority = 16; param.sched_priority = 16;
for (i = 0; i < priv->num_crtcs; i++) { for (i = 0; i < priv->num_crtcs; i++) {
/* initialize display thread */
priv->disp_thread[i].crtc_id = priv->crtcs[i]->base.id;
kthread_init_worker(&priv->disp_thread[i].worker);
priv->disp_thread[i].dev = ddev;
priv->disp_thread[i].thread =
kthread_run(kthread_worker_fn,
&priv->disp_thread[i].worker,
"crtc_commit:%d", priv->disp_thread[i].crtc_id);
if (IS_ERR(priv->disp_thread[i].thread)) {
DRM_DEV_ERROR(dev, "failed to create crtc_commit kthread\n");
priv->disp_thread[i].thread = NULL;
goto err_msm_uninit;
}
ret = sched_setscheduler(priv->disp_thread[i].thread,
SCHED_FIFO, &param);
if (ret)
dev_warn(dev, "disp_thread set priority failed: %d\n",
ret);
/* initialize event thread */ /* initialize event thread */
priv->event_thread[i].crtc_id = priv->crtcs[i]->base.id; priv->event_thread[i].crtc_id = priv->crtcs[i]->base.id;
kthread_init_worker(&priv->event_thread[i].worker); kthread_init_worker(&priv->event_thread[i].worker);
...@@ -589,13 +533,6 @@ static int msm_drm_init(struct device *dev, struct drm_driver *drv) ...@@ -589,13 +533,6 @@ static int msm_drm_init(struct device *dev, struct drm_driver *drv)
goto err_msm_uninit; goto err_msm_uninit;
} }
/**
* event thread should also run at same priority as disp_thread
* because it is handling frame_done events. A lower priority
* event thread and higher priority disp_thread can causes
* frame_pending counters beyond 2. This can lead to commit
* failure at crtc commit level.
*/
ret = sched_setscheduler(priv->event_thread[i].thread, ret = sched_setscheduler(priv->event_thread[i].thread,
SCHED_FIFO, &param); SCHED_FIFO, &param);
if (ret) if (ret)
...@@ -914,8 +851,12 @@ static int msm_ioctl_gem_info(struct drm_device *dev, void *data, ...@@ -914,8 +851,12 @@ static int msm_ioctl_gem_info(struct drm_device *dev, void *data,
ret = -EINVAL; ret = -EINVAL;
break; break;
} }
ret = copy_from_user(msm_obj->name, if (copy_from_user(msm_obj->name, u64_to_user_ptr(args->value),
u64_to_user_ptr(args->value), args->len); args->len)) {
msm_obj->name[0] = '\0';
ret = -EFAULT;
break;
}
msm_obj->name[args->len] = '\0'; msm_obj->name[args->len] = '\0';
for (i = 0; i < args->len; i++) { for (i = 0; i < args->len; i++) {
if (!isprint(msm_obj->name[i])) { if (!isprint(msm_obj->name[i])) {
...@@ -931,8 +872,9 @@ static int msm_ioctl_gem_info(struct drm_device *dev, void *data, ...@@ -931,8 +872,9 @@ static int msm_ioctl_gem_info(struct drm_device *dev, void *data,
} }
args->len = strlen(msm_obj->name); args->len = strlen(msm_obj->name);
if (args->value) { if (args->value) {
ret = copy_to_user(u64_to_user_ptr(args->value), if (copy_to_user(u64_to_user_ptr(args->value),
msm_obj->name, args->len); msm_obj->name, args->len))
ret = -EFAULT;
} }
break; break;
} }
......
...@@ -77,12 +77,6 @@ enum msm_mdp_plane_property { ...@@ -77,12 +77,6 @@ enum msm_mdp_plane_property {
PLANE_PROP_MAX_NUM PLANE_PROP_MAX_NUM
}; };
struct msm_vblank_ctrl {
struct kthread_work work;
struct list_head event_list;
spinlock_t lock;
};
#define MSM_GPU_MAX_RINGS 4 #define MSM_GPU_MAX_RINGS 4
#define MAX_H_TILES_PER_DISPLAY 2 #define MAX_H_TILES_PER_DISPLAY 2
...@@ -126,7 +120,7 @@ struct msm_display_topology { ...@@ -126,7 +120,7 @@ struct msm_display_topology {
/** /**
* struct msm_display_info - defines display properties * struct msm_display_info - defines display properties
* @intf_type: DRM_MODE_CONNECTOR_ display type * @intf_type: DRM_MODE_ENCODER_ type
* @capabilities: Bitmask of display flags * @capabilities: Bitmask of display flags
* @num_of_h_tiles: Number of horizontal tiles in case of split interface * @num_of_h_tiles: Number of horizontal tiles in case of split interface
* @h_tile_instance: Controller instance used per tile. Number of elements is * @h_tile_instance: Controller instance used per tile. Number of elements is
...@@ -199,7 +193,6 @@ struct msm_drm_private { ...@@ -199,7 +193,6 @@ struct msm_drm_private {
unsigned int num_crtcs; unsigned int num_crtcs;
struct drm_crtc *crtcs[MAX_CRTCS]; struct drm_crtc *crtcs[MAX_CRTCS];
struct msm_drm_thread disp_thread[MAX_CRTCS];
struct msm_drm_thread event_thread[MAX_CRTCS]; struct msm_drm_thread event_thread[MAX_CRTCS];
unsigned int num_encoders; unsigned int num_encoders;
...@@ -228,7 +221,6 @@ struct msm_drm_private { ...@@ -228,7 +221,6 @@ struct msm_drm_private {
struct notifier_block vmap_notifier; struct notifier_block vmap_notifier;
struct shrinker shrinker; struct shrinker shrinker;
struct msm_vblank_ctrl vblank_ctrl;
struct drm_atomic_state *pm_state; struct drm_atomic_state *pm_state;
}; };
......
...@@ -560,6 +560,8 @@ ...@@ -560,6 +560,8 @@
# define DP_TEST_LINK_EDID_READ (1 << 2) # define DP_TEST_LINK_EDID_READ (1 << 2)
# define DP_TEST_LINK_PHY_TEST_PATTERN (1 << 3) /* DPCD >= 1.1 */ # define DP_TEST_LINK_PHY_TEST_PATTERN (1 << 3) /* DPCD >= 1.1 */
# define DP_TEST_LINK_FAUX_PATTERN (1 << 4) /* DPCD >= 1.2 */ # define DP_TEST_LINK_FAUX_PATTERN (1 << 4) /* DPCD >= 1.2 */
# define DP_TEST_LINK_AUDIO_PATTERN (1 << 5) /* DPCD >= 1.2 */
# define DP_TEST_LINK_AUDIO_DISABLED_VIDEO (1 << 6) /* DPCD >= 1.2 */
#define DP_TEST_LINK_RATE 0x219 #define DP_TEST_LINK_RATE 0x219
# define DP_LINK_RATE_162 (0x6) # define DP_LINK_RATE_162 (0x6)
...@@ -608,6 +610,7 @@ ...@@ -608,6 +610,7 @@
# define DP_COLOR_FORMAT_RGB (0 << 1) # define DP_COLOR_FORMAT_RGB (0 << 1)
# define DP_COLOR_FORMAT_YCbCr422 (1 << 1) # define DP_COLOR_FORMAT_YCbCr422 (1 << 1)
# define DP_COLOR_FORMAT_YCbCr444 (2 << 1) # define DP_COLOR_FORMAT_YCbCr444 (2 << 1)
# define DP_TEST_DYNAMIC_RANGE_VESA (0 << 3)
# define DP_TEST_DYNAMIC_RANGE_CEA (1 << 3) # define DP_TEST_DYNAMIC_RANGE_CEA (1 << 3)
# define DP_TEST_YCBCR_COEFFICIENTS (1 << 4) # define DP_TEST_YCBCR_COEFFICIENTS (1 << 4)
# define DP_YCBCR_COEFFICIENTS_ITU601 (0 << 4) # define DP_YCBCR_COEFFICIENTS_ITU601 (0 << 4)
...@@ -657,6 +660,16 @@ ...@@ -657,6 +660,16 @@
#define DP_TEST_SINK 0x270 #define DP_TEST_SINK 0x270
# define DP_TEST_SINK_START (1 << 0) # define DP_TEST_SINK_START (1 << 0)
#define DP_TEST_AUDIO_MODE 0x271
#define DP_TEST_AUDIO_PATTERN_TYPE 0x272
#define DP_TEST_AUDIO_PERIOD_CH1 0x273
#define DP_TEST_AUDIO_PERIOD_CH2 0x274
#define DP_TEST_AUDIO_PERIOD_CH3 0x275
#define DP_TEST_AUDIO_PERIOD_CH4 0x276
#define DP_TEST_AUDIO_PERIOD_CH5 0x277
#define DP_TEST_AUDIO_PERIOD_CH6 0x278
#define DP_TEST_AUDIO_PERIOD_CH7 0x279
#define DP_TEST_AUDIO_PERIOD_CH8 0x27A
#define DP_FEC_STATUS 0x280 /* 1.4 */ #define DP_FEC_STATUS 0x280 /* 1.4 */
# define DP_FEC_DECODE_EN_DETECTED (1 << 0) # define DP_FEC_DECODE_EN_DETECTED (1 << 0)
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment