Commit feb5cde6 authored by Dave Airlie

Merge branch 'msm-next' of git://people.freedesktop.org/~robclark/linux into drm-next

MSM tree from Rob.

* 'msm-next' of git://people.freedesktop.org/~robclark/linux:
  drm/msm: add a330/apq8x74
  drm/msm: add mdp5/apq8x74
  drm/msm: add hdmi support for apq8x74/mdp5
  drm/msm: move irq utils to mdp_kms
  drm/msm: split out msm_kms.h
  drm/msm: mdp4_format -> mdp_format
  drm/msm: resync generated headers
  drm/msm: move mdp4 -> mdp/mdp4
  drm/msm: add support for msm8060ab/bstem
  drm/msm: add support for non-IOMMU systems
  drm/msm: fix bus scaling
  drm/msm: add missing MODULE_FIRMWARE()s
  drm/msm: COMPILE_TEST support
parents 0a6659bd 55459968
...@@ -2,8 +2,7 @@ ...@@ -2,8 +2,7 @@
config DRM_MSM config DRM_MSM
tristate "MSM DRM" tristate "MSM DRM"
depends on DRM depends on DRM
depends on ARCH_MSM depends on (ARCH_MSM && ARCH_MSM8960) || (ARM && COMPILE_TEST)
depends on ARCH_MSM8960
select DRM_KMS_HELPER select DRM_KMS_HELPER
select SHMEM select SHMEM
select TMPFS select TMPFS
......
...@@ -12,18 +12,27 @@ msm-y := \ ...@@ -12,18 +12,27 @@ msm-y := \
hdmi/hdmi_i2c.o \ hdmi/hdmi_i2c.o \
hdmi/hdmi_phy_8960.o \ hdmi/hdmi_phy_8960.o \
hdmi/hdmi_phy_8x60.o \ hdmi/hdmi_phy_8x60.o \
mdp4/mdp4_crtc.o \ hdmi/hdmi_phy_8x74.o \
mdp4/mdp4_dtv_encoder.o \ mdp/mdp_format.o \
mdp4/mdp4_format.o \ mdp/mdp_kms.o \
mdp4/mdp4_irq.o \ mdp/mdp4/mdp4_crtc.o \
mdp4/mdp4_kms.o \ mdp/mdp4/mdp4_dtv_encoder.o \
mdp4/mdp4_plane.o \ mdp/mdp4/mdp4_irq.o \
mdp/mdp4/mdp4_kms.o \
mdp/mdp4/mdp4_plane.o \
mdp/mdp5/mdp5_crtc.o \
mdp/mdp5/mdp5_encoder.o \
mdp/mdp5/mdp5_irq.o \
mdp/mdp5/mdp5_kms.o \
mdp/mdp5/mdp5_plane.o \
mdp/mdp5/mdp5_smp.o \
msm_drv.o \ msm_drv.o \
msm_fb.o \ msm_fb.o \
msm_gem.o \ msm_gem.o \
msm_gem_prime.o \ msm_gem_prime.o \
msm_gem_submit.o \ msm_gem_submit.o \
msm_gpu.o \ msm_gpu.o \
msm_iommu.o \
msm_ringbuffer.o msm_ringbuffer.o
msm-$(CONFIG_DRM_MSM_FBDEV) += msm_fbdev.o msm-$(CONFIG_DRM_MSM_FBDEV) += msm_fbdev.o
......
...@@ -4,7 +4,7 @@ In the current snapdragon SoC's, we have (at least) 3 different ...@@ -4,7 +4,7 @@ In the current snapdragon SoC's, we have (at least) 3 different
display controller blocks at play: display controller blocks at play:
+ MDP3 - ?? seems to be what is on geeksphone peak device + MDP3 - ?? seems to be what is on geeksphone peak device
+ MDP4 - S3 (APQ8060, touchpad), S4-pro (APQ8064, nexus4 & ifc6410) + MDP4 - S3 (APQ8060, touchpad), S4-pro (APQ8064, nexus4 & ifc6410)
+ MDSS - snapdragon 800 + MDP5 - snapdragon 800
(I don't have a completely clear picture on which display controller (I don't have a completely clear picture on which display controller
maps to which part #) maps to which part #)
...@@ -46,6 +46,24 @@ and treat the MDP4 block's irq as "the" irq. Even though the connectors ...@@ -46,6 +46,24 @@ and treat the MDP4 block's irq as "the" irq. Even though the connectors
may have their own irqs which they install themselves. For this reason may have their own irqs which they install themselves. For this reason
the display controller is the "master" device. the display controller is the "master" device.
For MDP5, the mapping is:
plane -> PIPE{RGBn,VIGn} \
crtc -> LM (layer mixer) |-> MDP "device"
encoder -> INTF /
connector -> HDMI/DSI/eDP/etc --> other device(s)
Unlike MDP4, it appears we can get by with a single encoder, rather
than needing a different implementation for DTV, DSI, etc. (i.e. the
register interface is the same, just different bases.)
Also unlike MDP4, with MDP5 all the IRQs for other blocks (HDMI, DSI,
etc) are routed through MDP.
And finally, MDP5 has this "Shared Memory Pool" (called "SMP"), from
which blocks need to be allocated to the active pipes based on fetch
stride.
Each connector probably ends up being a separate device, just for the Each connector probably ends up being a separate device, just for the
logistics of finding/mapping io region, irq, etc. Ideally we would logistics of finding/mapping io region, irq, etc. Ideally we would
have a better way than just stashing the platform device in a global have a better way than just stashing the platform device in a global
......
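To make the "Shared Memory Pool" (SMP) note above concrete, here is a minimal sketch of how a pipe's share of the pool could be sized from its fetch stride. It is illustrative only: the block size, the number of buffered scanlines, and the names used are assumptions, not the actual mdp5 SMP interface added by this series.

#include <stdint.h>

/* Assumed values for illustration; the real block size and per-client
 * line buffering are properties of the MDP5 hardware configuration. */
#define SMP_BLK_SIZE	4096	/* bytes per SMP block (assumed) */
#define SMP_NUM_LINES	2	/* scanlines buffered per pipe (assumed) */

/* Blocks needed for one plane: the fetch stride (bytes per scanline)
 * times the buffered line count, rounded up to whole blocks. */
static uint32_t smp_blocks_needed(uint32_t width, uint32_t bytes_per_pixel)
{
	uint32_t fetch_stride = width * bytes_per_pixel;

	return (fetch_stride * SMP_NUM_LINES + SMP_BLK_SIZE - 1) / SMP_BLK_SIZE;
}

For example, a 1920-pixel-wide XRGB8888 layer with two buffered lines works out to (1920 * 4 * 2 + 4095) / 4096 = 4 blocks; an allocator along these lines would hand that many blocks to the pipe and reclaim them when the plane is disabled.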
...@@ -8,12 +8,13 @@ This file was generated by the rules-ng-ng headergen tool in this git repository ...@@ -8,12 +8,13 @@ This file was generated by the rules-ng-ng headergen tool in this git repository
git clone https://github.com/freedreno/envytools.git git clone https://github.com/freedreno/envytools.git
The rules-ng-ng source files this header was generated from are: The rules-ng-ng source files this header was generated from are:
- /home/robclark/src/freedreno/envytools/rnndb/adreno.xml ( 327 bytes, from 2013-07-05 19:21:12) - /home/robclark/src/freedreno/envytools/rnndb/adreno.xml ( 364 bytes, from 2013-11-30 14:47:15)
- /home/robclark/src/freedreno/envytools/rnndb/freedreno_copyright.xml ( 1453 bytes, from 2013-03-31 16:51:27) - /home/robclark/src/freedreno/envytools/rnndb/freedreno_copyright.xml ( 1453 bytes, from 2013-03-31 16:51:27)
- /home/robclark/src/freedreno/envytools/rnndb/a2xx/a2xx.xml ( 31003 bytes, from 2013-09-19 18:50:16) - /home/robclark/src/freedreno/envytools/rnndb/adreno/a2xx.xml ( 32814 bytes, from 2013-11-30 15:07:33)
- /home/robclark/src/freedreno/envytools/rnndb/adreno_common.xml ( 8983 bytes, from 2013-07-24 01:38:36) - /home/robclark/src/freedreno/envytools/rnndb/adreno/adreno_common.xml ( 8900 bytes, from 2013-10-22 23:57:49)
- /home/robclark/src/freedreno/envytools/rnndb/adreno_pm4.xml ( 9759 bytes, from 2013-09-10 00:52:33) - /home/robclark/src/freedreno/envytools/rnndb/adreno/adreno_pm4.xml ( 10574 bytes, from 2013-11-13 05:44:45)
- /home/robclark/src/freedreno/envytools/rnndb/a3xx/a3xx.xml ( 51983 bytes, from 2013-09-10 00:52:32) - /home/robclark/src/freedreno/envytools/rnndb/adreno/a3xx.xml ( 53644 bytes, from 2013-11-30 15:07:33)
- /home/robclark/src/freedreno/envytools/rnndb/adreno/a4xx.xml ( 8344 bytes, from 2013-11-30 14:49:47)
Copyright (C) 2013 by the following authors: Copyright (C) 2013 by the following authors:
- Rob Clark <robdclark@gmail.com> (robclark) - Rob Clark <robdclark@gmail.com> (robclark)
...@@ -202,6 +203,12 @@ enum a2xx_rb_copy_sample_select { ...@@ -202,6 +203,12 @@ enum a2xx_rb_copy_sample_select {
SAMPLE_0123 = 6, SAMPLE_0123 = 6,
}; };
enum adreno_mmu_clnt_beh {
BEH_NEVR = 0,
BEH_TRAN_RNG = 1,
BEH_TRAN_FLT = 2,
};
enum sq_tex_clamp { enum sq_tex_clamp {
SQ_TEX_WRAP = 0, SQ_TEX_WRAP = 0,
SQ_TEX_MIRROR = 1, SQ_TEX_MIRROR = 1,
...@@ -238,6 +245,92 @@ enum sq_tex_filter { ...@@ -238,6 +245,92 @@ enum sq_tex_filter {
#define REG_A2XX_CP_PFP_UCODE_DATA 0x000000c1 #define REG_A2XX_CP_PFP_UCODE_DATA 0x000000c1
#define REG_A2XX_MH_MMU_CONFIG 0x00000040
#define A2XX_MH_MMU_CONFIG_MMU_ENABLE 0x00000001
#define A2XX_MH_MMU_CONFIG_SPLIT_MODE_ENABLE 0x00000002
#define A2XX_MH_MMU_CONFIG_RB_W_CLNT_BEHAVIOR__MASK 0x00000030
#define A2XX_MH_MMU_CONFIG_RB_W_CLNT_BEHAVIOR__SHIFT 4
static inline uint32_t A2XX_MH_MMU_CONFIG_RB_W_CLNT_BEHAVIOR(enum adreno_mmu_clnt_beh val)
{
return ((val) << A2XX_MH_MMU_CONFIG_RB_W_CLNT_BEHAVIOR__SHIFT) & A2XX_MH_MMU_CONFIG_RB_W_CLNT_BEHAVIOR__MASK;
}
#define A2XX_MH_MMU_CONFIG_CP_W_CLNT_BEHAVIOR__MASK 0x000000c0
#define A2XX_MH_MMU_CONFIG_CP_W_CLNT_BEHAVIOR__SHIFT 6
static inline uint32_t A2XX_MH_MMU_CONFIG_CP_W_CLNT_BEHAVIOR(enum adreno_mmu_clnt_beh val)
{
return ((val) << A2XX_MH_MMU_CONFIG_CP_W_CLNT_BEHAVIOR__SHIFT) & A2XX_MH_MMU_CONFIG_CP_W_CLNT_BEHAVIOR__MASK;
}
#define A2XX_MH_MMU_CONFIG_CP_R0_CLNT_BEHAVIOR__MASK 0x00000300
#define A2XX_MH_MMU_CONFIG_CP_R0_CLNT_BEHAVIOR__SHIFT 8
static inline uint32_t A2XX_MH_MMU_CONFIG_CP_R0_CLNT_BEHAVIOR(enum adreno_mmu_clnt_beh val)
{
return ((val) << A2XX_MH_MMU_CONFIG_CP_R0_CLNT_BEHAVIOR__SHIFT) & A2XX_MH_MMU_CONFIG_CP_R0_CLNT_BEHAVIOR__MASK;
}
#define A2XX_MH_MMU_CONFIG_CP_R1_CLNT_BEHAVIOR__MASK 0x00000c00
#define A2XX_MH_MMU_CONFIG_CP_R1_CLNT_BEHAVIOR__SHIFT 10
static inline uint32_t A2XX_MH_MMU_CONFIG_CP_R1_CLNT_BEHAVIOR(enum adreno_mmu_clnt_beh val)
{
return ((val) << A2XX_MH_MMU_CONFIG_CP_R1_CLNT_BEHAVIOR__SHIFT) & A2XX_MH_MMU_CONFIG_CP_R1_CLNT_BEHAVIOR__MASK;
}
#define A2XX_MH_MMU_CONFIG_CP_R2_CLNT_BEHAVIOR__MASK 0x00003000
#define A2XX_MH_MMU_CONFIG_CP_R2_CLNT_BEHAVIOR__SHIFT 12
static inline uint32_t A2XX_MH_MMU_CONFIG_CP_R2_CLNT_BEHAVIOR(enum adreno_mmu_clnt_beh val)
{
return ((val) << A2XX_MH_MMU_CONFIG_CP_R2_CLNT_BEHAVIOR__SHIFT) & A2XX_MH_MMU_CONFIG_CP_R2_CLNT_BEHAVIOR__MASK;
}
#define A2XX_MH_MMU_CONFIG_CP_R3_CLNT_BEHAVIOR__MASK 0x0000c000
#define A2XX_MH_MMU_CONFIG_CP_R3_CLNT_BEHAVIOR__SHIFT 14
static inline uint32_t A2XX_MH_MMU_CONFIG_CP_R3_CLNT_BEHAVIOR(enum adreno_mmu_clnt_beh val)
{
return ((val) << A2XX_MH_MMU_CONFIG_CP_R3_CLNT_BEHAVIOR__SHIFT) & A2XX_MH_MMU_CONFIG_CP_R3_CLNT_BEHAVIOR__MASK;
}
#define A2XX_MH_MMU_CONFIG_CP_R4_CLNT_BEHAVIOR__MASK 0x00030000
#define A2XX_MH_MMU_CONFIG_CP_R4_CLNT_BEHAVIOR__SHIFT 16
static inline uint32_t A2XX_MH_MMU_CONFIG_CP_R4_CLNT_BEHAVIOR(enum adreno_mmu_clnt_beh val)
{
return ((val) << A2XX_MH_MMU_CONFIG_CP_R4_CLNT_BEHAVIOR__SHIFT) & A2XX_MH_MMU_CONFIG_CP_R4_CLNT_BEHAVIOR__MASK;
}
#define A2XX_MH_MMU_CONFIG_VGT_R0_CLNT_BEHAVIOR__MASK 0x000c0000
#define A2XX_MH_MMU_CONFIG_VGT_R0_CLNT_BEHAVIOR__SHIFT 18
static inline uint32_t A2XX_MH_MMU_CONFIG_VGT_R0_CLNT_BEHAVIOR(enum adreno_mmu_clnt_beh val)
{
return ((val) << A2XX_MH_MMU_CONFIG_VGT_R0_CLNT_BEHAVIOR__SHIFT) & A2XX_MH_MMU_CONFIG_VGT_R0_CLNT_BEHAVIOR__MASK;
}
#define A2XX_MH_MMU_CONFIG_VGT_R1_CLNT_BEHAVIOR__MASK 0x00300000
#define A2XX_MH_MMU_CONFIG_VGT_R1_CLNT_BEHAVIOR__SHIFT 20
static inline uint32_t A2XX_MH_MMU_CONFIG_VGT_R1_CLNT_BEHAVIOR(enum adreno_mmu_clnt_beh val)
{
return ((val) << A2XX_MH_MMU_CONFIG_VGT_R1_CLNT_BEHAVIOR__SHIFT) & A2XX_MH_MMU_CONFIG_VGT_R1_CLNT_BEHAVIOR__MASK;
}
#define A2XX_MH_MMU_CONFIG_TC_R_CLNT_BEHAVIOR__MASK 0x00c00000
#define A2XX_MH_MMU_CONFIG_TC_R_CLNT_BEHAVIOR__SHIFT 22
static inline uint32_t A2XX_MH_MMU_CONFIG_TC_R_CLNT_BEHAVIOR(enum adreno_mmu_clnt_beh val)
{
return ((val) << A2XX_MH_MMU_CONFIG_TC_R_CLNT_BEHAVIOR__SHIFT) & A2XX_MH_MMU_CONFIG_TC_R_CLNT_BEHAVIOR__MASK;
}
#define A2XX_MH_MMU_CONFIG_PA_W_CLNT_BEHAVIOR__MASK 0x03000000
#define A2XX_MH_MMU_CONFIG_PA_W_CLNT_BEHAVIOR__SHIFT 24
static inline uint32_t A2XX_MH_MMU_CONFIG_PA_W_CLNT_BEHAVIOR(enum adreno_mmu_clnt_beh val)
{
return ((val) << A2XX_MH_MMU_CONFIG_PA_W_CLNT_BEHAVIOR__SHIFT) & A2XX_MH_MMU_CONFIG_PA_W_CLNT_BEHAVIOR__MASK;
}
#define REG_A2XX_MH_MMU_VA_RANGE 0x00000041
#define REG_A2XX_MH_MMU_PT_BASE 0x00000042
#define REG_A2XX_MH_MMU_PAGE_FAULT 0x00000043
#define REG_A2XX_MH_MMU_TRAN_ERROR 0x00000044
#define REG_A2XX_MH_MMU_INVALIDATE 0x00000045
#define REG_A2XX_MH_MMU_MPU_BASE 0x00000046
#define REG_A2XX_MH_MMU_MPU_END 0x00000047
#define REG_A2XX_NQWAIT_UNTIL 0x00000394
#define REG_A2XX_RBBM_PERFCOUNTER1_SELECT 0x00000395 #define REG_A2XX_RBBM_PERFCOUNTER1_SELECT 0x00000395
#define REG_A2XX_RBBM_PERFCOUNTER1_LO 0x00000397 #define REG_A2XX_RBBM_PERFCOUNTER1_LO 0x00000397
...@@ -276,20 +369,6 @@ enum sq_tex_filter { ...@@ -276,20 +369,6 @@ enum sq_tex_filter {
#define REG_A2XX_CP_PERFCOUNTER_HI 0x00000447 #define REG_A2XX_CP_PERFCOUNTER_HI 0x00000447
#define REG_A2XX_CP_ST_BASE 0x0000044d
#define REG_A2XX_CP_ST_BUFSZ 0x0000044e
#define REG_A2XX_CP_IB1_BASE 0x00000458
#define REG_A2XX_CP_IB1_BUFSZ 0x00000459
#define REG_A2XX_CP_IB2_BASE 0x0000045a
#define REG_A2XX_CP_IB2_BUFSZ 0x0000045b
#define REG_A2XX_CP_STAT 0x0000047f
#define REG_A2XX_RBBM_STATUS 0x000005d0 #define REG_A2XX_RBBM_STATUS 0x000005d0
#define A2XX_RBBM_STATUS_CMDFIFO_AVAIL__MASK 0x0000001f #define A2XX_RBBM_STATUS_CMDFIFO_AVAIL__MASK 0x0000001f
#define A2XX_RBBM_STATUS_CMDFIFO_AVAIL__SHIFT 0 #define A2XX_RBBM_STATUS_CMDFIFO_AVAIL__SHIFT 0
...@@ -808,6 +887,12 @@ static inline uint32_t A2XX_SQ_CONTEXT_MISC_PARAM_GEN_POS(uint32_t val) ...@@ -808,6 +887,12 @@ static inline uint32_t A2XX_SQ_CONTEXT_MISC_PARAM_GEN_POS(uint32_t val)
#define REG_A2XX_SQ_VS_PROGRAM 0x000021f7 #define REG_A2XX_SQ_VS_PROGRAM 0x000021f7
#define REG_A2XX_VGT_EVENT_INITIATOR 0x000021f9
#define REG_A2XX_VGT_DRAW_INITIATOR 0x000021fc
#define REG_A2XX_VGT_IMMED_DATA 0x000021fd
#define REG_A2XX_RB_DEPTHCONTROL 0x00002200 #define REG_A2XX_RB_DEPTHCONTROL 0x00002200
#define A2XX_RB_DEPTHCONTROL_STENCIL_ENABLE 0x00000001 #define A2XX_RB_DEPTHCONTROL_STENCIL_ENABLE 0x00000001
#define A2XX_RB_DEPTHCONTROL_Z_ENABLE 0x00000002 #define A2XX_RB_DEPTHCONTROL_Z_ENABLE 0x00000002
......
...@@ -8,12 +8,13 @@ This file was generated by the rules-ng-ng headergen tool in this git repository ...@@ -8,12 +8,13 @@ This file was generated by the rules-ng-ng headergen tool in this git repository
git clone https://github.com/freedreno/envytools.git git clone https://github.com/freedreno/envytools.git
The rules-ng-ng source files this header was generated from are: The rules-ng-ng source files this header was generated from are:
- /home/robclark/src/freedreno/envytools/rnndb/adreno.xml ( 327 bytes, from 2013-07-05 19:21:12) - /home/robclark/src/freedreno/envytools/rnndb/adreno.xml ( 364 bytes, from 2013-11-30 14:47:15)
- /home/robclark/src/freedreno/envytools/rnndb/freedreno_copyright.xml ( 1453 bytes, from 2013-03-31 16:51:27) - /home/robclark/src/freedreno/envytools/rnndb/freedreno_copyright.xml ( 1453 bytes, from 2013-03-31 16:51:27)
- /home/robclark/src/freedreno/envytools/rnndb/a2xx/a2xx.xml ( 31003 bytes, from 2013-09-19 18:50:16) - /home/robclark/src/freedreno/envytools/rnndb/adreno/a2xx.xml ( 32814 bytes, from 2013-11-30 15:07:33)
- /home/robclark/src/freedreno/envytools/rnndb/adreno_common.xml ( 8983 bytes, from 2013-07-24 01:38:36) - /home/robclark/src/freedreno/envytools/rnndb/adreno/adreno_common.xml ( 8900 bytes, from 2013-10-22 23:57:49)
- /home/robclark/src/freedreno/envytools/rnndb/adreno_pm4.xml ( 9759 bytes, from 2013-09-10 00:52:33) - /home/robclark/src/freedreno/envytools/rnndb/adreno/adreno_pm4.xml ( 10574 bytes, from 2013-11-13 05:44:45)
- /home/robclark/src/freedreno/envytools/rnndb/a3xx/a3xx.xml ( 51983 bytes, from 2013-09-10 00:52:32) - /home/robclark/src/freedreno/envytools/rnndb/adreno/a3xx.xml ( 53644 bytes, from 2013-11-30 15:07:33)
- /home/robclark/src/freedreno/envytools/rnndb/adreno/a4xx.xml ( 8344 bytes, from 2013-11-30 14:49:47)
Copyright (C) 2013 by the following authors: Copyright (C) 2013 by the following authors:
- Rob Clark <robdclark@gmail.com> (robclark) - Rob Clark <robdclark@gmail.com> (robclark)
...@@ -292,6 +293,8 @@ enum a3xx_tex_type { ...@@ -292,6 +293,8 @@ enum a3xx_tex_type {
#define A3XX_RBBM_STATUS_GPU_BUSY_NOHC 0x40000000 #define A3XX_RBBM_STATUS_GPU_BUSY_NOHC 0x40000000
#define A3XX_RBBM_STATUS_GPU_BUSY 0x80000000 #define A3XX_RBBM_STATUS_GPU_BUSY 0x80000000
#define REG_A3XX_RBBM_NQWAIT_UNTIL 0x00000040
#define REG_A3XX_RBBM_WAIT_IDLE_CLOCKS_CTL 0x00000033 #define REG_A3XX_RBBM_WAIT_IDLE_CLOCKS_CTL 0x00000033
#define REG_A3XX_RBBM_INTERFACE_HANG_INT_CTL 0x00000050 #define REG_A3XX_RBBM_INTERFACE_HANG_INT_CTL 0x00000050
...@@ -304,6 +307,8 @@ enum a3xx_tex_type { ...@@ -304,6 +307,8 @@ enum a3xx_tex_type {
#define REG_A3XX_RBBM_INTERFACE_HANG_MASK_CTL3 0x0000005a #define REG_A3XX_RBBM_INTERFACE_HANG_MASK_CTL3 0x0000005a
#define REG_A3XX_RBBM_INT_SET_CMD 0x00000060
#define REG_A3XX_RBBM_INT_CLEAR_CMD 0x00000061 #define REG_A3XX_RBBM_INT_CLEAR_CMD 0x00000061
#define REG_A3XX_RBBM_INT_0_MASK 0x00000063 #define REG_A3XX_RBBM_INT_0_MASK 0x00000063
...@@ -937,13 +942,13 @@ static inline uint32_t A3XX_RB_BLEND_ALPHA_FLOAT(float val) ...@@ -937,13 +942,13 @@ static inline uint32_t A3XX_RB_BLEND_ALPHA_FLOAT(float val)
return ((util_float_to_half(val)) << A3XX_RB_BLEND_ALPHA_FLOAT__SHIFT) & A3XX_RB_BLEND_ALPHA_FLOAT__MASK; return ((util_float_to_half(val)) << A3XX_RB_BLEND_ALPHA_FLOAT__SHIFT) & A3XX_RB_BLEND_ALPHA_FLOAT__MASK;
} }
#define REG_A3XX_UNKNOWN_20E8 0x000020e8 #define REG_A3XX_RB_CLEAR_COLOR_DW0 0x000020e8
#define REG_A3XX_UNKNOWN_20E9 0x000020e9 #define REG_A3XX_RB_CLEAR_COLOR_DW1 0x000020e9
#define REG_A3XX_UNKNOWN_20EA 0x000020ea #define REG_A3XX_RB_CLEAR_COLOR_DW2 0x000020ea
#define REG_A3XX_UNKNOWN_20EB 0x000020eb #define REG_A3XX_RB_CLEAR_COLOR_DW3 0x000020eb
#define REG_A3XX_RB_COPY_CONTROL 0x000020ec #define REG_A3XX_RB_COPY_CONTROL 0x000020ec
#define A3XX_RB_COPY_CONTROL_MSAA_RESOLVE__MASK 0x00000003 #define A3XX_RB_COPY_CONTROL_MSAA_RESOLVE__MASK 0x00000003
...@@ -1026,7 +1031,7 @@ static inline uint32_t A3XX_RB_DEPTH_CONTROL_ZFUNC(enum adreno_compare_func val) ...@@ -1026,7 +1031,7 @@ static inline uint32_t A3XX_RB_DEPTH_CONTROL_ZFUNC(enum adreno_compare_func val)
#define A3XX_RB_DEPTH_CONTROL_BF_ENABLE 0x00000080 #define A3XX_RB_DEPTH_CONTROL_BF_ENABLE 0x00000080
#define A3XX_RB_DEPTH_CONTROL_Z_TEST_ENABLE 0x80000000 #define A3XX_RB_DEPTH_CONTROL_Z_TEST_ENABLE 0x80000000
#define REG_A3XX_UNKNOWN_2101 0x00002101 #define REG_A3XX_RB_DEPTH_CLEAR 0x00002101
#define REG_A3XX_RB_DEPTH_INFO 0x00002102 #define REG_A3XX_RB_DEPTH_INFO 0x00002102
#define A3XX_RB_DEPTH_INFO_DEPTH_FORMAT__MASK 0x00000001 #define A3XX_RB_DEPTH_INFO_DEPTH_FORMAT__MASK 0x00000001
...@@ -1103,11 +1108,11 @@ static inline uint32_t A3XX_RB_STENCIL_CONTROL_ZFAIL_BF(enum adreno_stencil_op v ...@@ -1103,11 +1108,11 @@ static inline uint32_t A3XX_RB_STENCIL_CONTROL_ZFAIL_BF(enum adreno_stencil_op v
return ((val) << A3XX_RB_STENCIL_CONTROL_ZFAIL_BF__SHIFT) & A3XX_RB_STENCIL_CONTROL_ZFAIL_BF__MASK; return ((val) << A3XX_RB_STENCIL_CONTROL_ZFAIL_BF__SHIFT) & A3XX_RB_STENCIL_CONTROL_ZFAIL_BF__MASK;
} }
#define REG_A3XX_UNKNOWN_2105 0x00002105 #define REG_A3XX_RB_STENCIL_CLEAR 0x00002105
#define REG_A3XX_UNKNOWN_2106 0x00002106 #define REG_A3XX_RB_STENCIL_BUF_INFO 0x00002106
#define REG_A3XX_UNKNOWN_2107 0x00002107 #define REG_A3XX_RB_STENCIL_BUF_PITCH 0x00002107
#define REG_A3XX_RB_STENCILREFMASK 0x00002108 #define REG_A3XX_RB_STENCILREFMASK 0x00002108
#define A3XX_RB_STENCILREFMASK_STENCILREF__MASK 0x000000ff #define A3XX_RB_STENCILREFMASK_STENCILREF__MASK 0x000000ff
...@@ -1149,20 +1154,31 @@ static inline uint32_t A3XX_RB_STENCILREFMASK_BF_STENCILWRITEMASK(uint32_t val) ...@@ -1149,20 +1154,31 @@ static inline uint32_t A3XX_RB_STENCILREFMASK_BF_STENCILWRITEMASK(uint32_t val)
return ((val) << A3XX_RB_STENCILREFMASK_BF_STENCILWRITEMASK__SHIFT) & A3XX_RB_STENCILREFMASK_BF_STENCILWRITEMASK__MASK; return ((val) << A3XX_RB_STENCILREFMASK_BF_STENCILWRITEMASK__SHIFT) & A3XX_RB_STENCILREFMASK_BF_STENCILWRITEMASK__MASK;
} }
#define REG_A3XX_PA_SC_WINDOW_OFFSET 0x0000210e #define REG_A3XX_RB_LRZ_VSC_CONTROL 0x0000210c
#define A3XX_PA_SC_WINDOW_OFFSET_X__MASK 0x0000ffff #define A3XX_RB_LRZ_VSC_CONTROL_BINNING_ENABLE 0x00000002
#define A3XX_PA_SC_WINDOW_OFFSET_X__SHIFT 0
static inline uint32_t A3XX_PA_SC_WINDOW_OFFSET_X(uint32_t val) #define REG_A3XX_RB_WINDOW_OFFSET 0x0000210e
#define A3XX_RB_WINDOW_OFFSET_X__MASK 0x0000ffff
#define A3XX_RB_WINDOW_OFFSET_X__SHIFT 0
static inline uint32_t A3XX_RB_WINDOW_OFFSET_X(uint32_t val)
{ {
return ((val) << A3XX_PA_SC_WINDOW_OFFSET_X__SHIFT) & A3XX_PA_SC_WINDOW_OFFSET_X__MASK; return ((val) << A3XX_RB_WINDOW_OFFSET_X__SHIFT) & A3XX_RB_WINDOW_OFFSET_X__MASK;
} }
#define A3XX_PA_SC_WINDOW_OFFSET_Y__MASK 0xffff0000 #define A3XX_RB_WINDOW_OFFSET_Y__MASK 0xffff0000
#define A3XX_PA_SC_WINDOW_OFFSET_Y__SHIFT 16 #define A3XX_RB_WINDOW_OFFSET_Y__SHIFT 16
static inline uint32_t A3XX_PA_SC_WINDOW_OFFSET_Y(uint32_t val) static inline uint32_t A3XX_RB_WINDOW_OFFSET_Y(uint32_t val)
{ {
return ((val) << A3XX_PA_SC_WINDOW_OFFSET_Y__SHIFT) & A3XX_PA_SC_WINDOW_OFFSET_Y__MASK; return ((val) << A3XX_RB_WINDOW_OFFSET_Y__SHIFT) & A3XX_RB_WINDOW_OFFSET_Y__MASK;
} }
#define REG_A3XX_RB_SAMPLE_COUNT_CONTROL 0x00002110
#define REG_A3XX_RB_SAMPLE_COUNT_ADDR 0x00002111
#define REG_A3XX_RB_Z_CLAMP_MIN 0x00002114
#define REG_A3XX_RB_Z_CLAMP_MAX 0x00002115
#define REG_A3XX_PC_VSTREAM_CONTROL 0x000021e4 #define REG_A3XX_PC_VSTREAM_CONTROL 0x000021e4
#define REG_A3XX_PC_VERTEX_REUSE_BLOCK_CNTL 0x000021ea #define REG_A3XX_PC_VERTEX_REUSE_BLOCK_CNTL 0x000021ea
...@@ -1309,6 +1325,8 @@ static inline uint32_t A3XX_HLSQ_CONST_FSPRESV_RANGE_REG_ENDENTRY(uint32_t val) ...@@ -1309,6 +1325,8 @@ static inline uint32_t A3XX_HLSQ_CONST_FSPRESV_RANGE_REG_ENDENTRY(uint32_t val)
#define REG_A3XX_HLSQ_CL_KERNEL_GROUP_X_REG 0x00002215 #define REG_A3XX_HLSQ_CL_KERNEL_GROUP_X_REG 0x00002215
#define REG_A3XX_HLSQ_CL_KERNEL_GROUP_Y_REG 0x00002216
#define REG_A3XX_HLSQ_CL_KERNEL_GROUP_Z_REG 0x00002217 #define REG_A3XX_HLSQ_CL_KERNEL_GROUP_Z_REG 0x00002217
#define REG_A3XX_HLSQ_CL_WG_OFFSET_REG 0x0000221a #define REG_A3XX_HLSQ_CL_WG_OFFSET_REG 0x0000221a
...@@ -1491,12 +1509,13 @@ static inline uint32_t REG_A3XX_VPC_VARYING_PS_REPL_MODE(uint32_t i0) { return 0 ...@@ -1491,12 +1509,13 @@ static inline uint32_t REG_A3XX_VPC_VARYING_PS_REPL_MODE(uint32_t i0) { return 0
#define REG_A3XX_SP_SP_CTRL_REG 0x000022c0 #define REG_A3XX_SP_SP_CTRL_REG 0x000022c0
#define A3XX_SP_SP_CTRL_REG_RESOLVE 0x00010000 #define A3XX_SP_SP_CTRL_REG_RESOLVE 0x00010000
#define A3XX_SP_SP_CTRL_REG_CONSTMODE__MASK 0x000c0000 #define A3XX_SP_SP_CTRL_REG_CONSTMODE__MASK 0x00040000
#define A3XX_SP_SP_CTRL_REG_CONSTMODE__SHIFT 18 #define A3XX_SP_SP_CTRL_REG_CONSTMODE__SHIFT 18
static inline uint32_t A3XX_SP_SP_CTRL_REG_CONSTMODE(uint32_t val) static inline uint32_t A3XX_SP_SP_CTRL_REG_CONSTMODE(uint32_t val)
{ {
return ((val) << A3XX_SP_SP_CTRL_REG_CONSTMODE__SHIFT) & A3XX_SP_SP_CTRL_REG_CONSTMODE__MASK; return ((val) << A3XX_SP_SP_CTRL_REG_CONSTMODE__SHIFT) & A3XX_SP_SP_CTRL_REG_CONSTMODE__MASK;
} }
#define A3XX_SP_SP_CTRL_REG_BINNING 0x00080000
#define A3XX_SP_SP_CTRL_REG_SLEEPMODE__MASK 0x00300000 #define A3XX_SP_SP_CTRL_REG_SLEEPMODE__MASK 0x00300000
#define A3XX_SP_SP_CTRL_REG_SLEEPMODE__SHIFT 20 #define A3XX_SP_SP_CTRL_REG_SLEEPMODE__SHIFT 20
static inline uint32_t A3XX_SP_SP_CTRL_REG_SLEEPMODE(uint32_t val) static inline uint32_t A3XX_SP_SP_CTRL_REG_SLEEPMODE(uint32_t val)
...@@ -1669,7 +1688,7 @@ static inline uint32_t A3XX_SP_VS_OBJ_OFFSET_REG_SHADEROBJOFFSET(uint32_t val) ...@@ -1669,7 +1688,7 @@ static inline uint32_t A3XX_SP_VS_OBJ_OFFSET_REG_SHADEROBJOFFSET(uint32_t val)
#define REG_A3XX_SP_VS_OBJ_START_REG 0x000022d5 #define REG_A3XX_SP_VS_OBJ_START_REG 0x000022d5
#define REG_A3XX_SP_VS_PVT_MEM_CTRL_REG 0x000022d6 #define REG_A3XX_SP_VS_PVT_MEM_PARAM_REG 0x000022d6
#define REG_A3XX_SP_VS_PVT_MEM_ADDR_REG 0x000022d7 #define REG_A3XX_SP_VS_PVT_MEM_ADDR_REG 0x000022d7
...@@ -1772,7 +1791,7 @@ static inline uint32_t A3XX_SP_FS_OBJ_OFFSET_REG_SHADEROBJOFFSET(uint32_t val) ...@@ -1772,7 +1791,7 @@ static inline uint32_t A3XX_SP_FS_OBJ_OFFSET_REG_SHADEROBJOFFSET(uint32_t val)
#define REG_A3XX_SP_FS_OBJ_START_REG 0x000022e3 #define REG_A3XX_SP_FS_OBJ_START_REG 0x000022e3
#define REG_A3XX_SP_FS_PVT_MEM_CTRL_REG 0x000022e4 #define REG_A3XX_SP_FS_PVT_MEM_PARAM_REG 0x000022e4
#define REG_A3XX_SP_FS_PVT_MEM_ADDR_REG 0x000022e5 #define REG_A3XX_SP_FS_PVT_MEM_ADDR_REG 0x000022e5
...@@ -1943,6 +1962,9 @@ static inline uint32_t REG_A3XX_VSC_PIPE_DATA_ADDRESS(uint32_t i0) { return 0x00 ...@@ -1943,6 +1962,9 @@ static inline uint32_t REG_A3XX_VSC_PIPE_DATA_ADDRESS(uint32_t i0) { return 0x00
static inline uint32_t REG_A3XX_VSC_PIPE_DATA_LENGTH(uint32_t i0) { return 0x00000c08 + 0x3*i0; } static inline uint32_t REG_A3XX_VSC_PIPE_DATA_LENGTH(uint32_t i0) { return 0x00000c08 + 0x3*i0; }
#define REG_A3XX_VSC_BIN_CONTROL 0x00000c3c
#define A3XX_VSC_BIN_CONTROL_BINNING_ENABLE 0x00000001
#define REG_A3XX_UNKNOWN_0C3D 0x00000c3d #define REG_A3XX_UNKNOWN_0C3D 0x00000c3d
#define REG_A3XX_PC_PERFCOUNTER0_SELECT 0x00000c48 #define REG_A3XX_PC_PERFCOUNTER0_SELECT 0x00000c48
...@@ -1953,7 +1975,7 @@ static inline uint32_t REG_A3XX_VSC_PIPE_DATA_LENGTH(uint32_t i0) { return 0x000 ...@@ -1953,7 +1975,7 @@ static inline uint32_t REG_A3XX_VSC_PIPE_DATA_LENGTH(uint32_t i0) { return 0x000
#define REG_A3XX_PC_PERFCOUNTER3_SELECT 0x00000c4b #define REG_A3XX_PC_PERFCOUNTER3_SELECT 0x00000c4b
#define REG_A3XX_UNKNOWN_0C81 0x00000c81 #define REG_A3XX_GRAS_TSE_DEBUG_ECO 0x00000c81
#define REG_A3XX_GRAS_PERFCOUNTER0_SELECT 0x00000c88 #define REG_A3XX_GRAS_PERFCOUNTER0_SELECT 0x00000c88
...@@ -1975,22 +1997,24 @@ static inline uint32_t REG_A3XX_GRAS_CL_USER_PLANE_W(uint32_t i0) { return 0x000 ...@@ -1975,22 +1997,24 @@ static inline uint32_t REG_A3XX_GRAS_CL_USER_PLANE_W(uint32_t i0) { return 0x000
#define REG_A3XX_RB_GMEM_BASE_ADDR 0x00000cc0 #define REG_A3XX_RB_GMEM_BASE_ADDR 0x00000cc0
#define REG_A3XX_RB_DEBUG_ECO_CONTROLS_ADDR 0x00000cc1
#define REG_A3XX_RB_PERFCOUNTER0_SELECT 0x00000cc6 #define REG_A3XX_RB_PERFCOUNTER0_SELECT 0x00000cc6
#define REG_A3XX_RB_PERFCOUNTER1_SELECT 0x00000cc7 #define REG_A3XX_RB_PERFCOUNTER1_SELECT 0x00000cc7
#define REG_A3XX_RB_WINDOW_SIZE 0x00000ce0 #define REG_A3XX_RB_FRAME_BUFFER_DIMENSION 0x00000ce0
#define A3XX_RB_WINDOW_SIZE_WIDTH__MASK 0x00003fff #define A3XX_RB_FRAME_BUFFER_DIMENSION_WIDTH__MASK 0x00003fff
#define A3XX_RB_WINDOW_SIZE_WIDTH__SHIFT 0 #define A3XX_RB_FRAME_BUFFER_DIMENSION_WIDTH__SHIFT 0
static inline uint32_t A3XX_RB_WINDOW_SIZE_WIDTH(uint32_t val) static inline uint32_t A3XX_RB_FRAME_BUFFER_DIMENSION_WIDTH(uint32_t val)
{ {
return ((val) << A3XX_RB_WINDOW_SIZE_WIDTH__SHIFT) & A3XX_RB_WINDOW_SIZE_WIDTH__MASK; return ((val) << A3XX_RB_FRAME_BUFFER_DIMENSION_WIDTH__SHIFT) & A3XX_RB_FRAME_BUFFER_DIMENSION_WIDTH__MASK;
} }
#define A3XX_RB_WINDOW_SIZE_HEIGHT__MASK 0x0fffc000 #define A3XX_RB_FRAME_BUFFER_DIMENSION_HEIGHT__MASK 0x0fffc000
#define A3XX_RB_WINDOW_SIZE_HEIGHT__SHIFT 14 #define A3XX_RB_FRAME_BUFFER_DIMENSION_HEIGHT__SHIFT 14
static inline uint32_t A3XX_RB_WINDOW_SIZE_HEIGHT(uint32_t val) static inline uint32_t A3XX_RB_FRAME_BUFFER_DIMENSION_HEIGHT(uint32_t val)
{ {
return ((val) << A3XX_RB_WINDOW_SIZE_HEIGHT__SHIFT) & A3XX_RB_WINDOW_SIZE_HEIGHT__MASK; return ((val) << A3XX_RB_FRAME_BUFFER_DIMENSION_HEIGHT__SHIFT) & A3XX_RB_FRAME_BUFFER_DIMENSION_HEIGHT__MASK;
} }
#define REG_A3XX_HLSQ_PERFCOUNTER0_SELECT 0x00000e00 #define REG_A3XX_HLSQ_PERFCOUNTER0_SELECT 0x00000e00
...@@ -2088,6 +2112,14 @@ static inline uint32_t A3XX_UCHE_CACHE_INVALIDATE1_REG_OPCODE(enum a3xx_cache_op ...@@ -2088,6 +2112,14 @@ static inline uint32_t A3XX_UCHE_CACHE_INVALIDATE1_REG_OPCODE(enum a3xx_cache_op
#define REG_A3XX_TP_PERFCOUNTER5_SELECT 0x00000f09 #define REG_A3XX_TP_PERFCOUNTER5_SELECT 0x00000f09
#define REG_A3XX_VGT_CL_INITIATOR 0x000021f0
#define REG_A3XX_VGT_EVENT_INITIATOR 0x000021f9
#define REG_A3XX_VGT_DRAW_INITIATOR 0x000021fc
#define REG_A3XX_VGT_IMMED_DATA 0x000021fd
#define REG_A3XX_TEX_SAMP_0 0x00000000 #define REG_A3XX_TEX_SAMP_0 0x00000000
#define A3XX_TEX_SAMP_0_MIPFILTER_LINEAR 0x00000002 #define A3XX_TEX_SAMP_0_MIPFILTER_LINEAR 0x00000002
#define A3XX_TEX_SAMP_0_XY_MAG__MASK 0x0000000c #define A3XX_TEX_SAMP_0_XY_MAG__MASK 0x0000000c
...@@ -2123,6 +2155,18 @@ static inline uint32_t A3XX_TEX_SAMP_0_WRAP_R(enum a3xx_tex_clamp val) ...@@ -2123,6 +2155,18 @@ static inline uint32_t A3XX_TEX_SAMP_0_WRAP_R(enum a3xx_tex_clamp val)
#define A3XX_TEX_SAMP_0_UNNORM_COORDS 0x80000000 #define A3XX_TEX_SAMP_0_UNNORM_COORDS 0x80000000
#define REG_A3XX_TEX_SAMP_1 0x00000001 #define REG_A3XX_TEX_SAMP_1 0x00000001
#define A3XX_TEX_SAMP_1_MAX_LOD__MASK 0x003ff000
#define A3XX_TEX_SAMP_1_MAX_LOD__SHIFT 12
static inline uint32_t A3XX_TEX_SAMP_1_MAX_LOD(float val)
{
return ((((uint32_t)(val * 12.0))) << A3XX_TEX_SAMP_1_MAX_LOD__SHIFT) & A3XX_TEX_SAMP_1_MAX_LOD__MASK;
}
#define A3XX_TEX_SAMP_1_MIN_LOD__MASK 0xffc00000
#define A3XX_TEX_SAMP_1_MIN_LOD__SHIFT 22
static inline uint32_t A3XX_TEX_SAMP_1_MIN_LOD(float val)
{
return ((((uint32_t)(val * 12.0))) << A3XX_TEX_SAMP_1_MIN_LOD__SHIFT) & A3XX_TEX_SAMP_1_MIN_LOD__MASK;
}
#define REG_A3XX_TEX_CONST_0 0x00000000 #define REG_A3XX_TEX_CONST_0 0x00000000
#define A3XX_TEX_CONST_0_TILED 0x00000001 #define A3XX_TEX_CONST_0_TILED 0x00000001
......
...@@ -15,6 +15,10 @@ ...@@ -15,6 +15,10 @@
* this program. If not, see <http://www.gnu.org/licenses/>. * this program. If not, see <http://www.gnu.org/licenses/>.
*/ */
#ifdef CONFIG_MSM_OCMEM
# include <mach/ocmem.h>
#endif
#include "a3xx_gpu.h" #include "a3xx_gpu.h"
#define A3XX_INT0_MASK \ #define A3XX_INT0_MASK \
...@@ -63,6 +67,7 @@ static void a3xx_me_init(struct msm_gpu *gpu) ...@@ -63,6 +67,7 @@ static void a3xx_me_init(struct msm_gpu *gpu)
static int a3xx_hw_init(struct msm_gpu *gpu) static int a3xx_hw_init(struct msm_gpu *gpu)
{ {
struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu); struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
struct a3xx_gpu *a3xx_gpu = to_a3xx_gpu(adreno_gpu);
uint32_t *ptr, len; uint32_t *ptr, len;
int i, ret; int i, ret;
...@@ -105,6 +110,21 @@ static int a3xx_hw_init(struct msm_gpu *gpu) ...@@ -105,6 +110,21 @@ static int a3xx_hw_init(struct msm_gpu *gpu)
gpu_write(gpu, REG_A3XX_VBIF_ABIT_SORT, 0x000000ff); gpu_write(gpu, REG_A3XX_VBIF_ABIT_SORT, 0x000000ff);
gpu_write(gpu, REG_A3XX_VBIF_ABIT_SORT_CONF, 0x000000a4); gpu_write(gpu, REG_A3XX_VBIF_ABIT_SORT_CONF, 0x000000a4);
} else if (adreno_is_a330v2(adreno_gpu)) {
/*
* Most of the VBIF registers on 8974v2 have the correct
* values at power on, so we won't modify those if we don't
* need to
*/
/* Enable 1k sort: */
gpu_write(gpu, REG_A3XX_VBIF_ABIT_SORT, 0x0001003f);
gpu_write(gpu, REG_A3XX_VBIF_ABIT_SORT_CONF, 0x000000a4);
/* Enable WR-REQ: */
gpu_write(gpu, REG_A3XX_VBIF_GATE_OFF_WRREQ_EN, 0x00003f);
gpu_write(gpu, REG_A3XX_VBIF_DDR_OUT_MAX_BURST, 0x0000303);
/* Set up VBIF_ROUND_ROBIN_QOS_ARB: */
gpu_write(gpu, REG_A3XX_VBIF_ROUND_ROBIN_QOS_ARB, 0x0003);
} else if (adreno_is_a330(adreno_gpu)) { } else if (adreno_is_a330(adreno_gpu)) {
/* Set up 16 deep read/write request queues: */ /* Set up 16 deep read/write request queues: */
gpu_write(gpu, REG_A3XX_VBIF_IN_RD_LIM_CONF0, 0x18181818); gpu_write(gpu, REG_A3XX_VBIF_IN_RD_LIM_CONF0, 0x18181818);
...@@ -121,10 +141,10 @@ static int a3xx_hw_init(struct msm_gpu *gpu) ...@@ -121,10 +141,10 @@ static int a3xx_hw_init(struct msm_gpu *gpu)
/* Set up VBIF_ROUND_ROBIN_QOS_ARB: */ /* Set up VBIF_ROUND_ROBIN_QOS_ARB: */
gpu_write(gpu, REG_A3XX_VBIF_ROUND_ROBIN_QOS_ARB, 0x0001); gpu_write(gpu, REG_A3XX_VBIF_ROUND_ROBIN_QOS_ARB, 0x0001);
/* Set up AOOO: */ /* Set up AOOO: */
gpu_write(gpu, REG_A3XX_VBIF_OUT_AXI_AOOO_EN, 0x0000ffff); gpu_write(gpu, REG_A3XX_VBIF_OUT_AXI_AOOO_EN, 0x0000003f);
gpu_write(gpu, REG_A3XX_VBIF_OUT_AXI_AOOO, 0xffffffff); gpu_write(gpu, REG_A3XX_VBIF_OUT_AXI_AOOO, 0x003f003f);
/* Enable 1K sort: */ /* Enable 1K sort: */
gpu_write(gpu, REG_A3XX_VBIF_ABIT_SORT, 0x0001ffff); gpu_write(gpu, REG_A3XX_VBIF_ABIT_SORT, 0x0001003f);
gpu_write(gpu, REG_A3XX_VBIF_ABIT_SORT_CONF, 0x000000a4); gpu_write(gpu, REG_A3XX_VBIF_ABIT_SORT_CONF, 0x000000a4);
/* Disable VBIF clock gating. This is to enable AXI running /* Disable VBIF clock gating. This is to enable AXI running
* higher frequency than GPU: * higher frequency than GPU:
...@@ -162,14 +182,23 @@ static int a3xx_hw_init(struct msm_gpu *gpu) ...@@ -162,14 +182,23 @@ static int a3xx_hw_init(struct msm_gpu *gpu)
gpu_write(gpu, REG_A3XX_UCHE_CACHE_MODE_CONTROL_REG, 0x00000001); gpu_write(gpu, REG_A3XX_UCHE_CACHE_MODE_CONTROL_REG, 0x00000001);
/* Enable Clock gating: */ /* Enable Clock gating: */
gpu_write(gpu, REG_A3XX_RBBM_CLOCK_CTL, 0xbfffffff); if (adreno_is_a320(adreno_gpu))
gpu_write(gpu, REG_A3XX_RBBM_CLOCK_CTL, 0xbfffffff);
/* Set the OCMEM base address for A330 */ else if (adreno_is_a330v2(adreno_gpu))
//TODO: gpu_write(gpu, REG_A3XX_RBBM_CLOCK_CTL, 0xaaaaaaaa);
// if (adreno_is_a330(adreno_gpu)) { else if (adreno_is_a330(adreno_gpu))
// gpu_write(gpu, REG_A3XX_RB_GMEM_BASE_ADDR, gpu_write(gpu, REG_A3XX_RBBM_CLOCK_CTL, 0xbffcffff);
// (unsigned int)(a3xx_gpu->ocmem_base >> 14));
// } if (adreno_is_a330v2(adreno_gpu))
gpu_write(gpu, REG_A3XX_RBBM_GPR0_CTL, 0x05515455);
else if (adreno_is_a330(adreno_gpu))
gpu_write(gpu, REG_A3XX_RBBM_GPR0_CTL, 0x00000000);
/* Set the OCMEM base address for A330, etc */
if (a3xx_gpu->ocmem_hdl) {
gpu_write(gpu, REG_A3XX_RB_GMEM_BASE_ADDR,
(unsigned int)(a3xx_gpu->ocmem_base >> 14));
}
/* Turn on performance counters: */ /* Turn on performance counters: */
gpu_write(gpu, REG_A3XX_RBBM_PERFCTR_CTL, 0x01); gpu_write(gpu, REG_A3XX_RBBM_PERFCTR_CTL, 0x01);
...@@ -219,7 +248,7 @@ static int a3xx_hw_init(struct msm_gpu *gpu) ...@@ -219,7 +248,7 @@ static int a3xx_hw_init(struct msm_gpu *gpu)
/* Load PM4: */ /* Load PM4: */
ptr = (uint32_t *)(adreno_gpu->pm4->data); ptr = (uint32_t *)(adreno_gpu->pm4->data);
len = adreno_gpu->pm4->size / 4; len = adreno_gpu->pm4->size / 4;
DBG("loading PM4 ucode version: %u", ptr[0]); DBG("loading PM4 ucode version: %x", ptr[1]);
gpu_write(gpu, REG_AXXX_CP_DEBUG, gpu_write(gpu, REG_AXXX_CP_DEBUG,
AXXX_CP_DEBUG_DYNAMIC_CLK_DISABLE | AXXX_CP_DEBUG_DYNAMIC_CLK_DISABLE |
...@@ -231,19 +260,26 @@ static int a3xx_hw_init(struct msm_gpu *gpu) ...@@ -231,19 +260,26 @@ static int a3xx_hw_init(struct msm_gpu *gpu)
/* Load PFP: */ /* Load PFP: */
ptr = (uint32_t *)(adreno_gpu->pfp->data); ptr = (uint32_t *)(adreno_gpu->pfp->data);
len = adreno_gpu->pfp->size / 4; len = adreno_gpu->pfp->size / 4;
DBG("loading PFP ucode version: %u", ptr[0]); DBG("loading PFP ucode version: %x", ptr[5]);
gpu_write(gpu, REG_A3XX_CP_PFP_UCODE_ADDR, 0); gpu_write(gpu, REG_A3XX_CP_PFP_UCODE_ADDR, 0);
for (i = 1; i < len; i++) for (i = 1; i < len; i++)
gpu_write(gpu, REG_A3XX_CP_PFP_UCODE_DATA, ptr[i]); gpu_write(gpu, REG_A3XX_CP_PFP_UCODE_DATA, ptr[i]);
/* CP ROQ queue sizes (bytes) - RB:16, ST:16, IB1:32, IB2:64 */ /* CP ROQ queue sizes (bytes) - RB:16, ST:16, IB1:32, IB2:64 */
if (adreno_is_a305(adreno_gpu) || adreno_is_a320(adreno_gpu)) if (adreno_is_a305(adreno_gpu) || adreno_is_a320(adreno_gpu)) {
gpu_write(gpu, REG_AXXX_CP_QUEUE_THRESHOLDS, gpu_write(gpu, REG_AXXX_CP_QUEUE_THRESHOLDS,
AXXX_CP_QUEUE_THRESHOLDS_CSQ_IB1_START(2) | AXXX_CP_QUEUE_THRESHOLDS_CSQ_IB1_START(2) |
AXXX_CP_QUEUE_THRESHOLDS_CSQ_IB2_START(6) | AXXX_CP_QUEUE_THRESHOLDS_CSQ_IB2_START(6) |
AXXX_CP_QUEUE_THRESHOLDS_CSQ_ST_START(14)); AXXX_CP_QUEUE_THRESHOLDS_CSQ_ST_START(14));
} else if (adreno_is_a330(adreno_gpu)) {
/* NOTE: this (value taken from the downstream android driver)
* includes some bits outside of the known bitfields. But
* A330 has this "MERCIU queue" thing too, which might
* explain a new bitfield or reshuffling:
*/
gpu_write(gpu, REG_AXXX_CP_QUEUE_THRESHOLDS, 0x003e2008);
}
/* clear ME_HALT to start micro engine */ /* clear ME_HALT to start micro engine */
gpu_write(gpu, REG_AXXX_CP_ME_CNTL, 0); gpu_write(gpu, REG_AXXX_CP_ME_CNTL, 0);
...@@ -253,6 +289,14 @@ static int a3xx_hw_init(struct msm_gpu *gpu) ...@@ -253,6 +289,14 @@ static int a3xx_hw_init(struct msm_gpu *gpu)
return 0; return 0;
} }
static void a3xx_recover(struct msm_gpu *gpu)
{
gpu_write(gpu, REG_A3XX_RBBM_SW_RESET_CMD, 1);
gpu_read(gpu, REG_A3XX_RBBM_SW_RESET_CMD);
gpu_write(gpu, REG_A3XX_RBBM_SW_RESET_CMD, 0);
adreno_recover(gpu);
}
static void a3xx_destroy(struct msm_gpu *gpu) static void a3xx_destroy(struct msm_gpu *gpu)
{ {
struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu); struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
...@@ -261,6 +305,12 @@ static void a3xx_destroy(struct msm_gpu *gpu) ...@@ -261,6 +305,12 @@ static void a3xx_destroy(struct msm_gpu *gpu)
DBG("%s", gpu->name); DBG("%s", gpu->name);
adreno_gpu_cleanup(adreno_gpu); adreno_gpu_cleanup(adreno_gpu);
#ifdef CONFIG_MSM_OCMEM
if (a3xx_gpu->ocmem_base)
ocmem_free(OCMEM_GRAPHICS, a3xx_gpu->ocmem_hdl);
#endif
put_device(&a3xx_gpu->pdev->dev); put_device(&a3xx_gpu->pdev->dev);
kfree(a3xx_gpu); kfree(a3xx_gpu);
} }
...@@ -371,7 +421,7 @@ static const struct adreno_gpu_funcs funcs = { ...@@ -371,7 +421,7 @@ static const struct adreno_gpu_funcs funcs = {
.hw_init = a3xx_hw_init, .hw_init = a3xx_hw_init,
.pm_suspend = msm_gpu_pm_suspend, .pm_suspend = msm_gpu_pm_suspend,
.pm_resume = msm_gpu_pm_resume, .pm_resume = msm_gpu_pm_resume,
.recover = adreno_recover, .recover = a3xx_recover,
.last_fence = adreno_last_fence, .last_fence = adreno_last_fence,
.submit = adreno_submit, .submit = adreno_submit,
.flush = adreno_flush, .flush = adreno_flush,
...@@ -387,6 +437,7 @@ static const struct adreno_gpu_funcs funcs = { ...@@ -387,6 +437,7 @@ static const struct adreno_gpu_funcs funcs = {
struct msm_gpu *a3xx_gpu_init(struct drm_device *dev) struct msm_gpu *a3xx_gpu_init(struct drm_device *dev)
{ {
struct a3xx_gpu *a3xx_gpu = NULL; struct a3xx_gpu *a3xx_gpu = NULL;
struct adreno_gpu *adreno_gpu;
struct msm_gpu *gpu; struct msm_gpu *gpu;
struct platform_device *pdev = a3xx_pdev; struct platform_device *pdev = a3xx_pdev;
struct adreno_platform_config *config; struct adreno_platform_config *config;
...@@ -406,7 +457,8 @@ struct msm_gpu *a3xx_gpu_init(struct drm_device *dev) ...@@ -406,7 +457,8 @@ struct msm_gpu *a3xx_gpu_init(struct drm_device *dev)
goto fail; goto fail;
} }
gpu = &a3xx_gpu->base.base; adreno_gpu = &a3xx_gpu->base;
gpu = &adreno_gpu->base;
get_device(&pdev->dev); get_device(&pdev->dev);
a3xx_gpu->pdev = pdev; a3xx_gpu->pdev = pdev;
...@@ -414,16 +466,46 @@ struct msm_gpu *a3xx_gpu_init(struct drm_device *dev) ...@@ -414,16 +466,46 @@ struct msm_gpu *a3xx_gpu_init(struct drm_device *dev)
gpu->fast_rate = config->fast_rate; gpu->fast_rate = config->fast_rate;
gpu->slow_rate = config->slow_rate; gpu->slow_rate = config->slow_rate;
gpu->bus_freq = config->bus_freq; gpu->bus_freq = config->bus_freq;
#ifdef CONFIG_MSM_BUS_SCALING
gpu->bus_scale_table = config->bus_scale_table;
#endif
DBG("fast_rate=%u, slow_rate=%u, bus_freq=%u", DBG("fast_rate=%u, slow_rate=%u, bus_freq=%u",
gpu->fast_rate, gpu->slow_rate, gpu->bus_freq); gpu->fast_rate, gpu->slow_rate, gpu->bus_freq);
ret = adreno_gpu_init(dev, pdev, &a3xx_gpu->base, ret = adreno_gpu_init(dev, pdev, adreno_gpu, &funcs, config->rev);
&funcs, config->rev);
if (ret) if (ret)
goto fail; goto fail;
return &a3xx_gpu->base.base; /* if needed, allocate gmem: */
if (adreno_is_a330(adreno_gpu)) {
#ifdef CONFIG_MSM_OCMEM
/* TODO this is different/missing upstream: */
struct ocmem_buf *ocmem_hdl =
ocmem_allocate(OCMEM_GRAPHICS, adreno_gpu->gmem);
a3xx_gpu->ocmem_hdl = ocmem_hdl;
a3xx_gpu->ocmem_base = ocmem_hdl->addr;
adreno_gpu->gmem = ocmem_hdl->len;
DBG("using %dK of OCMEM at 0x%08x", adreno_gpu->gmem / 1024,
a3xx_gpu->ocmem_base);
#endif
}
if (!gpu->mmu) {
/* TODO we think it is possible to configure the GPU to
* restrict access to VRAM carveout. But the required
* registers are unknown. For now just bail out and
* limp along with just modesetting. If it turns out
* to not be possible to restrict access, then we must
* implement a cmdstream validator.
*/
dev_err(dev->dev, "No memory protection without IOMMU\n");
ret = -ENXIO;
goto fail;
}
return gpu;
fail: fail:
if (a3xx_gpu) if (a3xx_gpu)
...@@ -436,19 +518,59 @@ struct msm_gpu *a3xx_gpu_init(struct drm_device *dev) ...@@ -436,19 +518,59 @@ struct msm_gpu *a3xx_gpu_init(struct drm_device *dev)
* The a3xx device: * The a3xx device:
*/ */
#if defined(CONFIG_MSM_BUS_SCALING) && !defined(CONFIG_OF)
# include <mach/kgsl.h>
#endif
static int a3xx_probe(struct platform_device *pdev) static int a3xx_probe(struct platform_device *pdev)
{ {
static struct adreno_platform_config config = {}; static struct adreno_platform_config config = {};
#ifdef CONFIG_OF #ifdef CONFIG_OF
/* TODO */ struct device_node *child, *node = pdev->dev.of_node;
u32 val;
int ret;
ret = of_property_read_u32(node, "qcom,chipid", &val);
if (ret) {
dev_err(&pdev->dev, "could not find chipid: %d\n", ret);
return ret;
}
config.rev = ADRENO_REV((val >> 24) & 0xff,
(val >> 16) & 0xff, (val >> 8) & 0xff, val & 0xff);
/* find clock rates: */
config.fast_rate = 0;
config.slow_rate = ~0;
for_each_child_of_node(node, child) {
if (of_device_is_compatible(child, "qcom,gpu-pwrlevels")) {
struct device_node *pwrlvl;
for_each_child_of_node(child, pwrlvl) {
ret = of_property_read_u32(pwrlvl, "qcom,gpu-freq", &val);
if (ret) {
dev_err(&pdev->dev, "could not find gpu-freq: %d\n", ret);
return ret;
}
config.fast_rate = max(config.fast_rate, val);
config.slow_rate = min(config.slow_rate, val);
}
}
}
if (!config.fast_rate) {
dev_err(&pdev->dev, "could not find clk rates\n");
return -ENXIO;
}
#else #else
struct kgsl_device_platform_data *pdata = pdev->dev.platform_data;
uint32_t version = socinfo_get_version(); uint32_t version = socinfo_get_version();
if (cpu_is_apq8064ab()) { if (cpu_is_apq8064ab()) {
config.fast_rate = 450000000; config.fast_rate = 450000000;
config.slow_rate = 27000000; config.slow_rate = 27000000;
config.bus_freq = 4; config.bus_freq = 4;
config.rev = ADRENO_REV(3, 2, 1, 0); config.rev = ADRENO_REV(3, 2, 1, 0);
} else if (cpu_is_apq8064() || cpu_is_msm8960ab()) { } else if (cpu_is_apq8064()) {
config.fast_rate = 400000000; config.fast_rate = 400000000;
config.slow_rate = 27000000; config.slow_rate = 27000000;
config.bus_freq = 4; config.bus_freq = 4;
...@@ -461,6 +583,16 @@ static int a3xx_probe(struct platform_device *pdev) ...@@ -461,6 +583,16 @@ static int a3xx_probe(struct platform_device *pdev)
else else
config.rev = ADRENO_REV(3, 2, 0, 0); config.rev = ADRENO_REV(3, 2, 0, 0);
} else if (cpu_is_msm8960ab()) {
config.fast_rate = 400000000;
config.slow_rate = 320000000;
config.bus_freq = 4;
if (SOCINFO_VERSION_MINOR(version) == 0)
config.rev = ADRENO_REV(3, 2, 1, 0);
else
config.rev = ADRENO_REV(3, 2, 1, 1);
} else if (cpu_is_msm8930()) { } else if (cpu_is_msm8930()) {
config.fast_rate = 400000000; config.fast_rate = 400000000;
config.slow_rate = 27000000; config.slow_rate = 27000000;
...@@ -473,6 +605,9 @@ static int a3xx_probe(struct platform_device *pdev) ...@@ -473,6 +605,9 @@ static int a3xx_probe(struct platform_device *pdev)
config.rev = ADRENO_REV(3, 0, 5, 0); config.rev = ADRENO_REV(3, 0, 5, 0);
} }
# ifdef CONFIG_MSM_BUS_SCALING
config.bus_scale_table = pdata->bus_scale_table;
# endif
#endif #endif
pdev->dev.platform_data = &config; pdev->dev.platform_data = &config;
a3xx_pdev = pdev; a3xx_pdev = pdev;
...@@ -485,10 +620,19 @@ static int a3xx_remove(struct platform_device *pdev) ...@@ -485,10 +620,19 @@ static int a3xx_remove(struct platform_device *pdev)
return 0; return 0;
} }
static const struct of_device_id dt_match[] = {
{ .compatible = "qcom,kgsl-3d0" },
{}
};
MODULE_DEVICE_TABLE(of, dt_match);
static struct platform_driver a3xx_driver = { static struct platform_driver a3xx_driver = {
.probe = a3xx_probe, .probe = a3xx_probe,
.remove = a3xx_remove, .remove = a3xx_remove,
.driver.name = "kgsl-3d0", .driver = {
.name = "kgsl-3d0",
.of_match_table = dt_match,
},
}; };
void __init a3xx_register(void) void __init a3xx_register(void)
......
...@@ -24,6 +24,10 @@ ...@@ -24,6 +24,10 @@
struct a3xx_gpu { struct a3xx_gpu {
struct adreno_gpu base; struct adreno_gpu base;
struct platform_device *pdev; struct platform_device *pdev;
/* if OCMEM is used for GMEM: */
uint32_t ocmem_base;
void *ocmem_hdl;
}; };
#define to_a3xx_gpu(x) container_of(x, struct a3xx_gpu, base) #define to_a3xx_gpu(x) container_of(x, struct a3xx_gpu, base)
......
...@@ -8,12 +8,13 @@ This file was generated by the rules-ng-ng headergen tool in this git repository ...@@ -8,12 +8,13 @@ This file was generated by the rules-ng-ng headergen tool in this git repository
git clone https://github.com/freedreno/envytools.git git clone https://github.com/freedreno/envytools.git
The rules-ng-ng source files this header was generated from are: The rules-ng-ng source files this header was generated from are:
- /home/robclark/src/freedreno/envytools/rnndb/adreno.xml ( 327 bytes, from 2013-07-05 19:21:12) - /home/robclark/src/freedreno/envytools/rnndb/adreno.xml ( 364 bytes, from 2013-11-30 14:47:15)
- /home/robclark/src/freedreno/envytools/rnndb/freedreno_copyright.xml ( 1453 bytes, from 2013-03-31 16:51:27) - /home/robclark/src/freedreno/envytools/rnndb/freedreno_copyright.xml ( 1453 bytes, from 2013-03-31 16:51:27)
- /home/robclark/src/freedreno/envytools/rnndb/a2xx/a2xx.xml ( 31003 bytes, from 2013-09-19 18:50:16) - /home/robclark/src/freedreno/envytools/rnndb/adreno/a2xx.xml ( 32814 bytes, from 2013-11-30 15:07:33)
- /home/robclark/src/freedreno/envytools/rnndb/adreno_common.xml ( 8983 bytes, from 2013-07-24 01:38:36) - /home/robclark/src/freedreno/envytools/rnndb/adreno/adreno_common.xml ( 8900 bytes, from 2013-10-22 23:57:49)
- /home/robclark/src/freedreno/envytools/rnndb/adreno_pm4.xml ( 9759 bytes, from 2013-09-10 00:52:33) - /home/robclark/src/freedreno/envytools/rnndb/adreno/adreno_pm4.xml ( 10574 bytes, from 2013-11-13 05:44:45)
- /home/robclark/src/freedreno/envytools/rnndb/a3xx/a3xx.xml ( 51983 bytes, from 2013-09-10 00:52:32) - /home/robclark/src/freedreno/envytools/rnndb/adreno/a3xx.xml ( 53644 bytes, from 2013-11-30 15:07:33)
- /home/robclark/src/freedreno/envytools/rnndb/adreno/a4xx.xml ( 8344 bytes, from 2013-11-30 14:49:47)
Copyright (C) 2013 by the following authors: Copyright (C) 2013 by the following authors:
- Rob Clark <robdclark@gmail.com> (robclark) - Rob Clark <robdclark@gmail.com> (robclark)
...@@ -115,96 +116,6 @@ enum adreno_rb_depth_format { ...@@ -115,96 +116,6 @@ enum adreno_rb_depth_format {
DEPTHX_24_8 = 1, DEPTHX_24_8 = 1,
}; };
enum adreno_mmu_clnt_beh {
BEH_NEVR = 0,
BEH_TRAN_RNG = 1,
BEH_TRAN_FLT = 2,
};
#define REG_AXXX_MH_MMU_CONFIG 0x00000040
#define AXXX_MH_MMU_CONFIG_MMU_ENABLE 0x00000001
#define AXXX_MH_MMU_CONFIG_SPLIT_MODE_ENABLE 0x00000002
#define AXXX_MH_MMU_CONFIG_RB_W_CLNT_BEHAVIOR__MASK 0x00000030
#define AXXX_MH_MMU_CONFIG_RB_W_CLNT_BEHAVIOR__SHIFT 4
static inline uint32_t AXXX_MH_MMU_CONFIG_RB_W_CLNT_BEHAVIOR(enum adreno_mmu_clnt_beh val)
{
return ((val) << AXXX_MH_MMU_CONFIG_RB_W_CLNT_BEHAVIOR__SHIFT) & AXXX_MH_MMU_CONFIG_RB_W_CLNT_BEHAVIOR__MASK;
}
#define AXXX_MH_MMU_CONFIG_CP_W_CLNT_BEHAVIOR__MASK 0x000000c0
#define AXXX_MH_MMU_CONFIG_CP_W_CLNT_BEHAVIOR__SHIFT 6
static inline uint32_t AXXX_MH_MMU_CONFIG_CP_W_CLNT_BEHAVIOR(enum adreno_mmu_clnt_beh val)
{
return ((val) << AXXX_MH_MMU_CONFIG_CP_W_CLNT_BEHAVIOR__SHIFT) & AXXX_MH_MMU_CONFIG_CP_W_CLNT_BEHAVIOR__MASK;
}
#define AXXX_MH_MMU_CONFIG_CP_R0_CLNT_BEHAVIOR__MASK 0x00000300
#define AXXX_MH_MMU_CONFIG_CP_R0_CLNT_BEHAVIOR__SHIFT 8
static inline uint32_t AXXX_MH_MMU_CONFIG_CP_R0_CLNT_BEHAVIOR(enum adreno_mmu_clnt_beh val)
{
return ((val) << AXXX_MH_MMU_CONFIG_CP_R0_CLNT_BEHAVIOR__SHIFT) & AXXX_MH_MMU_CONFIG_CP_R0_CLNT_BEHAVIOR__MASK;
}
#define AXXX_MH_MMU_CONFIG_CP_R1_CLNT_BEHAVIOR__MASK 0x00000c00
#define AXXX_MH_MMU_CONFIG_CP_R1_CLNT_BEHAVIOR__SHIFT 10
static inline uint32_t AXXX_MH_MMU_CONFIG_CP_R1_CLNT_BEHAVIOR(enum adreno_mmu_clnt_beh val)
{
return ((val) << AXXX_MH_MMU_CONFIG_CP_R1_CLNT_BEHAVIOR__SHIFT) & AXXX_MH_MMU_CONFIG_CP_R1_CLNT_BEHAVIOR__MASK;
}
#define AXXX_MH_MMU_CONFIG_CP_R2_CLNT_BEHAVIOR__MASK 0x00003000
#define AXXX_MH_MMU_CONFIG_CP_R2_CLNT_BEHAVIOR__SHIFT 12
static inline uint32_t AXXX_MH_MMU_CONFIG_CP_R2_CLNT_BEHAVIOR(enum adreno_mmu_clnt_beh val)
{
return ((val) << AXXX_MH_MMU_CONFIG_CP_R2_CLNT_BEHAVIOR__SHIFT) & AXXX_MH_MMU_CONFIG_CP_R2_CLNT_BEHAVIOR__MASK;
}
#define AXXX_MH_MMU_CONFIG_CP_R3_CLNT_BEHAVIOR__MASK 0x0000c000
#define AXXX_MH_MMU_CONFIG_CP_R3_CLNT_BEHAVIOR__SHIFT 14
static inline uint32_t AXXX_MH_MMU_CONFIG_CP_R3_CLNT_BEHAVIOR(enum adreno_mmu_clnt_beh val)
{
return ((val) << AXXX_MH_MMU_CONFIG_CP_R3_CLNT_BEHAVIOR__SHIFT) & AXXX_MH_MMU_CONFIG_CP_R3_CLNT_BEHAVIOR__MASK;
}
#define AXXX_MH_MMU_CONFIG_CP_R4_CLNT_BEHAVIOR__MASK 0x00030000
#define AXXX_MH_MMU_CONFIG_CP_R4_CLNT_BEHAVIOR__SHIFT 16
static inline uint32_t AXXX_MH_MMU_CONFIG_CP_R4_CLNT_BEHAVIOR(enum adreno_mmu_clnt_beh val)
{
return ((val) << AXXX_MH_MMU_CONFIG_CP_R4_CLNT_BEHAVIOR__SHIFT) & AXXX_MH_MMU_CONFIG_CP_R4_CLNT_BEHAVIOR__MASK;
}
#define AXXX_MH_MMU_CONFIG_VGT_R0_CLNT_BEHAVIOR__MASK 0x000c0000
#define AXXX_MH_MMU_CONFIG_VGT_R0_CLNT_BEHAVIOR__SHIFT 18
static inline uint32_t AXXX_MH_MMU_CONFIG_VGT_R0_CLNT_BEHAVIOR(enum adreno_mmu_clnt_beh val)
{
return ((val) << AXXX_MH_MMU_CONFIG_VGT_R0_CLNT_BEHAVIOR__SHIFT) & AXXX_MH_MMU_CONFIG_VGT_R0_CLNT_BEHAVIOR__MASK;
}
#define AXXX_MH_MMU_CONFIG_VGT_R1_CLNT_BEHAVIOR__MASK 0x00300000
#define AXXX_MH_MMU_CONFIG_VGT_R1_CLNT_BEHAVIOR__SHIFT 20
static inline uint32_t AXXX_MH_MMU_CONFIG_VGT_R1_CLNT_BEHAVIOR(enum adreno_mmu_clnt_beh val)
{
return ((val) << AXXX_MH_MMU_CONFIG_VGT_R1_CLNT_BEHAVIOR__SHIFT) & AXXX_MH_MMU_CONFIG_VGT_R1_CLNT_BEHAVIOR__MASK;
}
#define AXXX_MH_MMU_CONFIG_TC_R_CLNT_BEHAVIOR__MASK 0x00c00000
#define AXXX_MH_MMU_CONFIG_TC_R_CLNT_BEHAVIOR__SHIFT 22
static inline uint32_t AXXX_MH_MMU_CONFIG_TC_R_CLNT_BEHAVIOR(enum adreno_mmu_clnt_beh val)
{
return ((val) << AXXX_MH_MMU_CONFIG_TC_R_CLNT_BEHAVIOR__SHIFT) & AXXX_MH_MMU_CONFIG_TC_R_CLNT_BEHAVIOR__MASK;
}
#define AXXX_MH_MMU_CONFIG_PA_W_CLNT_BEHAVIOR__MASK 0x03000000
#define AXXX_MH_MMU_CONFIG_PA_W_CLNT_BEHAVIOR__SHIFT 24
static inline uint32_t AXXX_MH_MMU_CONFIG_PA_W_CLNT_BEHAVIOR(enum adreno_mmu_clnt_beh val)
{
return ((val) << AXXX_MH_MMU_CONFIG_PA_W_CLNT_BEHAVIOR__SHIFT) & AXXX_MH_MMU_CONFIG_PA_W_CLNT_BEHAVIOR__MASK;
}
#define REG_AXXX_MH_MMU_VA_RANGE 0x00000041
#define REG_AXXX_MH_MMU_PT_BASE 0x00000042
#define REG_AXXX_MH_MMU_PAGE_FAULT 0x00000043
#define REG_AXXX_MH_MMU_TRAN_ERROR 0x00000044
#define REG_AXXX_MH_MMU_INVALIDATE 0x00000045
#define REG_AXXX_MH_MMU_MPU_BASE 0x00000046
#define REG_AXXX_MH_MMU_MPU_END 0x00000047
#define REG_AXXX_CP_RB_BASE 0x000001c0 #define REG_AXXX_CP_RB_BASE 0x000001c0
#define REG_AXXX_CP_RB_CNTL 0x000001c1 #define REG_AXXX_CP_RB_CNTL 0x000001c1
...@@ -275,6 +186,18 @@ static inline uint32_t AXXX_CP_QUEUE_THRESHOLDS_CSQ_ST_START(uint32_t val) ...@@ -275,6 +186,18 @@ static inline uint32_t AXXX_CP_QUEUE_THRESHOLDS_CSQ_ST_START(uint32_t val)
} }
#define REG_AXXX_CP_MEQ_THRESHOLDS 0x000001d6 #define REG_AXXX_CP_MEQ_THRESHOLDS 0x000001d6
#define AXXX_CP_MEQ_THRESHOLDS_MEQ_END__MASK 0x001f0000
#define AXXX_CP_MEQ_THRESHOLDS_MEQ_END__SHIFT 16
static inline uint32_t AXXX_CP_MEQ_THRESHOLDS_MEQ_END(uint32_t val)
{
return ((val) << AXXX_CP_MEQ_THRESHOLDS_MEQ_END__SHIFT) & AXXX_CP_MEQ_THRESHOLDS_MEQ_END__MASK;
}
#define AXXX_CP_MEQ_THRESHOLDS_ROQ_END__MASK 0x1f000000
#define AXXX_CP_MEQ_THRESHOLDS_ROQ_END__SHIFT 24
static inline uint32_t AXXX_CP_MEQ_THRESHOLDS_ROQ_END(uint32_t val)
{
return ((val) << AXXX_CP_MEQ_THRESHOLDS_ROQ_END__SHIFT) & AXXX_CP_MEQ_THRESHOLDS_ROQ_END__MASK;
}
#define REG_AXXX_CP_CSQ_AVAIL 0x000001d7 #define REG_AXXX_CP_CSQ_AVAIL 0x000001d7
#define AXXX_CP_CSQ_AVAIL_RING__MASK 0x0000007f #define AXXX_CP_CSQ_AVAIL_RING__MASK 0x0000007f
...@@ -402,6 +325,36 @@ static inline uint32_t AXXX_CP_CSQ_IB2_STAT_WPTR(uint32_t val) ...@@ -402,6 +325,36 @@ static inline uint32_t AXXX_CP_CSQ_IB2_STAT_WPTR(uint32_t val)
return ((val) << AXXX_CP_CSQ_IB2_STAT_WPTR__SHIFT) & AXXX_CP_CSQ_IB2_STAT_WPTR__MASK; return ((val) << AXXX_CP_CSQ_IB2_STAT_WPTR__SHIFT) & AXXX_CP_CSQ_IB2_STAT_WPTR__MASK;
} }
#define REG_AXXX_CP_NON_PREFETCH_CNTRS 0x00000440
#define REG_AXXX_CP_STQ_ST_STAT 0x00000443
#define REG_AXXX_CP_ST_BASE 0x0000044d
#define REG_AXXX_CP_ST_BUFSZ 0x0000044e
#define REG_AXXX_CP_MEQ_STAT 0x0000044f
#define REG_AXXX_CP_MIU_TAG_STAT 0x00000452
#define REG_AXXX_CP_BIN_MASK_LO 0x00000454
#define REG_AXXX_CP_BIN_MASK_HI 0x00000455
#define REG_AXXX_CP_BIN_SELECT_LO 0x00000456
#define REG_AXXX_CP_BIN_SELECT_HI 0x00000457
#define REG_AXXX_CP_IB1_BASE 0x00000458
#define REG_AXXX_CP_IB1_BUFSZ 0x00000459
#define REG_AXXX_CP_IB2_BASE 0x0000045a
#define REG_AXXX_CP_IB2_BUFSZ 0x0000045b
#define REG_AXXX_CP_STAT 0x0000047f
#define REG_AXXX_CP_SCRATCH_REG0 0x00000578 #define REG_AXXX_CP_SCRATCH_REG0 0x00000578
#define REG_AXXX_CP_SCRATCH_REG1 0x00000579 #define REG_AXXX_CP_SCRATCH_REG1 0x00000579
...@@ -418,6 +371,26 @@ static inline uint32_t AXXX_CP_CSQ_IB2_STAT_WPTR(uint32_t val) ...@@ -418,6 +371,26 @@ static inline uint32_t AXXX_CP_CSQ_IB2_STAT_WPTR(uint32_t val)
#define REG_AXXX_CP_SCRATCH_REG7 0x0000057f #define REG_AXXX_CP_SCRATCH_REG7 0x0000057f
#define REG_AXXX_CP_ME_VS_EVENT_SRC 0x00000600
#define REG_AXXX_CP_ME_VS_EVENT_ADDR 0x00000601
#define REG_AXXX_CP_ME_VS_EVENT_DATA 0x00000602
#define REG_AXXX_CP_ME_VS_EVENT_ADDR_SWM 0x00000603
#define REG_AXXX_CP_ME_VS_EVENT_DATA_SWM 0x00000604
#define REG_AXXX_CP_ME_PS_EVENT_SRC 0x00000605
#define REG_AXXX_CP_ME_PS_EVENT_ADDR 0x00000606
#define REG_AXXX_CP_ME_PS_EVENT_DATA 0x00000607
#define REG_AXXX_CP_ME_PS_EVENT_ADDR_SWM 0x00000608
#define REG_AXXX_CP_ME_PS_EVENT_DATA_SWM 0x00000609
#define REG_AXXX_CP_ME_CF_EVENT_SRC 0x0000060a #define REG_AXXX_CP_ME_CF_EVENT_SRC 0x0000060a
#define REG_AXXX_CP_ME_CF_EVENT_ADDR 0x0000060b #define REG_AXXX_CP_ME_CF_EVENT_ADDR 0x0000060b
...@@ -428,5 +401,11 @@ static inline uint32_t AXXX_CP_CSQ_IB2_STAT_WPTR(uint32_t val) ...@@ -428,5 +401,11 @@ static inline uint32_t AXXX_CP_CSQ_IB2_STAT_WPTR(uint32_t val)
#define REG_AXXX_CP_ME_NRT_DATA 0x0000060e #define REG_AXXX_CP_ME_NRT_DATA 0x0000060e
#define REG_AXXX_CP_ME_VS_FETCH_DONE_SRC 0x00000612
#define REG_AXXX_CP_ME_VS_FETCH_DONE_ADDR 0x00000613
#define REG_AXXX_CP_ME_VS_FETCH_DONE_DATA 0x00000614
#endif /* ADRENO_COMMON_XML */ #endif /* ADRENO_COMMON_XML */
...@@ -17,6 +17,7 @@ ...@@ -17,6 +17,7 @@
#include "adreno_gpu.h" #include "adreno_gpu.h"
#include "msm_gem.h" #include "msm_gem.h"
#include "msm_mmu.h"
struct adreno_info { struct adreno_info {
struct adreno_rev rev; struct adreno_rev rev;
...@@ -44,7 +45,7 @@ static const struct adreno_info gpulist[] = { ...@@ -44,7 +45,7 @@ static const struct adreno_info gpulist[] = {
.pfpfw = "a300_pfp.fw", .pfpfw = "a300_pfp.fw",
.gmem = SZ_512K, .gmem = SZ_512K,
}, { }, {
.rev = ADRENO_REV(3, 3, 0, 0), .rev = ADRENO_REV(3, 3, 0, ANY_ID),
.revn = 330, .revn = 330,
.name = "A330", .name = "A330",
.pm4fw = "a330_pm4.fw", .pm4fw = "a330_pm4.fw",
...@@ -53,6 +54,11 @@ static const struct adreno_info gpulist[] = { ...@@ -53,6 +54,11 @@ static const struct adreno_info gpulist[] = {
}, },
}; };
MODULE_FIRMWARE("a300_pm4.fw");
MODULE_FIRMWARE("a300_pfp.fw");
MODULE_FIRMWARE("a330_pm4.fw");
MODULE_FIRMWARE("a330_pfp.fw");
#define RB_SIZE SZ_32K #define RB_SIZE SZ_32K
#define RB_BLKSIZE 16 #define RB_BLKSIZE 16
...@@ -65,7 +71,7 @@ int adreno_get_param(struct msm_gpu *gpu, uint32_t param, uint64_t *value) ...@@ -65,7 +71,7 @@ int adreno_get_param(struct msm_gpu *gpu, uint32_t param, uint64_t *value)
*value = adreno_gpu->info->revn; *value = adreno_gpu->info->revn;
return 0; return 0;
case MSM_PARAM_GMEM_SIZE: case MSM_PARAM_GMEM_SIZE:
*value = adreno_gpu->info->gmem; *value = adreno_gpu->gmem;
return 0; return 0;
default: default:
DBG("%s: invalid param: %u", gpu->name, param); DBG("%s: invalid param: %u", gpu->name, param);
...@@ -86,7 +92,7 @@ int adreno_hw_init(struct msm_gpu *gpu) ...@@ -86,7 +92,7 @@ int adreno_hw_init(struct msm_gpu *gpu)
gpu_write(gpu, REG_AXXX_CP_RB_CNTL, gpu_write(gpu, REG_AXXX_CP_RB_CNTL,
/* size is log2(quad-words): */ /* size is log2(quad-words): */
AXXX_CP_RB_CNTL_BUFSZ(ilog2(gpu->rb->size / 8)) | AXXX_CP_RB_CNTL_BUFSZ(ilog2(gpu->rb->size / 8)) |
AXXX_CP_RB_CNTL_BLKSZ(RB_BLKSIZE)); AXXX_CP_RB_CNTL_BLKSZ(ilog2(RB_BLKSIZE / 8)));
/* Setup ringbuffer address: */ /* Setup ringbuffer address: */
gpu_write(gpu, REG_AXXX_CP_RB_BASE, gpu->rb_iova); gpu_write(gpu, REG_AXXX_CP_RB_BASE, gpu->rb_iova);
...@@ -286,6 +292,7 @@ int adreno_gpu_init(struct drm_device *drm, struct platform_device *pdev, ...@@ -286,6 +292,7 @@ int adreno_gpu_init(struct drm_device *drm, struct platform_device *pdev,
struct adreno_gpu *gpu, const struct adreno_gpu_funcs *funcs, struct adreno_gpu *gpu, const struct adreno_gpu_funcs *funcs,
struct adreno_rev rev) struct adreno_rev rev)
{ {
struct msm_mmu *mmu;
int i, ret; int i, ret;
/* identify gpu: */ /* identify gpu: */
...@@ -311,6 +318,7 @@ int adreno_gpu_init(struct drm_device *drm, struct platform_device *pdev, ...@@ -311,6 +318,7 @@ int adreno_gpu_init(struct drm_device *drm, struct platform_device *pdev,
rev.core, rev.major, rev.minor, rev.patchid); rev.core, rev.major, rev.minor, rev.patchid);
gpu->funcs = funcs; gpu->funcs = funcs;
gpu->gmem = gpu->info->gmem;
gpu->rev = rev; gpu->rev = rev;
ret = request_firmware(&gpu->pm4, gpu->info->pm4fw, drm->dev); ret = request_firmware(&gpu->pm4, gpu->info->pm4fw, drm->dev);
...@@ -333,10 +341,13 @@ int adreno_gpu_init(struct drm_device *drm, struct platform_device *pdev, ...@@ -333,10 +341,13 @@ int adreno_gpu_init(struct drm_device *drm, struct platform_device *pdev,
if (ret) if (ret)
return ret; return ret;
ret = msm_iommu_attach(drm, gpu->base.iommu, mmu = gpu->base.mmu;
iommu_ports, ARRAY_SIZE(iommu_ports)); if (mmu) {
if (ret) ret = mmu->funcs->attach(mmu, iommu_ports,
return ret; ARRAY_SIZE(iommu_ports));
if (ret)
return ret;
}
gpu->memptrs_bo = msm_gem_new(drm, sizeof(*gpu->memptrs), gpu->memptrs_bo = msm_gem_new(drm, sizeof(*gpu->memptrs),
MSM_BO_UNCACHED); MSM_BO_UNCACHED);
......
...@@ -51,6 +51,7 @@ struct adreno_gpu { ...@@ -51,6 +51,7 @@ struct adreno_gpu {
struct msm_gpu base; struct msm_gpu base;
struct adreno_rev rev; struct adreno_rev rev;
const struct adreno_info *info; const struct adreno_info *info;
uint32_t gmem; /* actual gmem size */
uint32_t revn; /* numeric revision name */ uint32_t revn; /* numeric revision name */
const struct adreno_gpu_funcs *funcs; const struct adreno_gpu_funcs *funcs;
...@@ -70,6 +71,9 @@ struct adreno_gpu { ...@@ -70,6 +71,9 @@ struct adreno_gpu {
struct adreno_platform_config { struct adreno_platform_config {
struct adreno_rev rev; struct adreno_rev rev;
uint32_t fast_rate, slow_rate, bus_freq; uint32_t fast_rate, slow_rate, bus_freq;
#ifdef CONFIG_MSM_BUS_SCALING
struct msm_bus_scale_pdata *bus_scale_table;
#endif
}; };
#define ADRENO_IDLE_TIMEOUT (20 * 1000) #define ADRENO_IDLE_TIMEOUT (20 * 1000)
...@@ -94,6 +98,11 @@ static inline bool adreno_is_a330(struct adreno_gpu *gpu) ...@@ -94,6 +98,11 @@ static inline bool adreno_is_a330(struct adreno_gpu *gpu)
return gpu->revn == 330; return gpu->revn == 330;
} }
static inline bool adreno_is_a330v2(struct adreno_gpu *gpu)
{
return adreno_is_a330(gpu) && (gpu->rev.patchid > 0);
}
int adreno_get_param(struct msm_gpu *gpu, uint32_t param, uint64_t *value); int adreno_get_param(struct msm_gpu *gpu, uint32_t param, uint64_t *value);
int adreno_hw_init(struct msm_gpu *gpu); int adreno_hw_init(struct msm_gpu *gpu);
uint32_t adreno_last_fence(struct msm_gpu *gpu); uint32_t adreno_last_fence(struct msm_gpu *gpu);
......
...@@ -8,12 +8,13 @@ This file was generated by the rules-ng-ng headergen tool in this git repository ...@@ -8,12 +8,13 @@ This file was generated by the rules-ng-ng headergen tool in this git repository
git clone https://github.com/freedreno/envytools.git git clone https://github.com/freedreno/envytools.git
The rules-ng-ng source files this header was generated from are: The rules-ng-ng source files this header was generated from are:
- /home/robclark/src/freedreno/envytools/rnndb/adreno.xml ( 327 bytes, from 2013-07-05 19:21:12) - /home/robclark/src/freedreno/envytools/rnndb/adreno.xml ( 364 bytes, from 2013-11-30 14:47:15)
- /home/robclark/src/freedreno/envytools/rnndb/freedreno_copyright.xml ( 1453 bytes, from 2013-03-31 16:51:27) - /home/robclark/src/freedreno/envytools/rnndb/freedreno_copyright.xml ( 1453 bytes, from 2013-03-31 16:51:27)
- /home/robclark/src/freedreno/envytools/rnndb/a2xx/a2xx.xml ( 31003 bytes, from 2013-09-19 18:50:16) - /home/robclark/src/freedreno/envytools/rnndb/adreno/a2xx.xml ( 32814 bytes, from 2013-11-30 15:07:33)
- /home/robclark/src/freedreno/envytools/rnndb/adreno_common.xml ( 8983 bytes, from 2013-07-24 01:38:36) - /home/robclark/src/freedreno/envytools/rnndb/adreno/adreno_common.xml ( 8900 bytes, from 2013-10-22 23:57:49)
- /home/robclark/src/freedreno/envytools/rnndb/adreno_pm4.xml ( 9759 bytes, from 2013-09-10 00:52:33) - /home/robclark/src/freedreno/envytools/rnndb/adreno/adreno_pm4.xml ( 10574 bytes, from 2013-11-13 05:44:45)
- /home/robclark/src/freedreno/envytools/rnndb/a3xx/a3xx.xml ( 51983 bytes, from 2013-09-10 00:52:32) - /home/robclark/src/freedreno/envytools/rnndb/adreno/a3xx.xml ( 53644 bytes, from 2013-11-30 15:07:33)
- /home/robclark/src/freedreno/envytools/rnndb/adreno/a4xx.xml ( 8344 bytes, from 2013-11-30 14:49:47)
Copyright (C) 2013 by the following authors: Copyright (C) 2013 by the following authors:
- Rob Clark <robdclark@gmail.com> (robclark) - Rob Clark <robdclark@gmail.com> (robclark)
...@@ -66,13 +67,15 @@ enum vgt_event_type { ...@@ -66,13 +67,15 @@ enum vgt_event_type {
enum pc_di_primtype { enum pc_di_primtype {
DI_PT_NONE = 0, DI_PT_NONE = 0,
DI_PT_POINTLIST = 1, DI_PT_POINTLIST_A2XX = 1,
DI_PT_LINELIST = 2, DI_PT_LINELIST = 2,
DI_PT_LINESTRIP = 3, DI_PT_LINESTRIP = 3,
DI_PT_TRILIST = 4, DI_PT_TRILIST = 4,
DI_PT_TRIFAN = 5, DI_PT_TRIFAN = 5,
DI_PT_TRISTRIP = 6, DI_PT_TRISTRIP = 6,
DI_PT_LINELOOP = 7,
DI_PT_RECTLIST = 8, DI_PT_RECTLIST = 8,
DI_PT_POINTLIST_A3XX = 9,
DI_PT_QUADLIST = 13, DI_PT_QUADLIST = 13,
DI_PT_QUADSTRIP = 14, DI_PT_QUADSTRIP = 14,
DI_PT_POLYGON = 15, DI_PT_POLYGON = 15,
...@@ -119,7 +122,7 @@ enum adreno_pm4_type3_packets { ...@@ -119,7 +122,7 @@ enum adreno_pm4_type3_packets {
CP_WAIT_FOR_IDLE = 38, CP_WAIT_FOR_IDLE = 38,
CP_WAIT_REG_MEM = 60, CP_WAIT_REG_MEM = 60,
CP_WAIT_REG_EQ = 82, CP_WAIT_REG_EQ = 82,
CP_WAT_REG_GTE = 83, CP_WAIT_REG_GTE = 83,
CP_WAIT_UNTIL_READ = 92, CP_WAIT_UNTIL_READ = 92,
CP_WAIT_IB_PFD_COMPLETE = 93, CP_WAIT_IB_PFD_COMPLETE = 93,
CP_REG_RMW = 33, CP_REG_RMW = 33,
...@@ -151,7 +154,6 @@ enum adreno_pm4_type3_packets { ...@@ -151,7 +154,6 @@ enum adreno_pm4_type3_packets {
CP_CONTEXT_UPDATE = 94, CP_CONTEXT_UPDATE = 94,
CP_INTERRUPT = 64, CP_INTERRUPT = 64,
CP_IM_STORE = 44, CP_IM_STORE = 44,
CP_SET_BIN_BASE_OFFSET = 75,
CP_SET_DRAW_INIT_FLAGS = 75, CP_SET_DRAW_INIT_FLAGS = 75,
CP_SET_PROTECTED_MODE = 95, CP_SET_PROTECTED_MODE = 95,
CP_LOAD_STATE = 48, CP_LOAD_STATE = 48,
...@@ -159,6 +161,16 @@ enum adreno_pm4_type3_packets { ...@@ -159,6 +161,16 @@ enum adreno_pm4_type3_packets {
CP_COND_INDIRECT_BUFFER_PFD = 50, CP_COND_INDIRECT_BUFFER_PFD = 50,
CP_INDIRECT_BUFFER_PFE = 63, CP_INDIRECT_BUFFER_PFE = 63,
CP_SET_BIN = 76, CP_SET_BIN = 76,
CP_TEST_TWO_MEMS = 113,
CP_WAIT_FOR_ME = 19,
IN_IB_PREFETCH_END = 23,
IN_SUBBLK_PREFETCH = 31,
IN_INSTR_PREFETCH = 32,
IN_INSTR_MATCH = 71,
IN_CONST_PREFETCH = 73,
IN_INCR_UPDT_STATE = 85,
IN_INCR_UPDT_CONST = 86,
IN_INCR_UPDT_INSTR = 87,
}; };
enum adreno_state_block { enum adreno_state_block {
......
...@@ -8,14 +8,16 @@ This file was generated by the rules-ng-ng headergen tool in this git repository ...@@ -8,14 +8,16 @@ This file was generated by the rules-ng-ng headergen tool in this git repository
git clone https://github.com/freedreno/envytools.git git clone https://github.com/freedreno/envytools.git
The rules-ng-ng source files this header was generated from are: The rules-ng-ng source files this header was generated from are:
- /home/robclark/src/freedreno/envytools/rnndb/msm.xml ( 595 bytes, from 2013-07-05 19:21:12) - /home/robclark/src/freedreno/envytools/rnndb/msm.xml ( 647 bytes, from 2013-11-30 14:45:35)
- /home/robclark/src/freedreno/envytools/rnndb/freedreno_copyright.xml ( 1453 bytes, from 2013-03-31 16:51:27) - /home/robclark/src/freedreno/envytools/rnndb/freedreno_copyright.xml ( 1453 bytes, from 2013-03-31 16:51:27)
- /home/robclark/src/freedreno/envytools/rnndb/mdp4/mdp4.xml ( 19332 bytes, from 2013-10-07 16:36:48) - /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp4.xml ( 17996 bytes, from 2013-12-01 19:10:31)
- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp_common.xml ( 1615 bytes, from 2013-11-30 15:00:52)
- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp5.xml ( 22517 bytes, from 2013-12-03 20:59:13)
- /home/robclark/src/freedreno/envytools/rnndb/dsi/dsi.xml ( 11712 bytes, from 2013-08-17 17:13:43) - /home/robclark/src/freedreno/envytools/rnndb/dsi/dsi.xml ( 11712 bytes, from 2013-08-17 17:13:43)
- /home/robclark/src/freedreno/envytools/rnndb/dsi/sfpb.xml ( 344 bytes, from 2013-08-11 19:26:32) - /home/robclark/src/freedreno/envytools/rnndb/dsi/sfpb.xml ( 344 bytes, from 2013-08-11 19:26:32)
- /home/robclark/src/freedreno/envytools/rnndb/dsi/mmss_cc.xml ( 1544 bytes, from 2013-08-16 19:17:05) - /home/robclark/src/freedreno/envytools/rnndb/dsi/mmss_cc.xml ( 1544 bytes, from 2013-08-16 19:17:05)
- /home/robclark/src/freedreno/envytools/rnndb/hdmi/qfprom.xml ( 600 bytes, from 2013-07-05 19:21:12) - /home/robclark/src/freedreno/envytools/rnndb/hdmi/qfprom.xml ( 600 bytes, from 2013-07-05 19:21:12)
- /home/robclark/src/freedreno/envytools/rnndb/hdmi/hdmi.xml ( 19288 bytes, from 2013-08-11 18:14:15) - /home/robclark/src/freedreno/envytools/rnndb/hdmi/hdmi.xml ( 20932 bytes, from 2013-12-01 15:13:04)
Copyright (C) 2013 by the following authors: Copyright (C) 2013 by the following authors:
- Rob Clark <robdclark@gmail.com> (robclark) - Rob Clark <robdclark@gmail.com> (robclark)
......
...@@ -8,14 +8,16 @@ This file was generated by the rules-ng-ng headergen tool in this git repository ...@@ -8,14 +8,16 @@ This file was generated by the rules-ng-ng headergen tool in this git repository
git clone https://github.com/freedreno/envytools.git git clone https://github.com/freedreno/envytools.git
The rules-ng-ng source files this header was generated from are: The rules-ng-ng source files this header was generated from are:
- /home/robclark/src/freedreno/envytools/rnndb/msm.xml ( 595 bytes, from 2013-07-05 19:21:12) - /home/robclark/src/freedreno/envytools/rnndb/msm.xml ( 647 bytes, from 2013-11-30 14:45:35)
- /home/robclark/src/freedreno/envytools/rnndb/freedreno_copyright.xml ( 1453 bytes, from 2013-03-31 16:51:27) - /home/robclark/src/freedreno/envytools/rnndb/freedreno_copyright.xml ( 1453 bytes, from 2013-03-31 16:51:27)
- /home/robclark/src/freedreno/envytools/rnndb/mdp4/mdp4.xml ( 19332 bytes, from 2013-10-07 16:36:48) - /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp4.xml ( 17996 bytes, from 2013-12-01 19:10:31)
- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp_common.xml ( 1615 bytes, from 2013-11-30 15:00:52)
- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp5.xml ( 22517 bytes, from 2013-12-03 20:59:13)
- /home/robclark/src/freedreno/envytools/rnndb/dsi/dsi.xml ( 11712 bytes, from 2013-08-17 17:13:43) - /home/robclark/src/freedreno/envytools/rnndb/dsi/dsi.xml ( 11712 bytes, from 2013-08-17 17:13:43)
- /home/robclark/src/freedreno/envytools/rnndb/dsi/sfpb.xml ( 344 bytes, from 2013-08-11 19:26:32) - /home/robclark/src/freedreno/envytools/rnndb/dsi/sfpb.xml ( 344 bytes, from 2013-08-11 19:26:32)
- /home/robclark/src/freedreno/envytools/rnndb/dsi/mmss_cc.xml ( 1544 bytes, from 2013-08-16 19:17:05) - /home/robclark/src/freedreno/envytools/rnndb/dsi/mmss_cc.xml ( 1544 bytes, from 2013-08-16 19:17:05)
- /home/robclark/src/freedreno/envytools/rnndb/hdmi/qfprom.xml ( 600 bytes, from 2013-07-05 19:21:12) - /home/robclark/src/freedreno/envytools/rnndb/hdmi/qfprom.xml ( 600 bytes, from 2013-07-05 19:21:12)
- /home/robclark/src/freedreno/envytools/rnndb/hdmi/hdmi.xml ( 19288 bytes, from 2013-08-11 18:14:15) - /home/robclark/src/freedreno/envytools/rnndb/hdmi/hdmi.xml ( 20932 bytes, from 2013-12-01 15:13:04)
Copyright (C) 2013 by the following authors: Copyright (C) 2013 by the following authors:
- Rob Clark <robdclark@gmail.com> (robclark) - Rob Clark <robdclark@gmail.com> (robclark)
......
...@@ -8,14 +8,16 @@ This file was generated by the rules-ng-ng headergen tool in this git repository ...@@ -8,14 +8,16 @@ This file was generated by the rules-ng-ng headergen tool in this git repository
git clone https://github.com/freedreno/envytools.git git clone https://github.com/freedreno/envytools.git
The rules-ng-ng source files this header was generated from are: The rules-ng-ng source files this header was generated from are:
- /home/robclark/src/freedreno/envytools/rnndb/msm.xml ( 595 bytes, from 2013-07-05 19:21:12) - /home/robclark/src/freedreno/envytools/rnndb/msm.xml ( 647 bytes, from 2013-11-30 14:45:35)
- /home/robclark/src/freedreno/envytools/rnndb/freedreno_copyright.xml ( 1453 bytes, from 2013-03-31 16:51:27) - /home/robclark/src/freedreno/envytools/rnndb/freedreno_copyright.xml ( 1453 bytes, from 2013-03-31 16:51:27)
- /home/robclark/src/freedreno/envytools/rnndb/mdp4/mdp4.xml ( 19332 bytes, from 2013-10-07 16:36:48) - /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp4.xml ( 17996 bytes, from 2013-12-01 19:10:31)
- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp_common.xml ( 1615 bytes, from 2013-11-30 15:00:52)
- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp5.xml ( 22517 bytes, from 2013-12-03 20:59:13)
- /home/robclark/src/freedreno/envytools/rnndb/dsi/dsi.xml ( 11712 bytes, from 2013-08-17 17:13:43) - /home/robclark/src/freedreno/envytools/rnndb/dsi/dsi.xml ( 11712 bytes, from 2013-08-17 17:13:43)
- /home/robclark/src/freedreno/envytools/rnndb/dsi/sfpb.xml ( 344 bytes, from 2013-08-11 19:26:32) - /home/robclark/src/freedreno/envytools/rnndb/dsi/sfpb.xml ( 344 bytes, from 2013-08-11 19:26:32)
- /home/robclark/src/freedreno/envytools/rnndb/dsi/mmss_cc.xml ( 1544 bytes, from 2013-08-16 19:17:05) - /home/robclark/src/freedreno/envytools/rnndb/dsi/mmss_cc.xml ( 1544 bytes, from 2013-08-16 19:17:05)
- /home/robclark/src/freedreno/envytools/rnndb/hdmi/qfprom.xml ( 600 bytes, from 2013-07-05 19:21:12) - /home/robclark/src/freedreno/envytools/rnndb/hdmi/qfprom.xml ( 600 bytes, from 2013-07-05 19:21:12)
- /home/robclark/src/freedreno/envytools/rnndb/hdmi/hdmi.xml ( 19288 bytes, from 2013-08-11 18:14:15) - /home/robclark/src/freedreno/envytools/rnndb/hdmi/hdmi.xml ( 20932 bytes, from 2013-12-01 15:13:04)
Copyright (C) 2013 by the following authors: Copyright (C) 2013 by the following authors:
- Rob Clark <robdclark@gmail.com> (robclark) - Rob Clark <robdclark@gmail.com> (robclark)
......
...@@ -41,7 +41,7 @@ void hdmi_set_mode(struct hdmi *hdmi, bool power_on) ...@@ -41,7 +41,7 @@ void hdmi_set_mode(struct hdmi *hdmi, bool power_on)
power_on ? "Enable" : "Disable", ctrl); power_on ? "Enable" : "Disable", ctrl);
} }
static irqreturn_t hdmi_irq(int irq, void *dev_id) irqreturn_t hdmi_irq(int irq, void *dev_id)
{ {
struct hdmi *hdmi = dev_id; struct hdmi *hdmi = dev_id;
...@@ -71,13 +71,13 @@ void hdmi_destroy(struct kref *kref) ...@@ -71,13 +71,13 @@ void hdmi_destroy(struct kref *kref)
} }
/* initialize connector */ /* initialize connector */
int hdmi_init(struct drm_device *dev, struct drm_encoder *encoder) struct hdmi *hdmi_init(struct drm_device *dev, struct drm_encoder *encoder)
{ {
struct hdmi *hdmi = NULL; struct hdmi *hdmi = NULL;
struct msm_drm_private *priv = dev->dev_private; struct msm_drm_private *priv = dev->dev_private;
struct platform_device *pdev = hdmi_pdev; struct platform_device *pdev = hdmi_pdev;
struct hdmi_platform_config *config; struct hdmi_platform_config *config;
int ret; int i, ret;
if (!pdev) { if (!pdev) {
dev_err(dev->dev, "no hdmi device\n"); dev_err(dev->dev, "no hdmi device\n");
...@@ -99,6 +99,7 @@ int hdmi_init(struct drm_device *dev, struct drm_encoder *encoder) ...@@ -99,6 +99,7 @@ int hdmi_init(struct drm_device *dev, struct drm_encoder *encoder)
hdmi->dev = dev; hdmi->dev = dev;
hdmi->pdev = pdev; hdmi->pdev = pdev;
hdmi->config = config;
hdmi->encoder = encoder; hdmi->encoder = encoder;
/* not sure about which phy maps to which msm.. probably I miss some */ /* not sure about which phy maps to which msm.. probably I miss some */
...@@ -114,44 +115,70 @@ int hdmi_init(struct drm_device *dev, struct drm_encoder *encoder) ...@@ -114,44 +115,70 @@ int hdmi_init(struct drm_device *dev, struct drm_encoder *encoder)
goto fail; goto fail;
} }
hdmi->mmio = msm_ioremap(pdev, "hdmi_msm_hdmi_addr", "HDMI"); hdmi->mmio = msm_ioremap(pdev, config->mmio_name, "HDMI");
if (IS_ERR(hdmi->mmio)) { if (IS_ERR(hdmi->mmio)) {
ret = PTR_ERR(hdmi->mmio); ret = PTR_ERR(hdmi->mmio);
goto fail; goto fail;
} }
hdmi->mvs = devm_regulator_get(&pdev->dev, "8901_hdmi_mvs"); BUG_ON(config->hpd_reg_cnt > ARRAY_SIZE(hdmi->hpd_regs));
if (IS_ERR(hdmi->mvs)) for (i = 0; i < config->hpd_reg_cnt; i++) {
hdmi->mvs = devm_regulator_get(&pdev->dev, "hdmi_mvs"); struct regulator *reg;
if (IS_ERR(hdmi->mvs)) {
ret = PTR_ERR(hdmi->mvs); reg = devm_regulator_get(&pdev->dev, config->hpd_reg_names[i]);
dev_err(dev->dev, "failed to get mvs regulator: %d\n", ret); if (IS_ERR(reg)) {
goto fail; ret = PTR_ERR(reg);
dev_err(dev->dev, "failed to get hpd regulator: %s (%d)\n",
config->hpd_reg_names[i], ret);
goto fail;
}
hdmi->hpd_regs[i] = reg;
} }
hdmi->mpp0 = devm_regulator_get(&pdev->dev, "8901_mpp0"); BUG_ON(config->pwr_reg_cnt > ARRAY_SIZE(hdmi->pwr_regs));
if (IS_ERR(hdmi->mpp0)) for (i = 0; i < config->pwr_reg_cnt; i++) {
hdmi->mpp0 = NULL; struct regulator *reg;
hdmi->clk = devm_clk_get(&pdev->dev, "core_clk"); reg = devm_regulator_get(&pdev->dev, config->pwr_reg_names[i]);
if (IS_ERR(hdmi->clk)) { if (IS_ERR(reg)) {
ret = PTR_ERR(hdmi->clk); ret = PTR_ERR(reg);
dev_err(dev->dev, "failed to get 'clk': %d\n", ret); dev_err(dev->dev, "failed to get pwr regulator: %s (%d)\n",
goto fail; config->pwr_reg_names[i], ret);
goto fail;
}
hdmi->pwr_regs[i] = reg;
} }
hdmi->m_pclk = devm_clk_get(&pdev->dev, "master_iface_clk"); BUG_ON(config->hpd_clk_cnt > ARRAY_SIZE(hdmi->hpd_clks));
if (IS_ERR(hdmi->m_pclk)) { for (i = 0; i < config->hpd_clk_cnt; i++) {
ret = PTR_ERR(hdmi->m_pclk); struct clk *clk;
dev_err(dev->dev, "failed to get 'm_pclk': %d\n", ret);
goto fail; clk = devm_clk_get(&pdev->dev, config->hpd_clk_names[i]);
if (IS_ERR(clk)) {
ret = PTR_ERR(clk);
dev_err(dev->dev, "failed to get hpd clk: %s (%d)\n",
config->hpd_clk_names[i], ret);
goto fail;
}
hdmi->hpd_clks[i] = clk;
} }
hdmi->s_pclk = devm_clk_get(&pdev->dev, "slave_iface_clk"); BUG_ON(config->pwr_clk_cnt > ARRAY_SIZE(hdmi->pwr_clks));
if (IS_ERR(hdmi->s_pclk)) { for (i = 0; i < config->pwr_clk_cnt; i++) {
ret = PTR_ERR(hdmi->s_pclk); struct clk *clk;
dev_err(dev->dev, "failed to get 's_pclk': %d\n", ret);
goto fail; clk = devm_clk_get(&pdev->dev, config->pwr_clk_names[i]);
if (IS_ERR(clk)) {
ret = PTR_ERR(clk);
dev_err(dev->dev, "failed to get pwr clk: %s (%d)\n",
config->pwr_clk_names[i], ret);
goto fail;
}
hdmi->pwr_clks[i] = clk;
} }
hdmi->i2c = hdmi_i2c_init(hdmi); hdmi->i2c = hdmi_i2c_init(hdmi);
...@@ -178,20 +205,22 @@ int hdmi_init(struct drm_device *dev, struct drm_encoder *encoder) ...@@ -178,20 +205,22 @@ int hdmi_init(struct drm_device *dev, struct drm_encoder *encoder)
goto fail; goto fail;
} }
hdmi->irq = platform_get_irq(pdev, 0); if (!config->shared_irq) {
if (hdmi->irq < 0) { hdmi->irq = platform_get_irq(pdev, 0);
ret = hdmi->irq; if (hdmi->irq < 0) {
dev_err(dev->dev, "failed to get irq: %d\n", ret); ret = hdmi->irq;
goto fail; dev_err(dev->dev, "failed to get irq: %d\n", ret);
} goto fail;
}
ret = devm_request_threaded_irq(&pdev->dev, hdmi->irq, ret = devm_request_threaded_irq(&pdev->dev, hdmi->irq,
NULL, hdmi_irq, IRQF_TRIGGER_HIGH | IRQF_ONESHOT, NULL, hdmi_irq, IRQF_TRIGGER_HIGH | IRQF_ONESHOT,
"hdmi_isr", hdmi); "hdmi_isr", hdmi);
if (ret < 0) { if (ret < 0) {
dev_err(dev->dev, "failed to request IRQ%u: %d\n", dev_err(dev->dev, "failed to request IRQ%u: %d\n",
hdmi->irq, ret); hdmi->irq, ret);
goto fail; goto fail;
}
} }
encoder->bridge = hdmi->bridge; encoder->bridge = hdmi->bridge;
...@@ -199,7 +228,7 @@ int hdmi_init(struct drm_device *dev, struct drm_encoder *encoder) ...@@ -199,7 +228,7 @@ int hdmi_init(struct drm_device *dev, struct drm_encoder *encoder)
priv->bridges[priv->num_bridges++] = hdmi->bridge; priv->bridges[priv->num_bridges++] = hdmi->bridge;
priv->connectors[priv->num_connectors++] = hdmi->connector; priv->connectors[priv->num_connectors++] = hdmi->connector;
return 0; return hdmi;
fail: fail:
if (hdmi) { if (hdmi) {
...@@ -211,37 +240,100 @@ int hdmi_init(struct drm_device *dev, struct drm_encoder *encoder) ...@@ -211,37 +240,100 @@ int hdmi_init(struct drm_device *dev, struct drm_encoder *encoder)
hdmi_destroy(&hdmi->refcount); hdmi_destroy(&hdmi->refcount);
} }
return ret; return ERR_PTR(ret);
} }
/* /*
* The hdmi device: * The hdmi device:
*/ */
#include <linux/of_gpio.h>
static int hdmi_dev_probe(struct platform_device *pdev) static int hdmi_dev_probe(struct platform_device *pdev)
{ {
static struct hdmi_platform_config config = {}; static struct hdmi_platform_config config = {};
#ifdef CONFIG_OF #ifdef CONFIG_OF
/* TODO */ struct device_node *of_node = pdev->dev.of_node;
int get_gpio(const char *name)
{
int gpio = of_get_named_gpio(of_node, name, 0);
if (gpio < 0) {
dev_err(&pdev->dev, "failed to get gpio: %s (%d)\n",
name, gpio);
gpio = -1;
}
return gpio;
}
/* TODO actually use DT.. */
static const char *hpd_reg_names[] = {"hpd-gdsc", "hpd-5v"};
static const char *pwr_reg_names[] = {"core-vdda", "core-vcc"};
static const char *hpd_clk_names[] = {"iface_clk", "core_clk", "mdp_core_clk"};
static const char *pwr_clk_names[] = {"extp_clk", "alt_iface_clk"};
config.phy_init = hdmi_phy_8x74_init;
config.mmio_name = "core_physical";
config.hpd_reg_names = hpd_reg_names;
config.hpd_reg_cnt = ARRAY_SIZE(hpd_reg_names);
config.pwr_reg_names = pwr_reg_names;
config.pwr_reg_cnt = ARRAY_SIZE(pwr_reg_names);
config.hpd_clk_names = hpd_clk_names;
config.hpd_clk_cnt = ARRAY_SIZE(hpd_clk_names);
config.pwr_clk_names = pwr_clk_names;
config.pwr_clk_cnt = ARRAY_SIZE(pwr_clk_names);
config.ddc_clk_gpio = get_gpio("qcom,hdmi-tx-ddc-clk");
config.ddc_data_gpio = get_gpio("qcom,hdmi-tx-ddc-data");
config.hpd_gpio = get_gpio("qcom,hdmi-tx-hpd");
config.mux_en_gpio = get_gpio("qcom,hdmi-tx-mux-en");
config.mux_sel_gpio = get_gpio("qcom,hdmi-tx-mux-sel");
config.shared_irq = true;
#else #else
static const char *hpd_clk_names[] = {
"core_clk", "master_iface_clk", "slave_iface_clk",
};
if (cpu_is_apq8064()) { if (cpu_is_apq8064()) {
static const char *hpd_reg_names[] = {"8921_hdmi_mvs"};
config.phy_init = hdmi_phy_8960_init; config.phy_init = hdmi_phy_8960_init;
config.mmio_name = "hdmi_msm_hdmi_addr";
config.hpd_reg_names = hpd_reg_names;
config.hpd_reg_cnt = ARRAY_SIZE(hpd_reg_names);
config.hpd_clk_names = hpd_clk_names;
config.hpd_clk_cnt = ARRAY_SIZE(hpd_clk_names);
config.ddc_clk_gpio = 70; config.ddc_clk_gpio = 70;
config.ddc_data_gpio = 71; config.ddc_data_gpio = 71;
config.hpd_gpio = 72; config.hpd_gpio = 72;
config.pmic_gpio = 13 + NR_GPIO_IRQS; config.mux_en_gpio = -1;
} else if (cpu_is_msm8960()) { config.mux_sel_gpio = 13 + NR_GPIO_IRQS;
} else if (cpu_is_msm8960() || cpu_is_msm8960ab()) {
static const char *hpd_reg_names[] = {"8921_hdmi_mvs"};
config.phy_init = hdmi_phy_8960_init; config.phy_init = hdmi_phy_8960_init;
config.mmio_name = "hdmi_msm_hdmi_addr";
config.hpd_reg_names = hpd_reg_names;
config.hpd_reg_cnt = ARRAY_SIZE(hpd_reg_names);
config.hpd_clk_names = hpd_clk_names;
config.hpd_clk_cnt = ARRAY_SIZE(hpd_clk_names);
config.ddc_clk_gpio = 100; config.ddc_clk_gpio = 100;
config.ddc_data_gpio = 101; config.ddc_data_gpio = 101;
config.hpd_gpio = 102; config.hpd_gpio = 102;
config.pmic_gpio = -1; config.mux_en_gpio = -1;
config.mux_sel_gpio = -1;
} else if (cpu_is_msm8x60()) { } else if (cpu_is_msm8x60()) {
static const char *hpd_reg_names[] = {
"8901_hdmi_mvs", "8901_mpp0"
};
config.phy_init = hdmi_phy_8x60_init; config.phy_init = hdmi_phy_8x60_init;
config.mmio_name = "hdmi_msm_hdmi_addr";
config.hpd_reg_names = hpd_reg_names;
config.hpd_reg_cnt = ARRAY_SIZE(hpd_reg_names);
config.hpd_clk_names = hpd_clk_names;
config.hpd_clk_cnt = ARRAY_SIZE(hpd_clk_names);
config.ddc_clk_gpio = 170; config.ddc_clk_gpio = 170;
config.ddc_data_gpio = 171; config.ddc_data_gpio = 171;
config.hpd_gpio = 172; config.hpd_gpio = 172;
config.pmic_gpio = -1; config.mux_en_gpio = -1;
config.mux_sel_gpio = -1;
} }
#endif #endif
pdev->dev.platform_data = &config; pdev->dev.platform_data = &config;
...@@ -255,10 +347,19 @@ static int hdmi_dev_remove(struct platform_device *pdev) ...@@ -255,10 +347,19 @@ static int hdmi_dev_remove(struct platform_device *pdev)
return 0; return 0;
} }
static const struct of_device_id dt_match[] = {
{ .compatible = "qcom,hdmi-tx" },
{}
};
MODULE_DEVICE_TABLE(of, dt_match);
static struct platform_driver hdmi_driver = { static struct platform_driver hdmi_driver = {
.probe = hdmi_dev_probe, .probe = hdmi_dev_probe,
.remove = hdmi_dev_remove, .remove = hdmi_dev_remove,
.driver.name = "hdmi_msm", .driver = {
.name = "hdmi_msm",
.of_match_table = dt_match,
},
}; };
void __init hdmi_register(void) void __init hdmi_register(void)
......
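hdmi_init() now returns the struct hdmi pointer (or an ERR_PTR) instead of an int, so a kms with a shared irq (mdp5) can keep the handle and forward interrupts to hdmi_irq(). A sketch of a caller adjusted for the new contract; the function and storage names are hypothetical:

#include <linux/err.h>
#include "hdmi.h"	/* hdmi_init() */

static struct hdmi *example_hdmi;	/* hypothetical storage */

static int example_modeset_init(struct drm_device *dev,
		struct drm_encoder *encoder)
{
	struct hdmi *hdmi = hdmi_init(dev, encoder);

	if (IS_ERR(hdmi))
		return PTR_ERR(hdmi);

	/* keep the handle so a shared-irq kms can call hdmi_irq(0, hdmi) */
	example_hdmi = hdmi;
	return 0;
}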
...@@ -28,6 +28,7 @@ ...@@ -28,6 +28,7 @@
struct hdmi_phy; struct hdmi_phy;
struct hdmi_platform_config;
struct hdmi { struct hdmi {
struct kref refcount; struct kref refcount;
...@@ -35,14 +36,14 @@ struct hdmi { ...@@ -35,14 +36,14 @@ struct hdmi {
struct drm_device *dev; struct drm_device *dev;
struct platform_device *pdev; struct platform_device *pdev;
void __iomem *mmio; const struct hdmi_platform_config *config;
struct regulator *mvs; /* HDMI_5V */ void __iomem *mmio;
struct regulator *mpp0; /* External 5V */
struct clk *clk; struct regulator *hpd_regs[2];
struct clk *m_pclk; struct regulator *pwr_regs[2];
struct clk *s_pclk; struct clk *hpd_clks[3];
struct clk *pwr_clks[2];
struct hdmi_phy *phy; struct hdmi_phy *phy;
struct i2c_adapter *i2c; struct i2c_adapter *i2c;
...@@ -60,7 +61,29 @@ struct hdmi { ...@@ -60,7 +61,29 @@ struct hdmi {
/* platform config data (ie. from DT, or pdata) */ /* platform config data (ie. from DT, or pdata) */
struct hdmi_platform_config { struct hdmi_platform_config {
struct hdmi_phy *(*phy_init)(struct hdmi *hdmi); struct hdmi_phy *(*phy_init)(struct hdmi *hdmi);
int ddc_clk_gpio, ddc_data_gpio, hpd_gpio, pmic_gpio; const char *mmio_name;
/* regulators that need to be on for hpd: */
const char **hpd_reg_names;
int hpd_reg_cnt;
/* regulators that need to be on for screen pwr: */
const char **pwr_reg_names;
int pwr_reg_cnt;
/* clks that need to be on for hpd: */
const char **hpd_clk_names;
int hpd_clk_cnt;
/* clks that need to be on for screen pwr (ie pixel clk): */
const char **pwr_clk_names;
int pwr_clk_cnt;
/* gpio's: */
int ddc_clk_gpio, ddc_data_gpio, hpd_gpio, mux_en_gpio, mux_sel_gpio;
/* older devices had their own irq, mdp5+ it is shared w/ mdp: */
bool shared_irq;
}; };
void hdmi_set_mode(struct hdmi *hdmi, bool power_on); void hdmi_set_mode(struct hdmi *hdmi, bool power_on);
...@@ -106,6 +129,7 @@ struct hdmi_phy { ...@@ -106,6 +129,7 @@ struct hdmi_phy {
struct hdmi_phy *hdmi_phy_8960_init(struct hdmi *hdmi); struct hdmi_phy *hdmi_phy_8960_init(struct hdmi *hdmi);
struct hdmi_phy *hdmi_phy_8x60_init(struct hdmi *hdmi); struct hdmi_phy *hdmi_phy_8x60_init(struct hdmi *hdmi);
struct hdmi_phy *hdmi_phy_8x74_init(struct hdmi *hdmi);
/* /*
* hdmi bridge: * hdmi bridge:
......
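With the regulator, clock, and gpio lists moved into hdmi_platform_config, supporting another board is mostly a matter of supplying name tables and counts. A sketch of a hypothetical board entry using made-up supply and clock names (only the struct fields come from the header above):

#include <linux/kernel.h>	/* ARRAY_SIZE */
#include "hdmi.h"

static const char *example_hpd_regs[] = { "hdmi-vdda" };
static const char *example_hpd_clks[] = { "iface_clk", "core_clk" };

static struct hdmi_platform_config example_config = {
	.phy_init      = hdmi_phy_8960_init,
	.mmio_name     = "hdmi_msm_hdmi_addr",
	.hpd_reg_names = example_hpd_regs,
	.hpd_reg_cnt   = ARRAY_SIZE(example_hpd_regs),
	.hpd_clk_names = example_hpd_clks,
	.hpd_clk_cnt   = ARRAY_SIZE(example_hpd_clks),
	.ddc_clk_gpio  = -1,	/* -1 = not wired on this board */
	.ddc_data_gpio = -1,
	.hpd_gpio      = -1,
	.mux_en_gpio   = -1,
	.mux_sel_gpio  = -1,
};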
...@@ -8,14 +8,16 @@ This file was generated by the rules-ng-ng headergen tool in this git repository ...@@ -8,14 +8,16 @@ This file was generated by the rules-ng-ng headergen tool in this git repository
git clone https://github.com/freedreno/envytools.git git clone https://github.com/freedreno/envytools.git
The rules-ng-ng source files this header was generated from are: The rules-ng-ng source files this header was generated from are:
- /home/robclark/src/freedreno/envytools/rnndb/msm.xml ( 595 bytes, from 2013-07-05 19:21:12) - /home/robclark/src/freedreno/envytools/rnndb/msm.xml ( 647 bytes, from 2013-11-30 14:45:35)
- /home/robclark/src/freedreno/envytools/rnndb/freedreno_copyright.xml ( 1453 bytes, from 2013-03-31 16:51:27) - /home/robclark/src/freedreno/envytools/rnndb/freedreno_copyright.xml ( 1453 bytes, from 2013-03-31 16:51:27)
- /home/robclark/src/freedreno/envytools/rnndb/mdp4/mdp4.xml ( 19332 bytes, from 2013-10-07 16:36:48) - /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp4.xml ( 17996 bytes, from 2013-12-01 19:10:31)
- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp_common.xml ( 1615 bytes, from 2013-11-30 15:00:52)
- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp5.xml ( 22517 bytes, from 2013-12-03 20:59:13)
- /home/robclark/src/freedreno/envytools/rnndb/dsi/dsi.xml ( 11712 bytes, from 2013-08-17 17:13:43) - /home/robclark/src/freedreno/envytools/rnndb/dsi/dsi.xml ( 11712 bytes, from 2013-08-17 17:13:43)
- /home/robclark/src/freedreno/envytools/rnndb/dsi/sfpb.xml ( 344 bytes, from 2013-08-11 19:26:32) - /home/robclark/src/freedreno/envytools/rnndb/dsi/sfpb.xml ( 344 bytes, from 2013-08-11 19:26:32)
- /home/robclark/src/freedreno/envytools/rnndb/dsi/mmss_cc.xml ( 1544 bytes, from 2013-08-16 19:17:05) - /home/robclark/src/freedreno/envytools/rnndb/dsi/mmss_cc.xml ( 1544 bytes, from 2013-08-16 19:17:05)
- /home/robclark/src/freedreno/envytools/rnndb/hdmi/qfprom.xml ( 600 bytes, from 2013-07-05 19:21:12) - /home/robclark/src/freedreno/envytools/rnndb/hdmi/qfprom.xml ( 600 bytes, from 2013-07-05 19:21:12)
- /home/robclark/src/freedreno/envytools/rnndb/hdmi/hdmi.xml ( 19288 bytes, from 2013-08-11 18:14:15) - /home/robclark/src/freedreno/envytools/rnndb/hdmi/hdmi.xml ( 20932 bytes, from 2013-12-01 15:13:04)
Copyright (C) 2013 by the following authors: Copyright (C) 2013 by the following authors:
- Rob Clark <robdclark@gmail.com> (robclark) - Rob Clark <robdclark@gmail.com> (robclark)
...@@ -212,6 +214,20 @@ static inline uint32_t HDMI_HDCP_LINK0_STATUS_KEY_STATE(enum hdmi_hdcp_key_state ...@@ -212,6 +214,20 @@ static inline uint32_t HDMI_HDCP_LINK0_STATUS_KEY_STATE(enum hdmi_hdcp_key_state
#define REG_HDMI_HDCP_RESET 0x00000130 #define REG_HDMI_HDCP_RESET 0x00000130
#define HDMI_HDCP_RESET_LINK0_DEAUTHENTICATE 0x00000001 #define HDMI_HDCP_RESET_LINK0_DEAUTHENTICATE 0x00000001
#define REG_HDMI_VENSPEC_INFO0 0x0000016c
#define REG_HDMI_VENSPEC_INFO1 0x00000170
#define REG_HDMI_VENSPEC_INFO2 0x00000174
#define REG_HDMI_VENSPEC_INFO3 0x00000178
#define REG_HDMI_VENSPEC_INFO4 0x0000017c
#define REG_HDMI_VENSPEC_INFO5 0x00000180
#define REG_HDMI_VENSPEC_INFO6 0x00000184
#define REG_HDMI_AUDIO_CFG 0x000001d0 #define REG_HDMI_AUDIO_CFG 0x000001d0
#define HDMI_AUDIO_CFG_ENGINE_ENABLE 0x00000001 #define HDMI_AUDIO_CFG_ENGINE_ENABLE 0x00000001
#define HDMI_AUDIO_CFG_FIFO_WATERMARK__MASK 0x000000f0 #define HDMI_AUDIO_CFG_FIFO_WATERMARK__MASK 0x000000f0
...@@ -235,6 +251,9 @@ static inline uint32_t HDMI_DDC_CTRL_TRANSACTION_CNT(uint32_t val) ...@@ -235,6 +251,9 @@ static inline uint32_t HDMI_DDC_CTRL_TRANSACTION_CNT(uint32_t val)
return ((val) << HDMI_DDC_CTRL_TRANSACTION_CNT__SHIFT) & HDMI_DDC_CTRL_TRANSACTION_CNT__MASK; return ((val) << HDMI_DDC_CTRL_TRANSACTION_CNT__SHIFT) & HDMI_DDC_CTRL_TRANSACTION_CNT__MASK;
} }
#define REG_HDMI_DDC_ARBITRATION 0x00000210
#define HDMI_DDC_ARBITRATION_HW_ARBITRATION 0x00000010
#define REG_HDMI_DDC_INT_CTRL 0x00000214 #define REG_HDMI_DDC_INT_CTRL 0x00000214
#define HDMI_DDC_INT_CTRL_SW_DONE_INT 0x00000001 #define HDMI_DDC_INT_CTRL_SW_DONE_INT 0x00000001
#define HDMI_DDC_INT_CTRL_SW_DONE_ACK 0x00000002 #define HDMI_DDC_INT_CTRL_SW_DONE_ACK 0x00000002
...@@ -340,6 +359,20 @@ static inline uint32_t HDMI_DDC_REF_REFTIMER(uint32_t val) ...@@ -340,6 +359,20 @@ static inline uint32_t HDMI_DDC_REF_REFTIMER(uint32_t val)
return ((val) << HDMI_DDC_REF_REFTIMER__SHIFT) & HDMI_DDC_REF_REFTIMER__MASK; return ((val) << HDMI_DDC_REF_REFTIMER__SHIFT) & HDMI_DDC_REF_REFTIMER__MASK;
} }
#define REG_HDMI_CEC_STATUS 0x00000298
#define REG_HDMI_CEC_INT 0x0000029c
#define REG_HDMI_CEC_ADDR 0x000002a0
#define REG_HDMI_CEC_TIME 0x000002a4
#define REG_HDMI_CEC_REFTIMER 0x000002a8
#define REG_HDMI_CEC_RD_DATA 0x000002ac
#define REG_HDMI_CEC_RD_FILTER 0x000002b0
#define REG_HDMI_ACTIVE_HSYNC 0x000002b4 #define REG_HDMI_ACTIVE_HSYNC 0x000002b4
#define HDMI_ACTIVE_HSYNC_START__MASK 0x00000fff #define HDMI_ACTIVE_HSYNC_START__MASK 0x00000fff
#define HDMI_ACTIVE_HSYNC_START__SHIFT 0 #define HDMI_ACTIVE_HSYNC_START__SHIFT 0
...@@ -410,17 +443,33 @@ static inline uint32_t HDMI_VSYNC_TOTAL_F2_V_TOTAL(uint32_t val) ...@@ -410,17 +443,33 @@ static inline uint32_t HDMI_VSYNC_TOTAL_F2_V_TOTAL(uint32_t val)
#define HDMI_FRAME_CTRL_HSYNC_LOW 0x20000000 #define HDMI_FRAME_CTRL_HSYNC_LOW 0x20000000
#define HDMI_FRAME_CTRL_INTERLACED_EN 0x80000000 #define HDMI_FRAME_CTRL_INTERLACED_EN 0x80000000
#define REG_HDMI_AUD_INT 0x000002cc
#define HDMI_AUD_INT_AUD_FIFO_URUN_INT 0x00000001
#define HDMI_AUD_INT_AUD_FIFO_URAN_MASK 0x00000002
#define HDMI_AUD_INT_AUD_SAM_DROP_INT 0x00000004
#define HDMI_AUD_INT_AUD_SAM_DROP_MASK 0x00000008
#define REG_HDMI_PHY_CTRL 0x000002d4 #define REG_HDMI_PHY_CTRL 0x000002d4
#define HDMI_PHY_CTRL_SW_RESET_PLL 0x00000001 #define HDMI_PHY_CTRL_SW_RESET_PLL 0x00000001
#define HDMI_PHY_CTRL_SW_RESET_PLL_LOW 0x00000002 #define HDMI_PHY_CTRL_SW_RESET_PLL_LOW 0x00000002
#define HDMI_PHY_CTRL_SW_RESET 0x00000004 #define HDMI_PHY_CTRL_SW_RESET 0x00000004
#define HDMI_PHY_CTRL_SW_RESET_LOW 0x00000008 #define HDMI_PHY_CTRL_SW_RESET_LOW 0x00000008
#define REG_HDMI_AUD_INT 0x000002cc #define REG_HDMI_CEC_WR_RANGE 0x000002dc
#define HDMI_AUD_INT_AUD_FIFO_URUN_INT 0x00000001
#define HDMI_AUD_INT_AUD_FIFO_URAN_MASK 0x00000002 #define REG_HDMI_CEC_RD_RANGE 0x000002e0
#define HDMI_AUD_INT_AUD_SAM_DROP_INT 0x00000004
#define HDMI_AUD_INT_AUD_SAM_DROP_MASK 0x00000008 #define REG_HDMI_VERSION 0x000002e4
#define REG_HDMI_CEC_COMPL_CTL 0x00000360
#define REG_HDMI_CEC_RD_START_RANGE 0x00000364
#define REG_HDMI_CEC_RD_TOTAL_RANGE 0x00000368
#define REG_HDMI_CEC_RD_ERR_RESP_LO 0x0000036c
#define REG_HDMI_CEC_WR_CHECK_CONFIG 0x00000370
#define REG_HDMI_8x60_PHY_REG0 0x00000300 #define REG_HDMI_8x60_PHY_REG0 0x00000300
#define HDMI_8x60_PHY_REG0_DESER_DEL_CTRL__MASK 0x0000001c #define HDMI_8x60_PHY_REG0_DESER_DEL_CTRL__MASK 0x0000001c
...@@ -504,5 +553,23 @@ static inline uint32_t HDMI_8x60_PHY_REG1_OUTVOL_SWING_CTRL(uint32_t val) ...@@ -504,5 +553,23 @@ static inline uint32_t HDMI_8x60_PHY_REG1_OUTVOL_SWING_CTRL(uint32_t val)
#define REG_HDMI_8960_PHY_REG12 0x00000430 #define REG_HDMI_8960_PHY_REG12 0x00000430
#define REG_HDMI_8x74_ANA_CFG0 0x00000000
#define REG_HDMI_8x74_ANA_CFG1 0x00000004
#define REG_HDMI_8x74_PD_CTRL0 0x00000010
#define REG_HDMI_8x74_PD_CTRL1 0x00000014
#define REG_HDMI_8x74_BIST_CFG0 0x00000034
#define REG_HDMI_8x74_BIST_PATN0 0x0000003c
#define REG_HDMI_8x74_BIST_PATN1 0x00000040
#define REG_HDMI_8x74_BIST_PATN2 0x00000044
#define REG_HDMI_8x74_BIST_PATN3 0x00000048
#endif /* HDMI_XML */ #endif /* HDMI_XML */
...@@ -21,6 +21,7 @@ struct hdmi_bridge { ...@@ -21,6 +21,7 @@ struct hdmi_bridge {
struct drm_bridge base; struct drm_bridge base;
struct hdmi *hdmi; struct hdmi *hdmi;
bool power_on;
unsigned long int pixclock; unsigned long int pixclock;
}; };
...@@ -34,6 +35,65 @@ static void hdmi_bridge_destroy(struct drm_bridge *bridge) ...@@ -34,6 +35,65 @@ static void hdmi_bridge_destroy(struct drm_bridge *bridge)
kfree(hdmi_bridge); kfree(hdmi_bridge);
} }
static void power_on(struct drm_bridge *bridge)
{
struct drm_device *dev = bridge->dev;
struct hdmi_bridge *hdmi_bridge = to_hdmi_bridge(bridge);
struct hdmi *hdmi = hdmi_bridge->hdmi;
const struct hdmi_platform_config *config = hdmi->config;
int i, ret;
for (i = 0; i < config->pwr_reg_cnt; i++) {
ret = regulator_enable(hdmi->pwr_regs[i]);
if (ret) {
dev_err(dev->dev, "failed to enable pwr regulator: %s (%d)\n",
config->pwr_reg_names[i], ret);
}
}
if (config->pwr_clk_cnt > 0) {
DBG("pixclock: %lu", hdmi_bridge->pixclock);
ret = clk_set_rate(hdmi->pwr_clks[0], hdmi_bridge->pixclock);
if (ret) {
dev_err(dev->dev, "failed to set pixel clk: %s (%d)\n",
config->pwr_clk_names[0], ret);
}
}
for (i = 0; i < config->pwr_clk_cnt; i++) {
ret = clk_prepare_enable(hdmi->pwr_clks[i]);
if (ret) {
dev_err(dev->dev, "failed to enable pwr clk: %s (%d)\n",
config->pwr_clk_names[i], ret);
}
}
}
static void power_off(struct drm_bridge *bridge)
{
struct drm_device *dev = bridge->dev;
struct hdmi_bridge *hdmi_bridge = to_hdmi_bridge(bridge);
struct hdmi *hdmi = hdmi_bridge->hdmi;
const struct hdmi_platform_config *config = hdmi->config;
int i, ret;
/* TODO do we need to wait for final vblank somewhere before
* cutting the clocks?
*/
mdelay(16 + 4);
for (i = 0; i < config->pwr_clk_cnt; i++)
clk_disable_unprepare(hdmi->pwr_clks[i]);
for (i = 0; i < config->pwr_reg_cnt; i++) {
ret = regulator_disable(hdmi->pwr_regs[i]);
if (ret) {
dev_err(dev->dev, "failed to disable pwr regulator: %s (%d)\n",
config->pwr_reg_names[i], ret);
}
}
}
static void hdmi_bridge_pre_enable(struct drm_bridge *bridge) static void hdmi_bridge_pre_enable(struct drm_bridge *bridge)
{ {
struct hdmi_bridge *hdmi_bridge = to_hdmi_bridge(bridge); struct hdmi_bridge *hdmi_bridge = to_hdmi_bridge(bridge);
...@@ -41,6 +101,12 @@ static void hdmi_bridge_pre_enable(struct drm_bridge *bridge) ...@@ -41,6 +101,12 @@ static void hdmi_bridge_pre_enable(struct drm_bridge *bridge)
struct hdmi_phy *phy = hdmi->phy; struct hdmi_phy *phy = hdmi->phy;
DBG("power up"); DBG("power up");
if (!hdmi_bridge->power_on) {
power_on(bridge);
hdmi_bridge->power_on = true;
}
phy->funcs->powerup(phy, hdmi_bridge->pixclock); phy->funcs->powerup(phy, hdmi_bridge->pixclock);
hdmi_set_mode(hdmi, true); hdmi_set_mode(hdmi, true);
} }
...@@ -62,6 +128,11 @@ static void hdmi_bridge_post_disable(struct drm_bridge *bridge) ...@@ -62,6 +128,11 @@ static void hdmi_bridge_post_disable(struct drm_bridge *bridge)
DBG("power down"); DBG("power down");
hdmi_set_mode(hdmi, false); hdmi_set_mode(hdmi, false);
phy->funcs->powerdown(phy); phy->funcs->powerdown(phy);
if (hdmi_bridge->power_on) {
power_off(bridge);
hdmi_bridge->power_on = false;
}
} }
static void hdmi_bridge_mode_set(struct drm_bridge *bridge, static void hdmi_bridge_mode_set(struct drm_bridge *bridge,
......
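The bridge now owns the "screen power" resources: regulators come up first, the pixel clock rate is set from the mode before any of the power clocks are enabled, and teardown runs in reverse after a short settle delay. A compressed, illustrative sketch of that ordering (error handling reduced to warnings; not the driver function itself):

#include <linux/clk.h>
#include <linux/printk.h>
#include <linux/regulator/consumer.h>
#include "hdmi.h"

static void example_power_on(struct hdmi *hdmi, unsigned long pixclock)
{
	const struct hdmi_platform_config *config = hdmi->config;
	int i, ret;

	for (i = 0; i < config->pwr_reg_cnt; i++) {
		ret = regulator_enable(hdmi->pwr_regs[i]);
		if (ret)
			pr_warn("pwr regulator %d failed: %d\n", i, ret);
	}

	/* rate must be set before the pixel clk is prepared/enabled */
	if (config->pwr_clk_cnt > 0)
		clk_set_rate(hdmi->pwr_clks[0], pixclock);

	for (i = 0; i < config->pwr_clk_cnt; i++) {
		ret = clk_prepare_enable(hdmi->pwr_clks[i]);
		if (ret)
			pr_warn("pwr clk %d failed: %d\n", i, ret);
	}
}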
...@@ -17,19 +17,20 @@ ...@@ -17,19 +17,20 @@
#include <linux/gpio.h> #include <linux/gpio.h>
#include "msm_kms.h"
#include "hdmi.h" #include "hdmi.h"
struct hdmi_connector { struct hdmi_connector {
struct drm_connector base; struct drm_connector base;
struct hdmi *hdmi; struct hdmi *hdmi;
struct work_struct hpd_work;
}; };
#define to_hdmi_connector(x) container_of(x, struct hdmi_connector, base) #define to_hdmi_connector(x) container_of(x, struct hdmi_connector, base)
static int gpio_config(struct hdmi *hdmi, bool on) static int gpio_config(struct hdmi *hdmi, bool on)
{ {
struct drm_device *dev = hdmi->dev; struct drm_device *dev = hdmi->dev;
struct hdmi_platform_config *config = const struct hdmi_platform_config *config = hdmi->config;
hdmi->pdev->dev.platform_data;
int ret; int ret;
if (on) { if (on) {
...@@ -39,26 +40,43 @@ static int gpio_config(struct hdmi *hdmi, bool on) ...@@ -39,26 +40,43 @@ static int gpio_config(struct hdmi *hdmi, bool on)
"HDMI_DDC_CLK", config->ddc_clk_gpio, ret); "HDMI_DDC_CLK", config->ddc_clk_gpio, ret);
goto error1; goto error1;
} }
gpio_set_value_cansleep(config->ddc_clk_gpio, 1);
ret = gpio_request(config->ddc_data_gpio, "HDMI_DDC_DATA"); ret = gpio_request(config->ddc_data_gpio, "HDMI_DDC_DATA");
if (ret) { if (ret) {
dev_err(dev->dev, "'%s'(%d) gpio_request failed: %d\n", dev_err(dev->dev, "'%s'(%d) gpio_request failed: %d\n",
"HDMI_DDC_DATA", config->ddc_data_gpio, ret); "HDMI_DDC_DATA", config->ddc_data_gpio, ret);
goto error2; goto error2;
} }
gpio_set_value_cansleep(config->ddc_data_gpio, 1);
ret = gpio_request(config->hpd_gpio, "HDMI_HPD"); ret = gpio_request(config->hpd_gpio, "HDMI_HPD");
if (ret) { if (ret) {
dev_err(dev->dev, "'%s'(%d) gpio_request failed: %d\n", dev_err(dev->dev, "'%s'(%d) gpio_request failed: %d\n",
"HDMI_HPD", config->hpd_gpio, ret); "HDMI_HPD", config->hpd_gpio, ret);
goto error3; goto error3;
} }
if (config->pmic_gpio != -1) { gpio_direction_input(config->hpd_gpio);
ret = gpio_request(config->pmic_gpio, "PMIC_HDMI_MUX_SEL"); gpio_set_value_cansleep(config->hpd_gpio, 1);
if (config->mux_en_gpio != -1) {
ret = gpio_request(config->mux_en_gpio, "HDMI_MUX_EN");
if (ret) { if (ret) {
dev_err(dev->dev, "'%s'(%d) gpio_request failed: %d\n", dev_err(dev->dev, "'%s'(%d) gpio_request failed: %d\n",
"PMIC_HDMI_MUX_SEL", config->pmic_gpio, ret); "HDMI_MUX_SEL", config->mux_en_gpio, ret);
goto error4; goto error4;
} }
gpio_set_value_cansleep(config->pmic_gpio, 0); gpio_set_value_cansleep(config->mux_en_gpio, 1);
}
if (config->mux_sel_gpio != -1) {
ret = gpio_request(config->mux_sel_gpio, "HDMI_MUX_SEL");
if (ret) {
dev_err(dev->dev, "'%s'(%d) gpio_request failed: %d\n",
"HDMI_MUX_SEL", config->mux_sel_gpio, ret);
goto error5;
}
gpio_set_value_cansleep(config->mux_sel_gpio, 0);
} }
DBG("gpio on"); DBG("gpio on");
} else { } else {
...@@ -66,15 +84,23 @@ static int gpio_config(struct hdmi *hdmi, bool on) ...@@ -66,15 +84,23 @@ static int gpio_config(struct hdmi *hdmi, bool on)
gpio_free(config->ddc_data_gpio); gpio_free(config->ddc_data_gpio);
gpio_free(config->hpd_gpio); gpio_free(config->hpd_gpio);
if (config->pmic_gpio != -1) { if (config->mux_en_gpio != -1) {
gpio_set_value_cansleep(config->pmic_gpio, 1); gpio_set_value_cansleep(config->mux_en_gpio, 0);
gpio_free(config->pmic_gpio); gpio_free(config->mux_en_gpio);
}
if (config->mux_sel_gpio != -1) {
gpio_set_value_cansleep(config->mux_sel_gpio, 1);
gpio_free(config->mux_sel_gpio);
} }
DBG("gpio off"); DBG("gpio off");
} }
return 0; return 0;
error5:
if (config->mux_en_gpio != -1)
gpio_free(config->mux_en_gpio);
error4: error4:
gpio_free(config->hpd_gpio); gpio_free(config->hpd_gpio);
error3: error3:
...@@ -88,10 +114,11 @@ static int gpio_config(struct hdmi *hdmi, bool on) ...@@ -88,10 +114,11 @@ static int gpio_config(struct hdmi *hdmi, bool on)
static int hpd_enable(struct hdmi_connector *hdmi_connector) static int hpd_enable(struct hdmi_connector *hdmi_connector)
{ {
struct hdmi *hdmi = hdmi_connector->hdmi; struct hdmi *hdmi = hdmi_connector->hdmi;
const struct hdmi_platform_config *config = hdmi->config;
struct drm_device *dev = hdmi_connector->base.dev; struct drm_device *dev = hdmi_connector->base.dev;
struct hdmi_phy *phy = hdmi->phy; struct hdmi_phy *phy = hdmi->phy;
uint32_t hpd_ctrl; uint32_t hpd_ctrl;
int ret; int i, ret;
ret = gpio_config(hdmi, true); ret = gpio_config(hdmi, true);
if (ret) { if (ret) {
...@@ -99,31 +126,22 @@ static int hpd_enable(struct hdmi_connector *hdmi_connector) ...@@ -99,31 +126,22 @@ static int hpd_enable(struct hdmi_connector *hdmi_connector)
goto fail; goto fail;
} }
ret = clk_prepare_enable(hdmi->clk); for (i = 0; i < config->hpd_clk_cnt; i++) {
if (ret) { ret = clk_prepare_enable(hdmi->hpd_clks[i]);
dev_err(dev->dev, "failed to enable 'clk': %d\n", ret); if (ret) {
goto fail; dev_err(dev->dev, "failed to enable hpd clk: %s (%d)\n",
} config->hpd_clk_names[i], ret);
goto fail;
ret = clk_prepare_enable(hdmi->m_pclk); }
if (ret) {
dev_err(dev->dev, "failed to enable 'm_pclk': %d\n", ret);
goto fail;
}
ret = clk_prepare_enable(hdmi->s_pclk);
if (ret) {
dev_err(dev->dev, "failed to enable 's_pclk': %d\n", ret);
goto fail;
} }
if (hdmi->mpp0) for (i = 0; i < config->hpd_reg_cnt; i++) {
ret = regulator_enable(hdmi->mpp0); ret = regulator_enable(hdmi->hpd_regs[i]);
if (!ret) if (ret) {
ret = regulator_enable(hdmi->mvs); dev_err(dev->dev, "failed to enable hpd regulator: %s (%d)\n",
if (ret) { config->hpd_reg_names[i], ret);
dev_err(dev->dev, "failed to enable regulators: %d\n", ret); goto fail;
goto fail; }
} }
hdmi_set_mode(hdmi, false); hdmi_set_mode(hdmi, false);
...@@ -156,26 +174,26 @@ static int hpd_enable(struct hdmi_connector *hdmi_connector) ...@@ -156,26 +174,26 @@ static int hpd_enable(struct hdmi_connector *hdmi_connector)
static int hdp_disable(struct hdmi_connector *hdmi_connector) static int hdp_disable(struct hdmi_connector *hdmi_connector)
{ {
struct hdmi *hdmi = hdmi_connector->hdmi; struct hdmi *hdmi = hdmi_connector->hdmi;
const struct hdmi_platform_config *config = hdmi->config;
struct drm_device *dev = hdmi_connector->base.dev; struct drm_device *dev = hdmi_connector->base.dev;
int ret = 0; int i, ret = 0;
/* Disable HPD interrupt */ /* Disable HPD interrupt */
hdmi_write(hdmi, REG_HDMI_HPD_INT_CTRL, 0); hdmi_write(hdmi, REG_HDMI_HPD_INT_CTRL, 0);
hdmi_set_mode(hdmi, false); hdmi_set_mode(hdmi, false);
if (hdmi->mpp0) for (i = 0; i < config->hpd_reg_cnt; i++) {
ret = regulator_disable(hdmi->mpp0); ret = regulator_disable(hdmi->hpd_regs[i]);
if (!ret) if (ret) {
ret = regulator_disable(hdmi->mvs); dev_err(dev->dev, "failed to disable hpd regulator: %s (%d)\n",
if (ret) { config->hpd_reg_names[i], ret);
dev_err(dev->dev, "failed to enable regulators: %d\n", ret); goto fail;
goto fail; }
} }
clk_disable_unprepare(hdmi->clk); for (i = 0; i < config->hpd_clk_cnt; i++)
clk_disable_unprepare(hdmi->m_pclk); clk_disable_unprepare(hdmi->hpd_clks[i]);
clk_disable_unprepare(hdmi->s_pclk);
ret = gpio_config(hdmi, false); ret = gpio_config(hdmi, false);
if (ret) { if (ret) {
...@@ -189,9 +207,19 @@ static int hdp_disable(struct hdmi_connector *hdmi_connector) ...@@ -189,9 +207,19 @@ static int hdp_disable(struct hdmi_connector *hdmi_connector)
return ret; return ret;
} }
static void
hotplug_work(struct work_struct *work)
{
struct hdmi_connector *hdmi_connector =
container_of(work, struct hdmi_connector, hpd_work);
struct drm_connector *connector = &hdmi_connector->base;
drm_helper_hpd_irq_event(connector->dev);
}
void hdmi_connector_irq(struct drm_connector *connector) void hdmi_connector_irq(struct drm_connector *connector)
{ {
struct hdmi_connector *hdmi_connector = to_hdmi_connector(connector); struct hdmi_connector *hdmi_connector = to_hdmi_connector(connector);
struct msm_drm_private *priv = connector->dev->dev_private;
struct hdmi *hdmi = hdmi_connector->hdmi; struct hdmi *hdmi = hdmi_connector->hdmi;
uint32_t hpd_int_status, hpd_int_ctrl; uint32_t hpd_int_status, hpd_int_ctrl;
...@@ -209,13 +237,13 @@ void hdmi_connector_irq(struct drm_connector *connector) ...@@ -209,13 +237,13 @@ void hdmi_connector_irq(struct drm_connector *connector)
hdmi_write(hdmi, REG_HDMI_HPD_INT_CTRL, hdmi_write(hdmi, REG_HDMI_HPD_INT_CTRL,
hpd_int_ctrl | HDMI_HPD_INT_CTRL_INT_ACK); hpd_int_ctrl | HDMI_HPD_INT_CTRL_INT_ACK);
drm_helper_hpd_irq_event(connector->dev);
/* detect disconnect if we are connected or vice versa: */ /* detect disconnect if we are connected or vice versa: */
hpd_int_ctrl = HDMI_HPD_INT_CTRL_INT_EN; hpd_int_ctrl = HDMI_HPD_INT_CTRL_INT_EN;
if (!detected) if (!detected)
hpd_int_ctrl |= HDMI_HPD_INT_CTRL_INT_CONNECT; hpd_int_ctrl |= HDMI_HPD_INT_CTRL_INT_CONNECT;
hdmi_write(hdmi, REG_HDMI_HPD_INT_CTRL, hpd_int_ctrl); hdmi_write(hdmi, REG_HDMI_HPD_INT_CTRL, hpd_int_ctrl);
queue_work(priv->wq, &hdmi_connector->hpd_work);
} }
} }
...@@ -224,6 +252,7 @@ static enum drm_connector_status hdmi_connector_detect( ...@@ -224,6 +252,7 @@ static enum drm_connector_status hdmi_connector_detect(
{ {
struct hdmi_connector *hdmi_connector = to_hdmi_connector(connector); struct hdmi_connector *hdmi_connector = to_hdmi_connector(connector);
struct hdmi *hdmi = hdmi_connector->hdmi; struct hdmi *hdmi = hdmi_connector->hdmi;
const struct hdmi_platform_config *config = hdmi->config;
uint32_t hpd_int_status; uint32_t hpd_int_status;
int retry = 20; int retry = 20;
...@@ -233,6 +262,14 @@ static enum drm_connector_status hdmi_connector_detect( ...@@ -233,6 +262,14 @@ static enum drm_connector_status hdmi_connector_detect(
* let that trick us into thinking the monitor is gone: * let that trick us into thinking the monitor is gone:
*/ */
while (retry-- && !(hpd_int_status & HDMI_HPD_INT_STATUS_CABLE_DETECTED)) { while (retry-- && !(hpd_int_status & HDMI_HPD_INT_STATUS_CABLE_DETECTED)) {
/* hdmi debounce logic seems to get stuck sometimes,
* read the gpio directly to get a second opinion:
*/
if (gpio_get_value(config->hpd_gpio)) {
DBG("gpio tells us we are connected!");
hpd_int_status |= HDMI_HPD_INT_STATUS_CABLE_DETECTED;
break;
}
mdelay(10); mdelay(10);
hpd_int_status = hdmi_read(hdmi, REG_HDMI_HPD_INT_STATUS); hpd_int_status = hdmi_read(hdmi, REG_HDMI_HPD_INT_STATUS);
DBG("status=%08x", hpd_int_status); DBG("status=%08x", hpd_int_status);
...@@ -285,6 +322,8 @@ static int hdmi_connector_mode_valid(struct drm_connector *connector, ...@@ -285,6 +322,8 @@ static int hdmi_connector_mode_valid(struct drm_connector *connector,
struct drm_display_mode *mode) struct drm_display_mode *mode)
{ {
struct hdmi_connector *hdmi_connector = to_hdmi_connector(connector); struct hdmi_connector *hdmi_connector = to_hdmi_connector(connector);
struct hdmi *hdmi = hdmi_connector->hdmi;
const struct hdmi_platform_config *config = hdmi->config;
struct msm_drm_private *priv = connector->dev->dev_private; struct msm_drm_private *priv = connector->dev->dev_private;
struct msm_kms *kms = priv->kms; struct msm_kms *kms = priv->kms;
long actual, requested; long actual, requested;
...@@ -293,6 +332,13 @@ static int hdmi_connector_mode_valid(struct drm_connector *connector, ...@@ -293,6 +332,13 @@ static int hdmi_connector_mode_valid(struct drm_connector *connector,
actual = kms->funcs->round_pixclk(kms, actual = kms->funcs->round_pixclk(kms,
requested, hdmi_connector->hdmi->encoder); requested, hdmi_connector->hdmi->encoder);
/* for mdp5/apq8074, we manage our own pixel clk (as opposed to
* mdp4/dtv stuff where pixel clk is assigned to mdp/encoder
* instead):
*/
if (config->pwr_clk_cnt > 0)
actual = clk_round_rate(hdmi->pwr_clks[0], actual);
DBG("requested=%ld, actual=%ld", requested, actual); DBG("requested=%ld, actual=%ld", requested, actual);
if (actual != requested) if (actual != requested)
...@@ -335,6 +381,7 @@ struct drm_connector *hdmi_connector_init(struct hdmi *hdmi) ...@@ -335,6 +381,7 @@ struct drm_connector *hdmi_connector_init(struct hdmi *hdmi)
} }
hdmi_connector->hdmi = hdmi_reference(hdmi); hdmi_connector->hdmi = hdmi_reference(hdmi);
INIT_WORK(&hdmi_connector->hpd_work, hotplug_work);
connector = &hdmi_connector->base; connector = &hdmi_connector->base;
......
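drm_helper_hpd_irq_event() re-probes connectors and can sleep, so the irq path above only acks and re-arms the interrupt and defers the notification to the driver workqueue. A generic sketch of that deferral pattern, with hypothetical struct and function names:

#include <linux/workqueue.h>
#include <drm/drm_crtc_helper.h>

struct example_conn {
	struct drm_connector base;
	struct work_struct hpd_work;
};

static void example_hpd_work(struct work_struct *work)
{
	struct example_conn *c =
		container_of(work, struct example_conn, hpd_work);

	drm_helper_hpd_irq_event(c->base.dev);	/* may sleep: fine here */
}

/* called from the (non-sleeping) irq handler: */
static void example_hpd_irq(struct example_conn *c, struct workqueue_struct *wq)
{
	queue_work(wq, &c->hpd_work);
}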
/*
* Copyright (C) 2013 Red Hat
* Author: Rob Clark <robdclark@gmail.com>
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 as published by
* the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
* You should have received a copy of the GNU General Public License along with
* this program. If not, see <http://www.gnu.org/licenses/>.
*/
#include "hdmi.h"
struct hdmi_phy_8x74 {
struct hdmi_phy base;
struct hdmi *hdmi;
void __iomem *mmio;
};
#define to_hdmi_phy_8x74(x) container_of(x, struct hdmi_phy_8x74, base)
static void phy_write(struct hdmi_phy_8x74 *phy, u32 reg, u32 data)
{
msm_writel(data, phy->mmio + reg);
}
//static u32 phy_read(struct hdmi_phy_8x74 *phy, u32 reg)
//{
// return msm_readl(phy->mmio + reg);
//}
static void hdmi_phy_8x74_destroy(struct hdmi_phy *phy)
{
struct hdmi_phy_8x74 *phy_8x74 = to_hdmi_phy_8x74(phy);
kfree(phy_8x74);
}
static void hdmi_phy_8x74_reset(struct hdmi_phy *phy)
{
struct hdmi_phy_8x74 *phy_8x74 = to_hdmi_phy_8x74(phy);
struct hdmi *hdmi = phy_8x74->hdmi;
unsigned int val;
/* NOTE that HDMI_PHY_CTRL is in core mmio, not phy mmio: */
val = hdmi_read(hdmi, REG_HDMI_PHY_CTRL);
if (val & HDMI_PHY_CTRL_SW_RESET_LOW) {
/* pull low */
hdmi_write(hdmi, REG_HDMI_PHY_CTRL,
val & ~HDMI_PHY_CTRL_SW_RESET);
} else {
/* pull high */
hdmi_write(hdmi, REG_HDMI_PHY_CTRL,
val | HDMI_PHY_CTRL_SW_RESET);
}
if (val & HDMI_PHY_CTRL_SW_RESET_PLL_LOW) {
/* pull low */
hdmi_write(hdmi, REG_HDMI_PHY_CTRL,
val & ~HDMI_PHY_CTRL_SW_RESET_PLL);
} else {
/* pull high */
hdmi_write(hdmi, REG_HDMI_PHY_CTRL,
val | HDMI_PHY_CTRL_SW_RESET_PLL);
}
msleep(100);
if (val & HDMI_PHY_CTRL_SW_RESET_LOW) {
/* pull high */
hdmi_write(hdmi, REG_HDMI_PHY_CTRL,
val | HDMI_PHY_CTRL_SW_RESET);
} else {
/* pull low */
hdmi_write(hdmi, REG_HDMI_PHY_CTRL,
val & ~HDMI_PHY_CTRL_SW_RESET);
}
if (val & HDMI_PHY_CTRL_SW_RESET_PLL_LOW) {
/* pull high */
hdmi_write(hdmi, REG_HDMI_PHY_CTRL,
val | HDMI_PHY_CTRL_SW_RESET_PLL);
} else {
/* pull low */
hdmi_write(hdmi, REG_HDMI_PHY_CTRL,
val & ~HDMI_PHY_CTRL_SW_RESET_PLL);
}
}
static void hdmi_phy_8x74_powerup(struct hdmi_phy *phy,
unsigned long int pixclock)
{
struct hdmi_phy_8x74 *phy_8x74 = to_hdmi_phy_8x74(phy);
phy_write(phy_8x74, REG_HDMI_8x74_ANA_CFG0, 0x1b);
phy_write(phy_8x74, REG_HDMI_8x74_ANA_CFG1, 0xf2);
phy_write(phy_8x74, REG_HDMI_8x74_BIST_CFG0, 0x0);
phy_write(phy_8x74, REG_HDMI_8x74_BIST_PATN0, 0x0);
phy_write(phy_8x74, REG_HDMI_8x74_BIST_PATN1, 0x0);
phy_write(phy_8x74, REG_HDMI_8x74_BIST_PATN2, 0x0);
phy_write(phy_8x74, REG_HDMI_8x74_BIST_PATN3, 0x0);
phy_write(phy_8x74, REG_HDMI_8x74_PD_CTRL1, 0x20);
}
static void hdmi_phy_8x74_powerdown(struct hdmi_phy *phy)
{
struct hdmi_phy_8x74 *phy_8x74 = to_hdmi_phy_8x74(phy);
phy_write(phy_8x74, REG_HDMI_8x74_PD_CTRL0, 0x7f);
}
static const struct hdmi_phy_funcs hdmi_phy_8x74_funcs = {
.destroy = hdmi_phy_8x74_destroy,
.reset = hdmi_phy_8x74_reset,
.powerup = hdmi_phy_8x74_powerup,
.powerdown = hdmi_phy_8x74_powerdown,
};
struct hdmi_phy *hdmi_phy_8x74_init(struct hdmi *hdmi)
{
struct hdmi_phy_8x74 *phy_8x74;
struct hdmi_phy *phy = NULL;
int ret;
phy_8x74 = kzalloc(sizeof(*phy_8x74), GFP_KERNEL);
if (!phy_8x74) {
ret = -ENOMEM;
goto fail;
}
phy = &phy_8x74->base;
phy->funcs = &hdmi_phy_8x74_funcs;
phy_8x74->hdmi = hdmi;
/* for 8x74, the phy mmio is mapped separately: */
phy_8x74->mmio = msm_ioremap(hdmi->pdev,
"phy_physical", "HDMI_8x74");
if (IS_ERR(phy_8x74->mmio)) {
ret = PTR_ERR(phy_8x74->mmio);
goto fail;
}
return phy;
fail:
if (phy)
hdmi_phy_8x74_destroy(phy);
return ERR_PTR(ret);
}
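The new 8x74 phy plugs in through the same phy_init hook the 8960/8x60 phys use, so the core code simply calls whatever the platform config points at. A hedged sketch of that selection step with a hypothetical wrapper function; the exact error handling in hdmi_init() may differ:

#include <linux/err.h>
#include "hdmi.h"

static int example_attach_phy(struct hdmi *hdmi)
{
	const struct hdmi_platform_config *config = hdmi->config;
	struct hdmi_phy *phy;

	if (!config->phy_init)
		return 0;	/* no phy hook on this platform */

	phy = config->phy_init(hdmi);	/* e.g. hdmi_phy_8x74_init */
	if (IS_ERR(phy))
		return PTR_ERR(phy);

	hdmi->phy = phy;
	return 0;
}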
...@@ -8,14 +8,16 @@ This file was generated by the rules-ng-ng headergen tool in this git repository ...@@ -8,14 +8,16 @@ This file was generated by the rules-ng-ng headergen tool in this git repository
git clone https://github.com/freedreno/envytools.git git clone https://github.com/freedreno/envytools.git
The rules-ng-ng source files this header was generated from are: The rules-ng-ng source files this header was generated from are:
- /home/robclark/src/freedreno/envytools/rnndb/msm.xml ( 595 bytes, from 2013-07-05 19:21:12) - /home/robclark/src/freedreno/envytools/rnndb/msm.xml ( 647 bytes, from 2013-11-30 14:45:35)
- /home/robclark/src/freedreno/envytools/rnndb/freedreno_copyright.xml ( 1453 bytes, from 2013-03-31 16:51:27) - /home/robclark/src/freedreno/envytools/rnndb/freedreno_copyright.xml ( 1453 bytes, from 2013-03-31 16:51:27)
- /home/robclark/src/freedreno/envytools/rnndb/mdp4/mdp4.xml ( 19332 bytes, from 2013-10-07 16:36:48) - /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp4.xml ( 17996 bytes, from 2013-12-01 19:10:31)
- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp_common.xml ( 1615 bytes, from 2013-11-30 15:00:52)
- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp5.xml ( 22517 bytes, from 2013-12-03 20:59:13)
- /home/robclark/src/freedreno/envytools/rnndb/dsi/dsi.xml ( 11712 bytes, from 2013-08-17 17:13:43) - /home/robclark/src/freedreno/envytools/rnndb/dsi/dsi.xml ( 11712 bytes, from 2013-08-17 17:13:43)
- /home/robclark/src/freedreno/envytools/rnndb/dsi/sfpb.xml ( 344 bytes, from 2013-08-11 19:26:32) - /home/robclark/src/freedreno/envytools/rnndb/dsi/sfpb.xml ( 344 bytes, from 2013-08-11 19:26:32)
- /home/robclark/src/freedreno/envytools/rnndb/dsi/mmss_cc.xml ( 1544 bytes, from 2013-08-16 19:17:05) - /home/robclark/src/freedreno/envytools/rnndb/dsi/mmss_cc.xml ( 1544 bytes, from 2013-08-16 19:17:05)
- /home/robclark/src/freedreno/envytools/rnndb/hdmi/qfprom.xml ( 600 bytes, from 2013-07-05 19:21:12) - /home/robclark/src/freedreno/envytools/rnndb/hdmi/qfprom.xml ( 600 bytes, from 2013-07-05 19:21:12)
- /home/robclark/src/freedreno/envytools/rnndb/hdmi/hdmi.xml ( 19288 bytes, from 2013-08-11 18:14:15) - /home/robclark/src/freedreno/envytools/rnndb/hdmi/hdmi.xml ( 20932 bytes, from 2013-12-01 15:13:04)
Copyright (C) 2013 by the following authors: Copyright (C) 2013 by the following authors:
- Rob Clark <robdclark@gmail.com> (robclark) - Rob Clark <robdclark@gmail.com> (robclark)
......
...@@ -8,14 +8,16 @@ This file was generated by the rules-ng-ng headergen tool in this git repository ...@@ -8,14 +8,16 @@ This file was generated by the rules-ng-ng headergen tool in this git repository
git clone https://github.com/freedreno/envytools.git git clone https://github.com/freedreno/envytools.git
The rules-ng-ng source files this header was generated from are: The rules-ng-ng source files this header was generated from are:
- /home/robclark/src/freedreno/envytools/rnndb/msm.xml ( 595 bytes, from 2013-07-05 19:21:12) - /home/robclark/src/freedreno/envytools/rnndb/msm.xml ( 647 bytes, from 2013-11-30 14:45:35)
- /home/robclark/src/freedreno/envytools/rnndb/freedreno_copyright.xml ( 1453 bytes, from 2013-03-31 16:51:27) - /home/robclark/src/freedreno/envytools/rnndb/freedreno_copyright.xml ( 1453 bytes, from 2013-03-31 16:51:27)
- /home/robclark/src/freedreno/envytools/rnndb/mdp4/mdp4.xml ( 19332 bytes, from 2013-10-07 16:36:48) - /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp4.xml ( 17996 bytes, from 2013-12-01 19:10:31)
- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp_common.xml ( 1615 bytes, from 2013-11-30 15:00:52)
- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp5.xml ( 22517 bytes, from 2013-12-03 20:59:13)
- /home/robclark/src/freedreno/envytools/rnndb/dsi/dsi.xml ( 11712 bytes, from 2013-08-17 17:13:43) - /home/robclark/src/freedreno/envytools/rnndb/dsi/dsi.xml ( 11712 bytes, from 2013-08-17 17:13:43)
- /home/robclark/src/freedreno/envytools/rnndb/dsi/sfpb.xml ( 344 bytes, from 2013-08-11 19:26:32) - /home/robclark/src/freedreno/envytools/rnndb/dsi/sfpb.xml ( 344 bytes, from 2013-08-11 19:26:32)
- /home/robclark/src/freedreno/envytools/rnndb/dsi/mmss_cc.xml ( 1544 bytes, from 2013-08-16 19:17:05) - /home/robclark/src/freedreno/envytools/rnndb/dsi/mmss_cc.xml ( 1544 bytes, from 2013-08-16 19:17:05)
- /home/robclark/src/freedreno/envytools/rnndb/hdmi/qfprom.xml ( 600 bytes, from 2013-07-05 19:21:12) - /home/robclark/src/freedreno/envytools/rnndb/hdmi/qfprom.xml ( 600 bytes, from 2013-07-05 19:21:12)
- /home/robclark/src/freedreno/envytools/rnndb/hdmi/hdmi.xml ( 19288 bytes, from 2013-08-11 18:14:15) - /home/robclark/src/freedreno/envytools/rnndb/hdmi/hdmi.xml ( 20932 bytes, from 2013-12-01 15:13:04)
Copyright (C) 2013 by the following authors: Copyright (C) 2013 by the following authors:
- Rob Clark <robdclark@gmail.com> (robclark) - Rob Clark <robdclark@gmail.com> (robclark)
...@@ -42,27 +44,6 @@ WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. ...@@ -42,27 +44,6 @@ WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
*/ */
enum mdp4_bpc {
BPC1 = 0,
BPC5 = 1,
BPC6 = 2,
BPC8 = 3,
};
enum mdp4_bpc_alpha {
BPC1A = 0,
BPC4A = 1,
BPC6A = 2,
BPC8A = 3,
};
enum mdp4_alpha_type {
FG_CONST = 0,
BG_CONST = 1,
FG_PIXEL = 2,
BG_PIXEL = 3,
};
enum mdp4_pipe { enum mdp4_pipe {
VG1 = 0, VG1 = 0,
VG2 = 1, VG2 = 1,
...@@ -79,15 +60,6 @@ enum mdp4_mixer { ...@@ -79,15 +60,6 @@ enum mdp4_mixer {
MIXER2 = 2, MIXER2 = 2,
}; };
enum mdp4_mixer_stage_id {
STAGE_UNUSED = 0,
STAGE_BASE = 1,
STAGE0 = 2,
STAGE1 = 3,
STAGE2 = 4,
STAGE3 = 5,
};
enum mdp4_intf { enum mdp4_intf {
INTF_LCDC_DTV = 0, INTF_LCDC_DTV = 0,
INTF_DSI_VIDEO = 1, INTF_DSI_VIDEO = 1,
...@@ -194,56 +166,56 @@ static inline uint32_t MDP4_DISP_INTF_SEL_EXT(enum mdp4_intf val) ...@@ -194,56 +166,56 @@ static inline uint32_t MDP4_DISP_INTF_SEL_EXT(enum mdp4_intf val)
#define REG_MDP4_LAYERMIXER2_IN_CFG 0x000100f0 #define REG_MDP4_LAYERMIXER2_IN_CFG 0x000100f0
#define MDP4_LAYERMIXER2_IN_CFG_PIPE0__MASK 0x00000007 #define MDP4_LAYERMIXER2_IN_CFG_PIPE0__MASK 0x00000007
#define MDP4_LAYERMIXER2_IN_CFG_PIPE0__SHIFT 0 #define MDP4_LAYERMIXER2_IN_CFG_PIPE0__SHIFT 0
static inline uint32_t MDP4_LAYERMIXER2_IN_CFG_PIPE0(enum mdp4_mixer_stage_id val) static inline uint32_t MDP4_LAYERMIXER2_IN_CFG_PIPE0(enum mdp_mixer_stage_id val)
{ {
return ((val) << MDP4_LAYERMIXER2_IN_CFG_PIPE0__SHIFT) & MDP4_LAYERMIXER2_IN_CFG_PIPE0__MASK; return ((val) << MDP4_LAYERMIXER2_IN_CFG_PIPE0__SHIFT) & MDP4_LAYERMIXER2_IN_CFG_PIPE0__MASK;
} }
#define MDP4_LAYERMIXER2_IN_CFG_PIPE0_MIXER1 0x00000008 #define MDP4_LAYERMIXER2_IN_CFG_PIPE0_MIXER1 0x00000008
#define MDP4_LAYERMIXER2_IN_CFG_PIPE1__MASK 0x00000070 #define MDP4_LAYERMIXER2_IN_CFG_PIPE1__MASK 0x00000070
#define MDP4_LAYERMIXER2_IN_CFG_PIPE1__SHIFT 4 #define MDP4_LAYERMIXER2_IN_CFG_PIPE1__SHIFT 4
static inline uint32_t MDP4_LAYERMIXER2_IN_CFG_PIPE1(enum mdp4_mixer_stage_id val) static inline uint32_t MDP4_LAYERMIXER2_IN_CFG_PIPE1(enum mdp_mixer_stage_id val)
{ {
return ((val) << MDP4_LAYERMIXER2_IN_CFG_PIPE1__SHIFT) & MDP4_LAYERMIXER2_IN_CFG_PIPE1__MASK; return ((val) << MDP4_LAYERMIXER2_IN_CFG_PIPE1__SHIFT) & MDP4_LAYERMIXER2_IN_CFG_PIPE1__MASK;
} }
#define MDP4_LAYERMIXER2_IN_CFG_PIPE1_MIXER1 0x00000080 #define MDP4_LAYERMIXER2_IN_CFG_PIPE1_MIXER1 0x00000080
#define MDP4_LAYERMIXER2_IN_CFG_PIPE2__MASK 0x00000700 #define MDP4_LAYERMIXER2_IN_CFG_PIPE2__MASK 0x00000700
#define MDP4_LAYERMIXER2_IN_CFG_PIPE2__SHIFT 8 #define MDP4_LAYERMIXER2_IN_CFG_PIPE2__SHIFT 8
static inline uint32_t MDP4_LAYERMIXER2_IN_CFG_PIPE2(enum mdp4_mixer_stage_id val) static inline uint32_t MDP4_LAYERMIXER2_IN_CFG_PIPE2(enum mdp_mixer_stage_id val)
{ {
return ((val) << MDP4_LAYERMIXER2_IN_CFG_PIPE2__SHIFT) & MDP4_LAYERMIXER2_IN_CFG_PIPE2__MASK; return ((val) << MDP4_LAYERMIXER2_IN_CFG_PIPE2__SHIFT) & MDP4_LAYERMIXER2_IN_CFG_PIPE2__MASK;
} }
#define MDP4_LAYERMIXER2_IN_CFG_PIPE2_MIXER1 0x00000800 #define MDP4_LAYERMIXER2_IN_CFG_PIPE2_MIXER1 0x00000800
#define MDP4_LAYERMIXER2_IN_CFG_PIPE3__MASK 0x00007000 #define MDP4_LAYERMIXER2_IN_CFG_PIPE3__MASK 0x00007000
#define MDP4_LAYERMIXER2_IN_CFG_PIPE3__SHIFT 12 #define MDP4_LAYERMIXER2_IN_CFG_PIPE3__SHIFT 12
static inline uint32_t MDP4_LAYERMIXER2_IN_CFG_PIPE3(enum mdp4_mixer_stage_id val) static inline uint32_t MDP4_LAYERMIXER2_IN_CFG_PIPE3(enum mdp_mixer_stage_id val)
{ {
return ((val) << MDP4_LAYERMIXER2_IN_CFG_PIPE3__SHIFT) & MDP4_LAYERMIXER2_IN_CFG_PIPE3__MASK; return ((val) << MDP4_LAYERMIXER2_IN_CFG_PIPE3__SHIFT) & MDP4_LAYERMIXER2_IN_CFG_PIPE3__MASK;
} }
#define MDP4_LAYERMIXER2_IN_CFG_PIPE3_MIXER1 0x00008000 #define MDP4_LAYERMIXER2_IN_CFG_PIPE3_MIXER1 0x00008000
#define MDP4_LAYERMIXER2_IN_CFG_PIPE4__MASK 0x00070000 #define MDP4_LAYERMIXER2_IN_CFG_PIPE4__MASK 0x00070000
#define MDP4_LAYERMIXER2_IN_CFG_PIPE4__SHIFT 16 #define MDP4_LAYERMIXER2_IN_CFG_PIPE4__SHIFT 16
static inline uint32_t MDP4_LAYERMIXER2_IN_CFG_PIPE4(enum mdp4_mixer_stage_id val) static inline uint32_t MDP4_LAYERMIXER2_IN_CFG_PIPE4(enum mdp_mixer_stage_id val)
{ {
return ((val) << MDP4_LAYERMIXER2_IN_CFG_PIPE4__SHIFT) & MDP4_LAYERMIXER2_IN_CFG_PIPE4__MASK; return ((val) << MDP4_LAYERMIXER2_IN_CFG_PIPE4__SHIFT) & MDP4_LAYERMIXER2_IN_CFG_PIPE4__MASK;
} }
#define MDP4_LAYERMIXER2_IN_CFG_PIPE4_MIXER1 0x00080000 #define MDP4_LAYERMIXER2_IN_CFG_PIPE4_MIXER1 0x00080000
#define MDP4_LAYERMIXER2_IN_CFG_PIPE5__MASK 0x00700000 #define MDP4_LAYERMIXER2_IN_CFG_PIPE5__MASK 0x00700000
#define MDP4_LAYERMIXER2_IN_CFG_PIPE5__SHIFT 20 #define MDP4_LAYERMIXER2_IN_CFG_PIPE5__SHIFT 20
static inline uint32_t MDP4_LAYERMIXER2_IN_CFG_PIPE5(enum mdp4_mixer_stage_id val) static inline uint32_t MDP4_LAYERMIXER2_IN_CFG_PIPE5(enum mdp_mixer_stage_id val)
{ {
return ((val) << MDP4_LAYERMIXER2_IN_CFG_PIPE5__SHIFT) & MDP4_LAYERMIXER2_IN_CFG_PIPE5__MASK; return ((val) << MDP4_LAYERMIXER2_IN_CFG_PIPE5__SHIFT) & MDP4_LAYERMIXER2_IN_CFG_PIPE5__MASK;
} }
#define MDP4_LAYERMIXER2_IN_CFG_PIPE5_MIXER1 0x00800000 #define MDP4_LAYERMIXER2_IN_CFG_PIPE5_MIXER1 0x00800000
#define MDP4_LAYERMIXER2_IN_CFG_PIPE6__MASK 0x07000000 #define MDP4_LAYERMIXER2_IN_CFG_PIPE6__MASK 0x07000000
#define MDP4_LAYERMIXER2_IN_CFG_PIPE6__SHIFT 24 #define MDP4_LAYERMIXER2_IN_CFG_PIPE6__SHIFT 24
static inline uint32_t MDP4_LAYERMIXER2_IN_CFG_PIPE6(enum mdp4_mixer_stage_id val) static inline uint32_t MDP4_LAYERMIXER2_IN_CFG_PIPE6(enum mdp_mixer_stage_id val)
{ {
return ((val) << MDP4_LAYERMIXER2_IN_CFG_PIPE6__SHIFT) & MDP4_LAYERMIXER2_IN_CFG_PIPE6__MASK; return ((val) << MDP4_LAYERMIXER2_IN_CFG_PIPE6__SHIFT) & MDP4_LAYERMIXER2_IN_CFG_PIPE6__MASK;
} }
#define MDP4_LAYERMIXER2_IN_CFG_PIPE6_MIXER1 0x08000000 #define MDP4_LAYERMIXER2_IN_CFG_PIPE6_MIXER1 0x08000000
#define MDP4_LAYERMIXER2_IN_CFG_PIPE7__MASK 0x70000000 #define MDP4_LAYERMIXER2_IN_CFG_PIPE7__MASK 0x70000000
#define MDP4_LAYERMIXER2_IN_CFG_PIPE7__SHIFT 28 #define MDP4_LAYERMIXER2_IN_CFG_PIPE7__SHIFT 28
static inline uint32_t MDP4_LAYERMIXER2_IN_CFG_PIPE7(enum mdp4_mixer_stage_id val) static inline uint32_t MDP4_LAYERMIXER2_IN_CFG_PIPE7(enum mdp_mixer_stage_id val)
{ {
return ((val) << MDP4_LAYERMIXER2_IN_CFG_PIPE7__SHIFT) & MDP4_LAYERMIXER2_IN_CFG_PIPE7__MASK; return ((val) << MDP4_LAYERMIXER2_IN_CFG_PIPE7__SHIFT) & MDP4_LAYERMIXER2_IN_CFG_PIPE7__MASK;
} }
...@@ -254,56 +226,56 @@ static inline uint32_t MDP4_LAYERMIXER2_IN_CFG_PIPE7(enum mdp4_mixer_stage_id va ...@@ -254,56 +226,56 @@ static inline uint32_t MDP4_LAYERMIXER2_IN_CFG_PIPE7(enum mdp4_mixer_stage_id va
#define REG_MDP4_LAYERMIXER_IN_CFG 0x00010100 #define REG_MDP4_LAYERMIXER_IN_CFG 0x00010100
#define MDP4_LAYERMIXER_IN_CFG_PIPE0__MASK 0x00000007 #define MDP4_LAYERMIXER_IN_CFG_PIPE0__MASK 0x00000007
#define MDP4_LAYERMIXER_IN_CFG_PIPE0__SHIFT 0 #define MDP4_LAYERMIXER_IN_CFG_PIPE0__SHIFT 0
static inline uint32_t MDP4_LAYERMIXER_IN_CFG_PIPE0(enum mdp4_mixer_stage_id val) static inline uint32_t MDP4_LAYERMIXER_IN_CFG_PIPE0(enum mdp_mixer_stage_id val)
{ {
return ((val) << MDP4_LAYERMIXER_IN_CFG_PIPE0__SHIFT) & MDP4_LAYERMIXER_IN_CFG_PIPE0__MASK; return ((val) << MDP4_LAYERMIXER_IN_CFG_PIPE0__SHIFT) & MDP4_LAYERMIXER_IN_CFG_PIPE0__MASK;
} }
#define MDP4_LAYERMIXER_IN_CFG_PIPE0_MIXER1 0x00000008 #define MDP4_LAYERMIXER_IN_CFG_PIPE0_MIXER1 0x00000008
#define MDP4_LAYERMIXER_IN_CFG_PIPE1__MASK 0x00000070 #define MDP4_LAYERMIXER_IN_CFG_PIPE1__MASK 0x00000070
#define MDP4_LAYERMIXER_IN_CFG_PIPE1__SHIFT 4 #define MDP4_LAYERMIXER_IN_CFG_PIPE1__SHIFT 4
static inline uint32_t MDP4_LAYERMIXER_IN_CFG_PIPE1(enum mdp4_mixer_stage_id val) static inline uint32_t MDP4_LAYERMIXER_IN_CFG_PIPE1(enum mdp_mixer_stage_id val)
{ {
return ((val) << MDP4_LAYERMIXER_IN_CFG_PIPE1__SHIFT) & MDP4_LAYERMIXER_IN_CFG_PIPE1__MASK; return ((val) << MDP4_LAYERMIXER_IN_CFG_PIPE1__SHIFT) & MDP4_LAYERMIXER_IN_CFG_PIPE1__MASK;
} }
#define MDP4_LAYERMIXER_IN_CFG_PIPE1_MIXER1 0x00000080 #define MDP4_LAYERMIXER_IN_CFG_PIPE1_MIXER1 0x00000080
#define MDP4_LAYERMIXER_IN_CFG_PIPE2__MASK 0x00000700 #define MDP4_LAYERMIXER_IN_CFG_PIPE2__MASK 0x00000700
#define MDP4_LAYERMIXER_IN_CFG_PIPE2__SHIFT 8 #define MDP4_LAYERMIXER_IN_CFG_PIPE2__SHIFT 8
static inline uint32_t MDP4_LAYERMIXER_IN_CFG_PIPE2(enum mdp4_mixer_stage_id val) static inline uint32_t MDP4_LAYERMIXER_IN_CFG_PIPE2(enum mdp_mixer_stage_id val)
{ {
return ((val) << MDP4_LAYERMIXER_IN_CFG_PIPE2__SHIFT) & MDP4_LAYERMIXER_IN_CFG_PIPE2__MASK; return ((val) << MDP4_LAYERMIXER_IN_CFG_PIPE2__SHIFT) & MDP4_LAYERMIXER_IN_CFG_PIPE2__MASK;
} }
#define MDP4_LAYERMIXER_IN_CFG_PIPE2_MIXER1 0x00000800 #define MDP4_LAYERMIXER_IN_CFG_PIPE2_MIXER1 0x00000800
#define MDP4_LAYERMIXER_IN_CFG_PIPE3__MASK 0x00007000 #define MDP4_LAYERMIXER_IN_CFG_PIPE3__MASK 0x00007000
#define MDP4_LAYERMIXER_IN_CFG_PIPE3__SHIFT 12 #define MDP4_LAYERMIXER_IN_CFG_PIPE3__SHIFT 12
static inline uint32_t MDP4_LAYERMIXER_IN_CFG_PIPE3(enum mdp4_mixer_stage_id val) static inline uint32_t MDP4_LAYERMIXER_IN_CFG_PIPE3(enum mdp_mixer_stage_id val)
{ {
return ((val) << MDP4_LAYERMIXER_IN_CFG_PIPE3__SHIFT) & MDP4_LAYERMIXER_IN_CFG_PIPE3__MASK; return ((val) << MDP4_LAYERMIXER_IN_CFG_PIPE3__SHIFT) & MDP4_LAYERMIXER_IN_CFG_PIPE3__MASK;
} }
#define MDP4_LAYERMIXER_IN_CFG_PIPE3_MIXER1 0x00008000 #define MDP4_LAYERMIXER_IN_CFG_PIPE3_MIXER1 0x00008000
#define MDP4_LAYERMIXER_IN_CFG_PIPE4__MASK 0x00070000 #define MDP4_LAYERMIXER_IN_CFG_PIPE4__MASK 0x00070000
#define MDP4_LAYERMIXER_IN_CFG_PIPE4__SHIFT 16 #define MDP4_LAYERMIXER_IN_CFG_PIPE4__SHIFT 16
static inline uint32_t MDP4_LAYERMIXER_IN_CFG_PIPE4(enum mdp4_mixer_stage_id val) static inline uint32_t MDP4_LAYERMIXER_IN_CFG_PIPE4(enum mdp_mixer_stage_id val)
{ {
return ((val) << MDP4_LAYERMIXER_IN_CFG_PIPE4__SHIFT) & MDP4_LAYERMIXER_IN_CFG_PIPE4__MASK; return ((val) << MDP4_LAYERMIXER_IN_CFG_PIPE4__SHIFT) & MDP4_LAYERMIXER_IN_CFG_PIPE4__MASK;
} }
#define MDP4_LAYERMIXER_IN_CFG_PIPE4_MIXER1 0x00080000 #define MDP4_LAYERMIXER_IN_CFG_PIPE4_MIXER1 0x00080000
#define MDP4_LAYERMIXER_IN_CFG_PIPE5__MASK 0x00700000 #define MDP4_LAYERMIXER_IN_CFG_PIPE5__MASK 0x00700000
#define MDP4_LAYERMIXER_IN_CFG_PIPE5__SHIFT 20 #define MDP4_LAYERMIXER_IN_CFG_PIPE5__SHIFT 20
static inline uint32_t MDP4_LAYERMIXER_IN_CFG_PIPE5(enum mdp4_mixer_stage_id val) static inline uint32_t MDP4_LAYERMIXER_IN_CFG_PIPE5(enum mdp_mixer_stage_id val)
{ {
return ((val) << MDP4_LAYERMIXER_IN_CFG_PIPE5__SHIFT) & MDP4_LAYERMIXER_IN_CFG_PIPE5__MASK; return ((val) << MDP4_LAYERMIXER_IN_CFG_PIPE5__SHIFT) & MDP4_LAYERMIXER_IN_CFG_PIPE5__MASK;
} }
#define MDP4_LAYERMIXER_IN_CFG_PIPE5_MIXER1 0x00800000 #define MDP4_LAYERMIXER_IN_CFG_PIPE5_MIXER1 0x00800000
#define MDP4_LAYERMIXER_IN_CFG_PIPE6__MASK 0x07000000 #define MDP4_LAYERMIXER_IN_CFG_PIPE6__MASK 0x07000000
#define MDP4_LAYERMIXER_IN_CFG_PIPE6__SHIFT 24 #define MDP4_LAYERMIXER_IN_CFG_PIPE6__SHIFT 24
static inline uint32_t MDP4_LAYERMIXER_IN_CFG_PIPE6(enum mdp4_mixer_stage_id val) static inline uint32_t MDP4_LAYERMIXER_IN_CFG_PIPE6(enum mdp_mixer_stage_id val)
{ {
return ((val) << MDP4_LAYERMIXER_IN_CFG_PIPE6__SHIFT) & MDP4_LAYERMIXER_IN_CFG_PIPE6__MASK; return ((val) << MDP4_LAYERMIXER_IN_CFG_PIPE6__SHIFT) & MDP4_LAYERMIXER_IN_CFG_PIPE6__MASK;
} }
#define MDP4_LAYERMIXER_IN_CFG_PIPE6_MIXER1 0x08000000 #define MDP4_LAYERMIXER_IN_CFG_PIPE6_MIXER1 0x08000000
#define MDP4_LAYERMIXER_IN_CFG_PIPE7__MASK 0x70000000 #define MDP4_LAYERMIXER_IN_CFG_PIPE7__MASK 0x70000000
#define MDP4_LAYERMIXER_IN_CFG_PIPE7__SHIFT 28 #define MDP4_LAYERMIXER_IN_CFG_PIPE7__SHIFT 28
static inline uint32_t MDP4_LAYERMIXER_IN_CFG_PIPE7(enum mdp4_mixer_stage_id val) static inline uint32_t MDP4_LAYERMIXER_IN_CFG_PIPE7(enum mdp_mixer_stage_id val)
{ {
return ((val) << MDP4_LAYERMIXER_IN_CFG_PIPE7__SHIFT) & MDP4_LAYERMIXER_IN_CFG_PIPE7__MASK; return ((val) << MDP4_LAYERMIXER_IN_CFG_PIPE7__SHIFT) & MDP4_LAYERMIXER_IN_CFG_PIPE7__MASK;
} }
...@@ -369,7 +341,7 @@ static inline uint32_t REG_MDP4_OVLP_STAGE(uint32_t i0, uint32_t i1) { return 0x ...@@ -369,7 +341,7 @@ static inline uint32_t REG_MDP4_OVLP_STAGE(uint32_t i0, uint32_t i1) { return 0x
static inline uint32_t REG_MDP4_OVLP_STAGE_OP(uint32_t i0, uint32_t i1) { return 0x00000000 + __offset_OVLP(i0) + __offset_STAGE(i1); } static inline uint32_t REG_MDP4_OVLP_STAGE_OP(uint32_t i0, uint32_t i1) { return 0x00000000 + __offset_OVLP(i0) + __offset_STAGE(i1); }
#define MDP4_OVLP_STAGE_OP_FG_ALPHA__MASK 0x00000003 #define MDP4_OVLP_STAGE_OP_FG_ALPHA__MASK 0x00000003
#define MDP4_OVLP_STAGE_OP_FG_ALPHA__SHIFT 0 #define MDP4_OVLP_STAGE_OP_FG_ALPHA__SHIFT 0
static inline uint32_t MDP4_OVLP_STAGE_OP_FG_ALPHA(enum mdp4_alpha_type val) static inline uint32_t MDP4_OVLP_STAGE_OP_FG_ALPHA(enum mdp_alpha_type val)
{ {
return ((val) << MDP4_OVLP_STAGE_OP_FG_ALPHA__SHIFT) & MDP4_OVLP_STAGE_OP_FG_ALPHA__MASK; return ((val) << MDP4_OVLP_STAGE_OP_FG_ALPHA__SHIFT) & MDP4_OVLP_STAGE_OP_FG_ALPHA__MASK;
} }
...@@ -377,7 +349,7 @@ static inline uint32_t MDP4_OVLP_STAGE_OP_FG_ALPHA(enum mdp4_alpha_type val) ...@@ -377,7 +349,7 @@ static inline uint32_t MDP4_OVLP_STAGE_OP_FG_ALPHA(enum mdp4_alpha_type val)
#define MDP4_OVLP_STAGE_OP_FG_MOD_ALPHA 0x00000008 #define MDP4_OVLP_STAGE_OP_FG_MOD_ALPHA 0x00000008
#define MDP4_OVLP_STAGE_OP_BG_ALPHA__MASK 0x00000030 #define MDP4_OVLP_STAGE_OP_BG_ALPHA__MASK 0x00000030
#define MDP4_OVLP_STAGE_OP_BG_ALPHA__SHIFT 4 #define MDP4_OVLP_STAGE_OP_BG_ALPHA__SHIFT 4
static inline uint32_t MDP4_OVLP_STAGE_OP_BG_ALPHA(enum mdp4_alpha_type val) static inline uint32_t MDP4_OVLP_STAGE_OP_BG_ALPHA(enum mdp_alpha_type val)
{ {
return ((val) << MDP4_OVLP_STAGE_OP_BG_ALPHA__SHIFT) & MDP4_OVLP_STAGE_OP_BG_ALPHA__MASK; return ((val) << MDP4_OVLP_STAGE_OP_BG_ALPHA__SHIFT) & MDP4_OVLP_STAGE_OP_BG_ALPHA__MASK;
} }
...@@ -472,19 +444,19 @@ static inline uint32_t REG_MDP4_DMA(enum mdp4_dma i0) { return 0x00000000 + __of ...@@ -472,19 +444,19 @@ static inline uint32_t REG_MDP4_DMA(enum mdp4_dma i0) { return 0x00000000 + __of
static inline uint32_t REG_MDP4_DMA_CONFIG(enum mdp4_dma i0) { return 0x00000000 + __offset_DMA(i0); } static inline uint32_t REG_MDP4_DMA_CONFIG(enum mdp4_dma i0) { return 0x00000000 + __offset_DMA(i0); }
#define MDP4_DMA_CONFIG_G_BPC__MASK 0x00000003 #define MDP4_DMA_CONFIG_G_BPC__MASK 0x00000003
#define MDP4_DMA_CONFIG_G_BPC__SHIFT 0 #define MDP4_DMA_CONFIG_G_BPC__SHIFT 0
static inline uint32_t MDP4_DMA_CONFIG_G_BPC(enum mdp4_bpc val) static inline uint32_t MDP4_DMA_CONFIG_G_BPC(enum mdp_bpc val)
{ {
return ((val) << MDP4_DMA_CONFIG_G_BPC__SHIFT) & MDP4_DMA_CONFIG_G_BPC__MASK; return ((val) << MDP4_DMA_CONFIG_G_BPC__SHIFT) & MDP4_DMA_CONFIG_G_BPC__MASK;
} }
#define MDP4_DMA_CONFIG_B_BPC__MASK 0x0000000c #define MDP4_DMA_CONFIG_B_BPC__MASK 0x0000000c
#define MDP4_DMA_CONFIG_B_BPC__SHIFT 2 #define MDP4_DMA_CONFIG_B_BPC__SHIFT 2
static inline uint32_t MDP4_DMA_CONFIG_B_BPC(enum mdp4_bpc val) static inline uint32_t MDP4_DMA_CONFIG_B_BPC(enum mdp_bpc val)
{ {
return ((val) << MDP4_DMA_CONFIG_B_BPC__SHIFT) & MDP4_DMA_CONFIG_B_BPC__MASK; return ((val) << MDP4_DMA_CONFIG_B_BPC__SHIFT) & MDP4_DMA_CONFIG_B_BPC__MASK;
} }
#define MDP4_DMA_CONFIG_R_BPC__MASK 0x00000030 #define MDP4_DMA_CONFIG_R_BPC__MASK 0x00000030
#define MDP4_DMA_CONFIG_R_BPC__SHIFT 4 #define MDP4_DMA_CONFIG_R_BPC__SHIFT 4
static inline uint32_t MDP4_DMA_CONFIG_R_BPC(enum mdp4_bpc val) static inline uint32_t MDP4_DMA_CONFIG_R_BPC(enum mdp_bpc val)
{ {
return ((val) << MDP4_DMA_CONFIG_R_BPC__SHIFT) & MDP4_DMA_CONFIG_R_BPC__MASK; return ((val) << MDP4_DMA_CONFIG_R_BPC__SHIFT) & MDP4_DMA_CONFIG_R_BPC__MASK;
} }
...@@ -710,25 +682,25 @@ static inline uint32_t MDP4_PIPE_FRAME_SIZE_WIDTH(uint32_t val) ...@@ -710,25 +682,25 @@ static inline uint32_t MDP4_PIPE_FRAME_SIZE_WIDTH(uint32_t val)
static inline uint32_t REG_MDP4_PIPE_SRC_FORMAT(enum mdp4_pipe i0) { return 0x00020050 + 0x10000*i0; } static inline uint32_t REG_MDP4_PIPE_SRC_FORMAT(enum mdp4_pipe i0) { return 0x00020050 + 0x10000*i0; }
#define MDP4_PIPE_SRC_FORMAT_G_BPC__MASK 0x00000003 #define MDP4_PIPE_SRC_FORMAT_G_BPC__MASK 0x00000003
#define MDP4_PIPE_SRC_FORMAT_G_BPC__SHIFT 0 #define MDP4_PIPE_SRC_FORMAT_G_BPC__SHIFT 0
static inline uint32_t MDP4_PIPE_SRC_FORMAT_G_BPC(enum mdp4_bpc val) static inline uint32_t MDP4_PIPE_SRC_FORMAT_G_BPC(enum mdp_bpc val)
{ {
return ((val) << MDP4_PIPE_SRC_FORMAT_G_BPC__SHIFT) & MDP4_PIPE_SRC_FORMAT_G_BPC__MASK; return ((val) << MDP4_PIPE_SRC_FORMAT_G_BPC__SHIFT) & MDP4_PIPE_SRC_FORMAT_G_BPC__MASK;
} }
#define MDP4_PIPE_SRC_FORMAT_B_BPC__MASK 0x0000000c #define MDP4_PIPE_SRC_FORMAT_B_BPC__MASK 0x0000000c
#define MDP4_PIPE_SRC_FORMAT_B_BPC__SHIFT 2 #define MDP4_PIPE_SRC_FORMAT_B_BPC__SHIFT 2
static inline uint32_t MDP4_PIPE_SRC_FORMAT_B_BPC(enum mdp4_bpc val) static inline uint32_t MDP4_PIPE_SRC_FORMAT_B_BPC(enum mdp_bpc val)
{ {
return ((val) << MDP4_PIPE_SRC_FORMAT_B_BPC__SHIFT) & MDP4_PIPE_SRC_FORMAT_B_BPC__MASK; return ((val) << MDP4_PIPE_SRC_FORMAT_B_BPC__SHIFT) & MDP4_PIPE_SRC_FORMAT_B_BPC__MASK;
} }
#define MDP4_PIPE_SRC_FORMAT_R_BPC__MASK 0x00000030 #define MDP4_PIPE_SRC_FORMAT_R_BPC__MASK 0x00000030
#define MDP4_PIPE_SRC_FORMAT_R_BPC__SHIFT 4 #define MDP4_PIPE_SRC_FORMAT_R_BPC__SHIFT 4
static inline uint32_t MDP4_PIPE_SRC_FORMAT_R_BPC(enum mdp4_bpc val) static inline uint32_t MDP4_PIPE_SRC_FORMAT_R_BPC(enum mdp_bpc val)
{ {
return ((val) << MDP4_PIPE_SRC_FORMAT_R_BPC__SHIFT) & MDP4_PIPE_SRC_FORMAT_R_BPC__MASK; return ((val) << MDP4_PIPE_SRC_FORMAT_R_BPC__SHIFT) & MDP4_PIPE_SRC_FORMAT_R_BPC__MASK;
} }
#define MDP4_PIPE_SRC_FORMAT_A_BPC__MASK 0x000000c0 #define MDP4_PIPE_SRC_FORMAT_A_BPC__MASK 0x000000c0
#define MDP4_PIPE_SRC_FORMAT_A_BPC__SHIFT 6 #define MDP4_PIPE_SRC_FORMAT_A_BPC__SHIFT 6
static inline uint32_t MDP4_PIPE_SRC_FORMAT_A_BPC(enum mdp4_bpc_alpha val) static inline uint32_t MDP4_PIPE_SRC_FORMAT_A_BPC(enum mdp_bpc_alpha val)
{ {
return ((val) << MDP4_PIPE_SRC_FORMAT_A_BPC__SHIFT) & MDP4_PIPE_SRC_FORMAT_A_BPC__MASK; return ((val) << MDP4_PIPE_SRC_FORMAT_A_BPC__SHIFT) & MDP4_PIPE_SRC_FORMAT_A_BPC__MASK;
} }
......
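The generated helpers above all follow the same mask-and-shift pattern: every multi-bit field gets a __MASK/__SHIFT pair plus an inline packer that now takes the shared mdp enum instead of the old mdp4 one. A minimal sketch of composing and writing a pipe source-format word with them, assuming the BPC8/BPC8A enumerators from the old mdp4_bpc/mdp4_bpc_alpha enums carry over unchanged into the shared mdp_bpc/mdp_bpc_alpha enums:

/* Sketch: pack an 8888-style source format for a pipe with the generated
 * field helpers, then write it out.  Assumes BPC8/BPC8A survive the move
 * into the shared mdp enums. */
static void example_set_8888_format(struct mdp4_kms *mdp4_kms, enum mdp4_pipe pipe)
{
	uint32_t val = MDP4_PIPE_SRC_FORMAT_A_BPC(BPC8A) |  /* 8 bit alpha */
		       MDP4_PIPE_SRC_FORMAT_R_BPC(BPC8)  |  /* 8 bit red   */
		       MDP4_PIPE_SRC_FORMAT_G_BPC(BPC8)  |  /* 8 bit green */
		       MDP4_PIPE_SRC_FORMAT_B_BPC(BPC8);    /* 8 bit blue  */

	mdp4_write(mdp4_kms, REG_MDP4_PIPE_SRC_FORMAT(pipe), val);
}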
...@@ -66,15 +66,15 @@ struct mdp4_crtc { ...@@ -66,15 +66,15 @@ struct mdp4_crtc {
/* for unref'ing cursor bo's after scanout completes: */ /* for unref'ing cursor bo's after scanout completes: */
struct drm_flip_work unref_cursor_work; struct drm_flip_work unref_cursor_work;
struct mdp4_irq vblank; struct mdp_irq vblank;
struct mdp4_irq err; struct mdp_irq err;
}; };
#define to_mdp4_crtc(x) container_of(x, struct mdp4_crtc, base) #define to_mdp4_crtc(x) container_of(x, struct mdp4_crtc, base)
static struct mdp4_kms *get_kms(struct drm_crtc *crtc) static struct mdp4_kms *get_kms(struct drm_crtc *crtc)
{ {
struct msm_drm_private *priv = crtc->dev->dev_private; struct msm_drm_private *priv = crtc->dev->dev_private;
return to_mdp4_kms(priv->kms); return to_mdp4_kms(to_mdp_kms(priv->kms));
} }
static void update_fb(struct drm_crtc *crtc, bool async, static void update_fb(struct drm_crtc *crtc, bool async,
...@@ -93,7 +93,7 @@ static void update_fb(struct drm_crtc *crtc, bool async, ...@@ -93,7 +93,7 @@ static void update_fb(struct drm_crtc *crtc, bool async,
if (!async) { if (!async) {
/* enable vblank to pick up the old_fb */ /* enable vblank to pick up the old_fb */
mdp4_irq_register(get_kms(crtc), &mdp4_crtc->vblank); mdp_irq_register(&get_kms(crtc)->base, &mdp4_crtc->vblank);
} }
} }
...@@ -145,7 +145,7 @@ static void request_pending(struct drm_crtc *crtc, uint32_t pending) ...@@ -145,7 +145,7 @@ static void request_pending(struct drm_crtc *crtc, uint32_t pending)
struct mdp4_crtc *mdp4_crtc = to_mdp4_crtc(crtc); struct mdp4_crtc *mdp4_crtc = to_mdp4_crtc(crtc);
atomic_or(pending, &mdp4_crtc->pending); atomic_or(pending, &mdp4_crtc->pending);
mdp4_irq_register(get_kms(crtc), &mdp4_crtc->vblank); mdp_irq_register(&get_kms(crtc)->base, &mdp4_crtc->vblank);
} }
static void pageflip_cb(struct msm_fence_cb *cb) static void pageflip_cb(struct msm_fence_cb *cb)
...@@ -210,9 +210,9 @@ static void mdp4_crtc_dpms(struct drm_crtc *crtc, int mode) ...@@ -210,9 +210,9 @@ static void mdp4_crtc_dpms(struct drm_crtc *crtc, int mode)
if (enabled != mdp4_crtc->enabled) { if (enabled != mdp4_crtc->enabled) {
if (enabled) { if (enabled) {
mdp4_enable(mdp4_kms); mdp4_enable(mdp4_kms);
mdp4_irq_register(mdp4_kms, &mdp4_crtc->err); mdp_irq_register(&mdp4_kms->base, &mdp4_crtc->err);
} else { } else {
mdp4_irq_unregister(mdp4_kms, &mdp4_crtc->err); mdp_irq_unregister(&mdp4_kms->base, &mdp4_crtc->err);
mdp4_disable(mdp4_kms); mdp4_disable(mdp4_kms);
} }
mdp4_crtc->enabled = enabled; mdp4_crtc->enabled = enabled;
...@@ -232,7 +232,7 @@ static void blend_setup(struct drm_crtc *crtc) ...@@ -232,7 +232,7 @@ static void blend_setup(struct drm_crtc *crtc)
struct mdp4_kms *mdp4_kms = get_kms(crtc); struct mdp4_kms *mdp4_kms = get_kms(crtc);
int i, ovlp = mdp4_crtc->ovlp; int i, ovlp = mdp4_crtc->ovlp;
uint32_t mixer_cfg = 0; uint32_t mixer_cfg = 0;
static const enum mdp4_mixer_stage_id stages[] = { static const enum mdp_mixer_stage_id stages[] = {
STAGE_BASE, STAGE0, STAGE1, STAGE2, STAGE3, STAGE_BASE, STAGE0, STAGE1, STAGE2, STAGE3,
}; };
/* statically (for now) map planes to mixer stage (z-order): */ /* statically (for now) map planes to mixer stage (z-order): */
...@@ -262,8 +262,8 @@ static void blend_setup(struct drm_crtc *crtc) ...@@ -262,8 +262,8 @@ static void blend_setup(struct drm_crtc *crtc)
enum mdp4_pipe pipe_id = mdp4_plane_pipe(plane); enum mdp4_pipe pipe_id = mdp4_plane_pipe(plane);
int idx = idxs[pipe_id]; int idx = idxs[pipe_id];
if (idx > 0) { if (idx > 0) {
const struct mdp4_format *format = const struct mdp_format *format =
to_mdp4_format(msm_framebuffer_format(plane->fb)); to_mdp_format(msm_framebuffer_format(plane->fb));
alpha[idx-1] = format->alpha_enable; alpha[idx-1] = format->alpha_enable;
} }
mixer_cfg |= mixercfg(mdp4_crtc->mixer, pipe_id, stages[idx]); mixer_cfg |= mixercfg(mdp4_crtc->mixer, pipe_id, stages[idx]);
...@@ -571,14 +571,14 @@ static const struct drm_crtc_helper_funcs mdp4_crtc_helper_funcs = { ...@@ -571,14 +571,14 @@ static const struct drm_crtc_helper_funcs mdp4_crtc_helper_funcs = {
.load_lut = mdp4_crtc_load_lut, .load_lut = mdp4_crtc_load_lut,
}; };
static void mdp4_crtc_vblank_irq(struct mdp4_irq *irq, uint32_t irqstatus) static void mdp4_crtc_vblank_irq(struct mdp_irq *irq, uint32_t irqstatus)
{ {
struct mdp4_crtc *mdp4_crtc = container_of(irq, struct mdp4_crtc, vblank); struct mdp4_crtc *mdp4_crtc = container_of(irq, struct mdp4_crtc, vblank);
struct drm_crtc *crtc = &mdp4_crtc->base; struct drm_crtc *crtc = &mdp4_crtc->base;
struct msm_drm_private *priv = crtc->dev->dev_private; struct msm_drm_private *priv = crtc->dev->dev_private;
unsigned pending; unsigned pending;
mdp4_irq_unregister(get_kms(crtc), &mdp4_crtc->vblank); mdp_irq_unregister(&get_kms(crtc)->base, &mdp4_crtc->vblank);
pending = atomic_xchg(&mdp4_crtc->pending, 0); pending = atomic_xchg(&mdp4_crtc->pending, 0);
...@@ -593,7 +593,7 @@ static void mdp4_crtc_vblank_irq(struct mdp4_irq *irq, uint32_t irqstatus) ...@@ -593,7 +593,7 @@ static void mdp4_crtc_vblank_irq(struct mdp4_irq *irq, uint32_t irqstatus)
} }
} }
static void mdp4_crtc_err_irq(struct mdp4_irq *irq, uint32_t irqstatus) static void mdp4_crtc_err_irq(struct mdp_irq *irq, uint32_t irqstatus)
{ {
struct mdp4_crtc *mdp4_crtc = container_of(irq, struct mdp4_crtc, err); struct mdp4_crtc *mdp4_crtc = container_of(irq, struct mdp4_crtc, err);
struct drm_crtc *crtc = &mdp4_crtc->base; struct drm_crtc *crtc = &mdp4_crtc->base;
......
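The crtc conversion above also shows the intended usage of the shared irq helpers: a consumer fills in a struct mdp_irq with an irqmask and a callback, registers it against &mdp4_kms->base, and may unregister it again from inside the callback once its work is done. A minimal sketch of that pattern for a hypothetical one-shot consumer (the struct and function names here are illustrative, not part of the driver):

/* Hypothetical one-shot consumer of the shared mdp_irq helpers: arm for a
 * single external vsync and drop the registration from the callback, the
 * same way mdp4_crtc_vblank_irq() does above. */
struct oneshot_vsync {
	struct mdp_irq irq;
	struct mdp4_kms *mdp4_kms;
};

static void oneshot_vsync_irq(struct mdp_irq *irq, uint32_t irqstatus)
{
	struct oneshot_vsync *v = container_of(irq, struct oneshot_vsync, irq);

	/* one-shot: unregister before doing any further work */
	mdp_irq_unregister(&v->mdp4_kms->base, irq);
	DBG("external vsync, status=%08x", irqstatus);
}

static void oneshot_vsync_arm(struct oneshot_vsync *v, struct mdp4_kms *mdp4_kms)
{
	v->mdp4_kms = mdp4_kms;
	v->irq.irq = oneshot_vsync_irq;
	v->irq.irqmask = MDP4_IRQ_EXTERNAL_VSYNC;
	mdp_irq_register(&mdp4_kms->base, &v->irq);
}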
...@@ -15,8 +15,6 @@ ...@@ -15,8 +15,6 @@
* this program. If not, see <http://www.gnu.org/licenses/>. * this program. If not, see <http://www.gnu.org/licenses/>.
*/ */
#include <mach/clk.h>
#include "mdp4_kms.h" #include "mdp4_kms.h"
#include "drm_crtc.h" #include "drm_crtc.h"
...@@ -37,7 +35,7 @@ struct mdp4_dtv_encoder { ...@@ -37,7 +35,7 @@ struct mdp4_dtv_encoder {
static struct mdp4_kms *get_kms(struct drm_encoder *encoder) static struct mdp4_kms *get_kms(struct drm_encoder *encoder)
{ {
struct msm_drm_private *priv = encoder->dev->dev_private; struct msm_drm_private *priv = encoder->dev->dev_private;
return to_mdp4_kms(priv->kms); return to_mdp4_kms(to_mdp_kms(priv->kms));
} }
#ifdef CONFIG_MSM_BUS_SCALING #ifdef CONFIG_MSM_BUS_SCALING
...@@ -139,7 +137,7 @@ static void mdp4_dtv_encoder_dpms(struct drm_encoder *encoder, int mode) ...@@ -139,7 +137,7 @@ static void mdp4_dtv_encoder_dpms(struct drm_encoder *encoder, int mode)
* the settings changes for the new modeset (like new * the settings changes for the new modeset (like new
* scanout buffer) don't latch properly.. * scanout buffer) don't latch properly..
*/ */
mdp4_irq_wait(mdp4_kms, MDP4_IRQ_EXTERNAL_VSYNC); mdp_irq_wait(&mdp4_kms->base, MDP4_IRQ_EXTERNAL_VSYNC);
clk_disable_unprepare(mdp4_dtv_encoder->src_clk); clk_disable_unprepare(mdp4_dtv_encoder->src_clk);
clk_disable_unprepare(mdp4_dtv_encoder->hdmi_clk); clk_disable_unprepare(mdp4_dtv_encoder->hdmi_clk);
......
/*
* Copyright (C) 2013 Red Hat
* Author: Rob Clark <robdclark@gmail.com>
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 as published by
* the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
* You should have received a copy of the GNU General Public License along with
* this program. If not, see <http://www.gnu.org/licenses/>.
*/
#include "msm_drv.h"
#include "mdp4_kms.h"
void mdp4_set_irqmask(struct mdp_kms *mdp_kms, uint32_t irqmask)
{
mdp4_write(to_mdp4_kms(mdp_kms), REG_MDP4_INTR_ENABLE, irqmask);
}
static void mdp4_irq_error_handler(struct mdp_irq *irq, uint32_t irqstatus)
{
DRM_ERROR("errors: %08x\n", irqstatus);
}
void mdp4_irq_preinstall(struct msm_kms *kms)
{
struct mdp4_kms *mdp4_kms = to_mdp4_kms(to_mdp_kms(kms));
mdp4_write(mdp4_kms, REG_MDP4_INTR_CLEAR, 0xffffffff);
}
int mdp4_irq_postinstall(struct msm_kms *kms)
{
struct mdp_kms *mdp_kms = to_mdp_kms(kms);
struct mdp4_kms *mdp4_kms = to_mdp4_kms(mdp_kms);
struct mdp_irq *error_handler = &mdp4_kms->error_handler;
error_handler->irq = mdp4_irq_error_handler;
error_handler->irqmask = MDP4_IRQ_PRIMARY_INTF_UDERRUN |
MDP4_IRQ_EXTERNAL_INTF_UDERRUN;
mdp_irq_register(mdp_kms, error_handler);
return 0;
}
void mdp4_irq_uninstall(struct msm_kms *kms)
{
struct mdp4_kms *mdp4_kms = to_mdp4_kms(to_mdp_kms(kms));
mdp4_write(mdp4_kms, REG_MDP4_INTR_ENABLE, 0x00000000);
}
irqreturn_t mdp4_irq(struct msm_kms *kms)
{
struct mdp_kms *mdp_kms = to_mdp_kms(kms);
struct mdp4_kms *mdp4_kms = to_mdp4_kms(mdp_kms);
struct drm_device *dev = mdp4_kms->dev;
struct msm_drm_private *priv = dev->dev_private;
unsigned int id;
uint32_t status;
status = mdp4_read(mdp4_kms, REG_MDP4_INTR_STATUS);
mdp4_write(mdp4_kms, REG_MDP4_INTR_CLEAR, status);
VERB("status=%08x", status);
for (id = 0; id < priv->num_crtcs; id++)
if (status & mdp4_crtc_vblank(priv->crtcs[id]))
drm_handle_vblank(dev, id);
mdp_dispatch_irqs(mdp_kms, status);
return IRQ_HANDLED;
}
int mdp4_enable_vblank(struct msm_kms *kms, struct drm_crtc *crtc)
{
mdp_update_vblank_mask(to_mdp_kms(kms),
mdp4_crtc_vblank(crtc), true);
return 0;
}
void mdp4_disable_vblank(struct msm_kms *kms, struct drm_crtc *crtc)
{
mdp_update_vblank_mask(to_mdp_kms(kms),
mdp4_crtc_vblank(crtc), false);
}
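The chip-specific handler above only reads and clears MDP4's own status register and hands the status word to mdp_dispatch_irqs(); the fan-out to registered handlers lives in the shared mdp_kms code, which is not part of this file. Roughly, the shared dispatch is expected to walk the list of registered struct mdp_irq entries and call every handler whose mask overlaps the status, along these lines (a sketch of the expected behaviour, assuming the irq_list head moved from the old mdp4_kms struct into struct mdp_kms):

/* Sketch of what the shared dispatch helper is expected to do: invoke each
 * registered handler whose irqmask overlaps the status word.  The _safe
 * iterator matters because handlers (like the vblank one above) may
 * unregister themselves from inside the callback. */
static void example_dispatch_irqs(struct mdp_kms *mdp_kms, uint32_t status)
{
	struct mdp_irq *handler, *n;

	list_for_each_entry_safe(handler, n, &mdp_kms->irq_list, node) {
		if (handler->irqmask & status)
			handler->irq(handler, handler->irqmask & status);
	}
}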
...@@ -17,13 +17,14 @@ ...@@ -17,13 +17,14 @@
#include "msm_drv.h" #include "msm_drv.h"
#include "msm_mmu.h"
#include "mdp4_kms.h" #include "mdp4_kms.h"
static struct mdp4_platform_config *mdp4_get_config(struct platform_device *dev); static struct mdp4_platform_config *mdp4_get_config(struct platform_device *dev);
static int mdp4_hw_init(struct msm_kms *kms) static int mdp4_hw_init(struct msm_kms *kms)
{ {
struct mdp4_kms *mdp4_kms = to_mdp4_kms(kms); struct mdp4_kms *mdp4_kms = to_mdp4_kms(to_mdp_kms(kms));
struct drm_device *dev = mdp4_kms->dev; struct drm_device *dev = mdp4_kms->dev;
uint32_t version, major, minor, dmap_cfg, vg_cfg; uint32_t version, major, minor, dmap_cfg, vg_cfg;
unsigned long clk; unsigned long clk;
...@@ -31,12 +32,14 @@ static int mdp4_hw_init(struct msm_kms *kms) ...@@ -31,12 +32,14 @@ static int mdp4_hw_init(struct msm_kms *kms)
pm_runtime_get_sync(dev->dev); pm_runtime_get_sync(dev->dev);
mdp4_enable(mdp4_kms);
version = mdp4_read(mdp4_kms, REG_MDP4_VERSION); version = mdp4_read(mdp4_kms, REG_MDP4_VERSION);
mdp4_disable(mdp4_kms);
major = FIELD(version, MDP4_VERSION_MAJOR); major = FIELD(version, MDP4_VERSION_MAJOR);
minor = FIELD(version, MDP4_VERSION_MINOR); minor = FIELD(version, MDP4_VERSION_MINOR);
DBG("found MDP version v%d.%d", major, minor); DBG("found MDP4 version v%d.%d", major, minor);
if (major != 4) { if (major != 4) {
dev_err(dev->dev, "unexpected MDP version: v%d.%d\n", dev_err(dev->dev, "unexpected MDP version: v%d.%d\n",
...@@ -130,7 +133,7 @@ static long mdp4_round_pixclk(struct msm_kms *kms, unsigned long rate, ...@@ -130,7 +133,7 @@ static long mdp4_round_pixclk(struct msm_kms *kms, unsigned long rate,
static void mdp4_preclose(struct msm_kms *kms, struct drm_file *file) static void mdp4_preclose(struct msm_kms *kms, struct drm_file *file)
{ {
struct mdp4_kms *mdp4_kms = to_mdp4_kms(kms); struct mdp4_kms *mdp4_kms = to_mdp4_kms(to_mdp_kms(kms));
struct msm_drm_private *priv = mdp4_kms->dev->dev_private; struct msm_drm_private *priv = mdp4_kms->dev->dev_private;
unsigned i; unsigned i;
...@@ -140,11 +143,12 @@ static void mdp4_preclose(struct msm_kms *kms, struct drm_file *file) ...@@ -140,11 +143,12 @@ static void mdp4_preclose(struct msm_kms *kms, struct drm_file *file)
static void mdp4_destroy(struct msm_kms *kms) static void mdp4_destroy(struct msm_kms *kms)
{ {
struct mdp4_kms *mdp4_kms = to_mdp4_kms(kms); struct mdp4_kms *mdp4_kms = to_mdp4_kms(to_mdp_kms(kms));
kfree(mdp4_kms); kfree(mdp4_kms);
} }
static const struct msm_kms_funcs kms_funcs = { static const struct mdp_kms_funcs kms_funcs = {
.base = {
.hw_init = mdp4_hw_init, .hw_init = mdp4_hw_init,
.irq_preinstall = mdp4_irq_preinstall, .irq_preinstall = mdp4_irq_preinstall,
.irq_postinstall = mdp4_irq_postinstall, .irq_postinstall = mdp4_irq_postinstall,
...@@ -152,10 +156,12 @@ static const struct msm_kms_funcs kms_funcs = { ...@@ -152,10 +156,12 @@ static const struct msm_kms_funcs kms_funcs = {
.irq = mdp4_irq, .irq = mdp4_irq,
.enable_vblank = mdp4_enable_vblank, .enable_vblank = mdp4_enable_vblank,
.disable_vblank = mdp4_disable_vblank, .disable_vblank = mdp4_disable_vblank,
.get_format = mdp4_get_format, .get_format = mdp_get_format,
.round_pixclk = mdp4_round_pixclk, .round_pixclk = mdp4_round_pixclk,
.preclose = mdp4_preclose, .preclose = mdp4_preclose,
.destroy = mdp4_destroy, .destroy = mdp4_destroy,
},
.set_irqmask = mdp4_set_irqmask,
}; };
int mdp4_disable(struct mdp4_kms *mdp4_kms) int mdp4_disable(struct mdp4_kms *mdp4_kms)
...@@ -189,6 +195,7 @@ static int modeset_init(struct mdp4_kms *mdp4_kms) ...@@ -189,6 +195,7 @@ static int modeset_init(struct mdp4_kms *mdp4_kms)
struct drm_plane *plane; struct drm_plane *plane;
struct drm_crtc *crtc; struct drm_crtc *crtc;
struct drm_encoder *encoder; struct drm_encoder *encoder;
struct hdmi *hdmi;
int ret; int ret;
/* /*
...@@ -238,9 +245,10 @@ static int modeset_init(struct mdp4_kms *mdp4_kms) ...@@ -238,9 +245,10 @@ static int modeset_init(struct mdp4_kms *mdp4_kms)
encoder->possible_crtcs = 0x1; /* DTV can be hooked to DMA_E */ encoder->possible_crtcs = 0x1; /* DTV can be hooked to DMA_E */
priv->encoders[priv->num_encoders++] = encoder; priv->encoders[priv->num_encoders++] = encoder;
ret = hdmi_init(dev, encoder); hdmi = hdmi_init(dev, encoder);
if (ret) { if (IS_ERR(hdmi)) {
dev_err(dev->dev, "failed to initialize HDMI\n"); ret = PTR_ERR(hdmi);
dev_err(dev->dev, "failed to initialize HDMI: %d\n", ret);
goto fail; goto fail;
} }
...@@ -260,6 +268,7 @@ struct msm_kms *mdp4_kms_init(struct drm_device *dev) ...@@ -260,6 +268,7 @@ struct msm_kms *mdp4_kms_init(struct drm_device *dev)
struct mdp4_platform_config *config = mdp4_get_config(pdev); struct mdp4_platform_config *config = mdp4_get_config(pdev);
struct mdp4_kms *mdp4_kms; struct mdp4_kms *mdp4_kms;
struct msm_kms *kms = NULL; struct msm_kms *kms = NULL;
struct msm_mmu *mmu;
int ret; int ret;
mdp4_kms = kzalloc(sizeof(*mdp4_kms), GFP_KERNEL); mdp4_kms = kzalloc(sizeof(*mdp4_kms), GFP_KERNEL);
...@@ -269,8 +278,9 @@ struct msm_kms *mdp4_kms_init(struct drm_device *dev) ...@@ -269,8 +278,9 @@ struct msm_kms *mdp4_kms_init(struct drm_device *dev)
goto fail; goto fail;
} }
kms = &mdp4_kms->base; mdp_kms_init(&mdp4_kms->base, &kms_funcs);
kms->funcs = &kms_funcs;
kms = &mdp4_kms->base.base;
mdp4_kms->dev = dev; mdp4_kms->dev = dev;
...@@ -322,27 +332,34 @@ struct msm_kms *mdp4_kms_init(struct drm_device *dev) ...@@ -322,27 +332,34 @@ struct msm_kms *mdp4_kms_init(struct drm_device *dev)
clk_set_rate(mdp4_kms->clk, config->max_clk); clk_set_rate(mdp4_kms->clk, config->max_clk);
clk_set_rate(mdp4_kms->lut_clk, config->max_clk); clk_set_rate(mdp4_kms->lut_clk, config->max_clk);
if (!config->iommu) {
dev_err(dev->dev, "no iommu\n");
ret = -ENXIO;
goto fail;
}
/* make sure things are off before attaching iommu (bootloader could /* make sure things are off before attaching iommu (bootloader could
* have left things on, in which case we'll start getting faults if * have left things on, in which case we'll start getting faults if
* we don't disable): * we don't disable):
*/ */
mdp4_enable(mdp4_kms);
mdp4_write(mdp4_kms, REG_MDP4_DTV_ENABLE, 0); mdp4_write(mdp4_kms, REG_MDP4_DTV_ENABLE, 0);
mdp4_write(mdp4_kms, REG_MDP4_LCDC_ENABLE, 0); mdp4_write(mdp4_kms, REG_MDP4_LCDC_ENABLE, 0);
mdp4_write(mdp4_kms, REG_MDP4_DSI_ENABLE, 0); mdp4_write(mdp4_kms, REG_MDP4_DSI_ENABLE, 0);
mdp4_disable(mdp4_kms);
mdelay(16); mdelay(16);
ret = msm_iommu_attach(dev, config->iommu, if (config->iommu) {
iommu_ports, ARRAY_SIZE(iommu_ports)); mmu = msm_iommu_new(dev, config->iommu);
if (ret) if (IS_ERR(mmu)) {
goto fail; ret = PTR_ERR(mmu);
goto fail;
}
ret = mmu->funcs->attach(mmu, iommu_ports,
ARRAY_SIZE(iommu_ports));
if (ret)
goto fail;
} else {
dev_info(dev->dev, "no iommu, fallback to phys "
"contig buffers for scanout\n");
mmu = NULL;
}
mdp4_kms->id = msm_register_iommu(dev, config->iommu); mdp4_kms->id = msm_register_mmu(dev, mmu);
if (mdp4_kms->id < 0) { if (mdp4_kms->id < 0) {
ret = mdp4_kms->id; ret = mdp4_kms->id;
dev_err(dev->dev, "failed to register mdp4 iommu: %d\n", ret); dev_err(dev->dev, "failed to register mdp4 iommu: %d\n", ret);
......
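Two of the changes above share one pattern worth noting: any register access outside the normal enabled path is now bracketed by mdp4_enable()/mdp4_disable(), so the block's clocks are guaranteed to be on for the access (the version read in hw_init, and the shut-everything-off writes before the IOMMU attach). The same bracketing, reduced to a sketch that uses only calls appearing in this hunk:

/* Sketch: bracket a one-off register read with the clock enable/disable
 * helpers, as the hw_init path above now does. */
static uint32_t example_read_version(struct mdp4_kms *mdp4_kms)
{
	uint32_t version;

	mdp4_enable(mdp4_kms);		/* clocks on */
	version = mdp4_read(mdp4_kms, REG_MDP4_VERSION);
	mdp4_disable(mdp4_kms);		/* clocks off */

	return version;
}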
...@@ -18,29 +18,13 @@ ...@@ -18,29 +18,13 @@
#ifndef __MDP4_KMS_H__ #ifndef __MDP4_KMS_H__
#define __MDP4_KMS_H__ #define __MDP4_KMS_H__
#include <linux/clk.h>
#include <linux/platform_device.h>
#include <linux/regulator/consumer.h>
#include "msm_drv.h" #include "msm_drv.h"
#include "msm_kms.h"
#include "mdp/mdp_kms.h"
#include "mdp4.xml.h" #include "mdp4.xml.h"
/* For transiently registering for different MDP4 irqs that various parts
 * of the KMS code need during setup/configuration. These are not
* necessarily the same as what drm_vblank_get/put() are requesting, and
* the hysteresis in drm_vblank_put() is not necessarily desirable for
* internal housekeeping related irq usage.
*/
struct mdp4_irq {
struct list_head node;
uint32_t irqmask;
bool registered;
void (*irq)(struct mdp4_irq *irq, uint32_t irqstatus);
};
struct mdp4_kms { struct mdp4_kms {
struct msm_kms base; struct mdp_kms base;
struct drm_device *dev; struct drm_device *dev;
...@@ -59,11 +43,7 @@ struct mdp4_kms { ...@@ -59,11 +43,7 @@ struct mdp4_kms {
struct clk *pclk; struct clk *pclk;
struct clk *lut_clk; struct clk *lut_clk;
/* irq handling: */ struct mdp_irq error_handler;
bool in_irq;
struct list_head irq_list; /* list of mdp4_irq */
uint32_t vblank_mask; /* irq bits set for userspace vblank */
struct mdp4_irq error_handler;
}; };
#define to_mdp4_kms(x) container_of(x, struct mdp4_kms, base) #define to_mdp4_kms(x) container_of(x, struct mdp4_kms, base)
...@@ -73,16 +53,6 @@ struct mdp4_platform_config { ...@@ -73,16 +53,6 @@ struct mdp4_platform_config {
uint32_t max_clk; uint32_t max_clk;
}; };
struct mdp4_format {
struct msm_format base;
enum mdp4_bpc bpc_r, bpc_g, bpc_b;
enum mdp4_bpc_alpha bpc_a;
uint8_t unpack[4];
bool alpha_enable, unpack_tight;
uint8_t cpp, unpack_count;
};
#define to_mdp4_format(x) container_of(x, struct mdp4_format, base)
static inline void mdp4_write(struct mdp4_kms *mdp4_kms, u32 reg, u32 data) static inline void mdp4_write(struct mdp4_kms *mdp4_kms, u32 reg, u32 data)
{ {
msm_writel(data, mdp4_kms->mmio + reg); msm_writel(data, mdp4_kms->mmio + reg);
...@@ -134,7 +104,7 @@ static inline uint32_t dma2err(enum mdp4_dma dma) ...@@ -134,7 +104,7 @@ static inline uint32_t dma2err(enum mdp4_dma dma)
} }
static inline uint32_t mixercfg(int mixer, enum mdp4_pipe pipe, static inline uint32_t mixercfg(int mixer, enum mdp4_pipe pipe,
enum mdp4_mixer_stage_id stage) enum mdp_mixer_stage_id stage)
{ {
uint32_t mixer_cfg = 0; uint32_t mixer_cfg = 0;
...@@ -178,19 +148,23 @@ static inline uint32_t mixercfg(int mixer, enum mdp4_pipe pipe, ...@@ -178,19 +148,23 @@ static inline uint32_t mixercfg(int mixer, enum mdp4_pipe pipe,
int mdp4_disable(struct mdp4_kms *mdp4_kms); int mdp4_disable(struct mdp4_kms *mdp4_kms);
int mdp4_enable(struct mdp4_kms *mdp4_kms); int mdp4_enable(struct mdp4_kms *mdp4_kms);
void mdp4_set_irqmask(struct mdp_kms *mdp_kms, uint32_t irqmask);
void mdp4_irq_preinstall(struct msm_kms *kms); void mdp4_irq_preinstall(struct msm_kms *kms);
int mdp4_irq_postinstall(struct msm_kms *kms); int mdp4_irq_postinstall(struct msm_kms *kms);
void mdp4_irq_uninstall(struct msm_kms *kms); void mdp4_irq_uninstall(struct msm_kms *kms);
irqreturn_t mdp4_irq(struct msm_kms *kms); irqreturn_t mdp4_irq(struct msm_kms *kms);
void mdp4_irq_wait(struct mdp4_kms *mdp4_kms, uint32_t irqmask);
void mdp4_irq_register(struct mdp4_kms *mdp4_kms, struct mdp4_irq *irq);
void mdp4_irq_unregister(struct mdp4_kms *mdp4_kms, struct mdp4_irq *irq);
int mdp4_enable_vblank(struct msm_kms *kms, struct drm_crtc *crtc); int mdp4_enable_vblank(struct msm_kms *kms, struct drm_crtc *crtc);
void mdp4_disable_vblank(struct msm_kms *kms, struct drm_crtc *crtc); void mdp4_disable_vblank(struct msm_kms *kms, struct drm_crtc *crtc);
uint32_t mdp4_get_formats(enum mdp4_pipe pipe_id, uint32_t *formats, static inline
uint32_t max_formats); uint32_t mdp4_get_formats(enum mdp4_pipe pipe_id, uint32_t *pixel_formats,
const struct msm_format *mdp4_get_format(struct msm_kms *kms, uint32_t format); uint32_t max_formats)
{
/* TODO when we have YUV, we need to filter supported formats
* based on pipe_id..
*/
return mdp_get_formats(pixel_formats, max_formats);
}
void mdp4_plane_install_properties(struct drm_plane *plane, void mdp4_plane_install_properties(struct drm_plane *plane,
struct drm_mode_object *obj); struct drm_mode_object *obj);
......
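With the format table shared, mdp4_get_formats() becomes a static-inline shim over mdp_get_formats() (with a TODO to filter per-pipe once YUV support lands), so callers fill a fourcc array exactly as before. A hypothetical caller, assuming the return value is the number of formats written, as the old prototype suggests:

/* Hypothetical caller: fill 'formats' with up to 'max' fourcc values for
 * the VG1 pipe and return how many were written. */
static uint32_t example_query_formats(uint32_t *formats, uint32_t max)
{
	return mdp4_get_formats(VG1, formats, max);
}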
...@@ -34,7 +34,7 @@ struct mdp4_plane { ...@@ -34,7 +34,7 @@ struct mdp4_plane {
static struct mdp4_kms *get_kms(struct drm_plane *plane) static struct mdp4_kms *get_kms(struct drm_plane *plane)
{ {
struct msm_drm_private *priv = plane->dev->dev_private; struct msm_drm_private *priv = plane->dev->dev_private;
return to_mdp4_kms(priv->kms); return to_mdp4_kms(to_mdp_kms(priv->kms));
} }
static int mdp4_plane_update(struct drm_plane *plane, static int mdp4_plane_update(struct drm_plane *plane,
...@@ -132,7 +132,7 @@ int mdp4_plane_mode_set(struct drm_plane *plane, ...@@ -132,7 +132,7 @@ int mdp4_plane_mode_set(struct drm_plane *plane,
struct mdp4_plane *mdp4_plane = to_mdp4_plane(plane); struct mdp4_plane *mdp4_plane = to_mdp4_plane(plane);
struct mdp4_kms *mdp4_kms = get_kms(plane); struct mdp4_kms *mdp4_kms = get_kms(plane);
enum mdp4_pipe pipe = mdp4_plane->pipe; enum mdp4_pipe pipe = mdp4_plane->pipe;
const struct mdp4_format *format; const struct mdp_format *format;
uint32_t op_mode = 0; uint32_t op_mode = 0;
uint32_t phasex_step = MDP4_VG_PHASE_STEP_DEFAULT; uint32_t phasex_step = MDP4_VG_PHASE_STEP_DEFAULT;
uint32_t phasey_step = MDP4_VG_PHASE_STEP_DEFAULT; uint32_t phasey_step = MDP4_VG_PHASE_STEP_DEFAULT;
...@@ -175,7 +175,7 @@ int mdp4_plane_mode_set(struct drm_plane *plane, ...@@ -175,7 +175,7 @@ int mdp4_plane_mode_set(struct drm_plane *plane,
mdp4_plane_set_scanout(plane, fb); mdp4_plane_set_scanout(plane, fb);
format = to_mdp4_format(msm_framebuffer_format(fb)); format = to_mdp_format(msm_framebuffer_format(fb));
mdp4_write(mdp4_kms, REG_MDP4_PIPE_SRC_FORMAT(pipe), mdp4_write(mdp4_kms, REG_MDP4_PIPE_SRC_FORMAT(pipe),
MDP4_PIPE_SRC_FORMAT_A_BPC(format->bpc_a) | MDP4_PIPE_SRC_FORMAT_A_BPC(format->bpc_a) |
......
#ifndef MDP5_XML
#define MDP5_XML
/* Autogenerated file, DO NOT EDIT manually!
This file was generated by the rules-ng-ng headergen tool in this git repository:
http://github.com/freedreno/envytools/
git clone https://github.com/freedreno/envytools.git
The rules-ng-ng source files this header was generated from are:
- /home/robclark/src/freedreno/envytools/rnndb/msm.xml ( 647 bytes, from 2013-11-30 14:45:35)
- /home/robclark/src/freedreno/envytools/rnndb/freedreno_copyright.xml ( 1453 bytes, from 2013-03-31 16:51:27)
- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp4.xml ( 17996 bytes, from 2013-12-01 19:10:31)
- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp_common.xml ( 1615 bytes, from 2013-11-30 15:00:52)
- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp5.xml ( 22517 bytes, from 2013-12-03 20:59:13)
- /home/robclark/src/freedreno/envytools/rnndb/dsi/dsi.xml ( 11712 bytes, from 2013-08-17 17:13:43)
- /home/robclark/src/freedreno/envytools/rnndb/dsi/sfpb.xml ( 344 bytes, from 2013-08-11 19:26:32)
- /home/robclark/src/freedreno/envytools/rnndb/dsi/mmss_cc.xml ( 1544 bytes, from 2013-08-16 19:17:05)
- /home/robclark/src/freedreno/envytools/rnndb/hdmi/qfprom.xml ( 600 bytes, from 2013-07-05 19:21:12)
- /home/robclark/src/freedreno/envytools/rnndb/hdmi/hdmi.xml ( 20932 bytes, from 2013-12-01 15:13:04)
Copyright (C) 2013 by the following authors:
- Rob Clark <robdclark@gmail.com> (robclark)
Permission is hereby granted, free of charge, to any person obtaining
a copy of this software and associated documentation files (the
"Software"), to deal in the Software without restriction, including
without limitation the rights to use, copy, modify, merge, publish,
distribute, sublicense, and/or sell copies of the Software, and to
permit persons to whom the Software is furnished to do so, subject to
the following conditions:
The above copyright notice and this permission notice (including the
next paragraph) shall be included in all copies or substantial
portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
*/
enum mdp5_intf {
INTF_DSI = 1,
INTF_HDMI = 3,
INTF_LCDC = 5,
INTF_eDP = 9,
};
enum mdp5_intfnum {
NO_INTF = 0,
INTF0 = 1,
INTF1 = 2,
INTF2 = 3,
INTF3 = 4,
};
enum mdp5_pipe {
SSPP_VIG0 = 0,
SSPP_VIG1 = 1,
SSPP_VIG2 = 2,
SSPP_RGB0 = 3,
SSPP_RGB1 = 4,
SSPP_RGB2 = 5,
SSPP_DMA0 = 6,
SSPP_DMA1 = 7,
};
enum mdp5_ctl_mode {
MODE_NONE = 0,
MODE_ROT0 = 1,
MODE_ROT1 = 2,
MODE_WB0 = 3,
MODE_WB1 = 4,
MODE_WFD = 5,
};
enum mdp5_pack_3d {
PACK_3D_FRAME_INT = 0,
PACK_3D_H_ROW_INT = 1,
PACK_3D_V_ROW_INT = 2,
PACK_3D_COL_INT = 3,
};
enum mdp5_chroma_samp_type {
CHROMA_RGB = 0,
CHROMA_H2V1 = 1,
CHROMA_H1V2 = 2,
CHROMA_420 = 3,
};
enum mdp5_scale_filter {
SCALE_FILTER_NEAREST = 0,
SCALE_FILTER_BIL = 1,
SCALE_FILTER_PCMN = 2,
SCALE_FILTER_CA = 3,
};
enum mdp5_pipe_bwc {
BWC_LOSSLESS = 0,
BWC_Q_HIGH = 1,
BWC_Q_MED = 2,
};
enum mdp5_client_id {
CID_UNUSED = 0,
CID_VIG0_Y = 1,
CID_VIG0_CR = 2,
CID_VIG0_CB = 3,
CID_VIG1_Y = 4,
CID_VIG1_CR = 5,
CID_VIG1_CB = 6,
CID_VIG2_Y = 7,
CID_VIG2_CR = 8,
CID_VIG2_CB = 9,
CID_DMA0_Y = 10,
CID_DMA0_CR = 11,
CID_DMA0_CB = 12,
CID_DMA1_Y = 13,
CID_DMA1_CR = 14,
CID_DMA1_CB = 15,
CID_RGB0 = 16,
CID_RGB1 = 17,
CID_RGB2 = 18,
CID_MAX = 19,
};
enum mdp5_igc_type {
IGC_VIG = 0,
IGC_RGB = 1,
IGC_DMA = 2,
IGC_DSPP = 3,
};
#define MDP5_IRQ_INTF0_WB_ROT_COMP 0x00000001
#define MDP5_IRQ_INTF1_WB_ROT_COMP 0x00000002
#define MDP5_IRQ_INTF2_WB_ROT_COMP 0x00000004
#define MDP5_IRQ_INTF3_WB_ROT_COMP 0x00000008
#define MDP5_IRQ_INTF0_WB_WFD 0x00000010
#define MDP5_IRQ_INTF1_WB_WFD 0x00000020
#define MDP5_IRQ_INTF2_WB_WFD 0x00000040
#define MDP5_IRQ_INTF3_WB_WFD 0x00000080
#define MDP5_IRQ_INTF0_PING_PONG_COMP 0x00000100
#define MDP5_IRQ_INTF1_PING_PONG_COMP 0x00000200
#define MDP5_IRQ_INTF2_PING_PONG_COMP 0x00000400
#define MDP5_IRQ_INTF3_PING_PONG_COMP 0x00000800
#define MDP5_IRQ_INTF0_PING_PONG_RD_PTR 0x00001000
#define MDP5_IRQ_INTF1_PING_PONG_RD_PTR 0x00002000
#define MDP5_IRQ_INTF2_PING_PONG_RD_PTR 0x00004000
#define MDP5_IRQ_INTF3_PING_PONG_RD_PTR 0x00008000
#define MDP5_IRQ_INTF0_PING_PONG_WR_PTR 0x00010000
#define MDP5_IRQ_INTF1_PING_PONG_WR_PTR 0x00020000
#define MDP5_IRQ_INTF2_PING_PONG_WR_PTR 0x00040000
#define MDP5_IRQ_INTF3_PING_PONG_WR_PTR 0x00080000
#define MDP5_IRQ_INTF0_PING_PONG_AUTO_REF 0x00100000
#define MDP5_IRQ_INTF1_PING_PONG_AUTO_REF 0x00200000
#define MDP5_IRQ_INTF2_PING_PONG_AUTO_REF 0x00400000
#define MDP5_IRQ_INTF3_PING_PONG_AUTO_REF 0x00800000
#define MDP5_IRQ_INTF0_UNDER_RUN 0x01000000
#define MDP5_IRQ_INTF0_VSYNC 0x02000000
#define MDP5_IRQ_INTF1_UNDER_RUN 0x04000000
#define MDP5_IRQ_INTF1_VSYNC 0x08000000
#define MDP5_IRQ_INTF2_UNDER_RUN 0x10000000
#define MDP5_IRQ_INTF2_VSYNC 0x20000000
#define MDP5_IRQ_INTF3_UNDER_RUN 0x40000000
#define MDP5_IRQ_INTF3_VSYNC 0x80000000
#define REG_MDP5_HW_VERSION 0x00000000
#define REG_MDP5_HW_INTR_STATUS 0x00000010
#define MDP5_HW_INTR_STATUS_INTR_MDP 0x00000001
#define MDP5_HW_INTR_STATUS_INTR_DSI0 0x00000010
#define MDP5_HW_INTR_STATUS_INTR_DSI1 0x00000020
#define MDP5_HW_INTR_STATUS_INTR_HDMI 0x00000100
#define MDP5_HW_INTR_STATUS_INTR_EDP 0x00001000
#define REG_MDP5_MDP_VERSION 0x00000100
#define MDP5_MDP_VERSION_MINOR__MASK 0x00ff0000
#define MDP5_MDP_VERSION_MINOR__SHIFT 16
static inline uint32_t MDP5_MDP_VERSION_MINOR(uint32_t val)
{
return ((val) << MDP5_MDP_VERSION_MINOR__SHIFT) & MDP5_MDP_VERSION_MINOR__MASK;
}
#define MDP5_MDP_VERSION_MAJOR__MASK 0xf0000000
#define MDP5_MDP_VERSION_MAJOR__SHIFT 28
static inline uint32_t MDP5_MDP_VERSION_MAJOR(uint32_t val)
{
return ((val) << MDP5_MDP_VERSION_MAJOR__SHIFT) & MDP5_MDP_VERSION_MAJOR__MASK;
}
#define REG_MDP5_DISP_INTF_SEL 0x00000104
#define MDP5_DISP_INTF_SEL_INTF0__MASK 0x000000ff
#define MDP5_DISP_INTF_SEL_INTF0__SHIFT 0
static inline uint32_t MDP5_DISP_INTF_SEL_INTF0(enum mdp5_intf val)
{
return ((val) << MDP5_DISP_INTF_SEL_INTF0__SHIFT) & MDP5_DISP_INTF_SEL_INTF0__MASK;
}
#define MDP5_DISP_INTF_SEL_INTF1__MASK 0x0000ff00
#define MDP5_DISP_INTF_SEL_INTF1__SHIFT 8
static inline uint32_t MDP5_DISP_INTF_SEL_INTF1(enum mdp5_intf val)
{
return ((val) << MDP5_DISP_INTF_SEL_INTF1__SHIFT) & MDP5_DISP_INTF_SEL_INTF1__MASK;
}
#define MDP5_DISP_INTF_SEL_INTF2__MASK 0x00ff0000
#define MDP5_DISP_INTF_SEL_INTF2__SHIFT 16
static inline uint32_t MDP5_DISP_INTF_SEL_INTF2(enum mdp5_intf val)
{
return ((val) << MDP5_DISP_INTF_SEL_INTF2__SHIFT) & MDP5_DISP_INTF_SEL_INTF2__MASK;
}
#define MDP5_DISP_INTF_SEL_INTF3__MASK 0xff000000
#define MDP5_DISP_INTF_SEL_INTF3__SHIFT 24
static inline uint32_t MDP5_DISP_INTF_SEL_INTF3(enum mdp5_intf val)
{
return ((val) << MDP5_DISP_INTF_SEL_INTF3__SHIFT) & MDP5_DISP_INTF_SEL_INTF3__MASK;
}
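DISP_INTF_SEL is how mdp5 routes the physical interfaces: one byte per INTF slot, each holding an mdp5_intf type. A sketch of selecting HDMI on the fourth slot, assuming a struct mdp5_kms and an mdp5_write() helper analogous to their mdp4 counterparts; which INTF the HDMI block is actually wired to is SoC-specific, so INTF3 here is purely illustrative:

/* Sketch: route HDMI out of INTF3, leaving the other slots unused.
 * struct mdp5_kms and mdp5_write() are assumed to mirror the mdp4
 * helpers; the INTF number is illustrative only. */
static void example_route_hdmi(struct mdp5_kms *mdp5_kms)
{
	mdp5_write(mdp5_kms, REG_MDP5_DISP_INTF_SEL,
			MDP5_DISP_INTF_SEL_INTF3(INTF_HDMI));
}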
#define REG_MDP5_INTR_EN 0x00000110
#define REG_MDP5_INTR_STATUS 0x00000114
#define REG_MDP5_INTR_CLEAR 0x00000118
#define REG_MDP5_HIST_INTR_EN 0x0000011c
#define REG_MDP5_HIST_INTR_STATUS 0x00000120
#define REG_MDP5_HIST_INTR_CLEAR 0x00000124
static inline uint32_t REG_MDP5_SMP_ALLOC_W(uint32_t i0) { return 0x00000180 + 0x4*i0; }
static inline uint32_t REG_MDP5_SMP_ALLOC_W_REG(uint32_t i0) { return 0x00000180 + 0x4*i0; }
#define MDP5_SMP_ALLOC_W_REG_CLIENT0__MASK 0x000000ff
#define MDP5_SMP_ALLOC_W_REG_CLIENT0__SHIFT 0
static inline uint32_t MDP5_SMP_ALLOC_W_REG_CLIENT0(enum mdp5_client_id val)
{
return ((val) << MDP5_SMP_ALLOC_W_REG_CLIENT0__SHIFT) & MDP5_SMP_ALLOC_W_REG_CLIENT0__MASK;
}
#define MDP5_SMP_ALLOC_W_REG_CLIENT1__MASK 0x0000ff00
#define MDP5_SMP_ALLOC_W_REG_CLIENT1__SHIFT 8
static inline uint32_t MDP5_SMP_ALLOC_W_REG_CLIENT1(enum mdp5_client_id val)
{
return ((val) << MDP5_SMP_ALLOC_W_REG_CLIENT1__SHIFT) & MDP5_SMP_ALLOC_W_REG_CLIENT1__MASK;
}
#define MDP5_SMP_ALLOC_W_REG_CLIENT2__MASK 0x00ff0000
#define MDP5_SMP_ALLOC_W_REG_CLIENT2__SHIFT 16
static inline uint32_t MDP5_SMP_ALLOC_W_REG_CLIENT2(enum mdp5_client_id val)
{
return ((val) << MDP5_SMP_ALLOC_W_REG_CLIENT2__SHIFT) & MDP5_SMP_ALLOC_W_REG_CLIENT2__MASK;
}
static inline uint32_t REG_MDP5_SMP_ALLOC_R(uint32_t i0) { return 0x00000230 + 0x4*i0; }
static inline uint32_t REG_MDP5_SMP_ALLOC_R_REG(uint32_t i0) { return 0x00000230 + 0x4*i0; }
#define MDP5_SMP_ALLOC_R_REG_CLIENT0__MASK 0x000000ff
#define MDP5_SMP_ALLOC_R_REG_CLIENT0__SHIFT 0
static inline uint32_t MDP5_SMP_ALLOC_R_REG_CLIENT0(enum mdp5_client_id val)
{
return ((val) << MDP5_SMP_ALLOC_R_REG_CLIENT0__SHIFT) & MDP5_SMP_ALLOC_R_REG_CLIENT0__MASK;
}
#define MDP5_SMP_ALLOC_R_REG_CLIENT1__MASK 0x0000ff00
#define MDP5_SMP_ALLOC_R_REG_CLIENT1__SHIFT 8
static inline uint32_t MDP5_SMP_ALLOC_R_REG_CLIENT1(enum mdp5_client_id val)
{
return ((val) << MDP5_SMP_ALLOC_R_REG_CLIENT1__SHIFT) & MDP5_SMP_ALLOC_R_REG_CLIENT1__MASK;
}
#define MDP5_SMP_ALLOC_R_REG_CLIENT2__MASK 0x00ff0000
#define MDP5_SMP_ALLOC_R_REG_CLIENT2__SHIFT 16
static inline uint32_t MDP5_SMP_ALLOC_R_REG_CLIENT2(enum mdp5_client_id val)
{
return ((val) << MDP5_SMP_ALLOC_R_REG_CLIENT2__SHIFT) & MDP5_SMP_ALLOC_R_REG_CLIENT2__MASK;
}
static inline uint32_t __offset_IGC(enum mdp5_igc_type idx)
{
switch (idx) {
case IGC_VIG: return 0x00000300;
case IGC_RGB: return 0x00000310;
case IGC_DMA: return 0x00000320;
case IGC_DSPP: return 0x00000400;
default: return INVALID_IDX(idx);
}
}
static inline uint32_t REG_MDP5_IGC(enum mdp5_igc_type i0) { return 0x00000000 + __offset_IGC(i0); }
static inline uint32_t REG_MDP5_IGC_LUT(enum mdp5_igc_type i0, uint32_t i1) { return 0x00000000 + __offset_IGC(i0) + 0x4*i1; }
static inline uint32_t REG_MDP5_IGC_LUT_REG(enum mdp5_igc_type i0, uint32_t i1) { return 0x00000000 + __offset_IGC(i0) + 0x4*i1; }
#define MDP5_IGC_LUT_REG_VAL__MASK 0x00000fff
#define MDP5_IGC_LUT_REG_VAL__SHIFT 0
static inline uint32_t MDP5_IGC_LUT_REG_VAL(uint32_t val)
{
return ((val) << MDP5_IGC_LUT_REG_VAL__SHIFT) & MDP5_IGC_LUT_REG_VAL__MASK;
}
#define MDP5_IGC_LUT_REG_INDEX_UPDATE 0x02000000
#define MDP5_IGC_LUT_REG_DISABLE_PIPE_0 0x10000000
#define MDP5_IGC_LUT_REG_DISABLE_PIPE_1 0x20000000
#define MDP5_IGC_LUT_REG_DISABLE_PIPE_2 0x40000000
static inline uint32_t REG_MDP5_CTL(uint32_t i0) { return 0x00000600 + 0x100*i0; }
static inline uint32_t REG_MDP5_CTL_LAYER(uint32_t i0, uint32_t i1) { return 0x00000600 + 0x100*i0 + 0x4*i1; }
static inline uint32_t REG_MDP5_CTL_LAYER_REG(uint32_t i0, uint32_t i1) { return 0x00000600 + 0x100*i0 + 0x4*i1; }
#define MDP5_CTL_LAYER_REG_VIG0__MASK 0x00000007
#define MDP5_CTL_LAYER_REG_VIG0__SHIFT 0
static inline uint32_t MDP5_CTL_LAYER_REG_VIG0(enum mdp_mixer_stage_id val)
{
return ((val) << MDP5_CTL_LAYER_REG_VIG0__SHIFT) & MDP5_CTL_LAYER_REG_VIG0__MASK;
}
#define MDP5_CTL_LAYER_REG_VIG1__MASK 0x00000038
#define MDP5_CTL_LAYER_REG_VIG1__SHIFT 3
static inline uint32_t MDP5_CTL_LAYER_REG_VIG1(enum mdp_mixer_stage_id val)
{
return ((val) << MDP5_CTL_LAYER_REG_VIG1__SHIFT) & MDP5_CTL_LAYER_REG_VIG1__MASK;
}
#define MDP5_CTL_LAYER_REG_VIG2__MASK 0x000001c0
#define MDP5_CTL_LAYER_REG_VIG2__SHIFT 6
static inline uint32_t MDP5_CTL_LAYER_REG_VIG2(enum mdp_mixer_stage_id val)
{
return ((val) << MDP5_CTL_LAYER_REG_VIG2__SHIFT) & MDP5_CTL_LAYER_REG_VIG2__MASK;
}
#define MDP5_CTL_LAYER_REG_RGB0__MASK 0x00000e00
#define MDP5_CTL_LAYER_REG_RGB0__SHIFT 9
static inline uint32_t MDP5_CTL_LAYER_REG_RGB0(enum mdp_mixer_stage_id val)
{
return ((val) << MDP5_CTL_LAYER_REG_RGB0__SHIFT) & MDP5_CTL_LAYER_REG_RGB0__MASK;
}
#define MDP5_CTL_LAYER_REG_RGB1__MASK 0x00007000
#define MDP5_CTL_LAYER_REG_RGB1__SHIFT 12
static inline uint32_t MDP5_CTL_LAYER_REG_RGB1(enum mdp_mixer_stage_id val)
{
return ((val) << MDP5_CTL_LAYER_REG_RGB1__SHIFT) & MDP5_CTL_LAYER_REG_RGB1__MASK;
}
#define MDP5_CTL_LAYER_REG_RGB2__MASK 0x00038000
#define MDP5_CTL_LAYER_REG_RGB2__SHIFT 15
static inline uint32_t MDP5_CTL_LAYER_REG_RGB2(enum mdp_mixer_stage_id val)
{
return ((val) << MDP5_CTL_LAYER_REG_RGB2__SHIFT) & MDP5_CTL_LAYER_REG_RGB2__MASK;
}
#define MDP5_CTL_LAYER_REG_DMA0__MASK 0x001c0000
#define MDP5_CTL_LAYER_REG_DMA0__SHIFT 18
static inline uint32_t MDP5_CTL_LAYER_REG_DMA0(enum mdp_mixer_stage_id val)
{
return ((val) << MDP5_CTL_LAYER_REG_DMA0__SHIFT) & MDP5_CTL_LAYER_REG_DMA0__MASK;
}
#define MDP5_CTL_LAYER_REG_DMA1__MASK 0x00e00000
#define MDP5_CTL_LAYER_REG_DMA1__SHIFT 21
static inline uint32_t MDP5_CTL_LAYER_REG_DMA1(enum mdp_mixer_stage_id val)
{
return ((val) << MDP5_CTL_LAYER_REG_DMA1__SHIFT) & MDP5_CTL_LAYER_REG_DMA1__MASK;
}
#define MDP5_CTL_LAYER_REG_BORDER_COLOR 0x01000000
#define MDP5_CTL_LAYER_REG_CURSOR_OUT 0x02000000
static inline uint32_t REG_MDP5_CTL_OP(uint32_t i0) { return 0x00000614 + 0x100*i0; }
#define MDP5_CTL_OP_MODE__MASK 0x0000000f
#define MDP5_CTL_OP_MODE__SHIFT 0
static inline uint32_t MDP5_CTL_OP_MODE(enum mdp5_ctl_mode val)
{
return ((val) << MDP5_CTL_OP_MODE__SHIFT) & MDP5_CTL_OP_MODE__MASK;
}
#define MDP5_CTL_OP_INTF_NUM__MASK 0x00000070
#define MDP5_CTL_OP_INTF_NUM__SHIFT 4
static inline uint32_t MDP5_CTL_OP_INTF_NUM(enum mdp5_intfnum val)
{
return ((val) << MDP5_CTL_OP_INTF_NUM__SHIFT) & MDP5_CTL_OP_INTF_NUM__MASK;
}
#define MDP5_CTL_OP_CMD_MODE 0x00020000
#define MDP5_CTL_OP_PACK_3D_ENABLE 0x00080000
#define MDP5_CTL_OP_PACK_3D__MASK 0x00300000
#define MDP5_CTL_OP_PACK_3D__SHIFT 20
static inline uint32_t MDP5_CTL_OP_PACK_3D(enum mdp5_pack_3d val)
{
return ((val) << MDP5_CTL_OP_PACK_3D__SHIFT) & MDP5_CTL_OP_PACK_3D__MASK;
}
static inline uint32_t REG_MDP5_CTL_FLUSH(uint32_t i0) { return 0x00000618 + 0x100*i0; }
#define MDP5_CTL_FLUSH_VIG0 0x00000001
#define MDP5_CTL_FLUSH_VIG1 0x00000002
#define MDP5_CTL_FLUSH_VIG2 0x00000004
#define MDP5_CTL_FLUSH_RGB0 0x00000008
#define MDP5_CTL_FLUSH_RGB1 0x00000010
#define MDP5_CTL_FLUSH_RGB2 0x00000020
#define MDP5_CTL_FLUSH_LM0 0x00000040
#define MDP5_CTL_FLUSH_LM1 0x00000080
#define MDP5_CTL_FLUSH_LM2 0x00000100
#define MDP5_CTL_FLUSH_DMA0 0x00000800
#define MDP5_CTL_FLUSH_DMA1 0x00001000
#define MDP5_CTL_FLUSH_DSPP0 0x00002000
#define MDP5_CTL_FLUSH_DSPP1 0x00004000
#define MDP5_CTL_FLUSH_DSPP2 0x00008000
#define MDP5_CTL_FLUSH_CTL 0x00020000
static inline uint32_t REG_MDP5_CTL_START(uint32_t i0) { return 0x0000061c + 0x100*i0; }
static inline uint32_t REG_MDP5_CTL_PACK_3D(uint32_t i0) { return 0x00000620 + 0x100*i0; }
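The CTL block above is where MDP5 departs most visibly from MDP4's single LAYERMIXER_IN_CFG register: each CTL carries a per-layer-mixer LAYER register that assigns pipes to blend stages, plus a FLUSH register whose bits kick the double-buffered state into effect. A sketch of staging RGB0 at the base and VIG0 above it on layer mixer 0 of CTL 0 and then flushing, assuming the shared mdp_mixer_stage_id enumerators (STAGE_BASE, STAGE0, ...) keep the values shown in the old mdp4 header and that struct mdp5_kms/mdp5_write() mirror their mdp4 counterparts:

/* Sketch: put RGB0 at the base stage and VIG0 at stage 0 on layer mixer 0
 * (CTL 0), then flush the pipes, the mixer and the CTL so the new state
 * latches.  struct mdp5_kms and mdp5_write() are assumed to mirror the
 * mdp4 helpers; STAGE_BASE/STAGE0 are assumed to carry over into the
 * shared mdp enums. */
static void example_stage_and_flush(struct mdp5_kms *mdp5_kms)
{
	mdp5_write(mdp5_kms, REG_MDP5_CTL_LAYER_REG(0, 0),
			MDP5_CTL_LAYER_REG_RGB0(STAGE_BASE) |
			MDP5_CTL_LAYER_REG_VIG0(STAGE0));

	mdp5_write(mdp5_kms, REG_MDP5_CTL_FLUSH(0),
			MDP5_CTL_FLUSH_VIG0 |
			MDP5_CTL_FLUSH_RGB0 |
			MDP5_CTL_FLUSH_LM0 |
			MDP5_CTL_FLUSH_CTL);
}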
static inline uint32_t REG_MDP5_PIPE(enum mdp5_pipe i0) { return 0x00001200 + 0x400*i0; }
static inline uint32_t REG_MDP5_PIPE_HIST_CTL_BASE(enum mdp5_pipe i0) { return 0x000014c4 + 0x400*i0; }
static inline uint32_t REG_MDP5_PIPE_HIST_LUT_BASE(enum mdp5_pipe i0) { return 0x000014f0 + 0x400*i0; }
static inline uint32_t REG_MDP5_PIPE_HIST_LUT_SWAP(enum mdp5_pipe i0) { return 0x00001500 + 0x400*i0; }
static inline uint32_t REG_MDP5_PIPE_SRC_SIZE(enum mdp5_pipe i0) { return 0x00001200 + 0x400*i0; }
#define MDP5_PIPE_SRC_SIZE_HEIGHT__MASK 0xffff0000
#define MDP5_PIPE_SRC_SIZE_HEIGHT__SHIFT 16
static inline uint32_t MDP5_PIPE_SRC_SIZE_HEIGHT(uint32_t val)
{
return ((val) << MDP5_PIPE_SRC_SIZE_HEIGHT__SHIFT) & MDP5_PIPE_SRC_SIZE_HEIGHT__MASK;
}
#define MDP5_PIPE_SRC_SIZE_WIDTH__MASK 0x0000ffff
#define MDP5_PIPE_SRC_SIZE_WIDTH__SHIFT 0
static inline uint32_t MDP5_PIPE_SRC_SIZE_WIDTH(uint32_t val)
{
return ((val) << MDP5_PIPE_SRC_SIZE_WIDTH__SHIFT) & MDP5_PIPE_SRC_SIZE_WIDTH__MASK;
}
static inline uint32_t REG_MDP5_PIPE_SRC_IMG_SIZE(enum mdp5_pipe i0) { return 0x00001204 + 0x400*i0; }
#define MDP5_PIPE_SRC_IMG_SIZE_HEIGHT__MASK 0xffff0000
#define MDP5_PIPE_SRC_IMG_SIZE_HEIGHT__SHIFT 16
static inline uint32_t MDP5_PIPE_SRC_IMG_SIZE_HEIGHT(uint32_t val)
{
return ((val) << MDP5_PIPE_SRC_IMG_SIZE_HEIGHT__SHIFT) & MDP5_PIPE_SRC_IMG_SIZE_HEIGHT__MASK;
}
#define MDP5_PIPE_SRC_IMG_SIZE_WIDTH__MASK 0x0000ffff
#define MDP5_PIPE_SRC_IMG_SIZE_WIDTH__SHIFT 0
static inline uint32_t MDP5_PIPE_SRC_IMG_SIZE_WIDTH(uint32_t val)
{
return ((val) << MDP5_PIPE_SRC_IMG_SIZE_WIDTH__SHIFT) & MDP5_PIPE_SRC_IMG_SIZE_WIDTH__MASK;
}
static inline uint32_t REG_MDP5_PIPE_SRC_XY(enum mdp5_pipe i0) { return 0x00001208 + 0x400*i0; }
#define MDP5_PIPE_SRC_XY_Y__MASK 0xffff0000
#define MDP5_PIPE_SRC_XY_Y__SHIFT 16
static inline uint32_t MDP5_PIPE_SRC_XY_Y(uint32_t val)
{
return ((val) << MDP5_PIPE_SRC_XY_Y__SHIFT) & MDP5_PIPE_SRC_XY_Y__MASK;
}
#define MDP5_PIPE_SRC_XY_X__MASK 0x0000ffff
#define MDP5_PIPE_SRC_XY_X__SHIFT 0
static inline uint32_t MDP5_PIPE_SRC_XY_X(uint32_t val)
{
return ((val) << MDP5_PIPE_SRC_XY_X__SHIFT) & MDP5_PIPE_SRC_XY_X__MASK;
}
static inline uint32_t REG_MDP5_PIPE_OUT_SIZE(enum mdp5_pipe i0) { return 0x0000120c + 0x400*i0; }
#define MDP5_PIPE_OUT_SIZE_HEIGHT__MASK 0xffff0000
#define MDP5_PIPE_OUT_SIZE_HEIGHT__SHIFT 16
static inline uint32_t MDP5_PIPE_OUT_SIZE_HEIGHT(uint32_t val)
{
return ((val) << MDP5_PIPE_OUT_SIZE_HEIGHT__SHIFT) & MDP5_PIPE_OUT_SIZE_HEIGHT__MASK;
}
#define MDP5_PIPE_OUT_SIZE_WIDTH__MASK 0x0000ffff
#define MDP5_PIPE_OUT_SIZE_WIDTH__SHIFT 0
static inline uint32_t MDP5_PIPE_OUT_SIZE_WIDTH(uint32_t val)
{
return ((val) << MDP5_PIPE_OUT_SIZE_WIDTH__SHIFT) & MDP5_PIPE_OUT_SIZE_WIDTH__MASK;
}
static inline uint32_t REG_MDP5_PIPE_OUT_XY(enum mdp5_pipe i0) { return 0x00001210 + 0x400*i0; }
#define MDP5_PIPE_OUT_XY_Y__MASK 0xffff0000
#define MDP5_PIPE_OUT_XY_Y__SHIFT 16
static inline uint32_t MDP5_PIPE_OUT_XY_Y(uint32_t val)
{
return ((val) << MDP5_PIPE_OUT_XY_Y__SHIFT) & MDP5_PIPE_OUT_XY_Y__MASK;
}
#define MDP5_PIPE_OUT_XY_X__MASK 0x0000ffff
#define MDP5_PIPE_OUT_XY_X__SHIFT 0
static inline uint32_t MDP5_PIPE_OUT_XY_X(uint32_t val)
{
return ((val) << MDP5_PIPE_OUT_XY_X__SHIFT) & MDP5_PIPE_OUT_XY_X__MASK;
}
static inline uint32_t REG_MDP5_PIPE_SRC0_ADDR(enum mdp5_pipe i0) { return 0x00001214 + 0x400*i0; }
static inline uint32_t REG_MDP5_PIPE_SRC1_ADDR(enum mdp5_pipe i0) { return 0x00001218 + 0x400*i0; }
static inline uint32_t REG_MDP5_PIPE_SRC2_ADDR(enum mdp5_pipe i0) { return 0x0000121c + 0x400*i0; }
static inline uint32_t REG_MDP5_PIPE_SRC3_ADDR(enum mdp5_pipe i0) { return 0x00001220 + 0x400*i0; }
static inline uint32_t REG_MDP5_PIPE_SRC_STRIDE_A(enum mdp5_pipe i0) { return 0x00001224 + 0x400*i0; }
#define MDP5_PIPE_SRC_STRIDE_A_P0__MASK 0x0000ffff
#define MDP5_PIPE_SRC_STRIDE_A_P0__SHIFT 0
static inline uint32_t MDP5_PIPE_SRC_STRIDE_A_P0(uint32_t val)
{
return ((val) << MDP5_PIPE_SRC_STRIDE_A_P0__SHIFT) & MDP5_PIPE_SRC_STRIDE_A_P0__MASK;
}
#define MDP5_PIPE_SRC_STRIDE_A_P1__MASK 0xffff0000
#define MDP5_PIPE_SRC_STRIDE_A_P1__SHIFT 16
static inline uint32_t MDP5_PIPE_SRC_STRIDE_A_P1(uint32_t val)
{
return ((val) << MDP5_PIPE_SRC_STRIDE_A_P1__SHIFT) & MDP5_PIPE_SRC_STRIDE_A_P1__MASK;
}
static inline uint32_t REG_MDP5_PIPE_SRC_STRIDE_B(enum mdp5_pipe i0) { return 0x00001228 + 0x400*i0; }
#define MDP5_PIPE_SRC_STRIDE_B_P2__MASK 0x0000ffff
#define MDP5_PIPE_SRC_STRIDE_B_P2__SHIFT 0
static inline uint32_t MDP5_PIPE_SRC_STRIDE_B_P2(uint32_t val)
{
return ((val) << MDP5_PIPE_SRC_STRIDE_B_P2__SHIFT) & MDP5_PIPE_SRC_STRIDE_B_P2__MASK;
}
#define MDP5_PIPE_SRC_STRIDE_B_P3__MASK 0xffff0000
#define MDP5_PIPE_SRC_STRIDE_B_P3__SHIFT 16
static inline uint32_t MDP5_PIPE_SRC_STRIDE_B_P3(uint32_t val)
{
return ((val) << MDP5_PIPE_SRC_STRIDE_B_P3__SHIFT) & MDP5_PIPE_SRC_STRIDE_B_P3__MASK;
}
static inline uint32_t REG_MDP5_PIPE_STILE_FRAME_SIZE(enum mdp5_pipe i0) { return 0x0000122c + 0x400*i0; }
static inline uint32_t REG_MDP5_PIPE_SRC_FORMAT(enum mdp5_pipe i0) { return 0x00001230 + 0x400*i0; }
#define MDP5_PIPE_SRC_FORMAT_G_BPC__MASK 0x00000003
#define MDP5_PIPE_SRC_FORMAT_G_BPC__SHIFT 0
static inline uint32_t MDP5_PIPE_SRC_FORMAT_G_BPC(enum mdp_bpc val)
{
return ((val) << MDP5_PIPE_SRC_FORMAT_G_BPC__SHIFT) & MDP5_PIPE_SRC_FORMAT_G_BPC__MASK;
}
#define MDP5_PIPE_SRC_FORMAT_B_BPC__MASK 0x0000000c
#define MDP5_PIPE_SRC_FORMAT_B_BPC__SHIFT 2
static inline uint32_t MDP5_PIPE_SRC_FORMAT_B_BPC(enum mdp_bpc val)
{
return ((val) << MDP5_PIPE_SRC_FORMAT_B_BPC__SHIFT) & MDP5_PIPE_SRC_FORMAT_B_BPC__MASK;
}
#define MDP5_PIPE_SRC_FORMAT_R_BPC__MASK 0x00000030
#define MDP5_PIPE_SRC_FORMAT_R_BPC__SHIFT 4
static inline uint32_t MDP5_PIPE_SRC_FORMAT_R_BPC(enum mdp_bpc val)
{
return ((val) << MDP5_PIPE_SRC_FORMAT_R_BPC__SHIFT) & MDP5_PIPE_SRC_FORMAT_R_BPC__MASK;
}
#define MDP5_PIPE_SRC_FORMAT_A_BPC__MASK 0x000000c0
#define MDP5_PIPE_SRC_FORMAT_A_BPC__SHIFT 6
static inline uint32_t MDP5_PIPE_SRC_FORMAT_A_BPC(enum mdp_bpc_alpha val)
{
return ((val) << MDP5_PIPE_SRC_FORMAT_A_BPC__SHIFT) & MDP5_PIPE_SRC_FORMAT_A_BPC__MASK;
}
#define MDP5_PIPE_SRC_FORMAT_ALPHA_ENABLE 0x00000100
#define MDP5_PIPE_SRC_FORMAT_CPP__MASK 0x00000600
#define MDP5_PIPE_SRC_FORMAT_CPP__SHIFT 9
static inline uint32_t MDP5_PIPE_SRC_FORMAT_CPP(uint32_t val)
{
return ((val) << MDP5_PIPE_SRC_FORMAT_CPP__SHIFT) & MDP5_PIPE_SRC_FORMAT_CPP__MASK;
}
#define MDP5_PIPE_SRC_FORMAT_ROT90 0x00000800
#define MDP5_PIPE_SRC_FORMAT_UNPACK_COUNT__MASK 0x00003000
#define MDP5_PIPE_SRC_FORMAT_UNPACK_COUNT__SHIFT 12
static inline uint32_t MDP5_PIPE_SRC_FORMAT_UNPACK_COUNT(uint32_t val)
{
return ((val) << MDP5_PIPE_SRC_FORMAT_UNPACK_COUNT__SHIFT) & MDP5_PIPE_SRC_FORMAT_UNPACK_COUNT__MASK;
}
#define MDP5_PIPE_SRC_FORMAT_UNPACK_TIGHT 0x00020000
#define MDP5_PIPE_SRC_FORMAT_UNPACK_ALIGN_MSB 0x00040000
#define MDP5_PIPE_SRC_FORMAT_NUM_PLANES__MASK 0x00780000
#define MDP5_PIPE_SRC_FORMAT_NUM_PLANES__SHIFT 19
static inline uint32_t MDP5_PIPE_SRC_FORMAT_NUM_PLANES(uint32_t val)
{
return ((val) << MDP5_PIPE_SRC_FORMAT_NUM_PLANES__SHIFT) & MDP5_PIPE_SRC_FORMAT_NUM_PLANES__MASK;
}
#define MDP5_PIPE_SRC_FORMAT_CHROMA_SAMP__MASK 0x01800000
#define MDP5_PIPE_SRC_FORMAT_CHROMA_SAMP__SHIFT 23
static inline uint32_t MDP5_PIPE_SRC_FORMAT_CHROMA_SAMP(enum mdp5_chroma_samp_type val)
{
return ((val) << MDP5_PIPE_SRC_FORMAT_CHROMA_SAMP__SHIFT) & MDP5_PIPE_SRC_FORMAT_CHROMA_SAMP__MASK;
}
static inline uint32_t REG_MDP5_PIPE_SRC_UNPACK(enum mdp5_pipe i0) { return 0x00001234 + 0x400*i0; }
#define MDP5_PIPE_SRC_UNPACK_ELEM0__MASK 0x000000ff
#define MDP5_PIPE_SRC_UNPACK_ELEM0__SHIFT 0
static inline uint32_t MDP5_PIPE_SRC_UNPACK_ELEM0(uint32_t val)
{
return ((val) << MDP5_PIPE_SRC_UNPACK_ELEM0__SHIFT) & MDP5_PIPE_SRC_UNPACK_ELEM0__MASK;
}
#define MDP5_PIPE_SRC_UNPACK_ELEM1__MASK 0x0000ff00
#define MDP5_PIPE_SRC_UNPACK_ELEM1__SHIFT 8
static inline uint32_t MDP5_PIPE_SRC_UNPACK_ELEM1(uint32_t val)
{
return ((val) << MDP5_PIPE_SRC_UNPACK_ELEM1__SHIFT) & MDP5_PIPE_SRC_UNPACK_ELEM1__MASK;
}
#define MDP5_PIPE_SRC_UNPACK_ELEM2__MASK 0x00ff0000
#define MDP5_PIPE_SRC_UNPACK_ELEM2__SHIFT 16
static inline uint32_t MDP5_PIPE_SRC_UNPACK_ELEM2(uint32_t val)
{
return ((val) << MDP5_PIPE_SRC_UNPACK_ELEM2__SHIFT) & MDP5_PIPE_SRC_UNPACK_ELEM2__MASK;
}
#define MDP5_PIPE_SRC_UNPACK_ELEM3__MASK 0xff000000
#define MDP5_PIPE_SRC_UNPACK_ELEM3__SHIFT 24
static inline uint32_t MDP5_PIPE_SRC_UNPACK_ELEM3(uint32_t val)
{
return ((val) << MDP5_PIPE_SRC_UNPACK_ELEM3__SHIFT) & MDP5_PIPE_SRC_UNPACK_ELEM3__MASK;
}
static inline uint32_t REG_MDP5_PIPE_SRC_OP_MODE(enum mdp5_pipe i0) { return 0x00001238 + 0x400*i0; }
#define MDP5_PIPE_SRC_OP_MODE_BWC_EN 0x00000001
#define MDP5_PIPE_SRC_OP_MODE_BWC__MASK 0x00000006
#define MDP5_PIPE_SRC_OP_MODE_BWC__SHIFT 1
static inline uint32_t MDP5_PIPE_SRC_OP_MODE_BWC(enum mdp5_pipe_bwc val)
{
return ((val) << MDP5_PIPE_SRC_OP_MODE_BWC__SHIFT) & MDP5_PIPE_SRC_OP_MODE_BWC__MASK;
}
#define MDP5_PIPE_SRC_OP_MODE_FLIP_LR 0x00002000
#define MDP5_PIPE_SRC_OP_MODE_FLIP_UD 0x00004000
#define MDP5_PIPE_SRC_OP_MODE_IGC_EN 0x00010000
#define MDP5_PIPE_SRC_OP_MODE_IGC_ROM_0 0x00020000
#define MDP5_PIPE_SRC_OP_MODE_IGC_ROM_1 0x00040000
#define MDP5_PIPE_SRC_OP_MODE_DEINTERLACE 0x00400000
#define MDP5_PIPE_SRC_OP_MODE_DEINTERLACE_ODD 0x00800000
static inline uint32_t REG_MDP5_PIPE_SRC_CONSTANT_COLOR(enum mdp5_pipe i0) { return 0x0000123c + 0x400*i0; }
static inline uint32_t REG_MDP5_PIPE_FETCH_CONFIG(enum mdp5_pipe i0) { return 0x00001248 + 0x400*i0; }
static inline uint32_t REG_MDP5_PIPE_VC1_RANGE(enum mdp5_pipe i0) { return 0x0000124c + 0x400*i0; }
static inline uint32_t REG_MDP5_PIPE_REQPRIO_FIFO_WM_0(enum mdp5_pipe i0) { return 0x00001250 + 0x400*i0; }
static inline uint32_t REG_MDP5_PIPE_REQPRIO_FIFO_WM_1(enum mdp5_pipe i0) { return 0x00001254 + 0x400*i0; }
static inline uint32_t REG_MDP5_PIPE_REQPRIO_FIFO_WM_2(enum mdp5_pipe i0) { return 0x00001258 + 0x400*i0; }
static inline uint32_t REG_MDP5_PIPE_SRC_ADDR_SW_STATUS(enum mdp5_pipe i0) { return 0x00001270 + 0x400*i0; }
static inline uint32_t REG_MDP5_PIPE_CURRENT_SRC0_ADDR(enum mdp5_pipe i0) { return 0x000012a4 + 0x400*i0; }
static inline uint32_t REG_MDP5_PIPE_CURRENT_SRC1_ADDR(enum mdp5_pipe i0) { return 0x000012a8 + 0x400*i0; }
static inline uint32_t REG_MDP5_PIPE_CURRENT_SRC2_ADDR(enum mdp5_pipe i0) { return 0x000012ac + 0x400*i0; }
static inline uint32_t REG_MDP5_PIPE_CURRENT_SRC3_ADDR(enum mdp5_pipe i0) { return 0x000012b0 + 0x400*i0; }
static inline uint32_t REG_MDP5_PIPE_DECIMATION(enum mdp5_pipe i0) { return 0x000012b4 + 0x400*i0; }
#define MDP5_PIPE_DECIMATION_VERT__MASK 0x000000ff
#define MDP5_PIPE_DECIMATION_VERT__SHIFT 0
static inline uint32_t MDP5_PIPE_DECIMATION_VERT(uint32_t val)
{
return ((val) << MDP5_PIPE_DECIMATION_VERT__SHIFT) & MDP5_PIPE_DECIMATION_VERT__MASK;
}
#define MDP5_PIPE_DECIMATION_HORZ__MASK 0x0000ff00
#define MDP5_PIPE_DECIMATION_HORZ__SHIFT 8
static inline uint32_t MDP5_PIPE_DECIMATION_HORZ(uint32_t val)
{
return ((val) << MDP5_PIPE_DECIMATION_HORZ__SHIFT) & MDP5_PIPE_DECIMATION_HORZ__MASK;
}
static inline uint32_t REG_MDP5_PIPE_SCALE_CONFIG(enum mdp5_pipe i0) { return 0x00001404 + 0x400*i0; }
#define MDP5_PIPE_SCALE_CONFIG_SCALEX_EN 0x00000001
#define MDP5_PIPE_SCALE_CONFIG_SCALEY_EN 0x00000002
#define MDP5_PIPE_SCALE_CONFIG_SCALEX_MIN_FILTER__MASK 0x00000300
#define MDP5_PIPE_SCALE_CONFIG_SCALEX_MIN_FILTER__SHIFT 8
static inline uint32_t MDP5_PIPE_SCALE_CONFIG_SCALEX_MIN_FILTER(enum mdp5_scale_filter val)
{
return ((val) << MDP5_PIPE_SCALE_CONFIG_SCALEX_MIN_FILTER__SHIFT) & MDP5_PIPE_SCALE_CONFIG_SCALEX_MIN_FILTER__MASK;
}
#define MDP5_PIPE_SCALE_CONFIG_SCALEY_MIN_FILTER__MASK 0x00000c00
#define MDP5_PIPE_SCALE_CONFIG_SCALEY_MIN_FILTER__SHIFT 10
static inline uint32_t MDP5_PIPE_SCALE_CONFIG_SCALEY_MIN_FILTER(enum mdp5_scale_filter val)
{
return ((val) << MDP5_PIPE_SCALE_CONFIG_SCALEY_MIN_FILTER__SHIFT) & MDP5_PIPE_SCALE_CONFIG_SCALEY_MIN_FILTER__MASK;
}
#define MDP5_PIPE_SCALE_CONFIG_SCALEX_CR_FILTER__MASK 0x00003000
#define MDP5_PIPE_SCALE_CONFIG_SCALEX_CR_FILTER__SHIFT 12
static inline uint32_t MDP5_PIPE_SCALE_CONFIG_SCALEX_CR_FILTER(enum mdp5_scale_filter val)
{
return ((val) << MDP5_PIPE_SCALE_CONFIG_SCALEX_CR_FILTER__SHIFT) & MDP5_PIPE_SCALE_CONFIG_SCALEX_CR_FILTER__MASK;
}
#define MDP5_PIPE_SCALE_CONFIG_SCALEY_CR_FILTER__MASK 0x0000c000
#define MDP5_PIPE_SCALE_CONFIG_SCALEY_CR_FILTER__SHIFT 14
static inline uint32_t MDP5_PIPE_SCALE_CONFIG_SCALEY_CR_FILTER(enum mdp5_scale_filter val)
{
return ((val) << MDP5_PIPE_SCALE_CONFIG_SCALEY_CR_FILTER__SHIFT) & MDP5_PIPE_SCALE_CONFIG_SCALEY_CR_FILTER__MASK;
}
#define MDP5_PIPE_SCALE_CONFIG_SCALEX_MAX_FILTER__MASK 0x00030000
#define MDP5_PIPE_SCALE_CONFIG_SCALEX_MAX_FILTER__SHIFT 16
static inline uint32_t MDP5_PIPE_SCALE_CONFIG_SCALEX_MAX_FILTER(enum mdp5_scale_filter val)
{
return ((val) << MDP5_PIPE_SCALE_CONFIG_SCALEX_MAX_FILTER__SHIFT) & MDP5_PIPE_SCALE_CONFIG_SCALEX_MAX_FILTER__MASK;
}
#define MDP5_PIPE_SCALE_CONFIG_SCALEY_MAX_FILTER__MASK 0x000c0000
#define MDP5_PIPE_SCALE_CONFIG_SCALEY_MAX_FILTER__SHIFT 18
static inline uint32_t MDP5_PIPE_SCALE_CONFIG_SCALEY_MAX_FILTER(enum mdp5_scale_filter val)
{
return ((val) << MDP5_PIPE_SCALE_CONFIG_SCALEY_MAX_FILTER__SHIFT) & MDP5_PIPE_SCALE_CONFIG_SCALEY_MAX_FILTER__MASK;
}
static inline uint32_t REG_MDP5_PIPE_SCALE_PHASE_STEP_X(enum mdp5_pipe i0) { return 0x00001410 + 0x400*i0; }
static inline uint32_t REG_MDP5_PIPE_SCALE_PHASE_STEP_Y(enum mdp5_pipe i0) { return 0x00001414 + 0x400*i0; }
static inline uint32_t REG_MDP5_PIPE_SCALE_INIT_PHASE_X(enum mdp5_pipe i0) { return 0x00001420 + 0x400*i0; }
static inline uint32_t REG_MDP5_PIPE_SCALE_INIT_PHASE_Y(enum mdp5_pipe i0) { return 0x00001424 + 0x400*i0; }
static inline uint32_t REG_MDP5_LM(uint32_t i0) { return 0x00003200 + 0x400*i0; }
static inline uint32_t REG_MDP5_LM_BLEND_COLOR_OUT(uint32_t i0) { return 0x00003200 + 0x400*i0; }
#define MDP5_LM_BLEND_COLOR_OUT_STAGE0_FG_ALPHA 0x00000002
#define MDP5_LM_BLEND_COLOR_OUT_STAGE1_FG_ALPHA 0x00000004
#define MDP5_LM_BLEND_COLOR_OUT_STAGE2_FG_ALPHA 0x00000008
#define MDP5_LM_BLEND_COLOR_OUT_STAGE3_FG_ALPHA 0x00000010
static inline uint32_t REG_MDP5_LM_OUT_SIZE(uint32_t i0) { return 0x00003204 + 0x400*i0; }
#define MDP5_LM_OUT_SIZE_HEIGHT__MASK 0xffff0000
#define MDP5_LM_OUT_SIZE_HEIGHT__SHIFT 16
static inline uint32_t MDP5_LM_OUT_SIZE_HEIGHT(uint32_t val)
{
return ((val) << MDP5_LM_OUT_SIZE_HEIGHT__SHIFT) & MDP5_LM_OUT_SIZE_HEIGHT__MASK;
}
#define MDP5_LM_OUT_SIZE_WIDTH__MASK 0x0000ffff
#define MDP5_LM_OUT_SIZE_WIDTH__SHIFT 0
static inline uint32_t MDP5_LM_OUT_SIZE_WIDTH(uint32_t val)
{
return ((val) << MDP5_LM_OUT_SIZE_WIDTH__SHIFT) & MDP5_LM_OUT_SIZE_WIDTH__MASK;
}
static inline uint32_t REG_MDP5_LM_BORDER_COLOR_0(uint32_t i0) { return 0x00003208 + 0x400*i0; }
static inline uint32_t REG_MDP5_LM_BORDER_COLOR_1(uint32_t i0) { return 0x00003210 + 0x400*i0; }
static inline uint32_t REG_MDP5_LM_BLEND(uint32_t i0, uint32_t i1) { return 0x00003220 + 0x400*i0 + 0x30*i1; }
static inline uint32_t REG_MDP5_LM_BLEND_OP_MODE(uint32_t i0, uint32_t i1) { return 0x00003220 + 0x400*i0 + 0x30*i1; }
#define MDP5_LM_BLEND_OP_MODE_FG_ALPHA__MASK 0x00000003
#define MDP5_LM_BLEND_OP_MODE_FG_ALPHA__SHIFT 0
static inline uint32_t MDP5_LM_BLEND_OP_MODE_FG_ALPHA(enum mdp_alpha_type val)
{
return ((val) << MDP5_LM_BLEND_OP_MODE_FG_ALPHA__SHIFT) & MDP5_LM_BLEND_OP_MODE_FG_ALPHA__MASK;
}
#define MDP5_LM_BLEND_OP_MODE_FG_INV_ALPHA 0x00000004
#define MDP5_LM_BLEND_OP_MODE_FG_MOD_ALPHA 0x00000008
#define MDP5_LM_BLEND_OP_MODE_FG_INV_MOD_ALPHA 0x00000010
#define MDP5_LM_BLEND_OP_MODE_FG_TRANSP_EN 0x00000020
#define MDP5_LM_BLEND_OP_MODE_BG_ALPHA__MASK 0x00000300
#define MDP5_LM_BLEND_OP_MODE_BG_ALPHA__SHIFT 8
static inline uint32_t MDP5_LM_BLEND_OP_MODE_BG_ALPHA(enum mdp_alpha_type val)
{
return ((val) << MDP5_LM_BLEND_OP_MODE_BG_ALPHA__SHIFT) & MDP5_LM_BLEND_OP_MODE_BG_ALPHA__MASK;
}
#define MDP5_LM_BLEND_OP_MODE_BG_INV_ALPHA 0x00000400
#define MDP5_LM_BLEND_OP_MODE_BG_MOD_ALPHA 0x00000800
#define MDP5_LM_BLEND_OP_MODE_BG_INV_MOD_ALPHA 0x00001000
#define MDP5_LM_BLEND_OP_MODE_BG_TRANSP_EN 0x00002000
static inline uint32_t REG_MDP5_LM_BLEND_FG_ALPHA(uint32_t i0, uint32_t i1) { return 0x00003224 + 0x400*i0 + 0x30*i1; }
static inline uint32_t REG_MDP5_LM_BLEND_BG_ALPHA(uint32_t i0, uint32_t i1) { return 0x00003228 + 0x400*i0 + 0x30*i1; }
static inline uint32_t REG_MDP5_LM_BLEND_FG_TRANSP_LOW0(uint32_t i0, uint32_t i1) { return 0x0000322c + 0x400*i0 + 0x30*i1; }
static inline uint32_t REG_MDP5_LM_BLEND_FG_TRANSP_LOW1(uint32_t i0, uint32_t i1) { return 0x00003230 + 0x400*i0 + 0x30*i1; }
static inline uint32_t REG_MDP5_LM_BLEND_FG_TRANSP_HIGH0(uint32_t i0, uint32_t i1) { return 0x00003234 + 0x400*i0 + 0x30*i1; }
static inline uint32_t REG_MDP5_LM_BLEND_FG_TRANSP_HIGH1(uint32_t i0, uint32_t i1) { return 0x00003238 + 0x400*i0 + 0x30*i1; }
static inline uint32_t REG_MDP5_LM_BLEND_BG_TRANSP_LOW0(uint32_t i0, uint32_t i1) { return 0x0000323c + 0x400*i0 + 0x30*i1; }
static inline uint32_t REG_MDP5_LM_BLEND_BG_TRANSP_LOW1(uint32_t i0, uint32_t i1) { return 0x00003240 + 0x400*i0 + 0x30*i1; }
static inline uint32_t REG_MDP5_LM_BLEND_BG_TRANSP_HIGH0(uint32_t i0, uint32_t i1) { return 0x00003244 + 0x400*i0 + 0x30*i1; }
static inline uint32_t REG_MDP5_LM_BLEND_BG_TRANSP_HIGH1(uint32_t i0, uint32_t i1) { return 0x00003248 + 0x400*i0 + 0x30*i1; }
static inline uint32_t REG_MDP5_LM_CURSOR_IMG_SIZE(uint32_t i0) { return 0x000032e0 + 0x400*i0; }
static inline uint32_t REG_MDP5_LM_CURSOR_SIZE(uint32_t i0) { return 0x000032e4 + 0x400*i0; }
static inline uint32_t REG_MDP5_LM_CURSOR_XY(uint32_t i0) { return 0x000032e8 + 0x400*i0; }
static inline uint32_t REG_MDP5_LM_CURSOR_STRIDE(uint32_t i0) { return 0x000032dc + 0x400*i0; }
static inline uint32_t REG_MDP5_LM_CURSOR_FORMAT(uint32_t i0) { return 0x000032ec + 0x400*i0; }
static inline uint32_t REG_MDP5_LM_CURSOR_BASE_ADDR(uint32_t i0) { return 0x000032f0 + 0x400*i0; }
static inline uint32_t REG_MDP5_LM_CURSOR_START_XY(uint32_t i0) { return 0x000032f4 + 0x400*i0; }
static inline uint32_t REG_MDP5_LM_CURSOR_BLEND_CONFIG(uint32_t i0) { return 0x000032f8 + 0x400*i0; }
static inline uint32_t REG_MDP5_LM_CURSOR_BLEND_PARAM(uint32_t i0) { return 0x000032fc + 0x400*i0; }
static inline uint32_t REG_MDP5_LM_CURSOR_BLEND_TRANSP_LOW0(uint32_t i0) { return 0x00003300 + 0x400*i0; }
static inline uint32_t REG_MDP5_LM_CURSOR_BLEND_TRANSP_LOW1(uint32_t i0) { return 0x00003304 + 0x400*i0; }
static inline uint32_t REG_MDP5_LM_CURSOR_BLEND_TRANSP_HIGH0(uint32_t i0) { return 0x00003308 + 0x400*i0; }
static inline uint32_t REG_MDP5_LM_CURSOR_BLEND_TRANSP_HIGH1(uint32_t i0) { return 0x0000330c + 0x400*i0; }
static inline uint32_t REG_MDP5_LM_GC_LUT_BASE(uint32_t i0) { return 0x00003310 + 0x400*i0; }
static inline uint32_t REG_MDP5_DSPP(uint32_t i0) { return 0x00004600 + 0x400*i0; }
static inline uint32_t REG_MDP5_DSPP_OP_MODE(uint32_t i0) { return 0x00004600 + 0x400*i0; }
#define MDP5_DSPP_OP_MODE_IGC_LUT_EN 0x00000001
#define MDP5_DSPP_OP_MODE_IGC_TBL_IDX__MASK 0x0000000e
#define MDP5_DSPP_OP_MODE_IGC_TBL_IDX__SHIFT 1
static inline uint32_t MDP5_DSPP_OP_MODE_IGC_TBL_IDX(uint32_t val)
{
return ((val) << MDP5_DSPP_OP_MODE_IGC_TBL_IDX__SHIFT) & MDP5_DSPP_OP_MODE_IGC_TBL_IDX__MASK;
}
#define MDP5_DSPP_OP_MODE_PCC_EN 0x00000010
#define MDP5_DSPP_OP_MODE_DITHER_EN 0x00000100
#define MDP5_DSPP_OP_MODE_HIST_EN 0x00010000
#define MDP5_DSPP_OP_MODE_AUTO_CLEAR 0x00020000
#define MDP5_DSPP_OP_MODE_HIST_LUT_EN 0x00080000
#define MDP5_DSPP_OP_MODE_PA_EN 0x00100000
#define MDP5_DSPP_OP_MODE_GAMUT_EN 0x00800000
#define MDP5_DSPP_OP_MODE_GAMUT_ORDER 0x01000000
static inline uint32_t REG_MDP5_DSPP_PCC_BASE(uint32_t i0) { return 0x00004630 + 0x400*i0; }
static inline uint32_t REG_MDP5_DSPP_DITHER_DEPTH(uint32_t i0) { return 0x00004750 + 0x400*i0; }
static inline uint32_t REG_MDP5_DSPP_HIST_CTL_BASE(uint32_t i0) { return 0x00004810 + 0x400*i0; }
static inline uint32_t REG_MDP5_DSPP_HIST_LUT_BASE(uint32_t i0) { return 0x00004830 + 0x400*i0; }
static inline uint32_t REG_MDP5_DSPP_HIST_LUT_SWAP(uint32_t i0) { return 0x00004834 + 0x400*i0; }
static inline uint32_t REG_MDP5_DSPP_PA_BASE(uint32_t i0) { return 0x00004838 + 0x400*i0; }
static inline uint32_t REG_MDP5_DSPP_GAMUT_BASE(uint32_t i0) { return 0x000048dc + 0x400*i0; }
static inline uint32_t REG_MDP5_DSPP_GC_BASE(uint32_t i0) { return 0x000048b0 + 0x400*i0; }
static inline uint32_t REG_MDP5_INTF(uint32_t i0) { return 0x00012500 + 0x200*i0; }
static inline uint32_t REG_MDP5_INTF_TIMING_ENGINE_EN(uint32_t i0) { return 0x00012500 + 0x200*i0; }
static inline uint32_t REG_MDP5_INTF_CONFIG(uint32_t i0) { return 0x00012504 + 0x200*i0; }
static inline uint32_t REG_MDP5_INTF_HSYNC_CTL(uint32_t i0) { return 0x00012508 + 0x200*i0; }
#define MDP5_INTF_HSYNC_CTL_PULSEW__MASK 0x0000ffff
#define MDP5_INTF_HSYNC_CTL_PULSEW__SHIFT 0
static inline uint32_t MDP5_INTF_HSYNC_CTL_PULSEW(uint32_t val)
{
return ((val) << MDP5_INTF_HSYNC_CTL_PULSEW__SHIFT) & MDP5_INTF_HSYNC_CTL_PULSEW__MASK;
}
#define MDP5_INTF_HSYNC_CTL_PERIOD__MASK 0xffff0000
#define MDP5_INTF_HSYNC_CTL_PERIOD__SHIFT 16
static inline uint32_t MDP5_INTF_HSYNC_CTL_PERIOD(uint32_t val)
{
return ((val) << MDP5_INTF_HSYNC_CTL_PERIOD__SHIFT) & MDP5_INTF_HSYNC_CTL_PERIOD__MASK;
}
static inline uint32_t REG_MDP5_INTF_VSYNC_PERIOD_F0(uint32_t i0) { return 0x0001250c + 0x200*i0; }
static inline uint32_t REG_MDP5_INTF_VSYNC_PERIOD_F1(uint32_t i0) { return 0x00012510 + 0x200*i0; }
static inline uint32_t REG_MDP5_INTF_VSYNC_LEN_F0(uint32_t i0) { return 0x00012514 + 0x200*i0; }
static inline uint32_t REG_MDP5_INTF_VSYNC_LEN_F1(uint32_t i0) { return 0x00012518 + 0x200*i0; }
static inline uint32_t REG_MDP5_INTF_DISPLAY_VSTART_F0(uint32_t i0) { return 0x0001251c + 0x200*i0; }
static inline uint32_t REG_MDP5_INTF_DISPLAY_VSTART_F1(uint32_t i0) { return 0x00012520 + 0x200*i0; }
static inline uint32_t REG_MDP5_INTF_DISPLAY_VEND_F0(uint32_t i0) { return 0x00012524 + 0x200*i0; }
static inline uint32_t REG_MDP5_INTF_DISPLAY_VEND_F1(uint32_t i0) { return 0x00012528 + 0x200*i0; }
static inline uint32_t REG_MDP5_INTF_ACTIVE_VSTART_F0(uint32_t i0) { return 0x0001252c + 0x200*i0; }
#define MDP5_INTF_ACTIVE_VSTART_F0_VAL__MASK 0x7fffffff
#define MDP5_INTF_ACTIVE_VSTART_F0_VAL__SHIFT 0
static inline uint32_t MDP5_INTF_ACTIVE_VSTART_F0_VAL(uint32_t val)
{
return ((val) << MDP5_INTF_ACTIVE_VSTART_F0_VAL__SHIFT) & MDP5_INTF_ACTIVE_VSTART_F0_VAL__MASK;
}
#define MDP5_INTF_ACTIVE_VSTART_F0_ACTIVE_V_ENABLE 0x80000000
static inline uint32_t REG_MDP5_INTF_ACTIVE_VSTART_F1(uint32_t i0) { return 0x00012530 + 0x200*i0; }
#define MDP5_INTF_ACTIVE_VSTART_F1_VAL__MASK 0x7fffffff
#define MDP5_INTF_ACTIVE_VSTART_F1_VAL__SHIFT 0
static inline uint32_t MDP5_INTF_ACTIVE_VSTART_F1_VAL(uint32_t val)
{
return ((val) << MDP5_INTF_ACTIVE_VSTART_F1_VAL__SHIFT) & MDP5_INTF_ACTIVE_VSTART_F1_VAL__MASK;
}
static inline uint32_t REG_MDP5_INTF_ACTIVE_VEND_F0(uint32_t i0) { return 0x00012534 + 0x200*i0; }
static inline uint32_t REG_MDP5_INTF_ACTIVE_VEND_F1(uint32_t i0) { return 0x00012538 + 0x200*i0; }
static inline uint32_t REG_MDP5_INTF_DISPLAY_HCTL(uint32_t i0) { return 0x0001253c + 0x200*i0; }
#define MDP5_INTF_DISPLAY_HCTL_START__MASK 0x0000ffff
#define MDP5_INTF_DISPLAY_HCTL_START__SHIFT 0
static inline uint32_t MDP5_INTF_DISPLAY_HCTL_START(uint32_t val)
{
return ((val) << MDP5_INTF_DISPLAY_HCTL_START__SHIFT) & MDP5_INTF_DISPLAY_HCTL_START__MASK;
}
#define MDP5_INTF_DISPLAY_HCTL_END__MASK 0xffff0000
#define MDP5_INTF_DISPLAY_HCTL_END__SHIFT 16
static inline uint32_t MDP5_INTF_DISPLAY_HCTL_END(uint32_t val)
{
return ((val) << MDP5_INTF_DISPLAY_HCTL_END__SHIFT) & MDP5_INTF_DISPLAY_HCTL_END__MASK;
}
static inline uint32_t REG_MDP5_INTF_ACTIVE_HCTL(uint32_t i0) { return 0x00012540 + 0x200*i0; }
#define MDP5_INTF_ACTIVE_HCTL_START__MASK 0x00007fff
#define MDP5_INTF_ACTIVE_HCTL_START__SHIFT 0
static inline uint32_t MDP5_INTF_ACTIVE_HCTL_START(uint32_t val)
{
return ((val) << MDP5_INTF_ACTIVE_HCTL_START__SHIFT) & MDP5_INTF_ACTIVE_HCTL_START__MASK;
}
#define MDP5_INTF_ACTIVE_HCTL_END__MASK 0x7fff0000
#define MDP5_INTF_ACTIVE_HCTL_END__SHIFT 16
static inline uint32_t MDP5_INTF_ACTIVE_HCTL_END(uint32_t val)
{
return ((val) << MDP5_INTF_ACTIVE_HCTL_END__SHIFT) & MDP5_INTF_ACTIVE_HCTL_END__MASK;
}
#define MDP5_INTF_ACTIVE_HCTL_ACTIVE_H_ENABLE 0x80000000
static inline uint32_t REG_MDP5_INTF_BORDER_COLOR(uint32_t i0) { return 0x00012544 + 0x200*i0; }
static inline uint32_t REG_MDP5_INTF_UNDERFLOW_COLOR(uint32_t i0) { return 0x00012548 + 0x200*i0; }
static inline uint32_t REG_MDP5_INTF_HSYNC_SKEW(uint32_t i0) { return 0x0001254c + 0x200*i0; }
static inline uint32_t REG_MDP5_INTF_POLARITY_CTL(uint32_t i0) { return 0x00012550 + 0x200*i0; }
#define MDP5_INTF_POLARITY_CTL_HSYNC_LOW 0x00000001
#define MDP5_INTF_POLARITY_CTL_VSYNC_LOW 0x00000002
#define MDP5_INTF_POLARITY_CTL_DATA_EN_LOW 0x00000004
static inline uint32_t REG_MDP5_INTF_TEST_CTL(uint32_t i0) { return 0x00012554 + 0x200*i0; }
static inline uint32_t REG_MDP5_INTF_TP_COLOR0(uint32_t i0) { return 0x00012558 + 0x200*i0; }
static inline uint32_t REG_MDP5_INTF_TP_COLOR1(uint32_t i0) { return 0x0001255c + 0x200*i0; }
static inline uint32_t REG_MDP5_INTF_DSI_CMD_MODE_TRIGGER_EN(uint32_t i0) { return 0x00012584 + 0x200*i0; }
static inline uint32_t REG_MDP5_INTF_PANEL_FORMAT(uint32_t i0) { return 0x00012590 + 0x200*i0; }
static inline uint32_t REG_MDP5_INTF_FRAME_LINE_COUNT_EN(uint32_t i0) { return 0x000125a8 + 0x200*i0; }
static inline uint32_t REG_MDP5_INTF_FRAME_COUNT(uint32_t i0) { return 0x000125ac + 0x200*i0; }
static inline uint32_t REG_MDP5_INTF_LINE_COUNT(uint32_t i0) { return 0x000125b0 + 0x200*i0; }
static inline uint32_t REG_MDP5_INTF_DEFLICKER_CONFIG(uint32_t i0) { return 0x000125f0 + 0x200*i0; }
static inline uint32_t REG_MDP5_INTF_DEFLICKER_STRNG_COEFF(uint32_t i0) { return 0x000125f4 + 0x200*i0; }
static inline uint32_t REG_MDP5_INTF_DEFLICKER_WEAK_COEFF(uint32_t i0) { return 0x000125f8 + 0x200*i0; }
static inline uint32_t REG_MDP5_INTF_TPG_ENABLE(uint32_t i0) { return 0x00012600 + 0x200*i0; }
static inline uint32_t REG_MDP5_INTF_TPG_MAIN_CONTROL(uint32_t i0) { return 0x00012604 + 0x200*i0; }
static inline uint32_t REG_MDP5_INTF_TPG_VIDEO_CONFIG(uint32_t i0) { return 0x00012608 + 0x200*i0; }
static inline uint32_t REG_MDP5_INTF_TPG_COMPONENT_LIMITS(uint32_t i0) { return 0x0001260c + 0x200*i0; }
static inline uint32_t REG_MDP5_INTF_TPG_RECTANGLE(uint32_t i0) { return 0x00012610 + 0x200*i0; }
static inline uint32_t REG_MDP5_INTF_TPG_INITIAL_VALUE(uint32_t i0) { return 0x00012614 + 0x200*i0; }
static inline uint32_t REG_MDP5_INTF_TPG_BLK_WHITE_PATTERN_FRAME(uint32_t i0) { return 0x00012618 + 0x200*i0; }
static inline uint32_t REG_MDP5_INTF_TPG_RGB_MAPPING(uint32_t i0) { return 0x0001261c + 0x200*i0; }
static inline uint32_t REG_MDP5_AD(uint32_t i0) { return 0x00013100 + 0x200*i0; }
static inline uint32_t REG_MDP5_AD_BYPASS(uint32_t i0) { return 0x00013100 + 0x200*i0; }
static inline uint32_t REG_MDP5_AD_CTRL_0(uint32_t i0) { return 0x00013104 + 0x200*i0; }
static inline uint32_t REG_MDP5_AD_CTRL_1(uint32_t i0) { return 0x00013108 + 0x200*i0; }
static inline uint32_t REG_MDP5_AD_FRAME_SIZE(uint32_t i0) { return 0x0001310c + 0x200*i0; }
static inline uint32_t REG_MDP5_AD_CON_CTRL_0(uint32_t i0) { return 0x00013110 + 0x200*i0; }
static inline uint32_t REG_MDP5_AD_CON_CTRL_1(uint32_t i0) { return 0x00013114 + 0x200*i0; }
static inline uint32_t REG_MDP5_AD_STR_MAN(uint32_t i0) { return 0x00013118 + 0x200*i0; }
static inline uint32_t REG_MDP5_AD_VAR(uint32_t i0) { return 0x0001311c + 0x200*i0; }
static inline uint32_t REG_MDP5_AD_DITH(uint32_t i0) { return 0x00013120 + 0x200*i0; }
static inline uint32_t REG_MDP5_AD_DITH_CTRL(uint32_t i0) { return 0x00013124 + 0x200*i0; }
static inline uint32_t REG_MDP5_AD_AMP_LIM(uint32_t i0) { return 0x00013128 + 0x200*i0; }
static inline uint32_t REG_MDP5_AD_SLOPE(uint32_t i0) { return 0x0001312c + 0x200*i0; }
static inline uint32_t REG_MDP5_AD_BW_LVL(uint32_t i0) { return 0x00013130 + 0x200*i0; }
static inline uint32_t REG_MDP5_AD_LOGO_POS(uint32_t i0) { return 0x00013134 + 0x200*i0; }
static inline uint32_t REG_MDP5_AD_LUT_FI(uint32_t i0) { return 0x00013138 + 0x200*i0; }
static inline uint32_t REG_MDP5_AD_LUT_CC(uint32_t i0) { return 0x0001317c + 0x200*i0; }
static inline uint32_t REG_MDP5_AD_STR_LIM(uint32_t i0) { return 0x000131c8 + 0x200*i0; }
static inline uint32_t REG_MDP5_AD_CALIB_AB(uint32_t i0) { return 0x000131cc + 0x200*i0; }
static inline uint32_t REG_MDP5_AD_CALIB_CD(uint32_t i0) { return 0x000131d0 + 0x200*i0; }
static inline uint32_t REG_MDP5_AD_MODE_SEL(uint32_t i0) { return 0x000131d4 + 0x200*i0; }
static inline uint32_t REG_MDP5_AD_TFILT_CTRL(uint32_t i0) { return 0x000131d8 + 0x200*i0; }
static inline uint32_t REG_MDP5_AD_BL_MINMAX(uint32_t i0) { return 0x000131dc + 0x200*i0; }
static inline uint32_t REG_MDP5_AD_BL(uint32_t i0) { return 0x000131e0 + 0x200*i0; }
static inline uint32_t REG_MDP5_AD_BL_MAX(uint32_t i0) { return 0x000131e8 + 0x200*i0; }
static inline uint32_t REG_MDP5_AD_AL(uint32_t i0) { return 0x000131ec + 0x200*i0; }
static inline uint32_t REG_MDP5_AD_AL_MIN(uint32_t i0) { return 0x000131f0 + 0x200*i0; }
static inline uint32_t REG_MDP5_AD_AL_FILT(uint32_t i0) { return 0x000131f4 + 0x200*i0; }
static inline uint32_t REG_MDP5_AD_CFG_BUF(uint32_t i0) { return 0x000131f8 + 0x200*i0; }
static inline uint32_t REG_MDP5_AD_LUT_AL(uint32_t i0) { return 0x00013200 + 0x200*i0; }
static inline uint32_t REG_MDP5_AD_TARG_STR(uint32_t i0) { return 0x00013244 + 0x200*i0; }
static inline uint32_t REG_MDP5_AD_START_CALC(uint32_t i0) { return 0x00013248 + 0x200*i0; }
static inline uint32_t REG_MDP5_AD_STR_OUT(uint32_t i0) { return 0x0001324c + 0x200*i0; }
static inline uint32_t REG_MDP5_AD_BL_OUT(uint32_t i0) { return 0x00013254 + 0x200*i0; }
static inline uint32_t REG_MDP5_AD_CALC_DONE(uint32_t i0) { return 0x00013258 + 0x200*i0; }
#endif /* MDP5_XML */
/*
* Copyright (C) 2013 Red Hat
* Author: Rob Clark <robdclark@gmail.com>
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 as published by
* the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
* You should have received a copy of the GNU General Public License along with
* this program. If not, see <http://www.gnu.org/licenses/>.
*/
#include "mdp5_kms.h"
#include <drm/drm_mode.h>
#include "drm_crtc.h"
#include "drm_crtc_helper.h"
#include "drm_flip_work.h"
struct mdp5_crtc {
struct drm_crtc base;
char name[8];
struct drm_plane *plane;
struct drm_plane *planes[8];
int id;
bool enabled;
/* which mixer/encoder we route output to: */
int mixer;
/* if there is a pending flip, these will be non-null: */
struct drm_pending_vblank_event *event;
struct msm_fence_cb pageflip_cb;
#define PENDING_CURSOR 0x1
#define PENDING_FLIP 0x2
atomic_t pending;
/* the fb that we logically (from the PoV of the KMS API) hold a
 * ref to, which we may not yet be scanning out (we may still be
 * scanning out the previous fb, in the case of a page_flip, while
 * waiting for gpu rendering to complete):
 */
struct drm_framebuffer *fb;
/* the fb that we currently hold a scanout ref to: */
struct drm_framebuffer *scanout_fb;
/* for unref'ing framebuffers after scanout completes: */
struct drm_flip_work unref_fb_work;
struct mdp_irq vblank;
struct mdp_irq err;
};
#define to_mdp5_crtc(x) container_of(x, struct mdp5_crtc, base)
static struct mdp5_kms *get_kms(struct drm_crtc *crtc)
{
struct msm_drm_private *priv = crtc->dev->dev_private;
return to_mdp5_kms(to_mdp_kms(priv->kms));
}
static void request_pending(struct drm_crtc *crtc, uint32_t pending)
{
struct mdp5_crtc *mdp5_crtc = to_mdp5_crtc(crtc);
atomic_or(pending, &mdp5_crtc->pending);
mdp_irq_register(&get_kms(crtc)->base, &mdp5_crtc->vblank);
}
static void crtc_flush(struct drm_crtc *crtc)
{
struct mdp5_crtc *mdp5_crtc = to_mdp5_crtc(crtc);
struct mdp5_kms *mdp5_kms = get_kms(crtc);
int id = mdp5_crtc->id;
uint32_t i, flush = 0;
for (i = 0; i < ARRAY_SIZE(mdp5_crtc->planes); i++) {
struct drm_plane *plane = mdp5_crtc->planes[i];
if (plane) {
enum mdp5_pipe pipe = mdp5_plane_pipe(plane);
flush |= pipe2flush(pipe);
}
}
flush |= mixer2flush(mdp5_crtc->id);
flush |= MDP5_CTL_FLUSH_CTL;
DBG("%s: flush=%08x", mdp5_crtc->name, flush);
mdp5_write(mdp5_kms, REG_MDP5_CTL_FLUSH(id), flush);
}
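/* For example, with a single RGB0 plane attached and id==0, and assuming
 * pipe2flush()/mixer2flush() map SSPP_RGB0 and mixer 0 to their flush bits,
 * the mask built above would come out roughly as:
 *
 *   MDP5_CTL_FLUSH_RGB0 | MDP5_CTL_FLUSH_LM0 | MDP5_CTL_FLUSH_CTL
 *
 * ie. one write to REG_MDP5_CTL_FLUSH(0) that flushes the pipe's shadow
 * registers, the layer mixer, and the CTL itself.
 */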
static void update_fb(struct drm_crtc *crtc, struct drm_framebuffer *new_fb)
{
struct mdp5_crtc *mdp5_crtc = to_mdp5_crtc(crtc);
struct drm_framebuffer *old_fb = mdp5_crtc->fb;
/* grab reference to incoming scanout fb: */
drm_framebuffer_reference(new_fb);
mdp5_crtc->base.fb = new_fb;
mdp5_crtc->fb = new_fb;
if (old_fb)
drm_flip_work_queue(&mdp5_crtc->unref_fb_work, old_fb);
}
/* unlike update_fb(), take a ref to the new scanout fb *before* updating
 * the plane, then call this. Needed to ensure we don't unref the buffer
 * that is actually still being scanned out.
 *
 * Note that this whole thing goes away with atomic.. since we can defer
 * calling into the driver until rendering is done.
 */
static void update_scanout(struct drm_crtc *crtc, struct drm_framebuffer *fb)
{
struct mdp5_crtc *mdp5_crtc = to_mdp5_crtc(crtc);
/* flush updates, to make sure the hw is updated to the new scanout fb,
 * so that we can safely queue an unref of the current fb (ie. at the
 * next vblank we know the hw is done with the previous scanout_fb).
 */
crtc_flush(crtc);
if (mdp5_crtc->scanout_fb)
drm_flip_work_queue(&mdp5_crtc->unref_fb_work,
mdp5_crtc->scanout_fb);
mdp5_crtc->scanout_fb = fb;
/* enable vblank to complete flip: */
request_pending(crtc, PENDING_FLIP);
}
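/* The intended ordering, as used by pageflip_cb() below, is roughly:
 *
 *   drm_framebuffer_reference(fb);          // extra ref for the scanout
 *   mdp5_plane_set_scanout(plane, fb);      // point the pipe at the new fb
 *   update_scanout(crtc, fb);               // flush + queue unref of old fb
 *
 * so the previous scanout_fb is only unref'd after the flush has latched
 * the new buffer.
 */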
/* if file!=NULL, this is preclose potential cancel-flip path */
static void complete_flip(struct drm_crtc *crtc, struct drm_file *file)
{
struct mdp5_crtc *mdp5_crtc = to_mdp5_crtc(crtc);
struct drm_device *dev = crtc->dev;
struct drm_pending_vblank_event *event;
unsigned long flags, i;
spin_lock_irqsave(&dev->event_lock, flags);
event = mdp5_crtc->event;
if (event) {
/* if this is the regular vblank case (!file), or a cancel-flip from
 * preclose on the file that requested the flip, then send the
 * event:
 */
if (!file || (event->base.file_priv == file)) {
mdp5_crtc->event = NULL;
drm_send_vblank_event(dev, mdp5_crtc->id, event);
}
}
spin_unlock_irqrestore(&dev->event_lock, flags);
for (i = 0; i < ARRAY_SIZE(mdp5_crtc->planes); i++) {
struct drm_plane *plane = mdp5_crtc->planes[i];
if (plane)
mdp5_plane_complete_flip(plane);
}
}
static void pageflip_cb(struct msm_fence_cb *cb)
{
struct mdp5_crtc *mdp5_crtc =
container_of(cb, struct mdp5_crtc, pageflip_cb);
struct drm_crtc *crtc = &mdp5_crtc->base;
struct drm_framebuffer *fb = mdp5_crtc->fb;
if (!fb)
return;
drm_framebuffer_reference(fb);
mdp5_plane_set_scanout(mdp5_crtc->plane, fb);
update_scanout(crtc, fb);
}
static void unref_fb_worker(struct drm_flip_work *work, void *val)
{
struct mdp5_crtc *mdp5_crtc =
container_of(work, struct mdp5_crtc, unref_fb_work);
struct drm_device *dev = mdp5_crtc->base.dev;
mutex_lock(&dev->mode_config.mutex);
drm_framebuffer_unreference(val);
mutex_unlock(&dev->mode_config.mutex);
}
static void mdp5_crtc_destroy(struct drm_crtc *crtc)
{
struct mdp5_crtc *mdp5_crtc = to_mdp5_crtc(crtc);
mdp5_crtc->plane->funcs->destroy(mdp5_crtc->plane);
drm_crtc_cleanup(crtc);
drm_flip_work_cleanup(&mdp5_crtc->unref_fb_work);
kfree(mdp5_crtc);
}
static void mdp5_crtc_dpms(struct drm_crtc *crtc, int mode)
{
struct mdp5_crtc *mdp5_crtc = to_mdp5_crtc(crtc);
struct mdp5_kms *mdp5_kms = get_kms(crtc);
bool enabled = (mode == DRM_MODE_DPMS_ON);
DBG("%s: mode=%d", mdp5_crtc->name, mode);
if (enabled != mdp5_crtc->enabled) {
if (enabled) {
mdp5_enable(mdp5_kms);
mdp_irq_register(&mdp5_kms->base, &mdp5_crtc->err);
} else {
mdp_irq_unregister(&mdp5_kms->base, &mdp5_crtc->err);
mdp5_disable(mdp5_kms);
}
mdp5_crtc->enabled = enabled;
}
}
static bool mdp5_crtc_mode_fixup(struct drm_crtc *crtc,
const struct drm_display_mode *mode,
struct drm_display_mode *adjusted_mode)
{
return true;
}
static void blend_setup(struct drm_crtc *crtc)
{
struct mdp5_crtc *mdp5_crtc = to_mdp5_crtc(crtc);
struct mdp5_kms *mdp5_kms = get_kms(crtc);
int id = mdp5_crtc->id;
/*
* Hard-coded setup for now until I figure out how the
* layer-mixer works
*/
/* LM[id]: */
mdp5_write(mdp5_kms, REG_MDP5_LM_BLEND_COLOR_OUT(id),
MDP5_LM_BLEND_COLOR_OUT_STAGE0_FG_ALPHA);
mdp5_write(mdp5_kms, REG_MDP5_LM_BLEND_OP_MODE(id, 0),
MDP5_LM_BLEND_OP_MODE_FG_ALPHA(FG_CONST) |
MDP5_LM_BLEND_OP_MODE_BG_ALPHA(FG_PIXEL) |
MDP5_LM_BLEND_OP_MODE_BG_INV_ALPHA);
mdp5_write(mdp5_kms, REG_MDP5_LM_BLEND_FG_ALPHA(id, 0), 0xff);
mdp5_write(mdp5_kms, REG_MDP5_LM_BLEND_BG_ALPHA(id, 0), 0x00);
/* NOTE: for LM[n] and CTL[m] it seems we do not need n==m.. but
 * we do want to be setting CTL[m].LAYER[n]. Not sure what the
 * point of having CTL[m].LAYER[o] (for o!=n) is.. maybe that is
 * used when chaining up mixers for high-resolution displays?
 */
/* CTL[id]: */
mdp5_write(mdp5_kms, REG_MDP5_CTL_LAYER_REG(id, 0),
MDP5_CTL_LAYER_REG_RGB0(STAGE0) |
MDP5_CTL_LAYER_REG_BORDER_COLOR);
mdp5_write(mdp5_kms, REG_MDP5_CTL_LAYER_REG(id, 1), 0);
mdp5_write(mdp5_kms, REG_MDP5_CTL_LAYER_REG(id, 2), 0);
mdp5_write(mdp5_kms, REG_MDP5_CTL_LAYER_REG(id, 3), 0);
mdp5_write(mdp5_kms, REG_MDP5_CTL_LAYER_REG(id, 4), 0);
}
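/* Spelled out, the hard-coded config above seems to amount to roughly:
 *
 *   OUT = FG * const(0xff) + BG * (1 - FG.pixel_alpha)
 *
 * for stage 0 of LM[id], with the RGB pipe staged at STAGE0 of CTL[id]
 * over the border color and the remaining LAYER slots cleared -- ie. a
 * single effectively-opaque fullscreen plane.
 */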
static int mdp5_crtc_mode_set(struct drm_crtc *crtc,
struct drm_display_mode *mode,
struct drm_display_mode *adjusted_mode,
int x, int y,
struct drm_framebuffer *old_fb)
{
struct mdp5_crtc *mdp5_crtc = to_mdp5_crtc(crtc);
struct mdp5_kms *mdp5_kms = get_kms(crtc);
int ret;
mode = adjusted_mode;
DBG("%s: set mode: %d:\"%s\" %d %d %d %d %d %d %d %d %d %d 0x%x 0x%x",
mdp5_crtc->name, mode->base.id, mode->name,
mode->vrefresh, mode->clock,
mode->hdisplay, mode->hsync_start,
mode->hsync_end, mode->htotal,
mode->vdisplay, mode->vsync_start,
mode->vsync_end, mode->vtotal,
mode->type, mode->flags);
/* grab extra ref for update_scanout() */
drm_framebuffer_reference(crtc->fb);
ret = mdp5_plane_mode_set(mdp5_crtc->plane, crtc, crtc->fb,
0, 0, mode->hdisplay, mode->vdisplay,
x << 16, y << 16,
mode->hdisplay << 16, mode->vdisplay << 16);
if (ret) {
dev_err(crtc->dev->dev, "%s: failed to set mode on plane: %d\n",
mdp5_crtc->name, ret);
return ret;
}
mdp5_write(mdp5_kms, REG_MDP5_LM_OUT_SIZE(mdp5_crtc->id),
MDP5_LM_OUT_SIZE_WIDTH(mode->hdisplay) |
MDP5_LM_OUT_SIZE_HEIGHT(mode->vdisplay));
update_fb(crtc, crtc->fb);
update_scanout(crtc, crtc->fb);
return 0;
}
static void mdp5_crtc_prepare(struct drm_crtc *crtc)
{
struct mdp5_crtc *mdp5_crtc = to_mdp5_crtc(crtc);
DBG("%s", mdp5_crtc->name);
/* make sure we hold a ref to mdp clks while setting up mode: */
mdp5_enable(get_kms(crtc));
mdp5_crtc_dpms(crtc, DRM_MODE_DPMS_OFF);
}
static void mdp5_crtc_commit(struct drm_crtc *crtc)
{
mdp5_crtc_dpms(crtc, DRM_MODE_DPMS_ON);
crtc_flush(crtc);
/* drop the ref to the mdp clks that we got in prepare: */
mdp5_disable(get_kms(crtc));
}
static int mdp5_crtc_mode_set_base(struct drm_crtc *crtc, int x, int y,
struct drm_framebuffer *old_fb)
{
struct mdp5_crtc *mdp5_crtc = to_mdp5_crtc(crtc);
struct drm_plane *plane = mdp5_crtc->plane;
struct drm_display_mode *mode = &crtc->mode;
int ret;
/* grab extra ref for update_scanout() */
drm_framebuffer_reference(crtc->fb);
ret = mdp5_plane_mode_set(plane, crtc, crtc->fb,
0, 0, mode->hdisplay, mode->vdisplay,
x << 16, y << 16,
mode->hdisplay << 16, mode->vdisplay << 16);
update_fb(crtc, crtc->fb);
update_scanout(crtc, crtc->fb);
return ret;
}
static void mdp5_crtc_load_lut(struct drm_crtc *crtc)
{
}
static int mdp5_crtc_page_flip(struct drm_crtc *crtc,
struct drm_framebuffer *new_fb,
struct drm_pending_vblank_event *event,
uint32_t page_flip_flags)
{
struct mdp5_crtc *mdp5_crtc = to_mdp5_crtc(crtc);
struct drm_device *dev = crtc->dev;
struct drm_gem_object *obj;
unsigned long flags;
if (mdp5_crtc->event) {
dev_err(dev->dev, "already pending flip!\n");
return -EBUSY;
}
obj = msm_framebuffer_bo(new_fb, 0);
spin_lock_irqsave(&dev->event_lock, flags);
mdp5_crtc->event = event;
spin_unlock_irqrestore(&dev->event_lock, flags);
update_fb(crtc, new_fb);
return msm_gem_queue_inactive_cb(obj, &mdp5_crtc->pageflip_cb);
}
static int mdp5_crtc_set_property(struct drm_crtc *crtc,
struct drm_property *property, uint64_t val)
{
// XXX
return -EINVAL;
}
static const struct drm_crtc_funcs mdp5_crtc_funcs = {
.set_config = drm_crtc_helper_set_config,
.destroy = mdp5_crtc_destroy,
.page_flip = mdp5_crtc_page_flip,
.set_property = mdp5_crtc_set_property,
};
static const struct drm_crtc_helper_funcs mdp5_crtc_helper_funcs = {
.dpms = mdp5_crtc_dpms,
.mode_fixup = mdp5_crtc_mode_fixup,
.mode_set = mdp5_crtc_mode_set,
.prepare = mdp5_crtc_prepare,
.commit = mdp5_crtc_commit,
.mode_set_base = mdp5_crtc_mode_set_base,
.load_lut = mdp5_crtc_load_lut,
};
static void mdp5_crtc_vblank_irq(struct mdp_irq *irq, uint32_t irqstatus)
{
struct mdp5_crtc *mdp5_crtc = container_of(irq, struct mdp5_crtc, vblank);
struct drm_crtc *crtc = &mdp5_crtc->base;
struct msm_drm_private *priv = crtc->dev->dev_private;
unsigned pending;
mdp_irq_unregister(&get_kms(crtc)->base, &mdp5_crtc->vblank);
pending = atomic_xchg(&mdp5_crtc->pending, 0);
if (pending & PENDING_FLIP) {
complete_flip(crtc, NULL);
drm_flip_work_commit(&mdp5_crtc->unref_fb_work, priv->wq);
}
}
static void mdp5_crtc_err_irq(struct mdp_irq *irq, uint32_t irqstatus)
{
struct mdp5_crtc *mdp5_crtc = container_of(irq, struct mdp5_crtc, err);
struct drm_crtc *crtc = &mdp5_crtc->base;
DBG("%s: error: %08x", mdp5_crtc->name, irqstatus);
crtc_flush(crtc);
}
uint32_t mdp5_crtc_vblank(struct drm_crtc *crtc)
{
struct mdp5_crtc *mdp5_crtc = to_mdp5_crtc(crtc);
return mdp5_crtc->vblank.irqmask;
}
void mdp5_crtc_cancel_pending_flip(struct drm_crtc *crtc, struct drm_file *file)
{
DBG("cancel: %p", file);
complete_flip(crtc, file);
}
/* set interface for routing crtc->encoder: */
void mdp5_crtc_set_intf(struct drm_crtc *crtc, int intf,
enum mdp5_intf intf_id)
{
struct mdp5_crtc *mdp5_crtc = to_mdp5_crtc(crtc);
struct mdp5_kms *mdp5_kms = get_kms(crtc);
static const enum mdp5_intfnum intfnum[] = {
INTF0, INTF1, INTF2, INTF3,
};
uint32_t intf_sel;
/* now that we know which irqs we want: */
mdp5_crtc->err.irqmask = intf2err(intf);
mdp5_crtc->vblank.irqmask = intf2vblank(intf);
/* when called from modeset_init(), skip the rest until later: */
if (!mdp5_kms)
return;
intf_sel = mdp5_read(mdp5_kms, REG_MDP5_DISP_INTF_SEL);
switch (intf) {
case 0:
intf_sel &= ~MDP5_DISP_INTF_SEL_INTF0__MASK;
intf_sel |= MDP5_DISP_INTF_SEL_INTF0(intf_id);
break;
case 1:
intf_sel &= ~MDP5_DISP_INTF_SEL_INTF1__MASK;
intf_sel |= MDP5_DISP_INTF_SEL_INTF1(intf_id);
break;
case 2:
intf_sel &= ~MDP5_DISP_INTF_SEL_INTF2__MASK;
intf_sel |= MDP5_DISP_INTF_SEL_INTF2(intf_id);
break;
case 3:
intf_sel &= ~MDP5_DISP_INTF_SEL_INTF3__MASK;
intf_sel |= MDP5_DISP_INTF_SEL_INTF3(intf_id);
break;
default:
BUG();
break;
}
blend_setup(crtc);
DBG("%s: intf_sel=%08x", mdp5_crtc->name, intf_sel);
mdp5_write(mdp5_kms, REG_MDP5_DISP_INTF_SEL, intf_sel);
mdp5_write(mdp5_kms, REG_MDP5_CTL_OP(mdp5_crtc->id),
MDP5_CTL_OP_MODE(MODE_NONE) |
MDP5_CTL_OP_INTF_NUM(intfnum[intf]));
crtc_flush(crtc);
}
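/* For the one configuration wired up in modeset_init() further below
 * (crtc 0 on intf=3 driving HDMI), this boils down to roughly:
 *
 *   DISP_INTF_SEL.INTF3 = INTF_HDMI
 *   CTL_OP[crtc->id]    = MODE(MODE_NONE) | INTF_NUM(INTF3)
 *
 * followed by a crtc_flush() so the CTL change takes effect.
 */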
static void set_attach(struct drm_crtc *crtc, enum mdp5_pipe pipe_id,
struct drm_plane *plane)
{
struct mdp5_crtc *mdp5_crtc = to_mdp5_crtc(crtc);
BUG_ON(pipe_id >= ARRAY_SIZE(mdp5_crtc->planes));
if (mdp5_crtc->planes[pipe_id] == plane)
return;
mdp5_crtc->planes[pipe_id] = plane;
blend_setup(crtc);
if (mdp5_crtc->enabled && (plane != mdp5_crtc->plane))
crtc_flush(crtc);
}
void mdp5_crtc_attach(struct drm_crtc *crtc, struct drm_plane *plane)
{
set_attach(crtc, mdp5_plane_pipe(plane), plane);
}
void mdp5_crtc_detach(struct drm_crtc *crtc, struct drm_plane *plane)
{
set_attach(crtc, mdp5_plane_pipe(plane), NULL);
}
/* initialize crtc */
struct drm_crtc *mdp5_crtc_init(struct drm_device *dev,
struct drm_plane *plane, int id)
{
struct drm_crtc *crtc = NULL;
struct mdp5_crtc *mdp5_crtc;
int ret;
mdp5_crtc = kzalloc(sizeof(*mdp5_crtc), GFP_KERNEL);
if (!mdp5_crtc) {
ret = -ENOMEM;
goto fail;
}
crtc = &mdp5_crtc->base;
mdp5_crtc->plane = plane;
mdp5_crtc->id = id;
mdp5_crtc->vblank.irq = mdp5_crtc_vblank_irq;
mdp5_crtc->err.irq = mdp5_crtc_err_irq;
snprintf(mdp5_crtc->name, sizeof(mdp5_crtc->name), "%s:%d",
pipe2name(mdp5_plane_pipe(plane)), id);
ret = drm_flip_work_init(&mdp5_crtc->unref_fb_work, 16,
"unref fb", unref_fb_worker);
if (ret)
goto fail;
INIT_FENCE_CB(&mdp5_crtc->pageflip_cb, pageflip_cb);
drm_crtc_init(dev, crtc, &mdp5_crtc_funcs);
drm_crtc_helper_add(crtc, &mdp5_crtc_helper_funcs);
mdp5_plane_install_properties(mdp5_crtc->plane, &crtc->base);
return crtc;
fail:
if (crtc)
mdp5_crtc_destroy(crtc);
return ERR_PTR(ret);
}
/*
* Copyright (C) 2013 Red Hat
* Author: Rob Clark <robdclark@gmail.com>
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 as published by
* the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
* You should have received a copy of the GNU General Public License along with
* this program. If not, see <http://www.gnu.org/licenses/>.
*/
#include "mdp5_kms.h"
#include "drm_crtc.h"
#include "drm_crtc_helper.h"
struct mdp5_encoder {
struct drm_encoder base;
int intf;
enum mdp5_intf intf_id;
bool enabled;
uint32_t bsc;
};
#define to_mdp5_encoder(x) container_of(x, struct mdp5_encoder, base)
static struct mdp5_kms *get_kms(struct drm_encoder *encoder)
{
struct msm_drm_private *priv = encoder->dev->dev_private;
return to_mdp5_kms(to_mdp_kms(priv->kms));
}
#ifdef CONFIG_MSM_BUS_SCALING
#include <mach/board.h>
#include <mach/msm_bus.h>
#include <mach/msm_bus_board.h>
#define MDP_BUS_VECTOR_ENTRY(ab_val, ib_val) \
{ \
.src = MSM_BUS_MASTER_MDP_PORT0, \
.dst = MSM_BUS_SLAVE_EBI_CH0, \
.ab = (ab_val), \
.ib = (ib_val), \
}
static struct msm_bus_vectors mdp_bus_vectors[] = {
MDP_BUS_VECTOR_ENTRY(0, 0),
MDP_BUS_VECTOR_ENTRY(2000000000, 2000000000),
};
static struct msm_bus_paths mdp_bus_usecases[] = { {
.num_paths = 1,
.vectors = &mdp_bus_vectors[0],
}, {
.num_paths = 1,
.vectors = &mdp_bus_vectors[1],
} };
static struct msm_bus_scale_pdata mdp_bus_scale_table = {
.usecase = mdp_bus_usecases,
.num_usecases = ARRAY_SIZE(mdp_bus_usecases),
.name = "mdss_mdp",
};
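/* Two usecases are registered above: index 0 is the idle vote (0 ab/ib)
 * and index 1 votes for 2000000000 (~2GB/s) ab/ib on the MDP->EBI path.
 * bs_set() below picks the index, so a minimal usage sketch would be:
 *
 *   bs_init(mdp5_encoder);     // register a client against the table above
 *   bs_set(mdp5_encoder, 1);   // vote for bandwidth before enabling timing
 *   bs_set(mdp5_encoder, 0);   // (nominally) drop the vote when disabled
 */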
static void bs_init(struct mdp5_encoder *mdp5_encoder)
{
mdp5_encoder->bsc = msm_bus_scale_register_client(
&mdp_bus_scale_table);
DBG("bus scale client: %08x", mdp5_encoder->bsc);
}
static void bs_fini(struct mdp5_encoder *mdp5_encoder)
{
if (mdp5_encoder->bsc) {
msm_bus_scale_unregister_client(mdp5_encoder->bsc);
mdp5_encoder->bsc = 0;
}
}
static void bs_set(struct mdp5_encoder *mdp5_encoder, int idx)
{
if (mdp5_encoder->bsc) {
DBG("set bus scaling: %d", idx);
/* HACK: scaling down, and then immediately back up
* seems to leave things broken (underflow).. so
* never disable:
*/
idx = 1;
msm_bus_scale_client_update_request(mdp5_encoder->bsc, idx);
}
}
#else
static void bs_init(struct mdp5_encoder *mdp5_encoder) {}
static void bs_fini(struct mdp5_encoder *mdp5_encoder) {}
static void bs_set(struct mdp5_encoder *mdp5_encoder, int idx) {}
#endif
static void mdp5_encoder_destroy(struct drm_encoder *encoder)
{
struct mdp5_encoder *mdp5_encoder = to_mdp5_encoder(encoder);
bs_fini(mdp5_encoder);
drm_encoder_cleanup(encoder);
kfree(mdp5_encoder);
}
static const struct drm_encoder_funcs mdp5_encoder_funcs = {
.destroy = mdp5_encoder_destroy,
};
static void mdp5_encoder_dpms(struct drm_encoder *encoder, int mode)
{
struct mdp5_encoder *mdp5_encoder = to_mdp5_encoder(encoder);
struct mdp5_kms *mdp5_kms = get_kms(encoder);
int intf = mdp5_encoder->intf;
bool enabled = (mode == DRM_MODE_DPMS_ON);
DBG("mode=%d", mode);
if (enabled == mdp5_encoder->enabled)
return;
if (enabled) {
bs_set(mdp5_encoder, 1);
mdp5_write(mdp5_kms, REG_MDP5_INTF_TIMING_ENGINE_EN(intf), 1);
} else {
mdp5_write(mdp5_kms, REG_MDP5_INTF_TIMING_ENGINE_EN(intf), 0);
bs_set(mdp5_encoder, 0);
}
mdp5_encoder->enabled = enabled;
}
static bool mdp5_encoder_mode_fixup(struct drm_encoder *encoder,
const struct drm_display_mode *mode,
struct drm_display_mode *adjusted_mode)
{
return true;
}
static void mdp5_encoder_mode_set(struct drm_encoder *encoder,
struct drm_display_mode *mode,
struct drm_display_mode *adjusted_mode)
{
struct mdp5_encoder *mdp5_encoder = to_mdp5_encoder(encoder);
struct mdp5_kms *mdp5_kms = get_kms(encoder);
int intf = mdp5_encoder->intf;
uint32_t dtv_hsync_skew, vsync_period, vsync_len, ctrl_pol;
uint32_t display_v_start, display_v_end;
uint32_t hsync_start_x, hsync_end_x;
uint32_t format;
mode = adjusted_mode;
DBG("set mode: %d:\"%s\" %d %d %d %d %d %d %d %d %d %d 0x%x 0x%x",
mode->base.id, mode->name,
mode->vrefresh, mode->clock,
mode->hdisplay, mode->hsync_start,
mode->hsync_end, mode->htotal,
mode->vdisplay, mode->vsync_start,
mode->vsync_end, mode->vtotal,
mode->type, mode->flags);
ctrl_pol = 0;
if (mode->flags & DRM_MODE_FLAG_NHSYNC)
ctrl_pol |= MDP5_INTF_POLARITY_CTL_HSYNC_LOW;
if (mode->flags & DRM_MODE_FLAG_NVSYNC)
ctrl_pol |= MDP5_INTF_POLARITY_CTL_VSYNC_LOW;
/* probably need to get DATA_EN polarity from panel.. */
dtv_hsync_skew = 0; /* get this from panel? */
format = 0x213f; /* get this from panel? */
hsync_start_x = (mode->htotal - mode->hsync_start);
hsync_end_x = mode->htotal - (mode->hsync_start - mode->hdisplay) - 1;
vsync_period = mode->vtotal * mode->htotal;
vsync_len = (mode->vsync_end - mode->vsync_start) * mode->htotal;
display_v_start = (mode->vtotal - mode->vsync_start) * mode->htotal + dtv_hsync_skew;
display_v_end = vsync_period - ((mode->vsync_start - mode->vdisplay) * mode->htotal) + dtv_hsync_skew - 1;
mdp5_write(mdp5_kms, REG_MDP5_INTF_HSYNC_CTL(intf),
MDP5_INTF_HSYNC_CTL_PULSEW(mode->hsync_end - mode->hsync_start) |
MDP5_INTF_HSYNC_CTL_PERIOD(mode->htotal));
mdp5_write(mdp5_kms, REG_MDP5_INTF_VSYNC_PERIOD_F0(intf), vsync_period);
mdp5_write(mdp5_kms, REG_MDP5_INTF_VSYNC_LEN_F0(intf), vsync_len);
mdp5_write(mdp5_kms, REG_MDP5_INTF_DISPLAY_HCTL(intf),
MDP5_INTF_DISPLAY_HCTL_START(hsync_start_x) |
MDP5_INTF_DISPLAY_HCTL_END(hsync_end_x));
mdp5_write(mdp5_kms, REG_MDP5_INTF_DISPLAY_VSTART_F0(intf), display_v_start);
mdp5_write(mdp5_kms, REG_MDP5_INTF_DISPLAY_VEND_F0(intf), display_v_end);
mdp5_write(mdp5_kms, REG_MDP5_INTF_BORDER_COLOR(intf), 0);
mdp5_write(mdp5_kms, REG_MDP5_INTF_UNDERFLOW_COLOR(intf), 0xff);
mdp5_write(mdp5_kms, REG_MDP5_INTF_HSYNC_SKEW(intf), dtv_hsync_skew);
mdp5_write(mdp5_kms, REG_MDP5_INTF_POLARITY_CTL(intf), ctrl_pol);
mdp5_write(mdp5_kms, REG_MDP5_INTF_ACTIVE_HCTL(intf),
MDP5_INTF_ACTIVE_HCTL_START(0) |
MDP5_INTF_ACTIVE_HCTL_END(0));
mdp5_write(mdp5_kms, REG_MDP5_INTF_ACTIVE_VSTART_F0(intf), 0);
mdp5_write(mdp5_kms, REG_MDP5_INTF_ACTIVE_VEND_F0(intf), 0);
mdp5_write(mdp5_kms, REG_MDP5_INTF_PANEL_FORMAT(intf), format);
mdp5_write(mdp5_kms, REG_MDP5_INTF_FRAME_LINE_COUNT_EN(intf), 0x3); /* frame+line? */
}
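/* A worked example of the timing math above, assuming a standard CEA
 * 1920x1080@60 mode (htotal=2200, hsync 2008-2052, vtotal=1125,
 * vsync 1084-1089, hsync_skew=0):
 *
 *   hsync_start_x   = 2200 - 2008              = 192
 *   hsync_end_x     = 2200 - (2008 - 1920) - 1 = 2111
 *   vsync_period    = 1125 * 2200              = 2475000
 *   vsync_len       = (1089 - 1084) * 2200     = 11000
 *   display_v_start = (1125 - 1084) * 2200     = 90200
 *   display_v_end   = 2475000 - (1084 - 1080) * 2200 - 1 = 2466199
 */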
static void mdp5_encoder_prepare(struct drm_encoder *encoder)
{
mdp5_encoder_dpms(encoder, DRM_MODE_DPMS_OFF);
}
static void mdp5_encoder_commit(struct drm_encoder *encoder)
{
struct mdp5_encoder *mdp5_encoder = to_mdp5_encoder(encoder);
mdp5_crtc_set_intf(encoder->crtc, mdp5_encoder->intf,
mdp5_encoder->intf_id);
mdp5_encoder_dpms(encoder, DRM_MODE_DPMS_ON);
}
static const struct drm_encoder_helper_funcs mdp5_encoder_helper_funcs = {
.dpms = mdp5_encoder_dpms,
.mode_fixup = mdp5_encoder_mode_fixup,
.mode_set = mdp5_encoder_mode_set,
.prepare = mdp5_encoder_prepare,
.commit = mdp5_encoder_commit,
};
/* initialize encoder */
struct drm_encoder *mdp5_encoder_init(struct drm_device *dev, int intf,
enum mdp5_intf intf_id)
{
struct drm_encoder *encoder = NULL;
struct mdp5_encoder *mdp5_encoder;
int ret;
mdp5_encoder = kzalloc(sizeof(*mdp5_encoder), GFP_KERNEL);
if (!mdp5_encoder) {
ret = -ENOMEM;
goto fail;
}
mdp5_encoder->intf = intf;
mdp5_encoder->intf_id = intf_id;
encoder = &mdp5_encoder->base;
drm_encoder_init(dev, encoder, &mdp5_encoder_funcs,
DRM_MODE_ENCODER_TMDS);
drm_encoder_helper_add(encoder, &mdp5_encoder_helper_funcs);
bs_init(mdp5_encoder);
return encoder;
fail:
if (encoder)
mdp5_encoder_destroy(encoder);
return ERR_PTR(ret);
}
/*
* Copyright (C) 2013 Red Hat
* Author: Rob Clark <robdclark@gmail.com>
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 as published by
* the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
* You should have received a copy of the GNU General Public License along with
* this program. If not, see <http://www.gnu.org/licenses/>.
*/
#include "msm_drv.h"
#include "mdp5_kms.h"
void mdp5_set_irqmask(struct mdp_kms *mdp_kms, uint32_t irqmask)
{
mdp5_write(to_mdp5_kms(mdp_kms), REG_MDP5_INTR_EN, irqmask);
}
static void mdp5_irq_error_handler(struct mdp_irq *irq, uint32_t irqstatus)
{
DRM_ERROR("errors: %08x\n", irqstatus);
}
void mdp5_irq_preinstall(struct msm_kms *kms)
{
struct mdp5_kms *mdp5_kms = to_mdp5_kms(to_mdp_kms(kms));
mdp5_write(mdp5_kms, REG_MDP5_INTR_CLEAR, 0xffffffff);
}
int mdp5_irq_postinstall(struct msm_kms *kms)
{
struct mdp_kms *mdp_kms = to_mdp_kms(kms);
struct mdp5_kms *mdp5_kms = to_mdp5_kms(mdp_kms);
struct mdp_irq *error_handler = &mdp5_kms->error_handler;
error_handler->irq = mdp5_irq_error_handler;
error_handler->irqmask = MDP5_IRQ_INTF0_UNDER_RUN |
MDP5_IRQ_INTF1_UNDER_RUN |
MDP5_IRQ_INTF2_UNDER_RUN |
MDP5_IRQ_INTF3_UNDER_RUN;
mdp_irq_register(mdp_kms, error_handler);
return 0;
}
void mdp5_irq_uninstall(struct msm_kms *kms)
{
struct mdp5_kms *mdp5_kms = to_mdp5_kms(to_mdp_kms(kms));
mdp5_write(mdp5_kms, REG_MDP5_INTR_EN, 0x00000000);
}
static void mdp5_irq_mdp(struct mdp_kms *mdp_kms)
{
struct mdp5_kms *mdp5_kms = to_mdp5_kms(mdp_kms);
struct drm_device *dev = mdp5_kms->dev;
struct msm_drm_private *priv = dev->dev_private;
unsigned int id;
uint32_t status;
status = mdp5_read(mdp5_kms, REG_MDP5_INTR_STATUS);
mdp5_write(mdp5_kms, REG_MDP5_INTR_CLEAR, status);
VERB("status=%08x", status);
for (id = 0; id < priv->num_crtcs; id++)
if (status & mdp5_crtc_vblank(priv->crtcs[id]))
drm_handle_vblank(dev, id);
mdp_dispatch_irqs(mdp_kms, status);
}
irqreturn_t mdp5_irq(struct msm_kms *kms)
{
struct mdp_kms *mdp_kms = to_mdp_kms(kms);
struct mdp5_kms *mdp5_kms = to_mdp5_kms(mdp_kms);
uint32_t intr;
intr = mdp5_read(mdp5_kms, REG_MDP5_HW_INTR_STATUS);
VERB("intr=%08x", intr);
if (intr & MDP5_HW_INTR_STATUS_INTR_MDP)
mdp5_irq_mdp(mdp_kms);
if (intr & MDP5_HW_INTR_STATUS_INTR_HDMI)
hdmi_irq(0, mdp5_kms->hdmi);
return IRQ_HANDLED;
}
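/* So interrupt handling is two-level: the top-level HW_INTR_STATUS register
 * demuxes between the MDP core and the HDMI block, and mdp5_irq_mdp() then
 * reads/clears INTR_STATUS and hands the bits to mdp_dispatch_irqs(), which
 * (presumably in the shared mdp irq helpers) calls back into the registered
 * mdp_irq handlers -- eg. the per-crtc vblank/err irqs and the error_handler
 * registered in mdp5_irq_postinstall() above.
 */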
int mdp5_enable_vblank(struct msm_kms *kms, struct drm_crtc *crtc)
{
mdp_update_vblank_mask(to_mdp_kms(kms),
mdp5_crtc_vblank(crtc), true);
return 0;
}
void mdp5_disable_vblank(struct msm_kms *kms, struct drm_crtc *crtc)
{
mdp_update_vblank_mask(to_mdp_kms(kms),
mdp5_crtc_vblank(crtc), false);
}
/*
* Copyright (C) 2013 Red Hat
* Author: Rob Clark <robdclark@gmail.com>
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 as published by
* the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
* You should have received a copy of the GNU General Public License along with
* this program. If not, see <http://www.gnu.org/licenses/>.
*/
#include "msm_drv.h"
#include "msm_mmu.h"
#include "mdp5_kms.h"
static struct mdp5_platform_config *mdp5_get_config(struct platform_device *dev);
static int mdp5_hw_init(struct msm_kms *kms)
{
struct mdp5_kms *mdp5_kms = to_mdp5_kms(to_mdp_kms(kms));
struct drm_device *dev = mdp5_kms->dev;
uint32_t version, major, minor;
int ret = 0;
pm_runtime_get_sync(dev->dev);
mdp5_enable(mdp5_kms);
version = mdp5_read(mdp5_kms, REG_MDP5_MDP_VERSION);
mdp5_disable(mdp5_kms);
major = FIELD(version, MDP5_MDP_VERSION_MAJOR);
minor = FIELD(version, MDP5_MDP_VERSION_MINOR);
DBG("found MDP5 version v%d.%d", major, minor);
if ((major != 1) || ((minor != 0) && (minor != 2))) {
dev_err(dev->dev, "unexpected MDP version: v%d.%d\n",
major, minor);
ret = -ENXIO;
goto out;
}
mdp5_kms->rev = minor;
/* Magic unknown register writes:
*
* W VBIF:0x004 00000001 (mdss_mdp.c:839)
* W MDP5:0x2e0 0xe9 (mdss_mdp.c:839)
* W MDP5:0x2e4 0x55 (mdss_mdp.c:839)
* W MDP5:0x3ac 0xc0000ccc (mdss_mdp.c:839)
* W MDP5:0x3b4 0xc0000ccc (mdss_mdp.c:839)
* W MDP5:0x3bc 0xcccccc (mdss_mdp.c:839)
* W MDP5:0x4a8 0xcccc0c0 (mdss_mdp.c:839)
* W MDP5:0x4b0 0xccccc0c0 (mdss_mdp.c:839)
* W MDP5:0x4b8 0xccccc000 (mdss_mdp.c:839)
*
* The downstream fbdev driver gets these register offsets/values
* from DT.. not really sure what these registers are, or whether
* they take different values for different boards/SoCs, etc. I
* guess they are the golden registers.
*
* Not setting these does not seem to cause any problem. But
* we may be getting lucky with the bootloader initializing
* them for us. OTOH, if we can always count on the bootloader
* setting the golden registers, then perhaps we don't need to
* care.
*/
mdp5_write(mdp5_kms, REG_MDP5_DISP_INTF_SEL, 0);
mdp5_write(mdp5_kms, REG_MDP5_CTL_OP(0), 0);
mdp5_write(mdp5_kms, REG_MDP5_CTL_OP(1), 0);
mdp5_write(mdp5_kms, REG_MDP5_CTL_OP(2), 0);
mdp5_write(mdp5_kms, REG_MDP5_CTL_OP(3), 0);
out:
pm_runtime_put_sync(dev->dev);
return ret;
}
static long mdp5_round_pixclk(struct msm_kms *kms, unsigned long rate,
struct drm_encoder *encoder)
{
return rate;
}
static void mdp5_preclose(struct msm_kms *kms, struct drm_file *file)
{
struct mdp5_kms *mdp5_kms = to_mdp5_kms(to_mdp_kms(kms));
struct msm_drm_private *priv = mdp5_kms->dev->dev_private;
unsigned i;
for (i = 0; i < priv->num_crtcs; i++)
mdp5_crtc_cancel_pending_flip(priv->crtcs[i], file);
}
static void mdp5_destroy(struct msm_kms *kms)
{
struct mdp5_kms *mdp5_kms = to_mdp5_kms(to_mdp_kms(kms));
kfree(mdp5_kms);
}
static const struct mdp_kms_funcs kms_funcs = {
.base = {
.hw_init = mdp5_hw_init,
.irq_preinstall = mdp5_irq_preinstall,
.irq_postinstall = mdp5_irq_postinstall,
.irq_uninstall = mdp5_irq_uninstall,
.irq = mdp5_irq,
.enable_vblank = mdp5_enable_vblank,
.disable_vblank = mdp5_disable_vblank,
.get_format = mdp_get_format,
.round_pixclk = mdp5_round_pixclk,
.preclose = mdp5_preclose,
.destroy = mdp5_destroy,
},
.set_irqmask = mdp5_set_irqmask,
};
int mdp5_disable(struct mdp5_kms *mdp5_kms)
{
DBG("");
clk_disable_unprepare(mdp5_kms->ahb_clk);
clk_disable_unprepare(mdp5_kms->axi_clk);
clk_disable_unprepare(mdp5_kms->core_clk);
clk_disable_unprepare(mdp5_kms->lut_clk);
return 0;
}
int mdp5_enable(struct mdp5_kms *mdp5_kms)
{
DBG("");
clk_prepare_enable(mdp5_kms->ahb_clk);
clk_prepare_enable(mdp5_kms->axi_clk);
clk_prepare_enable(mdp5_kms->core_clk);
clk_prepare_enable(mdp5_kms->lut_clk);
return 0;
}
static int modeset_init(struct mdp5_kms *mdp5_kms)
{
static const enum mdp5_pipe crtcs[] = {
SSPP_RGB0, SSPP_RGB1, SSPP_RGB2,
};
struct drm_device *dev = mdp5_kms->dev;
struct msm_drm_private *priv = dev->dev_private;
struct drm_encoder *encoder;
int i, ret;
/* construct CRTCs: */
for (i = 0; i < ARRAY_SIZE(crtcs); i++) {
struct drm_plane *plane;
struct drm_crtc *crtc;
plane = mdp5_plane_init(dev, crtcs[i], true);
if (IS_ERR(plane)) {
ret = PTR_ERR(plane);
dev_err(dev->dev, "failed to construct plane for %s (%d)\n",
pipe2name(crtcs[i]), ret);
goto fail;
}
crtc = mdp5_crtc_init(dev, plane, i);
if (IS_ERR(crtc)) {
ret = PTR_ERR(crtc);
dev_err(dev->dev, "failed to construct crtc for %s (%d)\n",
pipe2name(crtcs[i]), ret);
goto fail;
}
priv->crtcs[priv->num_crtcs++] = crtc;
}
/* Construct encoder for HDMI: */
encoder = mdp5_encoder_init(dev, 3, INTF_HDMI);
if (IS_ERR(encoder)) {
dev_err(dev->dev, "failed to construct encoder\n");
ret = PTR_ERR(encoder);
goto fail;
}
/* NOTE: the vsync and error irq's are actually associated with
* the INTF/encoder.. the easiest way to deal with this (ie. what
* we do now) is assume a fixed relationship between crtc's and
* encoders. I'm not sure if there is ever a need to more freely
* assign crtcs to encoders, but if there is then we need to take
* care of error and vblank irq's that the crtc has registered,
* and also update user-requested vblank_mask.
*/
encoder->possible_crtcs = BIT(0);
mdp5_crtc_set_intf(priv->crtcs[0], 3, INTF_HDMI);
priv->encoders[priv->num_encoders++] = encoder;
/* Construct bridge/connector for HDMI: */
mdp5_kms->hdmi = hdmi_init(dev, encoder);
if (IS_ERR(mdp5_kms->hdmi)) {
ret = PTR_ERR(mdp5_kms->hdmi);
dev_err(dev->dev, "failed to initialize HDMI: %d\n", ret);
goto fail;
}
return 0;
fail:
return ret;
}
static const char *iommu_ports[] = {
"mdp_0",
};
static int get_clk(struct platform_device *pdev, struct clk **clkp,
const char *name)
{
struct device *dev = &pdev->dev;
struct clk *clk = devm_clk_get(dev, name);
if (IS_ERR(clk)) {
dev_err(dev, "failed to get %s (%ld)\n", name, PTR_ERR(clk));
return PTR_ERR(clk);
}
*clkp = clk;
return 0;
}
struct msm_kms *mdp5_kms_init(struct drm_device *dev)
{
struct platform_device *pdev = dev->platformdev;
struct mdp5_platform_config *config = mdp5_get_config(pdev);
struct mdp5_kms *mdp5_kms;
struct msm_kms *kms = NULL;
struct msm_mmu *mmu;
int ret;
mdp5_kms = kzalloc(sizeof(*mdp5_kms), GFP_KERNEL);
if (!mdp5_kms) {
dev_err(dev->dev, "failed to allocate kms\n");
ret = -ENOMEM;
goto fail;
}
mdp_kms_init(&mdp5_kms->base, &kms_funcs);
kms = &mdp5_kms->base.base;
mdp5_kms->dev = dev;
mdp5_kms->smp_blk_cnt = config->smp_blk_cnt;
mdp5_kms->mmio = msm_ioremap(pdev, "mdp_phys", "MDP5");
if (IS_ERR(mdp5_kms->mmio)) {
ret = PTR_ERR(mdp5_kms->mmio);
goto fail;
}
mdp5_kms->vbif = msm_ioremap(pdev, "vbif_phys", "VBIF");
if (IS_ERR(mdp5_kms->vbif)) {
ret = PTR_ERR(mdp5_kms->vbif);
goto fail;
}
mdp5_kms->vdd = devm_regulator_get(&pdev->dev, "vdd");
if (IS_ERR(mdp5_kms->vdd)) {
ret = PTR_ERR(mdp5_kms->vdd);
goto fail;
}
ret = regulator_enable(mdp5_kms->vdd);
if (ret) {
dev_err(dev->dev, "failed to enable regulator vdd: %d\n", ret);
goto fail;
}
ret = get_clk(pdev, &mdp5_kms->axi_clk, "bus_clk") ||
get_clk(pdev, &mdp5_kms->ahb_clk, "iface_clk") ||
get_clk(pdev, &mdp5_kms->src_clk, "core_clk_src") ||
get_clk(pdev, &mdp5_kms->core_clk, "core_clk") ||
get_clk(pdev, &mdp5_kms->lut_clk, "lut_clk") ||
get_clk(pdev, &mdp5_kms->vsync_clk, "vsync_clk");
if (ret)
goto fail;
ret = clk_set_rate(mdp5_kms->src_clk, config->max_clk);
/* make sure things are off before attaching iommu (bootloader could
* have left things on, in which case we'll start getting faults if
* we don't disable):
*/
mdp5_enable(mdp5_kms);
mdp5_write(mdp5_kms, REG_MDP5_INTF_TIMING_ENGINE_EN(0), 0);
mdp5_write(mdp5_kms, REG_MDP5_INTF_TIMING_ENGINE_EN(1), 0);
mdp5_write(mdp5_kms, REG_MDP5_INTF_TIMING_ENGINE_EN(2), 0);
mdp5_write(mdp5_kms, REG_MDP5_INTF_TIMING_ENGINE_EN(3), 0);
mdp5_disable(mdp5_kms);
mdelay(16);
if (config->iommu) {
mmu = msm_iommu_new(dev, config->iommu);
if (IS_ERR(mmu)) {
ret = PTR_ERR(mmu);
goto fail;
}
ret = mmu->funcs->attach(mmu, iommu_ports,
ARRAY_SIZE(iommu_ports));
if (ret)
goto fail;
} else {
dev_info(dev->dev, "no iommu, fallback to phys "
"contig buffers for scanout\n");
mmu = NULL;
}
mdp5_kms->id = msm_register_mmu(dev, mmu);
if (mdp5_kms->id < 0) {
ret = mdp5_kms->id;
dev_err(dev->dev, "failed to register mdp5 iommu: %d\n", ret);
goto fail;
}
ret = modeset_init(mdp5_kms);
if (ret) {
dev_err(dev->dev, "modeset_init failed: %d\n", ret);
goto fail;
}
return kms;
fail:
if (kms)
mdp5_destroy(kms);
return ERR_PTR(ret);
}
static struct mdp5_platform_config *mdp5_get_config(struct platform_device *dev)
{
static struct mdp5_platform_config config = {};
#ifdef CONFIG_OF
/* TODO */
#endif
return &config;
}
/*
* Copyright (C) 2013 Red Hat
* Author: Rob Clark <robdclark@gmail.com>
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 as published by
* the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
* You should have received a copy of the GNU General Public License along with
* this program. If not, see <http://www.gnu.org/licenses/>.
*/
#ifndef __MDP5_KMS_H__
#define __MDP5_KMS_H__
#include "msm_drv.h"
#include "msm_kms.h"
#include "mdp/mdp_kms.h"
#include "mdp5.xml.h"
#include "mdp5_smp.h"
struct mdp5_kms {
struct mdp_kms base;
struct drm_device *dev;
int rev;
/* mapper-id used to request GEM buffer mapped for scanout: */
int id;
/* for tracking smp allocation amongst pipes: */
mdp5_smp_state_t smp_state;
struct mdp5_client_smp_state smp_client_state[CID_MAX];
int smp_blk_cnt;
/* io/register spaces: */
void __iomem *mmio, *vbif;
struct regulator *vdd;
struct clk *axi_clk;
struct clk *ahb_clk;
struct clk *src_clk;
struct clk *core_clk;
struct clk *lut_clk;
struct clk *vsync_clk;
struct hdmi *hdmi;
struct mdp_irq error_handler;
};
#define to_mdp5_kms(x) container_of(x, struct mdp5_kms, base)
/* platform config data (ie. from DT, or pdata) */
struct mdp5_platform_config {
struct iommu_domain *iommu;
uint32_t max_clk;
int smp_blk_cnt;
};
static inline void mdp5_write(struct mdp5_kms *mdp5_kms, u32 reg, u32 data)
{
msm_writel(data, mdp5_kms->mmio + reg);
}
static inline u32 mdp5_read(struct mdp5_kms *mdp5_kms, u32 reg)
{
return msm_readl(mdp5_kms->mmio + reg);
}
static inline const char *pipe2name(enum mdp5_pipe pipe)
{
static const char *names[] = {
#define NAME(n) [SSPP_ ## n] = #n
NAME(VIG0), NAME(VIG1), NAME(VIG2),
NAME(RGB0), NAME(RGB1), NAME(RGB2),
NAME(DMA0), NAME(DMA1),
#undef NAME
};
return names[pipe];
}
static inline uint32_t pipe2flush(enum mdp5_pipe pipe)
{
switch (pipe) {
case SSPP_VIG0: return MDP5_CTL_FLUSH_VIG0;
case SSPP_VIG1: return MDP5_CTL_FLUSH_VIG1;
case SSPP_VIG2: return MDP5_CTL_FLUSH_VIG2;
case SSPP_RGB0: return MDP5_CTL_FLUSH_RGB0;
case SSPP_RGB1: return MDP5_CTL_FLUSH_RGB1;
case SSPP_RGB2: return MDP5_CTL_FLUSH_RGB2;
case SSPP_DMA0: return MDP5_CTL_FLUSH_DMA0;
case SSPP_DMA1: return MDP5_CTL_FLUSH_DMA1;
default: return 0;
}
}
static inline int pipe2nclients(enum mdp5_pipe pipe)
{
switch (pipe) {
case SSPP_RGB0:
case SSPP_RGB1:
case SSPP_RGB2:
return 1;
default:
return 3;
}
}
static inline enum mdp5_client_id pipe2client(enum mdp5_pipe pipe, int plane)
{
WARN_ON(plane >= pipe2nclients(pipe));
switch (pipe) {
case SSPP_VIG0: return CID_VIG0_Y + plane;
case SSPP_VIG1: return CID_VIG1_Y + plane;
case SSPP_VIG2: return CID_VIG2_Y + plane;
case SSPP_RGB0: return CID_RGB0;
case SSPP_RGB1: return CID_RGB1;
case SSPP_RGB2: return CID_RGB2;
case SSPP_DMA0: return CID_DMA0_Y + plane;
case SSPP_DMA1: return CID_DMA1_Y + plane;
default: return CID_UNUSED;
}
}
static inline uint32_t mixer2flush(int lm)
{
switch (lm) {
case 0: return MDP5_CTL_FLUSH_LM0;
case 1: return MDP5_CTL_FLUSH_LM1;
case 2: return MDP5_CTL_FLUSH_LM2;
default: return 0;
}
}
static inline uint32_t intf2err(int intf)
{
switch (intf) {
case 0: return MDP5_IRQ_INTF0_UNDER_RUN;
case 1: return MDP5_IRQ_INTF1_UNDER_RUN;
case 2: return MDP5_IRQ_INTF2_UNDER_RUN;
case 3: return MDP5_IRQ_INTF3_UNDER_RUN;
default: return 0;
}
}
static inline uint32_t intf2vblank(int intf)
{
switch (intf) {
case 0: return MDP5_IRQ_INTF0_VSYNC;
case 1: return MDP5_IRQ_INTF1_VSYNC;
case 2: return MDP5_IRQ_INTF2_VSYNC;
case 3: return MDP5_IRQ_INTF3_VSYNC;
default: return 0;
}
}
int mdp5_disable(struct mdp5_kms *mdp5_kms);
int mdp5_enable(struct mdp5_kms *mdp5_kms);
void mdp5_set_irqmask(struct mdp_kms *mdp_kms, uint32_t irqmask);
void mdp5_irq_preinstall(struct msm_kms *kms);
int mdp5_irq_postinstall(struct msm_kms *kms);
void mdp5_irq_uninstall(struct msm_kms *kms);
irqreturn_t mdp5_irq(struct msm_kms *kms);
int mdp5_enable_vblank(struct msm_kms *kms, struct drm_crtc *crtc);
void mdp5_disable_vblank(struct msm_kms *kms, struct drm_crtc *crtc);
static inline
uint32_t mdp5_get_formats(enum mdp5_pipe pipe, uint32_t *pixel_formats,
uint32_t max_formats)
{
/* TODO when we have YUV, we need to filter supported formats
* based on pipe id..
*/
return mdp_get_formats(pixel_formats, max_formats);
}
void mdp5_plane_install_properties(struct drm_plane *plane,
struct drm_mode_object *obj);
void mdp5_plane_set_scanout(struct drm_plane *plane,
struct drm_framebuffer *fb);
int mdp5_plane_mode_set(struct drm_plane *plane,
struct drm_crtc *crtc, struct drm_framebuffer *fb,
int crtc_x, int crtc_y,
unsigned int crtc_w, unsigned int crtc_h,
uint32_t src_x, uint32_t src_y,
uint32_t src_w, uint32_t src_h);
void mdp5_plane_complete_flip(struct drm_plane *plane);
enum mdp5_pipe mdp5_plane_pipe(struct drm_plane *plane);
struct drm_plane *mdp5_plane_init(struct drm_device *dev,
enum mdp5_pipe pipe, bool private_plane);
uint32_t mdp5_crtc_vblank(struct drm_crtc *crtc);
void mdp5_crtc_cancel_pending_flip(struct drm_crtc *crtc, struct drm_file *file);
void mdp5_crtc_set_intf(struct drm_crtc *crtc, int intf,
enum mdp5_intf intf_id);
void mdp5_crtc_attach(struct drm_crtc *crtc, struct drm_plane *plane);
void mdp5_crtc_detach(struct drm_crtc *crtc, struct drm_plane *plane);
struct drm_crtc *mdp5_crtc_init(struct drm_device *dev,
struct drm_plane *plane, int id);
struct drm_encoder *mdp5_encoder_init(struct drm_device *dev, int intf,
enum mdp5_intf intf_id);
#endif /* __MDP5_KMS_H__ */
/*
* Copyright (C) 2013 Red Hat
* Author: Rob Clark <robdclark@gmail.com>
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 as published by
* the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
* You should have received a copy of the GNU General Public License along with
* this program. If not, see <http://www.gnu.org/licenses/>.
*/
#include "mdp5_kms.h"
struct mdp5_plane {
struct drm_plane base;
const char *name;
enum mdp5_pipe pipe;
uint32_t nformats;
uint32_t formats[32];
bool enabled;
};
#define to_mdp5_plane(x) container_of(x, struct mdp5_plane, base)
static struct mdp5_kms *get_kms(struct drm_plane *plane)
{
struct msm_drm_private *priv = plane->dev->dev_private;
return to_mdp5_kms(to_mdp_kms(priv->kms));
}
static int mdp5_plane_update(struct drm_plane *plane,
struct drm_crtc *crtc, struct drm_framebuffer *fb,
int crtc_x, int crtc_y,
unsigned int crtc_w, unsigned int crtc_h,
uint32_t src_x, uint32_t src_y,
uint32_t src_w, uint32_t src_h)
{
struct mdp5_plane *mdp5_plane = to_mdp5_plane(plane);
mdp5_plane->enabled = true;
if (plane->fb)
drm_framebuffer_unreference(plane->fb);
drm_framebuffer_reference(fb);
return mdp5_plane_mode_set(plane, crtc, fb,
crtc_x, crtc_y, crtc_w, crtc_h,
src_x, src_y, src_w, src_h);
}
static int mdp5_plane_disable(struct drm_plane *plane)
{
struct mdp5_plane *mdp5_plane = to_mdp5_plane(plane);
struct mdp5_kms *mdp5_kms = get_kms(plane);
enum mdp5_pipe pipe = mdp5_plane->pipe;
int i;
DBG("%s: disable", mdp5_plane->name);
/* update our SMP request to zero (release all our blks): */
for (i = 0; i < pipe2nclients(pipe); i++)
mdp5_smp_request(mdp5_kms, pipe2client(pipe, i), 0);
/* TODO detaching now will cause us not to get the last
* vblank and mdp5_smp_commit().. so other planes will
* still see smp blocks previously allocated to us as
* in-use..
*/
if (plane->crtc)
mdp5_crtc_detach(plane->crtc, plane);
return 0;
}
static void mdp5_plane_destroy(struct drm_plane *plane)
{
struct mdp5_plane *mdp5_plane = to_mdp5_plane(plane);
mdp5_plane_disable(plane);
drm_plane_cleanup(plane);
kfree(mdp5_plane);
}
/* helper to install properties which are common to planes and crtcs */
void mdp5_plane_install_properties(struct drm_plane *plane,
struct drm_mode_object *obj)
{
// XXX
}
int mdp5_plane_set_property(struct drm_plane *plane,
struct drm_property *property, uint64_t val)
{
// XXX
return -EINVAL;
}
static const struct drm_plane_funcs mdp5_plane_funcs = {
.update_plane = mdp5_plane_update,
.disable_plane = mdp5_plane_disable,
.destroy = mdp5_plane_destroy,
.set_property = mdp5_plane_set_property,
};
void mdp5_plane_set_scanout(struct drm_plane *plane,
struct drm_framebuffer *fb)
{
struct mdp5_plane *mdp5_plane = to_mdp5_plane(plane);
struct mdp5_kms *mdp5_kms = get_kms(plane);
enum mdp5_pipe pipe = mdp5_plane->pipe;
uint32_t nplanes = drm_format_num_planes(fb->pixel_format);
uint32_t iova[4];
int i;
for (i = 0; i < nplanes; i++) {
struct drm_gem_object *bo = msm_framebuffer_bo(fb, i);
msm_gem_get_iova(bo, mdp5_kms->id, &iova[i]);
}
for (; i < 4; i++)
iova[i] = 0;
mdp5_write(mdp5_kms, REG_MDP5_PIPE_SRC_STRIDE_A(pipe),
MDP5_PIPE_SRC_STRIDE_A_P0(fb->pitches[0]) |
MDP5_PIPE_SRC_STRIDE_A_P1(fb->pitches[1]));
mdp5_write(mdp5_kms, REG_MDP5_PIPE_SRC_STRIDE_B(pipe),
MDP5_PIPE_SRC_STRIDE_B_P2(fb->pitches[2]) |
MDP5_PIPE_SRC_STRIDE_B_P3(fb->pitches[3]));
mdp5_write(mdp5_kms, REG_MDP5_PIPE_SRC0_ADDR(pipe), iova[0]);
mdp5_write(mdp5_kms, REG_MDP5_PIPE_SRC1_ADDR(pipe), iova[1]);
mdp5_write(mdp5_kms, REG_MDP5_PIPE_SRC2_ADDR(pipe), iova[2]);
mdp5_write(mdp5_kms, REG_MDP5_PIPE_SRC3_ADDR(pipe), iova[3]);
plane->fb = fb;
}
/* NOTE: looks like if horizontal decimation is used (if we supported that)
* then the width used to calculate SMP block requirements is the post-
* decimated width. Ie. SMP buffering sits downstream of decimation (which
* presumably happens during the dma from scanout buffer).
*/
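/* A rough worked example (a sketch, not from the original code): a
* 1920-wide single-plane XRGB8888 scanout has cpp = 4, so fetch_stride =
* 1920 * 4 = 7680 bytes; with nlines = 2 that is 15360 bytes, and
* DIV_ROUND_UP(15360, SMP_BLK_SIZE) = DIV_ROUND_UP(15360, 4096) = 4 SMP
* blocks (already a power of two, so the hw rev v1.00 round-up below
* leaves it at 4).
*/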
static int request_smp_blocks(struct drm_plane *plane, uint32_t format,
uint32_t nplanes, uint32_t width)
{
struct drm_device *dev = plane->dev;
struct mdp5_plane *mdp5_plane = to_mdp5_plane(plane);
struct mdp5_kms *mdp5_kms = get_kms(plane);
enum mdp5_pipe pipe = mdp5_plane->pipe;
int i, hsub, nlines, nblks, ret;
hsub = drm_format_horz_chroma_subsampling(format);
/* different if BWC (compressed framebuffer?) enabled: */
nlines = 2;
for (i = 0, nblks = 0; i < nplanes; i++) {
int n, fetch_stride, cpp;
cpp = drm_format_plane_cpp(format, i);
fetch_stride = width * cpp / (i ? hsub : 1);
n = DIV_ROUND_UP(fetch_stride * nlines, SMP_BLK_SIZE);
/* for hw rev v1.00 */
if (mdp5_kms->rev == 0)
n = roundup_pow_of_two(n);
DBG("%s[%d]: request %d SMP blocks", mdp5_plane->name, i, n);
ret = mdp5_smp_request(mdp5_kms, pipe2client(pipe, i), n);
if (ret) {
dev_err(dev->dev, "Could not allocate %d SMP blocks: %d\n",
n, ret);
return ret;
}
nblks += n;
}
/* in success case, return total # of blocks allocated: */
return nblks;
}
static void set_fifo_thresholds(struct drm_plane *plane, int nblks)
{
struct mdp5_plane *mdp5_plane = to_mdp5_plane(plane);
struct mdp5_kms *mdp5_kms = get_kms(plane);
enum mdp5_pipe pipe = mdp5_plane->pipe;
uint32_t val;
/* 1/4 of SMP pool that is being fetched */
val = (nblks * SMP_ENTRIES_PER_BLK) / 4;
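/* For example (a sketch, not from the original code): with
* SMP_ENTRIES_PER_BLK = 4096 / 16 = 256 entries, a 4-block allocation
* gives val = 4 * 256 / 4 = 256, so the three watermarks below end up
* programmed to 256, 512 and 768 entries.
*/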
mdp5_write(mdp5_kms, REG_MDP5_PIPE_REQPRIO_FIFO_WM_0(pipe), val * 1);
mdp5_write(mdp5_kms, REG_MDP5_PIPE_REQPRIO_FIFO_WM_1(pipe), val * 2);
mdp5_write(mdp5_kms, REG_MDP5_PIPE_REQPRIO_FIFO_WM_2(pipe), val * 3);
}
int mdp5_plane_mode_set(struct drm_plane *plane,
struct drm_crtc *crtc, struct drm_framebuffer *fb,
int crtc_x, int crtc_y,
unsigned int crtc_w, unsigned int crtc_h,
uint32_t src_x, uint32_t src_y,
uint32_t src_w, uint32_t src_h)
{
struct mdp5_plane *mdp5_plane = to_mdp5_plane(plane);
struct mdp5_kms *mdp5_kms = get_kms(plane);
enum mdp5_pipe pipe = mdp5_plane->pipe;
const struct mdp_format *format;
uint32_t nplanes, config = 0;
uint32_t phasex_step = 0, phasey_step = 0;
uint32_t hdecm = 0, vdecm = 0;
int i, nblks;
nplanes = drm_format_num_planes(fb->pixel_format);
/* bad formats should already be rejected: */
if (WARN_ON(nplanes > pipe2nclients(pipe)))
return -EINVAL;
/* src values are in Q16 fixed point, convert to integer: */
src_x = src_x >> 16;
src_y = src_y >> 16;
src_w = src_w >> 16;
src_h = src_h >> 16;
DBG("%s: FB[%u] %u,%u,%u,%u -> CRTC[%u] %d,%d,%u,%u", mdp5_plane->name,
fb->base.id, src_x, src_y, src_w, src_h,
crtc->base.id, crtc_x, crtc_y, crtc_w, crtc_h);
/*
* Calculate and request required # of smp blocks:
*/
nblks = request_smp_blocks(plane, fb->pixel_format, nplanes, src_w);
if (nblks < 0)
return nblks;
/*
* Currently we update the hw for allocations/requests immediately,
* but once atomic modeset/pageflip is in place, the allocation
* would move into atomic->check_plane_state(), while updating the
* hw would remain here:
*/
for (i = 0; i < pipe2nclients(pipe); i++)
mdp5_smp_configure(mdp5_kms, pipe2client(pipe, i));
if (src_w != crtc_w) {
config |= MDP5_PIPE_SCALE_CONFIG_SCALEX_EN;
/* TODO calc phasex_step, hdecm */
}
if (src_h != crtc_h) {
config |= MDP5_PIPE_SCALE_CONFIG_SCALEY_EN;
/* TODO calc phasey_step, vdecm */
}
mdp5_write(mdp5_kms, REG_MDP5_PIPE_SRC_IMG_SIZE(pipe),
MDP5_PIPE_SRC_IMG_SIZE_WIDTH(src_w) |
MDP5_PIPE_SRC_IMG_SIZE_HEIGHT(src_h));
mdp5_write(mdp5_kms, REG_MDP5_PIPE_SRC_SIZE(pipe),
MDP5_PIPE_SRC_SIZE_WIDTH(src_w) |
MDP5_PIPE_SRC_SIZE_HEIGHT(src_h));
mdp5_write(mdp5_kms, REG_MDP5_PIPE_SRC_XY(pipe),
MDP5_PIPE_SRC_XY_X(src_x) |
MDP5_PIPE_SRC_XY_Y(src_y));
mdp5_write(mdp5_kms, REG_MDP5_PIPE_OUT_SIZE(pipe),
MDP5_PIPE_OUT_SIZE_WIDTH(crtc_w) |
MDP5_PIPE_OUT_SIZE_HEIGHT(crtc_h));
mdp5_write(mdp5_kms, REG_MDP5_PIPE_OUT_XY(pipe),
MDP5_PIPE_OUT_XY_X(crtc_x) |
MDP5_PIPE_OUT_XY_Y(crtc_y));
mdp5_plane_set_scanout(plane, fb);
format = to_mdp_format(msm_framebuffer_format(fb));
mdp5_write(mdp5_kms, REG_MDP5_PIPE_SRC_FORMAT(pipe),
MDP5_PIPE_SRC_FORMAT_A_BPC(format->bpc_a) |
MDP5_PIPE_SRC_FORMAT_R_BPC(format->bpc_r) |
MDP5_PIPE_SRC_FORMAT_G_BPC(format->bpc_g) |
MDP5_PIPE_SRC_FORMAT_B_BPC(format->bpc_b) |
COND(format->alpha_enable, MDP5_PIPE_SRC_FORMAT_ALPHA_ENABLE) |
MDP5_PIPE_SRC_FORMAT_CPP(format->cpp - 1) |
MDP5_PIPE_SRC_FORMAT_UNPACK_COUNT(format->unpack_count - 1) |
COND(format->unpack_tight, MDP5_PIPE_SRC_FORMAT_UNPACK_TIGHT) |
MDP5_PIPE_SRC_FORMAT_NUM_PLANES(nplanes - 1) |
MDP5_PIPE_SRC_FORMAT_CHROMA_SAMP(CHROMA_RGB));
mdp5_write(mdp5_kms, REG_MDP5_PIPE_SRC_UNPACK(pipe),
MDP5_PIPE_SRC_UNPACK_ELEM0(format->unpack[0]) |
MDP5_PIPE_SRC_UNPACK_ELEM1(format->unpack[1]) |
MDP5_PIPE_SRC_UNPACK_ELEM2(format->unpack[2]) |
MDP5_PIPE_SRC_UNPACK_ELEM3(format->unpack[3]));
mdp5_write(mdp5_kms, REG_MDP5_PIPE_SRC_OP_MODE(pipe),
MDP5_PIPE_SRC_OP_MODE_BWC(BWC_LOSSLESS));
/* not using secure mode: */
mdp5_write(mdp5_kms, REG_MDP5_PIPE_SRC_ADDR_SW_STATUS(pipe), 0);
mdp5_write(mdp5_kms, REG_MDP5_PIPE_SCALE_PHASE_STEP_X(pipe), phasex_step);
mdp5_write(mdp5_kms, REG_MDP5_PIPE_SCALE_PHASE_STEP_Y(pipe), phasey_step);
mdp5_write(mdp5_kms, REG_MDP5_PIPE_DECIMATION(pipe),
MDP5_PIPE_DECIMATION_VERT(vdecm) |
MDP5_PIPE_DECIMATION_HORZ(hdecm));
mdp5_write(mdp5_kms, REG_MDP5_PIPE_SCALE_CONFIG(pipe),
MDP5_PIPE_SCALE_CONFIG_SCALEX_MIN_FILTER(SCALE_FILTER_NEAREST) |
MDP5_PIPE_SCALE_CONFIG_SCALEY_MIN_FILTER(SCALE_FILTER_NEAREST) |
MDP5_PIPE_SCALE_CONFIG_SCALEX_CR_FILTER(SCALE_FILTER_NEAREST) |
MDP5_PIPE_SCALE_CONFIG_SCALEY_CR_FILTER(SCALE_FILTER_NEAREST) |
MDP5_PIPE_SCALE_CONFIG_SCALEX_MAX_FILTER(SCALE_FILTER_NEAREST) |
MDP5_PIPE_SCALE_CONFIG_SCALEY_MAX_FILTER(SCALE_FILTER_NEAREST));
set_fifo_thresholds(plane, nblks);
/* TODO detach from old crtc (if we had more than one) */
mdp5_crtc_attach(crtc, plane);
return 0;
}
void mdp5_plane_complete_flip(struct drm_plane *plane)
{
struct mdp5_kms *mdp5_kms = get_kms(plane);
enum mdp5_pipe pipe = to_mdp5_plane(plane)->pipe;
int i;
for (i = 0; i < pipe2nclients(pipe); i++)
mdp5_smp_commit(mdp5_kms, pipe2client(pipe, i));
}
enum mdp5_pipe mdp5_plane_pipe(struct drm_plane *plane)
{
struct mdp5_plane *mdp5_plane = to_mdp5_plane(plane);
return mdp5_plane->pipe;
}
/* initialize plane */
struct drm_plane *mdp5_plane_init(struct drm_device *dev,
enum mdp5_pipe pipe, bool private_plane)
{
struct drm_plane *plane = NULL;
struct mdp5_plane *mdp5_plane;
int ret;
mdp5_plane = kzalloc(sizeof(*mdp5_plane), GFP_KERNEL);
if (!mdp5_plane) {
ret = -ENOMEM;
goto fail;
}
plane = &mdp5_plane->base;
mdp5_plane->pipe = pipe;
mdp5_plane->name = pipe2name(pipe);
mdp5_plane->nformats = mdp5_get_formats(pipe, mdp5_plane->formats,
ARRAY_SIZE(mdp5_plane->formats));
drm_plane_init(dev, plane, 0xff, &mdp5_plane_funcs,
mdp5_plane->formats, mdp5_plane->nformats,
private_plane);
mdp5_plane_install_properties(plane, &plane->base);
return plane;
fail:
if (plane)
mdp5_plane_destroy(plane);
return ERR_PTR(ret);
}
/*
* Copyright (C) 2013 Red Hat
* Author: Rob Clark <robdclark@gmail.com>
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 as published by
* the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
* You should have received a copy of the GNU General Public License along with
* this program. If not, see <http://www.gnu.org/licenses/>.
*/
#include "mdp5_kms.h"
#include "mdp5_smp.h"
/* SMP - Shared Memory Pool
*
* These are shared between all the clients, where each plane in a
* scanout buffer is a SMP client. Ie. scanout of 3 plane I420 on
* pipe VIG0 => 3 clients: VIG0_Y, VIG0_CB, VIG0_CR.
*
* Based on the size of the attached scanout buffer, a certain # of
* blocks must be allocated to that client out of the shared pool.
*
* For each block, it can be either free, or pending/in-use by a
* client. The updates happen in three steps:
*
* 1) mdp5_smp_request():
* When plane scanout is setup, calculate required number of
* blocks needed per client, and request. Blocks not in-use or
* pending by any other client are added to the client's pending
* set.
*
* 2) mdp5_smp_configure():
* As hw is programmed, before FLUSH, MDP5_SMP_ALLOC registers
* are configured for the union(pending, inuse)
*
* 3) mdp5_smp_commit():
* After next vblank, copy pending -> inuse. Optionally update
* MDP5_SMP_ALLOC registers if there are newly unused blocks
*
* On the next vblank after changes have been committed to hw, the
* client's pending blocks become its in-use blocks (and no-longer
* in-use blocks become available to other clients).
*
* btw, hurray for confusing overloaded acronyms! :-/
*
* NOTE: for atomic modeset/pageflip NONBLOCK operations, step #1
* should happen at (or before)? atomic->check(). And we'd need
* an API to discard previous requests if update is aborted or
* (test-only).
*
* TODO would perhaps be nice to have debugfs to dump out kernel
* inuse and pending state of all clients..
*/
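/* A minimal usage sketch (not from the original code; the block count is
* made up, and CID_RGB0 is simply the single client of pipe RGB0):
*
*    ret = mdp5_smp_request(mdp5_kms, CID_RGB0, 4);    <- step #1
*    ...program the pipe, then, before FLUSH...
*    mdp5_smp_configure(mdp5_kms, CID_RGB0);           <- step #2
*    ...on the next vblank...
*    mdp5_smp_commit(mdp5_kms, CID_RGB0);               <- step #3
*/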
static DEFINE_SPINLOCK(smp_lock);
/* step #1: update # of blocks pending for the client: */
int mdp5_smp_request(struct mdp5_kms *mdp5_kms,
enum mdp5_client_id cid, int nblks)
{
struct mdp5_client_smp_state *ps = &mdp5_kms->smp_client_state[cid];
int i, ret, avail, cur_nblks, cnt = mdp5_kms->smp_blk_cnt;
unsigned long flags;
spin_lock_irqsave(&smp_lock, flags);
avail = cnt - bitmap_weight(mdp5_kms->smp_state, cnt);
if (nblks > avail) {
ret = -ENOSPC;
goto fail;
}
cur_nblks = bitmap_weight(ps->pending, cnt);
if (nblks > cur_nblks) {
/* grow the existing pending reservation: */
for (i = cur_nblks; i < nblks; i++) {
int blk = find_first_zero_bit(mdp5_kms->smp_state, cnt);
set_bit(blk, ps->pending);
set_bit(blk, mdp5_kms->smp_state);
}
} else {
/* shrink the existing pending reservation: */
for (i = cur_nblks; i > nblks; i--) {
int blk = find_first_bit(ps->pending, cnt);
clear_bit(blk, ps->pending);
/* don't clear in global smp_state until _commit() */
}
}
ret = 0;
fail:
spin_unlock_irqrestore(&smp_lock, flags);
return ret;
}
static void update_smp_state(struct mdp5_kms *mdp5_kms,
enum mdp5_client_id cid, mdp5_smp_state_t *assigned)
{
int cnt = mdp5_kms->smp_blk_cnt;
uint32_t blk, val;
for_each_set_bit(blk, *assigned, cnt) {
int idx = blk / 3;
int fld = blk % 3;
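/* e.g. (illustration, not from the original code): block 7 lands in
* SMP_ALLOC register idx 2, field 1 */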
val = mdp5_read(mdp5_kms, REG_MDP5_SMP_ALLOC_W_REG(idx));
switch (fld) {
case 0:
val &= ~MDP5_SMP_ALLOC_W_REG_CLIENT0__MASK;
val |= MDP5_SMP_ALLOC_W_REG_CLIENT0(cid);
break;
case 1:
val &= ~MDP5_SMP_ALLOC_W_REG_CLIENT1__MASK;
val |= MDP5_SMP_ALLOC_W_REG_CLIENT1(cid);
break;
case 2:
val &= ~MDP5_SMP_ALLOC_W_REG_CLIENT2__MASK;
val |= MDP5_SMP_ALLOC_W_REG_CLIENT2(cid);
break;
}
mdp5_write(mdp5_kms, REG_MDP5_SMP_ALLOC_W_REG(idx), val);
mdp5_write(mdp5_kms, REG_MDP5_SMP_ALLOC_R_REG(idx), val);
}
}
/* step #2: configure hw for union(pending, inuse): */
void mdp5_smp_configure(struct mdp5_kms *mdp5_kms, enum mdp5_client_id cid)
{
struct mdp5_client_smp_state *ps = &mdp5_kms->smp_client_state[cid];
int cnt = mdp5_kms->smp_blk_cnt;
mdp5_smp_state_t assigned;
bitmap_or(assigned, ps->inuse, ps->pending, cnt);
update_smp_state(mdp5_kms, cid, &assigned);
}
/* step #3: after vblank, copy pending -> inuse: */
void mdp5_smp_commit(struct mdp5_kms *mdp5_kms, enum mdp5_client_id cid)
{
struct mdp5_client_smp_state *ps = &mdp5_kms->smp_client_state[cid];
int cnt = mdp5_kms->smp_blk_cnt;
mdp5_smp_state_t released;
/*
* Figure out if there are any blocks we were previously
* using, which can be released and made available to other
* clients:
*/
if (bitmap_andnot(released, ps->inuse, ps->pending, cnt)) {
unsigned long flags;
spin_lock_irqsave(&smp_lock, flags);
/* clear released blocks: */
bitmap_andnot(mdp5_kms->smp_state, mdp5_kms->smp_state,
released, cnt);
spin_unlock_irqrestore(&smp_lock, flags);
update_smp_state(mdp5_kms, CID_UNUSED, &released);
}
bitmap_copy(ps->inuse, ps->pending, cnt);
}
/*
* Copyright (C) 2013 Red Hat
* Author: Rob Clark <robdclark@gmail.com>
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 as published by
* the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
* You should have received a copy of the GNU General Public License along with
* this program. If not, see <http://www.gnu.org/licenses/>.
*/
#ifndef __MDP5_SMP_H__
#define __MDP5_SMP_H__
#include "msm_drv.h"
#define MAX_SMP_BLOCKS 22
#define SMP_BLK_SIZE 4096
#define SMP_ENTRIES_PER_BLK (SMP_BLK_SIZE / 16)
typedef DECLARE_BITMAP(mdp5_smp_state_t, MAX_SMP_BLOCKS);
struct mdp5_client_smp_state {
mdp5_smp_state_t inuse;
mdp5_smp_state_t pending;
};
struct mdp5_kms;
int mdp5_smp_request(struct mdp5_kms *mdp5_kms, enum mdp5_client_id cid, int nblks);
void mdp5_smp_configure(struct mdp5_kms *mdp5_kms, enum mdp5_client_id cid);
void mdp5_smp_commit(struct mdp5_kms *mdp5_kms, enum mdp5_client_id cid);
#endif /* __MDP5_SMP_H__ */
#ifndef MDP_COMMON_XML
#define MDP_COMMON_XML
/* Autogenerated file, DO NOT EDIT manually!
This file was generated by the rules-ng-ng headergen tool in this git repository:
http://github.com/freedreno/envytools/
git clone https://github.com/freedreno/envytools.git
The rules-ng-ng source files this header was generated from are:
- /home/robclark/src/freedreno/envytools/rnndb/msm.xml ( 647 bytes, from 2013-11-30 14:45:35)
- /home/robclark/src/freedreno/envytools/rnndb/freedreno_copyright.xml ( 1453 bytes, from 2013-03-31 16:51:27)
- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp4.xml ( 17996 bytes, from 2013-12-01 19:10:31)
- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp_common.xml ( 1615 bytes, from 2013-11-30 15:00:52)
- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp5.xml ( 22517 bytes, from 2013-12-03 20:59:13)
- /home/robclark/src/freedreno/envytools/rnndb/dsi/dsi.xml ( 11712 bytes, from 2013-08-17 17:13:43)
- /home/robclark/src/freedreno/envytools/rnndb/dsi/sfpb.xml ( 344 bytes, from 2013-08-11 19:26:32)
- /home/robclark/src/freedreno/envytools/rnndb/dsi/mmss_cc.xml ( 1544 bytes, from 2013-08-16 19:17:05)
- /home/robclark/src/freedreno/envytools/rnndb/hdmi/qfprom.xml ( 600 bytes, from 2013-07-05 19:21:12)
- /home/robclark/src/freedreno/envytools/rnndb/hdmi/hdmi.xml ( 20932 bytes, from 2013-12-01 15:13:04)
Copyright (C) 2013 by the following authors:
- Rob Clark <robdclark@gmail.com> (robclark)
Permission is hereby granted, free of charge, to any person obtaining
a copy of this software and associated documentation files (the
"Software"), to deal in the Software without restriction, including
without limitation the rights to use, copy, modify, merge, publish,
distribute, sublicense, and/or sell copies of the Software, and to
permit persons to whom the Software is furnished to do so, subject to
the following conditions:
The above copyright notice and this permission notice (including the
next paragraph) shall be included in all copies or substantial
portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
*/
enum mdp_mixer_stage_id {
STAGE_UNUSED = 0,
STAGE_BASE = 1,
STAGE0 = 2,
STAGE1 = 3,
STAGE2 = 4,
STAGE3 = 5,
};
enum mdp_alpha_type {
FG_CONST = 0,
BG_CONST = 1,
FG_PIXEL = 2,
BG_PIXEL = 3,
};
enum mdp_bpc {
BPC1 = 0,
BPC5 = 1,
BPC6 = 2,
BPC8 = 3,
};
enum mdp_bpc_alpha {
BPC1A = 0,
BPC4A = 1,
BPC6A = 2,
BPC8A = 3,
};
#endif /* MDP_COMMON_XML */
@@ -17,7 +17,7 @@
 #include "msm_drv.h"
-#include "mdp4_kms.h"
+#include "mdp_kms.h"
 #define FMT(name, a, r, g, b, e0, e1, e2, e3, alpha, tight, c, cnt) { \
 .base = { .pixel_format = DRM_FORMAT_ ## name }, \
@@ -34,7 +34,7 @@
 #define BPC0A 0
-static const struct mdp4_format formats[] = {
+static const struct mdp_format formats[] = {
 /* name a r g b e0 e1 e2 e3 alpha tight cpp cnt */
 FMT(ARGB8888, 8, 8, 8, 8, 1, 0, 2, 3, true, true, 4, 4),
 FMT(XRGB8888, 8, 8, 8, 8, 1, 0, 2, 3, false, true, 4, 4),
@@ -44,12 +44,11 @@ static const struct mdp4_format formats[] = {
 FMT(BGR565, 0, 5, 6, 5, 2, 0, 1, 0, false, true, 2, 3),
 };
-uint32_t mdp4_get_formats(enum mdp4_pipe pipe_id, uint32_t *pixel_formats,
-uint32_t max_formats)
+uint32_t mdp_get_formats(uint32_t *pixel_formats, uint32_t max_formats)
 {
 uint32_t i;
 for (i = 0; i < ARRAY_SIZE(formats); i++) {
-const struct mdp4_format *f = &formats[i];
+const struct mdp_format *f = &formats[i];
 if (i == max_formats)
 break;
@@ -60,11 +59,11 @@ uint32_t mdp4_get_formats(enum mdp4_pipe pipe_id, uint32_t *pixel_formats,
 return i;
 }
-const struct msm_format *mdp4_get_format(struct msm_kms *kms, uint32_t format)
+const struct msm_format *mdp_get_format(struct msm_kms *kms, uint32_t format)
 {
 int i;
 for (i = 0; i < ARRAY_SIZE(formats); i++) {
-const struct mdp4_format *f = &formats[i];
+const struct mdp_format *f = &formats[i];
 if (f->base.pixel_format == format)
 return &f->base;
 }
...
@@ -17,11 +17,11 @@
 #include "msm_drv.h"
-#include "mdp4_kms.h"
+#include "mdp_kms.h"
-struct mdp4_irq_wait {
+struct mdp_irq_wait {
-struct mdp4_irq irq;
+struct mdp_irq irq;
 int count;
 };
@@ -29,142 +29,83 @@ static DECLARE_WAIT_QUEUE_HEAD(wait_event);
 static DEFINE_SPINLOCK(list_lock);
-static void update_irq(struct mdp4_kms *mdp4_kms)
+static void update_irq(struct mdp_kms *mdp_kms)
 {
-struct mdp4_irq *irq;
+struct mdp_irq *irq;
-uint32_t irqmask = mdp4_kms->vblank_mask;
+uint32_t irqmask = mdp_kms->vblank_mask;
 BUG_ON(!spin_is_locked(&list_lock));
-list_for_each_entry(irq, &mdp4_kms->irq_list, node)
+list_for_each_entry(irq, &mdp_kms->irq_list, node)
 irqmask |= irq->irqmask;
-mdp4_write(mdp4_kms, REG_MDP4_INTR_ENABLE, irqmask);
+mdp_kms->funcs->set_irqmask(mdp_kms, irqmask);
 }
-static void update_irq_unlocked(struct mdp4_kms *mdp4_kms)
+static void update_irq_unlocked(struct mdp_kms *mdp_kms)
 {
 unsigned long flags;
 spin_lock_irqsave(&list_lock, flags);
-update_irq(mdp4_kms);
+update_irq(mdp_kms);
 spin_unlock_irqrestore(&list_lock, flags);
 }
-static void mdp4_irq_error_handler(struct mdp4_irq *irq, uint32_t irqstatus)
+void mdp_dispatch_irqs(struct mdp_kms *mdp_kms, uint32_t status)
 {
-DRM_ERROR("errors: %08x\n", irqstatus);
-}
-void mdp4_irq_preinstall(struct msm_kms *kms)
-{
-struct mdp4_kms *mdp4_kms = to_mdp4_kms(kms);
-mdp4_write(mdp4_kms, REG_MDP4_INTR_CLEAR, 0xffffffff);
-}
-int mdp4_irq_postinstall(struct msm_kms *kms)
-{
-struct mdp4_kms *mdp4_kms = to_mdp4_kms(kms);
-struct mdp4_irq *error_handler = &mdp4_kms->error_handler;
-INIT_LIST_HEAD(&mdp4_kms->irq_list);
-error_handler->irq = mdp4_irq_error_handler;
-error_handler->irqmask = MDP4_IRQ_PRIMARY_INTF_UDERRUN |
-MDP4_IRQ_EXTERNAL_INTF_UDERRUN;
-mdp4_irq_register(mdp4_kms, error_handler);
-return 0;
-}
-void mdp4_irq_uninstall(struct msm_kms *kms)
-{
-struct mdp4_kms *mdp4_kms = to_mdp4_kms(kms);
-mdp4_write(mdp4_kms, REG_MDP4_INTR_ENABLE, 0x00000000);
-}
-irqreturn_t mdp4_irq(struct msm_kms *kms)
-{
-struct mdp4_kms *mdp4_kms = to_mdp4_kms(kms);
-struct drm_device *dev = mdp4_kms->dev;
-struct msm_drm_private *priv = dev->dev_private;
-struct mdp4_irq *handler, *n;
+struct mdp_irq *handler, *n;
 unsigned long flags;
-unsigned int id;
-uint32_t status;
-status = mdp4_read(mdp4_kms, REG_MDP4_INTR_STATUS);
-mdp4_write(mdp4_kms, REG_MDP4_INTR_CLEAR, status);
-VERB("status=%08x", status);
-for (id = 0; id < priv->num_crtcs; id++)
-if (status & mdp4_crtc_vblank(priv->crtcs[id]))
-drm_handle_vblank(dev, id);
 spin_lock_irqsave(&list_lock, flags);
-mdp4_kms->in_irq = true;
+mdp_kms->in_irq = true;
-list_for_each_entry_safe(handler, n, &mdp4_kms->irq_list, node) {
+list_for_each_entry_safe(handler, n, &mdp_kms->irq_list, node) {
 if (handler->irqmask & status) {
 spin_unlock_irqrestore(&list_lock, flags);
 handler->irq(handler, handler->irqmask & status);
 spin_lock_irqsave(&list_lock, flags);
 }
 }
-mdp4_kms->in_irq = false;
+mdp_kms->in_irq = false;
-update_irq(mdp4_kms);
+update_irq(mdp_kms);
-spin_unlock_irqrestore(&list_lock, flags);
-return IRQ_HANDLED;
-}
-int mdp4_enable_vblank(struct msm_kms *kms, struct drm_crtc *crtc)
-{
-struct mdp4_kms *mdp4_kms = to_mdp4_kms(kms);
-unsigned long flags;
-spin_lock_irqsave(&list_lock, flags);
-mdp4_kms->vblank_mask |= mdp4_crtc_vblank(crtc);
-update_irq(mdp4_kms);
 spin_unlock_irqrestore(&list_lock, flags);
-return 0;
 }
-void mdp4_disable_vblank(struct msm_kms *kms, struct drm_crtc *crtc)
+void mdp_update_vblank_mask(struct mdp_kms *mdp_kms, uint32_t mask, bool enable)
 {
-struct mdp4_kms *mdp4_kms = to_mdp4_kms(kms);
 unsigned long flags;
 spin_lock_irqsave(&list_lock, flags);
-mdp4_kms->vblank_mask &= ~mdp4_crtc_vblank(crtc);
-update_irq(mdp4_kms);
+if (enable)
+mdp_kms->vblank_mask |= mask;
+else
+mdp_kms->vblank_mask &= ~mask;
+update_irq(mdp_kms);
 spin_unlock_irqrestore(&list_lock, flags);
 }
-static void wait_irq(struct mdp4_irq *irq, uint32_t irqstatus)
+static void wait_irq(struct mdp_irq *irq, uint32_t irqstatus)
 {
-struct mdp4_irq_wait *wait =
+struct mdp_irq_wait *wait =
-container_of(irq, struct mdp4_irq_wait, irq);
+container_of(irq, struct mdp_irq_wait, irq);
 wait->count--;
 wake_up_all(&wait_event);
 }
-void mdp4_irq_wait(struct mdp4_kms *mdp4_kms, uint32_t irqmask)
+void mdp_irq_wait(struct mdp_kms *mdp_kms, uint32_t irqmask)
 {
-struct mdp4_irq_wait wait = {
+struct mdp_irq_wait wait = {
 .irq = {
 .irq = wait_irq,
 .irqmask = irqmask,
 },
 .count = 1,
 };
-mdp4_irq_register(mdp4_kms, &wait.irq);
+mdp_irq_register(mdp_kms, &wait.irq);
 wait_event(wait_event, (wait.count <= 0));
-mdp4_irq_unregister(mdp4_kms, &wait.irq);
+mdp_irq_unregister(mdp_kms, &wait.irq);
 }
-void mdp4_irq_register(struct mdp4_kms *mdp4_kms, struct mdp4_irq *irq)
+void mdp_irq_register(struct mdp_kms *mdp_kms, struct mdp_irq *irq)
 {
 unsigned long flags;
 bool needs_update = false;
@@ -173,17 +114,17 @@ void mdp4_irq_register(struct mdp4_kms *mdp4_kms, struct mdp4_irq *irq)
 if (!irq->registered) {
 irq->registered = true;
-list_add(&irq->node, &mdp4_kms->irq_list);
+list_add(&irq->node, &mdp_kms->irq_list);
-needs_update = !mdp4_kms->in_irq;
+needs_update = !mdp_kms->in_irq;
 }
 spin_unlock_irqrestore(&list_lock, flags);
 if (needs_update)
-update_irq_unlocked(mdp4_kms);
+update_irq_unlocked(mdp_kms);
 }
-void mdp4_irq_unregister(struct mdp4_kms *mdp4_kms, struct mdp4_irq *irq)
+void mdp_irq_unregister(struct mdp_kms *mdp_kms, struct mdp_irq *irq)
 {
 unsigned long flags;
 bool needs_update = false;
@@ -193,11 +134,11 @@ void mdp4_irq_unregister(struct mdp4_kms *mdp4_kms, struct mdp4_irq *irq)
 if (irq->registered) {
 irq->registered = false;
 list_del(&irq->node);
-needs_update = !mdp4_kms->in_irq;
+needs_update = !mdp_kms->in_irq;
 }
 spin_unlock_irqrestore(&list_lock, flags);
 if (needs_update)
-update_irq_unlocked(mdp4_kms);
+update_irq_unlocked(mdp_kms);
 }
/*
* Copyright (C) 2013 Red Hat
* Author: Rob Clark <robdclark@gmail.com>
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 as published by
* the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
* You should have received a copy of the GNU General Public License along with
* this program. If not, see <http://www.gnu.org/licenses/>.
*/
#ifndef __MDP_KMS_H__
#define __MDP_KMS_H__
#include <linux/clk.h>
#include <linux/platform_device.h>
#include <linux/regulator/consumer.h>
#include "msm_drv.h"
#include "msm_kms.h"
#include "mdp_common.xml.h"
struct mdp_kms;
struct mdp_kms_funcs {
struct msm_kms_funcs base;
void (*set_irqmask)(struct mdp_kms *mdp_kms, uint32_t irqmask);
};
struct mdp_kms {
struct msm_kms base;
const struct mdp_kms_funcs *funcs;
/* irq handling: */
bool in_irq;
struct list_head irq_list; /* list of mdp4_irq */
uint32_t vblank_mask; /* irq bits set for userspace vblank */
};
#define to_mdp_kms(x) container_of(x, struct mdp_kms, base)
static inline void mdp_kms_init(struct mdp_kms *mdp_kms,
const struct mdp_kms_funcs *funcs)
{
mdp_kms->funcs = funcs;
INIT_LIST_HEAD(&mdp_kms->irq_list);
msm_kms_init(&mdp_kms->base, &funcs->base);
}
/*
* irq helpers:
*/
/* For transiently registering for different MDP irqs that various parts
* of the KMS code need during setup/configuration. These are not
* necessarily the same as what drm_vblank_get/put() are requesting, and
* the hysteresis in drm_vblank_put() is not necessarily desirable for
* internal housekeeping related irq usage.
*/
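/* For example (a sketch, not from the original code), a blocking one-shot
* wait for an INTF3 vsync could be written as:
*
*    mdp_irq_wait(mdp_kms, MDP5_IRQ_INTF3_VSYNC);
*
* which registers a temporary mdp_irq, sleeps until that irq fires once,
* and then unregisters it again.
*/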
struct mdp_irq {
struct list_head node;
uint32_t irqmask;
bool registered;
void (*irq)(struct mdp_irq *irq, uint32_t irqstatus);
};
void mdp_dispatch_irqs(struct mdp_kms *mdp_kms, uint32_t status);
void mdp_update_vblank_mask(struct mdp_kms *mdp_kms, uint32_t mask, bool enable);
void mdp_irq_wait(struct mdp_kms *mdp_kms, uint32_t irqmask);
void mdp_irq_register(struct mdp_kms *mdp_kms, struct mdp_irq *irq);
void mdp_irq_unregister(struct mdp_kms *mdp_kms, struct mdp_irq *irq);
/*
* pixel format helpers:
*/
struct mdp_format {
struct msm_format base;
enum mdp_bpc bpc_r, bpc_g, bpc_b;
enum mdp_bpc_alpha bpc_a;
uint8_t unpack[4];
bool alpha_enable, unpack_tight;
uint8_t cpp, unpack_count;
};
#define to_mdp_format(x) container_of(x, struct mdp_format, base)
uint32_t mdp_get_formats(uint32_t *formats, uint32_t max_formats);
const struct msm_format *mdp_get_format(struct msm_kms *kms, uint32_t format);
#endif /* __MDP_KMS_H__ */
@@ -17,6 +17,7 @@
 #include "msm_drv.h"
 #include "msm_gpu.h"
+#include "msm_kms.h"
 static void msm_fb_output_poll_changed(struct drm_device *dev)
 {
@@ -30,50 +31,19 @@ static const struct drm_mode_config_funcs mode_config_funcs = {
 .output_poll_changed = msm_fb_output_poll_changed,
 };
-static int msm_fault_handler(struct iommu_domain *iommu, struct device *dev,
-unsigned long iova, int flags, void *arg)
-{
-DBG("*** fault: iova=%08lx, flags=%d", iova, flags);
-return 0;
-}
-int msm_register_iommu(struct drm_device *dev, struct iommu_domain *iommu)
+int msm_register_mmu(struct drm_device *dev, struct msm_mmu *mmu)
 {
 struct msm_drm_private *priv = dev->dev_private;
-int idx = priv->num_iommus++;
+int idx = priv->num_mmus++;
-if (WARN_ON(idx >= ARRAY_SIZE(priv->iommus)))
+if (WARN_ON(idx >= ARRAY_SIZE(priv->mmus)))
 return -EINVAL;
-priv->iommus[idx] = iommu;
+priv->mmus[idx] = mmu;
-iommu_set_fault_handler(iommu, msm_fault_handler, dev);
-/* need to iommu_attach_device() somewhere?? on resume?? */
 return idx;
 }
-int msm_iommu_attach(struct drm_device *dev, struct iommu_domain *iommu,
-const char **names, int cnt)
-{
-int i, ret;
-for (i = 0; i < cnt; i++) {
-/* TODO maybe some day msm iommu won't require this hack: */
-struct device *msm_iommu_get_ctx(const char *ctx_name);
-struct device *ctx = msm_iommu_get_ctx(names[i]);
-if (!ctx)
-continue;
-ret = iommu_attach_device(iommu, ctx);
-if (ret) {
-dev_warn(dev->dev, "could not attach iommu to %s", names[i]);
-return ret;
-}
-}
-return 0;
-}
 #ifdef CONFIG_DRM_MSM_REGISTER_LOGGING
 static bool reglog = false;
 MODULE_PARM_DESC(reglog, "Enable register read/write logging");
@@ -82,6 +52,10 @@ module_param(reglog, bool, 0600);
 #define reglog 0
 #endif
+static char *vram;
+MODULE_PARM_DESC(vram, "Configure VRAM size (for devices without IOMMU/GPUMMU");
+module_param(vram, charp, 0);
 void __iomem *msm_ioremap(struct platform_device *pdev, const char *name,
 const char *dbgname)
 {
@@ -161,6 +135,14 @@ static int msm_unload(struct drm_device *dev)
 mutex_unlock(&dev->struct_mutex);
 }
+if (priv->vram.paddr) {
+DEFINE_DMA_ATTRS(attrs);
+dma_set_attr(DMA_ATTR_NO_KERNEL_MAPPING, &attrs);
+drm_mm_takedown(&priv->vram.mm);
+dma_free_attrs(dev->dev, priv->vram.size, NULL,
+priv->vram.paddr, &attrs);
+}
 dev->dev_private = NULL;
 kfree(priv);
@@ -168,6 +150,24 @@ static int msm_unload(struct drm_device *dev)
 return 0;
 }
+static int get_mdp_ver(struct platform_device *pdev)
+{
+#ifdef CONFIG_OF
+const static struct of_device_id match_types[] = { {
+.compatible = "qcom,mdss_mdp",
+.data = (void *)5,
+}, {
+/* end node */
+} };
+struct device *dev = &pdev->dev;
+const struct of_device_id *match;
+match = of_match_node(match_types, dev->of_node);
+if (match)
+return (int)match->data;
+#endif
+return 4;
+}
 static int msm_load(struct drm_device *dev, unsigned long flags)
 {
 struct platform_device *pdev = dev->platformdev;
@@ -191,7 +191,53 @@ static int msm_load(struct drm_device *dev, unsigned long flags)
 drm_mode_config_init(dev);
-kms = mdp4_kms_init(dev);
+/* if we have no IOMMU, then we need to use carveout allocator.
+* Grab the entire CMA chunk carved out in early startup in
+* mach-msm:
+*/
+if (!iommu_present(&platform_bus_type)) {
+DEFINE_DMA_ATTRS(attrs);
+unsigned long size;
+void *p;
+DBG("using %s VRAM carveout", vram);
+size = memparse(vram, NULL);
+priv->vram.size = size;
+drm_mm_init(&priv->vram.mm, 0, (size >> PAGE_SHIFT) - 1);
+dma_set_attr(DMA_ATTR_NO_KERNEL_MAPPING, &attrs);
+dma_set_attr(DMA_ATTR_WRITE_COMBINE, &attrs);
+/* note that for no-kernel-mapping, the vaddr returned
+* is bogus, but non-null if allocation succeeded:
+*/
+p = dma_alloc_attrs(dev->dev, size,
+&priv->vram.paddr, 0, &attrs);
+if (!p) {
+dev_err(dev->dev, "failed to allocate VRAM\n");
+priv->vram.paddr = 0;
+ret = -ENOMEM;
+goto fail;
+}
+dev_info(dev->dev, "VRAM: %08x->%08x\n",
+(uint32_t)priv->vram.paddr,
+(uint32_t)(priv->vram.paddr + size));
+}
+switch (get_mdp_ver(pdev)) {
+case 4:
+kms = mdp4_kms_init(dev);
+break;
+case 5:
+kms = mdp5_kms_init(dev);
+break;
+default:
+kms = ERR_PTR(-ENODEV);
+break;
+}
 if (IS_ERR(kms)) {
 /*
 * NOTE: once we have GPU support, having no kms should not
@@ -778,6 +824,7 @@ static const struct dev_pm_ops msm_pm_ops = {
 static int msm_pdev_probe(struct platform_device *pdev)
 {
+pdev->dev.coherent_dma_mask = DMA_BIT_MASK(32);
 return drm_platform_init(&msm_driver, pdev);
 }
@@ -793,12 +840,19 @@ static const struct platform_device_id msm_id[] = {
 { }
 };
+static const struct of_device_id dt_match[] = {
+{ .compatible = "qcom,mdss_mdp" },
+{}
+};
+MODULE_DEVICE_TABLE(of, dt_match);
 static struct platform_driver msm_platform_driver = {
 .probe = msm_pdev_probe,
 .remove = msm_pdev_remove,
 .driver = {
 .owner = THIS_MODULE,
 .name = "msm",
+.of_match_table = dt_match,
 .pm = &msm_pm_ops,
 },
 .id_table = msm_id,
...
@@ -31,6 +31,15 @@
 #include <linux/types.h>
 #include <asm/sizes.h>
+#if defined(CONFIG_COMPILE_TEST) && !defined(CONFIG_ARCH_MSM)
+/* stubs we need for compile-test: */
+static inline struct device *msm_iommu_get_ctx(const char *ctx_name)
+{
+return NULL;
+}
+#endif
 #ifndef CONFIG_OF
 #include <mach/board.h>
 #include <mach/socinfo.h>
@@ -44,6 +53,7 @@
 struct msm_kms;
 struct msm_gpu;
+struct msm_mmu;
 #define NUM_DOMAINS 2 /* one for KMS, then one per gpu core (?) */
@@ -76,9 +86,9 @@ struct msm_drm_private {
 /* callbacks deferred until bo is inactive: */
 struct list_head fence_cbs;
-/* registered IOMMU domains: */
+/* registered MMUs: */
-unsigned int num_iommus;
+unsigned int num_mmus;
-struct iommu_domain *iommus[NUM_DOMAINS];
+struct msm_mmu *mmus[NUM_DOMAINS];
 unsigned int num_planes;
 struct drm_plane *planes[8];
@@ -94,6 +104,16 @@ struct msm_drm_private {
 unsigned int num_connectors;
 struct drm_connector *connectors[8];
+/* VRAM carveout, used when no IOMMU: */
+struct {
+unsigned long size;
+dma_addr_t paddr;
+/* NOTE: mm managed at the page level, size is in # of pages
+* and position mm_node->start is in # of pages:
+*/
+struct drm_mm mm;
+} vram;
 };
 struct msm_format {
@@ -114,39 +134,7 @@ void __msm_fence_worker(struct work_struct *work);
 (_cb)->func = _func; \
 } while (0)
-/* As there are different display controller blocks depending on the
-* snapdragon version, the kms support is split out and the appropriate
-* implementation is loaded at runtime. The kms module is responsible
-* for constructing the appropriate planes/crtcs/encoders/connectors.
-*/
-struct msm_kms_funcs {
-/* hw initialization: */
-int (*hw_init)(struct msm_kms *kms);
-/* irq handling: */
-void (*irq_preinstall)(struct msm_kms *kms);
-int (*irq_postinstall)(struct msm_kms *kms);
-void (*irq_uninstall)(struct msm_kms *kms);
-irqreturn_t (*irq)(struct msm_kms *kms);
-int (*enable_vblank)(struct msm_kms *kms, struct drm_crtc *crtc);
-void (*disable_vblank)(struct msm_kms *kms, struct drm_crtc *crtc);
-/* misc: */
-const struct msm_format *(*get_format)(struct msm_kms *kms, uint32_t format);
-long (*round_pixclk)(struct msm_kms *kms, unsigned long rate,
-struct drm_encoder *encoder);
-/* cleanup: */
-void (*preclose)(struct msm_kms *kms, struct drm_file *file);
-void (*destroy)(struct msm_kms *kms);
-};
-struct msm_kms {
-const struct msm_kms_funcs *funcs;
-};
-struct msm_kms *mdp4_kms_init(struct drm_device *dev);
-int msm_register_iommu(struct drm_device *dev, struct iommu_domain *iommu);
-int msm_iommu_attach(struct drm_device *dev, struct iommu_domain *iommu,
-const char **names, int cnt);
+int msm_register_mmu(struct drm_device *dev, struct msm_mmu *mmu);
 int msm_wait_fence_interruptable(struct drm_device *dev, uint32_t fence,
 struct timespec *timeout);
@@ -202,7 +190,9 @@ struct drm_framebuffer *msm_framebuffer_create(struct drm_device *dev,
 struct drm_fb_helper *msm_fbdev_init(struct drm_device *dev);
-int hdmi_init(struct drm_device *dev, struct drm_encoder *encoder);
+struct hdmi;
+struct hdmi *hdmi_init(struct drm_device *dev, struct drm_encoder *encoder);
+irqreturn_t hdmi_irq(int irq, void *dev_id);
 void __init hdmi_register(void);
 void __exit hdmi_unregister(void);
...
@@ -16,6 +16,7 @@
 */
 #include "msm_drv.h"
+#include "msm_kms.h"
 #include "drm_crtc.h"
 #include "drm_crtc_helper.h"
...
...@@ -22,7 +22,45 @@ ...@@ -22,7 +22,45 @@
#include "msm_drv.h" #include "msm_drv.h"
#include "msm_gem.h" #include "msm_gem.h"
#include "msm_gpu.h" #include "msm_gpu.h"
#include "msm_mmu.h"
static dma_addr_t physaddr(struct drm_gem_object *obj)
{
struct msm_gem_object *msm_obj = to_msm_bo(obj);
struct msm_drm_private *priv = obj->dev->dev_private;
return (((dma_addr_t)msm_obj->vram_node->start) << PAGE_SHIFT) +
priv->vram.paddr;
}
/* allocate pages from VRAM carveout, used when no IOMMU: */
static struct page **get_pages_vram(struct drm_gem_object *obj,
int npages)
{
struct msm_gem_object *msm_obj = to_msm_bo(obj);
struct msm_drm_private *priv = obj->dev->dev_private;
dma_addr_t paddr;
struct page **p;
int ret, i;
p = drm_malloc_ab(npages, sizeof(struct page *));
if (!p)
return ERR_PTR(-ENOMEM);
ret = drm_mm_insert_node(&priv->vram.mm, msm_obj->vram_node,
npages, 0, DRM_MM_SEARCH_DEFAULT);
if (ret) {
drm_free_large(p);
return ERR_PTR(ret);
}
paddr = physaddr(obj);
for (i = 0; i < npages; i++) {
p[i] = phys_to_page(paddr);
paddr += PAGE_SIZE;
}
return p;
}
/* called with dev->struct_mutex held */ /* called with dev->struct_mutex held */
static struct page **get_pages(struct drm_gem_object *obj) static struct page **get_pages(struct drm_gem_object *obj)
...@@ -31,9 +69,14 @@ static struct page **get_pages(struct drm_gem_object *obj) ...@@ -31,9 +69,14 @@ static struct page **get_pages(struct drm_gem_object *obj)
if (!msm_obj->pages) { if (!msm_obj->pages) {
struct drm_device *dev = obj->dev; struct drm_device *dev = obj->dev;
struct page **p = drm_gem_get_pages(obj, 0); struct page **p;
int npages = obj->size >> PAGE_SHIFT; int npages = obj->size >> PAGE_SHIFT;
if (iommu_present(&platform_bus_type))
p = drm_gem_get_pages(obj, 0);
else
p = get_pages_vram(obj, npages);
if (IS_ERR(p)) {
dev_err(dev->dev, "could not get pages: %ld\n",
PTR_ERR(p));
@@ -73,7 +116,11 @@ static void put_pages(struct drm_gem_object *obj)
sg_free_table(msm_obj->sgt);
kfree(msm_obj->sgt);
drm_gem_put_pages(obj, msm_obj->pages, true, false);
if (iommu_present(&platform_bus_type))
drm_gem_put_pages(obj, msm_obj->pages, true, false);
else
drm_mm_remove_node(msm_obj->vram_node);
msm_obj->pages = NULL;
}
}
@@ -138,7 +185,6 @@ int msm_gem_mmap(struct file *filp, struct vm_area_struct *vma)
int msm_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
struct drm_gem_object *obj = vma->vm_private_data;
struct msm_gem_object *msm_obj = to_msm_bo(obj);
struct drm_device *dev = obj->dev;
struct page **pages;
unsigned long pfn;
@@ -163,7 +209,7 @@ int msm_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
pgoff = ((unsigned long)vmf->virtual_address -
vma->vm_start) >> PAGE_SHIFT;
pfn = page_to_pfn(msm_obj->pages[pgoff]);
pfn = page_to_pfn(pages[pgoff]);
VERB("Inserting %p pfn %lx, pa %lx", vmf->virtual_address,
pfn, pfn << PAGE_SHIFT);
@@ -219,67 +265,6 @@ uint64_t msm_gem_mmap_offset(struct drm_gem_object *obj)
return offset;
}
/* helpers for dealing w/ iommu: */
static int map_range(struct iommu_domain *domain, unsigned int iova,
struct sg_table *sgt, unsigned int len, int prot)
{
struct scatterlist *sg;
unsigned int da = iova;
unsigned int i, j;
int ret;
if (!domain || !sgt)
return -EINVAL;
for_each_sg(sgt->sgl, sg, sgt->nents, i) {
u32 pa = sg_phys(sg) - sg->offset;
size_t bytes = sg->length + sg->offset;
VERB("map[%d]: %08x %08x(%x)", i, iova, pa, bytes);
ret = iommu_map(domain, da, pa, bytes, prot);
if (ret)
goto fail;
da += bytes;
}
return 0;
fail:
da = iova;
for_each_sg(sgt->sgl, sg, i, j) {
size_t bytes = sg->length + sg->offset;
iommu_unmap(domain, da, bytes);
da += bytes;
}
return ret;
}
static void unmap_range(struct iommu_domain *domain, unsigned int iova,
struct sg_table *sgt, unsigned int len)
{
struct scatterlist *sg;
unsigned int da = iova;
int i;
for_each_sg(sgt->sgl, sg, sgt->nents, i) {
size_t bytes = sg->length + sg->offset;
size_t unmapped;
unmapped = iommu_unmap(domain, da, bytes);
if (unmapped < bytes)
break;
VERB("unmap[%d]: %08x(%x)", i, iova, bytes);
BUG_ON(!IS_ALIGNED(bytes, PAGE_SIZE));
da += bytes;
}
}
/* should be called under struct_mutex.. although it can be called
 * from atomic context without struct_mutex to acquire an extra
 * iova ref if you know one is already held.
@@ -295,15 +280,20 @@ int msm_gem_get_iova_locked(struct drm_gem_object *obj, int id,
if (!msm_obj->domain[id].iova) {
struct msm_drm_private *priv = obj->dev->dev_private;
uint32_t offset = (uint32_t)mmap_offset(obj);
struct msm_mmu *mmu = priv->mmus[id];
struct page **pages;
struct page **pages = get_pages(obj);
pages = get_pages(obj);
if (IS_ERR(pages))
return PTR_ERR(pages);
// XXX ideally we would not map buffers writable when not needed...
ret = map_range(priv->iommus[id], offset, msm_obj->sgt,
obj->size, IOMMU_READ | IOMMU_WRITE);
msm_obj->domain[id].iova = offset;
if (iommu_present(&platform_bus_type)) {
uint32_t offset = (uint32_t)mmap_offset(obj);
ret = mmu->funcs->map(mmu, offset, msm_obj->sgt,
obj->size, IOMMU_READ | IOMMU_WRITE);
msm_obj->domain[id].iova = offset;
} else {
msm_obj->domain[id].iova = physaddr(obj);
}
}
if (!ret)
@@ -514,6 +504,7 @@ void msm_gem_describe_objects(struct list_head *list, struct seq_file *m)
void msm_gem_free_object(struct drm_gem_object *obj)
{
struct drm_device *dev = obj->dev;
struct msm_drm_private *priv = obj->dev->dev_private;
struct msm_gem_object *msm_obj = to_msm_bo(obj);
int id;
@@ -525,11 +516,10 @@ void msm_gem_free_object(struct drm_gem_object *obj)
list_del(&msm_obj->mm_list);
for (id = 0; id < ARRAY_SIZE(msm_obj->domain); id++) {
if (msm_obj->domain[id].iova) {
struct msm_drm_private *priv = obj->dev->dev_private;
uint32_t offset = (uint32_t)mmap_offset(obj);
unmap_range(priv->iommus[id], offset,
msm_obj->sgt, obj->size);
struct msm_mmu *mmu = priv->mmus[id];
if (mmu && msm_obj->domain[id].iova) {
uint32_t offset = (uint32_t)mmap_offset(obj);
mmu->funcs->unmap(mmu, offset, msm_obj->sgt, obj->size);
}
}
@@ -591,6 +581,7 @@ static int msm_gem_new_impl(struct drm_device *dev,
{
struct msm_drm_private *priv = dev->dev_private;
struct msm_gem_object *msm_obj;
unsigned sz;
switch (flags & MSM_BO_CACHE_MASK) {
case MSM_BO_UNCACHED:
@@ -603,10 +594,17 @@
return -EINVAL;
}
msm_obj = kzalloc(sizeof(*msm_obj), GFP_KERNEL);
sz = sizeof(*msm_obj);
if (!iommu_present(&platform_bus_type))
sz += sizeof(struct drm_mm_node);
msm_obj = kzalloc(sz, GFP_KERNEL);
if (!msm_obj)
return -ENOMEM;
if (!iommu_present(&platform_bus_type))
msm_obj->vram_node = (void *)&msm_obj[1];
msm_obj->flags = flags;
msm_obj->resv = &msm_obj->_resv;
@@ -623,7 +621,7 @@ static int msm_gem_new_impl(struct drm_device *dev,
struct drm_gem_object *msm_gem_new(struct drm_device *dev,
uint32_t size, uint32_t flags)
{
struct drm_gem_object *obj;
struct drm_gem_object *obj = NULL;
int ret;
WARN_ON(!mutex_is_locked(&dev->struct_mutex));
@@ -634,9 +632,13 @@ struct drm_gem_object *msm_gem_new(struct drm_device *dev,
if (ret)
goto fail;
ret = drm_gem_object_init(dev, obj, size);
if (ret)
goto fail;
if (iommu_present(&platform_bus_type)) {
ret = drm_gem_object_init(dev, obj, size);
if (ret)
goto fail;
} else {
drm_gem_private_object_init(dev, obj, size);
}
return obj;
@@ -654,6 +656,12 @@ struct drm_gem_object *msm_gem_import(struct drm_device *dev,
struct drm_gem_object *obj;
int ret, npages;
/* if we don't have IOMMU, don't bother pretending we can import: */
if (!iommu_present(&platform_bus_type)) {
dev_err(dev->dev, "cannot import without IOMMU\n");
return ERR_PTR(-EINVAL);
}
size = PAGE_ALIGN(size);
ret = msm_gem_new_impl(dev, size, MSM_BO_WC, &obj);
...
@@ -57,6 +57,11 @@ struct msm_gem_object {
/* normally (resv == &_resv) except for imported bo's */
struct reservation_object *resv;
struct reservation_object _resv;
/* For physically contiguous buffers. Used when we don't have
* an IOMMU.
*/
struct drm_mm_node *vram_node;
};
#define to_msm_bo(x) container_of(x, struct msm_gem_object, base)
...
@@ -17,6 +17,7 @@
#include "msm_gpu.h"
#include "msm_gem.h"
#include "msm_mmu.h"
/*
@@ -25,20 +26,10 @@
#ifdef CONFIG_MSM_BUS_SCALING
#include <mach/board.h>
#include <mach/kgsl.h>
static void bs_init(struct msm_gpu *gpu)
static void bs_init(struct msm_gpu *gpu, struct platform_device *pdev)
{
struct drm_device *dev = gpu->dev;
struct kgsl_device_platform_data *pdata;
if (gpu->bus_scale_table) {
gpu->bsc = msm_bus_scale_register_client(gpu->bus_scale_table);
if (!pdev) {
dev_err(dev->dev, "could not find dtv pdata\n");
return;
}
pdata = pdev->dev.platform_data;
if (pdata->bus_scale_table) {
gpu->bsc = msm_bus_scale_register_client(pdata->bus_scale_table);
DBG("bus scale client: %08x", gpu->bsc); DBG("bus scale client: %08x", gpu->bsc);
} }
} }
...@@ -59,7 +50,7 @@ static void bs_set(struct msm_gpu *gpu, int idx) ...@@ -59,7 +50,7 @@ static void bs_set(struct msm_gpu *gpu, int idx)
} }
} }
#else #else
static void bs_init(struct msm_gpu *gpu, struct platform_device *pdev) {} static void bs_init(struct msm_gpu *gpu) {}
static void bs_fini(struct msm_gpu *gpu) {} static void bs_fini(struct msm_gpu *gpu) {}
static void bs_set(struct msm_gpu *gpu, int idx) {} static void bs_set(struct msm_gpu *gpu, int idx) {}
#endif #endif
...@@ -363,6 +354,7 @@ int msm_gpu_init(struct drm_device *drm, struct platform_device *pdev, ...@@ -363,6 +354,7 @@ int msm_gpu_init(struct drm_device *drm, struct platform_device *pdev,
struct msm_gpu *gpu, const struct msm_gpu_funcs *funcs, struct msm_gpu *gpu, const struct msm_gpu_funcs *funcs,
const char *name, const char *ioname, const char *irqname, int ringsz) const char *name, const char *ioname, const char *irqname, int ringsz)
{ {
struct iommu_domain *iommu;
int i, ret;
gpu->dev = drm;
@@ -428,13 +420,14 @@ int msm_gpu_init(struct drm_device *drm, struct platform_device *pdev,
 * and have separate page tables per context. For now, to keep things
 * simple and to get something working, just use a single address space:
 */
gpu->iommu = iommu_domain_alloc(&platform_bus_type);
if (!gpu->iommu) {
dev_err(drm->dev, "failed to allocate IOMMU\n");
ret = -ENOMEM;
goto fail;
iommu = iommu_domain_alloc(&platform_bus_type);
if (iommu) {
dev_info(drm->dev, "%s: using IOMMU\n", name);
gpu->mmu = msm_iommu_new(drm, iommu);
} else {
dev_info(drm->dev, "%s: no IOMMU, fallback to VRAM carveout!\n", name);
}
gpu->id = msm_register_iommu(drm, gpu->iommu);
gpu->id = msm_register_mmu(drm, gpu->mmu);
/* Create ringbuffer: */
gpu->rb = msm_ringbuffer_new(gpu, ringsz);
@@ -452,7 +445,7 @@ int msm_gpu_init(struct drm_device *drm, struct platform_device *pdev,
goto fail;
}
bs_init(gpu, pdev);
bs_init(gpu);
return 0;
@@ -474,6 +467,6 @@ void msm_gpu_cleanup(struct msm_gpu *gpu)
msm_ringbuffer_destroy(gpu->rb);
}
if (gpu->iommu)
iommu_domain_free(gpu->iommu);
if (gpu->mmu)
gpu->mmu->funcs->destroy(gpu->mmu);
}
@@ -78,14 +78,18 @@ struct msm_gpu {
void __iomem *mmio;
int irq;
struct iommu_domain *iommu;
struct msm_mmu *mmu;
int id;
/* Power Control: */
struct regulator *gpu_reg, *gpu_cx;
struct clk *ebi1_clk, *grp_clks[5];
uint32_t fast_rate, slow_rate, bus_freq;
#ifdef CONFIG_MSM_BUS_SCALING
struct msm_bus_scale_pdata *bus_scale_table;
uint32_t bsc;
#endif
/* Hang Detction: */
#define DRM_MSM_HANGCHECK_PERIOD 500 /* in ms */
...
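Since bs_init() now reads gpu->bus_scale_table instead of digging the table out of kgsl platform data itself, the GPU-specific probe code is expected to fill that field in before msm_gpu_init() runs. A hedged sketch of that hand-off, reusing the kgsl_device_platform_data type from the removed code above (the function name is hypothetical and this hook is not part of the diff shown here):

/* Hypothetical adreno-side hook: pull the bus scale table out of the
 * legacy kgsl platform data and park it in the new msm_gpu field, so
 * bs_init() in msm_gpu.c can register the client. */
#ifdef CONFIG_MSM_BUS_SCALING
#include <mach/board.h>
#include <mach/kgsl.h>

static void hypothetical_set_bus_scale_table(struct msm_gpu *gpu,
        struct platform_device *pdev)
{
    struct kgsl_device_platform_data *pdata = pdev->dev.platform_data;

    if (pdata)
        gpu->bus_scale_table = pdata->bus_scale_table;
}
#else
static void hypothetical_set_bus_scale_table(struct msm_gpu *gpu,
        struct platform_device *pdev) {}
#endif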
/*
* Copyright (C) 2013 Red Hat
* Author: Rob Clark <robdclark@gmail.com>
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 as published by
* the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
* You should have received a copy of the GNU General Public License along with
* this program. If not, see <http://www.gnu.org/licenses/>.
*/
#include "msm_drv.h"
#include "msm_mmu.h"
struct msm_iommu {
struct msm_mmu base;
struct iommu_domain *domain;
};
#define to_msm_iommu(x) container_of(x, struct msm_iommu, base)
static int msm_fault_handler(struct iommu_domain *iommu, struct device *dev,
unsigned long iova, int flags, void *arg)
{
DBG("*** fault: iova=%08lx, flags=%d", iova, flags);
return 0;
}
static int msm_iommu_attach(struct msm_mmu *mmu, const char **names, int cnt)
{
struct drm_device *dev = mmu->dev;
struct msm_iommu *iommu = to_msm_iommu(mmu);
int i, ret;
for (i = 0; i < cnt; i++) {
struct device *msm_iommu_get_ctx(const char *ctx_name);
struct device *ctx = msm_iommu_get_ctx(names[i]);
if (IS_ERR_OR_NULL(ctx))
continue;
ret = iommu_attach_device(iommu->domain, ctx);
if (ret) {
dev_warn(dev->dev, "could not attach iommu to %s", names[i]);
return ret;
}
}
return 0;
}
static int msm_iommu_map(struct msm_mmu *mmu, uint32_t iova,
struct sg_table *sgt, unsigned len, int prot)
{
struct msm_iommu *iommu = to_msm_iommu(mmu);
struct iommu_domain *domain = iommu->domain;
struct scatterlist *sg;
unsigned int da = iova;
unsigned int i, j;
int ret;
if (!domain || !sgt)
return -EINVAL;
for_each_sg(sgt->sgl, sg, sgt->nents, i) {
u32 pa = sg_phys(sg) - sg->offset;
size_t bytes = sg->length + sg->offset;
VERB("map[%d]: %08x %08x(%x)", i, iova, pa, bytes);
ret = iommu_map(domain, da, pa, bytes, prot);
if (ret)
goto fail;
da += bytes;
}
return 0;
fail:
da = iova;
for_each_sg(sgt->sgl, sg, i, j) {
size_t bytes = sg->length + sg->offset;
iommu_unmap(domain, da, bytes);
da += bytes;
}
return ret;
}
static int msm_iommu_unmap(struct msm_mmu *mmu, uint32_t iova,
struct sg_table *sgt, unsigned len)
{
struct msm_iommu *iommu = to_msm_iommu(mmu);
struct iommu_domain *domain = iommu->domain;
struct scatterlist *sg;
unsigned int da = iova;
int i;
for_each_sg(sgt->sgl, sg, sgt->nents, i) {
size_t bytes = sg->length + sg->offset;
size_t unmapped;
unmapped = iommu_unmap(domain, da, bytes);
if (unmapped < bytes)
return unmapped;
VERB("unmap[%d]: %08x(%x)", i, iova, bytes);
BUG_ON(!IS_ALIGNED(bytes, PAGE_SIZE));
da += bytes;
}
return 0;
}
static void msm_iommu_destroy(struct msm_mmu *mmu)
{
struct msm_iommu *iommu = to_msm_iommu(mmu);
iommu_domain_free(iommu->domain);
kfree(iommu);
}
static const struct msm_mmu_funcs funcs = {
.attach = msm_iommu_attach,
.map = msm_iommu_map,
.unmap = msm_iommu_unmap,
.destroy = msm_iommu_destroy,
};
struct msm_mmu *msm_iommu_new(struct drm_device *dev, struct iommu_domain *domain)
{
struct msm_iommu *iommu;
iommu = kzalloc(sizeof(*iommu), GFP_KERNEL);
if (!iommu)
return ERR_PTR(-ENOMEM);
iommu->domain = domain;
msm_mmu_init(&iommu->base, dev, &funcs);
iommu_set_fault_handler(domain, msm_fault_handler, dev);
return &iommu->base;
}
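msm_iommu.c ends here. The attach step (binding the per-block IOMMU context banks to the domain) happens in the GPU/display specific code, which is not part of this excerpt. A rough sketch of how a caller might drive the new interface before handing the result to msm_register_mmu(); the wrapper function and the port names are assumptions for illustration:

/* Hypothetical caller: wrap an IOMMU domain, attach the context banks a
 * block uses, and hand back the msm_mmu. */
static struct msm_mmu *hypothetical_iommu_attach(struct drm_device *dev,
        struct iommu_domain *domain)
{
    /* port names are assumed for illustration */
    static const char *iommu_ports[] = { "gfx3d_user", "gfx3d_priv" };
    struct msm_mmu *mmu = msm_iommu_new(dev, domain);
    int ret;

    if (IS_ERR(mmu))
        return mmu;

    ret = mmu->funcs->attach(mmu, iommu_ports, ARRAY_SIZE(iommu_ports));
    if (ret) {
        mmu->funcs->destroy(mmu);
        return ERR_PTR(ret);
    }

    return mmu;
}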
/*
* Copyright (C) 2013 Red Hat
* Author: Rob Clark <robdclark@gmail.com>
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 as published by
* the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
* You should have received a copy of the GNU General Public License along with
* this program. If not, see <http://www.gnu.org/licenses/>.
*/
#ifndef __MSM_KMS_H__
#define __MSM_KMS_H__
#include <linux/clk.h>
#include <linux/regulator/consumer.h>
#include "msm_drv.h"
/* As there are different display controller blocks depending on the
* snapdragon version, the kms support is split out and the appropriate
* implementation is loaded at runtime. The kms module is responsible
* for constructing the appropriate planes/crtcs/encoders/connectors.
*/
struct msm_kms_funcs {
/* hw initialization: */
int (*hw_init)(struct msm_kms *kms);
/* irq handling: */
void (*irq_preinstall)(struct msm_kms *kms);
int (*irq_postinstall)(struct msm_kms *kms);
void (*irq_uninstall)(struct msm_kms *kms);
irqreturn_t (*irq)(struct msm_kms *kms);
int (*enable_vblank)(struct msm_kms *kms, struct drm_crtc *crtc);
void (*disable_vblank)(struct msm_kms *kms, struct drm_crtc *crtc);
/* misc: */
const struct msm_format *(*get_format)(struct msm_kms *kms, uint32_t format);
long (*round_pixclk)(struct msm_kms *kms, unsigned long rate,
struct drm_encoder *encoder);
/* cleanup: */
void (*preclose)(struct msm_kms *kms, struct drm_file *file);
void (*destroy)(struct msm_kms *kms);
};
struct msm_kms {
const struct msm_kms_funcs *funcs;
/* irq handling: */
bool in_irq;
struct list_head irq_list; /* list of mdp4_irq */
uint32_t vblank_mask; /* irq bits set for userspace vblank */
};
static inline void msm_kms_init(struct msm_kms *kms,
const struct msm_kms_funcs *funcs)
{
kms->funcs = funcs;
}
struct msm_kms *mdp4_kms_init(struct drm_device *dev);
struct msm_kms *mdp5_kms_init(struct drm_device *dev);
#endif /* __MSM_KMS_H__ */
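msm_kms.h declares both mdp4_kms_init() and mdp5_kms_init(); the driver core picks one when it loads. A minimal sketch of that selection, with get_mdp_version() standing in for whatever probing msm_drv.c actually does (not shown in this excerpt), and error handling kept deliberately simple:

/* Hypothetical selection helper; get_mdp_version() is a stand-in for the
 * real detection (DT compatible string, config option, ...). */
static struct msm_kms *hypothetical_kms_init(struct drm_device *dev)
{
    struct msm_kms *kms;
    int ret;

    switch (get_mdp_version(dev)) {
    case 4:
        kms = mdp4_kms_init(dev);
        break;
    case 5:
        kms = mdp5_kms_init(dev);
        break;
    default:
        return ERR_PTR(-ENODEV);
    }

    if (IS_ERR(kms))
        return kms;

    /* every backend is expected to bring the hw to a known state */
    ret = kms->funcs->hw_init(kms);
    if (ret) {
        kms->funcs->destroy(kms);
        return ERR_PTR(ret);
    }

    return kms;
}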
/*
* Copyright (C) 2013 Red Hat
* Author: Rob Clark <robdclark@gmail.com>
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 as published by
* the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
* You should have received a copy of the GNU General Public License along with
* this program. If not, see <http://www.gnu.org/licenses/>.
*/
#ifndef __MSM_MMU_H__
#define __MSM_MMU_H__
#include <linux/iommu.h>
struct msm_mmu_funcs {
int (*attach)(struct msm_mmu *mmu, const char **names, int cnt);
int (*map)(struct msm_mmu *mmu, uint32_t iova, struct sg_table *sgt,
unsigned len, int prot);
int (*unmap)(struct msm_mmu *mmu, uint32_t iova, struct sg_table *sgt,
unsigned len);
void (*destroy)(struct msm_mmu *mmu);
};
struct msm_mmu {
const struct msm_mmu_funcs *funcs;
struct drm_device *dev;
};
static inline void msm_mmu_init(struct msm_mmu *mmu, struct drm_device *dev,
const struct msm_mmu_funcs *funcs)
{
mmu->dev = dev;
mmu->funcs = funcs;
}
struct msm_mmu *msm_iommu_new(struct drm_device *dev, struct iommu_domain *domain);
struct msm_mmu *msm_gpummu_new(struct drm_device *dev, struct msm_gpu *gpu);
#endif /* __MSM_MMU_H__ */
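msm_mmu.h also declares msm_gpummu_new(), whose implementation is not included in this series. To illustrate the shape a second backend takes behind struct msm_mmu_funcs, here is a deliberately do-nothing skeleton (purely hypothetical, not the real gpummu code):

/* Hypothetical no-op backend, only to show the funcs-table pattern that
 * msm_iommu.c follows; a real backend would program the GPU's own MMU
 * (or a carveout) in map/unmap. */
struct msm_nullmmu {
    struct msm_mmu base;
};

static int nullmmu_attach(struct msm_mmu *mmu, const char **names, int cnt)
{
    return 0;    /* nothing to attach to */
}

static int nullmmu_map(struct msm_mmu *mmu, uint32_t iova,
        struct sg_table *sgt, unsigned len, int prot)
{
    return 0;    /* buffers are already physically addressable */
}

static int nullmmu_unmap(struct msm_mmu *mmu, uint32_t iova,
        struct sg_table *sgt, unsigned len)
{
    return 0;
}

static void nullmmu_destroy(struct msm_mmu *mmu)
{
    kfree(container_of(mmu, struct msm_nullmmu, base));
}

static const struct msm_mmu_funcs nullmmu_funcs = {
    .attach = nullmmu_attach,
    .map = nullmmu_map,
    .unmap = nullmmu_unmap,
    .destroy = nullmmu_destroy,
};

struct msm_mmu *msm_nullmmu_new(struct drm_device *dev)
{
    struct msm_nullmmu *nullmmu = kzalloc(sizeof(*nullmmu), GFP_KERNEL);

    if (!nullmmu)
        return ERR_PTR(-ENOMEM);

    msm_mmu_init(&nullmmu->base, dev, &nullmmu_funcs);
    return &nullmmu->base;
}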