Commit ad92af7e authored by Jeykumar Sankaran, committed by Rob Clark

drm/msm/dpu: remove RM topology definition

RM maintained a redundant definition of the display topology
to identify the number of HW blocks needed for a display and
their hardware dependencies. This information can be deduced
implicitly from the msm_display_topology structure available
in the RM reserve request. In addition to getting rid of the
redundant topology definition, this change also removes the
topology name enum and its usages.

changes in v4:
	- remove the topology name enum entirely (Sean)
changes in v5:
	- remove RM topology definition and their
	  references (Sean)
	- Implement helper for dual mixer CRTC (Sean)
changes in v6:
	- avoid heap memory for topology (Sean)
Signed-off-by: Jeykumar Sankaran <jsanka@codeaurora.org>
Signed-off-by: Sean Paul <seanpaul@chromium.org>
Signed-off-by: Rob Clark <robdclark@gmail.com>
parent 157b9ce7
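For context, the deduction the commit message refers to can be summarized in a
few lines: everything the removed g_top_table encoded is derivable directly
from the msm_display_topology fields. The sketch below is an illustration
written for this description, not code from the patch; the struct name and the
helper names are invented, and the struct is abbreviated to the fields used
here (they follow the msm_display_topology fields referenced in the diff).

	/* Illustrative sketch only -- names below are made up for clarity. */
	#include <linux/types.h>

	struct msm_display_topology_sketch {
		u32 num_lm;	/* layer mixers */
		u32 num_enc;	/* compression encoders */
		u32 num_intf;	/* physical interfaces */
	};

	/* Each interface programs its own control path, so one CTL per INTF. */
	static inline u32 topology_num_ctls(const struct msm_display_topology_sketch *top)
	{
		return top->num_intf;
	}

	/* Driving more than one interface implies a split display. */
	static inline bool topology_needs_split_display(const struct msm_display_topology_sketch *top)
	{
		return top->num_intf > 1;
	}

	/* Two layer mixers merging into one interface implies a dual-mixer CRTC. */
	static inline bool topology_is_dual_mixer(const struct msm_display_topology_sketch *top)
	{
		return top->num_lm == 2;
	}

These are the same rules the patch expresses against live state in the hunks
below: dpu_crtc_state_is_stereo() on the CRTC state, _dpu_rm_needs_split_display()
in the resource manager, and one CTL reserved per interface.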
@@ -237,6 +237,16 @@ struct dpu_crtc_state {
#define to_dpu_crtc_state(x) \
container_of(x, struct dpu_crtc_state, base)
/**
* dpu_crtc_state_is_stereo - Is crtc virtualized with two mixers?
* @cstate: Pointer to dpu crtc state
* @Return: true - has two mixers, false - has one mixer
*/
static inline bool dpu_crtc_state_is_stereo(struct dpu_crtc_state *cstate)
{
return cstate->num_mixers == CRTC_DUAL_MIXERS;
}
/**
* dpu_crtc_get_mixer_height - get the mixer height
* Mixer height will be same as panel height
@@ -1003,7 +1003,6 @@ static void dpu_encoder_virt_mode_set(struct drm_encoder *drm_enc,
struct drm_connector *conn = NULL, *conn_iter;
struct dpu_rm_hw_iter pp_iter, ctl_iter;
struct msm_display_topology topology;
enum dpu_rm_topology_name topology_name;
struct dpu_hw_ctl *hw_ctl[MAX_CHANNELS_PER_ENC] = { NULL };
int i = 0, ret;
@@ -1059,7 +1058,6 @@ static void dpu_encoder_virt_mode_set(struct drm_encoder *drm_enc,
hw_ctl[i] = (struct dpu_hw_ctl *)ctl_iter.hw;
}
topology_name = dpu_rm_get_topology_name(topology);
for (i = 0; i < dpu_enc->num_phys_encs; i++) {
struct dpu_encoder_phys *phys = dpu_enc->phys_encs[i];
@@ -1080,7 +1078,6 @@ static void dpu_encoder_virt_mode_set(struct drm_encoder *drm_enc,
phys->hw_ctl = hw_ctl[i];
phys->connector = conn->state->connector;
phys->topology_name = topology_name;
if (phys->ops.mode_set)
phys->ops.mode_set(phys, mode, adj_mode);
}
@@ -32,7 +32,6 @@
/**
* Encoder functions and data types
* @intfs: Interfaces this encoder is using, INTF_MODE_NONE if unused
* @topology: Topology of the display
*/
struct dpu_encoder_hw_resources {
enum dpu_intf_mode intfs[INTF_MAX];
@@ -23,6 +23,7 @@
#include "dpu_hw_ctl.h"
#include "dpu_hw_top.h"
#include "dpu_encoder.h"
#include "dpu_crtc.h"
#define DPU_ENCODER_NAME_MAX 16
@@ -209,7 +210,6 @@ struct dpu_encoder_irq {
* @split_role: Role to play in a split-panel configuration
* @intf_mode: Interface mode
* @intf_idx: Interface index on dpu hardware
* @topology_name: topology selected for the display
* @enc_spinlock: Virtual-Encoder-Wide Spin Lock for IRQ purposes
* @enable_state: Enable state tracking
* @vblank_refcount: Reference count of vblank request
@@ -237,7 +237,6 @@ struct dpu_encoder_phys {
enum dpu_enc_split_role split_role;
enum dpu_intf_mode intf_mode;
enum dpu_intf intf_idx;
enum dpu_rm_topology_name topology_name;
spinlock_t *enc_spinlock;
enum dpu_enc_enable_state enable_state;
atomic_t vblank_refcount;
@@ -355,11 +354,15 @@ void dpu_encoder_helper_hw_reset(struct dpu_encoder_phys *phys_enc);
static inline enum dpu_3d_blend_mode dpu_encoder_helper_get_3d_blend_mode(
struct dpu_encoder_phys *phys_enc)
{
struct dpu_crtc_state *dpu_cstate;
if (!phys_enc || phys_enc->enable_state == DPU_ENC_DISABLING)
return BLEND_3D_NONE;
dpu_cstate = to_dpu_crtc_state(phys_enc->parent->crtc->state);
if (phys_enc->split_role == ENC_ROLE_SOLO &&
phys_enc->topology_name == DPU_RM_TOPOLOGY_DUALPIPE_3DMERGE)
dpu_crtc_state_is_stereo(dpu_cstate))
return BLEND_3D_H_ROW_INT;
return BLEND_3D_NONE;
@@ -355,13 +355,14 @@ static void dpu_encoder_phys_vid_underrun_irq(void *arg, int irq_idx)
static bool _dpu_encoder_phys_is_dual_ctl(struct dpu_encoder_phys *phys_enc)
{
struct dpu_crtc_state *dpu_cstate;
if (!phys_enc)
return false;
if (phys_enc->topology_name == DPU_RM_TOPOLOGY_DUALPIPE)
return true;
dpu_cstate = to_dpu_crtc_state(phys_enc->parent->crtc->state);
return false;
return dpu_cstate->num_ctls > 1;
}
static bool dpu_encoder_phys_vid_needs_single_flush(
@@ -24,33 +24,13 @@
#define RESERVED_BY_OTHER(h, r) \
((h)->rsvp && ((h)->rsvp->enc_id != (r)->enc_id))
#define RM_IS_TOPOLOGY_MATCH(t, r) ((t).num_lm == (r).num_lm && \
(t).num_comp_enc == (r).num_enc && \
(t).num_intf == (r).num_intf)
struct dpu_rm_topology_def {
enum dpu_rm_topology_name top_name;
int num_lm;
int num_comp_enc;
int num_intf;
int num_ctl;
int needs_split_display;
};
static const struct dpu_rm_topology_def g_top_table[] = {
{ DPU_RM_TOPOLOGY_NONE, 0, 0, 0, 0, false },
{ DPU_RM_TOPOLOGY_SINGLEPIPE, 1, 0, 1, 1, false },
{ DPU_RM_TOPOLOGY_DUALPIPE, 2, 0, 2, 2, true },
{ DPU_RM_TOPOLOGY_DUALPIPE_3DMERGE, 2, 0, 1, 1, false },
};
/**
* struct dpu_rm_requirements - Reservation requirements parameter bundle
* @top: selected topology for the display
* @topology: selected topology for the display
* @hw_res: Hardware resources required as reported by the encoders
*/
struct dpu_rm_requirements {
const struct dpu_rm_topology_def *topology;
struct msm_display_topology topology;
struct dpu_encoder_hw_resources hw_res;
};
@@ -66,13 +46,11 @@ struct dpu_rm_requirements {
* @enc_id: Reservations are tracked by Encoder DRM object ID.
* CRTCs may be connected to multiple Encoders.
* An encoder or connector id identifies the display path.
* @topology DRM<->HW topology use case
*/
struct dpu_rm_rsvp {
struct list_head list;
uint32_t seq;
uint32_t enc_id;
enum dpu_rm_topology_name topology;
};
/**
@@ -116,8 +94,8 @@ static void _dpu_rm_print_rsvps(
DPU_DEBUG("%d\n", stage);
list_for_each_entry(rsvp, &rm->rsvps, list) {
DRM_DEBUG_KMS("%d rsvp[s%ue%u] topology %d\n", stage, rsvp->seq,
rsvp->enc_id, rsvp->topology);
DRM_DEBUG_KMS("%d rsvp[s%ue%u]\n", stage, rsvp->seq,
rsvp->enc_id);
}
for (type = 0; type < DPU_HW_BLK_MAX; type++) {
@@ -140,18 +118,6 @@ struct dpu_hw_mdp *dpu_rm_get_mdp(struct dpu_rm *rm)
return rm->hw_mdp;
}
enum dpu_rm_topology_name
dpu_rm_get_topology_name(struct msm_display_topology topology)
{
int i;
for (i = 0; i < DPU_RM_TOPOLOGY_MAX; i++)
if (RM_IS_TOPOLOGY_MATCH(g_top_table[i], topology))
return g_top_table[i].top_name;
return DPU_RM_TOPOLOGY_NONE;
}
void dpu_rm_init_hw_iter(
struct dpu_rm_hw_iter *iter,
uint32_t enc_id,
@@ -434,6 +400,11 @@ int dpu_rm_init(struct dpu_rm *rm,
return rc;
}
static bool _dpu_rm_needs_split_display(const struct msm_display_topology *top)
{
return top->num_intf > 1;
}
/**
* _dpu_rm_check_lm_and_get_connected_blks - check if proposed layer mixer meets
* proposed use case requirements, incl. hardwired dependent blocks like
@@ -517,14 +488,14 @@ static int _dpu_rm_reserve_lms(
int lm_count = 0;
int i, rc = 0;
if (!reqs->topology->num_lm) {
DPU_ERROR("invalid number of lm: %d\n", reqs->topology->num_lm);
if (!reqs->topology.num_lm) {
DPU_ERROR("invalid number of lm: %d\n", reqs->topology.num_lm);
return -EINVAL;
}
/* Find a primary mixer */
dpu_rm_init_hw_iter(&iter_i, 0, DPU_HW_BLK_LM);
while (lm_count != reqs->topology->num_lm &&
while (lm_count != reqs->topology.num_lm &&
_dpu_rm_get_hw_locked(rm, &iter_i)) {
memset(&lm, 0, sizeof(lm));
memset(&pp, 0, sizeof(pp));
@@ -542,7 +513,7 @@ static int _dpu_rm_reserve_lms(
/* Valid primary mixer found, find matching peers */
dpu_rm_init_hw_iter(&iter_j, 0, DPU_HW_BLK_LM);
while (lm_count != reqs->topology->num_lm &&
while (lm_count != reqs->topology.num_lm &&
_dpu_rm_get_hw_locked(rm, &iter_j)) {
if (iter_i.blk == iter_j.blk)
continue;
@@ -557,7 +528,7 @@ static int _dpu_rm_reserve_lms(
}
}
if (lm_count != reqs->topology->num_lm) {
if (lm_count != reqs->topology.num_lm) {
DPU_DEBUG("unable to find appropriate mixers\n");
return -ENAVAIL;
}
@@ -579,14 +550,20 @@ static int _dpu_rm_reserve_lms(
static int _dpu_rm_reserve_ctls(
struct dpu_rm *rm,
struct dpu_rm_rsvp *rsvp,
const struct dpu_rm_topology_def *top)
const struct msm_display_topology *top)
{
struct dpu_rm_hw_blk *ctls[MAX_BLOCKS];
struct dpu_rm_hw_iter iter;
int i = 0;
int i = 0, num_ctls = 0;
bool needs_split_display = false;
memset(&ctls, 0, sizeof(ctls));
/* each hw_intf needs its own hw_ctrl to program its control path */
num_ctls = top->num_intf;
needs_split_display = _dpu_rm_needs_split_display(top);
dpu_rm_init_hw_iter(&iter, 0, DPU_HW_BLK_CTL);
while (_dpu_rm_get_hw_locked(rm, &iter)) {
const struct dpu_hw_ctl *ctl = to_dpu_hw_ctl(iter.blk->hw);
@@ -600,20 +577,20 @@ static int _dpu_rm_reserve_ctls(
DPU_DEBUG("ctl %d caps 0x%lX\n", iter.blk->id, features);
if (top->needs_split_display != has_split_display)
if (needs_split_display != has_split_display)
continue;
ctls[i] = iter.blk;
DPU_DEBUG("ctl %d match\n", iter.blk->id);
if (++i == top->num_ctl)
if (++i == num_ctls)
break;
}
if (i != top->num_ctl)
if (i != num_ctls)
return -ENAVAIL;
for (i = 0; i < ARRAY_SIZE(ctls) && i < top->num_ctl; i++) {
for (i = 0; i < ARRAY_SIZE(ctls) && i < num_ctls; i++) {
ctls[i]->rsvp_nxt = rsvp;
trace_dpu_rm_reserve_ctls(ctls[i]->id, ctls[i]->type,
rsvp->enc_id);
@@ -686,12 +663,10 @@ static int _dpu_rm_make_next_rsvp(
struct dpu_rm_requirements *reqs)
{
int ret;
struct dpu_rm_topology_def topology;
/* Create reservation info, tag reserved blocks with it as we go */
rsvp->seq = ++rm->rsvp_next_seq;
rsvp->enc_id = enc->base.id;
rsvp->topology = reqs->topology->top_name;
list_add_tail(&rsvp->list, &rm->rsvps);
ret = _dpu_rm_reserve_lms(rm, rsvp, reqs);
@@ -700,17 +675,7 @@ static int _dpu_rm_make_next_rsvp(
return ret;
}
/*
* Do assignment preferring to give away low-resource CTLs first:
* - Check mixers without Split Display
* - Only then allow to grab from CTLs with split display capability
*/
_dpu_rm_reserve_ctls(rm, rsvp, reqs->topology);
if (ret && !reqs->topology->needs_split_display) {
memcpy(&topology, reqs->topology, sizeof(topology));
topology.needs_split_display = true;
_dpu_rm_reserve_ctls(rm, rsvp, &topology);
}
ret = _dpu_rm_reserve_ctls(rm, rsvp, &reqs->topology);
if (ret) {
DPU_ERROR("unable to find appropriate CTL\n");
return ret;
@@ -730,29 +695,13 @@ static int _dpu_rm_populate_requirements(
struct dpu_rm_requirements *reqs,
struct msm_display_topology req_topology)
{
int i;
memset(reqs, 0, sizeof(*reqs));
dpu_encoder_get_hw_resources(enc, &reqs->hw_res);
for (i = 0; i < DPU_RM_TOPOLOGY_MAX; i++) {
if (RM_IS_TOPOLOGY_MATCH(g_top_table[i],
req_topology)) {
reqs->topology = &g_top_table[i];
break;
}
}
if (!reqs->topology) {
DPU_ERROR("invalid topology for the display\n");
return -EINVAL;
}
reqs->topology = req_topology;
DRM_DEBUG_KMS("num_lm: %d num_ctl: %d topology: %d split_display: %d\n",
reqs->topology->num_lm, reqs->topology->num_ctl,
reqs->topology->top_name,
reqs->topology->needs_split_display);
DRM_DEBUG_KMS("num_lm: %d num_enc: %d num_intf: %d\n",
reqs->topology.num_lm, reqs->topology.num_enc,
reqs->topology.num_intf);
return 0;
}
@@ -843,11 +792,10 @@ void dpu_rm_release(struct dpu_rm *rm, struct drm_encoder *enc)
mutex_unlock(&rm->rm_lock);
}
static int _dpu_rm_commit_rsvp(struct dpu_rm *rm, struct dpu_rm_rsvp *rsvp)
static void _dpu_rm_commit_rsvp(struct dpu_rm *rm, struct dpu_rm_rsvp *rsvp)
{
struct dpu_rm_hw_blk *blk;
enum dpu_hw_blk_type type;
int ret = 0;
/* Swap next rsvp to be the active */
for (type = 0; type < DPU_HW_BLK_MAX; type++) {
@@ -858,12 +806,6 @@ static int _dpu_rm_commit_rsvp(struct dpu_rm *rm, struct dpu_rm_rsvp *rsvp)
}
}
}
if (!ret)
DRM_DEBUG_KMS("rsrv enc %d topology %d\n", rsvp->enc_id,
rsvp->topology);
return ret;
}
int dpu_rm_reserve(
@@ -20,21 +20,6 @@
#include "msm_kms.h"
#include "dpu_hw_top.h"
/**
* enum dpu_rm_topology_name - HW resource use case in use by connector
* @DPU_RM_TOPOLOGY_NONE: No topology in use currently
* @DPU_RM_TOPOLOGY_SINGLEPIPE: 1 LM, 1 PP, 1 INTF/WB
* @DPU_RM_TOPOLOGY_DUALPIPE: 2 LM, 2 PP, 2 INTF/WB
* @DPU_RM_TOPOLOGY_DUALPIPE_3DMERGE: 2 LM, 2 PP, 3DMux, 1 INTF/WB
*/
enum dpu_rm_topology_name {
DPU_RM_TOPOLOGY_NONE = 0,
DPU_RM_TOPOLOGY_SINGLEPIPE,
DPU_RM_TOPOLOGY_DUALPIPE,
DPU_RM_TOPOLOGY_DUALPIPE_3DMERGE,
DPU_RM_TOPOLOGY_MAX,
};
/**
* struct dpu_rm - DPU dynamic hardware resource manager
* @dev: device handle for event logging purposes
@@ -167,13 +152,4 @@ bool dpu_rm_get_hw(struct dpu_rm *rm, struct dpu_rm_hw_iter *iter);
*/
int dpu_rm_check_property_topctl(uint64_t val);
/**
* dpu_rm_get_topology_name - returns the name of the the given topology
* definition
* @topology: topology definition
* @Return: name of the topology
*/
enum dpu_rm_topology_name
dpu_rm_get_topology_name(struct msm_display_topology topology);
#endif /* __DPU_RM_H__ */