Commit 4a0dc640 authored by Jeykumar Sankaran, committed by Rob Clark

drm/msm/dpu: remove LOCK/CLEAR support in RM

DPU had support to LOCK the hw resources in
atomic check and to CLEAR the locked resources
explicitly through custom property values. Now that
DPU is stripped of all the custom properties, the RM
handlers for this feature are no-ops. This change
removes all of their references.

changes in v5:
	- Introduced in the series.
Signed-off-by: Jeykumar Sankaran <jsanka@codeaurora.org>
Signed-off-by: Sean Paul <seanpaul@chromium.org>
Signed-off-by: Rob Clark <robdclark@gmail.com>
parent 9816b226
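For context, the sketch below condenses the behaviour this patch deletes, as seen in the removed hunks: LOCK/CLEAR bits in the requirements' top_ctrl decided whether a reservation made during atomic check was kept, discarded, or released up front. This is a minimal stand-alone C sketch, not the driver code; the types, helper names, and messages are illustrative stand-ins.

/* Illustrative sketch only: condenses the LOCK/CLEAR handling removed below. */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define BIT(nr) (1ULL << (nr))

enum { RESERVE_LOCK, RESERVE_CLEAR };   /* stand-ins for DPU_RM_TOPCTL_* */

struct requirements { uint64_t top_ctrl; };

#define RQ_LOCK(r)  ((r)->top_ctrl & BIT(RESERVE_LOCK))
#define RQ_CLEAR(r) ((r)->top_ctrl & BIT(RESERVE_CLEAR))

/* Pre-patch decision flow during reservation. */
static void reserve(bool test_only, const struct requirements *reqs)
{
	if (test_only && RQ_CLEAR(reqs))
		puts("atomic check: release the reservation currently held");

	if (test_only && !RQ_LOCK(reqs))
		puts("atomic check: test the reservation, then discard it");
	else
		puts("keep the reservation (commit path, or LOCK requested)");
}

int main(void)
{
	struct requirements reqs = { .top_ctrl = BIT(RESERVE_LOCK) };

	reserve(true, &reqs);   /* LOCK: reservation survives atomic check */
	reqs.top_ctrl = 0;
	reserve(true, &reqs);   /* default: test-only reservation discarded */
	return 0;
}

With the patch applied, only the default behaviour remains: a test_only reservation is always discarded after the check.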
@@ -24,8 +24,6 @@
 #define RESERVED_BY_OTHER(h, r) \
 	((h)->rsvp && ((h)->rsvp->enc_id != (r)->enc_id))
 
-#define RM_RQ_LOCK(r) ((r)->top_ctrl & BIT(DPU_RM_TOPCTL_RESERVE_LOCK))
-#define RM_RQ_CLEAR(r) ((r)->top_ctrl & BIT(DPU_RM_TOPCTL_RESERVE_CLEAR))
 #define RM_IS_TOPOLOGY_MATCH(t, r) ((t).num_lm == (r).num_lm && \
 				(t).num_comp_enc == (r).num_enc && \
 				(t).num_intf == (r).num_intf)
@@ -48,12 +46,10 @@ static const struct dpu_rm_topology_def g_top_table[] = {
 
 /**
  * struct dpu_rm_requirements - Reservation requirements parameter bundle
- * @top_ctrl: topology control preference from kernel client
  * @top: selected topology for the display
  * @hw_res: Hardware resources required as reported by the encoders
  */
 struct dpu_rm_requirements {
-	uint64_t top_ctrl;
 	const struct dpu_rm_topology_def *topology;
 	struct dpu_encoder_hw_resources hw_res;
 };
@@ -755,8 +751,7 @@ static int _dpu_rm_populate_requirements(
 		return -EINVAL;
 	}
 
-	DRM_DEBUG_KMS("top_ctrl: 0x%llX num_h_tiles: %d\n", reqs->top_ctrl,
-			reqs->hw_res.display_num_of_h_tiles);
+	DRM_DEBUG_KMS("num_h_tiles: %d\n", reqs->hw_res.display_num_of_h_tiles);
 	DRM_DEBUG_KMS("num_lm: %d num_ctl: %d topology: %d split_display: %d\n",
 			reqs->topology->num_lm, reqs->topology->num_ctl,
 			reqs->topology->top_name,
@@ -956,18 +951,6 @@ int dpu_rm_reserve(
 	rsvp_cur = _dpu_rm_get_rsvp(rm, enc);
 
-	/*
-	 * User can request that we clear out any reservation during the
-	 * atomic_check phase by using this CLEAR bit
-	 */
-	if (rsvp_cur && test_only && RM_RQ_CLEAR(&reqs)) {
-		DPU_DEBUG("test_only & CLEAR: clear rsvp[s%de%d]\n",
-				rsvp_cur->seq, rsvp_cur->enc_id);
-		_dpu_rm_release_rsvp(rm, rsvp_cur, conn_state->connector);
-		rsvp_cur = NULL;
-		_dpu_rm_print_rsvps(rm, DPU_RM_STAGE_AFTER_CLEAR);
-	}
-
 	/* Check the proposed reservation, store it in hw's "next" field */
 	ret = _dpu_rm_make_next_rsvp(rm, enc, crtc_state, conn_state,
 			rsvp_nxt, &reqs);
@@ -977,7 +960,7 @@
 	if (ret) {
 		DPU_ERROR("failed to reserve hw resources: %d\n", ret);
 		_dpu_rm_release_rsvp(rm, rsvp_nxt, conn_state->connector);
-	} else if (test_only && !RM_RQ_LOCK(&reqs)) {
+	} else if (test_only) {
 		/*
 		 * Normally, if test_only, test the reservation and then undo
 		 * However, if the user requests LOCK, then keep the reservation
@@ -987,10 +970,6 @@
 				rsvp_nxt->seq, rsvp_nxt->enc_id);
 		_dpu_rm_release_rsvp(rm, rsvp_nxt, conn_state->connector);
 	} else {
-		if (test_only && RM_RQ_LOCK(&reqs))
-			DPU_DEBUG("test_only & LOCK: lock rsvp[s%de%d]\n",
-				rsvp_nxt->seq, rsvp_nxt->enc_id);
-
 		_dpu_rm_release_rsvp(rm, rsvp_cur, conn_state->connector);
 
 		ret = _dpu_rm_commit_rsvp(rm, rsvp_nxt, conn_state);
...
@@ -35,22 +35,6 @@ enum dpu_rm_topology_name {
 	DPU_RM_TOPOLOGY_MAX,
 };
 
-/**
- * enum dpu_rm_topology_control - HW resource use case in use by connector
- * @DPU_RM_TOPCTL_RESERVE_LOCK: If set, in AtomicTest phase, after a successful
- *                              test, reserve the resources for this display.
- *                              Normal behavior would not impact the reservation
- *                              list during the AtomicTest phase.
- * @DPU_RM_TOPCTL_RESERVE_CLEAR: If set, in AtomicTest phase, before testing,
- *                               release any reservation held by this display.
- *                               Normal behavior would not impact the
- *                               reservation list during the AtomicTest phase.
- */
-enum dpu_rm_topology_control {
-	DPU_RM_TOPCTL_RESERVE_LOCK,
-	DPU_RM_TOPCTL_RESERVE_CLEAR,
-};
-
 /**
  * struct dpu_rm - DPU dynamic hardware resource manager
  * @dev: device handle for event logging purposes
...