Commit 28801cc8 authored by Dave Airlie

Merge tag 'amd-drm-fixes-6.5-2023-07-20' of https://gitlab.freedesktop.org/agd5f/linux into drm-fixes

amd-drm-fixes-6.5-2023-07-20:

amdgpu:
- More PCIe DPM fixes for Intel platforms
- DCN3.0.1 fixes
- Virtual display timer fix
- Async flip fix
- SMU13 clock reporting fixes
- Add missing PSP firmware declaration
- DP MST fix
- DCN3.1.x fixes
- Slab out of bounds fix
Signed-off-by: Dave Airlie <airlied@redhat.com>

From: Alex Deucher <alexander.deucher@amd.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20230720133456.7826-1-alexander.deucher@amd.com
parents 534a7915 b13d3e9c
@@ -1709,7 +1709,8 @@ int amdgpu_amdkfd_gpuvm_alloc_memory_of_gpu(
             alloc_flags |= (flags & KFD_IOC_ALLOC_MEM_FLAGS_PUBLIC) ?
             AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED : 0;
         }
-        xcp_id = fpriv->xcp_id == ~0 ? 0 : fpriv->xcp_id;
+        xcp_id = fpriv->xcp_id == AMDGPU_XCP_NO_PARTITION ?
+                0 : fpriv->xcp_id;
     } else if (flags & KFD_IOC_ALLOC_MEM_FLAGS_GTT) {
         domain = alloc_domain = AMDGPU_GEM_DOMAIN_GTT;
         alloc_flags = 0;
...
@@ -1229,13 +1229,13 @@ int amdgpu_driver_open_kms(struct drm_device *dev, struct drm_file *file_priv)
         pasid = 0;
     }
 
-    r = amdgpu_vm_init(adev, &fpriv->vm);
+    r = amdgpu_xcp_open_device(adev, fpriv, file_priv);
     if (r)
         goto error_pasid;
 
-    r = amdgpu_xcp_open_device(adev, fpriv, file_priv);
+    r = amdgpu_vm_init(adev, &fpriv->vm, fpriv->xcp_id);
     if (r)
-        goto error_vm;
+        goto error_pasid;
 
     r = amdgpu_vm_set_pasid(adev, &fpriv->vm, pasid);
     if (r)
...
@@ -1382,7 +1382,7 @@ int amdgpu_mes_self_test(struct amdgpu_device *adev)
         goto error_pasid;
     }
 
-    r = amdgpu_vm_init(adev, vm);
+    r = amdgpu_vm_init(adev, vm, -1);
     if (r) {
         DRM_ERROR("failed to initialize vm\n");
         goto error_pasid;
...
@@ -55,8 +55,9 @@ static enum hrtimer_restart amdgpu_vkms_vblank_simulate(struct hrtimer *timer)
         DRM_WARN("%s: vblank timer overrun\n", __func__);
 
     ret = drm_crtc_handle_vblank(crtc);
+    /* Don't queue timer again when vblank is disabled. */
     if (!ret)
-        DRM_ERROR("amdgpu_vkms failure on handling vblank");
+        return HRTIMER_NORESTART;
 
     return HRTIMER_RESTART;
 }
@@ -81,7 +82,7 @@ static void amdgpu_vkms_disable_vblank(struct drm_crtc *crtc)
 {
     struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
 
-    hrtimer_cancel(&amdgpu_crtc->vblank_timer);
+    hrtimer_try_to_cancel(&amdgpu_crtc->vblank_timer);
 }
 
 static bool amdgpu_vkms_get_vblank_timestamp(struct drm_crtc *crtc,
...
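The two vkms hunks work as a pair: the simulated-vblank callback now tells the hrtimer core to stop re-arming (HRTIMER_NORESTART) once drm_crtc_handle_vblank() reports that vblank delivery is disabled, and the disable path switches to hrtimer_try_to_cancel(), which does not wait for a running callback — likely because the disable path can be reached from the callback itself, where a synchronous hrtimer_cancel() could block on its own execution. Below is a minimal userspace sketch of the restart contract; the names are illustrative stand-ins, not kernel API:

```c
#include <stdbool.h>
#include <stdio.h>

/* Illustrative stand-ins for the kernel's hrtimer return codes. */
enum restart { NORESTART, RESTART };

static bool vblank_enabled = true;

/* Analogue of amdgpu_vkms_vblank_simulate(): the return value, not an
 * error print, decides whether the timer is re-armed. */
static enum restart vblank_cb(void)
{
	/* drm_crtc_handle_vblank() returns false once vblank is off;
	 * stop re-arming instead of logging an error and restarting. */
	if (!vblank_enabled)
		return NORESTART;
	puts("vblank event delivered");
	return RESTART;
}

int main(void)
{
	for (int tick = 0; tick < 5; tick++) {
		if (tick == 3)
			vblank_enabled = false; /* disable_vblank() ran */
		if (vblank_cb() == NORESTART)
			break; /* timer is not re-armed */
	}
	return 0;
}
```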
@@ -2121,13 +2121,14 @@ long amdgpu_vm_wait_idle(struct amdgpu_vm *vm, long timeout)
  *
  * @adev: amdgpu_device pointer
  * @vm: requested vm
+ * @xcp_id: GPU partition selection id
  *
  * Init @vm fields.
  *
  * Returns:
  * 0 for success, error for failure.
  */
-int amdgpu_vm_init(struct amdgpu_device *adev, struct amdgpu_vm *vm)
+int amdgpu_vm_init(struct amdgpu_device *adev, struct amdgpu_vm *vm, int32_t xcp_id)
 {
     struct amdgpu_bo *root_bo;
     struct amdgpu_bo_vm *root;
@@ -2177,7 +2178,7 @@ int amdgpu_vm_init(struct amdgpu_device *adev, struct amdgpu_vm *vm)
     vm->evicting = false;
 
     r = amdgpu_vm_pt_create(adev, vm, adev->vm_manager.root_level,
-                false, &root);
+                false, &root, xcp_id);
     if (r)
         goto error_free_delayed;
     root_bo = &root->bo;
...
@@ -392,7 +392,7 @@ int amdgpu_vm_set_pasid(struct amdgpu_device *adev, struct amdgpu_vm *vm,
             u32 pasid);
 long amdgpu_vm_wait_idle(struct amdgpu_vm *vm, long timeout);
-int amdgpu_vm_init(struct amdgpu_device *adev, struct amdgpu_vm *vm);
+int amdgpu_vm_init(struct amdgpu_device *adev, struct amdgpu_vm *vm, int32_t xcp_id);
 int amdgpu_vm_make_compute(struct amdgpu_device *adev, struct amdgpu_vm *vm);
 void amdgpu_vm_release_compute(struct amdgpu_device *adev, struct amdgpu_vm *vm);
 void amdgpu_vm_fini(struct amdgpu_device *adev, struct amdgpu_vm *vm);
@@ -475,7 +475,8 @@ void amdgpu_vm_get_memory(struct amdgpu_vm *vm,
 int amdgpu_vm_pt_clear(struct amdgpu_device *adev, struct amdgpu_vm *vm,
             struct amdgpu_bo_vm *vmbo, bool immediate);
 int amdgpu_vm_pt_create(struct amdgpu_device *adev, struct amdgpu_vm *vm,
-            int level, bool immediate, struct amdgpu_bo_vm **vmbo);
+            int level, bool immediate, struct amdgpu_bo_vm **vmbo,
+            int32_t xcp_id);
 void amdgpu_vm_pt_free_root(struct amdgpu_device *adev, struct amdgpu_vm *vm);
 bool amdgpu_vm_pt_is_root_clean(struct amdgpu_device *adev,
                 struct amdgpu_vm *vm);
...
@@ -498,11 +498,12 @@ int amdgpu_vm_pt_clear(struct amdgpu_device *adev, struct amdgpu_vm *vm,
  * @level: the page table level
  * @immediate: use a immediate update
  * @vmbo: pointer to the buffer object pointer
+ * @xcp_id: GPU partition id
  */
 int amdgpu_vm_pt_create(struct amdgpu_device *adev, struct amdgpu_vm *vm,
-            int level, bool immediate, struct amdgpu_bo_vm **vmbo)
+            int level, bool immediate, struct amdgpu_bo_vm **vmbo,
+            int32_t xcp_id)
 {
-    struct amdgpu_fpriv *fpriv = container_of(vm, struct amdgpu_fpriv, vm);
     struct amdgpu_bo_param bp;
     struct amdgpu_bo *bo;
     struct dma_resv *resv;
@@ -535,7 +536,7 @@ int amdgpu_vm_pt_create(struct amdgpu_device *adev, struct amdgpu_vm *vm,
     bp.type = ttm_bo_type_kernel;
     bp.no_wait_gpu = immediate;
-    bp.xcp_id_plus1 = fpriv->xcp_id == ~0 ? 0 : fpriv->xcp_id + 1;
+    bp.xcp_id_plus1 = xcp_id + 1;
 
     if (vm->root.bo)
         bp.resv = vm->root.bo->tbo.base.resv;
@@ -561,7 +562,7 @@ int amdgpu_vm_pt_create(struct amdgpu_device *adev, struct amdgpu_vm *vm,
         bp.type = ttm_bo_type_kernel;
         bp.resv = bo->tbo.base.resv;
         bp.bo_ptr_size = sizeof(struct amdgpu_bo);
-        bp.xcp_id_plus1 = fpriv->xcp_id == ~0 ? 0 : fpriv->xcp_id + 1;
+        bp.xcp_id_plus1 = xcp_id + 1;
 
         r = amdgpu_bo_create(adev, &bp, &(*vmbo)->shadow);
@@ -606,7 +607,8 @@ static int amdgpu_vm_pt_alloc(struct amdgpu_device *adev,
         return 0;
 
     amdgpu_vm_eviction_unlock(vm);
-    r = amdgpu_vm_pt_create(adev, vm, cursor->level, immediate, &pt);
+    r = amdgpu_vm_pt_create(adev, vm, cursor->level, immediate, &pt,
+                vm->root.bo->xcp_id);
     amdgpu_vm_eviction_lock(vm);
     if (r)
         return r;
...
@@ -363,7 +363,7 @@ int amdgpu_xcp_open_device(struct amdgpu_device *adev,
     if (!adev->xcp_mgr)
         return 0;
 
-    fpriv->xcp_id = ~0;
+    fpriv->xcp_id = AMDGPU_XCP_NO_PARTITION;
     for (i = 0; i < MAX_XCP; ++i) {
         if (!adev->xcp_mgr->xcp[i].ddev)
             break;
@@ -381,7 +381,7 @@ int amdgpu_xcp_open_device(struct amdgpu_device *adev,
         }
     }
 
-    fpriv->vm.mem_id = fpriv->xcp_id == ~0 ? -1 :
+    fpriv->vm.mem_id = fpriv->xcp_id == AMDGPU_XCP_NO_PARTITION ? -1 :
                 adev->xcp_mgr->xcp[fpriv->xcp_id].mem_id;
     return 0;
 }
...
@@ -37,6 +37,8 @@
 #define AMDGPU_XCP_FL_NONE 0
 #define AMDGPU_XCP_FL_LOCKED (1 << 0)
 
+#define AMDGPU_XCP_NO_PARTITION (~0)
+
 struct amdgpu_fpriv;
 
 enum AMDGPU_XCP_IP_BLOCK {
...
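Several hunks in this series replace open-coded `~0` with this named sentinel. Beyond readability, the constant centralizes a width subtlety: `~0` is a plain int, so what it compares equal to depends on the width of the field it meets. A small standalone demo of that pitfall (illustrative userspace C, not kernel code):

```c
#include <stdint.h>
#include <stdio.h>

#define AMDGPU_XCP_NO_PARTITION (~0)

int main(void)
{
	uint32_t id32 = ~0;   /* stores 0xffffffff */
	uint64_t id64 = id32; /* widened copy: still 0xffffffff */

	/* Fine for a u32 field: the int -1 converts to 0xffffffff. */
	printf("u32 sentinel match: %d\n", id32 == AMDGPU_XCP_NO_PARTITION);

	/* Silently wrong if the field is ever widened: -1 now converts
	 * to 0xffffffffffffffff, which the stored value is not. */
	printf("u64 sentinel match: %d\n", id64 == AMDGPU_XCP_NO_PARTITION);
	return 0;
}
```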
@@ -68,7 +68,7 @@ static void aqua_vanjaram_set_xcp_id(struct amdgpu_device *adev,
     enum AMDGPU_XCP_IP_BLOCK ip_blk;
     uint32_t inst_mask;
 
-    ring->xcp_id = ~0;
+    ring->xcp_id = AMDGPU_XCP_NO_PARTITION;
     if (adev->xcp_mgr->mode == AMDGPU_XCP_MODE_NONE)
         return;
@@ -177,7 +177,7 @@ static int aqua_vanjaram_select_scheds(
     u32 sel_xcp_id;
     int i;
 
-    if (fpriv->xcp_id == ~0) {
+    if (fpriv->xcp_id == AMDGPU_XCP_NO_PARTITION) {
         u32 least_ref_cnt = ~0;
 
         fpriv->xcp_id = 0;
...
@@ -49,6 +49,7 @@ MODULE_FIRMWARE("amdgpu/psp_13_0_10_ta.bin");
 MODULE_FIRMWARE("amdgpu/psp_13_0_11_toc.bin");
 MODULE_FIRMWARE("amdgpu/psp_13_0_11_ta.bin");
 MODULE_FIRMWARE("amdgpu/psp_13_0_6_sos.bin");
+MODULE_FIRMWARE("amdgpu/psp_13_0_6_ta.bin");
 
 /* For large FW files the time to complete can be very long */
 #define USBC_PD_POLLING_LIMIT_S 240
...
@@ -194,6 +194,11 @@ struct hpd_rx_irq_offload_work_queue {
      * we're handling link loss
      */
     bool is_handling_link_loss;
+    /**
+     * @is_handling_mst_msg_rdy_event: Used to prevent inserting mst message
+     * ready event when we're already handling mst message ready event
+     */
+    bool is_handling_mst_msg_rdy_event;
     /**
      * @aconnector: The aconnector that this work queue is attached to
      */
@@ -638,6 +643,8 @@ struct amdgpu_dm_connector {
     struct drm_dp_mst_port *mst_output_port;
     struct amdgpu_dm_connector *mst_root;
     struct drm_dp_aux *dsc_aux;
+    struct mutex handle_mst_msg_ready;
+
     /* TODO see if we can merge with ddc_bus or make a dm_connector */
     struct amdgpu_i2c_adapter *i2c;
...
@@ -398,6 +398,18 @@ static int dm_crtc_helper_atomic_check(struct drm_crtc *crtc,
         return -EINVAL;
     }
 
+    /*
+     * Only allow async flips for fast updates that don't change the FB
+     * pitch, the DCC state, rotation, etc.
+     */
+    if (crtc_state->async_flip &&
+        dm_crtc_state->update_type != UPDATE_TYPE_FAST) {
+        drm_dbg_atomic(crtc->dev,
+                   "[CRTC:%d:%s] async flips are only supported for fast updates\n",
+                   crtc->base.id, crtc->name);
+        return -EINVAL;
+    }
+
     /* In some use cases, like reset, no stream is attached */
     if (!dm_crtc_state->stream)
         return 0;
...
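With this check, the atomic path rejects DRM_MODE_PAGE_FLIP_ASYNC with -EINVAL whenever the commit is not classified as a fast update, rather than risking corruption on a full pipe reprogram. Userspace would typically retry without the async flag; a sketch assuming a libdrm client (flip_with_fallback is a hypothetical helper, not an existing API):

```c
#include <errno.h>
#include <stdint.h>
#include <xf86drm.h>
#include <xf86drmMode.h>

/* Try a tearing (async) flip first; retry vsync'd when the kernel
 * rejects the async flag. fd/crtc_id/fb_id come from the caller's
 * modeset code. */
static int flip_with_fallback(int fd, uint32_t crtc_id, uint32_t fb_id,
			      void *user_data)
{
	int ret = drmModePageFlip(fd, crtc_id, fb_id,
				  DRM_MODE_PAGE_FLIP_EVENT |
				  DRM_MODE_PAGE_FLIP_ASYNC, user_data);

	/* Not a fast update (FB pitch, DCC state, rotation changed):
	 * fall back to a normal vblank-synchronized flip. */
	if (ret == -EINVAL)
		ret = drmModePageFlip(fd, crtc_id, fb_id,
				      DRM_MODE_PAGE_FLIP_EVENT, user_data);
	return ret;
}
```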
@@ -619,8 +619,118 @@ dm_dp_add_mst_connector(struct drm_dp_mst_topology_mgr *mgr,
     return connector;
 }
 
+void dm_handle_mst_sideband_msg_ready_event(
+    struct drm_dp_mst_topology_mgr *mgr,
+    enum mst_msg_ready_type msg_rdy_type)
+{
+    uint8_t esi[DP_PSR_ERROR_STATUS - DP_SINK_COUNT_ESI] = { 0 };
+    uint8_t dret;
+    bool new_irq_handled = false;
+    int dpcd_addr;
+    uint8_t dpcd_bytes_to_read;
+    const uint8_t max_process_count = 30;
+    uint8_t process_count = 0;
+    u8 retry;
+    struct amdgpu_dm_connector *aconnector =
+            container_of(mgr, struct amdgpu_dm_connector, mst_mgr);
+
+    const struct dc_link_status *link_status = dc_link_get_status(aconnector->dc_link);
+
+    if (link_status->dpcd_caps->dpcd_rev.raw < 0x12) {
+        dpcd_bytes_to_read = DP_LANE0_1_STATUS - DP_SINK_COUNT;
+        /* DPCD 0x200 - 0x201 for downstream IRQ */
+        dpcd_addr = DP_SINK_COUNT;
+    } else {
+        dpcd_bytes_to_read = DP_PSR_ERROR_STATUS - DP_SINK_COUNT_ESI;
+        /* DPCD 0x2002 - 0x2005 for downstream IRQ */
+        dpcd_addr = DP_SINK_COUNT_ESI;
+    }
+
+    mutex_lock(&aconnector->handle_mst_msg_ready);
+
+    while (process_count < max_process_count) {
+        u8 ack[DP_PSR_ERROR_STATUS - DP_SINK_COUNT_ESI] = {};
+
+        process_count++;
+
+        dret = drm_dp_dpcd_read(
+            &aconnector->dm_dp_aux.aux,
+            dpcd_addr,
+            esi,
+            dpcd_bytes_to_read);
+
+        if (dret != dpcd_bytes_to_read) {
+            DRM_DEBUG_KMS("DPCD read and acked number is not as expected!");
+            break;
+        }
+
+        DRM_DEBUG_DRIVER("ESI %02x %02x %02x\n", esi[0], esi[1], esi[2]);
+
+        switch (msg_rdy_type) {
+        case DOWN_REP_MSG_RDY_EVENT:
+            /* Only handle DOWN_REP_MSG_RDY case*/
+            esi[1] &= DP_DOWN_REP_MSG_RDY;
+            break;
+        case UP_REQ_MSG_RDY_EVENT:
+            /* Only handle UP_REQ_MSG_RDY case*/
+            esi[1] &= DP_UP_REQ_MSG_RDY;
+            break;
+        default:
+            /* Handle both cases*/
+            esi[1] &= (DP_DOWN_REP_MSG_RDY | DP_UP_REQ_MSG_RDY);
+            break;
+        }
+
+        if (!esi[1])
+            break;
+
+        /* handle MST irq */
+        if (aconnector->mst_mgr.mst_state)
+            drm_dp_mst_hpd_irq_handle_event(&aconnector->mst_mgr,
+                            esi,
+                            ack,
+                            &new_irq_handled);
+
+        if (new_irq_handled) {
+            /* ACK at DPCD to notify down stream */
+            for (retry = 0; retry < 3; retry++) {
+                ssize_t wret;
+
+                wret = drm_dp_dpcd_writeb(&aconnector->dm_dp_aux.aux,
+                              dpcd_addr + 1,
+                              ack[1]);
+                if (wret == 1)
+                    break;
+            }
+
+            if (retry == 3) {
+                DRM_ERROR("Failed to ack MST event.\n");
+                return;
+            }
+
+            drm_dp_mst_hpd_irq_send_new_request(&aconnector->mst_mgr);
+
+            new_irq_handled = false;
+        } else {
+            break;
+        }
+    }
+
+    mutex_unlock(&aconnector->handle_mst_msg_ready);
+
+    if (process_count == max_process_count)
+        DRM_DEBUG_DRIVER("Loop exceeded max iterations\n");
+}
+
+static void dm_handle_mst_down_rep_msg_ready(struct drm_dp_mst_topology_mgr *mgr)
+{
+    dm_handle_mst_sideband_msg_ready_event(mgr, DOWN_REP_MSG_RDY_EVENT);
+}
+
 static const struct drm_dp_mst_topology_cbs dm_mst_cbs = {
     .add_connector = dm_dp_add_mst_connector,
+    .poll_hpd_irq = dm_handle_mst_down_rep_msg_ready,
 };
 
 void amdgpu_dm_initialize_dp_connector(struct amdgpu_display_manager *dm,
...
@@ -49,6 +49,13 @@
 #define PBN_FEC_OVERHEAD_MULTIPLIER_8B_10B 1031
 #define PBN_FEC_OVERHEAD_MULTIPLIER_128B_132B 1000
 
+enum mst_msg_ready_type {
+    NONE_MSG_RDY_EVENT = 0,
+    DOWN_REP_MSG_RDY_EVENT = 1,
+    UP_REQ_MSG_RDY_EVENT = 2,
+    DOWN_OR_UP_MSG_RDY_EVENT = 3
+};
+
 struct amdgpu_display_manager;
 struct amdgpu_dm_connector;
@@ -61,6 +68,10 @@ void amdgpu_dm_initialize_dp_connector(struct amdgpu_display_manager *dm,
 void
 dm_dp_create_fake_mst_encoders(struct amdgpu_device *adev);
 
+void dm_handle_mst_sideband_msg_ready_event(
+    struct drm_dp_mst_topology_mgr *mgr,
+    enum mst_msg_ready_type msg_rdy_type);
+
 struct dsc_mst_fairness_vars {
     int pbn;
     bool dsc_enabled;
...
@@ -87,6 +87,11 @@ static int dcn31_get_active_display_cnt_wa(
                 stream->signal == SIGNAL_TYPE_DVI_SINGLE_LINK ||
                 stream->signal == SIGNAL_TYPE_DVI_DUAL_LINK)
             tmds_present = true;
+
+        /* Checking stream / link detection ensuring that PHY is active*/
+        if (dc_is_dp_signal(stream->signal) && !stream->dpms_off)
+            display_count++;
+
     }
 
     for (i = 0; i < dc->link_count; i++) {
...
@@ -3278,7 +3278,8 @@ void dcn10_wait_for_mpcc_disconnect(
         if (pipe_ctx->stream_res.opp->mpcc_disconnect_pending[mpcc_inst]) {
             struct hubp *hubp = get_hubp_by_inst(res_pool, mpcc_inst);
 
-            if (pipe_ctx->stream_res.tg->funcs->is_tg_enabled(pipe_ctx->stream_res.tg))
+            if (pipe_ctx->stream_res.tg &&
+                pipe_ctx->stream_res.tg->funcs->is_tg_enabled(pipe_ctx->stream_res.tg))
                 res_pool->mpc->funcs->wait_for_idle(res_pool->mpc, mpcc_inst);
             pipe_ctx->stream_res.opp->mpcc_disconnect_pending[mpcc_inst] = false;
             hubp->funcs->set_blank(hubp, true);
...
@@ -215,7 +215,7 @@ void optc3_set_odm_bypass(struct timing_generator *optc,
     optc1->opp_count = 1;
 }
 
-static void optc3_set_odm_combine(struct timing_generator *optc, int *opp_id, int opp_cnt,
+void optc3_set_odm_combine(struct timing_generator *optc, int *opp_id, int opp_cnt,
         struct dc_crtc_timing *timing)
 {
     struct optc *optc1 = DCN10TG_FROM_TG(optc);
@@ -293,7 +293,7 @@ static void optc3_set_timing_double_buffer(struct timing_generator *optc, bool e
             OTG_DRR_TIMING_DBUF_UPDATE_MODE, mode);
 }
 
-static void optc3_wait_drr_doublebuffer_pending_clear(struct timing_generator *optc)
+void optc3_wait_drr_doublebuffer_pending_clear(struct timing_generator *optc)
 {
     struct optc *optc1 = DCN10TG_FROM_TG(optc);
...
@@ -351,6 +351,9 @@ void optc3_set_timing_db_mode(struct timing_generator *optc, bool enable);
 void optc3_set_odm_bypass(struct timing_generator *optc,
         const struct dc_crtc_timing *dc_crtc_timing);
+void optc3_set_odm_combine(struct timing_generator *optc, int *opp_id, int opp_cnt,
+        struct dc_crtc_timing *timing);
+void optc3_wait_drr_doublebuffer_pending_clear(struct timing_generator *optc);
 void optc3_tg_init(struct timing_generator *optc);
 void optc3_set_vtotal_min_max(struct timing_generator *optc, int vtotal_min, int vtotal_max);
 
 #endif /* __DC_OPTC_DCN30_H__ */
...
@@ -11,7 +11,8 @@
 
 # Makefile for dcn30.
 
 DCN301 = dcn301_init.o dcn301_resource.o dcn301_dccg.o \
-    dcn301_dio_link_encoder.o dcn301_hwseq.o dcn301_panel_cntl.o dcn301_hubbub.o
+    dcn301_dio_link_encoder.o dcn301_hwseq.o dcn301_panel_cntl.o dcn301_hubbub.o \
+    dcn301_optc.o
 
 AMD_DAL_DCN301 = $(addprefix $(AMDDALPATH)/dc/dcn301/,$(DCN301))
...
/*
* Copyright 2020 Advanced Micro Devices, Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*
* Authors: AMD
*
*/
#include "reg_helper.h"
#include "dcn301_optc.h"
#include "dc.h"
#include "dcn_calc_math.h"
#include "dc_dmub_srv.h"
#include "dml/dcn30/dcn30_fpu.h"
#include "dc_trace.h"
#define REG(reg)\
optc1->tg_regs->reg
#define CTX \
optc1->base.ctx
#undef FN
#define FN(reg_name, field_name) \
optc1->tg_shift->field_name, optc1->tg_mask->field_name
/**
* optc301_set_drr() - Program dynamic refresh rate registers m_OTGx_OTG_V_TOTAL_*.
*
* @optc: timing_generator instance.
* @params: parameters used for Dynamic Refresh Rate.
*/
void optc301_set_drr(
struct timing_generator *optc,
const struct drr_params *params)
{
struct optc *optc1 = DCN10TG_FROM_TG(optc);
if (params != NULL &&
params->vertical_total_max > 0 &&
params->vertical_total_min > 0) {
if (params->vertical_total_mid != 0) {
REG_SET(OTG_V_TOTAL_MID, 0,
OTG_V_TOTAL_MID, params->vertical_total_mid - 1);
REG_UPDATE_2(OTG_V_TOTAL_CONTROL,
OTG_VTOTAL_MID_REPLACING_MAX_EN, 1,
OTG_VTOTAL_MID_FRAME_NUM,
(uint8_t)params->vertical_total_mid_frame_num);
}
optc->funcs->set_vtotal_min_max(optc, params->vertical_total_min - 1, params->vertical_total_max - 1);
REG_UPDATE_5(OTG_V_TOTAL_CONTROL,
OTG_V_TOTAL_MIN_SEL, 1,
OTG_V_TOTAL_MAX_SEL, 1,
OTG_FORCE_LOCK_ON_EVENT, 0,
OTG_SET_V_TOTAL_MIN_MASK_EN, 0,
OTG_SET_V_TOTAL_MIN_MASK, 0);
// Setup manual flow control for EOF via TRIG_A
optc->funcs->setup_manual_trigger(optc);
} else {
REG_UPDATE_4(OTG_V_TOTAL_CONTROL,
OTG_SET_V_TOTAL_MIN_MASK, 0,
OTG_V_TOTAL_MIN_SEL, 0,
OTG_V_TOTAL_MAX_SEL, 0,
OTG_FORCE_LOCK_ON_EVENT, 0);
optc->funcs->set_vtotal_min_max(optc, 0, 0);
}
}
void optc301_setup_manual_trigger(struct timing_generator *optc)
{
struct optc *optc1 = DCN10TG_FROM_TG(optc);
REG_SET_8(OTG_TRIGA_CNTL, 0,
OTG_TRIGA_SOURCE_SELECT, 21,
OTG_TRIGA_SOURCE_PIPE_SELECT, optc->inst,
OTG_TRIGA_RISING_EDGE_DETECT_CNTL, 1,
OTG_TRIGA_FALLING_EDGE_DETECT_CNTL, 0,
OTG_TRIGA_POLARITY_SELECT, 0,
OTG_TRIGA_FREQUENCY_SELECT, 0,
OTG_TRIGA_DELAY, 0,
OTG_TRIGA_CLEAR, 1);
}
static struct timing_generator_funcs dcn30_tg_funcs = {
.validate_timing = optc1_validate_timing,
.program_timing = optc1_program_timing,
.setup_vertical_interrupt0 = optc1_setup_vertical_interrupt0,
.setup_vertical_interrupt1 = optc1_setup_vertical_interrupt1,
.setup_vertical_interrupt2 = optc1_setup_vertical_interrupt2,
.program_global_sync = optc1_program_global_sync,
.enable_crtc = optc2_enable_crtc,
.disable_crtc = optc1_disable_crtc,
/* used by enable_timing_synchronization. Not need for FPGA */
.is_counter_moving = optc1_is_counter_moving,
.get_position = optc1_get_position,
.get_frame_count = optc1_get_vblank_counter,
.get_scanoutpos = optc1_get_crtc_scanoutpos,
.get_otg_active_size = optc1_get_otg_active_size,
.set_early_control = optc1_set_early_control,
/* used by enable_timing_synchronization. Not need for FPGA */
.wait_for_state = optc1_wait_for_state,
.set_blank_color = optc3_program_blank_color,
.did_triggered_reset_occur = optc1_did_triggered_reset_occur,
.triplebuffer_lock = optc3_triplebuffer_lock,
.triplebuffer_unlock = optc2_triplebuffer_unlock,
.enable_reset_trigger = optc1_enable_reset_trigger,
.enable_crtc_reset = optc1_enable_crtc_reset,
.disable_reset_trigger = optc1_disable_reset_trigger,
.lock = optc3_lock,
.unlock = optc1_unlock,
.lock_doublebuffer_enable = optc3_lock_doublebuffer_enable,
.lock_doublebuffer_disable = optc3_lock_doublebuffer_disable,
.enable_optc_clock = optc1_enable_optc_clock,
.set_drr = optc301_set_drr,
.get_last_used_drr_vtotal = optc2_get_last_used_drr_vtotal,
.set_vtotal_min_max = optc3_set_vtotal_min_max,
.set_static_screen_control = optc1_set_static_screen_control,
.program_stereo = optc1_program_stereo,
.is_stereo_left_eye = optc1_is_stereo_left_eye,
.tg_init = optc3_tg_init,
.is_tg_enabled = optc1_is_tg_enabled,
.is_optc_underflow_occurred = optc1_is_optc_underflow_occurred,
.clear_optc_underflow = optc1_clear_optc_underflow,
.setup_global_swap_lock = NULL,
.get_crc = optc1_get_crc,
.configure_crc = optc2_configure_crc,
.set_dsc_config = optc3_set_dsc_config,
.get_dsc_status = optc2_get_dsc_status,
.set_dwb_source = NULL,
.set_odm_bypass = optc3_set_odm_bypass,
.set_odm_combine = optc3_set_odm_combine,
.get_optc_source = optc2_get_optc_source,
.set_out_mux = optc3_set_out_mux,
.set_drr_trigger_window = optc3_set_drr_trigger_window,
.set_vtotal_change_limit = optc3_set_vtotal_change_limit,
.set_gsl = optc2_set_gsl,
.set_gsl_source_select = optc2_set_gsl_source_select,
.set_vtg_params = optc1_set_vtg_params,
.program_manual_trigger = optc2_program_manual_trigger,
.setup_manual_trigger = optc301_setup_manual_trigger,
.get_hw_timing = optc1_get_hw_timing,
.wait_drr_doublebuffer_pending_clear = optc3_wait_drr_doublebuffer_pending_clear,
};
void dcn301_timing_generator_init(struct optc *optc1)
{
optc1->base.funcs = &dcn30_tg_funcs;
optc1->max_h_total = optc1->tg_mask->OTG_H_TOTAL + 1;
optc1->max_v_total = optc1->tg_mask->OTG_V_TOTAL + 1;
optc1->min_h_blank = 32;
optc1->min_v_blank = 3;
optc1->min_v_blank_interlace = 5;
optc1->min_h_sync_width = 4;
optc1->min_v_sync_width = 1;
}
/*
* Copyright 2020 Advanced Micro Devices, Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*
* Authors: AMD
*
*/
#ifndef __DC_OPTC_DCN301_H__
#define __DC_OPTC_DCN301_H__
#include "dcn20/dcn20_optc.h"
#include "dcn30/dcn30_optc.h"
void dcn301_timing_generator_init(struct optc *optc1);
void optc301_setup_manual_trigger(struct timing_generator *optc);
void optc301_set_drr(struct timing_generator *optc, const struct drr_params *params);
#endif /* __DC_OPTC_DCN301_H__ */
@@ -42,7 +42,7 @@
 #include "dcn30/dcn30_hubp.h"
 #include "irq/dcn30/irq_service_dcn30.h"
 #include "dcn30/dcn30_dpp.h"
-#include "dcn30/dcn30_optc.h"
+#include "dcn301/dcn301_optc.h"
 #include "dcn20/dcn20_hwseq.h"
 #include "dcn30/dcn30_hwseq.h"
 #include "dce110/dce110_hw_sequencer.h"
@@ -855,7 +855,7 @@ static struct timing_generator *dcn301_timing_generator_create(
     tgn10->tg_shift = &optc_shift;
     tgn10->tg_mask = &optc_mask;
 
-    dcn30_timing_generator_init(tgn10);
+    dcn301_timing_generator_init(tgn10);
 
     return &tgn10->base;
 }
...
@@ -65,7 +65,7 @@ static const struct dc_debug_options debug_defaults_drv = {
     .timing_trace = false,
     .clock_trace = true,
     .disable_pplib_clock_request = true,
-    .pipe_split_policy = MPC_SPLIT_DYNAMIC,
+    .pipe_split_policy = MPC_SPLIT_AVOID,
     .force_single_disp_pipe_split = false,
     .disable_dcc = DCC_ENABLE,
     .vsr_support = true,
...
@@ -295,7 +295,11 @@ int dcn314_populate_dml_pipes_from_context_fpu(struct dc *dc, struct dc_state *c
         pipe = &res_ctx->pipe_ctx[i];
         timing = &pipe->stream->timing;
 
-        pipes[pipe_cnt].pipe.dest.vtotal = pipe->stream->adjust.v_total_min;
+        if (pipe->stream->adjust.v_total_min != 0)
+            pipes[pipe_cnt].pipe.dest.vtotal = pipe->stream->adjust.v_total_min;
+        else
+            pipes[pipe_cnt].pipe.dest.vtotal = timing->v_total;
+
         pipes[pipe_cnt].pipe.dest.vblank_nom = timing->v_total - pipes[pipe_cnt].pipe.dest.vactive;
         pipes[pipe_cnt].pipe.dest.vblank_nom = min(pipes[pipe_cnt].pipe.dest.vblank_nom, dcn3_14_ip.VBlankNomDefaultUS);
         pipes[pipe_cnt].pipe.dest.vblank_nom = max(pipes[pipe_cnt].pipe.dest.vblank_nom, timing->v_sync_width);
...
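Previously dest.vtotal was taken unconditionally from adjust.v_total_min, which is 0 whenever no DRR (variable refresh) adjustment is active, feeding DML a bogus vertical total; the fix falls back to the nominal timing. The selection reduces to something like the sketch below (hypothetical helper name, for illustration only):

```c
/* Prefer the DRR-adjusted minimum vtotal when variable refresh is
 * active (v_total_min != 0); otherwise use the nominal timing total. */
static unsigned int dml_dest_vtotal(unsigned int v_total_min,
				    unsigned int v_total_nominal)
{
	return v_total_min != 0 ? v_total_min : v_total_nominal;
}
```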
@@ -1798,17 +1798,6 @@ static int smu7_disable_dpm_tasks(struct pp_hwmgr *hwmgr)
     return result;
 }
 
-static bool intel_core_rkl_chk(void)
-{
-#if IS_ENABLED(CONFIG_X86_64)
-    struct cpuinfo_x86 *c = &cpu_data(0);
-
-    return (c->x86 == 6 && c->x86_model == INTEL_FAM6_ROCKETLAKE);
-#else
-    return false;
-#endif
-}
-
 static void smu7_init_dpm_defaults(struct pp_hwmgr *hwmgr)
 {
     struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
@@ -1835,7 +1824,8 @@ static void smu7_init_dpm_defaults(struct pp_hwmgr *hwmgr)
     data->mclk_dpm_key_disabled = hwmgr->feature_mask & PP_MCLK_DPM_MASK ? false : true;
     data->sclk_dpm_key_disabled = hwmgr->feature_mask & PP_SCLK_DPM_MASK ? false : true;
     data->pcie_dpm_key_disabled =
-        intel_core_rkl_chk() || !(hwmgr->feature_mask & PP_PCIE_DPM_MASK);
+        !amdgpu_device_pcie_dynamic_switching_supported() ||
+        !(hwmgr->feature_mask & PP_PCIE_DPM_MASK);
 
     /* need to set voltage control types before EVV patching */
     data->voltage_control = SMU7_VOLTAGE_CONTROL_NONE;
     data->vddci_control = SMU7_VOLTAGE_CONTROL_NONE;
...
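intel_core_rkl_chk() disabled PCIe DPM only on Rocket Lake, but other Intel platforms exhibit the same problems with dynamic PCIe link switching. The hunk swaps in a driver-wide helper; below is a sketch consistent with the helper's behavior as merged around this time (the authoritative implementation lives in amdgpu_device.c, so treat this body as an assumption, not a quote):

```c
#include <linux/types.h>
#if IS_ENABLED(CONFIG_X86)
#include <asm/processor.h>
#endif

/* Sketch: report dynamic PCIe link switching as unsupported on Intel
 * hosts generally, rather than special-casing one CPU model. */
bool amdgpu_device_pcie_dynamic_switching_supported(void)
{
#if IS_ENABLED(CONFIG_X86)
	struct cpuinfo_x86 *c = &cpu_data(0);

	if (c->x86_vendor == X86_VENDOR_INTEL)
		return false;
#endif
	return true;
}
```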
@@ -1927,12 +1927,16 @@ static int sienna_cichlid_read_sensor(struct smu_context *smu,
         *size = 4;
         break;
     case AMDGPU_PP_SENSOR_GFX_MCLK:
-        ret = sienna_cichlid_get_current_clk_freq_by_table(smu, SMU_UCLK, (uint32_t *)data);
+        ret = sienna_cichlid_get_smu_metrics_data(smu,
+                              METRICS_CURR_UCLK,
+                              (uint32_t *)data);
         *(uint32_t *)data *= 100;
         *size = 4;
         break;
     case AMDGPU_PP_SENSOR_GFX_SCLK:
-        ret = sienna_cichlid_get_current_clk_freq_by_table(smu, SMU_GFXCLK, (uint32_t *)data);
+        ret = sienna_cichlid_get_smu_metrics_data(smu,
+                              METRICS_AVERAGE_GFXCLK,
+                              (uint32_t *)data);
         *(uint32_t *)data *= 100;
         *size = 4;
         break;
...
@@ -949,7 +949,7 @@ static int smu_v13_0_7_read_sensor(struct smu_context *smu,
         break;
     case AMDGPU_PP_SENSOR_GFX_MCLK:
         ret = smu_v13_0_7_get_smu_metrics_data(smu,
-                           METRICS_AVERAGE_UCLK,
+                           METRICS_CURR_UCLK,
                            (uint32_t *)data);
         *(uint32_t *)data *= 100;
         *size = 4;
...