Commit c2c94b3b authored by Linus Torvalds

Merge tag 'drm-next-2022-01-21' of git://anongit.freedesktop.org/drm/drm

Pull drm fixes from Dave Airlie:
 "Thanks to Daniel for taking care of things while I was out, just a set
  of merge window fixes that came in this week, two i915 display fixes
  and a bunch of misc amdgpu, along with a radeon regression fix.

  amdgpu:
   - SR-IOV fix
   - VCN harvest fix
   - Suspend/resume fixes
   - Tahiti fix
   - Enable GPU recovery on yellow carp

  radeon:
   - Fix error handling regression in radeon_driver_open_kms

  i915:
   - Update EHL display voltage swing table
   - Fix programming the ADL-P display TC voltage swing"

* tag 'drm-next-2022-01-21' of git://anongit.freedesktop.org/drm/drm:
  drm/radeon: fix error handling in radeon_driver_open_kms
  drm/amd/amdgpu: fixing read wrong pf2vf data in SRIOV
  drm/amdgpu: apply vcn harvest quirk
  drm/i915/display/adlp: Implement new step in the TC voltage swing prog sequence
  drm/i915/display/ehl: Update voltage swing table
  drm/amd/display: Revert W/A for hard hangs on DCN20/DCN21
  drm/amdgpu: drop flags check for CHIP_IP_DISCOVERY
  drm/amdgpu: Fix rejecting Tahiti GPUs
  drm/amdgpu: don't do resets on APUs which don't support it
  drm/amdgpu: invert the logic in amdgpu_device_should_recover_gpu()
  drm/amdgpu: Enable recovery on yellow carp
parents 39e77c48 ccf34586
@@ -2354,7 +2354,7 @@ static int amdgpu_device_ip_init(struct amdgpu_device *adev)
 	}

 	if (amdgpu_sriov_vf(adev))
-		amdgpu_virt_exchange_data(adev);
+		amdgpu_virt_init_data_exchange(adev);

 	r = amdgpu_ib_pool_init(adev);
 	if (r) {
@@ -4450,33 +4450,24 @@ bool amdgpu_device_should_recover_gpu(struct amdgpu_device *adev)
 	if (amdgpu_gpu_recovery == -1) {
 		switch (adev->asic_type) {
-		case CHIP_BONAIRE:
-		case CHIP_HAWAII:
-		case CHIP_TOPAZ:
-		case CHIP_TONGA:
-		case CHIP_FIJI:
-		case CHIP_POLARIS10:
-		case CHIP_POLARIS11:
-		case CHIP_POLARIS12:
-		case CHIP_VEGAM:
-		case CHIP_VEGA20:
-		case CHIP_VEGA10:
-		case CHIP_VEGA12:
-		case CHIP_RAVEN:
-		case CHIP_ARCTURUS:
-		case CHIP_RENOIR:
-		case CHIP_NAVI10:
-		case CHIP_NAVI14:
-		case CHIP_NAVI12:
-		case CHIP_SIENNA_CICHLID:
-		case CHIP_NAVY_FLOUNDER:
-		case CHIP_DIMGREY_CAVEFISH:
-		case CHIP_BEIGE_GOBY:
-		case CHIP_VANGOGH:
-		case CHIP_ALDEBARAN:
-			break;
-		default:
+#ifdef CONFIG_DRM_AMDGPU_SI
+		case CHIP_VERDE:
+		case CHIP_TAHITI:
+		case CHIP_PITCAIRN:
+		case CHIP_OLAND:
+		case CHIP_HAINAN:
+#endif
+#ifdef CONFIG_DRM_AMDGPU_CIK
+		case CHIP_KAVERI:
+		case CHIP_KABINI:
+		case CHIP_MULLINS:
+#endif
+		case CHIP_CARRIZO:
+		case CHIP_STONEY:
+		case CHIP_CYAN_SKILLFISH:
 			goto disabled;
+		default:
+			break;
 		}
 	}
......
@@ -243,6 +243,30 @@ static inline bool amdgpu_discovery_verify_binary_signature(uint8_t *binary)
 	return (le32_to_cpu(bhdr->binary_signature) == BINARY_SIGNATURE);
 }

+static void amdgpu_discovery_harvest_config_quirk(struct amdgpu_device *adev)
+{
+	/*
+	 * So far, apply this quirk only on those Navy Flounder boards which
+	 * have a bad harvest table of VCN config.
+	 */
+	if ((adev->ip_versions[UVD_HWIP][1] == IP_VERSION(3, 0, 1)) &&
+	    (adev->ip_versions[GC_HWIP][0] == IP_VERSION(10, 3, 2))) {
+		switch (adev->pdev->revision) {
+		case 0xC1:
+		case 0xC2:
+		case 0xC3:
+		case 0xC5:
+		case 0xC7:
+		case 0xCF:
+		case 0xDF:
+			adev->vcn.harvest_config |= AMDGPU_VCN_HARVEST_VCN1;
+			break;
+		default:
+			break;
+		}
+	}
+}
+
 static int amdgpu_discovery_init(struct amdgpu_device *adev)
 {
 	struct table_info *info;
...
@@ -548,11 +572,9 @@ void amdgpu_discovery_harvest_ip(struct amdgpu_device *adev)
 			break;
 		}
 	}
-	/* some IP discovery tables on Navy Flounder don't have this set correctly */
-	if ((adev->ip_versions[UVD_HWIP][1] == IP_VERSION(3, 0, 1)) &&
-	    (adev->ip_versions[GC_HWIP][0] == IP_VERSION(10, 3, 2)) &&
-	    (adev->pdev->revision != 0xFF))
-		adev->vcn.harvest_config |= AMDGPU_VCN_HARVEST_VCN1;
+
+	amdgpu_discovery_harvest_config_quirk(adev);
+
 	if (vcn_harvest_count == adev->vcn.num_vcn_inst) {
 		adev->harvest_ip_mask |= AMD_HARVEST_IP_VCN_MASK;
 		adev->harvest_ip_mask |= AMD_HARVEST_IP_JPEG_MASK;
......
@@ -1930,11 +1930,6 @@ static int amdgpu_pci_probe(struct pci_dev *pdev,
 		return -ENODEV;
 	}

-	if (flags == 0) {
-		DRM_INFO("Unsupported asic. Remove me when IP discovery init is in place.\n");
-		return -ENODEV;
-	}
-
 	if (amdgpu_virtual_display ||
 	    amdgpu_device_asic_has_dc_support(flags & AMD_ASIC_MASK))
 		supports_atomic = true;
......
@@ -625,20 +625,20 @@ void amdgpu_virt_init_data_exchange(struct amdgpu_device *adev)
 	adev->virt.fw_reserve.p_vf2pf = NULL;
 	adev->virt.vf2pf_update_interval_ms = 0;

-	if (adev->bios != NULL) {
-		adev->virt.vf2pf_update_interval_ms = 2000;
+	if (adev->mman.fw_vram_usage_va != NULL) {
+		/* go through this logic in ip_init and reset to init workqueue*/
+		amdgpu_virt_exchange_data(adev);
+
+		INIT_DELAYED_WORK(&adev->virt.vf2pf_work, amdgpu_virt_update_vf2pf_work_item);
+		schedule_delayed_work(&(adev->virt.vf2pf_work), msecs_to_jiffies(adev->virt.vf2pf_update_interval_ms));
+	} else if (adev->bios != NULL) {
+		/* got through this logic in early init stage to get necessary flags, e.g. rlcg_acc related*/
 		adev->virt.fw_reserve.p_pf2vf =
 			(struct amd_sriov_msg_pf2vf_info_header *)
 			(adev->bios + (AMD_SRIOV_MSG_PF2VF_OFFSET_KB << 10));

 		amdgpu_virt_read_pf2vf_data(adev);
 	}
-
-	if (adev->virt.vf2pf_update_interval_ms != 0) {
-		INIT_DELAYED_WORK(&adev->virt.vf2pf_work, amdgpu_virt_update_vf2pf_work_item);
-		schedule_delayed_work(&(adev->virt.vf2pf_work), msecs_to_jiffies(adev->virt.vf2pf_update_interval_ms));
-	}
 }
...
@@ -674,12 +674,6 @@ void amdgpu_virt_exchange_data(struct amdgpu_device *adev)
 			if (adev->virt.ras_init_done)
 				amdgpu_virt_add_bad_page(adev, bp_block_offset, bp_block_size);
 		}
-	} else if (adev->bios != NULL) {
-		adev->virt.fw_reserve.p_pf2vf =
-			(struct amd_sriov_msg_pf2vf_info_header *)
-			(adev->bios + (AMD_SRIOV_MSG_PF2VF_OFFSET_KB << 10));
-
-		amdgpu_virt_read_pf2vf_data(adev);
 	}
 }
......
@@ -1428,6 +1428,10 @@ static int cik_asic_reset(struct amdgpu_device *adev)
 {
 	int r;

+	/* APUs don't have full asic reset */
+	if (adev->flags & AMD_IS_APU)
+		return 0;
+
 	if (cik_asic_reset_method(adev) == AMD_RESET_METHOD_BACO) {
 		dev_info(adev->dev, "BACO reset\n");
 		r = amdgpu_dpm_baco_reset(adev);
......
@@ -956,6 +956,10 @@ static int vi_asic_reset(struct amdgpu_device *adev)
 {
 	int r;

+	/* APUs don't have full asic reset */
+	if (adev->flags & AMD_IS_APU)
+		return 0;
+
 	if (vi_asic_reset_method(adev) == AMD_RESET_METHOD_BACO) {
 		dev_info(adev->dev, "BACO reset\n");
 		r = amdgpu_dpm_baco_reset(adev);
......
@@ -38,7 +38,6 @@
 #include "clk/clk_11_0_0_offset.h"
 #include "clk/clk_11_0_0_sh_mask.h"

-#include "irq/dcn20/irq_service_dcn20.h"

 #undef FN
 #define FN(reg_name, field_name) \
...
@@ -223,8 +222,6 @@ void dcn2_update_clocks(struct clk_mgr *clk_mgr_base,
 	bool force_reset = false;
 	bool p_state_change_support;
 	int total_plane_count;
-	int irq_src;
-	uint32_t hpd_state;

 	if (dc->work_arounds.skip_clock_update)
 		return;
...
@@ -242,13 +239,7 @@ void dcn2_update_clocks(struct clk_mgr *clk_mgr_base,
 	if (dc->res_pool->pp_smu)
 		pp_smu = &dc->res_pool->pp_smu->nv_funcs;

-	for (irq_src = DC_IRQ_SOURCE_HPD1; irq_src <= DC_IRQ_SOURCE_HPD6; irq_src++) {
-		hpd_state = dc_get_hpd_state_dcn20(dc->res_pool->irqs, irq_src);
-		if (hpd_state)
-			break;
-	}
-
-	if (display_count == 0 && !hpd_state)
+	if (display_count == 0)
 		enter_display_off = true;

 	if (enter_display_off == safe_to_lower) {
......
@@ -42,7 +42,6 @@
 #include "clk/clk_10_0_2_sh_mask.h"
 #include "renoir_ip_offset.h"

-#include "irq/dcn21/irq_service_dcn21.h"

 /* Constants */
...
@@ -129,11 +128,9 @@ static void rn_update_clocks(struct clk_mgr *clk_mgr_base,
 	struct dc_clocks *new_clocks = &context->bw_ctx.bw.dcn.clk;
 	struct dc *dc = clk_mgr_base->ctx->dc;
 	int display_count;
-	int irq_src;
 	bool update_dppclk = false;
 	bool update_dispclk = false;
 	bool dpp_clock_lowered = false;
-	uint32_t hpd_state;

 	struct dmcu *dmcu = clk_mgr_base->ctx->dc->res_pool->dmcu;
...
@@ -150,14 +147,8 @@ static void rn_update_clocks(struct clk_mgr *clk_mgr_base,
 		display_count = rn_get_active_display_cnt_wa(dc, context);

-		for (irq_src = DC_IRQ_SOURCE_HPD1; irq_src <= DC_IRQ_SOURCE_HPD5; irq_src++) {
-			hpd_state = dc_get_hpd_state_dcn21(dc->res_pool->irqs, irq_src);
-			if (hpd_state)
-				break;
-		}
-
 		/* if we can go lower, go lower */
-		if (display_count == 0 && !hpd_state) {
+		if (display_count == 0) {
 			rn_vbios_smu_set_dcn_low_power_state(clk_mgr, DCN_PWR_STATE_LOW_POWER);
 			/* update power state */
 			clk_mgr_base->clks.pwr_state = DCN_PWR_STATE_LOW_POWER;
......
@@ -132,31 +132,6 @@ enum dc_irq_source to_dal_irq_source_dcn20(
 	}
 }

-uint32_t dc_get_hpd_state_dcn20(struct irq_service *irq_service, enum dc_irq_source source)
-{
-	const struct irq_source_info *info;
-	uint32_t addr;
-	uint32_t value;
-	uint32_t current_status;
-
-	info = find_irq_source_info(irq_service, source);
-	if (!info)
-		return 0;
-
-	addr = info->status_reg;
-	if (!addr)
-		return 0;
-
-	value = dm_read_reg(irq_service->ctx, addr);
-	current_status =
-		get_reg_field_value(
-			value,
-			HPD0_DC_HPD_INT_STATUS,
-			DC_HPD_SENSE);
-
-	return current_status;
-}
-
 static bool hpd_ack(
 	struct irq_service *irq_service,
 	const struct irq_source_info *info)
......
@@ -31,6 +31,4 @@
 struct irq_service *dal_irq_service_dcn20_create(
 	struct irq_service_init_data *init_data);

-uint32_t dc_get_hpd_state_dcn20(struct irq_service *irq_service, enum dc_irq_source source);
-
 #endif
@@ -134,31 +134,6 @@ static enum dc_irq_source to_dal_irq_source_dcn21(struct irq_service *irq_servic
 	return DC_IRQ_SOURCE_INVALID;
 }

-uint32_t dc_get_hpd_state_dcn21(struct irq_service *irq_service, enum dc_irq_source source)
-{
-	const struct irq_source_info *info;
-	uint32_t addr;
-	uint32_t value;
-	uint32_t current_status;
-
-	info = find_irq_source_info(irq_service, source);
-	if (!info)
-		return 0;
-
-	addr = info->status_reg;
-	if (!addr)
-		return 0;
-
-	value = dm_read_reg(irq_service->ctx, addr);
-	current_status =
-		get_reg_field_value(
-			value,
-			HPD0_DC_HPD_INT_STATUS,
-			DC_HPD_SENSE);
-
-	return current_status;
-}
-
 static bool hpd_ack(
 	struct irq_service *irq_service,
 	const struct irq_source_info *info)
......
@@ -31,6 +31,4 @@
 struct irq_service *dal_irq_service_dcn21_create(
 	struct irq_service_init_data *init_data);

-uint32_t dc_get_hpd_state_dcn21(struct irq_service *irq_service, enum dc_irq_source source);
-
 #endif
@@ -79,7 +79,7 @@ void dal_irq_service_destroy(struct irq_service **irq_service)
 	*irq_service = NULL;
 }

-const struct irq_source_info *find_irq_source_info(
+static const struct irq_source_info *find_irq_source_info(
 	struct irq_service *irq_service,
 	enum dc_irq_source source)
 {
......
@@ -69,10 +69,6 @@ struct irq_service {
 	const struct irq_service_funcs *funcs;
 };

-const struct irq_source_info *find_irq_source_info(
-	struct irq_service *irq_service,
-	enum dc_irq_source source);
-
 void dal_irq_service_construct(
 	struct irq_service *irq_service,
 	struct irq_service_init_data *init_data);
......
@@ -1298,6 +1298,28 @@ static void tgl_dkl_phy_set_signal_levels(struct intel_encoder *encoder,
 		intel_de_rmw(dev_priv, DKL_TX_DPCNTL2(tc_port),
 			     DKL_TX_DP20BITMODE, 0);
+
+		if (IS_ALDERLAKE_P(dev_priv)) {
+			u32 val;
+
+			if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI)) {
+				if (ln == 0) {
+					val = DKL_TX_DPCNTL2_CFG_LOADGENSELECT_TX1(0);
+					val |= DKL_TX_DPCNTL2_CFG_LOADGENSELECT_TX2(2);
+				} else {
+					val = DKL_TX_DPCNTL2_CFG_LOADGENSELECT_TX1(3);
+					val |= DKL_TX_DPCNTL2_CFG_LOADGENSELECT_TX2(3);
+				}
+			} else {
+				val = DKL_TX_DPCNTL2_CFG_LOADGENSELECT_TX1(0);
+				val |= DKL_TX_DPCNTL2_CFG_LOADGENSELECT_TX2(0);
+			}
+
+			intel_de_rmw(dev_priv, DKL_TX_DPCNTL2(tc_port),
+				     DKL_TX_DPCNTL2_CFG_LOADGENSELECT_TX1_MASK |
+				     DKL_TX_DPCNTL2_CFG_LOADGENSELECT_TX2_MASK,
+				     val);
+		}
 	}
 }
......
@@ -477,14 +477,14 @@ static const struct intel_ddi_buf_trans icl_combo_phy_trans_hdmi = {
 static const union intel_ddi_buf_trans_entry _ehl_combo_phy_trans_dp[] = {
 						/* NT mV Trans mV db    */
 	{ .icl = { 0xA, 0x33, 0x3F, 0x00, 0x00 } },	/* 350   350      0.0   */
-	{ .icl = { 0xA, 0x47, 0x36, 0x00, 0x09 } },	/* 350   500      3.1   */
-	{ .icl = { 0xC, 0x64, 0x34, 0x00, 0x0B } },	/* 350   700      6.0   */
-	{ .icl = { 0x6, 0x7F, 0x30, 0x00, 0x0F } },	/* 350   900      8.2   */
+	{ .icl = { 0xA, 0x47, 0x38, 0x00, 0x07 } },	/* 350   500      3.1   */
+	{ .icl = { 0xC, 0x64, 0x33, 0x00, 0x0C } },	/* 350   700      6.0   */
+	{ .icl = { 0x6, 0x7F, 0x2F, 0x00, 0x10 } },	/* 350   900      8.2   */
 	{ .icl = { 0xA, 0x46, 0x3F, 0x00, 0x00 } },	/* 500   500      0.0   */
-	{ .icl = { 0xC, 0x64, 0x38, 0x00, 0x07 } },	/* 500   700      2.9   */
+	{ .icl = { 0xC, 0x64, 0x37, 0x00, 0x08 } },	/* 500   700      2.9   */
 	{ .icl = { 0x6, 0x7F, 0x32, 0x00, 0x0D } },	/* 500   900      5.1   */
 	{ .icl = { 0xC, 0x61, 0x3F, 0x00, 0x00 } },	/* 650   700      0.6   */
-	{ .icl = { 0x6, 0x7F, 0x38, 0x00, 0x07 } },	/* 600   900      3.5   */
+	{ .icl = { 0x6, 0x7F, 0x37, 0x00, 0x08 } },	/* 600   900      3.5   */
 	{ .icl = { 0x6, 0x7F, 0x3F, 0x00, 0x00 } },	/* 900   900      0.0   */
 };
......
@@ -11166,8 +11166,12 @@ enum skl_power_gate {
 					     _DKL_PHY2_BASE) + \
 					     _DKL_TX_DPCNTL1)

 #define _DKL_TX_DPCNTL2				0x2C8
-#define DKL_TX_DP20BITMODE			(1 << 2)
+#define DKL_TX_DP20BITMODE			REG_BIT(2)
+#define DKL_TX_DPCNTL2_CFG_LOADGENSELECT_TX1_MASK	REG_GENMASK(4, 3)
+#define DKL_TX_DPCNTL2_CFG_LOADGENSELECT_TX1(val)	REG_FIELD_PREP(DKL_TX_DPCNTL2_CFG_LOADGENSELECT_TX1_MASK, (val))
+#define DKL_TX_DPCNTL2_CFG_LOADGENSELECT_TX2_MASK	REG_GENMASK(6, 5)
+#define DKL_TX_DPCNTL2_CFG_LOADGENSELECT_TX2(val)	REG_FIELD_PREP(DKL_TX_DPCNTL2_CFG_LOADGENSELECT_TX2_MASK, (val))
 #define DKL_TX_DPCNTL2(tc_port) _MMIO(_PORT(tc_port, \
 					     _DKL_PHY1_BASE, \
 					     _DKL_PHY2_BASE) + \
......
@@ -666,18 +666,18 @@ int radeon_driver_open_kms(struct drm_device *dev, struct drm_file *file_priv)
 		fpriv = kzalloc(sizeof(*fpriv), GFP_KERNEL);
 		if (unlikely(!fpriv)) {
 			r = -ENOMEM;
-			goto out_suspend;
+			goto err_suspend;
 		}

 		if (rdev->accel_working) {
 			vm = &fpriv->vm;
 			r = radeon_vm_init(rdev, vm);
 			if (r)
-				goto out_fpriv;
+				goto err_fpriv;

 			r = radeon_bo_reserve(rdev->ring_tmp_bo.bo, false);
 			if (r)
-				goto out_vm_fini;
+				goto err_vm_fini;

 			/* map the ib pool buffer read only into
 			 * virtual address space */
...
@@ -685,7 +685,7 @@ int radeon_driver_open_kms(struct drm_device *dev, struct drm_file *file_priv)
 						  rdev->ring_tmp_bo.bo);
 			if (!vm->ib_bo_va) {
 				r = -ENOMEM;
-				goto out_vm_fini;
+				goto err_vm_fini;
 			}

 			r = radeon_vm_bo_set_addr(rdev, vm->ib_bo_va,
...
@@ -693,19 +693,21 @@ int radeon_driver_open_kms(struct drm_device *dev, struct drm_file *file_priv)
 						  RADEON_VM_PAGE_READABLE |
 						  RADEON_VM_PAGE_SNOOPED);
 			if (r)
-				goto out_vm_fini;
+				goto err_vm_fini;
 		}
 		file_priv->driver_priv = fpriv;
 	}

-	if (!r)
-		goto out_suspend;
+	pm_runtime_mark_last_busy(dev->dev);
+	pm_runtime_put_autosuspend(dev->dev);
+	return 0;

-out_vm_fini:
+err_vm_fini:
 	radeon_vm_fini(rdev, vm);
-out_fpriv:
+err_fpriv:
 	kfree(fpriv);
-out_suspend:
+
+err_suspend:
 	pm_runtime_mark_last_busy(dev->dev);
 	pm_runtime_put_autosuspend(dev->dev);
 	return r;
......