Commit b3d971ec authored by Linus Torvalds

Merge tag 'drm-fixes-2022-02-18' of git://anongit.freedesktop.org/drm/drm

Pull drm fixes from Dave Airlie:
 "Regular fixes for rc5; nothing really stands out. Mostly amdgpu and
  i915 fixes, along with mediatek, radeon and some misc changes.

  cma-helper:
   - set VM_DONTEXPAND

  atomic:
   - error handling fix

  mediatek:
   - fix probe defer loop with external bridge

  amdgpu:
   - Stable pstate clock fixes for Dimgrey Cavefish and Beige Goby
   - S0ix SDMA fix
   - Yellow Carp GPU reset fix

  radeon:
   - Backlight fix for iMac 12,1

  i915:
   - GVT kerneldoc cleanup.
   - GVT Kconfig should depend on X86
   - Prevent out of range access in SWSCI display code
   - Fix mbus join and dbuf slice config lookup
   - Fix inverted priority selection in the TTM backend
   - Fix FBC plane end Y offset check"

* tag 'drm-fixes-2022-02-18' of git://anongit.freedesktop.org/drm/drm:
  drm/atomic: Don't pollute crtc_state->mode_blob with error pointers
  drm/radeon: Fix backlight control on iMac 12,1
  drm/amd/pm: correct the sequence of sending gpu reset msg
  drm/amdgpu: skipping SDMA hw_init and hw_fini for S0ix.
  drm/amd/pm: correct UMD pstate clocks for Dimgrey Cavefish and Beige Goby
  drm/i915/fbc: Fix the plane end Y offset check
  drm/i915/opregion: check port number bounds for SWSCI display power state
  drm/i915/ttm: tweak priority hint selection
  drm/i915: Fix mbus join config lookup
  drm/i915: Fix dbuf slice config lookup
  drm/cma-helper: Set VM_DONTEXPAND for mmap
  drm/mediatek: mtk_dsi: Avoid EPROBE_DEFER loop with external bridge
  drm/i915/gvt: Make DRM_I915_GVT depend on X86
  drm/i915/gvt: clean up kernel-doc in gtt.c
parents 8b97cae3 5666b610
@@ -2057,6 +2057,10 @@ static int sdma_v4_0_suspend(void *handle)
 {
         struct amdgpu_device *adev = (struct amdgpu_device *)handle;
 
+        /* SMU saves SDMA state for us */
+        if (adev->in_s0ix)
+                return 0;
+
         return sdma_v4_0_hw_fini(adev);
 }
 
@@ -2064,6 +2068,10 @@ static int sdma_v4_0_resume(void *handle)
 {
         struct amdgpu_device *adev = (struct amdgpu_device *)handle;
 
+        /* SMU restores SDMA state for us */
+        if (adev->in_s0ix)
+                return 0;
+
         return sdma_v4_0_hw_init(adev);
 }
@@ -1238,21 +1238,37 @@ static int sienna_cichlid_populate_umd_state_clk(struct smu_context *smu)
                 &dpm_context->dpm_tables.soc_table;
         struct smu_umd_pstate_table *pstate_table =
                 &smu->pstate_table;
+        struct amdgpu_device *adev = smu->adev;
 
         pstate_table->gfxclk_pstate.min = gfx_table->min;
         pstate_table->gfxclk_pstate.peak = gfx_table->max;
-        if (gfx_table->max >= SIENNA_CICHLID_UMD_PSTATE_PROFILING_GFXCLK)
-                pstate_table->gfxclk_pstate.standard = SIENNA_CICHLID_UMD_PSTATE_PROFILING_GFXCLK;
 
         pstate_table->uclk_pstate.min = mem_table->min;
         pstate_table->uclk_pstate.peak = mem_table->max;
-        if (mem_table->max >= SIENNA_CICHLID_UMD_PSTATE_PROFILING_MEMCLK)
-                pstate_table->uclk_pstate.standard = SIENNA_CICHLID_UMD_PSTATE_PROFILING_MEMCLK;
 
         pstate_table->socclk_pstate.min = soc_table->min;
         pstate_table->socclk_pstate.peak = soc_table->max;
-        if (soc_table->max >= SIENNA_CICHLID_UMD_PSTATE_PROFILING_SOCCLK)
-                pstate_table->socclk_pstate.standard = SIENNA_CICHLID_UMD_PSTATE_PROFILING_SOCCLK;
+
+        switch (adev->asic_type) {
+        case CHIP_SIENNA_CICHLID:
+        case CHIP_NAVY_FLOUNDER:
+                pstate_table->gfxclk_pstate.standard = SIENNA_CICHLID_UMD_PSTATE_PROFILING_GFXCLK;
+                pstate_table->uclk_pstate.standard = SIENNA_CICHLID_UMD_PSTATE_PROFILING_MEMCLK;
+                pstate_table->socclk_pstate.standard = SIENNA_CICHLID_UMD_PSTATE_PROFILING_SOCCLK;
+                break;
+        case CHIP_DIMGREY_CAVEFISH:
+                pstate_table->gfxclk_pstate.standard = DIMGREY_CAVEFISH_UMD_PSTATE_PROFILING_GFXCLK;
+                pstate_table->uclk_pstate.standard = DIMGREY_CAVEFISH_UMD_PSTATE_PROFILING_MEMCLK;
+                pstate_table->socclk_pstate.standard = DIMGREY_CAVEFISH_UMD_PSTATE_PROFILING_SOCCLK;
+                break;
+        case CHIP_BEIGE_GOBY:
+                pstate_table->gfxclk_pstate.standard = BEIGE_GOBY_UMD_PSTATE_PROFILING_GFXCLK;
+                pstate_table->uclk_pstate.standard = BEIGE_GOBY_UMD_PSTATE_PROFILING_MEMCLK;
+                pstate_table->socclk_pstate.standard = BEIGE_GOBY_UMD_PSTATE_PROFILING_SOCCLK;
+                break;
+        default:
+                break;
+        }
 
         return 0;
 }
@@ -33,6 +33,14 @@ typedef enum {
 #define SIENNA_CICHLID_UMD_PSTATE_PROFILING_SOCCLK 960
 #define SIENNA_CICHLID_UMD_PSTATE_PROFILING_MEMCLK 1000
 
+#define DIMGREY_CAVEFISH_UMD_PSTATE_PROFILING_GFXCLK 1950
+#define DIMGREY_CAVEFISH_UMD_PSTATE_PROFILING_SOCCLK 960
+#define DIMGREY_CAVEFISH_UMD_PSTATE_PROFILING_MEMCLK 676
+
+#define BEIGE_GOBY_UMD_PSTATE_PROFILING_GFXCLK 2200
+#define BEIGE_GOBY_UMD_PSTATE_PROFILING_SOCCLK 960
+#define BEIGE_GOBY_UMD_PSTATE_PROFILING_MEMCLK 1000
+
 extern void sienna_cichlid_set_ppt_funcs(struct smu_context *smu);
 
 #endif
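Note on the pstate change above: the old code set the UMD "standard" profiling clocks by range-checking against Sienna Cichlid's values, which left Dimgrey Cavefish and Beige Goby with wrong or missing standard clocks; the fix selects them explicitly per ASIC, using the new per-chip defines from the header hunk. As a rough illustration of the same idea in table form (simplified, hypothetical types; only the clock values visible in these hunks are used, and this is not the driver's actual code):

#include <stddef.h>
#include <stdio.h>

/* Illustrative stand-ins only, not amdgpu's real enums or structs. */
enum chip { CHIP_DIMGREY_CAVEFISH, CHIP_BEIGE_GOBY, CHIP_OTHER };

struct umd_profiling_clks {
    enum chip chip;
    unsigned int gfxclk_mhz;
    unsigned int socclk_mhz;
    unsigned int memclk_mhz;
};

/* Values taken from the DIMGREY_CAVEFISH_* and BEIGE_GOBY_* defines above. */
static const struct umd_profiling_clks profiling_table[] = {
    { CHIP_DIMGREY_CAVEFISH, 1950, 960,  676 },
    { CHIP_BEIGE_GOBY,       2200, 960, 1000 },
};

/* Returns NULL when a chip has no dedicated entry, so the caller keeps its defaults. */
static const struct umd_profiling_clks *lookup_profiling_clks(enum chip chip)
{
    for (size_t i = 0; i < sizeof(profiling_table) / sizeof(profiling_table[0]); i++)
        if (profiling_table[i].chip == chip)
            return &profiling_table[i];
    return NULL;
}

int main(void)
{
    const struct umd_profiling_clks *p = lookup_profiling_clks(CHIP_BEIGE_GOBY);

    if (p)
        printf("gfx %u MHz, soc %u MHz, mem %u MHz\n",
               p->gfxclk_mhz, p->socclk_mhz, p->memclk_mhz);
    return 0;
}

The switch in the patch matches the surrounding driver style; the table above is only meant to make the per-ASIC data easy to see at a glance.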
@@ -282,14 +282,9 @@ static int yellow_carp_post_smu_init(struct smu_context *smu)
 
 static int yellow_carp_mode_reset(struct smu_context *smu, int type)
 {
-        int ret = 0, index = 0;
-
-        index = smu_cmn_to_asic_specific_index(smu, CMN2ASIC_MAPPING_MSG,
-                                               SMU_MSG_GfxDeviceDriverReset);
-        if (index < 0)
-                return index == -EACCES ? 0 : index;
+        int ret = 0;
 
-        ret = smu_cmn_send_smc_msg_with_param(smu, (uint16_t)index, type, NULL);
+        ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_GfxDeviceDriverReset, type, NULL);
         if (ret)
                 dev_err(smu->adev->dev, "Failed to mode reset!\n");
@@ -76,15 +76,17 @@ int drm_atomic_set_mode_for_crtc(struct drm_crtc_state *state,
         state->mode_blob = NULL;
 
         if (mode) {
+                struct drm_property_blob *blob;
+
                 drm_mode_convert_to_umode(&umode, mode);
-                state->mode_blob =
-                        drm_property_create_blob(state->crtc->dev,
-                                                 sizeof(umode),
-                                                 &umode);
-                if (IS_ERR(state->mode_blob))
-                        return PTR_ERR(state->mode_blob);
+                blob = drm_property_create_blob(crtc->dev,
+                                                sizeof(umode), &umode);
+                if (IS_ERR(blob))
+                        return PTR_ERR(blob);
 
                 drm_mode_copy(&state->mode, mode);
+                state->mode_blob = blob;
+
                 state->enable = true;
                 drm_dbg_atomic(crtc->dev,
                                "Set [MODE:%s] for [CRTC:%d:%s] state %p\n",
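Note on the atomic fix above: drm_property_create_blob() reports failure through an ERR_PTR()-encoded pointer, and the old code stored that encoded error directly in crtc_state->mode_blob, so later code that expects mode_blob to be either NULL or a valid blob could trip over it. The fix keeps the result in a local until it has been checked. A small userspace sketch of the pattern, using simplified stand-ins for the kernel's ERR_PTR()/IS_ERR()/PTR_ERR() helpers (not the real <linux/err.h> implementations):

#include <stdio.h>
#include <stdlib.h>
#include <errno.h>
#include <stdbool.h>
#include <stdint.h>

/* Simplified stand-ins for the kernel's error-pointer helpers. */
#define MAX_ERRNO 4095
static void *ERR_PTR(long err)     { return (void *)(intptr_t)err; }
static bool IS_ERR(const void *p)  { return (uintptr_t)p >= (uintptr_t)-MAX_ERRNO; }
static long PTR_ERR(const void *p) { return (long)(intptr_t)p; }

struct blob { int payload; };
struct crtc_state { struct blob *mode_blob; };

/* Pretend allocator: fails with -ENOMEM when asked to. */
static struct blob *create_blob(bool fail)
{
    if (fail)
        return ERR_PTR(-ENOMEM);
    return calloc(1, sizeof(struct blob));
}

/* Fixed pattern: keep the result in a local, validate it, only then publish it. */
static int set_mode(struct crtc_state *state, bool fail)
{
    struct blob *blob = create_blob(fail);

    if (IS_ERR(blob))
        return (int)PTR_ERR(blob);   /* state->mode_blob stays untouched (NULL) */

    state->mode_blob = blob;
    return 0;
}

int main(void)
{
    struct crtc_state state = { 0 };

    printf("error path: ret=%d mode_blob=%p\n", set_mode(&state, true), (void *)state.mode_blob);
    printf("success path: ret=%d mode_blob=%p\n", set_mode(&state, false), (void *)state.mode_blob);
    free(state.mode_blob);
    return 0;
}

The key point is that the shared field is only ever written with NULL or a pointer that has already passed the IS_ERR() check.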
@@ -512,6 +512,7 @@ int drm_gem_cma_mmap(struct drm_gem_cma_object *cma_obj, struct vm_area_struct *
          */
         vma->vm_pgoff -= drm_vma_node_start(&obj->vma_node);
         vma->vm_flags &= ~VM_PFNMAP;
+        vma->vm_flags |= VM_DONTEXPAND;
 
         if (cma_obj->map_noncoherent) {
                 vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
@@ -101,6 +101,7 @@ config DRM_I915_USERPTR
 config DRM_I915_GVT
         bool "Enable Intel GVT-g graphics virtualization host support"
         depends on DRM_I915
+        depends on X86
         depends on 64BIT
         default n
         help
@@ -1115,7 +1115,8 @@ static int intel_fbc_check_plane(struct intel_atomic_state *state,
 
         /* Wa_22010751166: icl, ehl, tgl, dg1, rkl */
         if (DISPLAY_VER(i915) >= 11 &&
-            (plane_state->view.color_plane[0].y + drm_rect_height(&plane_state->uapi.src)) & 3) {
+            (plane_state->view.color_plane[0].y +
+             (drm_rect_height(&plane_state->uapi.src) >> 16)) & 3) {
                 plane_state->no_fbc_reason = "plane end Y offset misaligned";
                 return false;
         }
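Note on the FBC fix above: plane_state->uapi.src is a drm_rect in 16.16 fixed-point source coordinates, so drm_rect_height() comes back scaled by 65536 and has to be shifted down before being added to the integer Y offset and tested for 4-line alignment; without the shift the check was effectively looking at fraction bits. A standalone sketch of the 16.16 handling (made-up values, not driver code):

#include <stdio.h>

/* 16.16 fixed point: high 16 bits hold the integer part, low 16 bits the fraction. */
#define FP16_16(x)         ((x) << 16)
#define FP16_16_TO_INT(x)  ((x) >> 16)

int main(void)
{
    int plane_y = 0;                     /* integer pixel rows */
    int src_height_fp = FP16_16(1082);   /* 1082 rows, in 16.16 fixed point */

    /* Buggy form: adds the unshifted fixed-point height, so "& 3" tests fraction bits. */
    int buggy_end = plane_y + src_height_fp;

    /* Fixed form: convert to integer rows first, then test 4-row alignment. */
    int end_y = plane_y + FP16_16_TO_INT(src_height_fp);

    printf("buggy check: end=%d misaligned=%s\n", buggy_end, (buggy_end & 3) ? "yes" : "no");
    printf("fixed check: end=%d misaligned=%s\n", end_y, (end_y & 3) ? "yes" : "no");
    return 0;
}

Here the buggy form reports the end offset as aligned while the fixed form correctly flags 1082 as misaligned, which is how the workaround check could fail to fire.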
@@ -360,6 +360,21 @@ int intel_opregion_notify_encoder(struct intel_encoder *intel_encoder,
                 port++;
         }
 
+        /*
+         * The port numbering and mapping here is bizarre. The now-obsolete
+         * swsci spec supports ports numbered [0..4]. Port E is handled as a
+         * special case, but port F and beyond are not. The functionality is
+         * supposed to be obsolete for new platforms. Just bail out if the port
+         * number is out of bounds after mapping.
+         */
+        if (port > 4) {
+                drm_dbg_kms(&dev_priv->drm,
+                            "[ENCODER:%d:%s] port %c (index %u) out of bounds for display power state notification\n",
+                            intel_encoder->base.base.id, intel_encoder->base.name,
+                            port_name(intel_encoder->port), port);
+                return -EINVAL;
+        }
+
         if (!enable)
                 parm |= 4 << 8;
@@ -842,11 +842,9 @@ void i915_ttm_adjust_lru(struct drm_i915_gem_object *obj)
         } else if (obj->mm.madv != I915_MADV_WILLNEED) {
                 bo->priority = I915_TTM_PRIO_PURGE;
         } else if (!i915_gem_object_has_pages(obj)) {
-                if (bo->priority < I915_TTM_PRIO_HAS_PAGES)
-                        bo->priority = I915_TTM_PRIO_HAS_PAGES;
+                bo->priority = I915_TTM_PRIO_NO_PAGES;
         } else {
-                if (bo->priority > I915_TTM_PRIO_NO_PAGES)
-                        bo->priority = I915_TTM_PRIO_NO_PAGES;
+                bo->priority = I915_TTM_PRIO_HAS_PAGES;
         }
 
         ttm_bo_move_to_lru_tail(bo, bo->resource, NULL);
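Note on the TTM priority fix above: the bucket names describe where an object should sit for eviction, yet the old code picked I915_TTM_PRIO_HAS_PAGES for objects without pages and vice versa, and only when that would move the existing value. Assuming TTM's usual convention that lower-numbered priority buckets are scanned first when evicting, the intended mapping looks like the sketch below (simplified stand-in types, illustrative only):

#include <stdio.h>
#include <stdbool.h>

/* Simplified stand-ins for the i915/TTM priority buckets; lower means scanned earlier. */
enum ttm_prio {
    PRIO_PURGE,      /* userspace said it does not need the contents: drop first */
    PRIO_NO_PAGES,   /* no backing pages: cheap to evict */
    PRIO_HAS_PAGES,  /* populated: keep around the longest */
};

struct obj {
    bool shrinkable;   /* stands in for obj->mm.madv != I915_MADV_WILLNEED */
    bool has_pages;    /* stands in for i915_gem_object_has_pages(obj) */
};

/* Mirrors the fixed selection: unconditional, and not inverted. */
static enum ttm_prio pick_priority(const struct obj *o)
{
    if (o->shrinkable)
        return PRIO_PURGE;
    if (!o->has_pages)
        return PRIO_NO_PAGES;
    return PRIO_HAS_PAGES;
}

int main(void)
{
    struct obj purgeable = { .shrinkable = true,  .has_pages = true  };
    struct obj empty     = { .shrinkable = false, .has_pages = false };
    struct obj populated = { .shrinkable = false, .has_pages = true  };

    printf("purgeable -> %d, empty -> %d, populated -> %d\n",
           pick_priority(&purgeable), pick_priority(&empty), pick_priority(&populated));
    return 0;
}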
@@ -1148,7 +1148,7 @@ static inline void ppgtt_generate_shadow_entry(struct intel_gvt_gtt_entry *se,
         ops->set_pfn(se, s->shadow_page.mfn);
 }
 
-/**
+/*
  * Check if can do 2M page
  * @vgpu: target vgpu
  * @entry: target pfn's gtt entry
@@ -2193,7 +2193,7 @@ static int emulate_ggtt_mmio_read(struct intel_vgpu *vgpu,
 }
 
 /**
- * intel_vgpu_emulate_gtt_mmio_read - emulate GTT MMIO register read
+ * intel_vgpu_emulate_ggtt_mmio_read - emulate GTT MMIO register read
  * @vgpu: a vGPU
  * @off: register offset
  * @p_data: data will be returned to guest
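Note on the GVT cleanup above: a comment opened with "/**" is parsed by scripts/kernel-doc, so it has to follow the kernel-doc grammar and name the function it actually sits above. The 2M-page helper's comment is therefore demoted to a plain comment, and the ggtt read helper's kernel-doc now spells the function name correctly. A generic sketch of the two comment forms (hypothetical functions, not the gtt.c code):

#include <stdio.h>

/* Plain comment: scripts/kernel-doc ignores it, so free-form notes are fine here. */
static int helper_twice(int x)
{
    return 2 * x;
}

/**
 * sketch_add() - add two integers
 * @a: first addend
 * @b: second addend
 *
 * A kernel-doc comment starts with a double asterisk, names the function it
 * precedes, and documents every parameter, so documentation builds stay clean.
 *
 * Return: the sum of @a and @b.
 */
static int sketch_add(int a, int b)
{
    return helper_twice(a) / 2 + b;
}

int main(void)
{
    printf("%d\n", sketch_add(2, 3));
    return 0;
}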
@@ -4853,7 +4853,7 @@ static bool check_mbus_joined(u8 active_pipes,
 {
         int i;
 
-        for (i = 0; i < dbuf_slices[i].active_pipes; i++) {
+        for (i = 0; dbuf_slices[i].active_pipes != 0; i++) {
                 if (dbuf_slices[i].active_pipes == active_pipes)
                         return dbuf_slices[i].join_mbus;
         }
@@ -4870,7 +4870,7 @@ static u8 compute_dbuf_slices(enum pipe pipe, u8 active_pipes, bool join_mbus,
 {
         int i;
 
-        for (i = 0; i < dbuf_slices[i].active_pipes; i++) {
+        for (i = 0; dbuf_slices[i].active_pipes != 0; i++) {
                 if (dbuf_slices[i].active_pipes == active_pipes &&
                     dbuf_slices[i].join_mbus == join_mbus)
                         return dbuf_slices[i].dbuf_mask[pipe];
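Note on the dbuf/mbus lookup fixes above: the broken loop condition "i < dbuf_slices[i].active_pipes" compared the index against the contents of the very entry it was indexing, so the walk could stop early or run past the useful entries; the fix treats an entry with active_pipes == 0 as the table terminator. A standalone sketch of that sentinel-terminated lookup (simplified types, not the i915 tables):

#include <stdio.h>
#include <stdbool.h>
#include <stdint.h>

/* Simplified stand-in for a slice-config table entry. */
struct slice_conf {
    uint8_t active_pipes;   /* 0 terminates the table */
    bool    join_mbus;
};

static const struct slice_conf table[] = {
    { .active_pipes = 0x1, .join_mbus = true  },
    { .active_pipes = 0x3, .join_mbus = false },
    { .active_pipes = 0x7, .join_mbus = false },
    { 0 }                                       /* sentinel: active_pipes == 0 */
};

static bool check_joined(uint8_t active_pipes)
{
    /* Walk until the zeroed sentinel entry, exactly like the fixed loops above. */
    for (int i = 0; table[i].active_pipes != 0; i++)
        if (table[i].active_pipes == active_pipes)
            return table[i].join_mbus;
    return false;
}

int main(void)
{
    printf("pipes 0x1 -> join_mbus=%d\n", check_joined(0x1));
    printf("pipes 0xf -> join_mbus=%d (no match, default)\n", check_joined(0xf));
    return 0;
}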
@@ -786,18 +786,101 @@ void mtk_dsi_ddp_stop(struct device *dev)
         mtk_dsi_poweroff(dsi);
 }
 
+static int mtk_dsi_encoder_init(struct drm_device *drm, struct mtk_dsi *dsi)
+{
+        int ret;
+
+        ret = drm_simple_encoder_init(drm, &dsi->encoder,
+                                      DRM_MODE_ENCODER_DSI);
+        if (ret) {
+                DRM_ERROR("Failed to encoder init to drm\n");
+                return ret;
+        }
+
+        dsi->encoder.possible_crtcs = mtk_drm_find_possible_crtc_by_comp(drm, dsi->host.dev);
+
+        ret = drm_bridge_attach(&dsi->encoder, &dsi->bridge, NULL,
+                                DRM_BRIDGE_ATTACH_NO_CONNECTOR);
+        if (ret)
+                goto err_cleanup_encoder;
+
+        dsi->connector = drm_bridge_connector_init(drm, &dsi->encoder);
+        if (IS_ERR(dsi->connector)) {
+                DRM_ERROR("Unable to create bridge connector\n");
+                ret = PTR_ERR(dsi->connector);
+                goto err_cleanup_encoder;
+        }
+        drm_connector_attach_encoder(dsi->connector, &dsi->encoder);
+
+        return 0;
+
+err_cleanup_encoder:
+        drm_encoder_cleanup(&dsi->encoder);
+        return ret;
+}
+
+static int mtk_dsi_bind(struct device *dev, struct device *master, void *data)
+{
+        int ret;
+        struct drm_device *drm = data;
+        struct mtk_dsi *dsi = dev_get_drvdata(dev);
+
+        ret = mtk_dsi_encoder_init(drm, dsi);
+        if (ret)
+                return ret;
+
+        return device_reset_optional(dev);
+}
+
+static void mtk_dsi_unbind(struct device *dev, struct device *master,
+                           void *data)
+{
+        struct mtk_dsi *dsi = dev_get_drvdata(dev);
+
+        drm_encoder_cleanup(&dsi->encoder);
+}
+
+static const struct component_ops mtk_dsi_component_ops = {
+        .bind = mtk_dsi_bind,
+        .unbind = mtk_dsi_unbind,
+};
+
 static int mtk_dsi_host_attach(struct mipi_dsi_host *host,
                                struct mipi_dsi_device *device)
 {
         struct mtk_dsi *dsi = host_to_dsi(host);
+        struct device *dev = host->dev;
+        int ret;
 
         dsi->lanes = device->lanes;
         dsi->format = device->format;
         dsi->mode_flags = device->mode_flags;
+        dsi->next_bridge = devm_drm_of_get_bridge(dev, dev->of_node, 0, 0);
+        if (IS_ERR(dsi->next_bridge))
+                return PTR_ERR(dsi->next_bridge);
+
+        drm_bridge_add(&dsi->bridge);
+
+        ret = component_add(host->dev, &mtk_dsi_component_ops);
+        if (ret) {
+                DRM_ERROR("failed to add dsi_host component: %d\n", ret);
+                drm_bridge_remove(&dsi->bridge);
+                return ret;
+        }
 
         return 0;
 }
 
+static int mtk_dsi_host_detach(struct mipi_dsi_host *host,
+                               struct mipi_dsi_device *device)
+{
+        struct mtk_dsi *dsi = host_to_dsi(host);
+
+        component_del(host->dev, &mtk_dsi_component_ops);
+        drm_bridge_remove(&dsi->bridge);
+
+        return 0;
+}
+
 static void mtk_dsi_wait_for_idle(struct mtk_dsi *dsi)
 {
         int ret;
@@ -938,73 +1021,14 @@ static ssize_t mtk_dsi_host_transfer(struct mipi_dsi_host *host,
 
 static const struct mipi_dsi_host_ops mtk_dsi_ops = {
         .attach = mtk_dsi_host_attach,
+        .detach = mtk_dsi_host_detach,
         .transfer = mtk_dsi_host_transfer,
 };
 
-static int mtk_dsi_encoder_init(struct drm_device *drm, struct mtk_dsi *dsi)
-{
-        int ret;
-
-        ret = drm_simple_encoder_init(drm, &dsi->encoder,
-                                      DRM_MODE_ENCODER_DSI);
-        if (ret) {
-                DRM_ERROR("Failed to encoder init to drm\n");
-                return ret;
-        }
-
-        dsi->encoder.possible_crtcs = mtk_drm_find_possible_crtc_by_comp(drm, dsi->host.dev);
-
-        ret = drm_bridge_attach(&dsi->encoder, &dsi->bridge, NULL,
-                                DRM_BRIDGE_ATTACH_NO_CONNECTOR);
-        if (ret)
-                goto err_cleanup_encoder;
-
-        dsi->connector = drm_bridge_connector_init(drm, &dsi->encoder);
-        if (IS_ERR(dsi->connector)) {
-                DRM_ERROR("Unable to create bridge connector\n");
-                ret = PTR_ERR(dsi->connector);
-                goto err_cleanup_encoder;
-        }
-        drm_connector_attach_encoder(dsi->connector, &dsi->encoder);
-
-        return 0;
-
-err_cleanup_encoder:
-        drm_encoder_cleanup(&dsi->encoder);
-        return ret;
-}
-
-static int mtk_dsi_bind(struct device *dev, struct device *master, void *data)
-{
-        int ret;
-        struct drm_device *drm = data;
-        struct mtk_dsi *dsi = dev_get_drvdata(dev);
-
-        ret = mtk_dsi_encoder_init(drm, dsi);
-        if (ret)
-                return ret;
-
-        return device_reset_optional(dev);
-}
-
-static void mtk_dsi_unbind(struct device *dev, struct device *master,
-                           void *data)
-{
-        struct mtk_dsi *dsi = dev_get_drvdata(dev);
-
-        drm_encoder_cleanup(&dsi->encoder);
-}
-
-static const struct component_ops mtk_dsi_component_ops = {
-        .bind = mtk_dsi_bind,
-        .unbind = mtk_dsi_unbind,
-};
-
 static int mtk_dsi_probe(struct platform_device *pdev)
 {
         struct mtk_dsi *dsi;
         struct device *dev = &pdev->dev;
-        struct drm_panel *panel;
         struct resource *regs;
         int irq_num;
         int ret;
@@ -1021,19 +1045,6 @@ static int mtk_dsi_probe(struct platform_device *pdev)
                 return ret;
         }
 
-        ret = drm_of_find_panel_or_bridge(dev->of_node, 0, 0,
-                                          &panel, &dsi->next_bridge);
-        if (ret)
-                goto err_unregister_host;
-
-        if (panel) {
-                dsi->next_bridge = devm_drm_panel_bridge_add(dev, panel);
-                if (IS_ERR(dsi->next_bridge)) {
-                        ret = PTR_ERR(dsi->next_bridge);
-                        goto err_unregister_host;
-                }
-        }
-
         dsi->driver_data = of_device_get_match_data(dev);
 
         dsi->engine_clk = devm_clk_get(dev, "engine");
@@ -1098,14 +1109,6 @@ static int mtk_dsi_probe(struct platform_device *pdev)
         dsi->bridge.of_node = dev->of_node;
         dsi->bridge.type = DRM_MODE_CONNECTOR_DSI;
 
-        drm_bridge_add(&dsi->bridge);
-
-        ret = component_add(&pdev->dev, &mtk_dsi_component_ops);
-        if (ret) {
-                dev_err(&pdev->dev, "failed to add component: %d\n", ret);
-                goto err_unregister_host;
-        }
-
         return 0;
 
 err_unregister_host:
@@ -1118,8 +1121,6 @@ static int mtk_dsi_remove(struct platform_device *pdev)
         struct mtk_dsi *dsi = platform_get_drvdata(pdev);
 
         mtk_output_dsi_disable(dsi);
-        drm_bridge_remove(&dsi->bridge);
-        component_del(&pdev->dev, &mtk_dsi_component_ops);
         mipi_dsi_host_unregister(&dsi->host);
 
         return 0;
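Note on the mtk_dsi restructuring above: the old probe path looked up the downstream panel/bridge and registered the component before any DSI peripheral had attached, and with an external bridge the two sides could keep deferring on each other, each returning -EPROBE_DEFER while waiting for the other. Moving the bridge lookup, drm_bridge_add() and component_add() into mtk_dsi_host_attach() (with the matching teardown in the new detach hook) means they only run once the peripheral has actually shown up. A toy userspace simulation of the two orderings (hypothetical helpers, no kernel APIs):

#include <stdio.h>
#include <stdbool.h>

#define EPROBE_DEFER 517   /* same numeric value as the kernel's EPROBE_DEFER */

static bool dsi_host_registered;
static bool peripheral_attached;   /* the external bridge is a DSI peripheral */

/* The external bridge can only finish probing once the DSI host exists. */
static int bridge_probe(void)
{
    if (!dsi_host_registered)
        return -EPROBE_DEFER;
    peripheral_attached = true;
    return 0;
}

/* Old shape: register the host, look for the bridge, and unregister again on failure. */
static int dsi_probe_old(void)
{
    dsi_host_registered = true;
    if (!peripheral_attached) {
        dsi_host_registered = false;   /* the err_unregister_host path */
        return -EPROBE_DEFER;
    }
    return 0;
}

/* New shape: probe only registers the host and returns; no bridge lookup here. */
static int dsi_probe_new(void)
{
    dsi_host_registered = true;
    return 0;
}

/* Called once the peripheral has attached; the bridge lookup now succeeds. */
static int dsi_host_attach(void)
{
    return peripheral_attached ? 0 : -EPROBE_DEFER;
}

int main(void)
{
    int a, b, c;

    /* Old ordering: both drivers keep deferring on each other. */
    a = dsi_probe_old();
    b = bridge_probe();
    c = dsi_probe_old();
    printf("old: dsi=%d bridge=%d dsi retry=%d\n", a, b, c);

    /* New ordering: the host stays registered, the bridge probes, attach succeeds. */
    a = dsi_probe_new();
    b = bridge_probe();
    c = dsi_host_attach();
    printf("new: dsi=%d bridge=%d attach=%d\n", a, b, c);
    return 0;
}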
@@ -198,7 +198,8 @@ void radeon_atom_backlight_init(struct radeon_encoder *radeon_encoder,
          * so don't register a backlight device
          */
         if ((rdev->pdev->subsystem_vendor == PCI_VENDOR_ID_APPLE) &&
-            (rdev->pdev->device == 0x6741))
+            (rdev->pdev->device == 0x6741) &&
+            !dmi_match(DMI_PRODUCT_NAME, "iMac12,1"))
                 return;
 
         if (!radeon_encoder->enc_priv)