Commit 4a401cee authored by Dave Airlie


Merge tag 'drm-intel-next-fixes-2016-12-22' of git://anongit.freedesktop.org/git/drm-intel into drm-fixes

First set of i915 fixes for code in next.

* tag 'drm-intel-next-fixes-2016-12-22' of git://anongit.freedesktop.org/git/drm-intel:
  drm/i915: skip the first 4k of stolen memory on everything >= gen8
  drm/i915: Fallback to single PAGE_SIZE segments for DMA remapping
  drm/i915: Fix use after free in logical_render_ring_init
  drm/i915: disable PSR by default on HSW/BDW
  drm/i915: Fix setting of boost freq tunable
  drm/i915: tune down the fast link training vs boot fail
  drm/i915: Reorder phys backing storage release
  drm/i915/gen9: Fix PCODE polling during SAGV disabling
  drm/i915/gen9: Fix PCODE polling during CDCLK change notification
  drm/i915/dsi: Fix chv_exec_gpio disabling the GPIOs it is setting
  drm/i915/dsi: Fix swapping of MIPI_SEQ_DEASSERT_RESET / MIPI_SEQ_ASSERT_RESET
  drm/i915/dsi: Do not clear DPOUNIT_CLOCK_GATE_DISABLE from vlv_init_display_clock_gating
  drm/i915: drop the struct_mutex when wedged or trying to reset
parents d043835d 6ba0566c
@@ -3509,6 +3509,8 @@ extern void intel_display_print_error_state(struct drm_i915_error_state_buf *e,
 int sandybridge_pcode_read(struct drm_i915_private *dev_priv, u32 mbox, u32 *val);
 int sandybridge_pcode_write(struct drm_i915_private *dev_priv, u32 mbox, u32 val);
+int skl_pcode_request(struct drm_i915_private *dev_priv, u32 mbox, u32 request,
+		      u32 reply_mask, u32 reply, int timeout_base_ms);
 
 /* intel_sideband.c */
 u32 vlv_punit_read(struct drm_i915_private *dev_priv, u32 addr);
@@ -174,21 +174,35 @@ static struct sg_table *
 i915_gem_object_get_pages_phys(struct drm_i915_gem_object *obj)
 {
 	struct address_space *mapping = obj->base.filp->f_mapping;
-	char *vaddr = obj->phys_handle->vaddr;
+	drm_dma_handle_t *phys;
 	struct sg_table *st;
 	struct scatterlist *sg;
+	char *vaddr;
 	int i;
 
 	if (WARN_ON(i915_gem_object_needs_bit17_swizzle(obj)))
 		return ERR_PTR(-EINVAL);
 
+	/* Always aligning to the object size, allows a single allocation
+	 * to handle all possible callers, and given typical object sizes,
+	 * the alignment of the buddy allocation will naturally match.
+	 */
+	phys = drm_pci_alloc(obj->base.dev,
+			     obj->base.size,
+			     roundup_pow_of_two(obj->base.size));
+	if (!phys)
+		return ERR_PTR(-ENOMEM);
+
+	vaddr = phys->vaddr;
 	for (i = 0; i < obj->base.size / PAGE_SIZE; i++) {
 		struct page *page;
 		char *src;
 
 		page = shmem_read_mapping_page(mapping, i);
-		if (IS_ERR(page))
-			return ERR_CAST(page);
+		if (IS_ERR(page)) {
+			st = ERR_CAST(page);
+			goto err_phys;
+		}
 
 		src = kmap_atomic(page);
 		memcpy(vaddr, src, PAGE_SIZE);
@@ -202,21 +216,29 @@ i915_gem_object_get_pages_phys(struct drm_i915_gem_object *obj)
 	i915_gem_chipset_flush(to_i915(obj->base.dev));
 
 	st = kmalloc(sizeof(*st), GFP_KERNEL);
-	if (st == NULL)
-		return ERR_PTR(-ENOMEM);
+	if (!st) {
+		st = ERR_PTR(-ENOMEM);
+		goto err_phys;
+	}
 
 	if (sg_alloc_table(st, 1, GFP_KERNEL)) {
 		kfree(st);
-		return ERR_PTR(-ENOMEM);
+		st = ERR_PTR(-ENOMEM);
+		goto err_phys;
 	}
 
 	sg = st->sgl;
 	sg->offset = 0;
 	sg->length = obj->base.size;
 
-	sg_dma_address(sg) = obj->phys_handle->busaddr;
+	sg_dma_address(sg) = phys->busaddr;
 	sg_dma_len(sg) = obj->base.size;
 
+	obj->phys_handle = phys;
+	return st;
 
+err_phys:
+	drm_pci_free(obj->base.dev, phys);
 	return st;
 }
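The "always aligning to the object size" comment in the first hunk above is easier to see with numbers. A hedged illustration, plain arithmetic rather than driver code: the kernel's buddy allocator hands out power-of-two blocks, so requesting an alignment of roundup_pow_of_two(size) costs nothing extra.

	/* Illustration only, not i915 code: a 12 KiB (3-page) object rounds
	 * up to a 16 KiB alignment, and the buddy allocator would satisfy a
	 * 12 KiB request from an order-2 (16 KiB) block anyway, so the
	 * stricter alignment comes for free.
	 */
	size_t size  = 12 << 10;                 /* 12288 */
	size_t align = roundup_pow_of_two(size); /* 16384 */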
@@ -272,12 +294,13 @@ i915_gem_object_put_pages_phys(struct drm_i915_gem_object *obj,
 
 	sg_free_table(pages);
 	kfree(pages);
+
+	drm_pci_free(obj->base.dev, obj->phys_handle);
 }
 
 static void
 i915_gem_object_release_phys(struct drm_i915_gem_object *obj)
 {
-	drm_pci_free(obj->base.dev, obj->phys_handle);
 	i915_gem_object_unpin_pages(obj);
 }
@@ -538,15 +561,13 @@ int
 i915_gem_object_attach_phys(struct drm_i915_gem_object *obj,
 			    int align)
 {
-	drm_dma_handle_t *phys;
 	int ret;
 
-	if (obj->phys_handle) {
-		if ((unsigned long)obj->phys_handle->vaddr & (align -1))
-			return -EBUSY;
+	if (align > obj->base.size)
+		return -EINVAL;
 
+	if (obj->ops == &i915_gem_phys_ops)
 		return 0;
-	}
 
 	if (obj->mm.madv != I915_MADV_WILLNEED)
 		return -EFAULT;
@@ -562,12 +583,6 @@ i915_gem_object_attach_phys(struct drm_i915_gem_object *obj,
 	if (obj->mm.pages)
 		return -EBUSY;
 
-	/* create a new object */
-	phys = drm_pci_alloc(obj->base.dev, obj->base.size, align);
-	if (!phys)
-		return -ENOMEM;
-
-	obj->phys_handle = phys;
 	obj->ops = &i915_gem_phys_ops;
 
 	return i915_gem_object_pin_pages(obj);
@@ -2326,7 +2341,8 @@ static struct sg_table *
 i915_gem_object_get_pages_gtt(struct drm_i915_gem_object *obj)
 {
 	struct drm_i915_private *dev_priv = to_i915(obj->base.dev);
-	int page_count, i;
+	const unsigned long page_count = obj->base.size / PAGE_SIZE;
+	unsigned long i;
 	struct address_space *mapping;
 	struct sg_table *st;
 	struct scatterlist *sg;
@@ -2352,7 +2368,7 @@ i915_gem_object_get_pages_gtt(struct drm_i915_gem_object *obj)
 	if (st == NULL)
 		return ERR_PTR(-ENOMEM);
 
-	page_count = obj->base.size / PAGE_SIZE;
+rebuild_st:
 	if (sg_alloc_table(st, page_count, GFP_KERNEL)) {
 		kfree(st);
 		return ERR_PTR(-ENOMEM);
@@ -2411,8 +2427,25 @@ i915_gem_object_get_pages_gtt(struct drm_i915_gem_object *obj)
 	i915_sg_trim(st);
 
 	ret = i915_gem_gtt_prepare_pages(obj, st);
-	if (ret)
-		goto err_pages;
+	if (ret) {
+		/* DMA remapping failed? One possible cause is that
+		 * it could not reserve enough large entries, asking
+		 * for PAGE_SIZE chunks instead may be helpful.
+		 */
+		if (max_segment > PAGE_SIZE) {
+			for_each_sgt_page(page, sgt_iter, st)
+				put_page(page);
+			sg_free_table(st);
+
+			max_segment = PAGE_SIZE;
+			goto rebuild_st;
+		} else {
+			dev_warn(&dev_priv->drm.pdev->dev,
+				 "Failed to DMA remap %lu pages\n",
+				 page_count);
+			goto err_pages;
+		}
+	}
 
 	if (i915_gem_object_needs_bit17_swizzle(obj))
 		i915_gem_object_do_bit_17_swizzle(obj, st);
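The new error path above is the core of "Fallback to single PAGE_SIZE segments for DMA remapping". A minimal sketch of the same pattern, assuming a caller-owned page array and a struct device; the helper name map_pages_with_fallback is hypothetical, not an i915 function (needs linux/scatterlist.h, linux/dma-mapping.h, linux/slab.h, linux/err.h):

	/* Hypothetical helper, not i915 code: build a scatterlist from
	 * @pages, coalescing physically contiguous pages into large
	 * segments, and map it for DMA. If the IOMMU cannot reserve large
	 * entries, rebuild with one PAGE_SIZE segment per page.
	 */
	static struct sg_table *map_pages_with_fallback(struct device *dev,
							struct page **pages,
							unsigned long n_pages)
	{
		unsigned int max_segment = UINT_MAX & PAGE_MASK;
		struct sg_table *st;
		struct scatterlist *sg;
		unsigned long i;

		st = kmalloc(sizeof(*st), GFP_KERNEL);
		if (!st)
			return ERR_PTR(-ENOMEM);

	rebuild_st:
		/* Size for the worst case of one segment per page;
		 * sg_mark_end() below trims the unused tail. */
		if (sg_alloc_table(st, n_pages, GFP_KERNEL)) {
			kfree(st);
			return ERR_PTR(-ENOMEM);
		}

		sg = st->sgl;
		st->nents = 0;
		for (i = 0; i < n_pages; i++) {
			if (st->nents &&
			    sg->length + PAGE_SIZE <= max_segment &&
			    page_to_pfn(pages[i]) ==
			    page_to_pfn(pages[i - 1]) + 1) {
				sg->length += PAGE_SIZE; /* extend segment */
			} else {
				if (st->nents)
					sg = sg_next(sg);
				st->nents++;
				sg_set_page(sg, pages[i], PAGE_SIZE, 0);
			}
		}
		sg_mark_end(sg);

		if (!dma_map_sg(dev, st->sgl, st->nents, DMA_BIDIRECTIONAL)) {
			sg_free_table(st);
			if (max_segment > PAGE_SIZE) {
				/* Ask less of the IOMMU and retry. */
				max_segment = PAGE_SIZE;
				goto rebuild_st;
			}
			kfree(st);
			return ERR_PTR(-ENOMEM);
		}

		return st;
	}

The design point: coalesced segments keep the scatterlist short, but a fragmented IOMMU address space may be unable to back them, so single-page segments are the conservative retry before failing the allocation outright.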
@@ -55,10 +55,9 @@ int i915_gem_stolen_insert_node_in_range(struct drm_i915_private *dev_priv,
 		return -ENODEV;
 
 	/* See the comment at the drm_mm_init() call for more about this check.
-	 * WaSkipStolenMemoryFirstPage:bdw,chv,kbl (incomplete)
+	 * WaSkipStolenMemoryFirstPage:bdw+ (incomplete)
 	 */
-	if (start < 4096 && (IS_GEN8(dev_priv) ||
-			     IS_KBL_REVID(dev_priv, 0, KBL_REVID_A0)))
+	if (start < 4096 && INTEL_GEN(dev_priv) >= 8)
 		start = 4096;
 
 	mutex_lock(&dev_priv->mm.stolen_lock);
@@ -460,7 +460,7 @@ static ssize_t gt_min_freq_mhz_store(struct device *kdev,
 
 static DEVICE_ATTR(gt_act_freq_mhz, S_IRUGO, gt_act_freq_mhz_show, NULL);
 static DEVICE_ATTR(gt_cur_freq_mhz, S_IRUGO, gt_cur_freq_mhz_show, NULL);
-static DEVICE_ATTR(gt_boost_freq_mhz, S_IRUGO, gt_boost_freq_mhz_show, gt_boost_freq_mhz_store);
+static DEVICE_ATTR(gt_boost_freq_mhz, S_IRUGO | S_IWUSR, gt_boost_freq_mhz_show, gt_boost_freq_mhz_store);
 static DEVICE_ATTR(gt_max_freq_mhz, S_IRUGO | S_IWUSR, gt_max_freq_mhz_show, gt_max_freq_mhz_store);
 static DEVICE_ATTR(gt_min_freq_mhz, S_IRUGO | S_IWUSR, gt_min_freq_mhz_show, gt_min_freq_mhz_store);
@@ -46,14 +46,20 @@ struct edp_power_seq {
 	u16 t11_t12;
 } __packed;
 
-/* MIPI Sequence Block definitions */
+/*
+ * MIPI Sequence Block definitions
+ *
+ * Note the VBT spec has AssertReset / DeassertReset swapped from their
+ * usual naming, we use the proper names here to avoid confusion when
+ * reading the code.
+ */
 enum mipi_seq {
 	MIPI_SEQ_END = 0,
-	MIPI_SEQ_ASSERT_RESET,
+	MIPI_SEQ_DEASSERT_RESET,	/* Spec says MipiAssertResetPin */
 	MIPI_SEQ_INIT_OTP,
 	MIPI_SEQ_DISPLAY_ON,
 	MIPI_SEQ_DISPLAY_OFF,
-	MIPI_SEQ_DEASSERT_RESET,
+	MIPI_SEQ_ASSERT_RESET,		/* Spec says MipiDeassertResetPin */
 	MIPI_SEQ_BACKLIGHT_ON,		/* sequence block v2+ */
 	MIPI_SEQ_BACKLIGHT_OFF,		/* sequence block v2+ */
 	MIPI_SEQ_TEAR_ON,		/* sequence block v2+ */
@@ -6244,35 +6244,24 @@ skl_dpll0_disable(struct drm_i915_private *dev_priv)
 	dev_priv->cdclk_pll.vco = 0;
 }
 
-static bool skl_cdclk_pcu_ready(struct drm_i915_private *dev_priv)
-{
-	int ret;
-	u32 val;
-
-	/* inform PCU we want to change CDCLK */
-	val = SKL_CDCLK_PREPARE_FOR_CHANGE;
-	mutex_lock(&dev_priv->rps.hw_lock);
-	ret = sandybridge_pcode_read(dev_priv, SKL_PCODE_CDCLK_CONTROL, &val);
-	mutex_unlock(&dev_priv->rps.hw_lock);
-
-	return ret == 0 && (val & SKL_CDCLK_READY_FOR_CHANGE);
-}
-
-static bool skl_cdclk_wait_for_pcu_ready(struct drm_i915_private *dev_priv)
-{
-	return _wait_for(skl_cdclk_pcu_ready(dev_priv), 3000, 10) == 0;
-}
-
 static void skl_set_cdclk(struct drm_i915_private *dev_priv, int cdclk, int vco)
 {
 	u32 freq_select, pcu_ack;
+	int ret;
 
 	WARN_ON((cdclk == 24000) != (vco == 0));
 
 	DRM_DEBUG_DRIVER("Changing CDCLK to %d kHz (VCO %d kHz)\n", cdclk, vco);
 
-	if (!skl_cdclk_wait_for_pcu_ready(dev_priv)) {
-		DRM_ERROR("failed to inform PCU about cdclk change\n");
+	mutex_lock(&dev_priv->rps.hw_lock);
+	ret = skl_pcode_request(dev_priv, SKL_PCODE_CDCLK_CONTROL,
+				SKL_CDCLK_PREPARE_FOR_CHANGE,
+				SKL_CDCLK_READY_FOR_CHANGE,
+				SKL_CDCLK_READY_FOR_CHANGE, 3);
+	mutex_unlock(&dev_priv->rps.hw_lock);
+	if (ret) {
+		DRM_ERROR("Failed to inform PCU about cdclk change (%d)\n",
+			  ret);
 		return;
 	}
@@ -4014,8 +4014,8 @@ intel_dp_check_link_status(struct intel_dp *intel_dp)
 		return;
 
 	/* FIXME: we need to synchronize this sort of stuff with hardware
-	 * readout */
-	if (WARN_ON_ONCE(!intel_dp->lane_count))
+	 * readout. Currently fast link training doesn't work on boot-up. */
+	if (!intel_dp->lane_count)
 		return;
 
 	/* if link training is requested we should perform it always */
@@ -300,7 +300,8 @@ static void chv_exec_gpio(struct drm_i915_private *dev_priv,
 	mutex_lock(&dev_priv->sb_lock);
 	vlv_iosf_sb_write(dev_priv, port, cfg1, 0);
 	vlv_iosf_sb_write(dev_priv, port, cfg0,
-			  CHV_GPIO_GPIOCFG_GPO | CHV_GPIO_GPIOTXSTATE(value));
+			  CHV_GPIO_GPIOEN | CHV_GPIO_GPIOCFG_GPO |
+			  CHV_GPIO_GPIOTXSTATE(value));
 	mutex_unlock(&dev_priv->sb_lock);
 }
@@ -376,11 +377,11 @@ static const fn_mipi_elem_exec exec_elem[] = {
  */
 static const char * const seq_name[] = {
-	[MIPI_SEQ_ASSERT_RESET] = "MIPI_SEQ_ASSERT_RESET",
+	[MIPI_SEQ_DEASSERT_RESET] = "MIPI_SEQ_DEASSERT_RESET",
 	[MIPI_SEQ_INIT_OTP] = "MIPI_SEQ_INIT_OTP",
 	[MIPI_SEQ_DISPLAY_ON] = "MIPI_SEQ_DISPLAY_ON",
 	[MIPI_SEQ_DISPLAY_OFF] = "MIPI_SEQ_DISPLAY_OFF",
-	[MIPI_SEQ_DEASSERT_RESET] = "MIPI_SEQ_DEASSERT_RESET",
+	[MIPI_SEQ_ASSERT_RESET] = "MIPI_SEQ_ASSERT_RESET",
 	[MIPI_SEQ_BACKLIGHT_ON] = "MIPI_SEQ_BACKLIGHT_ON",
 	[MIPI_SEQ_BACKLIGHT_OFF] = "MIPI_SEQ_BACKLIGHT_OFF",
 	[MIPI_SEQ_TEAR_ON] = "MIPI_SEQ_TEAR_ON",
@@ -1968,12 +1968,7 @@ int logical_render_ring_init(struct intel_engine_cs *engine)
 			  ret);
 	}
 
-	ret = logical_ring_init(engine);
-	if (ret) {
-		lrc_destroy_wa_ctx_obj(engine);
-	}
-
-	return ret;
+	return logical_ring_init(engine);
 }
 
 int logical_xcs_ring_init(struct intel_engine_cs *engine)
@@ -2964,24 +2964,10 @@ intel_enable_sagv(struct drm_i915_private *dev_priv)
 	return 0;
 }
 
-static int
-intel_do_sagv_disable(struct drm_i915_private *dev_priv)
-{
-	int ret;
-	uint32_t temp = GEN9_SAGV_DISABLE;
-
-	ret = sandybridge_pcode_read(dev_priv, GEN9_PCODE_SAGV_CONTROL,
-				     &temp);
-	if (ret)
-		return ret;
-	else
-		return temp & GEN9_SAGV_IS_DISABLED;
-}
-
 int
 intel_disable_sagv(struct drm_i915_private *dev_priv)
 {
-	int ret, result;
+	int ret;
 
 	if (!intel_has_sagv(dev_priv))
 		return 0;
@@ -2993,25 +2979,23 @@ intel_disable_sagv(struct drm_i915_private *dev_priv)
 	mutex_lock(&dev_priv->rps.hw_lock);
 
 	/* bspec says to keep retrying for at least 1 ms */
-	ret = wait_for(result = intel_do_sagv_disable(dev_priv), 1);
+	ret = skl_pcode_request(dev_priv, GEN9_PCODE_SAGV_CONTROL,
+				GEN9_SAGV_DISABLE,
+				GEN9_SAGV_IS_DISABLED, GEN9_SAGV_IS_DISABLED,
+				1);
 	mutex_unlock(&dev_priv->rps.hw_lock);
 
-	if (ret == -ETIMEDOUT) {
-		DRM_ERROR("Request to disable SAGV timed out\n");
-		return -ETIMEDOUT;
-	}
-
 	/*
 	 * Some skl systems, pre-release machines in particular,
 	 * don't actually have an SAGV.
 	 */
-	if (IS_SKYLAKE(dev_priv) && result == -ENXIO) {
+	if (IS_SKYLAKE(dev_priv) && ret == -ENXIO) {
 		DRM_DEBUG_DRIVER("No SAGV found on system, ignoring\n");
 		dev_priv->sagv_status = I915_SAGV_NOT_CONTROLLED;
 		return 0;
-	} else if (result < 0) {
-		DRM_ERROR("Failed to disable the SAGV\n");
-		return result;
+	} else if (ret < 0) {
+		DRM_ERROR("Failed to disable the SAGV (%d)\n", ret);
+		return ret;
 	}
 
 	dev_priv->sagv_status = I915_SAGV_DISABLED;
@@ -7890,6 +7874,81 @@ int sandybridge_pcode_write(struct drm_i915_private *dev_priv,
 	return 0;
 }
 
+static bool skl_pcode_try_request(struct drm_i915_private *dev_priv, u32 mbox,
+				  u32 request, u32 reply_mask, u32 reply,
+				  u32 *status)
+{
+	u32 val = request;
+
+	*status = sandybridge_pcode_read(dev_priv, mbox, &val);
+
+	return *status || ((val & reply_mask) == reply);
+}
+
+/**
+ * skl_pcode_request - send PCODE request until acknowledgment
+ * @dev_priv: device private
+ * @mbox: PCODE mailbox ID the request is targeted for
+ * @request: request ID
+ * @reply_mask: mask used to check for request acknowledgment
+ * @reply: value used to check for request acknowledgment
+ * @timeout_base_ms: timeout for polling with preemption enabled
+ *
+ * Keep resending the @request to @mbox until PCODE acknowledges it, PCODE
+ * reports an error or an overall timeout of @timeout_base_ms+10 ms expires.
+ * The request is acknowledged once the PCODE reply dword equals @reply after
+ * applying @reply_mask. Polling is first attempted with preemption enabled
+ * for @timeout_base_ms and if this times out for another 10 ms with
+ * preemption disabled.
+ *
+ * Returns 0 on success, %-ETIMEDOUT in case of a timeout, <0 in case of some
+ * other error as reported by PCODE.
+ */
+int skl_pcode_request(struct drm_i915_private *dev_priv, u32 mbox, u32 request,
+		      u32 reply_mask, u32 reply, int timeout_base_ms)
+{
+	u32 status;
+	int ret;
+
+	WARN_ON(!mutex_is_locked(&dev_priv->rps.hw_lock));
+
+#define COND skl_pcode_try_request(dev_priv, mbox, request, reply_mask, reply, \
+				   &status)
+
+	/*
+	 * Prime the PCODE by doing a request first. Normally it guarantees
+	 * that a subsequent request, at most @timeout_base_ms later, succeeds.
+	 * _wait_for() doesn't guarantee when its passed condition is evaluated
+	 * first, so send the first request explicitly.
+	 */
+	if (COND) {
+		ret = 0;
+		goto out;
+	}
+	ret = _wait_for(COND, timeout_base_ms * 1000, 10);
+	if (!ret)
+		goto out;
+
+	/*
+	 * The above can time out if the number of requests was low (2 in the
+	 * worst case) _and_ PCODE was busy for some reason even after a
+	 * (queued) request and @timeout_base_ms delay. As a workaround retry
+	 * the poll with preemption disabled to maximize the number of
+	 * requests. Increase the timeout from @timeout_base_ms to 10ms to
+	 * account for interrupts that could reduce the number of these
+	 * requests.
+	 */
+	DRM_DEBUG_KMS("PCODE timeout, retrying with preemption disabled\n");
+	WARN_ON_ONCE(timeout_base_ms > 3);
+	preempt_disable();
+	ret = wait_for_atomic(COND, 10);
+	preempt_enable();
+
+out:
+	return ret ? ret : status;
+#undef COND
+}
+
 static int byt_gpu_freq(struct drm_i915_private *dev_priv, int val)
 {
 	/*
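A usage note for the new helper, mirroring the skl_set_cdclk() hunk earlier in this merge (the error string below is illustrative): the caller must hold rps.hw_lock, which the WARN_ON in skl_pcode_request() checks.

	/* Caller sketch: keep resending the "prepare for change" request
	 * until PCODE reports ready, polling for 3 ms with preemption
	 * enabled plus the 10 ms preemption-disabled retry window.
	 */
	mutex_lock(&dev_priv->rps.hw_lock);
	ret = skl_pcode_request(dev_priv, SKL_PCODE_CDCLK_CONTROL,
				SKL_CDCLK_PREPARE_FOR_CHANGE,
				SKL_CDCLK_READY_FOR_CHANGE,
				SKL_CDCLK_READY_FOR_CHANGE, 3);
	mutex_unlock(&dev_priv->rps.hw_lock);
	if (ret)
		DRM_ERROR("PCODE request not acknowledged (%d)\n", ret);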
@@ -825,13 +825,9 @@ void intel_psr_init(struct drm_device *dev)
 	dev_priv->psr_mmio_base = IS_HASWELL(dev_priv) ?
 		HSW_EDP_PSR_BASE : BDW_EDP_PSR_BASE;
 
-	/* Per platform default */
-	if (i915.enable_psr == -1) {
-		if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv))
-			i915.enable_psr = 1;
-		else
-			i915.enable_psr = 0;
-	}
+	/* Per platform default: all disabled. */
+	if (i915.enable_psr == -1)
+		i915.enable_psr = 0;
 
 	/* Set link_standby x link_off defaults */
 	if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv))
@@ -1039,7 +1039,18 @@ static bool vlv_power_well_enabled(struct drm_i915_private *dev_priv,
 
 static void vlv_init_display_clock_gating(struct drm_i915_private *dev_priv)
 {
-	I915_WRITE(DSPCLK_GATE_D, VRHUNIT_CLOCK_GATE_DISABLE);
+	u32 val;
+
+	/*
+	 * On driver load, a pipe may be active and driving a DSI display.
+	 * Preserve DPOUNIT_CLOCK_GATE_DISABLE to avoid the pipe getting stuck
+	 * (and never recovering) in this case. intel_dsi_post_disable() will
+	 * clear it when we turn off the display.
+	 */
+	val = I915_READ(DSPCLK_GATE_D);
+	val &= DPOUNIT_CLOCK_GATE_DISABLE;
+	val |= VRHUNIT_CLOCK_GATE_DISABLE;
+	I915_WRITE(DSPCLK_GATE_D, val);
 
 	/*
 	 * Disable trickle feed and enable pnd deadline calculation