Commit 66e514c1 authored by Dave Airlie's avatar Dave Airlie

Merge tag 'drm-intel-next-2014-03-21' of git://anongit.freedesktop.org/drm-intel into drm-next

- Inherit/reuse firmware framebuffers (for real this time) from Jesse, less
  flicker for fastbooting.
- More flexible cloning for hdmi (Ville).
- Some PPGTT fixes from Ben.
- Ring init fixes from Naresh Kumar.
- set_cache_level regression fixes for the vma conversion from Ville&Chris.
- Conversion to the new dp aux helpers (Jani).
- Unification of runtime pm with pc8 support from Paulo, prep work for runtime
  pm on other platforms than HSW.
- Larger cursor sizes (Sagar Kamble).
- Piles of improvements and fixes all over, as usual.

* tag 'drm-intel-next-2014-03-21' of git://anongit.freedesktop.org/drm-intel: (75 commits)
  drm/i915: Include a note about the dangers of I915_READ64/I915_WRITE64
  drm/i915/sdvo: fix questionable return value check
  drm/i915: Fix unsafe loop iteration over vma whilst unbinding them
  drm/i915: Enabling 128x128 and 256x256 ARGB Cursor Support
  drm/i915: Print how many objects are shared in per-process stats
  drm/i915: Per-process stats work better when evaluated per-process
  drm/i915: remove rps local variables
  drm/i915: Remove extraneous MMIO for RPS
  drm/i915: Rename and comment all the RPS *stuff*
  drm/i915: Store the HW min frequency as min_freq
  drm/i915: Fix coding style for RPS
  drm/i915: Reorganize the overclock code
  drm/i915: init pm.suspended earlier
  drm/i915: update the PC8 and runtime PM documentation
  drm/i915: rename __hsw_do_{en, dis}able_pc8
  drm/i915: kill struct i915_package_c8
  drm/i915: move pc8.irqs_disabled to pm.irqs_disabled
  drm/i915: remove dev_priv->pc8.enabled
  drm/i915: don't get/put PC8 when getting/putting power wells
  drm/i915: make intel_aux_display_runtime_get get runtime PM, not PC8
  ...

Conflicts:
	drivers/gpu/drm/i915/intel_display.c
	drivers/gpu/drm/i915/intel_dp.c
parents 2844ea3f 698b3135
...@@ -726,7 +726,8 @@ int drm_dp_aux_register_i2c_bus(struct drm_dp_aux *aux) ...@@ -726,7 +726,8 @@ int drm_dp_aux_register_i2c_bus(struct drm_dp_aux *aux)
aux->ddc.dev.parent = aux->dev; aux->ddc.dev.parent = aux->dev;
aux->ddc.dev.of_node = aux->dev->of_node; aux->ddc.dev.of_node = aux->dev->of_node;
strncpy(aux->ddc.name, dev_name(aux->dev), sizeof(aux->ddc.name)); strlcpy(aux->ddc.name, aux->name ? aux->name : dev_name(aux->dev),
sizeof(aux->ddc.name));
return i2c_add_adapter(&aux->ddc); return i2c_add_adapter(&aux->ddc);
} }
......
...@@ -402,7 +402,7 @@ int i915_parse_cmds(struct intel_ring_buffer *ring, ...@@ -402,7 +402,7 @@ int i915_parse_cmds(struct intel_ring_buffer *ring,
length = ((*cmd & desc->length.mask) + LENGTH_BIAS); length = ((*cmd & desc->length.mask) + LENGTH_BIAS);
if ((batch_end - cmd) < length) { if ((batch_end - cmd) < length) {
DRM_DEBUG_DRIVER("CMD: Command length exceeds batch length: 0x%08X length=%d batchlen=%ld\n", DRM_DEBUG_DRIVER("CMD: Command length exceeds batch length: 0x%08X length=%d batchlen=%td\n",
*cmd, *cmd,
length, length,
(unsigned long)(batch_end - cmd)); (unsigned long)(batch_end - cmd));
......
...@@ -299,28 +299,62 @@ static int i915_gem_stolen_list_info(struct seq_file *m, void *data) ...@@ -299,28 +299,62 @@ static int i915_gem_stolen_list_info(struct seq_file *m, void *data)
} while (0) } while (0)
struct file_stats { struct file_stats {
struct drm_i915_file_private *file_priv;
int count; int count;
size_t total, active, inactive, unbound; size_t total, unbound;
size_t global, shared;
size_t active, inactive;
}; };
static int per_file_stats(int id, void *ptr, void *data) static int per_file_stats(int id, void *ptr, void *data)
{ {
struct drm_i915_gem_object *obj = ptr; struct drm_i915_gem_object *obj = ptr;
struct file_stats *stats = data; struct file_stats *stats = data;
struct i915_vma *vma;
stats->count++; stats->count++;
stats->total += obj->base.size; stats->total += obj->base.size;
if (i915_gem_obj_ggtt_bound(obj)) { if (obj->base.name || obj->base.dma_buf)
if (!list_empty(&obj->ring_list)) stats->shared += obj->base.size;
stats->active += obj->base.size;
else if (USES_FULL_PPGTT(obj->base.dev)) {
stats->inactive += obj->base.size; list_for_each_entry(vma, &obj->vma_list, vma_link) {
struct i915_hw_ppgtt *ppgtt;
if (!drm_mm_node_allocated(&vma->node))
continue;
if (i915_is_ggtt(vma->vm)) {
stats->global += obj->base.size;
continue;
}
ppgtt = container_of(vma->vm, struct i915_hw_ppgtt, base);
if (ppgtt->ctx && ppgtt->ctx->file_priv != stats->file_priv)
continue;
if (obj->ring) /* XXX per-vma statistic */
stats->active += obj->base.size;
else
stats->inactive += obj->base.size;
return 0;
}
} else { } else {
if (!list_empty(&obj->global_list)) if (i915_gem_obj_ggtt_bound(obj)) {
stats->unbound += obj->base.size; stats->global += obj->base.size;
if (obj->ring)
stats->active += obj->base.size;
else
stats->inactive += obj->base.size;
return 0;
}
} }
if (!list_empty(&obj->global_list))
stats->unbound += obj->base.size;
return 0; return 0;
} }
...@@ -411,6 +445,7 @@ static int i915_gem_object_info(struct seq_file *m, void* data) ...@@ -411,6 +445,7 @@ static int i915_gem_object_info(struct seq_file *m, void* data)
struct task_struct *task; struct task_struct *task;
memset(&stats, 0, sizeof(stats)); memset(&stats, 0, sizeof(stats));
stats.file_priv = file->driver_priv;
idr_for_each(&file->object_idr, per_file_stats, &stats); idr_for_each(&file->object_idr, per_file_stats, &stats);
/* /*
* Although we have a valid reference on file->pid, that does * Although we have a valid reference on file->pid, that does
...@@ -420,12 +455,14 @@ static int i915_gem_object_info(struct seq_file *m, void* data) ...@@ -420,12 +455,14 @@ static int i915_gem_object_info(struct seq_file *m, void* data)
*/ */
rcu_read_lock(); rcu_read_lock();
task = pid_task(file->pid, PIDTYPE_PID); task = pid_task(file->pid, PIDTYPE_PID);
seq_printf(m, "%s: %u objects, %zu bytes (%zu active, %zu inactive, %zu unbound)\n", seq_printf(m, "%s: %u objects, %zu bytes (%zu active, %zu inactive, %zu global, %zu shared, %zu unbound)\n",
task ? task->comm : "<unknown>", task ? task->comm : "<unknown>",
stats.count, stats.count,
stats.total, stats.total,
stats.active, stats.active,
stats.inactive, stats.inactive,
stats.global,
stats.shared,
stats.unbound); stats.unbound);
rcu_read_unlock(); rcu_read_unlock();
} }
...@@ -1026,7 +1063,7 @@ static int i915_cur_delayinfo(struct seq_file *m, void *unused) ...@@ -1026,7 +1063,7 @@ static int i915_cur_delayinfo(struct seq_file *m, void *unused)
max_freq * GT_FREQUENCY_MULTIPLIER); max_freq * GT_FREQUENCY_MULTIPLIER);
seq_printf(m, "Max overclocked frequency: %dMHz\n", seq_printf(m, "Max overclocked frequency: %dMHz\n",
dev_priv->rps.hw_max * GT_FREQUENCY_MULTIPLIER); dev_priv->rps.max_freq * GT_FREQUENCY_MULTIPLIER);
} else if (IS_VALLEYVIEW(dev)) { } else if (IS_VALLEYVIEW(dev)) {
u32 freq_sts, val; u32 freq_sts, val;
...@@ -1498,8 +1535,8 @@ static int i915_ring_freq_table(struct seq_file *m, void *unused) ...@@ -1498,8 +1535,8 @@ static int i915_ring_freq_table(struct seq_file *m, void *unused)
seq_puts(m, "GPU freq (MHz)\tEffective CPU freq (MHz)\tEffective Ring freq (MHz)\n"); seq_puts(m, "GPU freq (MHz)\tEffective CPU freq (MHz)\tEffective Ring freq (MHz)\n");
for (gpu_freq = dev_priv->rps.min_delay; for (gpu_freq = dev_priv->rps.min_freq_softlimit;
gpu_freq <= dev_priv->rps.max_delay; gpu_freq <= dev_priv->rps.max_freq_softlimit;
gpu_freq++) { gpu_freq++) {
ia_freq = gpu_freq; ia_freq = gpu_freq;
sandybridge_pcode_read(dev_priv, sandybridge_pcode_read(dev_priv,
...@@ -2012,15 +2049,9 @@ static int i915_pc8_status(struct seq_file *m, void *unused) ...@@ -2012,15 +2049,9 @@ static int i915_pc8_status(struct seq_file *m, void *unused)
return 0; return 0;
} }
mutex_lock(&dev_priv->pc8.lock);
seq_printf(m, "Requirements met: %s\n",
yesno(dev_priv->pc8.requirements_met));
seq_printf(m, "GPU idle: %s\n", yesno(!dev_priv->mm.busy)); seq_printf(m, "GPU idle: %s\n", yesno(!dev_priv->mm.busy));
seq_printf(m, "Disable count: %d\n", dev_priv->pc8.disable_count);
seq_printf(m, "IRQs disabled: %s\n", seq_printf(m, "IRQs disabled: %s\n",
yesno(dev_priv->pc8.irqs_disabled)); yesno(dev_priv->pm.irqs_disabled));
seq_printf(m, "Enabled: %s\n", yesno(dev_priv->pc8.enabled));
mutex_unlock(&dev_priv->pc8.lock);
return 0; return 0;
} }
...@@ -2248,24 +2279,67 @@ static void intel_connector_info(struct seq_file *m, ...@@ -2248,24 +2279,67 @@ static void intel_connector_info(struct seq_file *m,
intel_seq_print_mode(m, 2, mode); intel_seq_print_mode(m, 2, mode);
} }
static bool cursor_active(struct drm_device *dev, int pipe)
{
struct drm_i915_private *dev_priv = dev->dev_private;
u32 state;
if (IS_845G(dev) || IS_I865G(dev))
state = I915_READ(_CURACNTR) & CURSOR_ENABLE;
else if (INTEL_INFO(dev)->gen <= 6 || IS_VALLEYVIEW(dev))
state = I915_READ(CURCNTR(pipe)) & CURSOR_MODE;
else
state = I915_READ(CURCNTR_IVB(pipe)) & CURSOR_MODE;
return state;
}
static bool cursor_position(struct drm_device *dev, int pipe, int *x, int *y)
{
struct drm_i915_private *dev_priv = dev->dev_private;
u32 pos;
if (IS_IVYBRIDGE(dev) || IS_HASWELL(dev) || IS_BROADWELL(dev))
pos = I915_READ(CURPOS_IVB(pipe));
else
pos = I915_READ(CURPOS(pipe));
*x = (pos >> CURSOR_X_SHIFT) & CURSOR_POS_MASK;
if (pos & (CURSOR_POS_SIGN << CURSOR_X_SHIFT))
*x = -*x;
*y = (pos >> CURSOR_Y_SHIFT) & CURSOR_POS_MASK;
if (pos & (CURSOR_POS_SIGN << CURSOR_Y_SHIFT))
*y = -*y;
return cursor_active(dev, pipe);
}
static int i915_display_info(struct seq_file *m, void *unused) static int i915_display_info(struct seq_file *m, void *unused)
{ {
struct drm_info_node *node = (struct drm_info_node *) m->private; struct drm_info_node *node = (struct drm_info_node *) m->private;
struct drm_device *dev = node->minor->dev; struct drm_device *dev = node->minor->dev;
struct drm_crtc *crtc; struct intel_crtc *crtc;
struct drm_connector *connector; struct drm_connector *connector;
drm_modeset_lock_all(dev); drm_modeset_lock_all(dev);
seq_printf(m, "CRTC info\n"); seq_printf(m, "CRTC info\n");
seq_printf(m, "---------\n"); seq_printf(m, "---------\n");
list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) { list_for_each_entry(crtc, &dev->mode_config.crtc_list, base.head) {
struct intel_crtc *intel_crtc = to_intel_crtc(crtc); bool active;
int x, y;
seq_printf(m, "CRTC %d: pipe: %c, active: %s\n", seq_printf(m, "CRTC %d: pipe: %c, active: %s\n",
crtc->base.id, pipe_name(intel_crtc->pipe), crtc->base.base.id, pipe_name(crtc->pipe),
intel_crtc->active ? "yes" : "no"); yesno(crtc->active));
if (intel_crtc->active) if (crtc->active)
intel_crtc_info(m, intel_crtc); intel_crtc_info(m, crtc);
active = cursor_position(dev, crtc->pipe, &x, &y);
seq_printf(m, "\tcursor visible? %s, position (%d, %d), addr 0x%08x, active? %s\n",
yesno(crtc->cursor_visible),
x, y, crtc->cursor_addr,
yesno(active));
} }
seq_printf(m, "\n"); seq_printf(m, "\n");
...@@ -2603,8 +2677,6 @@ static int vlv_pipe_crc_ctl_reg(struct drm_device *dev, ...@@ -2603,8 +2677,6 @@ static int vlv_pipe_crc_ctl_reg(struct drm_device *dev,
if (need_stable_symbols) { if (need_stable_symbols) {
uint32_t tmp = I915_READ(PORT_DFT2_G4X); uint32_t tmp = I915_READ(PORT_DFT2_G4X);
WARN_ON(!IS_G4X(dev));
tmp |= DC_BALANCE_RESET_VLV; tmp |= DC_BALANCE_RESET_VLV;
if (pipe == PIPE_A) if (pipe == PIPE_A)
tmp |= PIPE_A_SCRAMBLE_RESET; tmp |= PIPE_A_SCRAMBLE_RESET;
...@@ -3414,9 +3486,9 @@ i915_max_freq_get(void *data, u64 *val) ...@@ -3414,9 +3486,9 @@ i915_max_freq_get(void *data, u64 *val)
return ret; return ret;
if (IS_VALLEYVIEW(dev)) if (IS_VALLEYVIEW(dev))
*val = vlv_gpu_freq(dev_priv, dev_priv->rps.max_delay); *val = vlv_gpu_freq(dev_priv, dev_priv->rps.max_freq_softlimit);
else else
*val = dev_priv->rps.max_delay * GT_FREQUENCY_MULTIPLIER; *val = dev_priv->rps.max_freq_softlimit * GT_FREQUENCY_MULTIPLIER;
mutex_unlock(&dev_priv->rps.hw_lock); mutex_unlock(&dev_priv->rps.hw_lock);
return 0; return 0;
...@@ -3453,16 +3525,16 @@ i915_max_freq_set(void *data, u64 val) ...@@ -3453,16 +3525,16 @@ i915_max_freq_set(void *data, u64 val)
do_div(val, GT_FREQUENCY_MULTIPLIER); do_div(val, GT_FREQUENCY_MULTIPLIER);
rp_state_cap = I915_READ(GEN6_RP_STATE_CAP); rp_state_cap = I915_READ(GEN6_RP_STATE_CAP);
hw_max = dev_priv->rps.hw_max; hw_max = dev_priv->rps.max_freq;
hw_min = (rp_state_cap >> 16) & 0xff; hw_min = (rp_state_cap >> 16) & 0xff;
} }
if (val < hw_min || val > hw_max || val < dev_priv->rps.min_delay) { if (val < hw_min || val > hw_max || val < dev_priv->rps.min_freq_softlimit) {
mutex_unlock(&dev_priv->rps.hw_lock); mutex_unlock(&dev_priv->rps.hw_lock);
return -EINVAL; return -EINVAL;
} }
dev_priv->rps.max_delay = val; dev_priv->rps.max_freq_softlimit = val;
if (IS_VALLEYVIEW(dev)) if (IS_VALLEYVIEW(dev))
valleyview_set_rps(dev, val); valleyview_set_rps(dev, val);
...@@ -3495,9 +3567,9 @@ i915_min_freq_get(void *data, u64 *val) ...@@ -3495,9 +3567,9 @@ i915_min_freq_get(void *data, u64 *val)
return ret; return ret;
if (IS_VALLEYVIEW(dev)) if (IS_VALLEYVIEW(dev))
*val = vlv_gpu_freq(dev_priv, dev_priv->rps.min_delay); *val = vlv_gpu_freq(dev_priv, dev_priv->rps.min_freq_softlimit);
else else
*val = dev_priv->rps.min_delay * GT_FREQUENCY_MULTIPLIER; *val = dev_priv->rps.min_freq_softlimit * GT_FREQUENCY_MULTIPLIER;
mutex_unlock(&dev_priv->rps.hw_lock); mutex_unlock(&dev_priv->rps.hw_lock);
return 0; return 0;
...@@ -3534,16 +3606,16 @@ i915_min_freq_set(void *data, u64 val) ...@@ -3534,16 +3606,16 @@ i915_min_freq_set(void *data, u64 val)
do_div(val, GT_FREQUENCY_MULTIPLIER); do_div(val, GT_FREQUENCY_MULTIPLIER);
rp_state_cap = I915_READ(GEN6_RP_STATE_CAP); rp_state_cap = I915_READ(GEN6_RP_STATE_CAP);
hw_max = dev_priv->rps.hw_max; hw_max = dev_priv->rps.max_freq;
hw_min = (rp_state_cap >> 16) & 0xff; hw_min = (rp_state_cap >> 16) & 0xff;
} }
if (val < hw_min || val > hw_max || val > dev_priv->rps.max_delay) { if (val < hw_min || val > hw_max || val > dev_priv->rps.max_freq_softlimit) {
mutex_unlock(&dev_priv->rps.hw_lock); mutex_unlock(&dev_priv->rps.hw_lock);
return -EINVAL; return -EINVAL;
} }
dev_priv->rps.min_delay = val; dev_priv->rps.min_freq_softlimit = val;
if (IS_VALLEYVIEW(dev)) if (IS_VALLEYVIEW(dev))
valleyview_set_rps(dev, val); valleyview_set_rps(dev, val);
......
...@@ -1187,6 +1187,9 @@ intel_setup_mchbar(struct drm_device *dev) ...@@ -1187,6 +1187,9 @@ intel_setup_mchbar(struct drm_device *dev)
u32 temp; u32 temp;
bool enabled; bool enabled;
if (IS_VALLEYVIEW(dev))
return;
dev_priv->mchbar_need_disable = false; dev_priv->mchbar_need_disable = false;
if (IS_I915G(dev) || IS_I915GM(dev)) { if (IS_I915G(dev) || IS_I915GM(dev)) {
...@@ -1608,8 +1611,6 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags) ...@@ -1608,8 +1611,6 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags)
goto put_bridge; goto put_bridge;
} }
intel_uncore_early_sanitize(dev);
/* This must be called before any calls to HAS_PCH_* */ /* This must be called before any calls to HAS_PCH_* */
intel_detect_pch(dev); intel_detect_pch(dev);
...@@ -1822,8 +1823,6 @@ int i915_driver_unload(struct drm_device *dev) ...@@ -1822,8 +1823,6 @@ int i915_driver_unload(struct drm_device *dev)
cancel_work_sync(&dev_priv->gpu_error.work); cancel_work_sync(&dev_priv->gpu_error.work);
i915_destroy_error_state(dev); i915_destroy_error_state(dev);
cancel_delayed_work_sync(&dev_priv->pc8.enable_work);
if (dev->pdev->msi_enabled) if (dev->pdev->msi_enabled)
pci_disable_msi(dev->pdev); pci_disable_msi(dev->pdev);
......
...@@ -428,7 +428,6 @@ static int i915_drm_freeze(struct drm_device *dev) ...@@ -428,7 +428,6 @@ static int i915_drm_freeze(struct drm_device *dev)
/* We do a lot of poking in a lot of registers, make sure they work /* We do a lot of poking in a lot of registers, make sure they work
* properly. */ * properly. */
hsw_disable_package_c8(dev_priv);
intel_display_set_init_power(dev_priv, true); intel_display_set_init_power(dev_priv, true);
drm_kms_helper_poll_disable(dev); drm_kms_helper_poll_disable(dev);
...@@ -467,6 +466,7 @@ static int i915_drm_freeze(struct drm_device *dev) ...@@ -467,6 +466,7 @@ static int i915_drm_freeze(struct drm_device *dev)
i915_save_state(dev); i915_save_state(dev);
intel_opregion_fini(dev); intel_opregion_fini(dev);
intel_uncore_fini(dev);
console_lock(); console_lock();
intel_fbdev_set_suspend(dev, FBINFO_STATE_SUSPENDED); intel_fbdev_set_suspend(dev, FBINFO_STATE_SUSPENDED);
...@@ -603,10 +603,6 @@ static int __i915_drm_thaw(struct drm_device *dev, bool restore_gtt_mappings) ...@@ -603,10 +603,6 @@ static int __i915_drm_thaw(struct drm_device *dev, bool restore_gtt_mappings)
schedule_work(&dev_priv->console_resume_work); schedule_work(&dev_priv->console_resume_work);
} }
/* Undo what we did at i915_drm_freeze so the refcount goes back to the
* expected level. */
hsw_enable_package_c8(dev_priv);
mutex_lock(&dev_priv->modeset_restore_lock); mutex_lock(&dev_priv->modeset_restore_lock);
dev_priv->modeset_restore = MODESET_DONE; dev_priv->modeset_restore = MODESET_DONE;
mutex_unlock(&dev_priv->modeset_restore_lock); mutex_unlock(&dev_priv->modeset_restore_lock);
...@@ -848,6 +844,9 @@ static int i915_runtime_suspend(struct device *device) ...@@ -848,6 +844,9 @@ static int i915_runtime_suspend(struct device *device)
DRM_DEBUG_KMS("Suspending device\n"); DRM_DEBUG_KMS("Suspending device\n");
if (HAS_PC8(dev))
hsw_enable_pc8(dev_priv);
i915_gem_release_all_mmaps(dev_priv); i915_gem_release_all_mmaps(dev_priv);
del_timer_sync(&dev_priv->gpu_error.hangcheck_timer); del_timer_sync(&dev_priv->gpu_error.hangcheck_timer);
...@@ -862,6 +861,7 @@ static int i915_runtime_suspend(struct device *device) ...@@ -862,6 +861,7 @@ static int i915_runtime_suspend(struct device *device)
*/ */
intel_opregion_notify_adapter(dev, PCI_D1); intel_opregion_notify_adapter(dev, PCI_D1);
DRM_DEBUG_KMS("Device suspended\n");
return 0; return 0;
} }
...@@ -878,6 +878,10 @@ static int i915_runtime_resume(struct device *device) ...@@ -878,6 +878,10 @@ static int i915_runtime_resume(struct device *device)
intel_opregion_notify_adapter(dev, PCI_D0); intel_opregion_notify_adapter(dev, PCI_D0);
dev_priv->pm.suspended = false; dev_priv->pm.suspended = false;
if (HAS_PC8(dev))
hsw_disable_pc8(dev_priv);
DRM_DEBUG_KMS("Device resumed\n");
return 0; return 0;
} }
......
...@@ -406,6 +406,7 @@ struct drm_i915_error_state { ...@@ -406,6 +406,7 @@ struct drm_i915_error_state {
struct intel_connector; struct intel_connector;
struct intel_crtc_config; struct intel_crtc_config;
struct intel_plane_config;
struct intel_crtc; struct intel_crtc;
struct intel_limit; struct intel_limit;
struct dpll; struct dpll;
...@@ -444,6 +445,8 @@ struct drm_i915_display_funcs { ...@@ -444,6 +445,8 @@ struct drm_i915_display_funcs {
* fills out the pipe-config with the hw state. */ * fills out the pipe-config with the hw state. */
bool (*get_pipe_config)(struct intel_crtc *, bool (*get_pipe_config)(struct intel_crtc *,
struct intel_crtc_config *); struct intel_crtc_config *);
void (*get_plane_config)(struct intel_crtc *,
struct intel_plane_config *);
int (*crtc_mode_set)(struct drm_crtc *crtc, int (*crtc_mode_set)(struct drm_crtc *crtc,
int x, int y, int x, int y,
struct drm_framebuffer *old_fb); struct drm_framebuffer *old_fb);
...@@ -459,8 +462,9 @@ struct drm_i915_display_funcs { ...@@ -459,8 +462,9 @@ struct drm_i915_display_funcs {
struct drm_framebuffer *fb, struct drm_framebuffer *fb,
struct drm_i915_gem_object *obj, struct drm_i915_gem_object *obj,
uint32_t flags); uint32_t flags);
int (*update_plane)(struct drm_crtc *crtc, struct drm_framebuffer *fb, int (*update_primary_plane)(struct drm_crtc *crtc,
int x, int y); struct drm_framebuffer *fb,
int x, int y);
void (*hpd_irq_setup)(struct drm_device *dev); void (*hpd_irq_setup)(struct drm_device *dev);
/* clock updates for mode set */ /* clock updates for mode set */
/* cursor updates */ /* cursor updates */
...@@ -721,6 +725,8 @@ struct i915_hw_ppgtt { ...@@ -721,6 +725,8 @@ struct i915_hw_ppgtt {
dma_addr_t *gen8_pt_dma_addr[4]; dma_addr_t *gen8_pt_dma_addr[4];
}; };
struct i915_hw_context *ctx;
int (*enable)(struct i915_hw_ppgtt *ppgtt); int (*enable)(struct i915_hw_ppgtt *ppgtt);
int (*switch_mm)(struct i915_hw_ppgtt *ppgtt, int (*switch_mm)(struct i915_hw_ppgtt *ppgtt,
struct intel_ring_buffer *ring, struct intel_ring_buffer *ring,
...@@ -976,13 +982,24 @@ struct intel_gen6_power_mgmt { ...@@ -976,13 +982,24 @@ struct intel_gen6_power_mgmt {
struct work_struct work; struct work_struct work;
u32 pm_iir; u32 pm_iir;
u8 cur_delay; /* Frequencies are stored in potentially platform dependent multiples.
u8 min_delay; * In other words, *_freq needs to be multiplied by X to be interesting.
u8 max_delay; * Soft limits are those which are used for the dynamic reclocking done
u8 rpe_delay; * by the driver (raise frequencies under heavy loads, and lower for
u8 rp1_delay; * lighter loads). Hard limits are those imposed by the hardware.
u8 rp0_delay; *
u8 hw_max; * A distinction is made for overclocking, which is never enabled by
* default, and is considered to be above the hard limit if it's
* possible at all.
*/
u8 cur_freq; /* Current frequency (cached, may not == HW) */
u8 min_freq_softlimit; /* Minimum frequency permitted by the driver */
u8 max_freq_softlimit; /* Max frequency permitted by the driver */
u8 max_freq; /* Maximum frequency, RP0 if not overclocking */
u8 min_freq; /* AKA RPn. Minimum frequency */
u8 efficient_freq; /* AKA RPe. Pre-determined balanced frequency */
u8 rp1_freq; /* "less than" RP0 power/freqency */
u8 rp0_freq; /* Non-overclocked max frequency. */
bool rp_up_masked; bool rp_up_masked;
bool rp_down_masked; bool rp_down_masked;
...@@ -1333,43 +1350,19 @@ struct ilk_wm_values { ...@@ -1333,43 +1350,19 @@ struct ilk_wm_values {
}; };
/* /*
* This struct tracks the state needed for the Package C8+ feature. * This struct helps tracking the state needed for runtime PM, which puts the
* * device in PCI D3 state. Notice that when this happens, nothing on the
* Package states C8 and deeper are really deep PC states that can only be * graphics device works, even register access, so we don't get interrupts nor
* reached when all the devices on the system allow it, so even if the graphics * anything else.
* device allows PC8+, it doesn't mean the system will actually get to these
* states.
*
* Our driver only allows PC8+ when all the outputs are disabled, the power well
* is disabled and the GPU is idle. When these conditions are met, we manually
* do the other conditions: disable the interrupts, clocks and switch LCPLL
* refclk to Fclk.
* *
* When we really reach PC8 or deeper states (not just when we allow it) we lose * Every piece of our code that needs to actually touch the hardware needs to
* the state of some registers, so when we come back from PC8+ we need to * either call intel_runtime_pm_get or call intel_display_power_get with the
* restore this state. We don't get into PC8+ if we're not in RC6, so we don't * appropriate power domain.
* need to take care of the registers kept by RC6.
* *
* The interrupt disabling is part of the requirements. We can only leave the * Our driver uses the autosuspend delay feature, which means we'll only really
* PCH HPD interrupts enabled. If we're in PC8+ and we get another interrupt we * suspend if we stay with zero refcount for a certain amount of time. The
* can lock the machine. * default value is currently very conservative (see intel_init_runtime_pm), but
* * it can be changed with the standard runtime PM files from sysfs.
* Ideally every piece of our code that needs PC8+ disabled would call
* hsw_disable_package_c8, which would increment disable_count and prevent the
* system from reaching PC8+. But we don't have a symmetric way to do this for
* everything, so we have the requirements_met variable. When we switch
* requirements_met to true we decrease disable_count, and increase it in the
* opposite case. The requirements_met variable is true when all the CRTCs,
* encoders and the power well are disabled.
*
* In addition to everything, we only actually enable PC8+ if disable_count
* stays at zero for at least some seconds. This is implemented with the
* enable_work variable. We do this so we don't enable/disable PC8 dozens of
* consecutive times when all screens are disabled and some background app
* queries the state of our connectors, or we have some application constantly
* waking up to use the GPU. Only after the enable_work function actually
* enables PC8+ the "enable" variable will become true, which means that it can
* be false even if disable_count is 0.
* *
* The irqs_disabled variable becomes true exactly after we disable the IRQs and * The irqs_disabled variable becomes true exactly after we disable the IRQs and
* goes back to false exactly before we reenable the IRQs. We use this variable * goes back to false exactly before we reenable the IRQs. We use this variable
...@@ -1379,16 +1372,11 @@ struct ilk_wm_values { ...@@ -1379,16 +1372,11 @@ struct ilk_wm_values {
* inside struct regsave so when we restore the IRQs they will contain the * inside struct regsave so when we restore the IRQs they will contain the
* latest expected values. * latest expected values.
* *
* For more, read "Display Sequences for Package C8" on our documentation. * For more, read the Documentation/power/runtime_pm.txt.
*/ */
struct i915_package_c8 { struct i915_runtime_pm {
bool requirements_met; bool suspended;
bool irqs_disabled; bool irqs_disabled;
/* Only true after the delayed work task actually enables it. */
bool enabled;
int disable_count;
struct mutex lock;
struct delayed_work enable_work;
struct { struct {
uint32_t deimr; uint32_t deimr;
...@@ -1399,10 +1387,6 @@ struct i915_package_c8 { ...@@ -1399,10 +1387,6 @@ struct i915_package_c8 {
} regsave; } regsave;
}; };
struct i915_runtime_pm {
bool suspended;
};
enum intel_pipe_crc_source { enum intel_pipe_crc_source {
INTEL_PIPE_CRC_SOURCE_NONE, INTEL_PIPE_CRC_SOURCE_NONE,
INTEL_PIPE_CRC_SOURCE_PLANE1, INTEL_PIPE_CRC_SOURCE_PLANE1,
...@@ -1610,6 +1594,7 @@ typedef struct drm_i915_private { ...@@ -1610,6 +1594,7 @@ typedef struct drm_i915_private {
u32 fdi_rx_config; u32 fdi_rx_config;
u32 suspend_count;
struct i915_suspend_saved_registers regfile; struct i915_suspend_saved_registers regfile;
struct { struct {
...@@ -1629,8 +1614,6 @@ typedef struct drm_i915_private { ...@@ -1629,8 +1614,6 @@ typedef struct drm_i915_private {
struct ilk_wm_values hw; struct ilk_wm_values hw;
} wm; } wm;
struct i915_package_c8 pc8;
struct i915_runtime_pm pm; struct i915_runtime_pm pm;
/* Old dri1 support infrastructure, beware the dragons ya fools entering /* Old dri1 support infrastructure, beware the dragons ya fools entering
...@@ -1638,8 +1621,6 @@ typedef struct drm_i915_private { ...@@ -1638,8 +1621,6 @@ typedef struct drm_i915_private {
struct i915_dri1_state dri1; struct i915_dri1_state dri1;
/* Old ums support infrastructure, same warning applies. */ /* Old ums support infrastructure, same warning applies. */
struct i915_ums_state ums; struct i915_ums_state ums;
u32 suspend_count;
} drm_i915_private_t; } drm_i915_private_t;
static inline struct drm_i915_private *to_i915(const struct drm_device *dev) static inline struct drm_i915_private *to_i915(const struct drm_device *dev)
...@@ -2092,8 +2073,6 @@ struct i915_params { ...@@ -2092,8 +2073,6 @@ struct i915_params {
unsigned int preliminary_hw_support; unsigned int preliminary_hw_support;
int disable_power_well; int disable_power_well;
int enable_ips; int enable_ips;
int enable_pc8;
int pc8_timeout;
int invert_brightness; int invert_brightness;
int enable_cmd_parser; int enable_cmd_parser;
/* leave bools at the end to not create holes */ /* leave bools at the end to not create holes */
...@@ -2757,6 +2736,12 @@ void vlv_force_wake_put(struct drm_i915_private *dev_priv, int fw_engine); ...@@ -2757,6 +2736,12 @@ void vlv_force_wake_put(struct drm_i915_private *dev_priv, int fw_engine);
#define I915_READ_NOTRACE(reg) dev_priv->uncore.funcs.mmio_readl(dev_priv, (reg), false) #define I915_READ_NOTRACE(reg) dev_priv->uncore.funcs.mmio_readl(dev_priv, (reg), false)
#define I915_WRITE_NOTRACE(reg, val) dev_priv->uncore.funcs.mmio_writel(dev_priv, (reg), (val), false) #define I915_WRITE_NOTRACE(reg, val) dev_priv->uncore.funcs.mmio_writel(dev_priv, (reg), (val), false)
/* Be very careful with read/write 64-bit values. On 32-bit machines, they
* will be implemented using 2 32-bit writes in an arbitrary order with
* an arbitrary delay between them. This can cause the hardware to
* act upon the intermediate value, possibly leading to corruption and
* machine death. You have been warned.
*/
#define I915_WRITE64(reg, val) dev_priv->uncore.funcs.mmio_writeq(dev_priv, (reg), (val), true) #define I915_WRITE64(reg, val) dev_priv->uncore.funcs.mmio_writeq(dev_priv, (reg), (val), true)
#define I915_READ64(reg) dev_priv->uncore.funcs.mmio_readq(dev_priv, (reg), true) #define I915_READ64(reg) dev_priv->uncore.funcs.mmio_readq(dev_priv, (reg), true)
......
...@@ -510,12 +510,10 @@ i915_gem_shmem_pread(struct drm_device *dev, ...@@ -510,12 +510,10 @@ i915_gem_shmem_pread(struct drm_device *dev,
mutex_lock(&dev->struct_mutex); mutex_lock(&dev->struct_mutex);
next_page:
mark_page_accessed(page);
if (ret) if (ret)
goto out; goto out;
next_page:
remain -= page_length; remain -= page_length;
user_data += page_length; user_data += page_length;
offset += page_length; offset += page_length;
...@@ -695,9 +693,8 @@ shmem_pwrite_fast(struct page *page, int shmem_page_offset, int page_length, ...@@ -695,9 +693,8 @@ shmem_pwrite_fast(struct page *page, int shmem_page_offset, int page_length,
if (needs_clflush_before) if (needs_clflush_before)
drm_clflush_virt_range(vaddr + shmem_page_offset, drm_clflush_virt_range(vaddr + shmem_page_offset,
page_length); page_length);
ret = __copy_from_user_inatomic_nocache(vaddr + shmem_page_offset, ret = __copy_from_user_inatomic(vaddr + shmem_page_offset,
user_data, user_data, page_length);
page_length);
if (needs_clflush_after) if (needs_clflush_after)
drm_clflush_virt_range(vaddr + shmem_page_offset, drm_clflush_virt_range(vaddr + shmem_page_offset,
page_length); page_length);
...@@ -831,13 +828,10 @@ i915_gem_shmem_pwrite(struct drm_device *dev, ...@@ -831,13 +828,10 @@ i915_gem_shmem_pwrite(struct drm_device *dev,
mutex_lock(&dev->struct_mutex); mutex_lock(&dev->struct_mutex);
next_page:
set_page_dirty(page);
mark_page_accessed(page);
if (ret) if (ret)
goto out; goto out;
next_page:
remain -= page_length; remain -= page_length;
user_data += page_length; user_data += page_length;
offset += page_length; offset += page_length;
...@@ -1041,7 +1035,7 @@ static int __wait_seqno(struct intel_ring_buffer *ring, u32 seqno, ...@@ -1041,7 +1035,7 @@ static int __wait_seqno(struct intel_ring_buffer *ring, u32 seqno,
unsigned long timeout_expire; unsigned long timeout_expire;
int ret; int ret;
WARN(dev_priv->pc8.irqs_disabled, "IRQs disabled\n"); WARN(dev_priv->pm.irqs_disabled, "IRQs disabled\n");
if (i915_seqno_passed(ring->get_seqno(ring, true), seqno)) if (i915_seqno_passed(ring->get_seqno(ring, true), seqno))
return 0; return 0;
...@@ -3473,7 +3467,7 @@ int i915_gem_object_set_cache_level(struct drm_i915_gem_object *obj, ...@@ -3473,7 +3467,7 @@ int i915_gem_object_set_cache_level(struct drm_i915_gem_object *obj,
enum i915_cache_level cache_level) enum i915_cache_level cache_level)
{ {
struct drm_device *dev = obj->base.dev; struct drm_device *dev = obj->base.dev;
struct i915_vma *vma; struct i915_vma *vma, *next;
int ret; int ret;
if (obj->cache_level == cache_level) if (obj->cache_level == cache_level)
...@@ -3484,13 +3478,11 @@ int i915_gem_object_set_cache_level(struct drm_i915_gem_object *obj, ...@@ -3484,13 +3478,11 @@ int i915_gem_object_set_cache_level(struct drm_i915_gem_object *obj,
return -EBUSY; return -EBUSY;
} }
list_for_each_entry(vma, &obj->vma_list, vma_link) { list_for_each_entry_safe(vma, next, &obj->vma_list, vma_link) {
if (!i915_gem_valid_gtt_space(dev, &vma->node, cache_level)) { if (!i915_gem_valid_gtt_space(dev, &vma->node, cache_level)) {
ret = i915_vma_unbind(vma); ret = i915_vma_unbind(vma);
if (ret) if (ret)
return ret; return ret;
break;
} }
} }
......
...@@ -215,6 +215,7 @@ create_vm_for_ctx(struct drm_device *dev, struct i915_hw_context *ctx) ...@@ -215,6 +215,7 @@ create_vm_for_ctx(struct drm_device *dev, struct i915_hw_context *ctx)
return ERR_PTR(ret); return ERR_PTR(ret);
} }
ppgtt->ctx = ctx;
return ppgtt; return ppgtt;
} }
...@@ -775,9 +776,11 @@ int i915_switch_context(struct intel_ring_buffer *ring, ...@@ -775,9 +776,11 @@ int i915_switch_context(struct intel_ring_buffer *ring,
BUG_ON(file && to == NULL); BUG_ON(file && to == NULL);
/* We have the fake context, but don't supports switching. */ /* We have the fake context */
if (!HAS_HW_CONTEXTS(ring->dev)) if (!HAS_HW_CONTEXTS(ring->dev)) {
ring->last_context = to;
return 0; return 0;
}
return do_switch(ring, to); return do_switch(ring, to);
} }
......
...@@ -30,6 +30,8 @@ ...@@ -30,6 +30,8 @@
#include "i915_trace.h" #include "i915_trace.h"
#include "intel_drv.h" #include "intel_drv.h"
static void gen8_setup_private_ppat(struct drm_i915_private *dev_priv);
bool intel_enable_ppgtt(struct drm_device *dev, bool full) bool intel_enable_ppgtt(struct drm_device *dev, bool full)
{ {
if (i915.enable_ppgtt == 0 || !HAS_ALIASING_PPGTT(dev)) if (i915.enable_ppgtt == 0 || !HAS_ALIASING_PPGTT(dev))
...@@ -1191,9 +1193,8 @@ static int gen6_ppgtt_init(struct i915_hw_ppgtt *ppgtt) ...@@ -1191,9 +1193,8 @@ static int gen6_ppgtt_init(struct i915_hw_ppgtt *ppgtt)
ppgtt->base.clear_range = gen6_ppgtt_clear_range; ppgtt->base.clear_range = gen6_ppgtt_clear_range;
ppgtt->base.insert_entries = gen6_ppgtt_insert_entries; ppgtt->base.insert_entries = gen6_ppgtt_insert_entries;
ppgtt->base.cleanup = gen6_ppgtt_cleanup; ppgtt->base.cleanup = gen6_ppgtt_cleanup;
ppgtt->base.scratch = dev_priv->gtt.base.scratch;
ppgtt->base.start = 0; ppgtt->base.start = 0;
ppgtt->base.total = GEN6_PPGTT_PD_ENTRIES * I915_PPGTT_PT_ENTRIES * PAGE_SIZE; ppgtt->base.total = ppgtt->num_pd_entries * I915_PPGTT_PT_ENTRIES * PAGE_SIZE;
ppgtt->debug_dump = gen6_dump_ppgtt; ppgtt->debug_dump = gen6_dump_ppgtt;
ppgtt->pd_offset = ppgtt->pd_offset =
...@@ -1214,6 +1215,7 @@ int i915_gem_init_ppgtt(struct drm_device *dev, struct i915_hw_ppgtt *ppgtt) ...@@ -1214,6 +1215,7 @@ int i915_gem_init_ppgtt(struct drm_device *dev, struct i915_hw_ppgtt *ppgtt)
int ret = 0; int ret = 0;
ppgtt->base.dev = dev; ppgtt->base.dev = dev;
ppgtt->base.scratch = dev_priv->gtt.base.scratch;
if (INTEL_INFO(dev)->gen < 8) if (INTEL_INFO(dev)->gen < 8)
ret = gen6_ppgtt_init(ppgtt); ret = gen6_ppgtt_init(ppgtt);
...@@ -1243,8 +1245,6 @@ ppgtt_bind_vma(struct i915_vma *vma, ...@@ -1243,8 +1245,6 @@ ppgtt_bind_vma(struct i915_vma *vma,
enum i915_cache_level cache_level, enum i915_cache_level cache_level,
u32 flags) u32 flags)
{ {
WARN_ON(flags);
vma->vm->insert_entries(vma->vm, vma->obj->pages, vma->node.start, vma->vm->insert_entries(vma->vm, vma->obj->pages, vma->node.start,
cache_level); cache_level);
} }
...@@ -1372,8 +1372,10 @@ void i915_gem_restore_gtt_mappings(struct drm_device *dev) ...@@ -1372,8 +1372,10 @@ void i915_gem_restore_gtt_mappings(struct drm_device *dev)
} }
if (INTEL_INFO(dev)->gen >= 8) if (INTEL_INFO(dev)->gen >= 8) {
gen8_setup_private_ppat(dev_priv);
return; return;
}
list_for_each_entry(vm, &dev_priv->vm_list, global_link) { list_for_each_entry(vm, &dev_priv->vm_list, global_link) {
/* TODO: Perhaps it shouldn't be gen6 specific */ /* TODO: Perhaps it shouldn't be gen6 specific */
......
...@@ -850,10 +850,12 @@ static void i915_record_ring_state(struct drm_device *dev, ...@@ -850,10 +850,12 @@ static void i915_record_ring_state(struct drm_device *dev,
} }
break; break;
case 7: case 7:
ering->vm_info.pp_dir_base = RING_PP_DIR_BASE(ring); ering->vm_info.pp_dir_base =
I915_READ(RING_PP_DIR_BASE(ring));
break; break;
case 6: case 6:
ering->vm_info.pp_dir_base = RING_PP_DIR_BASE_READ(ring); ering->vm_info.pp_dir_base =
I915_READ(RING_PP_DIR_BASE_READ(ring));
break; break;
} }
} }
......
...@@ -86,9 +86,9 @@ ironlake_enable_display_irq(drm_i915_private_t *dev_priv, u32 mask) ...@@ -86,9 +86,9 @@ ironlake_enable_display_irq(drm_i915_private_t *dev_priv, u32 mask)
{ {
assert_spin_locked(&dev_priv->irq_lock); assert_spin_locked(&dev_priv->irq_lock);
if (dev_priv->pc8.irqs_disabled) { if (dev_priv->pm.irqs_disabled) {
WARN(1, "IRQs disabled\n"); WARN(1, "IRQs disabled\n");
dev_priv->pc8.regsave.deimr &= ~mask; dev_priv->pm.regsave.deimr &= ~mask;
return; return;
} }
...@@ -104,9 +104,9 @@ ironlake_disable_display_irq(drm_i915_private_t *dev_priv, u32 mask) ...@@ -104,9 +104,9 @@ ironlake_disable_display_irq(drm_i915_private_t *dev_priv, u32 mask)
{ {
assert_spin_locked(&dev_priv->irq_lock); assert_spin_locked(&dev_priv->irq_lock);
if (dev_priv->pc8.irqs_disabled) { if (dev_priv->pm.irqs_disabled) {
WARN(1, "IRQs disabled\n"); WARN(1, "IRQs disabled\n");
dev_priv->pc8.regsave.deimr |= mask; dev_priv->pm.regsave.deimr |= mask;
return; return;
} }
...@@ -129,10 +129,10 @@ static void ilk_update_gt_irq(struct drm_i915_private *dev_priv, ...@@ -129,10 +129,10 @@ static void ilk_update_gt_irq(struct drm_i915_private *dev_priv,
{ {
assert_spin_locked(&dev_priv->irq_lock); assert_spin_locked(&dev_priv->irq_lock);
if (dev_priv->pc8.irqs_disabled) { if (dev_priv->pm.irqs_disabled) {
WARN(1, "IRQs disabled\n"); WARN(1, "IRQs disabled\n");
dev_priv->pc8.regsave.gtimr &= ~interrupt_mask; dev_priv->pm.regsave.gtimr &= ~interrupt_mask;
dev_priv->pc8.regsave.gtimr |= (~enabled_irq_mask & dev_priv->pm.regsave.gtimr |= (~enabled_irq_mask &
interrupt_mask); interrupt_mask);
return; return;
} }
...@@ -167,10 +167,10 @@ static void snb_update_pm_irq(struct drm_i915_private *dev_priv, ...@@ -167,10 +167,10 @@ static void snb_update_pm_irq(struct drm_i915_private *dev_priv,
assert_spin_locked(&dev_priv->irq_lock); assert_spin_locked(&dev_priv->irq_lock);
if (dev_priv->pc8.irqs_disabled) { if (dev_priv->pm.irqs_disabled) {
WARN(1, "IRQs disabled\n"); WARN(1, "IRQs disabled\n");
dev_priv->pc8.regsave.gen6_pmimr &= ~interrupt_mask; dev_priv->pm.regsave.gen6_pmimr &= ~interrupt_mask;
dev_priv->pc8.regsave.gen6_pmimr |= (~enabled_irq_mask & dev_priv->pm.regsave.gen6_pmimr |= (~enabled_irq_mask &
interrupt_mask); interrupt_mask);
return; return;
} }
...@@ -313,11 +313,11 @@ static void ibx_display_interrupt_update(struct drm_i915_private *dev_priv, ...@@ -313,11 +313,11 @@ static void ibx_display_interrupt_update(struct drm_i915_private *dev_priv,
assert_spin_locked(&dev_priv->irq_lock); assert_spin_locked(&dev_priv->irq_lock);
if (dev_priv->pc8.irqs_disabled && if (dev_priv->pm.irqs_disabled &&
(interrupt_mask & SDE_HOTPLUG_MASK_CPT)) { (interrupt_mask & SDE_HOTPLUG_MASK_CPT)) {
WARN(1, "IRQs disabled\n"); WARN(1, "IRQs disabled\n");
dev_priv->pc8.regsave.sdeimr &= ~interrupt_mask; dev_priv->pm.regsave.sdeimr &= ~interrupt_mask;
dev_priv->pc8.regsave.sdeimr |= (~enabled_irq_mask & dev_priv->pm.regsave.sdeimr |= (~enabled_irq_mask &
interrupt_mask); interrupt_mask);
return; return;
} }
...@@ -1075,7 +1075,7 @@ void gen6_set_pm_mask(struct drm_i915_private *dev_priv, ...@@ -1075,7 +1075,7 @@ void gen6_set_pm_mask(struct drm_i915_private *dev_priv,
u32 pm_iir, int new_delay) u32 pm_iir, int new_delay)
{ {
if (pm_iir & GEN6_PM_RP_UP_THRESHOLD) { if (pm_iir & GEN6_PM_RP_UP_THRESHOLD) {
if (new_delay >= dev_priv->rps.max_delay) { if (new_delay >= dev_priv->rps.max_freq_softlimit) {
/* Mask UP THRESHOLD Interrupts */ /* Mask UP THRESHOLD Interrupts */
I915_WRITE(GEN6_PMINTRMSK, I915_WRITE(GEN6_PMINTRMSK,
I915_READ(GEN6_PMINTRMSK) | I915_READ(GEN6_PMINTRMSK) |
...@@ -1090,7 +1090,7 @@ void gen6_set_pm_mask(struct drm_i915_private *dev_priv, ...@@ -1090,7 +1090,7 @@ void gen6_set_pm_mask(struct drm_i915_private *dev_priv,
dev_priv->rps.rp_down_masked = false; dev_priv->rps.rp_down_masked = false;
} }
} else if (pm_iir & GEN6_PM_RP_DOWN_THRESHOLD) { } else if (pm_iir & GEN6_PM_RP_DOWN_THRESHOLD) {
if (new_delay <= dev_priv->rps.min_delay) { if (new_delay <= dev_priv->rps.min_freq_softlimit) {
/* Mask DOWN THRESHOLD Interrupts */ /* Mask DOWN THRESHOLD Interrupts */
I915_WRITE(GEN6_PMINTRMSK, I915_WRITE(GEN6_PMINTRMSK,
I915_READ(GEN6_PMINTRMSK) | I915_READ(GEN6_PMINTRMSK) |
...@@ -1136,38 +1136,39 @@ static void gen6_pm_rps_work(struct work_struct *work) ...@@ -1136,38 +1136,39 @@ static void gen6_pm_rps_work(struct work_struct *work)
adj *= 2; adj *= 2;
else else
adj = 1; adj = 1;
new_delay = dev_priv->rps.cur_delay + adj; new_delay = dev_priv->rps.cur_freq + adj;
/* /*
* For better performance, jump directly * For better performance, jump directly
* to RPe if we're below it. * to RPe if we're below it.
*/ */
if (new_delay < dev_priv->rps.rpe_delay) if (new_delay < dev_priv->rps.efficient_freq)
new_delay = dev_priv->rps.rpe_delay; new_delay = dev_priv->rps.efficient_freq;
} else if (pm_iir & GEN6_PM_RP_DOWN_TIMEOUT) { } else if (pm_iir & GEN6_PM_RP_DOWN_TIMEOUT) {
if (dev_priv->rps.cur_delay > dev_priv->rps.rpe_delay) if (dev_priv->rps.cur_freq > dev_priv->rps.efficient_freq)
new_delay = dev_priv->rps.rpe_delay; new_delay = dev_priv->rps.efficient_freq;
else else
new_delay = dev_priv->rps.min_delay; new_delay = dev_priv->rps.min_freq_softlimit;
adj = 0; adj = 0;
} else if (pm_iir & GEN6_PM_RP_DOWN_THRESHOLD) { } else if (pm_iir & GEN6_PM_RP_DOWN_THRESHOLD) {
if (adj < 0) if (adj < 0)
adj *= 2; adj *= 2;
else else
adj = -1; adj = -1;
new_delay = dev_priv->rps.cur_delay + adj; new_delay = dev_priv->rps.cur_freq + adj;
} else { /* unknown event */ } else { /* unknown event */
new_delay = dev_priv->rps.cur_delay; new_delay = dev_priv->rps.cur_freq;
} }
/* sysfs frequency interfaces may have snuck in while servicing the /* sysfs frequency interfaces may have snuck in while servicing the
* interrupt * interrupt
*/ */
new_delay = clamp_t(int, new_delay, new_delay = clamp_t(int, new_delay,
dev_priv->rps.min_delay, dev_priv->rps.max_delay); dev_priv->rps.min_freq_softlimit,
dev_priv->rps.max_freq_softlimit);
gen6_set_pm_mask(dev_priv, pm_iir, new_delay); gen6_set_pm_mask(dev_priv, pm_iir, new_delay);
dev_priv->rps.last_adj = new_delay - dev_priv->rps.cur_delay; dev_priv->rps.last_adj = new_delay - dev_priv->rps.cur_freq;
if (IS_VALLEYVIEW(dev_priv->dev)) if (IS_VALLEYVIEW(dev_priv->dev))
valleyview_set_rps(dev_priv->dev, new_delay); valleyview_set_rps(dev_priv->dev, new_delay);
...@@ -3074,7 +3075,7 @@ static void valleyview_display_irqs_uninstall(struct drm_i915_private *dev_priv) ...@@ -3074,7 +3075,7 @@ static void valleyview_display_irqs_uninstall(struct drm_i915_private *dev_priv)
iir_mask = I915_DISPLAY_PORT_INTERRUPT | iir_mask = I915_DISPLAY_PORT_INTERRUPT |
I915_DISPLAY_PIPE_A_EVENT_INTERRUPT | I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
I915_DISPLAY_PIPE_A_EVENT_INTERRUPT; I915_DISPLAY_PIPE_B_EVENT_INTERRUPT;
dev_priv->irq_mask |= iir_mask; dev_priv->irq_mask |= iir_mask;
I915_WRITE(VLV_IER, ~dev_priv->irq_mask); I915_WRITE(VLV_IER, ~dev_priv->irq_mask);
...@@ -4118,32 +4119,32 @@ void intel_hpd_init(struct drm_device *dev) ...@@ -4118,32 +4119,32 @@ void intel_hpd_init(struct drm_device *dev)
spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags); spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
} }
/* Disable interrupts so we can allow Package C8+. */ /* Disable interrupts so we can allow runtime PM. */
void hsw_pc8_disable_interrupts(struct drm_device *dev) void hsw_runtime_pm_disable_interrupts(struct drm_device *dev)
{ {
struct drm_i915_private *dev_priv = dev->dev_private; struct drm_i915_private *dev_priv = dev->dev_private;
unsigned long irqflags; unsigned long irqflags;
spin_lock_irqsave(&dev_priv->irq_lock, irqflags); spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
dev_priv->pc8.regsave.deimr = I915_READ(DEIMR); dev_priv->pm.regsave.deimr = I915_READ(DEIMR);
dev_priv->pc8.regsave.sdeimr = I915_READ(SDEIMR); dev_priv->pm.regsave.sdeimr = I915_READ(SDEIMR);
dev_priv->pc8.regsave.gtimr = I915_READ(GTIMR); dev_priv->pm.regsave.gtimr = I915_READ(GTIMR);
dev_priv->pc8.regsave.gtier = I915_READ(GTIER); dev_priv->pm.regsave.gtier = I915_READ(GTIER);
dev_priv->pc8.regsave.gen6_pmimr = I915_READ(GEN6_PMIMR); dev_priv->pm.regsave.gen6_pmimr = I915_READ(GEN6_PMIMR);
ironlake_disable_display_irq(dev_priv, 0xffffffff); ironlake_disable_display_irq(dev_priv, 0xffffffff);
ibx_disable_display_interrupt(dev_priv, 0xffffffff); ibx_disable_display_interrupt(dev_priv, 0xffffffff);
ilk_disable_gt_irq(dev_priv, 0xffffffff); ilk_disable_gt_irq(dev_priv, 0xffffffff);
snb_disable_pm_irq(dev_priv, 0xffffffff); snb_disable_pm_irq(dev_priv, 0xffffffff);
dev_priv->pc8.irqs_disabled = true; dev_priv->pm.irqs_disabled = true;
spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags); spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
} }
/* Restore interrupts so we can recover from Package C8+. */ /* Restore interrupts so we can recover from runtime PM. */
void hsw_pc8_restore_interrupts(struct drm_device *dev) void hsw_runtime_pm_restore_interrupts(struct drm_device *dev)
{ {
struct drm_i915_private *dev_priv = dev->dev_private; struct drm_i915_private *dev_priv = dev->dev_private;
unsigned long irqflags; unsigned long irqflags;
...@@ -4163,13 +4164,13 @@ void hsw_pc8_restore_interrupts(struct drm_device *dev) ...@@ -4163,13 +4164,13 @@ void hsw_pc8_restore_interrupts(struct drm_device *dev)
val = I915_READ(GEN6_PMIMR); val = I915_READ(GEN6_PMIMR);
WARN(val != 0xffffffff, "GEN6_PMIMR is 0x%08x\n", val); WARN(val != 0xffffffff, "GEN6_PMIMR is 0x%08x\n", val);
dev_priv->pc8.irqs_disabled = false; dev_priv->pm.irqs_disabled = false;
ironlake_enable_display_irq(dev_priv, ~dev_priv->pc8.regsave.deimr); ironlake_enable_display_irq(dev_priv, ~dev_priv->pm.regsave.deimr);
ibx_enable_display_interrupt(dev_priv, ~dev_priv->pc8.regsave.sdeimr); ibx_enable_display_interrupt(dev_priv, ~dev_priv->pm.regsave.sdeimr);
ilk_enable_gt_irq(dev_priv, ~dev_priv->pc8.regsave.gtimr); ilk_enable_gt_irq(dev_priv, ~dev_priv->pm.regsave.gtimr);
snb_enable_pm_irq(dev_priv, ~dev_priv->pc8.regsave.gen6_pmimr); snb_enable_pm_irq(dev_priv, ~dev_priv->pm.regsave.gen6_pmimr);
I915_WRITE(GTIER, dev_priv->pc8.regsave.gtier); I915_WRITE(GTIER, dev_priv->pm.regsave.gtier);
spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags); spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
} }
...@@ -42,8 +42,6 @@ struct i915_params i915 __read_mostly = { ...@@ -42,8 +42,6 @@ struct i915_params i915 __read_mostly = {
.disable_power_well = 1, .disable_power_well = 1,
.enable_ips = 1, .enable_ips = 1,
.fastboot = 0, .fastboot = 0,
.enable_pc8 = 1,
.pc8_timeout = 5000,
.prefault_disable = 0, .prefault_disable = 0,
.reset = true, .reset = true,
.invert_brightness = 0, .invert_brightness = 0,
...@@ -135,14 +133,6 @@ module_param_named(fastboot, i915.fastboot, bool, 0600); ...@@ -135,14 +133,6 @@ module_param_named(fastboot, i915.fastboot, bool, 0600);
MODULE_PARM_DESC(fastboot, MODULE_PARM_DESC(fastboot,
"Try to skip unnecessary mode sets at boot time (default: false)"); "Try to skip unnecessary mode sets at boot time (default: false)");
module_param_named(enable_pc8, i915.enable_pc8, int, 0600);
MODULE_PARM_DESC(enable_pc8,
"Enable support for low power package C states (PC8+) (default: true)");
module_param_named(pc8_timeout, i915.pc8_timeout, int, 0600);
MODULE_PARM_DESC(pc8_timeout,
"Number of msecs of idleness required to enter PC8+ (default: 5000)");
module_param_named(prefault_disable, i915.prefault_disable, bool, 0600); module_param_named(prefault_disable, i915.prefault_disable, bool, 0600);
MODULE_PARM_DESC(prefault_disable, MODULE_PARM_DESC(prefault_disable,
"Disable page prefaulting for pread/pwrite/reloc (default:false). " "Disable page prefaulting for pread/pwrite/reloc (default:false). "
......
...@@ -748,6 +748,7 @@ enum punit_power_well { ...@@ -748,6 +748,7 @@ enum punit_power_well {
#define RING_INSTPS(base) ((base)+0x70) #define RING_INSTPS(base) ((base)+0x70)
#define RING_DMA_FADD(base) ((base)+0x78) #define RING_DMA_FADD(base) ((base)+0x78)
#define RING_INSTPM(base) ((base)+0xc0) #define RING_INSTPM(base) ((base)+0xc0)
#define RING_MI_MODE(base) ((base)+0x9c)
#define INSTPS 0x02070 /* 965+ only */ #define INSTPS 0x02070 /* 965+ only */
#define INSTDONE1 0x0207c /* 965+ only */ #define INSTDONE1 0x0207c /* 965+ only */
#define ACTHD_I965 0x02074 #define ACTHD_I965 0x02074
...@@ -824,6 +825,7 @@ enum punit_power_well { ...@@ -824,6 +825,7 @@ enum punit_power_well {
# define VS_TIMER_DISPATCH (1 << 6) # define VS_TIMER_DISPATCH (1 << 6)
# define MI_FLUSH_ENABLE (1 << 12) # define MI_FLUSH_ENABLE (1 << 12)
# define ASYNC_FLIP_PERF_DISABLE (1 << 14) # define ASYNC_FLIP_PERF_DISABLE (1 << 14)
# define MODE_IDLE (1 << 9)
#define GEN6_GT_MODE 0x20d0 #define GEN6_GT_MODE 0x20d0
#define GEN7_GT_MODE 0x7008 #define GEN7_GT_MODE 0x7008
...@@ -3551,7 +3553,11 @@ enum punit_power_well { ...@@ -3551,7 +3553,11 @@ enum punit_power_well {
/* New style CUR*CNTR flags */ /* New style CUR*CNTR flags */
#define CURSOR_MODE 0x27 #define CURSOR_MODE 0x27
#define CURSOR_MODE_DISABLE 0x00 #define CURSOR_MODE_DISABLE 0x00
#define CURSOR_MODE_128_32B_AX 0x02
#define CURSOR_MODE_256_32B_AX 0x03
#define CURSOR_MODE_64_32B_AX 0x07 #define CURSOR_MODE_64_32B_AX 0x07
#define CURSOR_MODE_128_ARGB_AX ((1 << 5) | CURSOR_MODE_128_32B_AX)
#define CURSOR_MODE_256_ARGB_AX ((1 << 5) | CURSOR_MODE_256_32B_AX)
#define CURSOR_MODE_64_ARGB_AX ((1 << 5) | CURSOR_MODE_64_32B_AX) #define CURSOR_MODE_64_ARGB_AX ((1 << 5) | CURSOR_MODE_64_32B_AX)
#define MCURSOR_PIPE_SELECT (1 << 28) #define MCURSOR_PIPE_SELECT (1 << 28)
#define MCURSOR_PIPE_A 0x00 #define MCURSOR_PIPE_A 0x00
......
...@@ -269,7 +269,7 @@ static ssize_t gt_cur_freq_mhz_show(struct device *kdev, ...@@ -269,7 +269,7 @@ static ssize_t gt_cur_freq_mhz_show(struct device *kdev,
freq = vlv_punit_read(dev_priv, PUNIT_REG_GPU_FREQ_STS); freq = vlv_punit_read(dev_priv, PUNIT_REG_GPU_FREQ_STS);
ret = vlv_gpu_freq(dev_priv, (freq >> 8) & 0xff); ret = vlv_gpu_freq(dev_priv, (freq >> 8) & 0xff);
} else { } else {
ret = dev_priv->rps.cur_delay * GT_FREQUENCY_MULTIPLIER; ret = dev_priv->rps.cur_freq * GT_FREQUENCY_MULTIPLIER;
} }
mutex_unlock(&dev_priv->rps.hw_lock); mutex_unlock(&dev_priv->rps.hw_lock);
...@@ -284,7 +284,7 @@ static ssize_t vlv_rpe_freq_mhz_show(struct device *kdev, ...@@ -284,7 +284,7 @@ static ssize_t vlv_rpe_freq_mhz_show(struct device *kdev,
struct drm_i915_private *dev_priv = dev->dev_private; struct drm_i915_private *dev_priv = dev->dev_private;
return snprintf(buf, PAGE_SIZE, "%d\n", return snprintf(buf, PAGE_SIZE, "%d\n",
vlv_gpu_freq(dev_priv, dev_priv->rps.rpe_delay)); vlv_gpu_freq(dev_priv, dev_priv->rps.efficient_freq));
} }
static ssize_t gt_max_freq_mhz_show(struct device *kdev, struct device_attribute *attr, char *buf) static ssize_t gt_max_freq_mhz_show(struct device *kdev, struct device_attribute *attr, char *buf)
...@@ -298,9 +298,9 @@ static ssize_t gt_max_freq_mhz_show(struct device *kdev, struct device_attribute ...@@ -298,9 +298,9 @@ static ssize_t gt_max_freq_mhz_show(struct device *kdev, struct device_attribute
mutex_lock(&dev_priv->rps.hw_lock); mutex_lock(&dev_priv->rps.hw_lock);
if (IS_VALLEYVIEW(dev_priv->dev)) if (IS_VALLEYVIEW(dev_priv->dev))
ret = vlv_gpu_freq(dev_priv, dev_priv->rps.max_delay); ret = vlv_gpu_freq(dev_priv, dev_priv->rps.max_freq_softlimit);
else else
ret = dev_priv->rps.max_delay * GT_FREQUENCY_MULTIPLIER; ret = dev_priv->rps.max_freq_softlimit * GT_FREQUENCY_MULTIPLIER;
mutex_unlock(&dev_priv->rps.hw_lock); mutex_unlock(&dev_priv->rps.hw_lock);
return snprintf(buf, PAGE_SIZE, "%d\n", ret); return snprintf(buf, PAGE_SIZE, "%d\n", ret);
...@@ -313,7 +313,7 @@ static ssize_t gt_max_freq_mhz_store(struct device *kdev, ...@@ -313,7 +313,7 @@ static ssize_t gt_max_freq_mhz_store(struct device *kdev,
struct drm_minor *minor = dev_to_drm_minor(kdev); struct drm_minor *minor = dev_to_drm_minor(kdev);
struct drm_device *dev = minor->dev; struct drm_device *dev = minor->dev;
struct drm_i915_private *dev_priv = dev->dev_private; struct drm_i915_private *dev_priv = dev->dev_private;
u32 val, rp_state_cap, hw_max, hw_min, non_oc_max; u32 val;
ssize_t ret; ssize_t ret;
ret = kstrtou32(buf, 0, &val); ret = kstrtou32(buf, 0, &val);
...@@ -324,44 +324,35 @@ static ssize_t gt_max_freq_mhz_store(struct device *kdev, ...@@ -324,44 +324,35 @@ static ssize_t gt_max_freq_mhz_store(struct device *kdev,
mutex_lock(&dev_priv->rps.hw_lock); mutex_lock(&dev_priv->rps.hw_lock);
if (IS_VALLEYVIEW(dev_priv->dev)) { if (IS_VALLEYVIEW(dev_priv->dev))
val = vlv_freq_opcode(dev_priv, val); val = vlv_freq_opcode(dev_priv, val);
else
hw_max = valleyview_rps_max_freq(dev_priv);
hw_min = valleyview_rps_min_freq(dev_priv);
non_oc_max = hw_max;
} else {
val /= GT_FREQUENCY_MULTIPLIER; val /= GT_FREQUENCY_MULTIPLIER;
rp_state_cap = I915_READ(GEN6_RP_STATE_CAP); if (val < dev_priv->rps.min_freq ||
hw_max = dev_priv->rps.hw_max; val > dev_priv->rps.max_freq ||
non_oc_max = (rp_state_cap & 0xff); val < dev_priv->rps.min_freq_softlimit) {
hw_min = ((rp_state_cap & 0xff0000) >> 16);
}
if (val < hw_min || val > hw_max ||
val < dev_priv->rps.min_delay) {
mutex_unlock(&dev_priv->rps.hw_lock); mutex_unlock(&dev_priv->rps.hw_lock);
return -EINVAL; return -EINVAL;
} }
if (val > non_oc_max) if (val > dev_priv->rps.rp0_freq)
DRM_DEBUG("User requested overclocking to %d\n", DRM_DEBUG("User requested overclocking to %d\n",
val * GT_FREQUENCY_MULTIPLIER); val * GT_FREQUENCY_MULTIPLIER);
dev_priv->rps.max_delay = val; dev_priv->rps.max_freq_softlimit = val;
if (dev_priv->rps.cur_delay > val) { if (dev_priv->rps.cur_freq > val) {
if (IS_VALLEYVIEW(dev)) if (IS_VALLEYVIEW(dev))
valleyview_set_rps(dev, val); valleyview_set_rps(dev, val);
else else
gen6_set_rps(dev, val); gen6_set_rps(dev, val);
} else if (!IS_VALLEYVIEW(dev)) {
/* We still need gen6_set_rps to process the new max_delay and
* update the interrupt limits even though frequency request is
* unchanged. */
gen6_set_rps(dev, dev_priv->rps.cur_freq);
} }
else if (!IS_VALLEYVIEW(dev))
/* We still need gen6_set_rps to process the new max_delay
and update the interrupt limits even though frequency
request is unchanged. */
gen6_set_rps(dev, dev_priv->rps.cur_delay);
mutex_unlock(&dev_priv->rps.hw_lock); mutex_unlock(&dev_priv->rps.hw_lock);
...@@ -379,9 +370,9 @@ static ssize_t gt_min_freq_mhz_show(struct device *kdev, struct device_attribute ...@@ -379,9 +370,9 @@ static ssize_t gt_min_freq_mhz_show(struct device *kdev, struct device_attribute
mutex_lock(&dev_priv->rps.hw_lock); mutex_lock(&dev_priv->rps.hw_lock);
if (IS_VALLEYVIEW(dev_priv->dev)) if (IS_VALLEYVIEW(dev_priv->dev))
ret = vlv_gpu_freq(dev_priv, dev_priv->rps.min_delay); ret = vlv_gpu_freq(dev_priv, dev_priv->rps.min_freq_softlimit);
else else
ret = dev_priv->rps.min_delay * GT_FREQUENCY_MULTIPLIER; ret = dev_priv->rps.min_freq_softlimit * GT_FREQUENCY_MULTIPLIER;
mutex_unlock(&dev_priv->rps.hw_lock); mutex_unlock(&dev_priv->rps.hw_lock);
return snprintf(buf, PAGE_SIZE, "%d\n", ret); return snprintf(buf, PAGE_SIZE, "%d\n", ret);
...@@ -394,7 +385,7 @@ static ssize_t gt_min_freq_mhz_store(struct device *kdev, ...@@ -394,7 +385,7 @@ static ssize_t gt_min_freq_mhz_store(struct device *kdev,
struct drm_minor *minor = dev_to_drm_minor(kdev); struct drm_minor *minor = dev_to_drm_minor(kdev);
struct drm_device *dev = minor->dev; struct drm_device *dev = minor->dev;
struct drm_i915_private *dev_priv = dev->dev_private; struct drm_i915_private *dev_priv = dev->dev_private;
u32 val, rp_state_cap, hw_max, hw_min; u32 val;
ssize_t ret; ssize_t ret;
ret = kstrtou32(buf, 0, &val); ret = kstrtou32(buf, 0, &val);
...@@ -405,37 +396,31 @@ static ssize_t gt_min_freq_mhz_store(struct device *kdev, ...@@ -405,37 +396,31 @@ static ssize_t gt_min_freq_mhz_store(struct device *kdev,
mutex_lock(&dev_priv->rps.hw_lock); mutex_lock(&dev_priv->rps.hw_lock);
if (IS_VALLEYVIEW(dev)) { if (IS_VALLEYVIEW(dev))
val = vlv_freq_opcode(dev_priv, val); val = vlv_freq_opcode(dev_priv, val);
else
hw_max = valleyview_rps_max_freq(dev_priv);
hw_min = valleyview_rps_min_freq(dev_priv);
} else {
val /= GT_FREQUENCY_MULTIPLIER; val /= GT_FREQUENCY_MULTIPLIER;
rp_state_cap = I915_READ(GEN6_RP_STATE_CAP); if (val < dev_priv->rps.min_freq ||
hw_max = dev_priv->rps.hw_max; val > dev_priv->rps.max_freq ||
hw_min = ((rp_state_cap & 0xff0000) >> 16); val > dev_priv->rps.max_freq_softlimit) {
}
if (val < hw_min || val > hw_max || val > dev_priv->rps.max_delay) {
mutex_unlock(&dev_priv->rps.hw_lock); mutex_unlock(&dev_priv->rps.hw_lock);
return -EINVAL; return -EINVAL;
} }
dev_priv->rps.min_delay = val; dev_priv->rps.min_freq_softlimit = val;
if (dev_priv->rps.cur_delay < val) { if (dev_priv->rps.cur_freq < val) {
if (IS_VALLEYVIEW(dev)) if (IS_VALLEYVIEW(dev))
valleyview_set_rps(dev, val); valleyview_set_rps(dev, val);
else else
gen6_set_rps(dev, val); gen6_set_rps(dev, val);
} else if (!IS_VALLEYVIEW(dev)) {
/* We still need gen6_set_rps to process the new min_delay and
* update the interrupt limits even though frequency request is
* unchanged. */
gen6_set_rps(dev, dev_priv->rps.cur_freq);
} }
else if (!IS_VALLEYVIEW(dev))
/* We still need gen6_set_rps to process the new min_delay
and update the interrupt limits even though frequency
request is unchanged. */
gen6_set_rps(dev, dev_priv->rps.cur_delay);
mutex_unlock(&dev_priv->rps.hw_lock); mutex_unlock(&dev_priv->rps.hw_lock);
......
...@@ -238,14 +238,16 @@ TRACE_EVENT(i915_gem_evict_vm, ...@@ -238,14 +238,16 @@ TRACE_EVENT(i915_gem_evict_vm,
TP_ARGS(vm), TP_ARGS(vm),
TP_STRUCT__entry( TP_STRUCT__entry(
__field(u32, dev)
__field(struct i915_address_space *, vm) __field(struct i915_address_space *, vm)
), ),
TP_fast_assign( TP_fast_assign(
__entry->dev = vm->dev->primary->index;
__entry->vm = vm; __entry->vm = vm;
), ),
TP_printk("dev=%d, vm=%p", __entry->vm->dev->primary->index, __entry->vm) TP_printk("dev=%d, vm=%p", __entry->dev, __entry->vm)
); );
TRACE_EVENT(i915_gem_ring_sync_to, TRACE_EVENT(i915_gem_ring_sync_to,
......
...@@ -839,7 +839,7 @@ void intel_crt_init(struct drm_device *dev) ...@@ -839,7 +839,7 @@ void intel_crt_init(struct drm_device *dev)
intel_connector_attach_encoder(intel_connector, &crt->base); intel_connector_attach_encoder(intel_connector, &crt->base);
crt->base.type = INTEL_OUTPUT_ANALOG; crt->base.type = INTEL_OUTPUT_ANALOG;
crt->base.cloneable = true; crt->base.cloneable = (1 << INTEL_OUTPUT_DVO) | (1 << INTEL_OUTPUT_HDMI);
if (IS_I830(dev)) if (IS_I830(dev))
crt->base.crtc_mask = (1 << 0); crt->base.crtc_mask = (1 << 0);
else else
......
...@@ -1340,6 +1340,7 @@ static void intel_ddi_post_disable(struct intel_encoder *intel_encoder) ...@@ -1340,6 +1340,7 @@ static void intel_ddi_post_disable(struct intel_encoder *intel_encoder)
if (type == INTEL_OUTPUT_DISPLAYPORT || type == INTEL_OUTPUT_EDP) { if (type == INTEL_OUTPUT_DISPLAYPORT || type == INTEL_OUTPUT_EDP) {
struct intel_dp *intel_dp = enc_to_intel_dp(encoder); struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
intel_dp_sink_dpms(intel_dp, DRM_MODE_DPMS_OFF); intel_dp_sink_dpms(intel_dp, DRM_MODE_DPMS_OFF);
intel_edp_panel_vdd_on(intel_dp);
intel_edp_panel_off(intel_dp); intel_edp_panel_off(intel_dp);
} }
...@@ -1717,7 +1718,7 @@ void intel_ddi_init(struct drm_device *dev, enum port port) ...@@ -1717,7 +1718,7 @@ void intel_ddi_init(struct drm_device *dev, enum port port)
intel_encoder->type = INTEL_OUTPUT_UNKNOWN; intel_encoder->type = INTEL_OUTPUT_UNKNOWN;
intel_encoder->crtc_mask = (1 << 0) | (1 << 1) | (1 << 2); intel_encoder->crtc_mask = (1 << 0) | (1 << 1) | (1 << 2);
intel_encoder->cloneable = false; intel_encoder->cloneable = 0;
intel_encoder->hot_plug = intel_ddi_hot_plug; intel_encoder->hot_plug = intel_ddi_hot_plug;
if (init_dp) if (init_dp)
......
This diff is collapsed.
This diff is collapsed.
...@@ -78,6 +78,12 @@ ...@@ -78,6 +78,12 @@
#define MAX_OUTPUTS 6 #define MAX_OUTPUTS 6
/* maximum connectors per crtcs in the mode set */ /* maximum connectors per crtcs in the mode set */
/* Maximum cursor sizes */
#define GEN2_CURSOR_WIDTH 64
#define GEN2_CURSOR_HEIGHT 64
#define CURSOR_WIDTH 256
#define CURSOR_HEIGHT 256
#define INTEL_I2C_BUS_DVO 1 #define INTEL_I2C_BUS_DVO 1
#define INTEL_I2C_BUS_SDVO 2 #define INTEL_I2C_BUS_SDVO 2
...@@ -113,6 +119,7 @@ struct intel_fbdev { ...@@ -113,6 +119,7 @@ struct intel_fbdev {
struct intel_framebuffer *fb; struct intel_framebuffer *fb;
struct list_head fbdev_list; struct list_head fbdev_list;
struct drm_display_mode *our_mode; struct drm_display_mode *our_mode;
int preferred_bpp;
}; };
struct intel_encoder { struct intel_encoder {
...@@ -124,11 +131,7 @@ struct intel_encoder { ...@@ -124,11 +131,7 @@ struct intel_encoder {
struct intel_crtc *new_crtc; struct intel_crtc *new_crtc;
int type; int type;
/* unsigned int cloneable;
* Intel hw has only one MUX where encoders could be clone, hence a
* simple flag is enough to compute the possible_clones mask.
*/
bool cloneable;
bool connectors_active; bool connectors_active;
void (*hot_plug)(struct intel_encoder *); void (*hot_plug)(struct intel_encoder *);
bool (*compute_config)(struct intel_encoder *, bool (*compute_config)(struct intel_encoder *,
...@@ -218,6 +221,12 @@ typedef struct dpll { ...@@ -218,6 +221,12 @@ typedef struct dpll {
int p; int p;
} intel_clock_t; } intel_clock_t;
struct intel_plane_config {
bool tiled;
int size;
u32 base;
};
struct intel_crtc_config { struct intel_crtc_config {
/** /**
* quirks - bitfield with hw state readout quirks * quirks - bitfield with hw state readout quirks
...@@ -364,8 +373,10 @@ struct intel_crtc { ...@@ -364,8 +373,10 @@ struct intel_crtc {
uint32_t cursor_addr; uint32_t cursor_addr;
int16_t cursor_x, cursor_y; int16_t cursor_x, cursor_y;
int16_t cursor_width, cursor_height; int16_t cursor_width, cursor_height;
int16_t max_cursor_width, max_cursor_height;
bool cursor_visible; bool cursor_visible;
struct intel_plane_config plane_config;
struct intel_crtc_config config; struct intel_crtc_config config;
struct intel_crtc_config *new_config; struct intel_crtc_config *new_config;
bool new_enabled; bool new_enabled;
...@@ -485,8 +496,7 @@ struct intel_dp { ...@@ -485,8 +496,7 @@ struct intel_dp {
uint8_t dpcd[DP_RECEIVER_CAP_SIZE]; uint8_t dpcd[DP_RECEIVER_CAP_SIZE];
uint8_t psr_dpcd[EDP_PSR_RECEIVER_CAP_SIZE]; uint8_t psr_dpcd[EDP_PSR_RECEIVER_CAP_SIZE];
uint8_t downstream_ports[DP_MAX_DOWNSTREAM_PORTS]; uint8_t downstream_ports[DP_MAX_DOWNSTREAM_PORTS];
struct i2c_adapter adapter; struct drm_dp_aux aux;
struct i2c_algo_dp_aux_data algo;
uint8_t train_set[4]; uint8_t train_set[4];
int panel_power_up_delay; int panel_power_up_delay;
int panel_power_down_delay; int panel_power_down_delay;
...@@ -618,8 +628,8 @@ void ilk_enable_gt_irq(struct drm_i915_private *dev_priv, uint32_t mask); ...@@ -618,8 +628,8 @@ void ilk_enable_gt_irq(struct drm_i915_private *dev_priv, uint32_t mask);
void ilk_disable_gt_irq(struct drm_i915_private *dev_priv, uint32_t mask); void ilk_disable_gt_irq(struct drm_i915_private *dev_priv, uint32_t mask);
void snb_enable_pm_irq(struct drm_i915_private *dev_priv, uint32_t mask); void snb_enable_pm_irq(struct drm_i915_private *dev_priv, uint32_t mask);
void snb_disable_pm_irq(struct drm_i915_private *dev_priv, uint32_t mask); void snb_disable_pm_irq(struct drm_i915_private *dev_priv, uint32_t mask);
void hsw_pc8_disable_interrupts(struct drm_device *dev); void hsw_runtime_pm_disable_interrupts(struct drm_device *dev);
void hsw_pc8_restore_interrupts(struct drm_device *dev); void hsw_runtime_pm_restore_interrupts(struct drm_device *dev);
/* intel_crt.c */ /* intel_crt.c */
...@@ -722,9 +732,8 @@ unsigned long intel_gen4_compute_page_offset(int *x, int *y, ...@@ -722,9 +732,8 @@ unsigned long intel_gen4_compute_page_offset(int *x, int *y,
unsigned int bpp, unsigned int bpp,
unsigned int pitch); unsigned int pitch);
void intel_display_handle_reset(struct drm_device *dev); void intel_display_handle_reset(struct drm_device *dev);
void hsw_enable_pc8_work(struct work_struct *__work); void hsw_enable_pc8(struct drm_i915_private *dev_priv);
void hsw_enable_package_c8(struct drm_i915_private *dev_priv); void hsw_disable_pc8(struct drm_i915_private *dev_priv);
void hsw_disable_package_c8(struct drm_i915_private *dev_priv);
void intel_dp_get_m_n(struct intel_crtc *crtc, void intel_dp_get_m_n(struct intel_crtc *crtc,
struct intel_crtc_config *pipe_config); struct intel_crtc_config *pipe_config);
int intel_dotclock_calculate(int link_freq, const struct intel_link_m_n *m_n); int intel_dotclock_calculate(int link_freq, const struct intel_link_m_n *m_n);
...@@ -740,6 +749,7 @@ intel_display_port_power_domain(struct intel_encoder *intel_encoder); ...@@ -740,6 +749,7 @@ intel_display_port_power_domain(struct intel_encoder *intel_encoder);
int valleyview_get_vco(struct drm_i915_private *dev_priv); int valleyview_get_vco(struct drm_i915_private *dev_priv);
void intel_mode_from_pipe_config(struct drm_display_mode *mode, void intel_mode_from_pipe_config(struct drm_display_mode *mode,
struct intel_crtc_config *pipe_config); struct intel_crtc_config *pipe_config);
int intel_format_to_fourcc(int format);
/* intel_dp.c */ /* intel_dp.c */
void intel_dp_init(struct drm_device *dev, int output_reg, enum port port); void intel_dp_init(struct drm_device *dev, int output_reg, enum port port);
...@@ -757,6 +767,7 @@ bool intel_dp_compute_config(struct intel_encoder *encoder, ...@@ -757,6 +767,7 @@ bool intel_dp_compute_config(struct intel_encoder *encoder,
bool intel_dp_is_edp(struct drm_device *dev, enum port port); bool intel_dp_is_edp(struct drm_device *dev, enum port port);
void intel_edp_backlight_on(struct intel_dp *intel_dp); void intel_edp_backlight_on(struct intel_dp *intel_dp);
void intel_edp_backlight_off(struct intel_dp *intel_dp); void intel_edp_backlight_off(struct intel_dp *intel_dp);
void intel_edp_panel_vdd_on(struct intel_dp *intel_dp);
void intel_edp_panel_on(struct intel_dp *intel_dp); void intel_edp_panel_on(struct intel_dp *intel_dp);
void intel_edp_panel_off(struct intel_dp *intel_dp); void intel_edp_panel_off(struct intel_dp *intel_dp);
void intel_edp_psr_enable(struct intel_dp *intel_dp); void intel_edp_psr_enable(struct intel_dp *intel_dp);
......
...@@ -620,7 +620,7 @@ bool intel_dsi_init(struct drm_device *dev) ...@@ -620,7 +620,7 @@ bool intel_dsi_init(struct drm_device *dev)
intel_encoder->type = INTEL_OUTPUT_DSI; intel_encoder->type = INTEL_OUTPUT_DSI;
intel_encoder->crtc_mask = (1 << 0); /* XXX */ intel_encoder->crtc_mask = (1 << 0); /* XXX */
intel_encoder->cloneable = false; intel_encoder->cloneable = 0;
drm_connector_init(dev, connector, &intel_dsi_connector_funcs, drm_connector_init(dev, connector, &intel_dsi_connector_funcs,
DRM_MODE_CONNECTOR_DSI); DRM_MODE_CONNECTOR_DSI);
......
...@@ -522,14 +522,15 @@ void intel_dvo_init(struct drm_device *dev) ...@@ -522,14 +522,15 @@ void intel_dvo_init(struct drm_device *dev)
intel_encoder->crtc_mask = (1 << 0) | (1 << 1); intel_encoder->crtc_mask = (1 << 0) | (1 << 1);
switch (dvo->type) { switch (dvo->type) {
case INTEL_DVO_CHIP_TMDS: case INTEL_DVO_CHIP_TMDS:
intel_encoder->cloneable = true; intel_encoder->cloneable = (1 << INTEL_OUTPUT_ANALOG) |
(1 << INTEL_OUTPUT_DVO);
drm_connector_init(dev, connector, drm_connector_init(dev, connector,
&intel_dvo_connector_funcs, &intel_dvo_connector_funcs,
DRM_MODE_CONNECTOR_DVII); DRM_MODE_CONNECTOR_DVII);
encoder_type = DRM_MODE_ENCODER_TMDS; encoder_type = DRM_MODE_ENCODER_TMDS;
break; break;
case INTEL_DVO_CHIP_LVDS: case INTEL_DVO_CHIP_LVDS:
intel_encoder->cloneable = false; intel_encoder->cloneable = 0;
drm_connector_init(dev, connector, drm_connector_init(dev, connector,
&intel_dvo_connector_funcs, &intel_dvo_connector_funcs,
DRM_MODE_CONNECTOR_LVDS); DRM_MODE_CONNECTOR_LVDS);
......
...@@ -128,6 +128,7 @@ static int intelfb_create(struct drm_fb_helper *helper, ...@@ -128,6 +128,7 @@ static int intelfb_create(struct drm_fb_helper *helper,
struct drm_framebuffer *fb; struct drm_framebuffer *fb;
struct drm_i915_gem_object *obj; struct drm_i915_gem_object *obj;
int size, ret; int size, ret;
bool prealloc = false;
mutex_lock(&dev->struct_mutex); mutex_lock(&dev->struct_mutex);
...@@ -139,6 +140,7 @@ static int intelfb_create(struct drm_fb_helper *helper, ...@@ -139,6 +140,7 @@ static int intelfb_create(struct drm_fb_helper *helper,
intel_fb = ifbdev->fb; intel_fb = ifbdev->fb;
} else { } else {
DRM_DEBUG_KMS("re-using BIOS fb\n"); DRM_DEBUG_KMS("re-using BIOS fb\n");
prealloc = true;
sizes->fb_width = intel_fb->base.width; sizes->fb_width = intel_fb->base.width;
sizes->fb_height = intel_fb->base.height; sizes->fb_height = intel_fb->base.height;
} }
...@@ -200,7 +202,7 @@ static int intelfb_create(struct drm_fb_helper *helper, ...@@ -200,7 +202,7 @@ static int intelfb_create(struct drm_fb_helper *helper,
* If the object is stolen however, it will be full of whatever * If the object is stolen however, it will be full of whatever
* garbage was left in there. * garbage was left in there.
*/ */
if (ifbdev->fb->obj->stolen) if (ifbdev->fb->obj->stolen && !prealloc)
memset_io(info->screen_base, 0, info->screen_size); memset_io(info->screen_base, 0, info->screen_size);
/* Use default scratch pixmap (info->pixmap.flags = FB_PIXMAP_SYSTEM) */ /* Use default scratch pixmap (info->pixmap.flags = FB_PIXMAP_SYSTEM) */
...@@ -454,27 +456,149 @@ static void intel_fbdev_destroy(struct drm_device *dev, ...@@ -454,27 +456,149 @@ static void intel_fbdev_destroy(struct drm_device *dev,
drm_framebuffer_remove(&ifbdev->fb->base); drm_framebuffer_remove(&ifbdev->fb->base);
} }
/*
* Build an intel_fbdev struct using a BIOS allocated framebuffer, if possible.
* The core display code will have read out the current plane configuration,
* so we use that to figure out if there's an object for us to use as the
* fb, and if so, we re-use it for the fbdev configuration.
*
* Note we only support a single fb shared across pipes for boot (mostly for
* fbcon), so we just find the biggest and use that.
*/
static bool intel_fbdev_init_bios(struct drm_device *dev,
struct intel_fbdev *ifbdev)
{
struct intel_framebuffer *fb = NULL;
struct drm_crtc *crtc;
struct intel_crtc *intel_crtc;
struct intel_plane_config *plane_config = NULL;
unsigned int max_size = 0;
if (!i915.fastboot)
return false;
/* Find the largest fb */
list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
intel_crtc = to_intel_crtc(crtc);
if (!intel_crtc->active || !crtc->primary->fb) {
DRM_DEBUG_KMS("pipe %c not active or no fb, skipping\n",
pipe_name(intel_crtc->pipe));
continue;
}
if (intel_crtc->plane_config.size > max_size) {
DRM_DEBUG_KMS("found possible fb from plane %c\n",
pipe_name(intel_crtc->pipe));
plane_config = &intel_crtc->plane_config;
fb = to_intel_framebuffer(crtc->primary->fb);
max_size = plane_config->size;
}
}
if (!fb) {
DRM_DEBUG_KMS("no active fbs found, not using BIOS config\n");
goto out;
}
/* Now make sure all the pipes will fit into it */
list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
unsigned int cur_size;
intel_crtc = to_intel_crtc(crtc);
if (!intel_crtc->active) {
DRM_DEBUG_KMS("pipe %c not active, skipping\n",
pipe_name(intel_crtc->pipe));
continue;
}
DRM_DEBUG_KMS("checking plane %c for BIOS fb\n",
pipe_name(intel_crtc->pipe));
/*
* See if the plane fb we found above will fit on this
* pipe. Note we need to use the selected fb's bpp rather
* than the current pipe's, since they could be different.
*/
cur_size = intel_crtc->config.adjusted_mode.crtc_hdisplay *
intel_crtc->config.adjusted_mode.crtc_vdisplay;
DRM_DEBUG_KMS("pipe %c area: %d\n", pipe_name(intel_crtc->pipe),
cur_size);
cur_size *= fb->base.bits_per_pixel / 8;
DRM_DEBUG_KMS("total size %d (bpp %d)\n", cur_size,
fb->base.bits_per_pixel / 8);
if (cur_size > max_size) {
DRM_DEBUG_KMS("fb not big enough for plane %c (%d vs %d)\n",
pipe_name(intel_crtc->pipe),
cur_size, max_size);
plane_config = NULL;
fb = NULL;
break;
}
DRM_DEBUG_KMS("fb big enough for plane %c (%d >= %d)\n",
pipe_name(intel_crtc->pipe),
max_size, cur_size);
}
if (!fb) {
DRM_DEBUG_KMS("BIOS fb not suitable for all pipes, not using\n");
goto out;
}
ifbdev->preferred_bpp = fb->base.bits_per_pixel;
ifbdev->fb = fb;
drm_framebuffer_reference(&ifbdev->fb->base);
/* Final pass to check if any active pipes don't have fbs */
list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
intel_crtc = to_intel_crtc(crtc);
if (!intel_crtc->active)
continue;
WARN(!crtc->primary->fb,
"re-used BIOS config but lost an fb on crtc %d\n",
crtc->base.id);
}
DRM_DEBUG_KMS("using BIOS fb for initial console\n");
return true;
out:
return false;
}
int intel_fbdev_init(struct drm_device *dev) int intel_fbdev_init(struct drm_device *dev)
{ {
struct intel_fbdev *ifbdev; struct intel_fbdev *ifbdev;
struct drm_i915_private *dev_priv = dev->dev_private; struct drm_i915_private *dev_priv = dev->dev_private;
int ret; int ret;
ifbdev = kzalloc(sizeof(*ifbdev), GFP_KERNEL); if (WARN_ON(INTEL_INFO(dev)->num_pipes == 0))
if (!ifbdev) return -ENODEV;
ifbdev = kzalloc(sizeof(struct intel_fbdev), GFP_KERNEL);
if (ifbdev == NULL)
return -ENOMEM; return -ENOMEM;
dev_priv->fbdev = ifbdev;
ifbdev->helper.funcs = &intel_fb_helper_funcs; ifbdev->helper.funcs = &intel_fb_helper_funcs;
if (!intel_fbdev_init_bios(dev, ifbdev))
ifbdev->preferred_bpp = 32;
ret = drm_fb_helper_init(dev, &ifbdev->helper, ret = drm_fb_helper_init(dev, &ifbdev->helper,
INTEL_INFO(dev)->num_pipes, INTEL_INFO(dev)->num_pipes, 4);
4);
if (ret) { if (ret) {
kfree(ifbdev); kfree(ifbdev);
return ret; return ret;
} }
dev_priv->fbdev = ifbdev;
drm_fb_helper_single_add_all_connectors(&ifbdev->helper); drm_fb_helper_single_add_all_connectors(&ifbdev->helper);
return 0; return 0;
...@@ -483,9 +607,10 @@ int intel_fbdev_init(struct drm_device *dev) ...@@ -483,9 +607,10 @@ int intel_fbdev_init(struct drm_device *dev)
void intel_fbdev_initial_config(struct drm_device *dev) void intel_fbdev_initial_config(struct drm_device *dev)
{ {
struct drm_i915_private *dev_priv = dev->dev_private; struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_fbdev *ifbdev = dev_priv->fbdev;
/* Due to peculiar init order wrt to hpd handling this is separate. */ /* Due to peculiar init order wrt to hpd handling this is separate. */
drm_fb_helper_initial_config(&dev_priv->fbdev->helper, 32); drm_fb_helper_initial_config(&ifbdev->helper, ifbdev->preferred_bpp);
} }
void intel_fbdev_fini(struct drm_device *dev) void intel_fbdev_fini(struct drm_device *dev)
...@@ -523,7 +648,8 @@ void intel_fbdev_set_suspend(struct drm_device *dev, int state) ...@@ -523,7 +648,8 @@ void intel_fbdev_set_suspend(struct drm_device *dev, int state)
void intel_fbdev_output_poll_changed(struct drm_device *dev) void intel_fbdev_output_poll_changed(struct drm_device *dev)
{ {
struct drm_i915_private *dev_priv = dev->dev_private; struct drm_i915_private *dev_priv = dev->dev_private;
drm_fb_helper_hotplug_event(&dev_priv->fbdev->helper); if (dev_priv->fbdev)
drm_fb_helper_hotplug_event(&dev_priv->fbdev->helper);
} }
void intel_fbdev_restore_mode(struct drm_device *dev) void intel_fbdev_restore_mode(struct drm_device *dev)
...@@ -531,7 +657,7 @@ void intel_fbdev_restore_mode(struct drm_device *dev) ...@@ -531,7 +657,7 @@ void intel_fbdev_restore_mode(struct drm_device *dev)
int ret; int ret;
struct drm_i915_private *dev_priv = dev->dev_private; struct drm_i915_private *dev_priv = dev->dev_private;
if (INTEL_INFO(dev)->num_pipes == 0) if (!dev_priv->fbdev)
return; return;
drm_modeset_lock_all(dev); drm_modeset_lock_all(dev);
......
...@@ -848,6 +848,30 @@ intel_hdmi_mode_valid(struct drm_connector *connector, ...@@ -848,6 +848,30 @@ intel_hdmi_mode_valid(struct drm_connector *connector,
return MODE_OK; return MODE_OK;
} }
static bool hdmi_12bpc_possible(struct intel_crtc *crtc)
{
struct drm_device *dev = crtc->base.dev;
struct intel_encoder *encoder;
int count = 0, count_hdmi = 0;
if (!HAS_PCH_SPLIT(dev))
return false;
list_for_each_entry(encoder, &dev->mode_config.encoder_list, base.head) {
if (encoder->new_crtc != crtc)
continue;
count_hdmi += encoder->type == INTEL_OUTPUT_HDMI;
count++;
}
/*
* HDMI 12bpc affects the clocks, so it's only possible
* when not cloning with other encoder types.
*/
return count_hdmi > 0 && count_hdmi == count;
}
bool intel_hdmi_compute_config(struct intel_encoder *encoder, bool intel_hdmi_compute_config(struct intel_encoder *encoder,
struct intel_crtc_config *pipe_config) struct intel_crtc_config *pipe_config)
{ {
...@@ -880,7 +904,8 @@ bool intel_hdmi_compute_config(struct intel_encoder *encoder, ...@@ -880,7 +904,8 @@ bool intel_hdmi_compute_config(struct intel_encoder *encoder,
* within limits. * within limits.
*/ */
if (pipe_config->pipe_bpp > 8*3 && intel_hdmi->has_hdmi_sink && if (pipe_config->pipe_bpp > 8*3 && intel_hdmi->has_hdmi_sink &&
clock_12bpc <= portclock_limit && HAS_PCH_SPLIT(dev)) { clock_12bpc <= portclock_limit &&
hdmi_12bpc_possible(encoder->new_crtc)) {
DRM_DEBUG_KMS("picking bpc to 12 for HDMI output\n"); DRM_DEBUG_KMS("picking bpc to 12 for HDMI output\n");
desired_bpp = 12*3; desired_bpp = 12*3;
...@@ -1318,7 +1343,14 @@ void intel_hdmi_init(struct drm_device *dev, int hdmi_reg, enum port port) ...@@ -1318,7 +1343,14 @@ void intel_hdmi_init(struct drm_device *dev, int hdmi_reg, enum port port)
intel_encoder->type = INTEL_OUTPUT_HDMI; intel_encoder->type = INTEL_OUTPUT_HDMI;
intel_encoder->crtc_mask = (1 << 0) | (1 << 1) | (1 << 2); intel_encoder->crtc_mask = (1 << 0) | (1 << 1) | (1 << 2);
intel_encoder->cloneable = false; intel_encoder->cloneable = 1 << INTEL_OUTPUT_ANALOG;
/*
* BSpec is unclear about HDMI+HDMI cloning on g4x, but it seems
* to work on real hardware. And since g4x can send infoframes to
* only one port anyway, nothing is lost by allowing it.
*/
if (IS_G4X(dev))
intel_encoder->cloneable |= 1 << INTEL_OUTPUT_HDMI;
intel_dig_port->port = port; intel_dig_port->port = port;
intel_dig_port->hdmi.hdmi_reg = hdmi_reg; intel_dig_port->hdmi.hdmi_reg = hdmi_reg;
......
...@@ -963,7 +963,7 @@ void intel_lvds_init(struct drm_device *dev) ...@@ -963,7 +963,7 @@ void intel_lvds_init(struct drm_device *dev)
intel_connector_attach_encoder(intel_connector, intel_encoder); intel_connector_attach_encoder(intel_connector, intel_encoder);
intel_encoder->type = INTEL_OUTPUT_LVDS; intel_encoder->type = INTEL_OUTPUT_LVDS;
intel_encoder->cloneable = false; intel_encoder->cloneable = 0;
if (HAS_PCH_SPLIT(dev)) if (HAS_PCH_SPLIT(dev))
intel_encoder->crtc_mask = (1 << 0) | (1 << 1) | (1 << 2); intel_encoder->crtc_mask = (1 << 0) | (1 << 1) | (1 << 2);
else if (IS_GEN4(dev)) else if (IS_GEN4(dev))
......
This diff is collapsed.
...@@ -440,15 +440,17 @@ static int init_ring_common(struct intel_ring_buffer *ring) ...@@ -440,15 +440,17 @@ static int init_ring_common(struct intel_ring_buffer *ring)
gen6_gt_force_wake_get(dev_priv, FORCEWAKE_ALL); gen6_gt_force_wake_get(dev_priv, FORCEWAKE_ALL);
if (I915_NEED_GFX_HWS(dev))
intel_ring_setup_status_page(ring);
else
ring_setup_phys_status_page(ring);
/* Stop the ring if it's running. */ /* Stop the ring if it's running. */
I915_WRITE_CTL(ring, 0); I915_WRITE_CTL(ring, 0);
I915_WRITE_HEAD(ring, 0); I915_WRITE_HEAD(ring, 0);
ring->write_tail(ring, 0); ring->write_tail(ring, 0);
if (wait_for_atomic((I915_READ_MODE(ring) & MODE_IDLE) != 0, 1000))
DRM_ERROR("%s :timed out trying to stop ring\n", ring->name);
if (I915_NEED_GFX_HWS(dev))
intel_ring_setup_status_page(ring);
else
ring_setup_phys_status_page(ring);
head = I915_READ_HEAD(ring) & HEAD_ADDR; head = I915_READ_HEAD(ring) & HEAD_ADDR;
...@@ -979,9 +981,19 @@ void intel_ring_setup_status_page(struct intel_ring_buffer *ring) ...@@ -979,9 +981,19 @@ void intel_ring_setup_status_page(struct intel_ring_buffer *ring)
I915_WRITE(mmio, (u32)ring->status_page.gfx_addr); I915_WRITE(mmio, (u32)ring->status_page.gfx_addr);
POSTING_READ(mmio); POSTING_READ(mmio);
/* Flush the TLB for this page */ /*
if (INTEL_INFO(dev)->gen >= 6) { * Flush the TLB for this page
*
* FIXME: These two bits have disappeared on gen8, so a question
* arises: do we still need this and if so how should we go about
* invalidating the TLB?
*/
if (INTEL_INFO(dev)->gen >= 6 && INTEL_INFO(dev)->gen < 8) {
u32 reg = RING_INSTPM(ring->mmio_base); u32 reg = RING_INSTPM(ring->mmio_base);
/* ring should be idle before issuing a sync flush*/
WARN_ON((I915_READ_MODE(ring) & MODE_IDLE) == 0);
I915_WRITE(reg, I915_WRITE(reg,
_MASKED_BIT_ENABLE(INSTPM_TLB_INVALIDATE | _MASKED_BIT_ENABLE(INSTPM_TLB_INVALIDATE |
INSTPM_SYNC_FLUSH)); INSTPM_SYNC_FLUSH));
......
...@@ -33,6 +33,8 @@ struct intel_hw_status_page { ...@@ -33,6 +33,8 @@ struct intel_hw_status_page {
#define I915_READ_IMR(ring) I915_READ(RING_IMR((ring)->mmio_base)) #define I915_READ_IMR(ring) I915_READ(RING_IMR((ring)->mmio_base))
#define I915_WRITE_IMR(ring, val) I915_WRITE(RING_IMR((ring)->mmio_base), val) #define I915_WRITE_IMR(ring, val) I915_WRITE(RING_IMR((ring)->mmio_base), val)
#define I915_READ_MODE(ring) I915_READ(RING_MI_MODE((ring)->mmio_base))
enum intel_ring_hangcheck_action { enum intel_ring_hangcheck_action {
HANGCHECK_IDLE = 0, HANGCHECK_IDLE = 0,
HANGCHECK_WAIT, HANGCHECK_WAIT,
......
...@@ -1461,7 +1461,7 @@ static void intel_enable_sdvo(struct intel_encoder *encoder) ...@@ -1461,7 +1461,7 @@ static void intel_enable_sdvo(struct intel_encoder *encoder)
u32 temp; u32 temp;
bool input1, input2; bool input1, input2;
int i; int i;
u8 status; bool success;
temp = I915_READ(intel_sdvo->sdvo_reg); temp = I915_READ(intel_sdvo->sdvo_reg);
if ((temp & SDVO_ENABLE) == 0) { if ((temp & SDVO_ENABLE) == 0) {
...@@ -1475,12 +1475,12 @@ static void intel_enable_sdvo(struct intel_encoder *encoder) ...@@ -1475,12 +1475,12 @@ static void intel_enable_sdvo(struct intel_encoder *encoder)
for (i = 0; i < 2; i++) for (i = 0; i < 2; i++)
intel_wait_for_vblank(dev, intel_crtc->pipe); intel_wait_for_vblank(dev, intel_crtc->pipe);
status = intel_sdvo_get_trained_inputs(intel_sdvo, &input1, &input2); success = intel_sdvo_get_trained_inputs(intel_sdvo, &input1, &input2);
/* Warn if the device reported failure to sync. /* Warn if the device reported failure to sync.
* A lot of SDVO devices fail to notify of sync, but it's * A lot of SDVO devices fail to notify of sync, but it's
* a given it the status is a success, we succeeded. * a given it the status is a success, we succeeded.
*/ */
if (status == SDVO_CMD_STATUS_SUCCESS && !input1) { if (success && !input1) {
DRM_DEBUG_KMS("First %s output reported failure to " DRM_DEBUG_KMS("First %s output reported failure to "
"sync\n", SDVO_NAME(intel_sdvo)); "sync\n", SDVO_NAME(intel_sdvo));
} }
...@@ -3032,7 +3032,7 @@ bool intel_sdvo_init(struct drm_device *dev, uint32_t sdvo_reg, bool is_sdvob) ...@@ -3032,7 +3032,7 @@ bool intel_sdvo_init(struct drm_device *dev, uint32_t sdvo_reg, bool is_sdvob)
* simplistic anyway to express such constraints, so just give up on * simplistic anyway to express such constraints, so just give up on
* cloning for SDVO encoders. * cloning for SDVO encoders.
*/ */
intel_sdvo->base.cloneable = false; intel_sdvo->base.cloneable = 0;
intel_sdvo_select_ddc_bus(dev_priv, intel_sdvo, sdvo_reg); intel_sdvo_select_ddc_bus(dev_priv, intel_sdvo, sdvo_reg);
......
...@@ -1639,9 +1639,8 @@ intel_tv_init(struct drm_device *dev) ...@@ -1639,9 +1639,8 @@ intel_tv_init(struct drm_device *dev)
intel_connector_attach_encoder(intel_connector, intel_encoder); intel_connector_attach_encoder(intel_connector, intel_encoder);
intel_encoder->type = INTEL_OUTPUT_TVOUT; intel_encoder->type = INTEL_OUTPUT_TVOUT;
intel_encoder->crtc_mask = (1 << 0) | (1 << 1); intel_encoder->crtc_mask = (1 << 0) | (1 << 1);
intel_encoder->cloneable = false; intel_encoder->cloneable = 0;
intel_encoder->base.possible_crtcs = ((1 << 0) | (1 << 1)); intel_encoder->base.possible_crtcs = ((1 << 0) | (1 << 1));
intel_encoder->base.possible_clones = (1 << INTEL_OUTPUT_TVOUT);
intel_tv->type = DRM_MODE_CONNECTOR_Unknown; intel_tv->type = DRM_MODE_CONNECTOR_Unknown;
/* BIOS margin values */ /* BIOS margin values */
......
...@@ -280,12 +280,17 @@ void vlv_force_wake_put(struct drm_i915_private *dev_priv, ...@@ -280,12 +280,17 @@ void vlv_force_wake_put(struct drm_i915_private *dev_priv,
spin_lock_irqsave(&dev_priv->uncore.lock, irqflags); spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);
if (fw_engine & FORCEWAKE_RENDER && if (fw_engine & FORCEWAKE_RENDER) {
--dev_priv->uncore.fw_rendercount != 0) WARN_ON(!dev_priv->uncore.fw_rendercount);
fw_engine &= ~FORCEWAKE_RENDER; if (--dev_priv->uncore.fw_rendercount != 0)
if (fw_engine & FORCEWAKE_MEDIA && fw_engine &= ~FORCEWAKE_RENDER;
--dev_priv->uncore.fw_mediacount != 0) }
fw_engine &= ~FORCEWAKE_MEDIA;
if (fw_engine & FORCEWAKE_MEDIA) {
WARN_ON(!dev_priv->uncore.fw_mediacount);
if (--dev_priv->uncore.fw_mediacount != 0)
fw_engine &= ~FORCEWAKE_MEDIA;
}
if (fw_engine) if (fw_engine)
dev_priv->uncore.funcs.force_wake_put(dev_priv, fw_engine); dev_priv->uncore.funcs.force_wake_put(dev_priv, fw_engine);
...@@ -301,6 +306,8 @@ static void gen6_force_wake_timer(unsigned long arg) ...@@ -301,6 +306,8 @@ static void gen6_force_wake_timer(unsigned long arg)
assert_device_not_suspended(dev_priv); assert_device_not_suspended(dev_priv);
spin_lock_irqsave(&dev_priv->uncore.lock, irqflags); spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);
WARN_ON(!dev_priv->uncore.forcewake_count);
if (--dev_priv->uncore.forcewake_count == 0) if (--dev_priv->uncore.forcewake_count == 0)
dev_priv->uncore.funcs.force_wake_put(dev_priv, FORCEWAKE_ALL); dev_priv->uncore.funcs.force_wake_put(dev_priv, FORCEWAKE_ALL);
spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags); spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
...@@ -308,9 +315,17 @@ static void gen6_force_wake_timer(unsigned long arg) ...@@ -308,9 +315,17 @@ static void gen6_force_wake_timer(unsigned long arg)
intel_runtime_pm_put(dev_priv); intel_runtime_pm_put(dev_priv);
} }
static void intel_uncore_forcewake_reset(struct drm_device *dev) static void intel_uncore_forcewake_reset(struct drm_device *dev, bool restore)
{ {
struct drm_i915_private *dev_priv = dev->dev_private; struct drm_i915_private *dev_priv = dev->dev_private;
unsigned long irqflags;
del_timer_sync(&dev_priv->uncore.force_wake_timer);
/* Hold uncore.lock across reset to prevent any register access
* with forcewake not set correctly
*/
spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);
if (IS_VALLEYVIEW(dev)) if (IS_VALLEYVIEW(dev))
vlv_force_wake_reset(dev_priv); vlv_force_wake_reset(dev_priv);
...@@ -319,6 +334,35 @@ static void intel_uncore_forcewake_reset(struct drm_device *dev) ...@@ -319,6 +334,35 @@ static void intel_uncore_forcewake_reset(struct drm_device *dev)
if (IS_IVYBRIDGE(dev) || IS_HASWELL(dev) || IS_GEN8(dev)) if (IS_IVYBRIDGE(dev) || IS_HASWELL(dev) || IS_GEN8(dev))
__gen7_gt_force_wake_mt_reset(dev_priv); __gen7_gt_force_wake_mt_reset(dev_priv);
if (restore) { /* If reset with a user forcewake, try to restore */
unsigned fw = 0;
if (IS_VALLEYVIEW(dev)) {
if (dev_priv->uncore.fw_rendercount)
fw |= FORCEWAKE_RENDER;
if (dev_priv->uncore.fw_mediacount)
fw |= FORCEWAKE_MEDIA;
} else {
if (dev_priv->uncore.forcewake_count)
fw = FORCEWAKE_ALL;
}
if (fw)
dev_priv->uncore.funcs.force_wake_get(dev_priv, fw);
if (IS_GEN6(dev) || IS_GEN7(dev))
dev_priv->uncore.fifo_count =
__raw_i915_read32(dev_priv, GTFIFOCTL) &
GT_FIFO_FREE_ENTRIES_MASK;
} else {
dev_priv->uncore.forcewake_count = 0;
dev_priv->uncore.fw_rendercount = 0;
dev_priv->uncore.fw_mediacount = 0;
}
spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
} }
void intel_uncore_early_sanitize(struct drm_device *dev) void intel_uncore_early_sanitize(struct drm_device *dev)
...@@ -344,7 +388,7 @@ void intel_uncore_early_sanitize(struct drm_device *dev) ...@@ -344,7 +388,7 @@ void intel_uncore_early_sanitize(struct drm_device *dev)
__raw_i915_write32(dev_priv, GTFIFODBG, __raw_i915_write32(dev_priv, GTFIFODBG,
__raw_i915_read32(dev_priv, GTFIFODBG)); __raw_i915_read32(dev_priv, GTFIFODBG));
intel_uncore_forcewake_reset(dev); intel_uncore_forcewake_reset(dev, false);
} }
void intel_uncore_sanitize(struct drm_device *dev) void intel_uncore_sanitize(struct drm_device *dev)
...@@ -415,6 +459,8 @@ void gen6_gt_force_wake_put(struct drm_i915_private *dev_priv, int fw_engine) ...@@ -415,6 +459,8 @@ void gen6_gt_force_wake_put(struct drm_i915_private *dev_priv, int fw_engine)
spin_lock_irqsave(&dev_priv->uncore.lock, irqflags); spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);
WARN_ON(!dev_priv->uncore.forcewake_count);
if (--dev_priv->uncore.forcewake_count == 0) { if (--dev_priv->uncore.forcewake_count == 0) {
dev_priv->uncore.forcewake_count++; dev_priv->uncore.forcewake_count++;
delayed = true; delayed = true;
...@@ -690,6 +736,8 @@ void intel_uncore_init(struct drm_device *dev) ...@@ -690,6 +736,8 @@ void intel_uncore_init(struct drm_device *dev)
setup_timer(&dev_priv->uncore.force_wake_timer, setup_timer(&dev_priv->uncore.force_wake_timer,
gen6_force_wake_timer, (unsigned long)dev_priv); gen6_force_wake_timer, (unsigned long)dev_priv);
intel_uncore_early_sanitize(dev);
if (IS_VALLEYVIEW(dev)) { if (IS_VALLEYVIEW(dev)) {
dev_priv->uncore.funcs.force_wake_get = __vlv_force_wake_get; dev_priv->uncore.funcs.force_wake_get = __vlv_force_wake_get;
dev_priv->uncore.funcs.force_wake_put = __vlv_force_wake_put; dev_priv->uncore.funcs.force_wake_put = __vlv_force_wake_put;
...@@ -798,13 +846,9 @@ void intel_uncore_init(struct drm_device *dev) ...@@ -798,13 +846,9 @@ void intel_uncore_init(struct drm_device *dev)
void intel_uncore_fini(struct drm_device *dev) void intel_uncore_fini(struct drm_device *dev)
{ {
struct drm_i915_private *dev_priv = dev->dev_private;
del_timer_sync(&dev_priv->uncore.force_wake_timer);
/* Paranoia: make sure we have disabled everything before we exit. */ /* Paranoia: make sure we have disabled everything before we exit. */
intel_uncore_sanitize(dev); intel_uncore_sanitize(dev);
intel_uncore_forcewake_reset(dev); intel_uncore_forcewake_reset(dev, false);
} }
static const struct register_whitelist { static const struct register_whitelist {
...@@ -953,13 +997,6 @@ static int gen6_do_reset(struct drm_device *dev) ...@@ -953,13 +997,6 @@ static int gen6_do_reset(struct drm_device *dev)
{ {
struct drm_i915_private *dev_priv = dev->dev_private; struct drm_i915_private *dev_priv = dev->dev_private;
int ret; int ret;
unsigned long irqflags;
u32 fw_engine = 0;
/* Hold uncore.lock across reset to prevent any register access
* with forcewake not set correctly
*/
spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);
/* Reset the chip */ /* Reset the chip */
...@@ -972,29 +1009,8 @@ static int gen6_do_reset(struct drm_device *dev) ...@@ -972,29 +1009,8 @@ static int gen6_do_reset(struct drm_device *dev)
/* Spin waiting for the device to ack the reset request */ /* Spin waiting for the device to ack the reset request */
ret = wait_for((__raw_i915_read32(dev_priv, GEN6_GDRST) & GEN6_GRDOM_FULL) == 0, 500); ret = wait_for((__raw_i915_read32(dev_priv, GEN6_GDRST) & GEN6_GRDOM_FULL) == 0, 500);
intel_uncore_forcewake_reset(dev); intel_uncore_forcewake_reset(dev, true);
/* If reset with a user forcewake, try to restore */
if (IS_VALLEYVIEW(dev)) {
if (dev_priv->uncore.fw_rendercount)
fw_engine |= FORCEWAKE_RENDER;
if (dev_priv->uncore.fw_mediacount)
fw_engine |= FORCEWAKE_MEDIA;
} else {
if (dev_priv->uncore.forcewake_count)
fw_engine = FORCEWAKE_ALL;
}
if (fw_engine)
dev_priv->uncore.funcs.force_wake_get(dev_priv, fw_engine);
if (IS_GEN6(dev) || IS_GEN7(dev))
dev_priv->uncore.fifo_count =
__raw_i915_read32(dev_priv, GTFIFOCTL) &
GT_FIFO_FREE_ENTRIES_MASK;
spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
return ret; return ret;
} }
......
...@@ -438,6 +438,9 @@ struct drm_dp_aux_msg { ...@@ -438,6 +438,9 @@ struct drm_dp_aux_msg {
* The .dev field should be set to a pointer to the device that implements * The .dev field should be set to a pointer to the device that implements
* the AUX channel. * the AUX channel.
* *
* The .name field may be used to specify the name of the I2C adapter. If set to
* NULL, dev_name() of .dev will be used.
*
* Drivers provide a hardware-specific implementation of how transactions * Drivers provide a hardware-specific implementation of how transactions
* are executed via the .transfer() function. A pointer to a drm_dp_aux_msg * are executed via the .transfer() function. A pointer to a drm_dp_aux_msg
* structure describing the transaction is passed into this function. Upon * structure describing the transaction is passed into this function. Upon
...@@ -455,6 +458,7 @@ struct drm_dp_aux_msg { ...@@ -455,6 +458,7 @@ struct drm_dp_aux_msg {
* should call drm_dp_aux_unregister_i2c_bus() to remove the I2C adapter. * should call drm_dp_aux_unregister_i2c_bus() to remove the I2C adapter.
*/ */
struct drm_dp_aux { struct drm_dp_aux {
const char *name;
struct i2c_adapter ddc; struct i2c_adapter ddc;
struct device *dev; struct device *dev;
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment