Commit 225963dd authored by Dave Airlie's avatar Dave Airlie

Merge branch 'drm-intel-next' of git://anongit.freedesktop.org/drm-intel into drm-next

drm-intel-next-2015-01-30:
- chv rps improvements from Ville
- atomic state handling prep work from Ander
- execlist request tracking refactoring from Nick Hoath
- forcewake code consolidation from Chris&Mika
- fastboot plane config refactoring and skl support from Damien
- some more skl pm patches all over (Damien)
- refactor dsi code to use drm dsi helpers and drm_panel infrastructure (Jani)
- first cut at experimental atomic plane updates (Matt Roper)
- piles of smaller things all over, as usual

* 'drm-intel-next' of git://anongit.freedesktop.org/drm-intel: (102 commits)
  drm/i915: Remove bogus locking check in the hangcheck code
  drm/i915: Update DRIVER_DATE to 20150130
  drm/i915: Use pipe_config's cpu_transcoder for reading encoder hw state
  drm/i915: Fix a use-after-free in intel_execlists_retire_requests
  drm/i915: Split shared dpll setup out of __intel_set_mode()
  drm/i915: Don't do posting reads on getting forcewake
  drm/i915: Do uncore early sanitize after domain init
  drm/i915: Handle CHV in vlv_set_rps_idle()
  drm/i915: Remove nested work in gpu error handling
  drm/i915/documentation: Add intel_uncore.c to drm.tmpl
  drm/i915/dsi: remove intel_dsi_cmd.c and the unused functions therein
  drm/i915/dsi: move dpi_send_cmd() to intel_dsi.c and make it static
  drm/i915/dsi: remove old read/write functions in favor of new stuff
  drm/i915/dsi: make the vbt panel driver use mipi_dsi_device for transfers
  drm/i915/dsi: add drm mipi dsi host support
  drm/i915/dsi: switch to drm_panel interface
  drm/i915/skl: Enabling PSR on Skylake
  Revert "drm/i915: Fix mutex->owner inspection race under DEBUG_MUTEXES"
  drm/i915: Be consistent on printing seqnos
  drm/i915: Display current hangcheck status in debugfs
  ...
parents e4bf44b3 b838cbee
...@@ -3969,6 +3969,7 @@ int num_ioctls;</synopsis> ...@@ -3969,6 +3969,7 @@ int num_ioctls;</synopsis>
<title>Runtime Power Management</title> <title>Runtime Power Management</title>
!Pdrivers/gpu/drm/i915/intel_runtime_pm.c runtime pm !Pdrivers/gpu/drm/i915/intel_runtime_pm.c runtime pm
!Idrivers/gpu/drm/i915/intel_runtime_pm.c !Idrivers/gpu/drm/i915/intel_runtime_pm.c
!Idrivers/gpu/drm/i915/intel_uncore.c
</sect2> </sect2>
<sect2> <sect2>
<title>Interrupt Handling</title> <title>Interrupt Handling</title>
......
...@@ -225,7 +225,7 @@ static int i810_insert_dcache_entries(struct agp_memory *mem, off_t pg_start, ...@@ -225,7 +225,7 @@ static int i810_insert_dcache_entries(struct agp_memory *mem, off_t pg_start,
intel_private.driver->write_entry(addr, intel_private.driver->write_entry(addr,
i, type); i, type);
} }
readl(intel_private.gtt+i-1); wmb();
return 0; return 0;
} }
...@@ -329,7 +329,7 @@ static void i810_write_entry(dma_addr_t addr, unsigned int entry, ...@@ -329,7 +329,7 @@ static void i810_write_entry(dma_addr_t addr, unsigned int entry,
break; break;
} }
writel(addr | pte_flags, intel_private.gtt + entry); writel_relaxed(addr | pte_flags, intel_private.gtt + entry);
} }
static const struct aper_size_info_fixed intel_fake_agp_sizes[] = { static const struct aper_size_info_fixed intel_fake_agp_sizes[] = {
...@@ -735,7 +735,7 @@ static void i830_write_entry(dma_addr_t addr, unsigned int entry, ...@@ -735,7 +735,7 @@ static void i830_write_entry(dma_addr_t addr, unsigned int entry,
if (flags == AGP_USER_CACHED_MEMORY) if (flags == AGP_USER_CACHED_MEMORY)
pte_flags |= I830_PTE_SYSTEM_CACHED; pte_flags |= I830_PTE_SYSTEM_CACHED;
writel(addr | pte_flags, intel_private.gtt + entry); writel_relaxed(addr | pte_flags, intel_private.gtt + entry);
} }
bool intel_enable_gtt(void) bool intel_enable_gtt(void)
...@@ -858,7 +858,7 @@ void intel_gtt_insert_sg_entries(struct sg_table *st, ...@@ -858,7 +858,7 @@ void intel_gtt_insert_sg_entries(struct sg_table *st,
j++; j++;
} }
} }
readl(intel_private.gtt+j-1); wmb();
} }
EXPORT_SYMBOL(intel_gtt_insert_sg_entries); EXPORT_SYMBOL(intel_gtt_insert_sg_entries);
...@@ -875,7 +875,7 @@ static void intel_gtt_insert_pages(unsigned int first_entry, ...@@ -875,7 +875,7 @@ static void intel_gtt_insert_pages(unsigned int first_entry,
intel_private.driver->write_entry(addr, intel_private.driver->write_entry(addr,
j, flags); j, flags);
} }
readl(intel_private.gtt+j-1); wmb();
} }
static int intel_fake_agp_insert_entries(struct agp_memory *mem, static int intel_fake_agp_insert_entries(struct agp_memory *mem,
...@@ -938,7 +938,7 @@ void intel_gtt_clear_range(unsigned int first_entry, unsigned int num_entries) ...@@ -938,7 +938,7 @@ void intel_gtt_clear_range(unsigned int first_entry, unsigned int num_entries)
intel_private.driver->write_entry(intel_private.scratch_page_dma, intel_private.driver->write_entry(intel_private.scratch_page_dma,
i, 0); i, 0);
} }
readl(intel_private.gtt+i-1); wmb();
} }
EXPORT_SYMBOL(intel_gtt_clear_range); EXPORT_SYMBOL(intel_gtt_clear_range);
...@@ -1106,7 +1106,7 @@ static void i965_write_entry(dma_addr_t addr, ...@@ -1106,7 +1106,7 @@ static void i965_write_entry(dma_addr_t addr,
/* Shift high bits down */ /* Shift high bits down */
addr |= (addr >> 28) & 0xf0; addr |= (addr >> 28) & 0xf0;
writel(addr | pte_flags, intel_private.gtt + entry); writel_relaxed(addr | pte_flags, intel_private.gtt + entry);
} }
static int i9xx_setup(void) static int i9xx_setup(void)
......
...@@ -11,6 +11,8 @@ config DRM_I915 ...@@ -11,6 +11,8 @@ config DRM_I915
select SHMEM select SHMEM
select TMPFS select TMPFS
select DRM_KMS_HELPER select DRM_KMS_HELPER
select DRM_PANEL
select DRM_MIPI_DSI
# i915 depends on ACPI_VIDEO when ACPI is enabled # i915 depends on ACPI_VIDEO when ACPI is enabled
# but for select to work, need to select ACPI_VIDEO's dependencies, ick # but for select to work, need to select ACPI_VIDEO's dependencies, ick
select BACKLIGHT_LCD_SUPPORT if ACPI select BACKLIGHT_LCD_SUPPORT if ACPI
......
...@@ -66,12 +66,12 @@ i915-y += dvo_ch7017.o \ ...@@ -66,12 +66,12 @@ i915-y += dvo_ch7017.o \
dvo_ns2501.o \ dvo_ns2501.o \
dvo_sil164.o \ dvo_sil164.o \
dvo_tfp410.o \ dvo_tfp410.o \
intel_atomic.o \
intel_atomic_plane.o \ intel_atomic_plane.o \
intel_crt.o \ intel_crt.o \
intel_ddi.o \ intel_ddi.o \
intel_dp.o \ intel_dp.o \
intel_dp_mst.o \ intel_dp_mst.o \
intel_dsi_cmd.o \
intel_dsi.o \ intel_dsi.o \
intel_dsi_pll.o \ intel_dsi_pll.o \
intel_dsi_panel_vbt.o \ intel_dsi_panel_vbt.o \
......
This diff is collapsed.
...@@ -92,6 +92,9 @@ static int i915_getparam(struct drm_device *dev, void *data, ...@@ -92,6 +92,9 @@ static int i915_getparam(struct drm_device *dev, void *data,
case I915_PARAM_HAS_VEBOX: case I915_PARAM_HAS_VEBOX:
value = intel_ring_initialized(&dev_priv->ring[VECS]); value = intel_ring_initialized(&dev_priv->ring[VECS]);
break; break;
case I915_PARAM_HAS_BSD2:
value = intel_ring_initialized(&dev_priv->ring[VCS2]);
break;
case I915_PARAM_HAS_RELAXED_FENCING: case I915_PARAM_HAS_RELAXED_FENCING:
value = 1; value = 1;
break; break;
...@@ -601,6 +604,17 @@ static void intel_device_info_runtime_init(struct drm_device *dev) ...@@ -601,6 +604,17 @@ static void intel_device_info_runtime_init(struct drm_device *dev)
info->num_pipes = 0; info->num_pipes = 0;
} }
} }
if (IS_CHERRYVIEW(dev)) {
u32 fuse, mask_eu;
fuse = I915_READ(CHV_FUSE_GT);
mask_eu = fuse & (CHV_FGT_EU_DIS_SS0_R0_MASK |
CHV_FGT_EU_DIS_SS0_R1_MASK |
CHV_FGT_EU_DIS_SS1_R0_MASK |
CHV_FGT_EU_DIS_SS1_R1_MASK);
info->eu_total = 16 - hweight32(mask_eu);
}
} }
/** /**
...@@ -776,6 +790,14 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags) ...@@ -776,6 +790,14 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags)
goto out_freewq; goto out_freewq;
} }
dev_priv->gpu_error.hangcheck_wq =
alloc_ordered_workqueue("i915-hangcheck", 0);
if (dev_priv->gpu_error.hangcheck_wq == NULL) {
DRM_ERROR("Failed to create our hangcheck workqueue.\n");
ret = -ENOMEM;
goto out_freedpwq;
}
intel_irq_init(dev_priv); intel_irq_init(dev_priv);
intel_uncore_sanitize(dev); intel_uncore_sanitize(dev);
...@@ -850,6 +872,8 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags) ...@@ -850,6 +872,8 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags)
intel_teardown_gmbus(dev); intel_teardown_gmbus(dev);
intel_teardown_mchbar(dev); intel_teardown_mchbar(dev);
pm_qos_remove_request(&dev_priv->pm_qos); pm_qos_remove_request(&dev_priv->pm_qos);
destroy_workqueue(dev_priv->gpu_error.hangcheck_wq);
out_freedpwq:
destroy_workqueue(dev_priv->dp_wq); destroy_workqueue(dev_priv->dp_wq);
out_freewq: out_freewq:
destroy_workqueue(dev_priv->wq); destroy_workqueue(dev_priv->wq);
...@@ -920,8 +944,7 @@ int i915_driver_unload(struct drm_device *dev) ...@@ -920,8 +944,7 @@ int i915_driver_unload(struct drm_device *dev)
} }
/* Free error state after interrupts are fully disabled. */ /* Free error state after interrupts are fully disabled. */
del_timer_sync(&dev_priv->gpu_error.hangcheck_timer); cancel_delayed_work_sync(&dev_priv->gpu_error.hangcheck_work);
cancel_work_sync(&dev_priv->gpu_error.work);
i915_destroy_error_state(dev); i915_destroy_error_state(dev);
if (dev->pdev->msi_enabled) if (dev->pdev->msi_enabled)
...@@ -946,6 +969,7 @@ int i915_driver_unload(struct drm_device *dev) ...@@ -946,6 +969,7 @@ int i915_driver_unload(struct drm_device *dev)
destroy_workqueue(dev_priv->dp_wq); destroy_workqueue(dev_priv->dp_wq);
destroy_workqueue(dev_priv->wq); destroy_workqueue(dev_priv->wq);
destroy_workqueue(dev_priv->gpu_error.hangcheck_wq);
pm_qos_remove_request(&dev_priv->pm_qos); pm_qos_remove_request(&dev_priv->pm_qos);
i915_global_gtt_cleanup(dev); i915_global_gtt_cleanup(dev);
......
...@@ -1365,8 +1365,6 @@ static int intel_runtime_suspend(struct device *device) ...@@ -1365,8 +1365,6 @@ static int intel_runtime_suspend(struct device *device)
if (WARN_ON_ONCE(!HAS_RUNTIME_PM(dev))) if (WARN_ON_ONCE(!HAS_RUNTIME_PM(dev)))
return -ENODEV; return -ENODEV;
assert_force_wake_inactive(dev_priv);
DRM_DEBUG_KMS("Suspending device\n"); DRM_DEBUG_KMS("Suspending device\n");
/* /*
...@@ -1404,7 +1402,8 @@ static int intel_runtime_suspend(struct device *device) ...@@ -1404,7 +1402,8 @@ static int intel_runtime_suspend(struct device *device)
return ret; return ret;
} }
del_timer_sync(&dev_priv->gpu_error.hangcheck_timer); cancel_delayed_work_sync(&dev_priv->gpu_error.hangcheck_work);
intel_uncore_forcewake_reset(dev, false);
dev_priv->pm.suspended = true; dev_priv->pm.suspended = true;
/* /*
...@@ -1432,6 +1431,8 @@ static int intel_runtime_suspend(struct device *device) ...@@ -1432,6 +1431,8 @@ static int intel_runtime_suspend(struct device *device)
intel_opregion_notify_adapter(dev, PCI_D3hot); intel_opregion_notify_adapter(dev, PCI_D3hot);
} }
assert_forcewakes_inactive(dev_priv);
DRM_DEBUG_KMS("Device suspended\n"); DRM_DEBUG_KMS("Device suspended\n");
return 0; return 0;
} }
...@@ -1642,6 +1643,14 @@ static int __init i915_init(void) ...@@ -1642,6 +1643,14 @@ static int __init i915_init(void)
#endif #endif
} }
/*
* FIXME: Note that we're lying to the DRM core here so that we can get access
* to the atomic ioctl and the atomic properties. Only plane operations on
* a single CRTC will actually work.
*/
if (i915.nuclear_pageflip)
driver.driver_features |= DRIVER_ATOMIC;
return drm_pci_init(&driver, &i915_pci_driver); return drm_pci_init(&driver, &i915_pci_driver);
} }
......
This diff is collapsed.
...@@ -39,8 +39,7 @@ ...@@ -39,8 +39,7 @@
#include <linux/dma-buf.h> #include <linux/dma-buf.h>
static void i915_gem_object_flush_gtt_write_domain(struct drm_i915_gem_object *obj); static void i915_gem_object_flush_gtt_write_domain(struct drm_i915_gem_object *obj);
static void i915_gem_object_flush_cpu_write_domain(struct drm_i915_gem_object *obj, static void i915_gem_object_flush_cpu_write_domain(struct drm_i915_gem_object *obj);
bool force);
static __must_check int static __must_check int
i915_gem_object_wait_rendering(struct drm_i915_gem_object *obj, i915_gem_object_wait_rendering(struct drm_i915_gem_object *obj,
bool readonly); bool readonly);
...@@ -1516,7 +1515,7 @@ i915_gem_sw_finish_ioctl(struct drm_device *dev, void *data, ...@@ -1516,7 +1515,7 @@ i915_gem_sw_finish_ioctl(struct drm_device *dev, void *data,
/* Pinned buffers may be scanout, so flush the cache */ /* Pinned buffers may be scanout, so flush the cache */
if (obj->pin_display) if (obj->pin_display)
i915_gem_object_flush_cpu_write_domain(obj, true); i915_gem_object_flush_cpu_write_domain(obj);
drm_gem_object_unreference(&obj->base); drm_gem_object_unreference(&obj->base);
unlock: unlock:
...@@ -2414,7 +2413,7 @@ int __i915_add_request(struct intel_engine_cs *ring, ...@@ -2414,7 +2413,7 @@ int __i915_add_request(struct intel_engine_cs *ring,
struct drm_i915_private *dev_priv = ring->dev->dev_private; struct drm_i915_private *dev_priv = ring->dev->dev_private;
struct drm_i915_gem_request *request; struct drm_i915_gem_request *request;
struct intel_ringbuffer *ringbuf; struct intel_ringbuffer *ringbuf;
u32 request_ring_position, request_start; u32 request_start;
int ret; int ret;
request = ring->outstanding_lazy_request; request = ring->outstanding_lazy_request;
...@@ -2422,8 +2421,7 @@ int __i915_add_request(struct intel_engine_cs *ring, ...@@ -2422,8 +2421,7 @@ int __i915_add_request(struct intel_engine_cs *ring,
return -ENOMEM; return -ENOMEM;
if (i915.enable_execlists) { if (i915.enable_execlists) {
struct intel_context *ctx = request->ctx; ringbuf = request->ctx->engine[ring->id].ringbuf;
ringbuf = ctx->engine[ring->id].ringbuf;
} else } else
ringbuf = ring->buffer; ringbuf = ring->buffer;
...@@ -2436,7 +2434,7 @@ int __i915_add_request(struct intel_engine_cs *ring, ...@@ -2436,7 +2434,7 @@ int __i915_add_request(struct intel_engine_cs *ring,
* what. * what.
*/ */
if (i915.enable_execlists) { if (i915.enable_execlists) {
ret = logical_ring_flush_all_caches(ringbuf); ret = logical_ring_flush_all_caches(ringbuf, request->ctx);
if (ret) if (ret)
return ret; return ret;
} else { } else {
...@@ -2450,10 +2448,10 @@ int __i915_add_request(struct intel_engine_cs *ring, ...@@ -2450,10 +2448,10 @@ int __i915_add_request(struct intel_engine_cs *ring,
* GPU processing the request, we never over-estimate the * GPU processing the request, we never over-estimate the
* position of the head. * position of the head.
*/ */
request_ring_position = intel_ring_get_tail(ringbuf); request->postfix = intel_ring_get_tail(ringbuf);
if (i915.enable_execlists) { if (i915.enable_execlists) {
ret = ring->emit_request(ringbuf); ret = ring->emit_request(ringbuf, request);
if (ret) if (ret)
return ret; return ret;
} else { } else {
...@@ -2463,7 +2461,7 @@ int __i915_add_request(struct intel_engine_cs *ring, ...@@ -2463,7 +2461,7 @@ int __i915_add_request(struct intel_engine_cs *ring,
} }
request->head = request_start; request->head = request_start;
request->tail = request_ring_position; request->tail = intel_ring_get_tail(ringbuf);
/* Whilst this request exists, batch_obj will be on the /* Whilst this request exists, batch_obj will be on the
* active_list, and so will hold the active reference. Only when this * active_list, and so will hold the active reference. Only when this
...@@ -2650,13 +2648,17 @@ static void i915_gem_reset_ring_cleanup(struct drm_i915_private *dev_priv, ...@@ -2650,13 +2648,17 @@ static void i915_gem_reset_ring_cleanup(struct drm_i915_private *dev_priv,
* pinned in place. * pinned in place.
*/ */
while (!list_empty(&ring->execlist_queue)) { while (!list_empty(&ring->execlist_queue)) {
struct intel_ctx_submit_request *submit_req; struct drm_i915_gem_request *submit_req;
submit_req = list_first_entry(&ring->execlist_queue, submit_req = list_first_entry(&ring->execlist_queue,
struct intel_ctx_submit_request, struct drm_i915_gem_request,
execlist_link); execlist_link);
list_del(&submit_req->execlist_link); list_del(&submit_req->execlist_link);
intel_runtime_pm_put(dev_priv); intel_runtime_pm_put(dev_priv);
if (submit_req->ctx != ring->default_context)
intel_lr_context_unpin(ring, submit_req->ctx);
i915_gem_context_unreference(submit_req->ctx); i915_gem_context_unreference(submit_req->ctx);
kfree(submit_req); kfree(submit_req);
} }
...@@ -2783,7 +2785,7 @@ i915_gem_retire_requests_ring(struct intel_engine_cs *ring) ...@@ -2783,7 +2785,7 @@ i915_gem_retire_requests_ring(struct intel_engine_cs *ring)
* of tail of the request to update the last known position * of tail of the request to update the last known position
* of the GPU head. * of the GPU head.
*/ */
ringbuf->last_retired_head = request->tail; ringbuf->last_retired_head = request->postfix;
i915_gem_free_request(request); i915_gem_free_request(request);
} }
...@@ -3634,11 +3636,14 @@ i915_gem_clflush_object(struct drm_i915_gem_object *obj, ...@@ -3634,11 +3636,14 @@ i915_gem_clflush_object(struct drm_i915_gem_object *obj,
* snooping behaviour occurs naturally as the result of our domain * snooping behaviour occurs naturally as the result of our domain
* tracking. * tracking.
*/ */
if (!force && cpu_cache_is_coherent(obj->base.dev, obj->cache_level)) if (!force && cpu_cache_is_coherent(obj->base.dev, obj->cache_level)) {
obj->cache_dirty = true;
return false; return false;
}
trace_i915_gem_object_clflush(obj); trace_i915_gem_object_clflush(obj);
drm_clflush_sg(obj->pages); drm_clflush_sg(obj->pages);
obj->cache_dirty = false;
return true; return true;
} }
...@@ -3674,15 +3679,14 @@ i915_gem_object_flush_gtt_write_domain(struct drm_i915_gem_object *obj) ...@@ -3674,15 +3679,14 @@ i915_gem_object_flush_gtt_write_domain(struct drm_i915_gem_object *obj)
/** Flushes the CPU write domain for the object if it's dirty. */ /** Flushes the CPU write domain for the object if it's dirty. */
static void static void
i915_gem_object_flush_cpu_write_domain(struct drm_i915_gem_object *obj, i915_gem_object_flush_cpu_write_domain(struct drm_i915_gem_object *obj)
bool force)
{ {
uint32_t old_write_domain; uint32_t old_write_domain;
if (obj->base.write_domain != I915_GEM_DOMAIN_CPU) if (obj->base.write_domain != I915_GEM_DOMAIN_CPU)
return; return;
if (i915_gem_clflush_object(obj, force)) if (i915_gem_clflush_object(obj, obj->pin_display))
i915_gem_chipset_flush(obj->base.dev); i915_gem_chipset_flush(obj->base.dev);
old_write_domain = obj->base.write_domain; old_write_domain = obj->base.write_domain;
...@@ -3729,7 +3733,7 @@ i915_gem_object_set_to_gtt_domain(struct drm_i915_gem_object *obj, bool write) ...@@ -3729,7 +3733,7 @@ i915_gem_object_set_to_gtt_domain(struct drm_i915_gem_object *obj, bool write)
if (ret) if (ret)
return ret; return ret;
i915_gem_object_flush_cpu_write_domain(obj, false); i915_gem_object_flush_cpu_write_domain(obj);
/* Serialise direct access to this object with the barriers for /* Serialise direct access to this object with the barriers for
* coherent writes from the GPU, by effectively invalidating the * coherent writes from the GPU, by effectively invalidating the
...@@ -3821,27 +3825,11 @@ int i915_gem_object_set_cache_level(struct drm_i915_gem_object *obj, ...@@ -3821,27 +3825,11 @@ int i915_gem_object_set_cache_level(struct drm_i915_gem_object *obj,
vma->node.color = cache_level; vma->node.color = cache_level;
obj->cache_level = cache_level; obj->cache_level = cache_level;
if (cpu_write_needs_clflush(obj)) { if (obj->cache_dirty &&
u32 old_read_domains, old_write_domain; obj->base.write_domain != I915_GEM_DOMAIN_CPU &&
cpu_write_needs_clflush(obj)) {
/* If we're coming from LLC cached, then we haven't if (i915_gem_clflush_object(obj, true))
* actually been tracking whether the data is in the i915_gem_chipset_flush(obj->base.dev);
* CPU cache or not, since we only allow one bit set
* in obj->write_domain and have been skipping the clflushes.
* Just set it to the CPU cache for now.
*/
i915_gem_object_retire(obj);
WARN_ON(obj->base.write_domain & ~I915_GEM_DOMAIN_CPU);
old_read_domains = obj->base.read_domains;
old_write_domain = obj->base.write_domain;
obj->base.read_domains = I915_GEM_DOMAIN_CPU;
obj->base.write_domain = I915_GEM_DOMAIN_CPU;
trace_i915_gem_object_change_domain(obj,
old_read_domains,
old_write_domain);
} }
return 0; return 0;
...@@ -3991,7 +3979,7 @@ i915_gem_object_pin_to_display_plane(struct drm_i915_gem_object *obj, ...@@ -3991,7 +3979,7 @@ i915_gem_object_pin_to_display_plane(struct drm_i915_gem_object *obj,
if (ret) if (ret)
goto err_unpin_display; goto err_unpin_display;
i915_gem_object_flush_cpu_write_domain(obj, true); i915_gem_object_flush_cpu_write_domain(obj);
old_write_domain = obj->base.write_domain; old_write_domain = obj->base.write_domain;
old_read_domains = obj->base.read_domains; old_read_domains = obj->base.read_domains;
...@@ -4620,7 +4608,7 @@ i915_gem_suspend(struct drm_device *dev) ...@@ -4620,7 +4608,7 @@ i915_gem_suspend(struct drm_device *dev)
i915_gem_stop_ringbuffers(dev); i915_gem_stop_ringbuffers(dev);
mutex_unlock(&dev->struct_mutex); mutex_unlock(&dev->struct_mutex);
del_timer_sync(&dev_priv->gpu_error.hangcheck_timer); cancel_delayed_work_sync(&dev_priv->gpu_error.hangcheck_work);
cancel_delayed_work_sync(&dev_priv->mm.retire_work); cancel_delayed_work_sync(&dev_priv->mm.retire_work);
flush_delayed_work(&dev_priv->mm.idle_work); flush_delayed_work(&dev_priv->mm.idle_work);
...@@ -5111,7 +5099,7 @@ static bool mutex_is_locked_by(struct mutex *mutex, struct task_struct *task) ...@@ -5111,7 +5099,7 @@ static bool mutex_is_locked_by(struct mutex *mutex, struct task_struct *task)
if (!mutex_is_locked(mutex)) if (!mutex_is_locked(mutex))
return false; return false;
#if defined(CONFIG_SMP) && !defined(CONFIG_DEBUG_MUTEXES) #if defined(CONFIG_SMP) || defined(CONFIG_DEBUG_MUTEXES)
return mutex->owner == task; return mutex->owner == task;
#else #else
/* Since UP may be pre-empted, we cannot assume that we own the lock */ /* Since UP may be pre-empted, we cannot assume that we own the lock */
......
...@@ -1380,13 +1380,35 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data, ...@@ -1380,13 +1380,35 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
return -EINVAL; return -EINVAL;
} }
if (((args->flags & I915_EXEC_RING_MASK) != I915_EXEC_BSD) &&
((args->flags & I915_EXEC_BSD_MASK) != 0)) {
DRM_DEBUG("execbuf with non bsd ring but with invalid "
"bsd dispatch flags: %d\n", (int)(args->flags));
return -EINVAL;
}
if ((args->flags & I915_EXEC_RING_MASK) == I915_EXEC_DEFAULT) if ((args->flags & I915_EXEC_RING_MASK) == I915_EXEC_DEFAULT)
ring = &dev_priv->ring[RCS]; ring = &dev_priv->ring[RCS];
else if ((args->flags & I915_EXEC_RING_MASK) == I915_EXEC_BSD) { else if ((args->flags & I915_EXEC_RING_MASK) == I915_EXEC_BSD) {
if (HAS_BSD2(dev)) { if (HAS_BSD2(dev)) {
int ring_id; int ring_id;
ring_id = gen8_dispatch_bsd_ring(dev, file);
ring = &dev_priv->ring[ring_id]; switch (args->flags & I915_EXEC_BSD_MASK) {
case I915_EXEC_BSD_DEFAULT:
ring_id = gen8_dispatch_bsd_ring(dev, file);
ring = &dev_priv->ring[ring_id];
break;
case I915_EXEC_BSD_RING1:
ring = &dev_priv->ring[VCS];
break;
case I915_EXEC_BSD_RING2:
ring = &dev_priv->ring[VCS2];
break;
default:
DRM_DEBUG("execbuf with unknown bsd ring: %d\n",
(int)(args->flags & I915_EXEC_BSD_MASK));
return -EINVAL;
}
} else } else
ring = &dev_priv->ring[VCS]; ring = &dev_priv->ring[VCS];
} else } else
......
...@@ -1052,7 +1052,7 @@ static void i915_gem_record_rings(struct drm_device *dev, ...@@ -1052,7 +1052,7 @@ static void i915_gem_record_rings(struct drm_device *dev,
erq = &error->ring[i].requests[count++]; erq = &error->ring[i].requests[count++];
erq->seqno = request->seqno; erq->seqno = request->seqno;
erq->jiffies = request->emitted_jiffies; erq->jiffies = request->emitted_jiffies;
erq->tail = request->tail; erq->tail = request->postfix;
} }
} }
} }
......
...@@ -593,7 +593,7 @@ static u32 i915_get_vblank_counter(struct drm_device *dev, int pipe) ...@@ -593,7 +593,7 @@ static u32 i915_get_vblank_counter(struct drm_device *dev, int pipe)
struct intel_crtc *intel_crtc = struct intel_crtc *intel_crtc =
to_intel_crtc(dev_priv->pipe_to_crtc_mapping[pipe]); to_intel_crtc(dev_priv->pipe_to_crtc_mapping[pipe]);
const struct drm_display_mode *mode = const struct drm_display_mode *mode =
&intel_crtc->config.adjusted_mode; &intel_crtc->config->base.adjusted_mode;
htotal = mode->crtc_htotal; htotal = mode->crtc_htotal;
hsync_start = mode->crtc_hsync_start; hsync_start = mode->crtc_hsync_start;
...@@ -664,7 +664,7 @@ static int __intel_get_crtc_scanline(struct intel_crtc *crtc) ...@@ -664,7 +664,7 @@ static int __intel_get_crtc_scanline(struct intel_crtc *crtc)
{ {
struct drm_device *dev = crtc->base.dev; struct drm_device *dev = crtc->base.dev;
struct drm_i915_private *dev_priv = dev->dev_private; struct drm_i915_private *dev_priv = dev->dev_private;
const struct drm_display_mode *mode = &crtc->config.adjusted_mode; const struct drm_display_mode *mode = &crtc->config->base.adjusted_mode;
enum pipe pipe = crtc->pipe; enum pipe pipe = crtc->pipe;
int position, vtotal; int position, vtotal;
...@@ -691,7 +691,7 @@ static int i915_get_crtc_scanoutpos(struct drm_device *dev, int pipe, ...@@ -691,7 +691,7 @@ static int i915_get_crtc_scanoutpos(struct drm_device *dev, int pipe,
struct drm_i915_private *dev_priv = dev->dev_private; struct drm_i915_private *dev_priv = dev->dev_private;
struct drm_crtc *crtc = dev_priv->pipe_to_crtc_mapping[pipe]; struct drm_crtc *crtc = dev_priv->pipe_to_crtc_mapping[pipe];
struct intel_crtc *intel_crtc = to_intel_crtc(crtc); struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
const struct drm_display_mode *mode = &intel_crtc->config.adjusted_mode; const struct drm_display_mode *mode = &intel_crtc->config->base.adjusted_mode;
int position; int position;
int vbl_start, vbl_end, hsync_start, htotal, vtotal; int vbl_start, vbl_end, hsync_start, htotal, vtotal;
bool in_vbl = true; bool in_vbl = true;
...@@ -849,7 +849,7 @@ static int i915_get_vblank_timestamp(struct drm_device *dev, int pipe, ...@@ -849,7 +849,7 @@ static int i915_get_vblank_timestamp(struct drm_device *dev, int pipe,
return drm_calc_vbltimestamp_from_scanoutpos(dev, pipe, max_error, return drm_calc_vbltimestamp_from_scanoutpos(dev, pipe, max_error,
vblank_time, flags, vblank_time, flags,
crtc, crtc,
&to_intel_crtc(crtc)->config.adjusted_mode); &to_intel_crtc(crtc)->config->base.adjusted_mode);
} }
static bool intel_hpd_irq_event(struct drm_device *dev, static bool intel_hpd_irq_event(struct drm_device *dev,
...@@ -879,7 +879,7 @@ static void i915_digport_work_func(struct work_struct *work) ...@@ -879,7 +879,7 @@ static void i915_digport_work_func(struct work_struct *work)
container_of(work, struct drm_i915_private, dig_port_work); container_of(work, struct drm_i915_private, dig_port_work);
u32 long_port_mask, short_port_mask; u32 long_port_mask, short_port_mask;
struct intel_digital_port *intel_dig_port; struct intel_digital_port *intel_dig_port;
int i, ret; int i;
u32 old_bits = 0; u32 old_bits = 0;
spin_lock_irq(&dev_priv->irq_lock); spin_lock_irq(&dev_priv->irq_lock);
...@@ -903,9 +903,11 @@ static void i915_digport_work_func(struct work_struct *work) ...@@ -903,9 +903,11 @@ static void i915_digport_work_func(struct work_struct *work)
valid = true; valid = true;
if (valid) { if (valid) {
enum irqreturn ret;
ret = intel_dig_port->hpd_pulse(intel_dig_port, long_hpd); ret = intel_dig_port->hpd_pulse(intel_dig_port, long_hpd);
if (ret == true) { if (ret == IRQ_NONE) {
/* if we get true fallback to old school hpd */ /* fall back to old school hpd */
old_bits |= (1 << intel_dig_port->base.hpd_pin); old_bits |= (1 << intel_dig_port->base.hpd_pin);
} }
} }
...@@ -2419,19 +2421,15 @@ static void i915_error_wake_up(struct drm_i915_private *dev_priv, ...@@ -2419,19 +2421,15 @@ static void i915_error_wake_up(struct drm_i915_private *dev_priv,
} }
/** /**
* i915_error_work_func - do process context error handling work * i915_reset_and_wakeup - do process context error handling work
* @work: work struct
* *
* Fire an error uevent so userspace can see that a hang or error * Fire an error uevent so userspace can see that a hang or error
* was detected. * was detected.
*/ */
static void i915_error_work_func(struct work_struct *work) static void i915_reset_and_wakeup(struct drm_device *dev)
{ {
struct i915_gpu_error *error = container_of(work, struct i915_gpu_error, struct drm_i915_private *dev_priv = to_i915(dev);
work); struct i915_gpu_error *error = &dev_priv->gpu_error;
struct drm_i915_private *dev_priv =
container_of(error, struct drm_i915_private, gpu_error);
struct drm_device *dev = dev_priv->dev;
char *error_event[] = { I915_ERROR_UEVENT "=1", NULL }; char *error_event[] = { I915_ERROR_UEVENT "=1", NULL };
char *reset_event[] = { I915_RESET_UEVENT "=1", NULL }; char *reset_event[] = { I915_RESET_UEVENT "=1", NULL };
char *reset_done_event[] = { I915_ERROR_UEVENT "=0", NULL }; char *reset_done_event[] = { I915_ERROR_UEVENT "=0", NULL };
...@@ -2598,10 +2596,10 @@ static void i915_report_and_clear_eir(struct drm_device *dev) ...@@ -2598,10 +2596,10 @@ static void i915_report_and_clear_eir(struct drm_device *dev)
} }
/** /**
* i915_handle_error - handle an error interrupt * i915_handle_error - handle a gpu error
* @dev: drm device * @dev: drm device
* *
* Do some basic checking of regsiter state at error interrupt time and * Do some basic checking of regsiter state at error time and
* dump it to the syslog. Also call i915_capture_error_state() to make * dump it to the syslog. Also call i915_capture_error_state() to make
* sure we get a record and make it available in debugfs. Fire a uevent * sure we get a record and make it available in debugfs. Fire a uevent
* so userspace knows something bad happened (should trigger collection * so userspace knows something bad happened (should trigger collection
...@@ -2626,9 +2624,9 @@ void i915_handle_error(struct drm_device *dev, bool wedged, ...@@ -2626,9 +2624,9 @@ void i915_handle_error(struct drm_device *dev, bool wedged,
&dev_priv->gpu_error.reset_counter); &dev_priv->gpu_error.reset_counter);
/* /*
* Wakeup waiting processes so that the reset work function * Wakeup waiting processes so that the reset function
* i915_error_work_func doesn't deadlock trying to grab various * i915_reset_and_wakeup doesn't deadlock trying to grab
* locks. By bumping the reset counter first, the woken * various locks. By bumping the reset counter first, the woken
* processes will see a reset in progress and back off, * processes will see a reset in progress and back off,
* releasing their locks and then wait for the reset completion. * releasing their locks and then wait for the reset completion.
* We must do this for _all_ gpu waiters that might hold locks * We must do this for _all_ gpu waiters that might hold locks
...@@ -2641,13 +2639,7 @@ void i915_handle_error(struct drm_device *dev, bool wedged, ...@@ -2641,13 +2639,7 @@ void i915_handle_error(struct drm_device *dev, bool wedged,
i915_error_wake_up(dev_priv, false); i915_error_wake_up(dev_priv, false);
} }
/* i915_reset_and_wakeup(dev);
* Our reset work can grab modeset locks (since it needs to reset the
* state of outstanding pagelips). Hence it must not be run on our own
* dev-priv->wq work queue for otherwise the flush_work in the pageflip
* code will deadlock.
*/
schedule_work(&dev_priv->gpu_error.work);
} }
/* Called from drm generic code, passed 'crtc' which /* Called from drm generic code, passed 'crtc' which
...@@ -2972,7 +2964,7 @@ ring_stuck(struct intel_engine_cs *ring, u64 acthd) ...@@ -2972,7 +2964,7 @@ ring_stuck(struct intel_engine_cs *ring, u64 acthd)
return HANGCHECK_HUNG; return HANGCHECK_HUNG;
} }
/** /*
* This is called when the chip hasn't reported back with completed * This is called when the chip hasn't reported back with completed
* batchbuffers in a long time. We keep track per ring seqno progress and * batchbuffers in a long time. We keep track per ring seqno progress and
* if there are no progress, hangcheck score for that ring is increased. * if there are no progress, hangcheck score for that ring is increased.
...@@ -2980,10 +2972,12 @@ ring_stuck(struct intel_engine_cs *ring, u64 acthd) ...@@ -2980,10 +2972,12 @@ ring_stuck(struct intel_engine_cs *ring, u64 acthd)
* we kick the ring. If we see no progress on three subsequent calls * we kick the ring. If we see no progress on three subsequent calls
* we assume chip is wedged and try to fix it by resetting the chip. * we assume chip is wedged and try to fix it by resetting the chip.
*/ */
static void i915_hangcheck_elapsed(unsigned long data) static void i915_hangcheck_elapsed(struct work_struct *work)
{ {
struct drm_device *dev = (struct drm_device *)data; struct drm_i915_private *dev_priv =
struct drm_i915_private *dev_priv = dev->dev_private; container_of(work, typeof(*dev_priv),
gpu_error.hangcheck_work.work);
struct drm_device *dev = dev_priv->dev;
struct intel_engine_cs *ring; struct intel_engine_cs *ring;
int i; int i;
int busy_count = 0, rings_hung = 0; int busy_count = 0, rings_hung = 0;
...@@ -3097,17 +3091,18 @@ static void i915_hangcheck_elapsed(unsigned long data) ...@@ -3097,17 +3091,18 @@ static void i915_hangcheck_elapsed(unsigned long data)
void i915_queue_hangcheck(struct drm_device *dev) void i915_queue_hangcheck(struct drm_device *dev)
{ {
struct drm_i915_private *dev_priv = dev->dev_private; struct i915_gpu_error *e = &to_i915(dev)->gpu_error;
struct timer_list *timer = &dev_priv->gpu_error.hangcheck_timer;
if (!i915.enable_hangcheck) if (!i915.enable_hangcheck)
return; return;
/* Don't continually defer the hangcheck, but make sure it is active */ /* Don't continually defer the hangcheck so that it is always run at
if (timer_pending(timer)) * least once after work has been scheduled on any ring. Otherwise,
return; * we will ignore a hung ring if a second ring is kept busy.
mod_timer(timer, */
round_jiffies_up(jiffies + DRM_I915_HANGCHECK_JIFFIES));
queue_delayed_work(e->hangcheck_wq, &e->hangcheck_work,
round_jiffies_up_relative(DRM_I915_HANGCHECK_JIFFIES));
} }
static void ibx_irq_reset(struct drm_device *dev) static void ibx_irq_reset(struct drm_device *dev)
...@@ -4340,7 +4335,6 @@ void intel_irq_init(struct drm_i915_private *dev_priv) ...@@ -4340,7 +4335,6 @@ void intel_irq_init(struct drm_i915_private *dev_priv)
INIT_WORK(&dev_priv->hotplug_work, i915_hotplug_work_func); INIT_WORK(&dev_priv->hotplug_work, i915_hotplug_work_func);
INIT_WORK(&dev_priv->dig_port_work, i915_digport_work_func); INIT_WORK(&dev_priv->dig_port_work, i915_digport_work_func);
INIT_WORK(&dev_priv->gpu_error.work, i915_error_work_func);
INIT_WORK(&dev_priv->rps.work, gen6_pm_rps_work); INIT_WORK(&dev_priv->rps.work, gen6_pm_rps_work);
INIT_WORK(&dev_priv->l3_parity.error_work, ivybridge_parity_work); INIT_WORK(&dev_priv->l3_parity.error_work, ivybridge_parity_work);
...@@ -4351,9 +4345,8 @@ void intel_irq_init(struct drm_i915_private *dev_priv) ...@@ -4351,9 +4345,8 @@ void intel_irq_init(struct drm_i915_private *dev_priv)
else else
dev_priv->pm_rps_events = GEN6_PM_RPS_EVENTS; dev_priv->pm_rps_events = GEN6_PM_RPS_EVENTS;
setup_timer(&dev_priv->gpu_error.hangcheck_timer, INIT_DELAYED_WORK(&dev_priv->gpu_error.hangcheck_work,
i915_hangcheck_elapsed, i915_hangcheck_elapsed);
(unsigned long) dev);
INIT_DELAYED_WORK(&dev_priv->hotplug_reenable_work, INIT_DELAYED_WORK(&dev_priv->hotplug_reenable_work,
intel_hpd_irq_reenable_work); intel_hpd_irq_reenable_work);
......
...@@ -52,6 +52,7 @@ struct i915_params i915 __read_mostly = { ...@@ -52,6 +52,7 @@ struct i915_params i915 __read_mostly = {
.use_mmio_flip = 0, .use_mmio_flip = 0,
.mmio_debug = 0, .mmio_debug = 0,
.verbose_state_checks = 1, .verbose_state_checks = 1,
.nuclear_pageflip = 0,
}; };
module_param_named(modeset, i915.modeset, int, 0400); module_param_named(modeset, i915.modeset, int, 0400);
...@@ -178,3 +179,7 @@ MODULE_PARM_DESC(mmio_debug, ...@@ -178,3 +179,7 @@ MODULE_PARM_DESC(mmio_debug,
module_param_named(verbose_state_checks, i915.verbose_state_checks, bool, 0600); module_param_named(verbose_state_checks, i915.verbose_state_checks, bool, 0600);
MODULE_PARM_DESC(verbose_state_checks, MODULE_PARM_DESC(verbose_state_checks,
"Enable verbose logs (ie. WARN_ON()) in case of unexpected hw state conditions."); "Enable verbose logs (ie. WARN_ON()) in case of unexpected hw state conditions.");
module_param_named_unsafe(nuclear_pageflip, i915.nuclear_pageflip, bool, 0600);
MODULE_PARM_DESC(nuclear_pageflip,
"Force atomic modeset functionality; only planes work for now (default: false).");
...@@ -605,6 +605,15 @@ enum punit_power_well { ...@@ -605,6 +605,15 @@ enum punit_power_well {
#define PUNIT_FUSE_BUS2 0xf6 /* bits 47:40 */ #define PUNIT_FUSE_BUS2 0xf6 /* bits 47:40 */
#define PUNIT_FUSE_BUS1 0xf5 /* bits 55:48 */ #define PUNIT_FUSE_BUS1 0xf5 /* bits 55:48 */
#define FB_GFX_FMAX_AT_VMAX_FUSE 0x136
#define FB_GFX_FREQ_FUSE_MASK 0xff
#define FB_GFX_FMAX_AT_VMAX_2SS4EU_FUSE_SHIFT 24
#define FB_GFX_FMAX_AT_VMAX_2SS6EU_FUSE_SHIFT 16
#define FB_GFX_FMAX_AT_VMAX_2SS8EU_FUSE_SHIFT 8
#define FB_GFX_FMIN_AT_VMIN_FUSE 0x137
#define FB_GFX_FMIN_AT_VMIN_FUSE_SHIFT 8
#define PUNIT_GPU_STATUS_REG 0xdb #define PUNIT_GPU_STATUS_REG 0xdb
#define PUNIT_GPU_STATUS_MAX_FREQ_SHIFT 16 #define PUNIT_GPU_STATUS_MAX_FREQ_SHIFT 16
#define PUNIT_GPU_STATUS_MAX_FREQ_MASK 0xff #define PUNIT_GPU_STATUS_MAX_FREQ_MASK 0xff
...@@ -1471,6 +1480,17 @@ enum punit_power_well { ...@@ -1471,6 +1480,17 @@ enum punit_power_well {
#define GEN8_RC_SEMA_IDLE_MSG_DISABLE (1 << 12) #define GEN8_RC_SEMA_IDLE_MSG_DISABLE (1 << 12)
#define GEN8_FF_DOP_CLOCK_GATE_DISABLE (1<<10) #define GEN8_FF_DOP_CLOCK_GATE_DISABLE (1<<10)
/* Fuse readout registers for GT */
#define CHV_FUSE_GT (VLV_DISPLAY_BASE + 0x2168)
#define CHV_FGT_EU_DIS_SS0_R0_SHIFT 16
#define CHV_FGT_EU_DIS_SS0_R0_MASK (0xf << CHV_FGT_EU_DIS_SS0_R0_SHIFT)
#define CHV_FGT_EU_DIS_SS0_R1_SHIFT 20
#define CHV_FGT_EU_DIS_SS0_R1_MASK (0xf << CHV_FGT_EU_DIS_SS0_R1_SHIFT)
#define CHV_FGT_EU_DIS_SS1_R0_SHIFT 24
#define CHV_FGT_EU_DIS_SS1_R0_MASK (0xf << CHV_FGT_EU_DIS_SS1_R0_SHIFT)
#define CHV_FGT_EU_DIS_SS1_R1_SHIFT 28
#define CHV_FGT_EU_DIS_SS1_R1_MASK (0xf << CHV_FGT_EU_DIS_SS1_R1_SHIFT)
#define GEN6_BSD_SLEEP_PSMI_CONTROL 0x12050 #define GEN6_BSD_SLEEP_PSMI_CONTROL 0x12050
#define GEN6_BSD_SLEEP_MSG_DISABLE (1 << 0) #define GEN6_BSD_SLEEP_MSG_DISABLE (1 << 0)
#define GEN6_BSD_SLEEP_FLUSH_DISABLE (1 << 2) #define GEN6_BSD_SLEEP_FLUSH_DISABLE (1 << 2)
...@@ -3748,6 +3768,11 @@ enum punit_power_well { ...@@ -3748,6 +3768,11 @@ enum punit_power_well {
#define DP_AUX_CH_CTL_PRECHARGE_TEST (1 << 11) #define DP_AUX_CH_CTL_PRECHARGE_TEST (1 << 11)
#define DP_AUX_CH_CTL_BIT_CLOCK_2X_MASK (0x7ff) #define DP_AUX_CH_CTL_BIT_CLOCK_2X_MASK (0x7ff)
#define DP_AUX_CH_CTL_BIT_CLOCK_2X_SHIFT 0 #define DP_AUX_CH_CTL_BIT_CLOCK_2X_SHIFT 0
#define DP_AUX_CH_CTL_PSR_DATA_AUX_REG_SKL (1 << 14)
#define DP_AUX_CH_CTL_FS_DATA_AUX_REG_SKL (1 << 13)
#define DP_AUX_CH_CTL_GTC_DATA_AUX_REG_SKL (1 << 12)
#define DP_AUX_CH_CTL_FW_SYNC_PULSE_SKL_MASK (1f << 5)
#define DP_AUX_CH_CTL_FW_SYNC_PULSE_SKL(c) (((c) - 1) << 5)
#define DP_AUX_CH_CTL_SYNC_PULSE_SKL(c) ((c) - 1) #define DP_AUX_CH_CTL_SYNC_PULSE_SKL(c) ((c) - 1)
/* /*
...@@ -6052,6 +6077,9 @@ enum punit_power_well { ...@@ -6052,6 +6077,9 @@ enum punit_power_well {
#define GEN6_PMINTRMSK 0xA168 #define GEN6_PMINTRMSK 0xA168
#define GEN8_PMINTR_REDIRECT_TO_NON_DISP (1<<31) #define GEN8_PMINTR_REDIRECT_TO_NON_DISP (1<<31)
#define VLV_PWRDWNUPCTL 0xA294 #define VLV_PWRDWNUPCTL 0xA294
#define GEN9_MEDIA_PG_IDLE_HYSTERESIS 0xA0C4
#define GEN9_RENDER_PG_IDLE_HYSTERESIS 0xA0C8
#define GEN9_PG_ENABLE 0xA210
#define VLV_CHICKEN_3 (VLV_DISPLAY_BASE + 0x7040C) #define VLV_CHICKEN_3 (VLV_DISPLAY_BASE + 0x7040C)
#define PIXEL_OVERLAP_CNT_MASK (3 << 30) #define PIXEL_OVERLAP_CNT_MASK (3 << 30)
......
...@@ -49,14 +49,14 @@ static u32 calc_residency(struct drm_device *dev, const u32 reg) ...@@ -49,14 +49,14 @@ static u32 calc_residency(struct drm_device *dev, const u32 reg)
/* On VLV and CHV, residency time is in CZ units rather than 1.28us */ /* On VLV and CHV, residency time is in CZ units rather than 1.28us */
if (IS_VALLEYVIEW(dev)) { if (IS_VALLEYVIEW(dev)) {
u32 reg, czcount_30ns; u32 clk_reg, czcount_30ns;
if (IS_CHERRYVIEW(dev)) if (IS_CHERRYVIEW(dev))
reg = CHV_CLK_CTL1; clk_reg = CHV_CLK_CTL1;
else else
reg = VLV_CLK_CTL2; clk_reg = VLV_CLK_CTL2;
czcount_30ns = I915_READ(reg) >> CLK_CTL2_CZCOUNT_30NS_SHIFT; czcount_30ns = I915_READ(clk_reg) >> CLK_CTL2_CZCOUNT_30NS_SHIFT;
if (!czcount_30ns) { if (!czcount_30ns) {
WARN(!czcount_30ns, "bogus CZ count value"); WARN(!czcount_30ns, "bogus CZ count value");
...@@ -116,8 +116,6 @@ show_rc6p_ms(struct device *kdev, struct device_attribute *attr, char *buf) ...@@ -116,8 +116,6 @@ show_rc6p_ms(struct device *kdev, struct device_attribute *attr, char *buf)
{ {
struct drm_minor *dminor = dev_to_drm_minor(kdev); struct drm_minor *dminor = dev_to_drm_minor(kdev);
u32 rc6p_residency = calc_residency(dminor->dev, GEN6_GT_GFX_RC6p); u32 rc6p_residency = calc_residency(dminor->dev, GEN6_GT_GFX_RC6p);
if (IS_VALLEYVIEW(dminor->dev))
rc6p_residency = 0;
return snprintf(buf, PAGE_SIZE, "%u\n", rc6p_residency); return snprintf(buf, PAGE_SIZE, "%u\n", rc6p_residency);
} }
...@@ -126,8 +124,6 @@ show_rc6pp_ms(struct device *kdev, struct device_attribute *attr, char *buf) ...@@ -126,8 +124,6 @@ show_rc6pp_ms(struct device *kdev, struct device_attribute *attr, char *buf)
{ {
struct drm_minor *dminor = dev_to_drm_minor(kdev); struct drm_minor *dminor = dev_to_drm_minor(kdev);
u32 rc6pp_residency = calc_residency(dminor->dev, GEN6_GT_GFX_RC6pp); u32 rc6pp_residency = calc_residency(dminor->dev, GEN6_GT_GFX_RC6pp);
if (IS_VALLEYVIEW(dminor->dev))
rc6pp_residency = 0;
return snprintf(buf, PAGE_SIZE, "%u\n", rc6pp_residency); return snprintf(buf, PAGE_SIZE, "%u\n", rc6pp_residency);
} }
...@@ -285,7 +281,7 @@ static struct bin_attribute dpf_attrs_1 = { ...@@ -285,7 +281,7 @@ static struct bin_attribute dpf_attrs_1 = {
.private = (void *)1 .private = (void *)1
}; };
static ssize_t gt_cur_freq_mhz_show(struct device *kdev, static ssize_t gt_act_freq_mhz_show(struct device *kdev,
struct device_attribute *attr, char *buf) struct device_attribute *attr, char *buf)
{ {
struct drm_minor *minor = dev_to_drm_minor(kdev); struct drm_minor *minor = dev_to_drm_minor(kdev);
...@@ -301,9 +297,14 @@ static ssize_t gt_cur_freq_mhz_show(struct device *kdev, ...@@ -301,9 +297,14 @@ static ssize_t gt_cur_freq_mhz_show(struct device *kdev,
if (IS_VALLEYVIEW(dev_priv->dev)) { if (IS_VALLEYVIEW(dev_priv->dev)) {
u32 freq; u32 freq;
freq = vlv_punit_read(dev_priv, PUNIT_REG_GPU_FREQ_STS); freq = vlv_punit_read(dev_priv, PUNIT_REG_GPU_FREQ_STS);
ret = vlv_gpu_freq(dev_priv, (freq >> 8) & 0xff); ret = intel_gpu_freq(dev_priv, (freq >> 8) & 0xff);
} else { } else {
ret = dev_priv->rps.cur_freq * GT_FREQUENCY_MULTIPLIER; u32 rpstat = I915_READ(GEN6_RPSTAT1);
if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv))
ret = (rpstat & HSW_CAGF_MASK) >> HSW_CAGF_SHIFT;
else
ret = (rpstat & GEN6_CAGF_MASK) >> GEN6_CAGF_SHIFT;
ret = intel_gpu_freq(dev_priv, ret);
} }
mutex_unlock(&dev_priv->rps.hw_lock); mutex_unlock(&dev_priv->rps.hw_lock);
...@@ -312,6 +313,27 @@ static ssize_t gt_cur_freq_mhz_show(struct device *kdev, ...@@ -312,6 +313,27 @@ static ssize_t gt_cur_freq_mhz_show(struct device *kdev,
return snprintf(buf, PAGE_SIZE, "%d\n", ret); return snprintf(buf, PAGE_SIZE, "%d\n", ret);
} }
static ssize_t gt_cur_freq_mhz_show(struct device *kdev,
struct device_attribute *attr, char *buf)
{
struct drm_minor *minor = dev_to_drm_minor(kdev);
struct drm_device *dev = minor->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
int ret;
flush_delayed_work(&dev_priv->rps.delayed_resume_work);
intel_runtime_pm_get(dev_priv);
mutex_lock(&dev_priv->rps.hw_lock);
ret = intel_gpu_freq(dev_priv, dev_priv->rps.cur_freq);
mutex_unlock(&dev_priv->rps.hw_lock);
intel_runtime_pm_put(dev_priv);
return snprintf(buf, PAGE_SIZE, "%d\n", ret);
}
static ssize_t vlv_rpe_freq_mhz_show(struct device *kdev, static ssize_t vlv_rpe_freq_mhz_show(struct device *kdev,
struct device_attribute *attr, char *buf) struct device_attribute *attr, char *buf)
{ {
...@@ -319,8 +341,9 @@ static ssize_t vlv_rpe_freq_mhz_show(struct device *kdev, ...@@ -319,8 +341,9 @@ static ssize_t vlv_rpe_freq_mhz_show(struct device *kdev,
struct drm_device *dev = minor->dev; struct drm_device *dev = minor->dev;
struct drm_i915_private *dev_priv = dev->dev_private; struct drm_i915_private *dev_priv = dev->dev_private;
return snprintf(buf, PAGE_SIZE, "%d\n", return snprintf(buf, PAGE_SIZE,
vlv_gpu_freq(dev_priv, dev_priv->rps.efficient_freq)); "%d\n",
intel_gpu_freq(dev_priv, dev_priv->rps.efficient_freq));
} }
static ssize_t gt_max_freq_mhz_show(struct device *kdev, struct device_attribute *attr, char *buf) static ssize_t gt_max_freq_mhz_show(struct device *kdev, struct device_attribute *attr, char *buf)
...@@ -333,10 +356,7 @@ static ssize_t gt_max_freq_mhz_show(struct device *kdev, struct device_attribute ...@@ -333,10 +356,7 @@ static ssize_t gt_max_freq_mhz_show(struct device *kdev, struct device_attribute
flush_delayed_work(&dev_priv->rps.delayed_resume_work); flush_delayed_work(&dev_priv->rps.delayed_resume_work);
mutex_lock(&dev_priv->rps.hw_lock); mutex_lock(&dev_priv->rps.hw_lock);
if (IS_VALLEYVIEW(dev_priv->dev)) ret = intel_gpu_freq(dev_priv, dev_priv->rps.max_freq_softlimit);
ret = vlv_gpu_freq(dev_priv, dev_priv->rps.max_freq_softlimit);
else
ret = dev_priv->rps.max_freq_softlimit * GT_FREQUENCY_MULTIPLIER;
mutex_unlock(&dev_priv->rps.hw_lock); mutex_unlock(&dev_priv->rps.hw_lock);
return snprintf(buf, PAGE_SIZE, "%d\n", ret); return snprintf(buf, PAGE_SIZE, "%d\n", ret);
...@@ -360,10 +380,7 @@ static ssize_t gt_max_freq_mhz_store(struct device *kdev, ...@@ -360,10 +380,7 @@ static ssize_t gt_max_freq_mhz_store(struct device *kdev,
mutex_lock(&dev_priv->rps.hw_lock); mutex_lock(&dev_priv->rps.hw_lock);
if (IS_VALLEYVIEW(dev_priv->dev)) val = intel_freq_opcode(dev_priv, val);
val = vlv_freq_opcode(dev_priv, val);
else
val /= GT_FREQUENCY_MULTIPLIER;
if (val < dev_priv->rps.min_freq || if (val < dev_priv->rps.min_freq ||
val > dev_priv->rps.max_freq || val > dev_priv->rps.max_freq ||
...@@ -374,21 +391,21 @@ static ssize_t gt_max_freq_mhz_store(struct device *kdev, ...@@ -374,21 +391,21 @@ static ssize_t gt_max_freq_mhz_store(struct device *kdev,
if (val > dev_priv->rps.rp0_freq) if (val > dev_priv->rps.rp0_freq)
DRM_DEBUG("User requested overclocking to %d\n", DRM_DEBUG("User requested overclocking to %d\n",
val * GT_FREQUENCY_MULTIPLIER); intel_gpu_freq(dev_priv, val));
dev_priv->rps.max_freq_softlimit = val; dev_priv->rps.max_freq_softlimit = val;
if (dev_priv->rps.cur_freq > val) { val = clamp_t(int, dev_priv->rps.cur_freq,
if (IS_VALLEYVIEW(dev)) dev_priv->rps.min_freq_softlimit,
valleyview_set_rps(dev, val); dev_priv->rps.max_freq_softlimit);
else
gen6_set_rps(dev, val); /* We still need *_set_rps to process the new max_delay and
} else if (!IS_VALLEYVIEW(dev)) { * update the interrupt limits and PMINTRMSK even though
/* We still need gen6_set_rps to process the new max_delay and * frequency request may be unchanged. */
* update the interrupt limits even though frequency request is if (IS_VALLEYVIEW(dev))
* unchanged. */ valleyview_set_rps(dev, val);
gen6_set_rps(dev, dev_priv->rps.cur_freq); else
} gen6_set_rps(dev, val);
mutex_unlock(&dev_priv->rps.hw_lock); mutex_unlock(&dev_priv->rps.hw_lock);
...@@ -405,10 +422,7 @@ static ssize_t gt_min_freq_mhz_show(struct device *kdev, struct device_attribute ...@@ -405,10 +422,7 @@ static ssize_t gt_min_freq_mhz_show(struct device *kdev, struct device_attribute
flush_delayed_work(&dev_priv->rps.delayed_resume_work); flush_delayed_work(&dev_priv->rps.delayed_resume_work);
mutex_lock(&dev_priv->rps.hw_lock); mutex_lock(&dev_priv->rps.hw_lock);
if (IS_VALLEYVIEW(dev_priv->dev)) ret = intel_gpu_freq(dev_priv, dev_priv->rps.min_freq_softlimit);
ret = vlv_gpu_freq(dev_priv, dev_priv->rps.min_freq_softlimit);
else
ret = dev_priv->rps.min_freq_softlimit * GT_FREQUENCY_MULTIPLIER;
mutex_unlock(&dev_priv->rps.hw_lock); mutex_unlock(&dev_priv->rps.hw_lock);
return snprintf(buf, PAGE_SIZE, "%d\n", ret); return snprintf(buf, PAGE_SIZE, "%d\n", ret);
...@@ -432,10 +446,7 @@ static ssize_t gt_min_freq_mhz_store(struct device *kdev, ...@@ -432,10 +446,7 @@ static ssize_t gt_min_freq_mhz_store(struct device *kdev,
mutex_lock(&dev_priv->rps.hw_lock); mutex_lock(&dev_priv->rps.hw_lock);
if (IS_VALLEYVIEW(dev)) val = intel_freq_opcode(dev_priv, val);
val = vlv_freq_opcode(dev_priv, val);
else
val /= GT_FREQUENCY_MULTIPLIER;
if (val < dev_priv->rps.min_freq || if (val < dev_priv->rps.min_freq ||
val > dev_priv->rps.max_freq || val > dev_priv->rps.max_freq ||
...@@ -446,17 +457,17 @@ static ssize_t gt_min_freq_mhz_store(struct device *kdev, ...@@ -446,17 +457,17 @@ static ssize_t gt_min_freq_mhz_store(struct device *kdev,
dev_priv->rps.min_freq_softlimit = val; dev_priv->rps.min_freq_softlimit = val;
if (dev_priv->rps.cur_freq < val) { val = clamp_t(int, dev_priv->rps.cur_freq,
if (IS_VALLEYVIEW(dev)) dev_priv->rps.min_freq_softlimit,
valleyview_set_rps(dev, val); dev_priv->rps.max_freq_softlimit);
else
gen6_set_rps(dev, val); /* We still need *_set_rps to process the new min_delay and
} else if (!IS_VALLEYVIEW(dev)) { * update the interrupt limits and PMINTRMSK even though
/* We still need gen6_set_rps to process the new min_delay and * frequency request may be unchanged. */
* update the interrupt limits even though frequency request is if (IS_VALLEYVIEW(dev))
* unchanged. */ valleyview_set_rps(dev, val);
gen6_set_rps(dev, dev_priv->rps.cur_freq); else
} gen6_set_rps(dev, val);
mutex_unlock(&dev_priv->rps.hw_lock); mutex_unlock(&dev_priv->rps.hw_lock);
...@@ -464,6 +475,7 @@ static ssize_t gt_min_freq_mhz_store(struct device *kdev, ...@@ -464,6 +475,7 @@ static ssize_t gt_min_freq_mhz_store(struct device *kdev,
} }
static DEVICE_ATTR(gt_act_freq_mhz, S_IRUGO, gt_act_freq_mhz_show, NULL);
static DEVICE_ATTR(gt_cur_freq_mhz, S_IRUGO, gt_cur_freq_mhz_show, NULL); static DEVICE_ATTR(gt_cur_freq_mhz, S_IRUGO, gt_cur_freq_mhz_show, NULL);
static DEVICE_ATTR(gt_max_freq_mhz, S_IRUGO | S_IWUSR, gt_max_freq_mhz_show, gt_max_freq_mhz_store); static DEVICE_ATTR(gt_max_freq_mhz, S_IRUGO | S_IWUSR, gt_max_freq_mhz_show, gt_max_freq_mhz_store);
static DEVICE_ATTR(gt_min_freq_mhz, S_IRUGO | S_IWUSR, gt_min_freq_mhz_show, gt_min_freq_mhz_store); static DEVICE_ATTR(gt_min_freq_mhz, S_IRUGO | S_IWUSR, gt_min_freq_mhz_show, gt_min_freq_mhz_store);
...@@ -494,19 +506,22 @@ static ssize_t gt_rp_mhz_show(struct device *kdev, struct device_attribute *attr ...@@ -494,19 +506,22 @@ static ssize_t gt_rp_mhz_show(struct device *kdev, struct device_attribute *attr
if (attr == &dev_attr_gt_RP0_freq_mhz) { if (attr == &dev_attr_gt_RP0_freq_mhz) {
if (IS_VALLEYVIEW(dev)) if (IS_VALLEYVIEW(dev))
val = vlv_gpu_freq(dev_priv, dev_priv->rps.rp0_freq); val = intel_gpu_freq(dev_priv, dev_priv->rps.rp0_freq);
else else
val = ((rp_state_cap & 0x0000ff) >> 0) * GT_FREQUENCY_MULTIPLIER; val = intel_gpu_freq(dev_priv,
((rp_state_cap & 0x0000ff) >> 0));
} else if (attr == &dev_attr_gt_RP1_freq_mhz) { } else if (attr == &dev_attr_gt_RP1_freq_mhz) {
if (IS_VALLEYVIEW(dev)) if (IS_VALLEYVIEW(dev))
val = vlv_gpu_freq(dev_priv, dev_priv->rps.rp1_freq); val = intel_gpu_freq(dev_priv, dev_priv->rps.rp1_freq);
else else
val = ((rp_state_cap & 0x00ff00) >> 8) * GT_FREQUENCY_MULTIPLIER; val = intel_gpu_freq(dev_priv,
((rp_state_cap & 0x00ff00) >> 8));
} else if (attr == &dev_attr_gt_RPn_freq_mhz) { } else if (attr == &dev_attr_gt_RPn_freq_mhz) {
if (IS_VALLEYVIEW(dev)) if (IS_VALLEYVIEW(dev))
val = vlv_gpu_freq(dev_priv, dev_priv->rps.min_freq); val = intel_gpu_freq(dev_priv, dev_priv->rps.min_freq);
else else
val = ((rp_state_cap & 0xff0000) >> 16) * GT_FREQUENCY_MULTIPLIER; val = intel_gpu_freq(dev_priv,
((rp_state_cap & 0xff0000) >> 16));
} else { } else {
BUG(); BUG();
} }
...@@ -514,6 +529,7 @@ static ssize_t gt_rp_mhz_show(struct device *kdev, struct device_attribute *attr ...@@ -514,6 +529,7 @@ static ssize_t gt_rp_mhz_show(struct device *kdev, struct device_attribute *attr
} }
static const struct attribute *gen6_attrs[] = { static const struct attribute *gen6_attrs[] = {
&dev_attr_gt_act_freq_mhz.attr,
&dev_attr_gt_cur_freq_mhz.attr, &dev_attr_gt_cur_freq_mhz.attr,
&dev_attr_gt_max_freq_mhz.attr, &dev_attr_gt_max_freq_mhz.attr,
&dev_attr_gt_min_freq_mhz.attr, &dev_attr_gt_min_freq_mhz.attr,
...@@ -524,6 +540,7 @@ static const struct attribute *gen6_attrs[] = { ...@@ -524,6 +540,7 @@ static const struct attribute *gen6_attrs[] = {
}; };
static const struct attribute *vlv_attrs[] = { static const struct attribute *vlv_attrs[] = {
&dev_attr_gt_act_freq_mhz.attr,
&dev_attr_gt_cur_freq_mhz.attr, &dev_attr_gt_cur_freq_mhz.attr,
&dev_attr_gt_max_freq_mhz.attr, &dev_attr_gt_max_freq_mhz.attr,
&dev_attr_gt_min_freq_mhz.attr, &dev_attr_gt_min_freq_mhz.attr,
......
/*
* Copyright © 2015 Intel Corporation
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice (including the next
* paragraph) shall be included in all copies or substantial portions of the
* Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*/
/**
* DOC: atomic modeset support
*
* The functions here implement the state management and hardware programming
* dispatch required by the atomic modeset infrastructure.
* See intel_atomic_plane.c for the plane-specific atomic functionality.
*/
#include <drm/drmP.h>
#include <drm/drm_atomic.h>
#include <drm/drm_atomic_helper.h>
#include <drm/drm_plane_helper.h>
#include "intel_drv.h"
/**
* intel_atomic_check - validate state object
* @dev: drm device
* @state: state to validate
*/
/**
 * intel_atomic_check - validate state object
 * @dev: drm device
 * @state: state to validate
 *
 * FIXME: "Nuclear pageflip" is currently limited to plane updates on a
 * single CRTC; any state that touches CRTCs or connectors is rejected.
 *
 * Returns: 0 on success, -EINVAL if the state spans multiple pipes or
 * contains crtc/connector updates, or the error code from the plane
 * checks.
 */
int intel_atomic_check(struct drm_device *dev,
		       struct drm_atomic_state *state)
{
	int nplanes = dev->mode_config.num_total_plane;
	int ncrtcs = dev->mode_config.num_crtc;
	int nconnectors = dev->mode_config.num_connector;
	enum pipe nuclear_pipe = INVALID_PIPE;
	int i;
	bool not_nuclear = false;

	/*
	 * FIXME: At the moment, we only support "nuclear pageflip" on a
	 * single CRTC.  Cross-crtc updates will be added later.
	 */
	for (i = 0; i < nplanes; i++) {
		struct intel_plane *plane = to_intel_plane(state->planes[i]);
		if (!plane)
			continue;

		if (nuclear_pipe == INVALID_PIPE) {
			nuclear_pipe = plane->pipe;
		} else if (nuclear_pipe != plane->pipe) {
			DRM_DEBUG_KMS("i915 only support atomic plane operations on a single CRTC at the moment\n");
			return -EINVAL;
		}
	}

	/*
	 * FIXME: We only handle planes for now; make sure there are no CRTC's
	 * or connectors involved.
	 */
	state->allow_modeset = false;
	for (i = 0; i < ncrtcs; i++) {
		struct intel_crtc *crtc = to_intel_crtc(state->crtcs[i]);
		if (crtc && crtc->pipe != nuclear_pipe)
			not_nuclear = true;
	}
	for (i = 0; i < nconnectors; i++)
		if (state->connectors[i] != NULL)
			not_nuclear = true;

	if (not_nuclear) {
		DRM_DEBUG_KMS("i915 only supports atomic plane operations at the moment\n");
		return -EINVAL;
	}

	/*
	 * Delegate per-plane validation to the atomic helpers and return
	 * their verdict directly (the original redundantly tested the
	 * result before returning the very same value).
	 */
	return drm_atomic_helper_check_planes(dev, state);
}
/**
 * intel_atomic_commit - commit validated state object
 * @dev: DRM device
 * @state: the top-level driver state object
 * @async: asynchronous commit
 *
 * This function commits a top-level state object that has been validated
 * with drm_atomic_helper_check().
 *
 * FIXME: Atomic modeset support for i915 is not yet complete.  At the
 * moment we can only handle plane-related operations and do not yet
 * support asynchronous commit.
 *
 * RETURNS
 * Zero for success or -errno.
 */
int intel_atomic_commit(struct drm_device *dev,
			struct drm_atomic_state *state,
			bool async)
{
	int nplanes = dev->mode_config.num_total_plane;
	int err;
	int idx;

	if (async) {
		DRM_DEBUG_KMS("i915 does not yet support async commit\n");
		return -EINVAL;
	}

	err = drm_atomic_helper_prepare_planes(dev, state);
	if (err)
		return err;

	/* Point of no return */

	/*
	 * FIXME: The proper sequence here will eventually be:
	 *
	 * drm_atomic_helper_swap_state(dev, state)
	 * drm_atomic_helper_commit_pre_planes(dev, state);
	 * drm_atomic_helper_commit_planes(dev, state);
	 * drm_atomic_helper_commit_post_planes(dev, state);
	 * drm_atomic_helper_wait_for_vblanks(dev, state);
	 * drm_atomic_helper_cleanup_planes(dev, state);
	 * drm_atomic_state_free(state);
	 *
	 * once we have full atomic modeset.  For now, swap only the plane
	 * states by hand so good states are not clobbered with dummy states
	 * while nuclear pageflipping.
	 */
	for (idx = 0; idx < nplanes; idx++) {
		struct drm_plane *plane = state->planes[idx];

		if (!plane)
			continue;

		/* Briefly link the old state to @state so swap() leaves both
		 * plane->state and the state array self-consistent. */
		plane->state->state = state;
		swap(state->plane_states[idx], plane->state);
		plane->state->state = NULL;
	}
	drm_atomic_helper_commit_planes(dev, state);
	drm_atomic_helper_wait_for_vblanks(dev, state);
	drm_atomic_helper_cleanup_planes(dev, state);
	drm_atomic_state_free(state);

	return 0;
}
/**
 * intel_connector_atomic_get_property - fetch connector property value
 * @connector: connector to fetch property for
 * @state: state containing the property value
 * @property: property to look up
 * @val: pointer to write property value into
 *
 * The DRM core does not store shadow copies of properties for
 * atomic-capable drivers.  This entrypoint is used to fetch
 * the current value of a driver-specific connector property.
 *
 * Returns: 0 and fills @val on a match, -EINVAL if @property is unknown.
 */
int
intel_connector_atomic_get_property(struct drm_connector *connector,
				    const struct drm_connector_state *state,
				    struct drm_property *property,
				    uint64_t *val)
{
	int idx;

	/*
	 * TODO: We only have atomic modeset for planes at the moment, so the
	 * crtc/connector code isn't quite ready yet.  Until it's ready,
	 * continue to look up all property values in the DRM's shadow copy
	 * in obj->properties->values[].
	 *
	 * When the crtc/connector state work matures, this function should
	 * be updated to read the values out of the state structure instead.
	 */
	for (idx = 0; idx < connector->base.properties->count; idx++) {
		if (connector->base.properties->properties[idx] != property)
			continue;

		*val = connector->base.properties->values[idx];
		return 0;
	}

	return -EINVAL;
}
/**
 * intel_crtc_duplicate_state - duplicate crtc state
 * @crtc: drm crtc
 *
 * Allocates and returns a copy of the crtc state (both common and
 * Intel-specific) for the specified crtc.
 *
 * Returns: The newly allocated crtc state, or NULL on failure.
 */
struct drm_crtc_state *
intel_crtc_duplicate_state(struct drm_crtc *crtc)
{
	struct intel_crtc *icrtc = to_intel_crtc(crtc);

	/* No existing config to copy: hand back a zeroed state instead. */
	if (WARN_ON(!icrtc->config))
		return kzalloc(sizeof(*icrtc->config), GFP_KERNEL);

	return kmemdup(icrtc->config, sizeof(*icrtc->config), GFP_KERNEL);
}
/**
 * intel_crtc_destroy_state - destroy crtc state
 * @crtc: drm crtc
 * @state: the crtc state to free
 *
 * Destroys the crtc state (both common and Intel-specific) for the
 * specified crtc.  Currently a pure delegation to the DRM atomic
 * helper, which frees @state; kept as an entry point so Intel-specific
 * teardown can be added later without touching callers.
 */
void
intel_crtc_destroy_state(struct drm_crtc *crtc,
			  struct drm_crtc_state *state)
{
	drm_atomic_helper_crtc_destroy_state(crtc, state);
}
...@@ -36,6 +36,30 @@ ...@@ -36,6 +36,30 @@
#include <drm/drm_plane_helper.h> #include <drm/drm_plane_helper.h>
#include "intel_drv.h" #include "intel_drv.h"
/**
 * intel_create_plane_state - create plane state object
 * @plane: drm plane
 *
 * Allocates a fresh plane state for the given plane and sets some of
 * the state values to sensible initial values.
 *
 * Returns: A newly allocated plane state, or NULL on failure
 */
struct intel_plane_state *
intel_create_plane_state(struct drm_plane *plane)
{
	struct intel_plane_state *plane_state =
		kzalloc(sizeof(*plane_state), GFP_KERNEL);

	if (!plane_state)
		return NULL;

	/* Tie the state to its plane and start with an identity rotation. */
	plane_state->base.plane = plane;
	plane_state->base.rotation = BIT(DRM_ROTATE_0);

	return plane_state;
}
/** /**
* intel_plane_duplicate_state - duplicate plane state * intel_plane_duplicate_state - duplicate plane state
* @plane: drm plane * @plane: drm plane
...@@ -43,25 +67,28 @@ ...@@ -43,25 +67,28 @@
* Allocates and returns a copy of the plane state (both common and * Allocates and returns a copy of the plane state (both common and
* Intel-specific) for the specified plane. * Intel-specific) for the specified plane.
* *
* Returns: The newly allocated plane state, or NULL or failure. * Returns: The newly allocated plane state, or NULL on failure.
*/ */
struct drm_plane_state * struct drm_plane_state *
intel_plane_duplicate_state(struct drm_plane *plane) intel_plane_duplicate_state(struct drm_plane *plane)
{ {
struct intel_plane_state *state; struct drm_plane_state *state;
struct intel_plane_state *intel_state;
if (plane->state) if (WARN_ON(!plane->state))
state = kmemdup(plane->state, sizeof(*state), GFP_KERNEL); intel_state = intel_create_plane_state(plane);
else else
state = kzalloc(sizeof(*state), GFP_KERNEL); intel_state = kmemdup(plane->state, sizeof(*intel_state),
GFP_KERNEL);
if (!state) if (!intel_state)
return NULL; return NULL;
if (state->base.fb) state = &intel_state->base;
drm_framebuffer_reference(state->base.fb); if (state->fb)
drm_framebuffer_reference(state->fb);
return &state->base; return state;
} }
/** /**
...@@ -90,6 +117,15 @@ static int intel_plane_atomic_check(struct drm_plane *plane, ...@@ -90,6 +117,15 @@ static int intel_plane_atomic_check(struct drm_plane *plane,
crtc = crtc ? crtc : plane->crtc; crtc = crtc ? crtc : plane->crtc;
intel_crtc = to_intel_crtc(crtc); intel_crtc = to_intel_crtc(crtc);
/*
* Both crtc and plane->crtc could be NULL if we're updating a
* property while the plane is disabled. We don't actually have
* anything driver-specific we need to test in that case, so
* just return success.
*/
if (!crtc)
return 0;
/* /*
* The original src/dest coordinates are stored in state->base, but * The original src/dest coordinates are stored in state->base, but
* we want to keep another copy internal to our driver that we can * we want to keep another copy internal to our driver that we can
...@@ -108,9 +144,9 @@ static int intel_plane_atomic_check(struct drm_plane *plane, ...@@ -108,9 +144,9 @@ static int intel_plane_atomic_check(struct drm_plane *plane,
intel_state->clip.x1 = 0; intel_state->clip.x1 = 0;
intel_state->clip.y1 = 0; intel_state->clip.y1 = 0;
intel_state->clip.x2 = intel_state->clip.x2 =
intel_crtc->active ? intel_crtc->config.pipe_src_w : 0; intel_crtc->active ? intel_crtc->config->pipe_src_w : 0;
intel_state->clip.y2 = intel_state->clip.y2 =
intel_crtc->active ? intel_crtc->config.pipe_src_h : 0; intel_crtc->active ? intel_crtc->config->pipe_src_h : 0;
/* /*
* Disabling a plane is always okay; we just need to update * Disabling a plane is always okay; we just need to update
...@@ -150,3 +186,61 @@ const struct drm_plane_helper_funcs intel_plane_helper_funcs = { ...@@ -150,3 +186,61 @@ const struct drm_plane_helper_funcs intel_plane_helper_funcs = {
.atomic_update = intel_plane_atomic_update, .atomic_update = intel_plane_atomic_update,
}; };
/**
 * intel_plane_atomic_get_property - fetch plane property value
 * @plane: plane to fetch property for
 * @state: state containing the property value
 * @property: property to look up
 * @val: pointer to write property value into
 *
 * The DRM core does not store shadow copies of properties for
 * atomic-capable drivers.  This entrypoint is used to fetch
 * the current value of a driver-specific plane property.
 *
 * Returns: 0 and fills @val on success, -EINVAL for unknown properties.
 */
int
intel_plane_atomic_get_property(struct drm_plane *plane,
				const struct drm_plane_state *state,
				struct drm_property *property,
				uint64_t *val)
{
	struct drm_mode_config *config = &plane->dev->mode_config;

	/* Rotation is the only driver-handled plane property so far. */
	if (property != config->rotation_property) {
		DRM_DEBUG_KMS("Unknown plane property '%s'\n", property->name);
		return -EINVAL;
	}

	*val = state->rotation;
	return 0;
}
/**
 * intel_plane_atomic_set_property - set plane property value
 * @plane: plane to set property for
 * @state: state to update property value in
 * @property: property to set
 * @val: value to set property to
 *
 * Writes the specified property value for a plane into the provided atomic
 * state object.
 *
 * Returns 0 on success, -EINVAL on unrecognized properties
 */
int
intel_plane_atomic_set_property(struct drm_plane *plane,
				struct drm_plane_state *state,
				struct drm_property *property,
				uint64_t val)
{
	struct drm_mode_config *config = &plane->dev->mode_config;

	/* Rotation is the only driver-handled plane property so far. */
	if (property != config->rotation_property) {
		DRM_DEBUG_KMS("Unknown plane property '%s'\n", property->name);
		return -EINVAL;
	}

	state->rotation = val;
	return 0;
}
...@@ -400,7 +400,7 @@ void intel_audio_codec_enable(struct intel_encoder *intel_encoder) ...@@ -400,7 +400,7 @@ void intel_audio_codec_enable(struct intel_encoder *intel_encoder)
{ {
struct drm_encoder *encoder = &intel_encoder->base; struct drm_encoder *encoder = &intel_encoder->base;
struct intel_crtc *crtc = to_intel_crtc(encoder->crtc); struct intel_crtc *crtc = to_intel_crtc(encoder->crtc);
struct drm_display_mode *mode = &crtc->config.adjusted_mode; struct drm_display_mode *mode = &crtc->config->base.adjusted_mode;
struct drm_connector *connector; struct drm_connector *connector;
struct drm_device *dev = encoder->dev; struct drm_device *dev = encoder->dev;
struct drm_i915_private *dev_priv = dev->dev_private; struct drm_i915_private *dev_priv = dev->dev_private;
......
...@@ -28,6 +28,7 @@ ...@@ -28,6 +28,7 @@
#include <linux/i2c.h> #include <linux/i2c.h>
#include <linux/slab.h> #include <linux/slab.h>
#include <drm/drmP.h> #include <drm/drmP.h>
#include <drm/drm_atomic_helper.h>
#include <drm/drm_crtc.h> #include <drm/drm_crtc.h>
#include <drm/drm_crtc_helper.h> #include <drm/drm_crtc_helper.h>
#include <drm/drm_edid.h> #include <drm/drm_edid.h>
...@@ -110,31 +111,31 @@ static unsigned int intel_crt_get_flags(struct intel_encoder *encoder) ...@@ -110,31 +111,31 @@ static unsigned int intel_crt_get_flags(struct intel_encoder *encoder)
} }
static void intel_crt_get_config(struct intel_encoder *encoder, static void intel_crt_get_config(struct intel_encoder *encoder,
struct intel_crtc_config *pipe_config) struct intel_crtc_state *pipe_config)
{ {
struct drm_device *dev = encoder->base.dev; struct drm_device *dev = encoder->base.dev;
int dotclock; int dotclock;
pipe_config->adjusted_mode.flags |= intel_crt_get_flags(encoder); pipe_config->base.adjusted_mode.flags |= intel_crt_get_flags(encoder);
dotclock = pipe_config->port_clock; dotclock = pipe_config->port_clock;
if (HAS_PCH_SPLIT(dev)) if (HAS_PCH_SPLIT(dev))
ironlake_check_encoder_dotclock(pipe_config, dotclock); ironlake_check_encoder_dotclock(pipe_config, dotclock);
pipe_config->adjusted_mode.crtc_clock = dotclock; pipe_config->base.adjusted_mode.crtc_clock = dotclock;
} }
static void hsw_crt_get_config(struct intel_encoder *encoder, static void hsw_crt_get_config(struct intel_encoder *encoder,
struct intel_crtc_config *pipe_config) struct intel_crtc_state *pipe_config)
{ {
intel_ddi_get_config(encoder, pipe_config); intel_ddi_get_config(encoder, pipe_config);
pipe_config->adjusted_mode.flags &= ~(DRM_MODE_FLAG_PHSYNC | pipe_config->base.adjusted_mode.flags &= ~(DRM_MODE_FLAG_PHSYNC |
DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NHSYNC |
DRM_MODE_FLAG_PVSYNC | DRM_MODE_FLAG_PVSYNC |
DRM_MODE_FLAG_NVSYNC); DRM_MODE_FLAG_NVSYNC);
pipe_config->adjusted_mode.flags |= intel_crt_get_flags(encoder); pipe_config->base.adjusted_mode.flags |= intel_crt_get_flags(encoder);
} }
static void hsw_crt_pre_enable(struct intel_encoder *encoder) static void hsw_crt_pre_enable(struct intel_encoder *encoder)
...@@ -157,7 +158,7 @@ static void intel_crt_set_dpms(struct intel_encoder *encoder, int mode) ...@@ -157,7 +158,7 @@ static void intel_crt_set_dpms(struct intel_encoder *encoder, int mode)
struct drm_i915_private *dev_priv = dev->dev_private; struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_crt *crt = intel_encoder_to_crt(encoder); struct intel_crt *crt = intel_encoder_to_crt(encoder);
struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc); struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc);
struct drm_display_mode *adjusted_mode = &crtc->config.adjusted_mode; struct drm_display_mode *adjusted_mode = &crtc->config->base.adjusted_mode;
u32 adpa; u32 adpa;
if (INTEL_INFO(dev)->gen >= 5) if (INTEL_INFO(dev)->gen >= 5)
...@@ -303,7 +304,7 @@ intel_crt_mode_valid(struct drm_connector *connector, ...@@ -303,7 +304,7 @@ intel_crt_mode_valid(struct drm_connector *connector,
} }
static bool intel_crt_compute_config(struct intel_encoder *encoder, static bool intel_crt_compute_config(struct intel_encoder *encoder,
struct intel_crtc_config *pipe_config) struct intel_crtc_state *pipe_config)
{ {
struct drm_device *dev = encoder->base.dev; struct drm_device *dev = encoder->base.dev;
...@@ -792,6 +793,8 @@ static const struct drm_connector_funcs intel_crt_connector_funcs = { ...@@ -792,6 +793,8 @@ static const struct drm_connector_funcs intel_crt_connector_funcs = {
.fill_modes = drm_helper_probe_single_connector_modes, .fill_modes = drm_helper_probe_single_connector_modes,
.destroy = intel_crt_destroy, .destroy = intel_crt_destroy,
.set_property = intel_crt_set_property, .set_property = intel_crt_set_property,
.atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
.atomic_get_property = intel_connector_atomic_get_property,
}; };
static const struct drm_connector_helper_funcs intel_crt_connector_helper_funcs = { static const struct drm_connector_helper_funcs intel_crt_connector_helper_funcs = {
......
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
...@@ -26,11 +26,12 @@ ...@@ -26,11 +26,12 @@
#include <drm/drmP.h> #include <drm/drmP.h>
#include "i915_drv.h" #include "i915_drv.h"
#include "intel_drv.h" #include "intel_drv.h"
#include <drm/drm_atomic_helper.h>
#include <drm/drm_crtc_helper.h> #include <drm/drm_crtc_helper.h>
#include <drm/drm_edid.h> #include <drm/drm_edid.h>
static bool intel_dp_mst_compute_config(struct intel_encoder *encoder, static bool intel_dp_mst_compute_config(struct intel_encoder *encoder,
struct intel_crtc_config *pipe_config) struct intel_crtc_state *pipe_config)
{ {
struct intel_dp_mst_encoder *intel_mst = enc_to_mst(&encoder->base); struct intel_dp_mst_encoder *intel_mst = enc_to_mst(&encoder->base);
struct intel_digital_port *intel_dig_port = intel_mst->primary; struct intel_digital_port *intel_dig_port = intel_mst->primary;
...@@ -38,7 +39,7 @@ static bool intel_dp_mst_compute_config(struct intel_encoder *encoder, ...@@ -38,7 +39,7 @@ static bool intel_dp_mst_compute_config(struct intel_encoder *encoder,
struct drm_device *dev = encoder->base.dev; struct drm_device *dev = encoder->base.dev;
int bpp; int bpp;
int lane_count, slots; int lane_count, slots;
struct drm_display_mode *adjusted_mode = &pipe_config->adjusted_mode; struct drm_display_mode *adjusted_mode = &pipe_config->base.adjusted_mode;
struct intel_connector *found = NULL, *intel_connector; struct intel_connector *found = NULL, *intel_connector;
int mst_pbn; int mst_pbn;
...@@ -157,7 +158,8 @@ static void intel_mst_pre_enable_dp(struct intel_encoder *encoder) ...@@ -157,7 +158,8 @@ static void intel_mst_pre_enable_dp(struct intel_encoder *encoder)
if (intel_dp->active_mst_links == 0) { if (intel_dp->active_mst_links == 0) {
enum port port = intel_ddi_get_encoder_port(encoder); enum port port = intel_ddi_get_encoder_port(encoder);
I915_WRITE(PORT_CLK_SEL(port), intel_crtc->config.ddi_pll_sel); I915_WRITE(PORT_CLK_SEL(port),
intel_crtc->config->ddi_pll_sel);
intel_ddi_init_dp_buf_reg(&intel_dig_port->base); intel_ddi_init_dp_buf_reg(&intel_dig_port->base);
...@@ -170,7 +172,8 @@ static void intel_mst_pre_enable_dp(struct intel_encoder *encoder) ...@@ -170,7 +172,8 @@ static void intel_mst_pre_enable_dp(struct intel_encoder *encoder)
} }
ret = drm_dp_mst_allocate_vcpi(&intel_dp->mst_mgr, ret = drm_dp_mst_allocate_vcpi(&intel_dp->mst_mgr,
intel_mst->port, intel_crtc->config.pbn, &slots); intel_mst->port,
intel_crtc->config->pbn, &slots);
if (ret == false) { if (ret == false) {
DRM_ERROR("failed to allocate vcpi\n"); DRM_ERROR("failed to allocate vcpi\n");
return; return;
...@@ -216,14 +219,14 @@ static bool intel_dp_mst_enc_get_hw_state(struct intel_encoder *encoder, ...@@ -216,14 +219,14 @@ static bool intel_dp_mst_enc_get_hw_state(struct intel_encoder *encoder,
} }
static void intel_dp_mst_enc_get_config(struct intel_encoder *encoder, static void intel_dp_mst_enc_get_config(struct intel_encoder *encoder,
struct intel_crtc_config *pipe_config) struct intel_crtc_state *pipe_config)
{ {
struct intel_dp_mst_encoder *intel_mst = enc_to_mst(&encoder->base); struct intel_dp_mst_encoder *intel_mst = enc_to_mst(&encoder->base);
struct intel_digital_port *intel_dig_port = intel_mst->primary; struct intel_digital_port *intel_dig_port = intel_mst->primary;
struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc); struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc);
struct drm_device *dev = encoder->base.dev; struct drm_device *dev = encoder->base.dev;
struct drm_i915_private *dev_priv = dev->dev_private; struct drm_i915_private *dev_priv = dev->dev_private;
enum transcoder cpu_transcoder = crtc->config.cpu_transcoder; enum transcoder cpu_transcoder = pipe_config->cpu_transcoder;
u32 temp, flags = 0; u32 temp, flags = 0;
pipe_config->has_dp_encoder = true; pipe_config->has_dp_encoder = true;
...@@ -254,7 +257,7 @@ static void intel_dp_mst_enc_get_config(struct intel_encoder *encoder, ...@@ -254,7 +257,7 @@ static void intel_dp_mst_enc_get_config(struct intel_encoder *encoder,
default: default:
break; break;
} }
pipe_config->adjusted_mode.flags |= flags; pipe_config->base.adjusted_mode.flags |= flags;
intel_dp_get_m_n(crtc, pipe_config); intel_dp_get_m_n(crtc, pipe_config);
intel_ddi_clock_get(&intel_dig_port->base, pipe_config); intel_ddi_clock_get(&intel_dig_port->base, pipe_config);
...@@ -311,7 +314,9 @@ static const struct drm_connector_funcs intel_dp_mst_connector_funcs = { ...@@ -311,7 +314,9 @@ static const struct drm_connector_funcs intel_dp_mst_connector_funcs = {
.detect = intel_dp_mst_detect, .detect = intel_dp_mst_detect,
.fill_modes = drm_helper_probe_single_connector_modes, .fill_modes = drm_helper_probe_single_connector_modes,
.set_property = intel_dp_mst_set_property, .set_property = intel_dp_mst_set_property,
.atomic_get_property = intel_connector_atomic_get_property,
.destroy = intel_dp_mst_connector_destroy, .destroy = intel_dp_mst_connector_destroy,
.atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
}; };
static int intel_dp_mst_get_modes(struct drm_connector *connector) static int intel_dp_mst_get_modes(struct drm_connector *connector)
......
This diff is collapsed.
This diff is collapsed.
...@@ -26,6 +26,7 @@ ...@@ -26,6 +26,7 @@
#include <drm/drmP.h> #include <drm/drmP.h>
#include <drm/drm_crtc.h> #include <drm/drm_crtc.h>
#include <drm/drm_mipi_dsi.h>
#include "intel_drv.h" #include "intel_drv.h"
/* Dual Link support */ /* Dual Link support */
...@@ -33,53 +34,13 @@ ...@@ -33,53 +34,13 @@
#define DSI_DUAL_LINK_FRONT_BACK 1 #define DSI_DUAL_LINK_FRONT_BACK 1
#define DSI_DUAL_LINK_PIXEL_ALT 2 #define DSI_DUAL_LINK_PIXEL_ALT 2
struct intel_dsi_device { struct intel_dsi_host;
unsigned int panel_id;
const char *name;
const struct intel_dsi_dev_ops *dev_ops;
void *dev_priv;
};
struct intel_dsi_dev_ops {
bool (*init)(struct intel_dsi_device *dsi);
void (*panel_reset)(struct intel_dsi_device *dsi);
void (*disable_panel_power)(struct intel_dsi_device *dsi);
/* one time programmable commands if needed */
void (*send_otp_cmds)(struct intel_dsi_device *dsi);
/* This callback must be able to assume DSI commands can be sent */
void (*enable)(struct intel_dsi_device *dsi);
/* This callback must be able to assume DSI commands can be sent */
void (*disable)(struct intel_dsi_device *dsi);
int (*mode_valid)(struct intel_dsi_device *dsi,
struct drm_display_mode *mode);
bool (*mode_fixup)(struct intel_dsi_device *dsi,
const struct drm_display_mode *mode,
struct drm_display_mode *adjusted_mode);
void (*mode_set)(struct intel_dsi_device *dsi,
struct drm_display_mode *mode,
struct drm_display_mode *adjusted_mode);
enum drm_connector_status (*detect)(struct intel_dsi_device *dsi);
bool (*get_hw_state)(struct intel_dsi_device *dev);
struct drm_display_mode *(*get_modes)(struct intel_dsi_device *dsi);
void (*destroy) (struct intel_dsi_device *dsi);
};
struct intel_dsi { struct intel_dsi {
struct intel_encoder base; struct intel_encoder base;
struct intel_dsi_device dev; struct drm_panel *panel;
struct intel_dsi_host *dsi_hosts[I915_MAX_PORTS];
struct intel_connector *attached_connector; struct intel_connector *attached_connector;
...@@ -137,16 +98,18 @@ struct intel_dsi { ...@@ -137,16 +98,18 @@ struct intel_dsi {
u16 panel_pwr_cycle_delay; u16 panel_pwr_cycle_delay;
}; };
/* XXX: Transitional before dual port configuration */ struct intel_dsi_host {
static inline enum port intel_dsi_pipe_to_port(enum pipe pipe) struct mipi_dsi_host base;
{ struct intel_dsi *intel_dsi;
if (pipe == PIPE_A) enum port port;
return PORT_A;
else if (pipe == PIPE_B) /* our little hack */
return PORT_C; struct mipi_dsi_device *device;
};
WARN(1, "DSI on pipe %c, assuming port C\n", pipe_name(pipe)); static inline struct intel_dsi_host *to_intel_dsi_host(struct mipi_dsi_host *h)
return PORT_C; {
return container_of(h, struct intel_dsi_host, base);
} }
#define for_each_dsi_port(__port, __ports_mask) \ #define for_each_dsi_port(__port, __ports_mask) \
...@@ -162,6 +125,6 @@ extern void vlv_enable_dsi_pll(struct intel_encoder *encoder); ...@@ -162,6 +125,6 @@ extern void vlv_enable_dsi_pll(struct intel_encoder *encoder);
extern void vlv_disable_dsi_pll(struct intel_encoder *encoder); extern void vlv_disable_dsi_pll(struct intel_encoder *encoder);
extern u32 vlv_get_dsi_pclk(struct intel_encoder *encoder, int pipe_bpp); extern u32 vlv_get_dsi_pclk(struct intel_encoder *encoder, int pipe_bpp);
extern struct intel_dsi_dev_ops vbt_generic_dsi_display_ops; struct drm_panel *vbt_panel_init(struct intel_dsi *intel_dsi, u16 panel_id);
#endif /* _INTEL_DSI_H */ #endif /* _INTEL_DSI_H */
This diff is collapsed.
...@@ -33,85 +33,7 @@ ...@@ -33,85 +33,7 @@
#include "intel_drv.h" #include "intel_drv.h"
#include "intel_dsi.h" #include "intel_dsi.h"
#define DPI_LP_MODE_EN false
#define DPI_HS_MODE_EN true
void dsi_hs_mode_enable(struct intel_dsi *intel_dsi, bool enable, void dsi_hs_mode_enable(struct intel_dsi *intel_dsi, bool enable,
enum port port); enum port port);
int dsi_vc_dcs_write(struct intel_dsi *intel_dsi, int channel,
const u8 *data, int len, enum port port);
int dsi_vc_generic_write(struct intel_dsi *intel_dsi, int channel,
const u8 *data, int len, enum port port);
int dsi_vc_dcs_read(struct intel_dsi *intel_dsi, int channel, u8 dcs_cmd,
u8 *buf, int buflen, enum port port);
int dsi_vc_generic_read(struct intel_dsi *intel_dsi, int channel,
u8 *reqdata, int reqlen, u8 *buf, int buflen, enum port port);
int dpi_send_cmd(struct intel_dsi *intel_dsi, u32 cmd, bool hs);
void wait_for_dsi_fifo_empty(struct intel_dsi *intel_dsi);
/* XXX: questionable write helpers */
static inline int dsi_vc_dcs_write_0(struct intel_dsi *intel_dsi,
int channel, u8 dcs_cmd, enum port port)
{
return dsi_vc_dcs_write(intel_dsi, channel, &dcs_cmd, 1, port);
}
static inline int dsi_vc_dcs_write_1(struct intel_dsi *intel_dsi,
int channel, u8 dcs_cmd, u8 param, enum port port)
{
u8 buf[2] = { dcs_cmd, param };
return dsi_vc_dcs_write(intel_dsi, channel, buf, 2, port);
}
static inline int dsi_vc_generic_write_0(struct intel_dsi *intel_dsi,
int channel, enum port port)
{
return dsi_vc_generic_write(intel_dsi, channel, NULL, 0, port);
}
static inline int dsi_vc_generic_write_1(struct intel_dsi *intel_dsi,
int channel, u8 param, enum port port)
{
return dsi_vc_generic_write(intel_dsi, channel, &param, 1, port);
}
static inline int dsi_vc_generic_write_2(struct intel_dsi *intel_dsi,
int channel, u8 param1, u8 param2, enum port port)
{
u8 buf[2] = { param1, param2 };
return dsi_vc_generic_write(intel_dsi, channel, buf, 2, port);
}
/* XXX: questionable read helpers */
static inline int dsi_vc_generic_read_0(struct intel_dsi *intel_dsi,
int channel, u8 *buf, int buflen, enum port port)
{
return dsi_vc_generic_read(intel_dsi, channel, NULL, 0, buf, buflen,
port);
}
static inline int dsi_vc_generic_read_1(struct intel_dsi *intel_dsi,
int channel, u8 param, u8 *buf,
int buflen, enum port port)
{
return dsi_vc_generic_read(intel_dsi, channel, &param, 1, buf, buflen,
port);
}
static inline int dsi_vc_generic_read_2(struct intel_dsi *intel_dsi,
int channel, u8 param1, u8 param2,
u8 *buf, int buflen, enum port port)
{
u8 req[2] = { param1, param2 };
return dsi_vc_generic_read(intel_dsi, channel, req, 2, buf, buflen,
port);
}
#endif /* _INTEL_DSI_DSI_H */ #endif /* _INTEL_DSI_DSI_H */
This diff is collapsed.
...@@ -27,6 +27,7 @@ ...@@ -27,6 +27,7 @@
#include <linux/i2c.h> #include <linux/i2c.h>
#include <linux/slab.h> #include <linux/slab.h>
#include <drm/drmP.h> #include <drm/drmP.h>
#include <drm/drm_atomic_helper.h>
#include <drm/drm_crtc.h> #include <drm/drm_crtc.h>
#include "intel_drv.h" #include "intel_drv.h"
#include <drm/i915_drm.h> #include <drm/i915_drm.h>
...@@ -144,7 +145,7 @@ static bool intel_dvo_get_hw_state(struct intel_encoder *encoder, ...@@ -144,7 +145,7 @@ static bool intel_dvo_get_hw_state(struct intel_encoder *encoder,
} }
static void intel_dvo_get_config(struct intel_encoder *encoder, static void intel_dvo_get_config(struct intel_encoder *encoder,
struct intel_crtc_config *pipe_config) struct intel_crtc_state *pipe_config)
{ {
struct drm_i915_private *dev_priv = encoder->base.dev->dev_private; struct drm_i915_private *dev_priv = encoder->base.dev->dev_private;
struct intel_dvo *intel_dvo = enc_to_dvo(encoder); struct intel_dvo *intel_dvo = enc_to_dvo(encoder);
...@@ -160,9 +161,9 @@ static void intel_dvo_get_config(struct intel_encoder *encoder, ...@@ -160,9 +161,9 @@ static void intel_dvo_get_config(struct intel_encoder *encoder,
else else
flags |= DRM_MODE_FLAG_NVSYNC; flags |= DRM_MODE_FLAG_NVSYNC;
pipe_config->adjusted_mode.flags |= flags; pipe_config->base.adjusted_mode.flags |= flags;
pipe_config->adjusted_mode.crtc_clock = pipe_config->port_clock; pipe_config->base.adjusted_mode.crtc_clock = pipe_config->port_clock;
} }
static void intel_disable_dvo(struct intel_encoder *encoder) static void intel_disable_dvo(struct intel_encoder *encoder)
...@@ -186,8 +187,8 @@ static void intel_enable_dvo(struct intel_encoder *encoder) ...@@ -186,8 +187,8 @@ static void intel_enable_dvo(struct intel_encoder *encoder)
u32 temp = I915_READ(dvo_reg); u32 temp = I915_READ(dvo_reg);
intel_dvo->dev.dev_ops->mode_set(&intel_dvo->dev, intel_dvo->dev.dev_ops->mode_set(&intel_dvo->dev,
&crtc->config.requested_mode, &crtc->config->base.mode,
&crtc->config.adjusted_mode); &crtc->config->base.adjusted_mode);
I915_WRITE(dvo_reg, temp | DVO_ENABLE); I915_WRITE(dvo_reg, temp | DVO_ENABLE);
I915_READ(dvo_reg); I915_READ(dvo_reg);
...@@ -200,7 +201,7 @@ static void intel_dvo_dpms(struct drm_connector *connector, int mode) ...@@ -200,7 +201,7 @@ static void intel_dvo_dpms(struct drm_connector *connector, int mode)
{ {
struct intel_dvo *intel_dvo = intel_attached_dvo(connector); struct intel_dvo *intel_dvo = intel_attached_dvo(connector);
struct drm_crtc *crtc; struct drm_crtc *crtc;
struct intel_crtc_config *config; struct intel_crtc_state *config;
/* dvo supports only 2 dpms states. */ /* dvo supports only 2 dpms states. */
if (mode != DRM_MODE_DPMS_ON) if (mode != DRM_MODE_DPMS_ON)
...@@ -221,7 +222,7 @@ static void intel_dvo_dpms(struct drm_connector *connector, int mode) ...@@ -221,7 +222,7 @@ static void intel_dvo_dpms(struct drm_connector *connector, int mode)
/* We call connector dpms manually below in case pipe dpms doesn't /* We call connector dpms manually below in case pipe dpms doesn't
* change due to cloning. */ * change due to cloning. */
if (mode == DRM_MODE_DPMS_ON) { if (mode == DRM_MODE_DPMS_ON) {
config = &to_intel_crtc(crtc)->config; config = to_intel_crtc(crtc)->config;
intel_dvo->base.connectors_active = true; intel_dvo->base.connectors_active = true;
...@@ -261,10 +262,10 @@ intel_dvo_mode_valid(struct drm_connector *connector, ...@@ -261,10 +262,10 @@ intel_dvo_mode_valid(struct drm_connector *connector,
} }
static bool intel_dvo_compute_config(struct intel_encoder *encoder, static bool intel_dvo_compute_config(struct intel_encoder *encoder,
struct intel_crtc_config *pipe_config) struct intel_crtc_state *pipe_config)
{ {
struct intel_dvo *intel_dvo = enc_to_dvo(encoder); struct intel_dvo *intel_dvo = enc_to_dvo(encoder);
struct drm_display_mode *adjusted_mode = &pipe_config->adjusted_mode; struct drm_display_mode *adjusted_mode = &pipe_config->base.adjusted_mode;
/* If we have timings from the BIOS for the panel, put them in /* If we have timings from the BIOS for the panel, put them in
* to the adjusted mode. The CRTC will be set up for this mode, * to the adjusted mode. The CRTC will be set up for this mode,
...@@ -295,7 +296,7 @@ static void intel_dvo_pre_enable(struct intel_encoder *encoder) ...@@ -295,7 +296,7 @@ static void intel_dvo_pre_enable(struct intel_encoder *encoder)
struct drm_device *dev = encoder->base.dev; struct drm_device *dev = encoder->base.dev;
struct drm_i915_private *dev_priv = dev->dev_private; struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc); struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc);
struct drm_display_mode *adjusted_mode = &crtc->config.adjusted_mode; struct drm_display_mode *adjusted_mode = &crtc->config->base.adjusted_mode;
struct intel_dvo *intel_dvo = enc_to_dvo(encoder); struct intel_dvo *intel_dvo = enc_to_dvo(encoder);
int pipe = crtc->pipe; int pipe = crtc->pipe;
u32 dvo_val; u32 dvo_val;
...@@ -390,6 +391,8 @@ static const struct drm_connector_funcs intel_dvo_connector_funcs = { ...@@ -390,6 +391,8 @@ static const struct drm_connector_funcs intel_dvo_connector_funcs = {
.detect = intel_dvo_detect, .detect = intel_dvo_detect,
.destroy = intel_dvo_destroy, .destroy = intel_dvo_destroy,
.fill_modes = drm_helper_probe_single_connector_modes, .fill_modes = drm_helper_probe_single_connector_modes,
.atomic_get_property = intel_connector_atomic_get_property,
.atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
}; };
static const struct drm_connector_helper_funcs intel_dvo_connector_helper_funcs = { static const struct drm_connector_helper_funcs intel_dvo_connector_helper_funcs = {
......
...@@ -182,7 +182,7 @@ static void snb_fbc_blit_update(struct drm_device *dev) ...@@ -182,7 +182,7 @@ static void snb_fbc_blit_update(struct drm_device *dev)
/* Blitter is part of Media powerwell on VLV. No impact of /* Blitter is part of Media powerwell on VLV. No impact of
* his param in other platforms for now */ * his param in other platforms for now */
gen6_gt_force_wake_get(dev_priv, FORCEWAKE_MEDIA); intel_uncore_forcewake_get(dev_priv, FORCEWAKE_MEDIA);
blt_ecoskpd = I915_READ(GEN6_BLITTER_ECOSKPD); blt_ecoskpd = I915_READ(GEN6_BLITTER_ECOSKPD);
blt_ecoskpd |= GEN6_BLITTER_FBC_NOTIFY << blt_ecoskpd |= GEN6_BLITTER_FBC_NOTIFY <<
...@@ -195,7 +195,7 @@ static void snb_fbc_blit_update(struct drm_device *dev) ...@@ -195,7 +195,7 @@ static void snb_fbc_blit_update(struct drm_device *dev)
I915_WRITE(GEN6_BLITTER_ECOSKPD, blt_ecoskpd); I915_WRITE(GEN6_BLITTER_ECOSKPD, blt_ecoskpd);
POSTING_READ(GEN6_BLITTER_ECOSKPD); POSTING_READ(GEN6_BLITTER_ECOSKPD);
gen6_gt_force_wake_put(dev_priv, FORCEWAKE_MEDIA); intel_uncore_forcewake_put(dev_priv, FORCEWAKE_MEDIA);
} }
static void ilk_fbc_enable(struct drm_crtc *crtc) static void ilk_fbc_enable(struct drm_crtc *crtc)
...@@ -542,7 +542,7 @@ void intel_fbc_update(struct drm_device *dev) ...@@ -542,7 +542,7 @@ void intel_fbc_update(struct drm_device *dev)
intel_crtc = to_intel_crtc(crtc); intel_crtc = to_intel_crtc(crtc);
fb = crtc->primary->fb; fb = crtc->primary->fb;
obj = intel_fb_obj(fb); obj = intel_fb_obj(fb);
adjusted_mode = &intel_crtc->config.adjusted_mode; adjusted_mode = &intel_crtc->config->base.adjusted_mode;
if (i915.enable_fbc < 0) { if (i915.enable_fbc < 0) {
if (set_no_fbc_reason(dev_priv, FBC_CHIP_DEFAULT)) if (set_no_fbc_reason(dev_priv, FBC_CHIP_DEFAULT))
...@@ -572,8 +572,8 @@ void intel_fbc_update(struct drm_device *dev) ...@@ -572,8 +572,8 @@ void intel_fbc_update(struct drm_device *dev)
max_width = 2048; max_width = 2048;
max_height = 1536; max_height = 1536;
} }
if (intel_crtc->config.pipe_src_w > max_width || if (intel_crtc->config->pipe_src_w > max_width ||
intel_crtc->config.pipe_src_h > max_height) { intel_crtc->config->pipe_src_h > max_height) {
if (set_no_fbc_reason(dev_priv, FBC_MODE_TOO_LARGE)) if (set_no_fbc_reason(dev_priv, FBC_MODE_TOO_LARGE))
DRM_DEBUG_KMS("mode too large for compression, disabling\n"); DRM_DEBUG_KMS("mode too large for compression, disabling\n");
goto out_disable; goto out_disable;
...@@ -595,7 +595,7 @@ void intel_fbc_update(struct drm_device *dev) ...@@ -595,7 +595,7 @@ void intel_fbc_update(struct drm_device *dev)
goto out_disable; goto out_disable;
} }
if (INTEL_INFO(dev)->gen <= 4 && !IS_G4X(dev) && if (INTEL_INFO(dev)->gen <= 4 && !IS_G4X(dev) &&
to_intel_plane(crtc->primary)->rotation != BIT(DRM_ROTATE_0)) { crtc->primary->state->rotation != BIT(DRM_ROTATE_0)) {
if (set_no_fbc_reason(dev_priv, FBC_UNSUPPORTED_MODE)) if (set_no_fbc_reason(dev_priv, FBC_UNSUPPORTED_MODE))
DRM_DEBUG_KMS("Rotation unsupported, disabling\n"); DRM_DEBUG_KMS("Rotation unsupported, disabling\n");
goto out_disable; goto out_disable;
......
...@@ -443,7 +443,7 @@ static bool intel_fb_initial_config(struct drm_fb_helper *fb_helper, ...@@ -443,7 +443,7 @@ static bool intel_fb_initial_config(struct drm_fb_helper *fb_helper,
DRM_DEBUG_KMS("looking for current mode on connector %s\n", DRM_DEBUG_KMS("looking for current mode on connector %s\n",
connector->name); connector->name);
intel_mode_from_pipe_config(&encoder->crtc->hwmode, intel_mode_from_pipe_config(&encoder->crtc->hwmode,
&to_intel_crtc(encoder->crtc)->config); to_intel_crtc(encoder->crtc)->config);
modes[i] = &encoder->crtc->hwmode; modes[i] = &encoder->crtc->hwmode;
} }
crtcs[i] = new_crtc; crtcs[i] = new_crtc;
...@@ -531,7 +531,7 @@ static bool intel_fbdev_init_bios(struct drm_device *dev, ...@@ -531,7 +531,7 @@ static bool intel_fbdev_init_bios(struct drm_device *dev,
struct intel_framebuffer *fb = NULL; struct intel_framebuffer *fb = NULL;
struct drm_crtc *crtc; struct drm_crtc *crtc;
struct intel_crtc *intel_crtc; struct intel_crtc *intel_crtc;
struct intel_plane_config *plane_config = NULL; struct intel_initial_plane_config *plane_config = NULL;
unsigned int max_size = 0; unsigned int max_size = 0;
if (!i915.fastboot) if (!i915.fastboot)
...@@ -581,7 +581,7 @@ static bool intel_fbdev_init_bios(struct drm_device *dev, ...@@ -581,7 +581,7 @@ static bool intel_fbdev_init_bios(struct drm_device *dev,
* pipe. Note we need to use the selected fb's pitch and bpp * pipe. Note we need to use the selected fb's pitch and bpp
* rather than the current pipe's, since they differ. * rather than the current pipe's, since they differ.
*/ */
cur_size = intel_crtc->config.adjusted_mode.crtc_hdisplay; cur_size = intel_crtc->config->base.adjusted_mode.crtc_hdisplay;
cur_size = cur_size * fb->base.bits_per_pixel / 8; cur_size = cur_size * fb->base.bits_per_pixel / 8;
if (fb->base.pitches[0] < cur_size) { if (fb->base.pitches[0] < cur_size) {
DRM_DEBUG_KMS("fb not wide enough for plane %c (%d vs %d)\n", DRM_DEBUG_KMS("fb not wide enough for plane %c (%d vs %d)\n",
...@@ -592,13 +592,14 @@ static bool intel_fbdev_init_bios(struct drm_device *dev, ...@@ -592,13 +592,14 @@ static bool intel_fbdev_init_bios(struct drm_device *dev,
break; break;
} }
cur_size = intel_crtc->config.adjusted_mode.crtc_vdisplay; cur_size = intel_crtc->config->base.adjusted_mode.crtc_vdisplay;
cur_size = ALIGN(cur_size, plane_config->tiled ? (IS_GEN2(dev) ? 16 : 8) : 1); cur_size = intel_fb_align_height(dev, cur_size,
plane_config->tiling);
cur_size *= fb->base.pitches[0]; cur_size *= fb->base.pitches[0];
DRM_DEBUG_KMS("pipe %c area: %dx%d, bpp: %d, size: %d\n", DRM_DEBUG_KMS("pipe %c area: %dx%d, bpp: %d, size: %d\n",
pipe_name(intel_crtc->pipe), pipe_name(intel_crtc->pipe),
intel_crtc->config.adjusted_mode.crtc_hdisplay, intel_crtc->config->base.adjusted_mode.crtc_hdisplay,
intel_crtc->config.adjusted_mode.crtc_vdisplay, intel_crtc->config->base.adjusted_mode.crtc_vdisplay,
fb->base.bits_per_pixel, fb->base.bits_per_pixel,
cur_size); cur_size);
......
...@@ -341,7 +341,7 @@ bool intel_set_pch_fifo_underrun_reporting(struct drm_i915_private *dev_priv, ...@@ -341,7 +341,7 @@ bool intel_set_pch_fifo_underrun_reporting(struct drm_i915_private *dev_priv,
} }
/** /**
* intel_pch_fifo_underrun_irq_handler - handle PCH fifo underrun interrupt * intel_cpu_fifo_underrun_irq_handler - handle CPU fifo underrun interrupt
* @dev_priv: i915 device instance * @dev_priv: i915 device instance
* @pipe: (CPU) pipe to set state for * @pipe: (CPU) pipe to set state for
* *
......
...@@ -157,6 +157,7 @@ void intel_fb_obj_invalidate(struct drm_i915_gem_object *obj, ...@@ -157,6 +157,7 @@ void intel_fb_obj_invalidate(struct drm_i915_gem_object *obj,
intel_mark_fb_busy(dev, obj->frontbuffer_bits, ring); intel_mark_fb_busy(dev, obj->frontbuffer_bits, ring);
intel_psr_invalidate(dev, obj->frontbuffer_bits); intel_psr_invalidate(dev, obj->frontbuffer_bits);
intel_edp_drrs_invalidate(dev, obj->frontbuffer_bits);
} }
/** /**
...@@ -182,6 +183,7 @@ void intel_frontbuffer_flush(struct drm_device *dev, ...@@ -182,6 +183,7 @@ void intel_frontbuffer_flush(struct drm_device *dev,
intel_mark_fb_busy(dev, frontbuffer_bits, NULL); intel_mark_fb_busy(dev, frontbuffer_bits, NULL);
intel_edp_drrs_flush(dev, frontbuffer_bits);
intel_psr_flush(dev, frontbuffer_bits); intel_psr_flush(dev, frontbuffer_bits);
/* /*
......
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
...@@ -856,7 +856,7 @@ static int check_overlay_possible_on_crtc(struct intel_overlay *overlay, ...@@ -856,7 +856,7 @@ static int check_overlay_possible_on_crtc(struct intel_overlay *overlay,
return -EINVAL; return -EINVAL;
/* can't use the overlay with double wide pipe */ /* can't use the overlay with double wide pipe */
if (crtc->config.double_wide) if (crtc->config->double_wide)
return -EINVAL; return -EINVAL;
return 0; return 0;
......
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
...@@ -75,7 +75,7 @@ static int vlv_sideband_rw(struct drm_i915_private *dev_priv, u32 devfn, ...@@ -75,7 +75,7 @@ static int vlv_sideband_rw(struct drm_i915_private *dev_priv, u32 devfn,
return 0; return 0;
} }
u32 vlv_punit_read(struct drm_i915_private *dev_priv, u8 addr) u32 vlv_punit_read(struct drm_i915_private *dev_priv, u32 addr)
{ {
u32 val = 0; u32 val = 0;
...@@ -89,7 +89,7 @@ u32 vlv_punit_read(struct drm_i915_private *dev_priv, u8 addr) ...@@ -89,7 +89,7 @@ u32 vlv_punit_read(struct drm_i915_private *dev_priv, u8 addr)
return val; return val;
} }
void vlv_punit_write(struct drm_i915_private *dev_priv, u8 addr, u32 val) void vlv_punit_write(struct drm_i915_private *dev_priv, u32 addr, u32 val)
{ {
WARN_ON(!mutex_is_locked(&dev_priv->rps.hw_lock)); WARN_ON(!mutex_is_locked(&dev_priv->rps.hw_lock));
......
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment