Commit c51f7167 authored by Dave Airlie

Merge tag 'drm-intel-next-2014-07-11' of git://anongit.freedesktop.org/drm-intel into drm-next

- fbc improvements when stolen memory is tight (Ben)
- cdclk handling improvements for vlv/chv (Ville)
- proper fix for stuck primary planes on gmch platforms with cxsr (Imre & Egbert
  Eich)
- gen8 hw semaphore support (Ben)
- more execlist prep work from Oscar Mateo
- locking fixes for primary planes (Matt Roper)
- code rework to support runtime pm for dpms on hsw/bdw (Paulo, Imre & me), but
  not yet enabled because some fixes from Paulo haven't made the cut
- more gpu boost tuning from Chris
- as usual piles of little things all over

* tag 'drm-intel-next-2014-07-11' of git://anongit.freedesktop.org/drm-intel: (93 commits)
  drm/i915: Make the RPS interrupt generation mask handle the vlv wa
  drm/i915: Move RPS evaluation interval counters to i915->rps
  drm/i915: Don't cast a pointer to void* unnecessarily
  drm/i915: don't read LVDS regs at compute_config time
  drm/i915: check the power domains in intel_lvds_get_hw_state()
  drm/i915: check the power domains in ironlake_get_pipe_config()
  drm/i915: don't skip shared DPLL assertion on LPT
  drm/i915: Only touch WRPLL hw state in enable/disable hooks
  drm/i915: Switch to common shared dpll framework for WRPLLs
  drm/i915: ->enable hook for WRPLLs
  drm/i915: ->disable hook for WRPLLs
  drm/i915: State readout support for WRPLLs
  drm/i915: add POWER_DOMAIN_PLLS
  drm/i915: Document that the pll->mode_set hook is optional
  drm/i915: Basic shared dpll support for WRPLLs
  drm/i915: Precompute static ddi_pll_sel values in encoders
  drm/i915: BDW also has special-purpose DP DDI clocks
  drm/i915: State readout and cross-checking for ddi_pll_sel
  drm/i915: Move ddi_pll_sel into the pipe config
  drm/i915: Add a debugfs file for the shared dpll state
  ...
parents b957f457 7b3c29f6
@@ -1593,7 +1593,7 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags)
 	if (dev_priv == NULL)
 		return -ENOMEM;
 
-	dev->dev_private = (void *)dev_priv;
+	dev->dev_private = dev_priv;
 	dev_priv->dev = dev;
 
 	/* copy initial configuration to dev_priv->info */
@@ -1954,11 +1954,11 @@ void i915_driver_lastclose(struct drm_device *dev)
 	i915_dma_cleanup(dev);
 }
 
-void i915_driver_preclose(struct drm_device *dev, struct drm_file *file_priv)
+void i915_driver_preclose(struct drm_device *dev, struct drm_file *file)
 {
 	mutex_lock(&dev->struct_mutex);
-	i915_gem_context_close(dev, file_priv);
-	i915_gem_release(dev, file_priv);
+	i915_gem_context_close(dev, file);
+	i915_gem_release(dev, file);
 	mutex_unlock(&dev->struct_mutex);
 }
......
@@ -477,10 +477,6 @@ bool i915_semaphore_is_enabled(struct drm_device *dev)
 	if (i915.semaphores >= 0)
 		return i915.semaphores;
 
-	/* Until we get further testing... */
-	if (IS_GEN8(dev))
-		return false;
-
 #ifdef CONFIG_INTEL_IOMMU
 	/* Enable semaphores on SNB when IO remapping is off */
 	if (INTEL_INFO(dev)->gen == 6 && intel_iommu_gfx_mapped)
@@ -520,6 +516,8 @@ static int i915_drm_freeze(struct drm_device *dev)
 		return error;
 	}
 
+	flush_delayed_work(&dev_priv->rps.delayed_resume_work);
+
 	intel_runtime_pm_disable_interrupts(dev);
 	intel_suspend_gt_powersave(dev);
 
@@ -541,10 +539,11 @@ static int i915_drm_freeze(struct drm_device *dev)
 	i915_save_state(dev);
 
-	if (acpi_target_system_state() >= ACPI_STATE_S3)
-		opregion_target_state = PCI_D3cold;
-	else
-		opregion_target_state = PCI_D1;
+	opregion_target_state = PCI_D3cold;
+#if IS_ENABLED(CONFIG_ACPI_SLEEP)
+	if (acpi_target_system_state() < ACPI_STATE_S3)
+		opregion_target_state = PCI_D1;
+#endif
 	intel_opregion_notify_adapter(dev, opregion_target_state);
 
 	intel_uncore_forcewake_reset(dev, false);
......
@@ -129,6 +129,7 @@ enum intel_display_power_domain {
 	POWER_DOMAIN_PORT_OTHER,
 	POWER_DOMAIN_VGA,
 	POWER_DOMAIN_AUDIO,
+	POWER_DOMAIN_PLLS,
 	POWER_DOMAIN_INIT,
 
 	POWER_DOMAIN_NUM,
@@ -184,8 +185,10 @@ struct i915_mmu_object;
 enum intel_dpll_id {
 	DPLL_ID_PRIVATE = -1, /* non-shared dpll in use */
 	/* real shared dpll ids must be >= 0 */
-	DPLL_ID_PCH_PLL_A,
-	DPLL_ID_PCH_PLL_B,
+	DPLL_ID_PCH_PLL_A = 0,
+	DPLL_ID_PCH_PLL_B = 1,
+	DPLL_ID_WRPLL1 = 0,
+	DPLL_ID_WRPLL2 = 1,
 };
 #define I915_NUM_PLLS 2
@@ -194,6 +197,7 @@ struct intel_dpll_hw_state {
 	uint32_t dpll_md;
 	uint32_t fp0;
 	uint32_t fp1;
+	uint32_t wrpll;
 };
 
 struct intel_shared_dpll {
@@ -204,6 +208,8 @@ struct intel_shared_dpll {
 	/* should match the index in the dev_priv->shared_dplls array */
 	enum intel_dpll_id id;
 	struct intel_dpll_hw_state hw_state;
+	/* The mode_set hook is optional and should be used together with the
+	 * intel_prepare_shared_dpll function. */
 	void (*mode_set)(struct drm_i915_private *dev_priv,
 			 struct intel_shared_dpll *pll);
 	void (*enable)(struct drm_i915_private *dev_priv,
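The mode_set/enable/disable hooks above are driven by the shared-dpll code in intel_display.c, which is not part of this excerpt. As a rough sketch of the intended call order (intel_prepare_shared_dpll() is named in the comment above; the wrapper below is hypothetical):

/* Sketch only: how a shared DPLL's hooks are expected to be exercised. */
static void example_shared_dpll_on(struct drm_i915_private *dev_priv,
				   struct intel_shared_dpll *pll)
{
	/* Optional hook: write pll->hw_state while the PLL is still
	 * disabled (this is what intel_prepare_shared_dpll() is for). */
	if (pll->mode_set)
		pll->mode_set(dev_priv, pll);

	/* Mandatory hook: actually turn the PLL on once its state is set. */
	pll->enable(dev_priv, pll);
}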
...@@ -228,12 +234,6 @@ void intel_link_compute_m_n(int bpp, int nlanes, ...@@ -228,12 +234,6 @@ void intel_link_compute_m_n(int bpp, int nlanes,
int pixel_clock, int link_clock, int pixel_clock, int link_clock,
struct intel_link_m_n *m_n); struct intel_link_m_n *m_n);
struct intel_ddi_plls {
int spll_refcount;
int wrpll1_refcount;
int wrpll2_refcount;
};
/* Interface history: /* Interface history:
* *
* 1.1: Original. * 1.1: Original.
...@@ -324,6 +324,7 @@ struct drm_i915_error_state { ...@@ -324,6 +324,7 @@ struct drm_i915_error_state {
u64 fence[I915_MAX_NUM_FENCES]; u64 fence[I915_MAX_NUM_FENCES];
struct intel_overlay_error_state *overlay; struct intel_overlay_error_state *overlay;
struct intel_display_error_state *display; struct intel_display_error_state *display;
struct drm_i915_error_object *semaphore_obj;
struct drm_i915_error_ring { struct drm_i915_error_ring {
bool valid; bool valid;
@@ -584,27 +585,48 @@ struct i915_ctx_hang_stats {
 };
 
 /* This must match up with the value previously used for execbuf2.rsvd1. */
-#define DEFAULT_CONTEXT_ID 0
+#define DEFAULT_CONTEXT_HANDLE 0
 
+/**
+ * struct intel_context - as the name implies, represents a context.
+ * @ref: reference count.
+ * @user_handle: userspace tracking identity for this context.
+ * @remap_slice: l3 row remapping information.
+ * @file_priv: filp associated with this context (NULL for global default
+ *	       context).
+ * @hang_stats: information about the role of this context in possible GPU
+ *		hangs.
+ * @vm: virtual memory space used by this context.
+ * @legacy_hw_ctx: render context backing object and whether it is correctly
+ *		   initialized (legacy ring submission mechanism only).
+ * @link: link in the global list of contexts.
+ *
+ * Contexts are memory images used by the hardware to store copies of their
+ * internal state.
+ */
 struct intel_context {
 	struct kref ref;
-	int id;
-	bool is_initialized;
+	int user_handle;
 	uint8_t remap_slice;
 	struct drm_i915_file_private *file_priv;
-	struct drm_i915_gem_object *obj;
 	struct i915_ctx_hang_stats hang_stats;
 	struct i915_address_space *vm;
+
+	struct {
+		struct drm_i915_gem_object *rcs_state;
+		bool initialized;
+	} legacy_hw_ctx;
+
 	struct list_head link;
 };
 
 struct i915_fbc {
 	unsigned long size;
+	unsigned threshold;
 	unsigned int fb_id;
 	enum plane plane;
 	int y;
 
-	struct drm_mm_node *compressed_fb;
+	struct drm_mm_node compressed_fb;
 	struct drm_mm_node *compressed_llb;
 
 	struct intel_fbc_work {
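The id/DEFAULT_CONTEXT_ID to user_handle/DEFAULT_CONTEXT_HANDLE rename above keeps the userspace-visible handle separate from any internal numbering (prep work for execlists). A minimal sketch of how the handle is consumed, mirroring the i915_gem_validate_context() change later in this diff (error handling trimmed, helper name hypothetical):

/* Sketch: resolve an execbuf ctx_id to an intel_context via its user
 * handle.  i915_gem_context_get() is the existing lookup helper. */
static struct intel_context *
example_lookup_context(struct drm_i915_file_private *file_priv, u32 ctx_id)
{
	struct intel_context *ctx = i915_gem_context_get(file_priv, ctx_id);

	if (!IS_ERR(ctx) && ctx->user_handle == DEFAULT_CONTEXT_HANDLE)
		DRM_DEBUG("execbuf targets the global default context\n");

	return ctx;
}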
@@ -880,6 +902,12 @@ struct vlv_s0ix_state {
 	u32 clock_gate_dis2;
 };
 
+struct intel_rps_ei {
+	u32 cz_clock;
+	u32 render_c0;
+	u32 media_c0;
+};
+
 struct intel_gen6_power_mgmt {
 	/* work and pm_iir are protected by dev_priv->irq_lock */
 	struct work_struct work;
@@ -904,12 +932,17 @@ struct intel_gen6_power_mgmt {
 	u8 rp1_freq; /* "less than" RP0 power/freqency */
 	u8 rp0_freq; /* Non-overclocked max frequency. */
 
+	u32 ei_interrupt_count;
+
 	int last_adj;
 	enum { LOW_POWER, BETWEEN, HIGH_POWER } power;
 
 	bool enabled;
 	struct delayed_work delayed_resume_work;
 
+	/* manual wa residency calculations */
+	struct intel_rps_ei up_ei, down_ei;
+
 	/*
 	 * Protects RPS/RC6 register access and PCU communication.
	 * Must be taken after struct_mutex if nested.
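The new up_ei/down_ei samples feed the "manual wa residency calculations" for the VLV RPS workaround; the code that snapshots VLV_RENDER_C0_COUNT_REG/VLV_MEDIA_C0_COUNT_REG and turns the deltas into a residency percentage lives in the collapsed i915_irq.c diff. Only the decision step is sketched here, against the VLV_RP_UP_EI_THRESHOLD/VLV_RP_DOWN_EI_THRESHOLD values added to i915_reg.h later in this series (the helper itself is hypothetical):

/* Sketch (not the in-tree code): once the C0 residency over the last
 * evaluation interval has been derived from two intel_rps_ei snapshots,
 * compare it against the up/down thresholds to pick a frequency step. */
static int example_vlv_rps_step(u32 c0_residency_pct)
{
	if (c0_residency_pct > VLV_RP_UP_EI_THRESHOLD)
		return 1;	/* busy: request a higher frequency */
	if (c0_residency_pct < VLV_RP_DOWN_EI_THRESHOLD)
		return -1;	/* mostly idle: request a lower frequency */
	return 0;
}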
...@@ -1374,6 +1407,7 @@ struct drm_i915_private { ...@@ -1374,6 +1407,7 @@ struct drm_i915_private {
struct pci_dev *bridge_dev; struct pci_dev *bridge_dev;
struct intel_engine_cs ring[I915_NUM_RINGS]; struct intel_engine_cs ring[I915_NUM_RINGS];
struct drm_i915_gem_object *semaphore_obj;
uint32_t last_seqno, next_seqno; uint32_t last_seqno, next_seqno;
drm_dma_handle_t *status_page_dmah; drm_dma_handle_t *status_page_dmah;
...@@ -1480,7 +1514,6 @@ struct drm_i915_private { ...@@ -1480,7 +1514,6 @@ struct drm_i915_private {
int num_shared_dpll; int num_shared_dpll;
struct intel_shared_dpll shared_dplls[I915_NUM_PLLS]; struct intel_shared_dpll shared_dplls[I915_NUM_PLLS];
struct intel_ddi_plls ddi_plls;
int dpio_phy_iosf_port[I915_NUM_PHYS_VLV]; int dpio_phy_iosf_port[I915_NUM_PHYS_VLV];
/* Reclocking support */ /* Reclocking support */
...@@ -1557,6 +1590,11 @@ struct drm_i915_private { ...@@ -1557,6 +1590,11 @@ struct drm_i915_private {
struct i915_runtime_pm pm; struct i915_runtime_pm pm;
struct intel_digital_port *hpd_irq_port[I915_MAX_PORTS];
u32 long_hpd_port_mask;
u32 short_hpd_port_mask;
struct work_struct dig_port_work;
/* Old dri1 support infrastructure, beware the dragons ya fools entering /* Old dri1 support infrastructure, beware the dragons ya fools entering
* here! */ * here! */
struct i915_dri1_state dri1; struct i915_dri1_state dri1;
...@@ -2097,12 +2135,12 @@ void i915_update_dri1_breadcrumb(struct drm_device *dev); ...@@ -2097,12 +2135,12 @@ void i915_update_dri1_breadcrumb(struct drm_device *dev);
extern void i915_kernel_lost_context(struct drm_device * dev); extern void i915_kernel_lost_context(struct drm_device * dev);
extern int i915_driver_load(struct drm_device *, unsigned long flags); extern int i915_driver_load(struct drm_device *, unsigned long flags);
extern int i915_driver_unload(struct drm_device *); extern int i915_driver_unload(struct drm_device *);
extern int i915_driver_open(struct drm_device *dev, struct drm_file *file_priv); extern int i915_driver_open(struct drm_device *dev, struct drm_file *file);
extern void i915_driver_lastclose(struct drm_device * dev); extern void i915_driver_lastclose(struct drm_device * dev);
extern void i915_driver_preclose(struct drm_device *dev, extern void i915_driver_preclose(struct drm_device *dev,
struct drm_file *file_priv); struct drm_file *file);
extern void i915_driver_postclose(struct drm_device *dev, extern void i915_driver_postclose(struct drm_device *dev,
struct drm_file *file_priv); struct drm_file *file);
extern int i915_driver_device_is_agp(struct drm_device * dev); extern int i915_driver_device_is_agp(struct drm_device * dev);
#ifdef CONFIG_COMPAT #ifdef CONFIG_COMPAT
extern long i915_compat_ioctl(struct file *filp, unsigned int cmd, extern long i915_compat_ioctl(struct file *filp, unsigned int cmd,
@@ -2457,7 +2495,7 @@ static inline void i915_gem_context_unreference(struct intel_context *ctx)
 
 static inline bool i915_gem_context_is_default(const struct intel_context *c)
 {
-	return c->id == DEFAULT_CONTEXT_ID;
+	return c->user_handle == DEFAULT_CONTEXT_HANDLE;
 }
 
 int i915_gem_context_create_ioctl(struct drm_device *dev, void *data,
@@ -2488,7 +2526,7 @@ static inline void i915_gem_chipset_flush(struct drm_device *dev)
 
 /* i915_gem_stolen.c */
 int i915_gem_init_stolen(struct drm_device *dev);
-int i915_gem_stolen_setup_compression(struct drm_device *dev, int size);
+int i915_gem_stolen_setup_compression(struct drm_device *dev, int size, int fb_cpp);
 void i915_gem_stolen_cleanup_compression(struct drm_device *dev);
 void i915_gem_cleanup_stolen(struct drm_device *dev);
 struct drm_i915_gem_object *
...@@ -2647,6 +2685,8 @@ extern void gen6_set_rps(struct drm_device *dev, u8 val); ...@@ -2647,6 +2685,8 @@ extern void gen6_set_rps(struct drm_device *dev, u8 val);
extern void valleyview_set_rps(struct drm_device *dev, u8 val); extern void valleyview_set_rps(struct drm_device *dev, u8 val);
extern int valleyview_rps_max_freq(struct drm_i915_private *dev_priv); extern int valleyview_rps_max_freq(struct drm_i915_private *dev_priv);
extern int valleyview_rps_min_freq(struct drm_i915_private *dev_priv); extern int valleyview_rps_min_freq(struct drm_i915_private *dev_priv);
extern void intel_set_memory_cxsr(struct drm_i915_private *dev_priv,
bool enable);
extern void intel_detect_pch(struct drm_device *dev); extern void intel_detect_pch(struct drm_device *dev);
extern int intel_trans_dp_port_sel(struct drm_crtc *crtc); extern int intel_trans_dp_port_sel(struct drm_crtc *crtc);
extern int intel_enable_rc6(const struct drm_device *dev); extern int intel_enable_rc6(const struct drm_device *dev);
......
@@ -1168,7 +1168,7 @@ static int __wait_seqno(struct intel_engine_cs *ring, u32 seqno,
 	timeout_expire = timeout ? jiffies + timespec_to_jiffies_timeout(timeout) : 0;
 
-	if (INTEL_INFO(dev)->gen >= 6 && can_wait_boost(file_priv)) {
+	if (INTEL_INFO(dev)->gen >= 6 && ring->id == RCS && can_wait_boost(file_priv)) {
 		gen6_rps_boost(dev_priv);
 		if (file_priv)
 			mod_delayed_work(dev_priv->wq,
@@ -2330,7 +2330,7 @@ int __i915_add_request(struct intel_engine_cs *ring,
 	u32 request_ring_position, request_start;
 	int ret;
 
-	request_start = intel_ring_get_tail(ring);
+	request_start = intel_ring_get_tail(ring->buffer);
 	/*
 	 * Emit any outstanding flushes - execbuf can fail to emit the flush
 	 * after having emitted the batchbuffer command. Hence we need to fix
@@ -2351,7 +2351,7 @@ int __i915_add_request(struct intel_engine_cs *ring,
 	 * GPU processing the request, we never over-estimate the
 	 * position of the head.
 	 */
-	request_ring_position = intel_ring_get_tail(ring);
+	request_ring_position = intel_ring_get_tail(ring->buffer);
 
 	ret = ring->add_request(ring);
 	if (ret)
@@ -2842,6 +2842,8 @@ i915_gem_object_sync(struct drm_i915_gem_object *obj,
 	idx = intel_ring_sync_index(from, to);
 
 	seqno = obj->last_read_seqno;
+	/* Optimization: Avoid semaphore sync when we are sure we already
+	 * waited for an object with higher seqno */
 	if (seqno <= from->semaphore.sync_seqno[idx])
 		return 0;
......
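The new comment in i915_gem_object_sync() documents the early return shown above: semaphore.sync_seqno[] remembers the newest seqno each ring pairing has already waited on, so a wait against an older request can be skipped. Restated as a tiny helper, for illustration only:

/* Sketch: a semaphore wait is only needed if the object's last read
 * request is newer than what this ring pairing has already synced on. */
static bool example_semaphore_wait_needed(u32 seqno, u32 last_synced_seqno)
{
	return seqno > last_synced_seqno;
}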
...@@ -938,7 +938,7 @@ i915_gem_validate_context(struct drm_device *dev, struct drm_file *file, ...@@ -938,7 +938,7 @@ i915_gem_validate_context(struct drm_device *dev, struct drm_file *file,
struct intel_context *ctx = NULL; struct intel_context *ctx = NULL;
struct i915_ctx_hang_stats *hs; struct i915_ctx_hang_stats *hs;
if (ring->id != RCS && ctx_id != DEFAULT_CONTEXT_ID) if (ring->id != RCS && ctx_id != DEFAULT_CONTEXT_HANDLE)
return ERR_PTR(-EINVAL); return ERR_PTR(-EINVAL);
ctx = i915_gem_context_get(file->driver_priv, ctx_id); ctx = i915_gem_context_get(file->driver_priv, ctx_id);
...@@ -1026,6 +1026,163 @@ i915_reset_gen7_sol_offsets(struct drm_device *dev, ...@@ -1026,6 +1026,163 @@ i915_reset_gen7_sol_offsets(struct drm_device *dev,
return 0; return 0;
} }
static int
legacy_ringbuffer_submission(struct drm_device *dev, struct drm_file *file,
struct intel_engine_cs *ring,
struct intel_context *ctx,
struct drm_i915_gem_execbuffer2 *args,
struct list_head *vmas,
struct drm_i915_gem_object *batch_obj,
u64 exec_start, u32 flags)
{
struct drm_clip_rect *cliprects = NULL;
struct drm_i915_private *dev_priv = dev->dev_private;
u64 exec_len;
int instp_mode;
u32 instp_mask;
int i, ret = 0;
if (args->num_cliprects != 0) {
if (ring != &dev_priv->ring[RCS]) {
DRM_DEBUG("clip rectangles are only valid with the render ring\n");
return -EINVAL;
}
if (INTEL_INFO(dev)->gen >= 5) {
DRM_DEBUG("clip rectangles are only valid on pre-gen5\n");
return -EINVAL;
}
if (args->num_cliprects > UINT_MAX / sizeof(*cliprects)) {
DRM_DEBUG("execbuf with %u cliprects\n",
args->num_cliprects);
return -EINVAL;
}
cliprects = kcalloc(args->num_cliprects,
sizeof(*cliprects),
GFP_KERNEL);
if (cliprects == NULL) {
ret = -ENOMEM;
goto error;
}
if (copy_from_user(cliprects,
to_user_ptr(args->cliprects_ptr),
sizeof(*cliprects)*args->num_cliprects)) {
ret = -EFAULT;
goto error;
}
} else {
if (args->DR4 == 0xffffffff) {
DRM_DEBUG("UXA submitting garbage DR4, fixing up\n");
args->DR4 = 0;
}
if (args->DR1 || args->DR4 || args->cliprects_ptr) {
DRM_DEBUG("0 cliprects but dirt in cliprects fields\n");
return -EINVAL;
}
}
ret = i915_gem_execbuffer_move_to_gpu(ring, vmas);
if (ret)
goto error;
ret = i915_switch_context(ring, ctx);
if (ret)
goto error;
instp_mode = args->flags & I915_EXEC_CONSTANTS_MASK;
instp_mask = I915_EXEC_CONSTANTS_MASK;
switch (instp_mode) {
case I915_EXEC_CONSTANTS_REL_GENERAL:
case I915_EXEC_CONSTANTS_ABSOLUTE:
case I915_EXEC_CONSTANTS_REL_SURFACE:
if (instp_mode != 0 && ring != &dev_priv->ring[RCS]) {
DRM_DEBUG("non-0 rel constants mode on non-RCS\n");
ret = -EINVAL;
goto error;
}
if (instp_mode != dev_priv->relative_constants_mode) {
if (INTEL_INFO(dev)->gen < 4) {
DRM_DEBUG("no rel constants on pre-gen4\n");
ret = -EINVAL;
goto error;
}
if (INTEL_INFO(dev)->gen > 5 &&
instp_mode == I915_EXEC_CONSTANTS_REL_SURFACE) {
DRM_DEBUG("rel surface constants mode invalid on gen5+\n");
ret = -EINVAL;
goto error;
}
/* The HW changed the meaning on this bit on gen6 */
if (INTEL_INFO(dev)->gen >= 6)
instp_mask &= ~I915_EXEC_CONSTANTS_REL_SURFACE;
}
break;
default:
DRM_DEBUG("execbuf with unknown constants: %d\n", instp_mode);
ret = -EINVAL;
goto error;
}
if (ring == &dev_priv->ring[RCS] &&
instp_mode != dev_priv->relative_constants_mode) {
ret = intel_ring_begin(ring, 4);
if (ret)
goto error;
intel_ring_emit(ring, MI_NOOP);
intel_ring_emit(ring, MI_LOAD_REGISTER_IMM(1));
intel_ring_emit(ring, INSTPM);
intel_ring_emit(ring, instp_mask << 16 | instp_mode);
intel_ring_advance(ring);
dev_priv->relative_constants_mode = instp_mode;
}
if (args->flags & I915_EXEC_GEN7_SOL_RESET) {
ret = i915_reset_gen7_sol_offsets(dev, ring);
if (ret)
goto error;
}
exec_len = args->batch_len;
if (cliprects) {
for (i = 0; i < args->num_cliprects; i++) {
ret = i915_emit_box(dev, &cliprects[i],
args->DR1, args->DR4);
if (ret)
goto error;
ret = ring->dispatch_execbuffer(ring,
exec_start, exec_len,
flags);
if (ret)
goto error;
}
} else {
ret = ring->dispatch_execbuffer(ring,
exec_start, exec_len,
flags);
if (ret)
return ret;
}
trace_i915_gem_ring_dispatch(ring, intel_ring_get_seqno(ring), flags);
i915_gem_execbuffer_move_to_active(vmas, ring);
i915_gem_execbuffer_retire_commands(dev, file, ring, batch_obj);
error:
kfree(cliprects);
return ret;
}
/** /**
* Find one BSD ring to dispatch the corresponding BSD command. * Find one BSD ring to dispatch the corresponding BSD command.
* The Ring ID is returned. * The Ring ID is returned.
@@ -1085,14 +1242,13 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
 	struct drm_i915_private *dev_priv = dev->dev_private;
 	struct eb_vmas *eb;
 	struct drm_i915_gem_object *batch_obj;
-	struct drm_clip_rect *cliprects = NULL;
 	struct intel_engine_cs *ring;
 	struct intel_context *ctx;
 	struct i915_address_space *vm;
 	const u32 ctx_id = i915_execbuffer2_get_context_id(*args);
-	u64 exec_start = args->batch_start_offset, exec_len;
-	u32 mask, flags;
-	int ret, mode, i;
+	u64 exec_start = args->batch_start_offset;
+	u32 flags;
+	int ret;
 	bool need_relocs;
 
 	if (!i915_gem_check_execbuffer(args))
...@@ -1136,87 +1292,11 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data, ...@@ -1136,87 +1292,11 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
return -EINVAL; return -EINVAL;
} }
mode = args->flags & I915_EXEC_CONSTANTS_MASK;
mask = I915_EXEC_CONSTANTS_MASK;
switch (mode) {
case I915_EXEC_CONSTANTS_REL_GENERAL:
case I915_EXEC_CONSTANTS_ABSOLUTE:
case I915_EXEC_CONSTANTS_REL_SURFACE:
if (mode != 0 && ring != &dev_priv->ring[RCS]) {
DRM_DEBUG("non-0 rel constants mode on non-RCS\n");
return -EINVAL;
}
if (mode != dev_priv->relative_constants_mode) {
if (INTEL_INFO(dev)->gen < 4) {
DRM_DEBUG("no rel constants on pre-gen4\n");
return -EINVAL;
}
if (INTEL_INFO(dev)->gen > 5 &&
mode == I915_EXEC_CONSTANTS_REL_SURFACE) {
DRM_DEBUG("rel surface constants mode invalid on gen5+\n");
return -EINVAL;
}
/* The HW changed the meaning on this bit on gen6 */
if (INTEL_INFO(dev)->gen >= 6)
mask &= ~I915_EXEC_CONSTANTS_REL_SURFACE;
}
break;
default:
DRM_DEBUG("execbuf with unknown constants: %d\n", mode);
return -EINVAL;
}
if (args->buffer_count < 1) { if (args->buffer_count < 1) {
DRM_DEBUG("execbuf with %d buffers\n", args->buffer_count); DRM_DEBUG("execbuf with %d buffers\n", args->buffer_count);
return -EINVAL; return -EINVAL;
} }
if (args->num_cliprects != 0) {
if (ring != &dev_priv->ring[RCS]) {
DRM_DEBUG("clip rectangles are only valid with the render ring\n");
return -EINVAL;
}
if (INTEL_INFO(dev)->gen >= 5) {
DRM_DEBUG("clip rectangles are only valid on pre-gen5\n");
return -EINVAL;
}
if (args->num_cliprects > UINT_MAX / sizeof(*cliprects)) {
DRM_DEBUG("execbuf with %u cliprects\n",
args->num_cliprects);
return -EINVAL;
}
cliprects = kcalloc(args->num_cliprects,
sizeof(*cliprects),
GFP_KERNEL);
if (cliprects == NULL) {
ret = -ENOMEM;
goto pre_mutex_err;
}
if (copy_from_user(cliprects,
to_user_ptr(args->cliprects_ptr),
sizeof(*cliprects)*args->num_cliprects)) {
ret = -EFAULT;
goto pre_mutex_err;
}
} else {
if (args->DR4 == 0xffffffff) {
DRM_DEBUG("UXA submitting garbage DR4, fixing up\n");
args->DR4 = 0;
}
if (args->DR1 || args->DR4 || args->cliprects_ptr) {
DRM_DEBUG("0 cliprects but dirt in cliprects fields\n");
return -EINVAL;
}
}
intel_runtime_pm_get(dev_priv); intel_runtime_pm_get(dev_priv);
ret = i915_mutex_lock_interruptible(dev); ret = i915_mutex_lock_interruptible(dev);
@@ -1320,63 +1400,11 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
 	else
 		exec_start += i915_gem_obj_offset(batch_obj, vm);
 
-	ret = i915_gem_execbuffer_move_to_gpu(ring, &eb->vmas);
-	if (ret)
-		goto err;
-
-	ret = i915_switch_context(ring, ctx);
-	if (ret)
-		goto err;
+	ret = legacy_ringbuffer_submission(dev, file, ring, ctx,
+			args, &eb->vmas, batch_obj, exec_start, flags);
+	if (ret)
+		goto err;
if (ring == &dev_priv->ring[RCS] &&
mode != dev_priv->relative_constants_mode) {
ret = intel_ring_begin(ring, 4);
if (ret)
goto err;
intel_ring_emit(ring, MI_NOOP);
intel_ring_emit(ring, MI_LOAD_REGISTER_IMM(1));
intel_ring_emit(ring, INSTPM);
intel_ring_emit(ring, mask << 16 | mode);
intel_ring_advance(ring);
dev_priv->relative_constants_mode = mode;
}
if (args->flags & I915_EXEC_GEN7_SOL_RESET) {
ret = i915_reset_gen7_sol_offsets(dev, ring);
if (ret)
goto err;
}
exec_len = args->batch_len;
if (cliprects) {
for (i = 0; i < args->num_cliprects; i++) {
ret = i915_emit_box(dev, &cliprects[i],
args->DR1, args->DR4);
if (ret)
goto err;
ret = ring->dispatch_execbuffer(ring,
exec_start, exec_len,
flags);
if (ret)
goto err;
}
} else {
ret = ring->dispatch_execbuffer(ring,
exec_start, exec_len,
flags);
if (ret)
goto err;
}
trace_i915_gem_ring_dispatch(ring, intel_ring_get_seqno(ring), flags);
i915_gem_execbuffer_move_to_active(&eb->vmas, ring);
i915_gem_execbuffer_retire_commands(dev, file, ring, batch_obj);
err: err:
/* the request owns the ref now */ /* the request owns the ref now */
i915_gem_context_unreference(ctx); i915_gem_context_unreference(ctx);
...@@ -1385,8 +1413,6 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data, ...@@ -1385,8 +1413,6 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
mutex_unlock(&dev->struct_mutex); mutex_unlock(&dev->struct_mutex);
pre_mutex_err: pre_mutex_err:
kfree(cliprects);
/* intel_gpu_busy should also get a ref, so it will free when the device /* intel_gpu_busy should also get a ref, so it will free when the device
* is really idle. */ * is really idle. */
intel_runtime_pm_put(dev_priv); intel_runtime_pm_put(dev_priv);
......
@@ -103,30 +103,68 @@ static unsigned long i915_stolen_to_physical(struct drm_device *dev)
 	return base;
 }
 
-static int i915_setup_compression(struct drm_device *dev, int size)
+static int find_compression_threshold(struct drm_device *dev,
+				      struct drm_mm_node *node,
+				      int size,
+				      int fb_cpp)
 {
 	struct drm_i915_private *dev_priv = dev->dev_private;
-	struct drm_mm_node *compressed_fb, *uninitialized_var(compressed_llb);
+	int compression_threshold = 1;
 	int ret;
 
-	compressed_fb = kzalloc(sizeof(*compressed_fb), GFP_KERNEL);
-	if (!compressed_fb)
-		goto err_llb;
+	/* HACK: This code depends on what we will do in *_enable_fbc. If that
+	 * code changes, this code needs to change as well.
+	 *
+	 * The enable_fbc code will attempt to use one of our 2 compression
+	 * thresholds, therefore, in that case, we only have 1 resort.
+	 */
 
-	/* Try to over-allocate to reduce reallocations and fragmentation */
-	ret = drm_mm_insert_node(&dev_priv->mm.stolen, compressed_fb,
+	/* Try to over-allocate to reduce reallocations and fragmentation. */
+	ret = drm_mm_insert_node(&dev_priv->mm.stolen, node,
 				 size <<= 1, 4096, DRM_MM_SEARCH_DEFAULT);
-	if (ret)
-		ret = drm_mm_insert_node(&dev_priv->mm.stolen, compressed_fb,
-					 size >>= 1, 4096,
-					 DRM_MM_SEARCH_DEFAULT);
-	if (ret)
+	if (ret == 0)
+		return compression_threshold;
+
+again:
+	/* HW's ability to limit the CFB is 1:4 */
+	if (compression_threshold > 4 ||
+	    (fb_cpp == 2 && compression_threshold == 2))
+		return 0;
+
+	ret = drm_mm_insert_node(&dev_priv->mm.stolen, node,
+				 size >>= 1, 4096,
+				 DRM_MM_SEARCH_DEFAULT);
+	if (ret && INTEL_INFO(dev)->gen <= 4) {
+		return 0;
+	} else if (ret) {
+		compression_threshold <<= 1;
+		goto again;
+	} else {
+		return compression_threshold;
+	}
+}
+
+static int i915_setup_compression(struct drm_device *dev, int size, int fb_cpp)
+{
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct drm_mm_node *uninitialized_var(compressed_llb);
+	int ret;
+
+	ret = find_compression_threshold(dev, &dev_priv->fbc.compressed_fb,
+					 size, fb_cpp);
+	if (!ret)
 		goto err_llb;
+	else if (ret > 1) {
+		DRM_INFO("Reducing the compressed framebuffer size. This may lead to less power savings than a non-reduced-size. Try to increase stolen memory size if available in BIOS.\n");
+	}
+
+	dev_priv->fbc.threshold = ret;
 
 	if (HAS_PCH_SPLIT(dev))
-		I915_WRITE(ILK_DPFC_CB_BASE, compressed_fb->start);
+		I915_WRITE(ILK_DPFC_CB_BASE, dev_priv->fbc.compressed_fb.start);
 	else if (IS_GM45(dev)) {
-		I915_WRITE(DPFC_CB_BASE, compressed_fb->start);
+		I915_WRITE(DPFC_CB_BASE, dev_priv->fbc.compressed_fb.start);
 	} else {
 		compressed_llb = kzalloc(sizeof(*compressed_llb), GFP_KERNEL);
 		if (!compressed_llb)
@@ -140,13 +178,12 @@ static int i915_setup_compression(struct drm_device *dev, int size)
 		dev_priv->fbc.compressed_llb = compressed_llb;
 
 		I915_WRITE(FBC_CFB_BASE,
-			   dev_priv->mm.stolen_base + compressed_fb->start);
+			   dev_priv->mm.stolen_base + dev_priv->fbc.compressed_fb.start);
 		I915_WRITE(FBC_LL_BASE,
 			   dev_priv->mm.stolen_base + compressed_llb->start);
 	}
 
-	dev_priv->fbc.compressed_fb = compressed_fb;
-	dev_priv->fbc.size = size;
+	dev_priv->fbc.size = size / dev_priv->fbc.threshold;
 
 	DRM_DEBUG_KMS("reserved %d bytes of contiguous stolen space for FBC\n",
 		      size);
@@ -155,14 +192,13 @@ static int i915_setup_compression(struct drm_device *dev, int size)
 err_fb:
 	kfree(compressed_llb);
-	drm_mm_remove_node(compressed_fb);
+	drm_mm_remove_node(&dev_priv->fbc.compressed_fb);
 err_llb:
-	kfree(compressed_fb);
 	pr_info_once("drm: not enough stolen space for compressed buffer (need %d more bytes), disabling. Hint: you may be able to increase stolen memory size in the BIOS to avoid this.\n", size);
 	return -ENOSPC;
 }
 
-int i915_gem_stolen_setup_compression(struct drm_device *dev, int size)
+int i915_gem_stolen_setup_compression(struct drm_device *dev, int size, int fb_cpp)
 {
 	struct drm_i915_private *dev_priv = dev->dev_private;
@@ -175,7 +211,7 @@ int i915_gem_stolen_setup_compression(struct drm_device *dev, int size)
 	/* Release any current block */
 	i915_gem_stolen_cleanup_compression(dev);
 
-	return i915_setup_compression(dev, size);
+	return i915_setup_compression(dev, size, fb_cpp);
 }
 
 void i915_gem_stolen_cleanup_compression(struct drm_device *dev)
@@ -185,10 +221,7 @@ void i915_gem_stolen_cleanup_compression(struct drm_device *dev)
 	if (dev_priv->fbc.size == 0)
 		return;
 
-	if (dev_priv->fbc.compressed_fb) {
-		drm_mm_remove_node(dev_priv->fbc.compressed_fb);
-		kfree(dev_priv->fbc.compressed_fb);
-	}
+	drm_mm_remove_node(&dev_priv->fbc.compressed_fb);
 
 	if (dev_priv->fbc.compressed_llb) {
 		drm_mm_remove_node(dev_priv->fbc.compressed_llb);
......
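find_compression_threshold() above trades CFB size for a higher compression limit: every retry halves the stolen-memory allocation and doubles the threshold, capped at 1:4 by the hardware (1:2 for 16bpp framebuffers). A rough illustration of the resulting bookkeeping, matching the fbc.size = size / fbc.threshold line in the hunk above (the numbers in the comment are only an example):

/* Sketch: compressed-framebuffer budget implied by a given threshold. */
static int example_cfb_budget(int fb_size, unsigned threshold)
{
	/* threshold 1 -> CFB as large as the framebuffer (no size pressure)
	 * threshold 2 -> CFB of fb_size/2, HW limited to 2:1 compression
	 * threshold 4 -> CFB of fb_size/4, the 1:4 hardware maximum
	 * e.g. an 8 MiB framebuffer with threshold 2 needs a 4 MiB CFB. */
	return fb_size / (int)threshold;
}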
...@@ -327,6 +327,7 @@ int i915_error_state_to_str(struct drm_i915_error_state_buf *m, ...@@ -327,6 +327,7 @@ int i915_error_state_to_str(struct drm_i915_error_state_buf *m,
struct drm_device *dev = error_priv->dev; struct drm_device *dev = error_priv->dev;
struct drm_i915_private *dev_priv = dev->dev_private; struct drm_i915_private *dev_priv = dev->dev_private;
struct drm_i915_error_state *error = error_priv->error; struct drm_i915_error_state *error = error_priv->error;
struct drm_i915_error_object *obj;
int i, j, offset, elt; int i, j, offset, elt;
int max_hangcheck_score; int max_hangcheck_score;
...@@ -395,8 +396,6 @@ int i915_error_state_to_str(struct drm_i915_error_state_buf *m, ...@@ -395,8 +396,6 @@ int i915_error_state_to_str(struct drm_i915_error_state_buf *m,
error->pinned_bo_count[0]); error->pinned_bo_count[0]);
for (i = 0; i < ARRAY_SIZE(error->ring); i++) { for (i = 0; i < ARRAY_SIZE(error->ring); i++) {
struct drm_i915_error_object *obj;
obj = error->ring[i].batchbuffer; obj = error->ring[i].batchbuffer;
if (obj) { if (obj) {
err_puts(m, dev_priv->ring[i].name); err_puts(m, dev_priv->ring[i].name);
...@@ -459,6 +458,18 @@ int i915_error_state_to_str(struct drm_i915_error_state_buf *m, ...@@ -459,6 +458,18 @@ int i915_error_state_to_str(struct drm_i915_error_state_buf *m,
} }
} }
if ((obj = error->semaphore_obj)) {
err_printf(m, "Semaphore page = 0x%08x\n", obj->gtt_offset);
for (elt = 0; elt < PAGE_SIZE/16; elt += 4) {
err_printf(m, "[%04x] %08x %08x %08x %08x\n",
elt * 4,
obj->pages[0][elt],
obj->pages[0][elt+1],
obj->pages[0][elt+2],
obj->pages[0][elt+3]);
}
}
if (error->overlay) if (error->overlay)
intel_overlay_print_error_state(m, error->overlay); intel_overlay_print_error_state(m, error->overlay);
...@@ -529,6 +540,7 @@ static void i915_error_state_free(struct kref *error_ref) ...@@ -529,6 +540,7 @@ static void i915_error_state_free(struct kref *error_ref)
kfree(error->ring[i].requests); kfree(error->ring[i].requests);
} }
i915_error_object_free(error->semaphore_obj);
kfree(error->active_bo); kfree(error->active_bo);
kfree(error->overlay); kfree(error->overlay);
kfree(error->display); kfree(error->display);
...@@ -746,28 +758,65 @@ static void i915_gem_record_fences(struct drm_device *dev, ...@@ -746,28 +758,65 @@ static void i915_gem_record_fences(struct drm_device *dev,
} }
} }
static void i915_record_ring_state(struct drm_device *dev,
static void gen8_record_semaphore_state(struct drm_i915_private *dev_priv,
struct drm_i915_error_state *error,
struct intel_engine_cs *ring, struct intel_engine_cs *ring,
struct drm_i915_error_ring *ering) struct drm_i915_error_ring *ering)
{ {
struct drm_i915_private *dev_priv = dev->dev_private; struct intel_engine_cs *useless;
int i;
if (INTEL_INFO(dev)->gen >= 6) { if (!i915_semaphore_is_enabled(dev_priv->dev))
ering->rc_psmi = I915_READ(ring->mmio_base + 0x50); return;
ering->fault_reg = I915_READ(RING_FAULT_REG(ring));
ering->semaphore_mboxes[0] if (!error->semaphore_obj)
= I915_READ(RING_SYNC_0(ring->mmio_base)); error->semaphore_obj =
ering->semaphore_mboxes[1] i915_error_object_create(dev_priv,
= I915_READ(RING_SYNC_1(ring->mmio_base)); dev_priv->semaphore_obj,
&dev_priv->gtt.base);
for_each_ring(useless, dev_priv, i) {
u16 signal_offset =
(GEN8_SIGNAL_OFFSET(ring, i) & PAGE_MASK) / 4;
u32 *tmp = error->semaphore_obj->pages[0];
ering->semaphore_mboxes[i] = tmp[signal_offset];
ering->semaphore_seqno[i] = ring->semaphore.sync_seqno[i];
}
}
static void gen6_record_semaphore_state(struct drm_i915_private *dev_priv,
struct intel_engine_cs *ring,
struct drm_i915_error_ring *ering)
{
ering->semaphore_mboxes[0] = I915_READ(RING_SYNC_0(ring->mmio_base));
ering->semaphore_mboxes[1] = I915_READ(RING_SYNC_1(ring->mmio_base));
ering->semaphore_seqno[0] = ring->semaphore.sync_seqno[0]; ering->semaphore_seqno[0] = ring->semaphore.sync_seqno[0];
ering->semaphore_seqno[1] = ring->semaphore.sync_seqno[1]; ering->semaphore_seqno[1] = ring->semaphore.sync_seqno[1];
}
if (HAS_VEBOX(dev)) { if (HAS_VEBOX(dev_priv->dev)) {
ering->semaphore_mboxes[2] = ering->semaphore_mboxes[2] =
I915_READ(RING_SYNC_2(ring->mmio_base)); I915_READ(RING_SYNC_2(ring->mmio_base));
ering->semaphore_seqno[2] = ring->semaphore.sync_seqno[2]; ering->semaphore_seqno[2] = ring->semaphore.sync_seqno[2];
} }
}
static void i915_record_ring_state(struct drm_device *dev,
struct drm_i915_error_state *error,
struct intel_engine_cs *ring,
struct drm_i915_error_ring *ering)
{
struct drm_i915_private *dev_priv = dev->dev_private;
if (INTEL_INFO(dev)->gen >= 6) {
ering->rc_psmi = I915_READ(ring->mmio_base + 0x50);
ering->fault_reg = I915_READ(RING_FAULT_REG(ring));
if (INTEL_INFO(dev)->gen >= 8)
gen8_record_semaphore_state(dev_priv, error, ring, ering);
else
gen6_record_semaphore_state(dev_priv, ring, ering);
}
if (INTEL_INFO(dev)->gen >= 4) { if (INTEL_INFO(dev)->gen >= 4) {
ering->faddr = I915_READ(RING_DMA_FADD(ring->mmio_base)); ering->faddr = I915_READ(RING_DMA_FADD(ring->mmio_base));
...@@ -895,7 +944,7 @@ static void i915_gem_record_rings(struct drm_device *dev, ...@@ -895,7 +944,7 @@ static void i915_gem_record_rings(struct drm_device *dev,
error->ring[i].valid = true; error->ring[i].valid = true;
i915_record_ring_state(dev, ring, &error->ring[i]); i915_record_ring_state(dev, error, ring, &error->ring[i]);
request = i915_gem_find_active_request(ring); request = i915_gem_find_active_request(ring);
if (request) { if (request) {
......
@@ -240,7 +240,7 @@
 #define   MI_DISPLAY_FLIP_IVB_SPRITE_B (3 << 19)
 #define   MI_DISPLAY_FLIP_IVB_PLANE_C  (4 << 19)
 #define   MI_DISPLAY_FLIP_IVB_SPRITE_C (5 << 19)
-#define MI_SEMAPHORE_MBOX	MI_INSTR(0x16, 1) /* gen6+ */
+#define MI_SEMAPHORE_MBOX	MI_INSTR(0x16, 1) /* gen6, gen7 */
 #define   MI_SEMAPHORE_GLOBAL_GTT    (1<<22)
 #define   MI_SEMAPHORE_UPDATE	    (1<<21)
 #define   MI_SEMAPHORE_COMPARE	    (1<<20)
@@ -266,6 +266,11 @@
 #define   MI_RESTORE_EXT_STATE_EN	(1<<2)
 #define   MI_FORCE_RESTORE		(1<<1)
 #define   MI_RESTORE_INHIBIT		(1<<0)
+#define MI_SEMAPHORE_SIGNAL	MI_INSTR(0x1b, 0) /* GEN8+ */
+#define   MI_SEMAPHORE_TARGET(engine)	((engine)<<15)
+#define MI_SEMAPHORE_WAIT	MI_INSTR(0x1c, 2) /* GEN8+ */
+#define   MI_SEMAPHORE_POLL		(1<<15)
+#define   MI_SEMAPHORE_SAD_GTE_SDD	(1<<12)
 #define MI_STORE_DWORD_IMM	MI_INSTR(0x20, 1)
 #define   MI_MEM_VIRTUAL	(1 << 22) /* 965+ only */
 #define MI_STORE_DWORD_INDEX	MI_INSTR(0x21, 1)
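MI_SEMAPHORE_SIGNAL/MI_SEMAPHORE_WAIT replace the gen6/7 MBOX scheme on GEN8: the signaller writes its seqno into a per-ring slot of the global semaphore page (dev_priv->semaphore_obj, added earlier in this diff) and the waiter polls that slot. A hedged sketch of the wait side only; the slot address is taken as a parameter here because the real offset macros live in intel_ringbuffer.h, which is not part of this excerpt:

/* Sketch (assumed encoding, not copied from intel_ringbuffer.c): emit a
 * GEN8 semaphore wait that completes once the dword at the given GGTT
 * address is >= seqno. */
static int example_gen8_wait_for_seqno(struct intel_engine_cs *waiter,
				       u64 signal_slot_gtt_addr, u32 seqno)
{
	int ret = intel_ring_begin(waiter, 4);
	if (ret)
		return ret;

	intel_ring_emit(waiter, MI_SEMAPHORE_WAIT |
				MI_SEMAPHORE_GLOBAL_GTT |
				MI_SEMAPHORE_POLL |
				MI_SEMAPHORE_SAD_GTE_SDD);
	intel_ring_emit(waiter, seqno);
	intel_ring_emit(waiter, lower_32_bits(signal_slot_gtt_addr));
	intel_ring_emit(waiter, upper_32_bits(signal_slot_gtt_addr));
	intel_ring_advance(waiter);
	return 0;
}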
...@@ -360,6 +365,7 @@ ...@@ -360,6 +365,7 @@
#define PIPE_CONTROL_TEXTURE_CACHE_INVALIDATE (1<<10) /* GM45+ only */ #define PIPE_CONTROL_TEXTURE_CACHE_INVALIDATE (1<<10) /* GM45+ only */
#define PIPE_CONTROL_INDIRECT_STATE_DISABLE (1<<9) #define PIPE_CONTROL_INDIRECT_STATE_DISABLE (1<<9)
#define PIPE_CONTROL_NOTIFY (1<<8) #define PIPE_CONTROL_NOTIFY (1<<8)
#define PIPE_CONTROL_FLUSH_ENABLE (1<<7) /* gen7+ */
#define PIPE_CONTROL_VF_CACHE_INVALIDATE (1<<4) #define PIPE_CONTROL_VF_CACHE_INVALIDATE (1<<4)
#define PIPE_CONTROL_CONST_CACHE_INVALIDATE (1<<3) #define PIPE_CONTROL_CONST_CACHE_INVALIDATE (1<<3)
#define PIPE_CONTROL_STATE_CACHE_INVALIDATE (1<<2) #define PIPE_CONTROL_STATE_CACHE_INVALIDATE (1<<2)
...@@ -525,6 +531,7 @@ enum punit_power_well { ...@@ -525,6 +531,7 @@ enum punit_power_well {
#define PUNIT_REG_GPU_FREQ_STS 0xd8 #define PUNIT_REG_GPU_FREQ_STS 0xd8
#define GENFREQSTATUS (1<<0) #define GENFREQSTATUS (1<<0)
#define PUNIT_REG_MEDIA_TURBO_FREQ_REQ 0xdc #define PUNIT_REG_MEDIA_TURBO_FREQ_REQ 0xdc
#define PUNIT_REG_CZ_TIMESTAMP 0xce
#define PUNIT_FUSE_BUS2 0xf6 /* bits 47:40 */ #define PUNIT_FUSE_BUS2 0xf6 /* bits 47:40 */
#define PUNIT_FUSE_BUS1 0xf5 /* bits 55:48 */ #define PUNIT_FUSE_BUS1 0xf5 /* bits 55:48 */
...@@ -550,6 +557,11 @@ enum punit_power_well { ...@@ -550,6 +557,11 @@ enum punit_power_well {
#define FB_FMAX_VMIN_FREQ_LO_SHIFT 27 #define FB_FMAX_VMIN_FREQ_LO_SHIFT 27
#define FB_FMAX_VMIN_FREQ_LO_MASK 0xf8000000 #define FB_FMAX_VMIN_FREQ_LO_MASK 0xf8000000
#define VLV_CZ_CLOCK_TO_MILLI_SEC 100000
#define VLV_RP_UP_EI_THRESHOLD 90
#define VLV_RP_DOWN_EI_THRESHOLD 70
#define VLV_INT_COUNT_FOR_DOWN_EI 5
/* vlv2 north clock has */ /* vlv2 north clock has */
#define CCK_FUSE_REG 0x8 #define CCK_FUSE_REG 0x8
#define CCK_FUSE_HPLL_FREQ_MASK 0x3 #define CCK_FUSE_HPLL_FREQ_MASK 0x3
...@@ -584,6 +596,11 @@ enum punit_power_well { ...@@ -584,6 +596,11 @@ enum punit_power_well {
#define DSI_PLL_M1_DIV_SHIFT 0 #define DSI_PLL_M1_DIV_SHIFT 0
#define DSI_PLL_M1_DIV_MASK (0x1ff << 0) #define DSI_PLL_M1_DIV_MASK (0x1ff << 0)
#define CCK_DISPLAY_CLOCK_CONTROL 0x6b #define CCK_DISPLAY_CLOCK_CONTROL 0x6b
#define DISPLAY_TRUNK_FORCE_ON (1 << 17)
#define DISPLAY_TRUNK_FORCE_OFF (1 << 16)
#define DISPLAY_FREQUENCY_STATUS (0x1f << 8)
#define DISPLAY_FREQUENCY_STATUS_SHIFT 8
#define DISPLAY_FREQUENCY_VALUES (0x1f << 0)
/** /**
* DOC: DPIO * DOC: DPIO
...@@ -5383,6 +5400,7 @@ enum punit_power_well { ...@@ -5383,6 +5400,7 @@ enum punit_power_well {
#define VLV_GTLC_ALLOWWAKEERR (1 << 1) #define VLV_GTLC_ALLOWWAKEERR (1 << 1)
#define VLV_GTLC_PW_MEDIA_STATUS_MASK (1 << 5) #define VLV_GTLC_PW_MEDIA_STATUS_MASK (1 << 5)
#define VLV_GTLC_PW_RENDER_STATUS_MASK (1 << 7) #define VLV_GTLC_PW_RENDER_STATUS_MASK (1 << 7)
#define VLV_GTLC_SURVIVABILITY_REG 0x130098
#define FORCEWAKE_MT 0xa188 /* multi-threaded */ #define FORCEWAKE_MT 0xa188 /* multi-threaded */
#define FORCEWAKE_KERNEL 0x1 #define FORCEWAKE_KERNEL 0x1
#define FORCEWAKE_USER 0x2 #define FORCEWAKE_USER 0x2
...@@ -5530,6 +5548,8 @@ enum punit_power_well { ...@@ -5530,6 +5548,8 @@ enum punit_power_well {
#define GEN6_GT_GFX_RC6_LOCKED 0x138104 #define GEN6_GT_GFX_RC6_LOCKED 0x138104
#define VLV_COUNTER_CONTROL 0x138104 #define VLV_COUNTER_CONTROL 0x138104
#define VLV_COUNT_RANGE_HIGH (1<<15) #define VLV_COUNT_RANGE_HIGH (1<<15)
#define VLV_MEDIA_RC0_COUNT_EN (1<<5)
#define VLV_RENDER_RC0_COUNT_EN (1<<4)
#define VLV_MEDIA_RC6_COUNT_EN (1<<1) #define VLV_MEDIA_RC6_COUNT_EN (1<<1)
#define VLV_RENDER_RC6_COUNT_EN (1<<0) #define VLV_RENDER_RC6_COUNT_EN (1<<0)
#define GEN6_GT_GFX_RC6 0x138108 #define GEN6_GT_GFX_RC6 0x138108
...@@ -5538,6 +5558,8 @@ enum punit_power_well { ...@@ -5538,6 +5558,8 @@ enum punit_power_well {
#define GEN6_GT_GFX_RC6p 0x13810C #define GEN6_GT_GFX_RC6p 0x13810C
#define GEN6_GT_GFX_RC6pp 0x138110 #define GEN6_GT_GFX_RC6pp 0x138110
#define VLV_RENDER_C0_COUNT_REG 0x138118
#define VLV_MEDIA_C0_COUNT_REG 0x13811C
#define GEN6_PCODE_MAILBOX 0x138124 #define GEN6_PCODE_MAILBOX 0x138124
#define GEN6_PCODE_READY (1<<31) #define GEN6_PCODE_READY (1<<31)
...@@ -5772,6 +5794,7 @@ enum punit_power_well { ...@@ -5772,6 +5794,7 @@ enum punit_power_well {
#define TRANS_DDI_FUNC_ENABLE (1<<31) #define TRANS_DDI_FUNC_ENABLE (1<<31)
/* Those bits are ignored by pipe EDP since it can only connect to DDI A */ /* Those bits are ignored by pipe EDP since it can only connect to DDI A */
#define TRANS_DDI_PORT_MASK (7<<28) #define TRANS_DDI_PORT_MASK (7<<28)
#define TRANS_DDI_PORT_SHIFT 28
#define TRANS_DDI_SELECT_PORT(x) ((x)<<28) #define TRANS_DDI_SELECT_PORT(x) ((x)<<28)
#define TRANS_DDI_PORT_NONE (0<<28) #define TRANS_DDI_PORT_NONE (0<<28)
#define TRANS_DDI_MODE_SELECT_MASK (7<<24) #define TRANS_DDI_MODE_SELECT_MASK (7<<24)
@@ -5899,10 +5922,12 @@ enum punit_power_well {
 /* WRPLL */
 #define WRPLL_CTL1			0x46040
 #define WRPLL_CTL2			0x46060
+#define WRPLL_CTL(pll)			(pll == 0 ? WRPLL_CTL1 : WRPLL_CTL2)
 #define  WRPLL_PLL_ENABLE		(1<<31)
-#define  WRPLL_PLL_SELECT_SSC		(0x01<<28)
-#define  WRPLL_PLL_SELECT_NON_SSC	(0x02<<28)
-#define  WRPLL_PLL_SELECT_LCPLL_2700	(0x03<<28)
+#define  WRPLL_PLL_SSC			(1<<28)
+#define  WRPLL_PLL_NON_SSC		(2<<28)
+#define  WRPLL_PLL_LCPLL		(3<<28)
+#define  WRPLL_PLL_REF_MASK		(3<<28)
 /* WRPLL divider programming */
 #define  WRPLL_DIVIDER_REFERENCE(x)	((x)<<0)
 #define  WRPLL_DIVIDER_REF_MASK		(0xff)
@@ -5921,6 +5946,7 @@ enum punit_power_well {
 #define  PORT_CLK_SEL_LCPLL_1350	(1<<29)
 #define  PORT_CLK_SEL_LCPLL_810		(2<<29)
 #define  PORT_CLK_SEL_SPLL		(3<<29)
+#define  PORT_CLK_SEL_WRPLL(pll)	(((pll)+4)<<29)
 #define  PORT_CLK_SEL_WRPLL1		(4<<29)
 #define  PORT_CLK_SEL_WRPLL2		(5<<29)
 #define  PORT_CLK_SEL_NONE		(7<<29)
@@ -5962,7 +5988,10 @@ enum punit_power_well {
 #define  LCPLL_CD_SOURCE_FCLK		(1<<21)
 #define  LCPLL_CD_SOURCE_FCLK_DONE	(1<<19)
 
-#define D_COMP				(MCHBAR_MIRROR_BASE_SNB + 0x5F0C)
+/* Please see hsw_read_dcomp() and hsw_write_dcomp() before using this register,
+ * since on HSW we can't write to it using I915_WRITE. */
+#define D_COMP_HSW			(MCHBAR_MIRROR_BASE_SNB + 0x5F0C)
+#define D_COMP_BDW			0x138144
 #define  D_COMP_RCOMP_IN_PROGRESS	(1<<9)
 #define  D_COMP_COMP_FORCE		(1<<8)
 #define  D_COMP_COMP_DISABLE		(1<<0)
......
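The parameterised WRPLL_CTL(pll) and PORT_CLK_SEL_WRPLL(pll) forms let the new shared-dpll code index the two Haswell WRPLLs by DPLL_ID_WRPLL1/2 instead of open-coding both registers. Illustrative use only; the divider value and the per-port PORT_CLK_SEL() accessor below are assumptions, and in the driver the routing value normally arrives precomputed in pipe_config->ddi_pll_sel:

/* Sketch: enable WRPLL 'id' (0 or 1) with some divider setting and route a
 * DDI port to it.  Placeholder values, for illustration only. */
static void example_route_port_to_wrpll(struct drm_i915_private *dev_priv,
					int id, u32 wrpll_dividers, int port)
{
	I915_WRITE(WRPLL_CTL(id),
		   WRPLL_PLL_ENABLE | WRPLL_PLL_LCPLL | wrpll_dividers);
	POSTING_READ(WRPLL_CTL(id));
	udelay(20);			/* give the PLL time to lock */

	I915_WRITE(PORT_CLK_SEL(port), PORT_CLK_SEL_WRPLL(id));
}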
...@@ -137,6 +137,18 @@ static void hsw_crt_get_config(struct intel_encoder *encoder, ...@@ -137,6 +137,18 @@ static void hsw_crt_get_config(struct intel_encoder *encoder,
pipe_config->adjusted_mode.flags |= intel_crt_get_flags(encoder); pipe_config->adjusted_mode.flags |= intel_crt_get_flags(encoder);
} }
static void hsw_crt_pre_enable(struct intel_encoder *encoder)
{
struct drm_device *dev = encoder->base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
WARN(I915_READ(SPLL_CTL) & SPLL_PLL_ENABLE, "SPLL already enabled\n");
I915_WRITE(SPLL_CTL,
SPLL_PLL_ENABLE | SPLL_PLL_FREQ_1350MHz | SPLL_PLL_SSC);
POSTING_READ(SPLL_CTL);
udelay(20);
}
/* Note: The caller is required to filter out dpms modes not supported by the /* Note: The caller is required to filter out dpms modes not supported by the
* platform. */ * platform. */
static void intel_crt_set_dpms(struct intel_encoder *encoder, int mode) static void intel_crt_set_dpms(struct intel_encoder *encoder, int mode)
...@@ -194,6 +206,20 @@ static void intel_disable_crt(struct intel_encoder *encoder) ...@@ -194,6 +206,20 @@ static void intel_disable_crt(struct intel_encoder *encoder)
intel_crt_set_dpms(encoder, DRM_MODE_DPMS_OFF); intel_crt_set_dpms(encoder, DRM_MODE_DPMS_OFF);
} }
static void hsw_crt_post_disable(struct intel_encoder *encoder)
{
struct drm_device *dev = encoder->base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
uint32_t val;
DRM_DEBUG_KMS("Disabling SPLL\n");
val = I915_READ(SPLL_CTL);
WARN_ON(!(val & SPLL_PLL_ENABLE));
I915_WRITE(SPLL_CTL, val & ~SPLL_PLL_ENABLE);
POSTING_READ(SPLL_CTL);
}
static void intel_enable_crt(struct intel_encoder *encoder) static void intel_enable_crt(struct intel_encoder *encoder)
{ {
struct intel_crt *crt = intel_encoder_to_crt(encoder); struct intel_crt *crt = intel_encoder_to_crt(encoder);
@@ -289,8 +315,10 @@ static bool intel_crt_compute_config(struct intel_encoder *encoder,
 		pipe_config->pipe_bpp = 24;
 
 	/* FDI must always be 2.7 GHz */
-	if (HAS_DDI(dev))
+	if (HAS_DDI(dev)) {
+		pipe_config->ddi_pll_sel = PORT_CLK_SEL_SPLL;
 		pipe_config->port_clock = 135000 * 2;
+	}
 
 	return true;
 }
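For reference on the numbers above: port_clock is kept in kHz, so 135000 * 2 = 270000 kHz is a 270 MHz symbol clock, and with FDI's 8b/10b encoding that corresponds to 270 MHz * 10 = 2.7 GHz per lane, which is what the comment's "FDI must always be 2.7 GHz" refers to. PORT_CLK_SEL_SPLL routes the DDI port to the SPLL that hsw_crt_pre_enable() earlier in this file programs to 1350 MHz with SSC.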
@@ -860,6 +888,8 @@ void intel_crt_init(struct drm_device *dev)
 	if (HAS_DDI(dev)) {
 		crt->base.get_config = hsw_crt_get_config;
 		crt->base.get_hw_state = intel_ddi_get_hw_state;
+		crt->base.pre_enable = hsw_crt_pre_enable;
+		crt->base.post_disable = hsw_crt_post_disable;
 	} else {
 		crt->base.get_config = intel_crt_get_config;
 		crt->base.get_hw_state = intel_crt_get_hw_state;
......
...@@ -745,6 +745,22 @@ intel_dp_connector_unregister(struct intel_connector *intel_connector) ...@@ -745,6 +745,22 @@ intel_dp_connector_unregister(struct intel_connector *intel_connector)
intel_connector_unregister(intel_connector); intel_connector_unregister(intel_connector);
} }
static void
hsw_dp_set_ddi_pll_sel(struct intel_crtc_config *pipe_config, int link_bw)
{
switch (link_bw) {
case DP_LINK_BW_1_62:
pipe_config->ddi_pll_sel = PORT_CLK_SEL_LCPLL_810;
break;
case DP_LINK_BW_2_7:
pipe_config->ddi_pll_sel = PORT_CLK_SEL_LCPLL_1350;
break;
case DP_LINK_BW_5_4:
pipe_config->ddi_pll_sel = PORT_CLK_SEL_LCPLL_2700;
break;
}
}
static void static void
intel_dp_set_clock(struct intel_encoder *encoder, intel_dp_set_clock(struct intel_encoder *encoder,
struct intel_crtc_config *pipe_config, int link_bw) struct intel_crtc_config *pipe_config, int link_bw)
...@@ -756,8 +772,6 @@ intel_dp_set_clock(struct intel_encoder *encoder, ...@@ -756,8 +772,6 @@ intel_dp_set_clock(struct intel_encoder *encoder,
if (IS_G4X(dev)) { if (IS_G4X(dev)) {
divisor = gen4_dpll; divisor = gen4_dpll;
count = ARRAY_SIZE(gen4_dpll); count = ARRAY_SIZE(gen4_dpll);
} else if (IS_HASWELL(dev)) {
/* Haswell has special-purpose DP DDI clocks. */
} else if (HAS_PCH_SPLIT(dev)) { } else if (HAS_PCH_SPLIT(dev)) {
divisor = pch_dpll; divisor = pch_dpll;
count = ARRAY_SIZE(pch_dpll); count = ARRAY_SIZE(pch_dpll);
@@ -928,6 +942,9 @@ intel_dp_compute_config(struct intel_encoder *encoder,
 				   &pipe_config->dp_m2_n2);
 	}
 
-	intel_dp_set_clock(encoder, pipe_config, intel_dp->link_bw);
+	if (HAS_DDI(dev))
+		hsw_dp_set_ddi_pll_sel(pipe_config, intel_dp->link_bw);
+	else
+		intel_dp_set_clock(encoder, pipe_config, intel_dp->link_bw);
 
 	return true;
...@@ -1316,8 +1333,6 @@ void intel_edp_panel_off(struct intel_dp *intel_dp) ...@@ -1316,8 +1333,6 @@ void intel_edp_panel_off(struct intel_dp *intel_dp)
DRM_DEBUG_KMS("Turn eDP power off\n"); DRM_DEBUG_KMS("Turn eDP power off\n");
edp_wait_backlight_off(intel_dp);
WARN(!intel_dp->want_panel_vdd, "Need VDD to turn off panel\n"); WARN(!intel_dp->want_panel_vdd, "Need VDD to turn off panel\n");
pp = ironlake_get_pp_control(intel_dp); pp = ironlake_get_pp_control(intel_dp);
...@@ -1353,6 +1368,9 @@ void intel_edp_backlight_on(struct intel_dp *intel_dp) ...@@ -1353,6 +1368,9 @@ void intel_edp_backlight_on(struct intel_dp *intel_dp)
return; return;
DRM_DEBUG_KMS("\n"); DRM_DEBUG_KMS("\n");
intel_panel_enable_backlight(intel_dp->attached_connector);
/* /*
* If we enable the backlight right away following a panel power * If we enable the backlight right away following a panel power
* on, we may see slight flicker as the panel syncs with the eDP * on, we may see slight flicker as the panel syncs with the eDP
...@@ -1367,8 +1385,6 @@ void intel_edp_backlight_on(struct intel_dp *intel_dp) ...@@ -1367,8 +1385,6 @@ void intel_edp_backlight_on(struct intel_dp *intel_dp)
I915_WRITE(pp_ctrl_reg, pp); I915_WRITE(pp_ctrl_reg, pp);
POSTING_READ(pp_ctrl_reg); POSTING_READ(pp_ctrl_reg);
intel_panel_enable_backlight(intel_dp->attached_connector);
} }
void intel_edp_backlight_off(struct intel_dp *intel_dp) void intel_edp_backlight_off(struct intel_dp *intel_dp)
...@@ -1381,8 +1397,6 @@ void intel_edp_backlight_off(struct intel_dp *intel_dp) ...@@ -1381,8 +1397,6 @@ void intel_edp_backlight_off(struct intel_dp *intel_dp)
if (!is_edp(intel_dp)) if (!is_edp(intel_dp))
return; return;
intel_panel_disable_backlight(intel_dp->attached_connector);
DRM_DEBUG_KMS("\n"); DRM_DEBUG_KMS("\n");
pp = ironlake_get_pp_control(intel_dp); pp = ironlake_get_pp_control(intel_dp);
pp &= ~EDP_BLC_ENABLE; pp &= ~EDP_BLC_ENABLE;
...@@ -1392,6 +1406,10 @@ void intel_edp_backlight_off(struct intel_dp *intel_dp) ...@@ -1392,6 +1406,10 @@ void intel_edp_backlight_off(struct intel_dp *intel_dp)
I915_WRITE(pp_ctrl_reg, pp); I915_WRITE(pp_ctrl_reg, pp);
POSTING_READ(pp_ctrl_reg); POSTING_READ(pp_ctrl_reg);
intel_dp->last_backlight_off = jiffies; intel_dp->last_backlight_off = jiffies;
edp_wait_backlight_off(intel_dp);
intel_panel_disable_backlight(intel_dp->attached_connector);
} }
static void ironlake_edp_pll_on(struct intel_dp *intel_dp) static void ironlake_edp_pll_on(struct intel_dp *intel_dp)
...@@ -1751,7 +1769,7 @@ static bool intel_edp_psr_match_conditions(struct intel_dp *intel_dp) ...@@ -1751,7 +1769,7 @@ static bool intel_edp_psr_match_conditions(struct intel_dp *intel_dp)
struct drm_i915_private *dev_priv = dev->dev_private; struct drm_i915_private *dev_priv = dev->dev_private;
struct drm_crtc *crtc = dig_port->base.base.crtc; struct drm_crtc *crtc = dig_port->base.base.crtc;
struct intel_crtc *intel_crtc = to_intel_crtc(crtc); struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
struct drm_i915_gem_object *obj = to_intel_framebuffer(crtc->primary->fb)->obj; struct drm_i915_gem_object *obj = intel_fb_obj(crtc->primary->fb);
struct intel_encoder *intel_encoder = &dp_to_dig_port(intel_dp)->base; struct intel_encoder *intel_encoder = &dp_to_dig_port(intel_dp)->base;
dev_priv->psr.source_ok = false; dev_priv->psr.source_ok = false;
...@@ -1784,7 +1802,6 @@ static bool intel_edp_psr_match_conditions(struct intel_dp *intel_dp) ...@@ -1784,7 +1802,6 @@ static bool intel_edp_psr_match_conditions(struct intel_dp *intel_dp)
return false; return false;
} }
obj = to_intel_framebuffer(crtc->primary->fb)->obj;
if (obj->tiling_mode != I915_TILING_X || if (obj->tiling_mode != I915_TILING_X ||
obj->fence_reg == I915_FENCE_REG_NONE) { obj->fence_reg == I915_FENCE_REG_NONE) {
DRM_DEBUG_KMS("PSR condition failed: fb not tiled or fenced\n"); DRM_DEBUG_KMS("PSR condition failed: fb not tiled or fenced\n");
...@@ -3815,6 +3832,22 @@ intel_dp_hot_plug(struct intel_encoder *intel_encoder) ...@@ -3815,6 +3832,22 @@ intel_dp_hot_plug(struct intel_encoder *intel_encoder)
intel_dp_check_link_status(intel_dp); intel_dp_check_link_status(intel_dp);
} }
bool
intel_dp_hpd_pulse(struct intel_digital_port *intel_dig_port, bool long_hpd)
{
struct intel_dp *intel_dp = &intel_dig_port->dp;
if (long_hpd)
return true;
/*
* we'll check the link status via the normal hot plug path later -
* but for short hpds we should check it now
*/
intel_dp_check_link_status(intel_dp);
return false;
}
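intel_dp_hpd_pulse() above splits hot-plug handling by pulse length: long pulses return true so the caller falls back to the full hotplug path, while short pulses just recheck link status inline and return false. Combined with the hpd_pulse hook and the dev_priv->hpd_irq_port[] registration added further down, the digital-port interrupt handler can dispatch per port. A self-contained sketch of that dispatch shape; the simplified types and the queue_hotplug() stub are stand-ins, not the driver's own.

/*
 * The IRQ path looks up the digital port and calls its hpd_pulse hook;
 * only long pulses get queued for the full hotplug work.
 */
#include <stdbool.h>
#include <stdio.h>

struct intel_digital_port {
	const char *name;
	bool (*hpd_pulse)(struct intel_digital_port *, bool long_hpd);
};

static bool dp_hpd_pulse(struct intel_digital_port *port, bool long_hpd)
{
	if (long_hpd)
		return true;	/* let the normal hotplug path handle it */

	printf("%s: short pulse, checking link status now\n", port->name);
	return false;
}

static void queue_hotplug(struct intel_digital_port *port)
{
	printf("%s: queueing full hotplug processing\n", port->name);
}

int main(void)
{
	struct intel_digital_port port = { "DP-B", dp_hpd_pulse };

	/* short pulse: handled inline */
	if (port.hpd_pulse(&port, false))
		queue_hotplug(&port);

	/* long pulse: deferred to the hotplug work */
	if (port.hpd_pulse(&port, true))
		queue_hotplug(&port);

	return 0;
}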
/* Return which DP Port should be selected for Transcoder DP control */ /* Return which DP Port should be selected for Transcoder DP control */
int int
intel_trans_dp_port_sel(struct drm_crtc *crtc) intel_trans_dp_port_sel(struct drm_crtc *crtc)
...@@ -4387,6 +4420,7 @@ intel_dp_init_connector(struct intel_digital_port *intel_dig_port, ...@@ -4387,6 +4420,7 @@ intel_dp_init_connector(struct intel_digital_port *intel_dig_port,
void void
intel_dp_init(struct drm_device *dev, int output_reg, enum port port) intel_dp_init(struct drm_device *dev, int output_reg, enum port port)
{ {
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_digital_port *intel_dig_port; struct intel_digital_port *intel_dig_port;
struct intel_encoder *intel_encoder; struct intel_encoder *intel_encoder;
struct drm_encoder *encoder; struct drm_encoder *encoder;
...@@ -4443,6 +4477,9 @@ intel_dp_init(struct drm_device *dev, int output_reg, enum port port) ...@@ -4443,6 +4477,9 @@ intel_dp_init(struct drm_device *dev, int output_reg, enum port port)
intel_encoder->cloneable = 0; intel_encoder->cloneable = 0;
intel_encoder->hot_plug = intel_dp_hot_plug; intel_encoder->hot_plug = intel_dp_hot_plug;
intel_dig_port->hpd_pulse = intel_dp_hpd_pulse;
dev_priv->hpd_irq_port[port] = intel_dig_port;
if (!intel_dp_init_connector(intel_dig_port, intel_connector)) { if (!intel_dp_init_connector(intel_dig_port, intel_connector)) {
drm_encoder_cleanup(encoder); drm_encoder_cleanup(encoder);
kfree(intel_dig_port); kfree(intel_dig_port);
......
...@@ -307,6 +307,9 @@ struct intel_crtc_config { ...@@ -307,6 +307,9 @@ struct intel_crtc_config {
/* Selected dpll when shared or DPLL_ID_PRIVATE. */ /* Selected dpll when shared or DPLL_ID_PRIVATE. */
enum intel_dpll_id shared_dpll; enum intel_dpll_id shared_dpll;
/* PORT_CLK_SEL for DDI ports. */
uint32_t ddi_pll_sel;
/* Actual register state of the dpll, for shared dpll cross-checking. */ /* Actual register state of the dpll, for shared dpll cross-checking. */
struct intel_dpll_hw_state dpll_hw_state; struct intel_dpll_hw_state dpll_hw_state;
...@@ -338,6 +341,7 @@ struct intel_crtc_config { ...@@ -338,6 +341,7 @@ struct intel_crtc_config {
u32 pos; u32 pos;
u32 size; u32 size;
bool enabled; bool enabled;
bool force_thru;
} pch_pfit; } pch_pfit;
/* FDI configuration, only valid if has_pch_encoder is set. */ /* FDI configuration, only valid if has_pch_encoder is set. */
...@@ -398,8 +402,6 @@ struct intel_crtc { ...@@ -398,8 +402,6 @@ struct intel_crtc {
struct intel_crtc_config *new_config; struct intel_crtc_config *new_config;
bool new_enabled; bool new_enabled;
uint32_t ddi_pll_sel;
/* reset counter value when the last flip was submitted */ /* reset counter value when the last flip was submitted */
unsigned int reset_counter; unsigned int reset_counter;
...@@ -485,6 +487,7 @@ struct cxsr_latency { ...@@ -485,6 +487,7 @@ struct cxsr_latency {
#define to_intel_encoder(x) container_of(x, struct intel_encoder, base) #define to_intel_encoder(x) container_of(x, struct intel_encoder, base)
#define to_intel_framebuffer(x) container_of(x, struct intel_framebuffer, base) #define to_intel_framebuffer(x) container_of(x, struct intel_framebuffer, base)
#define to_intel_plane(x) container_of(x, struct intel_plane, base) #define to_intel_plane(x) container_of(x, struct intel_plane, base)
#define intel_fb_obj(x) (x ? to_intel_framebuffer(x)->obj : NULL)
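The intel_fb_obj() macro just above centralizes the framebuffer-to-GEM-object lookup and tolerates a NULL framebuffer, which is what lets the PSR hunk earlier drop its second to_intel_framebuffer() dereference. A standalone illustration with simplified stand-in structs (note the macro uses its argument unparenthesized, so callers pass a plain expression):

/*
 * Compile-and-run illustration only: the struct layout is a stand-in
 * for the real intel_framebuffer, kept just small enough to build.
 */
#include <stddef.h>
#include <stdio.h>

struct gem_object { int id; };
struct framebuffer { int dummy; };
struct intel_framebuffer { struct framebuffer base; struct gem_object *obj; };

#define to_intel_framebuffer(x) ((struct intel_framebuffer *)(x))
#define intel_fb_obj(x) (x ? to_intel_framebuffer(x)->obj : NULL)

int main(void)
{
	struct gem_object bo = { .id = 42 };
	struct intel_framebuffer ifb = { .obj = &bo };
	struct framebuffer *fb = &ifb.base;
	struct framebuffer *none = NULL;

	printf("fb obj id: %d\n", intel_fb_obj(fb)->id);
	printf("NULL fb maps to %p\n", (void *)intel_fb_obj(none));
	return 0;
}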
struct intel_hdmi { struct intel_hdmi {
u32 hdmi_reg; u32 hdmi_reg;
...@@ -567,6 +570,7 @@ struct intel_digital_port { ...@@ -567,6 +570,7 @@ struct intel_digital_port {
u32 saved_port_bits; u32 saved_port_bits;
struct intel_dp dp; struct intel_dp dp;
struct intel_hdmi hdmi; struct intel_hdmi hdmi;
bool (*hpd_pulse)(struct intel_digital_port *, bool);
}; };
static inline int static inline int
...@@ -706,10 +710,7 @@ void intel_ddi_disable_transcoder_func(struct drm_i915_private *dev_priv, ...@@ -706,10 +710,7 @@ void intel_ddi_disable_transcoder_func(struct drm_i915_private *dev_priv,
enum transcoder cpu_transcoder); enum transcoder cpu_transcoder);
void intel_ddi_enable_pipe_clock(struct intel_crtc *intel_crtc); void intel_ddi_enable_pipe_clock(struct intel_crtc *intel_crtc);
void intel_ddi_disable_pipe_clock(struct intel_crtc *intel_crtc); void intel_ddi_disable_pipe_clock(struct intel_crtc *intel_crtc);
void intel_ddi_setup_hw_pll_state(struct drm_device *dev);
bool intel_ddi_pll_select(struct intel_crtc *crtc); bool intel_ddi_pll_select(struct intel_crtc *crtc);
void intel_ddi_pll_enable(struct intel_crtc *crtc);
void intel_ddi_put_crtc_pll(struct drm_crtc *crtc);
void intel_ddi_set_pipe_settings(struct drm_crtc *crtc); void intel_ddi_set_pipe_settings(struct drm_crtc *crtc);
void intel_ddi_prepare_link_retrain(struct drm_encoder *encoder); void intel_ddi_prepare_link_retrain(struct drm_encoder *encoder);
bool intel_ddi_connector_get_hw_state(struct intel_connector *intel_connector); bool intel_ddi_connector_get_hw_state(struct intel_connector *intel_connector);
...@@ -722,7 +723,6 @@ void intel_ddi_get_config(struct intel_encoder *encoder, ...@@ -722,7 +723,6 @@ void intel_ddi_get_config(struct intel_encoder *encoder,
const char *intel_output_name(int output); const char *intel_output_name(int output);
bool intel_has_pending_fb_unpin(struct drm_device *dev); bool intel_has_pending_fb_unpin(struct drm_device *dev);
int intel_pch_rawclk(struct drm_device *dev); int intel_pch_rawclk(struct drm_device *dev);
int valleyview_cur_cdclk(struct drm_i915_private *dev_priv);
void intel_mark_busy(struct drm_device *dev); void intel_mark_busy(struct drm_device *dev);
void intel_fb_obj_invalidate(struct drm_i915_gem_object *obj, void intel_fb_obj_invalidate(struct drm_i915_gem_object *obj,
struct intel_engine_cs *ring); struct intel_engine_cs *ring);
...@@ -793,12 +793,18 @@ __intel_framebuffer_create(struct drm_device *dev, ...@@ -793,12 +793,18 @@ __intel_framebuffer_create(struct drm_device *dev,
void intel_prepare_page_flip(struct drm_device *dev, int plane); void intel_prepare_page_flip(struct drm_device *dev, int plane);
void intel_finish_page_flip(struct drm_device *dev, int pipe); void intel_finish_page_flip(struct drm_device *dev, int pipe);
void intel_finish_page_flip_plane(struct drm_device *dev, int plane); void intel_finish_page_flip_plane(struct drm_device *dev, int plane);
/* shared dpll functions */
struct intel_shared_dpll *intel_crtc_to_shared_dpll(struct intel_crtc *crtc); struct intel_shared_dpll *intel_crtc_to_shared_dpll(struct intel_crtc *crtc);
void assert_shared_dpll(struct drm_i915_private *dev_priv, void assert_shared_dpll(struct drm_i915_private *dev_priv,
struct intel_shared_dpll *pll, struct intel_shared_dpll *pll,
bool state); bool state);
#define assert_shared_dpll_enabled(d, p) assert_shared_dpll(d, p, true) #define assert_shared_dpll_enabled(d, p) assert_shared_dpll(d, p, true)
#define assert_shared_dpll_disabled(d, p) assert_shared_dpll(d, p, false) #define assert_shared_dpll_disabled(d, p) assert_shared_dpll(d, p, false)
struct intel_shared_dpll *intel_get_shared_dpll(struct intel_crtc *crtc);
void intel_put_shared_dpll(struct intel_crtc *crtc);
/* modesetting asserts */
void assert_pll(struct drm_i915_private *dev_priv, void assert_pll(struct drm_i915_private *dev_priv,
enum pipe pipe, bool state); enum pipe pipe, bool state);
#define assert_pll_enabled(d, p) assert_pll(d, p, true) #define assert_pll_enabled(d, p) assert_pll(d, p, true)
...@@ -831,7 +837,6 @@ void hsw_disable_ips(struct intel_crtc *crtc); ...@@ -831,7 +837,6 @@ void hsw_disable_ips(struct intel_crtc *crtc);
void intel_display_set_init_power(struct drm_i915_private *dev, bool enable); void intel_display_set_init_power(struct drm_i915_private *dev, bool enable);
enum intel_display_power_domain enum intel_display_power_domain
intel_display_port_power_domain(struct intel_encoder *intel_encoder); intel_display_port_power_domain(struct intel_encoder *intel_encoder);
int valleyview_get_vco(struct drm_i915_private *dev_priv);
void intel_mode_from_pipe_config(struct drm_display_mode *mode, void intel_mode_from_pipe_config(struct drm_display_mode *mode,
struct intel_crtc_config *pipe_config); struct intel_crtc_config *pipe_config);
int intel_format_to_fourcc(int format); int intel_format_to_fourcc(int format);
...@@ -852,6 +857,8 @@ int intel_dp_sink_crc(struct intel_dp *intel_dp, u8 *crc); ...@@ -852,6 +857,8 @@ int intel_dp_sink_crc(struct intel_dp *intel_dp, u8 *crc);
bool intel_dp_compute_config(struct intel_encoder *encoder, bool intel_dp_compute_config(struct intel_encoder *encoder,
struct intel_crtc_config *pipe_config); struct intel_crtc_config *pipe_config);
bool intel_dp_is_edp(struct drm_device *dev, enum port port); bool intel_dp_is_edp(struct drm_device *dev, enum port port);
bool intel_dp_hpd_pulse(struct intel_digital_port *intel_dig_port,
bool long_hpd);
void intel_edp_backlight_on(struct intel_dp *intel_dp); void intel_edp_backlight_on(struct intel_dp *intel_dp);
void intel_edp_backlight_off(struct intel_dp *intel_dp); void intel_edp_backlight_off(struct intel_dp *intel_dp);
void intel_edp_panel_vdd_on(struct intel_dp *intel_dp); void intel_edp_panel_vdd_on(struct intel_dp *intel_dp);
...@@ -863,7 +870,6 @@ void intel_dp_set_drrs_state(struct drm_device *dev, int refresh_rate); ...@@ -863,7 +870,6 @@ void intel_dp_set_drrs_state(struct drm_device *dev, int refresh_rate);
void intel_edp_psr_exit(struct drm_device *dev); void intel_edp_psr_exit(struct drm_device *dev);
void intel_edp_psr_init(struct drm_device *dev); void intel_edp_psr_init(struct drm_device *dev);
/* intel_dsi.c */ /* intel_dsi.c */
void intel_dsi_init(struct drm_device *dev); void intel_dsi_init(struct drm_device *dev);
...@@ -1005,8 +1011,7 @@ void intel_runtime_pm_put(struct drm_i915_private *dev_priv); ...@@ -1005,8 +1011,7 @@ void intel_runtime_pm_put(struct drm_i915_private *dev_priv);
void intel_init_runtime_pm(struct drm_i915_private *dev_priv); void intel_init_runtime_pm(struct drm_i915_private *dev_priv);
void intel_fini_runtime_pm(struct drm_i915_private *dev_priv); void intel_fini_runtime_pm(struct drm_i915_private *dev_priv);
void ilk_wm_get_hw_state(struct drm_device *dev); void ilk_wm_get_hw_state(struct drm_device *dev);
void __vlv_set_power_well(struct drm_i915_private *dev_priv,
enum punit_power_well power_well_id, bool enable);
/* intel_sdvo.c */ /* intel_sdvo.c */
bool intel_sdvo_init(struct drm_device *dev, uint32_t sdvo_reg, bool is_sdvob); bool intel_sdvo_init(struct drm_device *dev, uint32_t sdvo_reg, bool is_sdvob);
......
...@@ -107,7 +107,7 @@ static int intelfb_alloc(struct drm_fb_helper *helper, ...@@ -107,7 +107,7 @@ static int intelfb_alloc(struct drm_fb_helper *helper,
sizes->surface_depth); sizes->surface_depth);
size = mode_cmd.pitches[0] * mode_cmd.height; size = mode_cmd.pitches[0] * mode_cmd.height;
size = ALIGN(size, PAGE_SIZE); size = PAGE_ALIGN(size);
obj = i915_gem_object_create_stolen(dev, size); obj = i915_gem_object_create_stolen(dev, size);
if (obj == NULL) if (obj == NULL)
obj = i915_gem_alloc_object(dev, size); obj = i915_gem_alloc_object(dev, size);
......
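The fbdev allocation hunk above swaps ALIGN(size, PAGE_SIZE) for the equivalent PAGE_ALIGN(size); both round the framebuffer size up to the next page boundary. A quick worked check, assuming 4 KiB pages:

/* PAGE_ALIGN(x) is just ALIGN(x, PAGE_SIZE); quick check with 4 KiB pages. */
#include <stdio.h>

#define PAGE_SIZE 4096UL
#define ALIGN(x, a)   (((x) + (a) - 1) & ~((a) - 1))
#define PAGE_ALIGN(x) ALIGN(x, PAGE_SIZE)

int main(void)
{
	unsigned long size = 800 * 600 * 4;	/* 1920000 bytes, arbitrary example */

	printf("%lu -> %lu\n", size, PAGE_ALIGN(size));	/* 1920000 -> 1921024 */
	return 0;
}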
...@@ -34,11 +34,6 @@ ...@@ -34,11 +34,6 @@
#include <drm/i915_drm.h> #include <drm/i915_drm.h>
#include "i915_drv.h" #include "i915_drv.h"
enum disp_clk {
CDCLK,
CZCLK
};
struct gmbus_port { struct gmbus_port {
const char *name; const char *name;
int reg; int reg;
...@@ -63,60 +58,11 @@ to_intel_gmbus(struct i2c_adapter *i2c) ...@@ -63,60 +58,11 @@ to_intel_gmbus(struct i2c_adapter *i2c)
return container_of(i2c, struct intel_gmbus, adapter); return container_of(i2c, struct intel_gmbus, adapter);
} }
static int get_disp_clk_div(struct drm_i915_private *dev_priv,
enum disp_clk clk)
{
u32 reg_val;
int clk_ratio;
reg_val = I915_READ(CZCLK_CDCLK_FREQ_RATIO);
if (clk == CDCLK)
clk_ratio =
((reg_val & CDCLK_FREQ_MASK) >> CDCLK_FREQ_SHIFT) + 1;
else
clk_ratio = (reg_val & CZCLK_FREQ_MASK) + 1;
return clk_ratio;
}
static void gmbus_set_freq(struct drm_i915_private *dev_priv)
{
int vco, gmbus_freq = 0, cdclk_div;
BUG_ON(!IS_VALLEYVIEW(dev_priv->dev));
vco = valleyview_get_vco(dev_priv);
/* Get the CDCLK divide ratio */
cdclk_div = get_disp_clk_div(dev_priv, CDCLK);
/*
* Program the gmbus_freq based on the cdclk frequency.
* BSpec erroneously claims we should aim for 4MHz, but
* in fact 1MHz is the correct frequency.
*/
if (cdclk_div)
gmbus_freq = (vco << 1) / cdclk_div;
if (WARN_ON(gmbus_freq == 0))
return;
I915_WRITE(GMBUSFREQ_VLV, gmbus_freq);
}
void void
intel_i2c_reset(struct drm_device *dev) intel_i2c_reset(struct drm_device *dev)
{ {
struct drm_i915_private *dev_priv = dev->dev_private; struct drm_i915_private *dev_priv = dev->dev_private;
/*
* In BIOS-less system, program the correct gmbus frequency
* before reading edid.
*/
if (IS_VALLEYVIEW(dev))
gmbus_set_freq(dev_priv);
I915_WRITE(dev_priv->gpio_mmio_base + GMBUS0, 0); I915_WRITE(dev_priv->gpio_mmio_base + GMBUS0, 0);
I915_WRITE(dev_priv->gpio_mmio_base + GMBUS4, 0); I915_WRITE(dev_priv->gpio_mmio_base + GMBUS4, 0);
} }
......
...@@ -51,6 +51,7 @@ struct intel_lvds_encoder { ...@@ -51,6 +51,7 @@ struct intel_lvds_encoder {
bool is_dual_link; bool is_dual_link;
u32 reg; u32 reg;
u32 a3_power;
struct intel_lvds_connector *attached_connector; struct intel_lvds_connector *attached_connector;
}; };
...@@ -71,8 +72,13 @@ static bool intel_lvds_get_hw_state(struct intel_encoder *encoder, ...@@ -71,8 +72,13 @@ static bool intel_lvds_get_hw_state(struct intel_encoder *encoder,
struct drm_device *dev = encoder->base.dev; struct drm_device *dev = encoder->base.dev;
struct drm_i915_private *dev_priv = dev->dev_private; struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_lvds_encoder *lvds_encoder = to_lvds_encoder(&encoder->base); struct intel_lvds_encoder *lvds_encoder = to_lvds_encoder(&encoder->base);
enum intel_display_power_domain power_domain;
u32 tmp; u32 tmp;
power_domain = intel_display_port_power_domain(encoder);
if (!intel_display_power_enabled(dev_priv, power_domain))
return false;
tmp = I915_READ(lvds_encoder->reg); tmp = I915_READ(lvds_encoder->reg);
if (!(tmp & LVDS_PORT_EN)) if (!(tmp & LVDS_PORT_EN))
...@@ -165,8 +171,11 @@ static void intel_pre_enable_lvds(struct intel_encoder *encoder) ...@@ -165,8 +171,11 @@ static void intel_pre_enable_lvds(struct intel_encoder *encoder)
/* It would be nice to set 24 vs 18-bit mode (LVDS_A3_POWER_UP) /* It would be nice to set 24 vs 18-bit mode (LVDS_A3_POWER_UP)
* appropriately here, but we need to look more thoroughly into how * appropriately here, but we need to look more thoroughly into how
* panels behave in the two modes. * panels behave in the two modes. For now, let's just maintain the
* value we got from the BIOS.
*/ */
temp &= ~LVDS_A3_POWER_MASK;
temp |= lvds_encoder->a3_power;
/* Set the dithering flag on LVDS as needed, note that there is no /* Set the dithering flag on LVDS as needed, note that there is no
* special lvds dither control bit on pch-split platforms, dithering is * special lvds dither control bit on pch-split platforms, dithering is
...@@ -264,7 +273,6 @@ static bool intel_lvds_compute_config(struct intel_encoder *intel_encoder, ...@@ -264,7 +273,6 @@ static bool intel_lvds_compute_config(struct intel_encoder *intel_encoder,
struct intel_crtc_config *pipe_config) struct intel_crtc_config *pipe_config)
{ {
struct drm_device *dev = intel_encoder->base.dev; struct drm_device *dev = intel_encoder->base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_lvds_encoder *lvds_encoder = struct intel_lvds_encoder *lvds_encoder =
to_lvds_encoder(&intel_encoder->base); to_lvds_encoder(&intel_encoder->base);
struct intel_connector *intel_connector = struct intel_connector *intel_connector =
...@@ -279,8 +287,7 @@ static bool intel_lvds_compute_config(struct intel_encoder *intel_encoder, ...@@ -279,8 +287,7 @@ static bool intel_lvds_compute_config(struct intel_encoder *intel_encoder,
return false; return false;
} }
if ((I915_READ(lvds_encoder->reg) & LVDS_A3_POWER_MASK) == if (lvds_encoder->a3_power == LVDS_A3_POWER_UP)
LVDS_A3_POWER_UP)
lvds_bpp = 8*3; lvds_bpp = 8*3;
else else
lvds_bpp = 6*3; lvds_bpp = 6*3;
...@@ -1081,6 +1088,9 @@ void intel_lvds_init(struct drm_device *dev) ...@@ -1081,6 +1088,9 @@ void intel_lvds_init(struct drm_device *dev)
DRM_DEBUG_KMS("detected %s-link lvds configuration\n", DRM_DEBUG_KMS("detected %s-link lvds configuration\n",
lvds_encoder->is_dual_link ? "dual" : "single"); lvds_encoder->is_dual_link ? "dual" : "single");
lvds_encoder->a3_power = I915_READ(lvds_encoder->reg) &
LVDS_A3_POWER_MASK;
/* /*
* Unlock registers and just * Unlock registers and just
* leave them unlocked * leave them unlocked
......
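The LVDS hunks above cache the BIOS-programmed LVDS_A3_POWER field at init time, re-apply it in the pre-enable path, and use the cached value to pick 18 vs 24 bpp in compute_config without reading a register that may be powered down at that point. A small sketch of the bpp derivation; the bit values are placeholders for illustration, the real LVDS_A3_POWER_* definitions live in i915_reg.h.

/*
 * Sketch of the bpp selection from the cached A3 power bits.
 */
#include <stdint.h>
#include <stdio.h>

#define LVDS_A3_POWER_MASK (3u << 6)	/* placeholder layout */
#define LVDS_A3_POWER_UP   (3u << 6)

int main(void)
{
	uint32_t lvds_reg_at_init = 0x800000c0;	/* pretend BIOS-programmed value */
	uint32_t a3_power = lvds_reg_at_init & LVDS_A3_POWER_MASK;
	int lvds_bpp = (a3_power == LVDS_A3_POWER_UP) ? 8 * 3 : 6 * 3;

	printf("cached a3_power=0x%x -> %d bpp panel\n", a3_power, lvds_bpp);
	return 0;
}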
This diff is collapsed.
This diff is collapsed.
...@@ -40,6 +40,32 @@ struct intel_hw_status_page { ...@@ -40,6 +40,32 @@ struct intel_hw_status_page {
#define I915_READ_MODE(ring) I915_READ(RING_MI_MODE((ring)->mmio_base)) #define I915_READ_MODE(ring) I915_READ(RING_MI_MODE((ring)->mmio_base))
#define I915_WRITE_MODE(ring, val) I915_WRITE(RING_MI_MODE((ring)->mmio_base), val) #define I915_WRITE_MODE(ring, val) I915_WRITE(RING_MI_MODE((ring)->mmio_base), val)
/* seqno size is actually only a uint32, but since we plan to use MI_FLUSH_DW to
* do the writes, and that must have qw aligned offsets, simply pretend it's 8b.
*/
#define i915_semaphore_seqno_size sizeof(uint64_t)
#define GEN8_SIGNAL_OFFSET(__ring, to) \
(i915_gem_obj_ggtt_offset(dev_priv->semaphore_obj) + \
((__ring)->id * I915_NUM_RINGS * i915_semaphore_seqno_size) + \
(i915_semaphore_seqno_size * (to)))
#define GEN8_WAIT_OFFSET(__ring, from) \
(i915_gem_obj_ggtt_offset(dev_priv->semaphore_obj) + \
((from) * I915_NUM_RINGS * i915_semaphore_seqno_size) + \
(i915_semaphore_seqno_size * (__ring)->id))
#define GEN8_RING_SEMAPHORE_INIT do { \
if (!dev_priv->semaphore_obj) { \
break; \
} \
ring->semaphore.signal_ggtt[RCS] = GEN8_SIGNAL_OFFSET(ring, RCS); \
ring->semaphore.signal_ggtt[VCS] = GEN8_SIGNAL_OFFSET(ring, VCS); \
ring->semaphore.signal_ggtt[BCS] = GEN8_SIGNAL_OFFSET(ring, BCS); \
ring->semaphore.signal_ggtt[VECS] = GEN8_SIGNAL_OFFSET(ring, VECS); \
ring->semaphore.signal_ggtt[VCS2] = GEN8_SIGNAL_OFFSET(ring, VCS2); \
ring->semaphore.signal_ggtt[ring->id] = MI_SEMAPHORE_SYNC_INVALID; \
} while(0)
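The gen8 semaphore macros above lay every ring's mailboxes out linearly in one GGTT object: ring X signals ring Y at slot X * NUM_RINGS + Y, eight bytes per slot, and Y waits on that same slot through the transposed indexing. A quick standalone check of a few offsets against the signal/wait tables documented in intel_engine_cs below; the five-ring ordering and the 8-byte slot size are taken from the diff, and the GGTT base offset is omitted.

/*
 * Standalone check of the slot arithmetic behind GEN8_SIGNAL_OFFSET /
 * GEN8_WAIT_OFFSET (GGTT base left out). With 5 rings and 8-byte slots
 * this reproduces the offsets in the table comment, e.g. VCS signalling
 * BCS and BCS waiting on VCS both land on 0x38.
 */
#include <stdio.h>

enum ring_id { RCS, VCS, BCS, VECS, VCS2, NUM_RINGS };
#define SEQNO_SIZE 8	/* "pretend it's 8b" per the comment above */

static unsigned int signal_offset(enum ring_id from, enum ring_id to)
{
	return from * NUM_RINGS * SEQNO_SIZE + to * SEQNO_SIZE;
}

static unsigned int wait_offset(enum ring_id ring, enum ring_id from)
{
	return from * NUM_RINGS * SEQNO_SIZE + ring * SEQNO_SIZE;
}

int main(void)
{
	printf("VCS signals BCS at 0x%02x\n", signal_offset(VCS, BCS));   /* 0x38 */
	printf("BCS waits on VCS at 0x%02x\n", wait_offset(BCS, VCS));    /* 0x38 */
	printf("VCS2 signals RCS at 0x%02x\n", signal_offset(VCS2, RCS)); /* 0xa0 */
	printf("RCS waits on VCS2 at 0x%02x\n", wait_offset(RCS, VCS2));  /* 0xa0 */
	return 0;
}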
enum intel_ring_hangcheck_action { enum intel_ring_hangcheck_action {
HANGCHECK_IDLE = 0, HANGCHECK_IDLE = 0,
HANGCHECK_WAIT, HANGCHECK_WAIT,
...@@ -127,15 +153,55 @@ struct intel_engine_cs { ...@@ -127,15 +153,55 @@ struct intel_engine_cs {
#define I915_DISPATCH_PINNED 0x2 #define I915_DISPATCH_PINNED 0x2
void (*cleanup)(struct intel_engine_cs *ring); void (*cleanup)(struct intel_engine_cs *ring);
/* GEN8 signal/wait table - never trust comments!
* signal to signal to signal to signal to signal to
* RCS VCS BCS VECS VCS2
* --------------------------------------------------------------------
* RCS | NOP (0x00) | VCS (0x08) | BCS (0x10) | VECS (0x18) | VCS2 (0x20) |
* |-------------------------------------------------------------------
* VCS | RCS (0x28) | NOP (0x30) | BCS (0x38) | VECS (0x40) | VCS2 (0x48) |
* |-------------------------------------------------------------------
* BCS | RCS (0x50) | VCS (0x58) | NOP (0x60) | VECS (0x68) | VCS2 (0x70) |
* |-------------------------------------------------------------------
* VECS | RCS (0x78) | VCS (0x80) | BCS (0x88) | NOP (0x90) | VCS2 (0x98) |
* |-------------------------------------------------------------------
* VCS2 | RCS (0xa0) | VCS (0xa8) | BCS (0xb0) | VECS (0xb8) | NOP (0xc0) |
* |-------------------------------------------------------------------
*
* Generalization:
* f(x, y) := (x->id * NUM_RINGS * seqno_size) + (seqno_size * y->id)
* ie. transpose of g(x, y)
*
* sync from sync from sync from sync from sync from
* RCS VCS BCS VECS VCS2
* --------------------------------------------------------------------
* RCS | NOP (0x00) | VCS (0x28) | BCS (0x50) | VECS (0x78) | VCS2 (0xa0) |
* |-------------------------------------------------------------------
* VCS | RCS (0x08) | NOP (0x30) | BCS (0x58) | VECS (0x80) | VCS2 (0xa8) |
* |-------------------------------------------------------------------
* BCS | RCS (0x10) | VCS (0x38) | NOP (0x60) | VECS (0x88) | VCS2 (0xb0) |
* |-------------------------------------------------------------------
* VECS | RCS (0x18) | VCS (0x40) | BCS (0x68) | NOP (0x90) | VCS2 (0xb8) |
* |-------------------------------------------------------------------
* VCS2 | RCS (0x20) | VCS (0x48) | BCS (0x70) | VECS (0x98) | NOP (0xc0) |
* |-------------------------------------------------------------------
*
* Generalization:
* g(x, y) := (y->id * NUM_RINGS * seqno_size) + (seqno_size * x->id)
* ie. transpose of f(x, y)
*/
struct { struct {
u32 sync_seqno[I915_NUM_RINGS-1]; u32 sync_seqno[I915_NUM_RINGS-1];
union {
struct { struct {
/* our mbox written by others */ /* our mbox written by others */
u32 wait[I915_NUM_RINGS]; u32 wait[I915_NUM_RINGS];
/* mboxes this ring signals to */ /* mboxes this ring signals to */
u32 signal[I915_NUM_RINGS]; u32 signal[I915_NUM_RINGS];
} mbox; } mbox;
u64 signal_ggtt[I915_NUM_RINGS];
};
/* AKA wait() */ /* AKA wait() */
int (*sync_to)(struct intel_engine_cs *ring, int (*sync_to)(struct intel_engine_cs *ring,
...@@ -238,9 +304,11 @@ intel_ring_sync_index(struct intel_engine_cs *ring, ...@@ -238,9 +304,11 @@ intel_ring_sync_index(struct intel_engine_cs *ring,
int idx; int idx;
/* /*
* cs -> 0 = vcs, 1 = bcs * rcs -> 0 = vcs, 1 = bcs, 2 = vecs, 3 = vcs2;
* vcs -> 0 = bcs, 1 = cs, * vcs -> 0 = bcs, 1 = vecs, 2 = vcs2, 3 = rcs;
* bcs -> 0 = cs, 1 = vcs. * bcs -> 0 = vecs, 1 = vcs2, 2 = rcs, 3 = vcs;
* vecs -> 0 = vcs2, 1 = rcs, 2 = vcs, 3 = bcs;
* vcs2 -> 0 = rcs, 1 = vcs, 2 = bcs, 3 = vecs;
*/ */
idx = (other - ring) - 1; idx = (other - ring) - 1;
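The corrected comment above follows directly from idx = (other - ring) - 1 wrapped into 0..NUM_RINGS-1; the negative-wrap adjustment sits in the elided remainder of the function. A standalone derivation of the whole table, with the ring ordering taken from the diff:

/*
 * Derive each ring's sync_seqno index for every other ring, matching
 * the updated comment in intel_ring_sync_index().
 */
#include <stdio.h>

#define NUM_RINGS 5
static const char *name[NUM_RINGS] = { "rcs", "vcs", "bcs", "vecs", "vcs2" };

int main(void)
{
	for (int ring = 0; ring < NUM_RINGS; ring++) {
		printf("%s ->", name[ring]);
		for (int other = 0; other < NUM_RINGS; other++) {
			if (other == ring)
				continue;
			int idx = other - ring - 1;
			if (idx < 0)
				idx += NUM_RINGS;	/* wrap, as the driver does */
			printf(" %d = %s,", idx, name[other]);
		}
		printf("\n");
	}
	return 0;
}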
...@@ -318,9 +386,9 @@ int intel_init_vebox_ring_buffer(struct drm_device *dev); ...@@ -318,9 +386,9 @@ int intel_init_vebox_ring_buffer(struct drm_device *dev);
u64 intel_ring_get_active_head(struct intel_engine_cs *ring); u64 intel_ring_get_active_head(struct intel_engine_cs *ring);
void intel_ring_setup_status_page(struct intel_engine_cs *ring); void intel_ring_setup_status_page(struct intel_engine_cs *ring);
static inline u32 intel_ring_get_tail(struct intel_engine_cs *ring) static inline u32 intel_ring_get_tail(struct intel_ringbuffer *ringbuf)
{ {
return ring->buffer->tail; return ringbuf->tail;
} }
static inline u32 intel_ring_get_seqno(struct intel_engine_cs *ring) static inline u32 intel_ring_get_seqno(struct intel_engine_cs *ring)
......
...@@ -1010,7 +1010,7 @@ int i915_get_reset_stats_ioctl(struct drm_device *dev, ...@@ -1010,7 +1010,7 @@ int i915_get_reset_stats_ioctl(struct drm_device *dev,
if (args->flags || args->pad) if (args->flags || args->pad)
return -EINVAL; return -EINVAL;
if (args->ctx_id == DEFAULT_CONTEXT_ID && !capable(CAP_SYS_ADMIN)) if (args->ctx_id == DEFAULT_CONTEXT_HANDLE && !capable(CAP_SYS_ADMIN))
return -EPERM; return -EPERM;
ret = mutex_lock_interruptible(&dev->struct_mutex); ret = mutex_lock_interruptible(&dev->struct_mutex);
......