Commit 436e94a4 authored by Dave Airlie

Merge tag 'drm-intel-next-fixes-2014-10-03' of git://anongit.freedesktop.org/drm-intel into drm-next

Bunch of fixes for 3.18. Major parts:
- ppgtt fixes (but full ppgtt is for 3.19) from Chris, Michel, ...
- hdmi pixel replication fixes (Clint Taylor)
- leftover i830M patches from Ville
- small things all over

* tag 'drm-intel-next-fixes-2014-10-03' of git://anongit.freedesktop.org/drm-intel: (21 commits)
  drm/i915: Enable pixel replicated modes on BDW and HSW.
  drm/i915: Don't spam dmesg with rps messages on vlv/chv
  drm/i915: Do not leak pages when freeing userptr objects
  drm/i915: Do not store the error pointer for a failed userptr registration
  Revert "drm/i915/bdw: BDW Software Turbo"
  drm/i915/bdw: Cleanup pre prod workarounds
  drm/i915: Use EIO instead of EAGAIN for sink CRC error.
  drm/i915: Extend BIOS stolen mem handling to all platform
  drm/i915: Match GTT space sanity checker with implementation
  drm/i915: HSW always use GGTT selector for secure batches
  drm/i915: add cherryview specfic forcewake in execlists_elsp_write
  drm/i915: fix another use-after-free in i915_gem_evict_everything
  drm/i915: Don't reinit hpd interrupts after gpu reset
  drm/i915: Wrap -EIO send-vblank event for failed pageflip in spinlock
  drm/i915: Drop any active reference before unbinding
  drm/i915: Objects on the unbound list may still have an active reference
  drm/i915/edp: use lane count and link rate from DPCD for eDP
  drm/i915/dp: add missing \n in the TPS3 debug message
  drm/i915/hdmi, dp: Do not dereference the encoder in the connector destroy
  drm/i915: Limit the watermark to at least 8 entries on gen2/3
  ...
parents ccb09a8e ebb69c95
drivers/gpu/drm/i915/i915_debugfs.c

@@ -3826,7 +3826,6 @@ i915_drop_caches_set(void *data, u64 val)
 {
     struct drm_device *dev = data;
     struct drm_i915_private *dev_priv = dev->dev_private;
-    struct drm_i915_gem_object *obj, *next;
     int ret;
 
     DRM_DEBUG("Dropping caches: 0x%08llx\n", val);
@@ -3846,36 +3845,11 @@ i915_drop_caches_set(void *data, u64 val)
     if (val & (DROP_RETIRE | DROP_ACTIVE))
         i915_gem_retire_requests(dev);
 
-    if (val & DROP_BOUND) {
-        list_for_each_entry_safe(obj, next, &dev_priv->mm.bound_list,
-                                 global_list) {
-            struct i915_vma *vma, *v;
-
-            ret = 0;
-            drm_gem_object_reference(&obj->base);
-            list_for_each_entry_safe(vma, v, &obj->vma_list, vma_link) {
-                if (vma->pin_count)
-                    continue;
-
-                ret = i915_vma_unbind(vma);
-                if (ret)
-                    break;
-            }
-            drm_gem_object_unreference(&obj->base);
-            if (ret)
-                goto unlock;
-        }
-    }
+    if (val & DROP_BOUND)
+        i915_gem_shrink(dev_priv, LONG_MAX, I915_SHRINK_BOUND);
 
-    if (val & DROP_UNBOUND) {
-        list_for_each_entry_safe(obj, next, &dev_priv->mm.unbound_list,
-                                 global_list)
-            if (obj->pages_pin_count == 0) {
-                ret = i915_gem_object_put_pages(obj);
-                if (ret)
-                    goto unlock;
-            }
-    }
+    if (val & DROP_UNBOUND)
+        i915_gem_shrink(dev_priv, LONG_MAX, I915_SHRINK_UNBOUND);
 
 unlock:
     mutex_unlock(&dev->struct_mutex);
drivers/gpu/drm/i915/i915_drv.c

@@ -871,8 +871,6 @@ int i915_reset(struct drm_device *dev)
          */
         if (INTEL_INFO(dev)->gen > 5)
             intel_reset_gt_powersave(dev);
-
-        intel_hpd_init(dev);
     } else {
         mutex_unlock(&dev->struct_mutex);
     }
drivers/gpu/drm/i915/i915_drv.h

@@ -946,23 +946,6 @@ struct intel_rps_ei {
     u32 media_c0;
 };
 
-struct intel_rps_bdw_cal {
-    u32 it_threshold_pct; /* interrupt, in percentage */
-    u32 eval_interval; /* evaluation interval, in us */
-    u32 last_ts;
-    u32 last_c0;
-    bool is_up;
-};
-
-struct intel_rps_bdw_turbo {
-    struct intel_rps_bdw_cal up;
-    struct intel_rps_bdw_cal down;
-    struct timer_list flip_timer;
-    u32 timeout;
-    atomic_t flip_received;
-    struct work_struct work_max_freq;
-};
-
 struct intel_gen6_power_mgmt {
     /* work and pm_iir are protected by dev_priv->irq_lock */
     struct work_struct work;
@@ -996,9 +979,6 @@ struct intel_gen6_power_mgmt {
     bool enabled;
     struct delayed_work delayed_resume_work;
 
-    bool is_bdw_sw_turbo; /* Switch of BDW software turbo */
-    struct intel_rps_bdw_turbo sw_turbo; /* Calculate RP interrupt timing */
-
     /* manual wa residency calculations */
     struct intel_rps_ei up_ei, down_ei;
 
@@ -2369,6 +2349,12 @@ int i915_gem_get_aperture_ioctl(struct drm_device *dev, void *data,
 int i915_gem_wait_ioctl(struct drm_device *dev, void *data,
                         struct drm_file *file_priv);
 void i915_gem_load(struct drm_device *dev);
+unsigned long i915_gem_shrink(struct drm_i915_private *dev_priv,
+                              long target,
+                              unsigned flags);
+#define I915_SHRINK_PURGEABLE 0x1
+#define I915_SHRINK_UNBOUND 0x2
+#define I915_SHRINK_BOUND 0x4
 void *i915_gem_object_alloc(struct drm_device *dev);
 void i915_gem_object_free(struct drm_i915_gem_object *obj);
 void i915_gem_object_init(struct drm_i915_gem_object *obj,
@@ -2823,8 +2809,6 @@ extern void intel_disable_fbc(struct drm_device *dev);
 extern bool ironlake_set_drps(struct drm_device *dev, u8 val);
 extern void intel_init_pch_refclk(struct drm_device *dev);
 extern void gen6_set_rps(struct drm_device *dev, u8 val);
-extern void bdw_software_turbo(struct drm_device *dev);
-extern void gen8_flip_interrupt(struct drm_device *dev);
 extern void valleyview_set_rps(struct drm_device *dev, u8 val);
 extern void intel_set_memory_cxsr(struct drm_i915_private *dev_priv,
                                   bool enable);
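For reference, the I915_SHRINK_* bits declared above fold the old i915_gem_purge()/__i915_gem_shrink() pair into one entry point whose behaviour is selected by a mask: the old purge corresponds to BOUND | UNBOUND | PURGEABLE and a full shrink to BOUND | UNBOUND, as the converted call sites later in this diff show. A minimal userspace sketch of how the combinations compose (shrink() here is a stand-in, not driver code):

#include <limits.h>
#include <stdio.h>

#define I915_SHRINK_PURGEABLE 0x1
#define I915_SHRINK_UNBOUND   0x2
#define I915_SHRINK_BOUND     0x4

/* Stand-in for i915_gem_shrink(): reports which phases a mask selects. */
static void shrink(long target, unsigned flags)
{
    printf("target=%ld:%s%s%s\n", target,
           flags & I915_SHRINK_BOUND ? " walk bound (unbind first)" : "",
           flags & I915_SHRINK_UNBOUND ? " walk unbound" : "",
           flags & I915_SHRINK_PURGEABLE ? " purgeable only" : "");
}

int main(void)
{
    /* what the old i915_gem_purge(dev_priv, n) now spells: */
    shrink(128, I915_SHRINK_BOUND | I915_SHRINK_UNBOUND | I915_SHRINK_PURGEABLE);
    /* what the old __i915_gem_shrink(dev_priv, n, false) now spells: */
    shrink(128, I915_SHRINK_BOUND | I915_SHRINK_UNBOUND);
    /* the debugfs DROP_BOUND case becomes a single-phase call: */
    shrink(LONG_MAX, I915_SHRINK_BOUND);
    return 0;
}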
drivers/gpu/drm/i915/i915_gem.c

@@ -60,7 +60,6 @@ static unsigned long i915_gem_shrinker_scan(struct shrinker *shrinker,
 static int i915_gem_shrinker_oom(struct notifier_block *nb,
                                  unsigned long event,
                                  void *ptr);
-static unsigned long i915_gem_purge(struct drm_i915_private *dev_priv, long target);
 static unsigned long i915_gem_shrink_all(struct drm_i915_private *dev_priv);
 
 static bool cpu_cache_is_coherent(struct drm_device *dev,
@@ -1741,7 +1740,11 @@ static int i915_gem_object_create_mmap_offset(struct drm_i915_gem_object *obj)
      * offsets on purgeable objects by truncating it and marking it purged,
      * which prevents userspace from ever using that object again.
      */
-    i915_gem_purge(dev_priv, obj->base.size >> PAGE_SHIFT);
+    i915_gem_shrink(dev_priv,
+                    obj->base.size >> PAGE_SHIFT,
+                    I915_SHRINK_BOUND |
+                    I915_SHRINK_UNBOUND |
+                    I915_SHRINK_PURGEABLE);
     ret = drm_gem_create_mmap_offset(&obj->base);
     if (ret != -ENOSPC)
         goto out;
@@ -1938,12 +1941,11 @@ i915_gem_object_put_pages(struct drm_i915_gem_object *obj)
     return 0;
 }
 
-static unsigned long
-__i915_gem_shrink(struct drm_i915_private *dev_priv, long target,
-                  bool purgeable_only)
+unsigned long
+i915_gem_shrink(struct drm_i915_private *dev_priv,
+                long target, unsigned flags)
 {
-    struct list_head still_in_list;
-    struct drm_i915_gem_object *obj;
+    const bool purgeable_only = flags & I915_SHRINK_PURGEABLE;
     unsigned long count = 0;
 
     /*
@@ -1965,62 +1967,68 @@ __i915_gem_shrink(struct drm_i915_private *dev_priv, long target,
      * dev->struct_mutex and so we won't ever be able to observe an
      * object on the bound_list with a reference count equals 0.
      */
-    INIT_LIST_HEAD(&still_in_list);
-    while (count < target && !list_empty(&dev_priv->mm.unbound_list)) {
-        obj = list_first_entry(&dev_priv->mm.unbound_list,
-                               typeof(*obj), global_list);
-        list_move_tail(&obj->global_list, &still_in_list);
+    if (flags & I915_SHRINK_UNBOUND) {
+        struct list_head still_in_list;
 
-        if (!i915_gem_object_is_purgeable(obj) && purgeable_only)
-            continue;
+        INIT_LIST_HEAD(&still_in_list);
+        while (count < target && !list_empty(&dev_priv->mm.unbound_list)) {
+            struct drm_i915_gem_object *obj;
 
-        drm_gem_object_reference(&obj->base);
+            obj = list_first_entry(&dev_priv->mm.unbound_list,
+                                   typeof(*obj), global_list);
+            list_move_tail(&obj->global_list, &still_in_list);
 
-        if (i915_gem_object_put_pages(obj) == 0)
-            count += obj->base.size >> PAGE_SHIFT;
+            if (!i915_gem_object_is_purgeable(obj) && purgeable_only)
+                continue;
+
+            drm_gem_object_reference(&obj->base);
 
-        drm_gem_object_unreference(&obj->base);
+            if (i915_gem_object_put_pages(obj) == 0)
+                count += obj->base.size >> PAGE_SHIFT;
+
+            drm_gem_object_unreference(&obj->base);
+        }
+        list_splice(&still_in_list, &dev_priv->mm.unbound_list);
     }
-    list_splice(&still_in_list, &dev_priv->mm.unbound_list);
 
-    INIT_LIST_HEAD(&still_in_list);
-    while (count < target && !list_empty(&dev_priv->mm.bound_list)) {
-        struct i915_vma *vma, *v;
+    if (flags & I915_SHRINK_BOUND) {
+        struct list_head still_in_list;
 
-        obj = list_first_entry(&dev_priv->mm.bound_list,
-                               typeof(*obj), global_list);
-        list_move_tail(&obj->global_list, &still_in_list);
+        INIT_LIST_HEAD(&still_in_list);
+        while (count < target && !list_empty(&dev_priv->mm.bound_list)) {
+            struct drm_i915_gem_object *obj;
+            struct i915_vma *vma, *v;
 
-        if (!i915_gem_object_is_purgeable(obj) && purgeable_only)
-            continue;
+            obj = list_first_entry(&dev_priv->mm.bound_list,
+                                   typeof(*obj), global_list);
+            list_move_tail(&obj->global_list, &still_in_list);
 
-        drm_gem_object_reference(&obj->base);
+            if (!i915_gem_object_is_purgeable(obj) && purgeable_only)
+                continue;
 
-        list_for_each_entry_safe(vma, v, &obj->vma_list, vma_link)
-            if (i915_vma_unbind(vma))
-                break;
+            drm_gem_object_reference(&obj->base);
 
-        if (i915_gem_object_put_pages(obj) == 0)
-            count += obj->base.size >> PAGE_SHIFT;
+            list_for_each_entry_safe(vma, v, &obj->vma_list, vma_link)
+                if (i915_vma_unbind(vma))
+                    break;
+
+            if (i915_gem_object_put_pages(obj) == 0)
+                count += obj->base.size >> PAGE_SHIFT;
 
-        drm_gem_object_unreference(&obj->base);
+            drm_gem_object_unreference(&obj->base);
+        }
+        list_splice(&still_in_list, &dev_priv->mm.bound_list);
     }
-    list_splice(&still_in_list, &dev_priv->mm.bound_list);
 
     return count;
 }
 
-static unsigned long
-i915_gem_purge(struct drm_i915_private *dev_priv, long target)
-{
-    return __i915_gem_shrink(dev_priv, target, true);
-}
-
 static unsigned long
 i915_gem_shrink_all(struct drm_i915_private *dev_priv)
 {
     i915_gem_evict_everything(dev_priv->dev);
-    return __i915_gem_shrink(dev_priv, LONG_MAX, false);
+    return i915_gem_shrink(dev_priv, LONG_MAX,
+                           I915_SHRINK_BOUND | I915_SHRINK_UNBOUND);
 }
 
 static int
@@ -2067,7 +2075,11 @@ i915_gem_object_get_pages_gtt(struct drm_i915_gem_object *obj)
     for (i = 0; i < page_count; i++) {
         page = shmem_read_mapping_page_gfp(mapping, i, gfp);
         if (IS_ERR(page)) {
-            i915_gem_purge(dev_priv, page_count);
+            i915_gem_shrink(dev_priv,
+                            page_count,
+                            I915_SHRINK_BOUND |
+                            I915_SHRINK_UNBOUND |
+                            I915_SHRINK_PURGEABLE);
             page = shmem_read_mapping_page_gfp(mapping, i, gfp);
         }
         if (IS_ERR(page)) {
@@ -2944,6 +2956,9 @@ int i915_vma_unbind(struct i915_vma *vma)
      * cause memory corruption through use-after-free.
      */
 
+    /* Throw away the active reference before moving to the unbound list */
+    i915_gem_object_retire(obj);
+
     if (i915_is_ggtt(vma->vm)) {
         i915_gem_object_finish_gtt(obj);
@@ -3336,17 +3351,20 @@ i915_gem_object_get_fence(struct drm_i915_gem_object *obj)
     return 0;
 }
 
-static bool i915_gem_valid_gtt_space(struct drm_device *dev,
-                                     struct drm_mm_node *gtt_space,
+static bool i915_gem_valid_gtt_space(struct i915_vma *vma,
                                      unsigned long cache_level)
 {
+    struct drm_mm_node *gtt_space = &vma->node;
     struct drm_mm_node *other;
 
-    /* On non-LLC machines we have to be careful when putting differing
-     * types of snoopable memory together to avoid the prefetcher
-     * crossing memory domains and dying.
+    /*
+     * On some machines we have to be careful when putting differing types
+     * of snoopable memory together to avoid the prefetcher crossing memory
+     * domains and dying. During vm initialisation, we decide whether or not
+     * these constraints apply and set the drm_mm.color_adjust
+     * appropriately.
      */
-    if (HAS_LLC(dev))
+    if (vma->vm->mm.color_adjust == NULL)
         return true;
 
     if (!drm_mm_node_allocated(gtt_space))
@@ -3484,8 +3502,7 @@ i915_gem_object_bind_to_vm(struct drm_i915_gem_object *obj,
         goto err_free_vma;
     }
 
-    if (WARN_ON(!i915_gem_valid_gtt_space(dev, &vma->node,
-                                          obj->cache_level))) {
+    if (WARN_ON(!i915_gem_valid_gtt_space(vma, obj->cache_level))) {
         ret = -EINVAL;
         goto err_remove_node;
     }
@@ -3695,7 +3712,7 @@ int i915_gem_object_set_cache_level(struct drm_i915_gem_object *obj,
     }
 
     list_for_each_entry_safe(vma, next, &obj->vma_list, vma_link) {
-        if (!i915_gem_valid_gtt_space(dev, &vma->node, cache_level)) {
+        if (!i915_gem_valid_gtt_space(vma, cache_level)) {
             ret = i915_vma_unbind(vma);
             if (ret)
                 return ret;
@@ -5261,11 +5278,16 @@ i915_gem_shrinker_scan(struct shrinker *shrinker, struct shrink_control *sc)
     if (!i915_gem_shrinker_lock(dev, &unlock))
         return SHRINK_STOP;
 
-    freed = i915_gem_purge(dev_priv, sc->nr_to_scan);
+    freed = i915_gem_shrink(dev_priv,
+                            sc->nr_to_scan,
+                            I915_SHRINK_BOUND |
+                            I915_SHRINK_UNBOUND |
+                            I915_SHRINK_PURGEABLE);
     if (freed < sc->nr_to_scan)
-        freed += __i915_gem_shrink(dev_priv,
-                                   sc->nr_to_scan - freed,
-                                   false);
+        freed += i915_gem_shrink(dev_priv,
+                                 sc->nr_to_scan - freed,
+                                 I915_SHRINK_BOUND |
+                                 I915_SHRINK_UNBOUND);
     if (unlock)
         mutex_unlock(&dev->struct_mutex);
drivers/gpu/drm/i915/i915_gem_evict.c

@@ -243,7 +243,7 @@ int
 i915_gem_evict_everything(struct drm_device *dev)
 {
     struct drm_i915_private *dev_priv = dev->dev_private;
-    struct i915_address_space *vm;
+    struct i915_address_space *vm, *v;
     bool lists_empty = true;
     int ret;
 
@@ -270,7 +270,7 @@ i915_gem_evict_everything(struct drm_device *dev)
     i915_gem_retire_requests(dev);
 
     /* Having flushed everything, unbind() should never raise an error */
-    list_for_each_entry(vm, &dev_priv->vm_list, global_link)
+    list_for_each_entry_safe(vm, v, &dev_priv->vm_list, global_link)
         WARN_ON(i915_gem_evict_vm(vm, false));
 
     return 0;
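The fix swaps list_for_each_entry() for its _safe variant because evicting a VM can tear the whole VM down and free its list node; the _safe form caches the next pointer before the loop body runs. A self-contained sketch of the same idiom on a plain linked list (illustrative only, not the kernel list API):

#include <stdio.h>
#include <stdlib.h>

struct node { int val; struct node *next; };

int main(void)
{
    struct node *head = NULL, *cur, *next;
    int i;

    for (i = 0; i < 3; i++) {        /* build 2 -> 1 -> 0 */
        cur = malloc(sizeof(*cur));
        cur->val = i;
        cur->next = head;
        head = cur;
    }

    /* "safe" traversal: fetch next before the body may free cur */
    for (cur = head; cur; cur = next) {
        next = cur->next;
        printf("visiting %d\n", cur->val);
        free(cur);                   /* destroying cur is now harmless */
    }
    return 0;
}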
drivers/gpu/drm/i915/i915_gem_stolen.c

@@ -289,6 +289,7 @@ void i915_gem_cleanup_stolen(struct drm_device *dev)
 int i915_gem_init_stolen(struct drm_device *dev)
 {
     struct drm_i915_private *dev_priv = dev->dev_private;
+    u32 tmp;
     int bios_reserved = 0;
 
 #ifdef CONFIG_INTEL_IOMMU
@@ -308,8 +309,16 @@ int i915_gem_init_stolen(struct drm_device *dev)
     DRM_DEBUG_KMS("found %zd bytes of stolen memory at %08lx\n",
                   dev_priv->gtt.stolen_size, dev_priv->mm.stolen_base);
 
-    if (IS_VALLEYVIEW(dev))
-        bios_reserved = 1024*1024; /* top 1M on VLV/BYT */
+    if (INTEL_INFO(dev)->gen >= 8) {
+        tmp = I915_READ(GEN7_BIOS_RESERVED);
+        tmp >>= GEN8_BIOS_RESERVED_SHIFT;
+        tmp &= GEN8_BIOS_RESERVED_MASK;
+        bios_reserved = (1024*1024) << tmp;
+    } else if (IS_GEN7(dev)) {
+        tmp = I915_READ(GEN7_BIOS_RESERVED);
+        bios_reserved = tmp & GEN7_BIOS_RESERVED_256K ?
+            256*1024 : 1024*1024;
+    }
 
     if (WARN_ON(bios_reserved > dev_priv->gtt.stolen_size))
         return 0;
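Decoded, the new logic reserves 1MB << field for the gen8+ two-bit field (1MB, 2MB, 4MB or 8MB) and either 256KB or 1MB depending on gen7's single bit. A quick standalone check of that arithmetic, using the shift and mask values added to i915_reg.h later in this diff:

#include <stdint.h>
#include <stdio.h>

#define GEN8_BIOS_RESERVED_MASK 0x3
#define GEN7_BIOS_RESERVED_256K (1 << 5)

int main(void)
{
    uint32_t field, reg;

    /* gen8+: bios_reserved = (1024*1024) << field */
    for (field = 0; field <= GEN8_BIOS_RESERVED_MASK; field++)
        printf("gen8 field %u -> %u MiB reserved\n", field, 1u << field);

    /* gen7: one bit selects 256KB instead of the default 1MB */
    for (reg = 0; reg <= GEN7_BIOS_RESERVED_256K; reg += GEN7_BIOS_RESERVED_256K)
        printf("gen7 reg 0x%02x -> %u KiB reserved\n",
               reg, reg & GEN7_BIOS_RESERVED_256K ? 256 : 1024);
    return 0;
}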
drivers/gpu/drm/i915/i915_gem_userptr.c

@@ -293,15 +293,23 @@ i915_gem_userptr_release__mmu_notifier(struct drm_i915_gem_object *obj)
 static struct i915_mmu_notifier *
 i915_mmu_notifier_find(struct i915_mm_struct *mm)
 {
-    if (mm->mn == NULL) {
-        down_write(&mm->mm->mmap_sem);
-        mutex_lock(&to_i915(mm->dev)->mm_lock);
-        if (mm->mn == NULL)
-            mm->mn = i915_mmu_notifier_create(mm->mm);
-        mutex_unlock(&to_i915(mm->dev)->mm_lock);
-        up_write(&mm->mm->mmap_sem);
+    struct i915_mmu_notifier *mn = mm->mn;
+
+    mn = mm->mn;
+    if (mn)
+        return mn;
+
+    down_write(&mm->mm->mmap_sem);
+    mutex_lock(&to_i915(mm->dev)->mm_lock);
+    if ((mn = mm->mn) == NULL) {
+        mn = i915_mmu_notifier_create(mm->mm);
+        if (!IS_ERR(mn))
+            mm->mn = mn;
     }
+    mutex_unlock(&to_i915(mm->dev)->mm_lock);
+    up_write(&mm->mm->mmap_sem);
 
-    return mm->mn;
+    return mn;
 }
 
 static int
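Two things change in i915_mmu_notifier_find(): the fast path returns an existing notifier without taking any lock, and an ERR_PTR from a failed creation is no longer cached in mm->mn, so a later caller can retry instead of tripping over a stored error pointer. The shape is classic double-checked creation; a userspace sketch with a pthread mutex standing in for mm_lock (names invented, and ignoring the memory-ordering care a real lock-free fast path needs):

#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static void *cached;                 /* stands in for mm->mn */

static void *create(void)            /* stands in for i915_mmu_notifier_create() */
{
    return malloc(16);               /* may fail; NULL plays the ERR_PTR role */
}

static void *find(void)
{
    void *obj = cached;              /* unlocked fast path */

    if (obj)
        return obj;

    pthread_mutex_lock(&lock);
    obj = cached;                    /* re-check: another thread may have won */
    if (obj == NULL) {
        obj = create();
        if (obj)                     /* only cache a successful creation */
            cached = obj;
    }
    pthread_mutex_unlock(&lock);
    return obj;
}

int main(void)
{
    printf("%s\n", find() ? "created" : "failed, next call retries");
    return 0;
}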
@@ -681,16 +689,15 @@ i915_gem_userptr_get_pages(struct drm_i915_gem_object *obj)
 static void
 i915_gem_userptr_put_pages(struct drm_i915_gem_object *obj)
 {
-    struct scatterlist *sg;
-    int i;
+    struct sg_page_iter sg_iter;
 
     BUG_ON(obj->userptr.work != NULL);
 
     if (obj->madv != I915_MADV_WILLNEED)
         obj->dirty = 0;
 
-    for_each_sg(obj->pages->sgl, sg, obj->pages->nents, i) {
-        struct page *page = sg_page(sg);
+    for_each_sg_page(obj->pages->sgl, &sg_iter, obj->pages->nents, 0) {
+        struct page *page = sg_page_iter_page(&sg_iter);
 
         if (obj->dirty)
             set_page_dirty(page);
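The leak this fixes: a coalesced scatterlist entry can describe several contiguous pages, so a for_each_sg() walk that calls sg_page() only ever sees the first page of each entry, while for_each_sg_page() visits every page. A toy model of the difference (plain C, not the kernel scatterlist API):

#include <stdio.h>

struct ent { int first_page; int npages; };   /* toy scatterlist entry */

int main(void)
{
    struct ent table[] = { { 0, 3 }, { 8, 1 }, { 12, 2 } };  /* 6 pages */
    int nents = 3, i, p, per_entry = 0, per_page = 0;

    for (i = 0; i < nents; i++)      /* for_each_sg() + sg_page() style */
        per_entry++;                 /* only the first page is touched */

    for (i = 0; i < nents; i++)      /* for_each_sg_page() style */
        for (p = 0; p < table[i].npages; p++)
            per_page++;              /* every page is touched */

    printf("per-entry walk: %d of 6 pages; per-page walk: %d of 6 pages\n",
           per_entry, per_page);
    return 0;
}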
drivers/gpu/drm/i915/i915_irq.c

@@ -1979,27 +1979,6 @@ static void i9xx_pipe_crc_irq_handler(struct drm_device *dev, enum pipe pipe)
               res1, res2);
 }
 
-void gen8_flip_interrupt(struct drm_device *dev)
-{
-    struct drm_i915_private *dev_priv = dev->dev_private;
-
-    if (!dev_priv->rps.is_bdw_sw_turbo)
-        return;
-
-    if(atomic_read(&dev_priv->rps.sw_turbo.flip_received)) {
-        mod_timer(&dev_priv->rps.sw_turbo.flip_timer,
-                  usecs_to_jiffies(dev_priv->rps.sw_turbo.timeout) + jiffies);
-    }
-    else {
-        dev_priv->rps.sw_turbo.flip_timer.expires =
-            usecs_to_jiffies(dev_priv->rps.sw_turbo.timeout) + jiffies;
-        add_timer(&dev_priv->rps.sw_turbo.flip_timer);
-        atomic_set(&dev_priv->rps.sw_turbo.flip_received, true);
-    }
-
-    bdw_software_turbo(dev);
-}
-
 /* The RPS events need forcewake, so we add them to a work queue and mask their
  * IMR bits until the work is done. Other interrupts can be processed without
  * the work queue. */
drivers/gpu/drm/i915/i915_reg.h

@@ -143,6 +143,14 @@
 #define GAB_CTL 0x24000
 #define GAB_CTL_CONT_AFTER_PAGEFAULT (1<<8)
 
+#define GEN7_BIOS_RESERVED		0x1082C0
+#define GEN7_BIOS_RESERVED_1M		(0 << 5)
+#define GEN7_BIOS_RESERVED_256K		(1 << 5)
+#define GEN8_BIOS_RESERVED_SHIFT	7
+#define GEN7_BIOS_RESERVED_MASK		0x1
+#define GEN8_BIOS_RESERVED_MASK		0x3
+
 /* VGA stuff */
 
 #define VGA_ST01_MDA 0x3ba
@@ -2435,6 +2443,7 @@ enum punit_power_well {
 #define _PIPEASRC	0x6001c
 #define _BCLRPAT_A	0x60020
 #define _VSYNCSHIFT_A	0x60028
+#define _PIPE_MULT_A	0x6002c
 
 /* Pipe B timing regs */
 #define _HTOTAL_B	0x61000
@@ -2446,6 +2455,7 @@ enum punit_power_well {
 #define _PIPEBSRC	0x6101c
 #define _BCLRPAT_B	0x61020
 #define _VSYNCSHIFT_B	0x61028
+#define _PIPE_MULT_B	0x6102c
 
 #define TRANSCODER_A_OFFSET 0x60000
 #define TRANSCODER_B_OFFSET 0x61000
@@ -2466,6 +2476,7 @@ enum punit_power_well {
 #define BCLRPAT(trans) _TRANSCODER2(trans, _BCLRPAT_A)
 #define VSYNCSHIFT(trans) _TRANSCODER2(trans, _VSYNCSHIFT_A)
 #define PIPESRC(trans) _TRANSCODER2(trans, _PIPEASRC)
+#define PIPE_MULT(trans) _TRANSCODER2(trans, _PIPE_MULT_A)
 
 /* HSW+ eDP PSR registers */
 #define EDP_PSR_BASE(dev) (IS_HASWELL(dev) ? 0x64800 : 0x6f800)
@@ -5577,10 +5588,6 @@ enum punit_power_well {
 #define GEN8_UCGCTL6	0x9430
 #define GEN8_SDEUNIT_CLOCK_GATE_DISABLE	(1<<14)
 
-#define TIMESTAMP_CTR		0x44070
-#define FREQ_1_28_US(us)	(((us) * 100) >> 7)
-#define MCHBAR_PCU_C0		(MCHBAR_MIRROR_BASE_SNB + 0x5960)
-
 #define GEN6_GFXPAUSE	0xA000
 #define GEN6_RPNSWREQ	0xA008
 #define GEN6_TURBO_DISABLE	(1<<31)
drivers/gpu/drm/i915/intel_display.c

@@ -1612,6 +1612,18 @@ static void chv_enable_pll(struct intel_crtc *crtc)
     mutex_unlock(&dev_priv->dpio_lock);
 }
 
+static int intel_num_dvo_pipes(struct drm_device *dev)
+{
+    struct intel_crtc *crtc;
+    int count = 0;
+
+    for_each_intel_crtc(dev, crtc)
+        count += crtc->active &&
+            intel_pipe_has_type(&crtc->base, INTEL_OUTPUT_DVO);
+
+    return count;
+}
+
 static void i9xx_enable_pll(struct intel_crtc *crtc)
 {
     struct drm_device *dev = crtc->base.dev;
@@ -1628,7 +1640,18 @@ static void i9xx_enable_pll(struct intel_crtc *crtc)
     if (IS_MOBILE(dev) && !IS_I830(dev))
         assert_panel_unlocked(dev_priv, crtc->pipe);
 
+    /* Enable DVO 2x clock on both PLLs if necessary */
+    if (IS_I830(dev) && intel_num_dvo_pipes(dev) > 0) {
+        /*
+         * It appears to be important that we don't enable this
+         * for the current pipe before otherwise configuring the
+         * PLL. No idea how this should be handled if multiple
+         * DVO outputs are enabled simultaneosly.
+         */
+        dpll |= DPLL_DVO_2X_MODE;
+        I915_WRITE(DPLL(!crtc->pipe),
+                   I915_READ(DPLL(!crtc->pipe)) | DPLL_DVO_2X_MODE);
+    }
+
     I915_WRITE(reg, dpll);
 
     /* Wait for the clocks to stabilize. */
     POSTING_READ(reg);
@@ -1667,8 +1690,22 @@ static void i9xx_enable_pll(struct intel_crtc *crtc)
  *
  * Note!  This is for pre-ILK only.
  */
-static void i9xx_disable_pll(struct drm_i915_private *dev_priv, enum pipe pipe)
+static void i9xx_disable_pll(struct intel_crtc *crtc)
 {
+    struct drm_device *dev = crtc->base.dev;
+    struct drm_i915_private *dev_priv = dev->dev_private;
+    enum pipe pipe = crtc->pipe;
+
+    /* Disable DVO 2x clock on both PLLs if necessary */
+    if (IS_I830(dev) &&
+        intel_pipe_has_type(&crtc->base, INTEL_OUTPUT_DVO) &&
+        intel_num_dvo_pipes(dev) == 1) {
+        I915_WRITE(DPLL(PIPE_B),
+                   I915_READ(DPLL(PIPE_B)) & ~DPLL_DVO_2X_MODE);
+        I915_WRITE(DPLL(PIPE_A),
+                   I915_READ(DPLL(PIPE_A)) & ~DPLL_DVO_2X_MODE);
+    }
+
     /* Don't disable pipe or pipe PLLs if needed */
     if ((pipe == PIPE_A && dev_priv->quirks & QUIRK_PIPEA_FORCE) ||
         (pipe == PIPE_B && dev_priv->quirks & QUIRK_PIPEB_FORCE))
@@ -4185,6 +4222,11 @@ static void haswell_crtc_enable(struct drm_crtc *crtc)
 
     intel_set_pipe_timings(intel_crtc);
 
+    if (intel_crtc->config.cpu_transcoder != TRANSCODER_EDP) {
+        I915_WRITE(PIPE_MULT(intel_crtc->config.cpu_transcoder),
+                   intel_crtc->config.pixel_multiplier - 1);
+    }
+
     if (intel_crtc->config.has_pch_encoder) {
         intel_cpu_transcoder_set_m_n(intel_crtc,
                                      &intel_crtc->config.fdi_m_n, NULL);
@@ -4941,7 +4983,7 @@ static void i9xx_crtc_disable(struct drm_crtc *crtc)
         else if (IS_VALLEYVIEW(dev))
             vlv_disable_pll(dev_priv, pipe);
         else
-            i9xx_disable_pll(dev_priv, pipe);
+            i9xx_disable_pll(intel_crtc);
     }
 
     if (!IS_GEN2(dev))
@@ -5945,7 +5987,7 @@ static void i8xx_update_pll(struct intel_crtc *crtc,
             dpll |= PLL_P2_DIVIDE_BY_4;
     }
 
-    if (intel_pipe_has_type(&crtc->base, INTEL_OUTPUT_DVO))
+    if (!IS_I830(dev) && intel_pipe_has_type(&crtc->base, INTEL_OUTPUT_DVO))
         dpll |= DPLL_DVO_2X_MODE;
 
     if (intel_pipe_has_type(&crtc->base, INTEL_OUTPUT_LVDS) &&
@@ -6451,6 +6493,14 @@ static bool i9xx_get_pipe_config(struct intel_crtc *crtc,
     }
     pipe_config->dpll_hw_state.dpll = I915_READ(DPLL(crtc->pipe));
     if (!IS_VALLEYVIEW(dev)) {
+        /*
+         * DPLL_DVO_2X_MODE must be enabled for both DPLLs
+         * on 830. Filter it out here so that we don't
+         * report errors due to that.
+         */
+        if (IS_I830(dev))
+            pipe_config->dpll_hw_state.dpll &= ~DPLL_DVO_2X_MODE;
+
         pipe_config->dpll_hw_state.fp0 = I915_READ(FP0(crtc->pipe));
         pipe_config->dpll_hw_state.fp1 = I915_READ(FP1(crtc->pipe));
     } else {
@@ -7845,7 +7895,12 @@ static bool haswell_get_pipe_config(struct intel_crtc *crtc,
     pipe_config->ips_enabled = hsw_crtc_supports_ips(crtc) &&
         (I915_READ(IPS_CTL) & IPS_ENABLE);
 
-    pipe_config->pixel_multiplier = 1;
+    if (pipe_config->cpu_transcoder != TRANSCODER_EDP) {
+        pipe_config->pixel_multiplier =
+            I915_READ(PIPE_MULT(pipe_config->cpu_transcoder)) + 1;
+    } else {
+        pipe_config->pixel_multiplier = 1;
+    }
 
     return true;
 }
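Note the off-by-one convention in the two PIPE_MULT hunks above: the register stores the pixel multiplier minus one, so the enable path writes pixel_multiplier - 1 and the readout adds 1 back. The 2x pixel replication used by the HDMI fixes in this pull is therefore a register value of 1. A one-line encode/decode sketch:

#include <stdio.h>

/* PIPE_MULT holds (multiplier - 1), per the hunks above. */
static unsigned int pipe_mult_encode(unsigned int mult) { return mult - 1; }
static unsigned int pipe_mult_decode(unsigned int reg)  { return reg + 1; }

int main(void)
{
    unsigned int mult = 2;   /* 2x pixel replication */
    unsigned int reg = pipe_mult_encode(mult);

    printf("multiplier %u -> register %u -> readout %u\n",
           mult, reg, pipe_mult_decode(reg));
    return 0;
}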
@@ -9881,9 +9936,6 @@ static int intel_crtc_page_flip(struct drm_crtc *crtc,
     unsigned long flags;
     int ret;
 
-    //trigger software GT busyness calculation
-    gen8_flip_interrupt(dev);
-
     /*
      * drm_mode_page_flip_ioctl() should already catch this, but double
      * check to be safe.  In the future we may enable pageflipping from
@@ -10039,8 +10091,11 @@ static int intel_crtc_page_flip(struct drm_crtc *crtc,
 out_hang:
         intel_crtc_wait_for_pending_flips(crtc);
         ret = intel_pipe_set_base(crtc, crtc->x, crtc->y, fb);
-        if (ret == 0 && event)
+        if (ret == 0 && event) {
+            spin_lock_irqsave(&dev->event_lock, flags);
             drm_send_vblank_event(dev, pipe, event);
+            spin_unlock_irqrestore(&dev->event_lock, flags);
+        }
     }
     return ret;
 }
drivers/gpu/drm/i915/intel_dp.c

@@ -1068,23 +1068,15 @@ intel_dp_compute_config(struct intel_encoder *encoder,
             bpp = dev_priv->vbt.edp_bpp;
         }
 
-        if (IS_BROADWELL(dev)) {
-            /* Yes, it's an ugly hack. */
-            min_lane_count = max_lane_count;
-            DRM_DEBUG_KMS("forcing lane count to max (%u) on BDW\n",
-                          min_lane_count);
-        } else if (dev_priv->vbt.edp_lanes) {
-            min_lane_count = min(dev_priv->vbt.edp_lanes,
-                                 max_lane_count);
-            DRM_DEBUG_KMS("using min %u lanes per VBT\n",
-                          min_lane_count);
-        }
-
-        if (dev_priv->vbt.edp_rate) {
-            min_clock = min(dev_priv->vbt.edp_rate >> 3, max_clock);
-            DRM_DEBUG_KMS("using min %02x link bw per VBT\n",
-                          bws[min_clock]);
-        }
+        /*
+         * Use the maximum clock and number of lanes the eDP panel
+         * advertizes being capable of. The panels are generally
+         * designed to support only a single clock and lane
+         * configuration, and typically these values correspond to the
+         * native resolution of the panel.
+         */
+        min_lane_count = max_lane_count;
+        min_clock = max_clock;
     }
 
     for (; bpp >= 6*3; bpp -= 2*3) {
@@ -3732,7 +3724,7 @@ intel_dp_get_dpcd(struct intel_dp *intel_dp)
     if (intel_dp->dpcd[DP_DPCD_REV] >= 0x12 &&
         intel_dp->dpcd[DP_MAX_LANE_COUNT] & DP_TPS3_SUPPORTED) {
         intel_dp->use_tps3 = true;
-        DRM_DEBUG_KMS("Displayport TPS3 supported");
+        DRM_DEBUG_KMS("Displayport TPS3 supported\n");
     } else
         intel_dp->use_tps3 = false;
 
@@ -3808,21 +3800,21 @@ int intel_dp_sink_crc(struct intel_dp *intel_dp, u8 *crc)
     u8 buf[1];
 
     if (drm_dp_dpcd_readb(&intel_dp->aux, DP_TEST_SINK_MISC, buf) < 0)
-        return -EAGAIN;
+        return -EIO;
 
     if (!(buf[0] & DP_TEST_CRC_SUPPORTED))
         return -ENOTTY;
 
     if (drm_dp_dpcd_writeb(&intel_dp->aux, DP_TEST_SINK,
                            DP_TEST_SINK_START) < 0)
-        return -EAGAIN;
+        return -EIO;
 
     /* Wait 2 vblanks to be sure we will have the correct CRC value */
     intel_wait_for_vblank(dev, intel_crtc->pipe);
     intel_wait_for_vblank(dev, intel_crtc->pipe);
 
     if (drm_dp_dpcd_read(&intel_dp->aux, DP_TEST_CRC_R_CR, crc, 6) < 0)
-        return -EAGAIN;
+        return -EIO;
 
     drm_dp_dpcd_writeb(&intel_dp->aux, DP_TEST_SINK, 0);
 
     return 0;
@@ -4395,7 +4387,7 @@ intel_dp_connector_destroy(struct drm_connector *connector)
 {
     struct intel_connector *intel_connector = to_intel_connector(connector);
 
-    intel_dp_unset_edid(intel_attached_dp(connector));
+    kfree(intel_connector->detect_edid);
 
     if (!IS_ERR_OR_NULL(intel_connector->edid))
         kfree(intel_connector->edid);
drivers/gpu/drm/i915/intel_hdmi.c

@@ -1501,7 +1501,7 @@ static void chv_hdmi_pre_enable(struct intel_encoder *encoder)
 
 static void intel_hdmi_destroy(struct drm_connector *connector)
 {
-    intel_hdmi_unset_edid(connector);
+    kfree(to_intel_connector(connector)->detect_edid);
     drm_connector_cleanup(connector);
     kfree(connector);
 }
drivers/gpu/drm/i915/intel_lrc.c

@@ -300,8 +300,18 @@ static void execlists_elsp_write(struct intel_engine_cs *ring,
      * Instead, we do the runtime_pm_get/put when creating/destroying requests.
      */
     spin_lock_irqsave(&dev_priv->uncore.lock, flags);
-    if (dev_priv->uncore.forcewake_count++ == 0)
-        dev_priv->uncore.funcs.force_wake_get(dev_priv, FORCEWAKE_ALL);
+    if (IS_CHERRYVIEW(dev_priv->dev)) {
+        if (dev_priv->uncore.fw_rendercount++ == 0)
+            dev_priv->uncore.funcs.force_wake_get(dev_priv,
+                                                  FORCEWAKE_RENDER);
+        if (dev_priv->uncore.fw_mediacount++ == 0)
+            dev_priv->uncore.funcs.force_wake_get(dev_priv,
+                                                  FORCEWAKE_MEDIA);
+    } else {
+        if (dev_priv->uncore.forcewake_count++ == 0)
+            dev_priv->uncore.funcs.force_wake_get(dev_priv,
+                                                  FORCEWAKE_ALL);
+    }
     spin_unlock_irqrestore(&dev_priv->uncore.lock, flags);
 
     I915_WRITE(RING_ELSP(ring), desc[1]);
@@ -315,8 +325,19 @@ static void execlists_elsp_write(struct intel_engine_cs *ring,
 
     /* Release Force Wakeup (see the big comment above). */
     spin_lock_irqsave(&dev_priv->uncore.lock, flags);
-    if (--dev_priv->uncore.forcewake_count == 0)
-        dev_priv->uncore.funcs.force_wake_put(dev_priv, FORCEWAKE_ALL);
+    if (IS_CHERRYVIEW(dev_priv->dev)) {
+        if (--dev_priv->uncore.fw_rendercount == 0)
+            dev_priv->uncore.funcs.force_wake_put(dev_priv,
+                                                  FORCEWAKE_RENDER);
+        if (--dev_priv->uncore.fw_mediacount == 0)
+            dev_priv->uncore.funcs.force_wake_put(dev_priv,
+                                                  FORCEWAKE_MEDIA);
+    } else {
+        if (--dev_priv->uncore.forcewake_count == 0)
+            dev_priv->uncore.funcs.force_wake_put(dev_priv,
+                                                  FORCEWAKE_ALL);
+    }
     spin_unlock_irqrestore(&dev_priv->uncore.lock, flags);
 }
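On Cherryview the render and media forcewake wells are reference-counted separately, so the execlists submission path must take and release both counts rather than the combined one; the hardware is only touched on the 0 -> 1 and 1 -> 0 transitions. A toy model of that counting scheme (names invented for illustration):

#include <stdio.h>

static int fw_rendercount, fw_mediacount;

static void fw_get(const char *domain, int *count)
{
    if ((*count)++ == 0)
        printf("wake %s well\n", domain);     /* 0 -> 1: touch hardware */
}

static void fw_put(const char *domain, int *count)
{
    if (--*count == 0)
        printf("release %s well\n", domain);  /* 1 -> 0: touch hardware */
}

int main(void)
{
    fw_get("render", &fw_rendercount);
    fw_get("media", &fw_mediacount);
    fw_get("render", &fw_rendercount);        /* nested get: no MMIO */
    fw_put("render", &fw_rendercount);
    fw_put("render", &fw_rendercount);
    fw_put("media", &fw_mediacount);
    return 0;
}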
drivers/gpu/drm/i915/intel_ringbuffer.c

@@ -707,7 +707,7 @@ static int bdw_init_workarounds(struct intel_engine_cs *ring)
      * update the number of dwords required based on the
      * actual number of workarounds applied
      */
-    ret = intel_ring_begin(ring, 24);
+    ret = intel_ring_begin(ring, 18);
     if (ret)
         return ret;
 
@@ -722,19 +722,8 @@ static int bdw_init_workarounds(struct intel_engine_cs *ring)
     intel_ring_emit_wa(ring, GEN7_ROW_CHICKEN2,
                        _MASKED_BIT_ENABLE(DOP_CLOCK_GATING_DISABLE));
 
-    /*
-     * This GEN8_CENTROID_PIXEL_OPT_DIS W/A is only needed for
-     * pre-production hardware
-     */
     intel_ring_emit_wa(ring, HALF_SLICE_CHICKEN3,
-                       _MASKED_BIT_ENABLE(GEN8_CENTROID_PIXEL_OPT_DIS
-                                          | GEN8_SAMPLER_POWER_BYPASS_DIS));
-
-    intel_ring_emit_wa(ring, GEN7_HALF_SLICE_CHICKEN1,
-                       _MASKED_BIT_ENABLE(GEN7_SINGLE_SUBSCAN_DISPATCH_ENABLE));
-
-    intel_ring_emit_wa(ring, COMMON_SLICE_CHICKEN2,
-                       _MASKED_BIT_ENABLE(GEN8_CSC2_SBE_VUE_CACHE_CONSERVATIVE));
+                       _MASKED_BIT_ENABLE(GEN8_SAMPLER_POWER_BYPASS_DIS));
 
     /* Use Force Non-Coherent whenever executing a 3D context. This is a
      * workaround for for a possible hang in the unlikely event a TLB
@@ -2203,8 +2192,9 @@ hsw_ring_dispatch_execbuffer(struct intel_engine_cs *ring,
         return ret;
 
     intel_ring_emit(ring,
-                    MI_BATCH_BUFFER_START | MI_BATCH_PPGTT_HSW |
-                    (flags & I915_DISPATCH_SECURE ? 0 : MI_BATCH_NON_SECURE_HSW));
+                    MI_BATCH_BUFFER_START |
+                    (flags & I915_DISPATCH_SECURE ?
+                     0 : MI_BATCH_PPGTT_HSW | MI_BATCH_NON_SECURE_HSW));
     /* bit0-7 is the length on GEN6+ */
     intel_ring_emit(ring, offset);
     intel_ring_advance(ring);