Commit 1df59b84 authored by Dave Airlie

Merge tag 'drm-intel-next-fixes-2016-01-14' of git://anongit.freedesktop.org/drm-intel into drm-next

misc i915 fixes all over the place.

* tag 'drm-intel-next-fixes-2016-01-14' of git://anongit.freedesktop.org/drm-intel:
  drm/i915/gen9: Set PIN_ZONE_4G end to 4GB - 1 page
  drm/i915: Widen return value for reservation_object_wait_timeout_rcu to long.
  drm/i915: intel_hpd_init(): Fix suspend/resume reprobing
  drm/i915: shut up gen8+ SDE irq dmesg noise, again
  drm/i915: Restore inhibiting the load of the default context
  drm/i915: Tune down rpm wakelock debug checks
  drm/i915: Avoid writing relocs with addresses in non-canonical form
  drm/i915: Move Braswell stop_machine GGTT insertion workaround
parents 28f03607 48ea1e32
@@ -3488,7 +3488,7 @@ i915_gem_object_bind_to_vm(struct drm_i915_gem_object *obj,
 	if (flags & PIN_MAPPABLE)
 		end = min_t(u64, end, dev_priv->gtt.mappable_end);
 	if (flags & PIN_ZONE_4G)
-		end = min_t(u64, end, (1ULL << 32));
+		end = min_t(u64, end, (1ULL << 32) - PAGE_SIZE);
 	if (alignment == 0)
 		alignment = flags & PIN_MAPPABLE ? fence_alignment :
...
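The new bound guarantees that both the start and the (exclusive) end of any object placed in the zone still fit in 32 bits: under the old cap an object could end exactly at 1ULL << 32, a value that needs a 33rd bit and truncates to 0 in any 32-bit field. A standalone sketch of that arithmetic (the 4K PAGE_SIZE is an assumption here, and the hardware motivation is inferred, not quoted from the commit):

#include <stdint.h>
#include <stdio.h>

#define PAGE_SIZE 4096ULL /* assumed 4K pages */

int main(void)
{
	/* Last legal placement under the old bound: the object ends
	 * exactly at 4GB, which truncates to 0 as a 32-bit value. */
	uint64_t old_last = (1ULL << 32) - PAGE_SIZE;
	printf("old: start=%#llx end=%#llx (as u32: %#x)\n",
	       (unsigned long long)old_last,
	       (unsigned long long)(old_last + PAGE_SIZE),
	       (unsigned int)(old_last + PAGE_SIZE));

	/* The new bound keeps that final page out of the zone, so every
	 * end address stays strictly below 4GB. */
	uint64_t new_last = (1ULL << 32) - 2 * PAGE_SIZE;
	printf("new: start=%#llx end=%#llx\n",
	       (unsigned long long)new_last,
	       (unsigned long long)(new_last + PAGE_SIZE));
	return 0;
}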
@@ -347,6 +347,10 @@ void i915_gem_context_reset(struct drm_device *dev)
 			i915_gem_context_unreference(lctx);
 			ring->last_context = NULL;
 		}
+
+		/* Force the GPU state to be reinitialised on enabling */
+		if (ring->default_context)
+			ring->default_context->legacy_hw_ctx.initialized = false;
 	}
 }
@@ -715,7 +719,7 @@ static int do_switch(struct drm_i915_gem_request *req)
 	if (ret)
 		goto unpin_out;
 
-	if (!to->legacy_hw_ctx.initialized) {
+	if (!to->legacy_hw_ctx.initialized || i915_gem_context_is_default(to)) {
 		hw_flags |= MI_RESTORE_INHIBIT;
 		/* NB: If we inhibit the restore, the context is not allowed to
 		 * die because future work may end up depending on valid address
...
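Read together, the two hunks restore the old behaviour: a GPU reset marks the default context uninitialised, and do_switch() then sets MI_RESTORE_INHIBIT for it instead of restoring stale state. A minimal userspace model of that flag decision, with placeholder types and a placeholder bit value:

#include <stdbool.h>
#include <stdio.h>

#define MI_RESTORE_INHIBIT (1u << 0) /* placeholder, not the real encoding */

struct ctx {
	bool initialized; /* stands in for legacy_hw_ctx.initialized */
	bool is_default;  /* stands in for i915_gem_context_is_default() */
};

/* Mirrors the condition in do_switch() above. */
static unsigned int switch_hw_flags(const struct ctx *to)
{
	unsigned int hw_flags = 0;

	if (!to->initialized || to->is_default)
		hw_flags |= MI_RESTORE_INHIBIT;
	return hw_flags;
}

int main(void)
{
	struct ctx def = { .initialized = false, .is_default = true };

	/* After i915_gem_context_reset() the default context is marked
	 * uninitialised, so the restore is inhibited... */
	printf("flags after reset: %#x\n", switch_hw_flags(&def));

	/* ...and the restored "|| is_default" check keeps it inhibited
	 * even once the context has been reinitialised. */
	def.initialized = true;
	printf("flags when initialised: %#x\n", switch_hw_flags(&def));
	return 0;
}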
@@ -249,6 +249,31 @@ static inline int use_cpu_reloc(struct drm_i915_gem_object *obj)
 		obj->cache_level != I915_CACHE_NONE);
 }
 
+/* Used to convert any address to canonical form.
+ * Starting from gen8, some commands (e.g. STATE_BASE_ADDRESS,
+ * MI_LOAD_REGISTER_MEM and others, see Broadwell PRM Vol2a) require the
+ * addresses to be in a canonical form:
+ * "GraphicsAddress[63:48] are ignored by the HW and assumed to be in correct
+ * canonical form [63:48] == [47]."
+ */
+#define GEN8_HIGH_ADDRESS_BIT 47
+static inline uint64_t gen8_canonical_addr(uint64_t address)
+{
+	return sign_extend64(address, GEN8_HIGH_ADDRESS_BIT);
+}
+
+static inline uint64_t gen8_noncanonical_addr(uint64_t address)
+{
+	return address & ((1ULL << (GEN8_HIGH_ADDRESS_BIT + 1)) - 1);
+}
+
+static inline uint64_t
+relocation_target(struct drm_i915_gem_relocation_entry *reloc,
+		  uint64_t target_offset)
+{
+	return gen8_canonical_addr((int)reloc->delta + target_offset);
+}
+
 static int
 relocate_entry_cpu(struct drm_i915_gem_object *obj,
 		   struct drm_i915_gem_relocation_entry *reloc,
@@ -256,7 +281,7 @@ relocate_entry_cpu(struct drm_i915_gem_object *obj,
 {
 	struct drm_device *dev = obj->base.dev;
 	uint32_t page_offset = offset_in_page(reloc->offset);
-	uint64_t delta = reloc->delta + target_offset;
+	uint64_t delta = relocation_target(reloc, target_offset);
 	char *vaddr;
 	int ret;
@@ -292,7 +317,7 @@ relocate_entry_gtt(struct drm_i915_gem_object *obj,
 {
 	struct drm_device *dev = obj->base.dev;
 	struct drm_i915_private *dev_priv = dev->dev_private;
-	uint64_t delta = reloc->delta + target_offset;
+	uint64_t delta = relocation_target(reloc, target_offset);
 	uint64_t offset;
 	void __iomem *reloc_page;
 	int ret;
@@ -347,7 +372,7 @@ relocate_entry_clflush(struct drm_i915_gem_object *obj,
 {
 	struct drm_device *dev = obj->base.dev;
 	uint32_t page_offset = offset_in_page(reloc->offset);
-	uint64_t delta = (int)reloc->delta + target_offset;
+	uint64_t delta = relocation_target(reloc, target_offset);
 	char *vaddr;
 	int ret;
@@ -395,7 +420,7 @@ i915_gem_execbuffer_relocate_entry(struct drm_i915_gem_object *obj,
 	target_i915_obj = target_vma->obj;
 	target_obj = &target_vma->obj->base;
-	target_offset = target_vma->node.start;
+	target_offset = gen8_canonical_addr(target_vma->node.start);
 
 	/* Sandybridge PPGTT errata: We need a global gtt mapping for MI and
 	 * pipe_control writes because the gpu doesn't properly redirect them
@@ -994,6 +1019,21 @@ validate_exec_list(struct drm_device *dev,
 		if (exec[i].flags & invalid_flags)
 			return -EINVAL;
 
+		/* Offset can be used as input (EXEC_OBJECT_PINNED), reject
+		 * any non-page-aligned or non-canonical addresses.
+		 */
+		if (exec[i].flags & EXEC_OBJECT_PINNED) {
+			if (exec[i].offset !=
+			    gen8_canonical_addr(exec[i].offset & PAGE_MASK))
+				return -EINVAL;
+
+			/* From drm_mm perspective address space is continuous,
+			 * so from this point we're always using non-canonical
+			 * form internally.
+			 */
+			exec[i].offset = gen8_noncanonical_addr(exec[i].offset);
+		}
+
 		if (exec[i].alignment && !is_power_of_2(exec[i].alignment))
 			return -EINVAL;
@@ -1687,6 +1727,8 @@ i915_gem_execbuffer(struct drm_device *dev, void *data,
 		/* Copy the new buffer offsets back to the user's exec list. */
 		for (i = 0; i < args->buffer_count; i++) {
+			exec2_list[i].offset =
+				gen8_canonical_addr(exec2_list[i].offset);
 			ret = __copy_to_user(&user_exec_list[i].offset,
 					     &exec2_list[i].offset,
 					     sizeof(user_exec_list[i].offset));
@@ -1752,6 +1794,8 @@ i915_gem_execbuffer2(struct drm_device *dev, void *data,
 		int i;
 
 		for (i = 0; i < args->buffer_count; i++) {
+			exec2_list[i].offset =
+				gen8_canonical_addr(exec2_list[i].offset);
 			ret = __copy_to_user(&user_exec_list[i].offset,
 					     &exec2_list[i].offset,
 					     sizeof(user_exec_list[i].offset));
...
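For reference, canonical form just sign-extends bit 47 into bits 63:48, and the validate_exec_list() hunk rejects any pinned offset that fails to round-trip through that conversion after page alignment. A self-contained check of both behaviours (sign_extend64() here is a userspace stand-in for the kernel helper, and 4K pages are assumed):

#include <stdint.h>
#include <stdio.h>

#define GEN8_HIGH_ADDRESS_BIT 47
#define PAGE_MASK (~4095ULL) /* assumed 4K pages */

/* Userspace stand-in for the kernel's sign_extend64(). */
static uint64_t sign_extend64(uint64_t value, int index)
{
	int shift = 63 - index;
	return (uint64_t)((int64_t)(value << shift) >> shift);
}

static uint64_t gen8_canonical_addr(uint64_t address)
{
	return sign_extend64(address, GEN8_HIGH_ADDRESS_BIT);
}

int main(void)
{
	/* With bit 47 set, canonical form fills bits 63:48 with ones. */
	uint64_t addr = 0x0000800000001000ULL;
	printf("canonical(%#llx) = %#llx\n",
	       (unsigned long long)addr,
	       (unsigned long long)gen8_canonical_addr(addr));
	/* prints 0xffff800000001000 */

	/* The validate_exec_list() check above: a pinned offset is legal
	 * only if it is page aligned and already canonical. */
	uint64_t ok  = 0xffff800000001000ULL;
	uint64_t bad = 0x0000800000001000ULL; /* bits 63:48 not extended */
	printf("ok round-trips: %d, bad round-trips: %d\n",
	       ok == gen8_canonical_addr(ok & PAGE_MASK),
	       bad == gen8_canonical_addr(bad & PAGE_MASK));
	return 0;
}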
@@ -2384,6 +2384,32 @@ static void gen8_ggtt_insert_entries(struct i915_address_space *vm,
 	assert_rpm_atomic_end(dev_priv, rpm_atomic_seq);
 }
 
+struct insert_entries {
+	struct i915_address_space *vm;
+	struct sg_table *st;
+	uint64_t start;
+	enum i915_cache_level level;
+	u32 flags;
+};
+
+static int gen8_ggtt_insert_entries__cb(void *_arg)
+{
+	struct insert_entries *arg = _arg;
+	gen8_ggtt_insert_entries(arg->vm, arg->st,
+				 arg->start, arg->level, arg->flags);
+	return 0;
+}
+
+static void gen8_ggtt_insert_entries__BKL(struct i915_address_space *vm,
+					  struct sg_table *st,
+					  uint64_t start,
+					  enum i915_cache_level level,
+					  u32 flags)
+{
+	struct insert_entries arg = { vm, st, start, level, flags };
+	stop_machine(gen8_ggtt_insert_entries__cb, &arg, NULL);
+}
+
 /*
  * Binds an object into the global gtt with the specified cache level. The object
  * will be accessible to the GPU via commands whose operands reference offsets
@@ -2560,26 +2586,6 @@ static int ggtt_bind_vma(struct i915_vma *vma,
 	return 0;
 }
 
-struct ggtt_bind_vma__cb {
-	struct i915_vma *vma;
-	enum i915_cache_level cache_level;
-	u32 flags;
-};
-
-static int ggtt_bind_vma__cb(void *_arg)
-{
-	struct ggtt_bind_vma__cb *arg = _arg;
-	return ggtt_bind_vma(arg->vma, arg->cache_level, arg->flags);
-}
-
-static int ggtt_bind_vma__BKL(struct i915_vma *vma,
-			      enum i915_cache_level cache_level,
-			      u32 flags)
-{
-	struct ggtt_bind_vma__cb arg = { vma, cache_level, flags };
-	return stop_machine(ggtt_bind_vma__cb, &arg, NULL);
-}
-
 static int aliasing_gtt_bind_vma(struct i915_vma *vma,
 				 enum i915_cache_level cache_level,
 				 u32 flags)
@@ -3048,8 +3054,8 @@ static int gen8_gmch_probe(struct drm_device *dev,
 	dev_priv->gtt.base.bind_vma = ggtt_bind_vma;
 	dev_priv->gtt.base.unbind_vma = ggtt_unbind_vma;
 
-	if (IS_CHERRYVIEW(dev))
-		dev_priv->gtt.base.bind_vma = ggtt_bind_vma__BKL;
+	if (IS_CHERRYVIEW(dev_priv))
+		dev_priv->gtt.base.insert_entries = gen8_ggtt_insert_entries__BKL;
 
 	return ret;
 }
...
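The workaround moves from bind_vma to insert_entries, but the idiom is unchanged: stop_machine() passes its callback a single void *, so the real arguments are packed into an on-stack struct and unpacked inside the callback. A userspace model of that marshalling (run_serialised() merely stands in for stop_machine(), which would additionally quiesce every other CPU while the callback runs):

#include <stdio.h>

/* Placeholder argument types; the driver passes an address space, an
 * sg_table, a start offset, a cache level and flags. */
struct insert_entries {
	const char *vm;
	unsigned long long start;
	unsigned int flags;
};

/* Stand-in for stop_machine(fn, arg, NULL): here it simply calls fn. */
static int run_serialised(int (*fn)(void *), void *arg)
{
	return fn(arg);
}

static int insert_entries__cb(void *_arg)
{
	struct insert_entries *arg = _arg;

	printf("insert into %s at %#llx (flags %#x)\n",
	       arg->vm, arg->start, arg->flags);
	return 0;
}

int main(void)
{
	struct insert_entries arg = { "ggtt", 0x100000, 0 };

	return run_serialised(insert_entries__cb, &arg);
}

The heavyweight stop_machine() rather than a plain lock suggests the Braswell/Cherryview GGTT PTE writes must not race with any concurrent CPU activity at all, not merely with other binders.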
@@ -2414,9 +2414,13 @@ static irqreturn_t gen8_irq_handler(int irq, void *arg)
 				spt_irq_handler(dev, pch_iir);
 			else
 				cpt_irq_handler(dev, pch_iir);
-		} else
-			DRM_ERROR("The master control interrupt lied (SDE)!\n");
+		} else {
+			/*
+			 * Like on previous PCH there seems to be something
+			 * fishy going on with forwarding PCH interrupts.
+			 */
+			DRM_DEBUG_DRIVER("The master control interrupt lied (SDE)!\n");
+		}
 	}
 
 	I915_WRITE_FW(GEN8_MASTER_IRQ, GEN8_MASTER_IRQ_CONTROL);
...
@@ -13763,13 +13763,15 @@ intel_prepare_plane_fb(struct drm_plane *plane,
 
 	/* For framebuffer backed by dmabuf, wait for fence */
 	if (obj && obj->base.dma_buf) {
-		ret = reservation_object_wait_timeout_rcu(obj->base.dma_buf->resv,
-							  false, true,
-							  MAX_SCHEDULE_TIMEOUT);
-		if (ret == -ERESTARTSYS)
-			return ret;
-		WARN_ON(ret < 0);
+		long lret;
+
+		lret = reservation_object_wait_timeout_rcu(obj->base.dma_buf->resv,
+							   false, true,
+							   MAX_SCHEDULE_TIMEOUT);
+		if (lret == -ERESTARTSYS)
+			return lret;
+
+		WARN(lret < 0, "waiting returns %li\n", lret);
 	}
 
 	if (!obj) {
...
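The widening matters because reservation_object_wait_timeout_rcu() returns the remaining time as a long, and MAX_SCHEDULE_TIMEOUT is LONG_MAX, which no int can hold. A short demonstration of the truncation the old code suffered on an LP64 build:

#include <limits.h>
#include <stdio.h>

int main(void)
{
	/* The wait helper can return up to MAX_SCHEDULE_TIMEOUT, which is
	 * LONG_MAX ("wait completed with the full timeout remaining"). */
	long lret = LONG_MAX;
	int ret = (int)lret; /* what storing it in an int effectively did */

	/* On LP64 the low 32 bits of LONG_MAX are all ones, so ret becomes
	 * -1 and the old WARN_ON(ret < 0) fired on a successful wait. */
	printf("long %ld -> int %d\n", lret, ret);
	return 0;
}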
@@ -1442,8 +1442,10 @@ static inline void
 assert_rpm_wakelock_held(struct drm_i915_private *dev_priv)
 {
 	assert_rpm_device_not_suspended(dev_priv);
-	WARN_ONCE(!atomic_read(&dev_priv->pm.wakeref_count),
-		  "RPM wakelock ref not held during HW access");
+	/* FIXME: Needs to be converted back to WARN_ONCE, but currently causes
+	 * too much noise. */
+	if (!atomic_read(&dev_priv->pm.wakeref_count))
+		DRM_DEBUG_DRIVER("RPM wakelock ref not held during HW access");
 }
 
 static inline int
...
@@ -468,9 +468,14 @@ void intel_hpd_init(struct drm_i915_private *dev_priv)
 	list_for_each_entry(connector, &mode_config->connector_list, head) {
 		struct intel_connector *intel_connector = to_intel_connector(connector);
 		connector->polled = intel_connector->polled;
-		if (connector->encoder && !connector->polled && I915_HAS_HOTPLUG(dev) && intel_connector->encoder->hpd_pin > HPD_NONE)
-			connector->polled = DRM_CONNECTOR_POLL_HPD;
+
+		/* MST has a dynamic intel_connector->encoder and it's reprobing
+		 * is all handled by the MST helpers. */
 		if (intel_connector->mst_port)
+			continue;
+
+		if (!connector->polled && I915_HAS_HOTPLUG(dev) &&
+		    intel_connector->encoder->hpd_pin > HPD_NONE)
 			connector->polled = DRM_CONNECTOR_POLL_HPD;
 	}
...
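The reordering does two things: MST connectors bail out before anything dereferences intel_connector->encoder (their encoder is dynamic), and dropping the old connector->encoder guard presumably lets connectors that were inactive at suspend be marked for HPD reprobing again. A small model of the rewritten loop body, with simplified placeholder types and a placeholder DRM_CONNECTOR_POLL_HPD value:

#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>

#define DRM_CONNECTOR_POLL_HPD (1 << 0) /* placeholder value */
enum { HPD_NONE = 0 };

struct model_connector {
	int polled;
	bool has_hotplug; /* stands in for I915_HAS_HOTPLUG(dev) */
	void *mst_port;   /* non-NULL for MST connectors */
	int hpd_pin;      /* from intel_connector->encoder->hpd_pin */
};

/* Mirrors the body of the rewritten loop above. */
static void hpd_init_connector(struct model_connector *c)
{
	/* MST reprobing belongs to the MST helpers, and the encoder is
	 * dynamic, so bail out before touching hpd_pin. */
	if (c->mst_port)
		return;

	if (!c->polled && c->has_hotplug && c->hpd_pin > HPD_NONE)
		c->polled = DRM_CONNECTOR_POLL_HPD;
}

int main(void)
{
	/* A connector with an HPD pin but nothing plugged in at boot:
	 * the old connector->encoder guard skipped it entirely. */
	struct model_connector dp = { .polled = 0, .has_hotplug = true,
				      .mst_port = NULL, .hpd_pin = 5 };

	hpd_init_connector(&dp);
	printf("polled = %#x\n", dp.polled);
	return 0;
}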