Commit d07f0e59 authored by Chris Wilson

drm/i915: Move GEM activity tracking into a common struct reservation_object

In preparation to support many distinct timelines, we need to expand the
activity tracking on the GEM object to handle more than just a request
per engine. We already use the struct reservation_object on the dma-buf
to handle many fence contexts, so integrating that into the GEM object
itself is the preferred solution. (For example, we can now share the same
reservation_object between every consumer/producer using this buffer and
skip the manual import/export via dma-buf.)
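
As a rough sketch of what this buys us (illustrative only; the helper
below is not one the patch adds, and it assumes the usual kernel/i915
context), publishing GPU activity now means adding the request's fence
to obj->resv, shared fences for reads and the single exclusive slot for
writes, so every consumer of that reservation_object (including dma-buf
importers and exporters) observes the same state:

#include <linux/dma-fence.h>
#include <linux/reservation.h>

/* Hypothetical helper: publish a request's fence into the object's
 * reservation_object. The caller must hold resv->lock (a ww_mutex),
 * just as the fence export paths below do.
 */
static int publish_activity(struct reservation_object *resv,
			    struct dma_fence *fence,
			    bool write)
{
	int ret;

	if (write) {
		/* A write supersedes all earlier fences: readers and
		 * writers alike must now wait on this exclusive fence.
		 */
		reservation_object_add_excl_fence(resv, fence);
		return 0;
	}

	/* Reads accumulate as shared fences; the shared array may need
	 * to grow, which is one source of the extra atomics noted in
	 * the caveats below.
	 */
	ret = reservation_object_reserve_shared(resv);
	if (ret)
		return ret;

	reservation_object_add_shared_fence(resv, fence);
	return 0;
}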

v2: Reimplement busy-ioctl (by walking the reservation object), postpone
the ABI change for another day. Similarly use the reservation object to
find the last_write request (if active and from i915) for choosing
display CS flips.
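
A conceptual sketch of the busy-ioctl direction (the in-tree rework
walks the fences under RCU and encodes the result differently; treat
this helper, its name and its bit encoding as illustrative, and assume
the usual i915 driver includes):

/* Hypothetical: report which i915 engines still have unsignaled fences
 * attached to the object. Foreign (non-i915) fences are deliberately
 * skipped, which is the first caveat listed below.
 */
static unsigned int busy_engines(struct reservation_object *resv)
{
	struct dma_fence *excl, **shared;
	unsigned int count, i, mask = 0;

	if (reservation_object_get_fences_rcu(resv, &excl, &count, &shared))
		return 0;

	for (i = 0; i < count; i++) {
		if (dma_fence_is_i915(shared[i]) &&
		    !dma_fence_is_signaled(shared[i]))
			mask |= BIT(to_request(shared[i])->engine->id);
		dma_fence_put(shared[i]);
	}
	kfree(shared);

	if (excl) {
		if (dma_fence_is_i915(excl) && !dma_fence_is_signaled(excl))
			mask |= BIT(to_request(excl)->engine->id);
		dma_fence_put(excl);
	}

	return mask;
}

The last-write lookup for choosing the CS flip engine follows the same
pattern but only needs the exclusive fence; see
i915_gem_object_last_write_engine() in the diff below.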

Caveats:

 * busy-ioctl: the busy-ioctl only reports on the native fences; it will not
warn of stalls (in set-domain-ioctl, pread/pwrite, etc.) if the object is
being rendered to by external fences. It also will not report the same
busy state as wait-ioctl (or polling on the dma-buf) in the same
circumstances. On the plus side, it does retain reporting of which
*i915* engines are engaged with this object (see the sketch after this list).

 * non-blocking atomic modesets take a step backwards as the wait for
render completion blocks the ioctl. This is fixed in a subsequent
patch to use a fence instead for awaiting on the rendering, see
"drm/i915: Restore nonblocking awaits for modesetting"

 * dynamic array manipulation for shared-fences in reservation is slower
than the previous lockless static assignment (e.g. gem_exec_lut_handle
runtime on ivb goes from 42s to 66s), mainly due to atomic operations
(maintaining the fence refcounts).

 * loss of object-level retirement callbacks, emulated by VMA retirement
tracking.

 * minor loss of object-level last-activity information from debugfs; it
could be replaced with per-vma information if desired.
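
To make the first caveat concrete (hypothetical helpers, building on the
busy_engines() sketch above): wait-ioctl and poll() on the dma-buf
consider every fence attached to obj->resv, while the busy report only
considers native i915 fences, so the two can disagree while an external
device is still rendering into the buffer.

/* What wait-ioctl / polling the dma-buf observe: all fences. */
static bool idle_for_wait(struct reservation_object *resv)
{
	return reservation_object_test_signaled_rcu(resv, true);
}

/* What the busy report observes: i915 fences only. */
static bool idle_for_busy(struct reservation_object *resv)
{
	return busy_engines(resv) == 0;
}
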
Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Reviewed-by: Joonas Lahtinen <joonas.lahtinen@linux.intel.com>
Link: http://patchwork.freedesktop.org/patch/msgid/20161028125858.23563-21-chris@chris-wilson.co.uk
parent f0cd5182
@@ -136,11 +136,10 @@ describe_obj(struct seq_file *m, struct drm_i915_gem_object *obj)
struct i915_vma *vma;
unsigned int frontbuffer_bits;
int pin_count = 0;
enum intel_engine_id id;
lockdep_assert_held(&obj->base.dev->struct_mutex);
seq_printf(m, "%pK: %c%c%c%c%c %8zdKiB %02x %02x [ ",
seq_printf(m, "%pK: %c%c%c%c%c %8zdKiB %02x %02x %s%s%s",
&obj->base,
get_active_flag(obj),
get_pin_flag(obj),
@@ -149,14 +148,7 @@ describe_obj(struct seq_file *m, struct drm_i915_gem_object *obj)
get_pin_mapped_flag(obj),
obj->base.size / 1024,
obj->base.read_domains,
obj->base.write_domain);
for_each_engine(engine, dev_priv, id)
seq_printf(m, "%x ",
i915_gem_active_get_seqno(&obj->last_read[id],
&obj->base.dev->struct_mutex));
seq_printf(m, "] %x %s%s%s",
i915_gem_active_get_seqno(&obj->last_write,
&obj->base.dev->struct_mutex),
obj->base.write_domain,
i915_cache_level_str(dev_priv, obj->cache_level),
obj->mm.dirty ? " dirty" : "",
obj->mm.madv == I915_MADV_DONTNEED ? " purgeable" : "");
@@ -187,8 +179,7 @@ describe_obj(struct seq_file *m, struct drm_i915_gem_object *obj)
if (obj->stolen)
seq_printf(m, " (stolen: %08llx)", obj->stolen->start);
engine = i915_gem_active_get_engine(&obj->last_write,
&dev_priv->drm.struct_mutex);
engine = i915_gem_object_last_write_engine(obj);
if (engine)
seq_printf(m, " (%s)", engine->name);
@@ -41,6 +41,7 @@
#include <linux/intel-iommu.h>
#include <linux/kref.h>
#include <linux/pm_qos.h>
#include <linux/reservation.h>
#include <linux/shmem_fs.h>
#include <drm/drmP.h>
@@ -2246,21 +2247,12 @@ struct drm_i915_gem_object {
struct list_head batch_pool_link;
unsigned long flags;
/**
* This is set if the object is on the active lists (has pending
* rendering and so a non-zero seqno), and is not set if it is on
* inactive (ready to be unbound) list.
*/
#define I915_BO_ACTIVE_SHIFT 0
#define I915_BO_ACTIVE_MASK ((1 << I915_NUM_ENGINES) - 1)
#define __I915_BO_ACTIVE(bo) \
((READ_ONCE((bo)->flags) >> I915_BO_ACTIVE_SHIFT) & I915_BO_ACTIVE_MASK)
/**
* Have we taken a reference for the object for incomplete GPU
* activity?
*/
#define I915_BO_ACTIVE_REF (I915_BO_ACTIVE_SHIFT + I915_NUM_ENGINES)
#define I915_BO_ACTIVE_REF 0
/*
* Is the object to be mapped as read-only to the GPU
@@ -2281,6 +2273,7 @@ struct drm_i915_gem_object {
/** Count of VMA actually bound by this object */
unsigned int bind_count;
unsigned int active_count;
unsigned int pin_display;
struct {
@@ -2320,8 +2313,7 @@ struct drm_i915_gem_object {
* read request. This allows for the CPU to read from an active
* buffer by only waiting for the write to complete.
*/
struct i915_gem_active last_read[I915_NUM_ENGINES];
struct i915_gem_active last_write;
struct reservation_object *resv;
/** References from framebuffers, locks out tiling changes. */
unsigned long framebuffer_references;
@@ -2340,6 +2332,8 @@ struct drm_i915_gem_object {
/** for phys allocated objects */
struct drm_dma_handle *phys_handle;
struct reservation_object __builtin_resv;
};
static inline struct drm_i915_gem_object *
@@ -2425,35 +2419,10 @@ i915_gem_object_has_struct_page(const struct drm_i915_gem_object *obj)
return obj->ops->flags & I915_GEM_OBJECT_HAS_STRUCT_PAGE;
}
static inline unsigned long
i915_gem_object_get_active(const struct drm_i915_gem_object *obj)
{
return (obj->flags >> I915_BO_ACTIVE_SHIFT) & I915_BO_ACTIVE_MASK;
}
static inline bool
i915_gem_object_is_active(const struct drm_i915_gem_object *obj)
{
return i915_gem_object_get_active(obj);
}
static inline void
i915_gem_object_set_active(struct drm_i915_gem_object *obj, int engine)
{
obj->flags |= BIT(engine + I915_BO_ACTIVE_SHIFT);
}
static inline void
i915_gem_object_clear_active(struct drm_i915_gem_object *obj, int engine)
{
obj->flags &= ~BIT(engine + I915_BO_ACTIVE_SHIFT);
}
static inline bool
i915_gem_object_has_active_engine(const struct drm_i915_gem_object *obj,
int engine)
{
return obj->flags & BIT(engine + I915_BO_ACTIVE_SHIFT);
return obj->active_count;
}
static inline bool
@@ -2496,6 +2465,23 @@ i915_gem_object_get_stride(struct drm_i915_gem_object *obj)
return obj->tiling_and_stride & STRIDE_MASK;
}
static inline struct intel_engine_cs *
i915_gem_object_last_write_engine(struct drm_i915_gem_object *obj)
{
struct intel_engine_cs *engine = NULL;
struct dma_fence *fence;
rcu_read_lock();
fence = reservation_object_get_excl_rcu(obj->resv);
rcu_read_unlock();
if (fence && dma_fence_is_i915(fence) && !dma_fence_is_signaled(fence))
engine = to_request(fence)->engine;
dma_fence_put(fence);
return engine;
}
static inline struct i915_vma *i915_vma_get(struct i915_vma *vma)
{
i915_gem_object_get(vma->obj);
This diff is collapsed.
@@ -114,11 +114,18 @@ i915_gem_batch_pool_get(struct i915_gem_batch_pool *pool,
list_for_each_entry(tmp, list, batch_pool_link) {
/* The batches are strictly LRU ordered */
if (!i915_gem_active_is_idle(&tmp->last_read[pool->engine->id],
&tmp->base.dev->struct_mutex))
if (i915_gem_object_is_active(tmp))
break;
GEM_BUG_ON(!reservation_object_test_signaled_rcu(tmp->resv,
true));
if (tmp->base.size >= size) {
/* Clear the set of shared fences early */
ww_mutex_lock(&tmp->resv->lock, NULL);
reservation_object_add_excl_fence(tmp->resv, NULL);
ww_mutex_unlock(&tmp->resv->lock);
obj = tmp;
break;
}
@@ -211,60 +211,17 @@ static const struct dma_buf_ops i915_dmabuf_ops = {
.end_cpu_access = i915_gem_end_cpu_access,
};
static void export_fences(struct drm_i915_gem_object *obj,
struct dma_buf *dma_buf)
{
struct reservation_object *resv = dma_buf->resv;
struct drm_i915_gem_request *req;
unsigned long active;
int idx;
active = __I915_BO_ACTIVE(obj);
if (!active)
return;
/* Serialise with execbuf to prevent concurrent fence-loops */
mutex_lock(&obj->base.dev->struct_mutex);
/* Mark the object for future fences before racily adding old fences */
obj->base.dma_buf = dma_buf;
ww_mutex_lock(&resv->lock, NULL);
for_each_active(active, idx) {
req = i915_gem_active_get(&obj->last_read[idx],
&obj->base.dev->struct_mutex);
if (!req)
continue;
if (reservation_object_reserve_shared(resv) == 0)
reservation_object_add_shared_fence(resv, &req->fence);
i915_gem_request_put(req);
}
req = i915_gem_active_get(&obj->last_write,
&obj->base.dev->struct_mutex);
if (req) {
reservation_object_add_excl_fence(resv, &req->fence);
i915_gem_request_put(req);
}
ww_mutex_unlock(&resv->lock);
mutex_unlock(&obj->base.dev->struct_mutex);
}
struct dma_buf *i915_gem_prime_export(struct drm_device *dev,
struct drm_gem_object *gem_obj, int flags)
{
struct drm_i915_gem_object *obj = to_intel_bo(gem_obj);
DEFINE_DMA_BUF_EXPORT_INFO(exp_info);
struct dma_buf *dma_buf;
exp_info.ops = &i915_dmabuf_ops;
exp_info.size = gem_obj->size;
exp_info.flags = flags;
exp_info.priv = gem_obj;
exp_info.resv = obj->resv;
if (obj->ops->dmabuf_export) {
int ret = obj->ops->dmabuf_export(obj);
@@ -272,12 +229,7 @@ struct dma_buf *i915_gem_prime_export(struct drm_device *dev,
return ERR_PTR(ret);
}
dma_buf = drm_gem_dmabuf_export(dev, &exp_info);
if (IS_ERR(dma_buf))
return dma_buf;
export_fences(obj, dma_buf);
return dma_buf;
return drm_gem_dmabuf_export(dev, &exp_info);
}
static struct sg_table *
@@ -335,6 +287,7 @@ struct drm_gem_object *i915_gem_prime_import(struct drm_device *dev,
drm_gem_private_object_init(dev, &obj->base, dma_buf->size);
i915_gem_object_init(obj, &i915_gem_object_dmabuf_ops);
obj->base.import_attach = attach;
obj->resv = dma_buf->resv;
/* We use GTT as shorthand for a coherent domain, one that is
* neither in the GPU cache nor in the CPU cache, where all
/*
* Copyright 2016 Intel Corporation
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice (including the next
* paragraph) shall be included in all copies or substantial portions of the
* Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*
*/
#ifndef _I915_GEM_DMABUF_H_
#define _I915_GEM_DMABUF_H_
#include <linux/dma-buf.h>
static inline struct reservation_object *
i915_gem_object_get_dmabuf_resv(struct drm_i915_gem_object *obj)
{
struct dma_buf *dma_buf;
if (obj->base.dma_buf)
dma_buf = obj->base.dma_buf;
else if (obj->base.import_attach)
dma_buf = obj->base.import_attach->dmabuf;
else
return NULL;
return dma_buf->resv;
}
#endif
@@ -34,7 +34,6 @@
#include <drm/i915_drm.h>
#include "i915_drv.h"
#include "i915_gem_dmabuf.h"
#include "i915_trace.h"
#include "intel_drv.h"
#include "intel_frontbuffer.h"
@@ -1101,45 +1100,20 @@ i915_gem_execbuffer_relocate_slow(struct drm_device *dev,
return ret;
}
static unsigned int eb_other_engines(struct drm_i915_gem_request *req)
{
unsigned int mask;
mask = ~intel_engine_flag(req->engine) & I915_BO_ACTIVE_MASK;
mask <<= I915_BO_ACTIVE_SHIFT;
return mask;
}
static int
i915_gem_execbuffer_move_to_gpu(struct drm_i915_gem_request *req,
struct list_head *vmas)
{
const unsigned int other_rings = eb_other_engines(req);
struct i915_vma *vma;
int ret;
list_for_each_entry(vma, vmas, exec_list) {
struct drm_i915_gem_object *obj = vma->obj;
struct reservation_object *resv;
if (obj->flags & other_rings) {
ret = i915_gem_request_await_object
(req, obj, obj->base.pending_write_domain);
if (ret)
return ret;
}
resv = i915_gem_object_get_dmabuf_resv(obj);
if (resv) {
ret = i915_sw_fence_await_reservation
(&req->submit, resv, &i915_fence_ops,
obj->base.pending_write_domain,
I915_FENCE_TIMEOUT,
GFP_KERNEL | __GFP_NOWARN);
if (ret < 0)
return ret;
}
if (obj->base.write_domain & I915_GEM_DOMAIN_CPU)
i915_gem_clflush_object(obj, false);
@@ -1281,8 +1255,6 @@ void i915_vma_move_to_active(struct i915_vma *vma,
GEM_BUG_ON(!drm_mm_node_allocated(&vma->node));
obj->mm.dirty = true; /* be paranoid */
/* Add a reference if we're newly entering the active list.
* The order in which we add operations to the retirement queue is
* vital here: mark_active adds to the start of the callback list,
@@ -1290,11 +1262,14 @@ void i915_vma_move_to_active(struct i915_vma *vma,
* add the active reference first and queue for it to be dropped
* *last*.
*/
i915_gem_object_set_active(obj, idx);
i915_gem_active_set(&obj->last_read[idx], req);
if (!i915_vma_is_active(vma))
obj->active_count++;
i915_vma_set_active(vma, idx);
i915_gem_active_set(&vma->last_read[idx], req);
list_move_tail(&vma->vm_link, &vma->vm->active_list);
if (flags & EXEC_OBJECT_WRITE) {
i915_gem_active_set(&obj->last_write, req);
i915_gem_active_set(&vma->last_write, req);
intel_fb_obj_invalidate(obj, ORIGIN_CS);
@@ -1304,21 +1279,13 @@ void i915_vma_move_to_active(struct i915_vma *vma,
if (flags & EXEC_OBJECT_NEEDS_FENCE)
i915_gem_active_set(&vma->last_fence, req);
i915_vma_set_active(vma, idx);
i915_gem_active_set(&vma->last_read[idx], req);
list_move_tail(&vma->vm_link, &vma->vm->active_list);
}
static void eb_export_fence(struct drm_i915_gem_object *obj,
struct drm_i915_gem_request *req,
unsigned int flags)
{
struct reservation_object *resv;
resv = i915_gem_object_get_dmabuf_resv(obj);
if (!resv)
return;
struct reservation_object *resv = obj->resv;
/* Ignore errors from failing to allocate the new fence, we can't
* handle an error right now. Worst case should be missed
@@ -31,6 +31,7 @@
#include "i915_vgpu.h"
#include "i915_trace.h"
#include "intel_drv.h"
#include "intel_frontbuffer.h"
#define I915_GFP_DMA (GFP_KERNEL | __GFP_HIGHMEM)
@@ -3343,6 +3344,7 @@ i915_vma_retire(struct i915_gem_active *active,
const unsigned int idx = rq->engine->id;
struct i915_vma *vma =
container_of(active, struct i915_vma, last_read[idx]);
struct drm_i915_gem_object *obj = vma->obj;
GEM_BUG_ON(!i915_vma_has_active_engine(vma, idx));
@@ -3353,6 +3355,34 @@ i915_vma_retire(struct i915_gem_active *active,
list_move_tail(&vma->vm_link, &vma->vm->inactive_list);
if (unlikely(i915_vma_is_closed(vma) && !i915_vma_is_pinned(vma)))
WARN_ON(i915_vma_unbind(vma));
GEM_BUG_ON(!i915_gem_object_is_active(obj));
if (--obj->active_count)
return;
/* Bump our place on the bound list to keep it roughly in LRU order
* so that we don't steal from recently used but inactive objects
* (unless we are forced to ofc!)
*/
if (obj->bind_count)
list_move_tail(&obj->global_list, &rq->i915->mm.bound_list);
obj->mm.dirty = true; /* be paranoid */
if (i915_gem_object_has_active_reference(obj)) {
i915_gem_object_clear_active_reference(obj);
i915_gem_object_put(obj);
}
}
static void
i915_ggtt_retire__write(struct i915_gem_active *active,
struct drm_i915_gem_request *request)
{
struct i915_vma *vma =
container_of(active, struct i915_vma, last_write);
intel_fb_obj_flush(vma->obj, true, ORIGIN_CS);
}
void i915_vma_destroy(struct i915_vma *vma)
@@ -3396,6 +3426,8 @@ __i915_vma_create(struct drm_i915_gem_object *obj,
INIT_LIST_HEAD(&vma->exec_list);
for (i = 0; i < ARRAY_SIZE(vma->last_read); i++)
init_request_active(&vma->last_read[i], i915_vma_retire);
init_request_active(&vma->last_write,
i915_is_ggtt(vm) ? i915_ggtt_retire__write : NULL);
init_request_active(&vma->last_fence, NULL);
list_add(&vma->vm_link, &vm->unbound_list);
vma->vm = vm;
@@ -211,6 +211,7 @@ struct i915_vma {
unsigned int active;
struct i915_gem_active last_read[I915_NUM_ENGINES];
struct i915_gem_active last_write;
struct i915_gem_active last_fence;
/**
@@ -196,6 +196,8 @@ static void i915_gem_request_retire(struct drm_i915_gem_request *request)
}
i915_gem_context_put(request->ctx);
dma_fence_signal(&request->fence);
i915_gem_request_put(request);
}
@@ -553,33 +555,41 @@ i915_gem_request_await_object(struct drm_i915_gem_request *to,
struct drm_i915_gem_object *obj,
bool write)
{
struct i915_gem_active *active;
unsigned long active_mask;
int idx;
struct dma_fence *excl;
int ret = 0;
if (write) {
active_mask = i915_gem_object_get_active(obj);
active = obj->last_read;
} else {
active_mask = 1;
active = &obj->last_write;
struct dma_fence **shared;
unsigned int count, i;
ret = reservation_object_get_fences_rcu(obj->resv,
&excl, &count, &shared);
if (ret)
return ret;
for (i = 0; i < count; i++) {
ret = i915_gem_request_await_dma_fence(to, shared[i]);
if (ret)
break;
dma_fence_put(shared[i]);
}
for_each_active(active_mask, idx) {
struct drm_i915_gem_request *request;
int ret;
for (; i < count; i++)
dma_fence_put(shared[i]);
kfree(shared);
} else {
excl = reservation_object_get_excl_rcu(obj->resv);
}
request = i915_gem_active_peek(&active[idx],
&obj->base.dev->struct_mutex);
if (!request)
continue;
if (excl) {
if (ret == 0)
ret = i915_gem_request_await_dma_fence(to, excl);
ret = i915_gem_request_await_request(to, request);
if (ret)
return ret;
dma_fence_put(excl);
}
return 0;
return ret;
}
static void i915_gem_mark_busy(const struct intel_engine_cs *engine)
@@ -554,22 +554,7 @@ i915_gem_active_isset(const struct i915_gem_active *active)
}
/**
* i915_gem_active_is_idle - report whether the active tracker is idle
* @active - the active tracker
*
* i915_gem_active_is_idle() returns true if the active tracker is currently
* unassigned or if the request is complete (but not yet retired). Requires
* the caller to hold struct_mutex (but that can be relaxed if desired).
*/
static inline bool
i915_gem_active_is_idle(const struct i915_gem_active *active,
struct mutex *mutex)
{
return !i915_gem_active_peek(active, mutex);
}
/**
* i915_gem_active_wait- waits until the request is completed
* i915_gem_active_wait - waits until the request is completed
* @active - the active request on which to wait
* @flags - how to wait
* @timeout - how long to wait at most
@@ -639,24 +624,6 @@ i915_gem_active_retire(struct i915_gem_active *active,
return 0;
}
/* Convenience functions for peeking at state inside active's request whilst
* guarded by the struct_mutex.
*/
static inline uint32_t
i915_gem_active_get_seqno(const struct i915_gem_active *active,
struct mutex *mutex)
{
return i915_gem_request_get_seqno(i915_gem_active_peek(active, mutex));
}
static inline struct intel_engine_cs *
i915_gem_active_get_engine(const struct i915_gem_active *active,
struct mutex *mutex)
{
return i915_gem_request_get_engine(i915_gem_active_peek(active, mutex));
}
#define for_each_active(mask, idx) \
for (; mask ? idx = ffs(mask) - 1, 1 : 0; mask &= ~BIT(idx))
@@ -887,9 +887,9 @@ static void capture_bo(struct drm_i915_error_buffer *err,
err->name = obj->base.name;
for (i = 0; i < I915_NUM_ENGINES; i++)
err->rseqno[i] = __active_get_seqno(&obj->last_read[i]);
err->wseqno = __active_get_seqno(&obj->last_write);
err->engine = __active_get_engine_id(&obj->last_write);
err->rseqno[i] = __active_get_seqno(&vma->last_read[i]);
err->wseqno = __active_get_seqno(&vma->last_write);
err->engine = __active_get_engine_id(&vma->last_write);
err->gtt_offset = vma->node.start;
err->read_domains = obj->base.read_domains;
@@ -84,7 +84,6 @@ intel_plane_duplicate_state(struct drm_plane *plane)
state = &intel_state->base;
__drm_atomic_helper_plane_duplicate_state(plane, state);
intel_state->wait_req = NULL;
return state;
}
@@ -101,7 +100,6 @@
intel_plane_destroy_state(struct drm_plane *plane,
struct drm_plane_state *state)
{
WARN_ON(state && to_intel_plane_state(state)->wait_req);
drm_atomic_helper_plane_destroy_state(plane, state);
}
@@ -37,7 +37,6 @@
#include "intel_frontbuffer.h"
#include <drm/i915_drm.h>
#include "i915_drv.h"
#include "i915_gem_dmabuf.h"
#include "intel_dsi.h"
#include "i915_trace.h"
#include <drm/drm_atomic.h>
@@ -11967,8 +11966,6 @@ static int intel_gen7_queue_flip(struct drm_device *dev,
static bool use_mmio_flip(struct intel_engine_cs *engine,
struct drm_i915_gem_object *obj)
{
struct reservation_object *resv;
/*
* This is not being used for older platforms, because
* non-availability of flip done interrupt forces us to use
@@ -11990,12 +11987,7 @@ static bool use_mmio_flip(struct intel_engine_cs *engine,
else if (i915.enable_execlists)
return true;
resv = i915_gem_object_get_dmabuf_resv(obj);
if (resv && !reservation_object_test_signaled_rcu(resv, false))
return true;
return engine != i915_gem_active_get_engine(&obj->last_write,
&obj->base.dev->struct_mutex);
return engine != i915_gem_object_last_write_engine(obj);
}
static void skl_do_mmio_flip(struct intel_crtc *intel_crtc,
@@ -12068,17 +12060,8 @@ static void intel_mmio_flip_work_func(struct work_struct *w)
struct intel_framebuffer *intel_fb =
to_intel_framebuffer(crtc->base.primary->fb);
struct drm_i915_gem_object *obj = intel_fb->obj;
struct reservation_object *resv;
if (work->flip_queued_req)
WARN_ON(i915_wait_request(work->flip_queued_req,
0, MAX_SCHEDULE_TIMEOUT) < 0);
/* For framebuffer backed by dmabuf, wait for fence */
resv = i915_gem_object_get_dmabuf_resv(obj);
if (resv)
WARN_ON(reservation_object_wait_timeout_rcu(resv, false, false,
MAX_SCHEDULE_TIMEOUT) < 0);
WARN_ON(i915_gem_object_wait(obj, 0, MAX_SCHEDULE_TIMEOUT, NULL) < 0);
intel_pipe_update_start(crtc);
@@ -12279,8 +12262,7 @@ static int intel_crtc_page_flip(struct drm_crtc *crtc,
} else if (IS_IVYBRIDGE(dev_priv) || IS_HASWELL(dev_priv)) {
engine = dev_priv->engine[BCS];
} else if (INTEL_INFO(dev)->gen >= 7) {
engine = i915_gem_active_get_engine(&obj->last_write,
&obj->base.dev->struct_mutex);
engine = i915_gem_object_last_write_engine(obj);
if (engine == NULL || engine->id != RCS)
engine = dev_priv->engine[BCS];
} else {
@@ -12312,9 +12294,6 @@ static int intel_crtc_page_flip(struct drm_crtc *crtc,
if (mmio_flip) {
INIT_WORK(&work->mmio_work, intel_mmio_flip_work_func);
work->flip_queued_req = i915_gem_active_get(&obj->last_write,
&obj->base.dev->struct_mutex);
queue_work(system_unbound_wq, &work->mmio_work);
} else {
request = i915_gem_request_alloc(engine, engine->last_context);
@@ -14154,13 +14133,10 @@ static int intel_atomic_check(struct drm_device *dev,
}
static int intel_atomic_prepare_commit(struct drm_device *dev,
struct drm_atomic_state *state,
bool nonblock)
struct drm_atomic_state *state)
{
struct drm_i915_private *dev_priv = to_i915(dev);
struct drm_plane_state *plane_state;
struct drm_crtc_state *crtc_state;
struct drm_plane *plane;
struct drm_crtc *crtc;
int i, ret;
@@ -14183,30 +14159,6 @@ static int intel_atomic_prepare_commit(struct drm_device *dev,
ret = drm_atomic_helper_prepare_planes(dev, state);
mutex_unlock(&dev->struct_mutex);
if (!ret && !nonblock) {
for_each_plane_in_state(state, plane, plane_state, i) {
struct intel_plane_state *intel_plane_state =
to_intel_plane_state(plane_state);
long timeout;
if (!intel_plane_state->wait_req)
continue;
timeout = i915_wait_request(intel_plane_state->wait_req,
I915_WAIT_INTERRUPTIBLE,
MAX_SCHEDULE_TIMEOUT);
if (timeout < 0) {
/* Any hang should be swallowed by the wait */
WARN_ON(timeout == -EIO);
mutex_lock(&dev->struct_mutex);
drm_atomic_helper_cleanup_planes(dev, state);
mutex_unlock(&dev->struct_mutex);
ret = timeout;
break;
}
}
}
return ret;
}
@@ -14400,26 +14352,11 @@ static void intel_atomic_commit_tail(struct drm_atomic_state *state)
struct drm_crtc_state *old_crtc_state;
struct drm_crtc *crtc;
struct intel_crtc_state *intel_cstate;
struct drm_plane *plane;
struct drm_plane_state *plane_state;
bool hw_check = intel_state->modeset;
unsigned long put_domains[I915_MAX_PIPES] = {};
unsigned crtc_vblank_mask = 0;
int i;
for_each_plane_in_state(state, plane, plane_state, i) {
struct intel_plane_state *intel_plane_state =
to_intel_plane_state(plane->state);
if (!intel_plane_state->wait_req)
continue;
/* EIO should be eaten, and we can't get interrupted in the
* worker, and blocking commits have waited already. */
WARN_ON(i915_wait_request(intel_plane_state->wait_req,
0, MAX_SCHEDULE_TIMEOUT) < 0);
}
drm_atomic_helper_wait_for_dependencies(state);
if (intel_state->modeset) {
@@ -14626,7 +14563,7 @@ static int intel_atomic_commit(struct drm_device *dev,
INIT_WORK(&state->commit_work, intel_atomic_commit_work);
ret = intel_atomic_prepare_commit(dev, state, nonblock);
ret = intel_atomic_prepare_commit(dev, state);
if (ret) {
DRM_DEBUG_ATOMIC("Preparing state failed with %i\n", ret);
return ret;
@@ -14759,7 +14696,7 @@ intel_prepare_plane_fb(struct drm_plane *plane,
struct drm_framebuffer *fb = new_state->fb;
struct drm_i915_gem_object *obj = intel_fb_obj(fb);
struct drm_i915_gem_object *old_obj = intel_fb_obj(plane->state->fb);
struct reservation_object *resv;
long lret;
int ret = 0;
if (!obj && !old_obj)
@@ -14797,39 +14734,34 @@ intel_prepare_plane_fb(struct drm_plane *plane,
return 0;
/* For framebuffer backed by dmabuf, wait for fence */
resv = i915_gem_object_get_dmabuf_resv(obj);
if (resv) {
long lret;
lret = reservation_object_wait_timeout_rcu(resv, false, true,
MAX_SCHEDULE_TIMEOUT);
lret = i915_gem_object_wait(obj,
I915_WAIT_INTERRUPTIBLE | I915_WAIT_LOCKED,
MAX_SCHEDULE_TIMEOUT,
NULL);
if (lret == -ERESTARTSYS)
return lret;
WARN(lret < 0, "waiting returns %li\n", lret);
}
if (plane->type == DRM_PLANE_TYPE_CURSOR &&
INTEL_INFO(dev)->cursor_needs_physical) {
int align = IS_I830(dev_priv) ? 16 * 1024 : 256;
ret = i915_gem_object_attach_phys(obj, align);
if (ret)
if (ret) {
DRM_DEBUG_KMS("failed to attach phys object\n");
return ret;
}
} else {
struct i915_vma *vma;
vma = intel_pin_and_fence_fb_obj(fb, new_state->rotation);
if (IS_ERR(vma))
ret = PTR_ERR(vma);
if (IS_ERR(vma)) {
DRM_DEBUG_KMS("failed to pin object\n");
return PTR_ERR(vma);
}
if (ret == 0) {
to_intel_plane_state(new_state)->wait_req =
i915_gem_active_get(&obj->last_write,
&obj->base.dev->struct_mutex);
}
return ret;
return 0;
}
/**
@@ -14847,7 +14779,6 @@ intel_cleanup_plane_fb(struct drm_plane *plane,
{
struct drm_device *dev = plane->dev;
struct intel_plane_state *old_intel_state;
struct intel_plane_state *intel_state = to_intel_plane_state(plane->state);
struct drm_i915_gem_object *old_obj = intel_fb_obj(old_state->fb);
struct drm_i915_gem_object *obj = intel_fb_obj(plane->state->fb);
@@ -14859,9 +14790,6 @@ intel_cleanup_plane_fb(struct drm_plane *plane,
if (old_obj && (plane->type != DRM_PLANE_TYPE_CURSOR ||
!INTEL_INFO(dev)->cursor_needs_physical))
intel_unpin_fb_obj(old_state->fb, old_state->rotation);
i915_gem_request_assign(&intel_state->wait_req, NULL);
i915_gem_request_assign(&old_intel_state->wait_req, NULL);
}
int
@@ -401,9 +401,6 @@ struct intel_plane_state {
int scaler_id;
struct drm_intel_sprite_colorkey ckey;
/* async flip related structures */
struct drm_i915_gem_request *wait_req;
};
struct intel_initial_plane_config {