Commit d72d908b authored by Chris Wilson's avatar Chris Wilson

drm/i915: Mark up i915_gem_active for locking annotation

The future annotations will track the locking used for access to ensure
that it is always sufficient. We make the preparations now to present
the API ahead and to make sure that GCC can eliminate the unused
parameter.

Before:	6298417 3619610  696320 10614347         a1f64b vmlinux
After:	6298417 3619610  696320 10614347         a1f64b vmlinux
(with i915 builtin)
Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Reviewed-by: Joonas Lahtinen <joonas.lahtinen@linux.intel.com>
Link: http://patchwork.freedesktop.org/patch/msgid/1470293567-10811-12-git-send-email-chris@chris-wilson.co.uk
parent 27c01aae
...@@ -155,10 +155,13 @@ describe_obj(struct seq_file *m, struct drm_i915_gem_object *obj) ...@@ -155,10 +155,13 @@ describe_obj(struct seq_file *m, struct drm_i915_gem_object *obj)
obj->base.write_domain); obj->base.write_domain);
for_each_engine_id(engine, dev_priv, id) for_each_engine_id(engine, dev_priv, id)
seq_printf(m, "%x ", seq_printf(m, "%x ",
i915_gem_active_get_seqno(&obj->last_read[id])); i915_gem_active_get_seqno(&obj->last_read[id],
&obj->base.dev->struct_mutex));
seq_printf(m, "] %x %x%s%s%s", seq_printf(m, "] %x %x%s%s%s",
i915_gem_active_get_seqno(&obj->last_write), i915_gem_active_get_seqno(&obj->last_write,
i915_gem_active_get_seqno(&obj->last_fence), &obj->base.dev->struct_mutex),
i915_gem_active_get_seqno(&obj->last_fence,
&obj->base.dev->struct_mutex),
i915_cache_level_str(to_i915(obj->base.dev), obj->cache_level), i915_cache_level_str(to_i915(obj->base.dev), obj->cache_level),
obj->dirty ? " dirty" : "", obj->dirty ? " dirty" : "",
obj->madv == I915_MADV_DONTNEED ? " purgeable" : ""); obj->madv == I915_MADV_DONTNEED ? " purgeable" : "");
...@@ -196,7 +199,8 @@ describe_obj(struct seq_file *m, struct drm_i915_gem_object *obj) ...@@ -196,7 +199,8 @@ describe_obj(struct seq_file *m, struct drm_i915_gem_object *obj)
seq_printf(m, " (%s mappable)", s); seq_printf(m, " (%s mappable)", s);
} }
engine = i915_gem_active_get_engine(&obj->last_write); engine = i915_gem_active_get_engine(&obj->last_write,
&obj->base.dev->struct_mutex);
if (engine) if (engine)
seq_printf(m, " (%s)", engine->name); seq_printf(m, " (%s)", engine->name);
......
...@@ -1354,21 +1354,24 @@ i915_gem_object_wait_rendering(struct drm_i915_gem_object *obj, ...@@ -1354,21 +1354,24 @@ i915_gem_object_wait_rendering(struct drm_i915_gem_object *obj,
int ret, i; int ret, i;
if (readonly) { if (readonly) {
request = i915_gem_active_peek(&obj->last_write); request = i915_gem_active_peek(&obj->last_write,
&obj->base.dev->struct_mutex);
if (request) { if (request) {
ret = i915_wait_request(request); ret = i915_wait_request(request);
if (ret) if (ret)
return ret; return ret;
i = request->engine->id; i = request->engine->id;
if (i915_gem_active_peek(&obj->last_read[i]) == request) if (i915_gem_active_peek(&obj->last_read[i],
&obj->base.dev->struct_mutex) == request)
i915_gem_object_retire__read(obj, i); i915_gem_object_retire__read(obj, i);
else else
i915_gem_object_retire__write(obj); i915_gem_object_retire__write(obj);
} }
} else { } else {
for (i = 0; i < I915_NUM_ENGINES; i++) { for (i = 0; i < I915_NUM_ENGINES; i++) {
request = i915_gem_active_peek(&obj->last_read[i]); request = i915_gem_active_peek(&obj->last_read[i],
&obj->base.dev->struct_mutex);
if (!request) if (!request)
continue; continue;
...@@ -1400,9 +1403,11 @@ i915_gem_object_retire_request(struct drm_i915_gem_object *obj, ...@@ -1400,9 +1403,11 @@ i915_gem_object_retire_request(struct drm_i915_gem_object *obj,
{ {
int idx = req->engine->id; int idx = req->engine->id;
if (i915_gem_active_peek(&obj->last_read[idx]) == req) if (i915_gem_active_peek(&obj->last_read[idx],
&obj->base.dev->struct_mutex) == req)
i915_gem_object_retire__read(obj, idx); i915_gem_object_retire__read(obj, idx);
else if (i915_gem_active_peek(&obj->last_write) == req) else if (i915_gem_active_peek(&obj->last_write,
&obj->base.dev->struct_mutex) == req)
i915_gem_object_retire__write(obj); i915_gem_object_retire__write(obj);
if (!i915_reset_in_progress(&req->i915->gpu_error)) if (!i915_reset_in_progress(&req->i915->gpu_error))
...@@ -1431,7 +1436,8 @@ i915_gem_object_wait_rendering__nonblocking(struct drm_i915_gem_object *obj, ...@@ -1431,7 +1436,8 @@ i915_gem_object_wait_rendering__nonblocking(struct drm_i915_gem_object *obj,
if (readonly) { if (readonly) {
struct drm_i915_gem_request *req; struct drm_i915_gem_request *req;
req = i915_gem_active_get(&obj->last_write); req = i915_gem_active_get(&obj->last_write,
&obj->base.dev->struct_mutex);
if (req == NULL) if (req == NULL)
return 0; return 0;
...@@ -1440,7 +1446,8 @@ i915_gem_object_wait_rendering__nonblocking(struct drm_i915_gem_object *obj, ...@@ -1440,7 +1446,8 @@ i915_gem_object_wait_rendering__nonblocking(struct drm_i915_gem_object *obj,
for (i = 0; i < I915_NUM_ENGINES; i++) { for (i = 0; i < I915_NUM_ENGINES; i++) {
struct drm_i915_gem_request *req; struct drm_i915_gem_request *req;
req = i915_gem_active_get(&obj->last_read[i]); req = i915_gem_active_get(&obj->last_read[i],
&obj->base.dev->struct_mutex);
if (req == NULL) if (req == NULL)
continue; continue;
...@@ -2387,7 +2394,9 @@ static void ...@@ -2387,7 +2394,9 @@ static void
i915_gem_object_retire__write(struct drm_i915_gem_object *obj) i915_gem_object_retire__write(struct drm_i915_gem_object *obj)
{ {
GEM_BUG_ON(!i915_gem_active_isset(&obj->last_write)); GEM_BUG_ON(!i915_gem_active_isset(&obj->last_write));
GEM_BUG_ON(!(obj->active & intel_engine_flag(i915_gem_active_get_engine(&obj->last_write)))); GEM_BUG_ON(!(obj->active &
intel_engine_flag(i915_gem_active_get_engine(&obj->last_write,
&obj->base.dev->struct_mutex))));
i915_gem_active_set(&obj->last_write, NULL); i915_gem_active_set(&obj->last_write, NULL);
intel_fb_obj_flush(obj, true, ORIGIN_CS); intel_fb_obj_flush(obj, true, ORIGIN_CS);
...@@ -2405,7 +2414,8 @@ i915_gem_object_retire__read(struct drm_i915_gem_object *obj, int idx) ...@@ -2405,7 +2414,8 @@ i915_gem_object_retire__read(struct drm_i915_gem_object *obj, int idx)
list_del_init(&obj->engine_list[idx]); list_del_init(&obj->engine_list[idx]);
i915_gem_active_set(&obj->last_read[idx], NULL); i915_gem_active_set(&obj->last_read[idx], NULL);
engine = i915_gem_active_get_engine(&obj->last_write); engine = i915_gem_active_get_engine(&obj->last_write,
&obj->base.dev->struct_mutex);
if (engine && engine->id == idx) if (engine && engine->id == idx)
i915_gem_object_retire__write(obj); i915_gem_object_retire__write(obj);
...@@ -2626,7 +2636,8 @@ i915_gem_retire_requests_ring(struct intel_engine_cs *engine) ...@@ -2626,7 +2636,8 @@ i915_gem_retire_requests_ring(struct intel_engine_cs *engine)
struct drm_i915_gem_object, struct drm_i915_gem_object,
engine_list[engine->id]); engine_list[engine->id]);
if (!list_empty(&i915_gem_active_peek(&obj->last_read[engine->id])->list)) if (!list_empty(&i915_gem_active_peek(&obj->last_read[engine->id],
&obj->base.dev->struct_mutex)->list))
break; break;
i915_gem_object_retire__read(obj, engine->id); i915_gem_object_retire__read(obj, engine->id);
...@@ -2759,7 +2770,8 @@ i915_gem_object_flush_active(struct drm_i915_gem_object *obj) ...@@ -2759,7 +2770,8 @@ i915_gem_object_flush_active(struct drm_i915_gem_object *obj)
for (i = 0; i < I915_NUM_ENGINES; i++) { for (i = 0; i < I915_NUM_ENGINES; i++) {
struct drm_i915_gem_request *req; struct drm_i915_gem_request *req;
req = i915_gem_active_peek(&obj->last_read[i]); req = i915_gem_active_peek(&obj->last_read[i],
&obj->base.dev->struct_mutex);
if (req == NULL) if (req == NULL)
continue; continue;
...@@ -2837,7 +2849,8 @@ i915_gem_wait_ioctl(struct drm_device *dev, void *data, struct drm_file *file) ...@@ -2837,7 +2849,8 @@ i915_gem_wait_ioctl(struct drm_device *dev, void *data, struct drm_file *file)
for (i = 0; i < I915_NUM_ENGINES; i++) { for (i = 0; i < I915_NUM_ENGINES; i++) {
struct drm_i915_gem_request *req; struct drm_i915_gem_request *req;
req = i915_gem_active_get(&obj->last_read[i]); req = i915_gem_active_get(&obj->last_read[i],
&obj->base.dev->struct_mutex);
if (req) if (req)
requests[n++] = req; requests[n++] = req;
} }
...@@ -2932,14 +2945,16 @@ i915_gem_object_sync(struct drm_i915_gem_object *obj, ...@@ -2932,14 +2945,16 @@ i915_gem_object_sync(struct drm_i915_gem_object *obj,
if (readonly) { if (readonly) {
struct drm_i915_gem_request *req; struct drm_i915_gem_request *req;
req = i915_gem_active_peek(&obj->last_write); req = i915_gem_active_peek(&obj->last_write,
&obj->base.dev->struct_mutex);
if (req) if (req)
requests[n++] = req; requests[n++] = req;
} else { } else {
for (i = 0; i < I915_NUM_ENGINES; i++) { for (i = 0; i < I915_NUM_ENGINES; i++) {
struct drm_i915_gem_request *req; struct drm_i915_gem_request *req;
req = i915_gem_active_peek(&obj->last_read[i]); req = i915_gem_active_peek(&obj->last_read[i],
&obj->base.dev->struct_mutex);
if (req) if (req)
requests[n++] = req; requests[n++] = req;
} }
...@@ -4038,11 +4053,13 @@ i915_gem_busy_ioctl(struct drm_device *dev, void *data, ...@@ -4038,11 +4053,13 @@ i915_gem_busy_ioctl(struct drm_device *dev, void *data,
int i; int i;
for (i = 0; i < I915_NUM_ENGINES; i++) { for (i = 0; i < I915_NUM_ENGINES; i++) {
req = i915_gem_active_peek(&obj->last_read[i]); req = i915_gem_active_peek(&obj->last_read[i],
&obj->base.dev->struct_mutex);
if (req) if (req)
args->busy |= 1 << (16 + req->engine->exec_id); args->busy |= 1 << (16 + req->engine->exec_id);
} }
req = i915_gem_active_peek(&obj->last_write); req = i915_gem_active_peek(&obj->last_write,
&obj->base.dev->struct_mutex);
if (req) if (req)
args->busy |= req->engine->exec_id; args->busy |= req->engine->exec_id;
} }
......
...@@ -263,7 +263,8 @@ i915_gem_object_wait_fence(struct drm_i915_gem_object *obj) ...@@ -263,7 +263,8 @@ i915_gem_object_wait_fence(struct drm_i915_gem_object *obj)
{ {
int ret; int ret;
ret = i915_gem_active_wait(&obj->last_fence); ret = i915_gem_active_wait(&obj->last_fence,
&obj->base.dev->struct_mutex);
if (ret) if (ret)
return ret; return ret;
......
...@@ -296,6 +296,12 @@ i915_gem_active_set(struct i915_gem_active *active, ...@@ -296,6 +296,12 @@ i915_gem_active_set(struct i915_gem_active *active,
i915_gem_request_assign(&active->request, request); i915_gem_request_assign(&active->request, request);
} }
static inline struct drm_i915_gem_request *
__i915_gem_active_peek(const struct i915_gem_active *active)
{
return active->request;
}
/** /**
* i915_gem_active_peek - report the request being monitored * i915_gem_active_peek - report the request being monitored
* @active - the active tracker * @active - the active tracker
...@@ -305,7 +311,7 @@ i915_gem_active_set(struct i915_gem_active *active, ...@@ -305,7 +311,7 @@ i915_gem_active_set(struct i915_gem_active *active,
* caller must hold struct_mutex. * caller must hold struct_mutex.
*/ */
static inline struct drm_i915_gem_request * static inline struct drm_i915_gem_request *
i915_gem_active_peek(const struct i915_gem_active *active) i915_gem_active_peek(const struct i915_gem_active *active, struct mutex *mutex)
{ {
return active->request; return active->request;
} }
...@@ -318,11 +324,11 @@ i915_gem_active_peek(const struct i915_gem_active *active) ...@@ -318,11 +324,11 @@ i915_gem_active_peek(const struct i915_gem_active *active)
* if the active tracker is idle. The caller must hold struct_mutex. * if the active tracker is idle. The caller must hold struct_mutex.
*/ */
static inline struct drm_i915_gem_request * static inline struct drm_i915_gem_request *
i915_gem_active_get(const struct i915_gem_active *active) i915_gem_active_get(const struct i915_gem_active *active, struct mutex *mutex)
{ {
struct drm_i915_gem_request *request; struct drm_i915_gem_request *request;
request = i915_gem_active_peek(active); request = i915_gem_active_peek(active, mutex);
if (!request || i915_gem_request_completed(request)) if (!request || i915_gem_request_completed(request))
return NULL; return NULL;
...@@ -352,11 +358,12 @@ i915_gem_active_isset(const struct i915_gem_active *active) ...@@ -352,11 +358,12 @@ i915_gem_active_isset(const struct i915_gem_active *active)
* the caller to hold struct_mutex (but that can be relaxed if desired). * the caller to hold struct_mutex (but that can be relaxed if desired).
*/ */
static inline bool static inline bool
i915_gem_active_is_idle(const struct i915_gem_active *active) i915_gem_active_is_idle(const struct i915_gem_active *active,
struct mutex *mutex)
{ {
struct drm_i915_gem_request *request; struct drm_i915_gem_request *request;
request = i915_gem_active_peek(active); request = i915_gem_active_peek(active, mutex);
if (!request || i915_gem_request_completed(request)) if (!request || i915_gem_request_completed(request))
return true; return true;
...@@ -372,11 +379,11 @@ i915_gem_active_is_idle(const struct i915_gem_active *active) ...@@ -372,11 +379,11 @@ i915_gem_active_is_idle(const struct i915_gem_active *active)
* retired first, see i915_gem_active_retire(). * retired first, see i915_gem_active_retire().
*/ */
static inline int __must_check static inline int __must_check
i915_gem_active_wait(const struct i915_gem_active *active) i915_gem_active_wait(const struct i915_gem_active *active, struct mutex *mutex)
{ {
struct drm_i915_gem_request *request; struct drm_i915_gem_request *request;
request = i915_gem_active_peek(active); request = i915_gem_active_peek(active, mutex);
if (!request) if (!request)
return 0; return 0;
...@@ -393,9 +400,10 @@ i915_gem_active_wait(const struct i915_gem_active *active) ...@@ -393,9 +400,10 @@ i915_gem_active_wait(const struct i915_gem_active *active)
* tracker is idle, the function returns immediately. * tracker is idle, the function returns immediately.
*/ */
static inline int __must_check static inline int __must_check
i915_gem_active_retire(const struct i915_gem_active *active) i915_gem_active_retire(const struct i915_gem_active *active,
struct mutex *mutex)
{ {
return i915_gem_active_wait(active); return i915_gem_active_wait(active, mutex);
} }
/* Convenience functions for peeking at state inside active's request whilst /* Convenience functions for peeking at state inside active's request whilst
...@@ -403,15 +411,17 @@ i915_gem_active_retire(const struct i915_gem_active *active) ...@@ -403,15 +411,17 @@ i915_gem_active_retire(const struct i915_gem_active *active)
*/ */
static inline uint32_t static inline uint32_t
i915_gem_active_get_seqno(const struct i915_gem_active *active) i915_gem_active_get_seqno(const struct i915_gem_active *active,
struct mutex *mutex)
{ {
return i915_gem_request_get_seqno(i915_gem_active_peek(active)); return i915_gem_request_get_seqno(i915_gem_active_peek(active, mutex));
} }
static inline struct intel_engine_cs * static inline struct intel_engine_cs *
i915_gem_active_get_engine(const struct i915_gem_active *active) i915_gem_active_get_engine(const struct i915_gem_active *active,
struct mutex *mutex)
{ {
return i915_gem_request_get_engine(i915_gem_active_peek(active)); return i915_gem_request_get_engine(i915_gem_active_peek(active, mutex));
} }
#define for_each_active(mask, idx) \ #define for_each_active(mask, idx) \
......
...@@ -242,7 +242,8 @@ i915_gem_set_tiling(struct drm_device *dev, void *data, ...@@ -242,7 +242,8 @@ i915_gem_set_tiling(struct drm_device *dev, void *data,
} }
obj->fence_dirty = obj->fence_dirty =
!i915_gem_active_is_idle(&obj->last_fence) || !i915_gem_active_is_idle(&obj->last_fence,
&dev->struct_mutex) ||
obj->fence_reg != I915_FENCE_REG_NONE; obj->fence_reg != I915_FENCE_REG_NONE;
obj->tiling_mode = args->tiling_mode; obj->tiling_mode = args->tiling_mode;
......
...@@ -74,7 +74,8 @@ static void wait_rendering(struct drm_i915_gem_object *obj) ...@@ -74,7 +74,8 @@ static void wait_rendering(struct drm_i915_gem_object *obj)
for (i = 0; i < I915_NUM_ENGINES; i++) { for (i = 0; i < I915_NUM_ENGINES; i++) {
struct drm_i915_gem_request *req; struct drm_i915_gem_request *req;
req = i915_gem_active_get(&obj->last_read[i]); req = i915_gem_active_get(&obj->last_read[i],
&obj->base.dev->struct_mutex);
if (req) if (req)
requests[n++] = req; requests[n++] = req;
} }
......
...@@ -742,18 +742,38 @@ i915_error_object_create(struct drm_i915_private *dev_priv, ...@@ -742,18 +742,38 @@ i915_error_object_create(struct drm_i915_private *dev_priv,
#define i915_error_ggtt_object_create(dev_priv, src) \ #define i915_error_ggtt_object_create(dev_priv, src) \
i915_error_object_create((dev_priv), (src), &(dev_priv)->ggtt.base) i915_error_object_create((dev_priv), (src), &(dev_priv)->ggtt.base)
/* The error capture is special as tries to run underneath the normal
* locking rules - so we use the raw version of the i915_gem_active lookup.
*/
static inline uint32_t
__active_get_seqno(struct i915_gem_active *active)
{
return i915_gem_request_get_seqno(__i915_gem_active_peek(active));
}
static inline int
__active_get_engine_id(struct i915_gem_active *active)
{
struct intel_engine_cs *engine;
engine = i915_gem_request_get_engine(__i915_gem_active_peek(active));
return engine ? engine->id : -1;
}
static void capture_bo(struct drm_i915_error_buffer *err, static void capture_bo(struct drm_i915_error_buffer *err,
struct i915_vma *vma) struct i915_vma *vma)
{ {
struct drm_i915_gem_object *obj = vma->obj; struct drm_i915_gem_object *obj = vma->obj;
struct intel_engine_cs *engine;
int i; int i;
err->size = obj->base.size; err->size = obj->base.size;
err->name = obj->base.name; err->name = obj->base.name;
for (i = 0; i < I915_NUM_ENGINES; i++) for (i = 0; i < I915_NUM_ENGINES; i++)
err->rseqno[i] = i915_gem_active_get_seqno(&obj->last_read[i]); err->rseqno[i] = __active_get_seqno(&obj->last_read[i]);
err->wseqno = i915_gem_active_get_seqno(&obj->last_write); err->wseqno = __active_get_seqno(&obj->last_write);
err->engine = __active_get_engine_id(&obj->last_write);
err->gtt_offset = vma->node.start; err->gtt_offset = vma->node.start;
err->read_domains = obj->base.read_domains; err->read_domains = obj->base.read_domains;
err->write_domain = obj->base.write_domain; err->write_domain = obj->base.write_domain;
...@@ -766,9 +786,6 @@ static void capture_bo(struct drm_i915_error_buffer *err, ...@@ -766,9 +786,6 @@ static void capture_bo(struct drm_i915_error_buffer *err,
err->purgeable = obj->madv != I915_MADV_WILLNEED; err->purgeable = obj->madv != I915_MADV_WILLNEED;
err->userptr = obj->userptr.mm != NULL; err->userptr = obj->userptr.mm != NULL;
err->cache_level = obj->cache_level; err->cache_level = obj->cache_level;
engine = i915_gem_active_get_engine(&obj->last_write);
err->engine = engine ? engine->id : -1;
} }
static u32 capture_active_bo(struct drm_i915_error_buffer *err, static u32 capture_active_bo(struct drm_i915_error_buffer *err,
......
...@@ -11370,7 +11370,8 @@ static bool use_mmio_flip(struct intel_engine_cs *engine, ...@@ -11370,7 +11370,8 @@ static bool use_mmio_flip(struct intel_engine_cs *engine,
if (resv && !reservation_object_test_signaled_rcu(resv, false)) if (resv && !reservation_object_test_signaled_rcu(resv, false))
return true; return true;
return engine != i915_gem_active_get_engine(&obj->last_write); return engine != i915_gem_active_get_engine(&obj->last_write,
&obj->base.dev->struct_mutex);
} }
static void skl_do_mmio_flip(struct intel_crtc *intel_crtc, static void skl_do_mmio_flip(struct intel_crtc *intel_crtc,
...@@ -11673,7 +11674,8 @@ static int intel_crtc_page_flip(struct drm_crtc *crtc, ...@@ -11673,7 +11674,8 @@ static int intel_crtc_page_flip(struct drm_crtc *crtc,
} else if (IS_IVYBRIDGE(dev) || IS_HASWELL(dev)) { } else if (IS_IVYBRIDGE(dev) || IS_HASWELL(dev)) {
engine = &dev_priv->engine[BCS]; engine = &dev_priv->engine[BCS];
} else if (INTEL_INFO(dev)->gen >= 7) { } else if (INTEL_INFO(dev)->gen >= 7) {
engine = i915_gem_active_get_engine(&obj->last_write); engine = i915_gem_active_get_engine(&obj->last_write,
&obj->base.dev->struct_mutex);
if (engine == NULL || engine->id != RCS) if (engine == NULL || engine->id != RCS)
engine = &dev_priv->engine[BCS]; engine = &dev_priv->engine[BCS];
} else { } else {
...@@ -11694,7 +11696,8 @@ static int intel_crtc_page_flip(struct drm_crtc *crtc, ...@@ -11694,7 +11696,8 @@ static int intel_crtc_page_flip(struct drm_crtc *crtc,
if (mmio_flip) { if (mmio_flip) {
INIT_WORK(&work->mmio_work, intel_mmio_flip_work_func); INIT_WORK(&work->mmio_work, intel_mmio_flip_work_func);
work->flip_queued_req = i915_gem_active_get(&obj->last_write); work->flip_queued_req = i915_gem_active_get(&obj->last_write,
&obj->base.dev->struct_mutex);
schedule_work(&work->mmio_work); schedule_work(&work->mmio_work);
} else { } else {
request = i915_gem_request_alloc(engine, engine->last_context); request = i915_gem_request_alloc(engine, engine->last_context);
...@@ -14038,7 +14041,8 @@ intel_prepare_plane_fb(struct drm_plane *plane, ...@@ -14038,7 +14041,8 @@ intel_prepare_plane_fb(struct drm_plane *plane,
if (ret == 0) { if (ret == 0) {
to_intel_plane_state(new_state)->wait_req = to_intel_plane_state(new_state)->wait_req =
i915_gem_active_get(&obj->last_write); i915_gem_active_get(&obj->last_write,
&obj->base.dev->struct_mutex);
} }
return ret; return ret;
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment