Commit d7b9ca2f authored by Chris Wilson, committed by Daniel Vetter

drm/i915: Remove request->uniq

We already assign a unique identifier to every request: seqno. That
someone felt like adding a second one without even mentioning why and
tweaking ABI smells very fishy.

Fixes regression from
commit b3a38998
Author: Nick Hoath <nicholas.hoath@intel.com>
Date:   Thu Feb 19 16:30:47 2015 +0000

    drm/i915: Fix a use after free, and unbalanced refcounting

v2: Rebase
Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Cc: Nick Hoath <nicholas.hoath@intel.com>
Cc: Thomas Daniel <thomas.daniel@intel.com>
Cc: Daniel Vetter <daniel@ffwll.ch>
Cc: Jani Nikula <jani.nikula@intel.com>
[danvet: Fixup because different merge order.]
Signed-off-by: Daniel Vetter <daniel.vetter@ffwll.ch>
parent 423795cb
@@ -1823,8 +1823,6 @@ struct drm_i915_private {
 		void (*stop_ring)(struct intel_engine_cs *ring);
 	} gt;
 
-	uint32_t request_uniq;
-
 	/*
 	 * NOTE: This is the dri1/ums dungeon, don't add stuff here. Your patch
 	 * will be rejected. Instead look for a better place.
@@ -2094,8 +2092,6 @@ struct drm_i915_gem_request {
 	/** process identifier submitting this request */
 	struct pid *pid;
 
-	uint32_t uniq;
-
 	/**
 	 * The ELSP only accepts two elements at a time, so we queue
 	 * context/tail pairs on a given queue (ring->execlist_queue) until the
......
@@ -2532,7 +2532,6 @@ int i915_gem_request_alloc(struct intel_engine_cs *ring,
 	}
 
 	rq->ring = ring;
-	rq->uniq = dev_priv->request_uniq++;
 
 	if (i915.enable_execlists)
 		ret = intel_logical_ring_alloc_request_extras(rq, ctx);
......
@@ -505,7 +505,6 @@ DECLARE_EVENT_CLASS(i915_gem_request,
 	    TP_STRUCT__entry(
 			     __field(u32, dev)
 			     __field(u32, ring)
-			     __field(u32, uniq)
 			     __field(u32, seqno)
 			     ),
@@ -514,13 +513,11 @@ DECLARE_EVENT_CLASS(i915_gem_request,
 					   i915_gem_request_get_ring(req);
 			   __entry->dev = ring->dev->primary->index;
 			   __entry->ring = ring->id;
-			   __entry->uniq = req ? req->uniq : 0;
 			   __entry->seqno = i915_gem_request_get_seqno(req);
 			   ),
 
-	    TP_printk("dev=%u, ring=%u, uniq=%u, seqno=%u",
-		      __entry->dev, __entry->ring, __entry->uniq,
-		      __entry->seqno)
+	    TP_printk("dev=%u, ring=%u, seqno=%u",
+		      __entry->dev, __entry->ring, __entry->seqno)
 );
 
 DEFINE_EVENT(i915_gem_request, i915_gem_request_add,
@@ -565,7 +562,6 @@ TRACE_EVENT(i915_gem_request_wait_begin,
 	    TP_STRUCT__entry(
 			     __field(u32, dev)
 			     __field(u32, ring)
-			     __field(u32, uniq)
 			     __field(u32, seqno)
 			     __field(bool, blocking)
 			     ),
@@ -581,14 +577,13 @@ TRACE_EVENT(i915_gem_request_wait_begin,
 					   i915_gem_request_get_ring(req);
 			   __entry->dev = ring->dev->primary->index;
 			   __entry->ring = ring->id;
-			   __entry->uniq = req ? req->uniq : 0;
 			   __entry->seqno = i915_gem_request_get_seqno(req);
 			   __entry->blocking =
 				     mutex_is_locked(&ring->dev->struct_mutex);
 			   ),
 
-	    TP_printk("dev=%u, ring=%u, uniq=%u, seqno=%u, blocking=%s",
-		      __entry->dev, __entry->ring, __entry->uniq,
-		      __entry->seqno, __entry->blocking ? "yes (NB)" : "no")
+	    TP_printk("dev=%u, ring=%u, seqno=%u, blocking=%s",
+		      __entry->dev, __entry->ring,
+		      __entry->seqno, __entry->blocking ? "yes (NB)" : "no")
 );
......
@@ -540,7 +540,6 @@ static int execlists_context_queue(struct intel_engine_cs *ring,
 		request->ring = ring;
 		request->ctx = to;
 		kref_init(&request->ref);
-		request->uniq = dev_priv->request_uniq++;
 		i915_gem_context_reference(request->ctx);
 	} else {
 		i915_gem_request_reference(request);
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment