Commit 45db630e authored by Chris Wilson's avatar Chris Wilson Committed by Jani Nikula

drm/i915: Check for rq->hwsp validity after acquiring RCU lock

Since we allow removing the timeline map at runtime, there is a risk
that rq->hwsp points into a stale page. To control that risk, we hold
the RCU read lock while reading *rq->hwsp, but we missed a couple of
important barriers. First, the unpinning / removal of the timeline map
must be after all RCU readers into that map are complete, i.e. after an
rcu barrier (in this case courtesy of call_rcu()). Secondly, we must
make sure that the rq->hwsp we are about to dereference under the RCU
lock is valid. In this case, we make the rq->hwsp pointer safe during
i915_request_retire() and so we know that rq->hwsp may become invalid
only after the request has been signaled. Therefore, if the request is
not yet signaled when we acquire rq->hwsp under the RCU lock, we know
that rq->hwsp will remain valid for the duration of the RCU read lock.

This is a very small window that may lead to either considering the
request not completed (causing a delay until the request is checked
again, any wait for the request is not affected) or dereferencing an
invalid pointer.

Fixes: 3adac468 ("drm/i915: Introduce concept of per-timeline (context) HWSP")
Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Cc: Tvrtko Ursulin <tvrtko.ursulin@intel.com>
Cc: <stable@vger.kernel.org> # v5.1+
Reviewed-by: Tvrtko Ursulin <tvrtko.ursulin@intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20201218122421.18344-1-chris@chris-wilson.co.uk
(cherry picked from commit 9bb36cf6)
Signed-off-by: Jani Nikula <jani.nikula@intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20210118101755.476744-1-chris@chris-wilson.co.uk
parent 171a8e99
...@@ -134,11 +134,6 @@ static bool remove_signaling_context(struct intel_breadcrumbs *b, ...@@ -134,11 +134,6 @@ static bool remove_signaling_context(struct intel_breadcrumbs *b,
return true; return true;
} }
static inline bool __request_completed(const struct i915_request *rq)
{
return i915_seqno_passed(__hwsp_seqno(rq), rq->fence.seqno);
}
__maybe_unused static bool __maybe_unused static bool
check_signal_order(struct intel_context *ce, struct i915_request *rq) check_signal_order(struct intel_context *ce, struct i915_request *rq)
{ {
...@@ -257,7 +252,7 @@ static void signal_irq_work(struct irq_work *work) ...@@ -257,7 +252,7 @@ static void signal_irq_work(struct irq_work *work)
list_for_each_entry_rcu(rq, &ce->signals, signal_link) { list_for_each_entry_rcu(rq, &ce->signals, signal_link) {
bool release; bool release;
if (!__request_completed(rq)) if (!__i915_request_is_complete(rq))
break; break;
if (!test_and_clear_bit(I915_FENCE_FLAG_SIGNAL, if (!test_and_clear_bit(I915_FENCE_FLAG_SIGNAL,
...@@ -379,7 +374,7 @@ static void insert_breadcrumb(struct i915_request *rq) ...@@ -379,7 +374,7 @@ static void insert_breadcrumb(struct i915_request *rq)
* straight onto a signaled list, and queue the irq worker for * straight onto a signaled list, and queue the irq worker for
* its signal completion. * its signal completion.
*/ */
if (__request_completed(rq)) { if (__i915_request_is_complete(rq)) {
if (__signal_request(rq) && if (__signal_request(rq) &&
llist_add(&rq->signal_node, &b->signaled_requests)) llist_add(&rq->signal_node, &b->signaled_requests))
irq_work_queue(&b->irq_work); irq_work_queue(&b->irq_work);
......
...@@ -126,6 +126,10 @@ static void __rcu_cacheline_free(struct rcu_head *rcu) ...@@ -126,6 +126,10 @@ static void __rcu_cacheline_free(struct rcu_head *rcu)
struct intel_timeline_cacheline *cl = struct intel_timeline_cacheline *cl =
container_of(rcu, typeof(*cl), rcu); container_of(rcu, typeof(*cl), rcu);
/* Must wait until after all *rq->hwsp are complete before removing */
i915_gem_object_unpin_map(cl->hwsp->vma->obj);
__idle_hwsp_free(cl->hwsp, ptr_unmask_bits(cl->vaddr, CACHELINE_BITS));
i915_active_fini(&cl->active); i915_active_fini(&cl->active);
kfree(cl); kfree(cl);
} }
...@@ -133,11 +137,6 @@ static void __rcu_cacheline_free(struct rcu_head *rcu) ...@@ -133,11 +137,6 @@ static void __rcu_cacheline_free(struct rcu_head *rcu)
static void __idle_cacheline_free(struct intel_timeline_cacheline *cl) static void __idle_cacheline_free(struct intel_timeline_cacheline *cl)
{ {
GEM_BUG_ON(!i915_active_is_idle(&cl->active)); GEM_BUG_ON(!i915_active_is_idle(&cl->active));
i915_gem_object_unpin_map(cl->hwsp->vma->obj);
i915_vma_put(cl->hwsp->vma);
__idle_hwsp_free(cl->hwsp, ptr_unmask_bits(cl->vaddr, CACHELINE_BITS));
call_rcu(&cl->rcu, __rcu_cacheline_free); call_rcu(&cl->rcu, __rcu_cacheline_free);
} }
...@@ -179,7 +178,6 @@ cacheline_alloc(struct intel_timeline_hwsp *hwsp, unsigned int cacheline) ...@@ -179,7 +178,6 @@ cacheline_alloc(struct intel_timeline_hwsp *hwsp, unsigned int cacheline)
return ERR_CAST(vaddr); return ERR_CAST(vaddr);
} }
i915_vma_get(hwsp->vma);
cl->hwsp = hwsp; cl->hwsp = hwsp;
cl->vaddr = page_pack_bits(vaddr, cacheline); cl->vaddr = page_pack_bits(vaddr, cacheline);
......
...@@ -434,7 +434,7 @@ static inline u32 hwsp_seqno(const struct i915_request *rq) ...@@ -434,7 +434,7 @@ static inline u32 hwsp_seqno(const struct i915_request *rq)
static inline bool __i915_request_has_started(const struct i915_request *rq) static inline bool __i915_request_has_started(const struct i915_request *rq)
{ {
return i915_seqno_passed(hwsp_seqno(rq), rq->fence.seqno - 1); return i915_seqno_passed(__hwsp_seqno(rq), rq->fence.seqno - 1);
} }
/** /**
...@@ -465,11 +465,19 @@ static inline bool __i915_request_has_started(const struct i915_request *rq) ...@@ -465,11 +465,19 @@ static inline bool __i915_request_has_started(const struct i915_request *rq)
*/ */
static inline bool i915_request_started(const struct i915_request *rq) static inline bool i915_request_started(const struct i915_request *rq)
{ {
bool result;
if (i915_request_signaled(rq)) if (i915_request_signaled(rq))
return true; return true;
/* Remember: started but may have since been preempted! */ result = true;
return __i915_request_has_started(rq); rcu_read_lock(); /* the HWSP may be freed at runtime */
if (likely(!i915_request_signaled(rq)))
/* Remember: started but may have since been preempted! */
result = __i915_request_has_started(rq);
rcu_read_unlock();
return result;
} }
/** /**
...@@ -482,10 +490,16 @@ static inline bool i915_request_started(const struct i915_request *rq) ...@@ -482,10 +490,16 @@ static inline bool i915_request_started(const struct i915_request *rq)
*/ */
static inline bool i915_request_is_running(const struct i915_request *rq) static inline bool i915_request_is_running(const struct i915_request *rq)
{ {
bool result;
if (!i915_request_is_active(rq)) if (!i915_request_is_active(rq))
return false; return false;
return __i915_request_has_started(rq); rcu_read_lock();
result = __i915_request_has_started(rq) && i915_request_is_active(rq);
rcu_read_unlock();
return result;
} }
/** /**
...@@ -509,12 +523,25 @@ static inline bool i915_request_is_ready(const struct i915_request *rq) ...@@ -509,12 +523,25 @@ static inline bool i915_request_is_ready(const struct i915_request *rq)
return !list_empty(&rq->sched.link); return !list_empty(&rq->sched.link);
} }
static inline bool __i915_request_is_complete(const struct i915_request *rq)
{
return i915_seqno_passed(__hwsp_seqno(rq), rq->fence.seqno);
}
static inline bool i915_request_completed(const struct i915_request *rq) static inline bool i915_request_completed(const struct i915_request *rq)
{ {
bool result;
if (i915_request_signaled(rq)) if (i915_request_signaled(rq))
return true; return true;
return i915_seqno_passed(hwsp_seqno(rq), rq->fence.seqno); result = true;
rcu_read_lock(); /* the HWSP may be freed at runtime */
if (likely(!i915_request_signaled(rq)))
result = __i915_request_is_complete(rq);
rcu_read_unlock();
return result;
} }
static inline void i915_request_mark_complete(struct i915_request *rq) static inline void i915_request_mark_complete(struct i915_request *rq)
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment