Commit 677db6ad authored by Jason Ekstrand, committed by Daniel Vetter

drm/i915/gem: Set the watchdog timeout directly in intel_context_set_gem (v2)

Instead of handling it like a context param, unconditionally set it when
intel_contexts are created.  For years we've had the idea of a watchdog
uAPI floating about. The aim was for media, so that they could set very
tight deadlines for their transcode jobs, so that if you have a corrupt
bitstream (especially for decoding) you don't hang your desktop too
hard.  But it's been stuck in limbo since forever, and this simplifies
things a bit in preparation for the proto-context work.  If we decide to
actually make said uAPI a reality, we can do it through the proto-
context easily enough.

This does mean that we now read the request_timeout_ms param once per
engine when engines are created instead of once at context creation.
If someone changes request_timeout_ms between creating a context and
setting engines, they will get the new timeout.  If someone races
changing request_timeout_ms with context creation, they can
theoretically end up with different timeouts.  However, since both of
these are fairly harmless and require changing kernel params, we
don't care.

v2 (Tvrtko Ursulin):
 - Add a comment about races with request_timeout_ms

Signed-off-by: Jason Ekstrand <jason@jlekstrand.net>
Reviewed-by: Daniel Vetter <daniel.vetter@ffwll.ch>
Signed-off-by: Daniel Vetter <daniel.vetter@ffwll.ch>
Link: https://patchwork.freedesktop.org/patch/msgid/20210708154835.528166-5-jason@jlekstrand.net
parent 6ff6d61d
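
To make the timing described above concrete, here is a stand-alone C sketch that mimics the new flow: the millisecond module parameter is sampled at engine-setup time and stored on the context in microseconds, and whatever value the parameter holds at that moment is what the context keeps. The names sketch_context, sketch_set_gem and sketch_set_watchdog_us are illustrative stand-ins for the driver's intel_context, intel_context_set_gem() and intel_context_set_watchdog_us(); the CONFIG_DRM_I915_REQUEST_TIMEOUT gate is elided, so this is not the actual kernel code.

#include <stdint.h>
#include <stdio.h>

/* Illustrative stand-in for the watchdog field on struct intel_context. */
struct sketch_context {
	uint64_t watchdog_timeout_us;
};

/* Mirrors intel_context_set_watchdog_us(): a plain store, nothing can fail. */
static void sketch_set_watchdog_us(struct sketch_context *ce, uint64_t timeout_us)
{
	ce->watchdog_timeout_us = timeout_us;
}

/*
 * Mirrors the shape of the new logic in intel_context_set_gem(): the module
 * parameter is sampled here, at engine-setup time, and converted from
 * milliseconds to microseconds.  A zero parameter leaves the watchdog
 * disarmed.
 */
static void sketch_set_gem(struct sketch_context *ce, unsigned int request_timeout_ms)
{
	if (request_timeout_ms)
		sketch_set_watchdog_us(ce, (uint64_t)request_timeout_ms * 1000);
}

int main(void)
{
	struct sketch_context ce = { 0 };
	unsigned int request_timeout_ms = 20000;	/* example value, in ms */

	/* Whatever the parameter holds when engines are created is what sticks. */
	sketch_set_gem(&ce, request_timeout_ms);
	printf("watchdog timeout: %llu us\n",
	       (unsigned long long)ce.watchdog_timeout_us);
	return 0;
}
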
@@ -232,7 +232,12 @@ static void intel_context_set_gem(struct intel_context *ce,
 	    intel_engine_has_timeslices(ce->engine))
 		__set_bit(CONTEXT_USE_SEMAPHORES, &ce->flags);
 
-	intel_context_set_watchdog_us(ce, ctx->watchdog.timeout_us);
+	if (IS_ACTIVE(CONFIG_DRM_I915_REQUEST_TIMEOUT) &&
+	    ctx->i915->params.request_timeout_ms) {
+		unsigned int timeout_ms = ctx->i915->params.request_timeout_ms;
+
+		intel_context_set_watchdog_us(ce, (u64)timeout_ms * 1000);
+	}
 }
 
 static void __free_engines(struct i915_gem_engines *e, unsigned int count)
@@ -791,41 +796,6 @@ static void __assign_timeline(struct i915_gem_context *ctx,
 	context_apply_all(ctx, __apply_timeline, timeline);
 }
 
-static int __apply_watchdog(struct intel_context *ce, void *timeout_us)
-{
-	return intel_context_set_watchdog_us(ce, (uintptr_t)timeout_us);
-}
-
-static int
-__set_watchdog(struct i915_gem_context *ctx, unsigned long timeout_us)
-{
-	int ret;
-
-	ret = context_apply_all(ctx, __apply_watchdog,
-				(void *)(uintptr_t)timeout_us);
-	if (!ret)
-		ctx->watchdog.timeout_us = timeout_us;
-
-	return ret;
-}
-
-static void __set_default_fence_expiry(struct i915_gem_context *ctx)
-{
-	struct drm_i915_private *i915 = ctx->i915;
-	int ret;
-
-	if (!IS_ACTIVE(CONFIG_DRM_I915_REQUEST_TIMEOUT) ||
-	    !i915->params.request_timeout_ms)
-		return;
-
-	/* Default expiry for user fences. */
-	ret = __set_watchdog(ctx, i915->params.request_timeout_ms * 1000);
-	if (ret)
-		drm_notice(&i915->drm,
-			   "Failed to configure default fence expiry! (%d)",
-			   ret);
-}
-
 static struct i915_gem_context *
 i915_gem_create_context(struct drm_i915_private *i915, unsigned int flags)
 {
@@ -870,8 +840,6 @@ i915_gem_create_context(struct drm_i915_private *i915, unsigned int flags)
 		intel_timeline_put(timeline);
 	}
 
-	__set_default_fence_expiry(ctx);
-
 	trace_i915_context_create(ctx);
 
 	return ctx;
@@ -153,10 +153,6 @@ struct i915_gem_context {
 	 */
	atomic_t active_count;
 
-	struct {
-		u64 timeout_us;
-	} watchdog;
-
 	/**
 	 * @hang_timestamp: The last time(s) this context caused a GPU hang
 	 */
@@ -10,11 +10,10 @@
 
 #include "intel_context.h"
 
-static inline int
+static inline void
 intel_context_set_watchdog_us(struct intel_context *ce, u64 timeout_us)
 {
 	ce->watchdog.timeout_us = timeout_us;
-	return 0;
 }
 
 #endif /* INTEL_CONTEXT_PARAM_H */