[PATCH 07/21] drm/i915/gem: Set the watchdog timeout as part of intel_context creation
Jason Ekstrand
jason at jlekstrand.net
Fri Apr 16 02:16:53 UTC 2021
Instead of handling it like a context param, we add a small wrapper
around intel_context_create() to be used whenever we create a user
context. The wrapper always sets the watchdog timeout for us. This
fixes a bug where a client could get around the timeout by swapping out
the set of engines with I915_CONTEXT_PARAM_ENGINES (SETPARAM).
Signed-off-by: Jason Ekstrand <jason at jlekstrand.net>
---
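For completeness, the bypass this closes is the userspace path that replaces a
context's engine set after creation: set_engines() built the fresh contexts
with plain intel_context_create(), so they never picked up the default expiry.
Roughly the sort of thing a client could do, as a rough, untested sketch
against the uapi in include/uapi/drm/i915_drm.h (the open DRM fd and the
context id are assumed to come from earlier ioctls):

/* Illustration only: swap a context's engine set via SETPARAM. Before this
 * patch the contexts created for the new engine set had no watchdog.
 */
#include <stdint.h>
#include <sys/ioctl.h>
#include <drm/i915_drm.h>

static int swap_engines(int fd, uint32_t ctx_id)
{
	/* One render engine, laid out by I915_DEFINE_CONTEXT_PARAM_ENGINES. */
	I915_DEFINE_CONTEXT_PARAM_ENGINES(engines, 1) = {
		.extensions = 0,
		.engines = {
			{ .engine_class = I915_ENGINE_CLASS_RENDER,
			  .engine_instance = 0 },
		},
	};
	struct drm_i915_gem_context_param p = {
		.ctx_id = ctx_id,
		.param = I915_CONTEXT_PARAM_ENGINES,
		.size = sizeof(engines),
		.value = (uintptr_t)&engines,
	};

	return ioctl(fd, DRM_IOCTL_I915_GEM_CONTEXT_SETPARAM, &p);
}

With this patch, the contexts created on that path go through
intel_user_context_create() and get the same request_timeout_ms-based
watchdog as the default engine set.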
drivers/gpu/drm/i915/gem/i915_gem_context.c | 58 ++++++-------------
drivers/gpu/drm/i915/gt/intel_context_param.h | 3 +-
2 files changed, 20 insertions(+), 41 deletions(-)
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_context.c b/drivers/gpu/drm/i915/gem/i915_gem_context.c
index 0b35ab9509cb2..857a60469d510 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_context.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_context.c
@@ -287,6 +287,23 @@ engines_notify(struct i915_sw_fence *fence, enum i915_sw_fence_notify state)
 	return NOTIFY_DONE;
 }
 
+static struct intel_context *
+intel_user_context_create(struct drm_i915_private *i915,
+			  struct intel_engine_cs *engine)
+{
+	struct intel_context *ce = intel_context_create(engine);
+	if (IS_ERR(ce))
+		return ce;
+
+	if (IS_ACTIVE(CONFIG_DRM_I915_REQUEST_TIMEOUT) &&
+	    i915->params.request_timeout_ms) {
+		u64 timeout_us = (u64)i915->params.request_timeout_ms * 1000;
+		intel_context_set_watchdog_us(ce, timeout_us);
+	}
+
+	return ce;
+}
+
 static struct i915_gem_engines *alloc_engines(unsigned int count)
 {
 	struct i915_gem_engines *e;
@@ -319,7 +336,7 @@ static struct i915_gem_engines *default_engines(struct i915_gem_context *ctx)
 		GEM_BUG_ON(engine->legacy_idx >= I915_NUM_ENGINES);
 		GEM_BUG_ON(e->engines[engine->legacy_idx]);
 
-		ce = intel_context_create(engine);
+		ce = intel_user_context_create(ctx->i915, engine);
 		if (IS_ERR(ce)) {
 			__free_engines(e, e->num_engines + 1);
 			return ERR_CAST(ce);
@@ -759,41 +776,6 @@ static void __assign_ppgtt(struct i915_gem_context *ctx,
 	i915_vm_close(vm);
 }
 
-static int __apply_watchdog(struct intel_context *ce, void *timeout_us)
-{
-	return intel_context_set_watchdog_us(ce, (uintptr_t)timeout_us);
-}
-
-static int
-__set_watchdog(struct i915_gem_context *ctx, unsigned long timeout_us)
-{
-	int ret;
-
-	ret = context_apply_all(ctx, __apply_watchdog,
-				(void *)(uintptr_t)timeout_us);
-	if (!ret)
-		ctx->watchdog.timeout_us = timeout_us;
-
-	return ret;
-}
-
-static void __set_default_fence_expiry(struct i915_gem_context *ctx)
-{
-	struct drm_i915_private *i915 = ctx->i915;
-	int ret;
-
-	if (!IS_ACTIVE(CONFIG_DRM_I915_REQUEST_TIMEOUT) ||
-	    !i915->params.request_timeout_ms)
-		return;
-
-	/* Default expiry for user fences. */
-	ret = __set_watchdog(ctx, i915->params.request_timeout_ms * 1000);
-	if (ret)
-		drm_notice(&i915->drm,
-			   "Failed to configure default fence expiry! (%d)",
-			   ret);
-}
-
 static struct i915_gem_context *
 i915_gem_create_context(struct drm_i915_private *i915, unsigned int flags)
 {
@@ -836,8 +818,6 @@ i915_gem_create_context(struct drm_i915_private *i915, unsigned int flags)
 		}
 	}
 
-	__set_default_fence_expiry(ctx);
-
 	trace_i915_context_create(ctx);
 
 	return ctx;
@@ -1715,7 +1695,7 @@ set_engines(struct i915_gem_context *ctx,
 			return -ENOENT;
 		}
 
-		ce = intel_context_create(engine);
+		ce = intel_user_context_create(i915, engine);
 		if (IS_ERR(ce)) {
 			__free_engines(set.engines, n);
 			return PTR_ERR(ce);
diff --git a/drivers/gpu/drm/i915/gt/intel_context_param.h b/drivers/gpu/drm/i915/gt/intel_context_param.h
index dffedd983693d..0c69cb42d075c 100644
--- a/drivers/gpu/drm/i915/gt/intel_context_param.h
+++ b/drivers/gpu/drm/i915/gt/intel_context_param.h
@@ -10,11 +10,10 @@
 
 #include "intel_context.h"
 
-static inline int
+static inline void
 intel_context_set_watchdog_us(struct intel_context *ce, u64 timeout_us)
 {
 	ce->watchdog.timeout_us = timeout_us;
-	return 0;
 }
 
 #endif /* INTEL_CONTEXT_PARAM_H */
--
2.31.1