Commit 55612025 authored by Matthew Brost's avatar Matthew Brost Committed by John Harrison

drm/i915/guc: GuC virtual engines

Implement GuC virtual engines. Rather simple implementation: basically
just allocate an engine, set the context enter / exit functions to
virtual-engine-specific functions, set all other variables / functions
to the GuC versions, and set the engine mask to that of all the siblings.

v2: Update to work with proto-ctx
v3:
 (Daniele)
  - Drop include, add comment to intel_virtual_engine_has_heartbeat

Cc: Daniele Ceraolo Spurio <daniele.ceraolospurio@intel.com>
Signed-off-by: default avatarMatthew Brost <matthew.brost@intel.com>
Reviewed-by: default avatarDaniele Ceraolo Spurio <daniele.ceraolospurio@intel.com>
Signed-off-by: default avatarJohn Harrison <John.C.Harrison@Intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20210727002348.97202-2-matthew.brost@intel.com
parent 13d29c82
...@@ -74,7 +74,6 @@ ...@@ -74,7 +74,6 @@
#include "gt/intel_context_param.h" #include "gt/intel_context_param.h"
#include "gt/intel_engine_heartbeat.h" #include "gt/intel_engine_heartbeat.h"
#include "gt/intel_engine_user.h" #include "gt/intel_engine_user.h"
#include "gt/intel_execlists_submission.h" /* virtual_engine */
#include "gt/intel_gpu_commands.h" #include "gt/intel_gpu_commands.h"
#include "gt/intel_ring.h" #include "gt/intel_ring.h"
...@@ -363,9 +362,6 @@ set_proto_ctx_engines_balance(struct i915_user_extension __user *base, ...@@ -363,9 +362,6 @@ set_proto_ctx_engines_balance(struct i915_user_extension __user *base,
if (!HAS_EXECLISTS(i915)) if (!HAS_EXECLISTS(i915))
return -ENODEV; return -ENODEV;
if (intel_uc_uses_guc_submission(&i915->gt.uc))
return -ENODEV; /* not implement yet */
if (get_user(idx, &ext->engine_index)) if (get_user(idx, &ext->engine_index))
return -EFAULT; return -EFAULT;
...@@ -950,7 +946,7 @@ static struct i915_gem_engines *user_engines(struct i915_gem_context *ctx, ...@@ -950,7 +946,7 @@ static struct i915_gem_engines *user_engines(struct i915_gem_context *ctx,
break; break;
case I915_GEM_ENGINE_TYPE_BALANCED: case I915_GEM_ENGINE_TYPE_BALANCED:
ce = intel_execlists_create_virtual(pe[n].siblings, ce = intel_engine_create_virtual(pe[n].siblings,
pe[n].num_siblings); pe[n].num_siblings);
break; break;
......
...@@ -47,6 +47,12 @@ struct intel_context_ops { ...@@ -47,6 +47,12 @@ struct intel_context_ops {
void (*reset)(struct intel_context *ce); void (*reset)(struct intel_context *ce);
void (*destroy)(struct kref *kref); void (*destroy)(struct kref *kref);
/* virtual engine/context interface */
struct intel_context *(*create_virtual)(struct intel_engine_cs **engine,
unsigned int count);
struct intel_engine_cs *(*get_sibling)(struct intel_engine_cs *engine,
unsigned int sibling);
}; };
struct intel_context { struct intel_context {
......
...@@ -273,13 +273,41 @@ intel_engine_has_preempt_reset(const struct intel_engine_cs *engine) ...@@ -273,13 +273,41 @@ intel_engine_has_preempt_reset(const struct intel_engine_cs *engine)
return intel_engine_has_preemption(engine); return intel_engine_has_preemption(engine);
} }
struct intel_context *
intel_engine_create_virtual(struct intel_engine_cs **siblings,
unsigned int count);
static inline bool
intel_virtual_engine_has_heartbeat(const struct intel_engine_cs *engine)
{
/*
* For non-GuC submission we expect the back-end to look at the
* heartbeat status of the actual physical engine that the work
* has been (or is being) scheduled on, so we should only reach
* here with GuC submission enabled.
*/
GEM_BUG_ON(!intel_engine_uses_guc(engine));
return intel_guc_virtual_engine_has_heartbeat(engine);
}
static inline bool static inline bool
intel_engine_has_heartbeat(const struct intel_engine_cs *engine) intel_engine_has_heartbeat(const struct intel_engine_cs *engine)
{ {
if (!IS_ACTIVE(CONFIG_DRM_I915_HEARTBEAT_INTERVAL)) if (!IS_ACTIVE(CONFIG_DRM_I915_HEARTBEAT_INTERVAL))
return false; return false;
if (intel_engine_is_virtual(engine))
return intel_virtual_engine_has_heartbeat(engine);
else
return READ_ONCE(engine->props.heartbeat_interval_ms); return READ_ONCE(engine->props.heartbeat_interval_ms);
} }
static inline struct intel_engine_cs *
intel_engine_get_sibling(struct intel_engine_cs *engine, unsigned int sibling)
{
GEM_BUG_ON(!intel_engine_is_virtual(engine));
return engine->cops->get_sibling(engine, sibling);
}
#endif /* _INTEL_RINGBUFFER_H_ */ #endif /* _INTEL_RINGBUFFER_H_ */
...@@ -1796,6 +1796,20 @@ ktime_t intel_engine_get_busy_time(struct intel_engine_cs *engine, ktime_t *now) ...@@ -1796,6 +1796,20 @@ ktime_t intel_engine_get_busy_time(struct intel_engine_cs *engine, ktime_t *now)
return total; return total;
} }
struct intel_context *
intel_engine_create_virtual(struct intel_engine_cs **siblings,
unsigned int count)
{
if (count == 0)
return ERR_PTR(-EINVAL);
if (count == 1)
return intel_context_create(siblings[0]);
GEM_BUG_ON(!siblings[0]->cops->create_virtual);
return siblings[0]->cops->create_virtual(siblings, count);
}
static bool match_ring(struct i915_request *rq) static bool match_ring(struct i915_request *rq)
{ {
u32 ring = ENGINE_READ(rq->engine, RING_START); u32 ring = ENGINE_READ(rq->engine, RING_START);
......
...@@ -199,6 +199,9 @@ static struct virtual_engine *to_virtual_engine(struct intel_engine_cs *engine) ...@@ -199,6 +199,9 @@ static struct virtual_engine *to_virtual_engine(struct intel_engine_cs *engine)
return container_of(engine, struct virtual_engine, base); return container_of(engine, struct virtual_engine, base);
} }
static struct intel_context *
execlists_create_virtual(struct intel_engine_cs **siblings, unsigned int count);
static struct i915_request * static struct i915_request *
__active_request(const struct intel_timeline * const tl, __active_request(const struct intel_timeline * const tl,
struct i915_request *rq, struct i915_request *rq,
...@@ -2599,6 +2602,8 @@ static const struct intel_context_ops execlists_context_ops = { ...@@ -2599,6 +2602,8 @@ static const struct intel_context_ops execlists_context_ops = {
.reset = lrc_reset, .reset = lrc_reset,
.destroy = lrc_destroy, .destroy = lrc_destroy,
.create_virtual = execlists_create_virtual,
}; };
static int emit_pdps(struct i915_request *rq) static int emit_pdps(struct i915_request *rq)
...@@ -3549,6 +3554,17 @@ static void virtual_context_exit(struct intel_context *ce) ...@@ -3549,6 +3554,17 @@ static void virtual_context_exit(struct intel_context *ce)
intel_engine_pm_put(ve->siblings[n]); intel_engine_pm_put(ve->siblings[n]);
} }
static struct intel_engine_cs *
virtual_get_sibling(struct intel_engine_cs *engine, unsigned int sibling)
{
struct virtual_engine *ve = to_virtual_engine(engine);
if (sibling >= ve->num_siblings)
return NULL;
return ve->siblings[sibling];
}
static const struct intel_context_ops virtual_context_ops = { static const struct intel_context_ops virtual_context_ops = {
.flags = COPS_HAS_INFLIGHT, .flags = COPS_HAS_INFLIGHT,
...@@ -3563,6 +3579,8 @@ static const struct intel_context_ops virtual_context_ops = { ...@@ -3563,6 +3579,8 @@ static const struct intel_context_ops virtual_context_ops = {
.exit = virtual_context_exit, .exit = virtual_context_exit,
.destroy = virtual_context_destroy, .destroy = virtual_context_destroy,
.get_sibling = virtual_get_sibling,
}; };
static intel_engine_mask_t virtual_submission_mask(struct virtual_engine *ve) static intel_engine_mask_t virtual_submission_mask(struct virtual_engine *ve)
...@@ -3711,20 +3729,13 @@ static void virtual_submit_request(struct i915_request *rq) ...@@ -3711,20 +3729,13 @@ static void virtual_submit_request(struct i915_request *rq)
spin_unlock_irqrestore(&ve->base.sched_engine->lock, flags); spin_unlock_irqrestore(&ve->base.sched_engine->lock, flags);
} }
struct intel_context * static struct intel_context *
intel_execlists_create_virtual(struct intel_engine_cs **siblings, execlists_create_virtual(struct intel_engine_cs **siblings, unsigned int count)
unsigned int count)
{ {
struct virtual_engine *ve; struct virtual_engine *ve;
unsigned int n; unsigned int n;
int err; int err;
if (count == 0)
return ERR_PTR(-EINVAL);
if (count == 1)
return intel_context_create(siblings[0]);
ve = kzalloc(struct_size(ve, siblings, count), GFP_KERNEL); ve = kzalloc(struct_size(ve, siblings, count), GFP_KERNEL);
if (!ve) if (!ve)
return ERR_PTR(-ENOMEM); return ERR_PTR(-ENOMEM);
......
...@@ -32,10 +32,6 @@ void intel_execlists_show_requests(struct intel_engine_cs *engine, ...@@ -32,10 +32,6 @@ void intel_execlists_show_requests(struct intel_engine_cs *engine,
int indent), int indent),
unsigned int max); unsigned int max);
struct intel_context *
intel_execlists_create_virtual(struct intel_engine_cs **siblings,
unsigned int count);
bool bool
intel_engine_in_execlists_submission_mode(const struct intel_engine_cs *engine); intel_engine_in_execlists_submission_mode(const struct intel_engine_cs *engine);
......
...@@ -3733,7 +3733,7 @@ static int nop_virtual_engine(struct intel_gt *gt, ...@@ -3733,7 +3733,7 @@ static int nop_virtual_engine(struct intel_gt *gt,
GEM_BUG_ON(!nctx || nctx > ARRAY_SIZE(ve)); GEM_BUG_ON(!nctx || nctx > ARRAY_SIZE(ve));
for (n = 0; n < nctx; n++) { for (n = 0; n < nctx; n++) {
ve[n] = intel_execlists_create_virtual(siblings, nsibling); ve[n] = intel_engine_create_virtual(siblings, nsibling);
if (IS_ERR(ve[n])) { if (IS_ERR(ve[n])) {
err = PTR_ERR(ve[n]); err = PTR_ERR(ve[n]);
nctx = n; nctx = n;
...@@ -3929,7 +3929,7 @@ static int mask_virtual_engine(struct intel_gt *gt, ...@@ -3929,7 +3929,7 @@ static int mask_virtual_engine(struct intel_gt *gt,
* restrict it to our desired engine within the virtual engine. * restrict it to our desired engine within the virtual engine.
*/ */
ve = intel_execlists_create_virtual(siblings, nsibling); ve = intel_engine_create_virtual(siblings, nsibling);
if (IS_ERR(ve)) { if (IS_ERR(ve)) {
err = PTR_ERR(ve); err = PTR_ERR(ve);
goto out_close; goto out_close;
...@@ -4060,7 +4060,7 @@ static int slicein_virtual_engine(struct intel_gt *gt, ...@@ -4060,7 +4060,7 @@ static int slicein_virtual_engine(struct intel_gt *gt,
i915_request_add(rq); i915_request_add(rq);
} }
ce = intel_execlists_create_virtual(siblings, nsibling); ce = intel_engine_create_virtual(siblings, nsibling);
if (IS_ERR(ce)) { if (IS_ERR(ce)) {
err = PTR_ERR(ce); err = PTR_ERR(ce);
goto out; goto out;
...@@ -4112,7 +4112,7 @@ static int sliceout_virtual_engine(struct intel_gt *gt, ...@@ -4112,7 +4112,7 @@ static int sliceout_virtual_engine(struct intel_gt *gt,
/* XXX We do not handle oversubscription and fairness with normal rq */ /* XXX We do not handle oversubscription and fairness with normal rq */
for (n = 0; n < nsibling; n++) { for (n = 0; n < nsibling; n++) {
ce = intel_execlists_create_virtual(siblings, nsibling); ce = intel_engine_create_virtual(siblings, nsibling);
if (IS_ERR(ce)) { if (IS_ERR(ce)) {
err = PTR_ERR(ce); err = PTR_ERR(ce);
goto out; goto out;
...@@ -4214,7 +4214,7 @@ static int preserved_virtual_engine(struct intel_gt *gt, ...@@ -4214,7 +4214,7 @@ static int preserved_virtual_engine(struct intel_gt *gt,
if (err) if (err)
goto out_scratch; goto out_scratch;
ve = intel_execlists_create_virtual(siblings, nsibling); ve = intel_engine_create_virtual(siblings, nsibling);
if (IS_ERR(ve)) { if (IS_ERR(ve)) {
err = PTR_ERR(ve); err = PTR_ERR(ve);
goto out_scratch; goto out_scratch;
...@@ -4354,7 +4354,7 @@ static int reset_virtual_engine(struct intel_gt *gt, ...@@ -4354,7 +4354,7 @@ static int reset_virtual_engine(struct intel_gt *gt,
if (igt_spinner_init(&spin, gt)) if (igt_spinner_init(&spin, gt))
return -ENOMEM; return -ENOMEM;
ve = intel_execlists_create_virtual(siblings, nsibling); ve = intel_engine_create_virtual(siblings, nsibling);
if (IS_ERR(ve)) { if (IS_ERR(ve)) {
err = PTR_ERR(ve); err = PTR_ERR(ve);
goto out_spin; goto out_spin;
......
...@@ -60,6 +60,15 @@ ...@@ -60,6 +60,15 @@
* *
*/ */
/* GuC Virtual Engine */
struct guc_virtual_engine {
struct intel_engine_cs base;
struct intel_context context;
};
static struct intel_context *
guc_create_virtual(struct intel_engine_cs **siblings, unsigned int count);
#define GUC_REQUEST_SIZE 64 /* bytes */ #define GUC_REQUEST_SIZE 64 /* bytes */
/* /*
...@@ -931,14 +940,17 @@ static int guc_lrc_desc_pin(struct intel_context *ce) ...@@ -931,14 +940,17 @@ static int guc_lrc_desc_pin(struct intel_context *ce)
return ret; return ret;
} }
static int guc_context_pre_pin(struct intel_context *ce, static int __guc_context_pre_pin(struct intel_context *ce,
struct intel_engine_cs *engine,
struct i915_gem_ww_ctx *ww, struct i915_gem_ww_ctx *ww,
void **vaddr) void **vaddr)
{ {
return lrc_pre_pin(ce, ce->engine, ww, vaddr); return lrc_pre_pin(ce, engine, ww, vaddr);
} }
static int guc_context_pin(struct intel_context *ce, void *vaddr) static int __guc_context_pin(struct intel_context *ce,
struct intel_engine_cs *engine,
void *vaddr)
{ {
if (i915_ggtt_offset(ce->state) != if (i915_ggtt_offset(ce->state) !=
(ce->lrc.lrca & CTX_GTT_ADDRESS_MASK)) (ce->lrc.lrca & CTX_GTT_ADDRESS_MASK))
...@@ -949,7 +961,19 @@ static int guc_context_pin(struct intel_context *ce, void *vaddr) ...@@ -949,7 +961,19 @@ static int guc_context_pin(struct intel_context *ce, void *vaddr)
* explanation of why. * explanation of why.
*/ */
return lrc_pin(ce, ce->engine, vaddr); return lrc_pin(ce, engine, vaddr);
}
static int guc_context_pre_pin(struct intel_context *ce,
struct i915_gem_ww_ctx *ww,
void **vaddr)
{
return __guc_context_pre_pin(ce, ce->engine, ww, vaddr);
}
static int guc_context_pin(struct intel_context *ce, void *vaddr)
{
return __guc_context_pin(ce, ce->engine, vaddr);
} }
static void guc_context_unpin(struct intel_context *ce) static void guc_context_unpin(struct intel_context *ce)
...@@ -1054,6 +1078,21 @@ static inline void guc_lrc_desc_unpin(struct intel_context *ce) ...@@ -1054,6 +1078,21 @@ static inline void guc_lrc_desc_unpin(struct intel_context *ce)
deregister_context(ce, ce->guc_id); deregister_context(ce, ce->guc_id);
} }
static void __guc_context_destroy(struct intel_context *ce)
{
lrc_fini(ce);
intel_context_fini(ce);
if (intel_engine_is_virtual(ce->engine)) {
struct guc_virtual_engine *ve =
container_of(ce, typeof(*ve), context);
kfree(ve);
} else {
intel_context_free(ce);
}
}
static void guc_context_destroy(struct kref *kref) static void guc_context_destroy(struct kref *kref)
{ {
struct intel_context *ce = container_of(kref, typeof(*ce), ref); struct intel_context *ce = container_of(kref, typeof(*ce), ref);
...@@ -1068,11 +1107,11 @@ static void guc_context_destroy(struct kref *kref) ...@@ -1068,11 +1107,11 @@ static void guc_context_destroy(struct kref *kref)
* registered with the GuC. * registered with the GuC.
*/ */
if (context_guc_id_invalid(ce)) { if (context_guc_id_invalid(ce)) {
lrc_destroy(kref); __guc_context_destroy(ce);
return; return;
} else if (!lrc_desc_registered(guc, ce->guc_id)) { } else if (!lrc_desc_registered(guc, ce->guc_id)) {
release_guc_id(guc, ce); release_guc_id(guc, ce);
lrc_destroy(kref); __guc_context_destroy(ce);
return; return;
} }
...@@ -1087,7 +1126,7 @@ static void guc_context_destroy(struct kref *kref) ...@@ -1087,7 +1126,7 @@ static void guc_context_destroy(struct kref *kref)
spin_lock_irqsave(&guc->contexts_lock, flags); spin_lock_irqsave(&guc->contexts_lock, flags);
if (context_guc_id_invalid(ce)) { if (context_guc_id_invalid(ce)) {
spin_unlock_irqrestore(&guc->contexts_lock, flags); spin_unlock_irqrestore(&guc->contexts_lock, flags);
lrc_destroy(kref); __guc_context_destroy(ce);
return; return;
} }
...@@ -1132,6 +1171,8 @@ static const struct intel_context_ops guc_context_ops = { ...@@ -1132,6 +1171,8 @@ static const struct intel_context_ops guc_context_ops = {
.reset = lrc_reset, .reset = lrc_reset,
.destroy = guc_context_destroy, .destroy = guc_context_destroy,
.create_virtual = guc_create_virtual,
}; };
static void __guc_signal_context_fence(struct intel_context *ce) static void __guc_signal_context_fence(struct intel_context *ce)
...@@ -1260,6 +1301,83 @@ static int guc_request_alloc(struct i915_request *rq) ...@@ -1260,6 +1301,83 @@ static int guc_request_alloc(struct i915_request *rq)
return 0; return 0;
} }
static struct intel_engine_cs *
guc_virtual_get_sibling(struct intel_engine_cs *ve, unsigned int sibling)
{
struct intel_engine_cs *engine;
intel_engine_mask_t tmp, mask = ve->mask;
unsigned int num_siblings = 0;
for_each_engine_masked(engine, ve->gt, mask, tmp)
if (num_siblings++ == sibling)
return engine;
return NULL;
}
static int guc_virtual_context_pre_pin(struct intel_context *ce,
struct i915_gem_ww_ctx *ww,
void **vaddr)
{
struct intel_engine_cs *engine = guc_virtual_get_sibling(ce->engine, 0);
return __guc_context_pre_pin(ce, engine, ww, vaddr);
}
static int guc_virtual_context_pin(struct intel_context *ce, void *vaddr)
{
struct intel_engine_cs *engine = guc_virtual_get_sibling(ce->engine, 0);
return __guc_context_pin(ce, engine, vaddr);
}
static void guc_virtual_context_enter(struct intel_context *ce)
{
intel_engine_mask_t tmp, mask = ce->engine->mask;
struct intel_engine_cs *engine;
for_each_engine_masked(engine, ce->engine->gt, mask, tmp)
intel_engine_pm_get(engine);
intel_timeline_enter(ce->timeline);
}
static void guc_virtual_context_exit(struct intel_context *ce)
{
intel_engine_mask_t tmp, mask = ce->engine->mask;
struct intel_engine_cs *engine;
for_each_engine_masked(engine, ce->engine->gt, mask, tmp)
intel_engine_pm_put(engine);
intel_timeline_exit(ce->timeline);
}
static int guc_virtual_context_alloc(struct intel_context *ce)
{
struct intel_engine_cs *engine = guc_virtual_get_sibling(ce->engine, 0);
return lrc_alloc(ce, engine);
}
static const struct intel_context_ops virtual_guc_context_ops = {
.alloc = guc_virtual_context_alloc,
.pre_pin = guc_virtual_context_pre_pin,
.pin = guc_virtual_context_pin,
.unpin = guc_context_unpin,
.post_unpin = guc_context_post_unpin,
.enter = guc_virtual_context_enter,
.exit = guc_virtual_context_exit,
.sched_disable = guc_context_sched_disable,
.destroy = guc_context_destroy,
.get_sibling = guc_virtual_get_sibling,
};
static void sanitize_hwsp(struct intel_engine_cs *engine) static void sanitize_hwsp(struct intel_engine_cs *engine)
{ {
struct intel_timeline *tl; struct intel_timeline *tl;
...@@ -1566,7 +1684,7 @@ int intel_guc_deregister_done_process_msg(struct intel_guc *guc, ...@@ -1566,7 +1684,7 @@ int intel_guc_deregister_done_process_msg(struct intel_guc *guc,
} else if (context_destroyed(ce)) { } else if (context_destroyed(ce)) {
/* Context has been destroyed */ /* Context has been destroyed */
release_guc_id(guc, ce); release_guc_id(guc, ce);
lrc_destroy(&ce->ref); __guc_context_destroy(ce);
} }
decr_outstanding_submission_g2h(guc); decr_outstanding_submission_g2h(guc);
...@@ -1681,3 +1799,107 @@ void intel_guc_submission_print_context_info(struct intel_guc *guc, ...@@ -1681,3 +1799,107 @@ void intel_guc_submission_print_context_info(struct intel_guc *guc,
atomic_read(&ce->guc_sched_state_no_lock)); atomic_read(&ce->guc_sched_state_no_lock));
} }
} }
static struct intel_context *
guc_create_virtual(struct intel_engine_cs **siblings, unsigned int count)
{
struct guc_virtual_engine *ve;
struct intel_guc *guc;
unsigned int n;
int err;
ve = kzalloc(sizeof(*ve), GFP_KERNEL);
if (!ve)
return ERR_PTR(-ENOMEM);
guc = &siblings[0]->gt->uc.guc;
ve->base.i915 = siblings[0]->i915;
ve->base.gt = siblings[0]->gt;
ve->base.uncore = siblings[0]->uncore;
ve->base.id = -1;
ve->base.uabi_class = I915_ENGINE_CLASS_INVALID;
ve->base.instance = I915_ENGINE_CLASS_INVALID_VIRTUAL;
ve->base.uabi_instance = I915_ENGINE_CLASS_INVALID_VIRTUAL;
ve->base.saturated = ALL_ENGINES;
ve->base.breadcrumbs = intel_breadcrumbs_create(&ve->base);
if (!ve->base.breadcrumbs) {
kfree(ve);
return ERR_PTR(-ENOMEM);
}
snprintf(ve->base.name, sizeof(ve->base.name), "virtual");
ve->base.sched_engine = i915_sched_engine_get(guc->sched_engine);
ve->base.cops = &virtual_guc_context_ops;
ve->base.request_alloc = guc_request_alloc;
ve->base.submit_request = guc_submit_request;
ve->base.flags = I915_ENGINE_IS_VIRTUAL;
intel_context_init(&ve->context, &ve->base);
for (n = 0; n < count; n++) {
struct intel_engine_cs *sibling = siblings[n];
GEM_BUG_ON(!is_power_of_2(sibling->mask));
if (sibling->mask & ve->base.mask) {
DRM_DEBUG("duplicate %s entry in load balancer\n",
sibling->name);
err = -EINVAL;
goto err_put;
}
ve->base.mask |= sibling->mask;
if (n != 0 && ve->base.class != sibling->class) {
DRM_DEBUG("invalid mixing of engine class, sibling %d, already %d\n",
sibling->class, ve->base.class);
err = -EINVAL;
goto err_put;
} else if (n == 0) {
ve->base.class = sibling->class;
ve->base.uabi_class = sibling->uabi_class;
snprintf(ve->base.name, sizeof(ve->base.name),
"v%dx%d", ve->base.class, count);
ve->base.context_size = sibling->context_size;
ve->base.emit_bb_start = sibling->emit_bb_start;
ve->base.emit_flush = sibling->emit_flush;
ve->base.emit_init_breadcrumb =
sibling->emit_init_breadcrumb;
ve->base.emit_fini_breadcrumb =
sibling->emit_fini_breadcrumb;
ve->base.emit_fini_breadcrumb_dw =
sibling->emit_fini_breadcrumb_dw;
ve->base.flags |= sibling->flags;
ve->base.props.timeslice_duration_ms =
sibling->props.timeslice_duration_ms;
ve->base.props.preempt_timeout_ms =
sibling->props.preempt_timeout_ms;
}
}
return &ve->context;
err_put:
intel_context_put(&ve->context);
return ERR_PTR(err);
}
bool intel_guc_virtual_engine_has_heartbeat(const struct intel_engine_cs *ve)
{
struct intel_engine_cs *engine;
intel_engine_mask_t tmp, mask = ve->mask;
for_each_engine_masked(engine, ve->gt, mask, tmp)
if (READ_ONCE(engine->props.heartbeat_interval_ms))
return true;
return false;
}
...@@ -26,6 +26,8 @@ void intel_guc_submission_print_info(struct intel_guc *guc, ...@@ -26,6 +26,8 @@ void intel_guc_submission_print_info(struct intel_guc *guc,
void intel_guc_submission_print_context_info(struct intel_guc *guc, void intel_guc_submission_print_context_info(struct intel_guc *guc,
struct drm_printer *p); struct drm_printer *p);
bool intel_guc_virtual_engine_has_heartbeat(const struct intel_engine_cs *ve);
static inline bool intel_guc_submission_is_supported(struct intel_guc *guc) static inline bool intel_guc_submission_is_supported(struct intel_guc *guc)
{ {
/* XXX: GuC submission is unavailable for now */ /* XXX: GuC submission is unavailable for now */
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment