Commit e4734057 authored by Zhi Wang, committed by Zhenyu Wang

drm/i915/gvt: vGPU workload scheduler

This patch introduces the vGPU workload scheduler routines.

The GVT workload scheduler is responsible for picking and executing GVT workloads
from the currently scheduled vGPU. Before a workload is submitted to the host i915,
the guest execlist context is shadowed in the host GVT shadow context, and the
instructions in the guest ring buffer are copied into the GVT shadow ring buffer.
Then the GVT-g workload scheduler scans the instructions in the guest ring buffer
and submits them to the host i915.
Signed-off-by: Zhi Wang <zhi.a.wang@intel.com>
Signed-off-by: Zhenyu Wang <zhenyuw@linux.intel.com>
parent 28c4c6ca
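Before the diff, a minimal userspace sketch of the per-ring picker loop this patch introduces (plain C with pthreads; an illustration, not the kernel code, and every name in it is invented for the sketch): one workload thread per ring sleeps on a wait queue, pops the next workload off the queue, dispatches it, and completes it before picking again.

/* scheduler_sketch.c -- illustrative only; build with: cc -pthread */
#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

struct workload {
	int id;
	struct workload *next;
};

struct ring_sched {
	pthread_mutex_t lock;    /* stands in for gvt->lock */
	pthread_cond_t waitq;    /* stands in for scheduler->waitq[ring_id] */
	struct workload *q_head; /* per-ring workload queue */
	int stop;
};

/* block until a workload is queued, like the wait_event_interruptible() call */
static struct workload *pick_next_workload(struct ring_sched *rs)
{
	struct workload *w;

	pthread_mutex_lock(&rs->lock);
	while (!rs->q_head && !rs->stop)
		pthread_cond_wait(&rs->waitq, &rs->lock);
	w = rs->q_head;
	if (w)
		rs->q_head = w->next;
	pthread_mutex_unlock(&rs->lock);
	return w;
}

static void *workload_thread(void *priv)
{
	struct ring_sched *rs = priv;
	struct workload *w;

	while ((w = pick_next_workload(rs)) != NULL) {
		/* dispatch_workload() + i915_wait_request() would go here */
		printf("dispatched and completed workload %d\n", w->id);
		free(w);
	}
	return NULL;
}

/* like queue_workload(): add to the queue tail, then wake the picker */
static void queue_workload(struct ring_sched *rs, int id)
{
	struct workload *w = calloc(1, sizeof(*w));
	struct workload **p;

	if (!w)
		return;
	w->id = id;
	pthread_mutex_lock(&rs->lock);
	for (p = &rs->q_head; *p; p = &(*p)->next)
		;
	*p = w;
	pthread_cond_signal(&rs->waitq);
	pthread_mutex_unlock(&rs->lock);
}

int main(void)
{
	struct ring_sched rs = {
		.lock = PTHREAD_MUTEX_INITIALIZER,
		.waitq = PTHREAD_COND_INITIALIZER,
	};
	pthread_t t;

	pthread_create(&t, NULL, workload_thread, &rs);
	queue_workload(&rs, 1);
	queue_workload(&rs, 2);

	pthread_mutex_lock(&rs.lock);
	rs.stop = 1;            /* picker drains the queue, then exits */
	pthread_cond_signal(&rs.waitq);
	pthread_mutex_unlock(&rs.lock);
	pthread_join(t, NULL);
	return 0;
}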
GVT_DIR := gvt
GVT_SOURCE := gvt.o aperture_gm.o handlers.o vgpu.o trace_points.o firmware.o \
	interrupt.o gtt.o cfg_space.o opregion.o mmio.o display.o edid.o \
	execlist.o scheduler.o
ccflags-y += -I$(src) -I$(src)/$(GVT_DIR) -Wall
i915-y += $(addprefix $(GVT_DIR)/, $(GVT_SOURCE))
@@ -45,4 +45,7 @@
#define gvt_dbg_el(fmt, args...) \
DRM_DEBUG_DRIVER("gvt: el: "fmt, ##args)
#define gvt_dbg_sched(fmt, args...) \
DRM_DEBUG_DRIVER("gvt: sched: "fmt, ##args)
#endif
@@ -394,7 +394,7 @@ static int complete_execlist_workload(struct intel_vgpu_workload *workload)
gvt_dbg_el("complete workload %p status %d\n", workload,
workload->status);
if (workload->status || vgpu->resetting)
goto out;
if (!list_empty(workload_q_head(vgpu, workload->ring_id))) {
@@ -672,3 +672,25 @@ int intel_vgpu_init_execlist(struct intel_vgpu *vgpu)
return 0;
}
void intel_vgpu_reset_execlist(struct intel_vgpu *vgpu,
unsigned long ring_bitmap)
{
int bit;
struct list_head *pos, *n;
struct intel_vgpu_workload *workload = NULL;
for_each_set_bit(bit, &ring_bitmap, sizeof(ring_bitmap) * 8) {
if (bit >= I915_NUM_ENGINES)
break;
/* free the unsubmitted workloads in the queue */
list_for_each_safe(pos, n, &vgpu->workload_q_head[bit]) {
workload = container_of(pos,
struct intel_vgpu_workload, list);
list_del_init(&workload->list);
free_workload(workload);
}
init_vgpu_execlist(vgpu, bit);
}
}
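intel_vgpu_reset_execlist() above walks the requested engine mask with the kernel's for_each_set_bit() helper. As a standalone illustration (assumed userspace C, no kernel headers), the iteration boils down to:

#include <stdio.h>

int main(void)
{
	unsigned long ring_bitmap = (1UL << 0) | (1UL << 2); /* e.g. two engines */
	unsigned int bit;

	/* what for_each_set_bit(bit, &ring_bitmap, BITS_PER_LONG) expands to */
	for (bit = 0; bit < sizeof(ring_bitmap) * 8; bit++) {
		if (!(ring_bitmap & (1UL << bit)))
			continue;
		printf("reset engine %u\n", bit);
	}
	return 0;
}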
@@ -182,4 +182,7 @@ int intel_vgpu_init_execlist(struct intel_vgpu *vgpu);
int intel_vgpu_submit_execlist(struct intel_vgpu *vgpu, int ring_id);
void intel_vgpu_reset_execlist(struct intel_vgpu *vgpu,
unsigned long ring_bitmap);
#endif /*_GVT_EXECLIST_H_*/
@@ -177,6 +177,7 @@ void intel_gvt_clean_device(struct drm_i915_private *dev_priv)
return;
clean_service_thread(gvt);
intel_gvt_clean_workload_scheduler(gvt);
intel_gvt_clean_opregion(gvt);
intel_gvt_clean_gtt(gvt);
intel_gvt_clean_irq(gvt);
@@ -239,14 +240,20 @@ int intel_gvt_init_device(struct drm_i915_private *dev_priv)
if (ret)
goto out_clean_gtt;
ret = intel_gvt_init_workload_scheduler(gvt);
if (ret)
goto out_clean_opregion;
ret = init_service_thread(gvt);
if (ret)
goto out_clean_workload_scheduler;
gvt_dbg_core("gvt device creation is done\n"); gvt_dbg_core("gvt device creation is done\n");
gvt->initialized = true; gvt->initialized = true;
return 0; return 0;
out_clean_workload_scheduler:
intel_gvt_clean_workload_scheduler(gvt);
out_clean_opregion:
intel_gvt_clean_opregion(gvt);
out_clean_gtt:
...
@@ -151,6 +151,9 @@ struct intel_vgpu {
struct intel_vgpu_execlist execlist[I915_NUM_ENGINES];
struct list_head workload_q_head[I915_NUM_ENGINES];
struct kmem_cache *workloads;
atomic_t running_workload_num;
struct i915_gem_context *shadow_ctx;
struct notifier_block shadow_ctx_notifier_block;
};
struct intel_gvt_gm {
...
@@ -227,11 +227,32 @@ static int mul_force_wake_write(struct intel_vgpu *vgpu,
return 0;
}
static int handle_device_reset(struct intel_vgpu *vgpu, unsigned int offset,
void *p_data, unsigned int bytes, unsigned long bitmap)
{
struct intel_gvt_workload_scheduler *scheduler =
&vgpu->gvt->scheduler;
vgpu->resetting = true;
if (scheduler->current_vgpu == vgpu) {
mutex_unlock(&vgpu->gvt->lock);
intel_gvt_wait_vgpu_idle(vgpu);
mutex_lock(&vgpu->gvt->lock);
}
intel_vgpu_reset_execlist(vgpu, bitmap);
vgpu->resetting = false;
return 0;
}
static int gdrst_mmio_write(struct intel_vgpu *vgpu, unsigned int offset,
void *p_data, unsigned int bytes)
{
u32 data;
u64 bitmap = 0;
data = vgpu_vreg(vgpu, offset);
@@ -260,7 +281,7 @@ static int gdrst_mmio_write(struct intel_vgpu *vgpu, unsigned int offset,
if (HAS_BSD2(vgpu->gvt->dev_priv))
bitmap |= (1 << VCS2);
}
return handle_device_reset(vgpu, offset, p_data, bytes, bitmap);
}
static int gmbus_mmio_read(struct intel_vgpu *vgpu, unsigned int offset,
...
/*
* Copyright(c) 2011-2016 Intel Corporation. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice (including the next
* paragraph) shall be included in all copies or substantial portions of the
* Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*
* Authors:
* Zhi Wang <zhi.a.wang@intel.com>
*
* Contributors:
* Ping Gao <ping.a.gao@intel.com>
* Tina Zhang <tina.zhang@intel.com>
* Changbin Du <changbin.du@intel.com>
* Min He <min.he@intel.com>
* Bing Niu <bing.niu@intel.com>
* Zhenyu Wang <zhenyuw@linux.intel.com>
*
*/
#include "i915_drv.h"
#include <linux/kthread.h>
#define RING_CTX_OFF(x) \
offsetof(struct execlist_ring_context, x)
void set_context_pdp_root_pointer(struct execlist_ring_context *ring_context,
u32 pdp[8])
{
struct execlist_mmio_pair *pdp_pair = &ring_context->pdp3_UDW;
int i;
for (i = 0; i < 8; i++)
pdp_pair[i].val = pdp[7 - i];
}
static int populate_shadow_context(struct intel_vgpu_workload *workload)
{
struct intel_vgpu *vgpu = workload->vgpu;
struct intel_gvt *gvt = vgpu->gvt;
int ring_id = workload->ring_id;
struct i915_gem_context *shadow_ctx = workload->vgpu->shadow_ctx;
struct drm_i915_gem_object *ctx_obj =
shadow_ctx->engine[ring_id].state->obj;
struct execlist_ring_context *shadow_ring_context;
struct page *page;
void *dst;
unsigned long context_gpa, context_page_num;
int i;
gvt_dbg_sched("ring id %d workload lrca %x", ring_id,
workload->ctx_desc.lrca);
context_page_num = intel_lr_context_size(
&gvt->dev_priv->engine[ring_id]);
context_page_num = context_page_num >> PAGE_SHIFT;
if (IS_BROADWELL(gvt->dev_priv) && ring_id == RCS)
context_page_num = 19;
i = 2;
while (i < context_page_num) {
context_gpa = intel_vgpu_gma_to_gpa(vgpu->gtt.ggtt_mm,
(u32)((workload->ctx_desc.lrca + i) <<
GTT_PAGE_SHIFT));
if (context_gpa == INTEL_GVT_INVALID_ADDR) {
gvt_err("Invalid guest context descriptor\n");
return -EINVAL;
}
page = i915_gem_object_get_page(ctx_obj, LRC_PPHWSP_PN + i);
dst = kmap_atomic(page);
intel_gvt_hypervisor_read_gpa(vgpu, context_gpa, dst,
GTT_PAGE_SIZE);
kunmap_atomic(dst);
i++;
}
page = i915_gem_object_get_page(ctx_obj, LRC_STATE_PN);
shadow_ring_context = kmap_atomic(page);
#define COPY_REG(name) \
intel_gvt_hypervisor_read_gpa(vgpu, workload->ring_context_gpa \
+ RING_CTX_OFF(name.val), &shadow_ring_context->name.val, 4)
COPY_REG(ctx_ctrl);
COPY_REG(ctx_timestamp);
if (ring_id == RCS) {
COPY_REG(bb_per_ctx_ptr);
COPY_REG(rcs_indirect_ctx);
COPY_REG(rcs_indirect_ctx_offset);
}
#undef COPY_REG
set_context_pdp_root_pointer(shadow_ring_context,
workload->shadow_mm->shadow_page_table);
intel_gvt_hypervisor_read_gpa(vgpu,
workload->ring_context_gpa +
sizeof(*shadow_ring_context),
(void *)shadow_ring_context +
sizeof(*shadow_ring_context),
GTT_PAGE_SIZE - sizeof(*shadow_ring_context));
kunmap_atomic(shadow_ring_context);
return 0;
}
static int shadow_context_status_change(struct notifier_block *nb,
unsigned long action, void *data)
{
struct intel_vgpu *vgpu = container_of(nb,
struct intel_vgpu, shadow_ctx_notifier_block);
struct drm_i915_gem_request *req =
(struct drm_i915_gem_request *)data;
struct intel_gvt_workload_scheduler *scheduler =
&vgpu->gvt->scheduler;
struct intel_vgpu_workload *workload =
scheduler->current_workload[req->engine->id];
switch (action) {
case INTEL_CONTEXT_SCHEDULE_IN:
atomic_set(&workload->shadow_ctx_active, 1);
break;
case INTEL_CONTEXT_SCHEDULE_OUT:
atomic_set(&workload->shadow_ctx_active, 0);
break;
default:
WARN_ON(1);
return NOTIFY_OK;
}
wake_up(&workload->shadow_ctx_status_wq);
return NOTIFY_OK;
}
static int dispatch_workload(struct intel_vgpu_workload *workload)
{
struct intel_vgpu *vgpu = workload->vgpu;
struct intel_gvt *gvt = vgpu->gvt;
int ring_id = workload->ring_id;
struct i915_gem_context *shadow_ctx = workload->vgpu->shadow_ctx;
struct drm_i915_private *dev_priv = workload->vgpu->gvt->dev_priv;
int ret;
gvt_dbg_sched("ring id %d prepare to dispatch workload %p\n",
ring_id, workload);
shadow_ctx->desc_template = workload->ctx_desc.addressing_mode <<
GEN8_CTX_ADDRESSING_MODE_SHIFT;
workload->req = i915_gem_request_alloc(&dev_priv->engine[ring_id],
shadow_ctx);
if (IS_ERR_OR_NULL(workload->req)) {
gvt_err("fail to allocate gem request\n");
workload->status = PTR_ERR(workload->req);
workload->req = NULL;
return workload->status;
}
gvt_dbg_sched("ring id %d get i915 gem request %p\n",
ring_id, workload->req);
mutex_lock(&gvt->lock);
ret = populate_shadow_context(workload);
if (ret)
goto err;
if (workload->prepare) {
ret = workload->prepare(workload);
if (ret)
goto err;
}
mutex_unlock(&gvt->lock);
gvt_dbg_sched("ring id %d submit workload to i915 %p\n",
ring_id, workload->req);
i915_add_request_no_flush(workload->req);
workload->dispatched = true;
return 0;
err:
workload->status = ret;
if (workload->req)
workload->req = NULL;
mutex_unlock(&gvt->lock);
return ret;
}
static struct intel_vgpu_workload *pick_next_workload(
struct intel_gvt *gvt, int ring_id)
{
struct intel_gvt_workload_scheduler *scheduler = &gvt->scheduler;
struct intel_vgpu_workload *workload = NULL;
mutex_lock(&gvt->lock);
/*
* no current vgpu / will be scheduled out / no workload
* bail out
*/
if (!scheduler->current_vgpu) {
gvt_dbg_sched("ring id %d stop - no current vgpu\n", ring_id);
goto out;
}
if (scheduler->need_reschedule) {
gvt_dbg_sched("ring id %d stop - will reschedule\n", ring_id);
goto out;
}
if (list_empty(workload_q_head(scheduler->current_vgpu, ring_id))) {
gvt_dbg_sched("ring id %d stop - no available workload\n",
ring_id);
goto out;
}
/*
* still have a current workload; maybe the workload dispatcher
* failed to submit it for some reason. Resubmit it.
*/
if (scheduler->current_workload[ring_id]) {
workload = scheduler->current_workload[ring_id];
gvt_dbg_sched("ring id %d still have current workload %p\n",
ring_id, workload);
goto out;
}
/*
* pick a workload as current workload
* once the current workload is set, scheduling policy routines
* will wait until the current workload is finished when trying to
* schedule out a vgpu.
*/
scheduler->current_workload[ring_id] = container_of(
workload_q_head(scheduler->current_vgpu, ring_id)->next,
struct intel_vgpu_workload, list);
workload = scheduler->current_workload[ring_id];
gvt_dbg_sched("ring id %d pick new workload %p\n", ring_id, workload);
atomic_inc(&workload->vgpu->running_workload_num);
out:
mutex_unlock(&gvt->lock);
return workload;
}
static void update_guest_context(struct intel_vgpu_workload *workload)
{
struct intel_vgpu *vgpu = workload->vgpu;
struct intel_gvt *gvt = vgpu->gvt;
int ring_id = workload->ring_id;
struct i915_gem_context *shadow_ctx = workload->vgpu->shadow_ctx;
struct drm_i915_gem_object *ctx_obj =
shadow_ctx->engine[ring_id].state->obj;
struct execlist_ring_context *shadow_ring_context;
struct page *page;
void *src;
unsigned long context_gpa, context_page_num;
int i;
gvt_dbg_sched("ring id %d workload lrca %x\n", ring_id,
workload->ctx_desc.lrca);
context_page_num = intel_lr_context_size(
&gvt->dev_priv->engine[ring_id]);
context_page_num = context_page_num >> PAGE_SHIFT;
if (IS_BROADWELL(gvt->dev_priv) && ring_id == RCS)
context_page_num = 19;
i = 2;
while (i < context_page_num) {
context_gpa = intel_vgpu_gma_to_gpa(vgpu->gtt.ggtt_mm,
(u32)((workload->ctx_desc.lrca + i) <<
GTT_PAGE_SHIFT));
if (context_gpa == INTEL_GVT_INVALID_ADDR) {
gvt_err("invalid guest context descriptor\n");
return;
}
page = i915_gem_object_get_page(ctx_obj, LRC_PPHWSP_PN + i);
src = kmap_atomic(page);
intel_gvt_hypervisor_write_gpa(vgpu, context_gpa, src,
GTT_PAGE_SIZE);
kunmap_atomic(src);
i++;
}
intel_gvt_hypervisor_write_gpa(vgpu, workload->ring_context_gpa +
RING_CTX_OFF(ring_header.val), &workload->rb_tail, 4);
page = i915_gem_object_get_page(ctx_obj, LRC_STATE_PN);
shadow_ring_context = kmap_atomic(page);
#define COPY_REG(name) \
intel_gvt_hypervisor_write_gpa(vgpu, workload->ring_context_gpa + \
RING_CTX_OFF(name.val), &shadow_ring_context->name.val, 4)
COPY_REG(ctx_ctrl);
COPY_REG(ctx_timestamp);
#undef COPY_REG
intel_gvt_hypervisor_write_gpa(vgpu,
workload->ring_context_gpa +
sizeof(*shadow_ring_context),
(void *)shadow_ring_context +
sizeof(*shadow_ring_context),
GTT_PAGE_SIZE - sizeof(*shadow_ring_context));
kunmap_atomic(shadow_ring_context);
}
static void complete_current_workload(struct intel_gvt *gvt, int ring_id)
{
struct intel_gvt_workload_scheduler *scheduler = &gvt->scheduler;
struct intel_vgpu_workload *workload;
mutex_lock(&gvt->lock);
workload = scheduler->current_workload[ring_id];
if (!workload->status && !workload->vgpu->resetting) {
wait_event(workload->shadow_ctx_status_wq,
!atomic_read(&workload->shadow_ctx_active));
update_guest_context(workload);
}
gvt_dbg_sched("ring id %d complete workload %p status %d\n",
ring_id, workload, workload->status);
scheduler->current_workload[ring_id] = NULL;
atomic_dec(&workload->vgpu->running_workload_num);
list_del_init(&workload->list);
workload->complete(workload);
wake_up(&scheduler->workload_complete_wq);
mutex_unlock(&gvt->lock);
}
struct workload_thread_param {
struct intel_gvt *gvt;
int ring_id;
};
static int workload_thread(void *priv)
{
struct workload_thread_param *p = (struct workload_thread_param *)priv;
struct intel_gvt *gvt = p->gvt;
int ring_id = p->ring_id;
struct intel_gvt_workload_scheduler *scheduler = &gvt->scheduler;
struct intel_vgpu_workload *workload = NULL;
int ret;
bool need_force_wake = IS_SKYLAKE(gvt->dev_priv);
kfree(p);
gvt_dbg_core("workload thread for ring %d started\n", ring_id);
while (!kthread_should_stop()) {
ret = wait_event_interruptible(scheduler->waitq[ring_id],
kthread_should_stop() ||
(workload = pick_next_workload(gvt, ring_id)));
WARN_ON_ONCE(ret);
if (kthread_should_stop())
break;
gvt_dbg_sched("ring id %d next workload %p vgpu %d\n",
workload->ring_id, workload,
workload->vgpu->id);
intel_runtime_pm_get(gvt->dev_priv);
/*
* Always take i915 big lock first
*/
ret = i915_mutex_lock_interruptible(&gvt->dev_priv->drm);
if (ret < 0) {
gvt_err("i915 submission is not available, retry\n");
schedule_timeout(1);
continue;
}
gvt_dbg_sched("ring id %d will dispatch workload %p\n",
workload->ring_id, workload);
if (need_force_wake)
intel_uncore_forcewake_get(gvt->dev_priv,
FORCEWAKE_ALL);
ret = dispatch_workload(workload);
if (ret) {
gvt_err("fail to dispatch workload, skip\n");
goto complete;
}
gvt_dbg_sched("ring id %d wait workload %p\n",
workload->ring_id, workload);
workload->status = i915_wait_request(workload->req,
I915_WAIT_INTERRUPTIBLE | I915_WAIT_LOCKED,
NULL, NULL);
if (workload->status != 0)
gvt_err("fail to wait workload, skip\n");
complete:
gvt_dbg_sched("will complete workload %p\n, status: %d\n",
workload, workload->status);
complete_current_workload(gvt, ring_id);
if (need_force_wake)
intel_uncore_forcewake_put(gvt->dev_priv,
FORCEWAKE_ALL);
mutex_unlock(&gvt->dev_priv->drm.struct_mutex);
intel_runtime_pm_put(gvt->dev_priv);
}
return 0;
}
void intel_gvt_wait_vgpu_idle(struct intel_vgpu *vgpu)
{
struct intel_gvt *gvt = vgpu->gvt;
struct intel_gvt_workload_scheduler *scheduler = &gvt->scheduler;
if (atomic_read(&vgpu->running_workload_num)) {
gvt_dbg_sched("wait vgpu idle\n");
wait_event(scheduler->workload_complete_wq,
!atomic_read(&vgpu->running_workload_num));
}
}
void intel_gvt_clean_workload_scheduler(struct intel_gvt *gvt)
{
struct intel_gvt_workload_scheduler *scheduler = &gvt->scheduler;
int i;
gvt_dbg_core("clean workload scheduler\n");
for (i = 0; i < I915_NUM_ENGINES; i++) {
if (scheduler->thread[i]) {
kthread_stop(scheduler->thread[i]);
scheduler->thread[i] = NULL;
}
}
}
int intel_gvt_init_workload_scheduler(struct intel_gvt *gvt)
{
struct intel_gvt_workload_scheduler *scheduler = &gvt->scheduler;
struct workload_thread_param *param = NULL;
int ret;
int i;
gvt_dbg_core("init workload scheduler\n");
init_waitqueue_head(&scheduler->workload_complete_wq);
for (i = 0; i < I915_NUM_ENGINES; i++) {
init_waitqueue_head(&scheduler->waitq[i]);
param = kzalloc(sizeof(*param), GFP_KERNEL);
if (!param) {
ret = -ENOMEM;
goto err;
}
param->gvt = gvt;
param->ring_id = i;
scheduler->thread[i] = kthread_run(workload_thread, param,
"gvt workload %d", i);
if (IS_ERR(scheduler->thread[i])) {
gvt_err("fail to create workload thread\n");
ret = PTR_ERR(scheduler->thread[i]);
goto err;
}
}
return 0;
err:
intel_gvt_clean_workload_scheduler(gvt);
kfree(param);
param = NULL;
return ret;
}
void intel_vgpu_clean_gvt_context(struct intel_vgpu *vgpu)
{
struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv;
atomic_notifier_chain_unregister(&vgpu->shadow_ctx->status_notifier,
&vgpu->shadow_ctx_notifier_block);
mutex_lock(&dev_priv->drm.struct_mutex);
/* a little hacky to mark as ctx closed */
vgpu->shadow_ctx->closed = true;
i915_gem_context_put(vgpu->shadow_ctx);
mutex_unlock(&dev_priv->drm.struct_mutex);
}
int intel_vgpu_init_gvt_context(struct intel_vgpu *vgpu)
{
atomic_set(&vgpu->running_workload_num, 0);
vgpu->shadow_ctx = i915_gem_context_create_gvt(
&vgpu->gvt->dev_priv->drm);
if (IS_ERR(vgpu->shadow_ctx))
return PTR_ERR(vgpu->shadow_ctx);
vgpu->shadow_ctx->engine[RCS].initialised = true;
vgpu->shadow_ctx_notifier_block.notifier_call =
shadow_context_status_change;
atomic_notifier_chain_register(&vgpu->shadow_ctx->status_notifier,
&vgpu->shadow_ctx_notifier_block);
return 0;
}
@@ -19,13 +19,32 @@
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*
* Authors:
* Zhi Wang <zhi.a.wang@intel.com>
*
* Contributors:
* Ping Gao <ping.a.gao@intel.com>
* Tina Zhang <tina.zhang@intel.com>
* Changbin Du <changbin.du@intel.com>
* Min He <min.he@intel.com>
* Bing Niu <bing.niu@intel.com>
* Zhenyu Wang <zhenyuw@linux.intel.com>
*
*/
#ifndef _GVT_SCHEDULER_H_
#define _GVT_SCHEDULER_H_
struct intel_gvt_workload_scheduler {
struct intel_vgpu *current_vgpu;
struct intel_vgpu *next_vgpu;
struct intel_vgpu_workload *current_workload[I915_NUM_ENGINES];
bool need_reschedule;
wait_queue_head_t workload_complete_wq;
struct task_struct *thread[I915_NUM_ENGINES];
wait_queue_head_t waitq[I915_NUM_ENGINES];
};
struct intel_vgpu_workload {
@@ -47,6 +66,7 @@ struct intel_vgpu_workload {
struct execlist_ctx_descriptor_format ctx_desc;
struct execlist_ring_context *ring_context;
unsigned long rb_head, rb_tail, rb_ctl, rb_start;
bool restore_inhibit;
struct intel_vgpu_elsp_dwords elsp_dwords;
bool emulate_schedule_in;
atomic_t shadow_ctx_active;
@@ -57,8 +77,21 @@ struct intel_vgpu_workload {
#define workload_q_head(vgpu, ring_id) \
(&(vgpu->workload_q_head[ring_id]))
#define queue_workload(workload) do { \
list_add_tail(&workload->list, \
workload_q_head(workload->vgpu, workload->ring_id)); \
wake_up(&workload->vgpu->gvt-> \
scheduler.waitq[workload->ring_id]); \
} while (0)
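Since queue_workload() now expands to two statements (queue the workload, then wake the per-ring wait queue), the patch wraps it in do { ... } while (0) so it still behaves as a single statement at every call site. A minimal standalone illustration of the idiom (names invented for the sketch, not the GVT code):

#include <stdio.h>

static void enqueue(int x) { printf("enqueue %d\n", x); }
static void wake(int x)    { printf("wake %d\n", x); }

/*
 * two statements; without the do/while(0) wrapper, an if/else caller
 * would break: the else would no longer parse against the right if
 */
#define queue_and_wake(x) do { \
	enqueue(x); \
	wake(x); \
} while (0)

int main(void)
{
	int submit = 0;

	if (submit)
		queue_and_wake(1);   /* both calls are guarded together */
	else
		printf("nothing queued\n");
	return 0;
}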
int intel_gvt_init_workload_scheduler(struct intel_gvt *gvt);
void intel_gvt_clean_workload_scheduler(struct intel_gvt *gvt);
void intel_gvt_wait_vgpu_idle(struct intel_vgpu *vgpu);
int intel_vgpu_init_gvt_context(struct intel_vgpu *vgpu);
void intel_vgpu_clean_gvt_context(struct intel_vgpu *vgpu);
#endif
@@ -146,6 +146,7 @@ void intel_gvt_destroy_vgpu(struct intel_vgpu *vgpu)
vgpu->active = false;
idr_remove(&gvt->vgpu_idr, vgpu->id);
intel_vgpu_clean_gvt_context(vgpu);
intel_vgpu_clean_execlist(vgpu);
intel_vgpu_clean_display(vgpu);
intel_vgpu_clean_opregion(vgpu);
@@ -226,11 +227,17 @@ struct intel_vgpu *intel_gvt_create_vgpu(struct intel_gvt *gvt,
if (ret)
goto out_clean_display;
ret = intel_vgpu_init_gvt_context(vgpu);
if (ret)
goto out_clean_execlist;
vgpu->active = true;
mutex_unlock(&gvt->lock);
return vgpu;
out_clean_execlist:
intel_vgpu_clean_execlist(vgpu);
out_clean_display:
intel_vgpu_clean_display(vgpu);
out_clean_opregion:
...