Commit e4734057 authored by Zhi Wang, committed by Zhenyu Wang

drm/i915/gvt: vGPU workload scheduler

This patch introduces the vGPU workload scheduler routines.

The GVT workload scheduler is responsible for picking workloads from the
currently scheduled vGPU and executing them. Before a workload is submitted
to host i915, the guest execlist context is shadowed in the host GVT shadow
context and the instructions in the guest ring buffer are copied into the
GVT shadow ring buffer. The GVT-g workload scheduler then scans the
instructions in the guest ring buffer and submits the workload to host i915.
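Since the full scheduler.c diff is collapsed below, the flow above can be
sketched as a per-ring kernel thread. The helper names here
(pick_next_workload(), dispatch_workload(), complete_current_workload())
and the details are illustrative assumptions based on this description,
not necessarily the exact code in the patch:

#include <linux/kthread.h>
#include <linux/wait.h>

/* Illustrative sketch only: the helpers below stand in for whatever
 * scheduler.c actually implements. */
struct workload_thread_param {
	struct intel_gvt *gvt;
	int ring_id;
};

static int workload_thread(void *priv)
{
	struct workload_thread_param *p = priv;
	struct intel_gvt_workload_scheduler *scheduler = &p->gvt->scheduler;
	struct intel_vgpu_workload *workload = NULL;

	while (!kthread_should_stop()) {
		/* Sleep until queue_workload() wakes this ring's waitq. */
		wait_event_interruptible(scheduler->waitq[p->ring_id],
			kthread_should_stop() ||
			(workload = pick_next_workload(p->gvt, p->ring_id)));
		if (!workload)
			continue;

		/* Shadow the guest execlist context, copy the guest ring
		 * buffer into the shadow ring buffer, scan the commands,
		 * then submit a request to host i915. */
		workload->status = dispatch_workload(workload);

		/* Once the host request retires, emulate the context-switch
		 * status back into the guest and wake waiters on
		 * workload_complete_wq (e.g. a pending device reset). */
		complete_current_workload(p->gvt, p->ring_id);
	}
	return 0;
}

One thread per engine keeps submission on each ring ordered while still
letting the rings run independently of each other.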
Signed-off-by: Zhi Wang <zhi.a.wang@intel.com>
Signed-off-by: Zhenyu Wang <zhenyuw@linux.intel.com>
parent 28c4c6ca
GVT_DIR := gvt
GVT_SOURCE := gvt.o aperture_gm.o handlers.o vgpu.o trace_points.o firmware.o \
interrupt.o gtt.o cfg_space.o opregion.o mmio.o display.o edid.o \
-execlist.o
+execlist.o scheduler.o
ccflags-y += -I$(src) -I$(src)/$(GVT_DIR) -Wall
i915-y += $(addprefix $(GVT_DIR)/, $(GVT_SOURCE))
@@ -45,4 +45,7 @@
#define gvt_dbg_el(fmt, args...) \
DRM_DEBUG_DRIVER("gvt: el: "fmt, ##args)
#define gvt_dbg_sched(fmt, args...) \
DRM_DEBUG_DRIVER("gvt: sched: "fmt, ##args)
#endif
@@ -394,7 +394,7 @@ static int complete_execlist_workload(struct intel_vgpu_workload *workload)
gvt_dbg_el("complete workload %p status %d\n", workload,
workload->status);
-if (workload->status)
+if (workload->status || vgpu->resetting)
goto out;
if (!list_empty(workload_q_head(vgpu, workload->ring_id))) {
@@ -672,3 +672,25 @@ int intel_vgpu_init_execlist(struct intel_vgpu *vgpu)
return 0;
}
void intel_vgpu_reset_execlist(struct intel_vgpu *vgpu,
unsigned long ring_bitmap)
{
int bit;
struct list_head *pos, *n;
struct intel_vgpu_workload *workload = NULL;
for_each_set_bit(bit, &ring_bitmap, sizeof(ring_bitmap) * 8) {
if (bit >= I915_NUM_ENGINES)
break;
/* free the unsubmitted workloads in the queue */
list_for_each_safe(pos, n, &vgpu->workload_q_head[bit]) {
workload = container_of(pos,
struct intel_vgpu_workload, list);
list_del_init(&workload->list);
free_workload(workload);
}
init_vgpu_execlist(vgpu, bit);
}
}
@@ -182,4 +182,7 @@ int intel_vgpu_init_execlist(struct intel_vgpu *vgpu);
int intel_vgpu_submit_execlist(struct intel_vgpu *vgpu, int ring_id);
void intel_vgpu_reset_execlist(struct intel_vgpu *vgpu,
unsigned long ring_bitmap);
#endif /*_GVT_EXECLIST_H_*/
@@ -177,6 +177,7 @@ void intel_gvt_clean_device(struct drm_i915_private *dev_priv)
return;
clean_service_thread(gvt);
intel_gvt_clean_workload_scheduler(gvt);
intel_gvt_clean_opregion(gvt);
intel_gvt_clean_gtt(gvt);
intel_gvt_clean_irq(gvt);
@@ -239,14 +240,20 @@ int intel_gvt_init_device(struct drm_i915_private *dev_priv)
if (ret)
goto out_clean_gtt;
-ret = init_service_thread(gvt);
+ret = intel_gvt_init_workload_scheduler(gvt);
if (ret)
goto out_clean_opregion;
+ret = init_service_thread(gvt);
+if (ret)
+goto out_clean_workload_scheduler;
gvt_dbg_core("gvt device creation is done\n");
gvt->initialized = true;
return 0;
out_clean_workload_scheduler:
intel_gvt_clean_workload_scheduler(gvt);
out_clean_opregion:
intel_gvt_clean_opregion(gvt);
out_clean_gtt:
......
@@ -151,6 +151,9 @@ struct intel_vgpu {
struct intel_vgpu_execlist execlist[I915_NUM_ENGINES];
struct list_head workload_q_head[I915_NUM_ENGINES];
struct kmem_cache *workloads;
atomic_t running_workload_num;
struct i915_gem_context *shadow_ctx;
struct notifier_block shadow_ctx_notifier_block;
};
struct intel_gvt_gm {
......
@@ -227,11 +227,32 @@ static int mul_force_wake_write(struct intel_vgpu *vgpu,
return 0;
}
static int handle_device_reset(struct intel_vgpu *vgpu, unsigned int offset,
void *p_data, unsigned int bytes, unsigned long bitmap)
{
struct intel_gvt_workload_scheduler *scheduler =
&vgpu->gvt->scheduler;
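/* Make complete_execlist_workload() bail out early (see the
 * vgpu->resetting check added in execlist.c) while the reset runs. */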
vgpu->resetting = true;
if (scheduler->current_vgpu == vgpu) {
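/* Release gvt->lock across the wait so the scheduler thread can
 * retire this vGPU's in-flight workloads. */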
mutex_unlock(&vgpu->gvt->lock);
intel_gvt_wait_vgpu_idle(vgpu);
mutex_lock(&vgpu->gvt->lock);
}
intel_vgpu_reset_execlist(vgpu, bitmap);
vgpu->resetting = false;
return 0;
}
static int gdrst_mmio_write(struct intel_vgpu *vgpu, unsigned int offset,
void *p_data, unsigned int bytes)
{
u32 data;
-u32 bitmap = 0;
+u64 bitmap = 0;
data = vgpu_vreg(vgpu, offset);
@@ -260,7 +281,7 @@ static int gdrst_mmio_write(struct intel_vgpu *vgpu, unsigned int offset,
if (HAS_BSD2(vgpu->gvt->dev_priv))
bitmap |= (1 << VCS2);
}
-return 0;
+return handle_device_reset(vgpu, offset, p_data, bytes, bitmap);
}
static int gmbus_mmio_read(struct intel_vgpu *vgpu, unsigned int offset,
......
This diff is collapsed.
@@ -19,13 +19,32 @@
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*
* Authors:
* Zhi Wang <zhi.a.wang@intel.com>
*
* Contributors:
* Ping Gao <ping.a.gao@intel.com>
* Tina Zhang <tina.zhang@intel.com>
Changbin Du <changbin.du@intel.com>
* Min He <min.he@intel.com>
* Bing Niu <bing.niu@intel.com>
* Zhenyu Wang <zhenyuw@linux.intel.com>
*
*/
#ifndef _GVT_SCHEDULER_H_
#define _GVT_SCHEDULER_H_
struct intel_gvt_workload_scheduler {
struct list_head workload_q_head[I915_NUM_ENGINES];
struct intel_vgpu *current_vgpu;
struct intel_vgpu *next_vgpu;
struct intel_vgpu_workload *current_workload[I915_NUM_ENGINES];
bool need_reschedule;
wait_queue_head_t workload_complete_wq;
struct task_struct *thread[I915_NUM_ENGINES];
wait_queue_head_t waitq[I915_NUM_ENGINES];
};
struct intel_vgpu_workload {
@@ -47,6 +66,7 @@ struct intel_vgpu_workload {
struct execlist_ctx_descriptor_format ctx_desc;
struct execlist_ring_context *ring_context;
unsigned long rb_head, rb_tail, rb_ctl, rb_start;
bool restore_inhibit;
struct intel_vgpu_elsp_dwords elsp_dwords;
bool emulate_schedule_in;
atomic_t shadow_ctx_active;
@@ -57,8 +77,21 @@ struct intel_vgpu_workload {
#define workload_q_head(vgpu, ring_id) \
(&(vgpu->workload_q_head[ring_id]))
-#define queue_workload(workload) \
+#define queue_workload(workload) do { \
list_add_tail(&workload->list, \
-workload_q_head(workload->vgpu, workload->ring_id))
+workload_q_head(workload->vgpu, workload->ring_id)); \
+wake_up(&workload->vgpu->gvt-> \
+scheduler.waitq[workload->ring_id]); \
+} while (0)
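As a usage illustration, the producer side (e.g. the ELSP write emulation
in execlist.c) would allocate a workload from the per-vGPU cache, fill it
in, and hand it off; the macro now both queues it and wakes the matching
scheduler thread. Everything here apart from queue_workload() and the
struct fields it touches is a hypothetical sketch:

#include <linux/slab.h>

/* Hypothetical caller: only queue_workload() and the struct fields
 * used below come from this patch. */
static int submit_guest_context(struct intel_vgpu *vgpu, int ring_id,
		struct execlist_ctx_descriptor_format *desc)
{
	struct intel_vgpu_workload *workload;

	workload = kmem_cache_zalloc(vgpu->workloads, GFP_KERNEL);
	if (!workload)
		return -ENOMEM;

	workload->vgpu = vgpu;
	workload->ring_id = ring_id;
	workload->ctx_desc = *desc;

	/* Adds to vgpu->workload_q_head[ring_id] and wakes the matching
	 * scheduler thread via scheduler.waitq[ring_id]. */
	queue_workload(workload);
	return 0;
}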
int intel_gvt_init_workload_scheduler(struct intel_gvt *gvt);
void intel_gvt_clean_workload_scheduler(struct intel_gvt *gvt);
void intel_gvt_wait_vgpu_idle(struct intel_vgpu *vgpu);
int intel_vgpu_init_gvt_context(struct intel_vgpu *vgpu);
void intel_vgpu_clean_gvt_context(struct intel_vgpu *vgpu);
#endif
@@ -146,6 +146,7 @@ void intel_gvt_destroy_vgpu(struct intel_vgpu *vgpu)
vgpu->active = false;
idr_remove(&gvt->vgpu_idr, vgpu->id);
intel_vgpu_clean_gvt_context(vgpu);
intel_vgpu_clean_execlist(vgpu);
intel_vgpu_clean_display(vgpu);
intel_vgpu_clean_opregion(vgpu);
@@ -226,11 +227,17 @@ struct intel_vgpu *intel_gvt_create_vgpu(struct intel_gvt *gvt,
if (ret)
goto out_clean_display;
ret = intel_vgpu_init_gvt_context(vgpu);
if (ret)
goto out_clean_execlist;
vgpu->active = true;
mutex_unlock(&gvt->lock);
return vgpu;
out_clean_execlist:
intel_vgpu_clean_execlist(vgpu);
out_clean_display:
intel_vgpu_clean_display(vgpu);
out_clean_opregion:
......