Commit f13eed7a authored by Jani Nikula

Merge tag 'gvt-fixes-2017-03-17' of https://github.com/01org/gvt-linux into drm-intel-fixes

gvt-fixes-2017-03-17

- force_nonpriv reg handling in cmd parser (Yan)
- gvt error message cleanup (Tina)
- i915_wait_request fix from Chris
- KVM srcu warning fix (Changbin)
- ensure shadow ctx pinned (Chuanxiao)
- critical gvt scheduler interval time fix (Zhenyu)
- etc.
Signed-off-by: Jani Nikula <jani.nikula@intel.com>
parents b7048ea1 2958b901
@@ -242,7 +242,7 @@ static int alloc_resource(struct intel_vgpu *vgpu,
const char *item;
if (!param->low_gm_sz || !param->high_gm_sz || !param->fence_sz) {
-gvt_err("Invalid vGPU creation params\n");
+gvt_vgpu_err("Invalid vGPU creation params\n");
return -EINVAL;
}
@@ -285,9 +285,9 @@ static int alloc_resource(struct intel_vgpu *vgpu,
return 0;
no_enough_resource:
-gvt_err("vgpu%d: fail to allocate resource %s\n", vgpu->id, item);
-gvt_err("vgpu%d: request %luMB avail %luMB max %luMB taken %luMB\n",
-vgpu->id, BYTES_TO_MB(request), BYTES_TO_MB(avail),
+gvt_vgpu_err("fail to allocate resource %s\n", item);
+gvt_vgpu_err("request %luMB avail %luMB max %luMB taken %luMB\n",
+BYTES_TO_MB(request), BYTES_TO_MB(avail),
BYTES_TO_MB(max), BYTES_TO_MB(taken));
return -ENOSPC;
}
...
@@ -817,6 +817,25 @@ static bool is_shadowed_mmio(unsigned int offset)
return ret;
}
+static inline bool is_force_nonpriv_mmio(unsigned int offset)
+{
+return (offset >= 0x24d0 && offset < 0x2500);
+}
+static int force_nonpriv_reg_handler(struct parser_exec_state *s,
+unsigned int offset, unsigned int index)
+{
+struct intel_gvt *gvt = s->vgpu->gvt;
+unsigned int data = cmd_val(s, index + 1);
+if (!intel_gvt_in_force_nonpriv_whitelist(gvt, data)) {
+gvt_err("Unexpected forcenonpriv 0x%x LRI write, value=0x%x\n",
+offset, data);
+return -EINVAL;
+}
+return 0;
+}
static int cmd_reg_handler(struct parser_exec_state *s,
unsigned int offset, unsigned int index, char *cmd)
{
@@ -824,23 +843,26 @@ static int cmd_reg_handler(struct parser_exec_state *s,
struct intel_gvt *gvt = vgpu->gvt;
if (offset + 4 > gvt->device_info.mmio_size) {
-gvt_err("%s access to (%x) outside of MMIO range\n",
+gvt_vgpu_err("%s access to (%x) outside of MMIO range\n",
cmd, offset);
return -EINVAL;
}
if (!intel_gvt_mmio_is_cmd_access(gvt, offset)) {
-gvt_err("vgpu%d: %s access to non-render register (%x)\n",
-s->vgpu->id, cmd, offset);
+gvt_vgpu_err("%s access to non-render register (%x)\n",
+cmd, offset);
return 0;
}
if (is_shadowed_mmio(offset)) {
-gvt_err("vgpu%d: found access of shadowed MMIO %x\n",
-s->vgpu->id, offset);
+gvt_vgpu_err("found access of shadowed MMIO %x\n", offset);
return 0;
}
+if (is_force_nonpriv_mmio(offset) &&
+force_nonpriv_reg_handler(s, offset, index))
+return -EINVAL;
if (offset == i915_mmio_reg_offset(DERRMR) ||
offset == i915_mmio_reg_offset(FORCEWAKE_MT)) {
/* Writing to HW VGT_PVINFO_PAGE offset will be discarded */
@@ -1008,7 +1030,7 @@ static int cmd_handler_pipe_control(struct parser_exec_state *s)
ret = cmd_reg_handler(s, 0x2358, 1, "pipe_ctrl");
else if (post_sync == 1) {
/* check ggtt*/
-if ((cmd_val(s, 2) & (1 << 2))) {
+if ((cmd_val(s, 1) & PIPE_CONTROL_GLOBAL_GTT_IVB)) {
gma = cmd_val(s, 2) & GENMASK(31, 3);
if (gmadr_bytes == 8)
gma |= (cmd_gma_hi(s, 3)) << 32;
@@ -1129,6 +1151,7 @@ static int skl_decode_mi_display_flip(struct parser_exec_state *s,
struct mi_display_flip_command_info *info)
{
struct drm_i915_private *dev_priv = s->vgpu->gvt->dev_priv;
+struct intel_vgpu *vgpu = s->vgpu;
u32 dword0 = cmd_val(s, 0);
u32 dword1 = cmd_val(s, 1);
u32 dword2 = cmd_val(s, 2);
@@ -1167,7 +1190,7 @@ static int skl_decode_mi_display_flip(struct parser_exec_state *s,
break;
default:
-gvt_err("unknown plane code %d\n", plane);
+gvt_vgpu_err("unknown plane code %d\n", plane);
return -EINVAL;
}
@@ -1274,25 +1297,26 @@ static int update_plane_mmio_from_mi_display_flip(
static int cmd_handler_mi_display_flip(struct parser_exec_state *s)
{
struct mi_display_flip_command_info info;
+struct intel_vgpu *vgpu = s->vgpu;
int ret;
int i;
int len = cmd_length(s);
ret = decode_mi_display_flip(s, &info);
if (ret) {
-gvt_err("fail to decode MI display flip command\n");
+gvt_vgpu_err("fail to decode MI display flip command\n");
return ret;
}
ret = check_mi_display_flip(s, &info);
if (ret) {
-gvt_err("invalid MI display flip command\n");
+gvt_vgpu_err("invalid MI display flip command\n");
return ret;
}
ret = update_plane_mmio_from_mi_display_flip(s, &info);
if (ret) {
-gvt_err("fail to update plane mmio\n");
+gvt_vgpu_err("fail to update plane mmio\n");
return ret;
}
@@ -1350,7 +1374,8 @@ static inline int cmd_address_audit(struct parser_exec_state *s,
int ret;
if (op_size > max_surface_size) {
-gvt_err("command address audit fail name %s\n", s->info->name);
+gvt_vgpu_err("command address audit fail name %s\n",
+s->info->name);
return -EINVAL;
}
@@ -1367,7 +1392,7 @@ static inline int cmd_address_audit(struct parser_exec_state *s,
}
return 0;
err:
-gvt_err("cmd_parser: Malicious %s detected, addr=0x%lx, len=%d!\n",
+gvt_vgpu_err("cmd_parser: Malicious %s detected, addr=0x%lx, len=%d!\n",
s->info->name, guest_gma, op_size);
pr_err("cmd dump: ");
@@ -1412,8 +1437,10 @@ static int cmd_handler_mi_store_data_imm(struct parser_exec_state *s)
static inline int unexpected_cmd(struct parser_exec_state *s)
{
-gvt_err("vgpu%d: Unexpected %s in command buffer!\n",
-s->vgpu->id, s->info->name);
+struct intel_vgpu *vgpu = s->vgpu;
+gvt_vgpu_err("Unexpected %s in command buffer!\n", s->info->name);
return -EINVAL;
}
@@ -1516,7 +1543,7 @@ static int copy_gma_to_hva(struct intel_vgpu *vgpu, struct intel_vgpu_mm *mm,
while (gma != end_gma) {
gpa = intel_vgpu_gma_to_gpa(mm, gma);
if (gpa == INTEL_GVT_INVALID_ADDR) {
-gvt_err("invalid gma address: %lx\n", gma);
+gvt_vgpu_err("invalid gma address: %lx\n", gma);
return -EFAULT;
}
@@ -1557,6 +1584,7 @@ static uint32_t find_bb_size(struct parser_exec_state *s)
uint32_t bb_size = 0;
uint32_t cmd_len = 0;
bool met_bb_end = false;
+struct intel_vgpu *vgpu = s->vgpu;
u32 cmd;
/* get the start gm address of the batch buffer */
@@ -1565,7 +1593,7 @@ static uint32_t find_bb_size(struct parser_exec_state *s)
info = get_cmd_info(s->vgpu->gvt, cmd, s->ring_id);
if (info == NULL) {
-gvt_err("unknown cmd 0x%x, opcode=0x%x\n",
+gvt_vgpu_err("unknown cmd 0x%x, opcode=0x%x\n",
cmd, get_opcode(cmd, s->ring_id));
return -EINVAL;
}
@@ -1574,7 +1602,7 @@ static uint32_t find_bb_size(struct parser_exec_state *s)
gma, gma + 4, &cmd);
info = get_cmd_info(s->vgpu->gvt, cmd, s->ring_id);
if (info == NULL) {
-gvt_err("unknown cmd 0x%x, opcode=0x%x\n",
+gvt_vgpu_err("unknown cmd 0x%x, opcode=0x%x\n",
cmd, get_opcode(cmd, s->ring_id));
return -EINVAL;
}
@@ -1599,6 +1627,7 @@ static uint32_t find_bb_size(struct parser_exec_state *s)
static int perform_bb_shadow(struct parser_exec_state *s)
{
struct intel_shadow_bb_entry *entry_obj;
+struct intel_vgpu *vgpu = s->vgpu;
unsigned long gma = 0;
uint32_t bb_size;
void *dst = NULL;
@@ -1633,7 +1662,7 @@ static int perform_bb_shadow(struct parser_exec_state *s)
ret = i915_gem_object_set_to_cpu_domain(entry_obj->obj, false);
if (ret) {
-gvt_err("failed to set shadow batch to CPU\n");
+gvt_vgpu_err("failed to set shadow batch to CPU\n");
goto unmap_src;
}
@@ -1645,7 +1674,7 @@ static int perform_bb_shadow(struct parser_exec_state *s)
gma, gma + bb_size,
dst);
if (ret) {
-gvt_err("fail to copy guest ring buffer\n");
+gvt_vgpu_err("fail to copy guest ring buffer\n");
goto unmap_src;
}
@@ -1676,15 +1705,16 @@ static int cmd_handler_mi_batch_buffer_start(struct parser_exec_state *s)
{
bool second_level;
int ret = 0;
+struct intel_vgpu *vgpu = s->vgpu;
if (s->buf_type == BATCH_BUFFER_2ND_LEVEL) {
-gvt_err("Found MI_BATCH_BUFFER_START in 2nd level BB\n");
+gvt_vgpu_err("Found MI_BATCH_BUFFER_START in 2nd level BB\n");
return -EINVAL;
}
second_level = BATCH_BUFFER_2ND_LEVEL_BIT(cmd_val(s, 0)) == 1;
if (second_level && (s->buf_type != BATCH_BUFFER_INSTRUCTION)) {
-gvt_err("Jumping to 2nd level BB from RB is not allowed\n");
+gvt_vgpu_err("Jumping to 2nd level BB from RB is not allowed\n");
return -EINVAL;
}
@@ -1702,7 +1732,7 @@ static int cmd_handler_mi_batch_buffer_start(struct parser_exec_state *s)
if (batch_buffer_needs_scan(s)) {
ret = perform_bb_shadow(s);
if (ret < 0)
-gvt_err("invalid shadow batch buffer\n");
+gvt_vgpu_err("invalid shadow batch buffer\n");
} else {
/* emulate a batch buffer end to do return right */
ret = cmd_handler_mi_batch_buffer_end(s);
@@ -2429,6 +2459,7 @@ static int cmd_parser_exec(struct parser_exec_state *s)
int ret = 0;
cycles_t t0, t1, t2;
struct parser_exec_state s_before_advance_custom;
+struct intel_vgpu *vgpu = s->vgpu;
t0 = get_cycles();
@@ -2436,7 +2467,7 @@ static int cmd_parser_exec(struct parser_exec_state *s)
info = get_cmd_info(s->vgpu->gvt, cmd, s->ring_id);
if (info == NULL) {
-gvt_err("unknown cmd 0x%x, opcode=0x%x\n",
+gvt_vgpu_err("unknown cmd 0x%x, opcode=0x%x\n",
cmd, get_opcode(cmd, s->ring_id));
return -EINVAL;
}
@@ -2452,7 +2483,7 @@ static int cmd_parser_exec(struct parser_exec_state *s)
if (info->handler) {
ret = info->handler(s);
if (ret < 0) {
-gvt_err("%s handler error\n", info->name);
+gvt_vgpu_err("%s handler error\n", info->name);
return ret;
}
}
@@ -2463,7 +2494,7 @@ static int cmd_parser_exec(struct parser_exec_state *s)
if (!(info->flag & F_IP_ADVANCE_CUSTOM)) {
ret = cmd_advance_default(s);
if (ret) {
-gvt_err("%s IP advance error\n", info->name);
+gvt_vgpu_err("%s IP advance error\n", info->name);
return ret;
}
}
@@ -2486,6 +2517,7 @@ static int command_scan(struct parser_exec_state *s,
unsigned long gma_head, gma_tail, gma_bottom;
int ret = 0;
+struct intel_vgpu *vgpu = s->vgpu;
gma_head = rb_start + rb_head;
gma_tail = rb_start + rb_tail;
@@ -2497,7 +2529,7 @@ static int command_scan(struct parser_exec_state *s,
if (s->buf_type == RING_BUFFER_INSTRUCTION) {
if (!(s->ip_gma >= rb_start) ||
!(s->ip_gma < gma_bottom)) {
-gvt_err("ip_gma %lx out of ring scope."
+gvt_vgpu_err("ip_gma %lx out of ring scope."
"(base:0x%lx, bottom: 0x%lx)\n",
s->ip_gma, rb_start,
gma_bottom);
@@ -2505,7 +2537,7 @@ static int command_scan(struct parser_exec_state *s,
return -EINVAL;
}
if (gma_out_of_range(s->ip_gma, gma_head, gma_tail)) {
-gvt_err("ip_gma %lx out of range."
+gvt_vgpu_err("ip_gma %lx out of range."
"base 0x%lx head 0x%lx tail 0x%lx\n",
s->ip_gma, rb_start,
rb_head, rb_tail);
@@ -2515,7 +2547,7 @@ static int command_scan(struct parser_exec_state *s,
}
ret = cmd_parser_exec(s);
if (ret) {
-gvt_err("cmd parser error\n");
+gvt_vgpu_err("cmd parser error\n");
parser_exec_state_dump(s);
break;
}
@@ -2639,7 +2671,7 @@ static int shadow_workload_ring_buffer(struct intel_vgpu_workload *workload)
gma_head, gma_top,
workload->shadow_ring_buffer_va);
if (ret) {
-gvt_err("fail to copy guest ring buffer\n");
+gvt_vgpu_err("fail to copy guest ring buffer\n");
return ret;
}
copy_len = gma_top - gma_head;
@@ -2651,7 +2683,7 @@ static int shadow_workload_ring_buffer(struct intel_vgpu_workload *workload)
gma_head, gma_tail,
workload->shadow_ring_buffer_va + copy_len);
if (ret) {
-gvt_err("fail to copy guest ring buffer\n");
+gvt_vgpu_err("fail to copy guest ring buffer\n");
return ret;
}
ring->tail += workload->rb_len;
@@ -2662,16 +2694,17 @@ static int shadow_workload_ring_buffer(struct intel_vgpu_workload *workload)
int intel_gvt_scan_and_shadow_workload(struct intel_vgpu_workload *workload)
{
int ret;
+struct intel_vgpu *vgpu = workload->vgpu;
ret = shadow_workload_ring_buffer(workload);
if (ret) {
-gvt_err("fail to shadow workload ring_buffer\n");
+gvt_vgpu_err("fail to shadow workload ring_buffer\n");
return ret;
}
ret = scan_workload(workload);
if (ret) {
-gvt_err("scan workload error\n");
+gvt_vgpu_err("scan workload error\n");
return ret;
}
return 0;
@@ -2681,6 +2714,7 @@ static int shadow_indirect_ctx(struct intel_shadow_wa_ctx *wa_ctx)
{
int ctx_size = wa_ctx->indirect_ctx.size;
unsigned long guest_gma = wa_ctx->indirect_ctx.guest_gma;
+struct intel_vgpu *vgpu = wa_ctx->workload->vgpu;
struct drm_i915_gem_object *obj;
int ret = 0;
void *map;
@@ -2694,14 +2728,14 @@ static int shadow_indirect_ctx(struct intel_shadow_wa_ctx *wa_ctx)
/* get the va of the shadow batch buffer */
map = i915_gem_object_pin_map(obj, I915_MAP_WB);
if (IS_ERR(map)) {
-gvt_err("failed to vmap shadow indirect ctx\n");
+gvt_vgpu_err("failed to vmap shadow indirect ctx\n");
ret = PTR_ERR(map);
goto put_obj;
}
ret = i915_gem_object_set_to_cpu_domain(obj, false);
if (ret) {
-gvt_err("failed to set shadow indirect ctx to CPU\n");
+gvt_vgpu_err("failed to set shadow indirect ctx to CPU\n");
goto unmap_src;
}
@@ -2710,7 +2744,7 @@ static int shadow_indirect_ctx(struct intel_shadow_wa_ctx *wa_ctx)
guest_gma, guest_gma + ctx_size,
map);
if (ret) {
-gvt_err("fail to copy guest indirect ctx\n");
+gvt_vgpu_err("fail to copy guest indirect ctx\n");
goto unmap_src;
}
@@ -2744,13 +2778,14 @@ static int combine_wa_ctx(struct intel_shadow_wa_ctx *wa_ctx)
int intel_gvt_scan_and_shadow_wa_ctx(struct intel_shadow_wa_ctx *wa_ctx)
{
int ret;
+struct intel_vgpu *vgpu = wa_ctx->workload->vgpu;
if (wa_ctx->indirect_ctx.size == 0)
return 0;
ret = shadow_indirect_ctx(wa_ctx);
if (ret) {
-gvt_err("fail to shadow indirect ctx\n");
+gvt_vgpu_err("fail to shadow indirect ctx\n");
return ret;
}
@@ -2758,7 +2793,7 @@ int intel_gvt_scan_and_shadow_wa_ctx(struct intel_shadow_wa_ctx *wa_ctx)
ret = scan_wa_ctx(wa_ctx);
if (ret) {
-gvt_err("scan wa ctx error\n");
+gvt_vgpu_err("scan wa ctx error\n");
return ret;
}
...
@@ -27,6 +27,14 @@
#define gvt_err(fmt, args...) \
DRM_ERROR("gvt: "fmt, ##args)
+#define gvt_vgpu_err(fmt, args...) \
+do { \
+if (IS_ERR_OR_NULL(vgpu)) \
+DRM_DEBUG_DRIVER("gvt: "fmt, ##args); \
+else \
+DRM_DEBUG_DRIVER("gvt: vgpu %d: "fmt, vgpu->id, ##args);\
+} while (0)
#define gvt_dbg_core(fmt, args...) \
DRM_DEBUG_DRIVER("gvt: core: "fmt, ##args)
...
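The gvt_vgpu_err() macro above expands to a reference to a variable literally named vgpu, which is why most hunks in this series add a local "struct intel_vgpu *vgpu = ...;" near the top of the converted functions. A minimal sketch of that calling convention (the function name and its error condition below are hypothetical; only the macro and the local-pointer pattern come from the patch):

static int example_cmd_handler(struct parser_exec_state *s)
{
        /* gvt_vgpu_err() picks up this local by name */
        struct intel_vgpu *vgpu = s->vgpu;

        if (s->ring_id < 0) {   /* hypothetical error condition */
                /* logs "gvt: vgpu N: ...", or plain "gvt: ..." if vgpu is bad */
                gvt_vgpu_err("example failure on ring %d\n", s->ring_id);
                return -EINVAL;
        }
        return 0;
}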
@@ -52,16 +52,16 @@ static unsigned char edid_get_byte(struct intel_vgpu *vgpu)
unsigned char chr = 0;
if (edid->state == I2C_NOT_SPECIFIED || !edid->slave_selected) {
-gvt_err("Driver tries to read EDID without proper sequence!\n");
+gvt_vgpu_err("Driver tries to read EDID without proper sequence!\n");
return 0;
}
if (edid->current_edid_read >= EDID_SIZE) {
-gvt_err("edid_get_byte() exceeds the size of EDID!\n");
+gvt_vgpu_err("edid_get_byte() exceeds the size of EDID!\n");
return 0;
}
if (!edid->edid_available) {
-gvt_err("Reading EDID but EDID is not available!\n");
+gvt_vgpu_err("Reading EDID but EDID is not available!\n");
return 0;
}
@@ -72,7 +72,7 @@ static unsigned char edid_get_byte(struct intel_vgpu *vgpu)
chr = edid_data->edid_block[edid->current_edid_read];
edid->current_edid_read++;
} else {
-gvt_err("No EDID available during the reading?\n");
+gvt_vgpu_err("No EDID available during the reading?\n");
}
return chr;
}
@@ -223,7 +223,7 @@ static int gmbus1_mmio_write(struct intel_vgpu *vgpu, unsigned int offset,
vgpu_vreg(vgpu, PCH_GMBUS2) |= GMBUS_ACTIVE;
break;
default:
-gvt_err("Unknown/reserved GMBUS cycle detected!\n");
+gvt_vgpu_err("Unknown/reserved GMBUS cycle detected!\n");
break;
}
/*
@@ -292,8 +292,7 @@ static int gmbus3_mmio_read(struct intel_vgpu *vgpu, unsigned int offset,
*/
} else {
memcpy(p_data, &vgpu_vreg(vgpu, offset), bytes);
-gvt_err("vgpu%d: warning: gmbus3 read with nothing returned\n",
-vgpu->id);
+gvt_vgpu_err("warning: gmbus3 read with nothing returned\n");
}
return 0;
}
...
@@ -172,6 +172,7 @@ static int emulate_execlist_ctx_schedule_out(
struct intel_vgpu_execlist *execlist,
struct execlist_ctx_descriptor_format *ctx)
{
+struct intel_vgpu *vgpu = execlist->vgpu;
struct intel_vgpu_execlist_slot *running = execlist->running_slot;
struct intel_vgpu_execlist_slot *pending = execlist->pending_slot;
struct execlist_ctx_descriptor_format *ctx0 = &running->ctx[0];
@@ -183,7 +184,7 @@ static int emulate_execlist_ctx_schedule_out(
gvt_dbg_el("schedule out context id %x\n", ctx->context_id);
if (WARN_ON(!same_context(ctx, execlist->running_context))) {
-gvt_err("schedule out context is not running context,"
+gvt_vgpu_err("schedule out context is not running context,"
"ctx id %x running ctx id %x\n",
ctx->context_id,
execlist->running_context->context_id);
@@ -254,7 +255,7 @@ static struct intel_vgpu_execlist_slot *get_next_execlist_slot(
status.udw = vgpu_vreg(vgpu, status_reg + 4);
if (status.execlist_queue_full) {
-gvt_err("virtual execlist slots are full\n");
+gvt_vgpu_err("virtual execlist slots are full\n");
return NULL;
}
@@ -270,11 +271,12 @@ static int emulate_execlist_schedule_in(struct intel_vgpu_execlist *execlist,
struct execlist_ctx_descriptor_format *ctx0, *ctx1;
struct execlist_context_status_format status;
+struct intel_vgpu *vgpu = execlist->vgpu;
gvt_dbg_el("emulate schedule-in\n");
if (!slot) {
-gvt_err("no available execlist slot\n");
+gvt_vgpu_err("no available execlist slot\n");
return -EINVAL;
}
@@ -375,7 +377,6 @@ static void prepare_shadow_batch_buffer(struct intel_vgpu_workload *workload)
vma = i915_gem_object_ggtt_pin(entry_obj->obj, NULL, 0, 4, 0);
if (IS_ERR(vma)) {
-gvt_err("Cannot pin\n");
return;
}
@@ -428,7 +429,6 @@ static void prepare_shadow_wa_ctx(struct intel_shadow_wa_ctx *wa_ctx)
vma = i915_gem_object_ggtt_pin(wa_ctx->indirect_ctx.obj, NULL,
0, CACHELINE_BYTES, 0);
if (IS_ERR(vma)) {
-gvt_err("Cannot pin indirect ctx obj\n");
return;
}
@@ -561,6 +561,7 @@ static int prepare_mm(struct intel_vgpu_workload *workload)
{
struct execlist_ctx_descriptor_format *desc = &workload->ctx_desc;
struct intel_vgpu_mm *mm;
+struct intel_vgpu *vgpu = workload->vgpu;
int page_table_level;
u32 pdp[8];
@@ -569,7 +570,7 @@ static int prepare_mm(struct intel_vgpu_workload *workload)
} else if (desc->addressing_mode == 3) { /* legacy 64 bit */
page_table_level = 4;
} else {
-gvt_err("Advanced Context mode(SVM) is not supported!\n");
+gvt_vgpu_err("Advanced Context mode(SVM) is not supported!\n");
return -EINVAL;
}
@@ -583,7 +584,7 @@ static int prepare_mm(struct intel_vgpu_workload *workload)
mm = intel_vgpu_create_mm(workload->vgpu, INTEL_GVT_MM_PPGTT,
pdp, page_table_level, 0);
if (IS_ERR(mm)) {
-gvt_err("fail to create mm object.\n");
+gvt_vgpu_err("fail to create mm object.\n");
return PTR_ERR(mm);
}
}
@@ -609,7 +610,7 @@ static int submit_context(struct intel_vgpu *vgpu, int ring_id,
ring_context_gpa = intel_vgpu_gma_to_gpa(vgpu->gtt.ggtt_mm,
(u32)((desc->lrca + 1) << GTT_PAGE_SHIFT));
if (ring_context_gpa == INTEL_GVT_INVALID_ADDR) {
-gvt_err("invalid guest context LRCA: %x\n", desc->lrca);
+gvt_vgpu_err("invalid guest context LRCA: %x\n", desc->lrca);
return -EINVAL;
}
@@ -724,8 +725,7 @@ int intel_vgpu_submit_execlist(struct intel_vgpu *vgpu, int ring_id)
continue;
if (!desc[i]->privilege_access) {
-gvt_err("vgpu%d: unexpected GGTT elsp submission\n",
-vgpu->id);
+gvt_vgpu_err("unexpected GGTT elsp submission\n");
return -EINVAL;
}
@@ -735,15 +735,13 @@ int intel_vgpu_submit_execlist(struct intel_vgpu *vgpu, int ring_id)
}
if (!valid_desc_bitmap) {
-gvt_err("vgpu%d: no valid desc in a elsp submission\n",
-vgpu->id);
+gvt_vgpu_err("no valid desc in a elsp submission\n");
return -EINVAL;
}
if (!test_bit(0, (void *)&valid_desc_bitmap) &&
test_bit(1, (void *)&valid_desc_bitmap)) {
-gvt_err("vgpu%d: weird elsp submission, desc 0 is not valid\n",
-vgpu->id);
+gvt_vgpu_err("weird elsp submission, desc 0 is not valid\n");
return -EINVAL;
}
@@ -752,8 +750,7 @@ int intel_vgpu_submit_execlist(struct intel_vgpu *vgpu, int ring_id)
ret = submit_context(vgpu, ring_id, &valid_desc[i],
emulate_schedule_in);
if (ret) {
-gvt_err("vgpu%d: fail to schedule workload\n",
-vgpu->id);
+gvt_vgpu_err("fail to schedule workload\n");
return ret;
}
emulate_schedule_in = false;
...
@@ -49,8 +49,8 @@ bool intel_gvt_ggtt_validate_range(struct intel_vgpu *vgpu, u64 addr, u32 size)
{
if ((!vgpu_gmadr_is_valid(vgpu, addr)) || (size
&& !vgpu_gmadr_is_valid(vgpu, addr + size - 1))) {
-gvt_err("vgpu%d: invalid range gmadr 0x%llx size 0x%x\n",
-vgpu->id, addr, size);
+gvt_vgpu_err("invalid range gmadr 0x%llx size 0x%x\n",
+addr, size);
return false;
}
return true;
@@ -430,7 +430,7 @@ static int gtt_entry_p2m(struct intel_vgpu *vgpu, struct intel_gvt_gtt_entry *p,
mfn = intel_gvt_hypervisor_gfn_to_mfn(vgpu, gfn);
if (mfn == INTEL_GVT_INVALID_ADDR) {
-gvt_err("fail to translate gfn: 0x%lx\n", gfn);
+gvt_vgpu_err("fail to translate gfn: 0x%lx\n", gfn);
return -ENXIO;
}
@@ -611,7 +611,7 @@ static inline int init_shadow_page(struct intel_vgpu *vgpu,
daddr = dma_map_page(kdev, p->page, 0, 4096, PCI_DMA_BIDIRECTIONAL);
if (dma_mapping_error(kdev, daddr)) {
-gvt_err("fail to map dma addr\n");
+gvt_vgpu_err("fail to map dma addr\n");
return -EINVAL;
}
@@ -735,7 +735,7 @@ static struct intel_vgpu_ppgtt_spt *ppgtt_alloc_shadow_page(
if (reclaim_one_mm(vgpu->gvt))
goto retry;
-gvt_err("fail to allocate ppgtt shadow page\n");
+gvt_vgpu_err("fail to allocate ppgtt shadow page\n");
return ERR_PTR(-ENOMEM);
}
@@ -750,14 +750,14 @@ static struct intel_vgpu_ppgtt_spt *ppgtt_alloc_shadow_page(
*/
ret = init_shadow_page(vgpu, &spt->shadow_page, type);
if (ret) {
-gvt_err("fail to initialize shadow page for spt\n");
+gvt_vgpu_err("fail to initialize shadow page for spt\n");
goto err;
}
ret = intel_vgpu_init_guest_page(vgpu, &spt->guest_page,
gfn, ppgtt_write_protection_handler, NULL);
if (ret) {
-gvt_err("fail to initialize guest page for spt\n");
+gvt_vgpu_err("fail to initialize guest page for spt\n");
goto err;
}
@@ -776,8 +776,7 @@ static struct intel_vgpu_ppgtt_spt *ppgtt_find_shadow_page(
if (p)
return shadow_page_to_ppgtt_spt(p);
-gvt_err("vgpu%d: fail to find ppgtt shadow page: 0x%lx\n",
-vgpu->id, mfn);
+gvt_vgpu_err("fail to find ppgtt shadow page: 0x%lx\n", mfn);
return NULL;
}
@@ -827,8 +826,8 @@ static int ppgtt_invalidate_shadow_page_by_shadow_entry(struct intel_vgpu *vgpu,
}
s = ppgtt_find_shadow_page(vgpu, ops->get_pfn(e));
if (!s) {
-gvt_err("vgpu%d: fail to find shadow page: mfn: 0x%lx\n",
-vgpu->id, ops->get_pfn(e));
+gvt_vgpu_err("fail to find shadow page: mfn: 0x%lx\n",
+ops->get_pfn(e));
return -ENXIO;
}
return ppgtt_invalidate_shadow_page(s);
@@ -836,6 +835,7 @@ static int ppgtt_invalidate_shadow_page_by_shadow_entry(struct intel_vgpu *vgpu,
static int ppgtt_invalidate_shadow_page(struct intel_vgpu_ppgtt_spt *spt)
{
+struct intel_vgpu *vgpu = spt->vgpu;
struct intel_gvt_gtt_entry e;
unsigned long index;
int ret;
@@ -854,7 +854,7 @@ static int ppgtt_invalidate_shadow_page(struct intel_vgpu_ppgtt_spt *spt)
for_each_present_shadow_entry(spt, &e, index) {
if (!gtt_type_is_pt(get_next_pt_type(e.type))) {
-gvt_err("GVT doesn't support pse bit for now\n");
+gvt_vgpu_err("GVT doesn't support pse bit for now\n");
return -EINVAL;
}
ret = ppgtt_invalidate_shadow_page_by_shadow_entry(
@@ -868,8 +868,8 @@ static int ppgtt_invalidate_shadow_page(struct intel_vgpu_ppgtt_spt *spt)
ppgtt_free_shadow_page(spt);
return 0;
fail:
-gvt_err("vgpu%d: fail: shadow page %p shadow entry 0x%llx type %d\n",
-spt->vgpu->id, spt, e.val64, e.type);
+gvt_vgpu_err("fail: shadow page %p shadow entry 0x%llx type %d\n",
+spt, e.val64, e.type);
return ret;
}
@@ -914,8 +914,8 @@ static struct intel_vgpu_ppgtt_spt *ppgtt_populate_shadow_page_by_guest_entry(
}
return s;
fail:
-gvt_err("vgpu%d: fail: shadow page %p guest entry 0x%llx type %d\n",
-vgpu->id, s, we->val64, we->type);
+gvt_vgpu_err("fail: shadow page %p guest entry 0x%llx type %d\n",
+s, we->val64, we->type);
return ERR_PTR(ret);
}
@@ -953,7 +953,7 @@ static int ppgtt_populate_shadow_page(struct intel_vgpu_ppgtt_spt *spt)
for_each_present_guest_entry(spt, &ge, i) {
if (!gtt_type_is_pt(get_next_pt_type(ge.type))) {
-gvt_err("GVT doesn't support pse bit now\n");
+gvt_vgpu_err("GVT doesn't support pse bit now\n");
ret = -EINVAL;
goto fail;
}
@@ -969,8 +969,8 @@ static int ppgtt_populate_shadow_page(struct intel_vgpu_ppgtt_spt *spt)
}
return 0;
fail:
-gvt_err("vgpu%d: fail: shadow page %p guest entry 0x%llx type %d\n",
-vgpu->id, spt, ge.val64, ge.type);
+gvt_vgpu_err("fail: shadow page %p guest entry 0x%llx type %d\n",
+spt, ge.val64, ge.type);
return ret;
}
@@ -999,7 +999,7 @@ static int ppgtt_handle_guest_entry_removal(struct intel_vgpu_guest_page *gpt,
struct intel_vgpu_ppgtt_spt *s =
ppgtt_find_shadow_page(vgpu, ops->get_pfn(&e));
if (!s) {
-gvt_err("fail to find guest page\n");
+gvt_vgpu_err("fail to find guest page\n");
ret = -ENXIO;
goto fail;
}
@@ -1011,8 +1011,8 @@ static int ppgtt_handle_guest_entry_removal(struct intel_vgpu_guest_page *gpt,
ppgtt_set_shadow_entry(spt, &e, index);
return 0;
fail:
-gvt_err("vgpu%d: fail: shadow page %p guest entry 0x%llx type %d\n",
-vgpu->id, spt, e.val64, e.type);
+gvt_vgpu_err("fail: shadow page %p guest entry 0x%llx type %d\n",
+spt, e.val64, e.type);
return ret;
}
@@ -1046,7 +1046,7 @@ static int ppgtt_handle_guest_entry_add(struct intel_vgpu_guest_page *gpt,
}
return 0;
fail:
-gvt_err("vgpu%d: fail: spt %p guest entry 0x%llx type %d\n", vgpu->id,
+gvt_vgpu_err("fail: spt %p guest entry 0x%llx type %d\n",
spt, we->val64, we->type);
return ret;
}
@@ -1250,8 +1250,8 @@ static int ppgtt_handle_guest_write_page_table(
}
return 0;
fail:
-gvt_err("vgpu%d: fail: shadow page %p guest entry 0x%llx type %d.\n",
-vgpu->id, spt, we->val64, we->type);
+gvt_vgpu_err("fail: shadow page %p guest entry 0x%llx type %d.\n",
+spt, we->val64, we->type);
return ret;
}
@@ -1493,7 +1493,7 @@ static int shadow_mm(struct intel_vgpu_mm *mm)
spt = ppgtt_populate_shadow_page_by_guest_entry(vgpu, &ge);
if (IS_ERR(spt)) {
-gvt_err("fail to populate guest root pointer\n");
+gvt_vgpu_err("fail to populate guest root pointer\n");
ret = PTR_ERR(spt);
goto fail;
}
@@ -1566,7 +1566,7 @@ struct intel_vgpu_mm *intel_vgpu_create_mm(struct intel_vgpu *vgpu,
ret = gtt->mm_alloc_page_table(mm);
if (ret) {
-gvt_err("fail to allocate page table for mm\n");
+gvt_vgpu_err("fail to allocate page table for mm\n");
goto fail;
}
@@ -1584,7 +1584,7 @@ struct intel_vgpu_mm *intel_vgpu_create_mm(struct intel_vgpu *vgpu,
}
return mm;
fail:
-gvt_err("fail to create mm\n");
+gvt_vgpu_err("fail to create mm\n");
if (mm)
intel_gvt_mm_unreference(mm);
return ERR_PTR(ret);
@@ -1760,7 +1760,7 @@ unsigned long intel_vgpu_gma_to_gpa(struct intel_vgpu_mm *mm, unsigned long gma)
mm->page_table_level, gma, gpa);
return gpa;
err:
-gvt_err("invalid mm type: %d gma %lx\n", mm->type, gma);
+gvt_vgpu_err("invalid mm type: %d gma %lx\n", mm->type, gma);
return INTEL_GVT_INVALID_ADDR;
}
@@ -1836,8 +1836,7 @@ static int emulate_gtt_mmio_write(struct intel_vgpu *vgpu, unsigned int off,
if (ops->test_present(&e)) {
ret = gtt_entry_p2m(vgpu, &e, &m);
if (ret) {
-gvt_err("vgpu%d: fail to translate guest gtt entry\n",
-vgpu->id);
+gvt_vgpu_err("fail to translate guest gtt entry\n");
return ret;
}
} else {
@@ -1893,14 +1892,14 @@ static int alloc_scratch_pages(struct intel_vgpu *vgpu,
scratch_pt = (void *)get_zeroed_page(GFP_KERNEL);
if (!scratch_pt) {
-gvt_err("fail to allocate scratch page\n");
+gvt_vgpu_err("fail to allocate scratch page\n");
return -ENOMEM;
}
daddr = dma_map_page(dev, virt_to_page(scratch_pt), 0,
4096, PCI_DMA_BIDIRECTIONAL);
if (dma_mapping_error(dev, daddr)) {
-gvt_err("fail to dmamap scratch_pt\n");
+gvt_vgpu_err("fail to dmamap scratch_pt\n");
__free_page(virt_to_page(scratch_pt));
return -ENOMEM;
}
@@ -2003,7 +2002,7 @@ int intel_vgpu_init_gtt(struct intel_vgpu *vgpu)
ggtt_mm = intel_vgpu_create_mm(vgpu, INTEL_GVT_MM_GGTT,
NULL, 1, 0);
if (IS_ERR(ggtt_mm)) {
-gvt_err("fail to create mm for ggtt.\n");
+gvt_vgpu_err("fail to create mm for ggtt.\n");
return PTR_ERR(ggtt_mm);
}
@@ -2076,7 +2075,6 @@ static int setup_spt_oos(struct intel_gvt *gvt)
for (i = 0; i < preallocated_oos_pages; i++) {
oos_page = kzalloc(sizeof(*oos_page), GFP_KERNEL);
if (!oos_page) {
-gvt_err("fail to pre-allocate oos page\n");
ret = -ENOMEM;
goto fail;
}
@@ -2166,7 +2164,7 @@ int intel_vgpu_g2v_create_ppgtt_mm(struct intel_vgpu *vgpu,
mm = intel_vgpu_create_mm(vgpu, INTEL_GVT_MM_PPGTT,
pdp, page_table_level, 0);
if (IS_ERR(mm)) {
-gvt_err("fail to create mm\n");
+gvt_vgpu_err("fail to create mm\n");
return PTR_ERR(mm);
}
}
@@ -2196,7 +2194,7 @@ int intel_vgpu_g2v_destroy_ppgtt_mm(struct intel_vgpu *vgpu,
mm = intel_vgpu_find_ppgtt_mm(vgpu, page_table_level, pdp);
if (!mm) {
-gvt_err("fail to find ppgtt instance.\n");
+gvt_vgpu_err("fail to find ppgtt instance.\n");
return -EINVAL;
}
intel_gvt_mm_unreference(mm);
...
@@ -181,11 +181,9 @@ static int sanitize_fence_mmio_access(struct intel_vgpu *vgpu,
GVT_FAILSAFE_UNSUPPORTED_GUEST);
if (!vgpu->mmio.disable_warn_untrack) {
-gvt_err("vgpu%d: found oob fence register access\n",
-vgpu->id);
-gvt_err("vgpu%d: total fence %d, access fence %d\n",
-vgpu->id, vgpu_fence_sz(vgpu),
-fence_num);
+gvt_vgpu_err("found oob fence register access\n");
+gvt_vgpu_err("total fence %d, access fence %d\n",
+vgpu_fence_sz(vgpu), fence_num);
}
memset(p_data, 0, bytes);
return -EINVAL;
@@ -249,7 +247,7 @@ static int mul_force_wake_write(struct intel_vgpu *vgpu,
break;
default:
/*should not hit here*/
-gvt_err("invalid forcewake offset 0x%x\n", offset);
+gvt_vgpu_err("invalid forcewake offset 0x%x\n", offset);
return -EINVAL;
}
} else {
@@ -530,7 +528,7 @@ static int check_fdi_rx_train_status(struct intel_vgpu *vgpu,
fdi_tx_train_bits = FDI_LINK_TRAIN_PATTERN_2;
fdi_iir_check_bits = FDI_RX_SYMBOL_LOCK;
} else {
-gvt_err("Invalid train pattern %d\n", train_pattern);
+gvt_vgpu_err("Invalid train pattern %d\n", train_pattern);
return -EINVAL;
}
@@ -588,7 +586,7 @@ static int update_fdi_rx_iir_status(struct intel_vgpu *vgpu,
else if (FDI_RX_IMR_TO_PIPE(offset) != INVALID_INDEX)
index = FDI_RX_IMR_TO_PIPE(offset);
else {
-gvt_err("Unsupport registers %x\n", offset);
+gvt_vgpu_err("Unsupport registers %x\n", offset);
return -EINVAL;
}
@@ -818,7 +816,7 @@ static int dp_aux_ch_ctl_mmio_write(struct intel_vgpu *vgpu,
u32 data;
if (!dpy_is_valid_port(port_index)) {
-gvt_err("GVT(%d): Unsupported DP port access!\n", vgpu->id);
+gvt_vgpu_err("Unsupported DP port access!\n");
return 0;
}
@@ -1016,8 +1014,7 @@ static void write_virtual_sbi_register(struct intel_vgpu *vgpu,
if (i == num) {
if (num == SBI_REG_MAX) {
-gvt_err("vgpu%d: SBI caching meets maximum limits\n",
-vgpu->id);
+gvt_vgpu_err("SBI caching meets maximum limits\n");
return;
}
display->sbi.number++;
@@ -1097,7 +1094,7 @@ static int pvinfo_mmio_read(struct intel_vgpu *vgpu, unsigned int offset,
break;
}
if (invalid_read)
-gvt_err("invalid pvinfo read: [%x:%x] = %x\n",
+gvt_vgpu_err("invalid pvinfo read: [%x:%x] = %x\n",
offset, bytes, *(u32 *)p_data);
vgpu->pv_notified = true;
return 0;
@@ -1125,7 +1122,7 @@ static int handle_g2v_notification(struct intel_vgpu *vgpu, int notification)
case 1: /* Remove this in guest driver. */
break;
default:
-gvt_err("Invalid PV notification %d\n", notification);
+gvt_vgpu_err("Invalid PV notification %d\n", notification);
}
return ret;
}
@@ -1181,7 +1178,7 @@ static int pvinfo_mmio_write(struct intel_vgpu *vgpu, unsigned int offset,
enter_failsafe_mode(vgpu, GVT_FAILSAFE_INSUFFICIENT_RESOURCE);
break;
default:
-gvt_err("invalid pvinfo write offset %x bytes %x data %x\n",
+gvt_vgpu_err("invalid pvinfo write offset %x bytes %x data %x\n",
offset, bytes, data);
break;
}
@@ -1415,7 +1412,8 @@ static int elsp_mmio_write(struct intel_vgpu *vgpu, unsigned int offset,
if (execlist->elsp_dwords.index == 3) {
ret = intel_vgpu_submit_execlist(vgpu, ring_id);
if(ret)
-gvt_err("fail submit workload on ring %d\n", ring_id);
+gvt_vgpu_err("fail submit workload on ring %d\n",
+ring_id);
}
++execlist->elsp_dwords.index;
@@ -2988,3 +2986,20 @@ int intel_vgpu_default_mmio_write(struct intel_vgpu *vgpu, unsigned int offset,
write_vreg(vgpu, offset, p_data, bytes);
return 0;
}
+/**
+ * intel_gvt_in_force_nonpriv_whitelist - if a mmio is in whitelist to be
+ * force-nopriv register
+ *
+ * @gvt: a GVT device
+ * @offset: register offset
+ *
+ * Returns:
+ * True if the register is in force-nonpriv whitelist;
+ * False if outside;
+ */
+bool intel_gvt_in_force_nonpriv_whitelist(struct intel_gvt *gvt,
+unsigned int offset)
+{
+return in_whitelist(offset);
+}
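The command parser consumes this helper when it vets LRI writes to the FORCE_NONPRIV range; the following is a hedged restatement of that check for readability (the standalone function below is illustrative only, the real logic lives in force_nonpriv_reg_handler() earlier in this diff):

static bool lri_write_is_allowed(struct intel_gvt *gvt,
                                 unsigned int reg_offset, u32 lri_data)
{
        /* Only LRI writes to the 0x24d0..0x24ff FORCE_NONPRIV registers are
         * policed; their payload must name a whitelisted register. */
        if (reg_offset >= 0x24d0 && reg_offset < 0x2500)
                return intel_gvt_in_force_nonpriv_whitelist(gvt, lri_data);
        return true;
}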
...@@ -426,7 +426,7 @@ static void kvmgt_protect_table_del(struct kvmgt_guest_info *info, ...@@ -426,7 +426,7 @@ static void kvmgt_protect_table_del(struct kvmgt_guest_info *info,
static int intel_vgpu_create(struct kobject *kobj, struct mdev_device *mdev) static int intel_vgpu_create(struct kobject *kobj, struct mdev_device *mdev)
{ {
struct intel_vgpu *vgpu; struct intel_vgpu *vgpu = NULL;
struct intel_vgpu_type *type; struct intel_vgpu_type *type;
struct device *pdev; struct device *pdev;
void *gvt; void *gvt;
...@@ -437,7 +437,7 @@ static int intel_vgpu_create(struct kobject *kobj, struct mdev_device *mdev) ...@@ -437,7 +437,7 @@ static int intel_vgpu_create(struct kobject *kobj, struct mdev_device *mdev)
type = intel_gvt_find_vgpu_type(gvt, kobject_name(kobj)); type = intel_gvt_find_vgpu_type(gvt, kobject_name(kobj));
if (!type) { if (!type) {
gvt_err("failed to find type %s to create\n", gvt_vgpu_err("failed to find type %s to create\n",
kobject_name(kobj)); kobject_name(kobj));
ret = -EINVAL; ret = -EINVAL;
goto out; goto out;
...@@ -446,7 +446,7 @@ static int intel_vgpu_create(struct kobject *kobj, struct mdev_device *mdev) ...@@ -446,7 +446,7 @@ static int intel_vgpu_create(struct kobject *kobj, struct mdev_device *mdev)
vgpu = intel_gvt_ops->vgpu_create(gvt, type); vgpu = intel_gvt_ops->vgpu_create(gvt, type);
if (IS_ERR_OR_NULL(vgpu)) { if (IS_ERR_OR_NULL(vgpu)) {
ret = vgpu == NULL ? -EFAULT : PTR_ERR(vgpu); ret = vgpu == NULL ? -EFAULT : PTR_ERR(vgpu);
gvt_err("failed to create intel vgpu: %d\n", ret); gvt_vgpu_err("failed to create intel vgpu: %d\n", ret);
goto out; goto out;
} }
...@@ -526,7 +526,8 @@ static int intel_vgpu_open(struct mdev_device *mdev) ...@@ -526,7 +526,8 @@ static int intel_vgpu_open(struct mdev_device *mdev)
ret = vfio_register_notifier(mdev_dev(mdev), VFIO_IOMMU_NOTIFY, &events, ret = vfio_register_notifier(mdev_dev(mdev), VFIO_IOMMU_NOTIFY, &events,
&vgpu->vdev.iommu_notifier); &vgpu->vdev.iommu_notifier);
if (ret != 0) { if (ret != 0) {
gvt_err("vfio_register_notifier for iommu failed: %d\n", ret); gvt_vgpu_err("vfio_register_notifier for iommu failed: %d\n",
ret);
goto out; goto out;
} }
...@@ -534,7 +535,8 @@ static int intel_vgpu_open(struct mdev_device *mdev) ...@@ -534,7 +535,8 @@ static int intel_vgpu_open(struct mdev_device *mdev)
ret = vfio_register_notifier(mdev_dev(mdev), VFIO_GROUP_NOTIFY, &events, ret = vfio_register_notifier(mdev_dev(mdev), VFIO_GROUP_NOTIFY, &events,
&vgpu->vdev.group_notifier); &vgpu->vdev.group_notifier);
if (ret != 0) { if (ret != 0) {
gvt_err("vfio_register_notifier for group failed: %d\n", ret); gvt_vgpu_err("vfio_register_notifier for group failed: %d\n",
ret);
goto undo_iommu; goto undo_iommu;
} }
...@@ -635,7 +637,7 @@ static ssize_t intel_vgpu_rw(struct mdev_device *mdev, char *buf, ...@@ -635,7 +637,7 @@ static ssize_t intel_vgpu_rw(struct mdev_device *mdev, char *buf,
if (index >= VFIO_PCI_NUM_REGIONS) { if (index >= VFIO_PCI_NUM_REGIONS) {
gvt_err("invalid index: %u\n", index); gvt_vgpu_err("invalid index: %u\n", index);
return -EINVAL; return -EINVAL;
} }
...@@ -669,7 +671,7 @@ static ssize_t intel_vgpu_rw(struct mdev_device *mdev, char *buf, ...@@ -669,7 +671,7 @@ static ssize_t intel_vgpu_rw(struct mdev_device *mdev, char *buf,
case VFIO_PCI_VGA_REGION_INDEX: case VFIO_PCI_VGA_REGION_INDEX:
case VFIO_PCI_ROM_REGION_INDEX: case VFIO_PCI_ROM_REGION_INDEX:
default: default:
gvt_err("unsupported region: %u\n", index); gvt_vgpu_err("unsupported region: %u\n", index);
} }
return ret == 0 ? count : ret; return ret == 0 ? count : ret;
...@@ -861,7 +863,7 @@ static int intel_vgpu_set_msi_trigger(struct intel_vgpu *vgpu, ...@@ -861,7 +863,7 @@ static int intel_vgpu_set_msi_trigger(struct intel_vgpu *vgpu,
trigger = eventfd_ctx_fdget(fd); trigger = eventfd_ctx_fdget(fd);
if (IS_ERR(trigger)) { if (IS_ERR(trigger)) {
gvt_err("eventfd_ctx_fdget failed\n"); gvt_vgpu_err("eventfd_ctx_fdget failed\n");
return PTR_ERR(trigger); return PTR_ERR(trigger);
} }
vgpu->vdev.msi_trigger = trigger; vgpu->vdev.msi_trigger = trigger;
...@@ -1120,7 +1122,7 @@ static long intel_vgpu_ioctl(struct mdev_device *mdev, unsigned int cmd, ...@@ -1120,7 +1122,7 @@ static long intel_vgpu_ioctl(struct mdev_device *mdev, unsigned int cmd,
ret = vfio_set_irqs_validate_and_prepare(&hdr, max, ret = vfio_set_irqs_validate_and_prepare(&hdr, max,
VFIO_PCI_NUM_IRQS, &data_size); VFIO_PCI_NUM_IRQS, &data_size);
if (ret) { if (ret) {
gvt_err("intel:vfio_set_irqs_validate_and_prepare failed\n"); gvt_vgpu_err("intel:vfio_set_irqs_validate_and_prepare failed\n");
return -EINVAL; return -EINVAL;
} }
if (data_size) { if (data_size) {
...@@ -1310,7 +1312,7 @@ static int kvmgt_guest_init(struct mdev_device *mdev) ...@@ -1310,7 +1312,7 @@ static int kvmgt_guest_init(struct mdev_device *mdev)
kvm = vgpu->vdev.kvm; kvm = vgpu->vdev.kvm;
if (!kvm || kvm->mm != current->mm) { if (!kvm || kvm->mm != current->mm) {
gvt_err("KVM is required to use Intel vGPU\n"); gvt_vgpu_err("KVM is required to use Intel vGPU\n");
return -ESRCH; return -ESRCH;
} }
...@@ -1337,8 +1339,10 @@ static int kvmgt_guest_init(struct mdev_device *mdev) ...@@ -1337,8 +1339,10 @@ static int kvmgt_guest_init(struct mdev_device *mdev)
static bool kvmgt_guest_exit(struct kvmgt_guest_info *info) static bool kvmgt_guest_exit(struct kvmgt_guest_info *info)
{ {
struct intel_vgpu *vgpu = info->vgpu;
if (!info) { if (!info) {
gvt_err("kvmgt_guest_info invalid\n"); gvt_vgpu_err("kvmgt_guest_info invalid\n");
return false; return false;
} }
...@@ -1383,12 +1387,14 @@ static unsigned long kvmgt_gfn_to_pfn(unsigned long handle, unsigned long gfn) ...@@ -1383,12 +1387,14 @@ static unsigned long kvmgt_gfn_to_pfn(unsigned long handle, unsigned long gfn)
unsigned long iova, pfn; unsigned long iova, pfn;
struct kvmgt_guest_info *info; struct kvmgt_guest_info *info;
struct device *dev; struct device *dev;
struct intel_vgpu *vgpu;
int rc; int rc;
if (!handle_valid(handle)) if (!handle_valid(handle))
return INTEL_GVT_INVALID_ADDR; return INTEL_GVT_INVALID_ADDR;
info = (struct kvmgt_guest_info *)handle; info = (struct kvmgt_guest_info *)handle;
vgpu = info->vgpu;
iova = gvt_cache_find(info->vgpu, gfn); iova = gvt_cache_find(info->vgpu, gfn);
if (iova != INTEL_GVT_INVALID_ADDR) if (iova != INTEL_GVT_INVALID_ADDR)
return iova; return iova;
...@@ -1397,13 +1403,14 @@ static unsigned long kvmgt_gfn_to_pfn(unsigned long handle, unsigned long gfn) ...@@ -1397,13 +1403,14 @@ static unsigned long kvmgt_gfn_to_pfn(unsigned long handle, unsigned long gfn)
dev = mdev_dev(info->vgpu->vdev.mdev); dev = mdev_dev(info->vgpu->vdev.mdev);
rc = vfio_pin_pages(dev, &gfn, 1, IOMMU_READ | IOMMU_WRITE, &pfn); rc = vfio_pin_pages(dev, &gfn, 1, IOMMU_READ | IOMMU_WRITE, &pfn);
if (rc != 1) { if (rc != 1) {
gvt_err("vfio_pin_pages failed for gfn 0x%lx: %d\n", gfn, rc); gvt_vgpu_err("vfio_pin_pages failed for gfn 0x%lx: %d\n",
gfn, rc);
return INTEL_GVT_INVALID_ADDR; return INTEL_GVT_INVALID_ADDR;
} }
/* transfer to host iova for GFX to use DMA */ /* transfer to host iova for GFX to use DMA */
rc = gvt_dma_map_iova(info->vgpu, pfn, &iova); rc = gvt_dma_map_iova(info->vgpu, pfn, &iova);
if (rc) { if (rc) {
gvt_err("gvt_dma_map_iova failed for gfn: 0x%lx\n", gfn); gvt_vgpu_err("gvt_dma_map_iova failed for gfn: 0x%lx\n", gfn);
vfio_unpin_pages(dev, &gfn, 1); vfio_unpin_pages(dev, &gfn, 1);
return INTEL_GVT_INVALID_ADDR; return INTEL_GVT_INVALID_ADDR;
} }
...@@ -1417,7 +1424,7 @@ static int kvmgt_rw_gpa(unsigned long handle, unsigned long gpa, ...@@ -1417,7 +1424,7 @@ static int kvmgt_rw_gpa(unsigned long handle, unsigned long gpa,
{ {
struct kvmgt_guest_info *info; struct kvmgt_guest_info *info;
struct kvm *kvm; struct kvm *kvm;
int ret; int idx, ret;
bool kthread = current->mm == NULL; bool kthread = current->mm == NULL;
if (!handle_valid(handle)) if (!handle_valid(handle))
...@@ -1429,8 +1436,10 @@ static int kvmgt_rw_gpa(unsigned long handle, unsigned long gpa, ...@@ -1429,8 +1436,10 @@ static int kvmgt_rw_gpa(unsigned long handle, unsigned long gpa,
if (kthread) if (kthread)
use_mm(kvm->mm); use_mm(kvm->mm);
idx = srcu_read_lock(&kvm->srcu);
ret = write ? kvm_write_guest(kvm, gpa, buf, len) : ret = write ? kvm_write_guest(kvm, gpa, buf, len) :
kvm_read_guest(kvm, gpa, buf, len); kvm_read_guest(kvm, gpa, buf, len);
srcu_read_unlock(&kvm->srcu, idx);
if (kthread) if (kthread)
unuse_mm(kvm->mm); unuse_mm(kvm->mm);
......
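The kvmgt_rw_gpa() hunk above wraps kvm_read_guest()/kvm_write_guest() in an SRCU read-side critical section: KVM looks up its memslots under kvm->srcu, so guest memory accessors must run between srcu_read_lock() and srcu_read_unlock(). A minimal kernel-side sketch of that pattern, assuming the in-tree KVM and SRCU APIs (the helper name is hypothetical and this is not a standalone program):

#include <linux/kvm_host.h>   /* struct kvm, kvm_read_guest(), kvm_write_guest() */
#include <linux/srcu.h>       /* srcu_read_lock()/srcu_read_unlock() */

/* Hypothetical helper showing the read-side bracket used in kvmgt_rw_gpa(). */
static int srcu_guarded_rw_gpa(struct kvm *kvm, unsigned long gpa,
                               void *buf, unsigned long len, bool write)
{
        int idx, ret;

        /* Enter the SRCU read side before any memslot lookup happens. */
        idx = srcu_read_lock(&kvm->srcu);
        ret = write ? kvm_write_guest(kvm, gpa, buf, len) :
                      kvm_read_guest(kvm, gpa, buf, len);
        /* Unlock with the same index returned by srcu_read_lock(). */
        srcu_read_unlock(&kvm->srcu, idx);

        return ret;
}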
...@@ -142,10 +142,10 @@ int intel_vgpu_emulate_mmio_read(struct intel_vgpu *vgpu, uint64_t pa, ...@@ -142,10 +142,10 @@ int intel_vgpu_emulate_mmio_read(struct intel_vgpu *vgpu, uint64_t pa,
ret = intel_gvt_hypervisor_read_gpa(vgpu, pa, ret = intel_gvt_hypervisor_read_gpa(vgpu, pa,
p_data, bytes); p_data, bytes);
if (ret) { if (ret) {
gvt_err("vgpu%d: guest page read error %d, " gvt_vgpu_err("guest page read error %d, "
"gfn 0x%lx, pa 0x%llx, var 0x%x, len %d\n", "gfn 0x%lx, pa 0x%llx, var 0x%x, len %d\n",
vgpu->id, ret, ret, gp->gfn, pa, *(u32 *)p_data,
gp->gfn, pa, *(u32 *)p_data, bytes); bytes);
} }
mutex_unlock(&gvt->lock); mutex_unlock(&gvt->lock);
return ret; return ret;
...@@ -200,14 +200,13 @@ int intel_vgpu_emulate_mmio_read(struct intel_vgpu *vgpu, uint64_t pa, ...@@ -200,14 +200,13 @@ int intel_vgpu_emulate_mmio_read(struct intel_vgpu *vgpu, uint64_t pa,
ret = intel_vgpu_default_mmio_read(vgpu, offset, p_data, bytes); ret = intel_vgpu_default_mmio_read(vgpu, offset, p_data, bytes);
if (!vgpu->mmio.disable_warn_untrack) { if (!vgpu->mmio.disable_warn_untrack) {
gvt_err("vgpu%d: read untracked MMIO %x(%dB) val %x\n", gvt_vgpu_err("read untracked MMIO %x(%dB) val %x\n",
vgpu->id, offset, bytes, *(u32 *)p_data); offset, bytes, *(u32 *)p_data);
if (offset == 0x206c) { if (offset == 0x206c) {
gvt_err("------------------------------------------\n"); gvt_vgpu_err("------------------------------------------\n");
gvt_err("vgpu%d: likely triggers a gfx reset\n", gvt_vgpu_err("likely triggers a gfx reset\n");
vgpu->id); gvt_vgpu_err("------------------------------------------\n");
gvt_err("------------------------------------------\n");
vgpu->mmio.disable_warn_untrack = true; vgpu->mmio.disable_warn_untrack = true;
} }
} }
...@@ -220,8 +219,8 @@ int intel_vgpu_emulate_mmio_read(struct intel_vgpu *vgpu, uint64_t pa, ...@@ -220,8 +219,8 @@ int intel_vgpu_emulate_mmio_read(struct intel_vgpu *vgpu, uint64_t pa,
mutex_unlock(&gvt->lock); mutex_unlock(&gvt->lock);
return 0; return 0;
err: err:
gvt_err("vgpu%d: fail to emulate MMIO read %08x len %d\n", gvt_vgpu_err("fail to emulate MMIO read %08x len %d\n",
vgpu->id, offset, bytes); offset, bytes);
mutex_unlock(&gvt->lock); mutex_unlock(&gvt->lock);
return ret; return ret;
} }
...@@ -259,10 +258,11 @@ int intel_vgpu_emulate_mmio_write(struct intel_vgpu *vgpu, uint64_t pa, ...@@ -259,10 +258,11 @@ int intel_vgpu_emulate_mmio_write(struct intel_vgpu *vgpu, uint64_t pa,
if (gp) { if (gp) {
ret = gp->handler(gp, pa, p_data, bytes); ret = gp->handler(gp, pa, p_data, bytes);
if (ret) { if (ret) {
gvt_err("vgpu%d: guest page write error %d, " gvt_err("guest page write error %d, "
"gfn 0x%lx, pa 0x%llx, var 0x%x, len %d\n", "gfn 0x%lx, pa 0x%llx, "
vgpu->id, ret, "var 0x%x, len %d\n",
gp->gfn, pa, *(u32 *)p_data, bytes); ret, gp->gfn, pa,
*(u32 *)p_data, bytes);
} }
mutex_unlock(&gvt->lock); mutex_unlock(&gvt->lock);
return ret; return ret;
...@@ -329,8 +329,8 @@ int intel_vgpu_emulate_mmio_write(struct intel_vgpu *vgpu, uint64_t pa, ...@@ -329,8 +329,8 @@ int intel_vgpu_emulate_mmio_write(struct intel_vgpu *vgpu, uint64_t pa,
/* all register bits are RO. */ /* all register bits are RO. */
if (ro_mask == ~(u64)0) { if (ro_mask == ~(u64)0) {
gvt_err("vgpu%d: try to write RO reg %x\n", gvt_vgpu_err("try to write RO reg %x\n",
vgpu->id, offset); offset);
ret = 0; ret = 0;
goto out; goto out;
} }
...@@ -360,8 +360,8 @@ int intel_vgpu_emulate_mmio_write(struct intel_vgpu *vgpu, uint64_t pa, ...@@ -360,8 +360,8 @@ int intel_vgpu_emulate_mmio_write(struct intel_vgpu *vgpu, uint64_t pa,
mutex_unlock(&gvt->lock); mutex_unlock(&gvt->lock);
return 0; return 0;
err: err:
gvt_err("vgpu%d: fail to emulate MMIO write %08x len %d\n", gvt_vgpu_err("fail to emulate MMIO write %08x len %d\n", offset,
vgpu->id, offset, bytes); bytes);
mutex_unlock(&gvt->lock); mutex_unlock(&gvt->lock);
return ret; return ret;
} }
......
...@@ -107,4 +107,7 @@ int intel_vgpu_default_mmio_read(struct intel_vgpu *vgpu, unsigned int offset, ...@@ -107,4 +107,7 @@ int intel_vgpu_default_mmio_read(struct intel_vgpu *vgpu, unsigned int offset,
void *p_data, unsigned int bytes); void *p_data, unsigned int bytes);
int intel_vgpu_default_mmio_write(struct intel_vgpu *vgpu, unsigned int offset, int intel_vgpu_default_mmio_write(struct intel_vgpu *vgpu, unsigned int offset,
void *p_data, unsigned int bytes); void *p_data, unsigned int bytes);
bool intel_gvt_in_force_nonpriv_whitelist(struct intel_gvt *gvt,
unsigned int offset);
#endif #endif
...@@ -67,14 +67,15 @@ static int map_vgpu_opregion(struct intel_vgpu *vgpu, bool map) ...@@ -67,14 +67,15 @@ static int map_vgpu_opregion(struct intel_vgpu *vgpu, bool map)
mfn = intel_gvt_hypervisor_virt_to_mfn(vgpu_opregion(vgpu)->va mfn = intel_gvt_hypervisor_virt_to_mfn(vgpu_opregion(vgpu)->va
+ i * PAGE_SIZE); + i * PAGE_SIZE);
if (mfn == INTEL_GVT_INVALID_ADDR) { if (mfn == INTEL_GVT_INVALID_ADDR) {
gvt_err("fail to get MFN from VA\n"); gvt_vgpu_err("fail to get MFN from VA\n");
return -EINVAL; return -EINVAL;
} }
ret = intel_gvt_hypervisor_map_gfn_to_mfn(vgpu, ret = intel_gvt_hypervisor_map_gfn_to_mfn(vgpu,
vgpu_opregion(vgpu)->gfn[i], vgpu_opregion(vgpu)->gfn[i],
mfn, 1, map); mfn, 1, map);
if (ret) { if (ret) {
gvt_err("fail to map GFN to MFN, errno: %d\n", ret); gvt_vgpu_err("fail to map GFN to MFN, errno: %d\n",
ret);
return ret; return ret;
} }
} }
...@@ -287,7 +288,7 @@ int intel_vgpu_emulate_opregion_request(struct intel_vgpu *vgpu, u32 swsci) ...@@ -287,7 +288,7 @@ int intel_vgpu_emulate_opregion_request(struct intel_vgpu *vgpu, u32 swsci)
parm = vgpu_opregion(vgpu)->va + INTEL_GVT_OPREGION_PARM; parm = vgpu_opregion(vgpu)->va + INTEL_GVT_OPREGION_PARM;
if (!(swsci & SWSCI_SCI_SELECT)) { if (!(swsci & SWSCI_SCI_SELECT)) {
gvt_err("vgpu%d: requesting SMI service\n", vgpu->id); gvt_vgpu_err("requesting SMI service\n");
return 0; return 0;
} }
/* ignore non 0->1 transitions */ /* ignore non 0->1 transitions */
...@@ -300,9 +301,8 @@ int intel_vgpu_emulate_opregion_request(struct intel_vgpu *vgpu, u32 swsci) ...@@ -300,9 +301,8 @@ int intel_vgpu_emulate_opregion_request(struct intel_vgpu *vgpu, u32 swsci)
func = GVT_OPREGION_FUNC(*scic); func = GVT_OPREGION_FUNC(*scic);
subfunc = GVT_OPREGION_SUBFUNC(*scic); subfunc = GVT_OPREGION_SUBFUNC(*scic);
if (!querying_capabilities(*scic)) { if (!querying_capabilities(*scic)) {
gvt_err("vgpu%d: requesting runtime service: func \"%s\"," gvt_vgpu_err("requesting runtime service: func \"%s\","
" subfunc \"%s\"\n", " subfunc \"%s\"\n",
vgpu->id,
opregion_func_name(func), opregion_func_name(func),
opregion_subfunc_name(subfunc)); opregion_subfunc_name(subfunc));
/* /*
......
...@@ -167,7 +167,7 @@ static void handle_tlb_pending_event(struct intel_vgpu *vgpu, int ring_id) ...@@ -167,7 +167,7 @@ static void handle_tlb_pending_event(struct intel_vgpu *vgpu, int ring_id)
I915_WRITE_FW(reg, 0x1); I915_WRITE_FW(reg, 0x1);
if (wait_for_atomic((I915_READ_FW(reg) == 0), 50)) if (wait_for_atomic((I915_READ_FW(reg) == 0), 50))
gvt_err("timeout in invalidate ring (%d) tlb\n", ring_id); gvt_vgpu_err("timeout in invalidate ring (%d) tlb\n", ring_id);
else else
vgpu_vreg(vgpu, regs[ring_id]) = 0; vgpu_vreg(vgpu, regs[ring_id]) = 0;
......
...@@ -101,7 +101,7 @@ struct tbs_sched_data { ...@@ -101,7 +101,7 @@ struct tbs_sched_data {
struct list_head runq_head; struct list_head runq_head;
}; };
#define GVT_DEFAULT_TIME_SLICE (1 * HZ / 1000) #define GVT_DEFAULT_TIME_SLICE (msecs_to_jiffies(1))
static void tbs_sched_func(struct work_struct *work) static void tbs_sched_func(struct work_struct *work)
{ {
...@@ -223,7 +223,7 @@ static void tbs_sched_start_schedule(struct intel_vgpu *vgpu) ...@@ -223,7 +223,7 @@ static void tbs_sched_start_schedule(struct intel_vgpu *vgpu)
return; return;
list_add_tail(&vgpu_data->list, &sched_data->runq_head); list_add_tail(&vgpu_data->list, &sched_data->runq_head);
schedule_delayed_work(&sched_data->work, sched_data->period); schedule_delayed_work(&sched_data->work, 0);
} }
static void tbs_sched_stop_schedule(struct intel_vgpu *vgpu) static void tbs_sched_stop_schedule(struct intel_vgpu *vgpu)
......
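The GVT_DEFAULT_TIME_SLICE hunk above is the scheduler interval fix: 1 * HZ / 1000 is integer division, so on kernels built with HZ below 1000 (HZ=250 is common) it truncates to zero jiffies and the intended ~1 ms slice is lost, whereas msecs_to_jiffies(1) rounds up and never yields less than one jiffy for a non-zero input. A small standalone C sketch of the arithmetic (the round-up formula only mirrors the relevant msecs_to_jiffies() case; the HZ values are illustrative):

#include <stdio.h>

/* Naive conversion used before the fix: truncates for HZ < 1000. */
static unsigned long naive_slice(unsigned long hz)
{
        return 1 * hz / 1000;
}

/* Round-up conversion, the behaviour msecs_to_jiffies(1) provides. */
static unsigned long roundup_slice(unsigned long hz)
{
        return (1 * hz + 999) / 1000;
}

int main(void)
{
        const unsigned long hz_values[] = { 100, 250, 300, 1000 };
        unsigned int i;

        for (i = 0; i < sizeof(hz_values) / sizeof(hz_values[0]); i++)
                printf("HZ=%4lu  1*HZ/1000 -> %lu jiffies  round-up -> %lu jiffies\n",
                       hz_values[i], naive_slice(hz_values[i]),
                       roundup_slice(hz_values[i]));
        return 0;
}

The tbs_sched_start_schedule() change in the same file queues the delayed work with a zero delay, so scheduling begins immediately instead of only after the first period has elapsed.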
...@@ -84,7 +84,7 @@ static int populate_shadow_context(struct intel_vgpu_workload *workload) ...@@ -84,7 +84,7 @@ static int populate_shadow_context(struct intel_vgpu_workload *workload)
(u32)((workload->ctx_desc.lrca + i) << (u32)((workload->ctx_desc.lrca + i) <<
GTT_PAGE_SHIFT)); GTT_PAGE_SHIFT));
if (context_gpa == INTEL_GVT_INVALID_ADDR) { if (context_gpa == INTEL_GVT_INVALID_ADDR) {
gvt_err("Invalid guest context descriptor\n"); gvt_vgpu_err("Invalid guest context descriptor\n");
return -EINVAL; return -EINVAL;
} }
...@@ -175,7 +175,9 @@ static int dispatch_workload(struct intel_vgpu_workload *workload) ...@@ -175,7 +175,9 @@ static int dispatch_workload(struct intel_vgpu_workload *workload)
int ring_id = workload->ring_id; int ring_id = workload->ring_id;
struct i915_gem_context *shadow_ctx = workload->vgpu->shadow_ctx; struct i915_gem_context *shadow_ctx = workload->vgpu->shadow_ctx;
struct drm_i915_private *dev_priv = workload->vgpu->gvt->dev_priv; struct drm_i915_private *dev_priv = workload->vgpu->gvt->dev_priv;
struct intel_engine_cs *engine = dev_priv->engine[ring_id];
struct drm_i915_gem_request *rq; struct drm_i915_gem_request *rq;
struct intel_vgpu *vgpu = workload->vgpu;
int ret; int ret;
gvt_dbg_sched("ring id %d prepare to dispatch workload %p\n", gvt_dbg_sched("ring id %d prepare to dispatch workload %p\n",
...@@ -187,9 +189,24 @@ static int dispatch_workload(struct intel_vgpu_workload *workload) ...@@ -187,9 +189,24 @@ static int dispatch_workload(struct intel_vgpu_workload *workload)
mutex_lock(&dev_priv->drm.struct_mutex); mutex_lock(&dev_priv->drm.struct_mutex);
/* pin the shadow context by gvt even though i915 will pin it when
* allocating the request. That is because gvt updates the guest
* context from the shadow context when the workload is completed,
* and at that moment i915 may already have unpinned the shadow
* context, making the shadow_ctx pages invalid. So gvt needs its
* own pin; after updating the guest context, gvt can unpin the
* shadow_ctx safely.
*/
ret = engine->context_pin(engine, shadow_ctx);
if (ret) {
gvt_vgpu_err("fail to pin shadow context\n");
workload->status = ret;
mutex_unlock(&dev_priv->drm.struct_mutex);
return ret;
}
rq = i915_gem_request_alloc(dev_priv->engine[ring_id], shadow_ctx); rq = i915_gem_request_alloc(dev_priv->engine[ring_id], shadow_ctx);
if (IS_ERR(rq)) { if (IS_ERR(rq)) {
gvt_err("fail to allocate gem request\n"); gvt_vgpu_err("fail to allocate gem request\n");
ret = PTR_ERR(rq); ret = PTR_ERR(rq);
goto out; goto out;
} }
...@@ -202,9 +219,12 @@ static int dispatch_workload(struct intel_vgpu_workload *workload) ...@@ -202,9 +219,12 @@ static int dispatch_workload(struct intel_vgpu_workload *workload)
if (ret) if (ret)
goto out; goto out;
if ((workload->ring_id == RCS) &&
(workload->wa_ctx.indirect_ctx.size != 0)) {
ret = intel_gvt_scan_and_shadow_wa_ctx(&workload->wa_ctx); ret = intel_gvt_scan_and_shadow_wa_ctx(&workload->wa_ctx);
if (ret) if (ret)
goto out; goto out;
}
ret = populate_shadow_context(workload); ret = populate_shadow_context(workload);
if (ret) if (ret)
...@@ -227,6 +247,9 @@ static int dispatch_workload(struct intel_vgpu_workload *workload) ...@@ -227,6 +247,9 @@ static int dispatch_workload(struct intel_vgpu_workload *workload)
if (!IS_ERR_OR_NULL(rq)) if (!IS_ERR_OR_NULL(rq))
i915_add_request_no_flush(rq); i915_add_request_no_flush(rq);
else
engine->context_unpin(engine, shadow_ctx);
mutex_unlock(&dev_priv->drm.struct_mutex); mutex_unlock(&dev_priv->drm.struct_mutex);
return ret; return ret;
} }
...@@ -322,7 +345,7 @@ static void update_guest_context(struct intel_vgpu_workload *workload) ...@@ -322,7 +345,7 @@ static void update_guest_context(struct intel_vgpu_workload *workload)
(u32)((workload->ctx_desc.lrca + i) << (u32)((workload->ctx_desc.lrca + i) <<
GTT_PAGE_SHIFT)); GTT_PAGE_SHIFT));
if (context_gpa == INTEL_GVT_INVALID_ADDR) { if (context_gpa == INTEL_GVT_INVALID_ADDR) {
gvt_err("invalid guest context descriptor\n"); gvt_vgpu_err("invalid guest context descriptor\n");
return; return;
} }
...@@ -376,6 +399,10 @@ static void complete_current_workload(struct intel_gvt *gvt, int ring_id) ...@@ -376,6 +399,10 @@ static void complete_current_workload(struct intel_gvt *gvt, int ring_id)
* For the workload w/o request, directly complete the workload. * For the workload w/o request, directly complete the workload.
*/ */
if (workload->req) { if (workload->req) {
struct drm_i915_private *dev_priv =
workload->vgpu->gvt->dev_priv;
struct intel_engine_cs *engine =
dev_priv->engine[workload->ring_id];
wait_event(workload->shadow_ctx_status_wq, wait_event(workload->shadow_ctx_status_wq,
!atomic_read(&workload->shadow_ctx_active)); !atomic_read(&workload->shadow_ctx_active));
...@@ -388,6 +415,10 @@ static void complete_current_workload(struct intel_gvt *gvt, int ring_id) ...@@ -388,6 +415,10 @@ static void complete_current_workload(struct intel_gvt *gvt, int ring_id)
INTEL_GVT_EVENT_MAX) INTEL_GVT_EVENT_MAX)
intel_vgpu_trigger_virtual_event(vgpu, event); intel_vgpu_trigger_virtual_event(vgpu, event);
} }
mutex_lock(&dev_priv->drm.struct_mutex);
/* unpin shadow ctx as the shadow_ctx update is done */
engine->context_unpin(engine, workload->vgpu->shadow_ctx);
mutex_unlock(&dev_priv->drm.struct_mutex);
} }
gvt_dbg_sched("ring id %d complete workload %p status %d\n", gvt_dbg_sched("ring id %d complete workload %p status %d\n",
...@@ -417,6 +448,7 @@ static int workload_thread(void *priv) ...@@ -417,6 +448,7 @@ static int workload_thread(void *priv)
int ring_id = p->ring_id; int ring_id = p->ring_id;
struct intel_gvt_workload_scheduler *scheduler = &gvt->scheduler; struct intel_gvt_workload_scheduler *scheduler = &gvt->scheduler;
struct intel_vgpu_workload *workload = NULL; struct intel_vgpu_workload *workload = NULL;
struct intel_vgpu *vgpu = NULL;
int ret; int ret;
bool need_force_wake = IS_SKYLAKE(gvt->dev_priv); bool need_force_wake = IS_SKYLAKE(gvt->dev_priv);
DEFINE_WAIT_FUNC(wait, woken_wake_function); DEFINE_WAIT_FUNC(wait, woken_wake_function);
...@@ -459,25 +491,14 @@ static int workload_thread(void *priv) ...@@ -459,25 +491,14 @@ static int workload_thread(void *priv)
mutex_unlock(&gvt->lock); mutex_unlock(&gvt->lock);
if (ret) { if (ret) {
gvt_err("fail to dispatch workload, skip\n"); vgpu = workload->vgpu;
gvt_vgpu_err("fail to dispatch workload, skip\n");
goto complete; goto complete;
} }
gvt_dbg_sched("ring id %d wait workload %p\n", gvt_dbg_sched("ring id %d wait workload %p\n",
workload->ring_id, workload); workload->ring_id, workload);
retry: i915_wait_request(workload->req, 0, MAX_SCHEDULE_TIMEOUT);
i915_wait_request(workload->req,
0, MAX_SCHEDULE_TIMEOUT);
/* i915 has a replay mechanism and a request will be replayed
* after an i915 reset, so the seqno will be updated anyway.
* If the seqno is still not updated after waiting, the replay
* may still be in progress and we can wait again.
*/
if (!i915_gem_request_completed(workload->req)) {
gvt_dbg_sched("workload %p not completed, wait again\n",
workload);
goto retry;
}
complete: complete:
gvt_dbg_sched("will complete workload %p, status: %d\n", gvt_dbg_sched("will complete workload %p, status: %d\n",
......
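The comment added in dispatch_workload() above explains the pinning change: GVT copies the guest context back out of the shadow context only after the workload completes, by which point i915 may already have dropped its own pin, so GVT takes an extra pin at dispatch and releases it in complete_current_workload() once the copy is done. A condensed kernel-side sketch of that lifecycle, reusing only the calls shown in the hunks above (the function names here are hypothetical, error handling is trimmed, and it will not build outside the i915 tree):

#include "i915_drv.h"   /* i915-internal header providing the types used below */

/* Dispatch side: hold a GVT-owned pin across the request's lifetime. */
static int dispatch_sketch(struct intel_engine_cs *engine,
                           struct i915_gem_context *shadow_ctx,
                           struct drm_i915_private *dev_priv)
{
        struct drm_i915_gem_request *rq;
        int ret;

        mutex_lock(&dev_priv->drm.struct_mutex);

        /* Keep the shadow_ctx pages valid until the guest context is updated. */
        ret = engine->context_pin(engine, shadow_ctx);
        if (ret)
                goto out_unlock;

        rq = i915_gem_request_alloc(engine, shadow_ctx);
        if (IS_ERR(rq)) {
                /* No request means no completion path, so drop the pin now. */
                engine->context_unpin(engine, shadow_ctx);
                ret = PTR_ERR(rq);
                goto out_unlock;
        }

        i915_add_request_no_flush(rq);
        ret = 0;
out_unlock:
        mutex_unlock(&dev_priv->drm.struct_mutex);
        return ret;
}

/* Completion side: the shadow -> guest context copy is finished. */
static void complete_sketch(struct intel_engine_cs *engine,
                            struct i915_gem_context *shadow_ctx,
                            struct drm_i915_private *dev_priv)
{
        mutex_lock(&dev_priv->drm.struct_mutex);
        engine->context_unpin(engine, shadow_ctx);
        mutex_unlock(&dev_priv->drm.struct_mutex);
}

The else branch in the diff covers the same failure case as the sketch: when no request was queued there is no completion path left to drop the pin, so it has to be released immediately.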
...@@ -77,6 +77,11 @@ int intel_gvt_init(struct drm_i915_private *dev_priv) ...@@ -77,6 +77,11 @@ int intel_gvt_init(struct drm_i915_private *dev_priv)
goto bail; goto bail;
} }
if (!i915.enable_execlists) {
DRM_INFO("GPU guest virtualisation [GVT-g] disabled due to disabled execlist submission [i915.enable_execlists module parameter]\n");
goto bail;
}
/* /*
* We're not in host or fail to find a MPT module, disable GVT-g * We're not in host or fail to find a MPT module, disable GVT-g
*/ */
......