Commit 94fc27ac authored by Dave Airlie

Merge tag 'drm-intel-next-fixes-2018-02-07' of git://anongit.freedesktop.org/drm/drm-intel into drm-next

Fixes for pcode timeouts on BXT and GLK, cmdparser fixes, and fixes for
the new VBT version on CFL and CNL.

On the GVT side, this contains a vGPU reset enhancement that refines the
vGPU reset flow, support for virtual aperture read/write when x-no-mmap=on
is set in KVM (required by a test case from Red Hat), and another fix for
the virtual OpRegion.

* tag 'drm-intel-next-fixes-2018-02-07' of git://anongit.freedesktop.org/drm/drm-intel:
  drm/i915/bios: add DP max link rate to VBT child device struct
  drm/i915/cnp: Properly handle VBT ddc pin out of bounds.
  drm/i915/cnp: Ignore VBT request for known invalid DDC pin.
  drm/i915/cmdparser: Do not check past the cmd length.
  drm/i915/cmdparser: Check reg_table_count before dereferencing.
  drm/i915/bxt, glk: Increase PCODE timeouts during CDCLK freq changing
  drm/i915/gvt: Use KVM r/w to access guest opregion
  drm/i915/gvt: Fix aperture read/write emulation when enable x-no-mmap=on
  drm/i915/gvt: only reset execlist state of one engine during VM engine reset
  drm/i915/gvt: refine intel_vgpu_submission_ops as per engine ops
parents 2dd27794 6dd3104e
@@ -119,16 +119,6 @@ static int map_aperture(struct intel_vgpu *vgpu, bool map)
 	if (map == vgpu->cfg_space.bar[INTEL_GVT_PCI_BAR_APERTURE].tracked)
 		return 0;

-	if (map) {
-		vgpu->gm.aperture_va = memremap(aperture_pa, aperture_sz,
-						MEMREMAP_WC);
-		if (!vgpu->gm.aperture_va)
-			return -ENOMEM;
-	} else {
-		memunmap(vgpu->gm.aperture_va);
-		vgpu->gm.aperture_va = NULL;
-	}
-
 	val = vgpu_cfg_space(vgpu)[PCI_BASE_ADDRESS_2];
 	if (val & PCI_BASE_ADDRESS_MEM_TYPE_64)
 		val = *(u64 *)(vgpu_cfg_space(vgpu) + PCI_BASE_ADDRESS_2);
@@ -141,11 +131,8 @@ static int map_aperture(struct intel_vgpu *vgpu, bool map)
 				  aperture_pa >> PAGE_SHIFT,
 				  aperture_sz >> PAGE_SHIFT,
 				  map);
-	if (ret) {
-		memunmap(vgpu->gm.aperture_va);
-		vgpu->gm.aperture_va = NULL;
+	if (ret)
 		return ret;
-	}

 	vgpu->cfg_space.bar[INTEL_GVT_PCI_BAR_APERTURE].tracked = map;
 	return 0;
...
@@ -521,24 +521,23 @@ static void init_vgpu_execlist(struct intel_vgpu *vgpu, int ring_id)
 	ctx_status_ptr_reg = execlist_ring_mmio(vgpu->gvt, ring_id,
 			_EL_OFFSET_STATUS_PTR);
 	ctx_status_ptr.dw = vgpu_vreg(vgpu, ctx_status_ptr_reg);
 	ctx_status_ptr.read_ptr = 0;
 	ctx_status_ptr.write_ptr = 0x7;
 	vgpu_vreg(vgpu, ctx_status_ptr_reg) = ctx_status_ptr.dw;
 }

-static void clean_execlist(struct intel_vgpu *vgpu)
+static void clean_execlist(struct intel_vgpu *vgpu, unsigned long engine_mask)
 {
-	enum intel_engine_id i;
+	unsigned int tmp;
+	struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv;
 	struct intel_engine_cs *engine;
+	struct intel_vgpu_submission *s = &vgpu->submission;

-	for_each_engine(engine, vgpu->gvt->dev_priv, i) {
-		struct intel_vgpu_submission *s = &vgpu->submission;
-
-		kfree(s->ring_scan_buffer[i]);
-		s->ring_scan_buffer[i] = NULL;
-		s->ring_scan_buffer_size[i] = 0;
+	for_each_engine_masked(engine, dev_priv, engine_mask, tmp) {
+		kfree(s->ring_scan_buffer[engine->id]);
+		s->ring_scan_buffer[engine->id] = NULL;
+		s->ring_scan_buffer_size[engine->id] = 0;
 	}
 }

@@ -553,9 +552,10 @@ static void reset_execlist(struct intel_vgpu *vgpu,
 		init_vgpu_execlist(vgpu, engine->id);
 }

-static int init_execlist(struct intel_vgpu *vgpu)
+static int init_execlist(struct intel_vgpu *vgpu,
+			 unsigned long engine_mask)
 {
-	reset_execlist(vgpu, ALL_ENGINES);
+	reset_execlist(vgpu, engine_mask);

 	return 0;
 }
...
@@ -82,7 +82,6 @@ struct intel_gvt_device_info {
 struct intel_vgpu_gm {
 	u64 aperture_sz;
 	u64 hidden_sz;
-	void *aperture_va;
 	struct drm_mm_node low_gm_node;
 	struct drm_mm_node high_gm_node;
 };
@@ -127,7 +126,6 @@ struct intel_vgpu_irq {
 struct intel_vgpu_opregion {
 	bool mapped;
 	void *va;
-	void *va_gopregion;
 	u32 gfn[INTEL_GVT_OPREGION_PAGES];
 };
@@ -152,8 +150,8 @@ enum {
 struct intel_vgpu_submission_ops {
 	const char *name;
-	int (*init)(struct intel_vgpu *vgpu);
-	void (*clean)(struct intel_vgpu *vgpu);
+	int (*init)(struct intel_vgpu *vgpu, unsigned long engine_mask);
+	void (*clean)(struct intel_vgpu *vgpu, unsigned long engine_mask);
 	void (*reset)(struct intel_vgpu *vgpu, unsigned long engine_mask);
 };
...
@@ -1494,7 +1494,6 @@ static int elsp_mmio_write(struct intel_vgpu *vgpu, unsigned int offset,
 static int ring_mode_mmio_write(struct intel_vgpu *vgpu, unsigned int offset,
 		void *p_data, unsigned int bytes)
 {
-	struct intel_vgpu_submission *s = &vgpu->submission;
 	u32 data = *(u32 *)p_data;
 	int ring_id = intel_gvt_render_mmio_to_ring_id(vgpu->gvt, offset);
 	bool enable_execlist;
@@ -1523,11 +1522,9 @@ static int ring_mode_mmio_write(struct intel_vgpu *vgpu, unsigned int offset,
 		if (!enable_execlist)
 			return 0;

-		if (s->active)
-			return 0;
-
 		ret = intel_vgpu_select_submission_ops(vgpu,
-				INTEL_VGPU_EXECLIST_SUBMISSION);
+				ENGINE_MASK(ring_id),
+				INTEL_VGPU_EXECLIST_SUBMISSION);
 		if (ret)
 			return ret;
...
@@ -651,6 +651,39 @@ static int intel_vgpu_bar_rw(struct intel_vgpu *vgpu, int bar, uint64_t off,
 	return ret;
 }

+static inline bool intel_vgpu_in_aperture(struct intel_vgpu *vgpu, uint64_t off)
+{
+	return off >= vgpu_aperture_offset(vgpu) &&
+	       off < vgpu_aperture_offset(vgpu) + vgpu_aperture_sz(vgpu);
+}
+
+static int intel_vgpu_aperture_rw(struct intel_vgpu *vgpu, uint64_t off,
+		void *buf, unsigned long count, bool is_write)
+{
+	void *aperture_va;
+
+	if (!intel_vgpu_in_aperture(vgpu, off) ||
+	    !intel_vgpu_in_aperture(vgpu, off + count)) {
+		gvt_vgpu_err("Invalid aperture offset %llu\n", off);
+		return -EINVAL;
+	}
+
+	aperture_va = io_mapping_map_wc(&vgpu->gvt->dev_priv->ggtt.iomap,
+					ALIGN_DOWN(off, PAGE_SIZE),
+					count + offset_in_page(off));
+	if (!aperture_va)
+		return -EIO;
+
+	if (is_write)
+		memcpy(aperture_va + offset_in_page(off), buf, count);
+	else
+		memcpy(buf, aperture_va + offset_in_page(off), count);
+
+	io_mapping_unmap(aperture_va);
+
+	return 0;
+}
+
 static ssize_t intel_vgpu_rw(struct mdev_device *mdev, char *buf,
 		size_t count, loff_t *ppos, bool is_write)
 {
@@ -679,8 +712,7 @@ static ssize_t intel_vgpu_rw(struct mdev_device *mdev, char *buf,
 				buf, count, is_write);
 		break;
 	case VFIO_PCI_BAR2_REGION_INDEX:
-		ret = intel_vgpu_bar_rw(vgpu, PCI_BASE_ADDRESS_2, pos,
-					buf, count, is_write);
+		ret = intel_vgpu_aperture_rw(vgpu, pos, buf, count, is_write);
 		break;
	case VFIO_PCI_BAR1_REGION_INDEX:
	case VFIO_PCI_BAR3_REGION_INDEX:
...
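Editor's note on the new aperture path above: intel_vgpu_aperture_rw() maps the host aperture with io_mapping_map_wc(), which wants a page-aligned start, so the guest offset is split into a page-aligned base plus an in-page remainder. A standalone userspace sketch of that arithmetic (hypothetical values, not i915 code):

/* Sketch only: the map must start on a page boundary, cover
 * count + offset_in_page(off) bytes, and the copy begins at
 * map + offset_in_page(off). */
#include <assert.h>
#include <stdint.h>
#include <stdio.h>

#define PAGE_SIZE	4096UL
#define ALIGN_DOWN(x, a)	((x) & ~((a) - 1))
#define offset_in_page(x)	((x) & (PAGE_SIZE - 1))

int main(void)
{
	uint64_t off = 0x12345;			/* unaligned offset into the BAR */
	unsigned long count = 8;

	uint64_t map_start = ALIGN_DOWN(off, PAGE_SIZE);	/* 0x12000 */
	unsigned long map_len = count + offset_in_page(off);	/* 8 + 0x345 */

	/* The byte actually accessed is map_start + offset_in_page(off) == off. */
	assert(map_start + offset_in_page(off) == off);
	printf("map at %#llx, len %lu, access at +%#llx\n",
	       (unsigned long long)map_start, map_len,
	       (unsigned long long)offset_in_page(off));
	return 0;
}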
@@ -56,38 +56,6 @@ int intel_vgpu_gpa_to_mmio_offset(struct intel_vgpu *vgpu, u64 gpa)
 	(reg >= gvt->device_info.gtt_start_offset \
 	 && reg < gvt->device_info.gtt_start_offset + gvt_ggtt_sz(gvt))

-static bool vgpu_gpa_is_aperture(struct intel_vgpu *vgpu, uint64_t gpa)
-{
-	u64 aperture_gpa = intel_vgpu_get_bar_gpa(vgpu, PCI_BASE_ADDRESS_2);
-	u64 aperture_sz = vgpu_aperture_sz(vgpu);
-
-	return gpa >= aperture_gpa && gpa < aperture_gpa + aperture_sz;
-}
-
-static int vgpu_aperture_rw(struct intel_vgpu *vgpu, uint64_t gpa,
-			    void *pdata, unsigned int size, bool is_read)
-{
-	u64 aperture_gpa = intel_vgpu_get_bar_gpa(vgpu, PCI_BASE_ADDRESS_2);
-	u64 offset = gpa - aperture_gpa;
-
-	if (!vgpu_gpa_is_aperture(vgpu, gpa + size - 1)) {
-		gvt_vgpu_err("Aperture rw out of range, offset %llx, size %d\n",
-			     offset, size);
-		return -EINVAL;
-	}
-
-	if (!vgpu->gm.aperture_va) {
-		gvt_vgpu_err("BAR is not enabled\n");
-		return -ENXIO;
-	}
-
-	if (is_read)
-		memcpy(pdata, vgpu->gm.aperture_va + offset, size);
-	else
-		memcpy(vgpu->gm.aperture_va + offset, pdata, size);
-	return 0;
-}
-
 static void failsafe_emulate_mmio_rw(struct intel_vgpu *vgpu, uint64_t pa,
 		void *p_data, unsigned int bytes, bool read)
 {
@@ -144,11 +112,6 @@ int intel_vgpu_emulate_mmio_read(struct intel_vgpu *vgpu, uint64_t pa,
 	}

 	mutex_lock(&gvt->lock);

-	if (vgpu_gpa_is_aperture(vgpu, pa)) {
-		ret = vgpu_aperture_rw(vgpu, pa, p_data, bytes, true);
-		goto out;
-	}
-
 	offset = intel_vgpu_gpa_to_mmio_offset(vgpu, pa);

 	if (WARN_ON(bytes > 8))
@@ -222,11 +185,6 @@ int intel_vgpu_emulate_mmio_write(struct intel_vgpu *vgpu, uint64_t pa,
 	mutex_lock(&gvt->lock);

-	if (vgpu_gpa_is_aperture(vgpu, pa)) {
-		ret = vgpu_aperture_rw(vgpu, pa, p_data, bytes, false);
-		goto out;
-	}
-
 	offset = intel_vgpu_gpa_to_mmio_offset(vgpu, pa);

 	if (WARN_ON(bytes > 8))
...
@@ -299,21 +299,13 @@ int intel_vgpu_opregion_base_write_handler(struct intel_vgpu *vgpu, u32 gpa)
 {
 	int i, ret = 0;
-	unsigned long pfn;

 	gvt_dbg_core("emulate opregion from kernel\n");

 	switch (intel_gvt_host.hypervisor_type) {
 	case INTEL_GVT_HYPERVISOR_KVM:
-		pfn = intel_gvt_hypervisor_gfn_to_mfn(vgpu, gpa >> PAGE_SHIFT);
-		vgpu_opregion(vgpu)->va_gopregion = memremap(pfn << PAGE_SHIFT,
-						INTEL_GVT_OPREGION_SIZE,
-						MEMREMAP_WB);
-		if (!vgpu_opregion(vgpu)->va_gopregion) {
-			gvt_vgpu_err("failed to map guest opregion\n");
-			ret = -EFAULT;
-		}
-		vgpu_opregion(vgpu)->mapped = true;
+		for (i = 0; i < INTEL_GVT_OPREGION_PAGES; i++)
+			vgpu_opregion(vgpu)->gfn[i] = (gpa >> PAGE_SHIFT) + i;
 		break;
 	case INTEL_GVT_HYPERVISOR_XEN:
 		/**
@@ -352,10 +344,7 @@ void intel_vgpu_clean_opregion(struct intel_vgpu *vgpu)
 		if (vgpu_opregion(vgpu)->mapped)
 			map_vgpu_opregion(vgpu, false);
 	} else if (intel_gvt_host.hypervisor_type == INTEL_GVT_HYPERVISOR_KVM) {
-		if (vgpu_opregion(vgpu)->mapped) {
-			memunmap(vgpu_opregion(vgpu)->va_gopregion);
-			vgpu_opregion(vgpu)->va_gopregion = NULL;
-		}
+		/* Guest opregion is released by VFIO */
 	}
 	free_pages((unsigned long)vgpu_opregion(vgpu)->va,
 		   get_order(INTEL_GVT_OPREGION_SIZE));
@@ -480,19 +469,40 @@ static bool querying_capabilities(u32 scic)
  */
 int intel_vgpu_emulate_opregion_request(struct intel_vgpu *vgpu, u32 swsci)
 {
-	u32 *scic, *parm;
+	u32 scic, parm;
 	u32 func, subfunc;
+	u64 scic_pa = 0, parm_pa = 0;
+	int ret;

 	switch (intel_gvt_host.hypervisor_type) {
 	case INTEL_GVT_HYPERVISOR_XEN:
-		scic = vgpu_opregion(vgpu)->va + INTEL_GVT_OPREGION_SCIC;
-		parm = vgpu_opregion(vgpu)->va + INTEL_GVT_OPREGION_PARM;
+		scic = *((u32 *)vgpu_opregion(vgpu)->va +
+					INTEL_GVT_OPREGION_SCIC);
+		parm = *((u32 *)vgpu_opregion(vgpu)->va +
+					INTEL_GVT_OPREGION_PARM);
 		break;
 	case INTEL_GVT_HYPERVISOR_KVM:
-		scic = vgpu_opregion(vgpu)->va_gopregion +
-						INTEL_GVT_OPREGION_SCIC;
-		parm = vgpu_opregion(vgpu)->va_gopregion +
-						INTEL_GVT_OPREGION_PARM;
+		scic_pa = (vgpu_opregion(vgpu)->gfn[0] << PAGE_SHIFT) +
+					INTEL_GVT_OPREGION_SCIC;
+		parm_pa = (vgpu_opregion(vgpu)->gfn[0] << PAGE_SHIFT) +
+					INTEL_GVT_OPREGION_PARM;
+
+		ret = intel_gvt_hypervisor_read_gpa(vgpu, scic_pa,
+						    &scic, sizeof(scic));
+		if (ret) {
+			gvt_vgpu_err("guest opregion read error %d, gpa 0x%llx, len %lu\n",
+				ret, scic_pa, sizeof(scic));
+			return ret;
+		}
+
+		ret = intel_gvt_hypervisor_read_gpa(vgpu, parm_pa,
+						    &parm, sizeof(parm));
+		if (ret) {
+			gvt_vgpu_err("guest opregion read error %d, gpa 0x%llx, len %lu\n",
+				ret, scic_pa, sizeof(scic));
+			return ret;
+		}
+
 		break;
 	default:
 		gvt_vgpu_err("not supported hypervisor\n");
@@ -510,9 +520,9 @@ int intel_vgpu_emulate_opregion_request(struct intel_vgpu *vgpu, u32 swsci)
 		return 0;
 	}

-	func = GVT_OPREGION_FUNC(*scic);
-	subfunc = GVT_OPREGION_SUBFUNC(*scic);
-	if (!querying_capabilities(*scic)) {
+	func = GVT_OPREGION_FUNC(scic);
+	subfunc = GVT_OPREGION_SUBFUNC(scic);
+	if (!querying_capabilities(scic)) {
 		gvt_vgpu_err("requesting runtime service: func \"%s\","
 				" subfunc \"%s\"\n",
 				opregion_func_name(func),
@@ -521,11 +531,43 @@ int intel_vgpu_emulate_opregion_request(struct intel_vgpu *vgpu, u32 swsci)
 		 * emulate exit status of function call, '0' means
 		 * "failure, generic, unsupported or unknown cause"
 		 */
-		*scic &= ~OPREGION_SCIC_EXIT_MASK;
-		return 0;
+		scic &= ~OPREGION_SCIC_EXIT_MASK;
+		goto out;
+	}
+
+	scic = 0;
+	parm = 0;
+
+out:
+	switch (intel_gvt_host.hypervisor_type) {
+	case INTEL_GVT_HYPERVISOR_XEN:
+		*((u32 *)vgpu_opregion(vgpu)->va +
+					INTEL_GVT_OPREGION_SCIC) = scic;
+		*((u32 *)vgpu_opregion(vgpu)->va +
+					INTEL_GVT_OPREGION_PARM) = parm;
+		break;
+	case INTEL_GVT_HYPERVISOR_KVM:
+		ret = intel_gvt_hypervisor_write_gpa(vgpu, scic_pa,
+						     &scic, sizeof(scic));
+		if (ret) {
+			gvt_vgpu_err("guest opregion write error %d, gpa 0x%llx, len %lu\n",
+				ret, scic_pa, sizeof(scic));
+			return ret;
+		}
+
+		ret = intel_gvt_hypervisor_write_gpa(vgpu, parm_pa,
+						     &parm, sizeof(parm));
+		if (ret) {
+			gvt_vgpu_err("guest opregion write error %d, gpa 0x%llx, len %lu\n",
+				ret, scic_pa, sizeof(scic));
+			return ret;
+		}
+		break;
+	default:
+		gvt_vgpu_err("not supported hypervisor\n");
+		return -EINVAL;
 	}

-	*scic = 0;
-	*parm = 0;
 	return 0;
 }
@@ -50,6 +50,7 @@ static bool vgpu_has_pending_workload(struct intel_vgpu *vgpu)
 struct vgpu_sched_data {
 	struct list_head lru_list;
 	struct intel_vgpu *vgpu;
+	bool active;

 	ktime_t sched_in_time;
 	ktime_t sched_out_time;
@@ -332,6 +333,7 @@ static void tbs_sched_start_schedule(struct intel_vgpu *vgpu)
 	if (!hrtimer_active(&sched_data->timer))
 		hrtimer_start(&sched_data->timer, ktime_add_ns(ktime_get(),
 			sched_data->period), HRTIMER_MODE_ABS);
+	vgpu_data->active = true;
 }

 static void tbs_sched_stop_schedule(struct intel_vgpu *vgpu)
@@ -339,6 +341,7 @@ static void tbs_sched_stop_schedule(struct intel_vgpu *vgpu)
 	struct vgpu_sched_data *vgpu_data = vgpu->sched_data;

 	list_del_init(&vgpu_data->lru_list);
+	vgpu_data->active = false;
 }

 static struct intel_gvt_sched_policy_ops tbs_schedule_ops = {
@@ -374,9 +377,12 @@ void intel_vgpu_clean_sched_policy(struct intel_vgpu *vgpu)
 void intel_vgpu_start_schedule(struct intel_vgpu *vgpu)
 {
-	gvt_dbg_core("vgpu%d: start schedule\n", vgpu->id);
+	struct vgpu_sched_data *vgpu_data = vgpu->sched_data;

-	vgpu->gvt->scheduler.sched_ops->start_schedule(vgpu);
+	if (!vgpu_data->active) {
+		gvt_dbg_core("vgpu%d: start schedule\n", vgpu->id);
+		vgpu->gvt->scheduler.sched_ops->start_schedule(vgpu);
+	}
 }

 void intel_gvt_kick_schedule(struct intel_gvt *gvt)
@@ -389,6 +395,10 @@ void intel_vgpu_stop_schedule(struct intel_vgpu *vgpu)
 	struct intel_gvt_workload_scheduler *scheduler =
 		&vgpu->gvt->scheduler;
 	int ring_id;
+	struct vgpu_sched_data *vgpu_data = vgpu->sched_data;
+
+	if (!vgpu_data->active)
+		return;

 	gvt_dbg_core("vgpu%d: stop schedule\n", vgpu->id);
...
@@ -991,7 +991,7 @@ void intel_vgpu_clean_submission(struct intel_vgpu *vgpu)
 {
 	struct intel_vgpu_submission *s = &vgpu->submission;

-	intel_vgpu_select_submission_ops(vgpu, 0);
+	intel_vgpu_select_submission_ops(vgpu, ALL_ENGINES, 0);
 	i915_gem_context_put(s->shadow_ctx);
 	kmem_cache_destroy(s->workloads);
 }
@@ -1079,6 +1079,7 @@ int intel_vgpu_setup_submission(struct intel_vgpu *vgpu)
  *
  */
 int intel_vgpu_select_submission_ops(struct intel_vgpu *vgpu,
+				     unsigned long engine_mask,
 				     unsigned int interface)
 {
 	struct intel_vgpu_submission *s = &vgpu->submission;
@@ -1091,21 +1092,21 @@ int intel_vgpu_select_submission_ops(struct intel_vgpu *vgpu,
 	if (WARN_ON(interface >= ARRAY_SIZE(ops)))
 		return -EINVAL;

-	if (s->active) {
-		s->ops->clean(vgpu);
-		s->active = false;
-		gvt_dbg_core("vgpu%d: de-select ops [ %s ] \n",
-				vgpu->id, s->ops->name);
-	}
+	if (WARN_ON(interface == 0 && engine_mask != ALL_ENGINES))
+		return -EINVAL;
+
+	if (s->active)
+		s->ops->clean(vgpu, engine_mask);

 	if (interface == 0) {
 		s->ops = NULL;
 		s->virtual_submission_interface = 0;
-		gvt_dbg_core("vgpu%d: no submission ops\n", vgpu->id);
+		s->active = false;
+		gvt_dbg_core("vgpu%d: remove submission ops\n", vgpu->id);
 		return 0;
 	}

-	ret = ops[interface]->init(vgpu);
+	ret = ops[interface]->init(vgpu, engine_mask);
 	if (ret)
 		return ret;
...
@@ -141,6 +141,7 @@ void intel_vgpu_reset_submission(struct intel_vgpu *vgpu,
 void intel_vgpu_clean_submission(struct intel_vgpu *vgpu);

 int intel_vgpu_select_submission_ops(struct intel_vgpu *vgpu,
+				     unsigned long engine_mask,
 				     unsigned int interface);

 extern const struct intel_vgpu_submission_ops
...
@@ -520,8 +520,7 @@ void intel_gvt_reset_vgpu_locked(struct intel_vgpu *vgpu, bool dmlr,
 	intel_vgpu_reset_submission(vgpu, resetting_eng);
 	/* full GPU reset or device model level reset */
 	if (engine_mask == ALL_ENGINES || dmlr) {
-		intel_vgpu_select_submission_ops(vgpu, 0);
-
+		intel_vgpu_select_submission_ops(vgpu, ALL_ENGINES, 0);
 		/*fence will not be reset during virtual reset */
 		if (dmlr) {
 			intel_vgpu_reset_gtt(vgpu);
...
@@ -1032,7 +1032,7 @@ find_reg(const struct intel_engine_cs *engine, bool is_master, u32 addr)
 	const struct drm_i915_reg_table *table = engine->reg_tables;
 	int count = engine->reg_table_count;

-	do {
+	for (; count > 0; ++table, --count) {
 		if (!table->master || is_master) {
 			const struct drm_i915_reg_descriptor *reg;
@@ -1040,7 +1040,7 @@ find_reg(const struct intel_engine_cs *engine, bool is_master, u32 addr)
 			if (reg != NULL)
 				return reg;
 		}
-	} while (table++, --count);
+	}

 	return NULL;
 }
@@ -1212,6 +1212,12 @@ static bool check_cmd(const struct intel_engine_cs *engine,
 			continue;
 		}

+		if (desc->bits[i].offset >= length) {
+			DRM_DEBUG_DRIVER("CMD: Rejected command 0x%08X, too short to check bitmask (%s)\n",
+					 *cmd, engine->name);
+			return false;
+		}
+
 		dword = cmd[desc->bits[i].offset] &
 			desc->bits[i].mask;
...
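Editor's note on the find_reg() change above: the old do/while dereferenced the table before testing the count, so an engine with reg_table_count == 0 walked into an empty table; the new for loop tests the count before the first dereference. A standalone userspace sketch of the difference (hypothetical types, not i915 code):

/* Sketch only: why `for (; count > 0; ...)` is safe where the old
 * `do { ... } while (table++, --count);` was not when count == 0. */
#include <stddef.h>
#include <stdio.h>

struct reg_table { const char *name; };

static const struct reg_table *first_table(const struct reg_table *table,
					   int count)
{
	for (; count > 0; ++table, --count) {	/* count checked before deref */
		if (table->name)
			return table;
	}
	return NULL;	/* never touches table when count == 0 */
}

int main(void)
{
	/* An engine with no register tables: table may be NULL, count 0.
	 * The do/while form would have dereferenced NULL here. */
	printf("%s\n", first_table(NULL, 0) ? "found" : "none");
	return 0;
}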
@@ -3717,7 +3717,11 @@ extern void intel_display_print_error_state(struct drm_i915_error_state_buf *e,
 					    struct intel_display_error_state *error);

 int sandybridge_pcode_read(struct drm_i915_private *dev_priv, u32 mbox, u32 *val);
-int sandybridge_pcode_write(struct drm_i915_private *dev_priv, u32 mbox, u32 val);
+int sandybridge_pcode_write_timeout(struct drm_i915_private *dev_priv, u32 mbox,
+				    u32 val, int timeout_us);
+#define sandybridge_pcode_write(dev_priv, mbox, val)	\
+	sandybridge_pcode_write_timeout(dev_priv, mbox, val, 500)
+
 int skl_pcode_request(struct drm_i915_private *dev_priv, u32 mbox, u32 request,
 		      u32 reply_mask, u32 reply, int timeout_base_ms);
...
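Editor's note: the hunk above is the usual wrapper-macro way of adding an explicit timeout without touching existing call sites; legacy callers keep the historic 500 usec budget while the BXT/GLK CDCLK path later in this diff passes 2 ms. A standalone userspace sketch of the pattern (hypothetical names and mailbox value, not i915 code):

#include <stdio.h>

/* The timeout-taking variant every caller ultimately reaches. */
static int pcode_write_timeout(unsigned int mbox, unsigned int val,
			       int timeout_us)
{
	printf("mbox %#x <- %#x (budget %d us)\n", mbox, val, timeout_us);
	return 0;
}

/* Old name kept as a macro so legacy callers silently get the old default. */
#define pcode_write(mbox, val)	pcode_write_timeout(mbox, val, 500)

int main(void)
{
	pcode_write(0x42, 0x80000000);			/* legacy caller: 500 us */
	pcode_write_timeout(0x42, 0x80000000, 2000);	/* explicit 2 ms caller */
	return 0;
}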
@@ -1107,6 +1107,7 @@ static void sanitize_aux_ch(struct drm_i915_private *dev_priv,
 }

 static const u8 cnp_ddc_pin_map[] = {
+	[0] = 0, /* N/A */
 	[DDC_BUS_DDI_B] = GMBUS_PIN_1_BXT,
 	[DDC_BUS_DDI_C] = GMBUS_PIN_2_BXT,
 	[DDC_BUS_DDI_D] = GMBUS_PIN_4_CNP, /* sic */
@@ -1115,9 +1116,14 @@ static const u8 cnp_ddc_pin_map[] = {
 static u8 map_ddc_pin(struct drm_i915_private *dev_priv, u8 vbt_pin)
 {
-	if (HAS_PCH_CNP(dev_priv) &&
-	    vbt_pin > 0 && vbt_pin < ARRAY_SIZE(cnp_ddc_pin_map))
-		return cnp_ddc_pin_map[vbt_pin];
+	if (HAS_PCH_CNP(dev_priv)) {
+		if (vbt_pin < ARRAY_SIZE(cnp_ddc_pin_map)) {
+			return cnp_ddc_pin_map[vbt_pin];
+		} else {
+			DRM_DEBUG_KMS("Ignoring alternate pin: VBT claims DDC pin %d, which is not valid for this platform\n", vbt_pin);
+			return 0;
+		}
+	}

 	return vbt_pin;
 }
@@ -1323,11 +1329,13 @@ parse_general_definitions(struct drm_i915_private *dev_priv,
 		expected_size = LEGACY_CHILD_DEVICE_CONFIG_SIZE;
 	} else if (bdb->version == 195) {
 		expected_size = 37;
-	} else if (bdb->version <= 197) {
+	} else if (bdb->version <= 215) {
 		expected_size = 38;
+	} else if (bdb->version <= 216) {
+		expected_size = 39;
 	} else {
-		expected_size = 38;
-		BUILD_BUG_ON(sizeof(*child) < 38);
+		expected_size = sizeof(*child);
+		BUILD_BUG_ON(sizeof(*child) < 39);
 		DRM_DEBUG_DRIVER("Expected child device config size for VBT version %u not known; assuming %u\n",
 				 bdb->version, expected_size);
 	}
...
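Editor's note on the parse_general_definitions() change above: VBT version 216 grows the child device config to 39 bytes (the new dp_max_link_rate fields at the end of this diff), so the version-to-size chain gains a branch and the unknown-version fallback now tracks sizeof(*child). A standalone restatement of the cut-offs, with sizes taken from the diff (the pre-195 legacy size is not modelled here):

#include <stdio.h>

/* Expected child device config size per BDB version, per the hunk above.
 * Only versions >= 195 are handled in this sketch. */
static unsigned int expected_child_size(unsigned int bdb_version)
{
	if (bdb_version == 195)
		return 37;
	else if (bdb_version <= 215)
		return 38;
	else if (bdb_version <= 216)
		return 39;
	return 39;	/* unknown newer version: driver assumes sizeof(*child) */
}

int main(void)
{
	const unsigned int versions[] = { 195, 197, 215, 216, 217 };

	for (unsigned int i = 0; i < sizeof(versions) / sizeof(versions[0]); i++)
		printf("BDB %u -> %u bytes\n", versions[i],
		       expected_child_size(versions[i]));
	return 0;
}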
@@ -1370,10 +1370,15 @@ static void bxt_set_cdclk(struct drm_i915_private *dev_priv,
 		break;
 	}

-	/* Inform power controller of upcoming frequency change */
+	/*
+	 * Inform power controller of upcoming frequency change. BSpec
+	 * requires us to wait up to 150usec, but that leads to timeouts;
+	 * the 2ms used here is based on experiment.
+	 */
 	mutex_lock(&dev_priv->pcu_lock);
-	ret = sandybridge_pcode_write(dev_priv, HSW_PCODE_DE_WRITE_FREQ_REQ,
-				      0x80000000);
+	ret = sandybridge_pcode_write_timeout(dev_priv,
+					      HSW_PCODE_DE_WRITE_FREQ_REQ,
+					      0x80000000, 2000);
 	mutex_unlock(&dev_priv->pcu_lock);

 	if (ret) {
@@ -1404,8 +1409,15 @@ static void bxt_set_cdclk(struct drm_i915_private *dev_priv,
 	I915_WRITE(CDCLK_CTL, val);

 	mutex_lock(&dev_priv->pcu_lock);
-	ret = sandybridge_pcode_write(dev_priv, HSW_PCODE_DE_WRITE_FREQ_REQ,
-				      cdclk_state->voltage_level);
+	/*
+	 * The timeout isn't specified, the 2ms used here is based on
+	 * experiment.
+	 * FIXME: Waiting for the request completion could be delayed until
+	 * the next PCODE request based on BSpec.
+	 */
+	ret = sandybridge_pcode_write_timeout(dev_priv,
+					      HSW_PCODE_DE_WRITE_FREQ_REQ,
+					      cdclk_state->voltage_level, 2000);
 	mutex_unlock(&dev_priv->pcu_lock);

 	if (ret) {
...
@@ -9149,8 +9149,8 @@ int sandybridge_pcode_read(struct drm_i915_private *dev_priv, u32 mbox, u32 *val
 	return 0;
 }

-int sandybridge_pcode_write(struct drm_i915_private *dev_priv,
-			    u32 mbox, u32 val)
+int sandybridge_pcode_write_timeout(struct drm_i915_private *dev_priv,
+				    u32 mbox, u32 val, int timeout_us)
 {
 	int status;
@@ -9173,7 +9173,7 @@ int sandybridge_pcode_write(struct drm_i915_private *dev_priv,
 	if (__intel_wait_for_register_fw(dev_priv,
 					 GEN6_PCODE_MAILBOX, GEN6_PCODE_READY, 0,
-					 500, 0, NULL)) {
+					 timeout_us, 0, NULL)) {
 		DRM_ERROR("timeout waiting for pcode write of 0x%08x to mbox %x to finish for %ps\n",
 			  val, mbox, __builtin_return_address(0));
 		return -ETIMEDOUT;
...
@@ -412,6 +412,8 @@ struct child_device_config {
 	u16 dp_gpio_pin_num;					/* 195 */
 	u8 dp_iboost_level:4;					/* 196 */
 	u8 hdmi_iboost_level:4;					/* 196 */
+	u8 dp_max_link_rate:2;					/* 216 CNL+ */
+	u8 dp_max_link_rate_reserved:6;				/* 216 */
 } __packed;

 struct bdb_general_definitions {
...