Commit 06a75ace authored by Daniel Vetter

Merge tag 'gvt-next-2016-10-14' of https://github.com/01org/gvt-linux into drm-intel-next-queued

Zhenyu Wang writes:

This is the first pull request to merge the GVT-g device model into
i915. It contains the core GVT-g device model work to virtualize GPU
resources and adds the Intel GVT-g technology for full GPU
virtualization. This version supports the KVM-based virtualization
solution, KVMGT.

More background is on the official project home: https://01.org/igvt-g

Signed-off-by: Daniel Vetter <daniel.vetter@intel.com>
parents 1bab7502 21196a81
GVT_DIR := gvt
GVT_SOURCE := gvt.o aperture_gm.o handlers.o vgpu.o trace_points.o firmware.o \
	interrupt.o gtt.o cfg_space.o opregion.o mmio.o display.o edid.o \
	execlist.o scheduler.o sched_policy.o render.o cmd_parser.o

ccflags-y += -I$(src) -I$(src)/$(GVT_DIR) -Wall
i915-y += $(addprefix $(GVT_DIR)/, $(GVT_SOURCE))
/*
* Copyright(c) 2011-2016 Intel Corporation. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice (including the next
* paragraph) shall be included in all copies or substantial portions of the
* Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*
* Authors:
* Kevin Tian <kevin.tian@intel.com>
* Dexuan Cui
*
* Contributors:
* Pei Zhang <pei.zhang@intel.com>
* Min He <min.he@intel.com>
* Niu Bing <bing.niu@intel.com>
* Yulei Zhang <yulei.zhang@intel.com>
* Zhenyu Wang <zhenyuw@linux.intel.com>
* Zhi Wang <zhi.a.wang@intel.com>
*
*/
#include "i915_drv.h"
#define MB_TO_BYTES(mb) ((mb) << 20ULL)
#define BYTES_TO_MB(b) ((b) >> 20ULL)
#define HOST_LOW_GM_SIZE MB_TO_BYTES(128)
#define HOST_HIGH_GM_SIZE MB_TO_BYTES(384)
#define HOST_FENCE 4
static int alloc_gm(struct intel_vgpu *vgpu, bool high_gm)
{
struct intel_gvt *gvt = vgpu->gvt;
struct drm_i915_private *dev_priv = gvt->dev_priv;
u32 alloc_flag, search_flag;
u64 start, end, size;
struct drm_mm_node *node;
int retried = 0;
int ret;
if (high_gm) {
search_flag = DRM_MM_SEARCH_BELOW;
alloc_flag = DRM_MM_CREATE_TOP;
node = &vgpu->gm.high_gm_node;
size = vgpu_hidden_sz(vgpu);
start = gvt_hidden_gmadr_base(gvt);
end = gvt_hidden_gmadr_end(gvt);
} else {
search_flag = DRM_MM_SEARCH_DEFAULT;
alloc_flag = DRM_MM_CREATE_DEFAULT;
node = &vgpu->gm.low_gm_node;
size = vgpu_aperture_sz(vgpu);
start = gvt_aperture_gmadr_base(gvt);
end = gvt_aperture_gmadr_end(gvt);
}
mutex_lock(&dev_priv->drm.struct_mutex);
search_again:
ret = drm_mm_insert_node_in_range_generic(&dev_priv->ggtt.base.mm,
node, size, 4096, 0,
start, end, search_flag,
alloc_flag);
if (ret) {
ret = i915_gem_evict_something(&dev_priv->ggtt.base,
size, 4096, 0, start, end, 0);
if (ret == 0 && ++retried < 3)
goto search_again;
gvt_err("fail to alloc %s gm space from host, retried %d\n",
high_gm ? "high" : "low", retried);
}
mutex_unlock(&dev_priv->drm.struct_mutex);
return ret;
}
static int alloc_vgpu_gm(struct intel_vgpu *vgpu)
{
struct intel_gvt *gvt = vgpu->gvt;
struct drm_i915_private *dev_priv = gvt->dev_priv;
int ret;
ret = alloc_gm(vgpu, false);
if (ret)
return ret;
ret = alloc_gm(vgpu, true);
if (ret)
goto out_free_aperture;
gvt_dbg_core("vgpu%d: alloc low GM start %llx size %llx\n", vgpu->id,
vgpu_aperture_offset(vgpu), vgpu_aperture_sz(vgpu));
gvt_dbg_core("vgpu%d: alloc high GM start %llx size %llx\n", vgpu->id,
vgpu_hidden_offset(vgpu), vgpu_hidden_sz(vgpu));
return 0;
out_free_aperture:
mutex_lock(&dev_priv->drm.struct_mutex);
drm_mm_remove_node(&vgpu->gm.low_gm_node);
mutex_unlock(&dev_priv->drm.struct_mutex);
return ret;
}
static void free_vgpu_gm(struct intel_vgpu *vgpu)
{
struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv;
mutex_lock(&dev_priv->drm.struct_mutex);
drm_mm_remove_node(&vgpu->gm.low_gm_node);
drm_mm_remove_node(&vgpu->gm.high_gm_node);
mutex_unlock(&dev_priv->drm.struct_mutex);
}
/**
* intel_vgpu_write_fence - write fence registers owned by a vGPU
* @vgpu: vGPU instance
* @fence: vGPU fence register number
* @value: Fence register value to be written
*
* This function is used to write fence registers owned by a vGPU. The vGPU
* fence register number will be translated into HW fence register number.
*
*/
void intel_vgpu_write_fence(struct intel_vgpu *vgpu,
u32 fence, u64 value)
{
struct intel_gvt *gvt = vgpu->gvt;
struct drm_i915_private *dev_priv = gvt->dev_priv;
struct drm_i915_fence_reg *reg;
i915_reg_t fence_reg_lo, fence_reg_hi;
if (WARN_ON(fence >= vgpu_fence_sz(vgpu)))
return;
reg = vgpu->fence.regs[fence];
if (WARN_ON(!reg))
return;
fence_reg_lo = FENCE_REG_GEN6_LO(reg->id);
fence_reg_hi = FENCE_REG_GEN6_HI(reg->id);
I915_WRITE(fence_reg_lo, 0);
POSTING_READ(fence_reg_lo);
I915_WRITE(fence_reg_hi, upper_32_bits(value));
I915_WRITE(fence_reg_lo, lower_32_bits(value));
POSTING_READ(fence_reg_lo);
}
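/*
 * Illustrative example (not part of the original code): if the host
 * side handed out vgpu->fence.regs[0] with reg->id == 10 in
 * alloc_vgpu_fence(), then intel_vgpu_write_fence(vgpu, 0, val)
 * programs the physical FENCE_REG_GEN6_HI(10)/FENCE_REG_GEN6_LO(10),
 * i.e. vGPU fence 0 is transparently backed by host fence 10.
 */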
static void free_vgpu_fence(struct intel_vgpu *vgpu)
{
struct intel_gvt *gvt = vgpu->gvt;
struct drm_i915_private *dev_priv = gvt->dev_priv;
struct drm_i915_fence_reg *reg;
u32 i;
if (WARN_ON(!vgpu_fence_sz(vgpu)))
return;
mutex_lock(&dev_priv->drm.struct_mutex);
for (i = 0; i < vgpu_fence_sz(vgpu); i++) {
reg = vgpu->fence.regs[i];
intel_vgpu_write_fence(vgpu, i, 0);
list_add_tail(&reg->link,
&dev_priv->mm.fence_list);
}
mutex_unlock(&dev_priv->drm.struct_mutex);
}
static int alloc_vgpu_fence(struct intel_vgpu *vgpu)
{
struct intel_gvt *gvt = vgpu->gvt;
struct drm_i915_private *dev_priv = gvt->dev_priv;
struct drm_i915_fence_reg *reg;
int i;
struct list_head *pos, *q;
/* Request fences from host */
mutex_lock(&dev_priv->drm.struct_mutex);
i = 0;
list_for_each_safe(pos, q, &dev_priv->mm.fence_list) {
reg = list_entry(pos, struct drm_i915_fence_reg, link);
if (reg->pin_count || reg->vma)
continue;
list_del(pos);
vgpu->fence.regs[i] = reg;
intel_vgpu_write_fence(vgpu, i, 0);
if (++i == vgpu_fence_sz(vgpu))
break;
}
if (i != vgpu_fence_sz(vgpu))
goto out_free_fence;
mutex_unlock(&dev_priv->drm.struct_mutex);
return 0;
out_free_fence:
/* Return fences to host, if fail */
for (i = 0; i < vgpu_fence_sz(vgpu); i++) {
reg = vgpu->fence.regs[i];
if (!reg)
continue;
list_add_tail(&reg->link,
&dev_priv->mm.fence_list);
}
mutex_unlock(&dev_priv->drm.struct_mutex);
return -ENOSPC;
}
static void free_resource(struct intel_vgpu *vgpu)
{
struct intel_gvt *gvt = vgpu->gvt;
gvt->gm.vgpu_allocated_low_gm_size -= vgpu_aperture_sz(vgpu);
gvt->gm.vgpu_allocated_high_gm_size -= vgpu_hidden_sz(vgpu);
gvt->fence.vgpu_allocated_fence_num -= vgpu_fence_sz(vgpu);
}
static int alloc_resource(struct intel_vgpu *vgpu,
struct intel_vgpu_creation_params *param)
{
struct intel_gvt *gvt = vgpu->gvt;
unsigned long request, avail, max, taken;
const char *item;
if (!param->low_gm_sz || !param->high_gm_sz || !param->fence_sz) {
gvt_err("Invalid vGPU creation params\n");
return -EINVAL;
}
item = "low GM space";
max = gvt_aperture_sz(gvt) - HOST_LOW_GM_SIZE;
taken = gvt->gm.vgpu_allocated_low_gm_size;
avail = max - taken;
request = MB_TO_BYTES(param->low_gm_sz);
if (request > avail)
goto no_enough_resource;
vgpu_aperture_sz(vgpu) = request;
item = "high GM space";
max = gvt_hidden_sz(gvt) - HOST_HIGH_GM_SIZE;
taken = gvt->gm.vgpu_allocated_high_gm_size;
avail = max - taken;
request = MB_TO_BYTES(param->high_gm_sz);
if (request > avail)
goto no_enough_resource;
vgpu_hidden_sz(vgpu) = request;
item = "fence";
max = gvt_fence_sz(gvt) - HOST_FENCE;
taken = gvt->fence.vgpu_allocated_fence_num;
avail = max - taken;
request = param->fence_sz;
if (request > avail)
goto no_enough_resource;
vgpu_fence_sz(vgpu) = request;
gvt->gm.vgpu_allocated_low_gm_size += MB_TO_BYTES(param->low_gm_sz);
gvt->gm.vgpu_allocated_high_gm_size += MB_TO_BYTES(param->high_gm_sz);
gvt->fence.vgpu_allocated_fence_num += param->fence_sz;
return 0;
no_enough_resource:
gvt_err("vgpu%d: fail to allocate resource %s\n", vgpu->id, item);
gvt_err("vgpu%d: request %luMB avail %luMB max %luMB taken %luMB\n",
vgpu->id, BYTES_TO_MB(request), BYTES_TO_MB(avail),
BYTES_TO_MB(max), BYTES_TO_MB(taken));
return -ENOSPC;
}
/**
* intel_vgpu_free_resource - free HW resource owned by a vGPU
* @vgpu: a vGPU
*
* This function is used to free the HW resource owned by a vGPU.
*
*/
void intel_vgpu_free_resource(struct intel_vgpu *vgpu)
{
free_vgpu_gm(vgpu);
free_vgpu_fence(vgpu);
free_resource(vgpu);
}
/**
* intel_vgpu_alloc_resource - allocate HW resource for a vGPU
* @vgpu: vGPU
* @param: vGPU creation params
*
* This function is used to allocate HW resource for a vGPU. User specifies
* the resource configuration through the creation params.
*
* Returns:
* zero on success, negative error code if failed.
*
*/
int intel_vgpu_alloc_resource(struct intel_vgpu *vgpu,
struct intel_vgpu_creation_params *param)
{
int ret;
ret = alloc_resource(vgpu, param);
if (ret)
return ret;
ret = alloc_vgpu_gm(vgpu);
if (ret)
goto out_free_resource;
ret = alloc_vgpu_fence(vgpu);
if (ret)
goto out_free_vgpu_gm;
return 0;
out_free_vgpu_gm:
free_vgpu_gm(vgpu);
out_free_resource:
free_resource(vgpu);
return ret;
}
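/*
 * A minimal usage sketch (illustrative only; the sizes below are
 * assumptions, not recommended values). low_gm_sz/high_gm_sz are in MB
 * and fence_sz is a fence register count, as consumed by alloc_resource().
 * On failure the function has already rolled back any partial allocation,
 * so a caller only needs to propagate the error:
 *
 *	struct intel_vgpu_creation_params param = {
 *		.low_gm_sz = 64,
 *		.high_gm_sz = 448,
 *		.fence_sz = 4,
 *	};
 *	ret = intel_vgpu_alloc_resource(vgpu, &param);
 *	if (ret)
 *		return ret;
 */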
/*
* Copyright(c) 2011-2016 Intel Corporation. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice (including the next
* paragraph) shall be included in all copies or substantial portions of the
* Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*
* Authors:
* Eddie Dong <eddie.dong@intel.com>
* Jike Song <jike.song@intel.com>
*
* Contributors:
* Zhi Wang <zhi.a.wang@intel.com>
* Min He <min.he@intel.com>
* Bing Niu <bing.niu@intel.com>
*
*/
#include "i915_drv.h"
enum {
INTEL_GVT_PCI_BAR_GTTMMIO = 0,
INTEL_GVT_PCI_BAR_APERTURE,
INTEL_GVT_PCI_BAR_PIO,
INTEL_GVT_PCI_BAR_MAX,
};
/**
* intel_vgpu_emulate_cfg_read - emulate vGPU configuration space read
*
* Returns:
* Zero on success, negative error code if failed.
*/
int intel_vgpu_emulate_cfg_read(void *__vgpu, unsigned int offset,
void *p_data, unsigned int bytes)
{
struct intel_vgpu *vgpu = __vgpu;
if (WARN_ON(bytes > 4))
return -EINVAL;
if (WARN_ON(offset + bytes > INTEL_GVT_MAX_CFG_SPACE_SZ))
return -EINVAL;
memcpy(p_data, vgpu_cfg_space(vgpu) + offset, bytes);
return 0;
}
static int map_aperture(struct intel_vgpu *vgpu, bool map)
{
u64 first_gfn, first_mfn;
u64 val;
int ret;
if (map == vgpu->cfg_space.bar[INTEL_GVT_PCI_BAR_APERTURE].tracked)
return 0;
val = vgpu_cfg_space(vgpu)[PCI_BASE_ADDRESS_2];
if (val & PCI_BASE_ADDRESS_MEM_TYPE_64)
val = *(u64 *)(vgpu_cfg_space(vgpu) + PCI_BASE_ADDRESS_2);
else
val = *(u32 *)(vgpu_cfg_space(vgpu) + PCI_BASE_ADDRESS_2);
first_gfn = (val + vgpu_aperture_offset(vgpu)) >> PAGE_SHIFT;
first_mfn = vgpu_aperture_pa_base(vgpu) >> PAGE_SHIFT;
ret = intel_gvt_hypervisor_map_gfn_to_mfn(vgpu, first_gfn,
first_mfn,
vgpu_aperture_sz(vgpu)
>> PAGE_SHIFT, map,
GVT_MAP_APERTURE);
if (ret)
return ret;
vgpu->cfg_space.bar[INTEL_GVT_PCI_BAR_APERTURE].tracked = map;
return 0;
}
static int trap_gttmmio(struct intel_vgpu *vgpu, bool trap)
{
u64 start, end;
u64 val;
int ret;
if (trap == vgpu->cfg_space.bar[INTEL_GVT_PCI_BAR_GTTMMIO].tracked)
return 0;
val = vgpu_cfg_space(vgpu)[PCI_BASE_ADDRESS_0];
if (val & PCI_BASE_ADDRESS_MEM_TYPE_64)
start = *(u64 *)(vgpu_cfg_space(vgpu) + PCI_BASE_ADDRESS_0);
else
start = *(u32 *)(vgpu_cfg_space(vgpu) + PCI_BASE_ADDRESS_0);
start &= ~GENMASK(3, 0);
end = start + vgpu->cfg_space.bar[INTEL_GVT_PCI_BAR_GTTMMIO].size - 1;
ret = intel_gvt_hypervisor_set_trap_area(vgpu, start, end, trap);
if (ret)
return ret;
vgpu->cfg_space.bar[INTEL_GVT_PCI_BAR_GTTMMIO].tracked = trap;
return 0;
}
static int emulate_pci_command_write(struct intel_vgpu *vgpu,
unsigned int offset, void *p_data, unsigned int bytes)
{
u8 old = vgpu_cfg_space(vgpu)[offset];
u8 new = *(u8 *)p_data;
u8 changed = old ^ new;
int ret;
if (!(changed & PCI_COMMAND_MEMORY))
return 0;
if (old & PCI_COMMAND_MEMORY) {
ret = trap_gttmmio(vgpu, false);
if (ret)
return ret;
ret = map_aperture(vgpu, false);
if (ret)
return ret;
} else {
ret = trap_gttmmio(vgpu, true);
if (ret)
return ret;
ret = map_aperture(vgpu, true);
if (ret)
return ret;
}
memcpy(vgpu_cfg_space(vgpu) + offset, p_data, bytes);
return 0;
}
static int emulate_pci_bar_write(struct intel_vgpu *vgpu, unsigned int offset,
void *p_data, unsigned int bytes)
{
unsigned int bar_index =
(rounddown(offset, 8) % PCI_BASE_ADDRESS_0) / 8;
u32 new = *(u32 *)(p_data);
bool lo = IS_ALIGNED(offset, 8);
u64 size;
int ret = 0;
bool mmio_enabled =
vgpu_cfg_space(vgpu)[PCI_COMMAND] & PCI_COMMAND_MEMORY;
if (WARN_ON(bar_index >= INTEL_GVT_PCI_BAR_MAX))
return -EINVAL;
if (new == 0xffffffff) {
/*
* Power-up software can determine how much address
* space the device requires by writing a value of
* all 1's to the register and then reading the value
* back. The device will return 0's in all don't-care
* address bits.
*/
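/*
 * Worked example (illustrative, assuming a 16MB BAR): size is
 * 0x1000000, so after the guest writes 0xffffffff the emulated
 * low dword reads back as rounddown(0xffffffff, 0x1000000) =
 * 0xff000000, and the guest computes ~0xff000000 + 1 = 16MB.
 */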
size = vgpu->cfg_space.bar[bar_index].size;
if (lo) {
new = rounddown(new, size);
} else {
u32 val = vgpu_cfg_space(vgpu)[rounddown(offset, 8)];
/* for a 32-bit BAR the upper 32 bits read back as all
 * zeros; for a 64-bit BAR the size is calculated from the
 * lower 32 bits and the corresponding upper-bit mask is
 * returned
 */
if (val & PCI_BASE_ADDRESS_MEM_TYPE_64)
new &= (~(size-1)) >> 32;
else
new = 0;
}
/*
* Unmap & untrap the BAR, since the guest hasn't configured a
* valid GPA
*/
switch (bar_index) {
case INTEL_GVT_PCI_BAR_GTTMMIO:
ret = trap_gttmmio(vgpu, false);
break;
case INTEL_GVT_PCI_BAR_APERTURE:
ret = map_aperture(vgpu, false);
break;
}
intel_vgpu_write_pci_bar(vgpu, offset, new, lo);
} else {
/*
* Unmap & untrap the old BAR first, since the guest has
* re-configured the BAR
*/
switch (bar_index) {
case INTEL_GVT_PCI_BAR_GTTMMIO:
ret = trap_gttmmio(vgpu, false);
break;
case INTEL_GVT_PCI_BAR_APERTURE:
ret = map_aperture(vgpu, false);
break;
}
intel_vgpu_write_pci_bar(vgpu, offset, new, lo);
/* Track the new BAR */
if (mmio_enabled) {
switch (bar_index) {
case INTEL_GVT_PCI_BAR_GTTMMIO:
ret = trap_gttmmio(vgpu, true);
break;
case INTEL_GVT_PCI_BAR_APERTURE:
ret = map_aperture(vgpu, true);
break;
}
}
}
return ret;
}
/**
* intel_vgpu_emulate_cfg_write - emulate vGPU configuration space write
*
* Returns:
* Zero on success, negative error code if failed.
*/
int intel_vgpu_emulate_cfg_write(void *__vgpu, unsigned int offset,
void *p_data, unsigned int bytes)
{
struct intel_vgpu *vgpu = __vgpu;
int ret;
if (WARN_ON(bytes > 4))
return -EINVAL;
if (WARN_ON(offset + bytes >= INTEL_GVT_MAX_CFG_SPACE_SZ))
return -EINVAL;
/* First check if it's PCI_COMMAND */
if (IS_ALIGNED(offset, 2) && offset == PCI_COMMAND) {
if (WARN_ON(bytes > 2))
return -EINVAL;
return emulate_pci_command_write(vgpu, offset, p_data, bytes);
}
switch (rounddown(offset, 4)) {
case PCI_BASE_ADDRESS_0:
case PCI_BASE_ADDRESS_1:
case PCI_BASE_ADDRESS_2:
case PCI_BASE_ADDRESS_3:
if (WARN_ON(!IS_ALIGNED(offset, 4)))
return -EINVAL;
return emulate_pci_bar_write(vgpu, offset, p_data, bytes);
case INTEL_GVT_PCI_SWSCI:
if (WARN_ON(!IS_ALIGNED(offset, 4)))
return -EINVAL;
ret = intel_vgpu_emulate_opregion_request(vgpu, *(u32 *)p_data);
if (ret)
return ret;
break;
case INTEL_GVT_PCI_OPREGION:
if (WARN_ON(!IS_ALIGNED(offset, 4)))
return -EINVAL;
ret = intel_vgpu_init_opregion(vgpu, *(u32 *)p_data);
if (ret)
return ret;
memcpy(vgpu_cfg_space(vgpu) + offset, p_data, bytes);
break;
default:
memcpy(vgpu_cfg_space(vgpu) + offset, p_data, bytes);
break;
}
return 0;
}
/*
* Copyright(c) 2011-2016 Intel Corporation. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice (including the next
* paragraph) shall be included in all copies or substantial portions of the
* Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*
* Authors:
* Ke Yu
* Kevin Tian <kevin.tian@intel.com>
* Zhiyuan Lv <zhiyuan.lv@intel.com>
*
* Contributors:
* Min He <min.he@intel.com>
* Ping Gao <ping.a.gao@intel.com>
* Tina Zhang <tina.zhang@intel.com>
* Yulei Zhang <yulei.zhang@intel.com>
* Zhi Wang <zhi.a.wang@intel.com>
*
*/
#include <linux/slab.h>
#include "i915_drv.h"
#include "trace.h"
#define INVALID_OP (~0U)
#define OP_LEN_MI 9
#define OP_LEN_2D 10
#define OP_LEN_3D_MEDIA 16
#define OP_LEN_MFX_VC 16
#define OP_LEN_VEBOX 16
#define CMD_TYPE(cmd) (((cmd) >> 29) & 7)
struct sub_op_bits {
int hi;
int low;
};
struct decode_info {
char *name;
int op_len;
int nr_sub_op;
struct sub_op_bits *sub_op;
};
#define MAX_CMD_BUDGET 0x7fffffff
#define MI_WAIT_FOR_PLANE_C_FLIP_PENDING (1<<15)
#define MI_WAIT_FOR_PLANE_B_FLIP_PENDING (1<<9)
#define MI_WAIT_FOR_PLANE_A_FLIP_PENDING (1<<1)
#define MI_WAIT_FOR_SPRITE_C_FLIP_PENDING (1<<20)
#define MI_WAIT_FOR_SPRITE_B_FLIP_PENDING (1<<10)
#define MI_WAIT_FOR_SPRITE_A_FLIP_PENDING (1<<2)
/* Render Command Map */
/* MI_* command Opcode (28:23) */
#define OP_MI_NOOP 0x0
#define OP_MI_SET_PREDICATE 0x1 /* HSW+ */
#define OP_MI_USER_INTERRUPT 0x2
#define OP_MI_WAIT_FOR_EVENT 0x3
#define OP_MI_FLUSH 0x4
#define OP_MI_ARB_CHECK 0x5
#define OP_MI_RS_CONTROL 0x6 /* HSW+ */
#define OP_MI_REPORT_HEAD 0x7
#define OP_MI_ARB_ON_OFF 0x8
#define OP_MI_URB_ATOMIC_ALLOC 0x9 /* HSW+ */
#define OP_MI_BATCH_BUFFER_END 0xA
#define OP_MI_SUSPEND_FLUSH 0xB
#define OP_MI_PREDICATE 0xC /* IVB+ */
#define OP_MI_TOPOLOGY_FILTER 0xD /* IVB+ */
#define OP_MI_SET_APPID 0xE /* IVB+ */
#define OP_MI_RS_CONTEXT 0xF /* HSW+ */
#define OP_MI_LOAD_SCAN_LINES_INCL 0x12 /* HSW+ */
#define OP_MI_DISPLAY_FLIP 0x14
#define OP_MI_SEMAPHORE_MBOX 0x16
#define OP_MI_SET_CONTEXT 0x18
#define OP_MI_MATH 0x1A
#define OP_MI_URB_CLEAR 0x19
#define OP_MI_SEMAPHORE_SIGNAL 0x1B /* BDW+ */
#define OP_MI_SEMAPHORE_WAIT 0x1C /* BDW+ */
#define OP_MI_STORE_DATA_IMM 0x20
#define OP_MI_STORE_DATA_INDEX 0x21
#define OP_MI_LOAD_REGISTER_IMM 0x22
#define OP_MI_UPDATE_GTT 0x23
#define OP_MI_STORE_REGISTER_MEM 0x24
#define OP_MI_FLUSH_DW 0x26
#define OP_MI_CLFLUSH 0x27
#define OP_MI_REPORT_PERF_COUNT 0x28
#define OP_MI_LOAD_REGISTER_MEM 0x29 /* HSW+ */
#define OP_MI_LOAD_REGISTER_REG 0x2A /* HSW+ */
#define OP_MI_RS_STORE_DATA_IMM 0x2B /* HSW+ */
#define OP_MI_LOAD_URB_MEM 0x2C /* HSW+ */
#define OP_MI_STORE_URM_MEM 0x2D /* HSW+ */
#define OP_MI_2E 0x2E /* BDW+ */
#define OP_MI_2F 0x2F /* BDW+ */
#define OP_MI_BATCH_BUFFER_START 0x31
/* Bit definition for dword 0 */
#define _CMDBIT_BB_START_IN_PPGTT (1UL << 8)
#define OP_MI_CONDITIONAL_BATCH_BUFFER_END 0x36
#define BATCH_BUFFER_ADDR_MASK ((1UL << 32) - (1U << 2))
#define BATCH_BUFFER_ADDR_HIGH_MASK ((1UL << 16) - (1U))
#define BATCH_BUFFER_ADR_SPACE_BIT(x) (((x) >> 8) & 1U)
#define BATCH_BUFFER_2ND_LEVEL_BIT(x) ((x) >> 22 & 1U)
/* 2D command: Opcode (28:22) */
#define OP_2D(x) ((2<<7) | x)
#define OP_XY_SETUP_BLT OP_2D(0x1)
#define OP_XY_SETUP_CLIP_BLT OP_2D(0x3)
#define OP_XY_SETUP_MONO_PATTERN_SL_BLT OP_2D(0x11)
#define OP_XY_PIXEL_BLT OP_2D(0x24)
#define OP_XY_SCANLINES_BLT OP_2D(0x25)
#define OP_XY_TEXT_BLT OP_2D(0x26)
#define OP_XY_TEXT_IMMEDIATE_BLT OP_2D(0x31)
#define OP_XY_COLOR_BLT OP_2D(0x50)
#define OP_XY_PAT_BLT OP_2D(0x51)
#define OP_XY_MONO_PAT_BLT OP_2D(0x52)
#define OP_XY_SRC_COPY_BLT OP_2D(0x53)
#define OP_XY_MONO_SRC_COPY_BLT OP_2D(0x54)
#define OP_XY_FULL_BLT OP_2D(0x55)
#define OP_XY_FULL_MONO_SRC_BLT OP_2D(0x56)
#define OP_XY_FULL_MONO_PATTERN_BLT OP_2D(0x57)
#define OP_XY_FULL_MONO_PATTERN_MONO_SRC_BLT OP_2D(0x58)
#define OP_XY_MONO_PAT_FIXED_BLT OP_2D(0x59)
#define OP_XY_MONO_SRC_COPY_IMMEDIATE_BLT OP_2D(0x71)
#define OP_XY_PAT_BLT_IMMEDIATE OP_2D(0x72)
#define OP_XY_SRC_COPY_CHROMA_BLT OP_2D(0x73)
#define OP_XY_FULL_IMMEDIATE_PATTERN_BLT OP_2D(0x74)
#define OP_XY_FULL_MONO_SRC_IMMEDIATE_PATTERN_BLT OP_2D(0x75)
#define OP_XY_PAT_CHROMA_BLT OP_2D(0x76)
#define OP_XY_PAT_CHROMA_BLT_IMMEDIATE OP_2D(0x77)
/* 3D/Media Command: Pipeline Type(28:27) Opcode(26:24) Sub Opcode(23:16) */
#define OP_3D_MEDIA(sub_type, opcode, sub_opcode) \
((3 << 13) | ((sub_type) << 11) | ((opcode) << 8) | (sub_opcode))
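/*
 * Illustrative encoding example: for a 3D/Media command the opcode
 * value returned by get_opcode() keeps the top OP_LEN_3D_MEDIA (16)
 * bits of the command dword, i.e. type(15:13), pipeline(12:11),
 * opcode(10:8) and sub-opcode(7:0). For instance OP_PIPE_CONTROL
 * below is OP_3D_MEDIA(0x3, 0x2, 0x00) = (3 << 13) | (3 << 11) |
 * (2 << 8) = 0x7a00.
 */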
#define OP_STATE_PREFETCH OP_3D_MEDIA(0x0, 0x0, 0x03)
#define OP_STATE_BASE_ADDRESS OP_3D_MEDIA(0x0, 0x1, 0x01)
#define OP_STATE_SIP OP_3D_MEDIA(0x0, 0x1, 0x02)
#define OP_3D_MEDIA_0_1_4 OP_3D_MEDIA(0x0, 0x1, 0x04)
#define OP_3DSTATE_VF_STATISTICS_GM45 OP_3D_MEDIA(0x1, 0x0, 0x0B)
#define OP_PIPELINE_SELECT OP_3D_MEDIA(0x1, 0x1, 0x04)
#define OP_MEDIA_VFE_STATE OP_3D_MEDIA(0x2, 0x0, 0x0)
#define OP_MEDIA_CURBE_LOAD OP_3D_MEDIA(0x2, 0x0, 0x1)
#define OP_MEDIA_INTERFACE_DESCRIPTOR_LOAD OP_3D_MEDIA(0x2, 0x0, 0x2)
#define OP_MEDIA_GATEWAY_STATE OP_3D_MEDIA(0x2, 0x0, 0x3)
#define OP_MEDIA_STATE_FLUSH OP_3D_MEDIA(0x2, 0x0, 0x4)
#define OP_MEDIA_OBJECT OP_3D_MEDIA(0x2, 0x1, 0x0)
#define OP_MEDIA_OBJECT_PRT OP_3D_MEDIA(0x2, 0x1, 0x2)
#define OP_MEDIA_OBJECT_WALKER OP_3D_MEDIA(0x2, 0x1, 0x3)
#define OP_GPGPU_WALKER OP_3D_MEDIA(0x2, 0x1, 0x5)
#define OP_3DSTATE_CLEAR_PARAMS OP_3D_MEDIA(0x3, 0x0, 0x04) /* IVB+ */
#define OP_3DSTATE_DEPTH_BUFFER OP_3D_MEDIA(0x3, 0x0, 0x05) /* IVB+ */
#define OP_3DSTATE_STENCIL_BUFFER OP_3D_MEDIA(0x3, 0x0, 0x06) /* IVB+ */
#define OP_3DSTATE_HIER_DEPTH_BUFFER OP_3D_MEDIA(0x3, 0x0, 0x07) /* IVB+ */
#define OP_3DSTATE_VERTEX_BUFFERS OP_3D_MEDIA(0x3, 0x0, 0x08)
#define OP_3DSTATE_VERTEX_ELEMENTS OP_3D_MEDIA(0x3, 0x0, 0x09)
#define OP_3DSTATE_INDEX_BUFFER OP_3D_MEDIA(0x3, 0x0, 0x0A)
#define OP_3DSTATE_VF_STATISTICS OP_3D_MEDIA(0x3, 0x0, 0x0B)
#define OP_3DSTATE_VF OP_3D_MEDIA(0x3, 0x0, 0x0C) /* HSW+ */
#define OP_3DSTATE_CC_STATE_POINTERS OP_3D_MEDIA(0x3, 0x0, 0x0E)
#define OP_3DSTATE_SCISSOR_STATE_POINTERS OP_3D_MEDIA(0x3, 0x0, 0x0F)
#define OP_3DSTATE_VS OP_3D_MEDIA(0x3, 0x0, 0x10)
#define OP_3DSTATE_GS OP_3D_MEDIA(0x3, 0x0, 0x11)
#define OP_3DSTATE_CLIP OP_3D_MEDIA(0x3, 0x0, 0x12)
#define OP_3DSTATE_SF OP_3D_MEDIA(0x3, 0x0, 0x13)
#define OP_3DSTATE_WM OP_3D_MEDIA(0x3, 0x0, 0x14)
#define OP_3DSTATE_CONSTANT_VS OP_3D_MEDIA(0x3, 0x0, 0x15)
#define OP_3DSTATE_CONSTANT_GS OP_3D_MEDIA(0x3, 0x0, 0x16)
#define OP_3DSTATE_CONSTANT_PS OP_3D_MEDIA(0x3, 0x0, 0x17)
#define OP_3DSTATE_SAMPLE_MASK OP_3D_MEDIA(0x3, 0x0, 0x18)
#define OP_3DSTATE_CONSTANT_HS OP_3D_MEDIA(0x3, 0x0, 0x19) /* IVB+ */
#define OP_3DSTATE_CONSTANT_DS OP_3D_MEDIA(0x3, 0x0, 0x1A) /* IVB+ */
#define OP_3DSTATE_HS OP_3D_MEDIA(0x3, 0x0, 0x1B) /* IVB+ */
#define OP_3DSTATE_TE OP_3D_MEDIA(0x3, 0x0, 0x1C) /* IVB+ */
#define OP_3DSTATE_DS OP_3D_MEDIA(0x3, 0x0, 0x1D) /* IVB+ */
#define OP_3DSTATE_STREAMOUT OP_3D_MEDIA(0x3, 0x0, 0x1E) /* IVB+ */
#define OP_3DSTATE_SBE OP_3D_MEDIA(0x3, 0x0, 0x1F) /* IVB+ */
#define OP_3DSTATE_PS OP_3D_MEDIA(0x3, 0x0, 0x20) /* IVB+ */
#define OP_3DSTATE_VIEWPORT_STATE_POINTERS_SF_CLIP OP_3D_MEDIA(0x3, 0x0, 0x21) /* IVB+ */
#define OP_3DSTATE_VIEWPORT_STATE_POINTERS_CC OP_3D_MEDIA(0x3, 0x0, 0x23) /* IVB+ */
#define OP_3DSTATE_BLEND_STATE_POINTERS OP_3D_MEDIA(0x3, 0x0, 0x24) /* IVB+ */
#define OP_3DSTATE_DEPTH_STENCIL_STATE_POINTERS OP_3D_MEDIA(0x3, 0x0, 0x25) /* IVB+ */
#define OP_3DSTATE_BINDING_TABLE_POINTERS_VS OP_3D_MEDIA(0x3, 0x0, 0x26) /* IVB+ */
#define OP_3DSTATE_BINDING_TABLE_POINTERS_HS OP_3D_MEDIA(0x3, 0x0, 0x27) /* IVB+ */
#define OP_3DSTATE_BINDING_TABLE_POINTERS_DS OP_3D_MEDIA(0x3, 0x0, 0x28) /* IVB+ */
#define OP_3DSTATE_BINDING_TABLE_POINTERS_GS OP_3D_MEDIA(0x3, 0x0, 0x29) /* IVB+ */
#define OP_3DSTATE_BINDING_TABLE_POINTERS_PS OP_3D_MEDIA(0x3, 0x0, 0x2A) /* IVB+ */
#define OP_3DSTATE_SAMPLER_STATE_POINTERS_VS OP_3D_MEDIA(0x3, 0x0, 0x2B) /* IVB+ */
#define OP_3DSTATE_SAMPLER_STATE_POINTERS_HS OP_3D_MEDIA(0x3, 0x0, 0x2C) /* IVB+ */
#define OP_3DSTATE_SAMPLER_STATE_POINTERS_DS OP_3D_MEDIA(0x3, 0x0, 0x2D) /* IVB+ */
#define OP_3DSTATE_SAMPLER_STATE_POINTERS_GS OP_3D_MEDIA(0x3, 0x0, 0x2E) /* IVB+ */
#define OP_3DSTATE_SAMPLER_STATE_POINTERS_PS OP_3D_MEDIA(0x3, 0x0, 0x2F) /* IVB+ */
#define OP_3DSTATE_URB_VS OP_3D_MEDIA(0x3, 0x0, 0x30) /* IVB+ */
#define OP_3DSTATE_URB_HS OP_3D_MEDIA(0x3, 0x0, 0x31) /* IVB+ */
#define OP_3DSTATE_URB_DS OP_3D_MEDIA(0x3, 0x0, 0x32) /* IVB+ */
#define OP_3DSTATE_URB_GS OP_3D_MEDIA(0x3, 0x0, 0x33) /* IVB+ */
#define OP_3DSTATE_GATHER_CONSTANT_VS OP_3D_MEDIA(0x3, 0x0, 0x34) /* HSW+ */
#define OP_3DSTATE_GATHER_CONSTANT_GS OP_3D_MEDIA(0x3, 0x0, 0x35) /* HSW+ */
#define OP_3DSTATE_GATHER_CONSTANT_HS OP_3D_MEDIA(0x3, 0x0, 0x36) /* HSW+ */
#define OP_3DSTATE_GATHER_CONSTANT_DS OP_3D_MEDIA(0x3, 0x0, 0x37) /* HSW+ */
#define OP_3DSTATE_GATHER_CONSTANT_PS OP_3D_MEDIA(0x3, 0x0, 0x38) /* HSW+ */
#define OP_3DSTATE_DX9_CONSTANTF_VS OP_3D_MEDIA(0x3, 0x0, 0x39) /* HSW+ */
#define OP_3DSTATE_DX9_CONSTANTF_PS OP_3D_MEDIA(0x3, 0x0, 0x3A) /* HSW+ */
#define OP_3DSTATE_DX9_CONSTANTI_VS OP_3D_MEDIA(0x3, 0x0, 0x3B) /* HSW+ */
#define OP_3DSTATE_DX9_CONSTANTI_PS OP_3D_MEDIA(0x3, 0x0, 0x3C) /* HSW+ */
#define OP_3DSTATE_DX9_CONSTANTB_VS OP_3D_MEDIA(0x3, 0x0, 0x3D) /* HSW+ */
#define OP_3DSTATE_DX9_CONSTANTB_PS OP_3D_MEDIA(0x3, 0x0, 0x3E) /* HSW+ */
#define OP_3DSTATE_DX9_LOCAL_VALID_VS OP_3D_MEDIA(0x3, 0x0, 0x3F) /* HSW+ */
#define OP_3DSTATE_DX9_LOCAL_VALID_PS OP_3D_MEDIA(0x3, 0x0, 0x40) /* HSW+ */
#define OP_3DSTATE_DX9_GENERATE_ACTIVE_VS OP_3D_MEDIA(0x3, 0x0, 0x41) /* HSW+ */
#define OP_3DSTATE_DX9_GENERATE_ACTIVE_PS OP_3D_MEDIA(0x3, 0x0, 0x42) /* HSW+ */
#define OP_3DSTATE_BINDING_TABLE_EDIT_VS OP_3D_MEDIA(0x3, 0x0, 0x43) /* HSW+ */
#define OP_3DSTATE_BINDING_TABLE_EDIT_GS OP_3D_MEDIA(0x3, 0x0, 0x44) /* HSW+ */
#define OP_3DSTATE_BINDING_TABLE_EDIT_HS OP_3D_MEDIA(0x3, 0x0, 0x45) /* HSW+ */
#define OP_3DSTATE_BINDING_TABLE_EDIT_DS OP_3D_MEDIA(0x3, 0x0, 0x46) /* HSW+ */
#define OP_3DSTATE_BINDING_TABLE_EDIT_PS OP_3D_MEDIA(0x3, 0x0, 0x47) /* HSW+ */
#define OP_3DSTATE_VF_INSTANCING OP_3D_MEDIA(0x3, 0x0, 0x49) /* BDW+ */
#define OP_3DSTATE_VF_SGVS OP_3D_MEDIA(0x3, 0x0, 0x4A) /* BDW+ */
#define OP_3DSTATE_VF_TOPOLOGY OP_3D_MEDIA(0x3, 0x0, 0x4B) /* BDW+ */
#define OP_3DSTATE_WM_CHROMAKEY OP_3D_MEDIA(0x3, 0x0, 0x4C) /* BDW+ */
#define OP_3DSTATE_PS_BLEND OP_3D_MEDIA(0x3, 0x0, 0x4D) /* BDW+ */
#define OP_3DSTATE_WM_DEPTH_STENCIL OP_3D_MEDIA(0x3, 0x0, 0x4E) /* BDW+ */
#define OP_3DSTATE_PS_EXTRA OP_3D_MEDIA(0x3, 0x0, 0x4F) /* BDW+ */
#define OP_3DSTATE_RASTER OP_3D_MEDIA(0x3, 0x0, 0x50) /* BDW+ */
#define OP_3DSTATE_SBE_SWIZ OP_3D_MEDIA(0x3, 0x0, 0x51) /* BDW+ */
#define OP_3DSTATE_WM_HZ_OP OP_3D_MEDIA(0x3, 0x0, 0x52) /* BDW+ */
#define OP_3DSTATE_COMPONENT_PACKING OP_3D_MEDIA(0x3, 0x0, 0x55) /* SKL+ */
#define OP_3DSTATE_DRAWING_RECTANGLE OP_3D_MEDIA(0x3, 0x1, 0x00)
#define OP_3DSTATE_SAMPLER_PALETTE_LOAD0 OP_3D_MEDIA(0x3, 0x1, 0x02)
#define OP_3DSTATE_CHROMA_KEY OP_3D_MEDIA(0x3, 0x1, 0x04)
#define OP_SNB_3DSTATE_DEPTH_BUFFER OP_3D_MEDIA(0x3, 0x1, 0x05)
#define OP_3DSTATE_POLY_STIPPLE_OFFSET OP_3D_MEDIA(0x3, 0x1, 0x06)
#define OP_3DSTATE_POLY_STIPPLE_PATTERN OP_3D_MEDIA(0x3, 0x1, 0x07)
#define OP_3DSTATE_LINE_STIPPLE OP_3D_MEDIA(0x3, 0x1, 0x08)
#define OP_3DSTATE_AA_LINE_PARAMS OP_3D_MEDIA(0x3, 0x1, 0x0A)
#define OP_3DSTATE_GS_SVB_INDEX OP_3D_MEDIA(0x3, 0x1, 0x0B)
#define OP_3DSTATE_SAMPLER_PALETTE_LOAD1 OP_3D_MEDIA(0x3, 0x1, 0x0C)
#define OP_3DSTATE_MULTISAMPLE_BDW OP_3D_MEDIA(0x3, 0x0, 0x0D)
#define OP_SNB_3DSTATE_STENCIL_BUFFER OP_3D_MEDIA(0x3, 0x1, 0x0E)
#define OP_SNB_3DSTATE_HIER_DEPTH_BUFFER OP_3D_MEDIA(0x3, 0x1, 0x0F)
#define OP_SNB_3DSTATE_CLEAR_PARAMS OP_3D_MEDIA(0x3, 0x1, 0x10)
#define OP_3DSTATE_MONOFILTER_SIZE OP_3D_MEDIA(0x3, 0x1, 0x11)
#define OP_3DSTATE_PUSH_CONSTANT_ALLOC_VS OP_3D_MEDIA(0x3, 0x1, 0x12) /* IVB+ */
#define OP_3DSTATE_PUSH_CONSTANT_ALLOC_HS OP_3D_MEDIA(0x3, 0x1, 0x13) /* IVB+ */
#define OP_3DSTATE_PUSH_CONSTANT_ALLOC_DS OP_3D_MEDIA(0x3, 0x1, 0x14) /* IVB+ */
#define OP_3DSTATE_PUSH_CONSTANT_ALLOC_GS OP_3D_MEDIA(0x3, 0x1, 0x15) /* IVB+ */
#define OP_3DSTATE_PUSH_CONSTANT_ALLOC_PS OP_3D_MEDIA(0x3, 0x1, 0x16) /* IVB+ */
#define OP_3DSTATE_SO_DECL_LIST OP_3D_MEDIA(0x3, 0x1, 0x17)
#define OP_3DSTATE_SO_BUFFER OP_3D_MEDIA(0x3, 0x1, 0x18)
#define OP_3DSTATE_BINDING_TABLE_POOL_ALLOC OP_3D_MEDIA(0x3, 0x1, 0x19) /* HSW+ */
#define OP_3DSTATE_GATHER_POOL_ALLOC OP_3D_MEDIA(0x3, 0x1, 0x1A) /* HSW+ */
#define OP_3DSTATE_DX9_CONSTANT_BUFFER_POOL_ALLOC OP_3D_MEDIA(0x3, 0x1, 0x1B) /* HSW+ */
#define OP_3DSTATE_SAMPLE_PATTERN OP_3D_MEDIA(0x3, 0x1, 0x1C)
#define OP_PIPE_CONTROL OP_3D_MEDIA(0x3, 0x2, 0x00)
#define OP_3DPRIMITIVE OP_3D_MEDIA(0x3, 0x3, 0x00)
/* VCCP Command Parser */
/*
* The MFX and VBE cmd definitions below are from the vaapi intel-driver project (BSD License)
* git://anongit.freedesktop.org/vaapi/intel-driver
* src/i965_defines.h
*
*/
#define OP_MFX(pipeline, op, sub_opa, sub_opb) \
(3 << 13 | \
(pipeline) << 11 | \
(op) << 8 | \
(sub_opa) << 5 | \
(sub_opb))
#define OP_MFX_PIPE_MODE_SELECT OP_MFX(2, 0, 0, 0) /* ALL */
#define OP_MFX_SURFACE_STATE OP_MFX(2, 0, 0, 1) /* ALL */
#define OP_MFX_PIPE_BUF_ADDR_STATE OP_MFX(2, 0, 0, 2) /* ALL */
#define OP_MFX_IND_OBJ_BASE_ADDR_STATE OP_MFX(2, 0, 0, 3) /* ALL */
#define OP_MFX_BSP_BUF_BASE_ADDR_STATE OP_MFX(2, 0, 0, 4) /* ALL */
#define OP_2_0_0_5 OP_MFX(2, 0, 0, 5) /* ALL */
#define OP_MFX_STATE_POINTER OP_MFX(2, 0, 0, 6) /* ALL */
#define OP_MFX_QM_STATE OP_MFX(2, 0, 0, 7) /* IVB+ */
#define OP_MFX_FQM_STATE OP_MFX(2, 0, 0, 8) /* IVB+ */
#define OP_MFX_PAK_INSERT_OBJECT OP_MFX(2, 0, 2, 8) /* IVB+ */
#define OP_MFX_STITCH_OBJECT OP_MFX(2, 0, 2, 0xA) /* IVB+ */
#define OP_MFD_IT_OBJECT OP_MFX(2, 0, 1, 9) /* ALL */
#define OP_MFX_WAIT OP_MFX(1, 0, 0, 0) /* IVB+ */
#define OP_MFX_AVC_IMG_STATE OP_MFX(2, 1, 0, 0) /* ALL */
#define OP_MFX_AVC_QM_STATE OP_MFX(2, 1, 0, 1) /* ALL */
#define OP_MFX_AVC_DIRECTMODE_STATE OP_MFX(2, 1, 0, 2) /* ALL */
#define OP_MFX_AVC_SLICE_STATE OP_MFX(2, 1, 0, 3) /* ALL */
#define OP_MFX_AVC_REF_IDX_STATE OP_MFX(2, 1, 0, 4) /* ALL */
#define OP_MFX_AVC_WEIGHTOFFSET_STATE OP_MFX(2, 1, 0, 5) /* ALL */
#define OP_MFD_AVC_PICID_STATE OP_MFX(2, 1, 1, 5) /* HSW+ */
#define OP_MFD_AVC_DPB_STATE OP_MFX(2, 1, 1, 6) /* IVB+ */
#define OP_MFD_AVC_SLICEADDR OP_MFX(2, 1, 1, 7) /* IVB+ */
#define OP_MFD_AVC_BSD_OBJECT OP_MFX(2, 1, 1, 8) /* ALL */
#define OP_MFC_AVC_PAK_OBJECT OP_MFX(2, 1, 2, 9) /* ALL */
#define OP_MFX_VC1_PRED_PIPE_STATE OP_MFX(2, 2, 0, 1) /* ALL */
#define OP_MFX_VC1_DIRECTMODE_STATE OP_MFX(2, 2, 0, 2) /* ALL */
#define OP_MFD_VC1_SHORT_PIC_STATE OP_MFX(2, 2, 1, 0) /* IVB+ */
#define OP_MFD_VC1_LONG_PIC_STATE OP_MFX(2, 2, 1, 1) /* IVB+ */
#define OP_MFD_VC1_BSD_OBJECT OP_MFX(2, 2, 1, 8) /* ALL */
#define OP_MFX_MPEG2_PIC_STATE OP_MFX(2, 3, 0, 0) /* ALL */
#define OP_MFX_MPEG2_QM_STATE OP_MFX(2, 3, 0, 1) /* ALL */
#define OP_MFD_MPEG2_BSD_OBJECT OP_MFX(2, 3, 1, 8) /* ALL */
#define OP_MFC_MPEG2_SLICEGROUP_STATE OP_MFX(2, 3, 2, 3) /* ALL */
#define OP_MFC_MPEG2_PAK_OBJECT OP_MFX(2, 3, 2, 9) /* ALL */
#define OP_MFX_2_6_0_0 OP_MFX(2, 6, 0, 0) /* IVB+ */
#define OP_MFX_2_6_0_8 OP_MFX(2, 6, 0, 8) /* IVB+ */
#define OP_MFX_2_6_0_9 OP_MFX(2, 6, 0, 9) /* IVB+ */
#define OP_MFX_JPEG_PIC_STATE OP_MFX(2, 7, 0, 0)
#define OP_MFX_JPEG_HUFF_TABLE_STATE OP_MFX(2, 7, 0, 2)
#define OP_MFD_JPEG_BSD_OBJECT OP_MFX(2, 7, 1, 8)
#define OP_VEB(pipeline, op, sub_opa, sub_opb) \
(3 << 13 | \
(pipeline) << 11 | \
(op) << 8 | \
(sub_opa) << 5 | \
(sub_opb))
#define OP_VEB_SURFACE_STATE OP_VEB(2, 4, 0, 0)
#define OP_VEB_STATE OP_VEB(2, 4, 0, 2)
#define OP_VEB_DNDI_IECP_STATE OP_VEB(2, 4, 0, 3)
struct parser_exec_state;
typedef int (*parser_cmd_handler)(struct parser_exec_state *s);
#define GVT_CMD_HASH_BITS 7
/* which DWords need address fix */
#define ADDR_FIX_1(x1) (1 << (x1))
#define ADDR_FIX_2(x1, x2) (ADDR_FIX_1(x1) | ADDR_FIX_1(x2))
#define ADDR_FIX_3(x1, x2, x3) (ADDR_FIX_1(x1) | ADDR_FIX_2(x2, x3))
#define ADDR_FIX_4(x1, x2, x3, x4) (ADDR_FIX_1(x1) | ADDR_FIX_3(x2, x3, x4))
#define ADDR_FIX_5(x1, x2, x3, x4, x5) (ADDR_FIX_1(x1) | ADDR_FIX_4(x2, x3, x4, x5))
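/*
 * Example (illustrative): ADDR_FIX_2(1, 2) == 0x6, meaning dwords 1
 * and 2 of the command carry graphics addresses that the scanner has
 * to audit and fix up.
 */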
struct cmd_info {
char *name;
u32 opcode;
#define F_LEN_MASK (1U<<0)
#define F_LEN_CONST 1U
#define F_LEN_VAR 0U
/*
* the command has its own ip advance logic,
* e.g. MI_BATCH_BUFFER_START, MI_BATCH_BUFFER_END
*/
#define F_IP_ADVANCE_CUSTOM (1<<1)
#define F_POST_HANDLE (1<<2)
u32 flag;
#define R_RCS (1 << RCS)
#define R_VCS1 (1 << VCS)
#define R_VCS2 (1 << VCS2)
#define R_VCS (R_VCS1 | R_VCS2)
#define R_BCS (1 << BCS)
#define R_VECS (1 << VECS)
#define R_ALL (R_RCS | R_VCS | R_BCS | R_VECS)
/* rings that support this cmd: BLT/RCS/VCS/VECS */
uint16_t rings;
/* devices that support this cmd: SNB/IVB/HSW/... */
uint16_t devices;
/* which DWords are addresses that need to be fixed up.
 * bit 0 means a 32-bit non-address operand in the command
 * bit 1 means an address operand, which could be 32-bit
 * or 64-bit depending on the architecture (defined by
 * "gmadr_bytes_in_cmd" in intel_gvt).
 * No matter the address length, each address only takes
 * one bit in the bitmap.
 */
uint16_t addr_bitmap;
/* flag == F_LEN_CONST : command length
* flag == F_LEN_VAR : length bias bits
* Note: length is in DWord
*/
uint8_t len;
parser_cmd_handler handler;
};
struct cmd_entry {
struct hlist_node hlist;
struct cmd_info *info;
};
enum {
RING_BUFFER_INSTRUCTION,
BATCH_BUFFER_INSTRUCTION,
BATCH_BUFFER_2ND_LEVEL,
};
enum {
GTT_BUFFER,
PPGTT_BUFFER
};
struct parser_exec_state {
struct intel_vgpu *vgpu;
int ring_id;
int buf_type;
/* batch buffer address type */
int buf_addr_type;
/* graphics memory address of ring buffer start */
unsigned long ring_start;
unsigned long ring_size;
unsigned long ring_head;
unsigned long ring_tail;
/* instruction graphics memory address */
unsigned long ip_gma;
/* mapped va of the instr_gma */
void *ip_va;
void *rb_va;
void *ret_bb_va;
/* next instruction when return from batch buffer to ring buffer */
unsigned long ret_ip_gma_ring;
/* next instruction when return from 2nd batch buffer to batch buffer */
unsigned long ret_ip_gma_bb;
/* batch buffer address type (GTT or PPGTT)
* used when ret from 2nd level batch buffer
*/
int saved_buf_addr_type;
struct cmd_info *info;
struct intel_vgpu_workload *workload;
};
#define gmadr_dw_number(s) \
(s->vgpu->gvt->device_info.gmadr_bytes_in_cmd >> 2)
unsigned long bypass_scan_mask = 0;
bool bypass_batch_buffer_scan = true;
/* ring ALL, type = 0 */
static struct sub_op_bits sub_op_mi[] = {
{31, 29},
{28, 23},
};
static struct decode_info decode_info_mi = {
"MI",
OP_LEN_MI,
ARRAY_SIZE(sub_op_mi),
sub_op_mi,
};
/* ring RCS, command type 2 */
static struct sub_op_bits sub_op_2d[] = {
{31, 29},
{28, 22},
};
static struct decode_info decode_info_2d = {
"2D",
OP_LEN_2D,
ARRAY_SIZE(sub_op_2d),
sub_op_2d,
};
/* ring RCS, command type 3 */
static struct sub_op_bits sub_op_3d_media[] = {
{31, 29},
{28, 27},
{26, 24},
{23, 16},
};
static struct decode_info decode_info_3d_media = {
"3D_Media",
OP_LEN_3D_MEDIA,
ARRAY_SIZE(sub_op_3d_media),
sub_op_3d_media,
};
/* ring VCS, command type 3 */
static struct sub_op_bits sub_op_mfx_vc[] = {
{31, 29},
{28, 27},
{26, 24},
{23, 21},
{20, 16},
};
static struct decode_info decode_info_mfx_vc = {
"MFX_VC",
OP_LEN_MFX_VC,
ARRAY_SIZE(sub_op_mfx_vc),
sub_op_mfx_vc,
};
/* ring VECS, command type 3 */
static struct sub_op_bits sub_op_vebox[] = {
{31, 29},
{28, 27},
{26, 24},
{23, 21},
{20, 16},
};
static struct decode_info decode_info_vebox = {
"VEBOX",
OP_LEN_VEBOX,
ARRAY_SIZE(sub_op_vebox),
sub_op_vebox,
};
static struct decode_info *ring_decode_info[I915_NUM_ENGINES][8] = {
[RCS] = {
&decode_info_mi,
NULL,
NULL,
&decode_info_3d_media,
NULL,
NULL,
NULL,
NULL,
},
[VCS] = {
&decode_info_mi,
NULL,
NULL,
&decode_info_mfx_vc,
NULL,
NULL,
NULL,
NULL,
},
[BCS] = {
&decode_info_mi,
NULL,
&decode_info_2d,
NULL,
NULL,
NULL,
NULL,
NULL,
},
[VECS] = {
&decode_info_mi,
NULL,
NULL,
&decode_info_vebox,
NULL,
NULL,
NULL,
NULL,
},
[VCS2] = {
&decode_info_mi,
NULL,
NULL,
&decode_info_mfx_vc,
NULL,
NULL,
NULL,
NULL,
},
};
static inline u32 get_opcode(u32 cmd, int ring_id)
{
struct decode_info *d_info;
if (ring_id >= I915_NUM_ENGINES)
return INVALID_OP;
d_info = ring_decode_info[ring_id][CMD_TYPE(cmd)];
if (d_info == NULL)
return INVALID_OP;
return cmd >> (32 - d_info->op_len);
}
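/*
 * Illustrative example: for an MI command (op_len == OP_LEN_MI == 9)
 * this returns cmd >> 23, i.e. bits 31:23: the command type in 31:29
 * (0 for MI) plus the MI opcode in 28:23, matching the OP_MI_* values
 * above.
 */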
static inline struct cmd_info *find_cmd_entry(struct intel_gvt *gvt,
unsigned int opcode, int ring_id)
{
struct cmd_entry *e;
hash_for_each_possible(gvt->cmd_table, e, hlist, opcode) {
if ((opcode == e->info->opcode) &&
(e->info->rings & (1 << ring_id)))
return e->info;
}
return NULL;
}
static inline struct cmd_info *get_cmd_info(struct intel_gvt *gvt,
u32 cmd, int ring_id)
{
u32 opcode;
opcode = get_opcode(cmd, ring_id);
if (opcode == INVALID_OP)
return NULL;
return find_cmd_entry(gvt, opcode, ring_id);
}
static inline u32 sub_op_val(u32 cmd, u32 hi, u32 low)
{
return (cmd >> low) & ((1U << (hi - low + 1)) - 1);
}
static inline void print_opcode(u32 cmd, int ring_id)
{
struct decode_info *d_info;
int i;
if (ring_id >= I915_NUM_ENGINES)
return;
d_info = ring_decode_info[ring_id][CMD_TYPE(cmd)];
if (d_info == NULL)
return;
gvt_err("opcode=0x%x %s sub_ops:",
cmd >> (32 - d_info->op_len), d_info->name);
for (i = 0; i < d_info->nr_sub_op; i++)
pr_err("0x%x ", sub_op_val(cmd, d_info->sub_op[i].hi,
d_info->sub_op[i].low));
pr_err("\n");
}
static inline u32 *cmd_ptr(struct parser_exec_state *s, int index)
{
return s->ip_va + (index << 2);
}
static inline u32 cmd_val(struct parser_exec_state *s, int index)
{
return *cmd_ptr(s, index);
}
static void parser_exec_state_dump(struct parser_exec_state *s)
{
int cnt = 0;
int i;
gvt_err(" vgpu%d RING%d: ring_start(%08lx) ring_end(%08lx)"
" ring_head(%08lx) ring_tail(%08lx)\n", s->vgpu->id,
s->ring_id, s->ring_start, s->ring_start + s->ring_size,
s->ring_head, s->ring_tail);
gvt_err(" %s %s ip_gma(%08lx) ",
s->buf_type == RING_BUFFER_INSTRUCTION ?
"RING_BUFFER" : "BATCH_BUFFER",
s->buf_addr_type == GTT_BUFFER ?
"GTT" : "PPGTT", s->ip_gma);
if (s->ip_va == NULL) {
gvt_err(" ip_va(NULL)");
return;
}
gvt_err(" ip_va=%p: %08x %08x %08x %08x\n",
s->ip_va, cmd_val(s, 0), cmd_val(s, 1),
cmd_val(s, 2), cmd_val(s, 3));
print_opcode(cmd_val(s, 0), s->ring_id);
/* print the whole page to trace */
pr_err(" ip_va=%p: %08x %08x %08x %08x\n",
s->ip_va, cmd_val(s, 0), cmd_val(s, 1),
cmd_val(s, 2), cmd_val(s, 3));
s->ip_va = (u32 *)((((u64)s->ip_va) >> 12) << 12);
while (cnt < 1024) {
pr_err("ip_va=%p: ", s->ip_va);
for (i = 0; i < 8; i++)
pr_err("%08x ", cmd_val(s, i));
pr_err("\n");
s->ip_va += 8 * sizeof(u32);
cnt += 8;
}
}
static inline void update_ip_va(struct parser_exec_state *s)
{
unsigned long len = 0;
if (WARN_ON(s->ring_head == s->ring_tail))
return;
if (s->buf_type == RING_BUFFER_INSTRUCTION) {
unsigned long ring_top = s->ring_start + s->ring_size;
if (s->ring_head > s->ring_tail) {
if (s->ip_gma >= s->ring_head && s->ip_gma < ring_top)
len = (s->ip_gma - s->ring_head);
else if (s->ip_gma >= s->ring_start &&
s->ip_gma <= s->ring_tail)
len = (ring_top - s->ring_head) +
(s->ip_gma - s->ring_start);
} else
len = (s->ip_gma - s->ring_head);
s->ip_va = s->rb_va + len;
} else {/* shadow batch buffer */
s->ip_va = s->ret_bb_va;
}
}
static inline int ip_gma_set(struct parser_exec_state *s,
unsigned long ip_gma)
{
WARN_ON(!IS_ALIGNED(ip_gma, 4));
s->ip_gma = ip_gma;
update_ip_va(s);
return 0;
}
static inline int ip_gma_advance(struct parser_exec_state *s,
unsigned int dw_len)
{
s->ip_gma += (dw_len << 2);
if (s->buf_type == RING_BUFFER_INSTRUCTION) {
if (s->ip_gma >= s->ring_start + s->ring_size)
s->ip_gma -= s->ring_size;
update_ip_va(s);
} else {
s->ip_va += (dw_len << 2);
}
return 0;
}
static inline int get_cmd_length(struct cmd_info *info, u32 cmd)
{
if ((info->flag & F_LEN_MASK) == F_LEN_CONST)
return info->len;
else
return (cmd & ((1U << info->len) - 1)) + 2;
}
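/*
 * Illustrative example: for an F_LEN_VAR command whose cmd_info.len
 * is 8 (the number of length bias bits), the total length is
 * (cmd & 0xff) + 2 dwords; for an F_LEN_CONST command, cmd_info.len
 * is simply the fixed dword count.
 */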
static inline int cmd_length(struct parser_exec_state *s)
{
return get_cmd_length(s->info, cmd_val(s, 0));
}
/* do not remove this, some platforms may need clflush here */
#define patch_value(s, addr, val) do { \
*addr = val; \
} while (0)
static bool is_shadowed_mmio(unsigned int offset)
{
bool ret = false;
if ((offset == 0x2168) || /*BB current head register UDW */
(offset == 0x2140) || /*BB current head register */
(offset == 0x211c) || /*second BB head register UDW */
(offset == 0x2114)) { /*second BB head register */
ret = true;
}
return ret;
}
static int cmd_reg_handler(struct parser_exec_state *s,
unsigned int offset, unsigned int index, char *cmd)
{
struct intel_vgpu *vgpu = s->vgpu;
struct intel_gvt *gvt = vgpu->gvt;
if (offset + 4 > gvt->device_info.mmio_size) {
gvt_err("%s access to (%x) outside of MMIO range\n",
cmd, offset);
return -EINVAL;
}
if (!intel_gvt_mmio_is_cmd_access(gvt, offset)) {
gvt_err("vgpu%d: %s access to non-render register (%x)\n",
s->vgpu->id, cmd, offset);
return 0;
}
if (is_shadowed_mmio(offset)) {
gvt_err("vgpu%d: found access of shadowed MMIO %x\n",
s->vgpu->id, offset);
return 0;
}
if (offset == i915_mmio_reg_offset(DERRMR) ||
offset == i915_mmio_reg_offset(FORCEWAKE_MT)) {
/* Writing to HW VGT_PVINFO_PAGE offset will be discarded */
patch_value(s, cmd_ptr(s, index), VGT_PVINFO_PAGE);
}
/* TODO: Update the global mask if this MMIO is a masked-MMIO */
intel_gvt_mmio_set_cmd_accessed(gvt, offset);
return 0;
}
#define cmd_reg(s, i) \
(cmd_val(s, i) & GENMASK(22, 2))
#define cmd_reg_inhibit(s, i) \
(cmd_val(s, i) & GENMASK(22, 18))
#define cmd_gma(s, i) \
(cmd_val(s, i) & GENMASK(31, 2))
#define cmd_gma_hi(s, i) \
(cmd_val(s, i) & GENMASK(15, 0))
static int cmd_handler_lri(struct parser_exec_state *s)
{
int i, ret = 0;
int cmd_len = cmd_length(s);
struct intel_gvt *gvt = s->vgpu->gvt;
for (i = 1; i < cmd_len; i += 2) {
if (IS_BROADWELL(gvt->dev_priv) &&
(s->ring_id != RCS)) {
if (s->ring_id == BCS &&
cmd_reg(s, i) ==
i915_mmio_reg_offset(DERRMR))
ret |= 0;
else
ret |= (cmd_reg_inhibit(s, i)) ? -EINVAL : 0;
}
if (ret)
break;
ret |= cmd_reg_handler(s, cmd_reg(s, i), i, "lri");
}
return ret;
}
static int cmd_handler_lrr(struct parser_exec_state *s)
{
int i, ret = 0;
int cmd_len = cmd_length(s);
for (i = 1; i < cmd_len; i += 2) {
if (IS_BROADWELL(s->vgpu->gvt->dev_priv))
ret |= ((cmd_reg_inhibit(s, i) ||
(cmd_reg_inhibit(s, i + 1)))) ?
-EINVAL : 0;
if (ret)
break;
ret |= cmd_reg_handler(s, cmd_reg(s, i), i, "lrr-src");
ret |= cmd_reg_handler(s, cmd_reg(s, i + 1), i, "lrr-dst");
}
return ret;
}
static inline int cmd_address_audit(struct parser_exec_state *s,
unsigned long guest_gma, int op_size, bool index_mode);
static int cmd_handler_lrm(struct parser_exec_state *s)
{
struct intel_gvt *gvt = s->vgpu->gvt;
int gmadr_bytes = gvt->device_info.gmadr_bytes_in_cmd;
unsigned long gma;
int i, ret = 0;
int cmd_len = cmd_length(s);
for (i = 1; i < cmd_len;) {
if (IS_BROADWELL(gvt->dev_priv))
ret |= (cmd_reg_inhibit(s, i)) ? -EINVAL : 0;
if (ret)
break;
ret |= cmd_reg_handler(s, cmd_reg(s, i), i, "lrm");
if (cmd_val(s, 0) & (1 << 22)) {
gma = cmd_gma(s, i + 1);
if (gmadr_bytes == 8)
gma |= (cmd_gma_hi(s, i + 2)) << 32;
ret |= cmd_address_audit(s, gma, sizeof(u32), false);
}
i += gmadr_dw_number(s) + 1;
}
return ret;
}
static int cmd_handler_srm(struct parser_exec_state *s)
{
int gmadr_bytes = s->vgpu->gvt->device_info.gmadr_bytes_in_cmd;
unsigned long gma;
int i, ret = 0;
int cmd_len = cmd_length(s);
for (i = 1; i < cmd_len;) {
ret |= cmd_reg_handler(s, cmd_reg(s, i), i, "srm");
if (cmd_val(s, 0) & (1 << 22)) {
gma = cmd_gma(s, i + 1);
if (gmadr_bytes == 8)
gma |= (cmd_gma_hi(s, i + 2)) << 32;
ret |= cmd_address_audit(s, gma, sizeof(u32), false);
}
i += gmadr_dw_number(s) + 1;
}
return ret;
}
struct cmd_interrupt_event {
int pipe_control_notify;
int mi_flush_dw;
int mi_user_interrupt;
};
struct cmd_interrupt_event cmd_interrupt_events[] = {
[RCS] = {
.pipe_control_notify = RCS_PIPE_CONTROL,
.mi_flush_dw = INTEL_GVT_EVENT_RESERVED,
.mi_user_interrupt = RCS_MI_USER_INTERRUPT,
},
[BCS] = {
.pipe_control_notify = INTEL_GVT_EVENT_RESERVED,
.mi_flush_dw = BCS_MI_FLUSH_DW,
.mi_user_interrupt = BCS_MI_USER_INTERRUPT,
},
[VCS] = {
.pipe_control_notify = INTEL_GVT_EVENT_RESERVED,
.mi_flush_dw = VCS_MI_FLUSH_DW,
.mi_user_interrupt = VCS_MI_USER_INTERRUPT,
},
[VCS2] = {
.pipe_control_notify = INTEL_GVT_EVENT_RESERVED,
.mi_flush_dw = VCS2_MI_FLUSH_DW,
.mi_user_interrupt = VCS2_MI_USER_INTERRUPT,
},
[VECS] = {
.pipe_control_notify = INTEL_GVT_EVENT_RESERVED,
.mi_flush_dw = VECS_MI_FLUSH_DW,
.mi_user_interrupt = VECS_MI_USER_INTERRUPT,
},
};
static int cmd_handler_pipe_control(struct parser_exec_state *s)
{
int gmadr_bytes = s->vgpu->gvt->device_info.gmadr_bytes_in_cmd;
unsigned long gma;
bool index_mode = false;
unsigned int post_sync;
int ret = 0;
post_sync = (cmd_val(s, 1) & PIPE_CONTROL_POST_SYNC_OP_MASK) >> 14;
/* LRI post sync */
if (cmd_val(s, 1) & PIPE_CONTROL_MMIO_WRITE)
ret = cmd_reg_handler(s, cmd_reg(s, 2), 1, "pipe_ctrl");
/* post sync */
else if (post_sync) {
if (post_sync == 2)
ret = cmd_reg_handler(s, 0x2350, 1, "pipe_ctrl");
else if (post_sync == 3)
ret = cmd_reg_handler(s, 0x2358, 1, "pipe_ctrl");
else if (post_sync == 1) {
/* check ggtt */
if ((cmd_val(s, 2) & (1 << 2))) {
gma = cmd_val(s, 2) & GENMASK(31, 3);
if (gmadr_bytes == 8)
gma |= (cmd_gma_hi(s, 3)) << 32;
/* Store Data Index */
if (cmd_val(s, 1) & (1 << 21))
index_mode = true;
ret |= cmd_address_audit(s, gma, sizeof(u64),
index_mode);
}
}
}
if (ret)
return ret;
if (cmd_val(s, 1) & PIPE_CONTROL_NOTIFY)
set_bit(cmd_interrupt_events[s->ring_id].pipe_control_notify,
s->workload->pending_events);
return 0;
}
static int cmd_handler_mi_user_interrupt(struct parser_exec_state *s)
{
set_bit(cmd_interrupt_events[s->ring_id].mi_user_interrupt,
s->workload->pending_events);
return 0;
}
static int cmd_advance_default(struct parser_exec_state *s)
{
return ip_gma_advance(s, cmd_length(s));
}
static int cmd_handler_mi_batch_buffer_end(struct parser_exec_state *s)
{
int ret;
if (s->buf_type == BATCH_BUFFER_2ND_LEVEL) {
s->buf_type = BATCH_BUFFER_INSTRUCTION;
ret = ip_gma_set(s, s->ret_ip_gma_bb);
s->buf_addr_type = s->saved_buf_addr_type;
} else {
s->buf_type = RING_BUFFER_INSTRUCTION;
s->buf_addr_type = GTT_BUFFER;
if (s->ret_ip_gma_ring >= s->ring_start + s->ring_size)
s->ret_ip_gma_ring -= s->ring_size;
ret = ip_gma_set(s, s->ret_ip_gma_ring);
}
return ret;
}
struct mi_display_flip_command_info {
int pipe;
int plane;
int event;
i915_reg_t stride_reg;
i915_reg_t ctrl_reg;
i915_reg_t surf_reg;
u64 stride_val;
u64 tile_val;
u64 surf_val;
bool async_flip;
};
struct plane_code_mapping {
int pipe;
int plane;
int event;
};
static int gen8_decode_mi_display_flip(struct parser_exec_state *s,
struct mi_display_flip_command_info *info)
{
struct drm_i915_private *dev_priv = s->vgpu->gvt->dev_priv;
struct plane_code_mapping gen8_plane_code[] = {
[0] = {PIPE_A, PLANE_A, PRIMARY_A_FLIP_DONE},
[1] = {PIPE_B, PLANE_A, PRIMARY_B_FLIP_DONE},
[2] = {PIPE_A, PLANE_B, SPRITE_A_FLIP_DONE},
[3] = {PIPE_B, PLANE_B, SPRITE_B_FLIP_DONE},
[4] = {PIPE_C, PLANE_A, PRIMARY_C_FLIP_DONE},
[5] = {PIPE_C, PLANE_B, SPRITE_C_FLIP_DONE},
};
u32 dword0, dword1, dword2;
u32 v;
dword0 = cmd_val(s, 0);
dword1 = cmd_val(s, 1);
dword2 = cmd_val(s, 2);
v = (dword0 & GENMASK(21, 19)) >> 19;
if (WARN_ON(v >= ARRAY_SIZE(gen8_plane_code)))
return -EINVAL;
info->pipe = gen8_plane_code[v].pipe;
info->plane = gen8_plane_code[v].plane;
info->event = gen8_plane_code[v].event;
info->stride_val = (dword1 & GENMASK(15, 6)) >> 6;
info->tile_val = (dword1 & 0x1);
info->surf_val = (dword2 & GENMASK(31, 12)) >> 12;
info->async_flip = ((dword2 & GENMASK(1, 0)) == 0x1);
if (info->plane == PLANE_A) {
info->ctrl_reg = DSPCNTR(info->pipe);
info->stride_reg = DSPSTRIDE(info->pipe);
info->surf_reg = DSPSURF(info->pipe);
} else if (info->plane == PLANE_B) {
info->ctrl_reg = SPRCTL(info->pipe);
info->stride_reg = SPRSTRIDE(info->pipe);
info->surf_reg = SPRSURF(info->pipe);
} else {
WARN_ON(1);
return -EINVAL;
}
return 0;
}
static int skl_decode_mi_display_flip(struct parser_exec_state *s,
struct mi_display_flip_command_info *info)
{
struct drm_i915_private *dev_priv = s->vgpu->gvt->dev_priv;
u32 dword0 = cmd_val(s, 0);
u32 dword1 = cmd_val(s, 1);
u32 dword2 = cmd_val(s, 2);
u32 plane = (dword0 & GENMASK(12, 8)) >> 8;
switch (plane) {
case MI_DISPLAY_FLIP_SKL_PLANE_1_A:
info->pipe = PIPE_A;
info->event = PRIMARY_A_FLIP_DONE;
break;
case MI_DISPLAY_FLIP_SKL_PLANE_1_B:
info->pipe = PIPE_B;
info->event = PRIMARY_B_FLIP_DONE;
break;
case MI_DISPLAY_FLIP_SKL_PLANE_1_C:
info->pipe = PIPE_C;
info->event = PRIMARY_C_FLIP_DONE;
break;
default:
gvt_err("unknown plane code %d\n", plane);
return -EINVAL;
}
info->plane = PRIMARY_PLANE;
info->stride_val = (dword1 & GENMASK(15, 6)) >> 6;
info->tile_val = (dword1 & GENMASK(2, 0));
info->surf_val = (dword2 & GENMASK(31, 12)) >> 12;
info->async_flip = ((dword2 & GENMASK(1, 0)) == 0x1);
info->ctrl_reg = DSPCNTR(info->pipe);
info->stride_reg = DSPSTRIDE(info->pipe);
info->surf_reg = DSPSURF(info->pipe);
return 0;
}
static int gen8_check_mi_display_flip(struct parser_exec_state *s,
struct mi_display_flip_command_info *info)
{
struct drm_i915_private *dev_priv = s->vgpu->gvt->dev_priv;
u32 stride, tile;
if (!info->async_flip)
return 0;
if (IS_SKYLAKE(dev_priv)) {
stride = vgpu_vreg(s->vgpu, info->stride_reg) & GENMASK(9, 0);
tile = (vgpu_vreg(s->vgpu, info->ctrl_reg) &
GENMASK(12, 10)) >> 10;
} else {
stride = (vgpu_vreg(s->vgpu, info->stride_reg) &
GENMASK(15, 6)) >> 6;
tile = (vgpu_vreg(s->vgpu, info->ctrl_reg) & (1 << 10)) >> 10;
}
if (stride != info->stride_val)
gvt_dbg_cmd("cannot change stride during async flip\n");
if (tile != info->tile_val)
gvt_dbg_cmd("cannot change tile during async flip\n");
return 0;
}
static int gen8_update_plane_mmio_from_mi_display_flip(
struct parser_exec_state *s,
struct mi_display_flip_command_info *info)
{
struct drm_i915_private *dev_priv = s->vgpu->gvt->dev_priv;
struct intel_vgpu *vgpu = s->vgpu;
#define write_bits(reg, e, s, v) do { \
vgpu_vreg(vgpu, reg) &= ~GENMASK(e, s); \
vgpu_vreg(vgpu, reg) |= (v << s); \
} while (0)
write_bits(info->surf_reg, 31, 12, info->surf_val);
if (IS_SKYLAKE(dev_priv))
write_bits(info->stride_reg, 9, 0, info->stride_val);
else
write_bits(info->stride_reg, 15, 6, info->stride_val);
write_bits(info->ctrl_reg, IS_SKYLAKE(dev_priv) ? 12 : 10,
10, info->tile_val);
#undef write_bits
vgpu_vreg(vgpu, PIPE_FRMCOUNT_G4X(info->pipe))++;
intel_vgpu_trigger_virtual_event(vgpu, info->event);
return 0;
}
static int decode_mi_display_flip(struct parser_exec_state *s,
struct mi_display_flip_command_info *info)
{
struct drm_i915_private *dev_priv = s->vgpu->gvt->dev_priv;
if (IS_BROADWELL(dev_priv))
return gen8_decode_mi_display_flip(s, info);
if (IS_SKYLAKE(dev_priv))
return skl_decode_mi_display_flip(s, info);
return -ENODEV;
}
static int check_mi_display_flip(struct parser_exec_state *s,
struct mi_display_flip_command_info *info)
{
struct drm_i915_private *dev_priv = s->vgpu->gvt->dev_priv;
if (IS_BROADWELL(dev_priv) || IS_SKYLAKE(dev_priv))
return gen8_check_mi_display_flip(s, info);
return -ENODEV;
}
static int update_plane_mmio_from_mi_display_flip(
struct parser_exec_state *s,
struct mi_display_flip_command_info *info)
{
struct drm_i915_private *dev_priv = s->vgpu->gvt->dev_priv;
if (IS_BROADWELL(dev_priv) || IS_SKYLAKE(dev_priv))
return gen8_update_plane_mmio_from_mi_display_flip(s, info);
return -ENODEV;
}
static int cmd_handler_mi_display_flip(struct parser_exec_state *s)
{
struct mi_display_flip_command_info info;
int ret;
int i;
int len = cmd_length(s);
ret = decode_mi_display_flip(s, &info);
if (ret) {
gvt_err("fail to decode MI display flip command\n");
return ret;
}
ret = check_mi_display_flip(s, &info);
if (ret) {
gvt_err("invalid MI display flip command\n");
return ret;
}
ret = update_plane_mmio_from_mi_display_flip(s, &info);
if (ret) {
gvt_err("fail to update plane mmio\n");
return ret;
}
for (i = 0; i < len; i++)
patch_value(s, cmd_ptr(s, i), MI_NOOP);
return 0;
}
static bool is_wait_for_flip_pending(u32 cmd)
{
return cmd & (MI_WAIT_FOR_PLANE_A_FLIP_PENDING |
MI_WAIT_FOR_PLANE_B_FLIP_PENDING |
MI_WAIT_FOR_PLANE_C_FLIP_PENDING |
MI_WAIT_FOR_SPRITE_A_FLIP_PENDING |
MI_WAIT_FOR_SPRITE_B_FLIP_PENDING |
MI_WAIT_FOR_SPRITE_C_FLIP_PENDING);
}
static int cmd_handler_mi_wait_for_event(struct parser_exec_state *s)
{
u32 cmd = cmd_val(s, 0);
if (!is_wait_for_flip_pending(cmd))
return 0;
patch_value(s, cmd_ptr(s, 0), MI_NOOP);
return 0;
}
static unsigned long get_gma_bb_from_cmd(struct parser_exec_state *s, int index)
{
unsigned long addr;
unsigned long gma_high, gma_low;
int gmadr_bytes = s->vgpu->gvt->device_info.gmadr_bytes_in_cmd;
if (WARN_ON(gmadr_bytes != 4 && gmadr_bytes != 8))
return INTEL_GVT_INVALID_ADDR;
gma_low = cmd_val(s, index) & BATCH_BUFFER_ADDR_MASK;
if (gmadr_bytes == 4) {
addr = gma_low;
} else {
gma_high = cmd_val(s, index + 1) & BATCH_BUFFER_ADDR_HIGH_MASK;
addr = (((unsigned long)gma_high) << 32) | gma_low;
}
return addr;
}
static inline int cmd_address_audit(struct parser_exec_state *s,
unsigned long guest_gma, int op_size, bool index_mode)
{
struct intel_vgpu *vgpu = s->vgpu;
u32 max_surface_size = vgpu->gvt->device_info.max_surface_size;
int i;
int ret;
if (op_size > max_surface_size) {
gvt_err("command address audit fail name %s\n", s->info->name);
return -EINVAL;
}
if (index_mode) {
if (guest_gma >= GTT_PAGE_SIZE / sizeof(u64)) {
ret = -EINVAL;
goto err;
}
} else if ((!vgpu_gmadr_is_valid(s->vgpu, guest_gma)) ||
(!vgpu_gmadr_is_valid(s->vgpu,
guest_gma + op_size - 1))) {
ret = -EINVAL;
goto err;
}
return 0;
err:
gvt_err("cmd_parser: Malicious %s detected, addr=0x%lx, len=%d!\n",
s->info->name, guest_gma, op_size);
pr_err("cmd dump: ");
for (i = 0; i < cmd_length(s); i++) {
if (!(i % 4))
pr_err("\n%08x ", cmd_val(s, i));
else
pr_err("%08x ", cmd_val(s, i));
}
pr_err("\nvgpu%d: aperture 0x%llx - 0x%llx, hidden 0x%llx - 0x%llx\n",
vgpu->id,
vgpu_aperture_gmadr_base(vgpu),
vgpu_aperture_gmadr_end(vgpu),
vgpu_hidden_gmadr_base(vgpu),
vgpu_hidden_gmadr_end(vgpu));
return ret;
}
static int cmd_handler_mi_store_data_imm(struct parser_exec_state *s)
{
int gmadr_bytes = s->vgpu->gvt->device_info.gmadr_bytes_in_cmd;
int op_size = (cmd_length(s) - 3) * sizeof(u32);
int core_id = (cmd_val(s, 2) & (1 << 0)) ? 1 : 0;
unsigned long gma, gma_low, gma_high;
int ret = 0;
/* check ppgtt */
if (!(cmd_val(s, 0) & (1 << 22)))
return 0;
gma = cmd_val(s, 2) & GENMASK(31, 2);
if (gmadr_bytes == 8) {
gma_low = cmd_val(s, 1) & GENMASK(31, 2);
gma_high = cmd_val(s, 2) & GENMASK(15, 0);
gma = (gma_high << 32) | gma_low;
core_id = (cmd_val(s, 1) & (1 << 0)) ? 1 : 0;
}
ret = cmd_address_audit(s, gma + op_size * core_id, op_size, false);
return ret;
}
static inline int unexpected_cmd(struct parser_exec_state *s)
{
gvt_err("vgpu%d: Unexpected %s in command buffer!\n",
s->vgpu->id, s->info->name);
return -EINVAL;
}
static int cmd_handler_mi_semaphore_wait(struct parser_exec_state *s)
{
return unexpected_cmd(s);
}
static int cmd_handler_mi_report_perf_count(struct parser_exec_state *s)
{
return unexpected_cmd(s);
}
static int cmd_handler_mi_op_2e(struct parser_exec_state *s)
{
return unexpected_cmd(s);
}
static int cmd_handler_mi_op_2f(struct parser_exec_state *s)
{
int gmadr_bytes = s->vgpu->gvt->device_info.gmadr_bytes_in_cmd;
int op_size = ((1 << (cmd_val(s, 0) & GENMASK(20, 19) >> 19)) *
sizeof(u32));
unsigned long gma, gma_high;
int ret = 0;
if (!(cmd_val(s, 0) & (1 << 22)))
return ret;
gma = cmd_val(s, 1) & GENMASK(31, 2);
if (gmadr_bytes == 8) {
gma_high = cmd_val(s, 2) & GENMASK(15, 0);
gma = (gma_high << 32) | gma;
}
ret = cmd_address_audit(s, gma, op_size, false);
return ret;
}
static int cmd_handler_mi_store_data_index(struct parser_exec_state *s)
{
return unexpected_cmd(s);
}
static int cmd_handler_mi_clflush(struct parser_exec_state *s)
{
return unexpected_cmd(s);
}
static int cmd_handler_mi_conditional_batch_buffer_end(
struct parser_exec_state *s)
{
return unexpected_cmd(s);
}
static int cmd_handler_mi_update_gtt(struct parser_exec_state *s)
{
return unexpected_cmd(s);
}
static int cmd_handler_mi_flush_dw(struct parser_exec_state *s)
{
int gmadr_bytes = s->vgpu->gvt->device_info.gmadr_bytes_in_cmd;
unsigned long gma;
bool index_mode = false;
int ret = 0;
/* Check post-sync and ppgtt bit */
if (((cmd_val(s, 0) >> 14) & 0x3) && (cmd_val(s, 1) & (1 << 2))) {
gma = cmd_val(s, 1) & GENMASK(31, 3);
if (gmadr_bytes == 8)
gma |= (cmd_val(s, 2) & GENMASK(15, 0)) << 32;
/* Store Data Index */
if (cmd_val(s, 0) & (1 << 21))
index_mode = true;
ret = cmd_address_audit(s, gma, sizeof(u64), index_mode);
}
/* Check notify bit */
if ((cmd_val(s, 0) & (1 << 8)))
set_bit(cmd_interrupt_events[s->ring_id].mi_flush_dw,
s->workload->pending_events);
return ret;
}
static void addr_type_update_snb(struct parser_exec_state *s)
{
if ((s->buf_type == RING_BUFFER_INSTRUCTION) &&
(BATCH_BUFFER_ADR_SPACE_BIT(cmd_val(s, 0)) == 1)) {
s->buf_addr_type = PPGTT_BUFFER;
}
}
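/*
 * Copy [gma, end_gma) from guest graphics memory into a host buffer at
 * va, translating each page through the given mm and reading the data
 * via the hypervisor interface.
 */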
static int copy_gma_to_hva(struct intel_vgpu *vgpu, struct intel_vgpu_mm *mm,
unsigned long gma, unsigned long end_gma, void *va)
{
unsigned long copy_len, offset;
unsigned long len = 0;
unsigned long gpa;
while (gma != end_gma) {
gpa = intel_vgpu_gma_to_gpa(mm, gma);
if (gpa == INTEL_GVT_INVALID_ADDR) {
gvt_err("invalid gma address: %lx\n", gma);
return -EFAULT;
}
offset = gma & (GTT_PAGE_SIZE - 1);
copy_len = (end_gma - gma) >= (GTT_PAGE_SIZE - offset) ?
GTT_PAGE_SIZE - offset : end_gma - gma;
intel_gvt_hypervisor_read_gpa(vgpu, gpa, va + len, copy_len);
len += copy_len;
gma += copy_len;
}
return 0;
}
/*
 * Check whether a batch buffer needs to be scanned. Currently
 * the only criterion is privilege.
 */
static int batch_buffer_needs_scan(struct parser_exec_state *s)
{
struct intel_gvt *gvt = s->vgpu->gvt;
if (bypass_batch_buffer_scan)
return 0;
if (IS_BROADWELL(gvt->dev_priv) || IS_SKYLAKE(gvt->dev_priv)) {
/* BDW decides privilege based on address space */
if (cmd_val(s, 0) & (1 << 8))
return 0;
}
return 1;
}
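/*
 * Walk the guest batch buffer from its start GMA, command by command,
 * until MI_BATCH_BUFFER_END (or a chained MI_BATCH_BUFFER_START) is
 * found, and return the accumulated size in bytes.
 */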
static uint32_t find_bb_size(struct parser_exec_state *s)
{
unsigned long gma = 0;
struct cmd_info *info;
uint32_t bb_size = 0;
uint32_t cmd_len = 0;
bool met_bb_end = false;
u32 cmd;
/* get the start gm address of the batch buffer */
gma = get_gma_bb_from_cmd(s, 1);
cmd = cmd_val(s, 0);
info = get_cmd_info(s->vgpu->gvt, cmd, s->ring_id);
if (info == NULL) {
gvt_err("unknown cmd 0x%x, opcode=0x%x\n",
cmd, get_opcode(cmd, s->ring_id));
return -EINVAL;
}
do {
copy_gma_to_hva(s->vgpu, s->vgpu->gtt.ggtt_mm,
gma, gma + 4, &cmd);
info = get_cmd_info(s->vgpu->gvt, cmd, s->ring_id);
if (info == NULL) {
gvt_err("unknown cmd 0x%x, opcode=0x%x\n",
cmd, get_opcode(cmd, s->ring_id));
return -EINVAL;
}
if (info->opcode == OP_MI_BATCH_BUFFER_END) {
met_bb_end = true;
} else if (info->opcode == OP_MI_BATCH_BUFFER_START) {
if (BATCH_BUFFER_2ND_LEVEL_BIT(cmd) == 0) {
/* chained batch buffer */
met_bb_end = true;
}
}
cmd_len = get_cmd_length(info, cmd) << 2;
bb_size += cmd_len;
gma += cmd_len;
} while (!met_bb_end);
return bb_size;
}
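/*
 * Map the pages backing [start, start + len) of a GEM object into a
 * contiguous kernel virtual address range with vmap().
 */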
static u32 *vmap_batch(struct drm_i915_gem_object *obj,
unsigned int start, unsigned int len)
{
int i;
void *addr = NULL;
struct sg_page_iter sg_iter;
int first_page = start >> PAGE_SHIFT;
int last_page = (len + start + 4095) >> PAGE_SHIFT;
int npages = last_page - first_page;
struct page **pages;
pages = drm_malloc_ab(npages, sizeof(*pages));
if (pages == NULL) {
DRM_DEBUG_DRIVER("Failed to get space for pages\n");
goto finish;
}
i = 0;
for_each_sg_page(obj->pages->sgl, &sg_iter, obj->pages->nents,
first_page) {
pages[i++] = sg_page_iter_page(&sg_iter);
if (i == npages)
break;
}
addr = vmap(pages, i, 0, PAGE_KERNEL);
if (addr == NULL) {
DRM_DEBUG_DRIVER("Failed to vmap pages\n");
goto finish;
}
finish:
if (pages)
drm_free_large(pages);
return (u32 *)addr;
}
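/*
 * Create a shadow copy of the guest batch buffer in a newly allocated
 * GEM object and redirect the parser to it: ip_va points into the
 * shadow copy while ip_gma keeps tracking the guest address.
 */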
static int perform_bb_shadow(struct parser_exec_state *s)
{
struct intel_shadow_bb_entry *entry_obj;
unsigned long gma = 0;
uint32_t bb_size;
void *dst = NULL;
int ret = 0;
/* get the start gm address of the batch buffer */
gma = get_gma_bb_from_cmd(s, 1);
/* get the size of the batch buffer */
bb_size = find_bb_size(s);
/* allocate shadow batch buffer */
entry_obj = kmalloc(sizeof(*entry_obj), GFP_KERNEL);
if (entry_obj == NULL)
return -ENOMEM;
entry_obj->obj = i915_gem_object_create(&(s->vgpu->gvt->dev_priv->drm),
round_up(bb_size, PAGE_SIZE));
if (entry_obj->obj == NULL)
return -ENOMEM;
entry_obj->len = bb_size;
INIT_LIST_HEAD(&entry_obj->list);
ret = i915_gem_object_get_pages(entry_obj->obj);
if (ret)
return ret;
i915_gem_object_pin_pages(entry_obj->obj);
/* get the va of the shadow batch buffer */
dst = (void *)vmap_batch(entry_obj->obj, 0, bb_size);
if (!dst) {
gvt_err("failed to vmap shadow batch\n");
ret = -ENOMEM;
goto unpin_src;
}
ret = i915_gem_object_set_to_cpu_domain(entry_obj->obj, false);
if (ret) {
gvt_err("failed to set shadow batch to CPU\n");
goto unmap_src;
}
entry_obj->va = dst;
entry_obj->bb_start_cmd_va = s->ip_va;
/* copy batch buffer to shadow batch buffer */
ret = copy_gma_to_hva(s->vgpu, s->vgpu->gtt.ggtt_mm,
gma, gma + bb_size, dst);
if (ret) {
gvt_err("fail to copy guest ring buffer\n");
return ret;
}
list_add(&entry_obj->list, &s->workload->shadow_bb);
/*
 * ip_va saves the virtual address of the shadow batch buffer, while
 * ip_gma saves the graphics address of the original batch buffer.
 * As the shadow batch buffer is just a copy of the original one,
 * it is correct to use the shadow batch buffer's va together with the
 * original batch buffer's gma. After all, we don't want to pin the
 * shadow buffer here (too early).
 */
s->ip_va = dst;
s->ip_gma = gma;
return 0;
unmap_src:
vunmap(dst);
unpin_src:
i915_gem_object_unpin_pages(entry_obj->obj);
return ret;
}
static int cmd_handler_mi_batch_buffer_start(struct parser_exec_state *s)
{
bool second_level;
int ret = 0;
if (s->buf_type == BATCH_BUFFER_2ND_LEVEL) {
gvt_err("Found MI_BATCH_BUFFER_START in 2nd level BB\n");
return -EINVAL;
}
second_level = BATCH_BUFFER_2ND_LEVEL_BIT(cmd_val(s, 0)) == 1;
if (second_level && (s->buf_type != BATCH_BUFFER_INSTRUCTION)) {
gvt_err("Jumping to 2nd level BB from RB is not allowed\n");
return -EINVAL;
}
s->saved_buf_addr_type = s->buf_addr_type;
addr_type_update_snb(s);
if (s->buf_type == RING_BUFFER_INSTRUCTION) {
s->ret_ip_gma_ring = s->ip_gma + cmd_length(s) * sizeof(u32);
s->buf_type = BATCH_BUFFER_INSTRUCTION;
} else if (second_level) {
s->buf_type = BATCH_BUFFER_2ND_LEVEL;
s->ret_ip_gma_bb = s->ip_gma + cmd_length(s) * sizeof(u32);
s->ret_bb_va = s->ip_va + cmd_length(s) * sizeof(u32);
}
if (batch_buffer_needs_scan(s)) {
ret = perform_bb_shadow(s);
if (ret < 0)
gvt_err("invalid shadow batch buffer\n");
} else {
/* emulate a batch buffer end so the return is handled correctly */
ret = cmd_handler_mi_batch_buffer_end(s);
if (ret < 0)
return ret;
}
return ret;
}
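/*
 * Static command table. Each entry lists: command name, opcode, flags,
 * rings that may issue it, supported devices, address-fixup bitmap
 * (ADDR_FIX_*), a length value in dwords, and an optional handler.
 */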
static struct cmd_info cmd_info[] = {
{"MI_NOOP", OP_MI_NOOP, F_LEN_CONST, R_ALL, D_ALL, 0, 1, NULL},
{"MI_SET_PREDICATE", OP_MI_SET_PREDICATE, F_LEN_CONST, R_ALL, D_ALL,
0, 1, NULL},
{"MI_USER_INTERRUPT", OP_MI_USER_INTERRUPT, F_LEN_CONST, R_ALL, D_ALL,
0, 1, cmd_handler_mi_user_interrupt},
{"MI_WAIT_FOR_EVENT", OP_MI_WAIT_FOR_EVENT, F_LEN_CONST, R_RCS | R_BCS,
D_ALL, 0, 1, cmd_handler_mi_wait_for_event},
{"MI_FLUSH", OP_MI_FLUSH, F_LEN_CONST, R_ALL, D_ALL, 0, 1, NULL},
{"MI_ARB_CHECK", OP_MI_ARB_CHECK, F_LEN_CONST, R_ALL, D_ALL, 0, 1,
NULL},
{"MI_RS_CONTROL", OP_MI_RS_CONTROL, F_LEN_CONST, R_RCS, D_ALL, 0, 1,
NULL},
{"MI_REPORT_HEAD", OP_MI_REPORT_HEAD, F_LEN_CONST, R_ALL, D_ALL, 0, 1,
NULL},
{"MI_ARB_ON_OFF", OP_MI_ARB_ON_OFF, F_LEN_CONST, R_ALL, D_ALL, 0, 1,
NULL},
{"MI_URB_ATOMIC_ALLOC", OP_MI_URB_ATOMIC_ALLOC, F_LEN_CONST, R_RCS,
D_ALL, 0, 1, NULL},
{"MI_BATCH_BUFFER_END", OP_MI_BATCH_BUFFER_END,
F_IP_ADVANCE_CUSTOM | F_LEN_CONST, R_ALL, D_ALL, 0, 1,
cmd_handler_mi_batch_buffer_end},
{"MI_SUSPEND_FLUSH", OP_MI_SUSPEND_FLUSH, F_LEN_CONST, R_ALL, D_ALL,
0, 1, NULL},
{"MI_PREDICATE", OP_MI_PREDICATE, F_LEN_CONST, R_RCS, D_ALL, 0, 1,
NULL},
{"MI_TOPOLOGY_FILTER", OP_MI_TOPOLOGY_FILTER, F_LEN_CONST, R_ALL,
D_ALL, 0, 1, NULL},
{"MI_SET_APPID", OP_MI_SET_APPID, F_LEN_CONST, R_ALL, D_ALL, 0, 1,
NULL},
{"MI_RS_CONTEXT", OP_MI_RS_CONTEXT, F_LEN_CONST, R_RCS, D_ALL, 0, 1,
NULL},
{"MI_DISPLAY_FLIP", OP_MI_DISPLAY_FLIP, F_LEN_VAR | F_POST_HANDLE,
R_RCS | R_BCS, D_ALL, 0, 8, cmd_handler_mi_display_flip},
{"MI_SEMAPHORE_MBOX", OP_MI_SEMAPHORE_MBOX, F_LEN_VAR, R_ALL, D_ALL,
0, 8, NULL},
{"MI_MATH", OP_MI_MATH, F_LEN_VAR, R_ALL, D_ALL, 0, 8, NULL},
{"MI_URB_CLEAR", OP_MI_URB_CLEAR, F_LEN_VAR, R_RCS, D_ALL, 0, 8, NULL},
{"ME_SEMAPHORE_SIGNAL", OP_MI_SEMAPHORE_SIGNAL, F_LEN_VAR, R_ALL,
D_BDW_PLUS, 0, 8, NULL},
{"ME_SEMAPHORE_WAIT", OP_MI_SEMAPHORE_WAIT, F_LEN_VAR, R_ALL, D_BDW_PLUS,
ADDR_FIX_1(2), 8, cmd_handler_mi_semaphore_wait},
{"MI_STORE_DATA_IMM", OP_MI_STORE_DATA_IMM, F_LEN_VAR, R_ALL, D_BDW_PLUS,
ADDR_FIX_1(1), 10, cmd_handler_mi_store_data_imm},
{"MI_STORE_DATA_INDEX", OP_MI_STORE_DATA_INDEX, F_LEN_VAR, R_ALL, D_ALL,
0, 8, cmd_handler_mi_store_data_index},
{"MI_LOAD_REGISTER_IMM", OP_MI_LOAD_REGISTER_IMM, F_LEN_VAR, R_ALL,
D_ALL, 0, 8, cmd_handler_lri},
{"MI_UPDATE_GTT", OP_MI_UPDATE_GTT, F_LEN_VAR, R_ALL, D_BDW_PLUS, 0, 10,
cmd_handler_mi_update_gtt},
{"MI_STORE_REGISTER_MEM", OP_MI_STORE_REGISTER_MEM, F_LEN_VAR, R_ALL,
D_ALL, ADDR_FIX_1(2), 8, cmd_handler_srm},
{"MI_FLUSH_DW", OP_MI_FLUSH_DW, F_LEN_VAR, R_ALL, D_ALL, 0, 6,
cmd_handler_mi_flush_dw},
{"MI_CLFLUSH", OP_MI_CLFLUSH, F_LEN_VAR, R_ALL, D_ALL, ADDR_FIX_1(1),
10, cmd_handler_mi_clflush},
{"MI_REPORT_PERF_COUNT", OP_MI_REPORT_PERF_COUNT, F_LEN_VAR, R_ALL,
D_ALL, ADDR_FIX_1(1), 6, cmd_handler_mi_report_perf_count},
{"MI_LOAD_REGISTER_MEM", OP_MI_LOAD_REGISTER_MEM, F_LEN_VAR, R_ALL,
D_ALL, ADDR_FIX_1(2), 8, cmd_handler_lrm},
{"MI_LOAD_REGISTER_REG", OP_MI_LOAD_REGISTER_REG, F_LEN_VAR, R_ALL,
D_ALL, 0, 8, cmd_handler_lrr},
{"MI_RS_STORE_DATA_IMM", OP_MI_RS_STORE_DATA_IMM, F_LEN_VAR, R_RCS,
D_ALL, 0, 8, NULL},
{"MI_LOAD_URB_MEM", OP_MI_LOAD_URB_MEM, F_LEN_VAR, R_RCS, D_ALL,
ADDR_FIX_1(2), 8, NULL},
{"MI_STORE_URM_MEM", OP_MI_STORE_URM_MEM, F_LEN_VAR, R_RCS, D_ALL,
ADDR_FIX_1(2), 8, NULL},
{"MI_OP_2E", OP_MI_2E, F_LEN_VAR, R_ALL, D_BDW_PLUS, ADDR_FIX_2(1, 2),
8, cmd_handler_mi_op_2e},
{"MI_OP_2F", OP_MI_2F, F_LEN_VAR, R_ALL, D_BDW_PLUS, ADDR_FIX_1(1),
8, cmd_handler_mi_op_2f},
{"MI_BATCH_BUFFER_START", OP_MI_BATCH_BUFFER_START,
F_IP_ADVANCE_CUSTOM, R_ALL, D_ALL, 0, 8,
cmd_handler_mi_batch_buffer_start},
{"MI_CONDITIONAL_BATCH_BUFFER_END", OP_MI_CONDITIONAL_BATCH_BUFFER_END,
F_LEN_VAR, R_ALL, D_ALL, ADDR_FIX_1(2), 8,
cmd_handler_mi_conditional_batch_buffer_end},
{"MI_LOAD_SCAN_LINES_INCL", OP_MI_LOAD_SCAN_LINES_INCL, F_LEN_CONST,
R_RCS | R_BCS, D_ALL, 0, 2, NULL},
{"XY_SETUP_BLT", OP_XY_SETUP_BLT, F_LEN_VAR, R_BCS, D_ALL,
ADDR_FIX_2(4, 7), 8, NULL},
{"XY_SETUP_CLIP_BLT", OP_XY_SETUP_CLIP_BLT, F_LEN_VAR, R_BCS, D_ALL,
0, 8, NULL},
{"XY_SETUP_MONO_PATTERN_SL_BLT", OP_XY_SETUP_MONO_PATTERN_SL_BLT,
F_LEN_VAR, R_BCS, D_ALL, ADDR_FIX_1(4), 8, NULL},
{"XY_PIXEL_BLT", OP_XY_PIXEL_BLT, F_LEN_VAR, R_BCS, D_ALL, 0, 8, NULL},
{"XY_SCANLINES_BLT", OP_XY_SCANLINES_BLT, F_LEN_VAR, R_BCS, D_ALL,
0, 8, NULL},
{"XY_TEXT_BLT", OP_XY_TEXT_BLT, F_LEN_VAR, R_BCS, D_ALL,
ADDR_FIX_1(3), 8, NULL},
{"XY_TEXT_IMMEDIATE_BLT", OP_XY_TEXT_IMMEDIATE_BLT, F_LEN_VAR, R_BCS,
D_ALL, 0, 8, NULL},
{"XY_COLOR_BLT", OP_XY_COLOR_BLT, F_LEN_VAR, R_BCS, D_ALL,
ADDR_FIX_1(4), 8, NULL},
{"XY_PAT_BLT", OP_XY_PAT_BLT, F_LEN_VAR, R_BCS, D_ALL,
ADDR_FIX_2(4, 5), 8, NULL},
{"XY_MONO_PAT_BLT", OP_XY_MONO_PAT_BLT, F_LEN_VAR, R_BCS, D_ALL,
ADDR_FIX_1(4), 8, NULL},
{"XY_SRC_COPY_BLT", OP_XY_SRC_COPY_BLT, F_LEN_VAR, R_BCS, D_ALL,
ADDR_FIX_2(4, 7), 8, NULL},
{"XY_MONO_SRC_COPY_BLT", OP_XY_MONO_SRC_COPY_BLT, F_LEN_VAR, R_BCS,
D_ALL, ADDR_FIX_2(4, 5), 8, NULL},
{"XY_FULL_BLT", OP_XY_FULL_BLT, F_LEN_VAR, R_BCS, D_ALL, 0, 8, NULL},
{"XY_FULL_MONO_SRC_BLT", OP_XY_FULL_MONO_SRC_BLT, F_LEN_VAR, R_BCS,
D_ALL, ADDR_FIX_3(4, 5, 8), 8, NULL},
{"XY_FULL_MONO_PATTERN_BLT", OP_XY_FULL_MONO_PATTERN_BLT, F_LEN_VAR,
R_BCS, D_ALL, ADDR_FIX_2(4, 7), 8, NULL},
{"XY_FULL_MONO_PATTERN_MONO_SRC_BLT",
OP_XY_FULL_MONO_PATTERN_MONO_SRC_BLT,
F_LEN_VAR, R_BCS, D_ALL, ADDR_FIX_2(4, 5), 8, NULL},
{"XY_MONO_PAT_FIXED_BLT", OP_XY_MONO_PAT_FIXED_BLT, F_LEN_VAR, R_BCS,
D_ALL, ADDR_FIX_1(4), 8, NULL},
{"XY_MONO_SRC_COPY_IMMEDIATE_BLT", OP_XY_MONO_SRC_COPY_IMMEDIATE_BLT,
F_LEN_VAR, R_BCS, D_ALL, ADDR_FIX_1(4), 8, NULL},
{"XY_PAT_BLT_IMMEDIATE", OP_XY_PAT_BLT_IMMEDIATE, F_LEN_VAR, R_BCS,
D_ALL, ADDR_FIX_1(4), 8, NULL},
{"XY_SRC_COPY_CHROMA_BLT", OP_XY_SRC_COPY_CHROMA_BLT, F_LEN_VAR, R_BCS,
D_ALL, ADDR_FIX_2(4, 7), 8, NULL},
{"XY_FULL_IMMEDIATE_PATTERN_BLT", OP_XY_FULL_IMMEDIATE_PATTERN_BLT,
F_LEN_VAR, R_BCS, D_ALL, ADDR_FIX_2(4, 7), 8, NULL},
{"XY_FULL_MONO_SRC_IMMEDIATE_PATTERN_BLT",
OP_XY_FULL_MONO_SRC_IMMEDIATE_PATTERN_BLT,
F_LEN_VAR, R_BCS, D_ALL, ADDR_FIX_2(4, 5), 8, NULL},
{"XY_PAT_CHROMA_BLT", OP_XY_PAT_CHROMA_BLT, F_LEN_VAR, R_BCS, D_ALL,
ADDR_FIX_2(4, 5), 8, NULL},
{"XY_PAT_CHROMA_BLT_IMMEDIATE", OP_XY_PAT_CHROMA_BLT_IMMEDIATE,
F_LEN_VAR, R_BCS, D_ALL, ADDR_FIX_1(4), 8, NULL},
{"3DSTATE_VIEWPORT_STATE_POINTERS_SF_CLIP",
OP_3DSTATE_VIEWPORT_STATE_POINTERS_SF_CLIP,
F_LEN_VAR, R_RCS, D_ALL, 0, 8, NULL},
{"3DSTATE_VIEWPORT_STATE_POINTERS_CC",
OP_3DSTATE_VIEWPORT_STATE_POINTERS_CC,
F_LEN_VAR, R_RCS, D_ALL, 0, 8, NULL},
{"3DSTATE_BLEND_STATE_POINTERS",
OP_3DSTATE_BLEND_STATE_POINTERS,
F_LEN_VAR, R_RCS, D_ALL, 0, 8, NULL},
{"3DSTATE_DEPTH_STENCIL_STATE_POINTERS",
OP_3DSTATE_DEPTH_STENCIL_STATE_POINTERS,
F_LEN_VAR, R_RCS, D_ALL, 0, 8, NULL},
{"3DSTATE_BINDING_TABLE_POINTERS_VS",
OP_3DSTATE_BINDING_TABLE_POINTERS_VS,
F_LEN_VAR, R_RCS, D_ALL, 0, 8, NULL},
{"3DSTATE_BINDING_TABLE_POINTERS_HS",
OP_3DSTATE_BINDING_TABLE_POINTERS_HS,
F_LEN_VAR, R_RCS, D_ALL, 0, 8, NULL},
{"3DSTATE_BINDING_TABLE_POINTERS_DS",
OP_3DSTATE_BINDING_TABLE_POINTERS_DS,
F_LEN_VAR, R_RCS, D_ALL, 0, 8, NULL},
{"3DSTATE_BINDING_TABLE_POINTERS_GS",
OP_3DSTATE_BINDING_TABLE_POINTERS_GS,
F_LEN_VAR, R_RCS, D_ALL, 0, 8, NULL},
{"3DSTATE_BINDING_TABLE_POINTERS_PS",
OP_3DSTATE_BINDING_TABLE_POINTERS_PS,
F_LEN_VAR, R_RCS, D_ALL, 0, 8, NULL},
{"3DSTATE_SAMPLER_STATE_POINTERS_VS",
OP_3DSTATE_SAMPLER_STATE_POINTERS_VS,
F_LEN_VAR, R_RCS, D_ALL, 0, 8, NULL},
{"3DSTATE_SAMPLER_STATE_POINTERS_HS",
OP_3DSTATE_SAMPLER_STATE_POINTERS_HS,
F_LEN_VAR, R_RCS, D_ALL, 0, 8, NULL},
{"3DSTATE_SAMPLER_STATE_POINTERS_DS",
OP_3DSTATE_SAMPLER_STATE_POINTERS_DS,
F_LEN_VAR, R_RCS, D_ALL, 0, 8, NULL},
{"3DSTATE_SAMPLER_STATE_POINTERS_GS",
OP_3DSTATE_SAMPLER_STATE_POINTERS_GS,
F_LEN_VAR, R_RCS, D_ALL, 0, 8, NULL},
{"3DSTATE_SAMPLER_STATE_POINTERS_PS",
OP_3DSTATE_SAMPLER_STATE_POINTERS_PS,
F_LEN_VAR, R_RCS, D_ALL, 0, 8, NULL},
{"3DSTATE_URB_VS", OP_3DSTATE_URB_VS, F_LEN_VAR, R_RCS, D_ALL,
0, 8, NULL},
{"3DSTATE_URB_HS", OP_3DSTATE_URB_HS, F_LEN_VAR, R_RCS, D_ALL,
0, 8, NULL},
{"3DSTATE_URB_DS", OP_3DSTATE_URB_DS, F_LEN_VAR, R_RCS, D_ALL,
0, 8, NULL},
{"3DSTATE_URB_GS", OP_3DSTATE_URB_GS, F_LEN_VAR, R_RCS, D_ALL,
0, 8, NULL},
{"3DSTATE_GATHER_CONSTANT_VS", OP_3DSTATE_GATHER_CONSTANT_VS,
F_LEN_VAR, R_RCS, D_ALL, 0, 8, NULL},
{"3DSTATE_GATHER_CONSTANT_GS", OP_3DSTATE_GATHER_CONSTANT_GS,
F_LEN_VAR, R_RCS, D_ALL, 0, 8, NULL},
{"3DSTATE_GATHER_CONSTANT_HS", OP_3DSTATE_GATHER_CONSTANT_HS,
F_LEN_VAR, R_RCS, D_ALL, 0, 8, NULL},
{"3DSTATE_GATHER_CONSTANT_DS", OP_3DSTATE_GATHER_CONSTANT_DS,
F_LEN_VAR, R_RCS, D_ALL, 0, 8, NULL},
{"3DSTATE_GATHER_CONSTANT_PS", OP_3DSTATE_GATHER_CONSTANT_PS,
F_LEN_VAR, R_RCS, D_ALL, 0, 8, NULL},
{"3DSTATE_DX9_CONSTANTF_VS", OP_3DSTATE_DX9_CONSTANTF_VS,
F_LEN_VAR, R_RCS, D_ALL, 0, 11, NULL},
{"3DSTATE_DX9_CONSTANTF_PS", OP_3DSTATE_DX9_CONSTANTF_PS,
F_LEN_VAR, R_RCS, D_ALL, 0, 11, NULL},
{"3DSTATE_DX9_CONSTANTI_VS", OP_3DSTATE_DX9_CONSTANTI_VS,
F_LEN_VAR, R_RCS, D_ALL, 0, 8, NULL},
{"3DSTATE_DX9_CONSTANTI_PS", OP_3DSTATE_DX9_CONSTANTI_PS,
F_LEN_VAR, R_RCS, D_ALL, 0, 8, NULL},
{"3DSTATE_DX9_CONSTANTB_VS", OP_3DSTATE_DX9_CONSTANTB_VS,
F_LEN_VAR, R_RCS, D_ALL, 0, 8, NULL},
{"3DSTATE_DX9_CONSTANTB_PS", OP_3DSTATE_DX9_CONSTANTB_PS,
F_LEN_VAR, R_RCS, D_ALL, 0, 8, NULL},
{"3DSTATE_DX9_LOCAL_VALID_VS", OP_3DSTATE_DX9_LOCAL_VALID_VS,
F_LEN_VAR, R_RCS, D_ALL, 0, 8, NULL},
{"3DSTATE_DX9_LOCAL_VALID_PS", OP_3DSTATE_DX9_LOCAL_VALID_PS,
F_LEN_VAR, R_RCS, D_ALL, 0, 8, NULL},
{"3DSTATE_DX9_GENERATE_ACTIVE_VS", OP_3DSTATE_DX9_GENERATE_ACTIVE_VS,
F_LEN_VAR, R_RCS, D_ALL, 0, 8, NULL},
{"3DSTATE_DX9_GENERATE_ACTIVE_PS", OP_3DSTATE_DX9_GENERATE_ACTIVE_PS,
F_LEN_VAR, R_RCS, D_ALL, 0, 8, NULL},
{"3DSTATE_BINDING_TABLE_EDIT_VS", OP_3DSTATE_BINDING_TABLE_EDIT_VS,
F_LEN_VAR, R_RCS, D_ALL, 0, 9, NULL},
{"3DSTATE_BINDING_TABLE_EDIT_GS", OP_3DSTATE_BINDING_TABLE_EDIT_GS,
F_LEN_VAR, R_RCS, D_ALL, 0, 9, NULL},
{"3DSTATE_BINDING_TABLE_EDIT_HS", OP_3DSTATE_BINDING_TABLE_EDIT_HS,
F_LEN_VAR, R_RCS, D_ALL, 0, 9, NULL},
{"3DSTATE_BINDING_TABLE_EDIT_DS", OP_3DSTATE_BINDING_TABLE_EDIT_DS,
F_LEN_VAR, R_RCS, D_ALL, 0, 9, NULL},
{"3DSTATE_BINDING_TABLE_EDIT_PS", OP_3DSTATE_BINDING_TABLE_EDIT_PS,
F_LEN_VAR, R_RCS, D_ALL, 0, 9, NULL},
{"3DSTATE_VF_INSTANCING", OP_3DSTATE_VF_INSTANCING, F_LEN_VAR, R_RCS,
D_BDW_PLUS, 0, 8, NULL},
{"3DSTATE_VF_SGVS", OP_3DSTATE_VF_SGVS, F_LEN_VAR, R_RCS, D_BDW_PLUS, 0, 8,
NULL},
{"3DSTATE_VF_TOPOLOGY", OP_3DSTATE_VF_TOPOLOGY, F_LEN_VAR, R_RCS,
D_BDW_PLUS, 0, 8, NULL},
{"3DSTATE_WM_CHROMAKEY", OP_3DSTATE_WM_CHROMAKEY, F_LEN_VAR, R_RCS,
D_BDW_PLUS, 0, 8, NULL},
{"3DSTATE_PS_BLEND", OP_3DSTATE_PS_BLEND, F_LEN_VAR, R_RCS, D_BDW_PLUS, 0,
8, NULL},
{"3DSTATE_WM_DEPTH_STENCIL", OP_3DSTATE_WM_DEPTH_STENCIL, F_LEN_VAR,
R_RCS, D_BDW_PLUS, 0, 8, NULL},
{"3DSTATE_PS_EXTRA", OP_3DSTATE_PS_EXTRA, F_LEN_VAR, R_RCS, D_BDW_PLUS, 0,
8, NULL},
{"3DSTATE_RASTER", OP_3DSTATE_RASTER, F_LEN_VAR, R_RCS, D_BDW_PLUS, 0, 8,
NULL},
{"3DSTATE_SBE_SWIZ", OP_3DSTATE_SBE_SWIZ, F_LEN_VAR, R_RCS, D_BDW_PLUS, 0, 8,
NULL},
{"3DSTATE_WM_HZ_OP", OP_3DSTATE_WM_HZ_OP, F_LEN_VAR, R_RCS, D_BDW_PLUS, 0, 8,
NULL},
{"3DSTATE_VERTEX_BUFFERS", OP_3DSTATE_VERTEX_BUFFERS, F_LEN_VAR, R_RCS,
D_BDW_PLUS, 0, 8, NULL},
{"3DSTATE_VERTEX_ELEMENTS", OP_3DSTATE_VERTEX_ELEMENTS, F_LEN_VAR,
R_RCS, D_ALL, 0, 8, NULL},
{"3DSTATE_INDEX_BUFFER", OP_3DSTATE_INDEX_BUFFER, F_LEN_VAR, R_RCS,
D_BDW_PLUS, ADDR_FIX_1(2), 8, NULL},
{"3DSTATE_VF_STATISTICS", OP_3DSTATE_VF_STATISTICS, F_LEN_CONST,
R_RCS, D_ALL, 0, 1, NULL},
{"3DSTATE_VF", OP_3DSTATE_VF, F_LEN_VAR, R_RCS, D_ALL, 0, 8, NULL},
{"3DSTATE_CC_STATE_POINTERS", OP_3DSTATE_CC_STATE_POINTERS, F_LEN_VAR,
R_RCS, D_ALL, 0, 8, NULL},
{"3DSTATE_SCISSOR_STATE_POINTERS", OP_3DSTATE_SCISSOR_STATE_POINTERS,
F_LEN_VAR, R_RCS, D_ALL, 0, 8, NULL},
{"3DSTATE_GS", OP_3DSTATE_GS, F_LEN_VAR, R_RCS, D_ALL, 0, 8, NULL},
{"3DSTATE_CLIP", OP_3DSTATE_CLIP, F_LEN_VAR, R_RCS, D_ALL, 0, 8, NULL},
{"3DSTATE_WM", OP_3DSTATE_WM, F_LEN_VAR, R_RCS, D_ALL, 0, 8, NULL},
{"3DSTATE_CONSTANT_GS", OP_3DSTATE_CONSTANT_GS, F_LEN_VAR, R_RCS,
D_BDW_PLUS, 0, 8, NULL},
{"3DSTATE_CONSTANT_PS", OP_3DSTATE_CONSTANT_PS, F_LEN_VAR, R_RCS,
D_BDW_PLUS, 0, 8, NULL},
{"3DSTATE_SAMPLE_MASK", OP_3DSTATE_SAMPLE_MASK, F_LEN_VAR, R_RCS,
D_ALL, 0, 8, NULL},
{"3DSTATE_CONSTANT_HS", OP_3DSTATE_CONSTANT_HS, F_LEN_VAR, R_RCS,
D_BDW_PLUS, 0, 8, NULL},
{"3DSTATE_CONSTANT_DS", OP_3DSTATE_CONSTANT_DS, F_LEN_VAR, R_RCS,
D_BDW_PLUS, 0, 8, NULL},
{"3DSTATE_HS", OP_3DSTATE_HS, F_LEN_VAR, R_RCS, D_ALL, 0, 8, NULL},
{"3DSTATE_TE", OP_3DSTATE_TE, F_LEN_VAR, R_RCS, D_ALL, 0, 8, NULL},
{"3DSTATE_DS", OP_3DSTATE_DS, F_LEN_VAR, R_RCS, D_ALL, 0, 8, NULL},
{"3DSTATE_STREAMOUT", OP_3DSTATE_STREAMOUT, F_LEN_VAR, R_RCS,
D_ALL, 0, 8, NULL},
{"3DSTATE_SBE", OP_3DSTATE_SBE, F_LEN_VAR, R_RCS, D_ALL, 0, 8, NULL},
{"3DSTATE_PS", OP_3DSTATE_PS, F_LEN_VAR, R_RCS, D_ALL, 0, 8, NULL},
{"3DSTATE_DRAWING_RECTANGLE", OP_3DSTATE_DRAWING_RECTANGLE, F_LEN_VAR,
R_RCS, D_ALL, 0, 8, NULL},
{"3DSTATE_SAMPLER_PALETTE_LOAD0", OP_3DSTATE_SAMPLER_PALETTE_LOAD0,
F_LEN_VAR, R_RCS, D_ALL, 0, 8, NULL},
{"3DSTATE_CHROMA_KEY", OP_3DSTATE_CHROMA_KEY, F_LEN_VAR, R_RCS, D_ALL,
0, 8, NULL},
{"3DSTATE_DEPTH_BUFFER", OP_3DSTATE_DEPTH_BUFFER, F_LEN_VAR, R_RCS,
D_ALL, ADDR_FIX_1(2), 8, NULL},
{"3DSTATE_POLY_STIPPLE_OFFSET", OP_3DSTATE_POLY_STIPPLE_OFFSET,
F_LEN_VAR, R_RCS, D_ALL, 0, 8, NULL},
{"3DSTATE_POLY_STIPPLE_PATTERN", OP_3DSTATE_POLY_STIPPLE_PATTERN,
F_LEN_VAR, R_RCS, D_ALL, 0, 8, NULL},
{"3DSTATE_LINE_STIPPLE", OP_3DSTATE_LINE_STIPPLE, F_LEN_VAR, R_RCS,
D_ALL, 0, 8, NULL},
{"3DSTATE_AA_LINE_PARAMS", OP_3DSTATE_AA_LINE_PARAMS, F_LEN_VAR, R_RCS,
D_ALL, 0, 8, NULL},
{"3DSTATE_GS_SVB_INDEX", OP_3DSTATE_GS_SVB_INDEX, F_LEN_VAR, R_RCS,
D_ALL, 0, 8, NULL},
{"3DSTATE_SAMPLER_PALETTE_LOAD1", OP_3DSTATE_SAMPLER_PALETTE_LOAD1,
F_LEN_VAR, R_RCS, D_ALL, 0, 8, NULL},
{"3DSTATE_MULTISAMPLE", OP_3DSTATE_MULTISAMPLE_BDW, F_LEN_VAR, R_RCS,
D_BDW_PLUS, 0, 8, NULL},
{"3DSTATE_STENCIL_BUFFER", OP_3DSTATE_STENCIL_BUFFER, F_LEN_VAR, R_RCS,
D_ALL, ADDR_FIX_1(2), 8, NULL},
{"3DSTATE_HIER_DEPTH_BUFFER", OP_3DSTATE_HIER_DEPTH_BUFFER, F_LEN_VAR,
R_RCS, D_ALL, ADDR_FIX_1(2), 8, NULL},
{"3DSTATE_CLEAR_PARAMS", OP_3DSTATE_CLEAR_PARAMS, F_LEN_VAR,
R_RCS, D_ALL, 0, 8, NULL},
{"3DSTATE_PUSH_CONSTANT_ALLOC_VS", OP_3DSTATE_PUSH_CONSTANT_ALLOC_VS,
F_LEN_VAR, R_RCS, D_ALL, 0, 8, NULL},
{"3DSTATE_PUSH_CONSTANT_ALLOC_HS", OP_3DSTATE_PUSH_CONSTANT_ALLOC_HS,
F_LEN_VAR, R_RCS, D_ALL, 0, 8, NULL},
{"3DSTATE_PUSH_CONSTANT_ALLOC_DS", OP_3DSTATE_PUSH_CONSTANT_ALLOC_DS,
F_LEN_VAR, R_RCS, D_ALL, 0, 8, NULL},
{"3DSTATE_PUSH_CONSTANT_ALLOC_GS", OP_3DSTATE_PUSH_CONSTANT_ALLOC_GS,
F_LEN_VAR, R_RCS, D_ALL, 0, 8, NULL},
{"3DSTATE_PUSH_CONSTANT_ALLOC_PS", OP_3DSTATE_PUSH_CONSTANT_ALLOC_PS,
F_LEN_VAR, R_RCS, D_ALL, 0, 8, NULL},
{"3DSTATE_MONOFILTER_SIZE", OP_3DSTATE_MONOFILTER_SIZE, F_LEN_VAR,
R_RCS, D_ALL, 0, 8, NULL},
{"3DSTATE_SO_DECL_LIST", OP_3DSTATE_SO_DECL_LIST, F_LEN_VAR, R_RCS,
D_ALL, 0, 9, NULL},
{"3DSTATE_SO_BUFFER", OP_3DSTATE_SO_BUFFER, F_LEN_VAR, R_RCS, D_BDW_PLUS,
ADDR_FIX_2(2, 4), 8, NULL},
{"3DSTATE_BINDING_TABLE_POOL_ALLOC",
OP_3DSTATE_BINDING_TABLE_POOL_ALLOC,
F_LEN_VAR, R_RCS, D_BDW_PLUS, ADDR_FIX_1(1), 8, NULL},
{"3DSTATE_GATHER_POOL_ALLOC", OP_3DSTATE_GATHER_POOL_ALLOC,
F_LEN_VAR, R_RCS, D_BDW_PLUS, ADDR_FIX_1(1), 8, NULL},
{"3DSTATE_DX9_CONSTANT_BUFFER_POOL_ALLOC",
OP_3DSTATE_DX9_CONSTANT_BUFFER_POOL_ALLOC,
F_LEN_VAR, R_RCS, D_BDW_PLUS, ADDR_FIX_1(1), 8, NULL},
{"3DSTATE_SAMPLE_PATTERN", OP_3DSTATE_SAMPLE_PATTERN, F_LEN_VAR, R_RCS,
D_BDW_PLUS, 0, 8, NULL},
{"PIPE_CONTROL", OP_PIPE_CONTROL, F_LEN_VAR, R_RCS, D_ALL,
ADDR_FIX_1(2), 8, cmd_handler_pipe_control},
{"3DPRIMITIVE", OP_3DPRIMITIVE, F_LEN_VAR, R_RCS, D_ALL, 0, 8, NULL},
{"PIPELINE_SELECT", OP_PIPELINE_SELECT, F_LEN_CONST, R_RCS, D_ALL, 0,
1, NULL},
{"STATE_PREFETCH", OP_STATE_PREFETCH, F_LEN_VAR, R_RCS, D_ALL,
ADDR_FIX_1(1), 8, NULL},
{"STATE_SIP", OP_STATE_SIP, F_LEN_VAR, R_RCS, D_ALL, 0, 8, NULL},
{"STATE_BASE_ADDRESS", OP_STATE_BASE_ADDRESS, F_LEN_VAR, R_RCS, D_BDW_PLUS,
ADDR_FIX_5(1, 3, 4, 5, 6), 8, NULL},
{"OP_3D_MEDIA_0_1_4", OP_3D_MEDIA_0_1_4, F_LEN_VAR, R_RCS, D_ALL,
ADDR_FIX_1(1), 8, NULL},
{"3DSTATE_VS", OP_3DSTATE_VS, F_LEN_VAR, R_RCS, D_ALL, 0, 8, NULL},
{"3DSTATE_SF", OP_3DSTATE_SF, F_LEN_VAR, R_RCS, D_ALL, 0, 8, NULL},
{"3DSTATE_CONSTANT_VS", OP_3DSTATE_CONSTANT_VS, F_LEN_VAR, R_RCS, D_BDW_PLUS,
0, 8, NULL},
{"3DSTATE_COMPONENT_PACKING", OP_3DSTATE_COMPONENT_PACKING, F_LEN_VAR, R_RCS,
D_SKL_PLUS, 0, 8, NULL},
{"MEDIA_INTERFACE_DESCRIPTOR_LOAD", OP_MEDIA_INTERFACE_DESCRIPTOR_LOAD,
F_LEN_VAR, R_RCS, D_ALL, 0, 16, NULL},
{"MEDIA_GATEWAY_STATE", OP_MEDIA_GATEWAY_STATE, F_LEN_VAR, R_RCS, D_ALL,
0, 16, NULL},
{"MEDIA_STATE_FLUSH", OP_MEDIA_STATE_FLUSH, F_LEN_VAR, R_RCS, D_ALL,
0, 16, NULL},
{"MEDIA_OBJECT", OP_MEDIA_OBJECT, F_LEN_VAR, R_RCS, D_ALL, 0, 16, NULL},
{"MEDIA_CURBE_LOAD", OP_MEDIA_CURBE_LOAD, F_LEN_VAR, R_RCS, D_ALL,
0, 16, NULL},
{"MEDIA_OBJECT_PRT", OP_MEDIA_OBJECT_PRT, F_LEN_VAR, R_RCS, D_ALL,
0, 16, NULL},
{"MEDIA_OBJECT_WALKER", OP_MEDIA_OBJECT_WALKER, F_LEN_VAR, R_RCS, D_ALL,
0, 16, NULL},
{"GPGPU_WALKER", OP_GPGPU_WALKER, F_LEN_VAR, R_RCS, D_ALL,
0, 8, NULL},
{"MEDIA_VFE_STATE", OP_MEDIA_VFE_STATE, F_LEN_VAR, R_RCS, D_ALL, 0, 16,
NULL},
{"3DSTATE_VF_STATISTICS_GM45", OP_3DSTATE_VF_STATISTICS_GM45,
F_LEN_CONST, R_ALL, D_ALL, 0, 1, NULL},
{"MFX_PIPE_MODE_SELECT", OP_MFX_PIPE_MODE_SELECT, F_LEN_VAR,
R_VCS, D_ALL, 0, 12, NULL},
{"MFX_SURFACE_STATE", OP_MFX_SURFACE_STATE, F_LEN_VAR,
R_VCS, D_ALL, 0, 12, NULL},
{"MFX_PIPE_BUF_ADDR_STATE", OP_MFX_PIPE_BUF_ADDR_STATE, F_LEN_VAR,
R_VCS, D_BDW_PLUS, 0, 12, NULL},
{"MFX_IND_OBJ_BASE_ADDR_STATE", OP_MFX_IND_OBJ_BASE_ADDR_STATE,
F_LEN_VAR, R_VCS, D_BDW_PLUS, 0, 12, NULL},
{"MFX_BSP_BUF_BASE_ADDR_STATE", OP_MFX_BSP_BUF_BASE_ADDR_STATE,
F_LEN_VAR, R_VCS, D_BDW_PLUS, ADDR_FIX_3(1, 3, 5), 12, NULL},
{"OP_2_0_0_5", OP_2_0_0_5, F_LEN_VAR, R_VCS, D_BDW_PLUS, 0, 12, NULL},
{"MFX_STATE_POINTER", OP_MFX_STATE_POINTER, F_LEN_VAR,
R_VCS, D_ALL, 0, 12, NULL},
{"MFX_QM_STATE", OP_MFX_QM_STATE, F_LEN_VAR,
R_VCS, D_ALL, 0, 12, NULL},
{"MFX_FQM_STATE", OP_MFX_FQM_STATE, F_LEN_VAR,
R_VCS, D_ALL, 0, 12, NULL},
{"MFX_PAK_INSERT_OBJECT", OP_MFX_PAK_INSERT_OBJECT, F_LEN_VAR,
R_VCS, D_ALL, 0, 12, NULL},
{"MFX_STITCH_OBJECT", OP_MFX_STITCH_OBJECT, F_LEN_VAR,
R_VCS, D_ALL, 0, 12, NULL},
{"MFD_IT_OBJECT", OP_MFD_IT_OBJECT, F_LEN_VAR,
R_VCS, D_ALL, 0, 12, NULL},
{"MFX_WAIT", OP_MFX_WAIT, F_LEN_VAR,
R_VCS, D_ALL, 0, 6, NULL},
{"MFX_AVC_IMG_STATE", OP_MFX_AVC_IMG_STATE, F_LEN_VAR,
R_VCS, D_ALL, 0, 12, NULL},
{"MFX_AVC_QM_STATE", OP_MFX_AVC_QM_STATE, F_LEN_VAR,
R_VCS, D_ALL, 0, 12, NULL},
{"MFX_AVC_DIRECTMODE_STATE", OP_MFX_AVC_DIRECTMODE_STATE, F_LEN_VAR,
R_VCS, D_ALL, 0, 12, NULL},
{"MFX_AVC_SLICE_STATE", OP_MFX_AVC_SLICE_STATE, F_LEN_VAR,
R_VCS, D_ALL, 0, 12, NULL},
{"MFX_AVC_REF_IDX_STATE", OP_MFX_AVC_REF_IDX_STATE, F_LEN_VAR,
R_VCS, D_ALL, 0, 12, NULL},
{"MFX_AVC_WEIGHTOFFSET_STATE", OP_MFX_AVC_WEIGHTOFFSET_STATE, F_LEN_VAR,
R_VCS, D_ALL, 0, 12, NULL},
{"MFD_AVC_PICID_STATE", OP_MFD_AVC_PICID_STATE, F_LEN_VAR,
R_VCS, D_ALL, 0, 12, NULL},
{"MFD_AVC_DPB_STATE", OP_MFD_AVC_DPB_STATE, F_LEN_VAR,
R_VCS, D_ALL, 0, 12, NULL},
{"MFD_AVC_BSD_OBJECT", OP_MFD_AVC_BSD_OBJECT, F_LEN_VAR,
R_VCS, D_ALL, 0, 12, NULL},
{"MFD_AVC_SLICEADDR", OP_MFD_AVC_SLICEADDR, F_LEN_VAR,
R_VCS, D_ALL, ADDR_FIX_1(2), 12, NULL},
{"MFC_AVC_PAK_OBJECT", OP_MFC_AVC_PAK_OBJECT, F_LEN_VAR,
R_VCS, D_ALL, 0, 12, NULL},
{"MFX_VC1_PRED_PIPE_STATE", OP_MFX_VC1_PRED_PIPE_STATE, F_LEN_VAR,
R_VCS, D_ALL, 0, 12, NULL},
{"MFX_VC1_DIRECTMODE_STATE", OP_MFX_VC1_DIRECTMODE_STATE, F_LEN_VAR,
R_VCS, D_ALL, 0, 12, NULL},
{"MFD_VC1_SHORT_PIC_STATE", OP_MFD_VC1_SHORT_PIC_STATE, F_LEN_VAR,
R_VCS, D_ALL, 0, 12, NULL},
{"MFD_VC1_LONG_PIC_STATE", OP_MFD_VC1_LONG_PIC_STATE, F_LEN_VAR,
R_VCS, D_ALL, 0, 12, NULL},
{"MFD_VC1_BSD_OBJECT", OP_MFD_VC1_BSD_OBJECT, F_LEN_VAR,
R_VCS, D_ALL, 0, 12, NULL},
{"MFC_MPEG2_SLICEGROUP_STATE", OP_MFC_MPEG2_SLICEGROUP_STATE, F_LEN_VAR,
R_VCS, D_ALL, 0, 12, NULL},
{"MFC_MPEG2_PAK_OBJECT", OP_MFC_MPEG2_PAK_OBJECT, F_LEN_VAR,
R_VCS, D_ALL, 0, 12, NULL},
{"MFX_MPEG2_PIC_STATE", OP_MFX_MPEG2_PIC_STATE, F_LEN_VAR,
R_VCS, D_ALL, 0, 12, NULL},
{"MFX_MPEG2_QM_STATE", OP_MFX_MPEG2_QM_STATE, F_LEN_VAR,
R_VCS, D_ALL, 0, 12, NULL},
{"MFD_MPEG2_BSD_OBJECT", OP_MFD_MPEG2_BSD_OBJECT, F_LEN_VAR,
R_VCS, D_ALL, 0, 12, NULL},
{"MFX_2_6_0_0", OP_MFX_2_6_0_0, F_LEN_VAR, R_VCS, D_ALL,
0, 16, NULL},
{"MFX_2_6_0_9", OP_MFX_2_6_0_9, F_LEN_VAR, R_VCS, D_ALL, 0, 16, NULL},
{"MFX_2_6_0_8", OP_MFX_2_6_0_8, F_LEN_VAR, R_VCS, D_ALL, 0, 16, NULL},
{"MFX_JPEG_PIC_STATE", OP_MFX_JPEG_PIC_STATE, F_LEN_VAR,
R_VCS, D_ALL, 0, 12, NULL},
{"MFX_JPEG_HUFF_TABLE_STATE", OP_MFX_JPEG_HUFF_TABLE_STATE, F_LEN_VAR,
R_VCS, D_ALL, 0, 12, NULL},
{"MFD_JPEG_BSD_OBJECT", OP_MFD_JPEG_BSD_OBJECT, F_LEN_VAR,
R_VCS, D_ALL, 0, 12, NULL},
{"VEBOX_STATE", OP_VEB_STATE, F_LEN_VAR, R_VECS, D_ALL, 0, 12, NULL},
{"VEBOX_SURFACE_STATE", OP_VEB_SURFACE_STATE, F_LEN_VAR, R_VECS, D_ALL,
0, 12, NULL},
{"VEB_DI_IECP", OP_VEB_DNDI_IECP_STATE, F_LEN_VAR, R_VECS, D_BDW_PLUS,
0, 20, NULL},
};
static void add_cmd_entry(struct intel_gvt *gvt, struct cmd_entry *e)
{
hash_add(gvt->cmd_table, &e->hlist, e->info->opcode);
}
#define GVT_MAX_CMD_LENGTH 20 /* In Dword */
static void trace_cs_command(struct parser_exec_state *s,
cycles_t cost_pre_cmd_handler, cycles_t cost_cmd_handler)
{
/* This buffer is used by ftrace to store all commands copied from
 * guest gma space. Commands can sometimes cross page boundaries, which
 * ftrace cannot handle directly, so this is just used as a
 * 'bounce buffer'.
 */
u32 cmd_trace_buf[GVT_MAX_CMD_LENGTH];
int i;
u32 cmd_len = cmd_length(s);
/* The chosen value of GVT_MAX_CMD_LENGTH is based on the following
 * two considerations:
 * 1) From observation, most common ring commands are not that long,
 * but there are exceptions, so it makes sense to observe longer
 * commands.
 * 2) From the performance and debugging point of view, dumping the
 * full contents of every command is not necessary.
 * We might shrink GVT_MAX_CMD_LENGTH or remove this trace event in
 * the future for performance reasons.
 */
if (unlikely(cmd_len > GVT_MAX_CMD_LENGTH)) {
gvt_dbg_cmd("cmd length exceed tracing limitation!\n");
cmd_len = GVT_MAX_CMD_LENGTH;
}
for (i = 0; i < cmd_len; i++)
cmd_trace_buf[i] = cmd_val(s, i);
trace_gvt_command(s->vgpu->id, s->ring_id, s->ip_gma, cmd_trace_buf,
cmd_len, s->buf_type == RING_BUFFER_INSTRUCTION,
cost_pre_cmd_handler, cost_cmd_handler);
}
/* call the cmd handler, and advance ip */
static int cmd_parser_exec(struct parser_exec_state *s)
{
struct cmd_info *info;
u32 cmd;
int ret = 0;
cycles_t t0, t1, t2;
struct parser_exec_state s_before_advance_custom;
t0 = get_cycles();
cmd = cmd_val(s, 0);
info = get_cmd_info(s->vgpu->gvt, cmd, s->ring_id);
if (info == NULL) {
gvt_err("unknown cmd 0x%x, opcode=0x%x\n",
cmd, get_opcode(cmd, s->ring_id));
return -EINVAL;
}
gvt_dbg_cmd("%s\n", info->name);
s->info = info;
t1 = get_cycles();
memcpy(&s_before_advance_custom, s, sizeof(struct parser_exec_state));
if (info->handler) {
ret = info->handler(s);
if (ret < 0) {
gvt_err("%s handler error\n", info->name);
return ret;
}
}
t2 = get_cycles();
trace_cs_command(&s_before_advance_custom, t1 - t0, t2 - t1);
if (!(info->flag & F_IP_ADVANCE_CUSTOM)) {
ret = cmd_advance_default(s);
if (ret) {
gvt_err("%s IP advance error\n", info->name);
return ret;
}
}
return 0;
}
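/*
 * Check whether gma lies outside the [head, tail] window of the ring,
 * taking wrap-around (tail below head) into account.
 */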
static inline bool gma_out_of_range(unsigned long gma,
unsigned long gma_head, unsigned int gma_tail)
{
if (gma_tail >= gma_head)
return (gma < gma_head) || (gma > gma_tail);
else
return (gma > gma_tail) && (gma < gma_head);
}
static int command_scan(struct parser_exec_state *s,
unsigned long rb_head, unsigned long rb_tail,
unsigned long rb_start, unsigned long rb_len)
{
unsigned long gma_head, gma_tail, gma_bottom;
int ret = 0;
gma_head = rb_start + rb_head;
gma_tail = rb_start + rb_tail;
gma_bottom = rb_start + rb_len;
gvt_dbg_cmd("scan_start: start=%lx end=%lx\n", gma_head, gma_tail);
while (s->ip_gma != gma_tail) {
if (s->buf_type == RING_BUFFER_INSTRUCTION) {
if (!(s->ip_gma >= rb_start) ||
!(s->ip_gma < gma_bottom)) {
gvt_err("ip_gma %lx out of ring scope."
"(base:0x%lx, bottom: 0x%lx)\n",
s->ip_gma, rb_start,
gma_bottom);
parser_exec_state_dump(s);
return -EINVAL;
}
if (gma_out_of_range(s->ip_gma, gma_head, gma_tail)) {
gvt_err("ip_gma %lx out of range."
"base 0x%lx head 0x%lx tail 0x%lx\n",
s->ip_gma, rb_start,
rb_head, rb_tail);
parser_exec_state_dump(s);
break;
}
}
ret = cmd_parser_exec(s);
if (ret) {
gvt_err("cmd parser error\n");
parser_exec_state_dump(s);
break;
}
}
gvt_dbg_cmd("scan_end\n");
return ret;
}
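/*
 * Scan all commands between ring head and tail for a workload, using
 * the shadowed copy of the guest ring buffer as the data source.
 * Rings covered by bypass_scan_mask are skipped.
 */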
static int scan_workload(struct intel_vgpu_workload *workload)
{
unsigned long gma_head, gma_tail, gma_bottom;
struct parser_exec_state s;
int ret = 0;
/* ring base is page aligned */
if (WARN_ON(!IS_ALIGNED(workload->rb_start, GTT_PAGE_SIZE)))
return -EINVAL;
gma_head = workload->rb_start + workload->rb_head;
gma_tail = workload->rb_start + workload->rb_tail;
gma_bottom = workload->rb_start + _RING_CTL_BUF_SIZE(workload->rb_ctl);
s.buf_type = RING_BUFFER_INSTRUCTION;
s.buf_addr_type = GTT_BUFFER;
s.vgpu = workload->vgpu;
s.ring_id = workload->ring_id;
s.ring_start = workload->rb_start;
s.ring_size = _RING_CTL_BUF_SIZE(workload->rb_ctl);
s.ring_head = gma_head;
s.ring_tail = gma_tail;
s.rb_va = workload->shadow_ring_buffer_va;
s.workload = workload;
if (bypass_scan_mask & (1 << workload->ring_id))
return 0;
ret = ip_gma_set(&s, gma_head);
if (ret)
goto out;
ret = command_scan(&s, workload->rb_head, workload->rb_tail,
workload->rb_start, _RING_CTL_BUF_SIZE(workload->rb_ctl));
out:
return ret;
}
static int scan_wa_ctx(struct intel_shadow_wa_ctx *wa_ctx)
{
unsigned long gma_head, gma_tail, gma_bottom, ring_size, ring_tail;
struct parser_exec_state s;
int ret = 0;
/* ring base is page aligned */
if (WARN_ON(!IS_ALIGNED(wa_ctx->indirect_ctx.guest_gma, GTT_PAGE_SIZE)))
return -EINVAL;
ring_tail = wa_ctx->indirect_ctx.size + 3 * sizeof(uint32_t);
ring_size = round_up(wa_ctx->indirect_ctx.size + CACHELINE_BYTES,
PAGE_SIZE);
gma_head = wa_ctx->indirect_ctx.guest_gma;
gma_tail = wa_ctx->indirect_ctx.guest_gma + ring_tail;
gma_bottom = wa_ctx->indirect_ctx.guest_gma + ring_size;
s.buf_type = RING_BUFFER_INSTRUCTION;
s.buf_addr_type = GTT_BUFFER;
s.vgpu = wa_ctx->workload->vgpu;
s.ring_id = wa_ctx->workload->ring_id;
s.ring_start = wa_ctx->indirect_ctx.guest_gma;
s.ring_size = ring_size;
s.ring_head = gma_head;
s.ring_tail = gma_tail;
s.rb_va = wa_ctx->indirect_ctx.shadow_va;
s.workload = wa_ctx->workload;
ret = ip_gma_set(&s, gma_head);
if (ret)
goto out;
ret = command_scan(&s, 0, ring_tail,
wa_ctx->indirect_ctx.guest_gma, ring_size);
out:
return ret;
}
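/*
 * Copy the guest ring buffer contents between head and tail into the
 * shadow ring buffer reserved via intel_ring_begin(), handling the
 * wrap at the top of the guest ring with two copies when needed.
 */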
static int shadow_workload_ring_buffer(struct intel_vgpu_workload *workload)
{
struct intel_vgpu *vgpu = workload->vgpu;
int ring_id = workload->ring_id;
struct i915_gem_context *shadow_ctx = vgpu->shadow_ctx;
struct intel_ring *ring = shadow_ctx->engine[ring_id].ring;
unsigned long gma_head, gma_tail, gma_top, guest_rb_size;
unsigned int copy_len = 0;
int ret;
guest_rb_size = _RING_CTL_BUF_SIZE(workload->rb_ctl);
/* calculate workload ring buffer size */
workload->rb_len = (workload->rb_tail + guest_rb_size -
workload->rb_head) % guest_rb_size;
gma_head = workload->rb_start + workload->rb_head;
gma_tail = workload->rb_start + workload->rb_tail;
gma_top = workload->rb_start + guest_rb_size;
/* allocate shadow ring buffer */
ret = intel_ring_begin(workload->req, workload->rb_len / 4);
if (ret)
return ret;
/* get shadow ring buffer va */
workload->shadow_ring_buffer_va = ring->vaddr + ring->tail;
/* head > tail --> copy head <-> top */
if (gma_head > gma_tail) {
ret = copy_gma_to_hva(vgpu, vgpu->gtt.ggtt_mm,
gma_head, gma_top,
workload->shadow_ring_buffer_va);
if (ret) {
gvt_err("fail to copy guest ring buffer\n");
return ret;
}
copy_len = gma_top - gma_head;
gma_head = workload->rb_start;
}
/* copy head or start <-> tail */
ret = copy_gma_to_hva(vgpu, vgpu->gtt.ggtt_mm,
gma_head, gma_tail,
workload->shadow_ring_buffer_va + copy_len);
if (ret) {
gvt_err("fail to copy guest ring buffer\n");
return ret;
}
ring->tail += workload->rb_len;
intel_ring_advance(ring);
return 0;
}
int intel_gvt_scan_and_shadow_workload(struct intel_vgpu_workload *workload)
{
int ret;
ret = shadow_workload_ring_buffer(workload);
if (ret) {
gvt_err("fail to shadow workload ring_buffer\n");
return ret;
}
ret = scan_workload(workload);
if (ret) {
gvt_err("scan workload error\n");
return ret;
}
return 0;
}
static int shadow_indirect_ctx(struct intel_shadow_wa_ctx *wa_ctx)
{
struct drm_device *dev = &wa_ctx->workload->vgpu->gvt->dev_priv->drm;
int ctx_size = wa_ctx->indirect_ctx.size;
unsigned long guest_gma = wa_ctx->indirect_ctx.guest_gma;
int ret = 0;
void *dest = NULL;
wa_ctx->indirect_ctx.obj = i915_gem_object_create(dev,
round_up(ctx_size + CACHELINE_BYTES, PAGE_SIZE));
if (wa_ctx->indirect_ctx.obj == NULL)
return -ENOMEM;
ret = i915_gem_object_get_pages(wa_ctx->indirect_ctx.obj);
if (ret)
return ret;
i915_gem_object_pin_pages(wa_ctx->indirect_ctx.obj);
/* get the va of the shadow batch buffer */
dest = (void *)vmap_batch(wa_ctx->indirect_ctx.obj, 0,
ctx_size + CACHELINE_BYTES);
if (!dest) {
gvt_err("failed to vmap shadow indirect ctx\n");
ret = -ENOMEM;
goto unpin_src;
}
ret = i915_gem_object_set_to_cpu_domain(wa_ctx->indirect_ctx.obj,
false);
if (ret) {
gvt_err("failed to set shadow indirect ctx to CPU\n");
goto unmap_src;
}
wa_ctx->indirect_ctx.shadow_va = dest;
memset(dest, 0, round_up(ctx_size + CACHELINE_BYTES, PAGE_SIZE));
ret = copy_gma_to_hva(wa_ctx->workload->vgpu,
wa_ctx->workload->vgpu->gtt.ggtt_mm,
guest_gma, guest_gma + ctx_size, dest);
if (ret) {
gvt_err("fail to copy guest indirect ctx\n");
return ret;
}
return 0;
unmap_src:
vunmap(dest);
unpin_src:
i915_gem_object_unpin_pages(wa_ctx->indirect_ctx.obj);
return ret;
}
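/*
 * Append a small MI_BATCH_BUFFER_START (0x18800001) that chains to the
 * guest per-context WA batch right after the shadowed indirect context,
 * so both are executed together.
 */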
static int combine_wa_ctx(struct intel_shadow_wa_ctx *wa_ctx)
{
uint32_t per_ctx_start[CACHELINE_DWORDS] = {0};
unsigned char *bb_start_sva;
per_ctx_start[0] = 0x18800001;
per_ctx_start[1] = wa_ctx->per_ctx.guest_gma;
bb_start_sva = (unsigned char *)wa_ctx->indirect_ctx.shadow_va +
wa_ctx->indirect_ctx.size;
memcpy(bb_start_sva, per_ctx_start, CACHELINE_BYTES);
return 0;
}
int intel_gvt_scan_and_shadow_wa_ctx(struct intel_shadow_wa_ctx *wa_ctx)
{
int ret;
if (wa_ctx->indirect_ctx.size == 0)
return 0;
ret = shadow_indirect_ctx(wa_ctx);
if (ret) {
gvt_err("fail to shadow indirect ctx\n");
return ret;
}
combine_wa_ctx(wa_ctx);
ret = scan_wa_ctx(wa_ctx);
if (ret) {
gvt_err("scan wa ctx error\n");
return ret;
}
return 0;
}
static struct cmd_info *find_cmd_entry_any_ring(struct intel_gvt *gvt,
unsigned int opcode, int rings)
{
struct cmd_info *info = NULL;
unsigned int ring;
for_each_set_bit(ring, (unsigned long *)&rings, I915_NUM_ENGINES) {
info = find_cmd_entry(gvt, opcode, ring);
if (info)
break;
}
return info;
}
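/*
 * Build the command hash table for this device type from cmd_info[],
 * skipping commands the device does not support and rejecting
 * duplicate opcodes across rings.
 */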
static int init_cmd_table(struct intel_gvt *gvt)
{
int i;
struct cmd_entry *e;
struct cmd_info *info;
unsigned int gen_type;
gen_type = intel_gvt_get_device_type(gvt);
for (i = 0; i < ARRAY_SIZE(cmd_info); i++) {
if (!(cmd_info[i].devices & gen_type))
continue;
e = kzalloc(sizeof(*e), GFP_KERNEL);
if (!e)
return -ENOMEM;
e->info = &cmd_info[i];
info = find_cmd_entry_any_ring(gvt,
e->info->opcode, e->info->rings);
if (info) {
gvt_err("%s %s duplicated\n", e->info->name,
info->name);
return -EEXIST;
}
INIT_HLIST_NODE(&e->hlist);
add_cmd_entry(gvt, e);
gvt_dbg_cmd("add %-30s op %04x flag %x devs %02x rings %02x\n",
e->info->name, e->info->opcode, e->info->flag,
e->info->devices, e->info->rings);
}
return 0;
}
static void clean_cmd_table(struct intel_gvt *gvt)
{
struct hlist_node *tmp;
struct cmd_entry *e;
int i;
hash_for_each_safe(gvt->cmd_table, i, tmp, e, hlist)
kfree(e);
hash_init(gvt->cmd_table);
}
void intel_gvt_clean_cmd_parser(struct intel_gvt *gvt)
{
clean_cmd_table(gvt);
}
int intel_gvt_init_cmd_parser(struct intel_gvt *gvt)
{
int ret;
ret = init_cmd_table(gvt);
if (ret) {
intel_gvt_clean_cmd_parser(gvt);
return ret;
}
return 0;
}
/*
* Copyright(c) 2011-2016 Intel Corporation. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice (including the next
* paragraph) shall be included in all copies or substantial portions of the
* Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*
* Authors:
* Ke Yu
* Kevin Tian <kevin.tian@intel.com>
* Zhiyuan Lv <zhiyuan.lv@intel.com>
*
* Contributors:
* Min He <min.he@intel.com>
* Ping Gao <ping.a.gao@intel.com>
* Tina Zhang <tina.zhang@intel.com>
* Yulei Zhang <yulei.zhang@intel.com>
* Zhi Wang <zhi.a.wang@intel.com>
*
*/
#ifndef _GVT_CMD_PARSER_H_
#define _GVT_CMD_PARSER_H_
#define GVT_CMD_HASH_BITS 7
void intel_gvt_clean_cmd_parser(struct intel_gvt *gvt);
int intel_gvt_init_cmd_parser(struct intel_gvt *gvt);
int intel_gvt_scan_and_shadow_workload(struct intel_vgpu_workload *workload);
int intel_gvt_scan_and_shadow_wa_ctx(struct intel_shadow_wa_ctx *wa_ctx);
#endif
#ifndef __GVT_DEBUG_H__
#define __GVT_DEBUG_H__
#define gvt_err(fmt, args...) \
DRM_ERROR("gvt: "fmt, ##args)
#define gvt_dbg_core(fmt, args...) \
DRM_DEBUG_DRIVER("gvt: core: "fmt, ##args)
#define gvt_dbg_irq(fmt, args...) \
DRM_DEBUG_DRIVER("gvt: irq: "fmt, ##args)
#define gvt_dbg_mm(fmt, args...) \
DRM_DEBUG_DRIVER("gvt: mm: "fmt, ##args)
#define gvt_dbg_mmio(fmt, args...) \
DRM_DEBUG_DRIVER("gvt: mmio: "fmt, ##args)
#define gvt_dbg_dpy(fmt, args...) \
DRM_DEBUG_DRIVER("gvt: dpy: "fmt, ##args)
#define gvt_dbg_el(fmt, args...) \
DRM_DEBUG_DRIVER("gvt: el: "fmt, ##args)
#define gvt_dbg_sched(fmt, args...) \
DRM_DEBUG_DRIVER("gvt: sched: "fmt, ##args)
#define gvt_dbg_render(fmt, args...) \
DRM_DEBUG_DRIVER("gvt: render: "fmt, ##args)
#define gvt_dbg_cmd(fmt, args...) \
DRM_DEBUG_DRIVER("gvt: cmd: "fmt, ##args)
#endif
/*
* Copyright(c) 2011-2016 Intel Corporation. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice (including the next
* paragraph) shall be included in all copies or substantial portions of the
* Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*
* Authors:
* Ke Yu
* Zhiyuan Lv <zhiyuan.lv@intel.com>
*
* Contributors:
* Terrence Xu <terrence.xu@intel.com>
* Changbin Du <changbin.du@intel.com>
* Bing Niu <bing.niu@intel.com>
* Zhi Wang <zhi.a.wang@intel.com>
*
*/
#include "i915_drv.h"
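/*
 * Return the pipe currently driven by the eDP transcoder, based on the
 * virtual TRANS_DDI_FUNC_CTL_EDP register, or -1 if none.
 */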
static int get_edp_pipe(struct intel_vgpu *vgpu)
{
u32 data = vgpu_vreg(vgpu, _TRANS_DDI_FUNC_CTL_EDP);
int pipe = -1;
switch (data & TRANS_DDI_EDP_INPUT_MASK) {
case TRANS_DDI_EDP_INPUT_A_ON:
case TRANS_DDI_EDP_INPUT_A_ONOFF:
pipe = PIPE_A;
break;
case TRANS_DDI_EDP_INPUT_B_ONOFF:
pipe = PIPE_B;
break;
case TRANS_DDI_EDP_INPUT_C_ONOFF:
pipe = PIPE_C;
break;
}
return pipe;
}
static int edp_pipe_is_enabled(struct intel_vgpu *vgpu)
{
struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv;
if (!(vgpu_vreg(vgpu, PIPECONF(_PIPE_EDP)) & PIPECONF_ENABLE))
return 0;
if (!(vgpu_vreg(vgpu, _TRANS_DDI_FUNC_CTL_EDP) & TRANS_DDI_FUNC_ENABLE))
return 0;
return 1;
}
static int pipe_is_enabled(struct intel_vgpu *vgpu, int pipe)
{
struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv;
if (WARN_ON(pipe < PIPE_A || pipe >= I915_MAX_PIPES))
return -EINVAL;
if (vgpu_vreg(vgpu, PIPECONF(pipe)) & PIPECONF_ENABLE)
return 1;
if (edp_pipe_is_enabled(vgpu) &&
get_edp_pipe(vgpu) == pipe)
return 1;
return 0;
}
/* EDID with 1024x768 as its resolution */
static unsigned char virtual_dp_monitor_edid[] = {
/*Header*/
0x00, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x00,
/* Vendor & Product Identification */
0x22, 0xf0, 0x54, 0x29, 0x00, 0x00, 0x00, 0x00, 0x04, 0x17,
/* Version & Revision */
0x01, 0x04,
/* Basic Display Parameters & Features */
0xa5, 0x34, 0x20, 0x78, 0x23,
/* Color Characteristics */
0xfc, 0x81, 0xa4, 0x55, 0x4d, 0x9d, 0x25, 0x12, 0x50, 0x54,
/* Established Timings: maximum resolution is 1024x768 */
0x21, 0x08, 0x00,
/* Standard Timings. All invalid */
0x00, 0xc0, 0x00, 0xc0, 0x00, 0x40, 0x00, 0x80, 0x00, 0x00,
0x00, 0x40, 0x00, 0x00, 0x00, 0x01,
/* 18 Byte Data Blocks 1: invalid */
0x00, 0x00, 0x80, 0xa0, 0x70, 0xb0,
0x23, 0x40, 0x30, 0x20, 0x36, 0x00, 0x06, 0x44, 0x21, 0x00, 0x00, 0x1a,
/* 18 Byte Data Blocks 2: invalid */
0x00, 0x00, 0x00, 0xfd, 0x00, 0x18, 0x3c, 0x18, 0x50, 0x11, 0x00, 0x0a,
0x20, 0x20, 0x20, 0x20, 0x20, 0x20,
/* 18 Byte Data Blocks 3: invalid */
0x00, 0x00, 0x00, 0xfc, 0x00, 0x48,
0x50, 0x20, 0x5a, 0x52, 0x32, 0x34, 0x34, 0x30, 0x77, 0x0a, 0x20, 0x20,
/* 18 Byte Data Blocks 4: invalid */
0x00, 0x00, 0x00, 0xff, 0x00, 0x43, 0x4e, 0x34, 0x33, 0x30, 0x34, 0x30,
0x44, 0x58, 0x51, 0x0a, 0x20, 0x20,
/* Extension Block Count */
0x00,
/* Checksum */
0xef,
};
#define DPCD_HEADER_SIZE 0xb
u8 dpcd_fix_data[DPCD_HEADER_SIZE] = {
0x11, 0x0a, 0x04, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00
};
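/*
 * Refresh the virtual hot-plug status bits (SDEISR / DE port ISR) so
 * they reflect which virtual monitors are attached to the vGPU's ports.
 */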
static void emulate_monitor_status_change(struct intel_vgpu *vgpu)
{
struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv;
vgpu_vreg(vgpu, SDEISR) &= ~(SDE_PORTB_HOTPLUG_CPT |
SDE_PORTC_HOTPLUG_CPT |
SDE_PORTD_HOTPLUG_CPT);
if (IS_SKYLAKE(dev_priv))
vgpu_vreg(vgpu, SDEISR) &= ~(SDE_PORTA_HOTPLUG_SPT |
SDE_PORTE_HOTPLUG_SPT);
if (intel_vgpu_has_monitor_on_port(vgpu, PORT_B))
vgpu_vreg(vgpu, SDEISR) |= SDE_PORTB_HOTPLUG_CPT;
if (intel_vgpu_has_monitor_on_port(vgpu, PORT_C))
vgpu_vreg(vgpu, SDEISR) |= SDE_PORTC_HOTPLUG_CPT;
if (intel_vgpu_has_monitor_on_port(vgpu, PORT_D))
vgpu_vreg(vgpu, SDEISR) |= SDE_PORTD_HOTPLUG_CPT;
if (IS_SKYLAKE(dev_priv) &&
intel_vgpu_has_monitor_on_port(vgpu, PORT_E)) {
vgpu_vreg(vgpu, SDEISR) |= SDE_PORTE_HOTPLUG_SPT;
}
if (intel_vgpu_has_monitor_on_port(vgpu, PORT_A)) {
if (IS_BROADWELL(dev_priv))
vgpu_vreg(vgpu, GEN8_DE_PORT_ISR) |=
GEN8_PORT_DP_A_HOTPLUG;
else
vgpu_vreg(vgpu, SDEISR) |= SDE_PORTA_HOTPLUG_SPT;
}
}
static void clean_virtual_dp_monitor(struct intel_vgpu *vgpu, int port_num)
{
struct intel_vgpu_port *port = intel_vgpu_port(vgpu, port_num);
kfree(port->edid);
port->edid = NULL;
kfree(port->dpcd);
port->dpcd = NULL;
}
static int setup_virtual_dp_monitor(struct intel_vgpu *vgpu, int port_num,
int type)
{
struct intel_vgpu_port *port = intel_vgpu_port(vgpu, port_num);
port->edid = kzalloc(sizeof(*(port->edid)), GFP_KERNEL);
if (!port->edid)
return -ENOMEM;
port->dpcd = kzalloc(sizeof(*(port->dpcd)), GFP_KERNEL);
if (!port->dpcd) {
kfree(port->edid);
return -ENOMEM;
}
memcpy(port->edid->edid_block, virtual_dp_monitor_edid,
EDID_SIZE);
port->edid->data_valid = true;
memcpy(port->dpcd->data, dpcd_fix_data, DPCD_HEADER_SIZE);
port->dpcd->data_valid = true;
port->dpcd->data[DPCD_SINK_COUNT] = 0x1;
port->type = type;
emulate_monitor_status_change(vgpu);
return 0;
}
/**
* intel_gvt_check_vblank_emulation - check if vblank emulation timer should
* be turned on/off when a virtual pipe is enabled/disabled.
* @gvt: a GVT device
*
* This function is used to turn on/off vblank timer according to currently
* enabled/disabled virtual pipes.
*
*/
void intel_gvt_check_vblank_emulation(struct intel_gvt *gvt)
{
struct intel_gvt_irq *irq = &gvt->irq;
struct intel_vgpu *vgpu;
bool have_enabled_pipe = false;
int pipe, id;
if (WARN_ON(!mutex_is_locked(&gvt->lock)))
return;
hrtimer_cancel(&irq->vblank_timer.timer);
for_each_active_vgpu(gvt, vgpu, id) {
for (pipe = 0; pipe < I915_MAX_PIPES; pipe++) {
have_enabled_pipe =
pipe_is_enabled(vgpu, pipe);
if (have_enabled_pipe)
break;
}
}
if (have_enabled_pipe)
hrtimer_start(&irq->vblank_timer.timer,
ktime_add_ns(ktime_get(), irq->vblank_timer.period),
HRTIMER_MODE_ABS);
}
static void emulate_vblank_on_pipe(struct intel_vgpu *vgpu, int pipe)
{
struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv;
struct intel_vgpu_irq *irq = &vgpu->irq;
int vblank_event[] = {
[PIPE_A] = PIPE_A_VBLANK,
[PIPE_B] = PIPE_B_VBLANK,
[PIPE_C] = PIPE_C_VBLANK,
};
int event;
if (pipe < PIPE_A || pipe > PIPE_C)
return;
for_each_set_bit(event, irq->flip_done_event[pipe],
INTEL_GVT_EVENT_MAX) {
clear_bit(event, irq->flip_done_event[pipe]);
if (!pipe_is_enabled(vgpu, pipe))
continue;
vgpu_vreg(vgpu, PIPE_FLIPCOUNT_G4X(pipe))++;
intel_vgpu_trigger_virtual_event(vgpu, event);
}
if (pipe_is_enabled(vgpu, pipe)) {
vgpu_vreg(vgpu, PIPE_FRMCOUNT_G4X(pipe))++;
intel_vgpu_trigger_virtual_event(vgpu, vblank_event[pipe]);
}
}
static void emulate_vblank(struct intel_vgpu *vgpu)
{
int pipe;
for_each_pipe(vgpu->gvt->dev_priv, pipe)
emulate_vblank_on_pipe(vgpu, pipe);
}
/**
* intel_gvt_emulate_vblank - trigger vblank events for vGPUs on GVT device
* @gvt: a GVT device
*
* This function is used to trigger vblank interrupts for vGPUs on GVT device
*
*/
void intel_gvt_emulate_vblank(struct intel_gvt *gvt)
{
struct intel_vgpu *vgpu;
int id;
if (WARN_ON(!mutex_is_locked(&gvt->lock)))
return;
for_each_active_vgpu(gvt, vgpu, id)
emulate_vblank(vgpu);
}
/**
* intel_vgpu_clean_display - clean vGPU virtual display emulation
* @vgpu: a vGPU
*
* This function is used to clean up the vGPU virtual display emulation state.
*
*/
void intel_vgpu_clean_display(struct intel_vgpu *vgpu)
{
struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv;
if (IS_SKYLAKE(dev_priv))
clean_virtual_dp_monitor(vgpu, PORT_D);
else
clean_virtual_dp_monitor(vgpu, PORT_B);
}
/**
* intel_vgpu_init_display - initialize vGPU virtual display emulation
* @vgpu: a vGPU
*
* This function is used to initialize the vGPU virtual display emulation state.
*
* Returns:
* Zero on success, negative error code if failed.
*
*/
int intel_vgpu_init_display(struct intel_vgpu *vgpu)
{
struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv;
intel_vgpu_init_i2c_edid(vgpu);
if (IS_SKYLAKE(dev_priv))
return setup_virtual_dp_monitor(vgpu, PORT_D, GVT_DP_D);
else
return setup_virtual_dp_monitor(vgpu, PORT_B, GVT_DP_B);
}
/*
* Copyright(c) 2011-2016 Intel Corporation. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice (including the next
* paragraph) shall be included in all copies or substantial portions of the
* Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*
* Authors:
* Ke Yu
* Zhiyuan Lv <zhiyuan.lv@intel.com>
*
* Contributors:
* Terrence Xu <terrence.xu@intel.com>
* Changbin Du <changbin.du@intel.com>
* Bing Niu <bing.niu@intel.com>
* Zhi Wang <zhi.a.wang@intel.com>
*
*/
#ifndef _GVT_DISPLAY_H_
#define _GVT_DISPLAY_H_
#define SBI_REG_MAX 20
#define DPCD_SIZE 0x700
#define intel_vgpu_port(vgpu, port) \
(&(vgpu->display.ports[port]))
#define intel_vgpu_has_monitor_on_port(vgpu, port) \
(intel_vgpu_port(vgpu, port)->edid && \
intel_vgpu_port(vgpu, port)->edid->data_valid)
#define intel_vgpu_port_is_dp(vgpu, port) \
((intel_vgpu_port(vgpu, port)->type == GVT_DP_A) || \
(intel_vgpu_port(vgpu, port)->type == GVT_DP_B) || \
(intel_vgpu_port(vgpu, port)->type == GVT_DP_C) || \
(intel_vgpu_port(vgpu, port)->type == GVT_DP_D))
#define INTEL_GVT_MAX_UEVENT_VARS 3
/* DPCD start */
#define DPCD_SIZE 0x700
/* DPCD */
#define DP_SET_POWER 0x600
#define DP_SET_POWER_D0 0x1
#define AUX_NATIVE_WRITE 0x8
#define AUX_NATIVE_READ 0x9
#define AUX_NATIVE_REPLY_MASK (0x3 << 4)
#define AUX_NATIVE_REPLY_ACK (0x0 << 4)
#define AUX_NATIVE_REPLY_NAK (0x1 << 4)
#define AUX_NATIVE_REPLY_DEFER (0x2 << 4)
#define AUX_BURST_SIZE 16
/* DPCD addresses */
#define DPCD_REV 0x000
#define DPCD_MAX_LINK_RATE 0x001
#define DPCD_MAX_LANE_COUNT 0x002
#define DPCD_TRAINING_PATTERN_SET 0x102
#define DPCD_SINK_COUNT 0x200
#define DPCD_LANE0_1_STATUS 0x202
#define DPCD_LANE2_3_STATUS 0x203
#define DPCD_LANE_ALIGN_STATUS_UPDATED 0x204
#define DPCD_SINK_STATUS 0x205
/* link training */
#define DPCD_TRAINING_PATTERN_SET_MASK 0x03
#define DPCD_LINK_TRAINING_DISABLED 0x00
#define DPCD_TRAINING_PATTERN_1 0x01
#define DPCD_TRAINING_PATTERN_2 0x02
#define DPCD_CP_READY_MASK (1 << 6)
/* lane status */
#define DPCD_LANES_CR_DONE 0x11
#define DPCD_LANES_EQ_DONE 0x22
#define DPCD_SYMBOL_LOCKED 0x44
#define DPCD_INTERLANE_ALIGN_DONE 0x01
#define DPCD_SINK_IN_SYNC 0x03
/* DPCD end */
#define SBI_RESPONSE_MASK 0x3
#define SBI_RESPONSE_SHIFT 0x1
#define SBI_STAT_MASK 0x1
#define SBI_STAT_SHIFT 0x0
#define SBI_OPCODE_SHIFT 8
#define SBI_OPCODE_MASK (0xff << SBI_OPCODE_SHIFT)
#define SBI_CMD_IORD 2
#define SBI_CMD_IOWR 3
#define SBI_CMD_CRRD 6
#define SBI_CMD_CRWR 7
#define SBI_ADDR_OFFSET_SHIFT 16
#define SBI_ADDR_OFFSET_MASK (0xffff << SBI_ADDR_OFFSET_SHIFT)
struct intel_vgpu_sbi_register {
unsigned int offset;
u32 value;
};
struct intel_vgpu_sbi {
int number;
struct intel_vgpu_sbi_register registers[SBI_REG_MAX];
};
enum intel_gvt_plane_type {
PRIMARY_PLANE = 0,
CURSOR_PLANE,
SPRITE_PLANE,
MAX_PLANE
};
struct intel_vgpu_dpcd_data {
bool data_valid;
u8 data[DPCD_SIZE];
};
enum intel_vgpu_port_type {
GVT_CRT = 0,
GVT_DP_A,
GVT_DP_B,
GVT_DP_C,
GVT_DP_D,
GVT_HDMI_B,
GVT_HDMI_C,
GVT_HDMI_D,
GVT_PORT_MAX
};
struct intel_vgpu_port {
/* per display EDID information */
struct intel_vgpu_edid_data *edid;
/* per display DPCD information */
struct intel_vgpu_dpcd_data *dpcd;
int type;
};
void intel_gvt_emulate_vblank(struct intel_gvt *gvt);
void intel_gvt_check_vblank_emulation(struct intel_gvt *gvt);
int intel_vgpu_init_display(struct intel_vgpu *vgpu);
void intel_vgpu_clean_display(struct intel_vgpu *vgpu);
#endif
/*
* Copyright(c) 2011-2016 Intel Corporation. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice (including the next
* paragraph) shall be included in all copies or substantial portions of the
* Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*
* Authors:
* Ke Yu
* Zhiyuan Lv <zhiyuan.lv@intel.com>
*
* Contributors:
* Terrence Xu <terrence.xu@intel.com>
* Changbin Du <changbin.du@intel.com>
* Bing Niu <bing.niu@intel.com>
* Zhi Wang <zhi.a.wang@intel.com>
*
*/
#include "i915_drv.h"
#define GMBUS1_TOTAL_BYTES_SHIFT 16
#define GMBUS1_TOTAL_BYTES_MASK 0x1ff
#define gmbus1_total_byte_count(v) (((v) >> \
GMBUS1_TOTAL_BYTES_SHIFT) & GMBUS1_TOTAL_BYTES_MASK)
#define gmbus1_slave_addr(v) (((v) & 0xff) >> 1)
#define gmbus1_slave_index(v) (((v) >> 8) & 0xff)
#define gmbus1_bus_cycle(v) (((v) >> 25) & 0x7)
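/*
 * Worked example (illustrative only, the value is hypothetical): a guest
 * starting a 128-byte EDID read over GMBUS might write 0x0a8000a1 to
 * GMBUS1.  With the helpers above:
 *   gmbus1_total_byte_count(0x0a8000a1) == 128  (bits 24:16)
 *   gmbus1_slave_addr(0x0a8000a1)       == 0x50 (EDID_ADDR; bit 0 set -> read)
 *   gmbus1_slave_index(0x0a8000a1)      == 0x00
 *   gmbus1_bus_cycle(0x0a8000a1)        == 0x5  (NIDX_STOP)
 */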
/* GMBUS0 bits definitions */
#define _GMBUS_PIN_SEL_MASK (0x7)
static unsigned char edid_get_byte(struct intel_vgpu *vgpu)
{
struct intel_vgpu_i2c_edid *edid = &vgpu->display.i2c_edid;
unsigned char chr = 0;
if (edid->state == I2C_NOT_SPECIFIED || !edid->slave_selected) {
gvt_err("Driver tries to read EDID without proper sequence!\n");
return 0;
}
if (edid->current_edid_read >= EDID_SIZE) {
gvt_err("edid_get_byte() exceeds the size of EDID!\n");
return 0;
}
if (!edid->edid_available) {
gvt_err("Reading EDID but EDID is not available!\n");
return 0;
}
if (intel_vgpu_has_monitor_on_port(vgpu, edid->port)) {
struct intel_vgpu_edid_data *edid_data =
intel_vgpu_port(vgpu, edid->port)->edid;
chr = edid_data->edid_block[edid->current_edid_read];
edid->current_edid_read++;
} else {
gvt_err("No EDID available during the reading?\n");
}
return chr;
}
static inline int get_port_from_gmbus0(u32 gmbus0)
{
int port_select = gmbus0 & _GMBUS_PIN_SEL_MASK;
int port = -EINVAL;
if (port_select == 2)
port = PORT_E;
else if (port_select == 4)
port = PORT_C;
else if (port_select == 5)
port = PORT_B;
else if (port_select == 6)
port = PORT_D;
return port;
}
static void reset_gmbus_controller(struct intel_vgpu *vgpu)
{
vgpu_vreg(vgpu, PCH_GMBUS2) = GMBUS_HW_RDY;
if (!vgpu->display.i2c_edid.edid_available)
vgpu_vreg(vgpu, PCH_GMBUS2) |= GMBUS_SATOER;
vgpu->display.i2c_edid.gmbus.phase = GMBUS_IDLE_PHASE;
}
/* GMBUS0 */
static int gmbus0_mmio_write(struct intel_vgpu *vgpu,
unsigned int offset, void *p_data, unsigned int bytes)
{
int port, pin_select;
memcpy(&vgpu_vreg(vgpu, offset), p_data, bytes);
pin_select = vgpu_vreg(vgpu, offset) & _GMBUS_PIN_SEL_MASK;
intel_vgpu_init_i2c_edid(vgpu);
if (pin_select == 0)
return 0;
port = get_port_from_gmbus0(pin_select);
if (WARN_ON(port < 0))
return 0;
vgpu->display.i2c_edid.state = I2C_GMBUS;
vgpu->display.i2c_edid.gmbus.phase = GMBUS_IDLE_PHASE;
vgpu_vreg(vgpu, PCH_GMBUS2) &= ~GMBUS_ACTIVE;
vgpu_vreg(vgpu, PCH_GMBUS2) |= GMBUS_HW_RDY | GMBUS_HW_WAIT_PHASE;
if (intel_vgpu_has_monitor_on_port(vgpu, port) &&
!intel_vgpu_port_is_dp(vgpu, port)) {
vgpu->display.i2c_edid.port = port;
vgpu->display.i2c_edid.edid_available = true;
vgpu_vreg(vgpu, PCH_GMBUS2) &= ~GMBUS_SATOER;
} else
vgpu_vreg(vgpu, PCH_GMBUS2) |= GMBUS_SATOER;
return 0;
}
static int gmbus1_mmio_write(struct intel_vgpu *vgpu, unsigned int offset,
void *p_data, unsigned int bytes)
{
struct intel_vgpu_i2c_edid *i2c_edid = &vgpu->display.i2c_edid;
u32 slave_addr;
u32 wvalue = *(u32 *)p_data;
if (vgpu_vreg(vgpu, offset) & GMBUS_SW_CLR_INT) {
if (!(wvalue & GMBUS_SW_CLR_INT)) {
vgpu_vreg(vgpu, offset) &= ~GMBUS_SW_CLR_INT;
reset_gmbus_controller(vgpu);
}
/*
* TODO: "This bit is cleared to zero when an event
* causes the HW_RDY bit transition to occur "
*/
} else {
/*
* per bspec setting this bit can cause:
* 1) INT status bit cleared
* 2) HW_RDY bit asserted
*/
if (wvalue & GMBUS_SW_CLR_INT) {
vgpu_vreg(vgpu, PCH_GMBUS2) &= ~GMBUS_INT;
vgpu_vreg(vgpu, PCH_GMBUS2) |= GMBUS_HW_RDY;
}
/* For virtualization, we suppose that HW is always ready,
* so GMBUS_SW_RDY should always be cleared
*/
if (wvalue & GMBUS_SW_RDY)
wvalue &= ~GMBUS_SW_RDY;
i2c_edid->gmbus.total_byte_count =
gmbus1_total_byte_count(wvalue);
slave_addr = gmbus1_slave_addr(wvalue);
/* vgpu gmbus only supports EDID */
if (slave_addr == EDID_ADDR) {
i2c_edid->slave_selected = true;
} else if (slave_addr != 0) {
gvt_dbg_dpy(
"vgpu%d: unsupported gmbus slave addr(0x%x)\n"
" gmbus operations will be ignored.\n",
vgpu->id, slave_addr);
}
if (wvalue & GMBUS_CYCLE_INDEX)
i2c_edid->current_edid_read =
gmbus1_slave_index(wvalue);
i2c_edid->gmbus.cycle_type = gmbus1_bus_cycle(wvalue);
switch (gmbus1_bus_cycle(wvalue)) {
case GMBUS_NOCYCLE:
break;
case GMBUS_STOP:
/* From spec:
* This can only cause a STOP to be generated
* if a GMBUS cycle is generated, the GMBUS is
* currently in a data/wait/idle phase, or it is in a
* WAIT phase
*/
if (gmbus1_bus_cycle(vgpu_vreg(vgpu, offset))
!= GMBUS_NOCYCLE) {
intel_vgpu_init_i2c_edid(vgpu);
/* After the 'stop' cycle, hw state would become
* 'stop phase' and then 'idle phase' after a
* few milliseconds. In emulation, we just set
* it as 'idle phase' ('stop phase' is not
* visible in gmbus interface)
*/
i2c_edid->gmbus.phase = GMBUS_IDLE_PHASE;
vgpu_vreg(vgpu, PCH_GMBUS2) &= ~GMBUS_ACTIVE;
}
break;
case NIDX_NS_W:
case IDX_NS_W:
case NIDX_STOP:
case IDX_STOP:
/* From hw spec the GMBUS phase
* transitions like this:
* START (-->INDEX) -->DATA
*/
i2c_edid->gmbus.phase = GMBUS_DATA_PHASE;
vgpu_vreg(vgpu, PCH_GMBUS2) |= GMBUS_ACTIVE;
break;
default:
gvt_err("Unknown/reserved GMBUS cycle detected!\n");
break;
}
/*
* From hw spec the WAIT state will be
* cleared:
* (1) in a new GMBUS cycle
* (2) by generating a stop
*/
vgpu_vreg(vgpu, offset) = wvalue;
}
return 0;
}
static int gmbus3_mmio_write(struct intel_vgpu *vgpu, unsigned int offset,
void *p_data, unsigned int bytes)
{
WARN_ON(1);
return 0;
}
static int gmbus3_mmio_read(struct intel_vgpu *vgpu, unsigned int offset,
void *p_data, unsigned int bytes)
{
int i;
unsigned char byte_data;
struct intel_vgpu_i2c_edid *i2c_edid = &vgpu->display.i2c_edid;
int byte_left = i2c_edid->gmbus.total_byte_count -
i2c_edid->current_edid_read;
int byte_count = byte_left;
u32 reg_data = 0;
/* Data can only be received if the previous settings are correct */
if (vgpu_vreg(vgpu, PCH_GMBUS1) & GMBUS_SLAVE_READ) {
if (byte_left <= 0) {
memcpy(p_data, &vgpu_vreg(vgpu, offset), bytes);
return 0;
}
if (byte_count > 4)
byte_count = 4;
for (i = 0; i < byte_count; i++) {
byte_data = edid_get_byte(vgpu);
reg_data |= (byte_data << (i << 3));
}
memcpy(&vgpu_vreg(vgpu, offset), &reg_data, byte_count);
memcpy(p_data, &vgpu_vreg(vgpu, offset), bytes);
if (byte_left <= 4) {
switch (i2c_edid->gmbus.cycle_type) {
case NIDX_STOP:
case IDX_STOP:
i2c_edid->gmbus.phase = GMBUS_IDLE_PHASE;
break;
case NIDX_NS_W:
case IDX_NS_W:
default:
i2c_edid->gmbus.phase = GMBUS_WAIT_PHASE;
break;
}
intel_vgpu_init_i2c_edid(vgpu);
}
/*
* Read GMBUS3 during send operation,
* return the latest written value
*/
} else {
memcpy(p_data, &vgpu_vreg(vgpu, offset), bytes);
gvt_err("vgpu%d: warning: gmbus3 read with nothing returned\n",
vgpu->id);
}
return 0;
}
static int gmbus2_mmio_read(struct intel_vgpu *vgpu, unsigned int offset,
void *p_data, unsigned int bytes)
{
u32 value = vgpu_vreg(vgpu, offset);
if (!(vgpu_vreg(vgpu, offset) & GMBUS_INUSE))
vgpu_vreg(vgpu, offset) |= GMBUS_INUSE;
memcpy(p_data, (void *)&value, bytes);
return 0;
}
static int gmbus2_mmio_write(struct intel_vgpu *vgpu, unsigned int offset,
void *p_data, unsigned int bytes)
{
u32 wvalue = *(u32 *)p_data;
if (wvalue & GMBUS_INUSE)
vgpu_vreg(vgpu, offset) &= ~GMBUS_INUSE;
/* All other bits are read-only */
return 0;
}
/**
 * intel_gvt_i2c_handle_gmbus_read - emulate gmbus register mmio read
 * @vgpu: a vGPU
 * @offset: reg offset
 * @p_data: data return buffer
 * @bytes: access data length
 *
 * This function is used to emulate gmbus register mmio read
 *
 * Returns:
 * Zero on success, negative error code if failed.
 *
 */
int intel_gvt_i2c_handle_gmbus_read(struct intel_vgpu *vgpu,
unsigned int offset, void *p_data, unsigned int bytes)
{
if (WARN_ON(bytes > 8 && (offset & (bytes - 1))))
return -EINVAL;
if (offset == i915_mmio_reg_offset(PCH_GMBUS2))
return gmbus2_mmio_read(vgpu, offset, p_data, bytes);
else if (offset == i915_mmio_reg_offset(PCH_GMBUS3))
return gmbus3_mmio_read(vgpu, offset, p_data, bytes);
memcpy(p_data, &vgpu_vreg(vgpu, offset), bytes);
return 0;
}
/**
 * intel_gvt_i2c_handle_gmbus_write - emulate gmbus register mmio write
 * @vgpu: a vGPU
 * @offset: reg offset
 * @p_data: write data buffer
 * @bytes: access data length
 *
 * This function is used to emulate gmbus register mmio write
 *
 * Returns:
 * Zero on success, negative error code if failed.
 *
 */
int intel_gvt_i2c_handle_gmbus_write(struct intel_vgpu *vgpu,
unsigned int offset, void *p_data, unsigned int bytes)
{
if (WARN_ON(bytes > 8 && (offset & (bytes - 1))))
return -EINVAL;
if (offset == i915_mmio_reg_offset(PCH_GMBUS0))
return gmbus0_mmio_write(vgpu, offset, p_data, bytes);
else if (offset == i915_mmio_reg_offset(PCH_GMBUS1))
return gmbus1_mmio_write(vgpu, offset, p_data, bytes);
else if (offset == i915_mmio_reg_offset(PCH_GMBUS2))
return gmbus2_mmio_write(vgpu, offset, p_data, bytes);
else if (offset == i915_mmio_reg_offset(PCH_GMBUS3))
return gmbus3_mmio_write(vgpu, offset, p_data, bytes);
memcpy(&vgpu_vreg(vgpu, offset), p_data, bytes);
return 0;
}
enum {
AUX_CH_CTL = 0,
AUX_CH_DATA1,
AUX_CH_DATA2,
AUX_CH_DATA3,
AUX_CH_DATA4,
AUX_CH_DATA5
};
static inline int get_aux_ch_reg(unsigned int offset)
{
int reg;
switch (offset & 0xff) {
case 0x10:
reg = AUX_CH_CTL;
break;
case 0x14:
reg = AUX_CH_DATA1;
break;
case 0x18:
reg = AUX_CH_DATA2;
break;
case 0x1c:
reg = AUX_CH_DATA3;
break;
case 0x20:
reg = AUX_CH_DATA4;
break;
case 0x24:
reg = AUX_CH_DATA5;
break;
default:
reg = -1;
break;
}
return reg;
}
#define AUX_CTL_MSG_LENGTH(reg) \
((reg & DP_AUX_CH_CTL_MESSAGE_SIZE_MASK) >> \
DP_AUX_CH_CTL_MESSAGE_SIZE_SHIFT)
/**
 * intel_gvt_i2c_handle_aux_ch_write - emulate AUX channel register write
 * @vgpu: a vGPU
 * @port_idx: port index
 * @offset: reg offset
 * @p_data: write data buffer
 *
 * This function is used to emulate AUX channel register write
 *
 */
void intel_gvt_i2c_handle_aux_ch_write(struct intel_vgpu *vgpu,
int port_idx,
unsigned int offset,
void *p_data)
{
struct intel_vgpu_i2c_edid *i2c_edid = &vgpu->display.i2c_edid;
int msg_length, ret_msg_size;
int msg, addr, ctrl, op;
u32 value = *(u32 *)p_data;
int aux_data_for_write = 0;
int reg = get_aux_ch_reg(offset);
if (reg != AUX_CH_CTL) {
vgpu_vreg(vgpu, offset) = value;
return;
}
msg_length = AUX_CTL_MSG_LENGTH(value);
/* check the msg in the DATA register */
msg = vgpu_vreg(vgpu, offset + 4);
addr = (msg >> 8) & 0xffff;
ctrl = (msg >> 24) & 0xff;
op = ctrl >> 4;
if (!(value & DP_AUX_CH_CTL_SEND_BUSY)) {
/* The ctl write to clear some states */
return;
}
/* Always set the value the VM expects to see. */
ret_msg_size = (((op & 0x1) == GVT_AUX_I2C_READ) ? 2 : 1);
vgpu_vreg(vgpu, offset) =
DP_AUX_CH_CTL_DONE |
((ret_msg_size << DP_AUX_CH_CTL_MESSAGE_SIZE_SHIFT) &
DP_AUX_CH_CTL_MESSAGE_SIZE_MASK);
if (msg_length == 3) {
if (!(op & GVT_AUX_I2C_MOT)) {
/* stop */
intel_vgpu_init_i2c_edid(vgpu);
} else {
/* start or restart */
i2c_edid->aux_ch.i2c_over_aux_ch = true;
i2c_edid->aux_ch.aux_ch_mot = true;
if (addr == 0) {
/* reset the address */
intel_vgpu_init_i2c_edid(vgpu);
} else if (addr == EDID_ADDR) {
i2c_edid->state = I2C_AUX_CH;
i2c_edid->port = port_idx;
i2c_edid->slave_selected = true;
if (intel_vgpu_has_monitor_on_port(vgpu,
port_idx) &&
intel_vgpu_port_is_dp(vgpu, port_idx))
i2c_edid->edid_available = true;
}
}
} else if ((op & 0x1) == GVT_AUX_I2C_WRITE) {
/* TODO
* We only support EDID reading from I2C_over_AUX. And
* we do not expect the index mode to be used. Right now
* the WRITE operation is ignored. This is good enough for
* the gfx driver to do EDID access.
*/
} else {
if (WARN_ON((op & 0x1) != GVT_AUX_I2C_READ))
return;
if (WARN_ON(msg_length != 4))
return;
if (i2c_edid->edid_available && i2c_edid->slave_selected) {
unsigned char val = edid_get_byte(vgpu);
aux_data_for_write = (val << 16);
}
}
/* write the return value in AUX_CH_DATA reg which includes:
* ACK of I2C_WRITE
* returned byte if it is READ
*/
aux_data_for_write |= (GVT_AUX_I2C_REPLY_ACK & 0xff) << 24;
vgpu_vreg(vgpu, offset + 4) = aux_data_for_write;
}
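/*
 * Sketch of the reply the function above leaves in the AUX data register
 * (offset + 4) for an I2C-over-AUX transaction:
 *   bits 31:24  GVT_AUX_I2C_REPLY_ACK (I2C-over-AUX ack)
 *   bits 23:16  the returned EDID byte (reads only)
 * The control register is rewritten with DP_AUX_CH_CTL_DONE and a message
 * size of 2 for reads (reply byte plus data byte) or 1 for writes (reply
 * byte only).
 */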
/**
* intel_vgpu_init_i2c_edid - initialize vGPU i2c edid emulation
* @vgpu: a vGPU
*
* This function is used to initialize the vGPU i2c edid emulation state
*
*/
void intel_vgpu_init_i2c_edid(struct intel_vgpu *vgpu)
{
struct intel_vgpu_i2c_edid *edid = &vgpu->display.i2c_edid;
edid->state = I2C_NOT_SPECIFIED;
edid->port = -1;
edid->slave_selected = false;
edid->edid_available = false;
edid->current_edid_read = 0;
memset(&edid->gmbus, 0, sizeof(struct intel_vgpu_i2c_gmbus));
edid->aux_ch.i2c_over_aux_ch = false;
edid->aux_ch.aux_ch_mot = false;
}
/*
* Copyright(c) 2011-2016 Intel Corporation. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice (including the next
* paragraph) shall be included in all copies or substantial portions of the
* Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*
* Authors:
* Ke Yu
* Zhiyuan Lv <zhiyuan.lv@intel.com>
*
* Contributors:
* Terrence Xu <terrence.xu@intel.com>
* Changbin Du <changbin.du@intel.com>
* Bing Niu <bing.niu@intel.com>
* Zhi Wang <zhi.a.wang@intel.com>
*
*/
#ifndef _GVT_EDID_H_
#define _GVT_EDID_H_
#define EDID_SIZE 128
#define EDID_ADDR 0x50 /* Linux hvm EDID addr */
#define GVT_AUX_NATIVE_WRITE 0x8
#define GVT_AUX_NATIVE_READ 0x9
#define GVT_AUX_I2C_WRITE 0x0
#define GVT_AUX_I2C_READ 0x1
#define GVT_AUX_I2C_STATUS 0x2
#define GVT_AUX_I2C_MOT 0x4
#define GVT_AUX_I2C_REPLY_ACK (0x0 << 6)
struct intel_vgpu_edid_data {
bool data_valid;
unsigned char edid_block[EDID_SIZE];
};
enum gmbus_cycle_type {
GMBUS_NOCYCLE = 0x0,
NIDX_NS_W = 0x1,
IDX_NS_W = 0x3,
GMBUS_STOP = 0x4,
NIDX_STOP = 0x5,
IDX_STOP = 0x7
};
/*
* States of GMBUS
*
* GMBUS0-3 are related to the EDID virtualization. The other two GMBUS
* registers, GMBUS4 (interrupt mask) and GMBUS5 (2-byte index register), are
* not considered here. Below describes the usage of the GMBUS registers that
* the EDID virtualization cares about
*
* GMBUS0:
* R/W
* port selection. value of bit0 - bit2 corresponds to the GPIO registers.
*
* GMBUS1:
* R/W Protect
* Command and Status.
* bit0 is the direction bit: 1 is read; 0 is write.
* bits 1 - 7 are the 7-bit slave address.
* bits 16 - 24: total byte count (ignore?)
*
* GMBUS2:
* Most bits are read-only, except bit 15 (IN_USE)
* Status register
* bits 0 - 8: current byte count
* bit 11: hardware ready;
*
* GMBUS3:
* Read/Write
* Data for transfer
*/
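/*
 * Rough EDID-read flow as emulated by the gmbus MMIO handlers above
 * (illustrative ordering, not a spec quote):
 *   1. write GMBUS0: select the pin pair; a monitor on a non-DP port makes
 *      the virtual EDID available, otherwise GMBUS_SATOER is reported.
 *   2. write GMBUS1: slave address 0x50 (EDID_ADDR), direction = read,
 *      total byte count and cycle type; the emulation enters the DATA phase.
 *   3. read GMBUS3 repeatedly: each read returns up to 4 EDID bytes.
 *   4. GMBUS2 reflects HW_RDY/ACTIVE/INUSE status during the transfer.
 */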
/* From the hw specs, other phases such as START, ADDRESS and INDEX
 * are invisible to the GMBUS MMIO interface, so they have no
 * definitions in the enum types below
*/
enum gvt_gmbus_phase {
GMBUS_IDLE_PHASE = 0,
GMBUS_DATA_PHASE,
GMBUS_WAIT_PHASE,
//GMBUS_STOP_PHASE,
GMBUS_MAX_PHASE
};
struct intel_vgpu_i2c_gmbus {
unsigned int total_byte_count; /* from GMBUS1 */
enum gmbus_cycle_type cycle_type;
enum gvt_gmbus_phase phase;
};
struct intel_vgpu_i2c_aux_ch {
bool i2c_over_aux_ch;
bool aux_ch_mot;
};
enum i2c_state {
I2C_NOT_SPECIFIED = 0,
I2C_GMBUS = 1,
I2C_AUX_CH = 2
};
/* I2C sequences cannot interleave.
* GMBUS and AUX_CH sequences cannot interleave.
*/
struct intel_vgpu_i2c_edid {
enum i2c_state state;
unsigned int port;
bool slave_selected;
bool edid_available;
unsigned int current_edid_read;
struct intel_vgpu_i2c_gmbus gmbus;
struct intel_vgpu_i2c_aux_ch aux_ch;
};
void intel_vgpu_init_i2c_edid(struct intel_vgpu *vgpu);
int intel_gvt_i2c_handle_gmbus_read(struct intel_vgpu *vgpu,
unsigned int offset, void *p_data, unsigned int bytes);
int intel_gvt_i2c_handle_gmbus_write(struct intel_vgpu *vgpu,
unsigned int offset, void *p_data, unsigned int bytes);
void intel_gvt_i2c_handle_aux_ch_write(struct intel_vgpu *vgpu,
int port_idx,
unsigned int offset,
void *p_data);
#endif /*_GVT_EDID_H_*/
/*
* Copyright(c) 2011-2016 Intel Corporation. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice (including the next
* paragraph) shall be included in all copies or substantial portions of the
* Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*
* Authors:
* Zhiyuan Lv <zhiyuan.lv@intel.com>
* Zhi Wang <zhi.a.wang@intel.com>
*
* Contributors:
* Min He <min.he@intel.com>
* Bing Niu <bing.niu@intel.com>
* Ping Gao <ping.a.gao@intel.com>
* Tina Zhang <tina.zhang@intel.com>
*
*/
#include "i915_drv.h"
#define _EL_OFFSET_STATUS 0x234
#define _EL_OFFSET_STATUS_BUF 0x370
#define _EL_OFFSET_STATUS_PTR 0x3A0
#define execlist_ring_mmio(gvt, ring_id, offset) \
(gvt->dev_priv->engine[ring_id].mmio_base + (offset))
#define valid_context(ctx) ((ctx)->valid)
#define same_context(a, b) (((a)->context_id == (b)->context_id) && \
((a)->lrca == (b)->lrca))
static int context_switch_events[] = {
[RCS] = RCS_AS_CONTEXT_SWITCH,
[BCS] = BCS_AS_CONTEXT_SWITCH,
[VCS] = VCS_AS_CONTEXT_SWITCH,
[VCS2] = VCS2_AS_CONTEXT_SWITCH,
[VECS] = VECS_AS_CONTEXT_SWITCH,
};
static int ring_id_to_context_switch_event(int ring_id)
{
if (WARN_ON(ring_id < RCS || ring_id >=
ARRAY_SIZE(context_switch_events)))
return -EINVAL;
return context_switch_events[ring_id];
}
static void switch_virtual_execlist_slot(struct intel_vgpu_execlist *execlist)
{
gvt_dbg_el("[before] running slot %d/context %x pending slot %d\n",
execlist->running_slot ?
execlist->running_slot->index : -1,
execlist->running_context ?
execlist->running_context->context_id : 0,
execlist->pending_slot ?
execlist->pending_slot->index : -1);
execlist->running_slot = execlist->pending_slot;
execlist->pending_slot = NULL;
execlist->running_context = execlist->running_context ?
&execlist->running_slot->ctx[0] : NULL;
gvt_dbg_el("[after] running slot %d/context %x pending slot %d\n",
execlist->running_slot ?
execlist->running_slot->index : -1,
execlist->running_context ?
execlist->running_context->context_id : 0,
execlist->pending_slot ?
execlist->pending_slot->index : -1);
}
static void emulate_execlist_status(struct intel_vgpu_execlist *execlist)
{
struct intel_vgpu_execlist_slot *running = execlist->running_slot;
struct intel_vgpu_execlist_slot *pending = execlist->pending_slot;
struct execlist_ctx_descriptor_format *desc = execlist->running_context;
struct intel_vgpu *vgpu = execlist->vgpu;
struct execlist_status_format status;
int ring_id = execlist->ring_id;
u32 status_reg = execlist_ring_mmio(vgpu->gvt,
ring_id, _EL_OFFSET_STATUS);
status.ldw = vgpu_vreg(vgpu, status_reg);
status.udw = vgpu_vreg(vgpu, status_reg + 4);
if (running) {
status.current_execlist_pointer = !!running->index;
status.execlist_write_pointer = !!!running->index;
status.execlist_0_active = status.execlist_0_valid =
!!!(running->index);
status.execlist_1_active = status.execlist_1_valid =
!!(running->index);
} else {
status.context_id = 0;
status.execlist_0_active = status.execlist_0_valid = 0;
status.execlist_1_active = status.execlist_1_valid = 0;
}
status.context_id = desc ? desc->context_id : 0;
status.execlist_queue_full = !!(pending);
vgpu_vreg(vgpu, status_reg) = status.ldw;
vgpu_vreg(vgpu, status_reg + 4) = status.udw;
gvt_dbg_el("vgpu%d: status reg offset %x ldw %x udw %x\n",
vgpu->id, status_reg, status.ldw, status.udw);
}
static void emulate_csb_update(struct intel_vgpu_execlist *execlist,
struct execlist_context_status_format *status,
bool trigger_interrupt_later)
{
struct intel_vgpu *vgpu = execlist->vgpu;
int ring_id = execlist->ring_id;
struct execlist_context_status_pointer_format ctx_status_ptr;
u32 write_pointer;
u32 ctx_status_ptr_reg, ctx_status_buf_reg, offset;
ctx_status_ptr_reg = execlist_ring_mmio(vgpu->gvt, ring_id,
_EL_OFFSET_STATUS_PTR);
ctx_status_buf_reg = execlist_ring_mmio(vgpu->gvt, ring_id,
_EL_OFFSET_STATUS_BUF);
ctx_status_ptr.dw = vgpu_vreg(vgpu, ctx_status_ptr_reg);
write_pointer = ctx_status_ptr.write_ptr;
if (write_pointer == 0x7)
write_pointer = 0;
else {
++write_pointer;
write_pointer %= 0x6;
}
offset = ctx_status_buf_reg + write_pointer * 8;
vgpu_vreg(vgpu, offset) = status->ldw;
vgpu_vreg(vgpu, offset + 4) = status->udw;
ctx_status_ptr.write_ptr = write_pointer;
vgpu_vreg(vgpu, ctx_status_ptr_reg) = ctx_status_ptr.dw;
gvt_dbg_el("vgpu%d: w pointer %u reg %x csb l %x csb h %x\n",
vgpu->id, write_pointer, offset, status->ldw, status->udw);
if (trigger_interrupt_later)
return;
intel_vgpu_trigger_virtual_event(vgpu,
ring_id_to_context_switch_event(execlist->ring_id));
}
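/*
 * Worked example for the CSB update above: after reset the guest sees
 * write_ptr == 0x7, so the first status event goes to slot 0
 * (ctx_status_buf_reg + 0 for the low dword, + 4 for the high dword) and
 * write_ptr becomes 0.  Later events advance the pointer by one with the
 * modulo-6 wrap above; each CSB slot occupies 8 bytes (two dwords).
 */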
static int emulate_execlist_ctx_schedule_out(
struct intel_vgpu_execlist *execlist,
struct execlist_ctx_descriptor_format *ctx)
{
struct intel_vgpu_execlist_slot *running = execlist->running_slot;
struct intel_vgpu_execlist_slot *pending = execlist->pending_slot;
struct execlist_ctx_descriptor_format *ctx0 = &running->ctx[0];
struct execlist_ctx_descriptor_format *ctx1 = &running->ctx[1];
struct execlist_context_status_format status;
memset(&status, 0, sizeof(status));
gvt_dbg_el("schedule out context id %x\n", ctx->context_id);
if (WARN_ON(!same_context(ctx, execlist->running_context))) {
gvt_err("schedule out context is not running context,"
"ctx id %x running ctx id %x\n",
ctx->context_id,
execlist->running_context->context_id);
return -EINVAL;
}
/* ctx1 is valid, ctx0/ctx is scheduled-out -> element switch */
if (valid_context(ctx1) && same_context(ctx0, ctx)) {
gvt_dbg_el("ctx 1 valid, ctx/ctx 0 is scheduled-out\n");
execlist->running_context = ctx1;
emulate_execlist_status(execlist);
status.context_complete = status.element_switch = 1;
status.context_id = ctx->context_id;
emulate_csb_update(execlist, &status, false);
/*
* ctx1 is not valid, ctx == ctx0
* ctx1 is valid, ctx1 == ctx
* --> last element is finished
* emulate:
* active-to-idle if there is *no* pending execlist
* context-complete if there *is* pending execlist
*/
} else if ((!valid_context(ctx1) && same_context(ctx0, ctx))
|| (valid_context(ctx1) && same_context(ctx1, ctx))) {
gvt_dbg_el("need to switch virtual execlist slot\n");
switch_virtual_execlist_slot(execlist);
emulate_execlist_status(execlist);
status.context_complete = status.active_to_idle = 1;
status.context_id = ctx->context_id;
if (!pending) {
emulate_csb_update(execlist, &status, false);
} else {
emulate_csb_update(execlist, &status, true);
memset(&status, 0, sizeof(status));
status.idle_to_active = 1;
status.context_id = 0;
emulate_csb_update(execlist, &status, false);
}
} else {
WARN_ON(1);
return -EINVAL;
}
return 0;
}
static struct intel_vgpu_execlist_slot *get_next_execlist_slot(
struct intel_vgpu_execlist *execlist)
{
struct intel_vgpu *vgpu = execlist->vgpu;
int ring_id = execlist->ring_id;
u32 status_reg = execlist_ring_mmio(vgpu->gvt, ring_id,
_EL_OFFSET_STATUS);
struct execlist_status_format status;
status.ldw = vgpu_vreg(vgpu, status_reg);
status.udw = vgpu_vreg(vgpu, status_reg + 4);
if (status.execlist_queue_full) {
gvt_err("virtual execlist slots are full\n");
return NULL;
}
return &execlist->slot[status.execlist_write_pointer];
}
static int emulate_execlist_schedule_in(struct intel_vgpu_execlist *execlist,
struct execlist_ctx_descriptor_format ctx[2])
{
struct intel_vgpu_execlist_slot *running = execlist->running_slot;
struct intel_vgpu_execlist_slot *slot =
get_next_execlist_slot(execlist);
struct execlist_ctx_descriptor_format *ctx0, *ctx1;
struct execlist_context_status_format status;
gvt_dbg_el("emulate schedule-in\n");
if (!slot) {
gvt_err("no available execlist slot\n");
return -EINVAL;
}
memset(&status, 0, sizeof(status));
memset(slot->ctx, 0, sizeof(slot->ctx));
slot->ctx[0] = ctx[0];
slot->ctx[1] = ctx[1];
gvt_dbg_el("alloc slot index %d ctx 0 %x ctx 1 %x\n",
slot->index, ctx[0].context_id,
ctx[1].context_id);
/*
* no running execlist: treat this write bundle as the running execlist
* -> idle-to-active
*/
if (!running) {
gvt_dbg_el("no current running execlist\n");
execlist->running_slot = slot;
execlist->pending_slot = NULL;
execlist->running_context = &slot->ctx[0];
gvt_dbg_el("running slot index %d running context %x\n",
execlist->running_slot->index,
execlist->running_context->context_id);
emulate_execlist_status(execlist);
status.idle_to_active = 1;
status.context_id = 0;
emulate_csb_update(execlist, &status, false);
return 0;
}
ctx0 = &running->ctx[0];
ctx1 = &running->ctx[1];
gvt_dbg_el("current running slot index %d ctx 0 %x ctx 1 %x\n",
running->index, ctx0->context_id, ctx1->context_id);
/*
* already has a running execlist
* a. running ctx1 is valid,
* ctx0 is finished, and running ctx1 == new execlist ctx[0]
* b. running ctx1 is not valid,
* ctx0 == new execlist ctx[0]
* ----> lite-restore + preempted
*/
if ((valid_context(ctx1) && same_context(ctx1, &slot->ctx[0]) &&
/* condition a */
(!same_context(ctx0, execlist->running_context))) ||
(!valid_context(ctx1) &&
same_context(ctx0, &slot->ctx[0]))) { /* condition b */
gvt_dbg_el("need to switch virtual execlist slot\n");
execlist->pending_slot = slot;
switch_virtual_execlist_slot(execlist);
emulate_execlist_status(execlist);
status.lite_restore = status.preempted = 1;
status.context_id = ctx[0].context_id;
emulate_csb_update(execlist, &status, false);
} else {
gvt_dbg_el("emulate as pending slot\n");
/*
* otherwise
* --> emulate the pending-execlist-exists case, without preemption
*/
execlist->pending_slot = slot;
emulate_execlist_status(execlist);
}
return 0;
}
static void free_workload(struct intel_vgpu_workload *workload)
{
intel_vgpu_unpin_mm(workload->shadow_mm);
intel_gvt_mm_unreference(workload->shadow_mm);
kmem_cache_free(workload->vgpu->workloads, workload);
}
#define get_desc_from_elsp_dwords(ed, i) \
((struct execlist_ctx_descriptor_format *)&((ed)->data[i * 2]))
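/*
 * The ELSP port is written as four dwords in the order: element 1 upper,
 * element 1 lower, element 0 upper, element 0 lower.  With the macro above,
 * elsp_dwords.data[0..1] therefore describe element 1 and data[2..3]
 * describe element 0, which is why callers below fetch element 0 with
 * index 1 and element 1 with index 0.
 */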
#define BATCH_BUFFER_ADDR_MASK ((1UL << 32) - (1U << 2))
#define BATCH_BUFFER_ADDR_HIGH_MASK ((1UL << 16) - (1U))
static int set_gma_to_bb_cmd(struct intel_shadow_bb_entry *entry_obj,
unsigned long add, int gmadr_bytes)
{
if (WARN_ON(gmadr_bytes != 4 && gmadr_bytes != 8))
return -1;
*((u32 *)(entry_obj->bb_start_cmd_va + (1 << 2))) = add &
BATCH_BUFFER_ADDR_MASK;
if (gmadr_bytes == 8) {
*((u32 *)(entry_obj->bb_start_cmd_va + (2 << 2))) =
add & BATCH_BUFFER_ADDR_HIGH_MASK;
}
return 0;
}
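/*
 * Layout assumed above: bb_start_cmd_va points at the guest's
 * MI_BATCH_BUFFER_START command.  Dword 1 (byte offset 1 << 2) receives
 * bits 31:2 of the shadow batch buffer address, and when graphics
 * addresses are encoded in 8 bytes dword 2 receives the upper 16 address
 * bits.  prepare_shadow_batch_buffer() below patches those dwords in place.
 */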
static void prepare_shadow_batch_buffer(struct intel_vgpu_workload *workload)
{
int gmadr_bytes = workload->vgpu->gvt->device_info.gmadr_bytes_in_cmd;
struct i915_vma *vma;
unsigned long gma;
/* pin the gem object to ggtt */
if (!list_empty(&workload->shadow_bb)) {
struct intel_shadow_bb_entry *entry_obj =
list_first_entry(&workload->shadow_bb,
struct intel_shadow_bb_entry,
list);
struct intel_shadow_bb_entry *temp;
list_for_each_entry_safe(entry_obj, temp, &workload->shadow_bb,
list) {
vma = i915_gem_object_ggtt_pin(entry_obj->obj, NULL, 0,
0, 0);
if (IS_ERR(vma)) {
gvt_err("Cannot pin\n");
return;
}
i915_gem_object_unpin_pages(entry_obj->obj);
/* update the relocated gma with the shadow batch buffer */
gma = i915_gem_object_ggtt_offset(entry_obj->obj, NULL);
WARN_ON(!IS_ALIGNED(gma, 4));
set_gma_to_bb_cmd(entry_obj, gma, gmadr_bytes);
}
}
}
static int update_wa_ctx_2_shadow_ctx(struct intel_shadow_wa_ctx *wa_ctx)
{
int ring_id = wa_ctx->workload->ring_id;
struct i915_gem_context *shadow_ctx =
wa_ctx->workload->vgpu->shadow_ctx;
struct drm_i915_gem_object *ctx_obj =
shadow_ctx->engine[ring_id].state->obj;
struct execlist_ring_context *shadow_ring_context;
struct page *page;
page = i915_gem_object_get_page(ctx_obj, LRC_STATE_PN);
shadow_ring_context = kmap_atomic(page);
shadow_ring_context->bb_per_ctx_ptr.val =
(shadow_ring_context->bb_per_ctx_ptr.val &
(~PER_CTX_ADDR_MASK)) | wa_ctx->per_ctx.shadow_gma;
shadow_ring_context->rcs_indirect_ctx.val =
(shadow_ring_context->rcs_indirect_ctx.val &
(~INDIRECT_CTX_ADDR_MASK)) | wa_ctx->indirect_ctx.shadow_gma;
kunmap_atomic(shadow_ring_context);
return 0;
}
static void prepare_shadow_wa_ctx(struct intel_shadow_wa_ctx *wa_ctx)
{
struct i915_vma *vma;
unsigned long gma;
unsigned char *per_ctx_va =
(unsigned char *)wa_ctx->indirect_ctx.shadow_va +
wa_ctx->indirect_ctx.size;
if (wa_ctx->indirect_ctx.size == 0)
return;
vma = i915_gem_object_ggtt_pin(wa_ctx->indirect_ctx.obj, NULL, 0, 0, 0);
if (IS_ERR(vma)) {
gvt_err("Cannot pin indirect ctx obj\n");
return;
}
i915_gem_object_unpin_pages(wa_ctx->indirect_ctx.obj);
gma = i915_gem_object_ggtt_offset(wa_ctx->indirect_ctx.obj, NULL);
WARN_ON(!IS_ALIGNED(gma, CACHELINE_BYTES));
wa_ctx->indirect_ctx.shadow_gma = gma;
wa_ctx->per_ctx.shadow_gma = *((unsigned int *)per_ctx_va + 1);
memset(per_ctx_va, 0, CACHELINE_BYTES);
update_wa_ctx_2_shadow_ctx(wa_ctx);
}
static int prepare_execlist_workload(struct intel_vgpu_workload *workload)
{
struct intel_vgpu *vgpu = workload->vgpu;
struct execlist_ctx_descriptor_format ctx[2];
int ring_id = workload->ring_id;
intel_vgpu_pin_mm(workload->shadow_mm);
intel_vgpu_sync_oos_pages(workload->vgpu);
intel_vgpu_flush_post_shadow(workload->vgpu);
prepare_shadow_batch_buffer(workload);
prepare_shadow_wa_ctx(&workload->wa_ctx);
if (!workload->emulate_schedule_in)
return 0;
ctx[0] = *get_desc_from_elsp_dwords(&workload->elsp_dwords, 1);
ctx[1] = *get_desc_from_elsp_dwords(&workload->elsp_dwords, 0);
return emulate_execlist_schedule_in(&vgpu->execlist[ring_id], ctx);
}
static void release_shadow_batch_buffer(struct intel_vgpu_workload *workload)
{
/* release all the shadow batch buffers */
if (!list_empty(&workload->shadow_bb)) {
struct intel_shadow_bb_entry *entry_obj =
list_first_entry(&workload->shadow_bb,
struct intel_shadow_bb_entry,
list);
struct intel_shadow_bb_entry *temp;
list_for_each_entry_safe(entry_obj, temp, &workload->shadow_bb,
list) {
drm_gem_object_unreference(&(entry_obj->obj->base));
kvfree(entry_obj->va);
list_del(&entry_obj->list);
kfree(entry_obj);
}
}
}
static void release_shadow_wa_ctx(struct intel_shadow_wa_ctx *wa_ctx)
{
if (wa_ctx->indirect_ctx.size == 0)
return;
drm_gem_object_unreference(&(wa_ctx->indirect_ctx.obj->base));
kvfree(wa_ctx->indirect_ctx.shadow_va);
}
static int complete_execlist_workload(struct intel_vgpu_workload *workload)
{
struct intel_vgpu *vgpu = workload->vgpu;
struct intel_vgpu_execlist *execlist =
&vgpu->execlist[workload->ring_id];
struct intel_vgpu_workload *next_workload;
struct list_head *next = workload_q_head(vgpu, workload->ring_id)->next;
bool lite_restore = false;
int ret;
gvt_dbg_el("complete workload %p status %d\n", workload,
workload->status);
release_shadow_batch_buffer(workload);
release_shadow_wa_ctx(&workload->wa_ctx);
if (workload->status || vgpu->resetting)
goto out;
if (!list_empty(workload_q_head(vgpu, workload->ring_id))) {
struct execlist_ctx_descriptor_format *this_desc, *next_desc;
next_workload = container_of(next,
struct intel_vgpu_workload, list);
this_desc = &workload->ctx_desc;
next_desc = &next_workload->ctx_desc;
lite_restore = same_context(this_desc, next_desc);
}
if (lite_restore) {
gvt_dbg_el("next context == current - no schedule-out\n");
free_workload(workload);
return 0;
}
ret = emulate_execlist_ctx_schedule_out(execlist, &workload->ctx_desc);
if (ret)
goto err;
out:
free_workload(workload);
return 0;
err:
free_workload(workload);
return ret;
}
#define RING_CTX_OFF(x) \
offsetof(struct execlist_ring_context, x)
static void read_guest_pdps(struct intel_vgpu *vgpu,
u64 ring_context_gpa, u32 pdp[8])
{
u64 gpa;
int i;
gpa = ring_context_gpa + RING_CTX_OFF(pdp3_UDW.val);
for (i = 0; i < 8; i++)
intel_gvt_hypervisor_read_gpa(vgpu,
gpa + i * 8, &pdp[7 - i], 4);
}
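/*
 * The ring context stores the PDPs from pdp3_UDW down to pdp0_LDW (see
 * struct execlist_ring_context), so the reversed copy above leaves the
 * array in ascending order: pdp[0]/pdp[1] are the low/high dwords of PDP0,
 * pdp[2]/pdp[3] of PDP1, and so on up to PDP3.
 */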
static int prepare_mm(struct intel_vgpu_workload *workload)
{
struct execlist_ctx_descriptor_format *desc = &workload->ctx_desc;
struct intel_vgpu_mm *mm;
int page_table_level;
u32 pdp[8];
if (desc->addressing_mode == 1) { /* legacy 32-bit */
page_table_level = 3;
} else if (desc->addressing_mode == 3) { /* legacy 64 bit */
page_table_level = 4;
} else {
gvt_err("Advanced Context mode(SVM) is not supported!\n");
return -EINVAL;
}
read_guest_pdps(workload->vgpu, workload->ring_context_gpa, pdp);
mm = intel_vgpu_find_ppgtt_mm(workload->vgpu, page_table_level, pdp);
if (mm) {
intel_gvt_mm_reference(mm);
} else {
mm = intel_vgpu_create_mm(workload->vgpu, INTEL_GVT_MM_PPGTT,
pdp, page_table_level, 0);
if (IS_ERR(mm)) {
gvt_err("fail to create mm object.\n");
return PTR_ERR(mm);
}
}
workload->shadow_mm = mm;
return 0;
}
#define get_last_workload(q) \
(list_empty(q) ? NULL : container_of(q->prev, \
struct intel_vgpu_workload, list))
static int submit_context(struct intel_vgpu *vgpu, int ring_id,
struct execlist_ctx_descriptor_format *desc,
bool emulate_schedule_in)
{
struct list_head *q = workload_q_head(vgpu, ring_id);
struct intel_vgpu_workload *last_workload = get_last_workload(q);
struct intel_vgpu_workload *workload = NULL;
u64 ring_context_gpa;
u32 head, tail, start, ctl, ctx_ctl, per_ctx, indirect_ctx;
int ret;
ring_context_gpa = intel_vgpu_gma_to_gpa(vgpu->gtt.ggtt_mm,
(u32)((desc->lrca + 1) << GTT_PAGE_SHIFT));
if (ring_context_gpa == INTEL_GVT_INVALID_ADDR) {
gvt_err("invalid guest context LRCA: %x\n", desc->lrca);
return -EINVAL;
}
intel_gvt_hypervisor_read_gpa(vgpu, ring_context_gpa +
RING_CTX_OFF(ring_header.val), &head, 4);
intel_gvt_hypervisor_read_gpa(vgpu, ring_context_gpa +
RING_CTX_OFF(ring_tail.val), &tail, 4);
head &= RB_HEAD_OFF_MASK;
tail &= RB_TAIL_OFF_MASK;
if (last_workload && same_context(&last_workload->ctx_desc, desc)) {
gvt_dbg_el("ring id %d cur workload == last\n", ring_id);
gvt_dbg_el("ctx head %x real head %lx\n", head,
last_workload->rb_tail);
/*
* cannot use guest context head pointer here,
* as it might not be updated at this time
*/
head = last_workload->rb_tail;
}
gvt_dbg_el("ring id %d begin a new workload\n", ring_id);
workload = kmem_cache_zalloc(vgpu->workloads, GFP_KERNEL);
if (!workload)
return -ENOMEM;
/* record some ring buffer register values for scan and shadow */
intel_gvt_hypervisor_read_gpa(vgpu, ring_context_gpa +
RING_CTX_OFF(rb_start.val), &start, 4);
intel_gvt_hypervisor_read_gpa(vgpu, ring_context_gpa +
RING_CTX_OFF(rb_ctrl.val), &ctl, 4);
intel_gvt_hypervisor_read_gpa(vgpu, ring_context_gpa +
RING_CTX_OFF(ctx_ctrl.val), &ctx_ctl, 4);
INIT_LIST_HEAD(&workload->list);
INIT_LIST_HEAD(&workload->shadow_bb);
init_waitqueue_head(&workload->shadow_ctx_status_wq);
atomic_set(&workload->shadow_ctx_active, 0);
workload->vgpu = vgpu;
workload->ring_id = ring_id;
workload->ctx_desc = *desc;
workload->ring_context_gpa = ring_context_gpa;
workload->rb_head = head;
workload->rb_tail = tail;
workload->rb_start = start;
workload->rb_ctl = ctl;
workload->prepare = prepare_execlist_workload;
workload->complete = complete_execlist_workload;
workload->status = -EINPROGRESS;
workload->emulate_schedule_in = emulate_schedule_in;
if (ring_id == RCS) {
intel_gvt_hypervisor_read_gpa(vgpu, ring_context_gpa +
RING_CTX_OFF(bb_per_ctx_ptr.val), &per_ctx, 4);
intel_gvt_hypervisor_read_gpa(vgpu, ring_context_gpa +
RING_CTX_OFF(rcs_indirect_ctx.val), &indirect_ctx, 4);
workload->wa_ctx.indirect_ctx.guest_gma =
indirect_ctx & INDIRECT_CTX_ADDR_MASK;
workload->wa_ctx.indirect_ctx.size =
(indirect_ctx & INDIRECT_CTX_SIZE_MASK) *
CACHELINE_BYTES;
workload->wa_ctx.per_ctx.guest_gma =
per_ctx & PER_CTX_ADDR_MASK;
workload->wa_ctx.workload = workload;
WARN_ON(workload->wa_ctx.indirect_ctx.size && !(per_ctx & 0x1));
}
if (emulate_schedule_in)
memcpy(&workload->elsp_dwords,
&vgpu->execlist[ring_id].elsp_dwords,
sizeof(workload->elsp_dwords));
gvt_dbg_el("workload %p ring id %d head %x tail %x start %x ctl %x\n",
workload, ring_id, head, tail, start, ctl);
gvt_dbg_el("workload %p emulate schedule_in %d\n", workload,
emulate_schedule_in);
ret = prepare_mm(workload);
if (ret) {
kmem_cache_free(vgpu->workloads, workload);
return ret;
}
queue_workload(workload);
return 0;
}
int intel_vgpu_submit_execlist(struct intel_vgpu *vgpu, int ring_id)
{
struct intel_vgpu_execlist *execlist = &vgpu->execlist[ring_id];
struct execlist_ctx_descriptor_format *desc[2], valid_desc[2];
unsigned long valid_desc_bitmap = 0;
bool emulate_schedule_in = true;
int ret;
int i;
memset(valid_desc, 0, sizeof(valid_desc));
desc[0] = get_desc_from_elsp_dwords(&execlist->elsp_dwords, 1);
desc[1] = get_desc_from_elsp_dwords(&execlist->elsp_dwords, 0);
for (i = 0; i < 2; i++) {
if (!desc[i]->valid)
continue;
if (!desc[i]->privilege_access) {
gvt_err("vgpu%d: unexpected GGTT elsp submission\n",
vgpu->id);
return -EINVAL;
}
/* TODO: add another guest context checks here. */
set_bit(i, &valid_desc_bitmap);
valid_desc[i] = *desc[i];
}
if (!valid_desc_bitmap) {
gvt_err("vgpu%d: no valid desc in a elsp submission\n",
vgpu->id);
return -EINVAL;
}
if (!test_bit(0, (void *)&valid_desc_bitmap) &&
test_bit(1, (void *)&valid_desc_bitmap)) {
gvt_err("vgpu%d: weird elsp submission, desc 0 is not valid\n",
vgpu->id);
return -EINVAL;
}
/* submit workload */
for_each_set_bit(i, (void *)&valid_desc_bitmap, 2) {
ret = submit_context(vgpu, ring_id, &valid_desc[i],
emulate_schedule_in);
if (ret) {
gvt_err("vgpu%d: fail to schedule workload\n",
vgpu->id);
return ret;
}
emulate_schedule_in = false;
}
return 0;
}
static void init_vgpu_execlist(struct intel_vgpu *vgpu, int ring_id)
{
struct intel_vgpu_execlist *execlist = &vgpu->execlist[ring_id];
struct execlist_context_status_pointer_format ctx_status_ptr;
u32 ctx_status_ptr_reg;
memset(execlist, 0, sizeof(*execlist));
execlist->vgpu = vgpu;
execlist->ring_id = ring_id;
execlist->slot[0].index = 0;
execlist->slot[1].index = 1;
ctx_status_ptr_reg = execlist_ring_mmio(vgpu->gvt, ring_id,
_EL_OFFSET_STATUS_PTR);
ctx_status_ptr.dw = vgpu_vreg(vgpu, ctx_status_ptr_reg);
ctx_status_ptr.read_ptr = ctx_status_ptr.write_ptr = 0x7;
vgpu_vreg(vgpu, ctx_status_ptr_reg) = ctx_status_ptr.dw;
}
void intel_vgpu_clean_execlist(struct intel_vgpu *vgpu)
{
kmem_cache_destroy(vgpu->workloads);
}
int intel_vgpu_init_execlist(struct intel_vgpu *vgpu)
{
int i;
/* each ring has a virtual execlist engine */
for (i = 0; i < I915_NUM_ENGINES; i++) {
init_vgpu_execlist(vgpu, i);
INIT_LIST_HEAD(&vgpu->workload_q_head[i]);
}
vgpu->workloads = kmem_cache_create("gvt-g vgpu workload",
sizeof(struct intel_vgpu_workload), 0,
SLAB_HWCACHE_ALIGN,
NULL);
if (!vgpu->workloads)
return -ENOMEM;
return 0;
}
void intel_vgpu_reset_execlist(struct intel_vgpu *vgpu,
unsigned long ring_bitmap)
{
int bit;
struct list_head *pos, *n;
struct intel_vgpu_workload *workload = NULL;
for_each_set_bit(bit, &ring_bitmap, sizeof(ring_bitmap) * 8) {
if (bit >= I915_NUM_ENGINES)
break;
/* free the unsubmitted workloads in the queue */
list_for_each_safe(pos, n, &vgpu->workload_q_head[bit]) {
workload = container_of(pos,
struct intel_vgpu_workload, list);
list_del_init(&workload->list);
free_workload(workload);
}
init_vgpu_execlist(vgpu, bit);
}
}
/*
* Copyright(c) 2011-2016 Intel Corporation. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice (including the next
* paragraph) shall be included in all copies or substantial portions of the
* Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*
* Authors:
* Zhiyuan Lv <zhiyuan.lv@intel.com>
* Zhi Wang <zhi.a.wang@intel.com>
*
* Contributors:
* Min He <min.he@intel.com>
* Bing Niu <bing.niu@intel.com>
* Ping Gao <ping.a.gao@intel.com>
* Tina Zhang <tina.zhang@intel.com>
*
*/
#ifndef _GVT_EXECLIST_H_
#define _GVT_EXECLIST_H_
struct execlist_ctx_descriptor_format {
union {
u32 udw;
u32 context_id;
};
union {
u32 ldw;
struct {
u32 valid : 1;
u32 force_pd_restore : 1;
u32 force_restore : 1;
u32 addressing_mode : 2;
u32 llc_coherency : 1;
u32 fault_handling : 2;
u32 privilege_access : 1;
u32 reserved : 3;
u32 lrca : 20;
};
};
};
struct execlist_status_format {
union {
u32 ldw;
struct {
u32 current_execlist_pointer :1;
u32 execlist_write_pointer :1;
u32 execlist_queue_full :1;
u32 execlist_1_valid :1;
u32 execlist_0_valid :1;
u32 last_ctx_switch_reason :9;
u32 current_active_elm_status :2;
u32 arbitration_enable :1;
u32 execlist_1_active :1;
u32 execlist_0_active :1;
u32 reserved :13;
};
};
union {
u32 udw;
u32 context_id;
};
};
struct execlist_context_status_pointer_format {
union {
u32 dw;
struct {
u32 write_ptr :3;
u32 reserved :5;
u32 read_ptr :3;
u32 reserved2 :5;
u32 mask :16;
};
};
};
struct execlist_context_status_format {
union {
u32 ldw;
struct {
u32 idle_to_active :1;
u32 preempted :1;
u32 element_switch :1;
u32 active_to_idle :1;
u32 context_complete :1;
u32 wait_on_sync_flip :1;
u32 wait_on_vblank :1;
u32 wait_on_semaphore :1;
u32 wait_on_scanline :1;
u32 reserved :2;
u32 semaphore_wait_mode :1;
u32 display_plane :3;
u32 lite_restore :1;
u32 reserved_2 :16;
};
};
union {
u32 udw;
u32 context_id;
};
};
struct execlist_mmio_pair {
u32 addr;
u32 val;
};
/* The first 52 dwords in register state context */
struct execlist_ring_context {
u32 nop1;
u32 lri_cmd_1;
struct execlist_mmio_pair ctx_ctrl;
struct execlist_mmio_pair ring_header;
struct execlist_mmio_pair ring_tail;
struct execlist_mmio_pair rb_start;
struct execlist_mmio_pair rb_ctrl;
struct execlist_mmio_pair bb_cur_head_UDW;
struct execlist_mmio_pair bb_cur_head_LDW;
struct execlist_mmio_pair bb_state;
struct execlist_mmio_pair second_bb_addr_UDW;
struct execlist_mmio_pair second_bb_addr_LDW;
struct execlist_mmio_pair second_bb_state;
struct execlist_mmio_pair bb_per_ctx_ptr;
struct execlist_mmio_pair rcs_indirect_ctx;
struct execlist_mmio_pair rcs_indirect_ctx_offset;
u32 nop2;
u32 nop3;
u32 nop4;
u32 lri_cmd_2;
struct execlist_mmio_pair ctx_timestamp;
struct execlist_mmio_pair pdp3_UDW;
struct execlist_mmio_pair pdp3_LDW;
struct execlist_mmio_pair pdp2_UDW;
struct execlist_mmio_pair pdp2_LDW;
struct execlist_mmio_pair pdp1_UDW;
struct execlist_mmio_pair pdp1_LDW;
struct execlist_mmio_pair pdp0_UDW;
struct execlist_mmio_pair pdp0_LDW;
};
struct intel_vgpu_elsp_dwords {
u32 data[4];
u32 index;
};
struct intel_vgpu_execlist_slot {
struct execlist_ctx_descriptor_format ctx[2];
u32 index;
};
struct intel_vgpu_execlist {
struct intel_vgpu_execlist_slot slot[2];
struct intel_vgpu_execlist_slot *running_slot;
struct intel_vgpu_execlist_slot *pending_slot;
struct execlist_ctx_descriptor_format *running_context;
int ring_id;
struct intel_vgpu *vgpu;
struct intel_vgpu_elsp_dwords elsp_dwords;
};
void intel_vgpu_clean_execlist(struct intel_vgpu *vgpu);
int intel_vgpu_init_execlist(struct intel_vgpu *vgpu);
int intel_vgpu_submit_execlist(struct intel_vgpu *vgpu, int ring_id);
void intel_vgpu_reset_execlist(struct intel_vgpu *vgpu,
unsigned long ring_bitmap);
#endif /*_GVT_EXECLIST_H_*/
/*
* Copyright(c) 2011-2016 Intel Corporation. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice (including the next
* paragraph) shall be included in all copies or substantial portions of the
* Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*
* Authors:
* Zhi Wang <zhi.a.wang@intel.com>
*
* Contributors:
* Changbin Du <changbin.du@intel.com>
*
*/
#include <linux/firmware.h>
#include <linux/crc32.h>
#include "i915_drv.h"
#define FIRMWARE_VERSION (0x0)
struct gvt_firmware_header {
u64 magic;
u32 crc32; /* protect the data after this field */
u32 version;
u64 cfg_space_size;
u64 cfg_space_offset; /* offset in the file */
u64 mmio_size;
u64 mmio_offset; /* offset in the file */
unsigned char data[1];
};
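/*
 * Resulting file layout (offsets are relative to the start of the file):
 *   0                 gvt_firmware_header (magic, crc32, version, ...)
 *   cfg_space_offset  snapshot of the PCI config space, cfg_space_size bytes
 *   mmio_offset       snapshot of the MMIO BAR, mmio_size bytes
 *                     (mmio_offset == cfg_space_offset + cfg_space_size)
 * crc32 protects everything after the crc32 field itself, which is why
 * verification starts at offsetof(struct gvt_firmware_header, crc32) + 4.
 */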
#define RD(offset) (readl(mmio + offset.reg))
#define WR(v, offset) (writel(v, mmio + offset.reg))
static void bdw_forcewake_get(void *mmio)
{
WR(_MASKED_BIT_DISABLE(0xffff), FORCEWAKE_MT);
RD(ECOBUS);
if (wait_for((RD(FORCEWAKE_ACK_HSW) & FORCEWAKE_KERNEL) == 0, 50))
gvt_err("fail to wait forcewake idle\n");
WR(_MASKED_BIT_ENABLE(FORCEWAKE_KERNEL), FORCEWAKE_MT);
if (wait_for((RD(FORCEWAKE_ACK_HSW) & FORCEWAKE_KERNEL), 50))
gvt_err("fail to wait forcewake ack\n");
if (wait_for((RD(GEN6_GT_THREAD_STATUS_REG) &
GEN6_GT_THREAD_STATUS_CORE_MASK) == 0, 50))
gvt_err("fail to wait c0 wake up\n");
}
#undef RD
#undef WR
#define dev_to_drm_minor(d) dev_get_drvdata((d))
static ssize_t
gvt_firmware_read(struct file *filp, struct kobject *kobj,
struct bin_attribute *attr, char *buf,
loff_t offset, size_t count)
{
memcpy(buf, attr->private + offset, count);
return count;
}
static struct bin_attribute firmware_attr = {
.attr = {.name = "gvt_firmware", .mode = (S_IRUSR)},
.read = gvt_firmware_read,
.write = NULL,
.mmap = NULL,
};
static int expose_firmware_sysfs(struct intel_gvt *gvt, void *mmio)
{
struct intel_gvt_device_info *info = &gvt->device_info;
struct pci_dev *pdev = gvt->dev_priv->drm.pdev;
struct intel_gvt_mmio_info *e;
struct gvt_firmware_header *h;
void *firmware;
void *p;
unsigned long size;
int i;
int ret;
size = sizeof(*h) + info->mmio_size + info->cfg_space_size - 1;
firmware = vmalloc(size);
if (!firmware)
return -ENOMEM;
h = firmware;
h->magic = VGT_MAGIC;
h->version = FIRMWARE_VERSION;
h->cfg_space_size = info->cfg_space_size;
h->cfg_space_offset = offsetof(struct gvt_firmware_header, data);
h->mmio_size = info->mmio_size;
h->mmio_offset = h->cfg_space_offset + h->cfg_space_size;
p = firmware + h->cfg_space_offset;
for (i = 0; i < h->cfg_space_size; i += 4)
pci_read_config_dword(pdev, i, p + i);
memcpy(gvt->firmware.cfg_space, p, info->cfg_space_size);
p = firmware + h->mmio_offset;
hash_for_each(gvt->mmio.mmio_info_table, i, e, node) {
int j;
for (j = 0; j < e->length; j += 4)
*(u32 *)(p + e->offset + j) =
readl(mmio + e->offset + j);
}
memcpy(gvt->firmware.mmio, p, info->mmio_size);
firmware_attr.size = size;
firmware_attr.private = firmware;
ret = device_create_bin_file(&pdev->dev, &firmware_attr);
if (ret) {
vfree(firmware);
return ret;
}
return 0;
}
static void clean_firmware_sysfs(struct intel_gvt *gvt)
{
struct pci_dev *pdev = gvt->dev_priv->drm.pdev;
device_remove_bin_file(&pdev->dev, &firmware_attr);
vfree(firmware_attr.private);
}
/**
* intel_gvt_free_firmware - free GVT firmware
* @gvt: intel gvt device
*
*/
void intel_gvt_free_firmware(struct intel_gvt *gvt)
{
if (!gvt->firmware.firmware_loaded)
clean_firmware_sysfs(gvt);
kfree(gvt->firmware.cfg_space);
kfree(gvt->firmware.mmio);
}
static int verify_firmware(struct intel_gvt *gvt,
const struct firmware *fw)
{
struct intel_gvt_device_info *info = &gvt->device_info;
struct drm_i915_private *dev_priv = gvt->dev_priv;
struct pci_dev *pdev = dev_priv->drm.pdev;
struct gvt_firmware_header *h;
unsigned long id, crc32_start;
const void *mem;
const char *item;
u64 file, request;
h = (struct gvt_firmware_header *)fw->data;
crc32_start = offsetof(struct gvt_firmware_header, crc32) + 4;
mem = fw->data + crc32_start;
#define VERIFY(s, a, b) do { \
item = (s); file = (u64)(a); request = (u64)(b); \
if ((a) != (b)) \
goto invalid_firmware; \
} while (0)
VERIFY("magic number", h->magic, VGT_MAGIC);
VERIFY("version", h->version, FIRMWARE_VERSION);
VERIFY("crc32", h->crc32, crc32_le(0, mem, fw->size - crc32_start));
VERIFY("cfg space size", h->cfg_space_size, info->cfg_space_size);
VERIFY("mmio size", h->mmio_size, info->mmio_size);
mem = (fw->data + h->cfg_space_offset);
id = *(u16 *)(mem + PCI_VENDOR_ID);
VERIFY("vender id", id, pdev->vendor);
id = *(u16 *)(mem + PCI_DEVICE_ID);
VERIFY("device id", id, pdev->device);
id = *(u8 *)(mem + PCI_REVISION_ID);
VERIFY("revision id", id, pdev->revision);
#undef VERIFY
return 0;
invalid_firmware:
gvt_dbg_core("Invalid firmware: %s [file] 0x%llx [request] 0x%llx\n",
item, file, request);
return -EINVAL;
}
#define GVT_FIRMWARE_PATH "i915/gvt"
/**
* intel_gvt_load_firmware - load GVT firmware
* @gvt: intel gvt device
*
*/
int intel_gvt_load_firmware(struct intel_gvt *gvt)
{
struct intel_gvt_device_info *info = &gvt->device_info;
struct drm_i915_private *dev_priv = gvt->dev_priv;
struct pci_dev *pdev = dev_priv->drm.pdev;
struct intel_gvt_firmware *firmware = &gvt->firmware;
struct gvt_firmware_header *h;
const struct firmware *fw;
char *path;
void *mmio, *mem;
int ret;
path = kmalloc(PATH_MAX, GFP_KERNEL);
if (!path)
return -ENOMEM;
mem = kmalloc(info->cfg_space_size, GFP_KERNEL);
if (!mem) {
kfree(path);
return -ENOMEM;
}
firmware->cfg_space = mem;
mem = kmalloc(info->mmio_size, GFP_KERNEL);
if (!mem) {
kfree(path);
kfree(firmware->cfg_space);
return -ENOMEM;
}
firmware->mmio = mem;
mmio = pci_iomap(pdev, info->mmio_bar, info->mmio_size);
if (!mmio) {
kfree(path);
kfree(firmware->cfg_space);
kfree(firmware->mmio);
return -EINVAL;
}
if (IS_BROADWELL(gvt->dev_priv) || IS_SKYLAKE(gvt->dev_priv))
bdw_forcewake_get(mmio);
sprintf(path, "%s/vid_0x%04x_did_0x%04x_rid_0x%04x.golden_hw_state",
GVT_FIRMWARE_PATH, pdev->vendor, pdev->device,
pdev->revision);
gvt_dbg_core("request hw state firmware %s...\n", path);
ret = request_firmware(&fw, path, &dev_priv->drm.pdev->dev);
kfree(path);
if (ret)
goto expose_firmware;
gvt_dbg_core("success.\n");
ret = verify_firmware(gvt, fw);
if (ret)
goto out_free_fw;
gvt_dbg_core("verified.\n");
h = (struct gvt_firmware_header *)fw->data;
memcpy(firmware->cfg_space, fw->data + h->cfg_space_offset,
h->cfg_space_size);
memcpy(firmware->mmio, fw->data + h->mmio_offset,
h->mmio_size);
release_firmware(fw);
firmware->firmware_loaded = true;
pci_iounmap(pdev, mmio);
return 0;
out_free_fw:
release_firmware(fw);
expose_firmware:
expose_firmware_sysfs(gvt, mmio);
pci_iounmap(pdev, mmio);
return 0;
}
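/*
 * Example of the requested firmware path (the device id and revision here
 * are hypothetical): for vendor 0x8086, device 0x1912, revision 0x06 the
 * driver asks for
 *   i915/gvt/vid_0x8086_did_0x1912_rid_0x0006.golden_hw_state
 * If the file is absent or fails verification, the current hardware state
 * is exposed through the "gvt_firmware" sysfs attribute instead, so it can
 * be captured and reused as the golden state.
 */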
/*
* GTT virtualization
*
* Copyright(c) 2011-2016 Intel Corporation. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice (including the next
* paragraph) shall be included in all copies or substantial portions of the
* Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*
* Authors:
* Zhi Wang <zhi.a.wang@intel.com>
* Zhenyu Wang <zhenyuw@linux.intel.com>
* Xiao Zheng <xiao.zheng@intel.com>
*
* Contributors:
* Min He <min.he@intel.com>
* Bing Niu <bing.niu@intel.com>
*
*/
#include "i915_drv.h"
#include "trace.h"
static bool enable_out_of_sync = false;
static int preallocated_oos_pages = 8192;
/*
* validate a gm address and related range size,
* translate it to host gm address
*/
bool intel_gvt_ggtt_validate_range(struct intel_vgpu *vgpu, u64 addr, u32 size)
{
if ((!vgpu_gmadr_is_valid(vgpu, addr)) || (size
&& !vgpu_gmadr_is_valid(vgpu, addr + size - 1))) {
gvt_err("vgpu%d: invalid range gmadr 0x%llx size 0x%x\n",
vgpu->id, addr, size);
return false;
}
return true;
}
/* translate a guest gmadr to host gmadr */
int intel_gvt_ggtt_gmadr_g2h(struct intel_vgpu *vgpu, u64 g_addr, u64 *h_addr)
{
if (WARN(!vgpu_gmadr_is_valid(vgpu, g_addr),
"invalid guest gmadr %llx\n", g_addr))
return -EACCES;
if (vgpu_gmadr_is_aperture(vgpu, g_addr))
*h_addr = vgpu_aperture_gmadr_base(vgpu)
+ (g_addr - vgpu_aperture_offset(vgpu));
else
*h_addr = vgpu_hidden_gmadr_base(vgpu)
+ (g_addr - vgpu_hidden_offset(vgpu));
return 0;
}
/* translate a host gmadr to guest gmadr */
int intel_gvt_ggtt_gmadr_h2g(struct intel_vgpu *vgpu, u64 h_addr, u64 *g_addr)
{
if (WARN(!gvt_gmadr_is_valid(vgpu->gvt, h_addr),
"invalid host gmadr %llx\n", h_addr))
return -EACCES;
if (gvt_gmadr_is_aperture(vgpu->gvt, h_addr))
*g_addr = vgpu_aperture_gmadr_base(vgpu)
+ (h_addr - gvt_aperture_gmadr_base(vgpu->gvt));
else
*g_addr = vgpu_hidden_gmadr_base(vgpu)
+ (h_addr - gvt_hidden_gmadr_base(vgpu->gvt));
return 0;
}
int intel_gvt_ggtt_index_g2h(struct intel_vgpu *vgpu, unsigned long g_index,
unsigned long *h_index)
{
u64 h_addr;
int ret;
ret = intel_gvt_ggtt_gmadr_g2h(vgpu, g_index << GTT_PAGE_SHIFT,
&h_addr);
if (ret)
return ret;
*h_index = h_addr >> GTT_PAGE_SHIFT;
return 0;
}
int intel_gvt_ggtt_h2g_index(struct intel_vgpu *vgpu, unsigned long h_index,
unsigned long *g_index)
{
u64 g_addr;
int ret;
ret = intel_gvt_ggtt_gmadr_h2g(vgpu, h_index << GTT_PAGE_SHIFT,
&g_addr);
if (ret)
return ret;
*g_index = g_addr >> GTT_PAGE_SHIFT;
return 0;
}
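/*
 * Worked example (assuming 4KB GTT pages, i.e. GTT_PAGE_SHIFT == 12):
 * guest GGTT index 0x100 names guest graphics address 0x100000; the
 * helpers above rebase that address with the vGPU's aperture/hidden
 * offsets and shift the result back down to get the host GGTT index.
 */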
#define gtt_type_is_entry(type) \
(type > GTT_TYPE_INVALID && type < GTT_TYPE_PPGTT_ENTRY \
&& type != GTT_TYPE_PPGTT_PTE_ENTRY \
&& type != GTT_TYPE_PPGTT_ROOT_ENTRY)
#define gtt_type_is_pt(type) \
(type >= GTT_TYPE_PPGTT_PTE_PT && type < GTT_TYPE_MAX)
#define gtt_type_is_pte_pt(type) \
(type == GTT_TYPE_PPGTT_PTE_PT)
#define gtt_type_is_root_pointer(type) \
(gtt_type_is_entry(type) && type > GTT_TYPE_PPGTT_ROOT_ENTRY)
#define gtt_init_entry(e, t, p, v) do { \
(e)->type = t; \
(e)->pdev = p; \
memcpy(&(e)->val64, &v, sizeof(v)); \
} while (0)
enum {
GTT_TYPE_INVALID = -1,
GTT_TYPE_GGTT_PTE,
GTT_TYPE_PPGTT_PTE_4K_ENTRY,
GTT_TYPE_PPGTT_PTE_2M_ENTRY,
GTT_TYPE_PPGTT_PTE_1G_ENTRY,
GTT_TYPE_PPGTT_PTE_ENTRY,
GTT_TYPE_PPGTT_PDE_ENTRY,
GTT_TYPE_PPGTT_PDP_ENTRY,
GTT_TYPE_PPGTT_PML4_ENTRY,
GTT_TYPE_PPGTT_ROOT_ENTRY,
GTT_TYPE_PPGTT_ROOT_L3_ENTRY,
GTT_TYPE_PPGTT_ROOT_L4_ENTRY,
GTT_TYPE_PPGTT_ENTRY,
GTT_TYPE_PPGTT_PTE_PT,
GTT_TYPE_PPGTT_PDE_PT,
GTT_TYPE_PPGTT_PDP_PT,
GTT_TYPE_PPGTT_PML4_PT,
GTT_TYPE_MAX,
};
/*
* Mappings between GTT_TYPE* enumerations.
* The following information can be looked up for a given type:
* - type of next level page table
* - type of entry inside this level page table
* - type of entry with PSE set
*
* If the given type does not carry that kind of information,
* GTT_TYPE_INVALID is returned: an l4 root entry has no PSE bit, so
* asking for its PSE type yields GTT_TYPE_INVALID, and a PTE page
* table has no next-level page table, so asking for that yields
* GTT_TYPE_INVALID as well. This is useful when traversing a
* page table.
*/
struct gtt_type_table_entry {
int entry_type;
int next_pt_type;
int pse_entry_type;
};
#define GTT_TYPE_TABLE_ENTRY(type, e_type, npt_type, pse_type) \
[type] = { \
.entry_type = e_type, \
.next_pt_type = npt_type, \
.pse_entry_type = pse_type, \
}
static struct gtt_type_table_entry gtt_type_table[] = {
GTT_TYPE_TABLE_ENTRY(GTT_TYPE_PPGTT_ROOT_L4_ENTRY,
GTT_TYPE_PPGTT_ROOT_L4_ENTRY,
GTT_TYPE_PPGTT_PML4_PT,
GTT_TYPE_INVALID),
GTT_TYPE_TABLE_ENTRY(GTT_TYPE_PPGTT_PML4_PT,
GTT_TYPE_PPGTT_PML4_ENTRY,
GTT_TYPE_PPGTT_PDP_PT,
GTT_TYPE_INVALID),
GTT_TYPE_TABLE_ENTRY(GTT_TYPE_PPGTT_PML4_ENTRY,
GTT_TYPE_PPGTT_PML4_ENTRY,
GTT_TYPE_PPGTT_PDP_PT,
GTT_TYPE_INVALID),
GTT_TYPE_TABLE_ENTRY(GTT_TYPE_PPGTT_PDP_PT,
GTT_TYPE_PPGTT_PDP_ENTRY,
GTT_TYPE_PPGTT_PDE_PT,
GTT_TYPE_PPGTT_PTE_1G_ENTRY),
GTT_TYPE_TABLE_ENTRY(GTT_TYPE_PPGTT_ROOT_L3_ENTRY,
GTT_TYPE_PPGTT_ROOT_L3_ENTRY,
GTT_TYPE_PPGTT_PDE_PT,
GTT_TYPE_PPGTT_PTE_1G_ENTRY),
GTT_TYPE_TABLE_ENTRY(GTT_TYPE_PPGTT_PDP_ENTRY,
GTT_TYPE_PPGTT_PDP_ENTRY,
GTT_TYPE_PPGTT_PDE_PT,
GTT_TYPE_PPGTT_PTE_1G_ENTRY),
GTT_TYPE_TABLE_ENTRY(GTT_TYPE_PPGTT_PDE_PT,
GTT_TYPE_PPGTT_PDE_ENTRY,
GTT_TYPE_PPGTT_PTE_PT,
GTT_TYPE_PPGTT_PTE_2M_ENTRY),
GTT_TYPE_TABLE_ENTRY(GTT_TYPE_PPGTT_PDE_ENTRY,
GTT_TYPE_PPGTT_PDE_ENTRY,
GTT_TYPE_PPGTT_PTE_PT,
GTT_TYPE_PPGTT_PTE_2M_ENTRY),
GTT_TYPE_TABLE_ENTRY(GTT_TYPE_PPGTT_PTE_PT,
GTT_TYPE_PPGTT_PTE_4K_ENTRY,
GTT_TYPE_INVALID,
GTT_TYPE_INVALID),
GTT_TYPE_TABLE_ENTRY(GTT_TYPE_PPGTT_PTE_4K_ENTRY,
GTT_TYPE_PPGTT_PTE_4K_ENTRY,
GTT_TYPE_INVALID,
GTT_TYPE_INVALID),
GTT_TYPE_TABLE_ENTRY(GTT_TYPE_PPGTT_PTE_2M_ENTRY,
GTT_TYPE_PPGTT_PDE_ENTRY,
GTT_TYPE_INVALID,
GTT_TYPE_PPGTT_PTE_2M_ENTRY),
GTT_TYPE_TABLE_ENTRY(GTT_TYPE_PPGTT_PTE_1G_ENTRY,
GTT_TYPE_PPGTT_PDP_ENTRY,
GTT_TYPE_INVALID,
GTT_TYPE_PPGTT_PTE_1G_ENTRY),
GTT_TYPE_TABLE_ENTRY(GTT_TYPE_GGTT_PTE,
GTT_TYPE_GGTT_PTE,
GTT_TYPE_INVALID,
GTT_TYPE_INVALID),
};
static inline int get_next_pt_type(int type)
{
return gtt_type_table[type].next_pt_type;
}
static inline int get_entry_type(int type)
{
return gtt_type_table[type].entry_type;
}
static inline int get_pse_type(int type)
{
return gtt_type_table[type].pse_entry_type;
}
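/*
 * Illustrative walk (a sketch, not driver code): starting from a 4-level
 * root entry, get_next_pt_type() steps PML4 -> PDP -> PDE -> PTE and then
 * returns GTT_TYPE_INVALID, which is what terminates a page table walk.
 *
 *	int type = GTT_TYPE_PPGTT_ROOT_L4_ENTRY;
 *
 *	while (gtt_type_is_pt(get_next_pt_type(type))) {
 *		type = get_next_pt_type(type);
 *		type = get_entry_type(type);
 *	}
 *
 * The loop above ends with type == GTT_TYPE_PPGTT_PTE_4K_ENTRY.
 */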
static u64 read_pte64(struct drm_i915_private *dev_priv, unsigned long index)
{
void *addr = (u64 *)dev_priv->ggtt.gsm + index;
u64 pte;
#ifdef readq
pte = readq(addr);
#else
pte = ioread32(addr);
	pte |= (u64)ioread32(addr + 4) << 32;
#endif
return pte;
}
static void write_pte64(struct drm_i915_private *dev_priv,
unsigned long index, u64 pte)
{
void *addr = (u64 *)dev_priv->ggtt.gsm + index;
#ifdef writeq
writeq(pte, addr);
#else
iowrite32((u32)pte, addr);
iowrite32(pte >> 32, addr + 4);
#endif
I915_WRITE(GFX_FLSH_CNTL_GEN6, GFX_FLSH_CNTL_EN);
POSTING_READ(GFX_FLSH_CNTL_GEN6);
}
static inline struct intel_gvt_gtt_entry *gtt_get_entry64(void *pt,
struct intel_gvt_gtt_entry *e,
unsigned long index, bool hypervisor_access, unsigned long gpa,
struct intel_vgpu *vgpu)
{
const struct intel_gvt_device_info *info = &vgpu->gvt->device_info;
int ret;
if (WARN_ON(info->gtt_entry_size != 8))
return e;
if (hypervisor_access) {
ret = intel_gvt_hypervisor_read_gpa(vgpu, gpa +
(index << info->gtt_entry_size_shift),
&e->val64, 8);
WARN_ON(ret);
} else if (!pt) {
e->val64 = read_pte64(vgpu->gvt->dev_priv, index);
} else {
e->val64 = *((u64 *)pt + index);
}
return e;
}
static inline struct intel_gvt_gtt_entry *gtt_set_entry64(void *pt,
struct intel_gvt_gtt_entry *e,
unsigned long index, bool hypervisor_access, unsigned long gpa,
struct intel_vgpu *vgpu)
{
const struct intel_gvt_device_info *info = &vgpu->gvt->device_info;
int ret;
if (WARN_ON(info->gtt_entry_size != 8))
return e;
if (hypervisor_access) {
ret = intel_gvt_hypervisor_write_gpa(vgpu, gpa +
(index << info->gtt_entry_size_shift),
&e->val64, 8);
WARN_ON(ret);
} else if (!pt) {
write_pte64(vgpu->gvt->dev_priv, index, e->val64);
} else {
*((u64 *)pt + index) = e->val64;
}
return e;
}
#define GTT_HAW 46
#define ADDR_1G_MASK (((1UL << (GTT_HAW - 30 + 1)) - 1) << 30)
#define ADDR_2M_MASK (((1UL << (GTT_HAW - 21 + 1)) - 1) << 21)
#define ADDR_4K_MASK (((1UL << (GTT_HAW - 12 + 1)) - 1) << 12)
static unsigned long gen8_gtt_get_pfn(struct intel_gvt_gtt_entry *e)
{
unsigned long pfn;
if (e->type == GTT_TYPE_PPGTT_PTE_1G_ENTRY)
pfn = (e->val64 & ADDR_1G_MASK) >> 12;
else if (e->type == GTT_TYPE_PPGTT_PTE_2M_ENTRY)
pfn = (e->val64 & ADDR_2M_MASK) >> 12;
else
pfn = (e->val64 & ADDR_4K_MASK) >> 12;
return pfn;
}
static void gen8_gtt_set_pfn(struct intel_gvt_gtt_entry *e, unsigned long pfn)
{
if (e->type == GTT_TYPE_PPGTT_PTE_1G_ENTRY) {
e->val64 &= ~ADDR_1G_MASK;
pfn &= (ADDR_1G_MASK >> 12);
} else if (e->type == GTT_TYPE_PPGTT_PTE_2M_ENTRY) {
e->val64 &= ~ADDR_2M_MASK;
pfn &= (ADDR_2M_MASK >> 12);
} else {
e->val64 &= ~ADDR_4K_MASK;
pfn &= (ADDR_4K_MASK >> 12);
}
e->val64 |= (pfn << 12);
}
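/*
 * Sketch of how the pfn helpers pair up (illustrative only): the address
 * bits survive a set/get round trip because both sides apply the mask
 * that matches the entry type. The entry below is a local example.
 *
 *	struct intel_gvt_gtt_entry e = {
 *		.val64 = 0,
 *		.type = GTT_TYPE_PPGTT_PTE_4K_ENTRY,
 *	};
 *
 *	gen8_gtt_set_pfn(&e, 0x12345);
 *	WARN_ON(gen8_gtt_get_pfn(&e) != 0x12345);
 */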
static bool gen8_gtt_test_pse(struct intel_gvt_gtt_entry *e)
{
/* Entry doesn't have PSE bit. */
if (get_pse_type(e->type) == GTT_TYPE_INVALID)
return false;
e->type = get_entry_type(e->type);
if (!(e->val64 & (1 << 7)))
return false;
e->type = get_pse_type(e->type);
return true;
}
static bool gen8_gtt_test_present(struct intel_gvt_gtt_entry *e)
{
	/*
	 * i915 writes PDP root pointer registers without the present bit
	 * set, which still works, so root pointer entries need to be
	 * treated specially here.
	 */
if (e->type == GTT_TYPE_PPGTT_ROOT_L3_ENTRY
|| e->type == GTT_TYPE_PPGTT_ROOT_L4_ENTRY)
return (e->val64 != 0);
else
return (e->val64 & (1 << 0));
}
static void gtt_entry_clear_present(struct intel_gvt_gtt_entry *e)
{
e->val64 &= ~(1 << 0);
}
/*
* Per-platform GMA routines.
*/
static unsigned long gma_to_ggtt_pte_index(unsigned long gma)
{
unsigned long x = (gma >> GTT_PAGE_SHIFT);
trace_gma_index(__func__, gma, x);
return x;
}
#define DEFINE_PPGTT_GMA_TO_INDEX(prefix, ename, exp) \
static unsigned long prefix##_gma_to_##ename##_index(unsigned long gma) \
{ \
unsigned long x = (exp); \
trace_gma_index(__func__, gma, x); \
return x; \
}
DEFINE_PPGTT_GMA_TO_INDEX(gen8, pte, (gma >> 12 & 0x1ff));
DEFINE_PPGTT_GMA_TO_INDEX(gen8, pde, (gma >> 21 & 0x1ff));
DEFINE_PPGTT_GMA_TO_INDEX(gen8, l3_pdp, (gma >> 30 & 0x3));
DEFINE_PPGTT_GMA_TO_INDEX(gen8, l4_pdp, (gma >> 30 & 0x1ff));
DEFINE_PPGTT_GMA_TO_INDEX(gen8, pml4, (gma >> 39 & 0x1ff));
static struct intel_gvt_gtt_pte_ops gen8_gtt_pte_ops = {
.get_entry = gtt_get_entry64,
.set_entry = gtt_set_entry64,
.clear_present = gtt_entry_clear_present,
.test_present = gen8_gtt_test_present,
.test_pse = gen8_gtt_test_pse,
.get_pfn = gen8_gtt_get_pfn,
.set_pfn = gen8_gtt_set_pfn,
};
static struct intel_gvt_gtt_gma_ops gen8_gtt_gma_ops = {
.gma_to_ggtt_pte_index = gma_to_ggtt_pte_index,
.gma_to_pte_index = gen8_gma_to_pte_index,
.gma_to_pde_index = gen8_gma_to_pde_index,
.gma_to_l3_pdp_index = gen8_gma_to_l3_pdp_index,
.gma_to_l4_pdp_index = gen8_gma_to_l4_pdp_index,
.gma_to_pml4_index = gen8_gma_to_pml4_index,
};
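/*
 * Consumption sketch (illustrative): the rest of this file never calls the
 * gen8 helpers directly, it goes through the per-platform ops tables so
 * that another platform could plug in different callbacks. "e" and "pfn"
 * are local examples here.
 *
 *	struct intel_gvt_gtt_pte_ops *ops = vgpu->gvt->gtt.pte_ops;
 *	unsigned long pfn;
 *
 *	if (ops->test_present(&e))
 *		pfn = ops->get_pfn(&e);
 */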
static int gtt_entry_p2m(struct intel_vgpu *vgpu, struct intel_gvt_gtt_entry *p,
struct intel_gvt_gtt_entry *m)
{
struct intel_gvt_gtt_pte_ops *ops = vgpu->gvt->gtt.pte_ops;
unsigned long gfn, mfn;
*m = *p;
if (!ops->test_present(p))
return 0;
gfn = ops->get_pfn(p);
mfn = intel_gvt_hypervisor_gfn_to_mfn(vgpu, gfn);
if (mfn == INTEL_GVT_INVALID_ADDR) {
gvt_err("fail to translate gfn: 0x%lx\n", gfn);
return -ENXIO;
}
ops->set_pfn(m, mfn);
return 0;
}
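/*
 * Usage sketch (mirrors ppgtt_populate_shadow_page() below): a present
 * guest entry is converted so its gfn is replaced by the mfn before it
 * lands in a shadow table; non-present entries are copied through as-is.
 *
 *	struct intel_gvt_gtt_entry m;
 *
 *	if (gtt_entry_p2m(vgpu, &ge, &m) == 0)
 *		ppgtt_set_shadow_entry(spt, &m, index);
 */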
/*
* MM helpers.
*/
struct intel_gvt_gtt_entry *intel_vgpu_mm_get_entry(struct intel_vgpu_mm *mm,
void *page_table, struct intel_gvt_gtt_entry *e,
unsigned long index)
{
struct intel_gvt *gvt = mm->vgpu->gvt;
struct intel_gvt_gtt_pte_ops *ops = gvt->gtt.pte_ops;
e->type = mm->page_table_entry_type;
ops->get_entry(page_table, e, index, false, 0, mm->vgpu);
ops->test_pse(e);
return e;
}
struct intel_gvt_gtt_entry *intel_vgpu_mm_set_entry(struct intel_vgpu_mm *mm,
void *page_table, struct intel_gvt_gtt_entry *e,
unsigned long index)
{
struct intel_gvt *gvt = mm->vgpu->gvt;
struct intel_gvt_gtt_pte_ops *ops = gvt->gtt.pte_ops;
return ops->set_entry(page_table, e, index, false, 0, mm->vgpu);
}
/*
* PPGTT shadow page table helpers.
*/
static inline struct intel_gvt_gtt_entry *ppgtt_spt_get_entry(
struct intel_vgpu_ppgtt_spt *spt,
void *page_table, int type,
struct intel_gvt_gtt_entry *e, unsigned long index,
bool guest)
{
struct intel_gvt *gvt = spt->vgpu->gvt;
struct intel_gvt_gtt_pte_ops *ops = gvt->gtt.pte_ops;
e->type = get_entry_type(type);
if (WARN(!gtt_type_is_entry(e->type), "invalid entry type\n"))
return e;
ops->get_entry(page_table, e, index, guest,
spt->guest_page.gfn << GTT_PAGE_SHIFT,
spt->vgpu);
ops->test_pse(e);
return e;
}
static inline struct intel_gvt_gtt_entry *ppgtt_spt_set_entry(
struct intel_vgpu_ppgtt_spt *spt,
void *page_table, int type,
struct intel_gvt_gtt_entry *e, unsigned long index,
bool guest)
{
struct intel_gvt *gvt = spt->vgpu->gvt;
struct intel_gvt_gtt_pte_ops *ops = gvt->gtt.pte_ops;
if (WARN(!gtt_type_is_entry(e->type), "invalid entry type\n"))
return e;
return ops->set_entry(page_table, e, index, guest,
spt->guest_page.gfn << GTT_PAGE_SHIFT,
spt->vgpu);
}
#define ppgtt_get_guest_entry(spt, e, index) \
ppgtt_spt_get_entry(spt, NULL, \
spt->guest_page_type, e, index, true)
#define ppgtt_set_guest_entry(spt, e, index) \
ppgtt_spt_set_entry(spt, NULL, \
spt->guest_page_type, e, index, true)
#define ppgtt_get_shadow_entry(spt, e, index) \
ppgtt_spt_get_entry(spt, spt->shadow_page.vaddr, \
spt->shadow_page.type, e, index, false)
#define ppgtt_set_shadow_entry(spt, e, index) \
ppgtt_spt_set_entry(spt, spt->shadow_page.vaddr, \
spt->shadow_page.type, e, index, false)
/**
* intel_vgpu_init_guest_page - init a guest page data structure
* @vgpu: a vGPU
* @p: a guest page data structure
* @gfn: guest memory page frame number
 * @handler: function to be called when the target guest memory page has
 * been modified.
 * @data: private data passed to @handler
*
* This function is called when user wants to track a guest memory page.
*
* Returns:
* Zero on success, negative error code if failed.
*/
int intel_vgpu_init_guest_page(struct intel_vgpu *vgpu,
struct intel_vgpu_guest_page *p,
unsigned long gfn,
int (*handler)(void *, u64, void *, int),
void *data)
{
INIT_HLIST_NODE(&p->node);
p->writeprotection = false;
p->gfn = gfn;
p->handler = handler;
p->data = data;
p->oos_page = NULL;
p->write_cnt = 0;
hash_add(vgpu->gtt.guest_page_hash_table, &p->node, p->gfn);
return 0;
}
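/*
 * Usage sketch (see ppgtt_alloc_shadow_page() below for the real call):
 * track a guest page table page and get notified on guest writes to it.
 *
 *	ret = intel_vgpu_init_guest_page(vgpu, &spt->guest_page, gfn,
 *			ppgtt_write_protection_handler, NULL);
 */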
static int detach_oos_page(struct intel_vgpu *vgpu,
struct intel_vgpu_oos_page *oos_page);
/**
* intel_vgpu_clean_guest_page - release the resource owned by guest page data
* structure
* @vgpu: a vGPU
* @p: a tracked guest page
*
* This function is called when user tries to stop tracking a guest memory
* page.
*/
void intel_vgpu_clean_guest_page(struct intel_vgpu *vgpu,
struct intel_vgpu_guest_page *p)
{
if (!hlist_unhashed(&p->node))
hash_del(&p->node);
if (p->oos_page)
detach_oos_page(vgpu, p->oos_page);
if (p->writeprotection)
intel_gvt_hypervisor_unset_wp_page(vgpu, p);
}
/**
* intel_vgpu_find_guest_page - find a guest page data structure by GFN.
* @vgpu: a vGPU
* @gfn: guest memory page frame number
*
* This function is called when emulation logic wants to know if a trapped GFN
* is a tracked guest page.
*
* Returns:
* Pointer to guest page data structure, NULL if failed.
*/
struct intel_vgpu_guest_page *intel_vgpu_find_guest_page(
struct intel_vgpu *vgpu, unsigned long gfn)
{
struct intel_vgpu_guest_page *p;
hash_for_each_possible(vgpu->gtt.guest_page_hash_table,
p, node, gfn) {
if (p->gfn == gfn)
return p;
}
return NULL;
}
static inline int init_shadow_page(struct intel_vgpu *vgpu,
struct intel_vgpu_shadow_page *p, int type)
{
p->vaddr = page_address(p->page);
p->type = type;
INIT_HLIST_NODE(&p->node);
p->mfn = intel_gvt_hypervisor_virt_to_mfn(p->vaddr);
if (p->mfn == INTEL_GVT_INVALID_ADDR)
return -EFAULT;
hash_add(vgpu->gtt.shadow_page_hash_table, &p->node, p->mfn);
return 0;
}
static inline void clean_shadow_page(struct intel_vgpu_shadow_page *p)
{
if (!hlist_unhashed(&p->node))
hash_del(&p->node);
}
static inline struct intel_vgpu_shadow_page *find_shadow_page(
struct intel_vgpu *vgpu, unsigned long mfn)
{
struct intel_vgpu_shadow_page *p;
hash_for_each_possible(vgpu->gtt.shadow_page_hash_table,
p, node, mfn) {
if (p->mfn == mfn)
return p;
}
return NULL;
}
#define guest_page_to_ppgtt_spt(ptr) \
container_of(ptr, struct intel_vgpu_ppgtt_spt, guest_page)
#define shadow_page_to_ppgtt_spt(ptr) \
container_of(ptr, struct intel_vgpu_ppgtt_spt, shadow_page)
static void *alloc_spt(gfp_t gfp_mask)
{
struct intel_vgpu_ppgtt_spt *spt;
spt = kzalloc(sizeof(*spt), gfp_mask);
if (!spt)
return NULL;
spt->shadow_page.page = alloc_page(gfp_mask);
if (!spt->shadow_page.page) {
kfree(spt);
return NULL;
}
return spt;
}
static void free_spt(struct intel_vgpu_ppgtt_spt *spt)
{
__free_page(spt->shadow_page.page);
kfree(spt);
}
static void ppgtt_free_shadow_page(struct intel_vgpu_ppgtt_spt *spt)
{
trace_spt_free(spt->vgpu->id, spt, spt->shadow_page.type);
clean_shadow_page(&spt->shadow_page);
intel_vgpu_clean_guest_page(spt->vgpu, &spt->guest_page);
list_del_init(&spt->post_shadow_list);
free_spt(spt);
}
static void ppgtt_free_all_shadow_page(struct intel_vgpu *vgpu)
{
struct hlist_node *n;
struct intel_vgpu_shadow_page *sp;
int i;
hash_for_each_safe(vgpu->gtt.shadow_page_hash_table, i, n, sp, node)
ppgtt_free_shadow_page(shadow_page_to_ppgtt_spt(sp));
}
static int ppgtt_handle_guest_write_page_table_bytes(void *gp,
u64 pa, void *p_data, int bytes);
static int ppgtt_write_protection_handler(void *gp, u64 pa,
void *p_data, int bytes)
{
struct intel_vgpu_guest_page *gpt = (struct intel_vgpu_guest_page *)gp;
int ret;
if (bytes != 4 && bytes != 8)
return -EINVAL;
if (!gpt->writeprotection)
return -EINVAL;
ret = ppgtt_handle_guest_write_page_table_bytes(gp,
pa, p_data, bytes);
	return ret;
}
static int reclaim_one_mm(struct intel_gvt *gvt);
static struct intel_vgpu_ppgtt_spt *ppgtt_alloc_shadow_page(
struct intel_vgpu *vgpu, int type, unsigned long gfn)
{
struct intel_vgpu_ppgtt_spt *spt = NULL;
int ret;
retry:
spt = alloc_spt(GFP_KERNEL | __GFP_ZERO);
if (!spt) {
if (reclaim_one_mm(vgpu->gvt))
goto retry;
gvt_err("fail to allocate ppgtt shadow page\n");
return ERR_PTR(-ENOMEM);
}
spt->vgpu = vgpu;
spt->guest_page_type = type;
atomic_set(&spt->refcount, 1);
INIT_LIST_HEAD(&spt->post_shadow_list);
	/*
	 * TODO: the guest page type may differ from the shadow page type
	 * once PSE pages are supported.
	 */
ret = init_shadow_page(vgpu, &spt->shadow_page, type);
if (ret) {
gvt_err("fail to initialize shadow page for spt\n");
goto err;
}
ret = intel_vgpu_init_guest_page(vgpu, &spt->guest_page,
gfn, ppgtt_write_protection_handler, NULL);
if (ret) {
gvt_err("fail to initialize guest page for spt\n");
goto err;
}
trace_spt_alloc(vgpu->id, spt, type, spt->shadow_page.mfn, gfn);
return spt;
err:
ppgtt_free_shadow_page(spt);
return ERR_PTR(ret);
}
static struct intel_vgpu_ppgtt_spt *ppgtt_find_shadow_page(
struct intel_vgpu *vgpu, unsigned long mfn)
{
struct intel_vgpu_shadow_page *p = find_shadow_page(vgpu, mfn);
if (p)
return shadow_page_to_ppgtt_spt(p);
gvt_err("vgpu%d: fail to find ppgtt shadow page: 0x%lx\n",
vgpu->id, mfn);
return NULL;
}
#define pt_entry_size_shift(spt) \
((spt)->vgpu->gvt->device_info.gtt_entry_size_shift)
#define pt_entries(spt) \
(GTT_PAGE_SIZE >> pt_entry_size_shift(spt))
#define for_each_present_guest_entry(spt, e, i) \
for (i = 0; i < pt_entries(spt); i++) \
if (spt->vgpu->gvt->gtt.pte_ops->test_present( \
ppgtt_get_guest_entry(spt, e, i)))
#define for_each_present_shadow_entry(spt, e, i) \
for (i = 0; i < pt_entries(spt); i++) \
if (spt->vgpu->gvt->gtt.pte_ops->test_present( \
ppgtt_get_shadow_entry(spt, e, i)))
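/*
 * Illustrative use of the iterators above (see ppgtt_populate_shadow_page()
 * below): they visit only entries whose present bit is set.
 *
 *	struct intel_gvt_gtt_entry ge;
 *	unsigned long i;
 *
 *	for_each_present_guest_entry(spt, &ge, i)
 *		handle_present_entry(spt, &ge, i);
 *
 * "handle_present_entry" is a hypothetical callback, not a driver symbol.
 */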
static void ppgtt_get_shadow_page(struct intel_vgpu_ppgtt_spt *spt)
{
int v = atomic_read(&spt->refcount);
trace_spt_refcount(spt->vgpu->id, "inc", spt, v, (v + 1));
atomic_inc(&spt->refcount);
}
static int ppgtt_invalidate_shadow_page(struct intel_vgpu_ppgtt_spt *spt);
static int ppgtt_invalidate_shadow_page_by_shadow_entry(struct intel_vgpu *vgpu,
struct intel_gvt_gtt_entry *e)
{
struct intel_gvt_gtt_pte_ops *ops = vgpu->gvt->gtt.pte_ops;
struct intel_vgpu_ppgtt_spt *s;
if (WARN_ON(!gtt_type_is_pt(get_next_pt_type(e->type))))
return -EINVAL;
if (ops->get_pfn(e) == vgpu->gtt.scratch_page_mfn)
return 0;
s = ppgtt_find_shadow_page(vgpu, ops->get_pfn(e));
if (!s) {
gvt_err("vgpu%d: fail to find shadow page: mfn: 0x%lx\n",
vgpu->id, ops->get_pfn(e));
return -ENXIO;
}
return ppgtt_invalidate_shadow_page(s);
}
static int ppgtt_invalidate_shadow_page(struct intel_vgpu_ppgtt_spt *spt)
{
struct intel_gvt_gtt_entry e;
unsigned long index;
int ret;
int v = atomic_read(&spt->refcount);
trace_spt_change(spt->vgpu->id, "die", spt,
spt->guest_page.gfn, spt->shadow_page.type);
trace_spt_refcount(spt->vgpu->id, "dec", spt, v, (v - 1));
if (atomic_dec_return(&spt->refcount) > 0)
return 0;
if (gtt_type_is_pte_pt(spt->shadow_page.type))
goto release;
for_each_present_shadow_entry(spt, &e, index) {
if (!gtt_type_is_pt(get_next_pt_type(e.type))) {
gvt_err("GVT doesn't support pse bit for now\n");
return -EINVAL;
}
ret = ppgtt_invalidate_shadow_page_by_shadow_entry(
spt->vgpu, &e);
if (ret)
goto fail;
}
release:
trace_spt_change(spt->vgpu->id, "release", spt,
spt->guest_page.gfn, spt->shadow_page.type);
ppgtt_free_shadow_page(spt);
return 0;
fail:
gvt_err("vgpu%d: fail: shadow page %p shadow entry 0x%llx type %d\n",
spt->vgpu->id, spt, e.val64, e.type);
return ret;
}
static int ppgtt_populate_shadow_page(struct intel_vgpu_ppgtt_spt *spt);
static struct intel_vgpu_ppgtt_spt *ppgtt_populate_shadow_page_by_guest_entry(
struct intel_vgpu *vgpu, struct intel_gvt_gtt_entry *we)
{
struct intel_gvt_gtt_pte_ops *ops = vgpu->gvt->gtt.pte_ops;
struct intel_vgpu_ppgtt_spt *s = NULL;
struct intel_vgpu_guest_page *g;
int ret;
if (WARN_ON(!gtt_type_is_pt(get_next_pt_type(we->type)))) {
ret = -EINVAL;
goto fail;
}
g = intel_vgpu_find_guest_page(vgpu, ops->get_pfn(we));
if (g) {
s = guest_page_to_ppgtt_spt(g);
ppgtt_get_shadow_page(s);
} else {
int type = get_next_pt_type(we->type);
s = ppgtt_alloc_shadow_page(vgpu, type, ops->get_pfn(we));
if (IS_ERR(s)) {
ret = PTR_ERR(s);
goto fail;
}
ret = intel_gvt_hypervisor_set_wp_page(vgpu, &s->guest_page);
if (ret)
goto fail;
ret = ppgtt_populate_shadow_page(s);
if (ret)
goto fail;
trace_spt_change(vgpu->id, "new", s, s->guest_page.gfn,
s->shadow_page.type);
}
return s;
fail:
gvt_err("vgpu%d: fail: shadow page %p guest entry 0x%llx type %d\n",
vgpu->id, s, we->val64, we->type);
return ERR_PTR(ret);
}
static inline void ppgtt_generate_shadow_entry(struct intel_gvt_gtt_entry *se,
struct intel_vgpu_ppgtt_spt *s, struct intel_gvt_gtt_entry *ge)
{
struct intel_gvt_gtt_pte_ops *ops = s->vgpu->gvt->gtt.pte_ops;
se->type = ge->type;
se->val64 = ge->val64;
ops->set_pfn(se, s->shadow_page.mfn);
}
static int ppgtt_populate_shadow_page(struct intel_vgpu_ppgtt_spt *spt)
{
struct intel_vgpu *vgpu = spt->vgpu;
struct intel_vgpu_ppgtt_spt *s;
struct intel_gvt_gtt_entry se, ge;
unsigned long i;
int ret;
trace_spt_change(spt->vgpu->id, "born", spt,
spt->guest_page.gfn, spt->shadow_page.type);
if (gtt_type_is_pte_pt(spt->shadow_page.type)) {
for_each_present_guest_entry(spt, &ge, i) {
ret = gtt_entry_p2m(vgpu, &ge, &se);
if (ret)
goto fail;
ppgtt_set_shadow_entry(spt, &se, i);
}
return 0;
}
for_each_present_guest_entry(spt, &ge, i) {
if (!gtt_type_is_pt(get_next_pt_type(ge.type))) {
gvt_err("GVT doesn't support pse bit now\n");
ret = -EINVAL;
goto fail;
}
s = ppgtt_populate_shadow_page_by_guest_entry(vgpu, &ge);
if (IS_ERR(s)) {
ret = PTR_ERR(s);
goto fail;
}
ppgtt_get_shadow_entry(spt, &se, i);
ppgtt_generate_shadow_entry(&se, s, &ge);
ppgtt_set_shadow_entry(spt, &se, i);
}
return 0;
fail:
gvt_err("vgpu%d: fail: shadow page %p guest entry 0x%llx type %d\n",
vgpu->id, spt, ge.val64, ge.type);
return ret;
}
static int ppgtt_handle_guest_entry_removal(struct intel_vgpu_guest_page *gpt,
struct intel_gvt_gtt_entry *we, unsigned long index)
{
struct intel_vgpu_ppgtt_spt *spt = guest_page_to_ppgtt_spt(gpt);
struct intel_vgpu_shadow_page *sp = &spt->shadow_page;
struct intel_vgpu *vgpu = spt->vgpu;
struct intel_gvt_gtt_pte_ops *ops = vgpu->gvt->gtt.pte_ops;
struct intel_gvt_gtt_entry e;
int ret;
trace_gpt_change(spt->vgpu->id, "remove", spt, sp->type,
we->val64, index);
ppgtt_get_shadow_entry(spt, &e, index);
if (!ops->test_present(&e))
return 0;
if (ops->get_pfn(&e) == vgpu->gtt.scratch_page_mfn)
return 0;
if (gtt_type_is_pt(get_next_pt_type(we->type))) {
struct intel_vgpu_guest_page *g =
intel_vgpu_find_guest_page(vgpu, ops->get_pfn(we));
if (!g) {
gvt_err("fail to find guest page\n");
ret = -ENXIO;
goto fail;
}
ret = ppgtt_invalidate_shadow_page(guest_page_to_ppgtt_spt(g));
if (ret)
goto fail;
}
ops->set_pfn(&e, vgpu->gtt.scratch_page_mfn);
ppgtt_set_shadow_entry(spt, &e, index);
return 0;
fail:
gvt_err("vgpu%d: fail: shadow page %p guest entry 0x%llx type %d\n",
vgpu->id, spt, we->val64, we->type);
return ret;
}
static int ppgtt_handle_guest_entry_add(struct intel_vgpu_guest_page *gpt,
struct intel_gvt_gtt_entry *we, unsigned long index)
{
struct intel_vgpu_ppgtt_spt *spt = guest_page_to_ppgtt_spt(gpt);
struct intel_vgpu_shadow_page *sp = &spt->shadow_page;
struct intel_vgpu *vgpu = spt->vgpu;
struct intel_gvt_gtt_entry m;
struct intel_vgpu_ppgtt_spt *s;
int ret;
trace_gpt_change(spt->vgpu->id, "add", spt, sp->type,
we->val64, index);
if (gtt_type_is_pt(get_next_pt_type(we->type))) {
s = ppgtt_populate_shadow_page_by_guest_entry(vgpu, we);
if (IS_ERR(s)) {
ret = PTR_ERR(s);
goto fail;
}
ppgtt_get_shadow_entry(spt, &m, index);
ppgtt_generate_shadow_entry(&m, s, we);
ppgtt_set_shadow_entry(spt, &m, index);
} else {
ret = gtt_entry_p2m(vgpu, we, &m);
if (ret)
goto fail;
ppgtt_set_shadow_entry(spt, &m, index);
}
return 0;
fail:
gvt_err("vgpu%d: fail: spt %p guest entry 0x%llx type %d\n", vgpu->id,
spt, we->val64, we->type);
return ret;
}
static int sync_oos_page(struct intel_vgpu *vgpu,
struct intel_vgpu_oos_page *oos_page)
{
const struct intel_gvt_device_info *info = &vgpu->gvt->device_info;
struct intel_gvt *gvt = vgpu->gvt;
struct intel_gvt_gtt_pte_ops *ops = gvt->gtt.pte_ops;
struct intel_vgpu_ppgtt_spt *spt =
guest_page_to_ppgtt_spt(oos_page->guest_page);
struct intel_gvt_gtt_entry old, new, m;
int index;
int ret;
trace_oos_change(vgpu->id, "sync", oos_page->id,
oos_page->guest_page, spt->guest_page_type);
old.type = new.type = get_entry_type(spt->guest_page_type);
old.val64 = new.val64 = 0;
for (index = 0; index < (GTT_PAGE_SIZE >> info->gtt_entry_size_shift);
index++) {
ops->get_entry(oos_page->mem, &old, index, false, 0, vgpu);
ops->get_entry(NULL, &new, index, true,
oos_page->guest_page->gfn << PAGE_SHIFT, vgpu);
if (old.val64 == new.val64
&& !test_and_clear_bit(index, spt->post_shadow_bitmap))
continue;
trace_oos_sync(vgpu->id, oos_page->id,
oos_page->guest_page, spt->guest_page_type,
new.val64, index);
ret = gtt_entry_p2m(vgpu, &new, &m);
if (ret)
return ret;
ops->set_entry(oos_page->mem, &new, index, false, 0, vgpu);
ppgtt_set_shadow_entry(spt, &m, index);
}
oos_page->guest_page->write_cnt = 0;
list_del_init(&spt->post_shadow_list);
return 0;
}
static int detach_oos_page(struct intel_vgpu *vgpu,
struct intel_vgpu_oos_page *oos_page)
{
struct intel_gvt *gvt = vgpu->gvt;
struct intel_vgpu_ppgtt_spt *spt =
guest_page_to_ppgtt_spt(oos_page->guest_page);
trace_oos_change(vgpu->id, "detach", oos_page->id,
oos_page->guest_page, spt->guest_page_type);
oos_page->guest_page->write_cnt = 0;
oos_page->guest_page->oos_page = NULL;
oos_page->guest_page = NULL;
list_del_init(&oos_page->vm_list);
list_move_tail(&oos_page->list, &gvt->gtt.oos_page_free_list_head);
return 0;
}
static int attach_oos_page(struct intel_vgpu *vgpu,
struct intel_vgpu_oos_page *oos_page,
struct intel_vgpu_guest_page *gpt)
{
struct intel_gvt *gvt = vgpu->gvt;
int ret;
ret = intel_gvt_hypervisor_read_gpa(vgpu, gpt->gfn << GTT_PAGE_SHIFT,
oos_page->mem, GTT_PAGE_SIZE);
if (ret)
return ret;
oos_page->guest_page = gpt;
gpt->oos_page = oos_page;
list_move_tail(&oos_page->list, &gvt->gtt.oos_page_use_list_head);
trace_oos_change(vgpu->id, "attach", gpt->oos_page->id,
gpt, guest_page_to_ppgtt_spt(gpt)->guest_page_type);
return 0;
}
static int ppgtt_set_guest_page_sync(struct intel_vgpu *vgpu,
struct intel_vgpu_guest_page *gpt)
{
int ret;
ret = intel_gvt_hypervisor_set_wp_page(vgpu, gpt);
if (ret)
return ret;
trace_oos_change(vgpu->id, "set page sync", gpt->oos_page->id,
gpt, guest_page_to_ppgtt_spt(gpt)->guest_page_type);
list_del_init(&gpt->oos_page->vm_list);
return sync_oos_page(vgpu, gpt->oos_page);
}
static int ppgtt_allocate_oos_page(struct intel_vgpu *vgpu,
struct intel_vgpu_guest_page *gpt)
{
struct intel_gvt *gvt = vgpu->gvt;
struct intel_gvt_gtt *gtt = &gvt->gtt;
struct intel_vgpu_oos_page *oos_page = gpt->oos_page;
int ret;
	WARN(oos_page, "shadow PPGTT page already has an oos page\n");
if (list_empty(&gtt->oos_page_free_list_head)) {
oos_page = container_of(gtt->oos_page_use_list_head.next,
struct intel_vgpu_oos_page, list);
ret = ppgtt_set_guest_page_sync(vgpu, oos_page->guest_page);
if (ret)
return ret;
ret = detach_oos_page(vgpu, oos_page);
if (ret)
return ret;
} else
oos_page = container_of(gtt->oos_page_free_list_head.next,
struct intel_vgpu_oos_page, list);
return attach_oos_page(vgpu, oos_page, gpt);
}
static int ppgtt_set_guest_page_oos(struct intel_vgpu *vgpu,
struct intel_vgpu_guest_page *gpt)
{
struct intel_vgpu_oos_page *oos_page = gpt->oos_page;
	if (WARN(!oos_page, "shadow PPGTT page should have an oos page\n"))
return -EINVAL;
trace_oos_change(vgpu->id, "set page out of sync", gpt->oos_page->id,
gpt, guest_page_to_ppgtt_spt(gpt)->guest_page_type);
list_add_tail(&oos_page->vm_list, &vgpu->gtt.oos_page_list_head);
return intel_gvt_hypervisor_unset_wp_page(vgpu, gpt);
}
/**
 * intel_vgpu_sync_oos_pages - sync all the out-of-sync shadow pages for a vGPU
 * @vgpu: a vGPU
 *
 * This function is called before submitting a guest workload to host,
 * to sync all the out-of-sync shadow page tables of the vGPU.
*
* Returns:
* Zero on success, negative error code if failed.
*/
int intel_vgpu_sync_oos_pages(struct intel_vgpu *vgpu)
{
struct list_head *pos, *n;
struct intel_vgpu_oos_page *oos_page;
int ret;
if (!enable_out_of_sync)
return 0;
list_for_each_safe(pos, n, &vgpu->gtt.oos_page_list_head) {
oos_page = container_of(pos,
struct intel_vgpu_oos_page, vm_list);
ret = ppgtt_set_guest_page_sync(vgpu, oos_page->guest_page);
if (ret)
return ret;
}
return 0;
}
/*
* The heart of PPGTT shadow page table.
*/
static int ppgtt_handle_guest_write_page_table(
struct intel_vgpu_guest_page *gpt,
struct intel_gvt_gtt_entry *we, unsigned long index)
{
struct intel_vgpu_ppgtt_spt *spt = guest_page_to_ppgtt_spt(gpt);
struct intel_vgpu *vgpu = spt->vgpu;
struct intel_gvt_gtt_pte_ops *ops = vgpu->gvt->gtt.pte_ops;
struct intel_gvt_gtt_entry ge;
int old_present, new_present;
int ret;
ppgtt_get_guest_entry(spt, &ge, index);
old_present = ops->test_present(&ge);
new_present = ops->test_present(we);
ppgtt_set_guest_entry(spt, we, index);
if (old_present) {
ret = ppgtt_handle_guest_entry_removal(gpt, &ge, index);
if (ret)
goto fail;
}
if (new_present) {
ret = ppgtt_handle_guest_entry_add(gpt, we, index);
if (ret)
goto fail;
}
return 0;
fail:
gvt_err("vgpu%d: fail: shadow page %p guest entry 0x%llx type %d.\n",
vgpu->id, spt, we->val64, we->type);
return ret;
}
static inline bool can_do_out_of_sync(struct intel_vgpu_guest_page *gpt)
{
return enable_out_of_sync
&& gtt_type_is_pte_pt(
guest_page_to_ppgtt_spt(gpt)->guest_page_type)
&& gpt->write_cnt >= 2;
}
static void ppgtt_set_post_shadow(struct intel_vgpu_ppgtt_spt *spt,
unsigned long index)
{
set_bit(index, spt->post_shadow_bitmap);
if (!list_empty(&spt->post_shadow_list))
return;
list_add_tail(&spt->post_shadow_list,
&spt->vgpu->gtt.post_shadow_list_head);
}
/**
* intel_vgpu_flush_post_shadow - flush the post shadow transactions
* @vgpu: a vGPU
*
 * This function is called before submitting a guest workload to host,
 * to flush all pending post-shadow page table updates of the vGPU.
*
* Returns:
* Zero on success, negative error code if failed.
*/
int intel_vgpu_flush_post_shadow(struct intel_vgpu *vgpu)
{
struct list_head *pos, *n;
struct intel_vgpu_ppgtt_spt *spt;
struct intel_gvt_gtt_entry ge, e;
unsigned long index;
int ret;
list_for_each_safe(pos, n, &vgpu->gtt.post_shadow_list_head) {
spt = container_of(pos, struct intel_vgpu_ppgtt_spt,
post_shadow_list);
for_each_set_bit(index, spt->post_shadow_bitmap,
GTT_ENTRY_NUM_IN_ONE_PAGE) {
ppgtt_get_guest_entry(spt, &ge, index);
e = ge;
e.val64 = 0;
ppgtt_set_guest_entry(spt, &e, index);
ret = ppgtt_handle_guest_write_page_table(
&spt->guest_page, &ge, index);
if (ret)
return ret;
clear_bit(index, spt->post_shadow_bitmap);
}
list_del_init(&spt->post_shadow_list);
}
return 0;
}
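/*
 * Sketch of the pre-submission sequence a scheduler is expected to run;
 * both helpers above are documented as "called before submitting a guest
 * workload". Error handling is elided for brevity.
 *
 *	ret = intel_vgpu_sync_oos_pages(vgpu);
 *	if (ret == 0)
 *		ret = intel_vgpu_flush_post_shadow(vgpu);
 */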
static int ppgtt_handle_guest_write_page_table_bytes(void *gp,
u64 pa, void *p_data, int bytes)
{
struct intel_vgpu_guest_page *gpt = (struct intel_vgpu_guest_page *)gp;
struct intel_vgpu_ppgtt_spt *spt = guest_page_to_ppgtt_spt(gpt);
struct intel_vgpu *vgpu = spt->vgpu;
struct intel_gvt_gtt_pte_ops *ops = vgpu->gvt->gtt.pte_ops;
const struct intel_gvt_device_info *info = &vgpu->gvt->device_info;
struct intel_gvt_gtt_entry we;
unsigned long index;
int ret;
index = (pa & (PAGE_SIZE - 1)) >> info->gtt_entry_size_shift;
ppgtt_get_guest_entry(spt, &we, index);
memcpy((void *)&we.val64 + (pa & (info->gtt_entry_size - 1)),
p_data, bytes);
ops->test_pse(&we);
if (bytes == info->gtt_entry_size) {
ret = ppgtt_handle_guest_write_page_table(gpt, &we, index);
if (ret)
return ret;
} else {
struct intel_gvt_gtt_entry ge;
ppgtt_get_guest_entry(spt, &ge, index);
if (!test_bit(index, spt->post_shadow_bitmap)) {
ret = ppgtt_handle_guest_entry_removal(gpt,
&ge, index);
if (ret)
return ret;
}
ppgtt_set_post_shadow(spt, index);
ppgtt_set_guest_entry(spt, &we, index);
}
if (!enable_out_of_sync)
return 0;
gpt->write_cnt++;
if (gpt->oos_page)
ops->set_entry(gpt->oos_page->mem, &we, index,
false, 0, vgpu);
if (can_do_out_of_sync(gpt)) {
if (!gpt->oos_page)
ppgtt_allocate_oos_page(vgpu, gpt);
ret = ppgtt_set_guest_page_oos(vgpu, gpt);
if (ret < 0)
return ret;
}
return 0;
}
/*
* mm page table allocation policy for bdw+
* - for ggtt, only virtual page table will be allocated.
* - for ppgtt, dedicated virtual/shadow page table will be allocated.
*/
static int gen8_mm_alloc_page_table(struct intel_vgpu_mm *mm)
{
struct intel_vgpu *vgpu = mm->vgpu;
struct intel_gvt *gvt = vgpu->gvt;
const struct intel_gvt_device_info *info = &gvt->device_info;
void *mem;
if (mm->type == INTEL_GVT_MM_PPGTT) {
mm->page_table_entry_cnt = 4;
mm->page_table_entry_size = mm->page_table_entry_cnt *
info->gtt_entry_size;
mem = kzalloc(mm->has_shadow_page_table ?
mm->page_table_entry_size * 2
: mm->page_table_entry_size,
GFP_ATOMIC);
if (!mem)
return -ENOMEM;
mm->virtual_page_table = mem;
if (!mm->has_shadow_page_table)
return 0;
mm->shadow_page_table = mem + mm->page_table_entry_size;
} else if (mm->type == INTEL_GVT_MM_GGTT) {
mm->page_table_entry_cnt =
(gvt_ggtt_gm_sz(gvt) >> GTT_PAGE_SHIFT);
mm->page_table_entry_size = mm->page_table_entry_cnt *
info->gtt_entry_size;
mem = vzalloc(mm->page_table_entry_size);
if (!mem)
return -ENOMEM;
mm->virtual_page_table = mem;
}
return 0;
}
static void gen8_mm_free_page_table(struct intel_vgpu_mm *mm)
{
if (mm->type == INTEL_GVT_MM_PPGTT) {
kfree(mm->virtual_page_table);
} else if (mm->type == INTEL_GVT_MM_GGTT) {
if (mm->virtual_page_table)
vfree(mm->virtual_page_table);
}
mm->virtual_page_table = mm->shadow_page_table = NULL;
}
static void invalidate_mm(struct intel_vgpu_mm *mm)
{
struct intel_vgpu *vgpu = mm->vgpu;
struct intel_gvt *gvt = vgpu->gvt;
struct intel_gvt_gtt *gtt = &gvt->gtt;
struct intel_gvt_gtt_pte_ops *ops = gtt->pte_ops;
struct intel_gvt_gtt_entry se;
int i;
if (WARN_ON(!mm->has_shadow_page_table || !mm->shadowed))
return;
for (i = 0; i < mm->page_table_entry_cnt; i++) {
ppgtt_get_shadow_root_entry(mm, &se, i);
if (!ops->test_present(&se))
continue;
ppgtt_invalidate_shadow_page_by_shadow_entry(
vgpu, &se);
se.val64 = 0;
ppgtt_set_shadow_root_entry(mm, &se, i);
trace_gpt_change(vgpu->id, "destroy root pointer",
NULL, se.type, se.val64, i);
}
mm->shadowed = false;
}
/**
* intel_vgpu_destroy_mm - destroy a mm object
 * @mm_ref: the kref member embedded in a vGPU mm object
 *
 * This function is used to destroy a mm object of a vGPU
*
*/
void intel_vgpu_destroy_mm(struct kref *mm_ref)
{
struct intel_vgpu_mm *mm = container_of(mm_ref, typeof(*mm), ref);
struct intel_vgpu *vgpu = mm->vgpu;
struct intel_gvt *gvt = vgpu->gvt;
struct intel_gvt_gtt *gtt = &gvt->gtt;
if (!mm->initialized)
goto out;
list_del(&mm->list);
list_del(&mm->lru_list);
if (mm->has_shadow_page_table)
invalidate_mm(mm);
gtt->mm_free_page_table(mm);
out:
kfree(mm);
}
static int shadow_mm(struct intel_vgpu_mm *mm)
{
struct intel_vgpu *vgpu = mm->vgpu;
struct intel_gvt *gvt = vgpu->gvt;
struct intel_gvt_gtt *gtt = &gvt->gtt;
struct intel_gvt_gtt_pte_ops *ops = gtt->pte_ops;
struct intel_vgpu_ppgtt_spt *spt;
struct intel_gvt_gtt_entry ge, se;
int i;
int ret;
if (WARN_ON(!mm->has_shadow_page_table || mm->shadowed))
return 0;
mm->shadowed = true;
for (i = 0; i < mm->page_table_entry_cnt; i++) {
ppgtt_get_guest_root_entry(mm, &ge, i);
if (!ops->test_present(&ge))
continue;
trace_gpt_change(vgpu->id, __func__, NULL,
ge.type, ge.val64, i);
spt = ppgtt_populate_shadow_page_by_guest_entry(vgpu, &ge);
if (IS_ERR(spt)) {
gvt_err("fail to populate guest root pointer\n");
ret = PTR_ERR(spt);
goto fail;
}
ppgtt_generate_shadow_entry(&se, spt, &ge);
ppgtt_set_shadow_root_entry(mm, &se, i);
trace_gpt_change(vgpu->id, "populate root pointer",
NULL, se.type, se.val64, i);
}
return 0;
fail:
invalidate_mm(mm);
return ret;
}
/**
* intel_vgpu_create_mm - create a mm object for a vGPU
* @vgpu: a vGPU
* @mm_type: mm object type, should be PPGTT or GGTT
* @virtual_page_table: page table root pointers. Could be NULL if user wants
* to populate shadow later.
* @page_table_level: describe the page table level of the mm object
* @pde_base_index: pde root pointer base in GGTT MMIO.
*
* This function is used to create a mm object for a vGPU.
*
* Returns:
 * Pointer to the mm object on success, ERR_PTR(-errno) if failed.
*/
struct intel_vgpu_mm *intel_vgpu_create_mm(struct intel_vgpu *vgpu,
int mm_type, void *virtual_page_table, int page_table_level,
u32 pde_base_index)
{
struct intel_gvt *gvt = vgpu->gvt;
struct intel_gvt_gtt *gtt = &gvt->gtt;
struct intel_vgpu_mm *mm;
int ret;
mm = kzalloc(sizeof(*mm), GFP_ATOMIC);
if (!mm) {
ret = -ENOMEM;
goto fail;
}
mm->type = mm_type;
if (page_table_level == 1)
mm->page_table_entry_type = GTT_TYPE_GGTT_PTE;
else if (page_table_level == 3)
mm->page_table_entry_type = GTT_TYPE_PPGTT_ROOT_L3_ENTRY;
else if (page_table_level == 4)
mm->page_table_entry_type = GTT_TYPE_PPGTT_ROOT_L4_ENTRY;
else {
WARN_ON(1);
ret = -EINVAL;
goto fail;
}
mm->page_table_level = page_table_level;
mm->pde_base_index = pde_base_index;
mm->vgpu = vgpu;
mm->has_shadow_page_table = !!(mm_type == INTEL_GVT_MM_PPGTT);
kref_init(&mm->ref);
atomic_set(&mm->pincount, 0);
INIT_LIST_HEAD(&mm->list);
INIT_LIST_HEAD(&mm->lru_list);
list_add_tail(&mm->list, &vgpu->gtt.mm_list_head);
ret = gtt->mm_alloc_page_table(mm);
if (ret) {
gvt_err("fail to allocate page table for mm\n");
goto fail;
}
mm->initialized = true;
if (virtual_page_table)
memcpy(mm->virtual_page_table, virtual_page_table,
mm->page_table_entry_size);
if (mm->has_shadow_page_table) {
ret = shadow_mm(mm);
if (ret)
goto fail;
list_add_tail(&mm->lru_list, &gvt->gtt.mm_lru_list_head);
}
return mm;
fail:
gvt_err("fail to create mm\n");
if (mm)
intel_gvt_mm_unreference(mm);
return ERR_PTR(ret);
}
/**
* intel_vgpu_unpin_mm - decrease the pin count of a vGPU mm object
* @mm: a vGPU mm object
*
* This function is called when user doesn't want to use a vGPU mm object
*/
void intel_vgpu_unpin_mm(struct intel_vgpu_mm *mm)
{
if (WARN_ON(mm->type != INTEL_GVT_MM_PPGTT))
return;
atomic_dec(&mm->pincount);
}
/**
* intel_vgpu_pin_mm - increase the pin count of a vGPU mm object
 * @mm: target vGPU mm object
*
* This function is called when user wants to use a vGPU mm object. If this
* mm object hasn't been shadowed yet, the shadow will be populated at this
* time.
*
* Returns:
* Zero on success, negative error code if failed.
*/
int intel_vgpu_pin_mm(struct intel_vgpu_mm *mm)
{
int ret;
if (WARN_ON(mm->type != INTEL_GVT_MM_PPGTT))
return 0;
atomic_inc(&mm->pincount);
if (!mm->shadowed) {
ret = shadow_mm(mm);
if (ret)
return ret;
}
list_del_init(&mm->lru_list);
list_add_tail(&mm->lru_list, &mm->vgpu->gvt->gtt.mm_lru_list_head);
return 0;
}
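/*
 * Pin/unpin sketch (illustrative only): a PPGTT mm stays pinned while its
 * shadow tables must remain resident, e.g. while a workload that uses it
 * is in flight, and is unpinned afterwards so reclaim_one_mm() below may
 * invalidate it again. "submit_workload" stands in for the real scheduler
 * path and is not a driver symbol.
 *
 *	ret = intel_vgpu_pin_mm(mm);
 *	if (ret)
 *		return ret;
 *	submit_workload();
 *	intel_vgpu_unpin_mm(mm);
 */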
static int reclaim_one_mm(struct intel_gvt *gvt)
{
struct intel_vgpu_mm *mm;
struct list_head *pos, *n;
list_for_each_safe(pos, n, &gvt->gtt.mm_lru_list_head) {
mm = container_of(pos, struct intel_vgpu_mm, lru_list);
if (mm->type != INTEL_GVT_MM_PPGTT)
continue;
if (atomic_read(&mm->pincount))
continue;
list_del_init(&mm->lru_list);
invalidate_mm(mm);
return 1;
}
return 0;
}
/*
* GMA translation APIs.
*/
static inline int ppgtt_get_next_level_entry(struct intel_vgpu_mm *mm,
struct intel_gvt_gtt_entry *e, unsigned long index, bool guest)
{
struct intel_vgpu *vgpu = mm->vgpu;
struct intel_gvt_gtt_pte_ops *ops = vgpu->gvt->gtt.pte_ops;
struct intel_vgpu_ppgtt_spt *s;
if (WARN_ON(!mm->has_shadow_page_table))
return -EINVAL;
s = ppgtt_find_shadow_page(vgpu, ops->get_pfn(e));
if (!s)
return -ENXIO;
if (!guest)
ppgtt_get_shadow_entry(s, e, index);
else
ppgtt_get_guest_entry(s, e, index);
return 0;
}
/**
* intel_vgpu_gma_to_gpa - translate a gma to GPA
* @mm: mm object. could be a PPGTT or GGTT mm object
* @gma: graphics memory address in this mm object
*
* This function is used to translate a graphics memory address in specific
* graphics memory space to guest physical address.
*
* Returns:
* Guest physical address on success, INTEL_GVT_INVALID_ADDR if failed.
*/
unsigned long intel_vgpu_gma_to_gpa(struct intel_vgpu_mm *mm, unsigned long gma)
{
struct intel_vgpu *vgpu = mm->vgpu;
struct intel_gvt *gvt = vgpu->gvt;
struct intel_gvt_gtt_pte_ops *pte_ops = gvt->gtt.pte_ops;
struct intel_gvt_gtt_gma_ops *gma_ops = gvt->gtt.gma_ops;
unsigned long gpa = INTEL_GVT_INVALID_ADDR;
unsigned long gma_index[4];
struct intel_gvt_gtt_entry e;
int i, index;
int ret;
if (mm->type != INTEL_GVT_MM_GGTT && mm->type != INTEL_GVT_MM_PPGTT)
return INTEL_GVT_INVALID_ADDR;
if (mm->type == INTEL_GVT_MM_GGTT) {
if (!vgpu_gmadr_is_valid(vgpu, gma))
goto err;
ggtt_get_guest_entry(mm, &e,
gma_ops->gma_to_ggtt_pte_index(gma));
gpa = (pte_ops->get_pfn(&e) << GTT_PAGE_SHIFT)
+ (gma & ~GTT_PAGE_MASK);
trace_gma_translate(vgpu->id, "ggtt", 0, 0, gma, gpa);
return gpa;
}
switch (mm->page_table_level) {
case 4:
ppgtt_get_shadow_root_entry(mm, &e, 0);
gma_index[0] = gma_ops->gma_to_pml4_index(gma);
gma_index[1] = gma_ops->gma_to_l4_pdp_index(gma);
gma_index[2] = gma_ops->gma_to_pde_index(gma);
gma_index[3] = gma_ops->gma_to_pte_index(gma);
index = 4;
break;
case 3:
ppgtt_get_shadow_root_entry(mm, &e,
gma_ops->gma_to_l3_pdp_index(gma));
gma_index[0] = gma_ops->gma_to_pde_index(gma);
gma_index[1] = gma_ops->gma_to_pte_index(gma);
index = 2;
break;
case 2:
ppgtt_get_shadow_root_entry(mm, &e,
gma_ops->gma_to_pde_index(gma));
gma_index[0] = gma_ops->gma_to_pte_index(gma);
index = 1;
break;
default:
WARN_ON(1);
goto err;
}
/* walk into the shadow page table and get gpa from guest entry */
for (i = 0; i < index; i++) {
ret = ppgtt_get_next_level_entry(mm, &e, gma_index[i],
(i == index - 1));
if (ret)
goto err;
}
gpa = (pte_ops->get_pfn(&e) << GTT_PAGE_SHIFT)
+ (gma & ~GTT_PAGE_MASK);
trace_gma_translate(vgpu->id, "ppgtt", 0,
mm->page_table_level, gma, gpa);
return gpa;
err:
gvt_err("invalid mm type: %d gma %lx\n", mm->type, gma);
return INTEL_GVT_INVALID_ADDR;
}
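/*
 * Usage sketch: command parsing can translate a graphics memory address
 * found in a guest batch into a guest physical address via the vGPU's
 * GGTT mm object. "gma" is a hypothetical input value.
 *
 *	unsigned long gpa;
 *
 *	gpa = intel_vgpu_gma_to_gpa(vgpu->gtt.ggtt_mm, gma);
 *	if (gpa == INTEL_GVT_INVALID_ADDR)
 *		return -EFAULT;
 */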
static int emulate_gtt_mmio_read(struct intel_vgpu *vgpu,
unsigned int off, void *p_data, unsigned int bytes)
{
struct intel_vgpu_mm *ggtt_mm = vgpu->gtt.ggtt_mm;
const struct intel_gvt_device_info *info = &vgpu->gvt->device_info;
unsigned long index = off >> info->gtt_entry_size_shift;
struct intel_gvt_gtt_entry e;
if (bytes != 4 && bytes != 8)
return -EINVAL;
ggtt_get_guest_entry(ggtt_mm, &e, index);
memcpy(p_data, (void *)&e.val64 + (off & (info->gtt_entry_size - 1)),
bytes);
return 0;
}
/**
* intel_vgpu_emulate_gtt_mmio_read - emulate GTT MMIO register read
* @vgpu: a vGPU
* @off: register offset
* @p_data: data will be returned to guest
* @bytes: data length
*
* This function is used to emulate the GTT MMIO register read
*
* Returns:
* Zero on success, error code if failed.
*/
int intel_vgpu_emulate_gtt_mmio_read(struct intel_vgpu *vgpu, unsigned int off,
void *p_data, unsigned int bytes)
{
const struct intel_gvt_device_info *info = &vgpu->gvt->device_info;
int ret;
if (bytes != 4 && bytes != 8)
return -EINVAL;
off -= info->gtt_start_offset;
ret = emulate_gtt_mmio_read(vgpu, off, p_data, bytes);
return ret;
}
static int emulate_gtt_mmio_write(struct intel_vgpu *vgpu, unsigned int off,
void *p_data, unsigned int bytes)
{
struct intel_gvt *gvt = vgpu->gvt;
const struct intel_gvt_device_info *info = &gvt->device_info;
struct intel_vgpu_mm *ggtt_mm = vgpu->gtt.ggtt_mm;
struct intel_gvt_gtt_pte_ops *ops = gvt->gtt.pte_ops;
unsigned long g_gtt_index = off >> info->gtt_entry_size_shift;
unsigned long gma;
struct intel_gvt_gtt_entry e, m;
int ret;
if (bytes != 4 && bytes != 8)
return -EINVAL;
gma = g_gtt_index << GTT_PAGE_SHIFT;
/* the VM may configure the whole GM space when ballooning is used */
if (WARN_ONCE(!vgpu_gmadr_is_valid(vgpu, gma),
"vgpu%d: found oob ggtt write, offset %x\n",
vgpu->id, off)) {
return 0;
}
ggtt_get_guest_entry(ggtt_mm, &e, g_gtt_index);
memcpy((void *)&e.val64 + (off & (info->gtt_entry_size - 1)), p_data,
bytes);
if (ops->test_present(&e)) {
ret = gtt_entry_p2m(vgpu, &e, &m);
if (ret) {
gvt_err("vgpu%d: fail to translate guest gtt entry\n",
vgpu->id);
return ret;
}
} else {
m = e;
m.val64 = 0;
}
ggtt_set_shadow_entry(ggtt_mm, &m, g_gtt_index);
ggtt_set_guest_entry(ggtt_mm, &e, g_gtt_index);
return 0;
}
/**
* intel_vgpu_emulate_gtt_mmio_write - emulate GTT MMIO register write
* @vgpu: a vGPU
* @off: register offset
* @p_data: data from guest write
* @bytes: data length
*
* This function is used to emulate the GTT MMIO register write
*
* Returns:
* Zero on success, error code if failed.
*/
int intel_vgpu_emulate_gtt_mmio_write(struct intel_vgpu *vgpu, unsigned int off,
void *p_data, unsigned int bytes)
{
const struct intel_gvt_device_info *info = &vgpu->gvt->device_info;
int ret;
if (bytes != 4 && bytes != 8)
return -EINVAL;
off -= info->gtt_start_offset;
ret = emulate_gtt_mmio_write(vgpu, off, p_data, bytes);
return ret;
}
int intel_gvt_create_scratch_page(struct intel_vgpu *vgpu)
{
struct intel_vgpu_gtt *gtt = &vgpu->gtt;
void *p;
void *vaddr;
unsigned long mfn;
gtt->scratch_page = alloc_page(GFP_KERNEL);
if (!gtt->scratch_page) {
gvt_err("Failed to allocate scratch page.\n");
return -ENOMEM;
}
/* set to zero */
p = kmap_atomic(gtt->scratch_page);
memset(p, 0, PAGE_SIZE);
kunmap_atomic(p);
/* translate page to mfn */
vaddr = page_address(gtt->scratch_page);
mfn = intel_gvt_hypervisor_virt_to_mfn(vaddr);
if (mfn == INTEL_GVT_INVALID_ADDR) {
gvt_err("fail to translate vaddr:0x%llx\n", (u64)vaddr);
__free_page(gtt->scratch_page);
gtt->scratch_page = NULL;
return -ENXIO;
}
gtt->scratch_page_mfn = mfn;
gvt_dbg_core("vgpu%d create scratch page: mfn=0x%lx\n", vgpu->id, mfn);
return 0;
}
void intel_gvt_release_scratch_page(struct intel_vgpu *vgpu)
{
if (vgpu->gtt.scratch_page != NULL) {
__free_page(vgpu->gtt.scratch_page);
vgpu->gtt.scratch_page = NULL;
vgpu->gtt.scratch_page_mfn = 0;
}
}
/**
 * intel_vgpu_init_gtt - initialize per-vGPU graphics memory virtualization
* @vgpu: a vGPU
*
* This function is used to initialize per-vGPU graphics memory virtualization
* components.
*
* Returns:
* Zero on success, error code if failed.
*/
int intel_vgpu_init_gtt(struct intel_vgpu *vgpu)
{
struct intel_vgpu_gtt *gtt = &vgpu->gtt;
struct intel_vgpu_mm *ggtt_mm;
hash_init(gtt->guest_page_hash_table);
hash_init(gtt->shadow_page_hash_table);
INIT_LIST_HEAD(&gtt->mm_list_head);
INIT_LIST_HEAD(&gtt->oos_page_list_head);
INIT_LIST_HEAD(&gtt->post_shadow_list_head);
ggtt_mm = intel_vgpu_create_mm(vgpu, INTEL_GVT_MM_GGTT,
NULL, 1, 0);
if (IS_ERR(ggtt_mm)) {
gvt_err("fail to create mm for ggtt.\n");
return PTR_ERR(ggtt_mm);
}
gtt->ggtt_mm = ggtt_mm;
	return intel_gvt_create_scratch_page(vgpu);
}
/**
 * intel_vgpu_clean_gtt - clean up per-vGPU graphics memory virtualization
 * @vgpu: a vGPU
 *
 * This function is used to clean up per-vGPU graphics memory virtualization
 * components.
 */
void intel_vgpu_clean_gtt(struct intel_vgpu *vgpu)
{
struct list_head *pos, *n;
struct intel_vgpu_mm *mm;
ppgtt_free_all_shadow_page(vgpu);
intel_gvt_release_scratch_page(vgpu);
list_for_each_safe(pos, n, &vgpu->gtt.mm_list_head) {
mm = container_of(pos, struct intel_vgpu_mm, list);
vgpu->gvt->gtt.mm_free_page_table(mm);
list_del(&mm->list);
list_del(&mm->lru_list);
kfree(mm);
}
}
static void clean_spt_oos(struct intel_gvt *gvt)
{
struct intel_gvt_gtt *gtt = &gvt->gtt;
struct list_head *pos, *n;
struct intel_vgpu_oos_page *oos_page;
WARN(!list_empty(&gtt->oos_page_use_list_head),
"someone is still using oos page\n");
list_for_each_safe(pos, n, &gtt->oos_page_free_list_head) {
oos_page = container_of(pos, struct intel_vgpu_oos_page, list);
list_del(&oos_page->list);
kfree(oos_page);
}
}
static int setup_spt_oos(struct intel_gvt *gvt)
{
struct intel_gvt_gtt *gtt = &gvt->gtt;
struct intel_vgpu_oos_page *oos_page;
int i;
int ret;
INIT_LIST_HEAD(&gtt->oos_page_free_list_head);
INIT_LIST_HEAD(&gtt->oos_page_use_list_head);
for (i = 0; i < preallocated_oos_pages; i++) {
oos_page = kzalloc(sizeof(*oos_page), GFP_KERNEL);
if (!oos_page) {
gvt_err("fail to pre-allocate oos page\n");
ret = -ENOMEM;
goto fail;
}
INIT_LIST_HEAD(&oos_page->list);
INIT_LIST_HEAD(&oos_page->vm_list);
oos_page->id = i;
list_add_tail(&oos_page->list, &gtt->oos_page_free_list_head);
}
gvt_dbg_mm("%d oos pages preallocated\n", i);
return 0;
fail:
clean_spt_oos(gvt);
return ret;
}
/**
* intel_vgpu_find_ppgtt_mm - find a PPGTT mm object
* @vgpu: a vGPU
* @page_table_level: PPGTT page table level
* @root_entry: PPGTT page table root pointers
*
* This function is used to find a PPGTT mm object from mm object pool
*
* Returns:
* pointer to mm object on success, NULL if failed.
*/
struct intel_vgpu_mm *intel_vgpu_find_ppgtt_mm(struct intel_vgpu *vgpu,
int page_table_level, void *root_entry)
{
struct list_head *pos;
struct intel_vgpu_mm *mm;
u64 *src, *dst;
list_for_each(pos, &vgpu->gtt.mm_list_head) {
mm = container_of(pos, struct intel_vgpu_mm, list);
if (mm->type != INTEL_GVT_MM_PPGTT)
continue;
if (mm->page_table_level != page_table_level)
continue;
src = root_entry;
dst = mm->virtual_page_table;
if (page_table_level == 3) {
if (src[0] == dst[0]
&& src[1] == dst[1]
&& src[2] == dst[2]
&& src[3] == dst[3])
return mm;
} else {
if (src[0] == dst[0])
return mm;
}
}
return NULL;
}
/**
* intel_vgpu_g2v_create_ppgtt_mm - create a PPGTT mm object from
* g2v notification
* @vgpu: a vGPU
* @page_table_level: PPGTT page table level
*
* This function is used to create a PPGTT mm object from a guest to GVT-g
* notification.
*
* Returns:
* Zero on success, negative error code if failed.
*/
int intel_vgpu_g2v_create_ppgtt_mm(struct intel_vgpu *vgpu,
int page_table_level)
{
u64 *pdp = (u64 *)&vgpu_vreg64(vgpu, vgtif_reg(pdp[0]));
struct intel_vgpu_mm *mm;
if (WARN_ON((page_table_level != 4) && (page_table_level != 3)))
return -EINVAL;
mm = intel_vgpu_find_ppgtt_mm(vgpu, page_table_level, pdp);
if (mm) {
intel_gvt_mm_reference(mm);
} else {
mm = intel_vgpu_create_mm(vgpu, INTEL_GVT_MM_PPGTT,
pdp, page_table_level, 0);
if (IS_ERR(mm)) {
gvt_err("fail to create mm\n");
return PTR_ERR(mm);
}
}
return 0;
}
/**
* intel_vgpu_g2v_destroy_ppgtt_mm - destroy a PPGTT mm object from
* g2v notification
* @vgpu: a vGPU
* @page_table_level: PPGTT page table level
*
 * This function is used to destroy a PPGTT mm object upon a guest to GVT-g
 * notification.
*
* Returns:
* Zero on success, negative error code if failed.
*/
int intel_vgpu_g2v_destroy_ppgtt_mm(struct intel_vgpu *vgpu,
int page_table_level)
{
u64 *pdp = (u64 *)&vgpu_vreg64(vgpu, vgtif_reg(pdp[0]));
struct intel_vgpu_mm *mm;
if (WARN_ON((page_table_level != 4) && (page_table_level != 3)))
return -EINVAL;
mm = intel_vgpu_find_ppgtt_mm(vgpu, page_table_level, pdp);
if (!mm) {
gvt_err("fail to find ppgtt instance.\n");
return -EINVAL;
}
intel_gvt_mm_unreference(mm);
return 0;
}
/**
* intel_gvt_init_gtt - initialize mm components of a GVT device
* @gvt: GVT device
*
* This function is called at the initialization stage, to initialize
* the mm components of a GVT device.
*
* Returns:
* zero on success, negative error code if failed.
*/
int intel_gvt_init_gtt(struct intel_gvt *gvt)
{
int ret;
gvt_dbg_core("init gtt\n");
if (IS_BROADWELL(gvt->dev_priv) || IS_SKYLAKE(gvt->dev_priv)) {
gvt->gtt.pte_ops = &gen8_gtt_pte_ops;
gvt->gtt.gma_ops = &gen8_gtt_gma_ops;
gvt->gtt.mm_alloc_page_table = gen8_mm_alloc_page_table;
gvt->gtt.mm_free_page_table = gen8_mm_free_page_table;
} else {
return -ENODEV;
}
if (enable_out_of_sync) {
ret = setup_spt_oos(gvt);
if (ret) {
gvt_err("fail to initialize SPT oos\n");
return ret;
}
}
INIT_LIST_HEAD(&gvt->gtt.mm_lru_list_head);
return 0;
}
/**
* intel_gvt_clean_gtt - clean up mm components of a GVT device
* @gvt: GVT device
*
 * This function is called at the driver unloading stage, to clean up
 * the mm components of a GVT device.
*
*/
void intel_gvt_clean_gtt(struct intel_gvt *gvt)
{
if (enable_out_of_sync)
clean_spt_oos(gvt);
}
/*
* Copyright(c) 2011-2016 Intel Corporation. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice (including the next
* paragraph) shall be included in all copies or substantial portions of the
* Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*
* Authors:
* Zhi Wang <zhi.a.wang@intel.com>
* Zhenyu Wang <zhenyuw@linux.intel.com>
* Xiao Zheng <xiao.zheng@intel.com>
*
* Contributors:
* Min He <min.he@intel.com>
* Bing Niu <bing.niu@intel.com>
*
*/
#ifndef _GVT_GTT_H_
#define _GVT_GTT_H_
#define GTT_PAGE_SHIFT 12
#define GTT_PAGE_SIZE (1UL << GTT_PAGE_SHIFT)
#define GTT_PAGE_MASK (~(GTT_PAGE_SIZE-1))
struct intel_vgpu_mm;
#define INTEL_GVT_GTT_HASH_BITS 8
#define INTEL_GVT_INVALID_ADDR (~0UL)
struct intel_gvt_gtt_entry {
u64 val64;
int type;
};
struct intel_gvt_gtt_pte_ops {
struct intel_gvt_gtt_entry *(*get_entry)(void *pt,
struct intel_gvt_gtt_entry *e,
unsigned long index, bool hypervisor_access, unsigned long gpa,
struct intel_vgpu *vgpu);
struct intel_gvt_gtt_entry *(*set_entry)(void *pt,
struct intel_gvt_gtt_entry *e,
unsigned long index, bool hypervisor_access, unsigned long gpa,
struct intel_vgpu *vgpu);
bool (*test_present)(struct intel_gvt_gtt_entry *e);
void (*clear_present)(struct intel_gvt_gtt_entry *e);
bool (*test_pse)(struct intel_gvt_gtt_entry *e);
void (*set_pfn)(struct intel_gvt_gtt_entry *e, unsigned long pfn);
unsigned long (*get_pfn)(struct intel_gvt_gtt_entry *e);
};
struct intel_gvt_gtt_gma_ops {
unsigned long (*gma_to_ggtt_pte_index)(unsigned long gma);
unsigned long (*gma_to_pte_index)(unsigned long gma);
unsigned long (*gma_to_pde_index)(unsigned long gma);
unsigned long (*gma_to_l3_pdp_index)(unsigned long gma);
unsigned long (*gma_to_l4_pdp_index)(unsigned long gma);
unsigned long (*gma_to_pml4_index)(unsigned long gma);
};
struct intel_gvt_gtt {
struct intel_gvt_gtt_pte_ops *pte_ops;
struct intel_gvt_gtt_gma_ops *gma_ops;
int (*mm_alloc_page_table)(struct intel_vgpu_mm *mm);
void (*mm_free_page_table)(struct intel_vgpu_mm *mm);
struct list_head oos_page_use_list_head;
struct list_head oos_page_free_list_head;
struct list_head mm_lru_list_head;
};
enum {
INTEL_GVT_MM_GGTT = 0,
INTEL_GVT_MM_PPGTT,
};
struct intel_vgpu_mm {
int type;
bool initialized;
bool shadowed;
int page_table_entry_type;
u32 page_table_entry_size;
u32 page_table_entry_cnt;
void *virtual_page_table;
void *shadow_page_table;
int page_table_level;
bool has_shadow_page_table;
u32 pde_base_index;
struct list_head list;
struct kref ref;
atomic_t pincount;
struct list_head lru_list;
struct intel_vgpu *vgpu;
};
extern struct intel_gvt_gtt_entry *intel_vgpu_mm_get_entry(
struct intel_vgpu_mm *mm,
void *page_table, struct intel_gvt_gtt_entry *e,
unsigned long index);
extern struct intel_gvt_gtt_entry *intel_vgpu_mm_set_entry(
struct intel_vgpu_mm *mm,
void *page_table, struct intel_gvt_gtt_entry *e,
unsigned long index);
#define ggtt_get_guest_entry(mm, e, index) \
intel_vgpu_mm_get_entry(mm, mm->virtual_page_table, e, index)
#define ggtt_set_guest_entry(mm, e, index) \
intel_vgpu_mm_set_entry(mm, mm->virtual_page_table, e, index)
#define ggtt_get_shadow_entry(mm, e, index) \
intel_vgpu_mm_get_entry(mm, mm->shadow_page_table, e, index)
#define ggtt_set_shadow_entry(mm, e, index) \
intel_vgpu_mm_set_entry(mm, mm->shadow_page_table, e, index)
#define ppgtt_get_guest_root_entry(mm, e, index) \
intel_vgpu_mm_get_entry(mm, mm->virtual_page_table, e, index)
#define ppgtt_set_guest_root_entry(mm, e, index) \
intel_vgpu_mm_set_entry(mm, mm->virtual_page_table, e, index)
#define ppgtt_get_shadow_root_entry(mm, e, index) \
intel_vgpu_mm_get_entry(mm, mm->shadow_page_table, e, index)
#define ppgtt_set_shadow_root_entry(mm, e, index) \
intel_vgpu_mm_set_entry(mm, mm->shadow_page_table, e, index)
extern struct intel_vgpu_mm *intel_vgpu_create_mm(struct intel_vgpu *vgpu,
int mm_type, void *virtual_page_table, int page_table_level,
u32 pde_base_index);
extern void intel_vgpu_destroy_mm(struct kref *mm_ref);
struct intel_vgpu_guest_page;
struct intel_vgpu_gtt {
struct intel_vgpu_mm *ggtt_mm;
unsigned long active_ppgtt_mm_bitmap;
struct list_head mm_list_head;
DECLARE_HASHTABLE(shadow_page_hash_table, INTEL_GVT_GTT_HASH_BITS);
DECLARE_HASHTABLE(guest_page_hash_table, INTEL_GVT_GTT_HASH_BITS);
atomic_t n_write_protected_guest_page;
struct list_head oos_page_list_head;
struct list_head post_shadow_list_head;
struct page *scratch_page;
unsigned long scratch_page_mfn;
};
extern int intel_vgpu_init_gtt(struct intel_vgpu *vgpu);
extern void intel_vgpu_clean_gtt(struct intel_vgpu *vgpu);
extern int intel_gvt_init_gtt(struct intel_gvt *gvt);
extern void intel_gvt_clean_gtt(struct intel_gvt *gvt);
extern struct intel_vgpu_mm *intel_gvt_find_ppgtt_mm(struct intel_vgpu *vgpu,
int page_table_level, void *root_entry);
struct intel_vgpu_oos_page;
struct intel_vgpu_shadow_page {
void *vaddr;
struct page *page;
int type;
struct hlist_node node;
unsigned long mfn;
};
struct intel_vgpu_guest_page {
struct hlist_node node;
bool writeprotection;
unsigned long gfn;
int (*handler)(void *, u64, void *, int);
void *data;
unsigned long write_cnt;
struct intel_vgpu_oos_page *oos_page;
};
struct intel_vgpu_oos_page {
struct intel_vgpu_guest_page *guest_page;
struct list_head list;
struct list_head vm_list;
int id;
unsigned char mem[GTT_PAGE_SIZE];
};
#define GTT_ENTRY_NUM_IN_ONE_PAGE 512
struct intel_vgpu_ppgtt_spt {
struct intel_vgpu_shadow_page shadow_page;
struct intel_vgpu_guest_page guest_page;
int guest_page_type;
atomic_t refcount;
struct intel_vgpu *vgpu;
DECLARE_BITMAP(post_shadow_bitmap, GTT_ENTRY_NUM_IN_ONE_PAGE);
struct list_head post_shadow_list;
};
int intel_vgpu_init_guest_page(struct intel_vgpu *vgpu,
struct intel_vgpu_guest_page *guest_page,
unsigned long gfn,
int (*handler)(void *gp, u64, void *, int),
void *data);
void intel_vgpu_clean_guest_page(struct intel_vgpu *vgpu,
struct intel_vgpu_guest_page *guest_page);
int intel_vgpu_set_guest_page_writeprotection(struct intel_vgpu *vgpu,
struct intel_vgpu_guest_page *guest_page);
void intel_vgpu_clear_guest_page_writeprotection(struct intel_vgpu *vgpu,
struct intel_vgpu_guest_page *guest_page);
struct intel_vgpu_guest_page *intel_vgpu_find_guest_page(
struct intel_vgpu *vgpu, unsigned long gfn);
int intel_vgpu_sync_oos_pages(struct intel_vgpu *vgpu);
int intel_vgpu_flush_post_shadow(struct intel_vgpu *vgpu);
static inline void intel_gvt_mm_reference(struct intel_vgpu_mm *mm)
{
kref_get(&mm->ref);
}
static inline void intel_gvt_mm_unreference(struct intel_vgpu_mm *mm)
{
kref_put(&mm->ref, intel_vgpu_destroy_mm);
}
int intel_vgpu_pin_mm(struct intel_vgpu_mm *mm);
void intel_vgpu_unpin_mm(struct intel_vgpu_mm *mm);
unsigned long intel_vgpu_gma_to_gpa(struct intel_vgpu_mm *mm,
unsigned long gma);
struct intel_vgpu_mm *intel_vgpu_find_ppgtt_mm(struct intel_vgpu *vgpu,
int page_table_level, void *root_entry);
int intel_vgpu_g2v_create_ppgtt_mm(struct intel_vgpu *vgpu,
int page_table_level);
int intel_vgpu_g2v_destroy_ppgtt_mm(struct intel_vgpu *vgpu,
int page_table_level);
int intel_vgpu_emulate_gtt_mmio_read(struct intel_vgpu *vgpu,
unsigned int off, void *p_data, unsigned int bytes);
int intel_vgpu_emulate_gtt_mmio_write(struct intel_vgpu *vgpu,
unsigned int off, void *p_data, unsigned int bytes);
#endif /* _GVT_GTT_H_ */
@@ -19,10 +19,20 @@
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*
* Authors:
* Kevin Tian <kevin.tian@intel.com>
* Eddie Dong <eddie.dong@intel.com>
*
* Contributors:
* Niu Bing <bing.niu@intel.com>
* Zhi Wang <zhi.a.wang@intel.com>
*
*/
#include <linux/types.h>
#include <xen/xen.h>
#include <linux/kthread.h>
#include "i915_drv.h"
@@ -33,6 +43,13 @@ static const char * const supported_hypervisors[] = {
[INTEL_GVT_HYPERVISOR_KVM] = "KVM",
};
struct intel_gvt_io_emulation_ops intel_gvt_io_emulation_ops = {
.emulate_cfg_read = intel_vgpu_emulate_cfg_read,
.emulate_cfg_write = intel_vgpu_emulate_cfg_write,
.emulate_mmio_read = intel_vgpu_emulate_mmio_read,
.emulate_mmio_write = intel_vgpu_emulate_mmio_write,
};
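/*
* Trap-and-emulate entry points for guest configuration-space and MMIO
* accesses; presumably invoked once the hypervisor/MPT layer forwards a
* trapped access to the GVT device model.
*/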
/**
* intel_gvt_init_host - Load MPT modules and detect if we're running in host
* @gvt: intel gvt device
@@ -84,9 +101,66 @@ int intel_gvt_init_host(void)
static void init_device_info(struct intel_gvt *gvt)
{
struct intel_gvt_device_info *info = &gvt->device_info;
if (IS_BROADWELL(gvt->dev_priv) || IS_SKYLAKE(gvt->dev_priv)) {
info->max_support_vgpus = 8;
info->cfg_space_size = 256;
info->mmio_size = 2 * 1024 * 1024;
info->mmio_bar = 0;
info->msi_cap_offset = IS_SKYLAKE(gvt->dev_priv) ? 0xac : 0x90;
info->gtt_start_offset = 8 * 1024 * 1024;
info->gtt_entry_size = 8;
info->gtt_entry_size_shift = 3;
info->gmadr_bytes_in_cmd = 8;
info->max_surface_size = 36 * 1024 * 1024;
}
}
static int gvt_service_thread(void *data)
{
struct intel_gvt *gvt = (struct intel_gvt *)data;
int ret;
gvt_dbg_core("service thread start\n");
while (!kthread_should_stop()) {
ret = wait_event_interruptible(gvt->service_thread_wq,
kthread_should_stop() || gvt->service_request);
if (kthread_should_stop())
break;
if (WARN_ONCE(ret, "service thread was woken up by a signal.\n"))
continue;
if (test_and_clear_bit(INTEL_GVT_REQUEST_EMULATE_VBLANK,
(void *)&gvt->service_request)) {
mutex_lock(&gvt->lock);
intel_gvt_emulate_vblank(gvt);
mutex_unlock(&gvt->lock);
}
}
return 0;
}
static void clean_service_thread(struct intel_gvt *gvt)
{
kthread_stop(gvt->service_thread);
}
static int init_service_thread(struct intel_gvt *gvt)
{
init_waitqueue_head(&gvt->service_thread_wq);
gvt->service_thread = kthread_run(gvt_service_thread,
gvt, "gvt_service_thread");
if (IS_ERR(gvt->service_thread)) {
gvt_err("failed to start service thread.\n");
return PTR_ERR(gvt->service_thread);
}
return 0;
}
/**
@@ -104,7 +178,15 @@ void intel_gvt_clean_device(struct drm_i915_private *dev_priv)
if (WARN_ON(!gvt->initialized))
return;
clean_service_thread(gvt);
intel_gvt_clean_cmd_parser(gvt);
intel_gvt_clean_sched_policy(gvt);
intel_gvt_clean_workload_scheduler(gvt);
intel_gvt_clean_opregion(gvt);
intel_gvt_clean_gtt(gvt);
intel_gvt_clean_irq(gvt);
intel_gvt_clean_mmio_info(gvt);
intel_gvt_free_firmware(gvt);
gvt->initialized = false;
}
@@ -123,6 +205,8 @@ void intel_gvt_clean_device(struct drm_i915_private *dev_priv)
int intel_gvt_init_device(struct drm_i915_private *dev_priv)
{
struct intel_gvt *gvt = &dev_priv->gvt;
int ret;
/*
* Cannot initialize the GVT device until intel_gvt_host has been
* initialized first.
@@ -135,11 +219,66 @@ int intel_gvt_init_device(struct drm_i915_private *dev_priv)
gvt_dbg_core("init gvt device\n");
mutex_init(&gvt->lock);
gvt->dev_priv = dev_priv;
init_device_info(gvt);
ret = intel_gvt_setup_mmio_info(gvt);
if (ret)
return ret;
ret = intel_gvt_load_firmware(gvt);
if (ret)
goto out_clean_mmio_info;
ret = intel_gvt_init_irq(gvt);
if (ret)
goto out_free_firmware;
ret = intel_gvt_init_gtt(gvt);
if (ret)
goto out_clean_irq;
ret = intel_gvt_init_opregion(gvt);
if (ret)
goto out_clean_gtt;
ret = intel_gvt_init_workload_scheduler(gvt);
if (ret)
goto out_clean_opregion;
ret = intel_gvt_init_sched_policy(gvt);
if (ret)
goto out_clean_workload_scheduler;
ret = intel_gvt_init_cmd_parser(gvt);
if (ret)
goto out_clean_sched_policy;
ret = init_service_thread(gvt);
if (ret)
goto out_clean_cmd_parser;
gvt_dbg_core("gvt device creation is done\n");
gvt->initialized = true;
return 0;
out_clean_cmd_parser:
intel_gvt_clean_cmd_parser(gvt);
out_clean_sched_policy:
intel_gvt_clean_sched_policy(gvt);
out_clean_workload_scheduler:
intel_gvt_clean_workload_scheduler(gvt);
out_clean_opregion:
intel_gvt_clean_opregion(gvt);
out_clean_gtt:
intel_gvt_clean_gtt(gvt);
out_clean_irq:
intel_gvt_clean_irq(gvt);
out_free_firmware:
intel_gvt_free_firmware(gvt);
out_clean_mmio_info:
intel_gvt_clean_mmio_info(gvt);
return ret;
}
@@ -19,6 +19,15 @@
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*
* Authors:
* Kevin Tian <kevin.tian@intel.com>
* Eddie Dong <eddie.dong@intel.com>
*
* Contributors:
* Niu Bing <bing.niu@intel.com>
* Zhi Wang <zhi.a.wang@intel.com>
*
*/
#ifndef _GVT_H_
@@ -26,6 +35,17 @@
#include "debug.h"
#include "hypercall.h"
#include "mmio.h"
#include "reg.h"
#include "interrupt.h"
#include "gtt.h"
#include "display.h"
#include "edid.h"
#include "execlist.h"
#include "scheduler.h"
#include "sched_policy.h"
#include "render.h"
#include "cmd_parser.h"
#define GVT_MAX_VGPU 8
@@ -45,13 +65,129 @@ extern struct intel_gvt_host intel_gvt_host;
/* Describe per-platform limitations. */
struct intel_gvt_device_info {
u32 max_support_vgpus;
u32 cfg_space_size;
u32 mmio_size;
u32 mmio_bar;
unsigned long msi_cap_offset;
u32 gtt_start_offset;
u32 gtt_entry_size;
u32 gtt_entry_size_shift;
int gmadr_bytes_in_cmd;
u32 max_surface_size;
};
/* GM resources owned by a vGPU */
struct intel_vgpu_gm {
u64 aperture_sz;
u64 hidden_sz;
struct drm_mm_node low_gm_node;
struct drm_mm_node high_gm_node;
};
#define INTEL_GVT_MAX_NUM_FENCES 32
/* Fences owned by a vGPU */
struct intel_vgpu_fence {
struct drm_i915_fence_reg *regs[INTEL_GVT_MAX_NUM_FENCES];
u32 base;
u32 size;
};
struct intel_vgpu_mmio {
void *vreg;
void *sreg;
bool disable_warn_untrack;
};
#define INTEL_GVT_MAX_CFG_SPACE_SZ 256
#define INTEL_GVT_MAX_BAR_NUM 4
struct intel_vgpu_pci_bar {
u64 size;
bool tracked;
};
struct intel_vgpu_cfg_space {
unsigned char virtual_cfg_space[INTEL_GVT_MAX_CFG_SPACE_SZ];
struct intel_vgpu_pci_bar bar[INTEL_GVT_MAX_BAR_NUM];
};
#define vgpu_cfg_space(vgpu) ((vgpu)->cfg_space.virtual_cfg_space)
#define INTEL_GVT_MAX_PIPE 4
struct intel_vgpu_irq {
bool irq_warn_once[INTEL_GVT_EVENT_MAX];
DECLARE_BITMAP(flip_done_event[INTEL_GVT_MAX_PIPE],
INTEL_GVT_EVENT_MAX);
};
struct intel_vgpu_opregion {
void *va;
u32 gfn[INTEL_GVT_OPREGION_PAGES];
struct page *pages[INTEL_GVT_OPREGION_PAGES];
};
#define vgpu_opregion(vgpu) (&(vgpu->opregion))
#define INTEL_GVT_MAX_PORT 5
struct intel_vgpu_display {
struct intel_vgpu_i2c_edid i2c_edid;
struct intel_vgpu_port ports[INTEL_GVT_MAX_PORT];
struct intel_vgpu_sbi sbi;
};
struct intel_vgpu {
struct intel_gvt *gvt;
int id;
unsigned long handle; /* vGPU handle used by hypervisor MPT modules */
bool active;
bool resetting;
void *sched_data;
struct intel_vgpu_fence fence;
struct intel_vgpu_gm gm;
struct intel_vgpu_cfg_space cfg_space;
struct intel_vgpu_mmio mmio;
struct intel_vgpu_irq irq;
struct intel_vgpu_gtt gtt;
struct intel_vgpu_opregion opregion;
struct intel_vgpu_display display;
struct intel_vgpu_execlist execlist[I915_NUM_ENGINES];
struct list_head workload_q_head[I915_NUM_ENGINES];
struct kmem_cache *workloads;
atomic_t running_workload_num;
DECLARE_BITMAP(tlb_handle_pending, I915_NUM_ENGINES);
struct i915_gem_context *shadow_ctx;
struct notifier_block shadow_ctx_notifier_block;
};
struct intel_gvt_gm {
unsigned long vgpu_allocated_low_gm_size;
unsigned long vgpu_allocated_high_gm_size;
};
struct intel_gvt_fence {
unsigned long vgpu_allocated_fence_num;
};
#define INTEL_GVT_MMIO_HASH_BITS 9
struct intel_gvt_mmio {
u32 *mmio_attribute;
DECLARE_HASHTABLE(mmio_info_table, INTEL_GVT_MMIO_HASH_BITS);
};
struct intel_gvt_firmware {
void *cfg_space;
void *mmio;
bool firmware_loaded;
};
struct intel_gvt_opregion {
void *opregion_va;
u32 opregion_pa;
};
struct intel_gvt {
@@ -62,8 +198,188 @@ struct intel_gvt {
struct idr vgpu_idr; /* vGPU IDR pool */
struct intel_gvt_device_info device_info;
struct intel_gvt_gm gm;
struct intel_gvt_fence fence;
struct intel_gvt_mmio mmio;
struct intel_gvt_firmware firmware;
struct intel_gvt_irq irq;
struct intel_gvt_gtt gtt;
struct intel_gvt_opregion opregion;
struct intel_gvt_workload_scheduler scheduler;
DECLARE_HASHTABLE(cmd_table, GVT_CMD_HASH_BITS);
struct task_struct *service_thread;
wait_queue_head_t service_thread_wq;
unsigned long service_request;
};
enum {
INTEL_GVT_REQUEST_EMULATE_VBLANK = 0,
};
static inline void intel_gvt_request_service(struct intel_gvt *gvt,
int service)
{
set_bit(service, (void *)&gvt->service_request);
wake_up(&gvt->service_thread_wq);
}
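/*
* Usage sketch: vblank emulation can be kicked into the service thread with
* intel_gvt_request_service(gvt, INTEL_GVT_REQUEST_EMULATE_VBLANK);
*/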
void intel_gvt_free_firmware(struct intel_gvt *gvt);
int intel_gvt_load_firmware(struct intel_gvt *gvt);
/* Aperture/GM space definitions for GVT device */
#define gvt_aperture_sz(gvt) (gvt->dev_priv->ggtt.mappable_end)
#define gvt_aperture_pa_base(gvt) (gvt->dev_priv->ggtt.mappable_base)
#define gvt_ggtt_gm_sz(gvt) (gvt->dev_priv->ggtt.base.total)
#define gvt_ggtt_sz(gvt) \
((gvt->dev_priv->ggtt.base.total >> PAGE_SHIFT) << 3)
#define gvt_hidden_sz(gvt) (gvt_ggtt_gm_sz(gvt) - gvt_aperture_sz(gvt))
#define gvt_aperture_gmadr_base(gvt) (0)
#define gvt_aperture_gmadr_end(gvt) (gvt_aperture_gmadr_base(gvt) \
+ gvt_aperture_sz(gvt) - 1)
#define gvt_hidden_gmadr_base(gvt) (gvt_aperture_gmadr_base(gvt) \
+ gvt_aperture_sz(gvt))
#define gvt_hidden_gmadr_end(gvt) (gvt_hidden_gmadr_base(gvt) \
+ gvt_hidden_sz(gvt) - 1)
#define gvt_fence_sz(gvt) (gvt->dev_priv->num_fence_regs)
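/*
* Layout implied by the macros above: the CPU-mappable aperture occupies
* GMADR 0 .. gvt_aperture_sz() - 1, and the hidden (high, non-mappable) range
* follows immediately, ending at gvt_ggtt_gm_sz() - 1.
*/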
/* Aperture/GM space definitions for vGPU */
#define vgpu_aperture_offset(vgpu) ((vgpu)->gm.low_gm_node.start)
#define vgpu_hidden_offset(vgpu) ((vgpu)->gm.high_gm_node.start)
#define vgpu_aperture_sz(vgpu) ((vgpu)->gm.aperture_sz)
#define vgpu_hidden_sz(vgpu) ((vgpu)->gm.hidden_sz)
#define vgpu_aperture_pa_base(vgpu) \
(gvt_aperture_pa_base(vgpu->gvt) + vgpu_aperture_offset(vgpu))
#define vgpu_ggtt_gm_sz(vgpu) ((vgpu)->gm.aperture_sz + (vgpu)->gm.hidden_sz)
#define vgpu_aperture_pa_end(vgpu) \
(vgpu_aperture_pa_base(vgpu) + vgpu_aperture_sz(vgpu) - 1)
#define vgpu_aperture_gmadr_base(vgpu) (vgpu_aperture_offset(vgpu))
#define vgpu_aperture_gmadr_end(vgpu) \
(vgpu_aperture_gmadr_base(vgpu) + vgpu_aperture_sz(vgpu) - 1)
#define vgpu_hidden_gmadr_base(vgpu) (vgpu_hidden_offset(vgpu))
#define vgpu_hidden_gmadr_end(vgpu) \
(vgpu_hidden_gmadr_base(vgpu) + vgpu_hidden_sz(vgpu) - 1)
#define vgpu_fence_base(vgpu) (vgpu->fence.base)
#define vgpu_fence_sz(vgpu) (vgpu->fence.size)
struct intel_vgpu_creation_params {
__u64 handle;
__u64 low_gm_sz; /* in MB */
__u64 high_gm_sz; /* in MB */
__u64 fence_sz;
__s32 primary;
__u64 vgpu_id;
};
int intel_vgpu_alloc_resource(struct intel_vgpu *vgpu,
struct intel_vgpu_creation_params *param);
void intel_vgpu_free_resource(struct intel_vgpu *vgpu);
void intel_vgpu_write_fence(struct intel_vgpu *vgpu,
u32 fence, u64 value);
/* Macros for easily accessing vGPU virtual/shadow register */
#define vgpu_vreg(vgpu, reg) \
(*(u32 *)(vgpu->mmio.vreg + INTEL_GVT_MMIO_OFFSET(reg)))
#define vgpu_vreg8(vgpu, reg) \
(*(u8 *)(vgpu->mmio.vreg + INTEL_GVT_MMIO_OFFSET(reg)))
#define vgpu_vreg16(vgpu, reg) \
(*(u16 *)(vgpu->mmio.vreg + INTEL_GVT_MMIO_OFFSET(reg)))
#define vgpu_vreg64(vgpu, reg) \
(*(u64 *)(vgpu->mmio.vreg + INTEL_GVT_MMIO_OFFSET(reg)))
#define vgpu_sreg(vgpu, reg) \
(*(u32 *)(vgpu->mmio.sreg + INTEL_GVT_MMIO_OFFSET(reg)))
#define vgpu_sreg8(vgpu, reg) \
(*(u8 *)(vgpu->mmio.sreg + INTEL_GVT_MMIO_OFFSET(reg)))
#define vgpu_sreg16(vgpu, reg) \
(*(u16 *)(vgpu->mmio.sreg + INTEL_GVT_MMIO_OFFSET(reg)))
#define vgpu_sreg64(vgpu, reg) \
(*(u64 *)(vgpu->mmio.sreg + INTEL_GVT_MMIO_OFFSET(reg)))
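/*
* Usage sketch: handlers below poke the virtual register file through these
* macros, e.g. vgpu_vreg(vgpu, PCH_PP_STATUS) |= PP_ON; the sreg variants
* address the separate "shadow" register copy.
*/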
#define for_each_active_vgpu(gvt, vgpu, id) \
idr_for_each_entry((&(gvt)->vgpu_idr), (vgpu), (id)) \
for_each_if(vgpu->active)
static inline void intel_vgpu_write_pci_bar(struct intel_vgpu *vgpu,
u32 offset, u32 val, bool low)
{
u32 *pval;
/* BAR offset should be 32-bit aligned */
offset = rounddown(offset, 4);
pval = (u32 *)(vgpu_cfg_space(vgpu) + offset);
if (low) {
/*
* only update bit 31 - bit 4,
* leave the bit 3 - bit 0 unchanged.
*/
*pval = (val & GENMASK(31, 4)) | (*pval & GENMASK(3, 0));
}
}
struct intel_vgpu *intel_gvt_create_vgpu(struct intel_gvt *gvt,
struct intel_vgpu_creation_params *
param);
void intel_gvt_destroy_vgpu(struct intel_vgpu *vgpu);
/* validating GM functions */
#define vgpu_gmadr_is_aperture(vgpu, gmadr) \
((gmadr >= vgpu_aperture_gmadr_base(vgpu)) && \
(gmadr <= vgpu_aperture_gmadr_end(vgpu)))
#define vgpu_gmadr_is_hidden(vgpu, gmadr) \
((gmadr >= vgpu_hidden_gmadr_base(vgpu)) && \
(gmadr <= vgpu_hidden_gmadr_end(vgpu)))
#define vgpu_gmadr_is_valid(vgpu, gmadr) \
((vgpu_gmadr_is_aperture(vgpu, gmadr) || \
(vgpu_gmadr_is_hidden(vgpu, gmadr))))
#define gvt_gmadr_is_aperture(gvt, gmadr) \
((gmadr >= gvt_aperture_gmadr_base(gvt)) && \
(gmadr <= gvt_aperture_gmadr_end(gvt)))
#define gvt_gmadr_is_hidden(gvt, gmadr) \
((gmadr >= gvt_hidden_gmadr_base(gvt)) && \
(gmadr <= gvt_hidden_gmadr_end(gvt)))
#define gvt_gmadr_is_valid(gvt, gmadr) \
(gvt_gmadr_is_aperture(gvt, gmadr) || \
gvt_gmadr_is_hidden(gvt, gmadr))
bool intel_gvt_ggtt_validate_range(struct intel_vgpu *vgpu, u64 addr, u32 size);
int intel_gvt_ggtt_gmadr_g2h(struct intel_vgpu *vgpu, u64 g_addr, u64 *h_addr);
int intel_gvt_ggtt_gmadr_h2g(struct intel_vgpu *vgpu, u64 h_addr, u64 *g_addr);
int intel_gvt_ggtt_index_g2h(struct intel_vgpu *vgpu, unsigned long g_index,
unsigned long *h_index);
int intel_gvt_ggtt_h2g_index(struct intel_vgpu *vgpu, unsigned long h_index,
unsigned long *g_index);
int intel_vgpu_emulate_cfg_read(void *__vgpu, unsigned int offset,
void *p_data, unsigned int bytes);
int intel_vgpu_emulate_cfg_write(void *__vgpu, unsigned int offset,
void *p_data, unsigned int bytes);
void intel_gvt_clean_opregion(struct intel_gvt *gvt);
int intel_gvt_init_opregion(struct intel_gvt *gvt);
void intel_vgpu_clean_opregion(struct intel_vgpu *vgpu);
int intel_vgpu_init_opregion(struct intel_vgpu *vgpu, u32 gpa);
int intel_vgpu_emulate_opregion_request(struct intel_vgpu *vgpu, u32 swsci);
#include "mpt.h"
#endif
/*
* Copyright(c) 2011-2016 Intel Corporation. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice (including the next
* paragraph) shall be included in all copies or substantial portions of the
* Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*
* Authors:
* Kevin Tian <kevin.tian@intel.com>
* Eddie Dong <eddie.dong@intel.com>
* Zhiyuan Lv <zhiyuan.lv@intel.com>
*
* Contributors:
* Min He <min.he@intel.com>
* Tina Zhang <tina.zhang@intel.com>
* Pei Zhang <pei.zhang@intel.com>
* Niu Bing <bing.niu@intel.com>
* Ping Gao <ping.a.gao@intel.com>
* Zhi Wang <zhi.a.wang@intel.com>
*
*/
#include "i915_drv.h"
/* XXX FIXME i915 has changed PP_XXX definition */
#define PCH_PP_STATUS _MMIO(0xc7200)
#define PCH_PP_CONTROL _MMIO(0xc7204)
#define PCH_PP_ON_DELAYS _MMIO(0xc7208)
#define PCH_PP_OFF_DELAYS _MMIO(0xc720c)
#define PCH_PP_DIVISOR _MMIO(0xc7210)
/* Register contains RO bits */
#define F_RO (1 << 0)
/* Register contains graphics address */
#define F_GMADR (1 << 1)
/* Mode mask registers with high 16 bits as the mask bits */
#define F_MODE_MASK (1 << 2)
/* This reg can be accessed by GPU commands */
#define F_CMD_ACCESS (1 << 3)
/* This reg has been accessed by a VM */
#define F_ACCESSED (1 << 4)
/* This reg has been accessed through GPU commands */
#define F_CMD_ACCESSED (1 << 5)
/* This reg could be accessed by unaligned address */
#define F_UNALIGN (1 << 6)
unsigned long intel_gvt_get_device_type(struct intel_gvt *gvt)
{
if (IS_BROADWELL(gvt->dev_priv))
return D_BDW;
else if (IS_SKYLAKE(gvt->dev_priv))
return D_SKL;
return 0;
}
bool intel_gvt_match_device(struct intel_gvt *gvt,
unsigned long device)
{
return intel_gvt_get_device_type(gvt) & device;
}
static void read_vreg(struct intel_vgpu *vgpu, unsigned int offset,
void *p_data, unsigned int bytes)
{
memcpy(p_data, &vgpu_vreg(vgpu, offset), bytes);
}
static void write_vreg(struct intel_vgpu *vgpu, unsigned int offset,
void *p_data, unsigned int bytes)
{
memcpy(&vgpu_vreg(vgpu, offset), p_data, bytes);
}
static int new_mmio_info(struct intel_gvt *gvt,
u32 offset, u32 flags, u32 size,
u32 addr_mask, u32 ro_mask, u32 device,
void *read, void *write)
{
struct intel_gvt_mmio_info *info, *p;
u32 start, end, i;
if (!intel_gvt_match_device(gvt, device))
return 0;
if (WARN_ON(!IS_ALIGNED(offset, 4)))
return -EINVAL;
start = offset;
end = offset + size;
for (i = start; i < end; i += 4) {
info = kzalloc(sizeof(*info), GFP_KERNEL);
if (!info)
return -ENOMEM;
info->offset = i;
p = intel_gvt_find_mmio_info(gvt, info->offset);
if (p)
gvt_err("dup mmio definition offset %x\n",
info->offset);
info->size = size;
info->length = (i + 4) < end ? 4 : (end - i);
info->addr_mask = addr_mask;
info->device = device;
info->read = read ? read : intel_vgpu_default_mmio_read;
info->write = write ? write : intel_vgpu_default_mmio_write;
gvt->mmio.mmio_attribute[info->offset / 4] = flags;
INIT_HLIST_NODE(&info->node);
hash_add(gvt->mmio.mmio_info_table, &info->node, info->offset);
}
return 0;
}
static int render_mmio_to_ring_id(struct intel_gvt *gvt, unsigned int reg)
{
int i;
reg &= ~GENMASK(11, 0);
for (i = 0; i < I915_NUM_ENGINES; i++) {
if (gvt->dev_priv->engine[i].mmio_base == reg)
return i;
}
return -1;
}
#define offset_to_fence_num(offset) \
((offset - i915_mmio_reg_offset(FENCE_REG_GEN6_LO(0))) >> 3)
#define fence_num_to_offset(num) \
(num * 8 + i915_mmio_reg_offset(FENCE_REG_GEN6_LO(0)))
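/*
* Each GEN6+ fence register is 8 bytes wide (a LO/HI pair), hence the shift
* by 3 and multiply by 8 when converting between MMIO offset and fence index.
*/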
static int sanitize_fence_mmio_access(struct intel_vgpu *vgpu,
unsigned int fence_num, void *p_data, unsigned int bytes)
{
if (fence_num >= vgpu_fence_sz(vgpu)) {
gvt_err("vgpu%d: found oob fence register access\n",
vgpu->id);
gvt_err("vgpu%d: total fence num %d access fence num %d\n",
vgpu->id, vgpu_fence_sz(vgpu), fence_num);
memset(p_data, 0, bytes);
}
return 0;
}
static int fence_mmio_read(struct intel_vgpu *vgpu, unsigned int off,
void *p_data, unsigned int bytes)
{
int ret;
ret = sanitize_fence_mmio_access(vgpu, offset_to_fence_num(off),
p_data, bytes);
if (ret)
return ret;
read_vreg(vgpu, off, p_data, bytes);
return 0;
}
static int fence_mmio_write(struct intel_vgpu *vgpu, unsigned int off,
void *p_data, unsigned int bytes)
{
unsigned int fence_num = offset_to_fence_num(off);
int ret;
ret = sanitize_fence_mmio_access(vgpu, fence_num, p_data, bytes);
if (ret)
return ret;
write_vreg(vgpu, off, p_data, bytes);
intel_vgpu_write_fence(vgpu, fence_num,
vgpu_vreg64(vgpu, fence_num_to_offset(fence_num)));
return 0;
}
#define CALC_MODE_MASK_REG(old, new) \
(((new) & GENMASK(31, 16)) \
| ((((old) & GENMASK(15, 0)) & ~((new) >> 16)) \
| ((new) & ((new) >> 16))))
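/*
* Masked-write semantics: bits 31:16 of the new value select which of bits
* 15:0 get updated; unselected low bits keep their old value. Example: with
* old = 0x00f0 and new = 0x000f000f, only bits 3:0 are written and the
* resulting low word is 0x00ff.
*/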
static int mul_force_wake_write(struct intel_vgpu *vgpu,
unsigned int offset, void *p_data, unsigned int bytes)
{
u32 old, new;
uint32_t ack_reg_offset;
old = vgpu_vreg(vgpu, offset);
new = CALC_MODE_MASK_REG(old, *(u32 *)p_data);
if (IS_SKYLAKE(vgpu->gvt->dev_priv)) {
switch (offset) {
case FORCEWAKE_RENDER_GEN9_REG:
ack_reg_offset = FORCEWAKE_ACK_RENDER_GEN9_REG;
break;
case FORCEWAKE_BLITTER_GEN9_REG:
ack_reg_offset = FORCEWAKE_ACK_BLITTER_GEN9_REG;
break;
case FORCEWAKE_MEDIA_GEN9_REG:
ack_reg_offset = FORCEWAKE_ACK_MEDIA_GEN9_REG;
break;
default:
/* should not hit here */
gvt_err("invalid forcewake offset 0x%x\n", offset);
return 1;
}
} else {
ack_reg_offset = FORCEWAKE_ACK_HSW_REG;
}
vgpu_vreg(vgpu, offset) = new;
vgpu_vreg(vgpu, ack_reg_offset) = (new & GENMASK(15, 0));
return 0;
}
static int handle_device_reset(struct intel_vgpu *vgpu, unsigned int offset,
void *p_data, unsigned int bytes, unsigned long bitmap)
{
struct intel_gvt_workload_scheduler *scheduler =
&vgpu->gvt->scheduler;
vgpu->resetting = true;
intel_vgpu_stop_schedule(vgpu);
if (scheduler->current_vgpu == vgpu) {
mutex_unlock(&vgpu->gvt->lock);
intel_gvt_wait_vgpu_idle(vgpu);
mutex_lock(&vgpu->gvt->lock);
}
intel_vgpu_reset_execlist(vgpu, bitmap);
vgpu->resetting = false;
return 0;
}
static int gdrst_mmio_write(struct intel_vgpu *vgpu, unsigned int offset,
void *p_data, unsigned int bytes)
{
u32 data;
u64 bitmap = 0;
data = vgpu_vreg(vgpu, offset);
if (data & GEN6_GRDOM_FULL) {
gvt_dbg_mmio("vgpu%d: request full GPU reset\n", vgpu->id);
bitmap = 0xff;
}
if (data & GEN6_GRDOM_RENDER) {
gvt_dbg_mmio("vgpu%d: request RCS reset\n", vgpu->id);
bitmap |= (1 << RCS);
}
if (data & GEN6_GRDOM_MEDIA) {
gvt_dbg_mmio("vgpu%d: request VCS reset\n", vgpu->id);
bitmap |= (1 << VCS);
}
if (data & GEN6_GRDOM_BLT) {
gvt_dbg_mmio("vgpu%d: request BCS Reset\n", vgpu->id);
bitmap |= (1 << BCS);
}
if (data & GEN6_GRDOM_VECS) {
gvt_dbg_mmio("vgpu%d: request VECS Reset\n", vgpu->id);
bitmap |= (1 << VECS);
}
if (data & GEN8_GRDOM_MEDIA2) {
gvt_dbg_mmio("vgpu%d: request VCS2 Reset\n", vgpu->id);
if (HAS_BSD2(vgpu->gvt->dev_priv))
bitmap |= (1 << VCS2);
}
return handle_device_reset(vgpu, offset, p_data, bytes, bitmap);
}
static int gmbus_mmio_read(struct intel_vgpu *vgpu, unsigned int offset,
void *p_data, unsigned int bytes)
{
return intel_gvt_i2c_handle_gmbus_read(vgpu, offset, p_data, bytes);
}
static int gmbus_mmio_write(struct intel_vgpu *vgpu, unsigned int offset,
void *p_data, unsigned int bytes)
{
return intel_gvt_i2c_handle_gmbus_write(vgpu, offset, p_data, bytes);
}
static int pch_pp_control_mmio_write(struct intel_vgpu *vgpu,
unsigned int offset, void *p_data, unsigned int bytes)
{
write_vreg(vgpu, offset, p_data, bytes);
if (vgpu_vreg(vgpu, offset) & PANEL_POWER_ON) {
vgpu_vreg(vgpu, PCH_PP_STATUS) |= PP_ON;
vgpu_vreg(vgpu, PCH_PP_STATUS) |= PP_SEQUENCE_STATE_ON_IDLE;
vgpu_vreg(vgpu, PCH_PP_STATUS) &= ~PP_SEQUENCE_POWER_DOWN;
vgpu_vreg(vgpu, PCH_PP_STATUS) &= ~PP_CYCLE_DELAY_ACTIVE;
} else
vgpu_vreg(vgpu, PCH_PP_STATUS) &=
~(PP_ON | PP_SEQUENCE_POWER_DOWN
| PP_CYCLE_DELAY_ACTIVE);
return 0;
}
static int transconf_mmio_write(struct intel_vgpu *vgpu,
unsigned int offset, void *p_data, unsigned int bytes)
{
write_vreg(vgpu, offset, p_data, bytes);
if (vgpu_vreg(vgpu, offset) & TRANS_ENABLE)
vgpu_vreg(vgpu, offset) |= TRANS_STATE_ENABLE;
else
vgpu_vreg(vgpu, offset) &= ~TRANS_STATE_ENABLE;
return 0;
}
static int lcpll_ctl_mmio_write(struct intel_vgpu *vgpu, unsigned int offset,
void *p_data, unsigned int bytes)
{
write_vreg(vgpu, offset, p_data, bytes);
if (vgpu_vreg(vgpu, offset) & LCPLL_PLL_DISABLE)
vgpu_vreg(vgpu, offset) &= ~LCPLL_PLL_LOCK;
else
vgpu_vreg(vgpu, offset) |= LCPLL_PLL_LOCK;
if (vgpu_vreg(vgpu, offset) & LCPLL_CD_SOURCE_FCLK)
vgpu_vreg(vgpu, offset) |= LCPLL_CD_SOURCE_FCLK_DONE;
else
vgpu_vreg(vgpu, offset) &= ~LCPLL_CD_SOURCE_FCLK_DONE;
return 0;
}
static int dpy_reg_mmio_read(struct intel_vgpu *vgpu, unsigned int offset,
void *p_data, unsigned int bytes)
{
*(u32 *)p_data = (1 << 17);
return 0;
}
static int dpy_reg_mmio_read_2(struct intel_vgpu *vgpu, unsigned int offset,
void *p_data, unsigned int bytes)
{
*(u32 *)p_data = 3;
return 0;
}
static int dpy_reg_mmio_read_3(struct intel_vgpu *vgpu, unsigned int offset,
void *p_data, unsigned int bytes)
{
*(u32 *)p_data = (0x2f << 16);
return 0;
}
static int pipeconf_mmio_write(struct intel_vgpu *vgpu, unsigned int offset,
void *p_data, unsigned int bytes)
{
u32 data;
write_vreg(vgpu, offset, p_data, bytes);
data = vgpu_vreg(vgpu, offset);
if (data & PIPECONF_ENABLE)
vgpu_vreg(vgpu, offset) |= I965_PIPECONF_ACTIVE;
else
vgpu_vreg(vgpu, offset) &= ~I965_PIPECONF_ACTIVE;
intel_gvt_check_vblank_emulation(vgpu->gvt);
return 0;
}
static int ddi_buf_ctl_mmio_write(struct intel_vgpu *vgpu, unsigned int offset,
void *p_data, unsigned int bytes)
{
write_vreg(vgpu, offset, p_data, bytes);
if (vgpu_vreg(vgpu, offset) & DDI_BUF_CTL_ENABLE) {
vgpu_vreg(vgpu, offset) &= ~DDI_BUF_IS_IDLE;
} else {
vgpu_vreg(vgpu, offset) |= DDI_BUF_IS_IDLE;
if (offset == i915_mmio_reg_offset(DDI_BUF_CTL(PORT_E)))
vgpu_vreg(vgpu, DP_TP_STATUS(PORT_E))
&= ~DP_TP_STATUS_AUTOTRAIN_DONE;
}
return 0;
}
static int fdi_rx_iir_mmio_write(struct intel_vgpu *vgpu,
unsigned int offset, void *p_data, unsigned int bytes)
{
vgpu_vreg(vgpu, offset) &= ~*(u32 *)p_data;
return 0;
}
#define FDI_LINK_TRAIN_PATTERN1 0
#define FDI_LINK_TRAIN_PATTERN2 1
static int fdi_auto_training_started(struct intel_vgpu *vgpu)
{
u32 ddi_buf_ctl = vgpu_vreg(vgpu, DDI_BUF_CTL(PORT_E));
u32 rx_ctl = vgpu_vreg(vgpu, _FDI_RXA_CTL);
u32 tx_ctl = vgpu_vreg(vgpu, DP_TP_CTL(PORT_E));
if ((ddi_buf_ctl & DDI_BUF_CTL_ENABLE) &&
(rx_ctl & FDI_RX_ENABLE) &&
(rx_ctl & FDI_AUTO_TRAINING) &&
(tx_ctl & DP_TP_CTL_ENABLE) &&
(tx_ctl & DP_TP_CTL_FDI_AUTOTRAIN))
return 1;
else
return 0;
}
static int check_fdi_rx_train_status(struct intel_vgpu *vgpu,
enum pipe pipe, unsigned int train_pattern)
{
i915_reg_t fdi_rx_imr, fdi_tx_ctl, fdi_rx_ctl;
unsigned int fdi_rx_check_bits, fdi_tx_check_bits;
unsigned int fdi_rx_train_bits, fdi_tx_train_bits;
unsigned int fdi_iir_check_bits;
fdi_rx_imr = FDI_RX_IMR(pipe);
fdi_tx_ctl = FDI_TX_CTL(pipe);
fdi_rx_ctl = FDI_RX_CTL(pipe);
if (train_pattern == FDI_LINK_TRAIN_PATTERN1) {
fdi_rx_train_bits = FDI_LINK_TRAIN_PATTERN_1_CPT;
fdi_tx_train_bits = FDI_LINK_TRAIN_PATTERN_1;
fdi_iir_check_bits = FDI_RX_BIT_LOCK;
} else if (train_pattern == FDI_LINK_TRAIN_PATTERN2) {
fdi_rx_train_bits = FDI_LINK_TRAIN_PATTERN_2_CPT;
fdi_tx_train_bits = FDI_LINK_TRAIN_PATTERN_2;
fdi_iir_check_bits = FDI_RX_SYMBOL_LOCK;
} else {
gvt_err("Invalid train pattern %d\n", train_pattern);
return -EINVAL;
}
fdi_rx_check_bits = FDI_RX_ENABLE | fdi_rx_train_bits;
fdi_tx_check_bits = FDI_TX_ENABLE | fdi_tx_train_bits;
/* If imr bit has been masked */
if (vgpu_vreg(vgpu, fdi_rx_imr) & fdi_iir_check_bits)
return 0;
if (((vgpu_vreg(vgpu, fdi_tx_ctl) & fdi_tx_check_bits)
== fdi_tx_check_bits)
&& ((vgpu_vreg(vgpu, fdi_rx_ctl) & fdi_rx_check_bits)
== fdi_rx_check_bits))
return 1;
else
return 0;
}
#define INVALID_INDEX (~0U)
static unsigned int calc_index(unsigned int offset, unsigned int start,
unsigned int next, unsigned int end, i915_reg_t i915_end)
{
unsigned int range = next - start;
if (!end)
end = i915_mmio_reg_offset(i915_end);
if (offset < start || offset > end)
return INVALID_INDEX;
offset -= start;
return offset / range;
}
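/*
* calc_index() turns a register offset into a pipe/port index by using the
* distance between two consecutive per-instance registers as the stride; for
* example, the FDI_RX_CTL_TO_PIPE() macro below yields 1 for _FDI_RXB_CTL.
*/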
#define FDI_RX_CTL_TO_PIPE(offset) \
calc_index(offset, _FDI_RXA_CTL, _FDI_RXB_CTL, 0, FDI_RX_CTL(PIPE_C))
#define FDI_TX_CTL_TO_PIPE(offset) \
calc_index(offset, _FDI_TXA_CTL, _FDI_TXB_CTL, 0, FDI_TX_CTL(PIPE_C))
#define FDI_RX_IMR_TO_PIPE(offset) \
calc_index(offset, _FDI_RXA_IMR, _FDI_RXB_IMR, 0, FDI_RX_IMR(PIPE_C))
static int update_fdi_rx_iir_status(struct intel_vgpu *vgpu,
unsigned int offset, void *p_data, unsigned int bytes)
{
i915_reg_t fdi_rx_iir;
unsigned int index;
int ret;
if (FDI_RX_CTL_TO_PIPE(offset) != INVALID_INDEX)
index = FDI_RX_CTL_TO_PIPE(offset);
else if (FDI_TX_CTL_TO_PIPE(offset) != INVALID_INDEX)
index = FDI_TX_CTL_TO_PIPE(offset);
else if (FDI_RX_IMR_TO_PIPE(offset) != INVALID_INDEX)
index = FDI_RX_IMR_TO_PIPE(offset);
else {
gvt_err("Unsupported register %x\n", offset);
return -EINVAL;
}
write_vreg(vgpu, offset, p_data, bytes);
fdi_rx_iir = FDI_RX_IIR(index);
ret = check_fdi_rx_train_status(vgpu, index, FDI_LINK_TRAIN_PATTERN1);
if (ret < 0)
return ret;
if (ret)
vgpu_vreg(vgpu, fdi_rx_iir) |= FDI_RX_BIT_LOCK;
ret = check_fdi_rx_train_status(vgpu, index, FDI_LINK_TRAIN_PATTERN2);
if (ret < 0)
return ret;
if (ret)
vgpu_vreg(vgpu, fdi_rx_iir) |= FDI_RX_SYMBOL_LOCK;
if (offset == _FDI_RXA_CTL)
if (fdi_auto_training_started(vgpu))
vgpu_vreg(vgpu, DP_TP_STATUS(PORT_E)) |=
DP_TP_STATUS_AUTOTRAIN_DONE;
return 0;
}
#define DP_TP_CTL_TO_PORT(offset) \
calc_index(offset, _DP_TP_CTL_A, _DP_TP_CTL_B, 0, DP_TP_CTL(PORT_E))
static int dp_tp_ctl_mmio_write(struct intel_vgpu *vgpu, unsigned int offset,
void *p_data, unsigned int bytes)
{
i915_reg_t status_reg;
unsigned int index;
u32 data;
write_vreg(vgpu, offset, p_data, bytes);
index = DP_TP_CTL_TO_PORT(offset);
data = (vgpu_vreg(vgpu, offset) & GENMASK(10, 8)) >> 8;
if (data == 0x2) {
status_reg = DP_TP_STATUS(index);
vgpu_vreg(vgpu, status_reg) |= (1 << 25);
}
return 0;
}
static int dp_tp_status_mmio_write(struct intel_vgpu *vgpu,
unsigned int offset, void *p_data, unsigned int bytes)
{
u32 reg_val;
u32 sticky_mask;
reg_val = *((u32 *)p_data);
sticky_mask = GENMASK(27, 26) | (1 << 24);
vgpu_vreg(vgpu, offset) = (reg_val & ~sticky_mask) |
(vgpu_vreg(vgpu, offset) & sticky_mask);
vgpu_vreg(vgpu, offset) &= ~(reg_val & sticky_mask);
return 0;
}
static int pch_adpa_mmio_write(struct intel_vgpu *vgpu,
unsigned int offset, void *p_data, unsigned int bytes)
{
u32 data;
write_vreg(vgpu, offset, p_data, bytes);
data = vgpu_vreg(vgpu, offset);
if (data & ADPA_CRT_HOTPLUG_FORCE_TRIGGER)
vgpu_vreg(vgpu, offset) &= ~ADPA_CRT_HOTPLUG_FORCE_TRIGGER;
return 0;
}
static int south_chicken2_mmio_write(struct intel_vgpu *vgpu,
unsigned int offset, void *p_data, unsigned int bytes)
{
u32 data;
write_vreg(vgpu, offset, p_data, bytes);
data = vgpu_vreg(vgpu, offset);
if (data & FDI_MPHY_IOSFSB_RESET_CTL)
vgpu_vreg(vgpu, offset) |= FDI_MPHY_IOSFSB_RESET_STATUS;
else
vgpu_vreg(vgpu, offset) &= ~FDI_MPHY_IOSFSB_RESET_STATUS;
return 0;
}
#define DSPSURF_TO_PIPE(offset) \
calc_index(offset, _DSPASURF, _DSPBSURF, 0, DSPSURF(PIPE_C))
static int pri_surf_mmio_write(struct intel_vgpu *vgpu, unsigned int offset,
void *p_data, unsigned int bytes)
{
struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv;
unsigned int index = DSPSURF_TO_PIPE(offset);
i915_reg_t surflive_reg = DSPSURFLIVE(index);
int flip_event[] = {
[PIPE_A] = PRIMARY_A_FLIP_DONE,
[PIPE_B] = PRIMARY_B_FLIP_DONE,
[PIPE_C] = PRIMARY_C_FLIP_DONE,
};
write_vreg(vgpu, offset, p_data, bytes);
vgpu_vreg(vgpu, surflive_reg) = vgpu_vreg(vgpu, offset);
set_bit(flip_event[index], vgpu->irq.flip_done_event[index]);
return 0;
}
#define SPRSURF_TO_PIPE(offset) \
calc_index(offset, _SPRA_SURF, _SPRB_SURF, 0, SPRSURF(PIPE_C))
static int spr_surf_mmio_write(struct intel_vgpu *vgpu, unsigned int offset,
void *p_data, unsigned int bytes)
{
unsigned int index = SPRSURF_TO_PIPE(offset);
i915_reg_t surflive_reg = SPRSURFLIVE(index);
int flip_event[] = {
[PIPE_A] = SPRITE_A_FLIP_DONE,
[PIPE_B] = SPRITE_B_FLIP_DONE,
[PIPE_C] = SPRITE_C_FLIP_DONE,
};
write_vreg(vgpu, offset, p_data, bytes);
vgpu_vreg(vgpu, surflive_reg) = vgpu_vreg(vgpu, offset);
set_bit(flip_event[index], vgpu->irq.flip_done_event[index]);
return 0;
}
static int trigger_aux_channel_interrupt(struct intel_vgpu *vgpu,
unsigned int reg)
{
struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv;
enum intel_gvt_event_type event;
if (reg == _DPA_AUX_CH_CTL)
event = AUX_CHANNEL_A;
else if (reg == _PCH_DPB_AUX_CH_CTL || reg == _DPB_AUX_CH_CTL)
event = AUX_CHANNEL_B;
else if (reg == _PCH_DPC_AUX_CH_CTL || reg == _DPC_AUX_CH_CTL)
event = AUX_CHANNEL_C;
else if (reg == _PCH_DPD_AUX_CH_CTL || reg == _DPD_AUX_CH_CTL)
event = AUX_CHANNEL_D;
else {
WARN_ON(true);
return -EINVAL;
}
intel_vgpu_trigger_virtual_event(vgpu, event);
return 0;
}
static int dp_aux_ch_ctl_trans_done(struct intel_vgpu *vgpu, u32 value,
unsigned int reg, int len, bool data_valid)
{
/* mark transaction done */
value |= DP_AUX_CH_CTL_DONE;
value &= ~DP_AUX_CH_CTL_SEND_BUSY;
value &= ~DP_AUX_CH_CTL_RECEIVE_ERROR;
if (data_valid)
value &= ~DP_AUX_CH_CTL_TIME_OUT_ERROR;
else
value |= DP_AUX_CH_CTL_TIME_OUT_ERROR;
/* message size */
value &= ~(0xf << 20);
value |= (len << 20);
vgpu_vreg(vgpu, reg) = value;
if (value & DP_AUX_CH_CTL_INTERRUPT)
return trigger_aux_channel_interrupt(vgpu, reg);
return 0;
}
static void dp_aux_ch_ctl_link_training(struct intel_vgpu_dpcd_data *dpcd,
uint8_t t)
{
if ((t & DPCD_TRAINING_PATTERN_SET_MASK) == DPCD_TRAINING_PATTERN_1) {
/* training pattern 1 for CR */
/* set LANE0_CR_DONE, LANE1_CR_DONE */
dpcd->data[DPCD_LANE0_1_STATUS] |= DPCD_LANES_CR_DONE;
/* set LANE2_CR_DONE, LANE3_CR_DONE */
dpcd->data[DPCD_LANE2_3_STATUS] |= DPCD_LANES_CR_DONE;
} else if ((t & DPCD_TRAINING_PATTERN_SET_MASK) ==
DPCD_TRAINING_PATTERN_2) {
/* training pattern 2 for EQ */
/* Set CHANNEL_EQ_DONE and SYMBOL_LOCKED for Lane0_1 */
dpcd->data[DPCD_LANE0_1_STATUS] |= DPCD_LANES_EQ_DONE;
dpcd->data[DPCD_LANE0_1_STATUS] |= DPCD_SYMBOL_LOCKED;
/* Set CHANNEL_EQ_DONE and SYMBOL_LOCKED for Lane2_3 */
dpcd->data[DPCD_LANE2_3_STATUS] |= DPCD_LANES_EQ_DONE;
dpcd->data[DPCD_LANE2_3_STATUS] |= DPCD_SYMBOL_LOCKED;
/* set INTERLANE_ALIGN_DONE */
dpcd->data[DPCD_LANE_ALIGN_STATUS_UPDATED] |=
DPCD_INTERLANE_ALIGN_DONE;
} else if ((t & DPCD_TRAINING_PATTERN_SET_MASK) ==
DPCD_LINK_TRAINING_DISABLED) {
/* finish link training */
/* set sink status as synchronized */
dpcd->data[DPCD_SINK_STATUS] = DPCD_SINK_IN_SYNC;
}
}
#define _REG_HSW_DP_AUX_CH_CTL(dp) \
((dp) ? (_PCH_DPB_AUX_CH_CTL + ((dp)-1)*0x100) : 0x64010)
#define _REG_SKL_DP_AUX_CH_CTL(dp) (0x64010 + (dp) * 0x100)
#define OFFSET_TO_DP_AUX_PORT(offset) (((offset) & 0xF00) >> 8)
#define dpy_is_valid_port(port) \
(((port) >= PORT_A) && ((port) < I915_MAX_PORTS))
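/*
* AUX header layout as decoded below from the first data register
* (offset + 4): bits 31:24 control (opcode in the upper nibble),
* bits 23:8 DPCD address, bits 7:0 length, where "length" follows the DP AUX
* convention of transfer size minus one.
*/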
static int dp_aux_ch_ctl_mmio_write(struct intel_vgpu *vgpu,
unsigned int offset, void *p_data, unsigned int bytes)
{
struct intel_vgpu_display *display = &vgpu->display;
int msg, addr, ctrl, op, len;
int port_index = OFFSET_TO_DP_AUX_PORT(offset);
struct intel_vgpu_dpcd_data *dpcd = NULL;
struct intel_vgpu_port *port = NULL;
u32 data;
if (!dpy_is_valid_port(port_index)) {
gvt_err("GVT(%d): Unsupported DP port access!\n", vgpu->id);
return 0;
}
write_vreg(vgpu, offset, p_data, bytes);
data = vgpu_vreg(vgpu, offset);
if (IS_SKYLAKE(vgpu->gvt->dev_priv) &&
offset != _REG_SKL_DP_AUX_CH_CTL(port_index)) {
/* SKL DPB/C/D aux ctl register changed */
return 0;
} else if (IS_BROADWELL(vgpu->gvt->dev_priv) &&
offset != _REG_HSW_DP_AUX_CH_CTL(port_index)) {
/* write to the data registers */
return 0;
}
if (!(data & DP_AUX_CH_CTL_SEND_BUSY)) {
/* just want to clear the sticky bits */
vgpu_vreg(vgpu, offset) = 0;
return 0;
}
port = &display->ports[port_index];
dpcd = port->dpcd;
/* read out message from DATA1 register */
msg = vgpu_vreg(vgpu, offset + 4);
addr = (msg >> 8) & 0xffff;
ctrl = (msg >> 24) & 0xff;
len = msg & 0xff;
op = ctrl >> 4;
if (op == GVT_AUX_NATIVE_WRITE) {
int t;
uint8_t buf[16];
if ((addr + len + 1) >= DPCD_SIZE) {
/*
* Write request exceeds what we support;
* DPCD spec: When a Source Device is writing a DPCD
* address not supported by the Sink Device, the Sink
* Device shall reply with AUX NACK and “M” equal to
* zero.
*/
/* NAK the write */
vgpu_vreg(vgpu, offset + 4) = AUX_NATIVE_REPLY_NAK;
dp_aux_ch_ctl_trans_done(vgpu, data, offset, 2, true);
return 0;
}
/*
* Write request format: (command + address) occupies
* 3 bytes, followed by (len + 1) bytes of data.
*/
if (WARN_ON((len + 4) > AUX_BURST_SIZE))
return -EINVAL;
/* unpack data from vreg to buf */
for (t = 0; t < 4; t++) {
u32 r = vgpu_vreg(vgpu, offset + 8 + t * 4);
buf[t * 4] = (r >> 24) & 0xff;
buf[t * 4 + 1] = (r >> 16) & 0xff;
buf[t * 4 + 2] = (r >> 8) & 0xff;
buf[t * 4 + 3] = r & 0xff;
}
/* write to virtual DPCD */
if (dpcd && dpcd->data_valid) {
for (t = 0; t <= len; t++) {
int p = addr + t;
dpcd->data[p] = buf[t];
/* check for link training */
if (p == DPCD_TRAINING_PATTERN_SET)
dp_aux_ch_ctl_link_training(dpcd,
buf[t]);
}
}
/* ACK the write */
vgpu_vreg(vgpu, offset + 4) = 0;
dp_aux_ch_ctl_trans_done(vgpu, data, offset, 1,
dpcd && dpcd->data_valid);
return 0;
}
if (op == GVT_AUX_NATIVE_READ) {
int idx, i, ret = 0;
if ((addr + len + 1) >= DPCD_SIZE) {
/*
* read request exceeds what we support;
* DPCD spec: A Sink Device receiving a Native AUX CH
* read request for an unsupported DPCD address must
* reply with an AUX ACK and read data set equal to
* zero instead of replying with AUX NACK.
*/
/* ACK the READ*/
vgpu_vreg(vgpu, offset + 4) = 0;
vgpu_vreg(vgpu, offset + 8) = 0;
vgpu_vreg(vgpu, offset + 12) = 0;
vgpu_vreg(vgpu, offset + 16) = 0;
vgpu_vreg(vgpu, offset + 20) = 0;
dp_aux_ch_ctl_trans_done(vgpu, data, offset, len + 2,
true);
return 0;
}
for (idx = 1; idx <= 5; idx++) {
/* clear the data registers */
vgpu_vreg(vgpu, offset + 4 * idx) = 0;
}
/*
* Read reply format: ACK (1 byte) plus (len + 1) bytes of data.
*/
if (WARN_ON((len + 2) > AUX_BURST_SIZE))
return -EINVAL;
/* read from virtual DPCD to vreg */
/* first 4 bytes: [ACK][addr][addr+1][addr+2] */
if (dpcd && dpcd->data_valid) {
for (i = 1; i <= (len + 1); i++) {
int t;
t = dpcd->data[addr + i - 1];
t <<= (24 - 8 * (i % 4));
ret |= t;
if ((i % 4 == 3) || (i == (len + 1))) {
vgpu_vreg(vgpu, offset +
(i / 4 + 1) * 4) = ret;
ret = 0;
}
}
}
dp_aux_ch_ctl_trans_done(vgpu, data, offset, len + 2,
dpcd && dpcd->data_valid);
return 0;
}
/* i2c transaction starts */
intel_gvt_i2c_handle_aux_ch_write(vgpu, port_index, offset, p_data);
if (data & DP_AUX_CH_CTL_INTERRUPT)
trigger_aux_channel_interrupt(vgpu, offset);
return 0;
}
static int vga_control_mmio_write(struct intel_vgpu *vgpu, unsigned int offset,
void *p_data, unsigned int bytes)
{
bool vga_disable;
write_vreg(vgpu, offset, p_data, bytes);
vga_disable = vgpu_vreg(vgpu, offset) & VGA_DISP_DISABLE;
gvt_dbg_core("vgpu%d: %s VGA mode\n", vgpu->id,
vga_disable ? "Disable" : "Enable");
return 0;
}
static u32 read_virtual_sbi_register(struct intel_vgpu *vgpu,
unsigned int sbi_offset)
{
struct intel_vgpu_display *display = &vgpu->display;
int num = display->sbi.number;
int i;
for (i = 0; i < num; ++i)
if (display->sbi.registers[i].offset == sbi_offset)
break;
if (i == num)
return 0;
return display->sbi.registers[i].value;
}
static void write_virtual_sbi_register(struct intel_vgpu *vgpu,
unsigned int offset, u32 value)
{
struct intel_vgpu_display *display = &vgpu->display;
int num = display->sbi.number;
int i;
for (i = 0; i < num; ++i) {
if (display->sbi.registers[i].offset == offset)
break;
}
if (i == num) {
if (num == SBI_REG_MAX) {
gvt_err("vgpu%d: SBI caching reached the maximum limit\n",
vgpu->id);
return;
}
display->sbi.number++;
}
display->sbi.registers[i].offset = offset;
display->sbi.registers[i].value = value;
}
static int sbi_data_mmio_read(struct intel_vgpu *vgpu, unsigned int offset,
void *p_data, unsigned int bytes)
{
if (((vgpu_vreg(vgpu, SBI_CTL_STAT) & SBI_OPCODE_MASK) >>
SBI_OPCODE_SHIFT) == SBI_CMD_CRRD) {
unsigned int sbi_offset = (vgpu_vreg(vgpu, SBI_ADDR) &
SBI_ADDR_OFFSET_MASK) >> SBI_ADDR_OFFSET_SHIFT;
vgpu_vreg(vgpu, offset) = read_virtual_sbi_register(vgpu,
sbi_offset);
}
read_vreg(vgpu, offset, p_data, bytes);
return 0;
}
static int sbi_ctl_mmio_write(struct intel_vgpu *vgpu, unsigned int offset,
void *p_data, unsigned int bytes)
{
u32 data;
write_vreg(vgpu, offset, p_data, bytes);
data = vgpu_vreg(vgpu, offset);
data &= ~(SBI_STAT_MASK << SBI_STAT_SHIFT);
data |= SBI_READY;
data &= ~(SBI_RESPONSE_MASK << SBI_RESPONSE_SHIFT);
data |= SBI_RESPONSE_SUCCESS;
vgpu_vreg(vgpu, offset) = data;
if (((vgpu_vreg(vgpu, SBI_CTL_STAT) & SBI_OPCODE_MASK) >>
SBI_OPCODE_SHIFT) == SBI_CMD_CRWR) {
unsigned int sbi_offset = (vgpu_vreg(vgpu, SBI_ADDR) &
SBI_ADDR_OFFSET_MASK) >> SBI_ADDR_OFFSET_SHIFT;
write_virtual_sbi_register(vgpu, sbi_offset,
vgpu_vreg(vgpu, SBI_DATA));
}
return 0;
}
#define _vgtif_reg(x) \
(VGT_PVINFO_PAGE + offsetof(struct vgt_if, x))
static int pvinfo_mmio_read(struct intel_vgpu *vgpu, unsigned int offset,
void *p_data, unsigned int bytes)
{
bool invalid_read = false;
read_vreg(vgpu, offset, p_data, bytes);
switch (offset) {
case _vgtif_reg(magic) ... _vgtif_reg(vgt_id):
if (offset + bytes > _vgtif_reg(vgt_id) + 4)
invalid_read = true;
break;
case _vgtif_reg(avail_rs.mappable_gmadr.base) ...
_vgtif_reg(avail_rs.fence_num):
if (offset + bytes >
_vgtif_reg(avail_rs.fence_num) + 4)
invalid_read = true;
break;
case 0x78010: /* vgt_caps */
case 0x7881c:
break;
default:
invalid_read = true;
break;
}
if (invalid_read)
gvt_err("invalid pvinfo read: [%x:%x] = %x\n",
offset, bytes, *(u32 *)p_data);
return 0;
}
static int handle_g2v_notification(struct intel_vgpu *vgpu, int notification)
{
int ret = 0;
switch (notification) {
case VGT_G2V_PPGTT_L3_PAGE_TABLE_CREATE:
ret = intel_vgpu_g2v_create_ppgtt_mm(vgpu, 3);
break;
case VGT_G2V_PPGTT_L3_PAGE_TABLE_DESTROY:
ret = intel_vgpu_g2v_destroy_ppgtt_mm(vgpu, 3);
break;
case VGT_G2V_PPGTT_L4_PAGE_TABLE_CREATE:
ret = intel_vgpu_g2v_create_ppgtt_mm(vgpu, 4);
break;
case VGT_G2V_PPGTT_L4_PAGE_TABLE_DESTROY:
ret = intel_vgpu_g2v_destroy_ppgtt_mm(vgpu, 4);
break;
case VGT_G2V_EXECLIST_CONTEXT_CREATE:
case VGT_G2V_EXECLIST_CONTEXT_DESTROY:
case 1: /* Remove this in guest driver. */
break;
default:
gvt_err("Invalid PV notification %d\n", notification);
}
return ret;
}
static int send_display_ready_uevent(struct intel_vgpu *vgpu, int ready)
{
struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv;
struct kobject *kobj = &dev_priv->drm.primary->kdev->kobj;
char *env[3] = {NULL, NULL, NULL};
char vmid_str[20];
char display_ready_str[20];
snprintf(display_ready_str, 20, "GVT_DISPLAY_READY=%d\n", ready);
env[0] = display_ready_str;
snprintf(vmid_str, 20, "VMID=%d", vgpu->id);
env[1] = vmid_str;
return kobject_uevent_env(kobj, KOBJ_ADD, env);
}
static int pvinfo_mmio_write(struct intel_vgpu *vgpu, unsigned int offset,
void *p_data, unsigned int bytes)
{
u32 data;
int ret;
write_vreg(vgpu, offset, p_data, bytes);
data = vgpu_vreg(vgpu, offset);
switch (offset) {
case _vgtif_reg(display_ready):
send_display_ready_uevent(vgpu, data ? 1 : 0);
break;
case _vgtif_reg(g2v_notify):
ret = handle_g2v_notification(vgpu, data);
break;
/* add xhot and yhot to handled list to avoid error log */
case 0x78830:
case 0x78834:
case _vgtif_reg(pdp[0].lo):
case _vgtif_reg(pdp[0].hi):
case _vgtif_reg(pdp[1].lo):
case _vgtif_reg(pdp[1].hi):
case _vgtif_reg(pdp[2].lo):
case _vgtif_reg(pdp[2].hi):
case _vgtif_reg(pdp[3].lo):
case _vgtif_reg(pdp[3].hi):
case _vgtif_reg(execlist_context_descriptor_lo):
case _vgtif_reg(execlist_context_descriptor_hi):
break;
default:
gvt_err("invalid pvinfo write offset %x bytes %x data %x\n",
offset, bytes, data);
break;
}
return 0;
}
static int pf_write(struct intel_vgpu *vgpu,
unsigned int offset, void *p_data, unsigned int bytes)
{
u32 val = *(u32 *)p_data;
if ((offset == _PS_1A_CTRL || offset == _PS_2A_CTRL ||
offset == _PS_1B_CTRL || offset == _PS_2B_CTRL ||
offset == _PS_1C_CTRL) && (val & PS_PLANE_SEL_MASK) != 0) {
WARN_ONCE(true, "VM(%d): guest is trying to scale a plane\n",
vgpu->id);
return 0;
}
return intel_vgpu_default_mmio_write(vgpu, offset, p_data, bytes);
}
static int power_well_ctl_mmio_write(struct intel_vgpu *vgpu,
unsigned int offset, void *p_data, unsigned int bytes)
{
write_vreg(vgpu, offset, p_data, bytes);
if (vgpu_vreg(vgpu, offset) & HSW_PWR_WELL_ENABLE_REQUEST)
vgpu_vreg(vgpu, offset) |= HSW_PWR_WELL_STATE_ENABLED;
else
vgpu_vreg(vgpu, offset) &= ~HSW_PWR_WELL_STATE_ENABLED;
return 0;
}
static int fpga_dbg_mmio_write(struct intel_vgpu *vgpu,
unsigned int offset, void *p_data, unsigned int bytes)
{
write_vreg(vgpu, offset, p_data, bytes);
if (vgpu_vreg(vgpu, offset) & FPGA_DBG_RM_NOCLAIM)
vgpu_vreg(vgpu, offset) &= ~FPGA_DBG_RM_NOCLAIM;
return 0;
}
static int dma_ctrl_write(struct intel_vgpu *vgpu, unsigned int offset,
void *p_data, unsigned int bytes)
{
u32 mode = *(u32 *)p_data;
if (GFX_MODE_BIT_SET_IN_MASK(mode, START_DMA)) {
WARN_ONCE(1, "VM(%d): iGVT-g doesn't support GuC\n",
vgpu->id);
return 0;
}
return 0;
}
static int gen9_trtte_write(struct intel_vgpu *vgpu, unsigned int offset,
void *p_data, unsigned int bytes)
{
struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv;
u32 trtte = *(u32 *)p_data;
if ((trtte & 1) && (trtte & (1 << 1)) == 0) {
WARN(1, "VM(%d): Use physical address for TRTT!\n",
vgpu->id);
return -EINVAL;
}
write_vreg(vgpu, offset, p_data, bytes);
/* TRTTE is not per-context */
I915_WRITE(_MMIO(offset), vgpu_vreg(vgpu, offset));
return 0;
}
static int gen9_trtt_chicken_write(struct intel_vgpu *vgpu, unsigned int offset,
void *p_data, unsigned int bytes)
{
struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv;
u32 val = *(u32 *)p_data;
if (val & 1) {
/* unblock hw logic */
I915_WRITE(_MMIO(offset), val);
}
write_vreg(vgpu, offset, p_data, bytes);
return 0;
}
static int dpll_status_read(struct intel_vgpu *vgpu, unsigned int offset,
void *p_data, unsigned int bytes)
{
u32 v = 0;
if (vgpu_vreg(vgpu, 0x46010) & (1 << 31))
v |= (1 << 0);
if (vgpu_vreg(vgpu, 0x46014) & (1 << 31))
v |= (1 << 8);
if (vgpu_vreg(vgpu, 0x46040) & (1 << 31))
v |= (1 << 16);
if (vgpu_vreg(vgpu, 0x46060) & (1 << 31))
v |= (1 << 24);
vgpu_vreg(vgpu, offset) = v;
return intel_vgpu_default_mmio_read(vgpu, offset, p_data, bytes);
}
static int mailbox_write(struct intel_vgpu *vgpu, unsigned int offset,
void *p_data, unsigned int bytes)
{
u32 value = *(u32 *)p_data;
u32 cmd = value & 0xff;
u32 *data0 = &vgpu_vreg(vgpu, GEN6_PCODE_DATA);
switch (cmd) {
case 0x6:
/**
* "Read memory latency" command on gen9.
* Below memory latency values are read
* from skylake platform.
*/
if (!*data0)
*data0 = 0x1e1a1100;
else
*data0 = 0x61514b3d;
break;
case 0x5:
*data0 |= 0x1;
break;
}
gvt_dbg_core("VM(%d) write %x to mailbox, return data0 %x\n",
vgpu->id, value, *data0);
value &= ~(1 << 31);
return intel_vgpu_default_mmio_write(vgpu, offset, &value, bytes);
}
static int skl_power_well_ctl_write(struct intel_vgpu *vgpu,
unsigned int offset, void *p_data, unsigned int bytes)
{
u32 v = *(u32 *)p_data;
v &= (1 << 31) | (1 << 29) | (1 << 9) |
(1 << 7) | (1 << 5) | (1 << 3) | (1 << 1);
v |= (v >> 1);
return intel_vgpu_default_mmio_write(vgpu, offset, &v, bytes);
}
static int skl_misc_ctl_write(struct intel_vgpu *vgpu, unsigned int offset,
void *p_data, unsigned int bytes)
{
struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv;
i915_reg_t reg = {.reg = offset};
switch (offset) {
case 0x4ddc:
vgpu_vreg(vgpu, offset) = 0x8000003c;
break;
case 0x42080:
vgpu_vreg(vgpu, offset) = 0x8000;
break;
default:
return -EINVAL;
}
/**
* TODO: need to detect the stepping info once gvt carries such information;
* 0x4ddc enabled after C0, 0x42080 enabled after E0.
*/
I915_WRITE(reg, vgpu_vreg(vgpu, offset));
return 0;
}
static int skl_lcpll_write(struct intel_vgpu *vgpu, unsigned int offset,
void *p_data, unsigned int bytes)
{
u32 v = *(u32 *)p_data;
/* other bits are MBZ. */
v &= (1 << 31) | (1 << 30);
v & (1 << 31) ? (v |= (1 << 30)) : (v &= ~(1 << 30));
vgpu_vreg(vgpu, offset) = v;
return 0;
}
static int ring_timestamp_mmio_read(struct intel_vgpu *vgpu,
unsigned int offset, void *p_data, unsigned int bytes)
{
struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv;
vgpu_vreg(vgpu, offset) = I915_READ(_MMIO(offset));
return intel_vgpu_default_mmio_read(vgpu, offset, p_data, bytes);
}
static int elsp_mmio_write(struct intel_vgpu *vgpu, unsigned int offset,
void *p_data, unsigned int bytes)
{
int ring_id = render_mmio_to_ring_id(vgpu->gvt, offset);
struct intel_vgpu_execlist *execlist;
u32 data = *(u32 *)p_data;
int ret;
if (WARN_ON(ring_id < 0))
return -EINVAL;
execlist = &vgpu->execlist[ring_id];
execlist->elsp_dwords.data[execlist->elsp_dwords.index] = data;
if (execlist->elsp_dwords.index == 3)
ret = intel_vgpu_submit_execlist(vgpu, ring_id);
++execlist->elsp_dwords.index;
execlist->elsp_dwords.index &= 0x3;
return 0;
}
static int ring_mode_mmio_write(struct intel_vgpu *vgpu, unsigned int offset,
void *p_data, unsigned int bytes)
{
u32 data = *(u32 *)p_data;
int ring_id = render_mmio_to_ring_id(vgpu->gvt, offset);
bool enable_execlist;
write_vreg(vgpu, offset, p_data, bytes);
if ((data & _MASKED_BIT_ENABLE(GFX_RUN_LIST_ENABLE))
|| (data & _MASKED_BIT_DISABLE(GFX_RUN_LIST_ENABLE))) {
enable_execlist = !!(data & GFX_RUN_LIST_ENABLE);
gvt_dbg_core("EXECLIST %s on ring %d\n",
(enable_execlist ? "enabling" : "disabling"),
ring_id);
if (enable_execlist)
intel_vgpu_start_schedule(vgpu);
}
return 0;
}
static int gvt_reg_tlb_control_handler(struct intel_vgpu *vgpu,
unsigned int offset, void *p_data, unsigned int bytes)
{
int rc = 0;
unsigned int id = 0;
switch (offset) {
case 0x4260:
id = RCS;
break;
case 0x4264:
id = VCS;
break;
case 0x4268:
id = VCS2;
break;
case 0x426c:
id = BCS;
break;
case 0x4270:
id = VECS;
break;
default:
rc = -EINVAL;
break;
}
set_bit(id, (void *)vgpu->tlb_handle_pending);
return rc;
}
#define MMIO_F(reg, s, f, am, rm, d, r, w) do { \
ret = new_mmio_info(gvt, INTEL_GVT_MMIO_OFFSET(reg), \
f, s, am, rm, d, r, w); \
if (ret) \
return ret; \
} while (0)
#define MMIO_D(reg, d) \
MMIO_F(reg, 4, 0, 0, 0, d, NULL, NULL)
#define MMIO_DH(reg, d, r, w) \
MMIO_F(reg, 4, 0, 0, 0, d, r, w)
#define MMIO_DFH(reg, d, f, r, w) \
MMIO_F(reg, 4, f, 0, 0, d, r, w)
#define MMIO_GM(reg, d, r, w) \
MMIO_F(reg, 4, F_GMADR, 0xFFFFF000, 0, d, r, w)
#define MMIO_RO(reg, d, f, rm, r, w) \
MMIO_F(reg, 4, F_RO | f, 0, rm, d, r, w)
#define MMIO_RING_F(prefix, s, f, am, rm, d, r, w) do { \
MMIO_F(prefix(RENDER_RING_BASE), s, f, am, rm, d, r, w); \
MMIO_F(prefix(BLT_RING_BASE), s, f, am, rm, d, r, w); \
MMIO_F(prefix(GEN6_BSD_RING_BASE), s, f, am, rm, d, r, w); \
MMIO_F(prefix(VEBOX_RING_BASE), s, f, am, rm, d, r, w); \
} while (0)
#define MMIO_RING_D(prefix, d) \
MMIO_RING_F(prefix, 4, 0, 0, 0, d, NULL, NULL)
#define MMIO_RING_DFH(prefix, d, f, r, w) \
MMIO_RING_F(prefix, 4, f, 0, 0, d, r, w)
#define MMIO_RING_GM(prefix, d, r, w) \
MMIO_RING_F(prefix, 4, F_GMADR, 0xFFFF0000, 0, d, r, w)
#define MMIO_RING_RO(prefix, d, f, rm, r, w) \
MMIO_RING_F(prefix, 4, F_RO | f, 0, rm, d, r, w)
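/*
* Parameter order for the MMIO_* registration helpers, following the MMIO_F
* expansion above: register, size in bytes, flags (F_*), address mask,
* read-only mask, device mask (D_*), read handler, write handler; NULL
* handlers fall back to the defaults set in new_mmio_info().
*/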
static int init_generic_mmio_info(struct intel_gvt *gvt)
{
struct drm_i915_private *dev_priv = gvt->dev_priv;
int ret;
MMIO_RING_DFH(RING_IMR, D_ALL, 0, NULL, intel_vgpu_reg_imr_handler);
MMIO_DFH(SDEIMR, D_ALL, 0, NULL, intel_vgpu_reg_imr_handler);
MMIO_DFH(SDEIER, D_ALL, 0, NULL, intel_vgpu_reg_ier_handler);
MMIO_DFH(SDEIIR, D_ALL, 0, NULL, intel_vgpu_reg_iir_handler);
MMIO_D(SDEISR, D_ALL);
MMIO_RING_D(RING_HWSTAM, D_ALL);
MMIO_GM(RENDER_HWS_PGA_GEN7, D_ALL, NULL, NULL);
MMIO_GM(BSD_HWS_PGA_GEN7, D_ALL, NULL, NULL);
MMIO_GM(BLT_HWS_PGA_GEN7, D_ALL, NULL, NULL);
MMIO_GM(VEBOX_HWS_PGA_GEN7, D_ALL, NULL, NULL);
#define RING_REG(base) (base + 0x28)
MMIO_RING_D(RING_REG, D_ALL);
#undef RING_REG
#define RING_REG(base) (base + 0x134)
MMIO_RING_D(RING_REG, D_ALL);
#undef RING_REG
MMIO_GM(0x2148, D_ALL, NULL, NULL);
MMIO_GM(CCID, D_ALL, NULL, NULL);
MMIO_GM(0x12198, D_ALL, NULL, NULL);
MMIO_D(GEN7_CXT_SIZE, D_ALL);
MMIO_RING_D(RING_TAIL, D_ALL);
MMIO_RING_D(RING_HEAD, D_ALL);
MMIO_RING_D(RING_CTL, D_ALL);
MMIO_RING_D(RING_ACTHD, D_ALL);
MMIO_RING_GM(RING_START, D_ALL, NULL, NULL);
/* RING MODE */
#define RING_REG(base) (base + 0x29c)
MMIO_RING_DFH(RING_REG, D_ALL, F_MODE_MASK, NULL, ring_mode_mmio_write);
#undef RING_REG
MMIO_RING_DFH(RING_MI_MODE, D_ALL, F_MODE_MASK, NULL, NULL);
MMIO_RING_DFH(RING_INSTPM, D_ALL, F_MODE_MASK, NULL, NULL);
MMIO_RING_DFH(RING_TIMESTAMP, D_ALL, F_CMD_ACCESS,
ring_timestamp_mmio_read, NULL);
MMIO_RING_DFH(RING_TIMESTAMP_UDW, D_ALL, F_CMD_ACCESS,
ring_timestamp_mmio_read, NULL);
MMIO_DFH(GEN7_GT_MODE, D_ALL, F_MODE_MASK, NULL, NULL);
MMIO_DFH(CACHE_MODE_0_GEN7, D_ALL, F_MODE_MASK, NULL, NULL);
MMIO_DFH(CACHE_MODE_1, D_ALL, F_MODE_MASK, NULL, NULL);
MMIO_DFH(0x20dc, D_ALL, F_MODE_MASK, NULL, NULL);
MMIO_DFH(_3D_CHICKEN3, D_ALL, F_MODE_MASK, NULL, NULL);
MMIO_DFH(0x2088, D_ALL, F_MODE_MASK, NULL, NULL);
MMIO_DFH(0x20e4, D_ALL, F_MODE_MASK, NULL, NULL);
MMIO_DFH(0x2470, D_ALL, F_MODE_MASK, NULL, NULL);
MMIO_D(GAM_ECOCHK, D_ALL);
MMIO_DFH(GEN7_COMMON_SLICE_CHICKEN1, D_ALL, F_MODE_MASK, NULL, NULL);
MMIO_DFH(COMMON_SLICE_CHICKEN2, D_ALL, F_MODE_MASK, NULL, NULL);
MMIO_D(0x9030, D_ALL);
MMIO_D(0x20a0, D_ALL);
MMIO_D(0x2420, D_ALL);
MMIO_D(0x2430, D_ALL);
MMIO_D(0x2434, D_ALL);
MMIO_D(0x2438, D_ALL);
MMIO_D(0x243c, D_ALL);
MMIO_DFH(0x7018, D_ALL, F_MODE_MASK, NULL, NULL);
MMIO_DFH(0xe184, D_ALL, F_MODE_MASK, NULL, NULL);
MMIO_DFH(0xe100, D_ALL, F_MODE_MASK, NULL, NULL);
/* display */
MMIO_F(0x60220, 0x20, 0, 0, 0, D_ALL, NULL, NULL);
MMIO_D(0x602a0, D_ALL);
MMIO_D(0x65050, D_ALL);
MMIO_D(0x650b4, D_ALL);
MMIO_D(0xc4040, D_ALL);
MMIO_D(DERRMR, D_ALL);
MMIO_D(PIPEDSL(PIPE_A), D_ALL);
MMIO_D(PIPEDSL(PIPE_B), D_ALL);
MMIO_D(PIPEDSL(PIPE_C), D_ALL);
MMIO_D(PIPEDSL(_PIPE_EDP), D_ALL);
MMIO_DH(PIPECONF(PIPE_A), D_ALL, NULL, pipeconf_mmio_write);
MMIO_DH(PIPECONF(PIPE_B), D_ALL, NULL, pipeconf_mmio_write);
MMIO_DH(PIPECONF(PIPE_C), D_ALL, NULL, pipeconf_mmio_write);
MMIO_DH(PIPECONF(_PIPE_EDP), D_ALL, NULL, pipeconf_mmio_write);
MMIO_D(PIPESTAT(PIPE_A), D_ALL);
MMIO_D(PIPESTAT(PIPE_B), D_ALL);
MMIO_D(PIPESTAT(PIPE_C), D_ALL);
MMIO_D(PIPESTAT(_PIPE_EDP), D_ALL);
MMIO_D(PIPE_FLIPCOUNT_G4X(PIPE_A), D_ALL);
MMIO_D(PIPE_FLIPCOUNT_G4X(PIPE_B), D_ALL);
MMIO_D(PIPE_FLIPCOUNT_G4X(PIPE_C), D_ALL);
MMIO_D(PIPE_FLIPCOUNT_G4X(_PIPE_EDP), D_ALL);
MMIO_D(PIPE_FRMCOUNT_G4X(PIPE_A), D_ALL);
MMIO_D(PIPE_FRMCOUNT_G4X(PIPE_B), D_ALL);
MMIO_D(PIPE_FRMCOUNT_G4X(PIPE_C), D_ALL);
MMIO_D(PIPE_FRMCOUNT_G4X(_PIPE_EDP), D_ALL);
MMIO_D(CURCNTR(PIPE_A), D_ALL);
MMIO_D(CURCNTR(PIPE_B), D_ALL);
MMIO_D(CURCNTR(PIPE_C), D_ALL);
MMIO_D(CURPOS(PIPE_A), D_ALL);
MMIO_D(CURPOS(PIPE_B), D_ALL);
MMIO_D(CURPOS(PIPE_C), D_ALL);
MMIO_D(CURBASE(PIPE_A), D_ALL);
MMIO_D(CURBASE(PIPE_B), D_ALL);
MMIO_D(CURBASE(PIPE_C), D_ALL);
MMIO_D(0x700ac, D_ALL);
MMIO_D(0x710ac, D_ALL);
MMIO_D(0x720ac, D_ALL);
MMIO_D(0x70090, D_ALL);
MMIO_D(0x70094, D_ALL);
MMIO_D(0x70098, D_ALL);
MMIO_D(0x7009c, D_ALL);
MMIO_D(DSPCNTR(PIPE_A), D_ALL);
MMIO_D(DSPADDR(PIPE_A), D_ALL);
MMIO_D(DSPSTRIDE(PIPE_A), D_ALL);
MMIO_D(DSPPOS(PIPE_A), D_ALL);
MMIO_D(DSPSIZE(PIPE_A), D_ALL);
MMIO_DH(DSPSURF(PIPE_A), D_ALL, NULL, pri_surf_mmio_write);
MMIO_D(DSPOFFSET(PIPE_A), D_ALL);
MMIO_D(DSPSURFLIVE(PIPE_A), D_ALL);
MMIO_D(DSPCNTR(PIPE_B), D_ALL);
MMIO_D(DSPADDR(PIPE_B), D_ALL);
MMIO_D(DSPSTRIDE(PIPE_B), D_ALL);
MMIO_D(DSPPOS(PIPE_B), D_ALL);
MMIO_D(DSPSIZE(PIPE_B), D_ALL);
MMIO_DH(DSPSURF(PIPE_B), D_ALL, NULL, pri_surf_mmio_write);
MMIO_D(DSPOFFSET(PIPE_B), D_ALL);
MMIO_D(DSPSURFLIVE(PIPE_B), D_ALL);
MMIO_D(DSPCNTR(PIPE_C), D_ALL);
MMIO_D(DSPADDR(PIPE_C), D_ALL);
MMIO_D(DSPSTRIDE(PIPE_C), D_ALL);
MMIO_D(DSPPOS(PIPE_C), D_ALL);
MMIO_D(DSPSIZE(PIPE_C), D_ALL);
MMIO_DH(DSPSURF(PIPE_C), D_ALL, NULL, pri_surf_mmio_write);
MMIO_D(DSPOFFSET(PIPE_C), D_ALL);
MMIO_D(DSPSURFLIVE(PIPE_C), D_ALL);
MMIO_D(SPRCTL(PIPE_A), D_ALL);
MMIO_D(SPRLINOFF(PIPE_A), D_ALL);
MMIO_D(SPRSTRIDE(PIPE_A), D_ALL);
MMIO_D(SPRPOS(PIPE_A), D_ALL);
MMIO_D(SPRSIZE(PIPE_A), D_ALL);
MMIO_D(SPRKEYVAL(PIPE_A), D_ALL);
MMIO_D(SPRKEYMSK(PIPE_A), D_ALL);
MMIO_DH(SPRSURF(PIPE_A), D_ALL, NULL, spr_surf_mmio_write);
MMIO_D(SPRKEYMAX(PIPE_A), D_ALL);
MMIO_D(SPROFFSET(PIPE_A), D_ALL);
MMIO_D(SPRSCALE(PIPE_A), D_ALL);
MMIO_D(SPRSURFLIVE(PIPE_A), D_ALL);
MMIO_D(SPRCTL(PIPE_B), D_ALL);
MMIO_D(SPRLINOFF(PIPE_B), D_ALL);
MMIO_D(SPRSTRIDE(PIPE_B), D_ALL);
MMIO_D(SPRPOS(PIPE_B), D_ALL);
MMIO_D(SPRSIZE(PIPE_B), D_ALL);
MMIO_D(SPRKEYVAL(PIPE_B), D_ALL);
MMIO_D(SPRKEYMSK(PIPE_B), D_ALL);
MMIO_DH(SPRSURF(PIPE_B), D_ALL, NULL, spr_surf_mmio_write);
MMIO_D(SPRKEYMAX(PIPE_B), D_ALL);
MMIO_D(SPROFFSET(PIPE_B), D_ALL);
MMIO_D(SPRSCALE(PIPE_B), D_ALL);
MMIO_D(SPRSURFLIVE(PIPE_B), D_ALL);
MMIO_D(SPRCTL(PIPE_C), D_ALL);
MMIO_D(SPRLINOFF(PIPE_C), D_ALL);
MMIO_D(SPRSTRIDE(PIPE_C), D_ALL);
MMIO_D(SPRPOS(PIPE_C), D_ALL);
MMIO_D(SPRSIZE(PIPE_C), D_ALL);
MMIO_D(SPRKEYVAL(PIPE_C), D_ALL);
MMIO_D(SPRKEYMSK(PIPE_C), D_ALL);
MMIO_DH(SPRSURF(PIPE_C), D_ALL, NULL, spr_surf_mmio_write);
MMIO_D(SPRKEYMAX(PIPE_C), D_ALL);
MMIO_D(SPROFFSET(PIPE_C), D_ALL);
MMIO_D(SPRSCALE(PIPE_C), D_ALL);
MMIO_D(SPRSURFLIVE(PIPE_C), D_ALL);
MMIO_F(LGC_PALETTE(PIPE_A, 0), 4 * 256, 0, 0, 0, D_ALL, NULL, NULL);
MMIO_F(LGC_PALETTE(PIPE_B, 0), 4 * 256, 0, 0, 0, D_ALL, NULL, NULL);
MMIO_F(LGC_PALETTE(PIPE_C, 0), 4 * 256, 0, 0, 0, D_ALL, NULL, NULL);
MMIO_D(HTOTAL(TRANSCODER_A), D_ALL);
MMIO_D(HBLANK(TRANSCODER_A), D_ALL);
MMIO_D(HSYNC(TRANSCODER_A), D_ALL);
MMIO_D(VTOTAL(TRANSCODER_A), D_ALL);
MMIO_D(VBLANK(TRANSCODER_A), D_ALL);
MMIO_D(VSYNC(TRANSCODER_A), D_ALL);
MMIO_D(BCLRPAT(TRANSCODER_A), D_ALL);
MMIO_D(VSYNCSHIFT(TRANSCODER_A), D_ALL);
MMIO_D(PIPESRC(TRANSCODER_A), D_ALL);
MMIO_D(HTOTAL(TRANSCODER_B), D_ALL);
MMIO_D(HBLANK(TRANSCODER_B), D_ALL);
MMIO_D(HSYNC(TRANSCODER_B), D_ALL);
MMIO_D(VTOTAL(TRANSCODER_B), D_ALL);
MMIO_D(VBLANK(TRANSCODER_B), D_ALL);
MMIO_D(VSYNC(TRANSCODER_B), D_ALL);
MMIO_D(BCLRPAT(TRANSCODER_B), D_ALL);
MMIO_D(VSYNCSHIFT(TRANSCODER_B), D_ALL);
MMIO_D(PIPESRC(TRANSCODER_B), D_ALL);
MMIO_D(HTOTAL(TRANSCODER_C), D_ALL);
MMIO_D(HBLANK(TRANSCODER_C), D_ALL);
MMIO_D(HSYNC(TRANSCODER_C), D_ALL);
MMIO_D(VTOTAL(TRANSCODER_C), D_ALL);
MMIO_D(VBLANK(TRANSCODER_C), D_ALL);
MMIO_D(VSYNC(TRANSCODER_C), D_ALL);
MMIO_D(BCLRPAT(TRANSCODER_C), D_ALL);
MMIO_D(VSYNCSHIFT(TRANSCODER_C), D_ALL);
MMIO_D(PIPESRC(TRANSCODER_C), D_ALL);
MMIO_D(HTOTAL(TRANSCODER_EDP), D_ALL);
MMIO_D(HBLANK(TRANSCODER_EDP), D_ALL);
MMIO_D(HSYNC(TRANSCODER_EDP), D_ALL);
MMIO_D(VTOTAL(TRANSCODER_EDP), D_ALL);
MMIO_D(VBLANK(TRANSCODER_EDP), D_ALL);
MMIO_D(VSYNC(TRANSCODER_EDP), D_ALL);
MMIO_D(BCLRPAT(TRANSCODER_EDP), D_ALL);
MMIO_D(VSYNCSHIFT(TRANSCODER_EDP), D_ALL);
MMIO_D(PIPE_DATA_M1(TRANSCODER_A), D_ALL);
MMIO_D(PIPE_DATA_N1(TRANSCODER_A), D_ALL);
MMIO_D(PIPE_DATA_M2(TRANSCODER_A), D_ALL);
MMIO_D(PIPE_DATA_N2(TRANSCODER_A), D_ALL);
MMIO_D(PIPE_LINK_M1(TRANSCODER_A), D_ALL);
MMIO_D(PIPE_LINK_N1(TRANSCODER_A), D_ALL);
MMIO_D(PIPE_LINK_M2(TRANSCODER_A), D_ALL);
MMIO_D(PIPE_LINK_N2(TRANSCODER_A), D_ALL);
MMIO_D(PIPE_DATA_M1(TRANSCODER_B), D_ALL);
MMIO_D(PIPE_DATA_N1(TRANSCODER_B), D_ALL);
MMIO_D(PIPE_DATA_M2(TRANSCODER_B), D_ALL);
MMIO_D(PIPE_DATA_N2(TRANSCODER_B), D_ALL);
MMIO_D(PIPE_LINK_M1(TRANSCODER_B), D_ALL);
MMIO_D(PIPE_LINK_N1(TRANSCODER_B), D_ALL);
MMIO_D(PIPE_LINK_M2(TRANSCODER_B), D_ALL);
MMIO_D(PIPE_LINK_N2(TRANSCODER_B), D_ALL);
MMIO_D(PIPE_DATA_M1(TRANSCODER_C), D_ALL);
MMIO_D(PIPE_DATA_N1(TRANSCODER_C), D_ALL);
MMIO_D(PIPE_DATA_M2(TRANSCODER_C), D_ALL);
MMIO_D(PIPE_DATA_N2(TRANSCODER_C), D_ALL);
MMIO_D(PIPE_LINK_M1(TRANSCODER_C), D_ALL);
MMIO_D(PIPE_LINK_N1(TRANSCODER_C), D_ALL);
MMIO_D(PIPE_LINK_M2(TRANSCODER_C), D_ALL);
MMIO_D(PIPE_LINK_N2(TRANSCODER_C), D_ALL);
MMIO_D(PIPE_DATA_M1(TRANSCODER_EDP), D_ALL);
MMIO_D(PIPE_DATA_N1(TRANSCODER_EDP), D_ALL);
MMIO_D(PIPE_DATA_M2(TRANSCODER_EDP), D_ALL);
MMIO_D(PIPE_DATA_N2(TRANSCODER_EDP), D_ALL);
MMIO_D(PIPE_LINK_M1(TRANSCODER_EDP), D_ALL);
MMIO_D(PIPE_LINK_N1(TRANSCODER_EDP), D_ALL);
MMIO_D(PIPE_LINK_M2(TRANSCODER_EDP), D_ALL);
MMIO_D(PIPE_LINK_N2(TRANSCODER_EDP), D_ALL);
MMIO_D(PF_CTL(PIPE_A), D_ALL);
MMIO_D(PF_WIN_SZ(PIPE_A), D_ALL);
MMIO_D(PF_WIN_POS(PIPE_A), D_ALL);
MMIO_D(PF_VSCALE(PIPE_A), D_ALL);
MMIO_D(PF_HSCALE(PIPE_A), D_ALL);
MMIO_D(PF_CTL(PIPE_B), D_ALL);
MMIO_D(PF_WIN_SZ(PIPE_B), D_ALL);
MMIO_D(PF_WIN_POS(PIPE_B), D_ALL);
MMIO_D(PF_VSCALE(PIPE_B), D_ALL);
MMIO_D(PF_HSCALE(PIPE_B), D_ALL);
MMIO_D(PF_CTL(PIPE_C), D_ALL);
MMIO_D(PF_WIN_SZ(PIPE_C), D_ALL);
MMIO_D(PF_WIN_POS(PIPE_C), D_ALL);
MMIO_D(PF_VSCALE(PIPE_C), D_ALL);
MMIO_D(PF_HSCALE(PIPE_C), D_ALL);
MMIO_D(WM0_PIPEA_ILK, D_ALL);
MMIO_D(WM0_PIPEB_ILK, D_ALL);
MMIO_D(WM0_PIPEC_IVB, D_ALL);
MMIO_D(WM1_LP_ILK, D_ALL);
MMIO_D(WM2_LP_ILK, D_ALL);
MMIO_D(WM3_LP_ILK, D_ALL);
MMIO_D(WM1S_LP_ILK, D_ALL);
MMIO_D(WM2S_LP_IVB, D_ALL);
MMIO_D(WM3S_LP_IVB, D_ALL);
MMIO_D(BLC_PWM_CPU_CTL2, D_ALL);
MMIO_D(BLC_PWM_CPU_CTL, D_ALL);
MMIO_D(BLC_PWM_PCH_CTL1, D_ALL);
MMIO_D(BLC_PWM_PCH_CTL2, D_ALL);
MMIO_D(0x48268, D_ALL);
MMIO_F(PCH_GMBUS0, 4 * 4, 0, 0, 0, D_ALL, gmbus_mmio_read,
gmbus_mmio_write);
MMIO_F(PCH_GPIOA, 6 * 4, F_UNALIGN, 0, 0, D_ALL, NULL, NULL);
MMIO_F(0xe4f00, 0x28, 0, 0, 0, D_ALL, NULL, NULL);
MMIO_F(_PCH_DPB_AUX_CH_CTL, 6 * 4, 0, 0, 0, D_PRE_SKL, NULL,
dp_aux_ch_ctl_mmio_write);
MMIO_F(_PCH_DPC_AUX_CH_CTL, 6 * 4, 0, 0, 0, D_PRE_SKL, NULL,
dp_aux_ch_ctl_mmio_write);
MMIO_F(_PCH_DPD_AUX_CH_CTL, 6 * 4, 0, 0, 0, D_PRE_SKL, NULL,
dp_aux_ch_ctl_mmio_write);
MMIO_RO(PCH_ADPA, D_ALL, 0, ADPA_CRT_HOTPLUG_MONITOR_MASK, NULL, pch_adpa_mmio_write);
MMIO_DH(_PCH_TRANSACONF, D_ALL, NULL, transconf_mmio_write);
MMIO_DH(_PCH_TRANSBCONF, D_ALL, NULL, transconf_mmio_write);
MMIO_DH(FDI_RX_IIR(PIPE_A), D_ALL, NULL, fdi_rx_iir_mmio_write);
MMIO_DH(FDI_RX_IIR(PIPE_B), D_ALL, NULL, fdi_rx_iir_mmio_write);
MMIO_DH(FDI_RX_IIR(PIPE_C), D_ALL, NULL, fdi_rx_iir_mmio_write);
MMIO_DH(FDI_RX_IMR(PIPE_A), D_ALL, NULL, update_fdi_rx_iir_status);
MMIO_DH(FDI_RX_IMR(PIPE_B), D_ALL, NULL, update_fdi_rx_iir_status);
MMIO_DH(FDI_RX_IMR(PIPE_C), D_ALL, NULL, update_fdi_rx_iir_status);
MMIO_DH(FDI_RX_CTL(PIPE_A), D_ALL, NULL, update_fdi_rx_iir_status);
MMIO_DH(FDI_RX_CTL(PIPE_B), D_ALL, NULL, update_fdi_rx_iir_status);
MMIO_DH(FDI_RX_CTL(PIPE_C), D_ALL, NULL, update_fdi_rx_iir_status);
MMIO_D(_PCH_TRANS_HTOTAL_A, D_ALL);
MMIO_D(_PCH_TRANS_HBLANK_A, D_ALL);
MMIO_D(_PCH_TRANS_HSYNC_A, D_ALL);
MMIO_D(_PCH_TRANS_VTOTAL_A, D_ALL);
MMIO_D(_PCH_TRANS_VBLANK_A, D_ALL);
MMIO_D(_PCH_TRANS_VSYNC_A, D_ALL);
MMIO_D(_PCH_TRANS_VSYNCSHIFT_A, D_ALL);
MMIO_D(_PCH_TRANS_HTOTAL_B, D_ALL);
MMIO_D(_PCH_TRANS_HBLANK_B, D_ALL);
MMIO_D(_PCH_TRANS_HSYNC_B, D_ALL);
MMIO_D(_PCH_TRANS_VTOTAL_B, D_ALL);
MMIO_D(_PCH_TRANS_VBLANK_B, D_ALL);
MMIO_D(_PCH_TRANS_VSYNC_B, D_ALL);
MMIO_D(_PCH_TRANS_VSYNCSHIFT_B, D_ALL);
MMIO_D(_PCH_TRANSA_DATA_M1, D_ALL);
MMIO_D(_PCH_TRANSA_DATA_N1, D_ALL);
MMIO_D(_PCH_TRANSA_DATA_M2, D_ALL);
MMIO_D(_PCH_TRANSA_DATA_N2, D_ALL);
MMIO_D(_PCH_TRANSA_LINK_M1, D_ALL);
MMIO_D(_PCH_TRANSA_LINK_N1, D_ALL);
MMIO_D(_PCH_TRANSA_LINK_M2, D_ALL);
MMIO_D(_PCH_TRANSA_LINK_N2, D_ALL);
MMIO_D(TRANS_DP_CTL(PIPE_A), D_ALL);
MMIO_D(TRANS_DP_CTL(PIPE_B), D_ALL);
MMIO_D(TRANS_DP_CTL(PIPE_C), D_ALL);
MMIO_D(TVIDEO_DIP_CTL(PIPE_A), D_ALL);
MMIO_D(TVIDEO_DIP_DATA(PIPE_A), D_ALL);
MMIO_D(TVIDEO_DIP_GCP(PIPE_A), D_ALL);
MMIO_D(TVIDEO_DIP_CTL(PIPE_B), D_ALL);
MMIO_D(TVIDEO_DIP_DATA(PIPE_B), D_ALL);
MMIO_D(TVIDEO_DIP_GCP(PIPE_B), D_ALL);
MMIO_D(TVIDEO_DIP_CTL(PIPE_C), D_ALL);
MMIO_D(TVIDEO_DIP_DATA(PIPE_C), D_ALL);
MMIO_D(TVIDEO_DIP_GCP(PIPE_C), D_ALL);
MMIO_D(_FDI_RXA_MISC, D_ALL);
MMIO_D(_FDI_RXB_MISC, D_ALL);
MMIO_D(_FDI_RXA_TUSIZE1, D_ALL);
MMIO_D(_FDI_RXA_TUSIZE2, D_ALL);
MMIO_D(_FDI_RXB_TUSIZE1, D_ALL);
MMIO_D(_FDI_RXB_TUSIZE2, D_ALL);
MMIO_DH(PCH_PP_CONTROL, D_ALL, NULL, pch_pp_control_mmio_write);
MMIO_D(PCH_PP_DIVISOR, D_ALL);
MMIO_D(PCH_PP_STATUS, D_ALL);
MMIO_D(PCH_LVDS, D_ALL);
MMIO_D(_PCH_DPLL_A, D_ALL);
MMIO_D(_PCH_DPLL_B, D_ALL);
MMIO_D(_PCH_FPA0, D_ALL);
MMIO_D(_PCH_FPA1, D_ALL);
MMIO_D(_PCH_FPB0, D_ALL);
MMIO_D(_PCH_FPB1, D_ALL);
MMIO_D(PCH_DREF_CONTROL, D_ALL);
MMIO_D(PCH_RAWCLK_FREQ, D_ALL);
MMIO_D(PCH_DPLL_SEL, D_ALL);
MMIO_D(0x61208, D_ALL);
MMIO_D(0x6120c, D_ALL);
MMIO_D(PCH_PP_ON_DELAYS, D_ALL);
MMIO_D(PCH_PP_OFF_DELAYS, D_ALL);
MMIO_DH(0xe651c, D_ALL, dpy_reg_mmio_read, NULL);
MMIO_DH(0xe661c, D_ALL, dpy_reg_mmio_read, NULL);
MMIO_DH(0xe671c, D_ALL, dpy_reg_mmio_read, NULL);
MMIO_DH(0xe681c, D_ALL, dpy_reg_mmio_read, NULL);
MMIO_DH(0xe6c04, D_ALL, dpy_reg_mmio_read_2, NULL);
MMIO_DH(0xe6e1c, D_ALL, dpy_reg_mmio_read_3, NULL);
MMIO_RO(PCH_PORT_HOTPLUG, D_ALL, 0,
PORTA_HOTPLUG_STATUS_MASK
| PORTB_HOTPLUG_STATUS_MASK
| PORTC_HOTPLUG_STATUS_MASK
| PORTD_HOTPLUG_STATUS_MASK,
NULL, NULL);
MMIO_DH(LCPLL_CTL, D_ALL, NULL, lcpll_ctl_mmio_write);
MMIO_D(FUSE_STRAP, D_ALL);
MMIO_D(DIGITAL_PORT_HOTPLUG_CNTRL, D_ALL);
MMIO_D(DISP_ARB_CTL, D_ALL);
MMIO_D(DISP_ARB_CTL2, D_ALL);
MMIO_D(ILK_DISPLAY_CHICKEN1, D_ALL);
MMIO_D(ILK_DISPLAY_CHICKEN2, D_ALL);
MMIO_D(ILK_DSPCLK_GATE_D, D_ALL);
MMIO_D(SOUTH_CHICKEN1, D_ALL);
MMIO_DH(SOUTH_CHICKEN2, D_ALL, NULL, south_chicken2_mmio_write);
MMIO_D(_TRANSA_CHICKEN1, D_ALL);
MMIO_D(_TRANSB_CHICKEN1, D_ALL);
MMIO_D(SOUTH_DSPCLK_GATE_D, D_ALL);
MMIO_D(_TRANSA_CHICKEN2, D_ALL);
MMIO_D(_TRANSB_CHICKEN2, D_ALL);
MMIO_D(ILK_DPFC_CB_BASE, D_ALL);
MMIO_D(ILK_DPFC_CONTROL, D_ALL);
MMIO_D(ILK_DPFC_RECOMP_CTL, D_ALL);
MMIO_D(ILK_DPFC_STATUS, D_ALL);
MMIO_D(ILK_DPFC_FENCE_YOFF, D_ALL);
MMIO_D(ILK_DPFC_CHICKEN, D_ALL);
MMIO_D(ILK_FBC_RT_BASE, D_ALL);
MMIO_D(IPS_CTL, D_ALL);
MMIO_D(PIPE_CSC_COEFF_RY_GY(PIPE_A), D_ALL);
MMIO_D(PIPE_CSC_COEFF_BY(PIPE_A), D_ALL);
MMIO_D(PIPE_CSC_COEFF_RU_GU(PIPE_A), D_ALL);
MMIO_D(PIPE_CSC_COEFF_BU(PIPE_A), D_ALL);
MMIO_D(PIPE_CSC_COEFF_RV_GV(PIPE_A), D_ALL);
MMIO_D(PIPE_CSC_COEFF_BV(PIPE_A), D_ALL);
MMIO_D(PIPE_CSC_MODE(PIPE_A), D_ALL);
MMIO_D(PIPE_CSC_PREOFF_HI(PIPE_A), D_ALL);
MMIO_D(PIPE_CSC_PREOFF_ME(PIPE_A), D_ALL);
MMIO_D(PIPE_CSC_PREOFF_LO(PIPE_A), D_ALL);
MMIO_D(PIPE_CSC_POSTOFF_HI(PIPE_A), D_ALL);
MMIO_D(PIPE_CSC_POSTOFF_ME(PIPE_A), D_ALL);
MMIO_D(PIPE_CSC_POSTOFF_LO(PIPE_A), D_ALL);
MMIO_D(PIPE_CSC_COEFF_RY_GY(PIPE_B), D_ALL);
MMIO_D(PIPE_CSC_COEFF_BY(PIPE_B), D_ALL);
MMIO_D(PIPE_CSC_COEFF_RU_GU(PIPE_B), D_ALL);
MMIO_D(PIPE_CSC_COEFF_BU(PIPE_B), D_ALL);
MMIO_D(PIPE_CSC_COEFF_RV_GV(PIPE_B), D_ALL);
MMIO_D(PIPE_CSC_COEFF_BV(PIPE_B), D_ALL);
MMIO_D(PIPE_CSC_MODE(PIPE_B), D_ALL);
MMIO_D(PIPE_CSC_PREOFF_HI(PIPE_B), D_ALL);
MMIO_D(PIPE_CSC_PREOFF_ME(PIPE_B), D_ALL);
MMIO_D(PIPE_CSC_PREOFF_LO(PIPE_B), D_ALL);
MMIO_D(PIPE_CSC_POSTOFF_HI(PIPE_B), D_ALL);
MMIO_D(PIPE_CSC_POSTOFF_ME(PIPE_B), D_ALL);
MMIO_D(PIPE_CSC_POSTOFF_LO(PIPE_B), D_ALL);
MMIO_D(PIPE_CSC_COEFF_RY_GY(PIPE_C), D_ALL);
MMIO_D(PIPE_CSC_COEFF_BY(PIPE_C), D_ALL);
MMIO_D(PIPE_CSC_COEFF_RU_GU(PIPE_C), D_ALL);
MMIO_D(PIPE_CSC_COEFF_BU(PIPE_C), D_ALL);
MMIO_D(PIPE_CSC_COEFF_RV_GV(PIPE_C), D_ALL);
MMIO_D(PIPE_CSC_COEFF_BV(PIPE_C), D_ALL);
MMIO_D(PIPE_CSC_MODE(PIPE_C), D_ALL);
MMIO_D(PIPE_CSC_PREOFF_HI(PIPE_C), D_ALL);
MMIO_D(PIPE_CSC_PREOFF_ME(PIPE_C), D_ALL);
MMIO_D(PIPE_CSC_PREOFF_LO(PIPE_C), D_ALL);
MMIO_D(PIPE_CSC_POSTOFF_HI(PIPE_C), D_ALL);
MMIO_D(PIPE_CSC_POSTOFF_ME(PIPE_C), D_ALL);
MMIO_D(PIPE_CSC_POSTOFF_LO(PIPE_C), D_ALL);
MMIO_D(PREC_PAL_INDEX(PIPE_A), D_ALL);
MMIO_D(PREC_PAL_DATA(PIPE_A), D_ALL);
MMIO_F(PREC_PAL_GC_MAX(PIPE_A, 0), 4 * 3, 0, 0, 0, D_ALL, NULL, NULL);
MMIO_D(PREC_PAL_INDEX(PIPE_B), D_ALL);
MMIO_D(PREC_PAL_DATA(PIPE_B), D_ALL);
MMIO_F(PREC_PAL_GC_MAX(PIPE_B, 0), 4 * 3, 0, 0, 0, D_ALL, NULL, NULL);
MMIO_D(PREC_PAL_INDEX(PIPE_C), D_ALL);
MMIO_D(PREC_PAL_DATA(PIPE_C), D_ALL);
MMIO_F(PREC_PAL_GC_MAX(PIPE_C, 0), 4 * 3, 0, 0, 0, D_ALL, NULL, NULL);
MMIO_D(0x60110, D_ALL);
MMIO_D(0x61110, D_ALL);
MMIO_F(0x70400, 0x40, 0, 0, 0, D_ALL, NULL, NULL);
MMIO_F(0x71400, 0x40, 0, 0, 0, D_ALL, NULL, NULL);
MMIO_F(0x72400, 0x40, 0, 0, 0, D_ALL, NULL, NULL);
MMIO_F(0x70440, 0xc, 0, 0, 0, D_PRE_SKL, NULL, NULL);
MMIO_F(0x71440, 0xc, 0, 0, 0, D_PRE_SKL, NULL, NULL);
MMIO_F(0x72440, 0xc, 0, 0, 0, D_PRE_SKL, NULL, NULL);
MMIO_F(0x7044c, 0xc, 0, 0, 0, D_PRE_SKL, NULL, NULL);
MMIO_F(0x7144c, 0xc, 0, 0, 0, D_PRE_SKL, NULL, NULL);
MMIO_F(0x7244c, 0xc, 0, 0, 0, D_PRE_SKL, NULL, NULL);
MMIO_D(PIPE_WM_LINETIME(PIPE_A), D_ALL);
MMIO_D(PIPE_WM_LINETIME(PIPE_B), D_ALL);
MMIO_D(PIPE_WM_LINETIME(PIPE_C), D_ALL);
MMIO_D(SPLL_CTL, D_ALL);
MMIO_D(_WRPLL_CTL1, D_ALL);
MMIO_D(_WRPLL_CTL2, D_ALL);
MMIO_D(PORT_CLK_SEL(PORT_A), D_ALL);
MMIO_D(PORT_CLK_SEL(PORT_B), D_ALL);
MMIO_D(PORT_CLK_SEL(PORT_C), D_ALL);
MMIO_D(PORT_CLK_SEL(PORT_D), D_ALL);
MMIO_D(PORT_CLK_SEL(PORT_E), D_ALL);
MMIO_D(TRANS_CLK_SEL(TRANSCODER_A), D_ALL);
MMIO_D(TRANS_CLK_SEL(TRANSCODER_B), D_ALL);
MMIO_D(TRANS_CLK_SEL(TRANSCODER_C), D_ALL);
MMIO_D(HSW_NDE_RSTWRN_OPT, D_ALL);
MMIO_D(0x46508, D_ALL);
MMIO_D(0x49080, D_ALL);
MMIO_D(0x49180, D_ALL);
MMIO_D(0x49280, D_ALL);
MMIO_F(0x49090, 0x14, 0, 0, 0, D_ALL, NULL, NULL);
MMIO_F(0x49190, 0x14, 0, 0, 0, D_ALL, NULL, NULL);
MMIO_F(0x49290, 0x14, 0, 0, 0, D_ALL, NULL, NULL);
MMIO_D(GAMMA_MODE(PIPE_A), D_ALL);
MMIO_D(GAMMA_MODE(PIPE_B), D_ALL);
MMIO_D(GAMMA_MODE(PIPE_C), D_ALL);
MMIO_D(PIPE_MULT(PIPE_A), D_ALL);
MMIO_D(PIPE_MULT(PIPE_B), D_ALL);
MMIO_D(PIPE_MULT(PIPE_C), D_ALL);
MMIO_D(HSW_TVIDEO_DIP_CTL(TRANSCODER_A), D_ALL);
MMIO_D(HSW_TVIDEO_DIP_CTL(TRANSCODER_B), D_ALL);
MMIO_D(HSW_TVIDEO_DIP_CTL(TRANSCODER_C), D_ALL);
MMIO_DH(SFUSE_STRAP, D_ALL, NULL, NULL);
MMIO_D(SBI_ADDR, D_ALL);
MMIO_DH(SBI_DATA, D_ALL, sbi_data_mmio_read, NULL);
MMIO_DH(SBI_CTL_STAT, D_ALL, NULL, sbi_ctl_mmio_write);
MMIO_D(PIXCLK_GATE, D_ALL);
MMIO_F(_DPA_AUX_CH_CTL, 6 * 4, 0, 0, 0, D_ALL, NULL,
dp_aux_ch_ctl_mmio_write);
MMIO_DH(DDI_BUF_CTL(PORT_A), D_ALL, NULL, ddi_buf_ctl_mmio_write);
MMIO_DH(DDI_BUF_CTL(PORT_B), D_ALL, NULL, ddi_buf_ctl_mmio_write);
MMIO_DH(DDI_BUF_CTL(PORT_C), D_ALL, NULL, ddi_buf_ctl_mmio_write);
MMIO_DH(DDI_BUF_CTL(PORT_D), D_ALL, NULL, ddi_buf_ctl_mmio_write);
MMIO_DH(DDI_BUF_CTL(PORT_E), D_ALL, NULL, ddi_buf_ctl_mmio_write);
MMIO_DH(DP_TP_CTL(PORT_A), D_ALL, NULL, dp_tp_ctl_mmio_write);
MMIO_DH(DP_TP_CTL(PORT_B), D_ALL, NULL, dp_tp_ctl_mmio_write);
MMIO_DH(DP_TP_CTL(PORT_C), D_ALL, NULL, dp_tp_ctl_mmio_write);
MMIO_DH(DP_TP_CTL(PORT_D), D_ALL, NULL, dp_tp_ctl_mmio_write);
MMIO_DH(DP_TP_CTL(PORT_E), D_ALL, NULL, dp_tp_ctl_mmio_write);
MMIO_DH(DP_TP_STATUS(PORT_A), D_ALL, NULL, dp_tp_status_mmio_write);
MMIO_DH(DP_TP_STATUS(PORT_B), D_ALL, NULL, dp_tp_status_mmio_write);
MMIO_DH(DP_TP_STATUS(PORT_C), D_ALL, NULL, dp_tp_status_mmio_write);
MMIO_DH(DP_TP_STATUS(PORT_D), D_ALL, NULL, dp_tp_status_mmio_write);
MMIO_DH(DP_TP_STATUS(PORT_E), D_ALL, NULL, NULL);
MMIO_F(_DDI_BUF_TRANS_A, 0x50, 0, 0, 0, D_ALL, NULL, NULL);
MMIO_F(0x64e60, 0x50, 0, 0, 0, D_ALL, NULL, NULL);
MMIO_F(0x64eC0, 0x50, 0, 0, 0, D_ALL, NULL, NULL);
MMIO_F(0x64f20, 0x50, 0, 0, 0, D_ALL, NULL, NULL);
MMIO_F(0x64f80, 0x50, 0, 0, 0, D_ALL, NULL, NULL);
MMIO_D(HSW_AUD_CFG(PIPE_A), D_ALL);
MMIO_D(HSW_AUD_PIN_ELD_CP_VLD, D_ALL);
MMIO_DH(_TRANS_DDI_FUNC_CTL_A, D_ALL, NULL, NULL);
MMIO_DH(_TRANS_DDI_FUNC_CTL_B, D_ALL, NULL, NULL);
MMIO_DH(_TRANS_DDI_FUNC_CTL_C, D_ALL, NULL, NULL);
MMIO_DH(_TRANS_DDI_FUNC_CTL_EDP, D_ALL, NULL, NULL);
MMIO_D(_TRANSA_MSA_MISC, D_ALL);
MMIO_D(_TRANSB_MSA_MISC, D_ALL);
MMIO_D(_TRANSC_MSA_MISC, D_ALL);
MMIO_D(_TRANS_EDP_MSA_MISC, D_ALL);
MMIO_DH(FORCEWAKE, D_ALL, NULL, NULL);
MMIO_D(FORCEWAKE_ACK, D_ALL);
MMIO_D(GEN6_GT_CORE_STATUS, D_ALL);
MMIO_D(GEN6_GT_THREAD_STATUS_REG, D_ALL);
MMIO_D(GTFIFODBG, D_ALL);
MMIO_D(GTFIFOCTL, D_ALL);
MMIO_DH(FORCEWAKE_MT, D_PRE_SKL, NULL, mul_force_wake_write);
MMIO_DH(FORCEWAKE_ACK_HSW, D_HSW | D_BDW, NULL, NULL);
MMIO_D(ECOBUS, D_ALL);
MMIO_DH(GEN6_RC_CONTROL, D_ALL, NULL, NULL);
MMIO_DH(GEN6_RC_STATE, D_ALL, NULL, NULL);
MMIO_D(GEN6_RPNSWREQ, D_ALL);
MMIO_D(GEN6_RC_VIDEO_FREQ, D_ALL);
MMIO_D(GEN6_RP_DOWN_TIMEOUT, D_ALL);
MMIO_D(GEN6_RP_INTERRUPT_LIMITS, D_ALL);
MMIO_D(GEN6_RPSTAT1, D_ALL);
MMIO_D(GEN6_RP_CONTROL, D_ALL);
MMIO_D(GEN6_RP_UP_THRESHOLD, D_ALL);
MMIO_D(GEN6_RP_DOWN_THRESHOLD, D_ALL);
MMIO_D(GEN6_RP_CUR_UP_EI, D_ALL);
MMIO_D(GEN6_RP_CUR_UP, D_ALL);
MMIO_D(GEN6_RP_PREV_UP, D_ALL);
MMIO_D(GEN6_RP_CUR_DOWN_EI, D_ALL);
MMIO_D(GEN6_RP_CUR_DOWN, D_ALL);
MMIO_D(GEN6_RP_PREV_DOWN, D_ALL);
MMIO_D(GEN6_RP_UP_EI, D_ALL);
MMIO_D(GEN6_RP_DOWN_EI, D_ALL);
MMIO_D(GEN6_RP_IDLE_HYSTERSIS, D_ALL);
MMIO_D(GEN6_RC1_WAKE_RATE_LIMIT, D_ALL);
MMIO_D(GEN6_RC6_WAKE_RATE_LIMIT, D_ALL);
MMIO_D(GEN6_RC6pp_WAKE_RATE_LIMIT, D_ALL);
MMIO_D(GEN6_RC_EVALUATION_INTERVAL, D_ALL);
MMIO_D(GEN6_RC_IDLE_HYSTERSIS, D_ALL);
MMIO_D(GEN6_RC_SLEEP, D_ALL);
MMIO_D(GEN6_RC1e_THRESHOLD, D_ALL);
MMIO_D(GEN6_RC6_THRESHOLD, D_ALL);
MMIO_D(GEN6_RC6p_THRESHOLD, D_ALL);
MMIO_D(GEN6_RC6pp_THRESHOLD, D_ALL);
MMIO_D(GEN6_PMINTRMSK, D_ALL);
MMIO_DH(HSW_PWR_WELL_BIOS, D_HSW | D_BDW, NULL, power_well_ctl_mmio_write);
MMIO_DH(HSW_PWR_WELL_DRIVER, D_HSW | D_BDW, NULL, power_well_ctl_mmio_write);
MMIO_DH(HSW_PWR_WELL_KVMR, D_HSW | D_BDW, NULL, power_well_ctl_mmio_write);
MMIO_DH(HSW_PWR_WELL_DEBUG, D_HSW | D_BDW, NULL, power_well_ctl_mmio_write);
MMIO_DH(HSW_PWR_WELL_CTL5, D_HSW | D_BDW, NULL, power_well_ctl_mmio_write);
MMIO_DH(HSW_PWR_WELL_CTL6, D_HSW | D_BDW, NULL, power_well_ctl_mmio_write);
MMIO_D(RSTDBYCTL, D_ALL);
MMIO_DH(GEN6_GDRST, D_ALL, NULL, gdrst_mmio_write);
MMIO_F(FENCE_REG_GEN6_LO(0), 0x80, 0, 0, 0, D_ALL, fence_mmio_read, fence_mmio_write);
MMIO_F(VGT_PVINFO_PAGE, VGT_PVINFO_SIZE, F_UNALIGN, 0, 0, D_ALL, pvinfo_mmio_read, pvinfo_mmio_write);
MMIO_DH(CPU_VGACNTRL, D_ALL, NULL, vga_control_mmio_write);
MMIO_F(MCHBAR_MIRROR_BASE_SNB, 0x40000, 0, 0, 0, D_ALL, NULL, NULL);
MMIO_D(TILECTL, D_ALL);
MMIO_D(GEN6_UCGCTL1, D_ALL);
MMIO_D(GEN6_UCGCTL2, D_ALL);
MMIO_F(0x4f000, 0x90, 0, 0, 0, D_ALL, NULL, NULL);
MMIO_D(GEN6_PCODE_MAILBOX, D_PRE_SKL);
MMIO_D(GEN6_PCODE_DATA, D_ALL);
MMIO_D(0x13812c, D_ALL);
MMIO_DH(GEN7_ERR_INT, D_ALL, NULL, NULL);
MMIO_D(HSW_EDRAM_CAP, D_ALL);
MMIO_D(HSW_IDICR, D_ALL);
MMIO_DH(GFX_FLSH_CNTL_GEN6, D_ALL, NULL, NULL);
MMIO_D(0x3c, D_ALL);
MMIO_D(0x860, D_ALL);
MMIO_D(ECOSKPD, D_ALL);
MMIO_D(0x121d0, D_ALL);
MMIO_D(GEN6_BLITTER_ECOSKPD, D_ALL);
MMIO_D(0x41d0, D_ALL);
MMIO_D(GAC_ECO_BITS, D_ALL);
MMIO_D(0x6200, D_ALL);
MMIO_D(0x6204, D_ALL);
MMIO_D(0x6208, D_ALL);
MMIO_D(0x7118, D_ALL);
MMIO_D(0x7180, D_ALL);
MMIO_D(0x7408, D_ALL);
MMIO_D(0x7c00, D_ALL);
MMIO_D(GEN6_MBCTL, D_ALL);
MMIO_D(0x911c, D_ALL);
MMIO_D(0x9120, D_ALL);
MMIO_D(GAB_CTL, D_ALL);
MMIO_D(0x48800, D_ALL);
MMIO_D(0xce044, D_ALL);
MMIO_D(0xe6500, D_ALL);
MMIO_D(0xe6504, D_ALL);
MMIO_D(0xe6600, D_ALL);
MMIO_D(0xe6604, D_ALL);
MMIO_D(0xe6700, D_ALL);
MMIO_D(0xe6704, D_ALL);
MMIO_D(0xe6800, D_ALL);
MMIO_D(0xe6804, D_ALL);
MMIO_D(PCH_GMBUS4, D_ALL);
MMIO_D(PCH_GMBUS5, D_ALL);
MMIO_D(0x902c, D_ALL);
MMIO_D(0xec008, D_ALL);
MMIO_D(0xec00c, D_ALL);
MMIO_D(0xec008 + 0x18, D_ALL);
MMIO_D(0xec00c + 0x18, D_ALL);
MMIO_D(0xec008 + 0x18 * 2, D_ALL);
MMIO_D(0xec00c + 0x18 * 2, D_ALL);
MMIO_D(0xec008 + 0x18 * 3, D_ALL);
MMIO_D(0xec00c + 0x18 * 3, D_ALL);
MMIO_D(0xec408, D_ALL);
MMIO_D(0xec40c, D_ALL);
MMIO_D(0xec408 + 0x18, D_ALL);
MMIO_D(0xec40c + 0x18, D_ALL);
MMIO_D(0xec408 + 0x18 * 2, D_ALL);
MMIO_D(0xec40c + 0x18 * 2, D_ALL);
MMIO_D(0xec408 + 0x18 * 3, D_ALL);
MMIO_D(0xec40c + 0x18 * 3, D_ALL);
MMIO_D(0xfc810, D_ALL);
MMIO_D(0xfc81c, D_ALL);
MMIO_D(0xfc828, D_ALL);
MMIO_D(0xfc834, D_ALL);
MMIO_D(0xfcc00, D_ALL);
MMIO_D(0xfcc0c, D_ALL);
MMIO_D(0xfcc18, D_ALL);
MMIO_D(0xfcc24, D_ALL);
MMIO_D(0xfd000, D_ALL);
MMIO_D(0xfd00c, D_ALL);
MMIO_D(0xfd018, D_ALL);
MMIO_D(0xfd024, D_ALL);
MMIO_D(0xfd034, D_ALL);
MMIO_DH(FPGA_DBG, D_ALL, NULL, fpga_dbg_mmio_write);
MMIO_D(0x2054, D_ALL);
MMIO_D(0x12054, D_ALL);
MMIO_D(0x22054, D_ALL);
MMIO_D(0x1a054, D_ALL);
MMIO_D(0x44070, D_ALL);
MMIO_D(0x215c, D_HSW_PLUS);
MMIO_DFH(0x2178, D_ALL, F_CMD_ACCESS, NULL, NULL);
MMIO_DFH(0x217c, D_ALL, F_CMD_ACCESS, NULL, NULL);
MMIO_DFH(0x12178, D_ALL, F_CMD_ACCESS, NULL, NULL);
MMIO_DFH(0x1217c, D_ALL, F_CMD_ACCESS, NULL, NULL);
MMIO_F(0x2290, 8, 0, 0, 0, D_HSW_PLUS, NULL, NULL);
MMIO_D(OACONTROL, D_HSW);
MMIO_D(0x2b00, D_BDW_PLUS);
MMIO_D(0x2360, D_BDW_PLUS);
MMIO_F(0x5200, 32, 0, 0, 0, D_ALL, NULL, NULL);
MMIO_F(0x5240, 32, 0, 0, 0, D_ALL, NULL, NULL);
MMIO_F(0x5280, 16, 0, 0, 0, D_ALL, NULL, NULL);
MMIO_DFH(0x1c17c, D_BDW_PLUS, F_CMD_ACCESS, NULL, NULL);
MMIO_DFH(0x1c178, D_BDW_PLUS, F_CMD_ACCESS, NULL, NULL);
MMIO_D(BCS_SWCTRL, D_ALL);
MMIO_F(HS_INVOCATION_COUNT, 8, 0, 0, 0, D_ALL, NULL, NULL);
MMIO_F(DS_INVOCATION_COUNT, 8, 0, 0, 0, D_ALL, NULL, NULL);
MMIO_F(IA_VERTICES_COUNT, 8, 0, 0, 0, D_ALL, NULL, NULL);
MMIO_F(IA_PRIMITIVES_COUNT, 8, 0, 0, 0, D_ALL, NULL, NULL);
MMIO_F(VS_INVOCATION_COUNT, 8, 0, 0, 0, D_ALL, NULL, NULL);
MMIO_F(GS_INVOCATION_COUNT, 8, 0, 0, 0, D_ALL, NULL, NULL);
MMIO_F(GS_PRIMITIVES_COUNT, 8, 0, 0, 0, D_ALL, NULL, NULL);
MMIO_F(CL_INVOCATION_COUNT, 8, 0, 0, 0, D_ALL, NULL, NULL);
MMIO_F(CL_PRIMITIVES_COUNT, 8, 0, 0, 0, D_ALL, NULL, NULL);
MMIO_F(PS_INVOCATION_COUNT, 8, 0, 0, 0, D_ALL, NULL, NULL);
MMIO_F(PS_DEPTH_COUNT, 8, 0, 0, 0, D_ALL, NULL, NULL);
MMIO_DH(0x4260, D_BDW_PLUS, NULL, gvt_reg_tlb_control_handler);
MMIO_DH(0x4264, D_BDW_PLUS, NULL, gvt_reg_tlb_control_handler);
MMIO_DH(0x4268, D_BDW_PLUS, NULL, gvt_reg_tlb_control_handler);
MMIO_DH(0x426c, D_BDW_PLUS, NULL, gvt_reg_tlb_control_handler);
MMIO_DH(0x4270, D_BDW_PLUS, NULL, gvt_reg_tlb_control_handler);
MMIO_DFH(0x4094, D_BDW_PLUS, F_CMD_ACCESS, NULL, NULL);
return 0;
}
static int init_broadwell_mmio_info(struct intel_gvt *gvt)
{
struct drm_i915_private *dev_priv = gvt->dev_priv;
int ret;
MMIO_DH(RING_IMR(GEN8_BSD2_RING_BASE), D_BDW_PLUS, NULL,
intel_vgpu_reg_imr_handler);
MMIO_DH(GEN8_GT_IMR(0), D_BDW_PLUS, NULL, intel_vgpu_reg_imr_handler);
MMIO_DH(GEN8_GT_IER(0), D_BDW_PLUS, NULL, intel_vgpu_reg_ier_handler);
MMIO_DH(GEN8_GT_IIR(0), D_BDW_PLUS, NULL, intel_vgpu_reg_iir_handler);
MMIO_D(GEN8_GT_ISR(0), D_BDW_PLUS);
MMIO_DH(GEN8_GT_IMR(1), D_BDW_PLUS, NULL, intel_vgpu_reg_imr_handler);
MMIO_DH(GEN8_GT_IER(1), D_BDW_PLUS, NULL, intel_vgpu_reg_ier_handler);
MMIO_DH(GEN8_GT_IIR(1), D_BDW_PLUS, NULL, intel_vgpu_reg_iir_handler);
MMIO_D(GEN8_GT_ISR(1), D_BDW_PLUS);
MMIO_DH(GEN8_GT_IMR(2), D_BDW_PLUS, NULL, intel_vgpu_reg_imr_handler);
MMIO_DH(GEN8_GT_IER(2), D_BDW_PLUS, NULL, intel_vgpu_reg_ier_handler);
MMIO_DH(GEN8_GT_IIR(2), D_BDW_PLUS, NULL, intel_vgpu_reg_iir_handler);
MMIO_D(GEN8_GT_ISR(2), D_BDW_PLUS);
MMIO_DH(GEN8_GT_IMR(3), D_BDW_PLUS, NULL, intel_vgpu_reg_imr_handler);
MMIO_DH(GEN8_GT_IER(3), D_BDW_PLUS, NULL, intel_vgpu_reg_ier_handler);
MMIO_DH(GEN8_GT_IIR(3), D_BDW_PLUS, NULL, intel_vgpu_reg_iir_handler);
MMIO_D(GEN8_GT_ISR(3), D_BDW_PLUS);
MMIO_DH(GEN8_DE_PIPE_IMR(PIPE_A), D_BDW_PLUS, NULL,
intel_vgpu_reg_imr_handler);
MMIO_DH(GEN8_DE_PIPE_IER(PIPE_A), D_BDW_PLUS, NULL,
intel_vgpu_reg_ier_handler);
MMIO_DH(GEN8_DE_PIPE_IIR(PIPE_A), D_BDW_PLUS, NULL,
intel_vgpu_reg_iir_handler);
MMIO_D(GEN8_DE_PIPE_ISR(PIPE_A), D_BDW_PLUS);
MMIO_DH(GEN8_DE_PIPE_IMR(PIPE_B), D_BDW_PLUS, NULL,
intel_vgpu_reg_imr_handler);
MMIO_DH(GEN8_DE_PIPE_IER(PIPE_B), D_BDW_PLUS, NULL,
intel_vgpu_reg_ier_handler);
MMIO_DH(GEN8_DE_PIPE_IIR(PIPE_B), D_BDW_PLUS, NULL,
intel_vgpu_reg_iir_handler);
MMIO_D(GEN8_DE_PIPE_ISR(PIPE_B), D_BDW_PLUS);
MMIO_DH(GEN8_DE_PIPE_IMR(PIPE_C), D_BDW_PLUS, NULL,
intel_vgpu_reg_imr_handler);
MMIO_DH(GEN8_DE_PIPE_IER(PIPE_C), D_BDW_PLUS, NULL,
intel_vgpu_reg_ier_handler);
MMIO_DH(GEN8_DE_PIPE_IIR(PIPE_C), D_BDW_PLUS, NULL,
intel_vgpu_reg_iir_handler);
MMIO_D(GEN8_DE_PIPE_ISR(PIPE_C), D_BDW_PLUS);
MMIO_DH(GEN8_DE_PORT_IMR, D_BDW_PLUS, NULL, intel_vgpu_reg_imr_handler);
MMIO_DH(GEN8_DE_PORT_IER, D_BDW_PLUS, NULL, intel_vgpu_reg_ier_handler);
MMIO_DH(GEN8_DE_PORT_IIR, D_BDW_PLUS, NULL, intel_vgpu_reg_iir_handler);
MMIO_D(GEN8_DE_PORT_ISR, D_BDW_PLUS);
MMIO_DH(GEN8_DE_MISC_IMR, D_BDW_PLUS, NULL, intel_vgpu_reg_imr_handler);
MMIO_DH(GEN8_DE_MISC_IER, D_BDW_PLUS, NULL, intel_vgpu_reg_ier_handler);
MMIO_DH(GEN8_DE_MISC_IIR, D_BDW_PLUS, NULL, intel_vgpu_reg_iir_handler);
MMIO_D(GEN8_DE_MISC_ISR, D_BDW_PLUS);
MMIO_DH(GEN8_PCU_IMR, D_BDW_PLUS, NULL, intel_vgpu_reg_imr_handler);
MMIO_DH(GEN8_PCU_IER, D_BDW_PLUS, NULL, intel_vgpu_reg_ier_handler);
MMIO_DH(GEN8_PCU_IIR, D_BDW_PLUS, NULL, intel_vgpu_reg_iir_handler);
MMIO_D(GEN8_PCU_ISR, D_BDW_PLUS);
MMIO_DH(GEN8_MASTER_IRQ, D_BDW_PLUS, NULL,
intel_vgpu_reg_master_irq_handler);
MMIO_D(RING_HWSTAM(GEN8_BSD2_RING_BASE), D_BDW_PLUS);
MMIO_D(0x1c134, D_BDW_PLUS);
MMIO_D(RING_TAIL(GEN8_BSD2_RING_BASE), D_BDW_PLUS);
MMIO_D(RING_HEAD(GEN8_BSD2_RING_BASE), D_BDW_PLUS);
MMIO_GM(RING_START(GEN8_BSD2_RING_BASE), D_BDW_PLUS, NULL, NULL);
MMIO_D(RING_CTL(GEN8_BSD2_RING_BASE), D_BDW_PLUS);
MMIO_D(RING_ACTHD(GEN8_BSD2_RING_BASE), D_BDW_PLUS);
MMIO_D(RING_ACTHD_UDW(GEN8_BSD2_RING_BASE), D_BDW_PLUS);
MMIO_DFH(0x1c29c, D_BDW_PLUS, F_MODE_MASK, NULL, ring_mode_mmio_write);
MMIO_DFH(RING_MI_MODE(GEN8_BSD2_RING_BASE), D_BDW_PLUS, F_MODE_MASK,
NULL, NULL);
MMIO_DFH(RING_INSTPM(GEN8_BSD2_RING_BASE), D_BDW_PLUS, F_MODE_MASK,
NULL, NULL);
MMIO_DFH(RING_TIMESTAMP(GEN8_BSD2_RING_BASE), D_BDW_PLUS, F_CMD_ACCESS,
ring_timestamp_mmio_read, NULL);
MMIO_RING_D(RING_ACTHD_UDW, D_BDW_PLUS);
#define RING_REG(base) (base + 0x230)
MMIO_RING_DFH(RING_REG, D_BDW_PLUS, 0, NULL, elsp_mmio_write);
MMIO_DH(RING_REG(GEN8_BSD2_RING_BASE), D_BDW_PLUS, NULL, elsp_mmio_write);
#undef RING_REG
#define RING_REG(base) (base + 0x234)
MMIO_RING_F(RING_REG, 8, F_RO, 0, ~0, D_BDW_PLUS, NULL, NULL);
MMIO_F(RING_REG(GEN8_BSD2_RING_BASE), 4, F_RO, 0, ~0LL, D_BDW_PLUS, NULL, NULL);
#undef RING_REG
#define RING_REG(base) (base + 0x244)
MMIO_RING_D(RING_REG, D_BDW_PLUS);
MMIO_D(RING_REG(GEN8_BSD2_RING_BASE), D_BDW_PLUS);
#undef RING_REG
#define RING_REG(base) (base + 0x370)
MMIO_RING_F(RING_REG, 48, F_RO, 0, ~0, D_BDW_PLUS, NULL, NULL);
MMIO_F(RING_REG(GEN8_BSD2_RING_BASE), 48, F_RO, 0, ~0, D_BDW_PLUS,
NULL, NULL);
#undef RING_REG
#define RING_REG(base) (base + 0x3a0)
MMIO_RING_DFH(RING_REG, D_BDW_PLUS, F_MODE_MASK, NULL, NULL);
MMIO_DFH(RING_REG(GEN8_BSD2_RING_BASE), D_BDW_PLUS, F_MODE_MASK, NULL, NULL);
#undef RING_REG
MMIO_D(PIPEMISC(PIPE_A), D_BDW_PLUS);
MMIO_D(PIPEMISC(PIPE_B), D_BDW_PLUS);
MMIO_D(PIPEMISC(PIPE_C), D_BDW_PLUS);
MMIO_D(0x1c1d0, D_BDW_PLUS);
MMIO_D(GEN6_MBCUNIT_SNPCR, D_BDW_PLUS);
MMIO_D(GEN7_MISCCPCTL, D_BDW_PLUS);
MMIO_D(0x1c054, D_BDW_PLUS);
MMIO_D(GEN8_PRIVATE_PAT_LO, D_BDW_PLUS);
MMIO_D(GEN8_PRIVATE_PAT_HI, D_BDW_PLUS);
MMIO_D(GAMTARBMODE, D_BDW_PLUS);
#define RING_REG(base) (base + 0x270)
MMIO_RING_F(RING_REG, 32, 0, 0, 0, D_BDW_PLUS, NULL, NULL);
MMIO_F(RING_REG(GEN8_BSD2_RING_BASE), 32, 0, 0, 0, D_BDW_PLUS, NULL, NULL);
#undef RING_REG
MMIO_RING_GM(RING_HWS_PGA, D_BDW_PLUS, NULL, NULL);
MMIO_GM(0x1c080, D_BDW_PLUS, NULL, NULL);
MMIO_DFH(HDC_CHICKEN0, D_BDW_PLUS, F_MODE_MASK, NULL, NULL);
MMIO_D(CHICKEN_PIPESL_1(PIPE_A), D_BDW);
MMIO_D(CHICKEN_PIPESL_1(PIPE_B), D_BDW);
MMIO_D(CHICKEN_PIPESL_1(PIPE_C), D_BDW);
MMIO_D(WM_MISC, D_BDW);
MMIO_D(BDW_EDP_PSR_BASE, D_BDW);
MMIO_D(0x66c00, D_BDW_PLUS);
MMIO_D(0x66c04, D_BDW_PLUS);
MMIO_D(HSW_GTT_CACHE_EN, D_BDW_PLUS);
MMIO_D(GEN8_EU_DISABLE0, D_BDW_PLUS);
MMIO_D(GEN8_EU_DISABLE1, D_BDW_PLUS);
MMIO_D(GEN8_EU_DISABLE2, D_BDW_PLUS);
MMIO_D(0xfdc, D_BDW);
MMIO_D(GEN8_ROW_CHICKEN, D_BDW_PLUS);
MMIO_D(GEN7_ROW_CHICKEN2, D_BDW_PLUS);
MMIO_D(GEN8_UCGCTL6, D_BDW_PLUS);
MMIO_D(0xb1f0, D_BDW);
MMIO_D(0xb1c0, D_BDW);
MMIO_DFH(GEN8_L3SQCREG4, D_BDW_PLUS, F_CMD_ACCESS, NULL, NULL);
MMIO_D(0xb100, D_BDW);
MMIO_D(0xb10c, D_BDW);
MMIO_D(0xb110, D_BDW);
MMIO_DH(0x24d0, D_BDW_PLUS, NULL, NULL);
MMIO_DH(0x24d4, D_BDW_PLUS, NULL, NULL);
MMIO_DH(0x24d8, D_BDW_PLUS, NULL, NULL);
MMIO_DH(0x24dc, D_BDW_PLUS, NULL, NULL);
MMIO_D(0x83a4, D_BDW);
MMIO_D(GEN8_L3_LRA_1_GPGPU, D_BDW_PLUS);
MMIO_D(0x8430, D_BDW);
MMIO_D(0x110000, D_BDW_PLUS);
MMIO_D(0x48400, D_BDW_PLUS);
MMIO_D(0x6e570, D_BDW_PLUS);
MMIO_D(0x65f10, D_BDW_PLUS);
MMIO_DFH(0xe194, D_BDW_PLUS, F_MODE_MASK, NULL, NULL);
MMIO_DFH(0xe188, D_BDW_PLUS, F_MODE_MASK, NULL, NULL);
MMIO_DFH(0xe180, D_BDW_PLUS, F_MODE_MASK, NULL, NULL);
MMIO_DFH(0x2580, D_BDW_PLUS, F_MODE_MASK, NULL, NULL);
MMIO_D(0x2248, D_BDW);
return 0;
}
static int init_skl_mmio_info(struct intel_gvt *gvt)
{
struct drm_i915_private *dev_priv = gvt->dev_priv;
int ret;
MMIO_DH(FORCEWAKE_RENDER_GEN9, D_SKL_PLUS, NULL, mul_force_wake_write);
MMIO_DH(FORCEWAKE_ACK_RENDER_GEN9, D_SKL_PLUS, NULL, NULL);
MMIO_DH(FORCEWAKE_BLITTER_GEN9, D_SKL_PLUS, NULL, mul_force_wake_write);
MMIO_DH(FORCEWAKE_ACK_BLITTER_GEN9, D_SKL_PLUS, NULL, NULL);
MMIO_DH(FORCEWAKE_MEDIA_GEN9, D_SKL_PLUS, NULL, mul_force_wake_write);
MMIO_DH(FORCEWAKE_ACK_MEDIA_GEN9, D_SKL_PLUS, NULL, NULL);
MMIO_F(_DPB_AUX_CH_CTL, 6 * 4, 0, 0, 0, D_SKL, NULL, dp_aux_ch_ctl_mmio_write);
MMIO_F(_DPC_AUX_CH_CTL, 6 * 4, 0, 0, 0, D_SKL, NULL, dp_aux_ch_ctl_mmio_write);
MMIO_F(_DPD_AUX_CH_CTL, 6 * 4, 0, 0, 0, D_SKL, NULL, dp_aux_ch_ctl_mmio_write);
MMIO_D(HSW_PWR_WELL_BIOS, D_SKL);
MMIO_DH(HSW_PWR_WELL_DRIVER, D_SKL, NULL, skl_power_well_ctl_write);
MMIO_DH(GEN6_PCODE_MAILBOX, D_SKL, NULL, mailbox_write);
MMIO_D(0xa210, D_SKL_PLUS);
MMIO_D(GEN9_MEDIA_PG_IDLE_HYSTERESIS, D_SKL_PLUS);
MMIO_D(GEN9_RENDER_PG_IDLE_HYSTERESIS, D_SKL_PLUS);
MMIO_DH(0x4ddc, D_SKL, NULL, skl_misc_ctl_write);
MMIO_DH(0x42080, D_SKL, NULL, skl_misc_ctl_write);
MMIO_D(0x45504, D_SKL);
MMIO_D(0x45520, D_SKL);
MMIO_D(0x46000, D_SKL);
MMIO_DH(0x46010, D_SKL, NULL, skl_lcpll_write);
MMIO_DH(0x46014, D_SKL, NULL, skl_lcpll_write);
MMIO_D(0x6C040, D_SKL);
MMIO_D(0x6C048, D_SKL);
MMIO_D(0x6C050, D_SKL);
MMIO_D(0x6C044, D_SKL);
MMIO_D(0x6C04C, D_SKL);
MMIO_D(0x6C054, D_SKL);
MMIO_D(0x6c058, D_SKL);
MMIO_D(0x6c05c, D_SKL);
MMIO_DH(0X6c060, D_SKL, dpll_status_read, NULL);
MMIO_DH(SKL_PS_WIN_POS(PIPE_A, 0), D_SKL, NULL, pf_write);
MMIO_DH(SKL_PS_WIN_POS(PIPE_A, 1), D_SKL, NULL, pf_write);
MMIO_DH(SKL_PS_WIN_POS(PIPE_B, 0), D_SKL, NULL, pf_write);
MMIO_DH(SKL_PS_WIN_POS(PIPE_B, 1), D_SKL, NULL, pf_write);
MMIO_DH(SKL_PS_WIN_POS(PIPE_C, 0), D_SKL, NULL, pf_write);
MMIO_DH(SKL_PS_WIN_POS(PIPE_C, 1), D_SKL, NULL, pf_write);
MMIO_DH(SKL_PS_WIN_SZ(PIPE_A, 0), D_SKL, NULL, pf_write);
MMIO_DH(SKL_PS_WIN_SZ(PIPE_A, 1), D_SKL, NULL, pf_write);
MMIO_DH(SKL_PS_WIN_SZ(PIPE_B, 0), D_SKL, NULL, pf_write);
MMIO_DH(SKL_PS_WIN_SZ(PIPE_B, 1), D_SKL, NULL, pf_write);
MMIO_DH(SKL_PS_WIN_SZ(PIPE_C, 0), D_SKL, NULL, pf_write);
MMIO_DH(SKL_PS_WIN_SZ(PIPE_C, 1), D_SKL, NULL, pf_write);
MMIO_DH(SKL_PS_CTRL(PIPE_A, 0), D_SKL, NULL, pf_write);
MMIO_DH(SKL_PS_CTRL(PIPE_A, 1), D_SKL, NULL, pf_write);
MMIO_DH(SKL_PS_CTRL(PIPE_B, 0), D_SKL, NULL, pf_write);
MMIO_DH(SKL_PS_CTRL(PIPE_B, 1), D_SKL, NULL, pf_write);
MMIO_DH(SKL_PS_CTRL(PIPE_C, 0), D_SKL, NULL, pf_write);
MMIO_DH(SKL_PS_CTRL(PIPE_C, 1), D_SKL, NULL, pf_write);
MMIO_DH(PLANE_BUF_CFG(PIPE_A, 0), D_SKL, NULL, NULL);
MMIO_DH(PLANE_BUF_CFG(PIPE_A, 1), D_SKL, NULL, NULL);
MMIO_DH(PLANE_BUF_CFG(PIPE_A, 2), D_SKL, NULL, NULL);
MMIO_DH(PLANE_BUF_CFG(PIPE_A, 3), D_SKL, NULL, NULL);
MMIO_DH(PLANE_BUF_CFG(PIPE_B, 0), D_SKL, NULL, NULL);
MMIO_DH(PLANE_BUF_CFG(PIPE_B, 1), D_SKL, NULL, NULL);
MMIO_DH(PLANE_BUF_CFG(PIPE_B, 2), D_SKL, NULL, NULL);
MMIO_DH(PLANE_BUF_CFG(PIPE_B, 3), D_SKL, NULL, NULL);
MMIO_DH(PLANE_BUF_CFG(PIPE_C, 0), D_SKL, NULL, NULL);
MMIO_DH(PLANE_BUF_CFG(PIPE_C, 1), D_SKL, NULL, NULL);
MMIO_DH(PLANE_BUF_CFG(PIPE_C, 2), D_SKL, NULL, NULL);
MMIO_DH(PLANE_BUF_CFG(PIPE_C, 3), D_SKL, NULL, NULL);
MMIO_DH(CUR_BUF_CFG(PIPE_A), D_SKL, NULL, NULL);
MMIO_DH(CUR_BUF_CFG(PIPE_B), D_SKL, NULL, NULL);
MMIO_DH(CUR_BUF_CFG(PIPE_C), D_SKL, NULL, NULL);
MMIO_F(PLANE_WM(PIPE_A, 0, 0), 4 * 8, 0, 0, 0, D_SKL, NULL, NULL);
MMIO_F(PLANE_WM(PIPE_A, 1, 0), 4 * 8, 0, 0, 0, D_SKL, NULL, NULL);
MMIO_F(PLANE_WM(PIPE_A, 2, 0), 4 * 8, 0, 0, 0, D_SKL, NULL, NULL);
MMIO_F(PLANE_WM(PIPE_B, 0, 0), 4 * 8, 0, 0, 0, D_SKL, NULL, NULL);
MMIO_F(PLANE_WM(PIPE_B, 1, 0), 4 * 8, 0, 0, 0, D_SKL, NULL, NULL);
MMIO_F(PLANE_WM(PIPE_B, 2, 0), 4 * 8, 0, 0, 0, D_SKL, NULL, NULL);
MMIO_F(PLANE_WM(PIPE_C, 0, 0), 4 * 8, 0, 0, 0, D_SKL, NULL, NULL);
MMIO_F(PLANE_WM(PIPE_C, 1, 0), 4 * 8, 0, 0, 0, D_SKL, NULL, NULL);
MMIO_F(PLANE_WM(PIPE_C, 2, 0), 4 * 8, 0, 0, 0, D_SKL, NULL, NULL);
MMIO_F(CUR_WM(PIPE_A, 0), 4 * 8, 0, 0, 0, D_SKL, NULL, NULL);
MMIO_F(CUR_WM(PIPE_B, 0), 4 * 8, 0, 0, 0, D_SKL, NULL, NULL);
MMIO_F(CUR_WM(PIPE_C, 0), 4 * 8, 0, 0, 0, D_SKL, NULL, NULL);
MMIO_DH(PLANE_WM_TRANS(PIPE_A, 0), D_SKL, NULL, NULL);
MMIO_DH(PLANE_WM_TRANS(PIPE_A, 1), D_SKL, NULL, NULL);
MMIO_DH(PLANE_WM_TRANS(PIPE_A, 2), D_SKL, NULL, NULL);
MMIO_DH(PLANE_WM_TRANS(PIPE_B, 0), D_SKL, NULL, NULL);
MMIO_DH(PLANE_WM_TRANS(PIPE_B, 1), D_SKL, NULL, NULL);
MMIO_DH(PLANE_WM_TRANS(PIPE_B, 2), D_SKL, NULL, NULL);
MMIO_DH(PLANE_WM_TRANS(PIPE_C, 0), D_SKL, NULL, NULL);
MMIO_DH(PLANE_WM_TRANS(PIPE_C, 1), D_SKL, NULL, NULL);
MMIO_DH(PLANE_WM_TRANS(PIPE_C, 2), D_SKL, NULL, NULL);
MMIO_DH(CUR_WM_TRANS(PIPE_A), D_SKL, NULL, NULL);
MMIO_DH(CUR_WM_TRANS(PIPE_B), D_SKL, NULL, NULL);
MMIO_DH(CUR_WM_TRANS(PIPE_C), D_SKL, NULL, NULL);
MMIO_DH(PLANE_NV12_BUF_CFG(PIPE_A, 0), D_SKL, NULL, NULL);
MMIO_DH(PLANE_NV12_BUF_CFG(PIPE_A, 1), D_SKL, NULL, NULL);
MMIO_DH(PLANE_NV12_BUF_CFG(PIPE_A, 2), D_SKL, NULL, NULL);
MMIO_DH(PLANE_NV12_BUF_CFG(PIPE_A, 3), D_SKL, NULL, NULL);
MMIO_DH(PLANE_NV12_BUF_CFG(PIPE_B, 0), D_SKL, NULL, NULL);
MMIO_DH(PLANE_NV12_BUF_CFG(PIPE_B, 1), D_SKL, NULL, NULL);
MMIO_DH(PLANE_NV12_BUF_CFG(PIPE_B, 2), D_SKL, NULL, NULL);
MMIO_DH(PLANE_NV12_BUF_CFG(PIPE_B, 3), D_SKL, NULL, NULL);
MMIO_DH(PLANE_NV12_BUF_CFG(PIPE_C, 0), D_SKL, NULL, NULL);
MMIO_DH(PLANE_NV12_BUF_CFG(PIPE_C, 1), D_SKL, NULL, NULL);
MMIO_DH(PLANE_NV12_BUF_CFG(PIPE_C, 2), D_SKL, NULL, NULL);
MMIO_DH(PLANE_NV12_BUF_CFG(PIPE_C, 3), D_SKL, NULL, NULL);
MMIO_DH(_REG_701C0(PIPE_A, 1), D_SKL, NULL, NULL);
MMIO_DH(_REG_701C0(PIPE_A, 2), D_SKL, NULL, NULL);
MMIO_DH(_REG_701C0(PIPE_A, 3), D_SKL, NULL, NULL);
MMIO_DH(_REG_701C0(PIPE_A, 4), D_SKL, NULL, NULL);
MMIO_DH(_REG_701C0(PIPE_B, 1), D_SKL, NULL, NULL);
MMIO_DH(_REG_701C0(PIPE_B, 2), D_SKL, NULL, NULL);
MMIO_DH(_REG_701C0(PIPE_B, 3), D_SKL, NULL, NULL);
MMIO_DH(_REG_701C0(PIPE_B, 4), D_SKL, NULL, NULL);
MMIO_DH(_REG_701C0(PIPE_C, 1), D_SKL, NULL, NULL);
MMIO_DH(_REG_701C0(PIPE_C, 2), D_SKL, NULL, NULL);
MMIO_DH(_REG_701C0(PIPE_C, 3), D_SKL, NULL, NULL);
MMIO_DH(_REG_701C0(PIPE_C, 4), D_SKL, NULL, NULL);
MMIO_DH(_REG_701C4(PIPE_A, 1), D_SKL, NULL, NULL);
MMIO_DH(_REG_701C4(PIPE_A, 2), D_SKL, NULL, NULL);
MMIO_DH(_REG_701C4(PIPE_A, 3), D_SKL, NULL, NULL);
MMIO_DH(_REG_701C4(PIPE_A, 4), D_SKL, NULL, NULL);
MMIO_DH(_REG_701C4(PIPE_B, 1), D_SKL, NULL, NULL);
MMIO_DH(_REG_701C4(PIPE_B, 2), D_SKL, NULL, NULL);
MMIO_DH(_REG_701C4(PIPE_B, 3), D_SKL, NULL, NULL);
MMIO_DH(_REG_701C4(PIPE_B, 4), D_SKL, NULL, NULL);
MMIO_DH(_REG_701C4(PIPE_C, 1), D_SKL, NULL, NULL);
MMIO_DH(_REG_701C4(PIPE_C, 2), D_SKL, NULL, NULL);
MMIO_DH(_REG_701C4(PIPE_C, 3), D_SKL, NULL, NULL);
MMIO_DH(_REG_701C4(PIPE_C, 4), D_SKL, NULL, NULL);
MMIO_D(0x70380, D_SKL);
MMIO_D(0x71380, D_SKL);
MMIO_D(0x72380, D_SKL);
MMIO_D(0x7039c, D_SKL);
MMIO_F(0x80000, 0x3000, 0, 0, 0, D_SKL, NULL, NULL);
MMIO_D(0x8f074, D_SKL);
MMIO_D(0x8f004, D_SKL);
MMIO_D(0x8f034, D_SKL);
MMIO_D(0xb11c, D_SKL);
MMIO_D(0x51000, D_SKL);
MMIO_D(0x6c00c, D_SKL);
MMIO_F(0xc800, 0x7f8, 0, 0, 0, D_SKL, NULL, NULL);
MMIO_F(0xb020, 0x80, 0, 0, 0, D_SKL, NULL, NULL);
MMIO_D(0xd08, D_SKL);
MMIO_D(0x20e0, D_SKL);
MMIO_D(0x20ec, D_SKL);
/* TRTT */
MMIO_D(0x4de0, D_SKL);
MMIO_D(0x4de4, D_SKL);
MMIO_D(0x4de8, D_SKL);
MMIO_D(0x4dec, D_SKL);
MMIO_D(0x4df0, D_SKL);
MMIO_DH(0x4df4, D_SKL, NULL, gen9_trtte_write);
MMIO_DH(0x4dfc, D_SKL, NULL, gen9_trtt_chicken_write);
MMIO_D(0x45008, D_SKL);
MMIO_D(0x46430, D_SKL);
MMIO_D(0x46520, D_SKL);
MMIO_D(0xc403c, D_SKL);
MMIO_D(0xb004, D_SKL);
MMIO_DH(DMA_CTRL, D_SKL_PLUS, NULL, dma_ctrl_write);
MMIO_D(0x65900, D_SKL);
MMIO_D(0x1082c0, D_SKL);
MMIO_D(0x4068, D_SKL);
MMIO_D(0x67054, D_SKL);
MMIO_D(0x6e560, D_SKL);
MMIO_D(0x6e554, D_SKL);
MMIO_D(0x2b20, D_SKL);
MMIO_D(0x65f00, D_SKL);
MMIO_D(0x65f08, D_SKL);
MMIO_D(0x320f0, D_SKL);
MMIO_D(_REG_VCS2_EXCC, D_SKL);
MMIO_D(0x70034, D_SKL);
MMIO_D(0x71034, D_SKL);
MMIO_D(0x72034, D_SKL);
MMIO_D(_PLANE_KEYVAL_1(PIPE_A), D_SKL);
MMIO_D(_PLANE_KEYVAL_1(PIPE_B), D_SKL);
MMIO_D(_PLANE_KEYVAL_1(PIPE_C), D_SKL);
MMIO_D(_PLANE_KEYMSK_1(PIPE_A), D_SKL);
MMIO_D(_PLANE_KEYMSK_1(PIPE_B), D_SKL);
MMIO_D(_PLANE_KEYMSK_1(PIPE_C), D_SKL);
MMIO_D(0x44500, D_SKL);
return 0;
}
/**
* intel_gvt_find_mmio_info - find MMIO information entry by aligned offset
* @gvt: GVT device
* @offset: register offset
*
* This function is used to find the MMIO information entry from the hash table.
*
* Returns:
* Pointer to the MMIO information entry, or NULL if it does not exist.
*/
struct intel_gvt_mmio_info *intel_gvt_find_mmio_info(struct intel_gvt *gvt,
unsigned int offset)
{
struct intel_gvt_mmio_info *e;
WARN_ON(!IS_ALIGNED(offset, 4));
hash_for_each_possible(gvt->mmio.mmio_info_table, e, node, offset) {
if (e->offset == offset)
return e;
}
return NULL;
}
/**
* intel_gvt_clean_mmio_info - clean up MMIO information table for GVT device
* @gvt: GVT device
*
* This function is called at the driver unloading stage to clean up the MMIO
* information table of the GVT device.
*
*/
void intel_gvt_clean_mmio_info(struct intel_gvt *gvt)
{
struct hlist_node *tmp;
struct intel_gvt_mmio_info *e;
int i;
hash_for_each_safe(gvt->mmio.mmio_info_table, i, tmp, e, node)
kfree(e);
vfree(gvt->mmio.mmio_attribute);
gvt->mmio.mmio_attribute = NULL;
}
/**
* intel_gvt_setup_mmio_info - setup MMIO information table for GVT device
* @gvt: GVT device
*
* This function is called at the initialization stage to set up the MMIO
* information table of the GVT device.
*
* Returns:
* zero on success, negative if failed.
*/
int intel_gvt_setup_mmio_info(struct intel_gvt *gvt)
{
struct intel_gvt_device_info *info = &gvt->device_info;
struct drm_i915_private *dev_priv = gvt->dev_priv;
int ret;
gvt->mmio.mmio_attribute = vzalloc(info->mmio_size);
if (!gvt->mmio.mmio_attribute)
return -ENOMEM;
ret = init_generic_mmio_info(gvt);
if (ret)
goto err;
if (IS_BROADWELL(dev_priv)) {
ret = init_broadwell_mmio_info(gvt);
if (ret)
goto err;
} else if (IS_SKYLAKE(dev_priv)) {
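/* Skylake reuses the Broadwell MMIO table and layers the SKL-specific entries on top */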
ret = init_broadwell_mmio_info(gvt);
if (ret)
goto err;
ret = init_skl_mmio_info(gvt);
if (ret)
goto err;
}
return 0;
err:
intel_gvt_clean_mmio_info(gvt);
return ret;
}
/**
* intel_gvt_mmio_set_accessed - mark an MMIO register as accessed
* @gvt: a GVT device
* @offset: register offset
*
*/
void intel_gvt_mmio_set_accessed(struct intel_gvt *gvt, unsigned int offset)
{
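/* attributes are tracked per 4-byte register, hence the offset >> 2 index */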
gvt->mmio.mmio_attribute[offset >> 2] |=
F_ACCESSED;
}
/**
* intel_gvt_mmio_is_cmd_access - check if an MMIO register can be accessed by GPU commands
* @gvt: a GVT device
* @offset: register offset
*
*/
bool intel_gvt_mmio_is_cmd_access(struct intel_gvt *gvt,
unsigned int offset)
{
return gvt->mmio.mmio_attribute[offset >> 2] &
F_CMD_ACCESS;
}
/**
* intel_gvt_mmio_is_unalign - check if an MMIO register can be accessed unaligned
* @gvt: a GVT device
* @offset: register offset
*
*/
bool intel_gvt_mmio_is_unalign(struct intel_gvt *gvt,
unsigned int offset)
{
return gvt->mmio.mmio_attribute[offset >> 2] &
F_UNALIGN;
}
/**
* intel_gvt_mmio_set_cmd_accessed - mark an MMIO register as accessed by GPU commands
* @gvt: a GVT device
* @offset: register offset
*
*/
void intel_gvt_mmio_set_cmd_accessed(struct intel_gvt *gvt,
unsigned int offset)
{
gvt->mmio.mmio_attribute[offset >> 2] |=
F_CMD_ACCESSED;
}
/**
* intel_gvt_mmio_has_mode_mask - check if an MMIO register has a mode mask
* @gvt: a GVT device
* @offset: register offset
*
* Returns:
* True if the register has a mode mask in its upper 16 bits (the mask selects
* which of the lower 16 bits a write actually updates), false otherwise.
*
*/
bool intel_gvt_mmio_has_mode_mask(struct intel_gvt *gvt, unsigned int offset)
{
return gvt->mmio.mmio_attribute[offset >> 2] &
F_MODE_MASK;
}
/**
* intel_vgpu_default_mmio_read - default MMIO read handler
* @vgpu: a vGPU
* @offset: access offset
* @p_data: data return buffer
* @bytes: access data length
*
* Returns:
* Zero on success, negative error code if failed.
*/
int intel_vgpu_default_mmio_read(struct intel_vgpu *vgpu, unsigned int offset,
void *p_data, unsigned int bytes)
{
read_vreg(vgpu, offset, p_data, bytes);
return 0;
}
/**
* intel_vgpu_default_mmio_write - default MMIO write handler
* @vgpu: a vGPU
* @offset: access offset
* @p_data: write data buffer
* @bytes: access data length
*
* Returns:
* Zero on success, negative error code if failed.
*/
int intel_vgpu_default_mmio_write(struct intel_vgpu *vgpu, unsigned int offset,
void *p_data, unsigned int bytes)
{
write_vreg(vgpu, offset, p_data, bytes);
return 0;
}
@@ -19,17 +19,51 @@
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*
* Authors:
* Eddie Dong <eddie.dong@intel.com>
* Dexuan Cui
* Jike Song <jike.song@intel.com>
*
* Contributors:
* Zhi Wang <zhi.a.wang@intel.com>
*
*/
#ifndef _GVT_HYPERCALL_H_
#define _GVT_HYPERCALL_H_
struct intel_gvt_io_emulation_ops {
int (*emulate_cfg_read)(void *, unsigned int, void *, unsigned int);
int (*emulate_cfg_write)(void *, unsigned int, void *, unsigned int);
int (*emulate_mmio_read)(void *, u64, void *, unsigned int);
int (*emulate_mmio_write)(void *, u64, void *, unsigned int);
};
extern struct intel_gvt_io_emulation_ops intel_gvt_io_emulation_ops;
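/*
 * Illustrative use only: a hypervisor shim routes trapped guest accesses
 * through this table, roughly
 *
 *	intel_gvt_io_emulation_ops.emulate_mmio_read(vgpu, pa, &data, 4);
 *
 * where "vgpu" and "pa" come from the trap context (hypothetical call site,
 * not defined by this header).
 */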
/*
 * Specific GVT-g MPT modules function collections. Currently GVT-g supports
 * both Xen and KVM by providing dedicated hypervisor-related MPT modules.
 */
struct intel_gvt_mpt {
int (*detect_host)(void);
int (*attach_vgpu)(void *vgpu, unsigned long *handle);
void (*detach_vgpu)(unsigned long handle);
int (*inject_msi)(unsigned long handle, u32 addr, u16 data);
unsigned long (*from_virt_to_mfn)(void *p);
int (*set_wp_page)(unsigned long handle, u64 gfn);
int (*unset_wp_page)(unsigned long handle, u64 gfn);
int (*read_gpa)(unsigned long handle, unsigned long gpa, void *buf,
unsigned long len);
int (*write_gpa)(unsigned long handle, unsigned long gpa, void *buf,
unsigned long len);
unsigned long (*gfn_to_mfn)(unsigned long handle, unsigned long gfn);
int (*map_gfn_to_mfn)(unsigned long handle, unsigned long gfn,
unsigned long mfn, unsigned int nr, bool map,
int type);
int (*set_trap_area)(unsigned long handle, u64 start, u64 end,
bool map);
};
extern struct intel_gvt_mpt xengt_mpt;
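/*
 * Illustrative sketch only (not part of this patch): a hypervisor-specific
 * MPT module fills in this interface and hands it to the GVT core, e.g.
 *
 *	static struct intel_gvt_mpt example_mpt = {
 *		.detect_host = example_detect_host,
 *		.inject_msi  = example_inject_msi,
 *		.read_gpa    = example_read_gpa,
 *		.write_gpa   = example_write_gpa,
 *	};
 *
 * All "example_*" names above are hypothetical placeholders; only xengt_mpt
 * is declared by this header.
 */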
/*
* Copyright(c) 2011-2016 Intel Corporation. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice (including the next
* paragraph) shall be included in all copies or substantial portions of the
* Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*
* Authors:
* Kevin Tian <kevin.tian@intel.com>
* Zhi Wang <zhi.a.wang@intel.com>
*
* Contributors:
* Min he <min.he@intel.com>
*
*/
#include "i915_drv.h"
/* common offset among interrupt control registers */
#define regbase_to_isr(base) (base)
#define regbase_to_imr(base) (base + 0x4)
#define regbase_to_iir(base) (base + 0x8)
#define regbase_to_ier(base) (base + 0xC)
#define iir_to_regbase(iir) (iir - 0x8)
#define ier_to_regbase(ier) (ier - 0xC)
#define get_event_virt_handler(irq, e) (irq->events[e].v_handler)
#define get_irq_info(irq, e) (irq->events[e].info)
#define irq_to_gvt(irq) \
container_of(irq, struct intel_gvt, irq)
static void update_upstream_irq(struct intel_vgpu *vgpu,
struct intel_gvt_irq_info *info);
const char * const irq_name[INTEL_GVT_EVENT_MAX] = {
[RCS_MI_USER_INTERRUPT] = "Render CS MI USER INTERRUPT",
[RCS_DEBUG] = "Render EU debug from SVG",
[RCS_MMIO_SYNC_FLUSH] = "Render MMIO sync flush status",
[RCS_CMD_STREAMER_ERR] = "Render CS error interrupt",
[RCS_PIPE_CONTROL] = "Render PIPE CONTROL notify",
[RCS_WATCHDOG_EXCEEDED] = "Render CS Watchdog counter exceeded",
[RCS_PAGE_DIRECTORY_FAULT] = "Render page directory faults",
[RCS_AS_CONTEXT_SWITCH] = "Render AS Context Switch Interrupt",
[VCS_MI_USER_INTERRUPT] = "Video CS MI USER INTERRUPT",
[VCS_MMIO_SYNC_FLUSH] = "Video MMIO sync flush status",
[VCS_CMD_STREAMER_ERR] = "Video CS error interrupt",
[VCS_MI_FLUSH_DW] = "Video MI FLUSH DW notify",
[VCS_WATCHDOG_EXCEEDED] = "Video CS Watchdog counter exceeded",
[VCS_PAGE_DIRECTORY_FAULT] = "Video page directory faults",
[VCS_AS_CONTEXT_SWITCH] = "Video AS Context Switch Interrupt",
[VCS2_MI_USER_INTERRUPT] = "VCS2 Video CS MI USER INTERRUPT",
[VCS2_MI_FLUSH_DW] = "VCS2 Video MI FLUSH DW notify",
[VCS2_AS_CONTEXT_SWITCH] = "VCS2 Context Switch Interrupt",
[BCS_MI_USER_INTERRUPT] = "Blitter CS MI USER INTERRUPT",
[BCS_MMIO_SYNC_FLUSH] = "Blitter MMIO sync flush status",
[BCS_CMD_STREAMER_ERR] = "Blitter CS error interrupt",
[BCS_MI_FLUSH_DW] = "Blitter MI FLUSH DW notify",
[BCS_PAGE_DIRECTORY_FAULT] = "Blitter page directory faults",
[BCS_AS_CONTEXT_SWITCH] = "Blitter AS Context Switch Interrupt",
[VECS_MI_FLUSH_DW] = "Video Enhanced Streamer MI FLUSH DW notify",
[VECS_AS_CONTEXT_SWITCH] = "VECS Context Switch Interrupt",
[PIPE_A_FIFO_UNDERRUN] = "Pipe A FIFO underrun",
[PIPE_A_CRC_ERR] = "Pipe A CRC error",
[PIPE_A_CRC_DONE] = "Pipe A CRC done",
[PIPE_A_VSYNC] = "Pipe A vsync",
[PIPE_A_LINE_COMPARE] = "Pipe A line compare",
[PIPE_A_ODD_FIELD] = "Pipe A odd field",
[PIPE_A_EVEN_FIELD] = "Pipe A even field",
[PIPE_A_VBLANK] = "Pipe A vblank",
[PIPE_B_FIFO_UNDERRUN] = "Pipe B FIFO underrun",
[PIPE_B_CRC_ERR] = "Pipe B CRC error",
[PIPE_B_CRC_DONE] = "Pipe B CRC done",
[PIPE_B_VSYNC] = "Pipe B vsync",
[PIPE_B_LINE_COMPARE] = "Pipe B line compare",
[PIPE_B_ODD_FIELD] = "Pipe B odd field",
[PIPE_B_EVEN_FIELD] = "Pipe B even field",
[PIPE_B_VBLANK] = "Pipe B vblank",
[PIPE_C_VBLANK] = "Pipe C vblank",
[DPST_PHASE_IN] = "DPST phase in event",
[DPST_HISTOGRAM] = "DPST histogram event",
[GSE] = "GSE",
[DP_A_HOTPLUG] = "DP A Hotplug",
[AUX_CHANNEL_A] = "AUX Channel A",
[PERF_COUNTER] = "Performance counter",
[POISON] = "Poison",
[GTT_FAULT] = "GTT fault",
[PRIMARY_A_FLIP_DONE] = "Primary Plane A flip done",
[PRIMARY_B_FLIP_DONE] = "Primary Plane B flip done",
[PRIMARY_C_FLIP_DONE] = "Primary Plane C flip done",
[SPRITE_A_FLIP_DONE] = "Sprite Plane A flip done",
[SPRITE_B_FLIP_DONE] = "Sprite Plane B flip done",
[SPRITE_C_FLIP_DONE] = "Sprite Plane C flip done",
[PCU_THERMAL] = "PCU Thermal Event",
[PCU_PCODE2DRIVER_MAILBOX] = "PCU pcode2driver mailbox event",
[FDI_RX_INTERRUPTS_TRANSCODER_A] = "FDI RX Interrupts Combined A",
[AUDIO_CP_CHANGE_TRANSCODER_A] = "Audio CP Change Transcoder A",
[AUDIO_CP_REQUEST_TRANSCODER_A] = "Audio CP Request Transcoder A",
[FDI_RX_INTERRUPTS_TRANSCODER_B] = "FDI RX Interrupts Combined B",
[AUDIO_CP_CHANGE_TRANSCODER_B] = "Audio CP Change Transcoder B",
[AUDIO_CP_REQUEST_TRANSCODER_B] = "Audio CP Request Transcoder B",
[FDI_RX_INTERRUPTS_TRANSCODER_C] = "FDI RX Interrupts Combined C",
[AUDIO_CP_CHANGE_TRANSCODER_C] = "Audio CP Change Transcoder C",
[AUDIO_CP_REQUEST_TRANSCODER_C] = "Audio CP Request Transcoder C",
[ERR_AND_DBG] = "South Error and Debug Interrupts Combined",
[GMBUS] = "Gmbus",
[SDVO_B_HOTPLUG] = "SDVO B hotplug",
[CRT_HOTPLUG] = "CRT Hotplug",
[DP_B_HOTPLUG] = "DisplayPort/HDMI/DVI B Hotplug",
[DP_C_HOTPLUG] = "DisplayPort/HDMI/DVI C Hotplug",
[DP_D_HOTPLUG] = "DisplayPort/HDMI/DVI D Hotplug",
[AUX_CHANNEL_B] = "AUX Channel B",
[AUX_CHANNEL_C] = "AUX Channel C",
[AUX_CHANNEL_D] = "AUX Channel D",
[AUDIO_POWER_STATE_CHANGE_B] = "Audio Power State change Port B",
[AUDIO_POWER_STATE_CHANGE_C] = "Audio Power State change Port C",
[AUDIO_POWER_STATE_CHANGE_D] = "Audio Power State change Port D",
[INTEL_GVT_EVENT_RESERVED] = "RESERVED EVENTS!!!",
};
static inline struct intel_gvt_irq_info *regbase_to_irq_info(
struct intel_gvt *gvt,
unsigned int reg)
{
struct intel_gvt_irq *irq = &gvt->irq;
int i;
for_each_set_bit(i, irq->irq_info_bitmap, INTEL_GVT_IRQ_INFO_MAX) {
if (i915_mmio_reg_offset(irq->info[i]->reg_base) == reg)
return irq->info[i];
}
return NULL;
}
/**
* intel_vgpu_reg_imr_handler - Generic IMR register emulation write handler
* @vgpu: a vGPU
* @reg: register offset written by guest
* @p_data: register data written by guest
* @bytes: register data length
*
* This function is used to emulate the generic IMR register bit change
* behavior.
*
* Returns:
* Zero on success, negative error code if failed.
*
*/
int intel_vgpu_reg_imr_handler(struct intel_vgpu *vgpu,
unsigned int reg, void *p_data, unsigned int bytes)
{
struct intel_gvt *gvt = vgpu->gvt;
struct intel_gvt_irq_ops *ops = gvt->irq.ops;
u32 changed, masked, unmasked;
u32 imr = *(u32 *)p_data;
gvt_dbg_irq("write IMR %x with val %x\n",
reg, imr);
gvt_dbg_irq("old vIMR %x\n", vgpu_vreg(vgpu, reg));
/* figure out newly masked/unmasked bits */
changed = vgpu_vreg(vgpu, reg) ^ imr;
masked = (vgpu_vreg(vgpu, reg) & changed) ^ changed;
unmasked = masked ^ changed;
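/*
 * Worked example (illustrative values only): old vIMR = 0x0f, guest
 * writes imr = 0x3c.
 *   changed  = 0x0f ^ 0x3c          = 0x33  (bits that toggled)
 *   masked   = (0x0f & 0x33) ^ 0x33 = 0x30  (bits newly masked)
 *   unmasked = 0x30 ^ 0x33          = 0x03  (bits newly unmasked)
 */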
gvt_dbg_irq("changed %x, masked %x, unmasked %x\n",
changed, masked, unmasked);
vgpu_vreg(vgpu, reg) = imr;
ops->check_pending_irq(vgpu);
gvt_dbg_irq("IRQ: new vIMR %x\n", vgpu_vreg(vgpu, reg));
return 0;
}
/**
* intel_vgpu_reg_master_irq_handler - master IRQ write emulation handler
* @vgpu: a vGPU
* @reg: register offset written by guest
* @p_data: register data written by guest
* @bytes: register data length
*
* This function is used to emulate the master IRQ register on gen8+.
*
* Returns:
* Zero on success, negative error code if failed.
*
*/
int intel_vgpu_reg_master_irq_handler(struct intel_vgpu *vgpu,
unsigned int reg, void *p_data, unsigned int bytes)
{
struct intel_gvt *gvt = vgpu->gvt;
struct intel_gvt_irq_ops *ops = gvt->irq.ops;
u32 changed, enabled, disabled;
u32 ier = *(u32 *)p_data;
u32 virtual_ier = vgpu_vreg(vgpu, reg);
gvt_dbg_irq("write master irq reg %x with val %x\n",
reg, ier);
gvt_dbg_irq("old vreg %x\n", vgpu_vreg(vgpu, reg));
/*
* GEN8_MASTER_IRQ is a special irq register:
* only bit 31 is allowed to be modified,
* and it is treated as an IER bit.
*/
ier &= GEN8_MASTER_IRQ_CONTROL;
virtual_ier &= GEN8_MASTER_IRQ_CONTROL;
vgpu_vreg(vgpu, reg) &= ~GEN8_MASTER_IRQ_CONTROL;
vgpu_vreg(vgpu, reg) |= ier;
/* figure out newly enabled/disabled bits */
changed = virtual_ier ^ ier;
enabled = (virtual_ier & changed) ^ changed;
disabled = enabled ^ changed;
gvt_dbg_irq("changed %x, enabled %x, disabled %x\n",
changed, enabled, disabled);
ops->check_pending_irq(vgpu);
gvt_dbg_irq("new vreg %x\n", vgpu_vreg(vgpu, reg));
return 0;
}
/**
* intel_vgpu_reg_ier_handler - Generic IER write emulation handler
* @vgpu: a vGPU
* @reg: register offset written by guest
* @p_data: register data written by guest
* @bytes: register data length
*
* This function is used to emulate the generic IER register behavior.
*
* Returns:
* Zero on success, negative error code if failed.
*
*/
int intel_vgpu_reg_ier_handler(struct intel_vgpu *vgpu,
unsigned int reg, void *p_data, unsigned int bytes)
{
struct intel_gvt *gvt = vgpu->gvt;
struct intel_gvt_irq_ops *ops = gvt->irq.ops;
struct intel_gvt_irq_info *info;
u32 changed, enabled, disabled;
u32 ier = *(u32 *)p_data;
gvt_dbg_irq("write IER %x with val %x\n",
reg, ier);
gvt_dbg_irq("old vIER %x\n", vgpu_vreg(vgpu, reg));
/* figure out newly enabled/disabled bits */
changed = vgpu_vreg(vgpu, reg) ^ ier;
enabled = (vgpu_vreg(vgpu, reg) & changed) ^ changed;
disabled = enabled ^ changed;
gvt_dbg_irq("changed %x, enabled %x, disabled %x\n",
changed, enabled, disabled);
vgpu_vreg(vgpu, reg) = ier;
info = regbase_to_irq_info(gvt, ier_to_regbase(reg));
if (WARN_ON(!info))
return -EINVAL;
if (info->has_upstream_irq)
update_upstream_irq(vgpu, info);
ops->check_pending_irq(vgpu);
gvt_dbg_irq("new vIER %x\n", vgpu_vreg(vgpu, reg));
return 0;
}
/**
* intel_vgpu_reg_iir_handler - Generic IIR write emulation handler
* @vgpu: a vGPU
* @reg: register offset written by guest
* @p_data: register data written by guest
* @bytes: register data length
*
* This function is used to emulate the generic IIR register behavior.
*
* Returns:
* Zero on success, negative error code if failed.
*
*/
int intel_vgpu_reg_iir_handler(struct intel_vgpu *vgpu, unsigned int reg,
void *p_data, unsigned int bytes)
{
struct intel_gvt_irq_info *info = regbase_to_irq_info(vgpu->gvt,
iir_to_regbase(reg));
u32 iir = *(u32 *)p_data;
gvt_dbg_irq("write IIR %x with val %x\n", reg, iir);
if (WARN_ON(!info))
return -EINVAL;
vgpu_vreg(vgpu, reg) &= ~iir;
if (info->has_upstream_irq)
update_upstream_irq(vgpu, info);
return 0;
}
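/*
 * Each entry maps one upstream interrupt bit to the downstream interrupt
 * group it summarizes: { up group, up bit, down group, down bitmask }.
 * The list is terminated by the up_irq_bit == -1 sentinel.
 */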
static struct intel_gvt_irq_map gen8_irq_map[] = {
{ INTEL_GVT_IRQ_INFO_MASTER, 0, INTEL_GVT_IRQ_INFO_GT0, 0xffff },
{ INTEL_GVT_IRQ_INFO_MASTER, 1, INTEL_GVT_IRQ_INFO_GT0, 0xffff0000 },
{ INTEL_GVT_IRQ_INFO_MASTER, 2, INTEL_GVT_IRQ_INFO_GT1, 0xffff },
{ INTEL_GVT_IRQ_INFO_MASTER, 3, INTEL_GVT_IRQ_INFO_GT1, 0xffff0000 },
{ INTEL_GVT_IRQ_INFO_MASTER, 4, INTEL_GVT_IRQ_INFO_GT2, 0xffff },
{ INTEL_GVT_IRQ_INFO_MASTER, 6, INTEL_GVT_IRQ_INFO_GT3, 0xffff },
{ INTEL_GVT_IRQ_INFO_MASTER, 16, INTEL_GVT_IRQ_INFO_DE_PIPE_A, ~0 },
{ INTEL_GVT_IRQ_INFO_MASTER, 17, INTEL_GVT_IRQ_INFO_DE_PIPE_B, ~0 },
{ INTEL_GVT_IRQ_INFO_MASTER, 18, INTEL_GVT_IRQ_INFO_DE_PIPE_C, ~0 },
{ INTEL_GVT_IRQ_INFO_MASTER, 20, INTEL_GVT_IRQ_INFO_DE_PORT, ~0 },
{ INTEL_GVT_IRQ_INFO_MASTER, 22, INTEL_GVT_IRQ_INFO_DE_MISC, ~0 },
{ INTEL_GVT_IRQ_INFO_MASTER, 23, INTEL_GVT_IRQ_INFO_PCH, ~0 },
{ INTEL_GVT_IRQ_INFO_MASTER, 30, INTEL_GVT_IRQ_INFO_PCU, ~0 },
{ -1, -1, ~0 },
};
static void update_upstream_irq(struct intel_vgpu *vgpu,
struct intel_gvt_irq_info *info)
{
struct intel_gvt_irq *irq = &vgpu->gvt->irq;
struct intel_gvt_irq_map *map = irq->irq_map;
struct intel_gvt_irq_info *up_irq_info = NULL;
u32 set_bits = 0;
u32 clear_bits = 0;
int bit;
u32 val = vgpu_vreg(vgpu,
regbase_to_iir(i915_mmio_reg_offset(info->reg_base)))
& vgpu_vreg(vgpu,
regbase_to_ier(i915_mmio_reg_offset(info->reg_base)));
if (!info->has_upstream_irq)
return;
for (map = irq->irq_map; map->up_irq_bit != -1; map++) {
if (info->group != map->down_irq_group)
continue;
if (!up_irq_info)
up_irq_info = irq->info[map->up_irq_group];
else
WARN_ON(up_irq_info != irq->info[map->up_irq_group]);
bit = map->up_irq_bit;
if (val & map->down_irq_bitmask)
set_bits |= (1 << bit);
else
clear_bits |= (1 << bit);
}
WARN_ON(!up_irq_info);
if (up_irq_info->group == INTEL_GVT_IRQ_INFO_MASTER) {
u32 isr = i915_mmio_reg_offset(up_irq_info->reg_base);
vgpu_vreg(vgpu, isr) &= ~clear_bits;
vgpu_vreg(vgpu, isr) |= set_bits;
} else {
u32 iir = regbase_to_iir(
i915_mmio_reg_offset(up_irq_info->reg_base));
u32 imr = regbase_to_imr(
i915_mmio_reg_offset(up_irq_info->reg_base));
vgpu_vreg(vgpu, iir) |= (set_bits & ~vgpu_vreg(vgpu, imr));
}
if (up_irq_info->has_upstream_irq)
update_upstream_irq(vgpu, up_irq_info);
}
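/*
 * Walk the GEN-specific irq_map once at init time: record each upstream
 * summary bit in the upstream group's downstream_irq_bitmap and mark every
 * downstream group as having an upstream parent.
 */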
static void init_irq_map(struct intel_gvt_irq *irq)
{
struct intel_gvt_irq_map *map;
struct intel_gvt_irq_info *up_info, *down_info;
int up_bit;
for (map = irq->irq_map; map->up_irq_bit != -1; map++) {
up_info = irq->info[map->up_irq_group];
up_bit = map->up_irq_bit;
down_info = irq->info[map->down_irq_group];
set_bit(up_bit, up_info->downstream_irq_bitmap);
down_info->has_upstream_irq = true;
gvt_dbg_irq("[up] grp %d bit %d -> [down] grp %d bitmask %x\n",
up_info->group, up_bit,
down_info->group, map->down_irq_bitmask);
}
}
/* =======================vEvent injection===================== */
static int inject_virtual_interrupt(struct intel_vgpu *vgpu)
{
return intel_gvt_hypervisor_inject_msi(vgpu);
}
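/* Set the event's bit in the owning group's vIIR, unless masked by vIMR. */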
static void propagate_event(struct intel_gvt_irq *irq,
enum intel_gvt_event_type event, struct intel_vgpu *vgpu)
{
struct intel_gvt_irq_info *info;
unsigned int reg_base;
int bit;
info = get_irq_info(irq, event);
if (WARN_ON(!info))
return;
reg_base = i915_mmio_reg_offset(info->reg_base);
bit = irq->events[event].bit;
if (!test_bit(bit, (void *)&vgpu_vreg(vgpu,
regbase_to_imr(reg_base)))) {
gvt_dbg_irq("set bit (%d) for (%s) for vgpu (%d)\n",
bit, irq_name[event], vgpu->id);
set_bit(bit, (void *)&vgpu_vreg(vgpu,
regbase_to_iir(reg_base)));
}
}
/* =======================vEvent Handlers===================== */
static void handle_default_event_virt(struct intel_gvt_irq *irq,
enum intel_gvt_event_type event, struct intel_vgpu *vgpu)
{
if (!vgpu->irq.irq_warn_once[event]) {
gvt_dbg_core("vgpu%d: IRQ receive event %d (%s)\n",
vgpu->id, event, irq_name[event]);
vgpu->irq.irq_warn_once[event] = true;
}
propagate_event(irq, event, vgpu);
}
/* =====================GEN specific logic======================= */
/* GEN8 interrupt routines. */
#define DEFINE_GVT_GEN8_INTEL_GVT_IRQ_INFO(regname, regbase) \
static struct intel_gvt_irq_info gen8_##regname##_info = { \
.name = #regname"-IRQ", \
.reg_base = (regbase), \
.bit_to_event = {[0 ... INTEL_GVT_IRQ_BITWIDTH-1] = \
INTEL_GVT_EVENT_RESERVED}, \
}
DEFINE_GVT_GEN8_INTEL_GVT_IRQ_INFO(gt0, GEN8_GT_ISR(0));
DEFINE_GVT_GEN8_INTEL_GVT_IRQ_INFO(gt1, GEN8_GT_ISR(1));
DEFINE_GVT_GEN8_INTEL_GVT_IRQ_INFO(gt2, GEN8_GT_ISR(2));
DEFINE_GVT_GEN8_INTEL_GVT_IRQ_INFO(gt3, GEN8_GT_ISR(3));
DEFINE_GVT_GEN8_INTEL_GVT_IRQ_INFO(de_pipe_a, GEN8_DE_PIPE_ISR(PIPE_A));
DEFINE_GVT_GEN8_INTEL_GVT_IRQ_INFO(de_pipe_b, GEN8_DE_PIPE_ISR(PIPE_B));
DEFINE_GVT_GEN8_INTEL_GVT_IRQ_INFO(de_pipe_c, GEN8_DE_PIPE_ISR(PIPE_C));
DEFINE_GVT_GEN8_INTEL_GVT_IRQ_INFO(de_port, GEN8_DE_PORT_ISR);
DEFINE_GVT_GEN8_INTEL_GVT_IRQ_INFO(de_misc, GEN8_DE_MISC_ISR);
DEFINE_GVT_GEN8_INTEL_GVT_IRQ_INFO(pcu, GEN8_PCU_ISR);
DEFINE_GVT_GEN8_INTEL_GVT_IRQ_INFO(master, GEN8_MASTER_IRQ);
static struct intel_gvt_irq_info gvt_base_pch_info = {
.name = "PCH-IRQ",
.reg_base = SDEISR,
.bit_to_event = {[0 ... INTEL_GVT_IRQ_BITWIDTH-1] =
INTEL_GVT_EVENT_RESERVED},
};
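/*
 * With the master interrupt control bit enabled, refresh the summary bit of
 * every downstream group that still has IIR & IER bits pending, then inject
 * a virtual MSI if any master bit other than the control bit is set.
 */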
static void gen8_check_pending_irq(struct intel_vgpu *vgpu)
{
struct intel_gvt_irq *irq = &vgpu->gvt->irq;
int i;
if (!(vgpu_vreg(vgpu, i915_mmio_reg_offset(GEN8_MASTER_IRQ)) &
GEN8_MASTER_IRQ_CONTROL))
return;
for_each_set_bit(i, irq->irq_info_bitmap, INTEL_GVT_IRQ_INFO_MAX) {
struct intel_gvt_irq_info *info = irq->info[i];
u32 reg_base;
if (!info->has_upstream_irq)
continue;
reg_base = i915_mmio_reg_offset(info->reg_base);
if ((vgpu_vreg(vgpu, regbase_to_iir(reg_base))
& vgpu_vreg(vgpu, regbase_to_ier(reg_base))))
update_upstream_irq(vgpu, info);
}
if (vgpu_vreg(vgpu, i915_mmio_reg_offset(GEN8_MASTER_IRQ))
& ~GEN8_MASTER_IRQ_CONTROL)
inject_virtual_interrupt(vgpu);
}
static void gen8_init_irq(
struct intel_gvt_irq *irq)
{
struct intel_gvt *gvt = irq_to_gvt(irq);
#define SET_BIT_INFO(s, b, e, i) \
do { \
s->events[e].bit = b; \
s->events[e].info = s->info[i]; \
s->info[i]->bit_to_event[b] = e;\
} while (0)
#define SET_IRQ_GROUP(s, g, i) \
do { \
s->info[g] = i; \
(i)->group = g; \
set_bit(g, s->irq_info_bitmap); \
} while (0)
SET_IRQ_GROUP(irq, INTEL_GVT_IRQ_INFO_MASTER, &gen8_master_info);
SET_IRQ_GROUP(irq, INTEL_GVT_IRQ_INFO_GT0, &gen8_gt0_info);
SET_IRQ_GROUP(irq, INTEL_GVT_IRQ_INFO_GT1, &gen8_gt1_info);
SET_IRQ_GROUP(irq, INTEL_GVT_IRQ_INFO_GT2, &gen8_gt2_info);
SET_IRQ_GROUP(irq, INTEL_GVT_IRQ_INFO_GT3, &gen8_gt3_info);
SET_IRQ_GROUP(irq, INTEL_GVT_IRQ_INFO_DE_PIPE_A, &gen8_de_pipe_a_info);
SET_IRQ_GROUP(irq, INTEL_GVT_IRQ_INFO_DE_PIPE_B, &gen8_de_pipe_b_info);
SET_IRQ_GROUP(irq, INTEL_GVT_IRQ_INFO_DE_PIPE_C, &gen8_de_pipe_c_info);
SET_IRQ_GROUP(irq, INTEL_GVT_IRQ_INFO_DE_PORT, &gen8_de_port_info);
SET_IRQ_GROUP(irq, INTEL_GVT_IRQ_INFO_DE_MISC, &gen8_de_misc_info);
SET_IRQ_GROUP(irq, INTEL_GVT_IRQ_INFO_PCU, &gen8_pcu_info);
SET_IRQ_GROUP(irq, INTEL_GVT_IRQ_INFO_PCH, &gvt_base_pch_info);
/* GEN8 level 2 interrupts. */
/* GEN8 interrupt GT0 events */
SET_BIT_INFO(irq, 0, RCS_MI_USER_INTERRUPT, INTEL_GVT_IRQ_INFO_GT0);
SET_BIT_INFO(irq, 4, RCS_PIPE_CONTROL, INTEL_GVT_IRQ_INFO_GT0);
SET_BIT_INFO(irq, 8, RCS_AS_CONTEXT_SWITCH, INTEL_GVT_IRQ_INFO_GT0);
SET_BIT_INFO(irq, 16, BCS_MI_USER_INTERRUPT, INTEL_GVT_IRQ_INFO_GT0);
SET_BIT_INFO(irq, 20, BCS_MI_FLUSH_DW, INTEL_GVT_IRQ_INFO_GT0);
SET_BIT_INFO(irq, 24, BCS_AS_CONTEXT_SWITCH, INTEL_GVT_IRQ_INFO_GT0);
/* GEN8 interrupt GT1 events */
SET_BIT_INFO(irq, 0, VCS_MI_USER_INTERRUPT, INTEL_GVT_IRQ_INFO_GT1);
SET_BIT_INFO(irq, 4, VCS_MI_FLUSH_DW, INTEL_GVT_IRQ_INFO_GT1);
SET_BIT_INFO(irq, 8, VCS_AS_CONTEXT_SWITCH, INTEL_GVT_IRQ_INFO_GT1);
if (HAS_BSD2(gvt->dev_priv)) {
SET_BIT_INFO(irq, 16, VCS2_MI_USER_INTERRUPT,
INTEL_GVT_IRQ_INFO_GT1);
SET_BIT_INFO(irq, 20, VCS2_MI_FLUSH_DW,
INTEL_GVT_IRQ_INFO_GT1);
SET_BIT_INFO(irq, 24, VCS2_AS_CONTEXT_SWITCH,
INTEL_GVT_IRQ_INFO_GT1);
}
/* GEN8 interrupt GT3 events */
SET_BIT_INFO(irq, 0, VECS_MI_USER_INTERRUPT, INTEL_GVT_IRQ_INFO_GT3);
SET_BIT_INFO(irq, 4, VECS_MI_FLUSH_DW, INTEL_GVT_IRQ_INFO_GT3);
SET_BIT_INFO(irq, 8, VECS_AS_CONTEXT_SWITCH, INTEL_GVT_IRQ_INFO_GT3);
SET_BIT_INFO(irq, 0, PIPE_A_VBLANK, INTEL_GVT_IRQ_INFO_DE_PIPE_A);
SET_BIT_INFO(irq, 0, PIPE_B_VBLANK, INTEL_GVT_IRQ_INFO_DE_PIPE_B);
SET_BIT_INFO(irq, 0, PIPE_C_VBLANK, INTEL_GVT_IRQ_INFO_DE_PIPE_C);
/* GEN8 interrupt DE PORT events */
SET_BIT_INFO(irq, 0, AUX_CHANNEL_A, INTEL_GVT_IRQ_INFO_DE_PORT);
SET_BIT_INFO(irq, 3, DP_A_HOTPLUG, INTEL_GVT_IRQ_INFO_DE_PORT);
/* GEN8 interrupt DE MISC events */
SET_BIT_INFO(irq, 0, GSE, INTEL_GVT_IRQ_INFO_DE_MISC);
/* PCH events */
SET_BIT_INFO(irq, 17, GMBUS, INTEL_GVT_IRQ_INFO_PCH);
SET_BIT_INFO(irq, 19, CRT_HOTPLUG, INTEL_GVT_IRQ_INFO_PCH);
SET_BIT_INFO(irq, 21, DP_B_HOTPLUG, INTEL_GVT_IRQ_INFO_PCH);
SET_BIT_INFO(irq, 22, DP_C_HOTPLUG, INTEL_GVT_IRQ_INFO_PCH);
SET_BIT_INFO(irq, 23, DP_D_HOTPLUG, INTEL_GVT_IRQ_INFO_PCH);
if (IS_BROADWELL(gvt->dev_priv)) {
SET_BIT_INFO(irq, 25, AUX_CHANNEL_B, INTEL_GVT_IRQ_INFO_PCH);
SET_BIT_INFO(irq, 26, AUX_CHANNEL_C, INTEL_GVT_IRQ_INFO_PCH);
SET_BIT_INFO(irq, 27, AUX_CHANNEL_D, INTEL_GVT_IRQ_INFO_PCH);
SET_BIT_INFO(irq, 4, PRIMARY_A_FLIP_DONE, INTEL_GVT_IRQ_INFO_DE_PIPE_A);
SET_BIT_INFO(irq, 5, SPRITE_A_FLIP_DONE, INTEL_GVT_IRQ_INFO_DE_PIPE_A);
SET_BIT_INFO(irq, 4, PRIMARY_B_FLIP_DONE, INTEL_GVT_IRQ_INFO_DE_PIPE_B);
SET_BIT_INFO(irq, 5, SPRITE_B_FLIP_DONE, INTEL_GVT_IRQ_INFO_DE_PIPE_B);
SET_BIT_INFO(irq, 4, PRIMARY_C_FLIP_DONE, INTEL_GVT_IRQ_INFO_DE_PIPE_C);
SET_BIT_INFO(irq, 5, SPRITE_C_FLIP_DONE, INTEL_GVT_IRQ_INFO_DE_PIPE_C);
} else if (IS_SKYLAKE(gvt->dev_priv)) {
SET_BIT_INFO(irq, 25, AUX_CHANNEL_B, INTEL_GVT_IRQ_INFO_DE_PORT);
SET_BIT_INFO(irq, 26, AUX_CHANNEL_C, INTEL_GVT_IRQ_INFO_DE_PORT);
SET_BIT_INFO(irq, 27, AUX_CHANNEL_D, INTEL_GVT_IRQ_INFO_DE_PORT);
SET_BIT_INFO(irq, 3, PRIMARY_A_FLIP_DONE, INTEL_GVT_IRQ_INFO_DE_PIPE_A);
SET_BIT_INFO(irq, 3, PRIMARY_B_FLIP_DONE, INTEL_GVT_IRQ_INFO_DE_PIPE_B);
SET_BIT_INFO(irq, 3, PRIMARY_C_FLIP_DONE, INTEL_GVT_IRQ_INFO_DE_PIPE_C);
}
/* GEN8 interrupt PCU events */
SET_BIT_INFO(irq, 24, PCU_THERMAL, INTEL_GVT_IRQ_INFO_PCU);
SET_BIT_INFO(irq, 25, PCU_PCODE2DRIVER_MAILBOX, INTEL_GVT_IRQ_INFO_PCU);
}
static struct intel_gvt_irq_ops gen8_irq_ops = {
.init_irq = gen8_init_irq,
.check_pending_irq = gen8_check_pending_irq,
};
/**
* intel_vgpu_trigger_virtual_event - Trigger a virtual event for a vGPU
* @vgpu: a vGPU
* @event: interrupt event
*
* This function is used to trigger a virtual interrupt event for vGPU.
* The caller provides the event to be triggered, the framework itself
* will emulate the IRQ register bit change.
*
*/
void intel_vgpu_trigger_virtual_event(struct intel_vgpu *vgpu,
enum intel_gvt_event_type event)
{
struct intel_gvt *gvt = vgpu->gvt;
struct intel_gvt_irq *irq = &gvt->irq;
gvt_event_virt_handler_t handler;
struct intel_gvt_irq_ops *ops = gvt->irq.ops;
handler = get_event_virt_handler(irq, event);
WARN_ON(!handler);
handler(irq, event, vgpu);
ops->check_pending_irq(vgpu);
}
static void init_events(
struct intel_gvt_irq *irq)
{
int i;
for (i = 0; i < INTEL_GVT_EVENT_MAX; i++) {
irq->events[i].info = NULL;
irq->events[i].v_handler = handle_default_event_virt;
}
}
static enum hrtimer_restart vblank_timer_fn(struct hrtimer *data)
{
struct intel_gvt_vblank_timer *vblank_timer;
struct intel_gvt_irq *irq;
struct intel_gvt *gvt;
vblank_timer = container_of(data, struct intel_gvt_vblank_timer, timer);
irq = container_of(vblank_timer, struct intel_gvt_irq, vblank_timer);
gvt = container_of(irq, struct intel_gvt, irq);
intel_gvt_request_service(gvt, INTEL_GVT_REQUEST_EMULATE_VBLANK);
hrtimer_add_expires_ns(&vblank_timer->timer, vblank_timer->period);
return HRTIMER_RESTART;
}
/**
* intel_gvt_clean_irq - clean up GVT-g IRQ emulation subsystem
* @gvt: a GVT device
*
* This function is called at driver unloading stage, to clean up GVT-g IRQ
* emulation subsystem.
*
*/
void intel_gvt_clean_irq(struct intel_gvt *gvt)
{
struct intel_gvt_irq *irq = &gvt->irq;
hrtimer_cancel(&irq->vblank_timer.timer);
}
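/* Emulated vblank period: 16 ms, i.e. roughly a 60Hz refresh rate. */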
#define VBLANK_TIMER_PERIOD 16000000
/**
* intel_gvt_init_irq - initialize GVT-g IRQ emulation subsystem
* @gvt: a GVT device
*
* This function is called at driver loading stage, to initialize the GVT-g IRQ
* emulation subsystem.
*
* Returns:
* Zero on success, negative error code if failed.
*/
int intel_gvt_init_irq(struct intel_gvt *gvt)
{
struct intel_gvt_irq *irq = &gvt->irq;
struct intel_gvt_vblank_timer *vblank_timer = &irq->vblank_timer;
gvt_dbg_core("init irq framework\n");
if (IS_BROADWELL(gvt->dev_priv) || IS_SKYLAKE(gvt->dev_priv)) {
irq->ops = &gen8_irq_ops;
irq->irq_map = gen8_irq_map;
} else {
WARN_ON(1);
return -ENODEV;
}
/* common event initialization */
init_events(irq);
/* gen specific initialization */
irq->ops->init_irq(irq);
init_irq_map(irq);
hrtimer_init(&vblank_timer->timer, CLOCK_MONOTONIC, HRTIMER_MODE_ABS);
vblank_timer->timer.function = vblank_timer_fn;
vblank_timer->period = VBLANK_TIMER_PERIOD;
return 0;
}
/*
* Copyright(c) 2011-2016 Intel Corporation. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice (including the next
* paragraph) shall be included in all copies or substantial portions of the
* Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*
* Authors:
* Kevin Tian <kevin.tian@intel.com>
* Zhi Wang <zhi.a.wang@intel.com>
*
* Contributors:
* Min he <min.he@intel.com>
*
*/
#ifndef _GVT_INTERRUPT_H_
#define _GVT_INTERRUPT_H_
enum intel_gvt_event_type {
RCS_MI_USER_INTERRUPT = 0,
RCS_DEBUG,
RCS_MMIO_SYNC_FLUSH,
RCS_CMD_STREAMER_ERR,
RCS_PIPE_CONTROL,
RCS_L3_PARITY_ERR,
RCS_WATCHDOG_EXCEEDED,
RCS_PAGE_DIRECTORY_FAULT,
RCS_AS_CONTEXT_SWITCH,
RCS_MONITOR_BUFF_HALF_FULL,
VCS_MI_USER_INTERRUPT,
VCS_MMIO_SYNC_FLUSH,
VCS_CMD_STREAMER_ERR,
VCS_MI_FLUSH_DW,
VCS_WATCHDOG_EXCEEDED,
VCS_PAGE_DIRECTORY_FAULT,
VCS_AS_CONTEXT_SWITCH,
VCS2_MI_USER_INTERRUPT,
VCS2_MI_FLUSH_DW,
VCS2_AS_CONTEXT_SWITCH,
BCS_MI_USER_INTERRUPT,
BCS_MMIO_SYNC_FLUSH,
BCS_CMD_STREAMER_ERR,
BCS_MI_FLUSH_DW,
BCS_PAGE_DIRECTORY_FAULT,
BCS_AS_CONTEXT_SWITCH,
VECS_MI_USER_INTERRUPT,
VECS_MI_FLUSH_DW,
VECS_AS_CONTEXT_SWITCH,
PIPE_A_FIFO_UNDERRUN,
PIPE_B_FIFO_UNDERRUN,
PIPE_A_CRC_ERR,
PIPE_B_CRC_ERR,
PIPE_A_CRC_DONE,
PIPE_B_CRC_DONE,
PIPE_A_ODD_FIELD,
PIPE_B_ODD_FIELD,
PIPE_A_EVEN_FIELD,
PIPE_B_EVEN_FIELD,
PIPE_A_LINE_COMPARE,
PIPE_B_LINE_COMPARE,
PIPE_C_LINE_COMPARE,
PIPE_A_VBLANK,
PIPE_B_VBLANK,
PIPE_C_VBLANK,
PIPE_A_VSYNC,
PIPE_B_VSYNC,
PIPE_C_VSYNC,
PRIMARY_A_FLIP_DONE,
PRIMARY_B_FLIP_DONE,
PRIMARY_C_FLIP_DONE,
SPRITE_A_FLIP_DONE,
SPRITE_B_FLIP_DONE,
SPRITE_C_FLIP_DONE,
PCU_THERMAL,
PCU_PCODE2DRIVER_MAILBOX,
DPST_PHASE_IN,
DPST_HISTOGRAM,
GSE,
DP_A_HOTPLUG,
AUX_CHANNEL_A,
PERF_COUNTER,
POISON,
GTT_FAULT,
ERROR_INTERRUPT_COMBINED,
FDI_RX_INTERRUPTS_TRANSCODER_A,
AUDIO_CP_CHANGE_TRANSCODER_A,
AUDIO_CP_REQUEST_TRANSCODER_A,
FDI_RX_INTERRUPTS_TRANSCODER_B,
AUDIO_CP_CHANGE_TRANSCODER_B,
AUDIO_CP_REQUEST_TRANSCODER_B,
FDI_RX_INTERRUPTS_TRANSCODER_C,
AUDIO_CP_CHANGE_TRANSCODER_C,
AUDIO_CP_REQUEST_TRANSCODER_C,
ERR_AND_DBG,
GMBUS,
SDVO_B_HOTPLUG,
CRT_HOTPLUG,
DP_B_HOTPLUG,
DP_C_HOTPLUG,
DP_D_HOTPLUG,
AUX_CHANNEL_B,
AUX_CHANNEL_C,
AUX_CHANNEL_D,
AUDIO_POWER_STATE_CHANGE_B,
AUDIO_POWER_STATE_CHANGE_C,
AUDIO_POWER_STATE_CHANGE_D,
INTEL_GVT_EVENT_RESERVED,
INTEL_GVT_EVENT_MAX,
};
struct intel_gvt_irq;
struct intel_gvt;
typedef void (*gvt_event_virt_handler_t)(struct intel_gvt_irq *irq,
enum intel_gvt_event_type event, struct intel_vgpu *vgpu);
struct intel_gvt_irq_ops {
void (*init_irq)(struct intel_gvt_irq *irq);
void (*check_pending_irq)(struct intel_vgpu *vgpu);
};
/* the list of physical interrupt control register groups */
enum intel_gvt_irq_type {
INTEL_GVT_IRQ_INFO_GT,
INTEL_GVT_IRQ_INFO_DPY,
INTEL_GVT_IRQ_INFO_PCH,
INTEL_GVT_IRQ_INFO_PM,
INTEL_GVT_IRQ_INFO_MASTER,
INTEL_GVT_IRQ_INFO_GT0,
INTEL_GVT_IRQ_INFO_GT1,
INTEL_GVT_IRQ_INFO_GT2,
INTEL_GVT_IRQ_INFO_GT3,
INTEL_GVT_IRQ_INFO_DE_PIPE_A,
INTEL_GVT_IRQ_INFO_DE_PIPE_B,
INTEL_GVT_IRQ_INFO_DE_PIPE_C,
INTEL_GVT_IRQ_INFO_DE_PORT,
INTEL_GVT_IRQ_INFO_DE_MISC,
INTEL_GVT_IRQ_INFO_AUD,
INTEL_GVT_IRQ_INFO_PCU,
INTEL_GVT_IRQ_INFO_MAX,
};
#define INTEL_GVT_IRQ_BITWIDTH 32
/* device specific interrupt bit definitions */
struct intel_gvt_irq_info {
char *name;
i915_reg_t reg_base;
enum intel_gvt_event_type bit_to_event[INTEL_GVT_IRQ_BITWIDTH];
unsigned long warned;
int group;
DECLARE_BITMAP(downstream_irq_bitmap, INTEL_GVT_IRQ_BITWIDTH);
bool has_upstream_irq;
};
/* per-event information */
struct intel_gvt_event_info {
int bit; /* map to register bit */
int policy; /* forwarding policy */
struct intel_gvt_irq_info *info; /* register info */
gvt_event_virt_handler_t v_handler; /* for v_event */
};
struct intel_gvt_irq_map {
int up_irq_group;
int up_irq_bit;
int down_irq_group;
u32 down_irq_bitmask;
};
struct intel_gvt_vblank_timer {
struct hrtimer timer;
u64 period;
};
/* structure containing device specific IRQ state */
struct intel_gvt_irq {
struct intel_gvt_irq_ops *ops;
struct intel_gvt_irq_info *info[INTEL_GVT_IRQ_INFO_MAX];
DECLARE_BITMAP(irq_info_bitmap, INTEL_GVT_IRQ_INFO_MAX);
struct intel_gvt_event_info events[INTEL_GVT_EVENT_MAX];
DECLARE_BITMAP(pending_events, INTEL_GVT_EVENT_MAX);
struct intel_gvt_irq_map *irq_map;
struct intel_gvt_vblank_timer vblank_timer;
};
int intel_gvt_init_irq(struct intel_gvt *gvt);
void intel_gvt_clean_irq(struct intel_gvt *gvt);
void intel_vgpu_trigger_virtual_event(struct intel_vgpu *vgpu,
enum intel_gvt_event_type event);
int intel_vgpu_reg_iir_handler(struct intel_vgpu *vgpu, unsigned int reg,
void *p_data, unsigned int bytes);
int intel_vgpu_reg_ier_handler(struct intel_vgpu *vgpu,
unsigned int reg, void *p_data, unsigned int bytes);
int intel_vgpu_reg_master_irq_handler(struct intel_vgpu *vgpu,
unsigned int reg, void *p_data, unsigned int bytes);
int intel_vgpu_reg_imr_handler(struct intel_vgpu *vgpu,
unsigned int reg, void *p_data, unsigned int bytes);
int gvt_ring_id_to_pipe_control_notify_event(int ring_id);
int gvt_ring_id_to_mi_flush_dw_event(int ring_id);
int gvt_ring_id_to_mi_user_interrupt_event(int ring_id);
#endif /* _GVT_INTERRUPT_H_ */
/*
* Copyright(c) 2011-2016 Intel Corporation. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice (including the next
* paragraph) shall be included in all copies or substantial portions of the
* Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*
* Authors:
* Ke Yu
* Kevin Tian <kevin.tian@intel.com>
* Dexuan Cui
*
* Contributors:
* Tina Zhang <tina.zhang@intel.com>
* Min He <min.he@intel.com>
* Niu Bing <bing.niu@intel.com>
* Zhi Wang <zhi.a.wang@intel.com>
*
*/
#include "i915_drv.h"
/**
* intel_vgpu_gpa_to_mmio_offset - translate a GPA to MMIO offset
* @vgpu: a vGPU
* @gpa: guest physical address
*
* Returns:
* The MMIO offset of the given GPA
*/
int intel_vgpu_gpa_to_mmio_offset(struct intel_vgpu *vgpu, u64 gpa)
{
u64 gttmmio_gpa = *(u64 *)(vgpu_cfg_space(vgpu) + PCI_BASE_ADDRESS_0) &
~GENMASK(3, 0);
return gpa - gttmmio_gpa;
}
#define reg_is_mmio(gvt, reg) \
(reg >= 0 && reg < gvt->device_info.mmio_size)
#define reg_is_gtt(gvt, reg) \
(reg >= gvt->device_info.gtt_start_offset \
&& reg < gvt->device_info.gtt_start_offset + gvt_ggtt_sz(gvt))
/**
* intel_vgpu_emulate_mmio_read - emulate MMIO read
* @vgpu: a vGPU
* @pa: guest physical address
* @p_data: data return buffer
* @bytes: access data length
*
* Returns:
* Zero on success, negative error code if failed
*/
int intel_vgpu_emulate_mmio_read(void *__vgpu, uint64_t pa,
void *p_data, unsigned int bytes)
{
struct intel_vgpu *vgpu = __vgpu;
struct intel_gvt *gvt = vgpu->gvt;
struct intel_gvt_mmio_info *mmio;
unsigned int offset = 0;
int ret = -EINVAL;
mutex_lock(&gvt->lock);
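/*
 * A GPA that hits a write-protected guest page (e.g. a guest page table
 * being shadowed) is read back directly from guest memory.
 */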
if (atomic_read(&vgpu->gtt.n_write_protected_guest_page)) {
struct intel_vgpu_guest_page *gp;
gp = intel_vgpu_find_guest_page(vgpu, pa >> PAGE_SHIFT);
if (gp) {
ret = intel_gvt_hypervisor_read_gpa(vgpu, pa,
p_data, bytes);
if (ret) {
gvt_err("vgpu%d: guest page read error %d, "
"gfn 0x%lx, pa 0x%llx, var 0x%x, len %d\n",
vgpu->id, ret,
gp->gfn, pa, *(u32 *)p_data, bytes);
}
mutex_unlock(&gvt->lock);
return ret;
}
}
offset = intel_vgpu_gpa_to_mmio_offset(vgpu, pa);
if (WARN_ON(bytes > 8))
goto err;
if (reg_is_gtt(gvt, offset)) {
if (WARN_ON(!IS_ALIGNED(offset, 4) && !IS_ALIGNED(offset, 8)))
goto err;
if (WARN_ON(bytes != 4 && bytes != 8))
goto err;
if (WARN_ON(!reg_is_gtt(gvt, offset + bytes - 1)))
goto err;
ret = intel_vgpu_emulate_gtt_mmio_read(vgpu, offset,
p_data, bytes);
if (ret)
goto err;
mutex_unlock(&gvt->lock);
return ret;
}
if (WARN_ON_ONCE(!reg_is_mmio(gvt, offset))) {
ret = intel_gvt_hypervisor_read_gpa(vgpu, pa, p_data, bytes);
mutex_unlock(&gvt->lock);
return ret;
}
if (WARN_ON(!reg_is_mmio(gvt, offset + bytes - 1)))
goto err;
mmio = intel_gvt_find_mmio_info(gvt, rounddown(offset, 4));
if (!mmio && !vgpu->mmio.disable_warn_untrack) {
gvt_err("vgpu%d: read untracked MMIO %x len %d val %x\n",
vgpu->id, offset, bytes, *(u32 *)p_data);
if (offset == 0x206c) {
gvt_err("------------------------------------------\n");
gvt_err("vgpu%d: likely triggers a gfx reset\n",
vgpu->id);
gvt_err("------------------------------------------\n");
vgpu->mmio.disable_warn_untrack = true;
}
}
if (!intel_gvt_mmio_is_unalign(gvt, offset)) {
if (WARN_ON(!IS_ALIGNED(offset, bytes)))
goto err;
}
if (mmio) {
if (!intel_gvt_mmio_is_unalign(gvt, mmio->offset)) {
if (WARN_ON(offset + bytes > mmio->offset + mmio->size))
goto err;
if (WARN_ON(mmio->offset != offset))
goto err;
}
ret = mmio->read(vgpu, offset, p_data, bytes);
} else
ret = intel_vgpu_default_mmio_read(vgpu, offset, p_data, bytes);
if (ret)
goto err;
intel_gvt_mmio_set_accessed(gvt, offset);
mutex_unlock(&gvt->lock);
return 0;
err:
gvt_err("vgpu%d: fail to emulate MMIO read %08x len %d\n",
vgpu->id, offset, bytes);
mutex_unlock(&gvt->lock);
return ret;
}
/**
* intel_vgpu_emulate_mmio_write - emulate MMIO write
* @vgpu: a vGPU
* @pa: guest physical address
* @p_data: write data buffer
* @bytes: access data length
*
* Returns:
* Zero on success, negative error code if failed
*/
int intel_vgpu_emulate_mmio_write(void *__vgpu, uint64_t pa,
void *p_data, unsigned int bytes)
{
struct intel_vgpu *vgpu = __vgpu;
struct intel_gvt *gvt = vgpu->gvt;
struct intel_gvt_mmio_info *mmio;
unsigned int offset = 0;
u32 old_vreg = 0, old_sreg = 0;
int ret = -EINVAL;
mutex_lock(&gvt->lock);
if (atomic_read(&vgpu->gtt.n_write_protected_guest_page)) {
struct intel_vgpu_guest_page *gp;
gp = intel_vgpu_find_guest_page(vgpu, pa >> PAGE_SHIFT);
if (gp) {
ret = gp->handler(gp, pa, p_data, bytes);
if (ret) {
gvt_err("vgpu%d: guest page write error %d, "
"gfn 0x%lx, pa 0x%llx, var 0x%x, len %d\n",
vgpu->id, ret,
gp->gfn, pa, *(u32 *)p_data, bytes);
}
mutex_unlock(&gvt->lock);
return ret;
}
}
offset = intel_vgpu_gpa_to_mmio_offset(vgpu, pa);
if (WARN_ON(bytes > 8))
goto err;
if (reg_is_gtt(gvt, offset)) {
if (WARN_ON(!IS_ALIGNED(offset, 4) && !IS_ALIGNED(offset, 8)))
goto err;
if (WARN_ON(bytes != 4 && bytes != 8))
goto err;
if (WARN_ON(!reg_is_gtt(gvt, offset + bytes - 1)))
goto err;
ret = intel_vgpu_emulate_gtt_mmio_write(vgpu, offset,
p_data, bytes);
if (ret)
goto err;
mutex_unlock(&gvt->lock);
return ret;
}
if (WARN_ON_ONCE(!reg_is_mmio(gvt, offset))) {
ret = intel_gvt_hypervisor_write_gpa(vgpu, pa, p_data, bytes);
mutex_unlock(&gvt->lock);
return ret;
}
mmio = intel_gvt_find_mmio_info(gvt, rounddown(offset, 4));
if (!mmio && !vgpu->mmio.disable_warn_untrack)
gvt_err("vgpu%d: write untracked MMIO %x len %d val %x\n",
vgpu->id, offset, bytes, *(u32 *)p_data);
if (!intel_gvt_mmio_is_unalign(gvt, offset)) {
if (WARN_ON(!IS_ALIGNED(offset, bytes)))
goto err;
}
if (mmio) {
u64 ro_mask = mmio->ro_mask;
if (!intel_gvt_mmio_is_unalign(gvt, mmio->offset)) {
if (WARN_ON(offset + bytes > mmio->offset + mmio->size))
goto err;
if (WARN_ON(mmio->offset != offset))
goto err;
}
if (intel_gvt_mmio_has_mode_mask(gvt, mmio->offset)) {
old_vreg = vgpu_vreg(vgpu, offset);
old_sreg = vgpu_sreg(vgpu, offset);
}
if (!ro_mask) {
ret = mmio->write(vgpu, offset, p_data, bytes);
} else {
/* Protect RO bits like HW */
u64 data = 0;
/* all register bits are RO. */
if (ro_mask == ~(u64)0) {
gvt_err("vgpu%d: try to write RO reg %x\n",
vgpu->id, offset);
ret = 0;
goto out;
}
/* keep the RO bits in the virtual register */
memcpy(&data, p_data, bytes);
data &= ~mmio->ro_mask;
data |= vgpu_vreg(vgpu, offset) & mmio->ro_mask;
ret = mmio->write(vgpu, offset, &data, bytes);
}
/* higher 16bits of mode ctl regs are mask bits for change */
if (intel_gvt_mmio_has_mode_mask(gvt, mmio->offset)) {
u32 mask = vgpu_vreg(vgpu, offset) >> 16;
vgpu_vreg(vgpu, offset) = (old_vreg & ~mask)
| (vgpu_vreg(vgpu, offset) & mask);
vgpu_sreg(vgpu, offset) = (old_sreg & ~mask)
| (vgpu_sreg(vgpu, offset) & mask);
}
} else
ret = intel_vgpu_default_mmio_write(vgpu, offset, p_data,
bytes);
if (ret)
goto err;
out:
intel_gvt_mmio_set_accessed(gvt, offset);
mutex_unlock(&gvt->lock);
return 0;
err:
gvt_err("vgpu%d: fail to emulate MMIO write %08x len %d\n",
vgpu->id, offset, bytes);
mutex_unlock(&gvt->lock);
return ret;
}
/*
* Copyright(c) 2011-2016 Intel Corporation. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice (including the next
* paragraph) shall be included in all copies or substantial portions of the
* Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*
* Authors:
* Ke Yu
* Kevin Tian <kevin.tian@intel.com>
* Dexuan Cui
*
* Contributors:
* Tina Zhang <tina.zhang@intel.com>
* Min He <min.he@intel.com>
* Niu Bing <bing.niu@intel.com>
* Zhi Wang <zhi.a.wang@intel.com>
*
*/
#ifndef _GVT_MMIO_H_
#define _GVT_MMIO_H_
struct intel_gvt;
struct intel_vgpu;
#define D_SNB (1 << 0)
#define D_IVB (1 << 1)
#define D_HSW (1 << 2)
#define D_BDW (1 << 3)
#define D_SKL (1 << 4)
#define D_GEN9PLUS (D_SKL)
#define D_GEN8PLUS (D_BDW | D_SKL)
#define D_GEN75PLUS (D_HSW | D_BDW | D_SKL)
#define D_GEN7PLUS (D_IVB | D_HSW | D_BDW | D_SKL)
#define D_SKL_PLUS (D_SKL)
#define D_BDW_PLUS (D_BDW | D_SKL)
#define D_HSW_PLUS (D_HSW | D_BDW | D_SKL)
#define D_IVB_PLUS (D_IVB | D_HSW | D_BDW | D_SKL)
#define D_PRE_BDW (D_SNB | D_IVB | D_HSW)
#define D_PRE_SKL (D_SNB | D_IVB | D_HSW | D_BDW)
#define D_ALL (D_SNB | D_IVB | D_HSW | D_BDW | D_SKL)
struct intel_gvt_mmio_info {
u32 offset;
u32 size;
u32 length;
u32 addr_mask;
u64 ro_mask;
u32 device;
int (*read)(struct intel_vgpu *, unsigned int, void *, unsigned int);
int (*write)(struct intel_vgpu *, unsigned int, void *, unsigned int);
u32 addr_range;
struct hlist_node node;
};
unsigned long intel_gvt_get_device_type(struct intel_gvt *gvt);
bool intel_gvt_match_device(struct intel_gvt *gvt, unsigned long device);
int intel_gvt_setup_mmio_info(struct intel_gvt *gvt);
void intel_gvt_clean_mmio_info(struct intel_gvt *gvt);
struct intel_gvt_mmio_info *intel_gvt_find_mmio_info(struct intel_gvt *gvt,
unsigned int offset);
#define INTEL_GVT_MMIO_OFFSET(reg) ({ \
typeof(reg) __reg = reg; \
u32 *offset = (u32 *)&__reg; \
*offset; \
})
int intel_vgpu_gpa_to_mmio_offset(struct intel_vgpu *vgpu, u64 gpa);
int intel_vgpu_emulate_mmio_read(void *__vgpu, u64 pa, void *p_data,
unsigned int bytes);
int intel_vgpu_emulate_mmio_write(void *__vgpu, u64 pa, void *p_data,
unsigned int bytes);
bool intel_gvt_mmio_is_cmd_access(struct intel_gvt *gvt,
unsigned int offset);
bool intel_gvt_mmio_is_unalign(struct intel_gvt *gvt, unsigned int offset);
void intel_gvt_mmio_set_accessed(struct intel_gvt *gvt, unsigned int offset);
void intel_gvt_mmio_set_cmd_accessed(struct intel_gvt *gvt,
unsigned int offset);
bool intel_gvt_mmio_has_mode_mask(struct intel_gvt *gvt, unsigned int offset);
int intel_vgpu_default_mmio_read(struct intel_vgpu *vgpu, unsigned int offset,
void *p_data, unsigned int bytes);
int intel_vgpu_default_mmio_write(struct intel_vgpu *vgpu, unsigned int offset,
void *p_data, unsigned int bytes);
#endif
@@ -19,6 +19,15 @@
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*
* Authors:
* Eddie Dong <eddie.dong@intel.com>
* Dexuan Cui
* Jike Song <jike.song@intel.com>
*
* Contributors:
* Zhi Wang <zhi.a.wang@intel.com>
*
*/
#ifndef _GVT_MPT_H_
@@ -46,4 +55,215 @@ static inline int intel_gvt_hypervisor_detect_host(void)
return intel_gvt_host.mpt->detect_host();
}
/**
* intel_gvt_hypervisor_attach_vgpu - call the hypervisor to initialize
* vGPU-related state inside the hypervisor.
*
* Returns:
* Zero on success, negative error code if failed.
*/
static inline int intel_gvt_hypervisor_attach_vgpu(struct intel_vgpu *vgpu)
{
return intel_gvt_host.mpt->attach_vgpu(vgpu, &vgpu->handle);
}
/**
* intel_gvt_hypervisor_detach_vgpu - call the hypervisor to release
* vGPU-related state inside the hypervisor.
*
* Returns:
* Zero on success, negative error code if failed.
*/
static inline void intel_gvt_hypervisor_detach_vgpu(struct intel_vgpu *vgpu)
{
intel_gvt_host.mpt->detach_vgpu(vgpu->handle);
}
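/*
 * Offsets of the MSI capability fields relative to the capability base in
 * PCI config space: message control at +2, message address at +4 and message
 * data at +8 (the 32-bit address MSI layout, the only format supported here).
 */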
#define MSI_CAP_CONTROL(offset) (offset + 2)
#define MSI_CAP_ADDRESS(offset) (offset + 4)
#define MSI_CAP_DATA(offset) (offset + 8)
#define MSI_CAP_EN 0x1
/**
* intel_gvt_hypervisor_inject_msi - inject an MSI interrupt into a vGPU
*
* Returns:
* Zero on success, negative error code if failed.
*/
static inline int intel_gvt_hypervisor_inject_msi(struct intel_vgpu *vgpu)
{
unsigned long offset = vgpu->gvt->device_info.msi_cap_offset;
u16 control, data;
u32 addr;
int ret;
control = *(u16 *)(vgpu_cfg_space(vgpu) + MSI_CAP_CONTROL(offset));
addr = *(u32 *)(vgpu_cfg_space(vgpu) + MSI_CAP_ADDRESS(offset));
data = *(u16 *)(vgpu_cfg_space(vgpu) + MSI_CAP_DATA(offset));
/* Do not generate an MSI if MSIEN is disabled */
if (!(control & MSI_CAP_EN))
return 0;
if (WARN(control & GENMASK(15, 1), "only support one MSI format\n"))
return -EINVAL;
gvt_dbg_irq("vgpu%d: inject msi address %x data%x\n", vgpu->id, addr,
data);
ret = intel_gvt_host.mpt->inject_msi(vgpu->handle, addr, data);
if (ret)
return ret;
return 0;
}
/**
* intel_gvt_hypervisor_virt_to_mfn - translate a host VA into MFN
* @p: host kernel virtual address
*
* Returns:
* MFN on success, INTEL_GVT_INVALID_ADDR if failed.
*/
static inline unsigned long intel_gvt_hypervisor_virt_to_mfn(void *p)
{
return intel_gvt_host.mpt->from_virt_to_mfn(p);
}
/**
* intel_gvt_hypervisor_set_wp_page - set a guest page to write-protected
* @vgpu: a vGPU
* @p: intel_vgpu_guest_page
*
* Returns:
* Zero on success, negative error code if failed.
*/
static inline int intel_gvt_hypervisor_set_wp_page(struct intel_vgpu *vgpu,
struct intel_vgpu_guest_page *p)
{
int ret;
if (p->writeprotection)
return 0;
ret = intel_gvt_host.mpt->set_wp_page(vgpu->handle, p->gfn);
if (ret)
return ret;
p->writeprotection = true;
atomic_inc(&vgpu->gtt.n_write_protected_guest_page);
return 0;
}
/**
* intel_gvt_hypervisor_unset_wp_page - remove the write-protection of a
* guest page
* @vgpu: a vGPU
* @p: intel_vgpu_guest_page
*
* Returns:
* Zero on success, negative error code if failed.
*/
static inline int intel_gvt_hypervisor_unset_wp_page(struct intel_vgpu *vgpu,
struct intel_vgpu_guest_page *p)
{
int ret;
if (!p->writeprotection)
return 0;
ret = intel_gvt_host.mpt->unset_wp_page(vgpu->handle, p->gfn);
if (ret)
return ret;
p->writeprotection = false;
atomic_dec(&vgpu->gtt.n_write_protected_guest_page);
return 0;
}
/**
* intel_gvt_hypervisor_read_gpa - copy data from GPA to host data buffer
* @vgpu: a vGPU
* @gpa: guest physical address
* @buf: host data buffer
* @len: data length
*
* Returns:
* Zero on success, negative error code if failed.
*/
static inline int intel_gvt_hypervisor_read_gpa(struct intel_vgpu *vgpu,
unsigned long gpa, void *buf, unsigned long len)
{
return intel_gvt_host.mpt->read_gpa(vgpu->handle, gpa, buf, len);
}
/**
* intel_gvt_hypervisor_write_gpa - copy data from host data buffer to GPA
* @vgpu: a vGPU
* @gpa: guest physical address
* @buf: host data buffer
* @len: data length
*
* Returns:
* Zero on success, negative error code if failed.
*/
static inline int intel_gvt_hypervisor_write_gpa(struct intel_vgpu *vgpu,
unsigned long gpa, void *buf, unsigned long len)
{
return intel_gvt_host.mpt->write_gpa(vgpu->handle, gpa, buf, len);
}
/**
* intel_gvt_hypervisor_gfn_to_mfn - translate a GFN to MFN
* @vgpu: a vGPU
* @gfn: guest pfn
*
* Returns:
* MFN on success, INTEL_GVT_INVALID_ADDR if failed.
*/
static inline unsigned long intel_gvt_hypervisor_gfn_to_mfn(
struct intel_vgpu *vgpu, unsigned long gfn)
{
return intel_gvt_host.mpt->gfn_to_mfn(vgpu->handle, gfn);
}
enum {
GVT_MAP_APERTURE = 0,
GVT_MAP_OPREGION,
};
/**
* intel_gvt_hypervisor_map_gfn_to_mfn - map a GFN region to MFN
* @vgpu: a vGPU
* @gfn: guest PFN
* @mfn: host PFN
* @nr: number of PFNs
* @map: map or unmap
* @type: map type
*
* Returns:
* Zero on success, negative error code if failed.
*/
static inline int intel_gvt_hypervisor_map_gfn_to_mfn(
struct intel_vgpu *vgpu, unsigned long gfn,
unsigned long mfn, unsigned int nr,
bool map, int type)
{
return intel_gvt_host.mpt->map_gfn_to_mfn(vgpu->handle, gfn, mfn, nr,
map, type);
}
/**
* intel_gvt_hypervisor_set_trap_area - Trap a guest PA region
* @vgpu: a vGPU
* @start: the beginning of the guest physical address region
* @end: the end of the guest physical address region
* @map: map or unmap
*
* Returns:
* Zero on success, negative error code if failed.
*/
static inline int intel_gvt_hypervisor_set_trap_area(
struct intel_vgpu *vgpu, u64 start, u64 end, bool map)
{
return intel_gvt_host.mpt->set_trap_area(vgpu->handle, start, end, map);
}
#endif /* _GVT_MPT_H_ */
/*
* Copyright(c) 2011-2016 Intel Corporation. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice (including the next
* paragraph) shall be included in all copies or substantial portions of the
* Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*/
#include <linux/acpi.h>
#include "i915_drv.h"
static int init_vgpu_opregion(struct intel_vgpu *vgpu, u32 gpa)
{
void *host_va = vgpu->gvt->opregion.opregion_va;
u8 *buf;
int i;
if (WARN((vgpu_opregion(vgpu)->va),
"vgpu%d: opregion has been initialized already.\n",
vgpu->id))
return -EINVAL;
vgpu_opregion(vgpu)->va = (void *)__get_free_pages(GFP_ATOMIC |
GFP_DMA32 | __GFP_ZERO,
INTEL_GVT_OPREGION_PORDER);
if (!vgpu_opregion(vgpu)->va)
return -ENOMEM;
memcpy_fromio(vgpu_opregion(vgpu)->va, host_va,
INTEL_GVT_OPREGION_SIZE);
for (i = 0; i < INTEL_GVT_OPREGION_PAGES; i++)
vgpu_opregion(vgpu)->gfn[i] = (gpa >> PAGE_SHIFT) + i;
/* For an unknown reason, the value in the LID field is incorrect,
* which blocks the Windows guest, so work around it by forcing it
* to "OPEN"
*/
buf = (u8 *)vgpu_opregion(vgpu)->va;
buf[INTEL_GVT_OPREGION_CLID] = 0x3;
return 0;
}
static int map_vgpu_opregion(struct intel_vgpu *vgpu, bool map)
{
u64 mfn;
int i, ret;
for (i = 0; i < INTEL_GVT_OPREGION_PAGES; i++) {
mfn = intel_gvt_hypervisor_virt_to_mfn(vgpu_opregion(vgpu)
+ i * PAGE_SIZE);
if (mfn == INTEL_GVT_INVALID_ADDR) {
gvt_err("fail to get MFN from VA\n");
return -EINVAL;
}
ret = intel_gvt_hypervisor_map_gfn_to_mfn(vgpu,
vgpu_opregion(vgpu)->gfn[i],
mfn, 1, map, GVT_MAP_OPREGION);
if (ret) {
gvt_err("fail to map GFN to MFN, errno: %d\n", ret);
return ret;
}
}
return 0;
}
/**
* intel_vgpu_clean_opregion - clean up the vGPU OpRegion emulation state
* @vgpu: a vGPU
*
*/
void intel_vgpu_clean_opregion(struct intel_vgpu *vgpu)
{
int i;
gvt_dbg_core("vgpu%d: clean vgpu opregion\n", vgpu->id);
if (!vgpu_opregion(vgpu)->va)
return;
if (intel_gvt_host.hypervisor_type == INTEL_GVT_HYPERVISOR_KVM) {
vunmap(vgpu_opregion(vgpu)->va);
for (i = 0; i < INTEL_GVT_OPREGION_PAGES; i++) {
if (vgpu_opregion(vgpu)->pages[i]) {
put_page(vgpu_opregion(vgpu)->pages[i]);
vgpu_opregion(vgpu)->pages[i] = NULL;
}
}
} else {
map_vgpu_opregion(vgpu, false);
free_pages((unsigned long)vgpu_opregion(vgpu)->va,
INTEL_GVT_OPREGION_PORDER);
}
vgpu_opregion(vgpu)->va = NULL;
}
/**
* intel_vgpu_init_opregion - initialize the vGPU OpRegion emulation state
* @vgpu: a vGPU
* @gpa: guest physical address of opregion
*
* Returns:
* Zero on success, negative error code if failed.
*/
int intel_vgpu_init_opregion(struct intel_vgpu *vgpu, u32 gpa)
{
int ret;
gvt_dbg_core("vgpu%d: init vgpu opregion\n", vgpu->id);
if (intel_gvt_host.hypervisor_type == INTEL_GVT_HYPERVISOR_XEN) {
gvt_dbg_core("emulate opregion from kernel\n");
ret = init_vgpu_opregion(vgpu, gpa);
if (ret)
return ret;
ret = map_vgpu_opregion(vgpu, true);
if (ret)
return ret;
} else {
gvt_dbg_core("emulate opregion from userspace\n");
/*
* If the opregion pages are not allocated from the host kernel,
* most of the parameters are meaningless.
*/
ret = intel_gvt_hypervisor_map_gfn_to_mfn(vgpu,
0, /* not used */
0, /* not used */
2, /* not used */
1,
GVT_MAP_OPREGION);
if (ret)
return ret;
}
return 0;
}
/**
* intel_gvt_clean_opregion - clean up host OpRegion related state
* @gvt: a GVT device
*
*/
void intel_gvt_clean_opregion(struct intel_gvt *gvt)
{
iounmap(gvt->opregion.opregion_va);
gvt->opregion.opregion_va = NULL;
}
/**
* intel_gvt_init_opregion - initialize host OpRegion related state
* @gvt: a GVT device
*
* Returns:
* Zero on success, negative error code if failed.
*/
int intel_gvt_init_opregion(struct intel_gvt *gvt)
{
gvt_dbg_core("init host opregion\n");
pci_read_config_dword(gvt->dev_priv->drm.pdev, INTEL_GVT_PCI_OPREGION,
&gvt->opregion.opregion_pa);
gvt->opregion.opregion_va = acpi_os_ioremap(gvt->opregion.opregion_pa,
INTEL_GVT_OPREGION_SIZE);
if (!gvt->opregion.opregion_va) {
gvt_err("fail to map host opregion\n");
return -EFAULT;
}
return 0;
}
#define GVT_OPREGION_FUNC(scic) \
({ \
u32 __ret; \
__ret = (scic & OPREGION_SCIC_FUNC_MASK) >> \
OPREGION_SCIC_FUNC_SHIFT; \
__ret; \
})
#define GVT_OPREGION_SUBFUNC(scic) \
({ \
u32 __ret; \
__ret = (scic & OPREGION_SCIC_SUBFUNC_MASK) >> \
OPREGION_SCIC_SUBFUNC_SHIFT; \
__ret; \
})
static const char *opregion_func_name(u32 func)
{
const char *name = NULL;
switch (func) {
case 0 ... 3:
case 5:
case 7 ... 15:
name = "Reserved";
break;
case 4:
name = "Get BIOS Data";
break;
case 6:
name = "System BIOS Callbacks";
break;
default:
name = "Unknown";
break;
}
return name;
}
static const char *opregion_subfunc_name(u32 subfunc)
{
const char *name = NULL;
switch (subfunc) {
case 0:
name = "Supported Calls";
break;
case 1:
name = "Requested Callbacks";
break;
case 2 ... 3:
case 8 ... 9:
name = "Reserved";
break;
case 5:
name = "Boot Display";
break;
case 6:
name = "TV-Standard/Video-Connector";
break;
case 7:
name = "Internal Graphics";
break;
case 10:
name = "Spread Spectrum Clocks";
break;
case 11:
name = "Get AKSV";
break;
default:
name = "Unknown";
break;
}
return name;
};
static bool querying_capabilities(u32 scic)
{
u32 func, subfunc;
func = GVT_OPREGION_FUNC(scic);
subfunc = GVT_OPREGION_SUBFUNC(scic);
if ((func == INTEL_GVT_OPREGION_SCIC_F_GETBIOSDATA &&
subfunc == INTEL_GVT_OPREGION_SCIC_SF_SUPPRTEDCALLS)
|| (func == INTEL_GVT_OPREGION_SCIC_F_GETBIOSDATA &&
subfunc == INTEL_GVT_OPREGION_SCIC_SF_REQEUSTEDCALLBACKS)
|| (func == INTEL_GVT_OPREGION_SCIC_F_GETBIOSCALLBACKS &&
subfunc == INTEL_GVT_OPREGION_SCIC_SF_SUPPRTEDCALLS)) {
return true;
}
return false;
}
/**
* intel_vgpu_emulate_opregion_request - emulating OpRegion request
* @vgpu: a vGPU
* @swsci: SWSCI request
*
* Returns:
* Zero on success, negative error code if failed
*/
int intel_vgpu_emulate_opregion_request(struct intel_vgpu *vgpu, u32 swsci)
{
u32 *scic, *parm;
u32 func, subfunc;
scic = vgpu_opregion(vgpu)->va + INTEL_GVT_OPREGION_SCIC;
parm = vgpu_opregion(vgpu)->va + INTEL_GVT_OPREGION_PARM;
if (!(swsci & SWSCI_SCI_SELECT)) {
gvt_err("vgpu%d: requesting SMI service\n", vgpu->id);
return 0;
}
/* ignore non 0->1 transitions */
if ((vgpu_cfg_space(vgpu)[INTEL_GVT_PCI_SWSCI]
& SWSCI_SCI_TRIGGER) ||
!(swsci & SWSCI_SCI_TRIGGER)) {
return 0;
}
func = GVT_OPREGION_FUNC(*scic);
subfunc = GVT_OPREGION_SUBFUNC(*scic);
if (!querying_capabilities(*scic)) {
gvt_err("vgpu%d: requesting runtime service: func \"%s\","
" subfunc \"%s\"\n",
vgpu->id,
opregion_func_name(func),
opregion_subfunc_name(subfunc));
/*
* emulate exit status of function call, '0' means
* "failure, generic, unsupported or unknown cause"
*/
*scic &= ~OPREGION_SCIC_EXIT_MASK;
return 0;
}
*scic = 0;
*parm = 0;
return 0;
}
/*
* Copyright(c) 2011-2016 Intel Corporation. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice (including the next
* paragraph) shall be included in all copies or substantial portions of the
* Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*/
#ifndef _GVT_REG_H
#define _GVT_REG_H
#define INTEL_GVT_PCI_CLASS_VGA_OTHER 0x80
#define INTEL_GVT_PCI_GMCH_CONTROL 0x50
#define BDW_GMCH_GMS_SHIFT 8
#define BDW_GMCH_GMS_MASK 0xff
#define INTEL_GVT_PCI_SWSCI 0xe8
#define SWSCI_SCI_SELECT (1 << 15)
#define SWSCI_SCI_TRIGGER 1
#define INTEL_GVT_PCI_OPREGION 0xfc
#define INTEL_GVT_OPREGION_CLID 0x1AC
#define INTEL_GVT_OPREGION_SCIC 0x200
#define OPREGION_SCIC_FUNC_MASK 0x1E
#define OPREGION_SCIC_FUNC_SHIFT 1
#define OPREGION_SCIC_SUBFUNC_MASK 0xFF00
#define OPREGION_SCIC_SUBFUNC_SHIFT 8
#define OPREGION_SCIC_EXIT_MASK 0xE0
#define INTEL_GVT_OPREGION_SCIC_F_GETBIOSDATA 4
#define INTEL_GVT_OPREGION_SCIC_F_GETBIOSCALLBACKS 6
#define INTEL_GVT_OPREGION_SCIC_SF_SUPPRTEDCALLS 0
#define INTEL_GVT_OPREGION_SCIC_SF_REQEUSTEDCALLBACKS 1
#define INTEL_GVT_OPREGION_PARM 0x204
#define INTEL_GVT_OPREGION_PAGES 2
#define INTEL_GVT_OPREGION_PORDER 1
#define INTEL_GVT_OPREGION_SIZE (2 * 4096)
#define VGT_SPRSTRIDE(pipe) _PIPE(pipe, _SPRA_STRIDE, _PLANE_STRIDE_2_B)
#define _REG_VECS_EXCC 0x1A028
#define _REG_VCS2_EXCC 0x1c028
#define _REG_701C0(pipe, plane) (0x701c0 + (pipe) * 0x1000 + ((plane) - 1) * 0x100)
#define _REG_701C4(pipe, plane) (0x701c4 + (pipe) * 0x1000 + ((plane) - 1) * 0x100)
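/*
 * Mode-control registers carry a per-bit write mask in their upper 16 bits;
 * this is true when the written value sets the mask bit that corresponds to
 * the given low 16-bit flag, i.e. the write actually targets that flag.
 */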
#define GFX_MODE_BIT_SET_IN_MASK(val, bit) \
((((bit) & 0xffff0000) == 0) && !!((val) & (((bit) << 16))))
#define FORCEWAKE_RENDER_GEN9_REG 0xa278
#define FORCEWAKE_ACK_RENDER_GEN9_REG 0x0D84
#define FORCEWAKE_BLITTER_GEN9_REG 0xa188
#define FORCEWAKE_ACK_BLITTER_GEN9_REG 0x130044
#define FORCEWAKE_MEDIA_GEN9_REG 0xa270
#define FORCEWAKE_ACK_MEDIA_GEN9_REG 0x0D88
#define FORCEWAKE_ACK_HSW_REG 0x130044
#define RB_HEAD_OFF_MASK ((1U << 21) - (1U << 2))
#define RB_TAIL_OFF_MASK ((1U << 21) - (1U << 3))
#define RB_TAIL_SIZE_MASK ((1U << 21) - (1U << 12))
#define _RING_CTL_BUF_SIZE(ctl) (((ctl) & RB_TAIL_SIZE_MASK) + GTT_PAGE_SIZE)
#endif
/*
* Copyright(c) 2011-2016 Intel Corporation. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice (including the next
* paragraph) shall be included in all copies or substantial portions of the
* Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*
* Authors:
* Eddie Dong <eddie.dong@intel.com>
* Kevin Tian <kevin.tian@intel.com>
*
* Contributors:
* Zhi Wang <zhi.a.wang@intel.com>
* Changbin Du <changbin.du@intel.com>
* Zhenyu Wang <zhenyuw@linux.intel.com>
* Tina Zhang <tina.zhang@intel.com>
* Bing Niu <bing.niu@intel.com>
*
*/
#include "i915_drv.h"
struct render_mmio {
int ring_id;
i915_reg_t reg;
u32 mask;
bool in_context;
u32 value;
};
static struct render_mmio gen8_render_mmio_list[] = {
{RCS, _MMIO(0x229c), 0xffff, false},
{RCS, _MMIO(0x2248), 0x0, false},
{RCS, _MMIO(0x2098), 0x0, false},
{RCS, _MMIO(0x20c0), 0xffff, true},
{RCS, _MMIO(0x24d0), 0, false},
{RCS, _MMIO(0x24d4), 0, false},
{RCS, _MMIO(0x24d8), 0, false},
{RCS, _MMIO(0x24dc), 0, false},
{RCS, _MMIO(0x7004), 0xffff, true},
{RCS, _MMIO(0x7008), 0xffff, true},
{RCS, _MMIO(0x7000), 0xffff, true},
{RCS, _MMIO(0x7010), 0xffff, true},
{RCS, _MMIO(0x7300), 0xffff, true},
{RCS, _MMIO(0x83a4), 0xffff, true},
{BCS, _MMIO(0x2229c), 0xffff, false},
{BCS, _MMIO(0x2209c), 0xffff, false},
{BCS, _MMIO(0x220c0), 0xffff, false},
{BCS, _MMIO(0x22098), 0x0, false},
{BCS, _MMIO(0x22028), 0x0, false},
};
static struct render_mmio gen9_render_mmio_list[] = {
{RCS, _MMIO(0x229c), 0xffff, false},
{RCS, _MMIO(0x2248), 0x0, false},
{RCS, _MMIO(0x2098), 0x0, false},
{RCS, _MMIO(0x20c0), 0xffff, true},
{RCS, _MMIO(0x24d0), 0, false},
{RCS, _MMIO(0x24d4), 0, false},
{RCS, _MMIO(0x24d8), 0, false},
{RCS, _MMIO(0x24dc), 0, false},
{RCS, _MMIO(0x7004), 0xffff, true},
{RCS, _MMIO(0x7008), 0xffff, true},
{RCS, _MMIO(0x7000), 0xffff, true},
{RCS, _MMIO(0x7010), 0xffff, true},
{RCS, _MMIO(0x7300), 0xffff, true},
{RCS, _MMIO(0x83a4), 0xffff, true},
{RCS, _MMIO(0x40e0), 0, false},
{RCS, _MMIO(0x40e4), 0, false},
{RCS, _MMIO(0x2580), 0xffff, true},
{RCS, _MMIO(0x7014), 0xffff, true},
{RCS, _MMIO(0x20ec), 0xffff, false},
{RCS, _MMIO(0xb118), 0, false},
{RCS, _MMIO(0xe100), 0xffff, true},
{RCS, _MMIO(0xe180), 0xffff, true},
{RCS, _MMIO(0xe184), 0xffff, true},
{RCS, _MMIO(0xe188), 0xffff, true},
{RCS, _MMIO(0xe194), 0xffff, true},
{RCS, _MMIO(0x4de0), 0, false},
{RCS, _MMIO(0x4de4), 0, false},
{RCS, _MMIO(0x4de8), 0, false},
{RCS, _MMIO(0x4dec), 0, false},
{RCS, _MMIO(0x4df0), 0, false},
{RCS, _MMIO(0x4df4), 0, false},
{BCS, _MMIO(0x2229c), 0xffff, false},
{BCS, _MMIO(0x2209c), 0xffff, false},
{BCS, _MMIO(0x220c0), 0xffff, false},
{BCS, _MMIO(0x22098), 0x0, false},
{BCS, _MMIO(0x22028), 0x0, false},
{VCS2, _MMIO(0x1c028), 0xffff, false},
{VECS, _MMIO(0x1a028), 0xffff, false},
};
static u32 gen9_render_mocs[I915_NUM_ENGINES][64];
static u32 gen9_render_mocs_L3[32];
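/*
 * Per-ring TLB invalidation: if a vGPU flagged this ring as having a pending
 * TLB flush, write 1 to the ring's invalidate register and wait for the
 * hardware to clear it back to 0.
 */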
static void handle_tlb_pending_event(struct intel_vgpu *vgpu, int ring_id)
{
struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv;
i915_reg_t reg;
u32 regs[] = {
[RCS] = 0x4260,
[VCS] = 0x4264,
[VCS2] = 0x4268,
[BCS] = 0x426c,
[VECS] = 0x4270,
};
if (WARN_ON(ring_id >= ARRAY_SIZE(regs)))
return;
if (!test_and_clear_bit(ring_id, (void *)vgpu->tlb_handle_pending))
return;
reg = _MMIO(regs[ring_id]);
I915_WRITE(reg, 0x1);
if (wait_for_atomic((I915_READ(reg) == 0), 50))
gvt_err("timeout in invalidate ring (%d) tlb\n", ring_id);
gvt_dbg_core("invalidate TLB for ring %d\n", ring_id);
}
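/*
 * Switch the MOCS (memory object control state) registers between host and
 * vGPU for a ring: save the current hardware values and load the vGPU's
 * virtual ones; restore_mocs() does the reverse on switch-out. Only
 * exercised on Skylake in this code path.
 */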
static void load_mocs(struct intel_vgpu *vgpu, int ring_id)
{
struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv;
i915_reg_t offset, l3_offset;
u32 regs[] = {
[RCS] = 0xc800,
[VCS] = 0xc900,
[VCS2] = 0xca00,
[BCS] = 0xcc00,
[VECS] = 0xcb00,
};
int i;
if (WARN_ON(ring_id >= ARRAY_SIZE(regs)))
return;
if (!IS_SKYLAKE(dev_priv))
return;
offset.reg = regs[ring_id];
for (i = 0; i < 64; i++) {
gen9_render_mocs[ring_id][i] = I915_READ(offset);
I915_WRITE(offset, vgpu_vreg(vgpu, offset));
POSTING_READ(offset);
offset.reg += 4;
}
if (ring_id == RCS) {
l3_offset.reg = 0xb020;
for (i = 0; i < 32; i++) {
gen9_render_mocs_L3[i] = I915_READ(l3_offset);
I915_WRITE(l3_offset, vgpu_vreg(vgpu, l3_offset));
POSTING_READ(l3_offset);
l3_offset.reg += 4;
}
}
}
static void restore_mocs(struct intel_vgpu *vgpu, int ring_id)
{
struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv;
i915_reg_t offset, l3_offset;
u32 regs[] = {
[RCS] = 0xc800,
[VCS] = 0xc900,
[VCS2] = 0xca00,
[BCS] = 0xcc00,
[VECS] = 0xcb00,
};
int i;
if (WARN_ON(ring_id >= ARRAY_SIZE(regs)))
return;
if (!IS_SKYLAKE(dev_priv))
return;
offset.reg = regs[ring_id];
for (i = 0; i < 64; i++) {
vgpu_vreg(vgpu, offset) = I915_READ(offset);
I915_WRITE(offset, gen9_render_mocs[ring_id][i]);
POSTING_READ(offset);
offset.reg += 4;
}
if (ring_id == RCS) {
l3_offset.reg = 0xb020;
for (i = 0; i < 32; i++) {
vgpu_vreg(vgpu, l3_offset) = I915_READ(l3_offset);
I915_WRITE(l3_offset, gen9_render_mocs_L3[i]);
POSTING_READ(l3_offset);
l3_offset.reg += 4;
}
}
}
void intel_gvt_load_render_mmio(struct intel_vgpu *vgpu, int ring_id)
{
struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv;
struct render_mmio *mmio;
u32 v;
int i, array_size;
if (IS_SKYLAKE(vgpu->gvt->dev_priv)) {
mmio = gen9_render_mmio_list;
array_size = ARRAY_SIZE(gen9_render_mmio_list);
load_mocs(vgpu, ring_id);
} else {
mmio = gen8_render_mmio_list;
array_size = ARRAY_SIZE(gen8_render_mmio_list);
}
for (i = 0; i < array_size; i++, mmio++) {
if (mmio->ring_id != ring_id)
continue;
mmio->value = I915_READ(mmio->reg);
if (mmio->mask)
v = vgpu_vreg(vgpu, mmio->reg) | (mmio->mask << 16);
else
v = vgpu_vreg(vgpu, mmio->reg);
I915_WRITE(mmio->reg, v);
POSTING_READ(mmio->reg);
gvt_dbg_render("load reg %x old %x new %x\n",
i915_mmio_reg_offset(mmio->reg),
mmio->value, v);
}
handle_tlb_pending_event(vgpu, ring_id);
}
void intel_gvt_restore_render_mmio(struct intel_vgpu *vgpu, int ring_id)
{
struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv;
struct render_mmio *mmio;
u32 v;
int i, array_size;
if (IS_SKYLAKE(dev_priv)) {
mmio = gen9_render_mmio_list;
array_size = ARRAY_SIZE(gen9_render_mmio_list);
restore_mocs(vgpu, ring_id);
} else {
mmio = gen8_render_mmio_list;
array_size = ARRAY_SIZE(gen8_render_mmio_list);
}
for (i = 0; i < array_size; i++, mmio++) {
if (mmio->ring_id != ring_id)
continue;
vgpu_vreg(vgpu, mmio->reg) = I915_READ(mmio->reg);
if (mmio->mask) {
vgpu_vreg(vgpu, mmio->reg) &= ~(mmio->mask << 16);
v = mmio->value | (mmio->mask << 16);
} else
v = mmio->value;
I915_WRITE(mmio->reg, v);
POSTING_READ(mmio->reg);
gvt_dbg_render("restore reg %x old %x new %x\n",
i915_mmio_reg_offset(mmio->reg),
mmio->value, v);
}
}
/*
* Copyright(c) 2011-2016 Intel Corporation. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice (including the next
* paragraph) shall be included in all copies or substantial portions of the
* Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*
* Authors:
* Eddie Dong <eddie.dong@intel.com>
* Kevin Tian <kevin.tian@intel.com>
*
* Contributors:
* Zhi Wang <zhi.a.wang@intel.com>
* Changbin Du <changbin.du@intel.com>
* Zhenyu Wang <zhenyuw@linux.intel.com>
* Tina Zhang <tina.zhang@intel.com>
* Bing Niu <bing.niu@intel.com>
*
*/
#ifndef __GVT_RENDER_H__
#define __GVT_RENDER_H__
void intel_gvt_load_render_mmio(struct intel_vgpu *vgpu, int ring_id);
void intel_gvt_restore_render_mmio(struct intel_vgpu *vgpu, int ring_id);
#endif
/*
* Copyright(c) 2011-2016 Intel Corporation. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice (including the next
* paragraph) shall be included in all copies or substantial portions of the
* Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*
* Authors:
* Anhua Xu
* Kevin Tian <kevin.tian@intel.com>
*
* Contributors:
* Min He <min.he@intel.com>
* Bing Niu <bing.niu@intel.com>
* Zhi Wang <zhi.a.wang@intel.com>
*
*/
#include "i915_drv.h"
static bool vgpu_has_pending_workload(struct intel_vgpu *vgpu)
{
struct intel_vgpu_execlist *execlist;
int i;
for (i = 0; i < I915_NUM_ENGINES; i++) {
execlist = &vgpu->execlist[i];
if (!list_empty(workload_q_head(vgpu, i)))
return true;
}
return false;
}
static void try_to_schedule_next_vgpu(struct intel_gvt *gvt)
{
struct intel_gvt_workload_scheduler *scheduler = &gvt->scheduler;
int i;
/* no target to schedule */
if (!scheduler->next_vgpu)
return;
gvt_dbg_sched("try to schedule next vgpu %d\n",
scheduler->next_vgpu->id);
/*
* after the flag is set, workload dispatch thread will
* stop dispatching workload for current vgpu
*/
scheduler->need_reschedule = true;
/* still have uncompleted workload? */
for (i = 0; i < I915_NUM_ENGINES; i++) {
if (scheduler->current_workload[i]) {
gvt_dbg_sched("still have running workload\n");
return;
}
}
gvt_dbg_sched("switch to next vgpu %d\n",
scheduler->next_vgpu->id);
/* switch current vgpu */
scheduler->current_vgpu = scheduler->next_vgpu;
scheduler->next_vgpu = NULL;
scheduler->need_reschedule = false;
/* wake up workload dispatch thread */
for (i = 0; i < I915_NUM_ENGINES; i++)
wake_up(&scheduler->waitq[i]);
}
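/*
 * "tbs" is a simple time-based scheduler: every GVT_DEFAULT_TIME_SLICE it
 * scans the run queue, starting after the currently scheduled vGPU, and
 * picks the first vGPU with pending workloads as the next scheduling target.
 */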
struct tbs_vgpu_data {
struct list_head list;
struct intel_vgpu *vgpu;
/* put some per-vgpu sched stats here */
};
struct tbs_sched_data {
struct intel_gvt *gvt;
struct delayed_work work;
unsigned long period;
struct list_head runq_head;
};
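/* Default scheduling period of the time-based scheduler: ~1 ms in jiffies */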
#define GVT_DEFAULT_TIME_SLICE (1 * HZ / 1000)
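/*
 * Periodic work of the time-based scheduler: starting after the current
 * vGPU (or from the run queue head), pick the first vGPU with a pending
 * workload as the next scheduling target and try to switch to it.
 */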
static void tbs_sched_func(struct work_struct *work)
{
struct tbs_sched_data *sched_data = container_of(work,
struct tbs_sched_data, work.work);
struct tbs_vgpu_data *vgpu_data;
struct intel_gvt *gvt = sched_data->gvt;
struct intel_gvt_workload_scheduler *scheduler = &gvt->scheduler;
struct intel_vgpu *vgpu = NULL;
struct list_head *pos, *head;
mutex_lock(&gvt->lock);
/* no vgpu on the run queue, or a next vgpu has already been picked */
if (list_empty(&sched_data->runq_head) || scheduler->next_vgpu)
goto out;
if (scheduler->current_vgpu) {
vgpu_data = scheduler->current_vgpu->sched_data;
head = &vgpu_data->list;
} else {
gvt_dbg_sched("no current vgpu search from q head\n");
head = &sched_data->runq_head;
}
/* search a vgpu with pending workload */
list_for_each(pos, head) {
if (pos == &sched_data->runq_head)
continue;
vgpu_data = container_of(pos, struct tbs_vgpu_data, list);
if (!vgpu_has_pending_workload(vgpu_data->vgpu))
continue;
vgpu = vgpu_data->vgpu;
break;
}
if (vgpu) {
scheduler->next_vgpu = vgpu;
gvt_dbg_sched("pick next vgpu %d\n", vgpu->id);
}
out:
if (scheduler->next_vgpu) {
gvt_dbg_sched("try to schedule next vgpu %d\n",
scheduler->next_vgpu->id);
try_to_schedule_next_vgpu(gvt);
}
/*
 * still have vgpu on the runq,
 * or the last schedule hasn't finished due to a running workload
 */
if (!list_empty(&sched_data->runq_head) || scheduler->next_vgpu)
schedule_delayed_work(&sched_data->work, sched_data->period);
mutex_unlock(&gvt->lock);
}
static int tbs_sched_init(struct intel_gvt *gvt)
{
struct intel_gvt_workload_scheduler *scheduler =
&gvt->scheduler;
struct tbs_sched_data *data;
data = kzalloc(sizeof(*data), GFP_KERNEL);
if (!data)
return -ENOMEM;
INIT_LIST_HEAD(&data->runq_head);
INIT_DELAYED_WORK(&data->work, tbs_sched_func);
data->period = GVT_DEFAULT_TIME_SLICE;
data->gvt = gvt;
scheduler->sched_data = data;
return 0;
}
static void tbs_sched_clean(struct intel_gvt *gvt)
{
struct intel_gvt_workload_scheduler *scheduler =
&gvt->scheduler;
struct tbs_sched_data *data = scheduler->sched_data;
cancel_delayed_work(&data->work);
kfree(data);
scheduler->sched_data = NULL;
}
static int tbs_sched_init_vgpu(struct intel_vgpu *vgpu)
{
struct tbs_vgpu_data *data;
data = kzalloc(sizeof(*data), GFP_KERNEL);
if (!data)
return -ENOMEM;
data->vgpu = vgpu;
INIT_LIST_HEAD(&data->list);
vgpu->sched_data = data;
return 0;
}
static void tbs_sched_clean_vgpu(struct intel_vgpu *vgpu)
{
kfree(vgpu->sched_data);
vgpu->sched_data = NULL;
}
static void tbs_sched_start_schedule(struct intel_vgpu *vgpu)
{
struct tbs_sched_data *sched_data = vgpu->gvt->scheduler.sched_data;
struct tbs_vgpu_data *vgpu_data = vgpu->sched_data;
if (!list_empty(&vgpu_data->list))
return;
list_add_tail(&vgpu_data->list, &sched_data->runq_head);
schedule_delayed_work(&sched_data->work, sched_data->period);
}
static void tbs_sched_stop_schedule(struct intel_vgpu *vgpu)
{
struct tbs_vgpu_data *vgpu_data = vgpu->sched_data;
list_del_init(&vgpu_data->list);
}
struct intel_gvt_sched_policy_ops tbs_schedule_ops = {
.init = tbs_sched_init,
.clean = tbs_sched_clean,
.init_vgpu = tbs_sched_init_vgpu,
.clean_vgpu = tbs_sched_clean_vgpu,
.start_schedule = tbs_sched_start_schedule,
.stop_schedule = tbs_sched_stop_schedule,
};
int intel_gvt_init_sched_policy(struct intel_gvt *gvt)
{
gvt->scheduler.sched_ops = &tbs_schedule_ops;
return gvt->scheduler.sched_ops->init(gvt);
}
void intel_gvt_clean_sched_policy(struct intel_gvt *gvt)
{
gvt->scheduler.sched_ops->clean(gvt);
}
int intel_vgpu_init_sched_policy(struct intel_vgpu *vgpu)
{
return vgpu->gvt->scheduler.sched_ops->init_vgpu(vgpu);
}
void intel_vgpu_clean_sched_policy(struct intel_vgpu *vgpu)
{
vgpu->gvt->scheduler.sched_ops->clean_vgpu(vgpu);
}
void intel_vgpu_start_schedule(struct intel_vgpu *vgpu)
{
gvt_dbg_core("vgpu%d: start schedule\n", vgpu->id);
vgpu->gvt->scheduler.sched_ops->start_schedule(vgpu);
}
void intel_vgpu_stop_schedule(struct intel_vgpu *vgpu)
{
struct intel_gvt_workload_scheduler *scheduler =
&vgpu->gvt->scheduler;
gvt_dbg_core("vgpu%d: stop schedule\n", vgpu->id);
scheduler->sched_ops->stop_schedule(vgpu);
if (scheduler->next_vgpu == vgpu)
scheduler->next_vgpu = NULL;
if (scheduler->current_vgpu == vgpu) {
/* stop workload dispatching */
scheduler->need_reschedule = true;
scheduler->current_vgpu = NULL;
}
}
/*
* Copyright(c) 2011-2016 Intel Corporation. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice (including the next
* paragraph) shall be included in all copies or substantial portions of the
* Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*
* Authors:
* Anhua Xu
* Kevin Tian <kevin.tian@intel.com>
*
* Contributors:
* Min He <min.he@intel.com>
* Bing Niu <bing.niu@intel.com>
* Zhi Wang <zhi.a.wang@intel.com>
*
*/
#ifndef __GVT_SCHED_POLICY__
#define __GVT_SCHED_POLICY__
struct intel_gvt_sched_policy_ops {
int (*init)(struct intel_gvt *gvt);
void (*clean)(struct intel_gvt *gvt);
int (*init_vgpu)(struct intel_vgpu *vgpu);
void (*clean_vgpu)(struct intel_vgpu *vgpu);
void (*start_schedule)(struct intel_vgpu *vgpu);
void (*stop_schedule)(struct intel_vgpu *vgpu);
};
int intel_gvt_init_sched_policy(struct intel_gvt *gvt);
void intel_gvt_clean_sched_policy(struct intel_gvt *gvt);
int intel_vgpu_init_sched_policy(struct intel_vgpu *vgpu);
void intel_vgpu_clean_sched_policy(struct intel_vgpu *vgpu);
void intel_vgpu_start_schedule(struct intel_vgpu *vgpu);
void intel_vgpu_stop_schedule(struct intel_vgpu *vgpu);
#endif
/*
* Copyright(c) 2011-2016 Intel Corporation. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice (including the next
* paragraph) shall be included in all copies or substantial portions of the
* Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*
* Authors:
* Zhi Wang <zhi.a.wang@intel.com>
*
* Contributors:
* Ping Gao <ping.a.gao@intel.com>
* Tina Zhang <tina.zhang@intel.com>
* Changbin Du <changbin.du@intel.com>
* Min He <min.he@intel.com>
* Bing Niu <bing.niu@intel.com>
* Zhenyu Wang <zhenyuw@linux.intel.com>
*
*/
#include "i915_drv.h"
#include <linux/kthread.h>
#define RING_CTX_OFF(x) \
offsetof(struct execlist_ring_context, x)
void set_context_pdp_root_pointer(struct execlist_ring_context *ring_context,
u32 pdp[8])
{
struct execlist_mmio_pair *pdp_pair = &ring_context->pdp3_UDW;
int i;
for (i = 0; i < 8; i++)
pdp_pair[i].val = pdp[7 - i];
}
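/*
 * Copy the guest execlist context (the extra context pages, selected ring
 * context registers and the PDP root pointers) from guest memory into the
 * shadow context that will be submitted to the hardware.
 */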
static int populate_shadow_context(struct intel_vgpu_workload *workload)
{
struct intel_vgpu *vgpu = workload->vgpu;
struct intel_gvt *gvt = vgpu->gvt;
int ring_id = workload->ring_id;
struct i915_gem_context *shadow_ctx = workload->vgpu->shadow_ctx;
struct drm_i915_gem_object *ctx_obj =
shadow_ctx->engine[ring_id].state->obj;
struct execlist_ring_context *shadow_ring_context;
struct page *page;
void *dst;
unsigned long context_gpa, context_page_num;
int i;
gvt_dbg_sched("ring id %d workload lrca %x", ring_id,
workload->ctx_desc.lrca);
context_page_num = intel_lr_context_size(
&gvt->dev_priv->engine[ring_id]);
context_page_num = context_page_num >> PAGE_SHIFT;
if (IS_BROADWELL(gvt->dev_priv) && ring_id == RCS)
context_page_num = 19;
i = 2;
while (i < context_page_num) {
context_gpa = intel_vgpu_gma_to_gpa(vgpu->gtt.ggtt_mm,
(u32)((workload->ctx_desc.lrca + i) <<
GTT_PAGE_SHIFT));
if (context_gpa == INTEL_GVT_INVALID_ADDR) {
gvt_err("Invalid guest context descriptor\n");
return -EINVAL;
}
page = i915_gem_object_get_page(ctx_obj, LRC_PPHWSP_PN + i);
dst = kmap_atomic(page);
intel_gvt_hypervisor_read_gpa(vgpu, context_gpa, dst,
GTT_PAGE_SIZE);
kunmap_atomic(dst);
i++;
}
page = i915_gem_object_get_page(ctx_obj, LRC_STATE_PN);
shadow_ring_context = kmap_atomic(page);
#define COPY_REG(name) \
intel_gvt_hypervisor_read_gpa(vgpu, workload->ring_context_gpa \
+ RING_CTX_OFF(name.val), &shadow_ring_context->name.val, 4)
COPY_REG(ctx_ctrl);
COPY_REG(ctx_timestamp);
if (ring_id == RCS) {
COPY_REG(bb_per_ctx_ptr);
COPY_REG(rcs_indirect_ctx);
COPY_REG(rcs_indirect_ctx_offset);
}
#undef COPY_REG
set_context_pdp_root_pointer(shadow_ring_context,
workload->shadow_mm->shadow_page_table);
intel_gvt_hypervisor_read_gpa(vgpu,
workload->ring_context_gpa +
sizeof(*shadow_ring_context),
(void *)shadow_ring_context +
sizeof(*shadow_ring_context),
GTT_PAGE_SIZE - sizeof(*shadow_ring_context));
kunmap_atomic(shadow_ring_context);
return 0;
}
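/*
 * Context status notifier: when the shadow context is scheduled in or out
 * on the hardware, switch the vGPU render MMIO state and update
 * shadow_ctx_active, which complete_current_workload() waits on.
 */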
static int shadow_context_status_change(struct notifier_block *nb,
unsigned long action, void *data)
{
struct intel_vgpu *vgpu = container_of(nb,
struct intel_vgpu, shadow_ctx_notifier_block);
struct drm_i915_gem_request *req =
(struct drm_i915_gem_request *)data;
struct intel_gvt_workload_scheduler *scheduler =
&vgpu->gvt->scheduler;
struct intel_vgpu_workload *workload =
scheduler->current_workload[req->engine->id];
switch (action) {
case INTEL_CONTEXT_SCHEDULE_IN:
intel_gvt_load_render_mmio(workload->vgpu,
workload->ring_id);
atomic_set(&workload->shadow_ctx_active, 1);
break;
case INTEL_CONTEXT_SCHEDULE_OUT:
intel_gvt_restore_render_mmio(workload->vgpu,
workload->ring_id);
atomic_set(&workload->shadow_ctx_active, 0);
break;
default:
WARN_ON(1);
return NOTIFY_OK;
}
wake_up(&workload->shadow_ctx_status_wq);
return NOTIFY_OK;
}
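/*
 * Allocate an i915 request for the workload, scan and shadow its commands
 * and workaround context, populate the shadow context and submit the
 * request to i915.
 */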
static int dispatch_workload(struct intel_vgpu_workload *workload)
{
struct intel_vgpu *vgpu = workload->vgpu;
struct intel_gvt *gvt = vgpu->gvt;
int ring_id = workload->ring_id;
struct i915_gem_context *shadow_ctx = workload->vgpu->shadow_ctx;
struct drm_i915_private *dev_priv = workload->vgpu->gvt->dev_priv;
int ret;
gvt_dbg_sched("ring id %d prepare to dispatch workload %p\n",
ring_id, workload);
shadow_ctx->desc_template = workload->ctx_desc.addressing_mode <<
GEN8_CTX_ADDRESSING_MODE_SHIFT;
workload->req = i915_gem_request_alloc(&dev_priv->engine[ring_id],
shadow_ctx);
if (IS_ERR_OR_NULL(workload->req)) {
gvt_err("fail to allocate gem request\n");
workload->status = PTR_ERR(workload->req);
workload->req = NULL;
return workload->status;
}
gvt_dbg_sched("ring id %d get i915 gem request %p\n",
ring_id, workload->req);
mutex_lock(&gvt->lock);
ret = intel_gvt_scan_and_shadow_workload(workload);
if (ret)
goto err;
ret = intel_gvt_scan_and_shadow_wa_ctx(&workload->wa_ctx);
if (ret)
goto err;
ret = populate_shadow_context(workload);
if (ret)
goto err;
if (workload->prepare) {
ret = workload->prepare(workload);
if (ret)
goto err;
}
mutex_unlock(&gvt->lock);
gvt_dbg_sched("ring id %d submit workload to i915 %p\n",
ring_id, workload->req);
i915_add_request_no_flush(workload->req);
workload->dispatched = true;
return 0;
err:
workload->status = ret;
if (workload->req)
workload->req = NULL;
mutex_unlock(&gvt->lock);
return ret;
}
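/*
 * Pick the next workload of the current vGPU for the given ring. Returns
 * NULL when there is no current vGPU, a reschedule is pending or the
 * workload queue is empty.
 */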
static struct intel_vgpu_workload *pick_next_workload(
struct intel_gvt *gvt, int ring_id)
{
struct intel_gvt_workload_scheduler *scheduler = &gvt->scheduler;
struct intel_vgpu_workload *workload = NULL;
mutex_lock(&gvt->lock);
/*
* no current vgpu / will be scheduled out / no workload
* bail out
*/
if (!scheduler->current_vgpu) {
gvt_dbg_sched("ring id %d stop - no current vgpu\n", ring_id);
goto out;
}
if (scheduler->need_reschedule) {
gvt_dbg_sched("ring id %d stop - will reschedule\n", ring_id);
goto out;
}
if (list_empty(workload_q_head(scheduler->current_vgpu, ring_id))) {
gvt_dbg_sched("ring id %d stop - no available workload\n",
ring_id);
goto out;
}
/*
 * still have a current workload; maybe the workload dispatcher
 * failed to submit it for some reason, so resubmit it.
 */
if (scheduler->current_workload[ring_id]) {
workload = scheduler->current_workload[ring_id];
gvt_dbg_sched("ring id %d still have current workload %p\n",
ring_id, workload);
goto out;
}
/*
 * pick a workload as the current workload.
 * once the current workload is set, schedule policy routines
 * will wait until the current workload is finished when trying
 * to schedule out a vgpu.
 */
scheduler->current_workload[ring_id] = container_of(
workload_q_head(scheduler->current_vgpu, ring_id)->next,
struct intel_vgpu_workload, list);
workload = scheduler->current_workload[ring_id];
gvt_dbg_sched("ring id %d pick new workload %p\n", ring_id, workload);
atomic_inc(&workload->vgpu->running_workload_num);
out:
mutex_unlock(&gvt->lock);
return workload;
}
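/*
 * Write the shadow context state back into the guest context pages after
 * the workload has been executed by the hardware.
 */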
static void update_guest_context(struct intel_vgpu_workload *workload)
{
struct intel_vgpu *vgpu = workload->vgpu;
struct intel_gvt *gvt = vgpu->gvt;
int ring_id = workload->ring_id;
struct i915_gem_context *shadow_ctx = workload->vgpu->shadow_ctx;
struct drm_i915_gem_object *ctx_obj =
shadow_ctx->engine[ring_id].state->obj;
struct execlist_ring_context *shadow_ring_context;
struct page *page;
void *src;
unsigned long context_gpa, context_page_num;
int i;
gvt_dbg_sched("ring id %d workload lrca %x\n", ring_id,
workload->ctx_desc.lrca);
context_page_num = intel_lr_context_size(
&gvt->dev_priv->engine[ring_id]);
context_page_num = context_page_num >> PAGE_SHIFT;
if (IS_BROADWELL(gvt->dev_priv) && ring_id == RCS)
context_page_num = 19;
i = 2;
while (i < context_page_num) {
context_gpa = intel_vgpu_gma_to_gpa(vgpu->gtt.ggtt_mm,
(u32)((workload->ctx_desc.lrca + i) <<
GTT_PAGE_SHIFT));
if (context_gpa == INTEL_GVT_INVALID_ADDR) {
gvt_err("invalid guest context descriptor\n");
return;
}
page = i915_gem_object_get_page(ctx_obj, LRC_PPHWSP_PN + i);
src = kmap_atomic(page);
intel_gvt_hypervisor_write_gpa(vgpu, context_gpa, src,
GTT_PAGE_SIZE);
kunmap_atomic(src);
i++;
}
intel_gvt_hypervisor_write_gpa(vgpu, workload->ring_context_gpa +
RING_CTX_OFF(ring_header.val), &workload->rb_tail, 4);
page = i915_gem_object_get_page(ctx_obj, LRC_STATE_PN);
shadow_ring_context = kmap_atomic(page);
#define COPY_REG(name) \
intel_gvt_hypervisor_write_gpa(vgpu, workload->ring_context_gpa + \
RING_CTX_OFF(name.val), &shadow_ring_context->name.val, 4)
COPY_REG(ctx_ctrl);
COPY_REG(ctx_timestamp);
#undef COPY_REG
intel_gvt_hypervisor_write_gpa(vgpu,
workload->ring_context_gpa +
sizeof(*shadow_ring_context),
(void *)shadow_ring_context +
sizeof(*shadow_ring_context),
GTT_PAGE_SIZE - sizeof(*shadow_ring_context));
kunmap_atomic(shadow_ring_context);
}
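/*
 * For a successfully executed workload, wait until its shadow context has
 * been scheduled out, copy the context back to the guest and trigger any
 * pending virtual events; finally run the workload completion callback.
 */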
static void complete_current_workload(struct intel_gvt *gvt, int ring_id)
{
struct intel_gvt_workload_scheduler *scheduler = &gvt->scheduler;
struct intel_vgpu_workload *workload;
int event;
mutex_lock(&gvt->lock);
workload = scheduler->current_workload[ring_id];
if (!workload->status && !workload->vgpu->resetting) {
wait_event(workload->shadow_ctx_status_wq,
!atomic_read(&workload->shadow_ctx_active));
update_guest_context(workload);
for_each_set_bit(event, workload->pending_events,
INTEL_GVT_EVENT_MAX)
intel_vgpu_trigger_virtual_event(workload->vgpu,
event);
}
gvt_dbg_sched("ring id %d complete workload %p status %d\n",
ring_id, workload, workload->status);
scheduler->current_workload[ring_id] = NULL;
atomic_dec(&workload->vgpu->running_workload_num);
list_del_init(&workload->list);
workload->complete(workload);
wake_up(&scheduler->workload_complete_wq);
mutex_unlock(&gvt->lock);
}
struct workload_thread_param {
struct intel_gvt *gvt;
int ring_id;
};
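/*
 * Per-ring dispatch thread: wait for a workload, dispatch it while holding
 * the i915 struct_mutex, wait for the request to complete and then finish
 * the workload.
 */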
static int workload_thread(void *priv)
{
struct workload_thread_param *p = (struct workload_thread_param *)priv;
struct intel_gvt *gvt = p->gvt;
int ring_id = p->ring_id;
struct intel_gvt_workload_scheduler *scheduler = &gvt->scheduler;
struct intel_vgpu_workload *workload = NULL;
int ret;
bool need_force_wake = IS_SKYLAKE(gvt->dev_priv);
kfree(p);
gvt_dbg_core("workload thread for ring %d started\n", ring_id);
while (!kthread_should_stop()) {
ret = wait_event_interruptible(scheduler->waitq[ring_id],
kthread_should_stop() ||
(workload = pick_next_workload(gvt, ring_id)));
WARN_ON_ONCE(ret);
if (kthread_should_stop())
break;
gvt_dbg_sched("ring id %d next workload %p vgpu %d\n",
workload->ring_id, workload,
workload->vgpu->id);
intel_runtime_pm_get(gvt->dev_priv);
/*
* Always take i915 big lock first
*/
ret = i915_mutex_lock_interruptible(&gvt->dev_priv->drm);
if (ret < 0) {
gvt_err("i915 submission is not available, retry\n");
schedule_timeout(1);
continue;
}
gvt_dbg_sched("ring id %d will dispatch workload %p\n",
workload->ring_id, workload);
if (need_force_wake)
intel_uncore_forcewake_get(gvt->dev_priv,
FORCEWAKE_ALL);
ret = dispatch_workload(workload);
if (ret) {
gvt_err("fail to dispatch workload, skip\n");
goto complete;
}
gvt_dbg_sched("ring id %d wait workload %p\n",
workload->ring_id, workload);
workload->status = i915_wait_request(workload->req,
I915_WAIT_INTERRUPTIBLE | I915_WAIT_LOCKED,
NULL, NULL);
if (workload->status != 0)
gvt_err("fail to wait workload, skip\n");
complete:
gvt_dbg_sched("will complete workload %p\n, status: %d\n",
workload, workload->status);
complete_current_workload(gvt, ring_id);
if (need_force_wake)
intel_uncore_forcewake_put(gvt->dev_priv,
FORCEWAKE_ALL);
mutex_unlock(&gvt->dev_priv->drm.struct_mutex);
intel_runtime_pm_put(gvt->dev_priv);
}
return 0;
}
void intel_gvt_wait_vgpu_idle(struct intel_vgpu *vgpu)
{
struct intel_gvt *gvt = vgpu->gvt;
struct intel_gvt_workload_scheduler *scheduler = &gvt->scheduler;
if (atomic_read(&vgpu->running_workload_num)) {
gvt_dbg_sched("wait vgpu idle\n");
wait_event(scheduler->workload_complete_wq,
!atomic_read(&vgpu->running_workload_num));
}
}
void intel_gvt_clean_workload_scheduler(struct intel_gvt *gvt)
{
struct intel_gvt_workload_scheduler *scheduler = &gvt->scheduler;
int i;
gvt_dbg_core("clean workload scheduler\n");
for (i = 0; i < I915_NUM_ENGINES; i++) {
if (scheduler->thread[i]) {
kthread_stop(scheduler->thread[i]);
scheduler->thread[i] = NULL;
}
}
}
int intel_gvt_init_workload_scheduler(struct intel_gvt *gvt)
{
struct intel_gvt_workload_scheduler *scheduler = &gvt->scheduler;
struct workload_thread_param *param = NULL;
int ret;
int i;
gvt_dbg_core("init workload scheduler\n");
init_waitqueue_head(&scheduler->workload_complete_wq);
for (i = 0; i < I915_NUM_ENGINES; i++) {
init_waitqueue_head(&scheduler->waitq[i]);
param = kzalloc(sizeof(*param), GFP_KERNEL);
if (!param) {
ret = -ENOMEM;
goto err;
}
param->gvt = gvt;
param->ring_id = i;
scheduler->thread[i] = kthread_run(workload_thread, param,
"gvt workload %d", i);
if (IS_ERR(scheduler->thread[i])) {
gvt_err("fail to create workload thread\n");
ret = PTR_ERR(scheduler->thread[i]);
goto err;
}
}
return 0;
err:
intel_gvt_clean_workload_scheduler(gvt);
kfree(param);
param = NULL;
return ret;
}
void intel_vgpu_clean_gvt_context(struct intel_vgpu *vgpu)
{
struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv;
atomic_notifier_chain_unregister(&vgpu->shadow_ctx->status_notifier,
&vgpu->shadow_ctx_notifier_block);
mutex_lock(&dev_priv->drm.struct_mutex);
/* a little hacky to mark as ctx closed */
vgpu->shadow_ctx->closed = true;
i915_gem_context_put(vgpu->shadow_ctx);
mutex_unlock(&dev_priv->drm.struct_mutex);
}
int intel_vgpu_init_gvt_context(struct intel_vgpu *vgpu)
{
atomic_set(&vgpu->running_workload_num, 0);
vgpu->shadow_ctx = i915_gem_context_create_gvt(
&vgpu->gvt->dev_priv->drm);
if (IS_ERR(vgpu->shadow_ctx))
return PTR_ERR(vgpu->shadow_ctx);
vgpu->shadow_ctx->engine[RCS].initialised = true;
vgpu->shadow_ctx_notifier_block.notifier_call =
shadow_context_status_change;
atomic_notifier_chain_register(&vgpu->shadow_ctx->status_notifier,
&vgpu->shadow_ctx_notifier_block);
return 0;
}
/*
* Copyright(c) 2011-2016 Intel Corporation. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice (including the next
* paragraph) shall be included in all copies or substantial portions of the
* Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*
* Authors:
* Zhi Wang <zhi.a.wang@intel.com>
*
* Contributors:
* Ping Gao <ping.a.gao@intel.com>
* Tina Zhang <tina.zhang@intel.com>
* Changbin Du <changbin.du@intel.com>
* Min He <min.he@intel.com>
* Bing Niu <bing.niu@intel.com>
* Zhenyu Wang <zhenyuw@linux.intel.com>
*
*/
#ifndef _GVT_SCHEDULER_H_
#define _GVT_SCHEDULER_H_
struct intel_gvt_workload_scheduler {
struct intel_vgpu *current_vgpu;
struct intel_vgpu *next_vgpu;
struct intel_vgpu_workload *current_workload[I915_NUM_ENGINES];
bool need_reschedule;
wait_queue_head_t workload_complete_wq;
struct task_struct *thread[I915_NUM_ENGINES];
wait_queue_head_t waitq[I915_NUM_ENGINES];
void *sched_data;
struct intel_gvt_sched_policy_ops *sched_ops;
};
#define INDIRECT_CTX_ADDR_MASK 0xffffffc0
#define INDIRECT_CTX_SIZE_MASK 0x3f
struct shadow_indirect_ctx {
struct drm_i915_gem_object *obj;
unsigned long guest_gma;
unsigned long shadow_gma;
void *shadow_va;
uint32_t size;
};
#define PER_CTX_ADDR_MASK 0xfffff000
struct shadow_per_ctx {
unsigned long guest_gma;
unsigned long shadow_gma;
};
struct intel_shadow_wa_ctx {
struct intel_vgpu_workload *workload;
struct shadow_indirect_ctx indirect_ctx;
struct shadow_per_ctx per_ctx;
};
struct intel_vgpu_workload {
struct intel_vgpu *vgpu;
int ring_id;
struct drm_i915_gem_request *req;
/* has this workload been dispatched to i915? */
bool dispatched;
int status;
struct intel_vgpu_mm *shadow_mm;
/* different submission models may need different handlers */
int (*prepare)(struct intel_vgpu_workload *);
int (*complete)(struct intel_vgpu_workload *);
struct list_head list;
DECLARE_BITMAP(pending_events, INTEL_GVT_EVENT_MAX);
void *shadow_ring_buffer_va;
/* execlist context information */
struct execlist_ctx_descriptor_format ctx_desc;
struct execlist_ring_context *ring_context;
unsigned long rb_head, rb_tail, rb_ctl, rb_start, rb_len;
bool restore_inhibit;
struct intel_vgpu_elsp_dwords elsp_dwords;
bool emulate_schedule_in;
atomic_t shadow_ctx_active;
wait_queue_head_t shadow_ctx_status_wq;
u64 ring_context_gpa;
/* shadow batch buffer */
struct list_head shadow_bb;
struct intel_shadow_wa_ctx wa_ctx;
};
/* Intel shadow batch buffer is an i915 gem object */
struct intel_shadow_bb_entry {
struct list_head list;
struct drm_i915_gem_object *obj;
void *va;
unsigned long len;
void *bb_start_cmd_va;
};
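/*
 * Per-vGPU, per-ring workload queue helpers: queue_workload() adds a
 * workload to its vGPU's queue and wakes up the matching dispatch thread.
 */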
#define workload_q_head(vgpu, ring_id) \
(&(vgpu->workload_q_head[ring_id]))
#define queue_workload(workload) do { \
list_add_tail(&workload->list, \
workload_q_head(workload->vgpu, workload->ring_id)); \
wake_up(&workload->vgpu->gvt-> \
scheduler.waitq[workload->ring_id]); \
} while (0)
int intel_gvt_init_workload_scheduler(struct intel_gvt *gvt);
void intel_gvt_clean_workload_scheduler(struct intel_gvt *gvt);
void intel_gvt_wait_vgpu_idle(struct intel_vgpu *vgpu);
int intel_vgpu_init_gvt_context(struct intel_vgpu *vgpu);
void intel_vgpu_clean_gvt_context(struct intel_vgpu *vgpu);
#endif
/*
* Copyright © 2011-2016 Intel Corporation
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice (including the next
* paragraph) shall be included in all copies or substantial portions of the
* Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
* IN THE SOFTWARE.
*
* Authors:
* Jike Song <jike.song@intel.com>
*
* Contributors:
* Zhi Wang <zhi.a.wang@intel.com>
*
*/
#if !defined(_GVT_TRACE_H_) || defined(TRACE_HEADER_MULTI_READ)
#define _GVT_TRACE_H_
#include <linux/types.h>
#include <linux/stringify.h>
#include <linux/tracepoint.h>
#include <asm/tsc.h>
#undef TRACE_SYSTEM
#define TRACE_SYSTEM gvt
TRACE_EVENT(spt_alloc,
TP_PROTO(int id, void *spt, int type, unsigned long mfn,
unsigned long gpt_gfn),
TP_ARGS(id, spt, type, mfn, gpt_gfn),
TP_STRUCT__entry(
__field(int, id)
__field(void *, spt)
__field(int, type)
__field(unsigned long, mfn)
__field(unsigned long, gpt_gfn)
),
TP_fast_assign(
__entry->id = id;
__entry->spt = spt;
__entry->type = type;
__entry->mfn = mfn;
__entry->gpt_gfn = gpt_gfn;
),
TP_printk("VM%d [alloc] spt %p type %d mfn 0x%lx gfn 0x%lx\n",
__entry->id,
__entry->spt,
__entry->type,
__entry->mfn,
__entry->gpt_gfn)
);
TRACE_EVENT(spt_free,
TP_PROTO(int id, void *spt, int type),
TP_ARGS(id, spt, type),
TP_STRUCT__entry(
__field(int, id)
__field(void *, spt)
__field(int, type)
),
TP_fast_assign(
__entry->id = id;
__entry->spt = spt;
__entry->type = type;
),
TP_printk("VM%u [free] spt %p type %d\n",
__entry->id,
__entry->spt,
__entry->type)
);
#define MAX_BUF_LEN 256
TRACE_EVENT(gma_index,
TP_PROTO(const char *prefix, unsigned long gma,
unsigned long index),
TP_ARGS(prefix, gma, index),
TP_STRUCT__entry(
__array(char, buf, MAX_BUF_LEN)
),
TP_fast_assign(
snprintf(__entry->buf, MAX_BUF_LEN,
"%s gma 0x%lx index 0x%lx\n", prefix, gma, index);
),
TP_printk("%s", __entry->buf)
);
TRACE_EVENT(gma_translate,
TP_PROTO(int id, char *type, int ring_id, int pt_level,
unsigned long gma, unsigned long gpa),
TP_ARGS(id, type, ring_id, pt_level, gma, gpa),
TP_STRUCT__entry(
__array(char, buf, MAX_BUF_LEN)
),
TP_fast_assign(
snprintf(__entry->buf, MAX_BUF_LEN,
"VM%d %s ring %d pt_level %d gma 0x%lx -> gpa 0x%lx\n",
id, type, ring_id, pt_level, gma, gpa);
),
TP_printk("%s", __entry->buf)
);
TRACE_EVENT(spt_refcount,
TP_PROTO(int id, char *action, void *spt, int before, int after),
TP_ARGS(id, action, spt, before, after),
TP_STRUCT__entry(
__array(char, buf, MAX_BUF_LEN)
),
TP_fast_assign(
snprintf(__entry->buf, MAX_BUF_LEN,
"VM%d [%s] spt %p before %d -> after %d\n",
id, action, spt, before, after);
),
TP_printk("%s", __entry->buf)
);
TRACE_EVENT(spt_change,
TP_PROTO(int id, char *action, void *spt, unsigned long gfn,
int type),
TP_ARGS(id, action, spt, gfn, type),
TP_STRUCT__entry(
__array(char, buf, MAX_BUF_LEN)
),
TP_fast_assign(
snprintf(__entry->buf, MAX_BUF_LEN,
"VM%d [%s] spt %p gfn 0x%lx type %d\n",
id, action, spt, gfn, type);
),
TP_printk("%s", __entry->buf)
);
TRACE_EVENT(gpt_change,
TP_PROTO(int id, const char *tag, void *spt, int type, u64 v,
unsigned long index),
TP_ARGS(id, tag, spt, type, v, index),
TP_STRUCT__entry(
__array(char, buf, MAX_BUF_LEN)
),
TP_fast_assign(
snprintf(__entry->buf, MAX_BUF_LEN,
"VM%d [%s] spt %p type %d entry 0x%llx index 0x%lx\n",
id, tag, spt, type, v, index);
),
TP_printk("%s", __entry->buf)
);
TRACE_EVENT(oos_change,
TP_PROTO(int id, const char *tag, int page_id, void *gpt, int type),
TP_ARGS(id, tag, page_id, gpt, type),
TP_STRUCT__entry(
__array(char, buf, MAX_BUF_LEN)
),
TP_fast_assign(
snprintf(__entry->buf, MAX_BUF_LEN,
"VM%d [oos %s] page id %d gpt %p type %d\n",
id, tag, page_id, gpt, type);
),
TP_printk("%s", __entry->buf)
);
TRACE_EVENT(oos_sync,
TP_PROTO(int id, int page_id, void *gpt, int type, u64 v,
unsigned long index),
TP_ARGS(id, page_id, gpt, type, v, index),
TP_STRUCT__entry(
__array(char, buf, MAX_BUF_LEN)
),
TP_fast_assign(
snprintf(__entry->buf, MAX_BUF_LEN,
"VM%d [oos sync] page id %d gpt %p type %d entry 0x%llx index 0x%lx\n",
id, page_id, gpt, type, v, index);
),
TP_printk("%s", __entry->buf)
);
#define MAX_CMD_STR_LEN 256
TRACE_EVENT(gvt_command,
TP_PROTO(u8 vm_id, u8 ring_id, u32 ip_gma, u32 *cmd_va, u32 cmd_len, bool ring_buffer_cmd, cycles_t cost_pre_cmd_handler, cycles_t cost_cmd_handler),
TP_ARGS(vm_id, ring_id, ip_gma, cmd_va, cmd_len, ring_buffer_cmd, cost_pre_cmd_handler, cost_cmd_handler),
TP_STRUCT__entry(
__field(u8, vm_id)
__field(u8, ring_id)
__field(int, i)
__array(char, tmp_buf, MAX_CMD_STR_LEN)
__array(char, cmd_str, MAX_CMD_STR_LEN)
),
TP_fast_assign(
__entry->vm_id = vm_id;
__entry->ring_id = ring_id;
__entry->cmd_str[0] = '\0';
snprintf(__entry->tmp_buf, MAX_CMD_STR_LEN, "VM(%d) Ring(%d): %s ip(%08x) pre handler cost (%llu), handler cost (%llu) ", vm_id, ring_id, ring_buffer_cmd ? "RB":"BB", ip_gma, cost_pre_cmd_handler, cost_cmd_handler);
strcat(__entry->cmd_str, __entry->tmp_buf);
__entry->i = 0;
while (cmd_len > 0) {
if (cmd_len >= 8) {
snprintf(__entry->tmp_buf, MAX_CMD_STR_LEN, "%08x %08x %08x %08x %08x %08x %08x %08x ",
cmd_va[__entry->i], cmd_va[__entry->i+1], cmd_va[__entry->i+2], cmd_va[__entry->i+3],
cmd_va[__entry->i+4], cmd_va[__entry->i+5], cmd_va[__entry->i+6], cmd_va[__entry->i+7]);
__entry->i += 8;
cmd_len -= 8;
strcat(__entry->cmd_str, __entry->tmp_buf);
} else if (cmd_len >= 4) {
snprintf(__entry->tmp_buf, MAX_CMD_STR_LEN, "%08x %08x %08x %08x ",
cmd_va[__entry->i], cmd_va[__entry->i+1], cmd_va[__entry->i+2], cmd_va[__entry->i+3]);
__entry->i += 4;
cmd_len -= 4;
strcat(__entry->cmd_str, __entry->tmp_buf);
} else if (cmd_len >= 2) {
snprintf(__entry->tmp_buf, MAX_CMD_STR_LEN, "%08x %08x ", cmd_va[__entry->i], cmd_va[__entry->i+1]);
__entry->i += 2;
cmd_len -= 2;
strcat(__entry->cmd_str, __entry->tmp_buf);
} else if (cmd_len == 1) {
snprintf(__entry->tmp_buf, MAX_CMD_STR_LEN, "%08x ", cmd_va[__entry->i]);
__entry->i += 1;
cmd_len -= 1;
strcat(__entry->cmd_str, __entry->tmp_buf);
}
}
strcat(__entry->cmd_str, "\n");
),
TP_printk("%s", __entry->cmd_str)
);
#endif /* _GVT_TRACE_H_ */
/* This part must be out of protection */
#undef TRACE_INCLUDE_PATH
#define TRACE_INCLUDE_PATH .
#undef TRACE_INCLUDE_FILE
#define TRACE_INCLUDE_FILE trace
#include <trace/define_trace.h>
/*
* Copyright(c) 2011-2016 Intel Corporation. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice (including the next
* paragraph) shall be included in all copies or substantial portions of the
* Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*
* Authors:
* Jike Song <jike.song@intel.com>
*
* Contributors:
* Zhi Wang <zhi.a.wang@intel.com>
*
*/
#include "trace.h"
#ifndef __CHECKER__
#define CREATE_TRACE_POINTS
#include "trace.h"
#endif
/*
* Copyright(c) 2011-2016 Intel Corporation. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice (including the next
* paragraph) shall be included in all copies or substantial portions of the
* Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*
* Authors:
* Eddie Dong <eddie.dong@intel.com>
* Kevin Tian <kevin.tian@intel.com>
*
* Contributors:
* Ping Gao <ping.a.gao@intel.com>
* Zhi Wang <zhi.a.wang@intel.com>
* Bing Niu <bing.niu@intel.com>
*
*/
#include "i915_drv.h"
static void clean_vgpu_mmio(struct intel_vgpu *vgpu)
{
vfree(vgpu->mmio.vreg);
vgpu->mmio.vreg = vgpu->mmio.sreg = NULL;
}
static int setup_vgpu_mmio(struct intel_vgpu *vgpu)
{
struct intel_gvt *gvt = vgpu->gvt;
const struct intel_gvt_device_info *info = &gvt->device_info;
vgpu->mmio.vreg = vzalloc(info->mmio_size * 2);
if (!vgpu->mmio.vreg)
return -ENOMEM;
vgpu->mmio.sreg = vgpu->mmio.vreg + info->mmio_size;
memcpy(vgpu->mmio.vreg, gvt->firmware.mmio, info->mmio_size);
memcpy(vgpu->mmio.sreg, gvt->firmware.mmio, info->mmio_size);
vgpu_vreg(vgpu, GEN6_GT_THREAD_STATUS_REG) = 0;
/* set bits 0:2 (Core C-State) to C0 */
vgpu_vreg(vgpu, GEN6_GT_CORE_STATUS) = 0;
return 0;
}
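/*
 * Initialize the vGPU PCI configuration space from the host firmware
 * snapshot and adjust it for the vGPU (class code, stolen memory, BARs).
 */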
static void setup_vgpu_cfg_space(struct intel_vgpu *vgpu,
struct intel_vgpu_creation_params *param)
{
struct intel_gvt *gvt = vgpu->gvt;
const struct intel_gvt_device_info *info = &gvt->device_info;
u16 *gmch_ctl;
int i;
memcpy(vgpu_cfg_space(vgpu), gvt->firmware.cfg_space,
info->cfg_space_size);
if (!param->primary) {
vgpu_cfg_space(vgpu)[PCI_CLASS_DEVICE] =
INTEL_GVT_PCI_CLASS_VGA_OTHER;
vgpu_cfg_space(vgpu)[PCI_CLASS_PROG] =
INTEL_GVT_PCI_CLASS_VGA_OTHER;
}
/* Show the guest that there isn't any stolen memory. */
gmch_ctl = (u16 *)(vgpu_cfg_space(vgpu) + INTEL_GVT_PCI_GMCH_CONTROL);
*gmch_ctl &= ~(BDW_GMCH_GMS_MASK << BDW_GMCH_GMS_SHIFT);
intel_vgpu_write_pci_bar(vgpu, PCI_BASE_ADDRESS_2,
gvt_aperture_pa_base(gvt), true);
vgpu_cfg_space(vgpu)[PCI_COMMAND] &= ~(PCI_COMMAND_IO
| PCI_COMMAND_MEMORY
| PCI_COMMAND_MASTER);
/*
 * Clear the upper 32 bits of the BARs and let the guest assign new values
 */
memset(vgpu_cfg_space(vgpu) + PCI_BASE_ADDRESS_1, 0, 4);
memset(vgpu_cfg_space(vgpu) + PCI_BASE_ADDRESS_3, 0, 4);
for (i = 0; i < INTEL_GVT_MAX_BAR_NUM; i++) {
vgpu->cfg_space.bar[i].size = pci_resource_len(
gvt->dev_priv->drm.pdev, i * 2);
vgpu->cfg_space.bar[i].tracked = false;
}
}
static void populate_pvinfo_page(struct intel_vgpu *vgpu)
{
/* setup the ballooning information */
vgpu_vreg64(vgpu, vgtif_reg(magic)) = VGT_MAGIC;
vgpu_vreg(vgpu, vgtif_reg(version_major)) = 1;
vgpu_vreg(vgpu, vgtif_reg(version_minor)) = 0;
vgpu_vreg(vgpu, vgtif_reg(display_ready)) = 0;
vgpu_vreg(vgpu, vgtif_reg(vgt_id)) = vgpu->id;
vgpu_vreg(vgpu, vgtif_reg(avail_rs.mappable_gmadr.base)) =
vgpu_aperture_gmadr_base(vgpu);
vgpu_vreg(vgpu, vgtif_reg(avail_rs.mappable_gmadr.size)) =
vgpu_aperture_sz(vgpu);
vgpu_vreg(vgpu, vgtif_reg(avail_rs.nonmappable_gmadr.base)) =
vgpu_hidden_gmadr_base(vgpu);
vgpu_vreg(vgpu, vgtif_reg(avail_rs.nonmappable_gmadr.size)) =
vgpu_hidden_sz(vgpu);
vgpu_vreg(vgpu, vgtif_reg(avail_rs.fence_num)) = vgpu_fence_sz(vgpu);
gvt_dbg_core("Populate PVINFO PAGE for vGPU %d\n", vgpu->id);
gvt_dbg_core("aperture base [GMADR] 0x%llx size 0x%llx\n",
vgpu_aperture_gmadr_base(vgpu), vgpu_aperture_sz(vgpu));
gvt_dbg_core("hidden base [GMADR] 0x%llx size=0x%llx\n",
vgpu_hidden_gmadr_base(vgpu), vgpu_hidden_sz(vgpu));
gvt_dbg_core("fence size %d\n", vgpu_fence_sz(vgpu));
WARN_ON(sizeof(struct vgt_if) != VGT_PVINFO_SIZE);
}
/**
* intel_gvt_destroy_vgpu - destroy a virtual GPU
* @vgpu: virtual GPU
*
* This function is called when a user wants to destroy a virtual GPU.
*
*/
void intel_gvt_destroy_vgpu(struct intel_vgpu *vgpu)
{
struct intel_gvt *gvt = vgpu->gvt;
mutex_lock(&gvt->lock);
vgpu->active = false;
idr_remove(&gvt->vgpu_idr, vgpu->id);
if (atomic_read(&vgpu->running_workload_num)) {
mutex_unlock(&gvt->lock);
intel_gvt_wait_vgpu_idle(vgpu);
mutex_lock(&gvt->lock);
}
intel_vgpu_stop_schedule(vgpu);
intel_vgpu_clean_sched_policy(vgpu);
intel_vgpu_clean_gvt_context(vgpu);
intel_vgpu_clean_execlist(vgpu);
intel_vgpu_clean_display(vgpu);
intel_vgpu_clean_opregion(vgpu);
intel_vgpu_clean_gtt(vgpu);
intel_gvt_hypervisor_detach_vgpu(vgpu);
intel_vgpu_free_resource(vgpu);
clean_vgpu_mmio(vgpu);
vfree(vgpu);
mutex_unlock(&gvt->lock);
}
/**
* intel_gvt_create_vgpu - create a virtual GPU
* @gvt: GVT device
* @param: vGPU creation parameters
*
* This function is called when a user wants to create a virtual GPU.
*
* Returns:
* pointer to intel_vgpu, or an error pointer if failed.
*/
struct intel_vgpu *intel_gvt_create_vgpu(struct intel_gvt *gvt,
struct intel_vgpu_creation_params *param)
{
struct intel_vgpu *vgpu;
int ret;
gvt_dbg_core("handle %llu low %llu MB high %llu MB fence %llu\n",
param->handle, param->low_gm_sz, param->high_gm_sz,
param->fence_sz);
vgpu = vzalloc(sizeof(*vgpu));
if (!vgpu)
return ERR_PTR(-ENOMEM);
mutex_lock(&gvt->lock);
ret = idr_alloc(&gvt->vgpu_idr, vgpu, 1, GVT_MAX_VGPU, GFP_KERNEL);
if (ret < 0)
goto out_free_vgpu;
vgpu->id = ret;
vgpu->handle = param->handle;
vgpu->gvt = gvt;
bitmap_zero(vgpu->tlb_handle_pending, I915_NUM_ENGINES);
setup_vgpu_cfg_space(vgpu, param);
ret = setup_vgpu_mmio(vgpu);
if (ret)
goto out_free_vgpu;
ret = intel_vgpu_alloc_resource(vgpu, param);
if (ret)
goto out_clean_vgpu_mmio;
populate_pvinfo_page(vgpu);
ret = intel_gvt_hypervisor_attach_vgpu(vgpu);
if (ret)
goto out_clean_vgpu_resource;
ret = intel_vgpu_init_gtt(vgpu);
if (ret)
goto out_detach_hypervisor_vgpu;
if (intel_gvt_host.hypervisor_type == INTEL_GVT_HYPERVISOR_KVM) {
ret = intel_vgpu_init_opregion(vgpu, 0);
if (ret)
goto out_clean_gtt;
}
ret = intel_vgpu_init_display(vgpu);
if (ret)
goto out_clean_opregion;
ret = intel_vgpu_init_execlist(vgpu);
if (ret)
goto out_clean_display;
ret = intel_vgpu_init_gvt_context(vgpu);
if (ret)
goto out_clean_execlist;
ret = intel_vgpu_init_sched_policy(vgpu);
if (ret)
goto out_clean_shadow_ctx;
vgpu->active = true;
mutex_unlock(&gvt->lock);
return vgpu;
out_clean_shadow_ctx:
intel_vgpu_clean_gvt_context(vgpu);
out_clean_execlist:
intel_vgpu_clean_execlist(vgpu);
out_clean_display:
intel_vgpu_clean_display(vgpu);
out_clean_opregion:
intel_vgpu_clean_opregion(vgpu);
out_clean_gtt:
intel_vgpu_clean_gtt(vgpu);
out_detach_hypervisor_vgpu:
intel_gvt_hypervisor_detach_vgpu(vgpu);
out_clean_vgpu_resource:
intel_vgpu_free_resource(vgpu);
out_clean_vgpu_mmio:
clean_vgpu_mmio(vgpu);
out_free_vgpu:
vfree(vgpu);
mutex_unlock(&gvt->lock);
return ERR_PTR(ret);
}
@@ -39,6 +39,8 @@ static bool is_supported_device(struct drm_i915_private *dev_priv)
 {
 	if (IS_BROADWELL(dev_priv))
 		return true;
+	if (IS_SKYLAKE(dev_priv))
+		return true;
 	return false;
 }
......
@@ -24,6 +24,7 @@
 #ifndef _INTEL_GVT_H_
 #define _INTEL_GVT_H_
+#include "i915_pvinfo.h"
 #include "gvt/gvt.h"
 #ifdef CONFIG_DRM_I915_GVT
......