Commit 2707e444 authored by Zhi Wang, committed by Zhenyu Wang

drm/i915/gvt: vGPU graphics memory virtualization

The vGPU graphics memory emulation framework is responsible for graphics
memory table virtualization. In a virtualization environment, a VM
populates its page table entries with guest page frame numbers (GPFN/GFN),
while the HW needs page tables filled with machine frame numbers (MFN).
The GFN-to-MFN mapping is managed by the hypervisor; GEN HW has no such
knowledge and cannot translate a GFN by itself.

To bridge this gap, shadow GGTT/PPGTT page tables are introduced.

For GGTT, the GFN inside a guest GGTT page table entry is translated into
an MFN and written into the physical GTT MMIO registers when the guest
writes the virtual GTT MMIO registers.
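
In sketch form (illustrative only: this abbreviates the GGTT write handler
added in the collapsed gtt.c diff, reuses the pte_ops and device-info
fields introduced below, and elides the guest-visible copy of the entry
and most error handling):

    /* Simplified sketch of the GGTT MMIO write emulation path. */
    static int sketch_emulate_ggtt_write(struct intel_vgpu *vgpu,
                    unsigned int off, u64 guest_pte)
    {
            struct intel_gvt_gtt_pte_ops *ops = vgpu->gvt->gtt.pte_ops;
            const struct intel_gvt_device_info *info = &vgpu->gvt->device_info;
            unsigned long index = (off - info->gtt_start_offset)
                            >> info->gtt_entry_size_shift;
            /* Entry type (an enum private to gtt.c) omitted for brevity. */
            struct intel_gvt_gtt_entry e = { .val64 = guest_pte, .type = 0 };
            unsigned long gfn, mfn;

            if (ops->test_present(&e)) {
                    gfn = ops->get_pfn(&e);
                    /* Hypercall: ask the hypervisor for the GFN->MFN mapping. */
                    mfn = intel_gvt_hypervisor_gfn_to_mfn(vgpu, gfn);
                    if (mfn == INTEL_GVT_INVALID_ADDR)
                            return -ENXIO;
                    ops->set_pfn(&e, mfn);
            }
            /* Write the translated entry at 'index' into the physical GTT
             * (elided). */
            return 0;
    }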

For PPGTT, a shadow PPGTT page table is created by translating the guest
PPGTT page table, whose pages are write-protected so that guest updates
can be tracked. The shadow page table root pointers are then written into
the shadow context after a guest workload is shadowed.
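
A minimal sketch of the write-protection handler (hypothetical and heavily
simplified: the real logic in the collapsed gtt.c also walks and allocates
shadow page table pages and handles the out-of-sync case; for a non-leaf
guest entry the shadow PFN would be the shadow page's MFN rather than a
gfn_to_mfn() result):

    /* Mirror one guest leaf PTE write into the shadow page table. */
    static int sketch_ppgtt_wp_handler(struct intel_vgpu *vgpu,
                    struct intel_gvt_gtt_entry *guest_e,
                    struct intel_gvt_gtt_entry *shadow_e)
    {
            struct intel_gvt_gtt_pte_ops *ops = vgpu->gvt->gtt.pte_ops;
            unsigned long mfn;

            *shadow_e = *guest_e;
            if (!ops->test_present(guest_e))
                    return 0;

            /* Replace the guest frame number with the machine frame number. */
            mfn = intel_gvt_hypervisor_gfn_to_mfn(vgpu, ops->get_pfn(guest_e));
            if (mfn == INTEL_GVT_INVALID_ADDR)
                    return -ENXIO;
            ops->set_pfn(shadow_e, mfn);
            return 0;
    }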

The vGPU graphics memory emulation framework consists of:

- Per-GEN-HW-platform page table entry bit extract/assemble routines
  (sketched below this list).
- GTT MMIO register emulation handlers, which issue a hypercall to do the
  GFN->MFN translation when the guest writes a GTT MMIO register.
- PPGTT shadow page table routines, e.g. shadow page create/destroy and
  out-of-sync handling.
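
For illustration, a per-platform extract/assemble pair could look like the
following (the address mask is an assumption for a gen8-style 4K entry,
not a value taken from this patch):

    /* Illustrative gen8-style 4K-entry helpers; mask covers bits 47:12. */
    #define SKETCH_ADDR_4K_MASK (((1UL << 36) - 1) << 12)

    static unsigned long sketch_get_pfn(struct intel_gvt_gtt_entry *e)
    {
            return (e->val64 & SKETCH_ADDR_4K_MASK) >> 12;
    }

    static void sketch_set_pfn(struct intel_gvt_gtt_entry *e, unsigned long pfn)
    {
            e->val64 &= ~SKETCH_ADDR_4K_MASK;
            e->val64 |= (u64)pfn << 12; /* assumes pfn fits in the mask */
    }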
Signed-off-by: Zhi Wang <zhi.a.wang@intel.com>
Signed-off-by: Zhenyu Wang <zhenyuw@linux.intel.com>
parent c8fe6a68
GVT_DIR := gvt
GVT_SOURCE := gvt.o aperture_gm.o handlers.o vgpu.o trace_points.o firmware.o \
interrupt.o
interrupt.o gtt.o
ccflags-y += -I$(src) -I$(src)/$(GVT_DIR) -Wall
i915-y += $(addprefix $(GVT_DIR)/, $(GVT_SOURCE))
@@ -33,4 +33,7 @@
#define gvt_dbg_irq(fmt, args...) \
        DRM_DEBUG_DRIVER("gvt: irq: "fmt, ##args)
#define gvt_dbg_mm(fmt, args...) \
        DRM_DEBUG_DRIVER("gvt: mm: "fmt, ##args)
#endif
This diff is collapsed.
/*
* Copyright(c) 2011-2016 Intel Corporation. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice (including the next
* paragraph) shall be included in all copies or substantial portions of the
* Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*
* Authors:
* Zhi Wang <zhi.a.wang@intel.com>
* Zhenyu Wang <zhenyuw@linux.intel.com>
* Xiao Zheng <xiao.zheng@intel.com>
*
* Contributors:
* Min He <min.he@intel.com>
* Bing Niu <bing.niu@intel.com>
*
*/
#ifndef _GVT_GTT_H_
#define _GVT_GTT_H_

#define GTT_PAGE_SHIFT 12
#define GTT_PAGE_SIZE (1UL << GTT_PAGE_SHIFT)
#define GTT_PAGE_MASK (~(GTT_PAGE_SIZE-1))

struct intel_vgpu_mm;

#define INTEL_GVT_GTT_HASH_BITS 8
#define INTEL_GVT_INVALID_ADDR (~0UL)
struct intel_gvt_gtt_entry {
        u64 val64;
        int type;
};

struct intel_gvt_gtt_pte_ops {
        struct intel_gvt_gtt_entry *(*get_entry)(void *pt,
                struct intel_gvt_gtt_entry *e,
                unsigned long index, bool hypervisor_access, unsigned long gpa,
                struct intel_vgpu *vgpu);
        struct intel_gvt_gtt_entry *(*set_entry)(void *pt,
                struct intel_gvt_gtt_entry *e,
                unsigned long index, bool hypervisor_access, unsigned long gpa,
                struct intel_vgpu *vgpu);
        bool (*test_present)(struct intel_gvt_gtt_entry *e);
        void (*clear_present)(struct intel_gvt_gtt_entry *e);
        bool (*test_pse)(struct intel_gvt_gtt_entry *e);
        void (*set_pfn)(struct intel_gvt_gtt_entry *e, unsigned long pfn);
        unsigned long (*get_pfn)(struct intel_gvt_gtt_entry *e);
};

struct intel_gvt_gtt_gma_ops {
        unsigned long (*gma_to_ggtt_pte_index)(unsigned long gma);
        unsigned long (*gma_to_pte_index)(unsigned long gma);
        unsigned long (*gma_to_pde_index)(unsigned long gma);
        unsigned long (*gma_to_l3_pdp_index)(unsigned long gma);
        unsigned long (*gma_to_l4_pdp_index)(unsigned long gma);
        unsigned long (*gma_to_pml4_index)(unsigned long gma);
};
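
/*
 * Illustrative sketch (not part of this patch): for a gen8-style 4-level
 * page table with 4KB pages and 512 entries per level, the gma_ops index
 * helpers would decompose a graphics memory address roughly as below.
 * The shift/mask values are assumptions for illustration.
 */
static inline unsigned long sketch_gma_to_pte_index(unsigned long gma)
{
        return (gma >> 12) & 0x1ff; /* bits 20:12 */
}

static inline unsigned long sketch_gma_to_pde_index(unsigned long gma)
{
        return (gma >> 21) & 0x1ff; /* bits 29:21 */
}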
struct intel_gvt_gtt {
        struct intel_gvt_gtt_pte_ops *pte_ops;
        struct intel_gvt_gtt_gma_ops *gma_ops;
        int (*mm_alloc_page_table)(struct intel_vgpu_mm *mm);
        void (*mm_free_page_table)(struct intel_vgpu_mm *mm);
        struct list_head oos_page_use_list_head;
        struct list_head oos_page_free_list_head;
        struct list_head mm_lru_list_head;
};
enum {
        INTEL_GVT_MM_GGTT = 0,
        INTEL_GVT_MM_PPGTT,
};

struct intel_vgpu_mm {
        int type;
        bool initialized;
        bool shadowed;

        int page_table_entry_type;
        u32 page_table_entry_size;
        u32 page_table_entry_cnt;
        void *virtual_page_table;
        void *shadow_page_table;

        int page_table_level;
        bool has_shadow_page_table;
        u32 pde_base_index;

        struct list_head list;
        struct kref ref;
        atomic_t pincount;
        struct list_head lru_list;
        struct intel_vgpu *vgpu;
};
extern struct intel_gvt_gtt_entry *intel_vgpu_mm_get_entry(
                struct intel_vgpu_mm *mm,
                void *page_table, struct intel_gvt_gtt_entry *e,
                unsigned long index);

extern struct intel_gvt_gtt_entry *intel_vgpu_mm_set_entry(
                struct intel_vgpu_mm *mm,
                void *page_table, struct intel_gvt_gtt_entry *e,
                unsigned long index);

#define ggtt_get_guest_entry(mm, e, index) \
        intel_vgpu_mm_get_entry(mm, mm->virtual_page_table, e, index)

#define ggtt_set_guest_entry(mm, e, index) \
        intel_vgpu_mm_set_entry(mm, mm->virtual_page_table, e, index)

#define ggtt_get_shadow_entry(mm, e, index) \
        intel_vgpu_mm_get_entry(mm, mm->shadow_page_table, e, index)

#define ggtt_set_shadow_entry(mm, e, index) \
        intel_vgpu_mm_set_entry(mm, mm->shadow_page_table, e, index)

#define ppgtt_get_guest_root_entry(mm, e, index) \
        intel_vgpu_mm_get_entry(mm, mm->virtual_page_table, e, index)

#define ppgtt_set_guest_root_entry(mm, e, index) \
        intel_vgpu_mm_set_entry(mm, mm->virtual_page_table, e, index)

#define ppgtt_get_shadow_root_entry(mm, e, index) \
        intel_vgpu_mm_get_entry(mm, mm->shadow_page_table, e, index)

#define ppgtt_set_shadow_root_entry(mm, e, index) \
        intel_vgpu_mm_set_entry(mm, mm->shadow_page_table, e, index)
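
/*
 * Usage sketch (illustrative, not part of this patch): read a guest GGTT
 * entry and its shadow counterpart at the same index:
 *
 *      struct intel_gvt_gtt_entry ge, se;
 *
 *      ggtt_get_guest_entry(mm, &ge, index);
 *      ggtt_get_shadow_entry(mm, &se, index);
 */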
extern struct intel_vgpu_mm *intel_vgpu_create_mm(struct intel_vgpu *vgpu,
                int mm_type, void *virtual_page_table, int page_table_level,
                u32 pde_base_index);
extern void intel_vgpu_destroy_mm(struct kref *mm_ref);

struct intel_vgpu_guest_page;

struct intel_vgpu_gtt {
        struct intel_vgpu_mm *ggtt_mm;
        unsigned long active_ppgtt_mm_bitmap;
        struct list_head mm_list_head;
        DECLARE_HASHTABLE(shadow_page_hash_table, INTEL_GVT_GTT_HASH_BITS);
        DECLARE_HASHTABLE(guest_page_hash_table, INTEL_GVT_GTT_HASH_BITS);
        atomic_t n_write_protected_guest_page;
        struct list_head oos_page_list_head;
        struct list_head post_shadow_list_head;
        struct page *scratch_page;
        unsigned long scratch_page_mfn;
};
extern int intel_vgpu_init_gtt(struct intel_vgpu *vgpu);
extern void intel_vgpu_clean_gtt(struct intel_vgpu *vgpu);

extern int intel_gvt_init_gtt(struct intel_gvt *gvt);
extern void intel_gvt_clean_gtt(struct intel_gvt *gvt);

extern struct intel_vgpu_mm *intel_gvt_find_ppgtt_mm(struct intel_vgpu *vgpu,
                int page_table_level, void *root_entry);

struct intel_vgpu_oos_page;
struct intel_vgpu_shadow_page {
        void *vaddr;
        struct page *page;
        int type;
        struct hlist_node node;
        unsigned long mfn;
};

struct intel_vgpu_guest_page {
        struct hlist_node node;
        bool writeprotection;
        unsigned long gfn;
        int (*handler)(void *, u64, void *, int);
        void *data;
        unsigned long write_cnt;
        struct intel_vgpu_oos_page *oos_page;
};

struct intel_vgpu_oos_page {
        struct intel_vgpu_guest_page *guest_page;
        struct list_head list;
        struct list_head vm_list;
        int id;
        unsigned char mem[GTT_PAGE_SIZE];
};
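
/* A 4KB page table page holds 4096 / 8 = 512 64-bit entries. */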
#define GTT_ENTRY_NUM_IN_ONE_PAGE 512
struct intel_vgpu_ppgtt_spt {
        struct intel_vgpu_shadow_page shadow_page;
        struct intel_vgpu_guest_page guest_page;
        int guest_page_type;
        atomic_t refcount;
        struct intel_vgpu *vgpu;

        DECLARE_BITMAP(post_shadow_bitmap, GTT_ENTRY_NUM_IN_ONE_PAGE);
        struct list_head post_shadow_list;
};
int intel_vgpu_init_guest_page(struct intel_vgpu *vgpu,
                struct intel_vgpu_guest_page *guest_page,
                unsigned long gfn,
                int (*handler)(void *gp, u64, void *, int),
                void *data);

void intel_vgpu_clean_guest_page(struct intel_vgpu *vgpu,
                struct intel_vgpu_guest_page *guest_page);

int intel_vgpu_set_guest_page_writeprotection(struct intel_vgpu *vgpu,
                struct intel_vgpu_guest_page *guest_page);

void intel_vgpu_clear_guest_page_writeprotection(struct intel_vgpu *vgpu,
                struct intel_vgpu_guest_page *guest_page);

struct intel_vgpu_guest_page *intel_vgpu_find_guest_page(
                struct intel_vgpu *vgpu, unsigned long gfn);

int intel_vgpu_sync_oos_pages(struct intel_vgpu *vgpu);
int intel_vgpu_flush_post_shadow(struct intel_vgpu *vgpu);
static inline void intel_gvt_mm_reference(struct intel_vgpu_mm *mm)
{
        kref_get(&mm->ref);
}

static inline void intel_gvt_mm_unreference(struct intel_vgpu_mm *mm)
{
        kref_put(&mm->ref, intel_vgpu_destroy_mm);
}
int intel_vgpu_pin_mm(struct intel_vgpu_mm *mm);
void intel_vgpu_unpin_mm(struct intel_vgpu_mm *mm);

unsigned long intel_vgpu_gma_to_gpa(struct intel_vgpu_mm *mm,
                unsigned long gma);

struct intel_vgpu_mm *intel_vgpu_find_ppgtt_mm(struct intel_vgpu *vgpu,
                int page_table_level, void *root_entry);

int intel_vgpu_g2v_create_ppgtt_mm(struct intel_vgpu *vgpu,
                int page_table_level);
int intel_vgpu_g2v_destroy_ppgtt_mm(struct intel_vgpu *vgpu,
                int page_table_level);

int intel_vgpu_emulate_gtt_mmio_read(struct intel_vgpu *vgpu,
                unsigned int off, void *p_data, unsigned int bytes);
int intel_vgpu_emulate_gtt_mmio_write(struct intel_vgpu *vgpu,
                unsigned int off, void *p_data, unsigned int bytes);

#endif /* _GVT_GTT_H_ */
@@ -101,6 +101,9 @@ static void init_device_info(struct intel_gvt *gvt)
                info->mmio_size = 2 * 1024 * 1024;
                info->mmio_bar = 0;
                info->msi_cap_offset = IS_SKYLAKE(gvt->dev_priv) ? 0xac : 0x90;
                info->gtt_start_offset = 8 * 1024 * 1024;
                info->gtt_entry_size = 8;
                info->gtt_entry_size_shift = 3;
        }
}
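
/*
 * Note (illustrative): with gtt_start_offset = 8MB and 8-byte entries, a
 * GTT access at MMIO BAR offset `off` maps to GGTT entry index
 * (off - info->gtt_start_offset) >> info->gtt_entry_size_shift,
 * e.g. offset 0x800010 -> index 2.
 */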
@@ -119,6 +122,7 @@ void intel_gvt_clean_device(struct drm_i915_private *dev_priv)
        if (WARN_ON(!gvt->initialized))
                return;

        intel_gvt_clean_gtt(gvt);
        intel_gvt_clean_irq(gvt);
        intel_gvt_clean_mmio_info(gvt);
        intel_gvt_free_firmware(gvt);
@@ -171,10 +175,16 @@ int intel_gvt_init_device(struct drm_i915_private *dev_priv)
        if (ret)
                goto out_free_firmware;

        ret = intel_gvt_init_gtt(gvt);
        if (ret)
                goto out_clean_irq;

        gvt_dbg_core("gvt device creation is done\n");
        gvt->initialized = true;
        return 0;

out_clean_irq:
        intel_gvt_clean_irq(gvt);
out_free_firmware:
        intel_gvt_free_firmware(gvt);
out_clean_mmio_info:
......
@@ -38,6 +38,7 @@
#include "mmio.h"
#include "reg.h"
#include "interrupt.h"
#include "gtt.h"
#define GVT_MAX_VGPU 8
@@ -61,6 +62,9 @@ struct intel_gvt_device_info {
        u32 mmio_size;
        u32 mmio_bar;
        unsigned long msi_cap_offset;
        u32 gtt_start_offset;
        u32 gtt_entry_size;
        u32 gtt_entry_size_shift;
};
/* GM resources owned by a vGPU */
@@ -116,6 +120,7 @@ struct intel_vgpu {
        struct intel_vgpu_cfg_space cfg_space;
        struct intel_vgpu_mmio mmio;
        struct intel_vgpu_irq irq;
        struct intel_vgpu_gtt gtt;
};
struct intel_gvt_gm {
@@ -153,6 +158,7 @@ struct intel_gvt {
        struct intel_gvt_mmio mmio;
        struct intel_gvt_firmware firmware;
        struct intel_gvt_irq irq;
        struct intel_gvt_gtt gtt;
};
void intel_gvt_free_firmware(struct intel_gvt *gvt);
@@ -262,6 +268,38 @@ struct intel_vgpu *intel_gvt_create_vgpu(struct intel_gvt *gvt,
void intel_gvt_destroy_vgpu(struct intel_vgpu *vgpu);
/* validating GM functions */
#define vgpu_gmadr_is_aperture(vgpu, gmadr) \
        ((gmadr >= vgpu_aperture_gmadr_base(vgpu)) && \
         (gmadr <= vgpu_aperture_gmadr_end(vgpu)))

#define vgpu_gmadr_is_hidden(vgpu, gmadr) \
        ((gmadr >= vgpu_hidden_gmadr_base(vgpu)) && \
         (gmadr <= vgpu_hidden_gmadr_end(vgpu)))

#define vgpu_gmadr_is_valid(vgpu, gmadr) \
        ((vgpu_gmadr_is_aperture(vgpu, gmadr) || \
         (vgpu_gmadr_is_hidden(vgpu, gmadr))))

#define gvt_gmadr_is_aperture(gvt, gmadr) \
        ((gmadr >= gvt_aperture_gmadr_base(gvt)) && \
         (gmadr <= gvt_aperture_gmadr_end(gvt)))

#define gvt_gmadr_is_hidden(gvt, gmadr) \
        ((gmadr >= gvt_hidden_gmadr_base(gvt)) && \
         (gmadr <= gvt_hidden_gmadr_end(gvt)))

#define gvt_gmadr_is_valid(gvt, gmadr) \
        (gvt_gmadr_is_aperture(gvt, gmadr) || \
         gvt_gmadr_is_hidden(gvt, gmadr))

bool intel_gvt_ggtt_validate_range(struct intel_vgpu *vgpu, u64 addr, u32 size);
int intel_gvt_ggtt_gmadr_g2h(struct intel_vgpu *vgpu, u64 g_addr, u64 *h_addr);
int intel_gvt_ggtt_gmadr_h2g(struct intel_vgpu *vgpu, u64 h_addr, u64 *g_addr);
int intel_gvt_ggtt_index_g2h(struct intel_vgpu *vgpu, unsigned long g_index,
                unsigned long *h_index);
int intel_gvt_ggtt_h2g_index(struct intel_vgpu *vgpu, unsigned long h_index,
                unsigned long *g_index);
#include "mpt.h"
#endif
@@ -42,6 +42,14 @@ struct intel_gvt_mpt {
        int (*attach_vgpu)(void *vgpu, unsigned long *handle);
        void (*detach_vgpu)(unsigned long handle);
        int (*inject_msi)(unsigned long handle, u32 addr, u16 data);
        unsigned long (*from_virt_to_mfn)(void *p);
        int (*set_wp_page)(unsigned long handle, u64 gfn);
        int (*unset_wp_page)(unsigned long handle, u64 gfn);
        int (*read_gpa)(unsigned long handle, unsigned long gpa, void *buf,
                        unsigned long len);
        int (*write_gpa)(unsigned long handle, unsigned long gpa, void *buf,
                        unsigned long len);
        unsigned long (*gfn_to_mfn)(unsigned long handle, unsigned long gfn);
};
extern struct intel_gvt_mpt xengt_mpt;
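
/*
 * Sketch (hypothetical backend, for illustration only): a hypervisor
 * module supplies these primitives by filling in an intel_gvt_mpt:
 *
 *      static struct intel_gvt_mpt example_mpt = {
 *              .from_virt_to_mfn = example_virt_to_mfn,
 *              .set_wp_page = example_set_wp_page,
 *              .unset_wp_page = example_unset_wp_page,
 *              .read_gpa = example_read_gpa,
 *              .write_gpa = example_write_gpa,
 *              .gfn_to_mfn = example_gfn_to_mfn,
 *      };
 */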
......
@@ -117,4 +117,111 @@ static inline int intel_gvt_hypervisor_inject_msi(struct intel_vgpu *vgpu)
        return 0;
}
/**
 * intel_gvt_hypervisor_virt_to_mfn - translate a host VA into MFN
 * @p: host kernel virtual address
 *
 * Returns:
 * MFN on success, INTEL_GVT_INVALID_ADDR if failed.
 */
static inline unsigned long intel_gvt_hypervisor_virt_to_mfn(void *p)
{
        return intel_gvt_host.mpt->from_virt_to_mfn(p);
}
/**
 * intel_gvt_hypervisor_set_wp_page - set a guest page to write-protected
 * @vgpu: a vGPU
 * @p: intel_vgpu_guest_page
 *
 * Returns:
 * Zero on success, negative error code if failed.
 */
static inline int intel_gvt_hypervisor_set_wp_page(struct intel_vgpu *vgpu,
                struct intel_vgpu_guest_page *p)
{
        int ret;

        if (p->writeprotection)
                return 0;

        ret = intel_gvt_host.mpt->set_wp_page(vgpu->handle, p->gfn);
        if (ret)
                return ret;
        p->writeprotection = true;
        atomic_inc(&vgpu->gtt.n_write_protected_guest_page);
        return 0;
}
/**
 * intel_gvt_hypervisor_unset_wp_page - remove the write-protection of a
 * guest page
 * @vgpu: a vGPU
 * @p: intel_vgpu_guest_page
 *
 * Returns:
 * Zero on success, negative error code if failed.
 */
static inline int intel_gvt_hypervisor_unset_wp_page(struct intel_vgpu *vgpu,
                struct intel_vgpu_guest_page *p)
{
        int ret;

        if (!p->writeprotection)
                return 0;

        ret = intel_gvt_host.mpt->unset_wp_page(vgpu->handle, p->gfn);
        if (ret)
                return ret;
        p->writeprotection = false;
        atomic_dec(&vgpu->gtt.n_write_protected_guest_page);
        return 0;
}
/**
 * intel_gvt_hypervisor_read_gpa - copy data from GPA to host data buffer
 * @vgpu: a vGPU
 * @gpa: guest physical address
 * @buf: host data buffer
 * @len: data length
 *
 * Returns:
 * Zero on success, negative error code if failed.
 */
static inline int intel_gvt_hypervisor_read_gpa(struct intel_vgpu *vgpu,
                unsigned long gpa, void *buf, unsigned long len)
{
        return intel_gvt_host.mpt->read_gpa(vgpu->handle, gpa, buf, len);
}

/**
 * intel_gvt_hypervisor_write_gpa - copy data from host data buffer to GPA
 * @vgpu: a vGPU
 * @gpa: guest physical address
 * @buf: host data buffer
 * @len: data length
 *
 * Returns:
 * Zero on success, negative error code if failed.
 */
static inline int intel_gvt_hypervisor_write_gpa(struct intel_vgpu *vgpu,
                unsigned long gpa, void *buf, unsigned long len)
{
        return intel_gvt_host.mpt->write_gpa(vgpu->handle, gpa, buf, len);
}
/**
 * intel_gvt_hypervisor_gfn_to_mfn - translate a GFN to MFN
 * @vgpu: a vGPU
 * @gfn: guest page frame number
 *
 * Returns:
 * MFN on success, INTEL_GVT_INVALID_ADDR if failed.
 */
static inline unsigned long intel_gvt_hypervisor_gfn_to_mfn(
                struct intel_vgpu *vgpu, unsigned long gfn)
{
        return intel_gvt_host.mpt->gfn_to_mfn(vgpu->handle, gfn);
}
#endif /* _GVT_MPT_H_ */
@@ -39,6 +39,191 @@
#undef TRACE_SYSTEM
#define TRACE_SYSTEM gvt
TRACE_EVENT(spt_alloc,
        TP_PROTO(int id, void *spt, int type, unsigned long mfn,
                unsigned long gpt_gfn),

        TP_ARGS(id, spt, type, mfn, gpt_gfn),

        TP_STRUCT__entry(
                __field(int, id)
                __field(void *, spt)
                __field(int, type)
                __field(unsigned long, mfn)
                __field(unsigned long, gpt_gfn)
        ),

        TP_fast_assign(
                __entry->id = id;
                __entry->spt = spt;
                __entry->type = type;
                __entry->mfn = mfn;
                __entry->gpt_gfn = gpt_gfn;
        ),

        TP_printk("VM%d [alloc] spt %p type %d mfn 0x%lx gfn 0x%lx\n",
                __entry->id,
                __entry->spt,
                __entry->type,
                __entry->mfn,
                __entry->gpt_gfn)
);

TRACE_EVENT(spt_free,
        TP_PROTO(int id, void *spt, int type),

        TP_ARGS(id, spt, type),

        TP_STRUCT__entry(
                __field(int, id)
                __field(void *, spt)
                __field(int, type)
        ),

        TP_fast_assign(
                __entry->id = id;
                __entry->spt = spt;
                __entry->type = type;
        ),

        TP_printk("VM%u [free] spt %p type %d\n",
                __entry->id,
                __entry->spt,
                __entry->type)
);

#define MAX_BUF_LEN 256

TRACE_EVENT(gma_index,
        TP_PROTO(const char *prefix, unsigned long gma,
                unsigned long index),

        TP_ARGS(prefix, gma, index),

        TP_STRUCT__entry(
                __array(char, buf, MAX_BUF_LEN)
        ),

        TP_fast_assign(
                snprintf(__entry->buf, MAX_BUF_LEN,
                        "%s gma 0x%lx index 0x%lx\n", prefix, gma, index);
        ),

        TP_printk("%s", __entry->buf)
);

TRACE_EVENT(gma_translate,
        TP_PROTO(int id, char *type, int ring_id, int pt_level,
                unsigned long gma, unsigned long gpa),

        TP_ARGS(id, type, ring_id, pt_level, gma, gpa),

        TP_STRUCT__entry(
                __array(char, buf, MAX_BUF_LEN)
        ),

        TP_fast_assign(
                snprintf(__entry->buf, MAX_BUF_LEN,
                        "VM%d %s ring %d pt_level %d gma 0x%lx -> gpa 0x%lx\n",
                        id, type, ring_id, pt_level, gma, gpa);
        ),

        TP_printk("%s", __entry->buf)
);

TRACE_EVENT(spt_refcount,
        TP_PROTO(int id, char *action, void *spt, int before, int after),

        TP_ARGS(id, action, spt, before, after),

        TP_STRUCT__entry(
                __array(char, buf, MAX_BUF_LEN)
        ),

        TP_fast_assign(
                snprintf(__entry->buf, MAX_BUF_LEN,
                        "VM%d [%s] spt %p before %d -> after %d\n",
                        id, action, spt, before, after);
        ),

        TP_printk("%s", __entry->buf)
);

TRACE_EVENT(spt_change,
        TP_PROTO(int id, char *action, void *spt, unsigned long gfn,
                int type),

        TP_ARGS(id, action, spt, gfn, type),

        TP_STRUCT__entry(
                __array(char, buf, MAX_BUF_LEN)
        ),

        TP_fast_assign(
                snprintf(__entry->buf, MAX_BUF_LEN,
                        "VM%d [%s] spt %p gfn 0x%lx type %d\n",
                        id, action, spt, gfn, type);
        ),

        TP_printk("%s", __entry->buf)
);

TRACE_EVENT(gpt_change,
        TP_PROTO(int id, const char *tag, void *spt, int type, u64 v,
                unsigned long index),

        TP_ARGS(id, tag, spt, type, v, index),

        TP_STRUCT__entry(
                __array(char, buf, MAX_BUF_LEN)
        ),

        TP_fast_assign(
                snprintf(__entry->buf, MAX_BUF_LEN,
                        "VM%d [%s] spt %p type %d entry 0x%llx index 0x%lx\n",
                        id, tag, spt, type, v, index);
        ),

        TP_printk("%s", __entry->buf)
);

TRACE_EVENT(oos_change,
        TP_PROTO(int id, const char *tag, int page_id, void *gpt, int type),

        TP_ARGS(id, tag, page_id, gpt, type),

        TP_STRUCT__entry(
                __array(char, buf, MAX_BUF_LEN)
        ),

        TP_fast_assign(
                snprintf(__entry->buf, MAX_BUF_LEN,
                        "VM%d [oos %s] page id %d gpt %p type %d\n",
                        id, tag, page_id, gpt, type);
        ),

        TP_printk("%s", __entry->buf)
);

TRACE_EVENT(oos_sync,
        TP_PROTO(int id, int page_id, void *gpt, int type, u64 v,
                unsigned long index),

        TP_ARGS(id, page_id, gpt, type, v, index),

        TP_STRUCT__entry(
                __array(char, buf, MAX_BUF_LEN)
        ),

        TP_fast_assign(
                snprintf(__entry->buf, MAX_BUF_LEN,
                        "VM%d [oos sync] page id %d gpt %p type %d entry 0x%llx index 0x%lx\n",
                        id, page_id, gpt, type, v, index);
        ),

        TP_printk("%s", __entry->buf)
);
#endif /* _GVT_TRACE_H_ */
/* This part must be out of protection */
......
@@ -141,6 +141,7 @@ void intel_gvt_destroy_vgpu(struct intel_vgpu *vgpu)
        vgpu->active = false;
        idr_remove(&gvt->vgpu_idr, vgpu->id);

        intel_vgpu_clean_gtt(vgpu);
        intel_gvt_hypervisor_detach_vgpu(vgpu);
        intel_vgpu_free_resource(vgpu);
        clean_vgpu_mmio(vgpu);
@@ -199,11 +200,17 @@ struct intel_vgpu *intel_gvt_create_vgpu(struct intel_gvt *gvt,
        if (ret)
                goto out_clean_vgpu_resource;

        ret = intel_vgpu_init_gtt(vgpu);
        if (ret)
                goto out_detach_hypervisor_vgpu;

        vgpu->active = true;
        mutex_unlock(&gvt->lock);

        return vgpu;

out_detach_hypervisor_vgpu:
        intel_gvt_hypervisor_detach_vgpu(vgpu);
out_clean_vgpu_resource:
        intel_vgpu_free_resource(vgpu);
out_clean_vgpu_mmio:
......