Commit 963976cf authored by Dave Airlie

Merge tag 'drm-intel-next-2018-03-08' of git://anongit.freedesktop.org/drm/drm-intel into drm-next

UAPI Changes:

- Query uAPI interface (used for GPU topology information currently)
	* Mesa: https://patchwork.freedesktop.org/series/38795/
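
  A minimal userspace sketch of the intended two-pass usage (struct and ioctl
  names as added by this series in i915_drm.h; the helper itself is
  hypothetical and error handling is trimmed):

  #include <stdint.h>
  #include <stdlib.h>
  #include <sys/ioctl.h>
  #include <drm/i915_drm.h>

  /* Hypothetical helper: fetch the raw topology blob for later parsing. */
  static void *query_topology(int drm_fd, int32_t *len)
  {
  	struct drm_i915_query_item item = {
  		.query_id = DRM_I915_QUERY_TOPOLOGY_INFO,
  	};
  	struct drm_i915_query q = {
  		.num_items = 1,
  		.items_ptr = (uintptr_t)&item,
  	};
  	void *buf;

  	/* Pass 1: item.length == 0, so the kernel reports the required size. */
  	if (ioctl(drm_fd, DRM_IOCTL_I915_QUERY, &q) || item.length <= 0)
  		return NULL;

  	buf = calloc(1, item.length);
  	if (!buf)
  		return NULL;
  	item.data_ptr = (uintptr_t)buf;

  	/* Pass 2: the kernel fills the buffer with the topology info. */
  	if (ioctl(drm_fd, DRM_IOCTL_I915_QUERY, &q)) {
  		free(buf);
  		return NULL;
  	}

  	*len = item.length;
  	return buf;
  }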

Driver Changes:

- Increase PSR2 size for CNL (DK)
- Avoid retraining LSPCON link unnecessarily (Ville)
- Decrease request signaling latency (Chris)
- GuC error capture fix (Daniele)

* tag 'drm-intel-next-2018-03-08' of git://anongit.freedesktop.org/drm/drm-intel: (127 commits)
  drm/i915: Update DRIVER_DATE to 20180308
  drm/i915: add schedule out notification of preempted but completed request
  drm/i915: expose rcs topology through query uAPI
  drm/i915: add query uAPI
  drm/i915: add rcs topology to error state
  drm/i915/debugfs: add rcs topology entry
  drm/i915/debugfs: reuse max slice/subslices already stored in sseu
  drm/i915: store all subslice masks
  drm/i915/guc: work around gcc-4.4.4 union initializer issue
  drm/i915/cnl: Add Wa_2201832410
  drm/i915/icl: Gen11 forcewake support
  drm/i915/icl: Add Indirect Context Offset for Gen11
  drm/i915/icl: Enhanced execution list support
  drm/i915/icl: new context descriptor support
  drm/i915/icl: Correctly initialize the Gen11 engines
  drm/i915: Assert that the request is indeed complete when signaled from irq
  drm/i915: Handle changing enable_fbc parameter at runtime better.
  drm/i915: Track whether the DP link is trained or not
  drm/i915: Nuke intel_dp->channel_eq_status
  drm/i915: Move SST DP link retraining into the ->post_hotplug() hook
  ...
parents 6fa7324a cf07a60f
......@@ -450,5 +450,12 @@ See drivers/gpu/drm/amd/display/TODO for tasks.
Contact: Harry Wentland, Alex Deucher
i915
----
- Our early/late pm callbacks could be removed in favour of using
device_link_add to model the dependency between i915 and snd_hda. See
https://dri.freedesktop.org/docs/drm/driver-api/device_link.html
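
A rough sketch of what that could look like (function and variable names here
are hypothetical; it assumes the audio side can reach the i915 struct device,
e.g. through the component framework):

#include <linux/device.h>

/* Hypothetical: called once snd_hda has looked up the i915 device. */
static int hda_link_to_i915(struct device *hda_dev, struct device *i915_dev)
{
	struct device_link *link;

	/* i915 is the supplier, snd_hda the consumer; PM ordering follows. */
	link = device_link_add(hda_dev, i915_dev,
			       DL_FLAG_PM_RUNTIME | DL_FLAG_RPM_ACTIVE);
	return link ? 0 : -ENODEV;
}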
Outside DRM
===========
......@@ -1247,12 +1247,15 @@ void drm_crtc_vblank_on(struct drm_crtc *crtc)
EXPORT_SYMBOL(drm_crtc_vblank_on);
/**
* drm_vblank_restore - estimated vblanks using timestamps and update it.
* drm_vblank_restore - estimate missed vblanks and update vblank count.
* @dev: DRM device
* @pipe: CRTC index
*
* Power management features can cause frame counter resets between vblank
* disable and enable. Drivers can then use this function in their
* &drm_crtc_funcs.enable_vblank implementation to estimate the vblanks since
* the last &drm_crtc_funcs.disable_vblank.
* disable and enable. Drivers can use this function in their
* &drm_crtc_funcs.enable_vblank implementation to estimate missed vblanks since
* the last &drm_crtc_funcs.disable_vblank using timestamps and update the
* vblank counter.
*
* This function is the legacy version of drm_crtc_vblank_restore().
*/
......@@ -1293,11 +1296,14 @@ void drm_vblank_restore(struct drm_device *dev, unsigned int pipe)
EXPORT_SYMBOL(drm_vblank_restore);
/**
* drm_crtc_vblank_restore - estimate vblanks using timestamps and update it.
* drm_crtc_vblank_restore - estimate missed vblanks and update vblank count.
* @crtc: CRTC in question
*
* Power management features can cause frame counter resets between vblank
* disable and enable. Drivers can then use this function in their
* &drm_crtc_funcs.enable_vblank implementation to estimate the vblanks since
* the last &drm_crtc_funcs.disable_vblank.
* disable and enable. Drivers can use this function in their
* &drm_crtc_funcs.enable_vblank implementation to estimate missed vblanks since
* the last &drm_crtc_funcs.disable_vblank using timestamps and update the
* vblank counter.
*/
void drm_crtc_vblank_restore(struct drm_crtc *crtc)
{
......
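
As the kernel-doc above suggests, a driver would call this from its
&drm_crtc_funcs.enable_vblank hook; a minimal hypothetical sketch (driver
types and helpers invented for illustration):

static int foo_enable_vblank(struct drm_crtc *crtc)
{
	struct foo_crtc *fcrtc = to_foo_crtc(crtc);	/* hypothetical */

	/* The HW frame counter may have been reset while power gated. */
	drm_crtc_vblank_restore(crtc);

	foo_hw_unmask_vblank_irq(fcrtc);		/* hypothetical */
	return 0;
}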
......@@ -63,13 +63,14 @@ i915-y += i915_cmd_parser.o \
i915_gem.o \
i915_gem_object.o \
i915_gem_render_state.o \
i915_gem_request.o \
i915_gem_shrinker.o \
i915_gem_stolen.o \
i915_gem_tiling.o \
i915_gem_timeline.o \
i915_gem_userptr.o \
i915_gemfs.o \
i915_query.o \
i915_request.o \
i915_trace_points.o \
i915_vma.o \
intel_breadcrumbs.o \
......@@ -89,7 +90,8 @@ i915-y += intel_uc.o \
intel_guc_fw.o \
intel_guc_log.o \
intel_guc_submission.o \
intel_huc.o
intel_huc.o \
intel_huc_fw.o
# autogenerated null render state
i915-y += intel_renderstate_gen6.o \
......
......@@ -3,7 +3,7 @@ GVT_DIR := gvt
GVT_SOURCE := gvt.o aperture_gm.o handlers.o vgpu.o trace_points.o firmware.o \
interrupt.o gtt.o cfg_space.o opregion.o mmio.o display.o edid.o \
execlist.o scheduler.o sched_policy.o mmio_context.o cmd_parser.o debugfs.o \
fb_decoder.o dmabuf.o
fb_decoder.o dmabuf.o page_track.o
ccflags-y += -I$(src) -I$(src)/$(GVT_DIR)
i915-y += $(addprefix $(GVT_DIR)/, $(GVT_SOURCE))
......
......@@ -459,7 +459,7 @@ int intel_vgpu_get_dmabuf(struct intel_vgpu *vgpu, unsigned int dmabuf_id)
obj = vgpu_create_gem(dev, dmabuf_obj->info);
if (obj == NULL) {
gvt_vgpu_err("create gvt gem obj failed:%d\n", vgpu->id);
gvt_vgpu_err("create gvt gem obj failed\n");
ret = -ENOMEM;
goto out;
}
......
......@@ -39,7 +39,6 @@
struct intel_vgpu_mm;
#define INTEL_GVT_GTT_HASH_BITS 8
#define INTEL_GVT_INVALID_ADDR (~0UL)
struct intel_gvt_gtt_entry {
......@@ -84,17 +83,12 @@ struct intel_gvt_gtt {
void (*mm_free_page_table)(struct intel_vgpu_mm *mm);
struct list_head oos_page_use_list_head;
struct list_head oos_page_free_list_head;
struct list_head mm_lru_list_head;
struct list_head ppgtt_mm_lru_list_head;
struct page *scratch_page;
unsigned long scratch_mfn;
};
enum {
INTEL_GVT_MM_GGTT = 0,
INTEL_GVT_MM_PPGTT,
};
typedef enum {
GTT_TYPE_INVALID = -1,
......@@ -125,66 +119,60 @@ typedef enum {
GTT_TYPE_MAX,
} intel_gvt_gtt_type_t;
struct intel_vgpu_mm {
int type;
bool initialized;
bool shadowed;
enum intel_gvt_mm_type {
INTEL_GVT_MM_GGTT,
INTEL_GVT_MM_PPGTT,
};
int page_table_entry_type;
u32 page_table_entry_size;
u32 page_table_entry_cnt;
void *virtual_page_table;
void *shadow_page_table;
#define GVT_RING_CTX_NR_PDPS GEN8_3LVL_PDPES
int page_table_level;
bool has_shadow_page_table;
u32 pde_base_index;
struct intel_vgpu_mm {
enum intel_gvt_mm_type type;
struct intel_vgpu *vgpu;
struct list_head list;
struct kref ref;
atomic_t pincount;
struct list_head lru_list;
struct intel_vgpu *vgpu;
};
extern int intel_vgpu_mm_get_entry(
struct intel_vgpu_mm *mm,
void *page_table, struct intel_gvt_gtt_entry *e,
unsigned long index);
extern int intel_vgpu_mm_set_entry(
struct intel_vgpu_mm *mm,
void *page_table, struct intel_gvt_gtt_entry *e,
unsigned long index);
#define ggtt_get_guest_entry(mm, e, index) \
intel_vgpu_mm_get_entry(mm, mm->virtual_page_table, e, index)
#define ggtt_set_guest_entry(mm, e, index) \
intel_vgpu_mm_set_entry(mm, mm->virtual_page_table, e, index)
#define ggtt_get_shadow_entry(mm, e, index) \
intel_vgpu_mm_get_entry(mm, mm->shadow_page_table, e, index)
#define ggtt_set_shadow_entry(mm, e, index) \
intel_vgpu_mm_set_entry(mm, mm->shadow_page_table, e, index)
union {
struct {
intel_gvt_gtt_type_t root_entry_type;
/*
* The 4 PDPs in the ring context. For 48-bit addressing,
* only PDP0 is valid and points to the PML4. For 32-bit
* addressing, all 4 are used as true PDPs.
*/
u64 guest_pdps[GVT_RING_CTX_NR_PDPS];
u64 shadow_pdps[GVT_RING_CTX_NR_PDPS];
bool shadowed;
struct list_head list;
struct list_head lru_list;
} ppgtt_mm;
struct {
void *virtual_ggtt;
} ggtt_mm;
};
};
#define ppgtt_get_guest_root_entry(mm, e, index) \
intel_vgpu_mm_get_entry(mm, mm->virtual_page_table, e, index)
struct intel_vgpu_mm *intel_vgpu_create_ppgtt_mm(struct intel_vgpu *vgpu,
intel_gvt_gtt_type_t root_entry_type, u64 pdps[]);
#define ppgtt_set_guest_root_entry(mm, e, index) \
intel_vgpu_mm_set_entry(mm, mm->virtual_page_table, e, index)
static inline void intel_vgpu_mm_get(struct intel_vgpu_mm *mm)
{
kref_get(&mm->ref);
}
#define ppgtt_get_shadow_root_entry(mm, e, index) \
intel_vgpu_mm_get_entry(mm, mm->shadow_page_table, e, index)
void _intel_vgpu_mm_release(struct kref *mm_ref);
#define ppgtt_set_shadow_root_entry(mm, e, index) \
intel_vgpu_mm_set_entry(mm, mm->shadow_page_table, e, index)
static inline void intel_vgpu_mm_put(struct intel_vgpu_mm *mm)
{
kref_put(&mm->ref, _intel_vgpu_mm_release);
}
extern struct intel_vgpu_mm *intel_vgpu_create_mm(struct intel_vgpu *vgpu,
int mm_type, void *virtual_page_table, int page_table_level,
u32 pde_base_index);
extern void intel_vgpu_destroy_mm(struct kref *mm_ref);
static inline void intel_vgpu_destroy_mm(struct intel_vgpu_mm *mm)
{
intel_vgpu_mm_put(mm);
}
struct intel_vgpu_guest_page;
......@@ -196,10 +184,8 @@ struct intel_vgpu_scratch_pt {
struct intel_vgpu_gtt {
struct intel_vgpu_mm *ggtt_mm;
unsigned long active_ppgtt_mm_bitmap;
struct list_head mm_list_head;
DECLARE_HASHTABLE(shadow_page_hash_table, INTEL_GVT_GTT_HASH_BITS);
DECLARE_HASHTABLE(tracked_guest_page_hash_table, INTEL_GVT_GTT_HASH_BITS);
atomic_t n_tracked_guest_page;
struct list_head ppgtt_mm_list_head;
struct radix_tree_root spt_tree;
struct list_head oos_page_list_head;
struct list_head post_shadow_list_head;
struct intel_vgpu_scratch_pt scratch_pt[GTT_TYPE_MAX];
......@@ -216,32 +202,8 @@ extern void intel_gvt_clean_gtt(struct intel_gvt *gvt);
extern struct intel_vgpu_mm *intel_gvt_find_ppgtt_mm(struct intel_vgpu *vgpu,
int page_table_level, void *root_entry);
struct intel_vgpu_oos_page;
struct intel_vgpu_shadow_page {
void *vaddr;
struct page *page;
int type;
struct hlist_node node;
unsigned long mfn;
};
struct intel_vgpu_page_track {
struct hlist_node node;
bool tracked;
unsigned long gfn;
int (*handler)(void *, u64, void *, int);
void *data;
};
struct intel_vgpu_guest_page {
struct intel_vgpu_page_track track;
unsigned long write_cnt;
struct intel_vgpu_oos_page *oos_page;
};
struct intel_vgpu_oos_page {
struct intel_vgpu_guest_page *guest_page;
struct intel_vgpu_ppgtt_spt *spt;
struct list_head list;
struct list_head vm_list;
int id;
......@@ -250,42 +212,33 @@ struct intel_vgpu_oos_page {
#define GTT_ENTRY_NUM_IN_ONE_PAGE 512
/* Represent a vgpu shadow page table. */
struct intel_vgpu_ppgtt_spt {
struct intel_vgpu_shadow_page shadow_page;
struct intel_vgpu_guest_page guest_page;
int guest_page_type;
atomic_t refcount;
struct intel_vgpu *vgpu;
DECLARE_BITMAP(post_shadow_bitmap, GTT_ENTRY_NUM_IN_ONE_PAGE);
struct list_head post_shadow_list;
};
int intel_vgpu_init_page_track(struct intel_vgpu *vgpu,
struct intel_vgpu_page_track *t,
unsigned long gfn,
int (*handler)(void *gp, u64, void *, int),
void *data);
struct {
intel_gvt_gtt_type_t type;
void *vaddr;
struct page *page;
unsigned long mfn;
} shadow_page;
void intel_vgpu_clean_page_track(struct intel_vgpu *vgpu,
struct intel_vgpu_page_track *t);
struct {
intel_gvt_gtt_type_t type;
unsigned long gfn;
unsigned long write_cnt;
struct intel_vgpu_oos_page *oos_page;
} guest_page;
struct intel_vgpu_page_track *intel_vgpu_find_tracked_page(
struct intel_vgpu *vgpu, unsigned long gfn);
DECLARE_BITMAP(post_shadow_bitmap, GTT_ENTRY_NUM_IN_ONE_PAGE);
struct list_head post_shadow_list;
};
int intel_vgpu_sync_oos_pages(struct intel_vgpu *vgpu);
int intel_vgpu_flush_post_shadow(struct intel_vgpu *vgpu);
static inline void intel_gvt_mm_reference(struct intel_vgpu_mm *mm)
{
kref_get(&mm->ref);
}
static inline void intel_gvt_mm_unreference(struct intel_vgpu_mm *mm)
{
kref_put(&mm->ref, intel_vgpu_destroy_mm);
}
int intel_vgpu_pin_mm(struct intel_vgpu_mm *mm);
void intel_vgpu_unpin_mm(struct intel_vgpu_mm *mm);
......@@ -294,21 +247,17 @@ unsigned long intel_vgpu_gma_to_gpa(struct intel_vgpu_mm *mm,
unsigned long gma);
struct intel_vgpu_mm *intel_vgpu_find_ppgtt_mm(struct intel_vgpu *vgpu,
int page_table_level, void *root_entry);
u64 pdps[]);
int intel_vgpu_g2v_create_ppgtt_mm(struct intel_vgpu *vgpu,
int page_table_level);
struct intel_vgpu_mm *intel_vgpu_get_ppgtt_mm(struct intel_vgpu *vgpu,
intel_gvt_gtt_type_t root_entry_type, u64 pdps[]);
int intel_vgpu_g2v_destroy_ppgtt_mm(struct intel_vgpu *vgpu,
int page_table_level);
int intel_vgpu_put_ppgtt_mm(struct intel_vgpu *vgpu, u64 pdps[]);
int intel_vgpu_emulate_gtt_mmio_read(struct intel_vgpu *vgpu,
int intel_vgpu_emulate_ggtt_mmio_read(struct intel_vgpu *vgpu,
unsigned int off, void *p_data, unsigned int bytes);
int intel_vgpu_emulate_gtt_mmio_write(struct intel_vgpu *vgpu,
int intel_vgpu_emulate_ggtt_mmio_write(struct intel_vgpu *vgpu,
unsigned int off, void *p_data, unsigned int bytes);
int intel_vgpu_write_protect_handler(struct intel_vgpu *vgpu, u64 pa,
void *p_data, unsigned int bytes);
#endif /* _GVT_GTT_H_ */
......@@ -183,7 +183,7 @@ static const struct intel_gvt_ops intel_gvt_ops = {
.get_gvt_attrs = intel_get_gvt_attrs,
.vgpu_query_plane = intel_vgpu_query_plane,
.vgpu_get_dmabuf = intel_vgpu_get_dmabuf,
.write_protect_handler = intel_vgpu_write_protect_handler,
.write_protect_handler = intel_vgpu_page_track_handler,
};
/**
......
......@@ -48,6 +48,7 @@
#include "cmd_parser.h"
#include "fb_decoder.h"
#include "dmabuf.h"
#include "page_track.h"
#define GVT_MAX_VGPU 8
......@@ -131,11 +132,9 @@ struct intel_vgpu_opregion {
#define vgpu_opregion(vgpu) (&(vgpu->opregion))
#define INTEL_GVT_MAX_PORT 5
struct intel_vgpu_display {
struct intel_vgpu_i2c_edid i2c_edid;
struct intel_vgpu_port ports[INTEL_GVT_MAX_PORT];
struct intel_vgpu_port ports[I915_MAX_PORTS];
struct intel_vgpu_sbi sbi;
};
......@@ -190,6 +189,7 @@ struct intel_vgpu {
struct intel_vgpu_opregion opregion;
struct intel_vgpu_display display;
struct intel_vgpu_submission submission;
struct radix_tree_root page_track_tree;
u32 hws_pga[I915_NUM_ENGINES];
struct dentry *debugfs;
......@@ -201,8 +201,16 @@ struct intel_vgpu {
int num_regions;
struct eventfd_ctx *intx_trigger;
struct eventfd_ctx *msi_trigger;
struct rb_root cache;
/*
* Two caches are used to avoid mapping duplicated pages (e.g.
* scratch pages). This helps to reduce dma setup overhead.
*/
struct rb_root gfn_cache;
struct rb_root dma_addr_cache;
unsigned long nr_cache_entries;
struct mutex cache_lock;
struct notifier_block iommu_notifier;
struct notifier_block group_notifier;
struct kvm *kvm;
......@@ -308,7 +316,10 @@ struct intel_gvt {
wait_queue_head_t service_thread_wq;
unsigned long service_request;
struct engine_mmio *engine_mmio_list;
struct {
struct engine_mmio *mmio;
int ctx_mmio_count[I915_NUM_ENGINES];
} engine_mmio_list;
struct dentry *debugfs_root;
};
......
......@@ -188,7 +188,9 @@ void enter_failsafe_mode(struct intel_vgpu *vgpu, int reason)
static int sanitize_fence_mmio_access(struct intel_vgpu *vgpu,
unsigned int fence_num, void *p_data, unsigned int bytes)
{
if (fence_num >= vgpu_fence_sz(vgpu)) {
unsigned int max_fence = vgpu_fence_sz(vgpu);
if (fence_num >= max_fence) {
/* When the guest accesses oob fence regs without accessing
* pv_info first, we treat the guest as not supporting GVT,
......@@ -201,7 +203,7 @@ static int sanitize_fence_mmio_access(struct intel_vgpu *vgpu,
if (!vgpu->mmio.disable_warn_untrack) {
gvt_vgpu_err("found oob fence register access\n");
gvt_vgpu_err("total fence %d, access fence %d\n",
vgpu_fence_sz(vgpu), fence_num);
max_fence, fence_num);
}
memset(p_data, 0, bytes);
return -EINVAL;
......@@ -320,7 +322,7 @@ static int gdrst_mmio_write(struct intel_vgpu *vgpu, unsigned int offset,
intel_gvt_reset_vgpu_locked(vgpu, false, engine_mask);
/* sw will wait for the device to ack the reset request */
vgpu_vreg(vgpu, offset) = 0;
vgpu_vreg(vgpu, offset) = 0;
return 0;
}
......@@ -1139,21 +1141,21 @@ static int pvinfo_mmio_read(struct intel_vgpu *vgpu, unsigned int offset,
static int handle_g2v_notification(struct intel_vgpu *vgpu, int notification)
{
int ret = 0;
intel_gvt_gtt_type_t root_entry_type = GTT_TYPE_PPGTT_ROOT_L4_ENTRY;
struct intel_vgpu_mm *mm;
u64 *pdps;
pdps = (u64 *)&vgpu_vreg64_t(vgpu, vgtif_reg(pdp[0]));
switch (notification) {
case VGT_G2V_PPGTT_L3_PAGE_TABLE_CREATE:
ret = intel_vgpu_g2v_create_ppgtt_mm(vgpu, 3);
break;
case VGT_G2V_PPGTT_L3_PAGE_TABLE_DESTROY:
ret = intel_vgpu_g2v_destroy_ppgtt_mm(vgpu, 3);
break;
root_entry_type = GTT_TYPE_PPGTT_ROOT_L3_ENTRY;
case VGT_G2V_PPGTT_L4_PAGE_TABLE_CREATE:
ret = intel_vgpu_g2v_create_ppgtt_mm(vgpu, 4);
break;
mm = intel_vgpu_get_ppgtt_mm(vgpu, root_entry_type, pdps);
return PTR_ERR_OR_ZERO(mm);
case VGT_G2V_PPGTT_L3_PAGE_TABLE_DESTROY:
case VGT_G2V_PPGTT_L4_PAGE_TABLE_DESTROY:
ret = intel_vgpu_g2v_destroy_ppgtt_mm(vgpu, 4);
break;
return intel_vgpu_put_ppgtt_mm(vgpu, pdps);
case VGT_G2V_EXECLIST_CONTEXT_CREATE:
case VGT_G2V_EXECLIST_CONTEXT_DESTROY:
case 1: /* Remove this in guest driver. */
......@@ -1161,7 +1163,7 @@ static int handle_g2v_notification(struct intel_vgpu *vgpu, int notification)
default:
gvt_vgpu_err("Invalid PV notification %d\n", notification);
}
return ret;
return 0;
}
static int send_display_ready_uevent(struct intel_vgpu *vgpu, int ready)
......@@ -1389,8 +1391,8 @@ static int hws_pga_write(struct intel_vgpu *vgpu, unsigned int offset,
int ring_id = intel_gvt_render_mmio_to_ring_id(vgpu->gvt, offset);
if (!intel_gvt_ggtt_validate_range(vgpu, value, I915_GTT_PAGE_SIZE)) {
gvt_vgpu_err("VM(%d) write invalid HWSP address, reg:0x%x, value:0x%x\n",
vgpu->id, offset, value);
gvt_vgpu_err("write invalid HWSP address, reg:0x%x, value:0x%x\n",
offset, value);
return -EINVAL;
}
/*
......@@ -1399,8 +1401,8 @@ static int hws_pga_write(struct intel_vgpu *vgpu, unsigned int offset,
* support BDW, SKL or other platforms with same HWSP registers.
*/
if (unlikely(ring_id < 0 || ring_id >= I915_NUM_ENGINES)) {
gvt_vgpu_err("VM(%d) access unknown hardware status page register:0x%x\n",
vgpu->id, offset);
gvt_vgpu_err("access unknown hardware status page register:0x%x\n",
offset);
return -EINVAL;
}
vgpu->hws_pga[ring_id] = value;
......
......@@ -44,13 +44,18 @@ struct intel_gvt_mpt {
void (*detach_vgpu)(unsigned long handle);
int (*inject_msi)(unsigned long handle, u32 addr, u16 data);
unsigned long (*from_virt_to_mfn)(void *p);
int (*set_wp_page)(unsigned long handle, u64 gfn);
int (*unset_wp_page)(unsigned long handle, u64 gfn);
int (*enable_page_track)(unsigned long handle, u64 gfn);
int (*disable_page_track)(unsigned long handle, u64 gfn);
int (*read_gpa)(unsigned long handle, unsigned long gpa, void *buf,
unsigned long len);
int (*write_gpa)(unsigned long handle, unsigned long gpa, void *buf,
unsigned long len);
unsigned long (*gfn_to_mfn)(unsigned long handle, unsigned long gfn);
int (*dma_map_guest_page)(unsigned long handle, unsigned long gfn,
dma_addr_t *dma_addr);
void (*dma_unmap_guest_page)(unsigned long handle, dma_addr_t dma_addr);
int (*map_gfn_to_mfn)(unsigned long handle, unsigned long gfn,
unsigned long mfn, unsigned int nr, bool map);
int (*set_trap_area)(unsigned long handle, u64 start, u64 end,
......
......@@ -76,10 +76,9 @@ static void failsafe_emulate_mmio_rw(struct intel_vgpu *vgpu, uint64_t pa,
else
intel_vgpu_default_mmio_write(vgpu, offset, p_data,
bytes);
} else if (reg_is_gtt(gvt, offset) &&
vgpu->gtt.ggtt_mm->virtual_page_table) {
} else if (reg_is_gtt(gvt, offset)) {
offset -= gvt->device_info.gtt_start_offset;
pt = vgpu->gtt.ggtt_mm->virtual_page_table + offset;
pt = vgpu->gtt.ggtt_mm->ggtt_mm.virtual_ggtt + offset;
if (read)
memcpy(p_data, pt, bytes);
else
......@@ -125,7 +124,7 @@ int intel_vgpu_emulate_mmio_read(struct intel_vgpu *vgpu, uint64_t pa,
if (WARN_ON(!reg_is_gtt(gvt, offset + bytes - 1)))
goto err;
ret = intel_vgpu_emulate_gtt_mmio_read(vgpu, offset,
ret = intel_vgpu_emulate_ggtt_mmio_read(vgpu, offset,
p_data, bytes);
if (ret)
goto err;
......@@ -198,7 +197,7 @@ int intel_vgpu_emulate_mmio_write(struct intel_vgpu *vgpu, uint64_t pa,
if (WARN_ON(!reg_is_gtt(gvt, offset + bytes - 1)))
goto err;
ret = intel_vgpu_emulate_gtt_mmio_write(vgpu, offset,
ret = intel_vgpu_emulate_ggtt_mmio_write(vgpu, offset,
p_data, bytes);
if (ret)
goto err;
......
......@@ -50,6 +50,8 @@
#define RING_GFX_MODE(base) _MMIO((base) + 0x29c)
#define VF_GUARDBAND _MMIO(0x83a4)
#define GEN9_MOCS_SIZE 64
/* Raw offset is appended to each line for convenience. */
static struct engine_mmio gen8_engine_mmio_list[] __cacheline_aligned = {
{RCS, GFX_MODE_GEN7, 0xffff, false}, /* 0x229c */
......@@ -151,8 +153,8 @@ static struct engine_mmio gen9_engine_mmio_list[] __cacheline_aligned = {
static struct {
bool initialized;
u32 control_table[I915_NUM_ENGINES][64];
u32 l3cc_table[32];
u32 control_table[I915_NUM_ENGINES][GEN9_MOCS_SIZE];
u32 l3cc_table[GEN9_MOCS_SIZE / 2];
} gen9_render_mocs;
static void load_render_mocs(struct drm_i915_private *dev_priv)
......@@ -169,7 +171,7 @@ static void load_render_mocs(struct drm_i915_private *dev_priv)
for (ring_id = 0; ring_id < ARRAY_SIZE(regs); ring_id++) {
offset.reg = regs[ring_id];
for (i = 0; i < 64; i++) {
for (i = 0; i < GEN9_MOCS_SIZE; i++) {
gen9_render_mocs.control_table[ring_id][i] =
I915_READ_FW(offset);
offset.reg += 4;
......@@ -177,7 +179,7 @@ static void load_render_mocs(struct drm_i915_private *dev_priv)
}
offset.reg = 0xb020;
for (i = 0; i < 32; i++) {
for (i = 0; i < GEN9_MOCS_SIZE / 2; i++) {
gen9_render_mocs.l3cc_table[i] =
I915_READ_FW(offset);
offset.reg += 4;
......@@ -185,6 +187,153 @@ static void load_render_mocs(struct drm_i915_private *dev_priv)
gen9_render_mocs.initialized = true;
}
static int
restore_context_mmio_for_inhibit(struct intel_vgpu *vgpu,
struct i915_request *req)
{
u32 *cs;
int ret;
struct engine_mmio *mmio;
struct intel_gvt *gvt = vgpu->gvt;
int ring_id = req->engine->id;
int count = gvt->engine_mmio_list.ctx_mmio_count[ring_id];
if (count == 0)
return 0;
ret = req->engine->emit_flush(req, EMIT_BARRIER);
if (ret)
return ret;
cs = intel_ring_begin(req, count * 2 + 2);
if (IS_ERR(cs))
return PTR_ERR(cs);
*cs++ = MI_LOAD_REGISTER_IMM(count);
for (mmio = gvt->engine_mmio_list.mmio;
i915_mmio_reg_valid(mmio->reg); mmio++) {
if (mmio->ring_id != ring_id ||
!mmio->in_context)
continue;
*cs++ = i915_mmio_reg_offset(mmio->reg);
*cs++ = vgpu_vreg_t(vgpu, mmio->reg) |
(mmio->mask << 16);
gvt_dbg_core("add lri reg pair 0x%x:0x%x in inhibit ctx, vgpu:%d, ring_id:%d\n",
*(cs-2), *(cs-1), vgpu->id, ring_id);
}
*cs++ = MI_NOOP;
intel_ring_advance(req, cs);
ret = req->engine->emit_flush(req, EMIT_BARRIER);
if (ret)
return ret;
return 0;
}
static int
restore_render_mocs_control_for_inhibit(struct intel_vgpu *vgpu,
struct i915_request *req)
{
unsigned int index;
u32 *cs;
cs = intel_ring_begin(req, 2 * GEN9_MOCS_SIZE + 2);
if (IS_ERR(cs))
return PTR_ERR(cs);
*cs++ = MI_LOAD_REGISTER_IMM(GEN9_MOCS_SIZE);
for (index = 0; index < GEN9_MOCS_SIZE; index++) {
*cs++ = i915_mmio_reg_offset(GEN9_GFX_MOCS(index));
*cs++ = vgpu_vreg_t(vgpu, GEN9_GFX_MOCS(index));
gvt_dbg_core("add lri reg pair 0x%x:0x%x in inhibit ctx, vgpu:%d, ring_id:%d\n",
*(cs-2), *(cs-1), vgpu->id, req->engine->id);
}
*cs++ = MI_NOOP;
intel_ring_advance(req, cs);
return 0;
}
static int
restore_render_mocs_l3cc_for_inhibit(struct intel_vgpu *vgpu,
struct i915_request *req)
{
unsigned int index;
u32 *cs;
cs = intel_ring_begin(req, 2 * GEN9_MOCS_SIZE / 2 + 2);
if (IS_ERR(cs))
return PTR_ERR(cs);
*cs++ = MI_LOAD_REGISTER_IMM(GEN9_MOCS_SIZE / 2);
for (index = 0; index < GEN9_MOCS_SIZE / 2; index++) {
*cs++ = i915_mmio_reg_offset(GEN9_LNCFCMOCS(index));
*cs++ = vgpu_vreg_t(vgpu, GEN9_LNCFCMOCS(index));
gvt_dbg_core("add lri reg pair 0x%x:0x%x in inhibit ctx, vgpu:%d, ring_id:%d\n",
*(cs-2), *(cs-1), vgpu->id, req->engine->id);
}
*cs++ = MI_NOOP;
intel_ring_advance(req, cs);
return 0;
}
/*
* Use the LRI command to initialize the mmio that lives in the context state
* image for an inhibit context; this covers the tracked engine mmio,
* render_mocs and render_mocs_l3cc.
*/
int intel_vgpu_restore_inhibit_context(struct intel_vgpu *vgpu,
struct i915_request *req)
{
int ret;
u32 *cs;
cs = intel_ring_begin(req, 2);
if (IS_ERR(cs))
return PTR_ERR(cs);
*cs++ = MI_ARB_ON_OFF | MI_ARB_DISABLE;
*cs++ = MI_NOOP;
intel_ring_advance(req, cs);
ret = restore_context_mmio_for_inhibit(vgpu, req);
if (ret)
goto out;
/* no MOCS registers in the context except for the render engine */
if (req->engine->id != RCS)
goto out;
ret = restore_render_mocs_control_for_inhibit(vgpu, req);
if (ret)
goto out;
ret = restore_render_mocs_l3cc_for_inhibit(vgpu, req);
if (ret)
goto out;
out:
cs = intel_ring_begin(req, 2);
if (IS_ERR(cs))
return PTR_ERR(cs);
*cs++ = MI_ARB_ON_OFF | MI_ARB_ENABLE;
*cs++ = MI_NOOP;
intel_ring_advance(req, cs);
return ret;
}
static void handle_tlb_pending_event(struct intel_vgpu *vgpu, int ring_id)
{
struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv;
......@@ -251,11 +400,14 @@ static void switch_mocs(struct intel_vgpu *pre, struct intel_vgpu *next,
if (WARN_ON(ring_id >= ARRAY_SIZE(regs)))
return;
if (IS_KABYLAKE(dev_priv) && ring_id == RCS)
return;
if (!pre && !gen9_render_mocs.initialized)
load_render_mocs(dev_priv);
offset.reg = regs[ring_id];
for (i = 0; i < 64; i++) {
for (i = 0; i < GEN9_MOCS_SIZE; i++) {
if (pre)
old_v = vgpu_vreg_t(pre, offset);
else
......@@ -273,7 +425,7 @@ static void switch_mocs(struct intel_vgpu *pre, struct intel_vgpu *next,
if (ring_id == RCS) {
l3_offset.reg = 0xb020;
for (i = 0; i < 32; i++) {
for (i = 0; i < GEN9_MOCS_SIZE / 2; i++) {
if (pre)
old_v = vgpu_vreg_t(pre, l3_offset);
else
......@@ -293,6 +445,16 @@ static void switch_mocs(struct intel_vgpu *pre, struct intel_vgpu *next,
#define CTX_CONTEXT_CONTROL_VAL 0x03
bool is_inhibit_context(struct i915_gem_context *ctx, int ring_id)
{
u32 *reg_state = ctx->engine[ring_id].lrc_reg_state;
u32 inhibit_mask =
_MASKED_BIT_ENABLE(CTX_CTRL_ENGINE_CTX_RESTORE_INHIBIT);
return inhibit_mask ==
(reg_state[CTX_CONTEXT_CONTROL_VAL] & inhibit_mask);
}
/* Switch ring mmio values (context). */
static void switch_mmio(struct intel_vgpu *pre,
struct intel_vgpu *next,
......@@ -300,9 +462,6 @@ static void switch_mmio(struct intel_vgpu *pre,
{
struct drm_i915_private *dev_priv;
struct intel_vgpu_submission *s;
u32 *reg_state, ctx_ctrl;
u32 inhibit_mask =
_MASKED_BIT_ENABLE(CTX_CTRL_ENGINE_CTX_RESTORE_INHIBIT);
struct engine_mmio *mmio;
u32 old_v, new_v;
......@@ -310,10 +469,18 @@ static void switch_mmio(struct intel_vgpu *pre,
if (IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv))
switch_mocs(pre, next, ring_id);
for (mmio = dev_priv->gvt->engine_mmio_list;
for (mmio = dev_priv->gvt->engine_mmio_list.mmio;
i915_mmio_reg_valid(mmio->reg); mmio++) {
if (mmio->ring_id != ring_id)
continue;
/*
* No need to save or restore the mmio that is in the context
* state image on Kabylake; it is initialized by the LRI command
* and saved/restored together with the context.
*/
if (IS_KABYLAKE(dev_priv) && mmio->in_context)
continue;
// save
if (pre) {
vgpu_vreg_t(pre, mmio->reg) = I915_READ_FW(mmio->reg);
......@@ -327,16 +494,13 @@ static void switch_mmio(struct intel_vgpu *pre,
// restore
if (next) {
s = &next->submission;
reg_state =
s->shadow_ctx->engine[ring_id].lrc_reg_state;
ctx_ctrl = reg_state[CTX_CONTEXT_CONTROL_VAL];
/*
* if it is an inhibit context, load in_context mmio
* into HW by mmio write. If it is not, skip this mmio
* write.
* No need to restore the mmio that is in the context state
* image if this is not an inhibit context; it will be restored
* by the context itself.
*/
if (mmio->in_context &&
(ctx_ctrl & inhibit_mask) != inhibit_mask)
!is_inhibit_context(s->shadow_ctx, ring_id))
continue;
if (mmio->mask)
......@@ -405,8 +569,16 @@ void intel_gvt_switch_mmio(struct intel_vgpu *pre,
*/
void intel_gvt_init_engine_mmio_context(struct intel_gvt *gvt)
{
struct engine_mmio *mmio;
if (IS_SKYLAKE(gvt->dev_priv) || IS_KABYLAKE(gvt->dev_priv))
gvt->engine_mmio_list = gen9_engine_mmio_list;
gvt->engine_mmio_list.mmio = gen9_engine_mmio_list;
else
gvt->engine_mmio_list = gen8_engine_mmio_list;
gvt->engine_mmio_list.mmio = gen8_engine_mmio_list;
for (mmio = gvt->engine_mmio_list.mmio;
i915_mmio_reg_valid(mmio->reg); mmio++) {
if (mmio->in_context)
gvt->engine_mmio_list.ctx_mmio_count[mmio->ring_id]++;
}
}
......@@ -49,4 +49,9 @@ void intel_gvt_switch_mmio(struct intel_vgpu *pre,
void intel_gvt_init_engine_mmio_context(struct intel_gvt *gvt);
bool is_inhibit_context(struct i915_gem_context *ctx, int ring_id);
int intel_vgpu_restore_inhibit_context(struct intel_vgpu *vgpu,
struct i915_request *req);
#endif
......@@ -154,54 +154,31 @@ static inline unsigned long intel_gvt_hypervisor_virt_to_mfn(void *p)
}
/**
* intel_gvt_hypervisor_enable - set a guest page to write-protected
* intel_gvt_hypervisor_enable_page_track - track a guest page
* @vgpu: a vGPU
* @t: page track data structure
* @gfn: the gfn of guest
*
* Returns:
* Zero on success, negative error code if failed.
*/
static inline int intel_gvt_hypervisor_enable_page_track(
struct intel_vgpu *vgpu,
struct intel_vgpu_page_track *t)
struct intel_vgpu *vgpu, unsigned long gfn)
{
int ret;
if (t->tracked)
return 0;
ret = intel_gvt_host.mpt->set_wp_page(vgpu->handle, t->gfn);
if (ret)
return ret;
t->tracked = true;
atomic_inc(&vgpu->gtt.n_tracked_guest_page);
return 0;
return intel_gvt_host.mpt->enable_page_track(vgpu->handle, gfn);
}
/**
* intel_gvt_hypervisor_disable_page_track - remove the write-protection of a
* guest page
* intel_gvt_hypervisor_disable_page_track - untrack a guest page
* @vgpu: a vGPU
* @t: page track data structure
* @gfn: the gfn of guest
*
* Returns:
* Zero on success, negative error code if failed.
*/
static inline int intel_gvt_hypervisor_disable_page_track(
struct intel_vgpu *vgpu,
struct intel_vgpu_page_track *t)
struct intel_vgpu *vgpu, unsigned long gfn)
{
int ret;
if (!t->tracked)
return 0;
ret = intel_gvt_host.mpt->unset_wp_page(vgpu->handle, t->gfn);
if (ret)
return ret;
t->tracked = false;
atomic_dec(&vgpu->gtt.n_tracked_guest_page);
return 0;
return intel_gvt_host.mpt->disable_page_track(vgpu->handle, gfn);
}
/**
......@@ -250,6 +227,34 @@ static inline unsigned long intel_gvt_hypervisor_gfn_to_mfn(
return intel_gvt_host.mpt->gfn_to_mfn(vgpu->handle, gfn);
}
/**
* intel_gvt_hypervisor_dma_map_guest_page - set up a dma mapping for a guest page
* @vgpu: a vGPU
* @gfn: guest pfn
* @dma_addr: retrieve allocated dma addr
*
* Returns:
* 0 on success, negative error code if failed.
*/
static inline int intel_gvt_hypervisor_dma_map_guest_page(
struct intel_vgpu *vgpu, unsigned long gfn,
dma_addr_t *dma_addr)
{
return intel_gvt_host.mpt->dma_map_guest_page(vgpu->handle, gfn,
dma_addr);
}
/**
* intel_gvt_hypervisor_dma_unmap_guest_page - cancel dma map for guest page
* @vgpu: a vGPU
* @dma_addr: the mapped dma addr
*/
static inline void intel_gvt_hypervisor_dma_unmap_guest_page(
struct intel_vgpu *vgpu, dma_addr_t dma_addr)
{
intel_gvt_host.mpt->dma_unmap_guest_page(vgpu->handle, dma_addr);
}
/**
* intel_gvt_hypervisor_map_gfn_to_mfn - map a GFN region to MFN
* @vgpu: a vGPU
......
/*
* Copyright(c) 2011-2017 Intel Corporation. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice (including the next
* paragraph) shall be included in all copies or substantial portions of the
* Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*/
#include "i915_drv.h"
#include "gvt.h"
/**
* intel_vgpu_find_page_track - find the page track record of a guest page
* @vgpu: a vGPU
* @gfn: the gfn of guest page
*
* Returns:
* A pointer to struct intel_vgpu_page_track if found, else NULL returned.
*/
struct intel_vgpu_page_track *intel_vgpu_find_page_track(
struct intel_vgpu *vgpu, unsigned long gfn)
{
return radix_tree_lookup(&vgpu->page_track_tree, gfn);
}
/**
* intel_vgpu_register_page_track - register a guest page to be tracked
* @vgpu: a vGPU
* @gfn: the gfn of guest page
*
* Returns:
* zero on success, negative error code if failed.
*/
int intel_vgpu_register_page_track(struct intel_vgpu *vgpu, unsigned long gfn,
gvt_page_track_handler_t handler, void *priv)
{
struct intel_vgpu_page_track *track;
int ret;
track = intel_vgpu_find_page_track(vgpu, gfn);
if (track)
return -EEXIST;
track = kzalloc(sizeof(*track), GFP_KERNEL);
if (!track)
return -ENOMEM;
track->handler = handler;
track->priv_data = priv;
ret = radix_tree_insert(&vgpu->page_track_tree, gfn, track);
if (ret) {
kfree(track);
return ret;
}
return 0;
}
/**
* intel_vgpu_unregister_page_track - unregister the tracked guest page
* @vgpu: a vGPU
* @gfn: the gfn of guest page
*
*/
void intel_vgpu_unregister_page_track(struct intel_vgpu *vgpu,
unsigned long gfn)
{
struct intel_vgpu_page_track *track;
track = radix_tree_delete(&vgpu->page_track_tree, gfn);
if (track) {
if (track->tracked)
intel_gvt_hypervisor_disable_page_track(vgpu, gfn);
kfree(track);
}
}
/**
* intel_vgpu_enable_page_track - set write-protection on guest page
* @vgpu: a vGPU
* @gfn: the gfn of guest page
*
* Returns:
* zero on success, negative error code if failed.
*/
int intel_vgpu_enable_page_track(struct intel_vgpu *vgpu, unsigned long gfn)
{
struct intel_vgpu_page_track *track;
int ret;
track = intel_vgpu_find_page_track(vgpu, gfn);
if (!track)
return -ENXIO;
if (track->tracked)
return 0;
ret = intel_gvt_hypervisor_enable_page_track(vgpu, gfn);
if (ret)
return ret;
track->tracked = true;
return 0;
}
/**
* intel_vgpu_disable_page_track - cancel write-protection on guest page
* @vgpu: a vGPU
* @gfn: the gfn of guest page
*
* Returns:
* zero on success, negative error code if failed.
*/
int intel_vgpu_disable_page_track(struct intel_vgpu *vgpu, unsigned long gfn)
{
struct intel_vgpu_page_track *track;
int ret;
track = intel_vgpu_find_page_track(vgpu, gfn);
if (!track)
return -ENXIO;
if (!track->tracked)
return 0;
ret = intel_gvt_hypervisor_disable_page_track(vgpu, gfn);
if (ret)
return ret;
track->tracked = false;
return 0;
}
/**
* intel_vgpu_page_track_handler - called when write to write-protected page
* @vgpu: a vGPU
* @gpa: the gpa of this write
* @data: the written data
* @bytes: the length of this write
*
* Returns:
* zero on success, negative error code if failed.
*/
int intel_vgpu_page_track_handler(struct intel_vgpu *vgpu, u64 gpa,
void *data, unsigned int bytes)
{
struct intel_gvt *gvt = vgpu->gvt;
struct intel_vgpu_page_track *page_track;
int ret = 0;
mutex_lock(&gvt->lock);
page_track = intel_vgpu_find_page_track(vgpu, gpa >> PAGE_SHIFT);
if (!page_track) {
ret = -ENXIO;
goto out;
}
if (unlikely(vgpu->failsafe)) {
/* Remove write protection to prevent future traps. */
intel_vgpu_disable_page_track(vgpu, gpa >> PAGE_SHIFT);
} else {
ret = page_track->handler(page_track, gpa, data, bytes);
if (ret)
gvt_err("guest page write error, gpa %llx\n", gpa);
}
out:
mutex_unlock(&gvt->lock);
return ret;
}
/*
* Copyright(c) 2011-2017 Intel Corporation. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice (including the next
* paragraph) shall be included in all copies or substantial portions of the
* Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*
*/
#ifndef _GVT_PAGE_TRACK_H_
#define _GVT_PAGE_TRACK_H_
struct intel_vgpu_page_track;
typedef int (*gvt_page_track_handler_t)(
struct intel_vgpu_page_track *page_track,
u64 gpa, void *data, int bytes);
/* Track record for a write-protected guest page. */
struct intel_vgpu_page_track {
gvt_page_track_handler_t handler;
bool tracked;
void *priv_data;
};
struct intel_vgpu_page_track *intel_vgpu_find_page_track(
struct intel_vgpu *vgpu, unsigned long gfn);
int intel_vgpu_register_page_track(struct intel_vgpu *vgpu,
unsigned long gfn, gvt_page_track_handler_t handler,
void *priv);
void intel_vgpu_unregister_page_track(struct intel_vgpu *vgpu,
unsigned long gfn);
int intel_vgpu_enable_page_track(struct intel_vgpu *vgpu, unsigned long gfn);
int intel_vgpu_disable_page_track(struct intel_vgpu *vgpu, unsigned long gfn);
int intel_vgpu_page_track_handler(struct intel_vgpu *vgpu, u64 gpa,
void *data, unsigned int bytes);
#endif
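
A hedged sketch of how a caller might use this interface (the handler, its
private data and the wrapper below are hypothetical; the real users added by
this series live in gtt.c):

static int my_pt_write_handler(struct intel_vgpu_page_track *page_track,
			       u64 gpa, void *data, int bytes)
{
	/* Hypothetical: propagate the guest write into the shadow copy. */
	return 0;
}

static int my_track_guest_page(struct intel_vgpu *vgpu, unsigned long gfn,
			       void *priv)
{
	int ret;

	ret = intel_vgpu_register_page_track(vgpu, gfn,
					     my_pt_write_handler, priv);
	if (ret)
		return ret;

	/* Ask the hypervisor to actually write-protect the page. */
	ret = intel_vgpu_enable_page_track(vgpu, gfn);
	if (ret)
		intel_vgpu_unregister_page_track(vgpu, gfn);

	return ret;
}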
......@@ -103,9 +103,8 @@ static void gvt_balance_timeslice(struct gvt_sched_data *sched_data)
list_for_each(pos, &sched_data->lru_runq_head) {
vgpu_data = container_of(pos, struct vgpu_sched_data, lru_list);
fair_timeslice = ms_to_ktime(GVT_TS_BALANCE_PERIOD_MS) *
vgpu_data->sched_ctl.weight /
total_weight;
fair_timeslice = ktime_divns(ms_to_ktime(GVT_TS_BALANCE_PERIOD_MS),
total_weight) * vgpu_data->sched_ctl.weight;
vgpu_data->allocated_ts = fair_timeslice;
vgpu_data->left_ts = vgpu_data->allocated_ts;
......
......@@ -113,7 +113,7 @@ static int populate_shadow_context(struct intel_vgpu_workload *workload)
#undef COPY_REG
set_context_pdp_root_pointer(shadow_ring_context,
workload->shadow_mm->shadow_page_table);
(void *)workload->shadow_mm->ppgtt_mm.shadow_pdps);
intel_gvt_hypervisor_read_gpa(vgpu,
workload->ring_context_gpa +
......@@ -126,7 +126,7 @@ static int populate_shadow_context(struct intel_vgpu_workload *workload)
return 0;
}
static inline bool is_gvt_request(struct drm_i915_gem_request *req)
static inline bool is_gvt_request(struct i915_request *req)
{
return i915_gem_context_force_single_submission(req->ctx);
}
......@@ -148,7 +148,7 @@ static void save_ring_hw_state(struct intel_vgpu *vgpu, int ring_id)
static int shadow_context_status_change(struct notifier_block *nb,
unsigned long action, void *data)
{
struct drm_i915_gem_request *req = (struct drm_i915_gem_request *)data;
struct i915_request *req = data;
struct intel_gvt *gvt = container_of(nb, struct intel_gvt,
shadow_ctx_notifier_block[req->engine->id]);
struct intel_gvt_workload_scheduler *scheduler = &gvt->scheduler;
......@@ -225,6 +225,11 @@ static int copy_workload_to_ring_buffer(struct intel_vgpu_workload *workload)
struct intel_vgpu *vgpu = workload->vgpu;
void *shadow_ring_buffer_va;
u32 *cs;
struct i915_request *req = workload->req;
if (IS_KABYLAKE(req->i915) &&
is_inhibit_context(req->ctx, req->engine->id))
intel_vgpu_restore_inhibit_context(vgpu, req);
/* allocate shadow ring buffer */
cs = intel_ring_begin(workload->req, workload->rb_len / sizeof(u32));
......@@ -333,13 +338,13 @@ static int intel_gvt_generate_request(struct intel_vgpu_workload *workload)
int ring_id = workload->ring_id;
struct drm_i915_private *dev_priv = workload->vgpu->gvt->dev_priv;
struct intel_engine_cs *engine = dev_priv->engine[ring_id];
struct drm_i915_gem_request *rq;
struct i915_request *rq;
struct intel_vgpu *vgpu = workload->vgpu;
struct intel_vgpu_submission *s = &vgpu->submission;
struct i915_gem_context *shadow_ctx = s->shadow_ctx;
int ret;
rq = i915_gem_request_alloc(dev_priv->engine[ring_id], shadow_ctx);
rq = i915_request_alloc(dev_priv->engine[ring_id], shadow_ctx);
if (IS_ERR(rq)) {
gvt_vgpu_err("fail to allocate gem request\n");
ret = PTR_ERR(rq);
......@@ -348,7 +353,7 @@ static int intel_gvt_generate_request(struct intel_vgpu_workload *workload)
gvt_dbg_sched("ring id %d get i915 gem request %p\n", ring_id, rq);
workload->req = i915_gem_request_get(rq);
workload->req = i915_request_get(rq);
ret = copy_workload_to_ring_buffer(workload);
if (ret)
goto err_unpin;
......@@ -582,7 +587,7 @@ static int dispatch_workload(struct intel_vgpu_workload *workload)
if (!IS_ERR_OR_NULL(workload->req)) {
gvt_dbg_sched("ring id %d submit workload to i915 %p\n",
ring_id, workload->req);
i915_add_request(workload->req);
i915_request_add(workload->req);
workload->dispatched = true;
}
......@@ -769,7 +774,7 @@ static void complete_current_workload(struct intel_gvt *gvt, int ring_id)
workload->status = 0;
}
i915_gem_request_put(fetch_and_zero(&workload->req));
i915_request_put(fetch_and_zero(&workload->req));
if (!workload->status && !(vgpu->resetting_eng &
ENGINE_MASK(ring_id))) {
......@@ -886,7 +891,7 @@ static int workload_thread(void *priv)
gvt_dbg_sched("ring id %d wait workload %p\n",
workload->ring_id, workload);
i915_wait_request(workload->req, 0, MAX_SCHEDULE_TIMEOUT);
i915_request_wait(workload->req, 0, MAX_SCHEDULE_TIMEOUT);
complete:
gvt_dbg_sched("will complete workload %p, status: %d\n",
......@@ -1132,7 +1137,7 @@ void intel_vgpu_destroy_workload(struct intel_vgpu_workload *workload)
struct intel_vgpu_submission *s = &workload->vgpu->submission;
if (workload->shadow_mm)
intel_gvt_mm_unreference(workload->shadow_mm);
intel_vgpu_mm_put(workload->shadow_mm);
kmem_cache_free(s->workloads, workload);
}
......@@ -1181,32 +1186,27 @@ static int prepare_mm(struct intel_vgpu_workload *workload)
struct execlist_ctx_descriptor_format *desc = &workload->ctx_desc;
struct intel_vgpu_mm *mm;
struct intel_vgpu *vgpu = workload->vgpu;
int page_table_level;
u32 pdp[8];
if (desc->addressing_mode == 1) { /* legacy 32-bit */
page_table_level = 3;
} else if (desc->addressing_mode == 3) { /* legacy 64 bit */
page_table_level = 4;
} else {
intel_gvt_gtt_type_t root_entry_type;
u64 pdps[GVT_RING_CTX_NR_PDPS];
switch (desc->addressing_mode) {
case 1: /* legacy 32-bit */
root_entry_type = GTT_TYPE_PPGTT_ROOT_L3_ENTRY;
break;
case 3: /* legacy 64-bit */
root_entry_type = GTT_TYPE_PPGTT_ROOT_L4_ENTRY;
break;
default:
gvt_vgpu_err("Advanced Context mode(SVM) is not supported!\n");
return -EINVAL;
}
read_guest_pdps(workload->vgpu, workload->ring_context_gpa, pdp);
read_guest_pdps(workload->vgpu, workload->ring_context_gpa, (void *)pdps);
mm = intel_vgpu_find_ppgtt_mm(workload->vgpu, page_table_level, pdp);
if (mm) {
intel_gvt_mm_reference(mm);
} else {
mm = intel_vgpu_get_ppgtt_mm(workload->vgpu, root_entry_type, pdps);
if (IS_ERR(mm))
return PTR_ERR(mm);
mm = intel_vgpu_create_mm(workload->vgpu, INTEL_GVT_MM_PPGTT,
pdp, page_table_level, 0);
if (IS_ERR(mm)) {
gvt_vgpu_err("fail to create mm object.\n");
return PTR_ERR(mm);
}
}
workload->shadow_mm = mm;
return 0;
}
......
......@@ -80,7 +80,7 @@ struct intel_shadow_wa_ctx {
struct intel_vgpu_workload {
struct intel_vgpu *vgpu;
int ring_id;
struct drm_i915_gem_request *req;
struct i915_request *req;
/* if this workload has been dispatched to i915? */
bool dispatched;
bool shadowed;
......
......@@ -113,10 +113,10 @@ TRACE_EVENT(gma_index,
);
TRACE_EVENT(gma_translate,
TP_PROTO(int id, char *type, int ring_id, int pt_level,
TP_PROTO(int id, char *type, int ring_id, int root_entry_type,
unsigned long gma, unsigned long gpa),
TP_ARGS(id, type, ring_id, pt_level, gma, gpa),
TP_ARGS(id, type, ring_id, root_entry_type, gma, gpa),
TP_STRUCT__entry(
__array(char, buf, MAX_BUF_LEN)
......@@ -124,8 +124,8 @@ TRACE_EVENT(gma_translate,
TP_fast_assign(
snprintf(__entry->buf, MAX_BUF_LEN,
"VM%d %s ring %d pt_level %d gma 0x%lx -> gpa 0x%lx\n",
id, type, ring_id, pt_level, gma, gpa);
"VM%d %s ring %d root_entry_type %d gma 0x%lx -> gpa 0x%lx\n",
id, type, ring_id, root_entry_type, gma, gpa);
),
TP_printk("%s", __entry->buf)
......@@ -168,7 +168,7 @@ TRACE_EVENT(spt_change,
TP_printk("%s", __entry->buf)
);
TRACE_EVENT(gpt_change,
TRACE_EVENT(spt_guest_change,
TP_PROTO(int id, const char *tag, void *spt, int type, u64 v,
unsigned long index),
......
......@@ -354,6 +354,7 @@ static struct intel_vgpu *__intel_gvt_create_vgpu(struct intel_gvt *gvt,
vgpu->gvt = gvt;
vgpu->sched_ctl.weight = param->weight;
INIT_LIST_HEAD(&vgpu->dmabuf_obj_list_head);
INIT_RADIX_TREE(&vgpu->page_track_tree, GFP_KERNEL);
idr_init(&vgpu->object_idr);
intel_vgpu_init_cfg_space(vgpu, param->primary);
......
......@@ -49,6 +49,7 @@
#include "i915_drv.h"
#include "i915_trace.h"
#include "i915_pmu.h"
#include "i915_query.h"
#include "i915_vgpu.h"
#include "intel_drv.h"
#include "intel_uc.h"
......@@ -428,7 +429,7 @@ static int i915_getparam_ioctl(struct drm_device *dev, void *data,
return -ENODEV;
break;
case I915_PARAM_SUBSLICE_MASK:
value = INTEL_INFO(dev_priv)->sseu.subslice_mask;
value = INTEL_INFO(dev_priv)->sseu.subslice_mask[0];
if (!value)
return -ENODEV;
break;
......@@ -808,7 +809,7 @@ static int i915_workqueues_init(struct drm_i915_private *dev_priv)
/*
* The i915 workqueue is primarily used for batched retirement of
* requests (and thus managing bo) once the task has been completed
* by the GPU. i915_gem_retire_requests() is called directly when we
* by the GPU. i915_retire_requests() is called directly when we
* need high-priority retirement, such as waiting for an explicit
* bo.
*
......@@ -1992,7 +1993,7 @@ void i915_reset(struct drm_i915_private *i915, unsigned int flags)
add_taint(TAINT_WARN, LOCKDEP_STILL_OK);
error:
i915_gem_set_wedged(i915);
i915_gem_retire_requests(i915);
i915_retire_requests(i915);
intel_gpu_reset(i915, ALL_ENGINES);
goto finish;
}
......@@ -2019,7 +2020,7 @@ static inline int intel_gt_reset_engine(struct drm_i915_private *dev_priv,
int i915_reset_engine(struct intel_engine_cs *engine, unsigned int flags)
{
struct i915_gpu_error *error = &engine->i915->gpu_error;
struct drm_i915_gem_request *active_request;
struct i915_request *active_request;
int ret;
GEM_BUG_ON(!test_bit(I915_RESET_ENGINE + engine->id, &error->flags));
......@@ -2575,7 +2576,7 @@ static int intel_runtime_suspend(struct device *kdev)
*/
i915_gem_runtime_suspend(dev_priv);
intel_guc_suspend(dev_priv);
intel_uc_suspend(dev_priv);
intel_runtime_pm_disable_interrupts(dev_priv);
......@@ -2597,7 +2598,7 @@ static int intel_runtime_suspend(struct device *kdev)
intel_runtime_pm_enable_interrupts(dev_priv);
intel_guc_resume(dev_priv);
intel_uc_resume(dev_priv);
i915_gem_init_swizzling(dev_priv);
i915_gem_restore_fences(dev_priv);
......@@ -2683,7 +2684,7 @@ static int intel_runtime_resume(struct device *kdev)
intel_runtime_pm_enable_interrupts(dev_priv);
intel_guc_resume(dev_priv);
intel_uc_resume(dev_priv);
/*
* No point of rolling back things in case of an error, as the best
......@@ -2832,6 +2833,7 @@ static const struct drm_ioctl_desc i915_ioctls[] = {
DRM_IOCTL_DEF_DRV(I915_PERF_OPEN, i915_perf_open_ioctl, DRM_RENDER_ALLOW),
DRM_IOCTL_DEF_DRV(I915_PERF_ADD_CONFIG, i915_perf_add_config_ioctl, DRM_UNLOCKED|DRM_RENDER_ALLOW),
DRM_IOCTL_DEF_DRV(I915_PERF_REMOVE_CONFIG, i915_perf_remove_config_ioctl, DRM_UNLOCKED|DRM_RENDER_ALLOW),
DRM_IOCTL_DEF_DRV(I915_QUERY, i915_query_ioctl, DRM_UNLOCKED|DRM_RENDER_ALLOW),
};
static struct drm_driver driver = {
......
......@@ -71,9 +71,9 @@
#include "i915_gem_fence_reg.h"
#include "i915_gem_object.h"
#include "i915_gem_gtt.h"
#include "i915_gem_request.h"
#include "i915_gem_timeline.h"
#include "i915_request.h"
#include "i915_vma.h"
#include "intel_gvt.h"
......@@ -83,8 +83,8 @@
#define DRIVER_NAME "i915"
#define DRIVER_DESC "Intel Graphics"
#define DRIVER_DATE "20180221"
#define DRIVER_TIMESTAMP 1519219289
#define DRIVER_DATE "20180308"
#define DRIVER_TIMESTAMP 1520513379
/* Use I915_STATE_WARN(x) and I915_STATE_WARN_ON() (rather than WARN() and
* WARN_ON()) for hw state sanity checks to check for unexpected conditions
......@@ -1231,7 +1231,7 @@ struct i915_gpu_error {
*
* #I915_WEDGED - If reset fails and we can no longer use the GPU,
* we set the #I915_WEDGED bit. Prior to command submission, e.g.
* i915_gem_request_alloc(), this bit is checked and the sequence
* i915_request_alloc(), this bit is checked and the sequence
* aborted (with -EIO reported to userspace) if set.
*/
unsigned long flags;
......@@ -2103,6 +2103,7 @@ struct drm_i915_private {
*/
struct ida hw_ida;
#define MAX_CONTEXT_HW_ID (1<<21) /* exclusive */
#define GEN11_MAX_CONTEXT_HW_ID (1<<11) /* exclusive */
} contexts;
u32 fdi_rx_config;
......@@ -2746,6 +2747,9 @@ intel_info(const struct drm_i915_private *dev_priv)
#define BLT_RING ENGINE_MASK(BCS)
#define VEBOX_RING ENGINE_MASK(VECS)
#define BSD2_RING ENGINE_MASK(VCS2)
#define BSD3_RING ENGINE_MASK(VCS3)
#define BSD4_RING ENGINE_MASK(VCS4)
#define VEBOX2_RING ENGINE_MASK(VECS2)
#define ALL_ENGINES (~0)
#define HAS_ENGINE(dev_priv, id) \
......@@ -2768,6 +2772,8 @@ intel_info(const struct drm_i915_private *dev_priv)
#define HAS_LOGICAL_RING_CONTEXTS(dev_priv) \
((dev_priv)->info.has_logical_ring_contexts)
#define HAS_LOGICAL_RING_ELSQ(dev_priv) \
((dev_priv)->info.has_logical_ring_elsq)
#define HAS_LOGICAL_RING_PREEMPTION(dev_priv) \
((dev_priv)->info.has_logical_ring_preemption)
......@@ -2788,9 +2794,10 @@ intel_info(const struct drm_i915_private *dev_priv)
/* Early gen2 have a totally busted CS tlb and require pinned batches. */
#define HAS_BROKEN_CS_TLB(dev_priv) (IS_I830(dev_priv) || IS_I845G(dev_priv))
/* WaRsDisableCoarsePowerGating:skl,bxt */
/* WaRsDisableCoarsePowerGating:skl,cnl */
#define NEEDS_WaRsDisableCoarsePowerGating(dev_priv) \
(IS_SKL_GT3(dev_priv) || IS_SKL_GT4(dev_priv))
(IS_CANNONLAKE(dev_priv) || \
IS_SKL_GT3(dev_priv) || IS_SKL_GT4(dev_priv))
/*
* dp aux and gmbus irq on gen4 seems to be able to generate legacy interrupts
......@@ -3329,7 +3336,7 @@ i915_gem_obj_finish_shmem_access(struct drm_i915_gem_object *obj)
int __must_check i915_mutex_lock_interruptible(struct drm_device *dev);
void i915_vma_move_to_active(struct i915_vma *vma,
struct drm_i915_gem_request *req,
struct i915_request *rq,
unsigned int flags);
int i915_gem_dumb_create(struct drm_file *file_priv,
struct drm_device *dev,
......@@ -3344,11 +3351,9 @@ void i915_gem_track_fb(struct drm_i915_gem_object *old,
int __must_check i915_gem_set_global_seqno(struct drm_device *dev, u32 seqno);
struct drm_i915_gem_request *
struct i915_request *
i915_gem_find_active_request(struct intel_engine_cs *engine);
void i915_gem_retire_requests(struct drm_i915_private *dev_priv);
static inline bool i915_reset_backoff(struct i915_gpu_error *error)
{
return unlikely(test_bit(I915_RESET_BACKOFF, &error->flags));
......@@ -3380,7 +3385,7 @@ static inline u32 i915_reset_engine_count(struct i915_gpu_error *error,
return READ_ONCE(error->reset_engine_count[engine->id]);
}
struct drm_i915_gem_request *
struct i915_request *
i915_gem_reset_prepare_engine(struct intel_engine_cs *engine);
int i915_gem_reset_prepare(struct drm_i915_private *dev_priv);
void i915_gem_reset(struct drm_i915_private *dev_priv);
......@@ -3389,7 +3394,7 @@ void i915_gem_reset_finish(struct drm_i915_private *dev_priv);
void i915_gem_set_wedged(struct drm_i915_private *dev_priv);
bool i915_gem_unset_wedged(struct drm_i915_private *dev_priv);
void i915_gem_reset_engine(struct intel_engine_cs *engine,
struct drm_i915_gem_request *request);
struct i915_request *request);
void i915_gem_init_mmio(struct drm_i915_private *i915);
int __must_check i915_gem_init(struct drm_i915_private *dev_priv);
......@@ -4008,9 +4013,9 @@ wait_remaining_ms_from_jiffies(unsigned long timestamp_jiffies, int to_wait_ms)
}
static inline bool
__i915_request_irq_complete(const struct drm_i915_gem_request *req)
__i915_request_irq_complete(const struct i915_request *rq)
{
struct intel_engine_cs *engine = req->engine;
struct intel_engine_cs *engine = rq->engine;
u32 seqno;
/* Note that the engine may have wrapped around the seqno, and
......@@ -4019,7 +4024,7 @@ __i915_request_irq_complete(const struct drm_i915_gem_request *req)
* this by kicking all the waiters before resetting the seqno
* in hardware, and also signal the fence.
*/
if (test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &req->fence.flags))
if (test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &rq->fence.flags))
return true;
/* The request was dequeued before we were awoken. We check after
......@@ -4028,14 +4033,14 @@ __i915_request_irq_complete(const struct drm_i915_gem_request *req)
* the request execution are sufficient to ensure that a check
* after reading the value from hw matches this request.
*/
seqno = i915_gem_request_global_seqno(req);
seqno = i915_request_global_seqno(rq);
if (!seqno)
return false;
/* Before we do the heavier coherent read of the seqno,
* check the value (hopefully) in the CPU cacheline.
*/
if (__i915_gem_request_completed(req, seqno))
if (__i915_request_completed(rq, seqno))
return true;
/* Ensure our read of the seqno is coherent so that we
......@@ -4084,7 +4089,7 @@ __i915_request_irq_complete(const struct drm_i915_gem_request *req)
wake_up_process(b->irq_wait->tsk);
spin_unlock_irq(&b->irq_lock);
if (__i915_gem_request_completed(req, seqno))
if (__i915_request_completed(rq, seqno))
return true;
}
......
......@@ -29,7 +29,10 @@
#ifdef CONFIG_DRM_I915_DEBUG_GEM
#define GEM_BUG_ON(condition) do { if (unlikely((condition))) { \
printk(KERN_ERR "GEM_BUG_ON(%s)\n", __stringify(condition)); \
pr_err("%s:%d GEM_BUG_ON(%s)\n", \
__func__, __LINE__, __stringify(condition)); \
GEM_TRACE("%s:%d GEM_BUG_ON(%s)\n", \
__func__, __LINE__, __stringify(condition)); \
BUG(); \
} \
} while(0)
......@@ -54,6 +57,6 @@
#define GEM_TRACE(...) do { } while (0)
#endif
#define I915_NUM_ENGINES 5
#define I915_NUM_ENGINES 8
#endif /* __I915_GEM_H__ */
......@@ -119,7 +119,7 @@ i915_gem_batch_pool_get(struct i915_gem_batch_pool *pool,
if (!reservation_object_test_signaled_rcu(resv, true))
break;
i915_gem_retire_requests(pool->engine->i915);
i915_retire_requests(pool->engine->i915);
GEM_BUG_ON(i915_gem_object_is_active(obj));
/*
......
......@@ -211,17 +211,23 @@ static void context_close(struct i915_gem_context *ctx)
static int assign_hw_id(struct drm_i915_private *dev_priv, unsigned *out)
{
int ret;
unsigned int max;
if (INTEL_GEN(dev_priv) >= 11)
max = GEN11_MAX_CONTEXT_HW_ID;
else
max = MAX_CONTEXT_HW_ID;
ret = ida_simple_get(&dev_priv->contexts.hw_ida,
0, MAX_CONTEXT_HW_ID, GFP_KERNEL);
0, max, GFP_KERNEL);
if (ret < 0) {
/* Contexts are only released when no longer active.
* Flush any pending retires to hopefully release some
* stale contexts and try again.
*/
i915_gem_retire_requests(dev_priv);
i915_retire_requests(dev_priv);
ret = ida_simple_get(&dev_priv->contexts.hw_ida,
0, MAX_CONTEXT_HW_ID, GFP_KERNEL);
0, max, GFP_KERNEL);
if (ret < 0)
return ret;
}
......@@ -463,6 +469,7 @@ int i915_gem_contexts_init(struct drm_i915_private *dev_priv)
/* Using the simple ida interface, the max is limited by sizeof(int) */
BUILD_BUG_ON(MAX_CONTEXT_HW_ID > INT_MAX);
BUILD_BUG_ON(GEN11_MAX_CONTEXT_HW_ID > INT_MAX);
ida_init(&dev_priv->contexts.hw_ida);
/* lowest priority; idle task */
......@@ -590,28 +597,28 @@ int i915_gem_switch_to_kernel_context(struct drm_i915_private *dev_priv)
lockdep_assert_held(&dev_priv->drm.struct_mutex);
i915_gem_retire_requests(dev_priv);
i915_retire_requests(dev_priv);
for_each_engine(engine, dev_priv, id) {
struct drm_i915_gem_request *req;
struct i915_request *rq;
if (engine_has_idle_kernel_context(engine))
continue;
req = i915_gem_request_alloc(engine, dev_priv->kernel_context);
if (IS_ERR(req))
return PTR_ERR(req);
rq = i915_request_alloc(engine, dev_priv->kernel_context);
if (IS_ERR(rq))
return PTR_ERR(rq);
/* Queue this switch after all other activity */
list_for_each_entry(timeline, &dev_priv->gt.timelines, link) {
struct drm_i915_gem_request *prev;
struct i915_request *prev;
struct intel_timeline *tl;
tl = &timeline->engine[engine->id];
prev = i915_gem_active_raw(&tl->last_request,
&dev_priv->drm.struct_mutex);
if (prev)
i915_sw_fence_await_sw_fence_gfp(&req->submit,
i915_sw_fence_await_sw_fence_gfp(&rq->submit,
&prev->submit,
I915_FENCE_GFP);
}
......@@ -623,7 +630,7 @@ int i915_gem_switch_to_kernel_context(struct drm_i915_private *dev_priv)
* but an extra layer of paranoia before we declare the system
* idle (on suspend etc) is advisable!
*/
__i915_add_request(req, true);
__i915_request_add(rq, true);
}
return 0;
......
......@@ -38,8 +38,8 @@ struct drm_file;
struct drm_i915_private;
struct drm_i915_file_private;
struct drm_i915_gem_request;
struct i915_hw_ppgtt;
struct i915_request;
struct i915_vma;
struct intel_ring;
......@@ -276,7 +276,7 @@ int i915_gem_context_open(struct drm_i915_private *i915,
struct drm_file *file);
void i915_gem_context_close(struct drm_file *file);
int i915_switch_context(struct drm_i915_gem_request *req);
int i915_switch_context(struct i915_request *rq);
int i915_gem_switch_to_kernel_context(struct drm_i915_private *dev_priv);
void i915_gem_context_release(struct kref *ctx_ref);
......
......@@ -168,7 +168,7 @@ i915_gem_evict_something(struct i915_address_space *vm,
* retiring.
*/
if (!(flags & PIN_NONBLOCK))
i915_gem_retire_requests(dev_priv);
i915_retire_requests(dev_priv);
else
phases[1] = NULL;
......@@ -293,7 +293,7 @@ int i915_gem_evict_for_node(struct i915_address_space *vm,
* retiring.
*/
if (!(flags & PIN_NONBLOCK))
i915_gem_retire_requests(vm->i915);
i915_retire_requests(vm->i915);
check_color = vm->mm.color_adjust;
if (check_color) {
......
......@@ -765,16 +765,16 @@ static void gen8_initialize_pml4(struct i915_address_space *vm,
}
/* Broadwell Page Directory Pointer Descriptors */
static int gen8_write_pdp(struct drm_i915_gem_request *req,
static int gen8_write_pdp(struct i915_request *rq,
unsigned entry,
dma_addr_t addr)
{
struct intel_engine_cs *engine = req->engine;
struct intel_engine_cs *engine = rq->engine;
u32 *cs;
BUG_ON(entry >= 4);
cs = intel_ring_begin(req, 6);
cs = intel_ring_begin(rq, 6);
if (IS_ERR(cs))
return PTR_ERR(cs);
......@@ -784,20 +784,20 @@ static int gen8_write_pdp(struct drm_i915_gem_request *req,
*cs++ = MI_LOAD_REGISTER_IMM(1);
*cs++ = i915_mmio_reg_offset(GEN8_RING_PDP_LDW(engine, entry));
*cs++ = lower_32_bits(addr);
intel_ring_advance(req, cs);
intel_ring_advance(rq, cs);
return 0;
}
static int gen8_mm_switch_3lvl(struct i915_hw_ppgtt *ppgtt,
struct drm_i915_gem_request *req)
struct i915_request *rq)
{
int i, ret;
for (i = GEN8_3LVL_PDPES - 1; i >= 0; i--) {
const dma_addr_t pd_daddr = i915_page_dir_dma_addr(ppgtt, i);
ret = gen8_write_pdp(req, i, pd_daddr);
ret = gen8_write_pdp(rq, i, pd_daddr);
if (ret)
return ret;
}
......@@ -806,9 +806,9 @@ static int gen8_mm_switch_3lvl(struct i915_hw_ppgtt *ppgtt,
}
static int gen8_mm_switch_4lvl(struct i915_hw_ppgtt *ppgtt,
struct drm_i915_gem_request *req)
struct i915_request *rq)
{
return gen8_write_pdp(req, 0, px_dma(&ppgtt->pml4));
return gen8_write_pdp(rq, 0, px_dma(&ppgtt->pml4));
}
/* PDE TLBs are a pain to invalidate on GEN8+. When we modify
......@@ -1732,13 +1732,13 @@ static inline u32 get_pd_offset(struct i915_hw_ppgtt *ppgtt)
}
static int hsw_mm_switch(struct i915_hw_ppgtt *ppgtt,
struct drm_i915_gem_request *req)
struct i915_request *rq)
{
struct intel_engine_cs *engine = req->engine;
struct intel_engine_cs *engine = rq->engine;
u32 *cs;
/* NB: TLBs must be flushed and invalidated before a switch */
cs = intel_ring_begin(req, 6);
cs = intel_ring_begin(rq, 6);
if (IS_ERR(cs))
return PTR_ERR(cs);
......@@ -1748,19 +1748,19 @@ static int hsw_mm_switch(struct i915_hw_ppgtt *ppgtt,
*cs++ = i915_mmio_reg_offset(RING_PP_DIR_BASE(engine));
*cs++ = get_pd_offset(ppgtt);
*cs++ = MI_NOOP;
intel_ring_advance(req, cs);
intel_ring_advance(rq, cs);
return 0;
}
static int gen7_mm_switch(struct i915_hw_ppgtt *ppgtt,
struct drm_i915_gem_request *req)
struct i915_request *rq)
{
struct intel_engine_cs *engine = req->engine;
struct intel_engine_cs *engine = rq->engine;
u32 *cs;
/* NB: TLBs must be flushed and invalidated before a switch */
cs = intel_ring_begin(req, 6);
cs = intel_ring_begin(rq, 6);
if (IS_ERR(cs))
return PTR_ERR(cs);
......@@ -1770,16 +1770,16 @@ static int gen7_mm_switch(struct i915_hw_ppgtt *ppgtt,
*cs++ = i915_mmio_reg_offset(RING_PP_DIR_BASE(engine));
*cs++ = get_pd_offset(ppgtt);
*cs++ = MI_NOOP;
intel_ring_advance(req, cs);
intel_ring_advance(rq, cs);
return 0;
}
static int gen6_mm_switch(struct i915_hw_ppgtt *ppgtt,
struct drm_i915_gem_request *req)
struct i915_request *rq)
{
struct intel_engine_cs *engine = req->engine;
struct drm_i915_private *dev_priv = req->i915;
struct intel_engine_cs *engine = rq->engine;
struct drm_i915_private *dev_priv = rq->i915;
I915_WRITE(RING_PP_DIR_DCLV(engine), PP_DIR_DCLV_2G);
I915_WRITE(RING_PP_DIR_BASE(engine), get_pd_offset(ppgtt));
......
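All of the switch_mm implementations above follow the same shape: reserve a fixed number of dwords in the ring, write register/value pairs, then advance. A compact userspace sketch of that reserve/write/advance pattern is shown below; the buffer, helper names and register numbers are made up for the example and are not the i915 ring API.

#include <stdio.h>
#include <stdint.h>

#define RING_DWORDS 64

static uint32_t ring[RING_DWORDS];
static unsigned int ring_tail;

/* Reserve n dwords, returning a cursor into the ring (NULL if full). */
static uint32_t *ring_begin(unsigned int n)
{
        if (ring_tail + n > RING_DWORDS)
                return NULL;
        return &ring[ring_tail];
}

/* Commit what was written by moving the tail up to the cursor. */
static void ring_advance(const uint32_t *cs)
{
        ring_tail = (unsigned int)(cs - ring);
}

int main(void)
{
        uint32_t *cs = ring_begin(6);

        if (!cs)
                return 1;

        /* emit three invented register/value pairs, mirroring the 6-dword hunks */
        for (uint32_t reg = 0x100; reg <= 0x120; reg += 0x10) {
                *cs++ = reg;            /* "register offset" */
                *cs++ = 0xdeadbeef;     /* "value to load"   */
        }
        ring_advance(cs);

        printf("ring now holds %u dwords\n", ring_tail);
        return 0;
}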
......@@ -39,7 +39,8 @@
#include <linux/pagevec.h>
#include "i915_gem_timeline.h"
#include "i915_gem_request.h"
#include "i915_request.h"
#include "i915_selftest.h"
#define I915_GTT_PAGE_SIZE_4K BIT(12)
......@@ -398,7 +399,7 @@ struct i915_hw_ppgtt {
gen6_pte_t __iomem *pd_addr;
int (*switch_mm)(struct i915_hw_ppgtt *ppgtt,
struct drm_i915_gem_request *req);
struct i915_request *rq);
void (*debug_dump)(struct i915_hw_ppgtt *ppgtt, struct seq_file *m);
};
......
......@@ -33,7 +33,7 @@
#include <drm/i915_drm.h>
#include "i915_gem_request.h"
#include "i915_request.h"
#include "i915_selftest.h"
struct drm_i915_gem_object;
......
......@@ -177,7 +177,7 @@ static int render_state_setup(struct intel_render_state *so,
#undef OUT_BATCH
int i915_gem_render_state_emit(struct drm_i915_gem_request *rq)
int i915_gem_render_state_emit(struct i915_request *rq)
{
struct intel_engine_cs *engine = rq->engine;
struct intel_render_state so = {}; /* keep the compiler happy */
......
......@@ -24,8 +24,8 @@
#ifndef _I915_GEM_RENDER_STATE_H_
#define _I915_GEM_RENDER_STATE_H_
struct drm_i915_gem_request;
struct i915_request;
int i915_gem_render_state_emit(struct drm_i915_gem_request *rq);
int i915_gem_render_state_emit(struct i915_request *rq);
#endif /* _I915_GEM_RENDER_STATE_H_ */
......@@ -175,7 +175,7 @@ i915_gem_shrink(struct drm_i915_private *i915,
i915_gem_wait_for_idle(i915, I915_WAIT_LOCKED);
trace_i915_gem_shrink(i915, target, flags);
i915_gem_retire_requests(i915);
i915_retire_requests(i915);
/*
* Unbinding of objects will require HW access; Let us not wake the
......@@ -267,7 +267,7 @@ i915_gem_shrink(struct drm_i915_private *i915,
if (flags & I915_SHRINK_BOUND)
intel_runtime_pm_put(i915);
i915_gem_retire_requests(i915);
i915_retire_requests(i915);
shrinker_unlock(i915, unlock);
......
......@@ -27,9 +27,9 @@
#include <linux/list.h>
#include "i915_utils.h"
#include "i915_gem_request.h"
#include "i915_request.h"
#include "i915_syncmap.h"
#include "i915_utils.h"
struct i915_gem_timeline;
......
......@@ -594,7 +594,8 @@ static const struct intel_device_info intel_cannonlake_info = {
GEN10_FEATURES, \
GEN(11), \
.ddb_size = 2048, \
.has_csr = 0
.has_csr = 0, \
.has_logical_ring_elsq = 1
static const struct intel_device_info intel_icelake_11_info = {
GEN11_FEATURES,
......@@ -664,6 +665,7 @@ static const struct pci_device_id pciidlist[] = {
INTEL_CFL_U_GT2_IDS(&intel_coffeelake_gt2_info),
INTEL_CFL_U_GT3_IDS(&intel_coffeelake_gt3_info),
INTEL_CNL_IDS(&intel_cannonlake_info),
INTEL_ICL_11_IDS(&intel_icelake_11_info),
{0, 0, 0}
};
MODULE_DEVICE_TABLE(pci, pciidlist);
......
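The hunk above composes intel_icelake_11_info from GEN11_FEATURES (itself built on GEN10_FEATURES, with .has_logical_ring_elsq newly set) and then adds the ICL PCI IDs to the table exported via MODULE_DEVICE_TABLE. A small sketch of that macro-composed designated-initializer pattern follows; the struct, feature names and values are invented for illustration, and later initializers deliberately override earlier ones, as in the driver's feature macros.

#include <stdio.h>

struct toy_device_info {
        int gen;
        int has_elsq;
        int ddb_size;
};

/* Base feature set, extended per generation by later macros. */
#define TOY_GEN10_FEATURES \
        .gen = 10,         \
        .has_elsq = 0,     \
        .ddb_size = 1024

/* Gen11 inherits Gen10 and overrides only what changed. */
#define TOY_GEN11_FEATURES  \
        TOY_GEN10_FEATURES, \
        .gen = 11,          \
        .ddb_size = 2048,   \
        .has_elsq = 1

static const struct toy_device_info toy_icelake_info = {
        TOY_GEN11_FEATURES,
};

int main(void)
{
        printf("gen %d, elsq %d, ddb %d\n",
               toy_icelake_info.gen,
               toy_icelake_info.has_elsq,
               toy_icelake_info.ddb_size);
        return 0;
}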