Commit 282dc832 authored by Dave Airlie

Merge tag 'drm-intel-next-2017-10-12' of git://anongit.freedesktop.org/drm/drm-intel into drm-next

Last batch of drm/i915 features for v4.15:

- transparent huge pages support (Matthew)
- uapi: I915_PARAM_HAS_SCHEDULER into a capability bitmask (Chris)
- execlists: preemption (Chris)
- scheduler: user defined priorities (Chris)
- execlists optimization (Michał)
- plenty of display fixes (Imre)
- has_ipc fix (Rodrigo)
- platform features definition refactoring (Rodrigo)
- legacy cursor update fix (Maarten)
- fix vblank waits for cursor updates (Maarten)
- reprogram dmc firmware on resume, dmc state fix (Imre)
- remove use_mmio_flip module parameter (Maarten)
- wa fixes (Oscar)
- huc/guc firmware refactoring (Sagar, Michal)
- push encoder specific code to encoder hooks (Jani)
- DP MST fixes (Dhinakaran)
- eDP power sequencing fixes (Manasi)
- selftest updates (Chris, Matthew)
- mmu notifier cpu hotplug deadlock fix (Daniel)
- more VBT parser refactoring (Jani)
- max pipe refactoring (Mika Kahola)
- rc6/rps refactoring and separation (Sagar)
- userptr lockdep fix (Chris)
- tracepoint fixes and defunct tracepoint removal (Chris)
- use rcu instead of abusing stop_machine (Daniel)
- plenty of other fixes all around (Everyone)

* tag 'drm-intel-next-2017-10-12' of git://anongit.freedesktop.org/drm/drm-intel: (145 commits)
  drm/i915: Update DRIVER_DATE to 20171012
  drm/i915: Simplify intel_sanitize_enable_ppgtt
  drm/i915/userptr: Drop struct_mutex before cleanup
  drm/i915/dp: limit sink rates based on rate
  drm/i915/dp: centralize max source rate conditions more
  drm/i915: Allow PCH platforms fall back to BIOS LVDS mode
  drm/i915: Reuse normal state readout for LVDS/DVO fixed mode
  drm/i915: Use rcu instead of stop_machine in set_wedged
  drm/i915: Introduce separate status variable for RC6 and LLC ring frequency setup
  drm/i915: Create generic functions to control RC6, RPS
  drm/i915: Create generic function to setup LLC ring frequency table
  drm/i915: Rename intel_enable_rc6 to intel_rc6_enabled
  drm/i915: Name structure in dev_priv that contains RPS/RC6 state as "gt_pm"
  drm/i915: Move rps.hw_lock to dev_priv and s/hw_lock/pcu_lock
  drm/i915: Name i915_runtime_pm structure in dev_priv as "runtime_pm"
  drm/i915: Separate RPS and RC6 handling for CHV
  drm/i915: Separate RPS and RC6 handling for VLV
  drm/i915: Separate RPS and RC6 handling for BDW
  drm/i915: Remove superfluous IS_BDW checks and non-BDW changes from gen8_enable_rps
  drm/i915: Separate RPS and RC6 handling for gen6+
  ...
parents 6585d427 fa9caf0b
@@ -47,6 +47,7 @@ i915-y += i915_cmd_parser.o \
 	  i915_gem_tiling.o \
 	  i915_gem_timeline.o \
 	  i915_gem_userptr.o \
+	  i915_gemfs.o \
 	  i915_trace_points.o \
 	  i915_vma.o \
 	  intel_breadcrumbs.o \
@@ -59,6 +60,8 @@ i915-y += i915_cmd_parser.o \
 # general-purpose microcontroller (GuC) support
 i915-y += intel_uc.o \
+	  intel_uc_fw.o \
+	  intel_guc.o \
	  intel_guc_ct.o \
	  intel_guc_log.o \
	  intel_guc_loader.o \
...
@@ -174,6 +174,7 @@ static int shadow_context_status_change(struct notifier_block *nb,
 		atomic_set(&workload->shadow_ctx_active, 1);
 		break;
 	case INTEL_CONTEXT_SCHEDULE_OUT:
+	case INTEL_CONTEXT_SCHEDULE_PREEMPTED:
 		atomic_set(&workload->shadow_ctx_active, 0);
 		break;
 	default:
...
@@ -367,9 +367,18 @@ static int i915_getparam(struct drm_device *dev, void *data,
 		value = i915_gem_mmap_gtt_version();
 		break;
 	case I915_PARAM_HAS_SCHEDULER:
-		value = dev_priv->engine[RCS] &&
-			dev_priv->engine[RCS]->schedule;
+		value = 0;
+		if (dev_priv->engine[RCS] && dev_priv->engine[RCS]->schedule) {
+			value |= I915_SCHEDULER_CAP_ENABLED;
+			value |= I915_SCHEDULER_CAP_PRIORITY;
+
+			if (INTEL_INFO(dev_priv)->has_logical_ring_preemption &&
+			    i915_modparams.enable_execlists &&
+			    !i915_modparams.enable_guc_submission)
+				value |= I915_SCHEDULER_CAP_PREEMPTION;
+		}
 		break;
 	case I915_PARAM_MMAP_VERSION:
 		/* Remember to bump this if the version changes! */
 	case I915_PARAM_HAS_GEM:
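[Editor's note: as an illustration of the uapi change above, a userspace query of the new capability bitmask could look like the sketch below. It is illustrative only; it assumes libdrm's drmIoctl() and the I915_SCHEDULER_CAP_* flags added to i915_drm.h by this series, with libdrm headers on the include path and fd an already-open DRM device node.]

#include <stdio.h>
#include <xf86drm.h>
#include <i915_drm.h>

/* Sketch: decode the I915_PARAM_HAS_SCHEDULER capability bitmask. */
static void print_scheduler_caps(int fd)
{
	struct drm_i915_getparam gp = { 0 };
	int value = 0;

	gp.param = I915_PARAM_HAS_SCHEDULER;
	gp.value = &value;
	if (drmIoctl(fd, DRM_IOCTL_I915_GETPARAM, &gp))
		return;	/* parameter unknown on older kernels */

	/* Older kernels returned a plain boolean; bit 0 keeps that meaning. */
	printf("scheduler:  %d\n", !!(value & I915_SCHEDULER_CAP_ENABLED));
	printf("priorities: %d\n", !!(value & I915_SCHEDULER_CAP_PRIORITY));
	printf("preemption: %d\n", !!(value & I915_SCHEDULER_CAP_PREEMPTION));
}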
@@ -606,9 +615,10 @@ static void i915_gem_fini(struct drm_i915_private *dev_priv)
 	intel_uc_fini_hw(dev_priv);
 	i915_gem_cleanup_engines(dev_priv);
 	i915_gem_contexts_fini(dev_priv);
-	i915_gem_cleanup_userptr(dev_priv);
 	mutex_unlock(&dev_priv->drm.struct_mutex);
+	i915_gem_cleanup_userptr(dev_priv);
+
 	i915_gem_drain_freed_objects(dev_priv);

 	WARN_ON(!list_empty(&dev_priv->contexts.list));
@@ -1007,6 +1017,8 @@ static int i915_driver_init_mmio(struct drm_i915_private *dev_priv)
 	intel_uncore_init(dev_priv);

+	intel_uc_init_mmio(dev_priv);
+
 	ret = intel_engines_init_mmio(dev_priv);
 	if (ret)
 		goto err_uncore;
@@ -1580,7 +1592,7 @@ static int i915_drm_suspend_late(struct drm_device *dev, bool hibernation)
 	intel_display_set_init_power(dev_priv, false);

-	fw_csr = !IS_GEN9_LP(dev_priv) &&
+	fw_csr = !IS_GEN9_LP(dev_priv) && !hibernation &&
 		suspend_to_idle(dev_priv) && dev_priv->csr.dmc_payload;
 	/*
 	 * In case of firmware assisted context save/restore don't manually
@@ -2070,11 +2082,14 @@ static int i915_pm_resume(struct device *kdev)
 /* freeze: before creating the hibernation_image */
 static int i915_pm_freeze(struct device *kdev)
 {
+	struct drm_device *dev = &kdev_to_i915(kdev)->drm;
 	int ret;

-	ret = i915_pm_suspend(kdev);
-	if (ret)
-		return ret;
+	if (dev->switch_power_state != DRM_SWITCH_POWER_OFF) {
+		ret = i915_drm_suspend(dev);
+		if (ret)
+			return ret;
+	}

 	ret = i915_gem_freeze(kdev_to_i915(kdev));
 	if (ret)
@@ -2085,11 +2100,14 @@ static int i915_pm_freeze(struct device *kdev)
 static int i915_pm_freeze_late(struct device *kdev)
 {
+	struct drm_device *dev = &kdev_to_i915(kdev)->drm;
 	int ret;

-	ret = i915_pm_suspend_late(kdev);
-	if (ret)
-		return ret;
+	if (dev->switch_power_state != DRM_SWITCH_POWER_OFF) {
+		ret = i915_drm_suspend_late(dev, true);
+		if (ret)
+			return ret;
+	}

 	ret = i915_gem_freeze_late(kdev_to_i915(kdev));
 	if (ret)
@@ -2485,7 +2503,7 @@ static int intel_runtime_suspend(struct device *kdev)
 	struct drm_i915_private *dev_priv = to_i915(dev);
 	int ret;

-	if (WARN_ON_ONCE(!(dev_priv->rps.enabled && intel_enable_rc6())))
+	if (WARN_ON_ONCE(!(dev_priv->gt_pm.rc6.enabled && intel_rc6_enabled())))
 		return -ENODEV;

 	if (WARN_ON_ONCE(!HAS_RUNTIME_PM(dev_priv)))
@@ -2527,12 +2545,12 @@ static int intel_runtime_suspend(struct device *kdev)
 	intel_uncore_suspend(dev_priv);

 	enable_rpm_wakeref_asserts(dev_priv);
-	WARN_ON_ONCE(atomic_read(&dev_priv->pm.wakeref_count));
+	WARN_ON_ONCE(atomic_read(&dev_priv->runtime_pm.wakeref_count));

 	if (intel_uncore_arm_unclaimed_mmio_detection(dev_priv))
 		DRM_ERROR("Unclaimed access detected prior to suspending\n");

-	dev_priv->pm.suspended = true;
+	dev_priv->runtime_pm.suspended = true;

 	/*
 	 * FIXME: We really should find a document that references the arguments
@@ -2578,11 +2596,11 @@ static int intel_runtime_resume(struct device *kdev)

 	DRM_DEBUG_KMS("Resuming device\n");

-	WARN_ON_ONCE(atomic_read(&dev_priv->pm.wakeref_count));
+	WARN_ON_ONCE(atomic_read(&dev_priv->runtime_pm.wakeref_count));
 	disable_rpm_wakeref_asserts(dev_priv);

 	intel_opregion_notify_adapter(dev_priv, PCI_D0);
-	dev_priv->pm.suspended = false;
+	dev_priv->runtime_pm.suspended = false;
 	if (intel_uncore_unclaimed_mmio(dev_priv))
 		DRM_DEBUG_DRIVER("Unclaimed access during suspend, bios?\n");
...
@@ -80,8 +80,8 @@
 #define DRIVER_NAME		"i915"
 #define DRIVER_DESC		"Intel Graphics"
-#define DRIVER_DATE		"20170929"
-#define DRIVER_TIMESTAMP	1506682238
+#define DRIVER_DATE		"20171012"
+#define DRIVER_TIMESTAMP	1507831511

 /* Use I915_STATE_WARN(x) and I915_STATE_WARN_ON() (rather than WARN() and
  * WARN_ON()) for hw state sanity checks to check for unexpected conditions
@@ -609,7 +609,7 @@ struct drm_i915_file_private {
 	struct intel_rps_client {
 		atomic_t boosts;
-	} rps;
+	} rps_client;

 	unsigned int bsd_engine;
@@ -783,6 +783,7 @@ struct intel_csr {
	func(has_l3_dpf); \
	func(has_llc); \
	func(has_logical_ring_contexts); \
+	func(has_logical_ring_preemption); \
	func(has_overlay); \
	func(has_pipe_cxsr); \
	func(has_pooled_eu); \
@@ -868,6 +869,8 @@ struct intel_device_info {
 	u8 num_sprites[I915_MAX_PIPES];
 	u8 num_scalers[I915_MAX_PIPES];

+	unsigned int page_sizes; /* page sizes supported by the HW */
+
 #define DEFINE_FLAG(name) u8 name:1
 	DEV_INFO_FOR_EACH_FLAG(DEFINE_FLAG);
 #undef DEFINE_FLAG
@@ -981,6 +984,7 @@ struct i915_gpu_state {
 		pid_t pid;
 		u32 handle;
 		u32 hw_id;
+		int priority;
 		int ban_score;
 		int active;
 		int guilty;
@@ -1003,6 +1007,7 @@ struct i915_gpu_state {
 			long jiffies;
 			pid_t pid;
 			u32 context;
+			int priority;
 			int ban_score;
 			u32 seqno;
 			u32 head;
@@ -1312,7 +1317,7 @@ struct intel_rps_ei {
 	u32 media_c0;
 };

-struct intel_gen6_power_mgmt {
+struct intel_rps {
 	/*
 	 * work, interrupts_enabled and pm_iir are protected by
 	 * dev_priv->irq_lock
@@ -1353,20 +1358,26 @@ struct intel_gen6_power_mgmt {
 	enum { LOW_POWER, BETWEEN, HIGH_POWER } power;

 	bool enabled;
-	struct delayed_work autoenable_work;
 	atomic_t num_waiters;
 	atomic_t boosts;

 	/* manual wa residency calculations */
 	struct intel_rps_ei ei;
+};

-	/*
-	 * Protects RPS/RC6 register access and PCU communication.
-	 * Must be taken after struct_mutex if nested. Note that
-	 * this lock may be held for long periods of time when
-	 * talking to hw - so only take it when talking to hw!
-	 */
-	struct mutex hw_lock;
+struct intel_rc6 {
+	bool enabled;
+};
+
+struct intel_llc_pstate {
+	bool enabled;
+};
+
+struct intel_gen6_power_mgmt {
+	struct intel_rps rps;
+	struct intel_rc6 rc6;
+	struct intel_llc_pstate llc_pstate;
+	struct delayed_work autoenable_work;
 };

 /* defined intel_pm.c */
@@ -1508,6 +1519,11 @@ struct i915_gem_mm {
 	/** Usable portion of the GTT for GEM */
 	dma_addr_t stolen_base; /* limited to low memory (32-bit) */

+	/**
+	 * tmpfs instance used for shmem backed objects
+	 */
+	struct vfsmount *gemfs;
+
 	/** PPGTT used for aliasing the PPGTT with the GTT */
 	struct i915_hw_ppgtt *aliasing_ppgtt;
@@ -2251,8 +2267,11 @@ struct drm_i915_private {
 	wait_queue_head_t gmbus_wait_queue;

 	struct pci_dev *bridge_dev;
-	struct i915_gem_context *kernel_context;
 	struct intel_engine_cs *engine[I915_NUM_ENGINES];
+	/* Context used internally to idle the GPU and setup initial state */
+	struct i915_gem_context *kernel_context;
+	/* Context only to be used for injecting preemption commands */
+	struct i915_gem_context *preempt_context;
 	struct i915_vma *semaphore;

 	struct drm_dma_handle *status_page_dmah;
@@ -2408,8 +2427,16 @@ struct drm_i915_private {
 	/* Cannot be determined by PCIID. You must always read a register. */
 	u32 edram_cap;

-	/* gen6+ rps state */
-	struct intel_gen6_power_mgmt rps;
+	/*
+	 * Protects RPS/RC6 register access and PCU communication.
+	 * Must be taken after struct_mutex if nested. Note that
+	 * this lock may be held for long periods of time when
+	 * talking to hw - so only take it when talking to hw!
+	 */
+	struct mutex pcu_lock;
+
+	/* gen6+ GT PM state */
+	struct intel_gen6_power_mgmt gt_pm;

 	/* ilk-only ips/rps state. Everything in here is protected by the global
 	 * mchdev_lock in intel_pm.c */
@@ -2520,7 +2547,7 @@ struct drm_i915_private {
 		bool distrust_bios_wm;
 	} wm;

-	struct i915_runtime_pm pm;
+	struct i915_runtime_pm runtime_pm;

 	struct {
 		bool initialized;
@@ -2859,6 +2886,21 @@ static inline struct scatterlist *__sg_next(struct scatterlist *sg)
	 (((__iter).curr += PAGE_SIZE) >= (__iter).max) ?		\
	 (__iter) = __sgt_iter(__sg_next((__iter).sgp), false), 0 : 0)

+static inline unsigned int i915_sg_page_sizes(struct scatterlist *sg)
+{
+	unsigned int page_sizes;
+
+	page_sizes = 0;
+	while (sg) {
+		GEM_BUG_ON(sg->offset);
+		GEM_BUG_ON(!IS_ALIGNED(sg->length, PAGE_SIZE));
+		page_sizes |= sg->length;
+		sg = __sg_next(sg);
+	}
+
+	return page_sizes;
+}
+
 static inline unsigned int i915_sg_segment_size(void)
 {
	unsigned int size = swiotlb_max_segment();
@@ -3088,6 +3130,10 @@ intel_info(const struct drm_i915_private *dev_priv)
 #define USES_PPGTT(dev_priv)		(i915_modparams.enable_ppgtt)
 #define USES_FULL_PPGTT(dev_priv)	(i915_modparams.enable_ppgtt >= 2)
 #define USES_FULL_48BIT_PPGTT(dev_priv)	(i915_modparams.enable_ppgtt == 3)
+#define HAS_PAGE_SIZES(dev_priv, sizes) ({ \
+	GEM_BUG_ON((sizes) == 0); \
+	((sizes) & ~(dev_priv)->info.page_sizes) == 0; \
+})

 #define HAS_OVERLAY(dev_priv)		((dev_priv)->info.has_overlay)
 #define OVERLAY_NEEDS_PHYSICAL(dev_priv) \
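[Editor's note: HAS_PAGE_SIZES() above is simply "every requested size bit is present in the device's page_sizes mask". A standalone sketch of the arithmetic follows, in plain C with BIT() spelled out; the GTT_PAGE_SIZE_* names are local stand-ins mirroring the I915_GTT_PAGE_SIZE_* values introduced in the i915_gem_gtt.h hunk further down.]

#include <stdio.h>

#define BIT(n) (1u << (n))

/* One bit per supported GTT page size, as in the driver's encoding. */
#define GTT_PAGE_SIZE_4K  BIT(12)
#define GTT_PAGE_SIZE_64K BIT(16)
#define GTT_PAGE_SIZE_2M  BIT(21)

/* Same predicate as HAS_PAGE_SIZES(): all requested bits must be supported. */
static int has_page_sizes(unsigned int supported, unsigned int sizes)
{
	return (sizes & ~supported) == 0;
}

int main(void)
{
	unsigned int supported = GTT_PAGE_SIZE_4K | GTT_PAGE_SIZE_64K;

	printf("%d\n", has_page_sizes(supported, GTT_PAGE_SIZE_64K)); /* 1 */
	printf("%d\n", has_page_sizes(supported, GTT_PAGE_SIZE_2M));  /* 0 */
	printf("%d\n", has_page_sizes(supported,
				      GTT_PAGE_SIZE_4K | GTT_PAGE_SIZE_2M)); /* 0 */
	return 0;
}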
@@ -3504,7 +3550,8 @@ i915_gem_object_get_dma_address(struct drm_i915_gem_object *obj,
				 unsigned long n);

 void __i915_gem_object_set_pages(struct drm_i915_gem_object *obj,
-				 struct sg_table *pages);
+				 struct sg_table *pages,
+				 unsigned int sg_page_sizes);

 int __i915_gem_object_get_pages(struct drm_i915_gem_object *obj);

 static inline int __must_check
@@ -3726,8 +3773,6 @@ i915_vm_to_ppgtt(struct i915_address_space *vm)
 }

 /* i915_gem_fence_reg.c */
-int __must_check i915_vma_get_fence(struct i915_vma *vma);
-int __must_check i915_vma_put_fence(struct i915_vma *vma);
 struct drm_i915_fence_reg *
 i915_reserve_fence(struct drm_i915_private *dev_priv);
 void i915_unreserve_fence(struct drm_i915_fence_reg *fence);
...
@@ -416,14 +416,43 @@ i915_gem_context_create_gvt(struct drm_device *dev)
 	return ctx;
 }

+static struct i915_gem_context *
+create_kernel_context(struct drm_i915_private *i915, int prio)
+{
+	struct i915_gem_context *ctx;
+
+	ctx = i915_gem_create_context(i915, NULL);
+	if (IS_ERR(ctx))
+		return ctx;
+
+	i915_gem_context_clear_bannable(ctx);
+	ctx->priority = prio;
+	ctx->ring_size = PAGE_SIZE;
+
+	GEM_BUG_ON(!i915_gem_context_is_kernel(ctx));
+
+	return ctx;
+}
+
+static void
+destroy_kernel_context(struct i915_gem_context **ctxp)
+{
+	struct i915_gem_context *ctx;
+
+	/* Keep the context ref so that we can free it immediately ourselves */
+	ctx = i915_gem_context_get(fetch_and_zero(ctxp));
+	GEM_BUG_ON(!i915_gem_context_is_kernel(ctx));
+
+	context_close(ctx);
+	i915_gem_context_free(ctx);
+}
+
 int i915_gem_contexts_init(struct drm_i915_private *dev_priv)
 {
 	struct i915_gem_context *ctx;
+	int err;
+
+	GEM_BUG_ON(dev_priv->kernel_context);

-	/* Init should only be called once per module load. Eventually the
-	 * restriction on the context_disabled check can be loosened. */
-	if (WARN_ON(dev_priv->kernel_context))
-		return 0;

 	INIT_LIST_HEAD(&dev_priv->contexts.list);
 	INIT_WORK(&dev_priv->contexts.free_work, contexts_free_worker);
@@ -441,28 +470,38 @@ int i915_gem_contexts_init(struct drm_i915_private *dev_priv)
 	BUILD_BUG_ON(MAX_CONTEXT_HW_ID > INT_MAX);
 	ida_init(&dev_priv->contexts.hw_ida);

-	ctx = i915_gem_create_context(dev_priv, NULL);
+	/* lowest priority; idle task */
+	ctx = create_kernel_context(dev_priv, I915_PRIORITY_MIN);
 	if (IS_ERR(ctx)) {
-		DRM_ERROR("Failed to create default global context (error %ld)\n",
-			  PTR_ERR(ctx));
-		return PTR_ERR(ctx);
+		DRM_ERROR("Failed to create default global context\n");
+		err = PTR_ERR(ctx);
+		goto err;
 	}
-
-	/* For easy recognisablity, we want the kernel context to be 0 and then
+	/*
+	 * For easy recognisablity, we want the kernel context to be 0 and then
 	 * all user contexts will have non-zero hw_id.
 	 */
 	GEM_BUG_ON(ctx->hw_id);
-	i915_gem_context_clear_bannable(ctx);
-	ctx->priority = I915_PRIORITY_MIN; /* lowest priority; idle task */
 	dev_priv->kernel_context = ctx;

-	GEM_BUG_ON(!i915_gem_context_is_kernel(ctx));
+	/* highest priority; preempting task */
+	ctx = create_kernel_context(dev_priv, INT_MAX);
+	if (IS_ERR(ctx)) {
+		DRM_ERROR("Failed to create default preempt context\n");
+		err = PTR_ERR(ctx);
+		goto err_kernel_context;
+	}
+	dev_priv->preempt_context = ctx;

 	DRM_DEBUG_DRIVER("%s context support initialized\n",
			 dev_priv->engine[RCS]->context_size ? "logical" :
			 "fake");
 	return 0;
+
+err_kernel_context:
+	destroy_kernel_context(&dev_priv->kernel_context);
+err:
+	return err;
 }

 void i915_gem_contexts_lost(struct drm_i915_private *dev_priv)
@@ -507,15 +546,10 @@ void i915_gem_contexts_lost(struct drm_i915_private *dev_priv)

 void i915_gem_contexts_fini(struct drm_i915_private *i915)
 {
-	struct i915_gem_context *ctx;
-
 	lockdep_assert_held(&i915->drm.struct_mutex);

-	/* Keep the context so that we can free it immediately ourselves */
-	ctx = i915_gem_context_get(fetch_and_zero(&i915->kernel_context));
-	GEM_BUG_ON(!i915_gem_context_is_kernel(ctx));
-	context_close(ctx);
-	i915_gem_context_free(ctx);
+	destroy_kernel_context(&i915->preempt_context);
+	destroy_kernel_context(&i915->kernel_context);

 	/* Must free all deferred contexts (via flush_workqueue) first */
 	ida_destroy(&i915->contexts.hw_ida);
@@ -1036,6 +1070,9 @@ int i915_gem_context_getparam_ioctl(struct drm_device *dev, void *data,
 	case I915_CONTEXT_PARAM_BANNABLE:
 		args->value = i915_gem_context_is_bannable(ctx);
 		break;
+	case I915_CONTEXT_PARAM_PRIORITY:
+		args->value = ctx->priority;
+		break;
 	default:
 		ret = -EINVAL;
 		break;
@@ -1091,6 +1128,26 @@ int i915_gem_context_setparam_ioctl(struct drm_device *dev, void *data,
 		else
 			i915_gem_context_clear_bannable(ctx);
 		break;
+
+	case I915_CONTEXT_PARAM_PRIORITY:
+		{
+			int priority = args->value;
+
+			if (args->size)
+				ret = -EINVAL;
+			else if (!to_i915(dev)->engine[RCS]->schedule)
+				ret = -ENODEV;
+			else if (priority > I915_CONTEXT_MAX_USER_PRIORITY ||
+				 priority < I915_CONTEXT_MIN_USER_PRIORITY)
+				ret = -EINVAL;
+			else if (priority > I915_CONTEXT_DEFAULT_PRIORITY &&
+				 !capable(CAP_SYS_NICE))
+				ret = -EPERM;
+			else
+				ctx->priority = priority;
+		}
+		break;
+
 	default:
 		ret = -EINVAL;
 		break;
...
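[Editor's note: for completeness, the matching userspace side of the new I915_CONTEXT_PARAM_PRIORITY looks roughly like the sketch below. Illustrative only; it assumes libdrm and a context id obtained earlier from DRM_IOCTL_I915_GEM_CONTEXT_CREATE. Raising the priority above the default needs CAP_SYS_NICE, matching the -EPERM branch in the hunk above.]

#include <string.h>
#include <xf86drm.h>
#include <i915_drm.h>

/* Sketch: set a GEM context's scheduler priority. Valid values span
 * I915_CONTEXT_MIN_USER_PRIORITY..I915_CONTEXT_MAX_USER_PRIORITY
 * (-1023..1023); 0 is the default. */
static int set_context_priority(int fd, unsigned int ctx_id, int prio)
{
	struct drm_i915_gem_context_param p;

	memset(&p, 0, sizeof(p));
	p.ctx_id = ctx_id;
	p.param = I915_CONTEXT_PARAM_PRIORITY;
	p.value = prio;	/* negative values survive the u64 round trip */

	return drmIoctl(fd, DRM_IOCTL_I915_GEM_CONTEXT_SETPARAM, &p);
}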
@@ -256,11 +256,21 @@ struct dma_buf *i915_gem_prime_export(struct drm_device *dev,
 	return drm_gem_dmabuf_export(dev, &exp_info);
 }

-static struct sg_table *
-i915_gem_object_get_pages_dmabuf(struct drm_i915_gem_object *obj)
+static int i915_gem_object_get_pages_dmabuf(struct drm_i915_gem_object *obj)
 {
-	return dma_buf_map_attachment(obj->base.import_attach,
-				      DMA_BIDIRECTIONAL);
+	struct sg_table *pages;
+	unsigned int sg_page_sizes;
+
+	pages = dma_buf_map_attachment(obj->base.import_attach,
+				       DMA_BIDIRECTIONAL);
+	if (IS_ERR(pages))
+		return PTR_ERR(pages);
+
+	sg_page_sizes = i915_sg_page_sizes(pages->sgl);
+
+	__i915_gem_object_set_pages(obj, pages, sg_page_sizes);
+
+	return 0;
 }

 static void i915_gem_object_put_pages_dmabuf(struct drm_i915_gem_object *obj,
...
@@ -82,7 +82,7 @@ mark_free(struct drm_mm_scan *scan,
 	if (i915_vma_is_pinned(vma))
 		return false;

-	if (flags & PIN_NONFAULT && !list_empty(&vma->obj->userfault_link))
+	if (flags & PIN_NONFAULT && i915_vma_has_userfault(vma))
 		return false;

 	list_add(&vma->evict_link, unwind);
@@ -315,6 +315,11 @@ int i915_gem_evict_for_node(struct i915_address_space *vm,
 			break;
 		}

+		if (flags & PIN_NONFAULT && i915_vma_has_userfault(vma)) {
+			ret = -ENOSPC;
+			break;
+		}
+
 		/* Overlap of objects in the same batch? */
 		if (i915_vma_is_pinned(vma)) {
 			ret = -ENOSPC;
...
@@ -367,12 +367,12 @@ eb_pin_vma(struct i915_execbuffer *eb,
 		return false;

 	if (unlikely(exec_flags & EXEC_OBJECT_NEEDS_FENCE)) {
-		if (unlikely(i915_vma_get_fence(vma))) {
+		if (unlikely(i915_vma_pin_fence(vma))) {
 			i915_vma_unpin(vma);
 			return false;
 		}

-		if (i915_vma_pin_fence(vma))
+		if (vma->fence)
 			exec_flags |= __EXEC_OBJECT_HAS_FENCE;
 	}

@@ -385,7 +385,7 @@ static inline void __eb_unreserve_vma(struct i915_vma *vma, unsigned int flags)
 	GEM_BUG_ON(!(flags & __EXEC_OBJECT_HAS_PIN));

 	if (unlikely(flags & __EXEC_OBJECT_HAS_FENCE))
-		i915_vma_unpin_fence(vma);
+		__i915_vma_unpin_fence(vma);

 	__i915_vma_unpin(vma);
 }

@@ -563,13 +563,13 @@ static int eb_reserve_vma(const struct i915_execbuffer *eb,
 	}

 	if (unlikely(exec_flags & EXEC_OBJECT_NEEDS_FENCE)) {
-		err = i915_vma_get_fence(vma);
+		err = i915_vma_pin_fence(vma);
 		if (unlikely(err)) {
 			i915_vma_unpin(vma);
 			return err;
 		}

-		if (i915_vma_pin_fence(vma))
+		if (vma->fence)
 			exec_flags |= __EXEC_OBJECT_HAS_FENCE;
 	}

@@ -974,7 +974,9 @@ static void *reloc_iomap(struct drm_i915_gem_object *obj,
 		return ERR_PTR(err);

 	vma = i915_gem_object_ggtt_pin(obj, NULL, 0, 0,
-				       PIN_MAPPABLE | PIN_NONBLOCK);
+				       PIN_MAPPABLE |
+				       PIN_NONBLOCK |
+				       PIN_NONFAULT);
 	if (IS_ERR(vma)) {
 		memset(&cache->node, 0, sizeof(cache->node));
 		err = drm_mm_insert_node_in_range
...
@@ -240,7 +240,8 @@ static int fence_update(struct drm_i915_fence_reg *fence,
 		/* Ensure that all userspace CPU access is completed before
 		 * stealing the fence.
 		 */
-		i915_gem_release_mmap(fence->vma->obj);
+		GEM_BUG_ON(fence->vma->fence != fence);
+		i915_vma_revoke_mmap(fence->vma);

 		fence->vma->fence = NULL;
 		fence->vma = NULL;
@@ -280,8 +281,7 @@ static int fence_update(struct drm_i915_fence_reg *fence,
  *
  * 0 on success, negative error code on failure.
  */
-int
-i915_vma_put_fence(struct i915_vma *vma)
+int i915_vma_put_fence(struct i915_vma *vma)
 {
 	struct drm_i915_fence_reg *fence = vma->fence;
@@ -299,6 +299,8 @@ static struct drm_i915_fence_reg *fence_find(struct drm_i915_private *dev_priv)
 	struct drm_i915_fence_reg *fence;

 	list_for_each_entry(fence, &dev_priv->mm.fence_list, link) {
+		GEM_BUG_ON(fence->vma && fence->vma->fence != fence);
+
 		if (fence->pin_count)
 			continue;
@@ -313,7 +315,7 @@ static struct drm_i915_fence_reg *fence_find(struct drm_i915_private *dev_priv)
 }

 /**
- * i915_vma_get_fence - set up fencing for a vma
+ * i915_vma_pin_fence - set up fencing for a vma
  * @vma: vma to map through a fence reg
  *
  * When mapping objects through the GTT, userspace wants to be able to write
@@ -331,10 +333,11 @@ static struct drm_i915_fence_reg *fence_find(struct drm_i915_private *dev_priv)
  * 0 on success, negative error code on failure.
  */
 int
-i915_vma_get_fence(struct i915_vma *vma)
+i915_vma_pin_fence(struct i915_vma *vma)
 {
 	struct drm_i915_fence_reg *fence;
 	struct i915_vma *set = i915_gem_object_is_tiled(vma->obj) ? vma : NULL;
+	int err;

 	/* Note that we revoke fences on runtime suspend. Therefore the user
 	 * must keep the device awake whilst using the fence.
@@ -344,6 +347,8 @@ i915_vma_get_fence(struct i915_vma *vma)
 	/* Just update our place in the LRU if our fence is getting reused. */
 	if (vma->fence) {
 		fence = vma->fence;
+		GEM_BUG_ON(fence->vma != vma);
+		fence->pin_count++;
 		if (!fence->dirty) {
 			list_move_tail(&fence->link,
 				       &fence->i915->mm.fence_list);
@@ -353,10 +358,25 @@ i915_vma_get_fence(struct i915_vma *vma)
 		fence = fence_find(vma->vm->i915);
 		if (IS_ERR(fence))
 			return PTR_ERR(fence);
+
+		GEM_BUG_ON(fence->pin_count);
+		fence->pin_count++;
 	} else
 		return 0;

-	return fence_update(fence, set);
+	err = fence_update(fence, set);
+	if (err)
+		goto out_unpin;
+
+	GEM_BUG_ON(fence->vma != set);
+	GEM_BUG_ON(vma->fence != (set ? fence : NULL));
+	if (set)
+		return 0;
+
+out_unpin:
+	fence->pin_count--;
+	return err;
 }

 /**
@@ -429,8 +449,10 @@ void i915_gem_revoke_fences(struct drm_i915_private *dev_priv)
 	for (i = 0; i < dev_priv->num_fence_regs; i++) {
 		struct drm_i915_fence_reg *fence = &dev_priv->fence_regs[i];

+		GEM_BUG_ON(fence->vma && fence->vma->fence != fence);
+
 		if (fence->vma)
-			i915_gem_release_mmap(fence->vma->obj);
+			i915_vma_revoke_mmap(fence->vma);
 	}
 }
@@ -450,13 +472,15 @@ void i915_gem_restore_fences(struct drm_i915_private *dev_priv)
 		struct drm_i915_fence_reg *reg = &dev_priv->fence_regs[i];
 		struct i915_vma *vma = reg->vma;

+		GEM_BUG_ON(vma && vma->fence != reg);
+
 		/*
 		 * Commit delayed tiling changes if we have an object still
 		 * attached to the fence, otherwise just clear the fence.
 		 */
 		if (vma && !i915_gem_object_is_tiled(vma->obj)) {
 			GEM_BUG_ON(!reg->dirty);
-			GEM_BUG_ON(!list_empty(&vma->obj->userfault_link));
+			GEM_BUG_ON(i915_vma_has_userfault(vma));

 			list_move(&reg->link, &dev_priv->mm.fence_list);
 			vma->fence = NULL;
...
@@ -42,7 +42,13 @@
 #include "i915_gem_request.h"
 #include "i915_selftest.h"

-#define I915_GTT_PAGE_SIZE 4096UL
+#define I915_GTT_PAGE_SIZE_4K BIT(12)
+#define I915_GTT_PAGE_SIZE_64K BIT(16)
+#define I915_GTT_PAGE_SIZE_2M BIT(21)
+
+#define I915_GTT_PAGE_SIZE I915_GTT_PAGE_SIZE_4K
+
+#define I915_GTT_MAX_PAGE_SIZE I915_GTT_PAGE_SIZE_2M
+
 #define I915_GTT_MIN_ALIGNMENT I915_GTT_PAGE_SIZE

 #define I915_FENCE_REG_NONE -1
@@ -148,6 +154,9 @@ typedef u64 gen8_ppgtt_pml4e_t;
 #define GEN8_PPAT_GET_AGE(x) ((x) & (3 << 4))
 #define CHV_PPAT_GET_SNOOP(x) ((x) & (1 << 6))

+#define GEN8_PDE_IPS_64K BIT(11)
+#define GEN8_PDE_PS_2M   BIT(7)
+
 struct sg_table;

 struct intel_rotation_info {
@@ -207,6 +216,7 @@ struct i915_vma;
 struct i915_page_dma {
 	struct page *page;
+	int order;
 	union {
 		dma_addr_t daddr;
@@ -329,6 +339,8 @@ struct i915_address_space {
 	int (*bind_vma)(struct i915_vma *vma,
			enum i915_cache_level cache_level,
			u32 flags);
+	int (*set_pages)(struct i915_vma *vma);
+	void (*clear_pages)(struct i915_vma *vma);

 	I915_SELFTEST_DECLARE(struct fault_attr fault_attr);
 };
@@ -341,6 +353,12 @@ i915_vm_is_48bit(const struct i915_address_space *vm)
 	return (vm->total - 1) >> 32;
 }

+static inline bool
+i915_vm_has_scratch_64K(struct i915_address_space *vm)
+{
+	return vm->scratch_page.order == get_order(I915_GTT_PAGE_SIZE_64K);
+}
+
 /* The Graphics Translation Table is the way in which GEN hardware translates a
  * Graphics Virtual Address into a Physical Address. In addition to the normal
  * collateral associated with any va->pa translations GEN hardware also has a
...
@@ -44,12 +44,12 @@ static void internal_free_pages(struct sg_table *st)
 	kfree(st);
 }

-static struct sg_table *
-i915_gem_object_get_pages_internal(struct drm_i915_gem_object *obj)
+static int i915_gem_object_get_pages_internal(struct drm_i915_gem_object *obj)
 {
 	struct drm_i915_private *i915 = to_i915(obj->base.dev);
 	struct sg_table *st;
 	struct scatterlist *sg;
+	unsigned int sg_page_sizes;
 	unsigned int npages;
 	int max_order;
 	gfp_t gfp;
@@ -78,16 +78,17 @@ i915_gem_object_get_pages_internal(struct drm_i915_gem_object *obj)
 create_st:
 	st = kmalloc(sizeof(*st), GFP_KERNEL);
 	if (!st)
-		return ERR_PTR(-ENOMEM);
+		return -ENOMEM;

 	npages = obj->base.size / PAGE_SIZE;
 	if (sg_alloc_table(st, npages, GFP_KERNEL)) {
 		kfree(st);
-		return ERR_PTR(-ENOMEM);
+		return -ENOMEM;
 	}

 	sg = st->sgl;
 	st->nents = 0;
+	sg_page_sizes = 0;

 	do {
 		int order = min(fls(npages) - 1, max_order);
@@ -105,6 +106,7 @@ i915_gem_object_get_pages_internal(struct drm_i915_gem_object *obj)
 		} while (1);

 		sg_set_page(sg, page, PAGE_SIZE << order, 0);
+		sg_page_sizes |= PAGE_SIZE << order;
 		st->nents++;

 		npages -= 1 << order;
@@ -132,13 +134,17 @@ i915_gem_object_get_pages_internal(struct drm_i915_gem_object *obj)
 	 * object are only valid whilst active and pinned.
 	 */
 	obj->mm.madv = I915_MADV_DONTNEED;
-	return st;
+
+	__i915_gem_object_set_pages(obj, st, sg_page_sizes);
+
+	return 0;

 err:
 	sg_set_page(sg, NULL, 0, 0);
 	sg_mark_end(sg);
 	internal_free_pages(st);
-	return ERR_PTR(-ENOMEM);
+
+	return -ENOMEM;
 }

 static void i915_gem_object_put_pages_internal(struct drm_i915_gem_object *obj,
...
@@ -69,7 +69,7 @@ struct drm_i915_gem_object_ops {
	 * being released or under memory pressure (where we attempt to
	 * reap pages for the shrinker).
	 */
-	struct sg_table *(*get_pages)(struct drm_i915_gem_object *);
+	int (*get_pages)(struct drm_i915_gem_object *);
	void (*put_pages)(struct drm_i915_gem_object *, struct sg_table *);

	int (*pwrite)(struct drm_i915_gem_object *,
@@ -123,6 +123,7 @@ struct drm_i915_gem_object {
	/**
	 * Whether the object is currently in the GGTT mmap.
	 */
+	unsigned int userfault_count;
	struct list_head userfault_link;

	struct list_head batch_pool_link;
@@ -169,6 +170,35 @@ struct drm_i915_gem_object {
		struct sg_table *pages;
		void *mapping;

+		/* TODO: whack some of this into the error state */
+		struct i915_page_sizes {
+			/**
+			 * The sg mask of the pages sg_table. i.e the mask of
+			 * of the lengths for each sg entry.
+			 */
+			unsigned int phys;
+
+			/**
+			 * The gtt page sizes we are allowed to use given the
+			 * sg mask and the supported page sizes. This will
+			 * express the smallest unit we can use for the whole
+			 * object, as well as the larger sizes we may be able
+			 * to use opportunistically.
+			 */
+			unsigned int sg;
+
+			/**
+			 * The actual gtt page size usage. Since we can have
+			 * multiple vma associated with this object we need to
+			 * prevent any trampling of state, hence a copy of this
+			 * struct also lives in each vma, therefore the gtt
+			 * value here should only be read/write through the vma.
+			 */
+			unsigned int gtt;
+		} page_sizes;
+
+		I915_SELFTEST_DECLARE(unsigned int page_mask);
+
		struct i915_gem_object_page_iter {
			struct scatterlist *sg_pos;
			unsigned int sg_idx; /* in pages, but 32bit eek! */
...
@@ -186,7 +186,7 @@ i915_priotree_init(struct i915_priotree *pt)
 	INIT_LIST_HEAD(&pt->signalers_list);
 	INIT_LIST_HEAD(&pt->waiters_list);
 	INIT_LIST_HEAD(&pt->link);
-	pt->priority = INT_MIN;
+	pt->priority = I915_PRIORITY_INVALID;
 }

 static int reset_all_global_seqno(struct drm_i915_private *i915, u32 seqno)
@@ -416,7 +416,7 @@ static void i915_gem_request_retire(struct drm_i915_gem_request *request)
 	spin_lock_irq(&request->lock);
 	if (request->waitboost)
-		atomic_dec(&request->i915->rps.num_waiters);
+		atomic_dec(&request->i915->gt_pm.rps.num_waiters);
 	dma_fence_signal_locked(&request->fence);
 	spin_unlock_irq(&request->lock);
@@ -556,7 +556,16 @@ submit_notify(struct i915_sw_fence *fence, enum i915_sw_fence_notify state)
 	switch (state) {
 	case FENCE_COMPLETE:
 		trace_i915_gem_request_submit(request);
+		/*
+		 * We need to serialize use of the submit_request() callback with its
+		 * hotplugging performed during an emergency i915_gem_set_wedged().
+		 * We use the RCU mechanism to mark the critical section in order to
+		 * force i915_gem_set_wedged() to wait until the submit_request() is
+		 * completed before proceeding.
+		 */
+		rcu_read_lock();
 		request->engine->submit_request(request);
+		rcu_read_unlock();
 		break;

 	case FENCE_FREE:
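[Editor's note: the comment added above is the reader half of an RCU publish/quiesce pattern; the writer half lives in i915_gem_set_wedged(). A stripped-down sketch of the idea follows, kernel-style and illustrative only, not the driver's exact code; nop_submit_request stands in for the callback that gets hot-plugged in.]

/* Reader side: each submission runs the current callback inside an RCU
 * read-side critical section (mirrors the hunk above). */
rcu_read_lock();
request->engine->submit_request(request);
rcu_read_unlock();

/* Writer side (set_wedged): swap in the nop callback on every engine, then
 * wait out all in-flight readers before relying on the swap having landed. */
for_each_engine(engine, i915, id)
	engine->submit_request = nop_submit_request;
synchronize_rcu();	/* no reader can still be using the old pointer */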
@@ -587,6 +596,13 @@ i915_gem_request_alloc(struct intel_engine_cs *engine,
 	lockdep_assert_held(&dev_priv->drm.struct_mutex);

+	/*
+	 * Preempt contexts are reserved for exclusive use to inject a
+	 * preemption context switch. They are never to be used for any trivial
+	 * request!
+	 */
+	GEM_BUG_ON(ctx == dev_priv->preempt_context);
+
 	/* ABI: Before userspace accesses the GPU (e.g. execbuffer), report
 	 * EIO if the GPU is already wedged.
 	 */
...
@@ -30,6 +30,8 @@
 #include "i915_gem.h"
 #include "i915_sw_fence.h"

+#include <uapi/drm/i915_drm.h>
+
 struct drm_file;
 struct drm_i915_gem_object;
 struct drm_i915_gem_request;
@@ -69,9 +71,14 @@ struct i915_priotree {
 	struct list_head waiters_list; /* those after us, they depend upon us */
 	struct list_head link;
 	int priority;
-#define I915_PRIORITY_MAX 1024
-#define I915_PRIORITY_NORMAL 0
-#define I915_PRIORITY_MIN (-I915_PRIORITY_MAX)
+};
+
+enum {
+	I915_PRIORITY_MIN = I915_CONTEXT_MIN_USER_PRIORITY - 1,
+	I915_PRIORITY_NORMAL = I915_CONTEXT_DEFAULT_PRIORITY,
+	I915_PRIORITY_MAX = I915_CONTEXT_MAX_USER_PRIORITY + 1,
+	I915_PRIORITY_INVALID = INT_MIN
 };

 struct i915_gem_capture_list {
...
@@ -539,12 +539,18 @@ i915_pages_create_for_stolen(struct drm_device *dev,
 	return st;
 }

-static struct sg_table *
-i915_gem_object_get_pages_stolen(struct drm_i915_gem_object *obj)
+static int i915_gem_object_get_pages_stolen(struct drm_i915_gem_object *obj)
 {
-	return i915_pages_create_for_stolen(obj->base.dev,
-					    obj->stolen->start,
-					    obj->stolen->size);
+	struct sg_table *pages =
+		i915_pages_create_for_stolen(obj->base.dev,
+					     obj->stolen->start,
+					     obj->stolen->size);
+	if (IS_ERR(pages))
+		return PTR_ERR(pages);
+
+	__i915_gem_object_set_pages(obj, pages, obj->stolen->size);
+
+	return 0;
 }

 static void i915_gem_object_put_pages_stolen(struct drm_i915_gem_object *obj,
...
@@ -164,7 +164,6 @@ static struct i915_mmu_notifier *
 i915_mmu_notifier_create(struct mm_struct *mm)
 {
 	struct i915_mmu_notifier *mn;
-	int ret;

 	mn = kmalloc(sizeof(*mn), GFP_KERNEL);
 	if (mn == NULL)
@@ -179,14 +178,6 @@ i915_mmu_notifier_create(struct mm_struct *mm)
 		return ERR_PTR(-ENOMEM);
 	}

-	/* Protected by mmap_sem (write-lock) */
-	ret = __mmu_notifier_register(&mn->mn, mm);
-	if (ret) {
-		destroy_workqueue(mn->wq);
-		kfree(mn);
-		return ERR_PTR(ret);
-	}
-
 	return mn;
 }
@@ -210,23 +201,40 @@ i915_gem_userptr_release__mmu_notifier(struct drm_i915_gem_object *obj)
 static struct i915_mmu_notifier *
 i915_mmu_notifier_find(struct i915_mm_struct *mm)
 {
-	struct i915_mmu_notifier *mn = mm->mn;
+	struct i915_mmu_notifier *mn;
+	int err = 0;

+	mn = mm->mn;
 	if (mn)
 		return mn;

+	mn = i915_mmu_notifier_create(mm->mm);
+	if (IS_ERR(mn))
+		err = PTR_ERR(mn);
+
 	down_write(&mm->mm->mmap_sem);
 	mutex_lock(&mm->i915->mm_lock);
-	if ((mn = mm->mn) == NULL) {
-		mn = i915_mmu_notifier_create(mm->mm);
-		if (!IS_ERR(mn))
-			mm->mn = mn;
+	if (mm->mn == NULL && !err) {
+		/* Protected by mmap_sem (write-lock) */
+		err = __mmu_notifier_register(&mn->mn, mm->mm);
+		if (!err) {
+			/* Protected by mm_lock */
+			mm->mn = fetch_and_zero(&mn);
+		}
+	} else {
+		/* someone else raced and successfully installed the mmu
+		 * notifier, we can cancel our own errors */
+		err = 0;
 	}
 	mutex_unlock(&mm->i915->mm_lock);
 	up_write(&mm->mm->mmap_sem);

-	return mn;
+	if (mn) {
+		destroy_workqueue(mn->wq);
+		kfree(mn);
+	}
+
+	return err ? ERR_PTR(err) : mm->mn;
 }

 static int
@@ -405,6 +413,7 @@ __i915_gem_userptr_alloc_pages(struct drm_i915_gem_object *obj,
 {
 	unsigned int max_segment = i915_sg_segment_size();
 	struct sg_table *st;
+	unsigned int sg_page_sizes;
 	int ret;

 	st = kmalloc(sizeof(*st), GFP_KERNEL);
@@ -434,6 +443,10 @@ __i915_gem_userptr_alloc_pages(struct drm_i915_gem_object *obj,
 		return ERR_PTR(ret);
 	}

+	sg_page_sizes = i915_sg_page_sizes(st->sgl);
+
+	__i915_gem_object_set_pages(obj, st, sg_page_sizes);
+
 	return st;
 }
@@ -521,7 +534,6 @@ __i915_gem_userptr_get_pages_worker(struct work_struct *_work)
 		pages = __i915_gem_userptr_alloc_pages(obj, pvec,
 						       npages);
 		if (!IS_ERR(pages)) {
-			__i915_gem_object_set_pages(obj, pages);
 			pinned = 0;
 			pages = NULL;
 		}
@@ -582,8 +594,7 @@ __i915_gem_userptr_get_pages_schedule(struct drm_i915_gem_object *obj)
 	return ERR_PTR(-EAGAIN);
 }

-static struct sg_table *
-i915_gem_userptr_get_pages(struct drm_i915_gem_object *obj)
+static int i915_gem_userptr_get_pages(struct drm_i915_gem_object *obj)
 {
 	const int num_pages = obj->base.size >> PAGE_SHIFT;
 	struct mm_struct *mm = obj->userptr.mm->mm;
@@ -612,9 +623,9 @@ i915_gem_userptr_get_pages(struct drm_i915_gem_object *obj)
 	if (obj->userptr.work) {
 		/* active flag should still be held for the pending work */
 		if (IS_ERR(obj->userptr.work))
-			return ERR_CAST(obj->userptr.work);
+			return PTR_ERR(obj->userptr.work);
 		else
-			return ERR_PTR(-EAGAIN);
+			return -EAGAIN;
 	}

 	pvec = NULL;
@@ -650,7 +661,7 @@ i915_gem_userptr_get_pages(struct drm_i915_gem_object *obj)
 		release_pages(pvec, pinned, 0);
 	kvfree(pvec);

-	return pages;
+	return PTR_ERR_OR_ZERO(pages);
 }

 static void
...
/*
* Copyright © 2017 Intel Corporation
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice (including the next
* paragraph) shall be included in all copies or substantial portions of the
* Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
* IN THE SOFTWARE.
*
*/
#include <linux/fs.h>
#include <linux/mount.h>
#include <linux/pagemap.h>
#include "i915_drv.h"
#include "i915_gemfs.h"
int i915_gemfs_init(struct drm_i915_private *i915)
{
struct file_system_type *type;
struct vfsmount *gemfs;
type = get_fs_type("tmpfs");
if (!type)
return -ENODEV;
gemfs = kern_mount(type);
if (IS_ERR(gemfs))
return PTR_ERR(gemfs);
/*
* Enable huge-pages for objects that are at least HPAGE_PMD_SIZE, most
* likely 2M. Note that within_size may overallocate huge-pages, if say
* we allocate an object of size 2M + 4K, we may get 2M + 2M, but under
* memory pressure shmem should split any huge-pages which can be
* shrunk.
*/
if (has_transparent_hugepage()) {
struct super_block *sb = gemfs->mnt_sb;
char options[] = "huge=within_size";
int flags = 0;
int err;
err = sb->s_op->remount_fs(sb, &flags, options);
if (err) {
kern_unmount(gemfs);
return err;
}
}
i915->mm.gemfs = gemfs;
return 0;
}
void i915_gemfs_fini(struct drm_i915_private *i915)
{
kern_unmount(i915->mm.gemfs);
}
/*
* Copyright © 2017 Intel Corporation
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice (including the next
* paragraph) shall be included in all copies or substantial portions of the
* Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
* IN THE SOFTWARE.
*
*/
#ifndef __I915_GEMFS_H__
#define __I915_GEMFS_H__
struct drm_i915_private;
int i915_gemfs_init(struct drm_i915_private *i915);
void i915_gemfs_fini(struct drm_i915_private *i915);
#endif
@@ -377,9 +377,9 @@ static void error_print_request(struct drm_i915_error_state_buf *m,
 	if (!erq->seqno)
 		return;

-	err_printf(m, "%s pid %d, ban score %d, seqno %8x:%08x, emitted %dms ago, head %08x, tail %08x\n",
+	err_printf(m, "%s pid %d, ban score %d, seqno %8x:%08x, prio %d, emitted %dms ago, head %08x, tail %08x\n",
		   prefix, erq->pid, erq->ban_score,
-		   erq->context, erq->seqno,
+		   erq->context, erq->seqno, erq->priority,
		   jiffies_to_msecs(jiffies - erq->jiffies),
		   erq->head, erq->tail);
 }
@@ -388,9 +388,9 @@ static void error_print_context(struct drm_i915_error_state_buf *m,
				 const char *header,
				 const struct drm_i915_error_context *ctx)
 {
-	err_printf(m, "%s%s[%d] user_handle %d hw_id %d, ban score %d guilty %d active %d\n",
+	err_printf(m, "%s%s[%d] user_handle %d hw_id %d, prio %d, ban score %d guilty %d active %d\n",
		   header, ctx->comm, ctx->pid, ctx->handle, ctx->hw_id,
-		   ctx->ban_score, ctx->guilty, ctx->active);
+		   ctx->priority, ctx->ban_score, ctx->guilty, ctx->active);
 }

 static void error_print_engine(struct drm_i915_error_state_buf *m,
@@ -1271,6 +1271,7 @@ static void record_request(struct drm_i915_gem_request *request,
			   struct drm_i915_error_request *erq)
 {
 	erq->context = request->ctx->hw_id;
+	erq->priority = request->priotree.priority;
 	erq->ban_score = atomic_read(&request->ctx->ban_score);
 	erq->seqno = request->global_seqno;
 	erq->jiffies = request->emitted_jiffies;
@@ -1364,6 +1365,7 @@ static void record_context(struct drm_i915_error_context *e,
 	e->handle = ctx->user_handle;
 	e->hw_id = ctx->hw_id;
+	e->priority = ctx->priority;
 	e->ban_score = atomic_read(&ctx->ban_score);
 	e->guilty = atomic_read(&ctx->guilty_count);
 	e->active = atomic_read(&ctx->active_count);
@@ -1672,8 +1674,8 @@ static void i915_capture_gen_state(struct drm_i915_private *dev_priv,
				   struct i915_gpu_state *error)
 {
 	error->awake = dev_priv->gt.awake;
-	error->wakelock = atomic_read(&dev_priv->pm.wakeref_count);
-	error->suspended = dev_priv->pm.suspended;
+	error->wakelock = atomic_read(&dev_priv->runtime_pm.wakeref_count);
+	error->suspended = dev_priv->runtime_pm.suspended;

 	error->iommu = -1;
 #ifdef CONFIG_INTEL_IOMMU
...
...@@ -21,12 +21,13 @@ ...@@ -21,12 +21,13 @@
* IN THE SOFTWARE. * IN THE SOFTWARE.
* *
*/ */
#include <linux/circ_buf.h>
#include "i915_drv.h"
#include "intel_uc.h"
#include <linux/circ_buf.h>
#include <trace/events/dma_fence.h> #include <trace/events/dma_fence.h>
#include "i915_guc_submission.h"
#include "i915_drv.h"
/** /**
* DOC: GuC-based command submission * DOC: GuC-based command submission
* *
...@@ -337,7 +338,7 @@ static void guc_stage_desc_init(struct intel_guc *guc, ...@@ -337,7 +338,7 @@ static void guc_stage_desc_init(struct intel_guc *guc,
for_each_engine_masked(engine, dev_priv, client->engines, tmp) { for_each_engine_masked(engine, dev_priv, client->engines, tmp) {
struct intel_context *ce = &ctx->engine[engine->id]; struct intel_context *ce = &ctx->engine[engine->id];
uint32_t guc_engine_id = engine->guc_id; u32 guc_engine_id = engine->guc_id;
struct guc_execlist_context *lrc = &desc->lrc[guc_engine_id]; struct guc_execlist_context *lrc = &desc->lrc[guc_engine_id];
/* TODO: We have a design issue to be solved here. Only when we /* TODO: We have a design issue to be solved here. Only when we
...@@ -387,13 +388,13 @@ static void guc_stage_desc_init(struct intel_guc *guc, ...@@ -387,13 +388,13 @@ static void guc_stage_desc_init(struct intel_guc *guc,
gfx_addr = guc_ggtt_offset(client->vma); gfx_addr = guc_ggtt_offset(client->vma);
desc->db_trigger_phy = sg_dma_address(client->vma->pages->sgl) + desc->db_trigger_phy = sg_dma_address(client->vma->pages->sgl) +
client->doorbell_offset; client->doorbell_offset;
desc->db_trigger_cpu = (uintptr_t)__get_doorbell(client); desc->db_trigger_cpu = ptr_to_u64(__get_doorbell(client));
desc->db_trigger_uk = gfx_addr + client->doorbell_offset; desc->db_trigger_uk = gfx_addr + client->doorbell_offset;
desc->process_desc = gfx_addr + client->proc_desc_offset; desc->process_desc = gfx_addr + client->proc_desc_offset;
desc->wq_addr = gfx_addr + GUC_DB_SIZE; desc->wq_addr = gfx_addr + GUC_DB_SIZE;
desc->wq_size = GUC_WQ_SIZE; desc->wq_size = GUC_WQ_SIZE;
desc->desc_private = (uintptr_t)client; desc->desc_private = ptr_to_u64(client);
} }
static void guc_stage_desc_fini(struct intel_guc *guc, static void guc_stage_desc_fini(struct intel_guc *guc,
...@@ -499,7 +500,7 @@ static void i915_guc_submit(struct intel_engine_cs *engine) ...@@ -499,7 +500,7 @@ static void i915_guc_submit(struct intel_engine_cs *engine)
const unsigned int engine_id = engine->id; const unsigned int engine_id = engine->id;
unsigned int n; unsigned int n;
for (n = 0; n < ARRAY_SIZE(execlists->port); n++) { for (n = 0; n < execlists_num_ports(execlists); n++) {
struct drm_i915_gem_request *rq; struct drm_i915_gem_request *rq;
unsigned int count; unsigned int count;
...@@ -643,48 +644,6 @@ static void i915_guc_irq_handler(unsigned long data) ...@@ -643,48 +644,6 @@ static void i915_guc_irq_handler(unsigned long data)
* path of i915_guc_submit() above. * path of i915_guc_submit() above.
*/ */
/**
* intel_guc_allocate_vma() - Allocate a GGTT VMA for GuC usage
* @guc: the guc
* @size: size of area to allocate (both virtual space and memory)
*
* This is a wrapper to create an object for use with the GuC. In order to
* use it inside the GuC, an object needs to be pinned for its lifetime, so we
* allocate both some backing storage and a range inside the Global GTT. We must
* pin it in the GGTT somewhere other than [0, GUC_WOPCM_TOP) because that
* range is reserved inside GuC.
*
* Return: A i915_vma if successful, otherwise an ERR_PTR.
*/
struct i915_vma *intel_guc_allocate_vma(struct intel_guc *guc, u32 size)
{
struct drm_i915_private *dev_priv = guc_to_i915(guc);
struct drm_i915_gem_object *obj;
struct i915_vma *vma;
int ret;
obj = i915_gem_object_create(dev_priv, size);
if (IS_ERR(obj))
return ERR_CAST(obj);
vma = i915_vma_instance(obj, &dev_priv->ggtt.base, NULL);
if (IS_ERR(vma))
goto err;
ret = i915_vma_pin(vma, 0, PAGE_SIZE,
PIN_GLOBAL | PIN_OFFSET_BIAS | GUC_WOPCM_TOP);
if (ret) {
vma = ERR_PTR(ret);
goto err;
}
return vma;
err:
i915_gem_object_put(obj);
return vma;
}
/* Check that a doorbell register is in the expected state */ /* Check that a doorbell register is in the expected state */
static bool doorbell_ok(struct intel_guc *guc, u16 db_id) static bool doorbell_ok(struct intel_guc *guc, u16 db_id)
{ {
...@@ -796,8 +755,8 @@ static int guc_init_doorbell_hw(struct intel_guc *guc) ...@@ -796,8 +755,8 @@ static int guc_init_doorbell_hw(struct intel_guc *guc)
*/ */
static struct i915_guc_client * static struct i915_guc_client *
guc_client_alloc(struct drm_i915_private *dev_priv, guc_client_alloc(struct drm_i915_private *dev_priv,
uint32_t engines, u32 engines,
uint32_t priority, u32 priority,
struct i915_gem_context *ctx) struct i915_gem_context *ctx)
{ {
struct i915_guc_client *client; struct i915_guc_client *client;
...@@ -1069,6 +1028,7 @@ void i915_guc_submission_fini(struct drm_i915_private *dev_priv) ...@@ -1069,6 +1028,7 @@ void i915_guc_submission_fini(struct drm_i915_private *dev_priv)
static void guc_interrupts_capture(struct drm_i915_private *dev_priv) static void guc_interrupts_capture(struct drm_i915_private *dev_priv)
{ {
struct intel_rps *rps = &dev_priv->gt_pm.rps;
struct intel_engine_cs *engine; struct intel_engine_cs *engine;
enum intel_engine_id id; enum intel_engine_id id;
int irqs; int irqs;
...@@ -1105,12 +1065,13 @@ static void guc_interrupts_capture(struct drm_i915_private *dev_priv) ...@@ -1105,12 +1065,13 @@ static void guc_interrupts_capture(struct drm_i915_private *dev_priv)
* Here we CLEAR REDIRECT_TO_GUC bit in pm_intrmsk_mbz, which will * Here we CLEAR REDIRECT_TO_GUC bit in pm_intrmsk_mbz, which will
* result in the register bit being left SET! * result in the register bit being left SET!
*/ */
dev_priv->rps.pm_intrmsk_mbz |= ARAT_EXPIRED_INTRMSK; rps->pm_intrmsk_mbz |= ARAT_EXPIRED_INTRMSK;
dev_priv->rps.pm_intrmsk_mbz &= ~GEN8_PMINTR_DISABLE_REDIRECT_TO_GUC; rps->pm_intrmsk_mbz &= ~GEN8_PMINTR_DISABLE_REDIRECT_TO_GUC;
} }
static void guc_interrupts_release(struct drm_i915_private *dev_priv) static void guc_interrupts_release(struct drm_i915_private *dev_priv)
{ {
struct intel_rps *rps = &dev_priv->gt_pm.rps;
struct intel_engine_cs *engine; struct intel_engine_cs *engine;
enum intel_engine_id id; enum intel_engine_id id;
int irqs; int irqs;
...@@ -1129,8 +1090,8 @@ static void guc_interrupts_release(struct drm_i915_private *dev_priv) ...@@ -1129,8 +1090,8 @@ static void guc_interrupts_release(struct drm_i915_private *dev_priv)
I915_WRITE(GUC_VCS2_VCS1_IER, 0); I915_WRITE(GUC_VCS2_VCS1_IER, 0);
I915_WRITE(GUC_WD_VECS_IER, 0); I915_WRITE(GUC_WD_VECS_IER, 0);
dev_priv->rps.pm_intrmsk_mbz |= GEN8_PMINTR_DISABLE_REDIRECT_TO_GUC; rps->pm_intrmsk_mbz |= GEN8_PMINTR_DISABLE_REDIRECT_TO_GUC;
dev_priv->rps.pm_intrmsk_mbz &= ~ARAT_EXPIRED_INTRMSK; rps->pm_intrmsk_mbz &= ~ARAT_EXPIRED_INTRMSK;
} }
int i915_guc_submission_enable(struct drm_i915_private *dev_priv) int i915_guc_submission_enable(struct drm_i915_private *dev_priv)
...@@ -1212,55 +1173,3 @@ void i915_guc_submission_disable(struct drm_i915_private *dev_priv) ...@@ -1212,55 +1173,3 @@ void i915_guc_submission_disable(struct drm_i915_private *dev_priv)
guc_client_free(guc->execbuf_client); guc_client_free(guc->execbuf_client);
guc->execbuf_client = NULL; guc->execbuf_client = NULL;
} }
/**
* intel_guc_suspend() - notify GuC entering suspend state
* @dev_priv: i915 device private
*/
int intel_guc_suspend(struct drm_i915_private *dev_priv)
{
struct intel_guc *guc = &dev_priv->guc;
struct i915_gem_context *ctx;
u32 data[3];
if (guc->fw.load_status != INTEL_UC_FIRMWARE_SUCCESS)
return 0;
gen9_disable_guc_interrupts(dev_priv);
ctx = dev_priv->kernel_context;
data[0] = INTEL_GUC_ACTION_ENTER_S_STATE;
/* any value greater than GUC_POWER_D0 */
data[1] = GUC_POWER_D1;
/* first page is shared data with GuC */
data[2] = guc_ggtt_offset(ctx->engine[RCS].state) + LRC_GUCSHR_PN * PAGE_SIZE;
return intel_guc_send(guc, data, ARRAY_SIZE(data));
}
/**
* intel_guc_resume() - notify GuC resuming from suspend state
* @dev_priv: i915 device private
*/
int intel_guc_resume(struct drm_i915_private *dev_priv)
{
struct intel_guc *guc = &dev_priv->guc;
struct i915_gem_context *ctx;
u32 data[3];
if (guc->fw.load_status != INTEL_UC_FIRMWARE_SUCCESS)
return 0;
if (i915_modparams.guc_log_level >= 0)
gen9_enable_guc_interrupts(dev_priv);
ctx = dev_priv->kernel_context;
data[0] = INTEL_GUC_ACTION_EXIT_S_STATE;
data[1] = GUC_POWER_D0;
/* first page is shared data with GuC */
data[2] = guc_ggtt_offset(ctx->engine[RCS].state) + LRC_GUCSHR_PN * PAGE_SIZE;
return intel_guc_send(guc, data, ARRAY_SIZE(data));
}
/*
* Copyright © 2014-2017 Intel Corporation
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice (including the next
* paragraph) shall be included in all copies or substantial portions of the
* Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
* IN THE SOFTWARE.
*
*/
#ifndef _I915_GUC_SUBMISSION_H_
#define _I915_GUC_SUBMISSION_H_
#include <linux/spinlock.h>
#include "i915_gem.h"
struct drm_i915_private;
/*
* This structure primarily describes the GEM object shared with the GuC.
* The specs sometimes refer to this object as a "GuC context", but we use
* the term "client" to avoid confusion with hardware contexts. This
* GEM object is held for the entire lifetime of our interaction with
* the GuC, being allocated before the GuC is loaded with its firmware.
* Because there's no way to update the address used by the GuC after
* initialisation, the shared object must stay pinned into the GGTT as
* long as the GuC is in use. We also keep the first page (only) mapped
* into kernel address space, as it includes shared data that must be
* updated on every request submission.
*
* The single GEM object described here is actually made up of several
* separate areas, as far as the GuC is concerned. The first page (kept
* kmap'd) includes the "process descriptor" which holds sequence data for
* the doorbell, and one cacheline which actually *is* the doorbell; a
* write to this will "ring the doorbell" (i.e. send an interrupt to the
* GuC). The subsequent pages of the client object constitute the work
* queue (a circular array of work items), again described in the process
* descriptor. Work queue pages are mapped momentarily as required.
*/
struct i915_guc_client {
struct i915_vma *vma;
void *vaddr;
struct i915_gem_context *owner;
struct intel_guc *guc;
/* bitmap of (host) engine ids */
u32 engines;
u32 priority;
u32 stage_id;
u32 proc_desc_offset;
u16 doorbell_id;
unsigned long doorbell_offset;
spinlock_t wq_lock;
/* Per-engine counts of GuC submissions */
u64 submissions[I915_NUM_ENGINES];
};
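/*
 * To make the layout described above concrete (illustration only, not part
 * of this series): both special areas live in the kmap'd first page at the
 * offsets recorded in the client, so hypothetical accessors would be
 *
 *	void *doorbell  = client->vaddr + client->doorbell_offset;
 *	void *proc_desc = client->vaddr + client->proc_desc_offset;
 *
 * and a write to the doorbell cacheline is what raises the interrupt to
 * the GuC.
 */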
int i915_guc_submission_init(struct drm_i915_private *dev_priv);
int i915_guc_submission_enable(struct drm_i915_private *dev_priv);
void i915_guc_submission_disable(struct drm_i915_private *dev_priv);
void i915_guc_submission_fini(struct drm_i915_private *dev_priv);
#endif
...@@ -146,9 +146,6 @@ i915_param_named(disable_display, bool, 0400, ...@@ -146,9 +146,6 @@ i915_param_named(disable_display, bool, 0400,
i915_param_named_unsafe(enable_cmd_parser, bool, 0400, i915_param_named_unsafe(enable_cmd_parser, bool, 0400,
"Enable command parsing (true=enabled [default], false=disabled)"); "Enable command parsing (true=enabled [default], false=disabled)");
i915_param_named_unsafe(use_mmio_flip, int, 0600,
"use MMIO flips (-1=never, 0=driver discretion [default], 1=always)");
i915_param_named(mmio_debug, int, 0600, i915_param_named(mmio_debug, int, 0600,
"Enable the MMIO debug code for the first N failures (default: off). " "Enable the MMIO debug code for the first N failures (default: off). "
"This may negatively affect performance."); "This may negatively affect performance.");
......
...@@ -49,7 +49,6 @@ ...@@ -49,7 +49,6 @@
param(int, guc_log_level, -1) \ param(int, guc_log_level, -1) \
param(char *, guc_firmware_path, NULL) \ param(char *, guc_firmware_path, NULL) \
param(char *, huc_firmware_path, NULL) \ param(char *, huc_firmware_path, NULL) \
param(int, use_mmio_flip, 0) \
param(int, mmio_debug, 0) \ param(int, mmio_debug, 0) \
param(int, edp_vswing, 0) \ param(int, edp_vswing, 0) \
param(int, reset, 2) \ param(int, reset, 2) \
......
...@@ -54,8 +54,14 @@ ...@@ -54,8 +54,14 @@
.color = { .degamma_lut_size = 512, .gamma_lut_size = 512 } .color = { .degamma_lut_size = 512, .gamma_lut_size = 512 }
#define CHV_COLORS \ #define CHV_COLORS \
.color = { .degamma_lut_size = 65, .gamma_lut_size = 257 } .color = { .degamma_lut_size = 65, .gamma_lut_size = 257 }
#define GLK_COLORS \
.color = { .degamma_lut_size = 0, .gamma_lut_size = 1024 }
/* Keep in gen based order, and chronological order within a gen */ /* Keep in gen based order, and chronological order within a gen */
#define GEN_DEFAULT_PAGE_SIZES \
.page_sizes = I915_GTT_PAGE_SIZE_4K
#define GEN2_FEATURES \ #define GEN2_FEATURES \
.gen = 2, .num_pipes = 1, \ .gen = 2, .num_pipes = 1, \
.has_overlay = 1, .overlay_needs_physical = 1, \ .has_overlay = 1, .overlay_needs_physical = 1, \
...@@ -65,6 +71,7 @@ ...@@ -65,6 +71,7 @@
.ring_mask = RENDER_RING, \ .ring_mask = RENDER_RING, \
.has_snoop = true, \ .has_snoop = true, \
GEN_DEFAULT_PIPEOFFSETS, \ GEN_DEFAULT_PIPEOFFSETS, \
GEN_DEFAULT_PAGE_SIZES, \
CURSOR_OFFSETS CURSOR_OFFSETS
static const struct intel_device_info intel_i830_info __initconst = { static const struct intel_device_info intel_i830_info __initconst = {
...@@ -98,6 +105,7 @@ static const struct intel_device_info intel_i865g_info __initconst = { ...@@ -98,6 +105,7 @@ static const struct intel_device_info intel_i865g_info __initconst = {
.ring_mask = RENDER_RING, \ .ring_mask = RENDER_RING, \
.has_snoop = true, \ .has_snoop = true, \
GEN_DEFAULT_PIPEOFFSETS, \ GEN_DEFAULT_PIPEOFFSETS, \
GEN_DEFAULT_PAGE_SIZES, \
CURSOR_OFFSETS CURSOR_OFFSETS
static const struct intel_device_info intel_i915g_info __initconst = { static const struct intel_device_info intel_i915g_info __initconst = {
...@@ -161,6 +169,7 @@ static const struct intel_device_info intel_pineview_info __initconst = { ...@@ -161,6 +169,7 @@ static const struct intel_device_info intel_pineview_info __initconst = {
.ring_mask = RENDER_RING, \ .ring_mask = RENDER_RING, \
.has_snoop = true, \ .has_snoop = true, \
GEN_DEFAULT_PIPEOFFSETS, \ GEN_DEFAULT_PIPEOFFSETS, \
GEN_DEFAULT_PAGE_SIZES, \
CURSOR_OFFSETS CURSOR_OFFSETS
static const struct intel_device_info intel_i965g_info __initconst = { static const struct intel_device_info intel_i965g_info __initconst = {
...@@ -203,6 +212,7 @@ static const struct intel_device_info intel_gm45_info __initconst = { ...@@ -203,6 +212,7 @@ static const struct intel_device_info intel_gm45_info __initconst = {
.ring_mask = RENDER_RING | BSD_RING, \ .ring_mask = RENDER_RING | BSD_RING, \
.has_snoop = true, \ .has_snoop = true, \
GEN_DEFAULT_PIPEOFFSETS, \ GEN_DEFAULT_PIPEOFFSETS, \
GEN_DEFAULT_PAGE_SIZES, \
CURSOR_OFFSETS CURSOR_OFFSETS
static const struct intel_device_info intel_ironlake_d_info __initconst = { static const struct intel_device_info intel_ironlake_d_info __initconst = {
...@@ -226,6 +236,7 @@ static const struct intel_device_info intel_ironlake_m_info __initconst = { ...@@ -226,6 +236,7 @@ static const struct intel_device_info intel_ironlake_m_info __initconst = {
.has_rc6p = 1, \ .has_rc6p = 1, \
.has_aliasing_ppgtt = 1, \ .has_aliasing_ppgtt = 1, \
GEN_DEFAULT_PIPEOFFSETS, \ GEN_DEFAULT_PIPEOFFSETS, \
GEN_DEFAULT_PAGE_SIZES, \
CURSOR_OFFSETS CURSOR_OFFSETS
#define SNB_D_PLATFORM \ #define SNB_D_PLATFORM \
...@@ -269,6 +280,7 @@ static const struct intel_device_info intel_sandybridge_m_gt2_info __initconst = ...@@ -269,6 +280,7 @@ static const struct intel_device_info intel_sandybridge_m_gt2_info __initconst =
.has_aliasing_ppgtt = 1, \ .has_aliasing_ppgtt = 1, \
.has_full_ppgtt = 1, \ .has_full_ppgtt = 1, \
GEN_DEFAULT_PIPEOFFSETS, \ GEN_DEFAULT_PIPEOFFSETS, \
GEN_DEFAULT_PAGE_SIZES, \
IVB_CURSOR_OFFSETS IVB_CURSOR_OFFSETS
#define IVB_D_PLATFORM \ #define IVB_D_PLATFORM \
...@@ -325,11 +337,12 @@ static const struct intel_device_info intel_valleyview_info __initconst = { ...@@ -325,11 +337,12 @@ static const struct intel_device_info intel_valleyview_info __initconst = {
.has_snoop = true, .has_snoop = true,
.ring_mask = RENDER_RING | BSD_RING | BLT_RING, .ring_mask = RENDER_RING | BSD_RING | BLT_RING,
.display_mmio_offset = VLV_DISPLAY_BASE, .display_mmio_offset = VLV_DISPLAY_BASE,
GEN_DEFAULT_PAGE_SIZES,
GEN_DEFAULT_PIPEOFFSETS, GEN_DEFAULT_PIPEOFFSETS,
CURSOR_OFFSETS CURSOR_OFFSETS
}; };
#define HSW_FEATURES \ #define G75_FEATURES \
GEN7_FEATURES, \ GEN7_FEATURES, \
.ring_mask = RENDER_RING | BSD_RING | BLT_RING | VEBOX_RING, \ .ring_mask = RENDER_RING | BSD_RING | BLT_RING | VEBOX_RING, \
.has_ddi = 1, \ .has_ddi = 1, \
...@@ -341,7 +354,7 @@ static const struct intel_device_info intel_valleyview_info __initconst = { ...@@ -341,7 +354,7 @@ static const struct intel_device_info intel_valleyview_info __initconst = {
.has_runtime_pm = 1 .has_runtime_pm = 1
#define HSW_PLATFORM \ #define HSW_PLATFORM \
HSW_FEATURES, \ G75_FEATURES, \
.platform = INTEL_HASWELL, \ .platform = INTEL_HASWELL, \
.has_l3_dpf = 1 .has_l3_dpf = 1
...@@ -360,16 +373,18 @@ static const struct intel_device_info intel_haswell_gt3_info __initconst = { ...@@ -360,16 +373,18 @@ static const struct intel_device_info intel_haswell_gt3_info __initconst = {
.gt = 3, .gt = 3,
}; };
#define BDW_FEATURES \ #define GEN8_FEATURES \
HSW_FEATURES, \ G75_FEATURES, \
BDW_COLORS, \ BDW_COLORS, \
.page_sizes = I915_GTT_PAGE_SIZE_4K | \
I915_GTT_PAGE_SIZE_2M, \
.has_logical_ring_contexts = 1, \ .has_logical_ring_contexts = 1, \
.has_full_48bit_ppgtt = 1, \ .has_full_48bit_ppgtt = 1, \
.has_64bit_reloc = 1, \ .has_64bit_reloc = 1, \
.has_reset_engine = 1 .has_reset_engine = 1
#define BDW_PLATFORM \ #define BDW_PLATFORM \
BDW_FEATURES, \ GEN8_FEATURES, \
.gen = 8, \ .gen = 8, \
.platform = INTEL_BROADWELL .platform = INTEL_BROADWELL
...@@ -415,19 +430,31 @@ static const struct intel_device_info intel_cherryview_info __initconst = { ...@@ -415,19 +430,31 @@ static const struct intel_device_info intel_cherryview_info __initconst = {
.has_reset_engine = 1, .has_reset_engine = 1,
.has_snoop = true, .has_snoop = true,
.display_mmio_offset = VLV_DISPLAY_BASE, .display_mmio_offset = VLV_DISPLAY_BASE,
GEN_DEFAULT_PAGE_SIZES,
GEN_CHV_PIPEOFFSETS, GEN_CHV_PIPEOFFSETS,
CURSOR_OFFSETS, CURSOR_OFFSETS,
CHV_COLORS, CHV_COLORS,
}; };
#define SKL_PLATFORM \ #define GEN9_DEFAULT_PAGE_SIZES \
BDW_FEATURES, \ .page_sizes = I915_GTT_PAGE_SIZE_4K | \
.gen = 9, \ I915_GTT_PAGE_SIZE_64K | \
.platform = INTEL_SKYLAKE, \ I915_GTT_PAGE_SIZE_2M
#define GEN9_FEATURES \
GEN8_FEATURES, \
GEN9_DEFAULT_PAGE_SIZES, \
.has_logical_ring_preemption = 1, \
.has_csr = 1, \ .has_csr = 1, \
.has_guc = 1, \ .has_guc = 1, \
.has_ipc = 1, \
.ddb_size = 896 .ddb_size = 896
#define SKL_PLATFORM \
GEN9_FEATURES, \
.gen = 9, \
.platform = INTEL_SKYLAKE
static const struct intel_device_info intel_skylake_gt1_info __initconst = { static const struct intel_device_info intel_skylake_gt1_info __initconst = {
SKL_PLATFORM, SKL_PLATFORM,
.gt = 1, .gt = 1,
...@@ -463,6 +490,7 @@ static const struct intel_device_info intel_skylake_gt4_info __initconst = { ...@@ -463,6 +490,7 @@ static const struct intel_device_info intel_skylake_gt4_info __initconst = {
.has_ddi = 1, \ .has_ddi = 1, \
.has_fpga_dbg = 1, \ .has_fpga_dbg = 1, \
.has_fbc = 1, \ .has_fbc = 1, \
.has_psr = 1, \
.has_runtime_pm = 1, \ .has_runtime_pm = 1, \
.has_pooled_eu = 0, \ .has_pooled_eu = 0, \
.has_csr = 1, \ .has_csr = 1, \
...@@ -470,6 +498,7 @@ static const struct intel_device_info intel_skylake_gt4_info __initconst = { ...@@ -470,6 +498,7 @@ static const struct intel_device_info intel_skylake_gt4_info __initconst = {
.has_rc6 = 1, \ .has_rc6 = 1, \
.has_dp_mst = 1, \ .has_dp_mst = 1, \
.has_logical_ring_contexts = 1, \ .has_logical_ring_contexts = 1, \
.has_logical_ring_preemption = 1, \
.has_guc = 1, \ .has_guc = 1, \
.has_aliasing_ppgtt = 1, \ .has_aliasing_ppgtt = 1, \
.has_full_ppgtt = 1, \ .has_full_ppgtt = 1, \
...@@ -477,6 +506,7 @@ static const struct intel_device_info intel_skylake_gt4_info __initconst = { ...@@ -477,6 +506,7 @@ static const struct intel_device_info intel_skylake_gt4_info __initconst = {
.has_reset_engine = 1, \ .has_reset_engine = 1, \
.has_snoop = true, \ .has_snoop = true, \
.has_ipc = 1, \ .has_ipc = 1, \
GEN9_DEFAULT_PAGE_SIZES, \
GEN_DEFAULT_PIPEOFFSETS, \ GEN_DEFAULT_PIPEOFFSETS, \
IVB_CURSOR_OFFSETS, \ IVB_CURSOR_OFFSETS, \
BDW_COLORS BDW_COLORS
...@@ -491,17 +521,13 @@ static const struct intel_device_info intel_geminilake_info __initconst = { ...@@ -491,17 +521,13 @@ static const struct intel_device_info intel_geminilake_info __initconst = {
GEN9_LP_FEATURES, GEN9_LP_FEATURES,
.platform = INTEL_GEMINILAKE, .platform = INTEL_GEMINILAKE,
.ddb_size = 1024, .ddb_size = 1024,
.color = { .degamma_lut_size = 0, .gamma_lut_size = 1024 } GLK_COLORS,
}; };
#define KBL_PLATFORM \ #define KBL_PLATFORM \
BDW_FEATURES, \ GEN9_FEATURES, \
.gen = 9, \ .gen = 9, \
.platform = INTEL_KABYLAKE, \ .platform = INTEL_KABYLAKE
.has_csr = 1, \
.has_guc = 1, \
.has_ipc = 1, \
.ddb_size = 896
static const struct intel_device_info intel_kabylake_gt1_info __initconst = { static const struct intel_device_info intel_kabylake_gt1_info __initconst = {
KBL_PLATFORM, KBL_PLATFORM,
...@@ -520,13 +546,9 @@ static const struct intel_device_info intel_kabylake_gt3_info __initconst = { ...@@ -520,13 +546,9 @@ static const struct intel_device_info intel_kabylake_gt3_info __initconst = {
}; };
#define CFL_PLATFORM \ #define CFL_PLATFORM \
BDW_FEATURES, \ GEN9_FEATURES, \
.gen = 9, \ .gen = 9, \
.platform = INTEL_COFFEELAKE, \ .platform = INTEL_COFFEELAKE
.has_csr = 1, \
.has_guc = 1, \
.has_ipc = 1, \
.ddb_size = 896
static const struct intel_device_info intel_coffeelake_gt1_info __initconst = { static const struct intel_device_info intel_coffeelake_gt1_info __initconst = {
CFL_PLATFORM, CFL_PLATFORM,
...@@ -544,16 +566,17 @@ static const struct intel_device_info intel_coffeelake_gt3_info __initconst = { ...@@ -544,16 +566,17 @@ static const struct intel_device_info intel_coffeelake_gt3_info __initconst = {
.ring_mask = RENDER_RING | BSD_RING | BLT_RING | VEBOX_RING | BSD2_RING, .ring_mask = RENDER_RING | BSD_RING | BLT_RING | VEBOX_RING | BSD2_RING,
}; };
#define GEN10_FEATURES \
GEN9_FEATURES, \
.ddb_size = 1024, \
GLK_COLORS
static const struct intel_device_info intel_cannonlake_gt2_info __initconst = { static const struct intel_device_info intel_cannonlake_gt2_info __initconst = {
BDW_FEATURES, GEN10_FEATURES,
.is_alpha_support = 1, .is_alpha_support = 1,
.platform = INTEL_CANNONLAKE, .platform = INTEL_CANNONLAKE,
.gen = 10, .gen = 10,
.gt = 2, .gt = 2,
.ddb_size = 1024,
.has_csr = 1,
.has_ipc = 1,
.color = { .degamma_lut_size = 0, .gamma_lut_size = 1024 }
}; };
/* /*
......
...@@ -2371,6 +2371,9 @@ enum i915_power_well_id { ...@@ -2371,6 +2371,9 @@ enum i915_power_well_id {
#define GEN9_GAMT_ECO_REG_RW_IA _MMIO(0x4ab0) #define GEN9_GAMT_ECO_REG_RW_IA _MMIO(0x4ab0)
#define GAMT_ECO_ENABLE_IN_PLACE_DECOMPRESS (1<<18) #define GAMT_ECO_ENABLE_IN_PLACE_DECOMPRESS (1<<18)
#define GEN8_GAMW_ECO_DEV_RW_IA _MMIO(0x4080)
#define GAMW_ECO_ENABLE_64K_IPS_FIELD 0xF
#define GAMT_CHKN_BIT_REG _MMIO(0x4ab8) #define GAMT_CHKN_BIT_REG _MMIO(0x4ab8)
#define GAMT_CHKN_DISABLE_DYNAMIC_CREDIT_SHARING (1<<28) #define GAMT_CHKN_DISABLE_DYNAMIC_CREDIT_SHARING (1<<28)
#define GAMT_CHKN_DISABLE_I2M_CYCLE_ON_WR_PORT (1<<24) #define GAMT_CHKN_DISABLE_I2M_CYCLE_ON_WR_PORT (1<<24)
...@@ -3819,6 +3822,16 @@ enum { ...@@ -3819,6 +3822,16 @@ enum {
#define PWM2_GATING_DIS (1 << 14) #define PWM2_GATING_DIS (1 << 14)
#define PWM1_GATING_DIS (1 << 13) #define PWM1_GATING_DIS (1 << 13)
#define _CLKGATE_DIS_PSL_A 0x46520
#define _CLKGATE_DIS_PSL_B 0x46524
#define _CLKGATE_DIS_PSL_C 0x46528
#define DPF_GATING_DIS (1 << 10)
#define DPF_RAM_GATING_DIS (1 << 9)
#define DPFR_GATING_DIS (1 << 8)
#define CLKGATE_DIS_PSL(pipe) \
_MMIO_PIPE(pipe, _CLKGATE_DIS_PSL_A, _CLKGATE_DIS_PSL_B)
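/*
 * Illustrative check: _MMIO_PIPE() extrapolates from the A/B addresses,
 * so pipe C resolves to 0x46520 + 2 * (0x46524 - 0x46520) = 0x46528,
 * matching _CLKGATE_DIS_PSL_C above.
 */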
/* /*
* GEN10 clock gating regs * GEN10 clock gating regs
*/ */
...@@ -5671,8 +5684,7 @@ enum { ...@@ -5671,8 +5684,7 @@ enum {
#define CBR_PWM_CLOCK_MUX_SELECT (1<<30) #define CBR_PWM_CLOCK_MUX_SELECT (1<<30)
#define CBR4_VLV _MMIO(VLV_DISPLAY_BASE + 0x70450) #define CBR4_VLV _MMIO(VLV_DISPLAY_BASE + 0x70450)
#define CBR_DPLLBMD_PIPE_C (1<<29) #define CBR_DPLLBMD_PIPE(pipe) (1<<(7+(pipe)*11)) /* pipes B and C */
#define CBR_DPLLBMD_PIPE_B (1<<18)
/* FIFO watermark sizes etc */ /* FIFO watermark sizes etc */
#define G4X_FIFO_LINE_SIZE 64 #define G4X_FIFO_LINE_SIZE 64
...@@ -6993,6 +7005,12 @@ enum { ...@@ -6993,6 +7005,12 @@ enum {
#define GEN9_CS_DEBUG_MODE1 _MMIO(0x20ec) #define GEN9_CS_DEBUG_MODE1 _MMIO(0x20ec)
#define GEN9_CTX_PREEMPT_REG _MMIO(0x2248) #define GEN9_CTX_PREEMPT_REG _MMIO(0x2248)
#define GEN8_CS_CHICKEN1 _MMIO(0x2580) #define GEN8_CS_CHICKEN1 _MMIO(0x2580)
#define GEN9_PREEMPT_3D_OBJECT_LEVEL (1<<0)
#define GEN9_PREEMPT_GPGPU_LEVEL(hi, lo) (((hi) << 2) | ((lo) << 1))
#define GEN9_PREEMPT_GPGPU_MID_THREAD_LEVEL GEN9_PREEMPT_GPGPU_LEVEL(0, 0)
#define GEN9_PREEMPT_GPGPU_THREAD_GROUP_LEVEL GEN9_PREEMPT_GPGPU_LEVEL(0, 1)
#define GEN9_PREEMPT_GPGPU_COMMAND_LEVEL GEN9_PREEMPT_GPGPU_LEVEL(1, 0)
#define GEN9_PREEMPT_GPGPU_LEVEL_MASK GEN9_PREEMPT_GPGPU_LEVEL(1, 1)
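/*
 * For reference, the encodings above expand to: 3D object level = 0x1,
 * GPGPU mid-thread = 0x0, thread-group = 0x2, command level = 0x4, and the
 * GPGPU level mask = 0x6.
 */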
/* GEN7 chicken */ /* GEN7 chicken */
#define GEN7_COMMON_SLICE_CHICKEN1 _MMIO(0x7010) #define GEN7_COMMON_SLICE_CHICKEN1 _MMIO(0x7010)
...@@ -7164,9 +7182,6 @@ enum { ...@@ -7164,9 +7182,6 @@ enum {
#define SERR_INT _MMIO(0xc4040) #define SERR_INT _MMIO(0xc4040)
#define SERR_INT_POISON (1<<31) #define SERR_INT_POISON (1<<31)
#define SERR_INT_TRANS_C_FIFO_UNDERRUN (1<<6)
#define SERR_INT_TRANS_B_FIFO_UNDERRUN (1<<3)
#define SERR_INT_TRANS_A_FIFO_UNDERRUN (1<<0)
#define SERR_INT_TRANS_FIFO_UNDERRUN(pipe) (1<<((pipe)*3)) #define SERR_INT_TRANS_FIFO_UNDERRUN(pipe) (1<<((pipe)*3))
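/*
 * Illustrative check of the parameterised macro: PIPE_A (0) gives bit 0,
 * PIPE_B (1) gives bit 3 and PIPE_C (2) gives bit 6, i.e. the same bits as
 * the three per-transcoder definitions being removed.
 */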
/* digital port hotplug */ /* digital port hotplug */
......
...@@ -108,8 +108,6 @@ int i915_restore_state(struct drm_i915_private *dev_priv) ...@@ -108,8 +108,6 @@ int i915_restore_state(struct drm_i915_private *dev_priv)
mutex_lock(&dev_priv->drm.struct_mutex); mutex_lock(&dev_priv->drm.struct_mutex);
i915_gem_restore_fences(dev_priv);
if (IS_GEN4(dev_priv)) if (IS_GEN4(dev_priv))
pci_write_config_word(pdev, GCDGMBUS, pci_write_config_word(pdev, GCDGMBUS,
dev_priv->regfile.saveGCDGMBUS); dev_priv->regfile.saveGCDGMBUS);
......
...@@ -49,7 +49,7 @@ static u32 calc_residency(struct drm_i915_private *dev_priv, ...@@ -49,7 +49,7 @@ static u32 calc_residency(struct drm_i915_private *dev_priv,
static ssize_t static ssize_t
show_rc6_mask(struct device *kdev, struct device_attribute *attr, char *buf) show_rc6_mask(struct device *kdev, struct device_attribute *attr, char *buf)
{ {
return snprintf(buf, PAGE_SIZE, "%x\n", intel_enable_rc6()); return snprintf(buf, PAGE_SIZE, "%x\n", intel_rc6_enabled());
} }
static ssize_t static ssize_t
...@@ -246,7 +246,7 @@ static ssize_t gt_act_freq_mhz_show(struct device *kdev, ...@@ -246,7 +246,7 @@ static ssize_t gt_act_freq_mhz_show(struct device *kdev,
intel_runtime_pm_get(dev_priv); intel_runtime_pm_get(dev_priv);
mutex_lock(&dev_priv->rps.hw_lock); mutex_lock(&dev_priv->pcu_lock);
if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) { if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
u32 freq; u32 freq;
freq = vlv_punit_read(dev_priv, PUNIT_REG_GPU_FREQ_STS); freq = vlv_punit_read(dev_priv, PUNIT_REG_GPU_FREQ_STS);
...@@ -261,7 +261,7 @@ static ssize_t gt_act_freq_mhz_show(struct device *kdev, ...@@ -261,7 +261,7 @@ static ssize_t gt_act_freq_mhz_show(struct device *kdev,
ret = (rpstat & GEN6_CAGF_MASK) >> GEN6_CAGF_SHIFT; ret = (rpstat & GEN6_CAGF_MASK) >> GEN6_CAGF_SHIFT;
ret = intel_gpu_freq(dev_priv, ret); ret = intel_gpu_freq(dev_priv, ret);
} }
mutex_unlock(&dev_priv->rps.hw_lock); mutex_unlock(&dev_priv->pcu_lock);
intel_runtime_pm_put(dev_priv); intel_runtime_pm_put(dev_priv);
...@@ -275,7 +275,7 @@ static ssize_t gt_cur_freq_mhz_show(struct device *kdev, ...@@ -275,7 +275,7 @@ static ssize_t gt_cur_freq_mhz_show(struct device *kdev,
return snprintf(buf, PAGE_SIZE, "%d\n", return snprintf(buf, PAGE_SIZE, "%d\n",
intel_gpu_freq(dev_priv, intel_gpu_freq(dev_priv,
dev_priv->rps.cur_freq)); dev_priv->gt_pm.rps.cur_freq));
} }
static ssize_t gt_boost_freq_mhz_show(struct device *kdev, struct device_attribute *attr, char *buf) static ssize_t gt_boost_freq_mhz_show(struct device *kdev, struct device_attribute *attr, char *buf)
...@@ -284,7 +284,7 @@ static ssize_t gt_boost_freq_mhz_show(struct device *kdev, struct device_attribu ...@@ -284,7 +284,7 @@ static ssize_t gt_boost_freq_mhz_show(struct device *kdev, struct device_attribu
return snprintf(buf, PAGE_SIZE, "%d\n", return snprintf(buf, PAGE_SIZE, "%d\n",
intel_gpu_freq(dev_priv, intel_gpu_freq(dev_priv,
dev_priv->rps.boost_freq)); dev_priv->gt_pm.rps.boost_freq));
} }
static ssize_t gt_boost_freq_mhz_store(struct device *kdev, static ssize_t gt_boost_freq_mhz_store(struct device *kdev,
...@@ -292,6 +292,7 @@ static ssize_t gt_boost_freq_mhz_store(struct device *kdev, ...@@ -292,6 +292,7 @@ static ssize_t gt_boost_freq_mhz_store(struct device *kdev,
const char *buf, size_t count) const char *buf, size_t count)
{ {
struct drm_i915_private *dev_priv = kdev_minor_to_i915(kdev); struct drm_i915_private *dev_priv = kdev_minor_to_i915(kdev);
struct intel_rps *rps = &dev_priv->gt_pm.rps;
u32 val; u32 val;
ssize_t ret; ssize_t ret;
...@@ -301,12 +302,12 @@ static ssize_t gt_boost_freq_mhz_store(struct device *kdev, ...@@ -301,12 +302,12 @@ static ssize_t gt_boost_freq_mhz_store(struct device *kdev,
/* Validate against (static) hardware limits */ /* Validate against (static) hardware limits */
val = intel_freq_opcode(dev_priv, val); val = intel_freq_opcode(dev_priv, val);
if (val < dev_priv->rps.min_freq || val > dev_priv->rps.max_freq) if (val < rps->min_freq || val > rps->max_freq)
return -EINVAL; return -EINVAL;
mutex_lock(&dev_priv->rps.hw_lock); mutex_lock(&dev_priv->pcu_lock);
dev_priv->rps.boost_freq = val; rps->boost_freq = val;
mutex_unlock(&dev_priv->rps.hw_lock); mutex_unlock(&dev_priv->pcu_lock);
return count; return count;
} }
...@@ -318,7 +319,7 @@ static ssize_t vlv_rpe_freq_mhz_show(struct device *kdev, ...@@ -318,7 +319,7 @@ static ssize_t vlv_rpe_freq_mhz_show(struct device *kdev,
return snprintf(buf, PAGE_SIZE, "%d\n", return snprintf(buf, PAGE_SIZE, "%d\n",
intel_gpu_freq(dev_priv, intel_gpu_freq(dev_priv,
dev_priv->rps.efficient_freq)); dev_priv->gt_pm.rps.efficient_freq));
} }
static ssize_t gt_max_freq_mhz_show(struct device *kdev, struct device_attribute *attr, char *buf) static ssize_t gt_max_freq_mhz_show(struct device *kdev, struct device_attribute *attr, char *buf)
...@@ -327,7 +328,7 @@ static ssize_t gt_max_freq_mhz_show(struct device *kdev, struct device_attribute ...@@ -327,7 +328,7 @@ static ssize_t gt_max_freq_mhz_show(struct device *kdev, struct device_attribute
return snprintf(buf, PAGE_SIZE, "%d\n", return snprintf(buf, PAGE_SIZE, "%d\n",
intel_gpu_freq(dev_priv, intel_gpu_freq(dev_priv,
dev_priv->rps.max_freq_softlimit)); dev_priv->gt_pm.rps.max_freq_softlimit));
} }
static ssize_t gt_max_freq_mhz_store(struct device *kdev, static ssize_t gt_max_freq_mhz_store(struct device *kdev,
...@@ -335,6 +336,7 @@ static ssize_t gt_max_freq_mhz_store(struct device *kdev, ...@@ -335,6 +336,7 @@ static ssize_t gt_max_freq_mhz_store(struct device *kdev,
const char *buf, size_t count) const char *buf, size_t count)
{ {
struct drm_i915_private *dev_priv = kdev_minor_to_i915(kdev); struct drm_i915_private *dev_priv = kdev_minor_to_i915(kdev);
struct intel_rps *rps = &dev_priv->gt_pm.rps;
u32 val; u32 val;
ssize_t ret; ssize_t ret;
...@@ -344,34 +346,34 @@ static ssize_t gt_max_freq_mhz_store(struct device *kdev, ...@@ -344,34 +346,34 @@ static ssize_t gt_max_freq_mhz_store(struct device *kdev,
intel_runtime_pm_get(dev_priv); intel_runtime_pm_get(dev_priv);
mutex_lock(&dev_priv->rps.hw_lock); mutex_lock(&dev_priv->pcu_lock);
val = intel_freq_opcode(dev_priv, val); val = intel_freq_opcode(dev_priv, val);
if (val < dev_priv->rps.min_freq || if (val < rps->min_freq ||
val > dev_priv->rps.max_freq || val > rps->max_freq ||
val < dev_priv->rps.min_freq_softlimit) { val < rps->min_freq_softlimit) {
mutex_unlock(&dev_priv->rps.hw_lock); mutex_unlock(&dev_priv->pcu_lock);
intel_runtime_pm_put(dev_priv); intel_runtime_pm_put(dev_priv);
return -EINVAL; return -EINVAL;
} }
if (val > dev_priv->rps.rp0_freq) if (val > rps->rp0_freq)
DRM_DEBUG("User requested overclocking to %d\n", DRM_DEBUG("User requested overclocking to %d\n",
intel_gpu_freq(dev_priv, val)); intel_gpu_freq(dev_priv, val));
dev_priv->rps.max_freq_softlimit = val; rps->max_freq_softlimit = val;
val = clamp_t(int, dev_priv->rps.cur_freq, val = clamp_t(int, rps->cur_freq,
dev_priv->rps.min_freq_softlimit, rps->min_freq_softlimit,
dev_priv->rps.max_freq_softlimit); rps->max_freq_softlimit);
/* We still need *_set_rps to process the new max_delay and /* We still need *_set_rps to process the new max_delay and
* update the interrupt limits and PMINTRMSK even though * update the interrupt limits and PMINTRMSK even though
* frequency request may be unchanged. */ * frequency request may be unchanged. */
ret = intel_set_rps(dev_priv, val); ret = intel_set_rps(dev_priv, val);
mutex_unlock(&dev_priv->rps.hw_lock); mutex_unlock(&dev_priv->pcu_lock);
intel_runtime_pm_put(dev_priv); intel_runtime_pm_put(dev_priv);
...@@ -384,7 +386,7 @@ static ssize_t gt_min_freq_mhz_show(struct device *kdev, struct device_attribute ...@@ -384,7 +386,7 @@ static ssize_t gt_min_freq_mhz_show(struct device *kdev, struct device_attribute
return snprintf(buf, PAGE_SIZE, "%d\n", return snprintf(buf, PAGE_SIZE, "%d\n",
intel_gpu_freq(dev_priv, intel_gpu_freq(dev_priv,
dev_priv->rps.min_freq_softlimit)); dev_priv->gt_pm.rps.min_freq_softlimit));
} }
static ssize_t gt_min_freq_mhz_store(struct device *kdev, static ssize_t gt_min_freq_mhz_store(struct device *kdev,
...@@ -392,6 +394,7 @@ static ssize_t gt_min_freq_mhz_store(struct device *kdev, ...@@ -392,6 +394,7 @@ static ssize_t gt_min_freq_mhz_store(struct device *kdev,
const char *buf, size_t count) const char *buf, size_t count)
{ {
struct drm_i915_private *dev_priv = kdev_minor_to_i915(kdev); struct drm_i915_private *dev_priv = kdev_minor_to_i915(kdev);
struct intel_rps *rps = &dev_priv->gt_pm.rps;
u32 val; u32 val;
ssize_t ret; ssize_t ret;
...@@ -401,30 +404,30 @@ static ssize_t gt_min_freq_mhz_store(struct device *kdev, ...@@ -401,30 +404,30 @@ static ssize_t gt_min_freq_mhz_store(struct device *kdev,
intel_runtime_pm_get(dev_priv); intel_runtime_pm_get(dev_priv);
mutex_lock(&dev_priv->rps.hw_lock); mutex_lock(&dev_priv->pcu_lock);
val = intel_freq_opcode(dev_priv, val); val = intel_freq_opcode(dev_priv, val);
if (val < dev_priv->rps.min_freq || if (val < rps->min_freq ||
val > dev_priv->rps.max_freq || val > rps->max_freq ||
val > dev_priv->rps.max_freq_softlimit) { val > rps->max_freq_softlimit) {
mutex_unlock(&dev_priv->rps.hw_lock); mutex_unlock(&dev_priv->pcu_lock);
intel_runtime_pm_put(dev_priv); intel_runtime_pm_put(dev_priv);
return -EINVAL; return -EINVAL;
} }
dev_priv->rps.min_freq_softlimit = val; rps->min_freq_softlimit = val;
val = clamp_t(int, dev_priv->rps.cur_freq, val = clamp_t(int, rps->cur_freq,
dev_priv->rps.min_freq_softlimit, rps->min_freq_softlimit,
dev_priv->rps.max_freq_softlimit); rps->max_freq_softlimit);
/* We still need *_set_rps to process the new min_delay and /* We still need *_set_rps to process the new min_delay and
* update the interrupt limits and PMINTRMSK even though * update the interrupt limits and PMINTRMSK even though
* frequency request may be unchanged. */ * frequency request may be unchanged. */
ret = intel_set_rps(dev_priv, val); ret = intel_set_rps(dev_priv, val);
mutex_unlock(&dev_priv->rps.hw_lock); mutex_unlock(&dev_priv->pcu_lock);
intel_runtime_pm_put(dev_priv); intel_runtime_pm_put(dev_priv);
...@@ -448,14 +451,15 @@ static DEVICE_ATTR(gt_RPn_freq_mhz, S_IRUGO, gt_rp_mhz_show, NULL); ...@@ -448,14 +451,15 @@ static DEVICE_ATTR(gt_RPn_freq_mhz, S_IRUGO, gt_rp_mhz_show, NULL);
static ssize_t gt_rp_mhz_show(struct device *kdev, struct device_attribute *attr, char *buf) static ssize_t gt_rp_mhz_show(struct device *kdev, struct device_attribute *attr, char *buf)
{ {
struct drm_i915_private *dev_priv = kdev_minor_to_i915(kdev); struct drm_i915_private *dev_priv = kdev_minor_to_i915(kdev);
struct intel_rps *rps = &dev_priv->gt_pm.rps;
u32 val; u32 val;
if (attr == &dev_attr_gt_RP0_freq_mhz) if (attr == &dev_attr_gt_RP0_freq_mhz)
val = intel_gpu_freq(dev_priv, dev_priv->rps.rp0_freq); val = intel_gpu_freq(dev_priv, rps->rp0_freq);
else if (attr == &dev_attr_gt_RP1_freq_mhz) else if (attr == &dev_attr_gt_RP1_freq_mhz)
val = intel_gpu_freq(dev_priv, dev_priv->rps.rp1_freq); val = intel_gpu_freq(dev_priv, rps->rp1_freq);
else if (attr == &dev_attr_gt_RPn_freq_mhz) else if (attr == &dev_attr_gt_RPn_freq_mhz)
val = intel_gpu_freq(dev_priv, dev_priv->rps.min_freq); val = intel_gpu_freq(dev_priv, rps->min_freq);
else else
BUG(); BUG();
......
...@@ -345,7 +345,7 @@ TRACE_EVENT(i915_gem_object_create, ...@@ -345,7 +345,7 @@ TRACE_EVENT(i915_gem_object_create,
TP_STRUCT__entry( TP_STRUCT__entry(
__field(struct drm_i915_gem_object *, obj) __field(struct drm_i915_gem_object *, obj)
__field(u32, size) __field(u64, size)
), ),
TP_fast_assign( TP_fast_assign(
...@@ -353,7 +353,7 @@ TRACE_EVENT(i915_gem_object_create, ...@@ -353,7 +353,7 @@ TRACE_EVENT(i915_gem_object_create,
__entry->size = obj->base.size; __entry->size = obj->base.size;
), ),
TP_printk("obj=%p, size=%u", __entry->obj, __entry->size) TP_printk("obj=%p, size=0x%llx", __entry->obj, __entry->size)
); );
TRACE_EVENT(i915_gem_shrink, TRACE_EVENT(i915_gem_shrink,
...@@ -384,7 +384,7 @@ TRACE_EVENT(i915_vma_bind, ...@@ -384,7 +384,7 @@ TRACE_EVENT(i915_vma_bind,
__field(struct drm_i915_gem_object *, obj) __field(struct drm_i915_gem_object *, obj)
__field(struct i915_address_space *, vm) __field(struct i915_address_space *, vm)
__field(u64, offset) __field(u64, offset)
__field(u32, size) __field(u64, size)
__field(unsigned, flags) __field(unsigned, flags)
), ),
...@@ -396,7 +396,7 @@ TRACE_EVENT(i915_vma_bind, ...@@ -396,7 +396,7 @@ TRACE_EVENT(i915_vma_bind,
__entry->flags = flags; __entry->flags = flags;
), ),
TP_printk("obj=%p, offset=%016llx size=%x%s vm=%p", TP_printk("obj=%p, offset=0x%016llx size=0x%llx%s vm=%p",
__entry->obj, __entry->offset, __entry->size, __entry->obj, __entry->offset, __entry->size,
__entry->flags & PIN_MAPPABLE ? ", mappable" : "", __entry->flags & PIN_MAPPABLE ? ", mappable" : "",
__entry->vm) __entry->vm)
...@@ -410,7 +410,7 @@ TRACE_EVENT(i915_vma_unbind, ...@@ -410,7 +410,7 @@ TRACE_EVENT(i915_vma_unbind,
__field(struct drm_i915_gem_object *, obj) __field(struct drm_i915_gem_object *, obj)
__field(struct i915_address_space *, vm) __field(struct i915_address_space *, vm)
__field(u64, offset) __field(u64, offset)
__field(u32, size) __field(u64, size)
), ),
TP_fast_assign( TP_fast_assign(
...@@ -420,18 +420,18 @@ TRACE_EVENT(i915_vma_unbind, ...@@ -420,18 +420,18 @@ TRACE_EVENT(i915_vma_unbind,
__entry->size = vma->node.size; __entry->size = vma->node.size;
), ),
TP_printk("obj=%p, offset=%016llx size=%x vm=%p", TP_printk("obj=%p, offset=0x%016llx size=0x%llx vm=%p",
__entry->obj, __entry->offset, __entry->size, __entry->vm) __entry->obj, __entry->offset, __entry->size, __entry->vm)
); );
TRACE_EVENT(i915_gem_object_pwrite, TRACE_EVENT(i915_gem_object_pwrite,
TP_PROTO(struct drm_i915_gem_object *obj, u32 offset, u32 len), TP_PROTO(struct drm_i915_gem_object *obj, u64 offset, u64 len),
TP_ARGS(obj, offset, len), TP_ARGS(obj, offset, len),
TP_STRUCT__entry( TP_STRUCT__entry(
__field(struct drm_i915_gem_object *, obj) __field(struct drm_i915_gem_object *, obj)
__field(u32, offset) __field(u64, offset)
__field(u32, len) __field(u64, len)
), ),
TP_fast_assign( TP_fast_assign(
...@@ -440,18 +440,18 @@ TRACE_EVENT(i915_gem_object_pwrite, ...@@ -440,18 +440,18 @@ TRACE_EVENT(i915_gem_object_pwrite,
__entry->len = len; __entry->len = len;
), ),
TP_printk("obj=%p, offset=%u, len=%u", TP_printk("obj=%p, offset=0x%llx, len=0x%llx",
__entry->obj, __entry->offset, __entry->len) __entry->obj, __entry->offset, __entry->len)
); );
TRACE_EVENT(i915_gem_object_pread, TRACE_EVENT(i915_gem_object_pread,
TP_PROTO(struct drm_i915_gem_object *obj, u32 offset, u32 len), TP_PROTO(struct drm_i915_gem_object *obj, u64 offset, u64 len),
TP_ARGS(obj, offset, len), TP_ARGS(obj, offset, len),
TP_STRUCT__entry( TP_STRUCT__entry(
__field(struct drm_i915_gem_object *, obj) __field(struct drm_i915_gem_object *, obj)
__field(u32, offset) __field(u64, offset)
__field(u32, len) __field(u64, len)
), ),
TP_fast_assign( TP_fast_assign(
...@@ -460,17 +460,17 @@ TRACE_EVENT(i915_gem_object_pread, ...@@ -460,17 +460,17 @@ TRACE_EVENT(i915_gem_object_pread,
__entry->len = len; __entry->len = len;
), ),
TP_printk("obj=%p, offset=%u, len=%u", TP_printk("obj=%p, offset=0x%llx, len=0x%llx",
__entry->obj, __entry->offset, __entry->len) __entry->obj, __entry->offset, __entry->len)
); );
TRACE_EVENT(i915_gem_object_fault, TRACE_EVENT(i915_gem_object_fault,
TP_PROTO(struct drm_i915_gem_object *obj, u32 index, bool gtt, bool write), TP_PROTO(struct drm_i915_gem_object *obj, u64 index, bool gtt, bool write),
TP_ARGS(obj, index, gtt, write), TP_ARGS(obj, index, gtt, write),
TP_STRUCT__entry( TP_STRUCT__entry(
__field(struct drm_i915_gem_object *, obj) __field(struct drm_i915_gem_object *, obj)
__field(u32, index) __field(u64, index)
__field(bool, gtt) __field(bool, gtt)
__field(bool, write) __field(bool, write)
), ),
...@@ -482,7 +482,7 @@ TRACE_EVENT(i915_gem_object_fault, ...@@ -482,7 +482,7 @@ TRACE_EVENT(i915_gem_object_fault,
__entry->write = write; __entry->write = write;
), ),
TP_printk("obj=%p, %s index=%u %s", TP_printk("obj=%p, %s index=%llu %s",
__entry->obj, __entry->obj,
__entry->gtt ? "GTT" : "CPU", __entry->gtt ? "GTT" : "CPU",
__entry->index, __entry->index,
...@@ -515,14 +515,14 @@ DEFINE_EVENT(i915_gem_object, i915_gem_object_destroy, ...@@ -515,14 +515,14 @@ DEFINE_EVENT(i915_gem_object, i915_gem_object_destroy,
); );
TRACE_EVENT(i915_gem_evict, TRACE_EVENT(i915_gem_evict,
TP_PROTO(struct i915_address_space *vm, u32 size, u32 align, unsigned int flags), TP_PROTO(struct i915_address_space *vm, u64 size, u64 align, unsigned int flags),
TP_ARGS(vm, size, align, flags), TP_ARGS(vm, size, align, flags),
TP_STRUCT__entry( TP_STRUCT__entry(
__field(u32, dev) __field(u32, dev)
__field(struct i915_address_space *, vm) __field(struct i915_address_space *, vm)
__field(u32, size) __field(u64, size)
__field(u32, align) __field(u64, align)
__field(unsigned int, flags) __field(unsigned int, flags)
), ),
...@@ -534,43 +534,11 @@ TRACE_EVENT(i915_gem_evict, ...@@ -534,43 +534,11 @@ TRACE_EVENT(i915_gem_evict,
__entry->flags = flags; __entry->flags = flags;
), ),
TP_printk("dev=%d, vm=%p, size=%d, align=%d %s", TP_printk("dev=%d, vm=%p, size=0x%llx, align=0x%llx %s",
__entry->dev, __entry->vm, __entry->size, __entry->align, __entry->dev, __entry->vm, __entry->size, __entry->align,
__entry->flags & PIN_MAPPABLE ? ", mappable" : "") __entry->flags & PIN_MAPPABLE ? ", mappable" : "")
); );
TRACE_EVENT(i915_gem_evict_everything,
TP_PROTO(struct drm_device *dev),
TP_ARGS(dev),
TP_STRUCT__entry(
__field(u32, dev)
),
TP_fast_assign(
__entry->dev = dev->primary->index;
),
TP_printk("dev=%d", __entry->dev)
);
TRACE_EVENT(i915_gem_evict_vm,
TP_PROTO(struct i915_address_space *vm),
TP_ARGS(vm),
TP_STRUCT__entry(
__field(u32, dev)
__field(struct i915_address_space *, vm)
),
TP_fast_assign(
__entry->dev = vm->i915->drm.primary->index;
__entry->vm = vm;
),
TP_printk("dev=%d, vm=%p", __entry->dev, __entry->vm)
);
TRACE_EVENT(i915_gem_evict_node, TRACE_EVENT(i915_gem_evict_node,
TP_PROTO(struct i915_address_space *vm, struct drm_mm_node *node, unsigned int flags), TP_PROTO(struct i915_address_space *vm, struct drm_mm_node *node, unsigned int flags),
TP_ARGS(vm, node, flags), TP_ARGS(vm, node, flags),
...@@ -593,12 +561,29 @@ TRACE_EVENT(i915_gem_evict_node, ...@@ -593,12 +561,29 @@ TRACE_EVENT(i915_gem_evict_node,
__entry->flags = flags; __entry->flags = flags;
), ),
TP_printk("dev=%d, vm=%p, start=%llx size=%llx, color=%lx, flags=%x", TP_printk("dev=%d, vm=%p, start=0x%llx size=0x%llx, color=0x%lx, flags=%x",
__entry->dev, __entry->vm, __entry->dev, __entry->vm,
__entry->start, __entry->size, __entry->start, __entry->size,
__entry->color, __entry->flags) __entry->color, __entry->flags)
); );
TRACE_EVENT(i915_gem_evict_vm,
TP_PROTO(struct i915_address_space *vm),
TP_ARGS(vm),
TP_STRUCT__entry(
__field(u32, dev)
__field(struct i915_address_space *, vm)
),
TP_fast_assign(
__entry->dev = vm->i915->drm.primary->index;
__entry->vm = vm;
),
TP_printk("dev=%d, vm=%p", __entry->dev, __entry->vm)
);
TRACE_EVENT(i915_gem_ring_sync_to, TRACE_EVENT(i915_gem_ring_sync_to,
TP_PROTO(struct drm_i915_gem_request *to, TP_PROTO(struct drm_i915_gem_request *to,
struct drm_i915_gem_request *from), struct drm_i915_gem_request *from),
...@@ -649,29 +634,6 @@ TRACE_EVENT(i915_gem_request_queue, ...@@ -649,29 +634,6 @@ TRACE_EVENT(i915_gem_request_queue,
__entry->flags) __entry->flags)
); );
TRACE_EVENT(i915_gem_ring_flush,
TP_PROTO(struct drm_i915_gem_request *req, u32 invalidate, u32 flush),
TP_ARGS(req, invalidate, flush),
TP_STRUCT__entry(
__field(u32, dev)
__field(u32, ring)
__field(u32, invalidate)
__field(u32, flush)
),
TP_fast_assign(
__entry->dev = req->i915->drm.primary->index;
__entry->ring = req->engine->id;
__entry->invalidate = invalidate;
__entry->flush = flush;
),
TP_printk("dev=%u, ring=%x, invalidate=%04x, flush=%04x",
__entry->dev, __entry->ring,
__entry->invalidate, __entry->flush)
);
DECLARE_EVENT_CLASS(i915_gem_request, DECLARE_EVENT_CLASS(i915_gem_request,
TP_PROTO(struct drm_i915_gem_request *req), TP_PROTO(struct drm_i915_gem_request *req),
TP_ARGS(req), TP_ARGS(req),
......
...@@ -99,6 +99,11 @@ ...@@ -99,6 +99,11 @@
__T; \ __T; \
}) })
static inline u64 ptr_to_u64(const void *ptr)
{
return (uintptr_t)ptr;
}
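/*
 * Example round trip (illustrative) with the u64_to_ptr() helper below; the
 * store side appears in the GuC stage descriptor setup in this series, and
 * the pointer could later be recovered with:
 *
 *	desc->desc_private = ptr_to_u64(client);
 *	client = u64_to_ptr(struct i915_guc_client, desc->desc_private);
 */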
#define u64_to_ptr(T, x) ({ \ #define u64_to_ptr(T, x) ({ \
typecheck(u64, x); \ typecheck(u64, x); \
(T *)(uintptr_t)(x); \ (T *)(uintptr_t)(x); \
...@@ -119,4 +124,17 @@ static inline void __list_del_many(struct list_head *head, ...@@ -119,4 +124,17 @@ static inline void __list_del_many(struct list_head *head,
WRITE_ONCE(head->next, first); WRITE_ONCE(head->next, first);
} }
/*
* Wait until the work is finally complete, even if it tries to postpone
* by requeueing itself. Note that if the worker never cancels itself,
* we will spin forever.
*/
static inline void drain_delayed_work(struct delayed_work *dw)
{
do {
while (flush_delayed_work(dw))
;
} while (delayed_work_pending(dw));
}
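/*
 * Example (illustrative only): draining a self-rearming worker before
 * teardown, so a work item that re-queues itself cannot fire after its
 * resources are gone. The worker name below is hypothetical.
 *
 *	drain_delayed_work(&i915->gt.retire_work);
 */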
#endif /* !__I915_UTILS_H */ #endif /* !__I915_UTILS_H */
...@@ -266,6 +266,8 @@ int i915_vma_bind(struct i915_vma *vma, enum i915_cache_level cache_level, ...@@ -266,6 +266,8 @@ int i915_vma_bind(struct i915_vma *vma, enum i915_cache_level cache_level,
if (bind_flags == 0) if (bind_flags == 0)
return 0; return 0;
GEM_BUG_ON(!vma->pages);
trace_i915_vma_bind(vma, bind_flags); trace_i915_vma_bind(vma, bind_flags);
ret = vma->vm->bind_vma(vma, cache_level, bind_flags); ret = vma->vm->bind_vma(vma, cache_level, bind_flags);
if (ret) if (ret)
...@@ -278,13 +280,16 @@ int i915_vma_bind(struct i915_vma *vma, enum i915_cache_level cache_level, ...@@ -278,13 +280,16 @@ int i915_vma_bind(struct i915_vma *vma, enum i915_cache_level cache_level,
void __iomem *i915_vma_pin_iomap(struct i915_vma *vma) void __iomem *i915_vma_pin_iomap(struct i915_vma *vma)
{ {
void __iomem *ptr; void __iomem *ptr;
int err;
/* Access through the GTT requires the device to be awake. */ /* Access through the GTT requires the device to be awake. */
assert_rpm_wakelock_held(vma->vm->i915); assert_rpm_wakelock_held(vma->vm->i915);
lockdep_assert_held(&vma->vm->i915->drm.struct_mutex); lockdep_assert_held(&vma->vm->i915->drm.struct_mutex);
if (WARN_ON(!i915_vma_is_map_and_fenceable(vma))) if (WARN_ON(!i915_vma_is_map_and_fenceable(vma))) {
return IO_ERR_PTR(-ENODEV); err = -ENODEV;
goto err;
}
GEM_BUG_ON(!i915_vma_is_ggtt(vma)); GEM_BUG_ON(!i915_vma_is_ggtt(vma));
GEM_BUG_ON((vma->flags & I915_VMA_GLOBAL_BIND) == 0); GEM_BUG_ON((vma->flags & I915_VMA_GLOBAL_BIND) == 0);
...@@ -294,14 +299,36 @@ void __iomem *i915_vma_pin_iomap(struct i915_vma *vma) ...@@ -294,14 +299,36 @@ void __iomem *i915_vma_pin_iomap(struct i915_vma *vma)
ptr = io_mapping_map_wc(&i915_vm_to_ggtt(vma->vm)->mappable, ptr = io_mapping_map_wc(&i915_vm_to_ggtt(vma->vm)->mappable,
vma->node.start, vma->node.start,
vma->node.size); vma->node.size);
if (ptr == NULL) if (ptr == NULL) {
return IO_ERR_PTR(-ENOMEM); err = -ENOMEM;
goto err;
}
vma->iomap = ptr; vma->iomap = ptr;
} }
__i915_vma_pin(vma); __i915_vma_pin(vma);
err = i915_vma_pin_fence(vma);
if (err)
goto err_unpin;
return ptr; return ptr;
err_unpin:
__i915_vma_unpin(vma);
err:
return IO_ERR_PTR(err);
}
void i915_vma_unpin_iomap(struct i915_vma *vma)
{
lockdep_assert_held(&vma->obj->base.dev->struct_mutex);
GEM_BUG_ON(vma->iomap == NULL);
i915_vma_unpin_fence(vma);
i915_vma_unpin(vma);
} }
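/*
 * Illustrative pairing of the two calls (the caller shown is hypothetical):
 *
 *	ptr = i915_vma_pin_iomap(vma);
 *	if (IS_ERR(ptr))
 *		return PTR_ERR(ptr);
 *	...write through the GGTT mapping...
 *	i915_vma_unpin_iomap(vma);
 *
 * With this change the fence is acquired in i915_vma_pin_iomap() and
 * released in i915_vma_unpin_iomap(), so it is held for the lifetime of the
 * mapping rather than being managed separately by each caller.
 */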
void i915_vma_unpin_and_release(struct i915_vma **p_vma) void i915_vma_unpin_and_release(struct i915_vma **p_vma)
...@@ -471,25 +498,64 @@ i915_vma_insert(struct i915_vma *vma, u64 size, u64 alignment, u64 flags) ...@@ -471,25 +498,64 @@ i915_vma_insert(struct i915_vma *vma, u64 size, u64 alignment, u64 flags)
if (ret) if (ret)
return ret; return ret;
GEM_BUG_ON(vma->pages);
ret = vma->vm->set_pages(vma);
if (ret)
goto err_unpin;
if (flags & PIN_OFFSET_FIXED) { if (flags & PIN_OFFSET_FIXED) {
u64 offset = flags & PIN_OFFSET_MASK; u64 offset = flags & PIN_OFFSET_MASK;
if (!IS_ALIGNED(offset, alignment) || if (!IS_ALIGNED(offset, alignment) ||
range_overflows(offset, size, end)) { range_overflows(offset, size, end)) {
ret = -EINVAL; ret = -EINVAL;
goto err_unpin; goto err_clear;
} }
ret = i915_gem_gtt_reserve(vma->vm, &vma->node, ret = i915_gem_gtt_reserve(vma->vm, &vma->node,
size, offset, obj->cache_level, size, offset, obj->cache_level,
flags); flags);
if (ret) if (ret)
goto err_unpin; goto err_clear;
} else { } else {
/*
* We only support huge GTT pages through the 48b PPGTT;
* however, we also don't want to force any alignment for
* objects which need to be tightly packed into the low 32 bits.
*
* Note that we assume that the GGTT is limited to 4GiB for the
* foreseeable future. See also i915_ggtt_offset().
*/
if (upper_32_bits(end - 1) &&
vma->page_sizes.sg > I915_GTT_PAGE_SIZE) {
/*
* We can't mix 64K and 4K PTEs in the same page-table
* (2M block), and so to avoid the ugliness and
* complexity of coloring we opt for just aligning 64K
* objects to 2M.
*/
u64 page_alignment =
rounddown_pow_of_two(vma->page_sizes.sg |
I915_GTT_PAGE_SIZE_2M);
/*
* Check we don't expand for the limited Global GTT
* (mappable aperture is even more precious!). This
* also checks that we exclude the aliasing-ppgtt.
*/
GEM_BUG_ON(i915_vma_is_ggtt(vma));
alignment = max(alignment, page_alignment);
if (vma->page_sizes.sg & I915_GTT_PAGE_SIZE_64K)
size = round_up(size, I915_GTT_PAGE_SIZE_2M);
}
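/*
 * Worked example (illustrative): with page_sizes.sg = 64K | 4K (0x11000),
 * OR'ing in I915_GTT_PAGE_SIZE_2M gives 0x211000 and
 * rounddown_pow_of_two() of that is 0x200000, so such an object is placed
 * on a 2M boundary and, because the 64K bit is set, its size is also
 * padded up to a 2M multiple.
 */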
ret = i915_gem_gtt_insert(vma->vm, &vma->node, ret = i915_gem_gtt_insert(vma->vm, &vma->node,
size, alignment, obj->cache_level, size, alignment, obj->cache_level,
start, end, flags); start, end, flags);
if (ret) if (ret)
goto err_unpin; goto err_clear;
GEM_BUG_ON(vma->node.start < start); GEM_BUG_ON(vma->node.start < start);
GEM_BUG_ON(vma->node.start + vma->node.size > end); GEM_BUG_ON(vma->node.start + vma->node.size > end);
...@@ -504,6 +570,8 @@ i915_vma_insert(struct i915_vma *vma, u64 size, u64 alignment, u64 flags) ...@@ -504,6 +570,8 @@ i915_vma_insert(struct i915_vma *vma, u64 size, u64 alignment, u64 flags)
return 0; return 0;
err_clear:
vma->vm->clear_pages(vma);
err_unpin: err_unpin:
i915_gem_object_unpin_pages(obj); i915_gem_object_unpin_pages(obj);
return ret; return ret;
...@@ -517,6 +585,8 @@ i915_vma_remove(struct i915_vma *vma) ...@@ -517,6 +585,8 @@ i915_vma_remove(struct i915_vma *vma)
GEM_BUG_ON(!drm_mm_node_allocated(&vma->node)); GEM_BUG_ON(!drm_mm_node_allocated(&vma->node));
GEM_BUG_ON(vma->flags & (I915_VMA_GLOBAL_BIND | I915_VMA_LOCAL_BIND)); GEM_BUG_ON(vma->flags & (I915_VMA_GLOBAL_BIND | I915_VMA_LOCAL_BIND));
vma->vm->clear_pages(vma);
drm_mm_remove_node(&vma->node); drm_mm_remove_node(&vma->node);
list_move_tail(&vma->vm_link, &vma->vm->unbound_list); list_move_tail(&vma->vm_link, &vma->vm->unbound_list);
...@@ -569,8 +639,8 @@ int __i915_vma_do_pin(struct i915_vma *vma, ...@@ -569,8 +639,8 @@ int __i915_vma_do_pin(struct i915_vma *vma,
err_remove: err_remove:
if ((bound & I915_VMA_BIND_MASK) == 0) { if ((bound & I915_VMA_BIND_MASK) == 0) {
GEM_BUG_ON(vma->pages);
i915_vma_remove(vma); i915_vma_remove(vma);
GEM_BUG_ON(vma->pages);
} }
err_unpin: err_unpin:
__i915_vma_unpin(vma); __i915_vma_unpin(vma);
...@@ -620,6 +690,30 @@ static void __i915_vma_iounmap(struct i915_vma *vma) ...@@ -620,6 +690,30 @@ static void __i915_vma_iounmap(struct i915_vma *vma)
vma->iomap = NULL; vma->iomap = NULL;
} }
void i915_vma_revoke_mmap(struct i915_vma *vma)
{
struct drm_vma_offset_node *node = &vma->obj->base.vma_node;
u64 vma_offset;
lockdep_assert_held(&vma->vm->i915->drm.struct_mutex);
if (!i915_vma_has_userfault(vma))
return;
GEM_BUG_ON(!i915_vma_is_map_and_fenceable(vma));
GEM_BUG_ON(!vma->obj->userfault_count);
vma_offset = vma->ggtt_view.partial.offset << PAGE_SHIFT;
unmap_mapping_range(vma->vm->i915->drm.anon_inode->i_mapping,
drm_vma_node_offset_addr(node) + vma_offset,
vma->size,
1);
i915_vma_unset_userfault(vma);
if (!--vma->obj->userfault_count)
list_del(&vma->obj->userfault_link);
}
int i915_vma_unbind(struct i915_vma *vma) int i915_vma_unbind(struct i915_vma *vma)
{ {
struct drm_i915_gem_object *obj = vma->obj; struct drm_i915_gem_object *obj = vma->obj;
...@@ -683,11 +777,13 @@ int i915_vma_unbind(struct i915_vma *vma) ...@@ -683,11 +777,13 @@ int i915_vma_unbind(struct i915_vma *vma)
return ret; return ret;
/* Force a pagefault for domain tracking on next user access */ /* Force a pagefault for domain tracking on next user access */
i915_gem_release_mmap(obj); i915_vma_revoke_mmap(vma);
__i915_vma_iounmap(vma); __i915_vma_iounmap(vma);
vma->flags &= ~I915_VMA_CAN_FENCE; vma->flags &= ~I915_VMA_CAN_FENCE;
} }
GEM_BUG_ON(vma->fence);
GEM_BUG_ON(i915_vma_has_userfault(vma));
if (likely(!vma->vm->closed)) { if (likely(!vma->vm->closed)) {
trace_i915_vma_unbind(vma); trace_i915_vma_unbind(vma);
...@@ -695,13 +791,6 @@ int i915_vma_unbind(struct i915_vma *vma) ...@@ -695,13 +791,6 @@ int i915_vma_unbind(struct i915_vma *vma)
} }
vma->flags &= ~(I915_VMA_GLOBAL_BIND | I915_VMA_LOCAL_BIND); vma->flags &= ~(I915_VMA_GLOBAL_BIND | I915_VMA_LOCAL_BIND);
if (vma->pages != obj->mm.pages) {
GEM_BUG_ON(!vma->pages);
sg_free_table(vma->pages);
kfree(vma->pages);
}
vma->pages = NULL;
i915_vma_remove(vma); i915_vma_remove(vma);
destroy: destroy:
......
...@@ -55,6 +55,7 @@ struct i915_vma { ...@@ -55,6 +55,7 @@ struct i915_vma {
void __iomem *iomap; void __iomem *iomap;
u64 size; u64 size;
u64 display_alignment; u64 display_alignment;
struct i915_page_sizes page_sizes;
u32 fence_size; u32 fence_size;
u32 fence_alignment; u32 fence_alignment;
...@@ -65,7 +66,7 @@ struct i915_vma { ...@@ -65,7 +66,7 @@ struct i915_vma {
* that exist in the ctx->handle_vmas LUT for this vma. * that exist in the ctx->handle_vmas LUT for this vma.
*/ */
unsigned int open_count; unsigned int open_count;
unsigned int flags; unsigned long flags;
/** /**
* How many users have pinned this object in GTT space. The following * How many users have pinned this object in GTT space. The following
* users can each hold at most one reference: pwrite/pread, execbuffer * users can each hold at most one reference: pwrite/pread, execbuffer
...@@ -87,6 +88,8 @@ struct i915_vma { ...@@ -87,6 +88,8 @@ struct i915_vma {
#define I915_VMA_GGTT BIT(8) #define I915_VMA_GGTT BIT(8)
#define I915_VMA_CAN_FENCE BIT(9) #define I915_VMA_CAN_FENCE BIT(9)
#define I915_VMA_CLOSED BIT(10) #define I915_VMA_CLOSED BIT(10)
#define I915_VMA_USERFAULT_BIT 11
#define I915_VMA_USERFAULT BIT(I915_VMA_USERFAULT_BIT)
unsigned int active; unsigned int active;
struct i915_gem_active last_read[I915_NUM_ENGINES]; struct i915_gem_active last_read[I915_NUM_ENGINES];
...@@ -145,6 +148,22 @@ static inline bool i915_vma_is_closed(const struct i915_vma *vma) ...@@ -145,6 +148,22 @@ static inline bool i915_vma_is_closed(const struct i915_vma *vma)
return vma->flags & I915_VMA_CLOSED; return vma->flags & I915_VMA_CLOSED;
} }
static inline bool i915_vma_set_userfault(struct i915_vma *vma)
{
GEM_BUG_ON(!i915_vma_is_map_and_fenceable(vma));
return __test_and_set_bit(I915_VMA_USERFAULT_BIT, &vma->flags);
}
static inline void i915_vma_unset_userfault(struct i915_vma *vma)
{
return __clear_bit(I915_VMA_USERFAULT_BIT, &vma->flags);
}
static inline bool i915_vma_has_userfault(const struct i915_vma *vma)
{
return test_bit(I915_VMA_USERFAULT_BIT, &vma->flags);
}
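The userfault state is tracked with the kernel's non-atomic bitops, which operate on an unsigned long word; that is also why vma->flags changes from unsigned int to unsigned long earlier in this header. A minimal userspace model of the same pattern, with plain C standing in for __test_and_set_bit()/__clear_bit():

#include <stdbool.h>

#define USERFAULT_BIT 11

/* Returns the previous state, like __test_and_set_bit(); the caller only
 * adjusts obj->userfault_count on the false -> true transition. */
static bool set_userfault(unsigned long *flags)
{
	bool was_set = *flags & (1ul << USERFAULT_BIT);

	*flags |= 1ul << USERFAULT_BIT;
	return was_set;
}

static void clear_userfault(unsigned long *flags)
{
	*flags &= ~(1ul << USERFAULT_BIT);
}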
static inline unsigned int i915_vma_get_active(const struct i915_vma *vma) static inline unsigned int i915_vma_get_active(const struct i915_vma *vma)
{ {
return vma->active; return vma->active;
...@@ -243,6 +262,7 @@ bool i915_gem_valid_gtt_space(struct i915_vma *vma, unsigned long cache_level); ...@@ -243,6 +262,7 @@ bool i915_gem_valid_gtt_space(struct i915_vma *vma, unsigned long cache_level);
bool i915_vma_misplaced(const struct i915_vma *vma, bool i915_vma_misplaced(const struct i915_vma *vma,
u64 size, u64 alignment, u64 flags); u64 size, u64 alignment, u64 flags);
void __i915_vma_set_map_and_fenceable(struct i915_vma *vma); void __i915_vma_set_map_and_fenceable(struct i915_vma *vma);
void i915_vma_revoke_mmap(struct i915_vma *vma);
int __must_check i915_vma_unbind(struct i915_vma *vma); int __must_check i915_vma_unbind(struct i915_vma *vma);
void i915_vma_unlink_ctx(struct i915_vma *vma); void i915_vma_unlink_ctx(struct i915_vma *vma);
void i915_vma_close(struct i915_vma *vma); void i915_vma_close(struct i915_vma *vma);
...@@ -321,12 +341,7 @@ void __iomem *i915_vma_pin_iomap(struct i915_vma *vma); ...@@ -321,12 +341,7 @@ void __iomem *i915_vma_pin_iomap(struct i915_vma *vma);
* Callers must hold the struct_mutex. This function is only valid to be * Callers must hold the struct_mutex. This function is only valid to be
* called on a VMA previously iomapped by the caller with i915_vma_pin_iomap(). * called on a VMA previously iomapped by the caller with i915_vma_pin_iomap().
*/ */
static inline void i915_vma_unpin_iomap(struct i915_vma *vma) void i915_vma_unpin_iomap(struct i915_vma *vma);
{
lockdep_assert_held(&vma->obj->base.dev->struct_mutex);
GEM_BUG_ON(vma->iomap == NULL);
i915_vma_unpin(vma);
}
static inline struct page *i915_vma_first_page(struct i915_vma *vma) static inline struct page *i915_vma_first_page(struct i915_vma *vma)
{ {
...@@ -349,15 +364,13 @@ static inline struct page *i915_vma_first_page(struct i915_vma *vma) ...@@ -349,15 +364,13 @@ static inline struct page *i915_vma_first_page(struct i915_vma *vma)
* *
* True if the vma has a fence, false otherwise. * True if the vma has a fence, false otherwise.
*/ */
static inline bool int i915_vma_pin_fence(struct i915_vma *vma);
i915_vma_pin_fence(struct i915_vma *vma) int __must_check i915_vma_put_fence(struct i915_vma *vma);
static inline void __i915_vma_unpin_fence(struct i915_vma *vma)
{ {
lockdep_assert_held(&vma->obj->base.dev->struct_mutex); GEM_BUG_ON(vma->fence->pin_count <= 0);
if (vma->fence) { vma->fence->pin_count--;
vma->fence->pin_count++;
return true;
} else
return false;
} }
/** /**
...@@ -372,10 +385,8 @@ static inline void ...@@ -372,10 +385,8 @@ static inline void
i915_vma_unpin_fence(struct i915_vma *vma) i915_vma_unpin_fence(struct i915_vma *vma)
{ {
lockdep_assert_held(&vma->obj->base.dev->struct_mutex); lockdep_assert_held(&vma->obj->base.dev->struct_mutex);
if (vma->fence) { if (vma->fence)
GEM_BUG_ON(vma->fence->pin_count <= 0); __i915_vma_unpin_fence(vma);
vma->fence->pin_count--;
}
} }
#endif #endif
......
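With the fence helpers above, i915_vma_pin_fence() moves out of line and now returns an error code (presumably because it may need to allocate or steal a fence register), while __i915_vma_unpin_fence() keeps the cheap pin-count decrement inline. A hedged usage sketch against the declarations in this header; the function itself is illustrative, not from the patch:

static int example_fenced_access(struct i915_vma *vma)
{
	int err;

	err = i915_vma_pin_fence(vma);	/* may fail, e.g. no fence available */
	if (err)
		return err;

	/* ... perform detiled access through the fenced GGTT mapping ... */

	i915_vma_unpin_fence(vma);
	return 0;
}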
...@@ -754,7 +754,7 @@ static struct intel_encoder *get_saved_enc(struct drm_i915_private *dev_priv, ...@@ -754,7 +754,7 @@ static struct intel_encoder *get_saved_enc(struct drm_i915_private *dev_priv,
{ {
struct intel_encoder *encoder; struct intel_encoder *encoder;
if (WARN_ON(pipe >= I915_MAX_PIPES)) if (WARN_ON(pipe >= INTEL_INFO(dev_priv)->num_pipes))
return NULL; return NULL;
/* MST */ /* MST */
......
...@@ -431,27 +431,6 @@ parse_general_features(struct drm_i915_private *dev_priv, ...@@ -431,27 +431,6 @@ parse_general_features(struct drm_i915_private *dev_priv,
dev_priv->vbt.fdi_rx_polarity_inverted); dev_priv->vbt.fdi_rx_polarity_inverted);
} }
static void
parse_general_definitions(struct drm_i915_private *dev_priv,
const struct bdb_header *bdb)
{
const struct bdb_general_definitions *general;
general = find_section(bdb, BDB_GENERAL_DEFINITIONS);
if (general) {
u16 block_size = get_blocksize(general);
if (block_size >= sizeof(*general)) {
int bus_pin = general->crt_ddc_gmbus_pin;
DRM_DEBUG_KMS("crt_ddc_bus_pin: %d\n", bus_pin);
if (intel_gmbus_is_valid_pin(dev_priv, bus_pin))
dev_priv->vbt.crt_ddc_pin = bus_pin;
} else {
DRM_DEBUG_KMS("BDB_GD too small (%d). Invalid.\n",
block_size);
}
}
}
static const struct child_device_config * static const struct child_device_config *
child_device_ptr(const struct bdb_general_definitions *defs, int i) child_device_ptr(const struct bdb_general_definitions *defs, int i)
{ {
...@@ -459,41 +438,24 @@ child_device_ptr(const struct bdb_general_definitions *defs, int i) ...@@ -459,41 +438,24 @@ child_device_ptr(const struct bdb_general_definitions *defs, int i)
} }
static void static void
parse_sdvo_device_mapping(struct drm_i915_private *dev_priv, parse_sdvo_device_mapping(struct drm_i915_private *dev_priv, u8 bdb_version)
const struct bdb_header *bdb)
{ {
struct sdvo_device_mapping *mapping; struct sdvo_device_mapping *mapping;
const struct bdb_general_definitions *defs;
const struct child_device_config *child; const struct child_device_config *child;
int i, child_device_num, count; int i, count = 0;
u16 block_size;
defs = find_section(bdb, BDB_GENERAL_DEFINITIONS);
if (!defs) {
DRM_DEBUG_KMS("No general definition block is found, unable to construct sdvo mapping.\n");
return;
}
/* /*
* Only parse SDVO mappings when the general definitions block child * Only parse SDVO mappings on gens that could have SDVO. This isn't
* device size matches that of the *legacy* child device config * accurate and doesn't have to be, as long as it's not too strict.
* struct. Thus, SDVO mapping will be skipped for newer VBT.
*/ */
if (defs->child_dev_size != LEGACY_CHILD_DEVICE_CONFIG_SIZE) { if (!IS_GEN(dev_priv, 3, 7)) {
DRM_DEBUG_KMS("Unsupported child device size for SDVO mapping.\n"); DRM_DEBUG_KMS("Skipping SDVO device mapping\n");
return; return;
} }
/* get the block size of general definitions */
block_size = get_blocksize(defs); for (i = 0, count = 0; i < dev_priv->vbt.child_dev_num; i++) {
/* get the number of child device */ child = dev_priv->vbt.child_dev + i;
child_device_num = (block_size - sizeof(*defs)) / defs->child_dev_size;
count = 0;
for (i = 0; i < child_device_num; i++) {
child = child_device_ptr(defs, i);
if (!child->device_type) {
/* skip the device block if device type is invalid */
continue;
}
if (child->slave_addr != SLAVE_ADDR1 && if (child->slave_addr != SLAVE_ADDR1 &&
child->slave_addr != SLAVE_ADDR2) { child->slave_addr != SLAVE_ADDR2) {
/* /*
...@@ -544,7 +506,6 @@ parse_sdvo_device_mapping(struct drm_i915_private *dev_priv, ...@@ -544,7 +506,6 @@ parse_sdvo_device_mapping(struct drm_i915_private *dev_priv,
/* No SDVO device info is found */ /* No SDVO device info is found */
DRM_DEBUG_KMS("No SDVO device info is found in VBT\n"); DRM_DEBUG_KMS("No SDVO device info is found in VBT\n");
} }
return;
} }
static void static void
...@@ -1111,7 +1072,7 @@ static void sanitize_aux_ch(struct drm_i915_private *dev_priv, ...@@ -1111,7 +1072,7 @@ static void sanitize_aux_ch(struct drm_i915_private *dev_priv,
} }
static void parse_ddi_port(struct drm_i915_private *dev_priv, enum port port, static void parse_ddi_port(struct drm_i915_private *dev_priv, enum port port,
const struct bdb_header *bdb) u8 bdb_version)
{ {
struct child_device_config *it, *child = NULL; struct child_device_config *it, *child = NULL;
struct ddi_vbt_port_info *info = &dev_priv->vbt.ddi_port_info[port]; struct ddi_vbt_port_info *info = &dev_priv->vbt.ddi_port_info[port];
...@@ -1215,7 +1176,7 @@ static void parse_ddi_port(struct drm_i915_private *dev_priv, enum port port, ...@@ -1215,7 +1176,7 @@ static void parse_ddi_port(struct drm_i915_private *dev_priv, enum port port,
sanitize_aux_ch(dev_priv, port); sanitize_aux_ch(dev_priv, port);
} }
if (bdb->version >= 158) { if (bdb_version >= 158) {
/* The VBT HDMI level shift values match the table we have. */ /* The VBT HDMI level shift values match the table we have. */
hdmi_level_shift = child->hdmi_level_shifter_value; hdmi_level_shift = child->hdmi_level_shifter_value;
DRM_DEBUG_KMS("VBT HDMI level shift for port %c: %d\n", DRM_DEBUG_KMS("VBT HDMI level shift for port %c: %d\n",
...@@ -1225,7 +1186,7 @@ static void parse_ddi_port(struct drm_i915_private *dev_priv, enum port port, ...@@ -1225,7 +1186,7 @@ static void parse_ddi_port(struct drm_i915_private *dev_priv, enum port port,
} }
/* Parse the I_boost config for SKL and above */ /* Parse the I_boost config for SKL and above */
if (bdb->version >= 196 && child->iboost) { if (bdb_version >= 196 && child->iboost) {
info->dp_boost_level = translate_iboost(child->dp_iboost_level); info->dp_boost_level = translate_iboost(child->dp_iboost_level);
DRM_DEBUG_KMS("VBT (e)DP boost level for port %c: %d\n", DRM_DEBUG_KMS("VBT (e)DP boost level for port %c: %d\n",
port_name(port), info->dp_boost_level); port_name(port), info->dp_boost_level);
...@@ -1235,40 +1196,52 @@ static void parse_ddi_port(struct drm_i915_private *dev_priv, enum port port, ...@@ -1235,40 +1196,52 @@ static void parse_ddi_port(struct drm_i915_private *dev_priv, enum port port,
} }
} }
static void parse_ddi_ports(struct drm_i915_private *dev_priv, static void parse_ddi_ports(struct drm_i915_private *dev_priv, u8 bdb_version)
const struct bdb_header *bdb)
{ {
enum port port; enum port port;
if (!HAS_DDI(dev_priv)) if (!HAS_DDI(dev_priv) && !IS_CHERRYVIEW(dev_priv))
return; return;
if (!dev_priv->vbt.child_dev_num) if (!dev_priv->vbt.child_dev_num)
return; return;
if (bdb->version < 155) if (bdb_version < 155)
return; return;
for (port = PORT_A; port < I915_MAX_PORTS; port++) for (port = PORT_A; port < I915_MAX_PORTS; port++)
parse_ddi_port(dev_priv, port, bdb); parse_ddi_port(dev_priv, port, bdb_version);
} }
static void static void
parse_device_mapping(struct drm_i915_private *dev_priv, parse_general_definitions(struct drm_i915_private *dev_priv,
const struct bdb_header *bdb) const struct bdb_header *bdb)
{ {
const struct bdb_general_definitions *defs; const struct bdb_general_definitions *defs;
const struct child_device_config *child; const struct child_device_config *child;
struct child_device_config *child_dev_ptr;
int i, child_device_num, count; int i, child_device_num, count;
u8 expected_size; u8 expected_size;
u16 block_size; u16 block_size;
int bus_pin;
defs = find_section(bdb, BDB_GENERAL_DEFINITIONS); defs = find_section(bdb, BDB_GENERAL_DEFINITIONS);
if (!defs) { if (!defs) {
DRM_DEBUG_KMS("No general definition block is found, no devices defined.\n"); DRM_DEBUG_KMS("No general definition block is found, no devices defined.\n");
return; return;
} }
block_size = get_blocksize(defs);
if (block_size < sizeof(*defs)) {
DRM_DEBUG_KMS("General definitions block too small (%u)\n",
block_size);
return;
}
bus_pin = defs->crt_ddc_gmbus_pin;
DRM_DEBUG_KMS("crt_ddc_bus_pin: %d\n", bus_pin);
if (intel_gmbus_is_valid_pin(dev_priv, bus_pin))
dev_priv->vbt.crt_ddc_pin = bus_pin;
if (bdb->version < 106) { if (bdb->version < 106) {
expected_size = 22; expected_size = 22;
} else if (bdb->version < 111) { } else if (bdb->version < 111) {
...@@ -1298,18 +1271,14 @@ parse_device_mapping(struct drm_i915_private *dev_priv, ...@@ -1298,18 +1271,14 @@ parse_device_mapping(struct drm_i915_private *dev_priv,
return; return;
} }
/* get the block size of general definitions */
block_size = get_blocksize(defs);
/* get the number of child device */ /* get the number of child device */
child_device_num = (block_size - sizeof(*defs)) / defs->child_dev_size; child_device_num = (block_size - sizeof(*defs)) / defs->child_dev_size;
count = 0; count = 0;
/* get the number of child device that is present */ /* get the number of child device that is present */
for (i = 0; i < child_device_num; i++) { for (i = 0; i < child_device_num; i++) {
child = child_device_ptr(defs, i); child = child_device_ptr(defs, i);
if (!child->device_type) { if (!child->device_type)
/* skip the device block if device type is invalid */
continue; continue;
}
count++; count++;
} }
if (!count) { if (!count) {
...@@ -1326,36 +1295,18 @@ parse_device_mapping(struct drm_i915_private *dev_priv, ...@@ -1326,36 +1295,18 @@ parse_device_mapping(struct drm_i915_private *dev_priv,
count = 0; count = 0;
for (i = 0; i < child_device_num; i++) { for (i = 0; i < child_device_num; i++) {
child = child_device_ptr(defs, i); child = child_device_ptr(defs, i);
if (!child->device_type) { if (!child->device_type)
/* skip the device block if device type is invalid */
continue; continue;
}
child_dev_ptr = dev_priv->vbt.child_dev + count;
count++;
/* /*
* Copy as much as we know (sizeof) and is available * Copy as much as we know (sizeof) and is available
* (child_dev_size) of the child device. Accessing the data must * (child_dev_size) of the child device. Accessing the data must
* depend on VBT version. * depend on VBT version.
*/ */
memcpy(child_dev_ptr, child, memcpy(dev_priv->vbt.child_dev + count, child,
min_t(size_t, defs->child_dev_size, sizeof(*child))); min_t(size_t, defs->child_dev_size, sizeof(*child)));
count++;
/*
* copied full block, now init values when they are not
* available in current version
*/
if (bdb->version < 196) {
/* Set default values for bits added from v196 */
child_dev_ptr->iboost = 0;
child_dev_ptr->hpd_invert = 0;
}
if (bdb->version < 192)
child_dev_ptr->lspcon = 0;
} }
return;
} }
/* Common defaults which may be overridden by VBT. */ /* Common defaults which may be overridden by VBT. */
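The loop above copies each child device with min(child_dev_size, sizeof(*child)), so when an older VBT provides a shorter block the newer fields (iboost, hpd_invert, lspcon) are simply never written; assuming the child_dev array comes from a zeroing allocator, the per-version defaulting removed in this hunk becomes redundant. A small standalone sketch of that bounded copy:

#include <stddef.h>
#include <string.h>

/* Copy only the bytes the VBT actually provides, capped at the size of the
 * driver-side structure; trailing fields in *dst are left as they were
 * (zero, if the destination array was zero-allocated). */
static void copy_child_dev(void *dst, size_t dst_size,
			   const void *src, size_t vbt_child_dev_size)
{
	memcpy(dst, src, vbt_child_dev_size < dst_size ? vbt_child_dev_size
						       : dst_size);
}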
...@@ -1536,14 +1487,15 @@ void intel_bios_init(struct drm_i915_private *dev_priv) ...@@ -1536,14 +1487,15 @@ void intel_bios_init(struct drm_i915_private *dev_priv)
parse_lfp_panel_data(dev_priv, bdb); parse_lfp_panel_data(dev_priv, bdb);
parse_lfp_backlight(dev_priv, bdb); parse_lfp_backlight(dev_priv, bdb);
parse_sdvo_panel_data(dev_priv, bdb); parse_sdvo_panel_data(dev_priv, bdb);
parse_sdvo_device_mapping(dev_priv, bdb);
parse_device_mapping(dev_priv, bdb);
parse_driver_features(dev_priv, bdb); parse_driver_features(dev_priv, bdb);
parse_edp(dev_priv, bdb); parse_edp(dev_priv, bdb);
parse_psr(dev_priv, bdb); parse_psr(dev_priv, bdb);
parse_mipi_config(dev_priv, bdb); parse_mipi_config(dev_priv, bdb);
parse_mipi_sequence(dev_priv, bdb); parse_mipi_sequence(dev_priv, bdb);
parse_ddi_ports(dev_priv, bdb);
/* Further processing on pre-parsed data */
parse_sdvo_device_mapping(dev_priv, bdb->version);
parse_ddi_ports(dev_priv, bdb->version);
out: out:
if (!vbt) { if (!vbt) {
......
...@@ -216,7 +216,7 @@ static void gen9_set_dc_state_debugmask(struct drm_i915_private *dev_priv) ...@@ -216,7 +216,7 @@ static void gen9_set_dc_state_debugmask(struct drm_i915_private *dev_priv)
mask = DC_STATE_DEBUG_MASK_MEMORY_UP; mask = DC_STATE_DEBUG_MASK_MEMORY_UP;
if (IS_BROXTON(dev_priv)) if (IS_GEN9_LP(dev_priv))
mask |= DC_STATE_DEBUG_MASK_CORES; mask |= DC_STATE_DEBUG_MASK_CORES;
/* The below bit doesn't need to be cleared ever afterwards */ /* The below bit doesn't need to be cleared ever afterwards */
......
...@@ -162,14 +162,19 @@ static void intel_mst_post_disable_dp(struct intel_encoder *encoder, ...@@ -162,14 +162,19 @@ static void intel_mst_post_disable_dp(struct intel_encoder *encoder,
drm_dp_mst_deallocate_vcpi(&intel_dp->mst_mgr, connector->port); drm_dp_mst_deallocate_vcpi(&intel_dp->mst_mgr, connector->port);
/*
* Power down mst path before disabling the port, otherwise we end
* up getting interrupts from the sink upon detecting link loss.
*/
drm_dp_send_power_updown_phy(&intel_dp->mst_mgr, connector->port,
false);
intel_dp->active_mst_links--; intel_dp->active_mst_links--;
intel_mst->connector = NULL; intel_mst->connector = NULL;
if (intel_dp->active_mst_links == 0) { if (intel_dp->active_mst_links == 0) {
intel_dig_port->base.post_disable(&intel_dig_port->base, intel_dig_port->base.post_disable(&intel_dig_port->base,
NULL, NULL); NULL, NULL);
intel_dp_sink_dpms(intel_dp, DRM_MODE_DPMS_OFF);
} }
DRM_DEBUG_KMS("active links %d\n", intel_dp->active_mst_links); DRM_DEBUG_KMS("active links %d\n", intel_dp->active_mst_links);
} }
...@@ -196,6 +201,7 @@ static void intel_mst_pre_enable_dp(struct intel_encoder *encoder, ...@@ -196,6 +201,7 @@ static void intel_mst_pre_enable_dp(struct intel_encoder *encoder,
DRM_DEBUG_KMS("active links %d\n", intel_dp->active_mst_links); DRM_DEBUG_KMS("active links %d\n", intel_dp->active_mst_links);
drm_dp_send_power_updown_phy(&intel_dp->mst_mgr, connector->port, true);
if (intel_dp->active_mst_links == 0) if (intel_dp->active_mst_links == 0)
intel_dig_port->base.pre_enable(&intel_dig_port->base, intel_dig_port->base.pre_enable(&intel_dig_port->base,
pipe_config, NULL); pipe_config, NULL);
......
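The MST hunks above add DPCD phy power management around link bring-up and teardown: the downstream phy is powered down before the encoder's post_disable() runs, so the sink does not raise link-loss interrupts, and powered back up before pre_enable(). A hedged sketch of that ordering; only drm_dp_send_power_updown_phy() is the real helper, the wrappers are illustrative:

static void mst_port_teardown(struct intel_dp *intel_dp,
			      struct drm_dp_mst_port *port)
{
	/* Quiesce the downstream phy first to avoid link-loss interrupts. */
	drm_dp_send_power_updown_phy(&intel_dp->mst_mgr, port, false);
	/* ... then the encoder's post_disable() hook once no links remain ... */
}

static void mst_port_bringup(struct intel_dp *intel_dp,
			     struct drm_dp_mst_port *port)
{
	/* Wake the phy before programming the link. */
	drm_dp_send_power_updown_phy(&intel_dp->mst_mgr, port, true);
	/* ... then pre_enable() and VCPI allocation ... */
}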