Commit be5df20a authored by Dave Airlie's avatar Dave Airlie

Merge tag 'drm-intel-next-2017-03-20' of git://anongit.freedesktop.org/git/drm-intel into drm-next

More in i915 for 4.12:

- designware i2c fixes from Hans de Goede, in a topic branch shared
  with other subsystems (maybe; they didn't confirm, but they requested
  the pull)
- drop drm_panel usage from the intel dsi vbt panel (Jani)
- vblank evasion improvements and tracing (Maarten and Ville)
- clarify spinlock irq semantics again a bit (Tvrtko)
- new ->pwrite backend hook (right now just for shmem pagecache writes),
  from Chris
- more planar/ccs work from Ville
- hotplug safe connector iterators everywhere
- userptr fixes (Chris)
- selftests for cache coloring eviction (Matthew Auld)
- extend debugfs drop_caches interface for shrinker testing (Chris)
- baytrail "the rps kills the machine" fix (Chris)
- use new atomic state iterators, a lot (Maarten)
- refactor guc/huc code some (Arkadiusz Hiler)
- tighten breadcrumbs rbtree a bit (Chris)
- improve wrap-around and time handling in rps residency counters
  (Mika)
- split reset-in-progress into two flags, backoff and handoff (Chris)
- other misc reset improvements from a few people
- bunch of vgpu interaction fixes with recent code changes
- misc stuff all over, as usual

* tag 'drm-intel-next-2017-03-20' of git://anongit.freedesktop.org/git/drm-intel: (144 commits)
  drm/i915: Update DRIVER_DATE to 20170320
  drm/i915: Initialise i915_gem_object_create_from_data() directly
  drm/i915: Correct error handling for i915_gem_object_create_from_data()
  drm/i915: i915_gem_object_create_from_data() doesn't require struct_mutex
  drm/i915: Retire an active batch pool object rather than allocate new
  drm/i915: Add i810/i815 pci-ids for completeness
  drm/i915: Skip execlists_dequeue() early if the list is empty
  drm/i915: Stop using obj->obj_exec_link outside of execbuf
  drm/i915: Squelch WARN for VLV_COUNTER_CONTROL
  drm/i915/glk: Enable pooled EUs for Geminilake
  drm/i915: Remove superfluous i915_add_request_no_flush() helper
  drm/i915/vgpu: Neuter forcewakes for VGPU more thoroughly
  drm/i915: Fix vGPU balloon for ggtt guard page
  drm/i915: Avoid use-after-free of ctx in request tracepoints
  drm/i915: Assert that the context pin_counts do not overflow
  drm/i915: Wait for reset to complete before returning from debugfs/i915_wedged
  drm/i915: Restore engine->submit_request before unwedging
  drm/i915: Move engine->submit_request selection to a vfunc
  drm/i915: Split I915_RESET_IN_PROGRESS into two flags
  drm/i915: make context status notifier head be per engine
  ...
parents 33d5f513 c5bd2e14
......@@ -5,6 +5,8 @@
#ifndef IOSF_MBI_SYMS_H
#define IOSF_MBI_SYMS_H
#include <linux/notifier.h>
#define MBI_MCR_OFFSET 0xD0
#define MBI_MDR_OFFSET 0xD4
#define MBI_MCRX_OFFSET 0xD8
......@@ -47,6 +49,10 @@
#define QRK_MBI_UNIT_MM 0x05
#define QRK_MBI_UNIT_SOC 0x31
/* Action values for the pmic_bus_access_notifier functions */
#define MBI_PMIC_BUS_ACCESS_BEGIN 1
#define MBI_PMIC_BUS_ACCESS_END 2
#if IS_ENABLED(CONFIG_IOSF_MBI)
bool iosf_mbi_available(void);
......@@ -88,6 +94,65 @@ int iosf_mbi_write(u8 port, u8 opcode, u32 offset, u32 mdr);
*/
int iosf_mbi_modify(u8 port, u8 opcode, u32 offset, u32 mdr, u32 mask);
/**
* iosf_mbi_punit_acquire() - Acquire access to the P-Unit
*
* On some systems the P-Unit accesses the PMIC to change various voltages
* over the same bus that other kernel drivers use for e.g. battery monitoring.
*
* If a driver sends requests to the P-Unit which require the P-Unit to access
* the PMIC bus while another driver is also accessing the PMIC bus various bad
* things happen.
*
* To avoid these problems this function must be called before accessing the
* P-Unit or the PMIC, be it through iosf_mbi* functions or through other means.
*
* Note that on these systems the i2c-bus driver will request a semaphore
* from the P-Unit for exclusive access to the PMIC bus when i2c drivers
* are accessing it, but this does not appear to be sufficient; we still
* need to avoid making certain P-Unit requests during the access window.
*
* This function locks a mutex; as such it may sleep.
*/
void iosf_mbi_punit_acquire(void);
/**
* iosf_mbi_punit_release() - Release access to the P-Unit
*/
void iosf_mbi_punit_release(void);
/**
* iosf_mbi_register_pmic_bus_access_notifier - Register PMIC bus notifier
*
* This function can be used by drivers which may need to acquire P-Unit
* managed resources from interrupt context, where iosf_mbi_punit_acquire()
* cannot be used.
*
* This function allows a driver to register a notifier to get notified (in a
* process context) before other drivers start accessing the PMIC bus.
*
* This allows the driver to acquire, beforehand, any resources it may need
* during the window in which the other driver is accessing the PMIC.
*
* @nb: notifier_block to register
*/
int iosf_mbi_register_pmic_bus_access_notifier(struct notifier_block *nb);
/**
* iosf_mbi_unregister_pmic_bus_access_notifier - Unregister PMIC bus notifier
*
* @nb: notifier_block to unregister
*/
int iosf_mbi_unregister_pmic_bus_access_notifier(struct notifier_block *nb);
/**
* iosf_mbi_call_pmic_bus_access_notifier_chain - Call PMIC bus notifier chain
*
* @val: action to pass into listener's notifier_call function
* @v: data pointer to pass into listener's notifier_call function
*/
int iosf_mbi_call_pmic_bus_access_notifier_chain(unsigned long val, void *v);
#else /* CONFIG_IOSF_MBI is not enabled */
static inline
bool iosf_mbi_available(void)
......@@ -115,6 +180,28 @@ int iosf_mbi_modify(u8 port, u8 opcode, u32 offset, u32 mdr, u32 mask)
WARN(1, "IOSF_MBI driver not available");
return -EPERM;
}
static inline void iosf_mbi_punit_acquire(void) {}
static inline void iosf_mbi_punit_release(void) {}
static inline
int iosf_mbi_register_pmic_bus_access_notifier(struct notifier_block *nb)
{
return 0;
}
static inline
int iosf_mbi_unregister_pmic_bus_access_notifier(struct notifier_block *nb)
{
return 0;
}
static inline
int iosf_mbi_call_pmic_bus_access_notifier_chain(unsigned long val, void *v)
{
return 0;
}
#endif /* CONFIG_IOSF_MBI */
#endif /* IOSF_MBI_SYMS_H */
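For illustration, a minimal sketch of a driver using the new acquire/release
API (the function and the register values are hypothetical; the iosf_mbi_*
calls and their semantics come from the header above):

#include <asm/iosf_mbi.h>

/* hypothetical driver path that must keep the P-Unit off the PMIC bus */
static int example_punit_write(u8 port, u8 opcode, u32 offset, u32 val)
{
        int ret;

        if (!iosf_mbi_available())
                return -ENODEV;

        iosf_mbi_punit_acquire(); /* locks a mutex and may sleep */
        ret = iosf_mbi_write(port, opcode, offset, val);
        iosf_mbi_punit_release();

        return ret;
}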
......@@ -34,6 +34,8 @@
static struct pci_dev *mbi_pdev;
static DEFINE_SPINLOCK(iosf_mbi_lock);
static DEFINE_MUTEX(iosf_mbi_punit_mutex);
static BLOCKING_NOTIFIER_HEAD(iosf_mbi_pmic_bus_access_notifier);
static inline u32 iosf_mbi_form_mcr(u8 op, u8 port, u8 offset)
{
......@@ -190,6 +192,53 @@ bool iosf_mbi_available(void)
}
EXPORT_SYMBOL(iosf_mbi_available);
void iosf_mbi_punit_acquire(void)
{
mutex_lock(&iosf_mbi_punit_mutex);
}
EXPORT_SYMBOL(iosf_mbi_punit_acquire);
void iosf_mbi_punit_release(void)
{
mutex_unlock(&iosf_mbi_punit_mutex);
}
EXPORT_SYMBOL(iosf_mbi_punit_release);
int iosf_mbi_register_pmic_bus_access_notifier(struct notifier_block *nb)
{
int ret;
/* Wait for the bus to go inactive before registering */
mutex_lock(&iosf_mbi_punit_mutex);
ret = blocking_notifier_chain_register(
&iosf_mbi_pmic_bus_access_notifier, nb);
mutex_unlock(&iosf_mbi_punit_mutex);
return ret;
}
EXPORT_SYMBOL(iosf_mbi_register_pmic_bus_access_notifier);
int iosf_mbi_unregister_pmic_bus_access_notifier(struct notifier_block *nb)
{
int ret;
/* Wait for the bus to go inactive before unregistering */
mutex_lock(&iosf_mbi_punit_mutex);
ret = blocking_notifier_chain_unregister(
&iosf_mbi_pmic_bus_access_notifier, nb);
mutex_unlock(&iosf_mbi_punit_mutex);
return ret;
}
EXPORT_SYMBOL(iosf_mbi_unregister_pmic_bus_access_notifier);
int iosf_mbi_call_pmic_bus_access_notifier_chain(unsigned long val, void *v)
{
return blocking_notifier_call_chain(
&iosf_mbi_pmic_bus_access_notifier, val, v);
}
EXPORT_SYMBOL(iosf_mbi_call_pmic_bus_access_notifier_chain);
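As a sketch of the listener side (the i915 uncore code registers such a
notifier later in this merge through its pmic_bus_access_nb; the callback
body below is illustrative rather than the actual i915 one):

static int example_pmic_bus_access(struct notifier_block *nb,
                                   unsigned long action, void *data)
{
        switch (action) {
        case MBI_PMIC_BUS_ACCESS_BEGIN:
                /* acquire, in process context, any P-Unit managed
                 * resources needed while another driver owns the
                 * PMIC bus */
                break;
        case MBI_PMIC_BUS_ACCESS_END:
                /* and release them again */
                break;
        }

        return NOTIFY_OK;
}

static struct notifier_block example_nb = {
        .notifier_call = example_pmic_bus_access,
};

static int example_init(void)
{
        /* registration takes iosf_mbi_punit_mutex, so it may sleep */
        return iosf_mbi_register_pmic_bus_access_notifier(&example_nb);
}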
#ifdef CONFIG_IOSF_MBI_DEBUG
static u32 dbg_mdr;
static u32 dbg_mcr;
......
......@@ -20,6 +20,7 @@ config DRM_I915
select ACPI_VIDEO if ACPI
select ACPI_BUTTON if ACPI
select SYNC_FILE
select IOSF_MBI
help
Choose this option if you have a system that has "Intel Graphics
Media Accelerator" or "HD Graphics" integrated graphics,
......
......@@ -105,8 +105,8 @@ i915-y += dvo_ch7017.o \
intel_dp.o \
intel_dsi.o \
intel_dsi_dcs_backlight.o \
intel_dsi_panel_vbt.o \
intel_dsi_pll.o \
intel_dsi_vbt.o \
intel_dvo.o \
intel_hdmi.o \
intel_i2c.o \
......
......@@ -160,7 +160,6 @@ struct intel_vgpu {
atomic_t running_workload_num;
DECLARE_BITMAP(tlb_handle_pending, I915_NUM_ENGINES);
struct i915_gem_context *shadow_ctx;
struct notifier_block shadow_ctx_notifier_block;
#if IS_ENABLED(CONFIG_DRM_I915_GVT_KVMGT)
struct {
......@@ -231,6 +230,7 @@ struct intel_gvt {
struct intel_gvt_gtt gtt;
struct intel_gvt_opregion opregion;
struct intel_gvt_workload_scheduler scheduler;
struct notifier_block shadow_ctx_notifier_block[I915_NUM_ENGINES];
DECLARE_HASHTABLE(cmd_table, GVT_CMD_HASH_BITS);
struct intel_vgpu_type *types;
unsigned int num_types;
......
......@@ -130,12 +130,10 @@ static int populate_shadow_context(struct intel_vgpu_workload *workload)
static int shadow_context_status_change(struct notifier_block *nb,
unsigned long action, void *data)
{
struct intel_vgpu *vgpu = container_of(nb,
struct intel_vgpu, shadow_ctx_notifier_block);
struct drm_i915_gem_request *req =
(struct drm_i915_gem_request *)data;
struct intel_gvt_workload_scheduler *scheduler =
&vgpu->gvt->scheduler;
struct drm_i915_gem_request *req = (struct drm_i915_gem_request *)data;
struct intel_gvt *gvt = container_of(nb, struct intel_gvt,
shadow_ctx_notifier_block[req->engine->id]);
struct intel_gvt_workload_scheduler *scheduler = &gvt->scheduler;
struct intel_vgpu_workload *workload =
scheduler->current_workload[req->engine->id];
......@@ -214,7 +212,7 @@ static int dispatch_workload(struct intel_vgpu_workload *workload)
workload->status = ret;
if (!IS_ERR_OR_NULL(rq))
i915_add_request_no_flush(rq);
i915_add_request(rq);
mutex_unlock(&dev_priv->drm.struct_mutex);
return ret;
}
......@@ -493,15 +491,16 @@ void intel_gvt_wait_vgpu_idle(struct intel_vgpu *vgpu)
void intel_gvt_clean_workload_scheduler(struct intel_gvt *gvt)
{
struct intel_gvt_workload_scheduler *scheduler = &gvt->scheduler;
int i;
struct intel_engine_cs *engine;
enum intel_engine_id i;
gvt_dbg_core("clean workload scheduler\n");
for (i = 0; i < I915_NUM_ENGINES; i++) {
if (scheduler->thread[i]) {
for_each_engine(engine, gvt->dev_priv, i) {
atomic_notifier_chain_unregister(
&engine->context_status_notifier,
&gvt->shadow_ctx_notifier_block[i]);
kthread_stop(scheduler->thread[i]);
scheduler->thread[i] = NULL;
}
}
}
......@@ -509,18 +508,15 @@ int intel_gvt_init_workload_scheduler(struct intel_gvt *gvt)
{
struct intel_gvt_workload_scheduler *scheduler = &gvt->scheduler;
struct workload_thread_param *param = NULL;
struct intel_engine_cs *engine;
enum intel_engine_id i;
int ret;
int i;
gvt_dbg_core("init workload scheduler\n");
init_waitqueue_head(&scheduler->workload_complete_wq);
for (i = 0; i < I915_NUM_ENGINES; i++) {
/* check ring mask at init time */
if (!HAS_ENGINE(gvt->dev_priv, i))
continue;
for_each_engine(engine, gvt->dev_priv, i) {
init_waitqueue_head(&scheduler->waitq[i]);
param = kzalloc(sizeof(*param), GFP_KERNEL);
......@@ -539,6 +535,11 @@ int intel_gvt_init_workload_scheduler(struct intel_gvt *gvt)
ret = PTR_ERR(scheduler->thread[i]);
goto err;
}
gvt->shadow_ctx_notifier_block[i].notifier_call =
shadow_context_status_change;
atomic_notifier_chain_register(&engine->context_status_notifier,
&gvt->shadow_ctx_notifier_block[i]);
}
return 0;
err:
......@@ -550,9 +551,6 @@ int intel_gvt_init_workload_scheduler(struct intel_gvt *gvt)
void intel_vgpu_clean_gvt_context(struct intel_vgpu *vgpu)
{
atomic_notifier_chain_unregister(&vgpu->shadow_ctx->status_notifier,
&vgpu->shadow_ctx_notifier_block);
i915_gem_context_put_unlocked(vgpu->shadow_ctx);
}
......@@ -567,10 +565,5 @@ int intel_vgpu_init_gvt_context(struct intel_vgpu *vgpu)
vgpu->shadow_ctx->engine[RCS].initialised = true;
vgpu->shadow_ctx_notifier_block.notifier_call =
shadow_context_status_change;
atomic_notifier_chain_register(&vgpu->shadow_ctx->status_notifier,
&vgpu->shadow_ctx_notifier_block);
return 0;
}
......@@ -1279,11 +1279,17 @@ int intel_engine_cmd_parser(struct intel_engine_cs *engine,
* space. Parsing should be faster in some cases this way.
*/
batch_end = cmd + (batch_len / sizeof(*batch_end));
while (cmd < batch_end) {
do {
u32 length;
if (*cmd == MI_BATCH_BUFFER_END)
if (*cmd == MI_BATCH_BUFFER_END) {
if (needs_clflush_after) {
void *ptr = ptr_mask_bits(shadow_batch_obj->mm.mapping);
drm_clflush_virt_range(ptr,
(void *)(cmd + 1) - ptr);
}
break;
}
desc = find_cmd(engine, *cmd, desc, &default_desc);
if (!desc) {
......@@ -1323,17 +1329,14 @@ int intel_engine_cmd_parser(struct intel_engine_cs *engine,
}
cmd += length;
}
if (cmd >= batch_end) {
DRM_DEBUG_DRIVER("CMD: Got to the end of the buffer w/o a BBE cmd!\n");
ret = -EINVAL;
break;
}
} while (1);
if (ret == 0 && needs_clflush_after)
drm_clflush_virt_range(shadow_batch_obj->mm.mapping, batch_len);
i915_gem_object_unpin_map(shadow_batch_obj);
return ret;
}
......
......@@ -567,9 +567,7 @@ static int i915_load_modeset_init(struct drm_device *dev)
if (i915_inject_load_failure())
return -ENODEV;
ret = intel_bios_init(dev_priv);
if (ret)
DRM_INFO("failed to find VBIOS tables\n");
intel_bios_init(dev_priv);
/* If we have > 1 VGA cards, then we need to arbitrate access
* to the common VGA resources.
......@@ -607,8 +605,7 @@ static int i915_load_modeset_init(struct drm_device *dev)
if (ret)
goto cleanup_irq;
intel_huc_init(dev_priv);
intel_guc_init(dev_priv);
intel_uc_init_fw(dev_priv);
ret = i915_gem_init(dev_priv);
if (ret)
......@@ -827,7 +824,6 @@ static int i915_driver_init_early(struct drm_i915_private *dev_priv,
spin_lock_init(&dev_priv->mm.object_stat_lock);
spin_lock_init(&dev_priv->mmio_flip_lock);
spin_lock_init(&dev_priv->wm.dsparb_lock);
mutex_init(&dev_priv->sb_lock);
mutex_init(&dev_priv->modeset_restore_lock);
mutex_init(&dev_priv->av_mutex);
......@@ -992,6 +988,8 @@ static void intel_sanitize_options(struct drm_i915_private *dev_priv)
i915.semaphores = intel_sanitize_semaphores(dev_priv, i915.semaphores);
DRM_DEBUG_DRIVER("use GPU semaphores? %s\n", yesno(i915.semaphores));
intel_uc_sanitize_options(dev_priv);
}
/**
......@@ -1423,17 +1421,14 @@ static void i915_driver_lastclose(struct drm_device *dev)
vga_switcheroo_process_delayed_switch();
}
static void i915_driver_preclose(struct drm_device *dev, struct drm_file *file)
static void i915_driver_postclose(struct drm_device *dev, struct drm_file *file)
{
struct drm_i915_file_private *file_priv = file->driver_priv;
mutex_lock(&dev->struct_mutex);
i915_gem_context_close(dev, file);
i915_gem_release(dev, file);
mutex_unlock(&dev->struct_mutex);
}
static void i915_driver_postclose(struct drm_device *dev, struct drm_file *file)
{
struct drm_i915_file_private *file_priv = file->driver_priv;
kfree(file_priv);
}
......@@ -1512,7 +1507,7 @@ static int i915_drm_suspend(struct drm_device *dev)
opregion_target_state = suspend_to_idle(dev_priv) ? PCI_D1 : PCI_D3cold;
intel_opregion_notify_adapter(dev_priv, opregion_target_state);
intel_uncore_forcewake_reset(dev_priv, false);
intel_uncore_suspend(dev_priv);
intel_opregion_unregister(dev_priv);
intel_fbdev_set_suspend(dev, FBINFO_STATE_SUSPENDED, true);
......@@ -1757,7 +1752,7 @@ static int i915_drm_resume_early(struct drm_device *dev)
DRM_ERROR("Resume prepare failed: %d, continuing anyway\n",
ret);
intel_uncore_early_sanitize(dev_priv, true);
intel_uncore_resume_early(dev_priv);
if (IS_GEN9_LP(dev_priv)) {
if (!dev_priv->suspended_to_idle)
......@@ -1820,12 +1815,15 @@ void i915_reset(struct drm_i915_private *dev_priv)
int ret;
lockdep_assert_held(&dev_priv->drm.struct_mutex);
GEM_BUG_ON(!test_bit(I915_RESET_BACKOFF, &error->flags));
if (!test_and_clear_bit(I915_RESET_IN_PROGRESS, &error->flags))
if (!test_bit(I915_RESET_HANDOFF, &error->flags))
return;
/* Clear any previous failed attempts at recovery. Time to try again. */
__clear_bit(I915_WEDGED, &error->flags);
if (!i915_gem_unset_wedged(dev_priv))
goto wakeup;
error->reset_count++;
pr_notice("drm/i915: Resetting chip after gpu hang\n");
......@@ -1871,15 +1869,18 @@ void i915_reset(struct drm_i915_private *dev_priv)
i915_queue_hangcheck(dev_priv);
wakeup:
finish:
i915_gem_reset_finish(dev_priv);
enable_irq(dev_priv->drm.irq);
wake_up_bit(&error->flags, I915_RESET_IN_PROGRESS);
wakeup:
clear_bit(I915_RESET_HANDOFF, &error->flags);
wake_up_bit(&error->flags, I915_RESET_HANDOFF);
return;
error:
i915_gem_set_wedged(dev_priv);
goto wakeup;
goto finish;
}
static int i915_pm_suspend(struct device *kdev)
......@@ -2402,7 +2403,7 @@ static int intel_runtime_suspend(struct device *kdev)
return ret;
}
intel_uncore_forcewake_reset(dev_priv, false);
intel_uncore_suspend(dev_priv);
enable_rpm_wakeref_asserts(dev_priv);
WARN_ON_ONCE(atomic_read(&dev_priv->pm.wakeref_count));
......@@ -2638,7 +2639,6 @@ static struct drm_driver driver = {
.release = i915_driver_release,
.open = i915_driver_open,
.lastclose = i915_driver_lastclose,
.preclose = i915_driver_preclose,
.postclose = i915_driver_postclose,
.set_busid = drm_pci_set_busid,
......
......@@ -79,8 +79,8 @@
#define DRIVER_NAME "i915"
#define DRIVER_DESC "Intel Graphics"
#define DRIVER_DATE "20170306"
#define DRIVER_TIMESTAMP 1488785683
#define DRIVER_DATE "20170320"
#define DRIVER_TIMESTAMP 1489994464
#undef WARN_ON
/* Many gcc versions seem to not see through this and fall over :( */
......@@ -489,10 +489,8 @@ struct i915_hotplug {
&(dev)->mode_config.encoder_list, \
base.head)
#define for_each_intel_connector(dev, intel_connector) \
list_for_each_entry(intel_connector, \
&(dev)->mode_config.connector_list, \
base.head)
#define for_each_intel_connector_iter(intel_connector, iter) \
while ((intel_connector = to_intel_connector(drm_connector_list_iter_next(iter))))
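The new iterator is meant to run inside the DRM connector-list iteration
helpers rather than walk mode_config.connector_list bare; a minimal sketch
(the init/teardown helpers are drm_connector_list_iter_get()/_put() around
this kernel revision, renamed _begin()/_end() in later kernels, so treat the
exact names as an assumption):

struct drm_connector_list_iter conn_iter;
struct intel_connector *intel_connector;

drm_connector_list_iter_get(dev, &conn_iter);
for_each_intel_connector_iter(intel_connector, &conn_iter) {
        /* the body is safe against concurrent connector hotplug */
}
drm_connector_list_iter_put(&conn_iter);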
#define for_each_encoder_on_crtc(dev, __crtc, intel_encoder) \
list_for_each_entry((intel_encoder), &(dev)->mode_config.encoder_list, base.head) \
......@@ -764,6 +762,7 @@ struct intel_uncore {
const struct intel_forcewake_range *fw_domains_table;
unsigned int fw_domains_table_entries;
struct notifier_block pmic_bus_access_nb;
struct intel_uncore_funcs funcs;
unsigned fifo_count;
......@@ -1324,7 +1323,7 @@ struct vlv_s0ix_state {
};
struct intel_rps_ei {
u32 cz_clock;
ktime_t ktime;
u32 render_c0;
u32 media_c0;
};
......@@ -1339,7 +1338,7 @@ struct intel_gen6_power_mgmt {
u32 pm_iir;
/* PM interrupt bits that should never be masked */
u32 pm_intr_keep;
u32 pm_intrmsk_mbz;
/* Frequencies are stored in potentially platform dependent multiples.
* In other words, *_freq needs to be multiplied by X to be interesting.
......@@ -1378,7 +1377,7 @@ struct intel_gen6_power_mgmt {
unsigned boosts;
/* manual wa residency calculations */
struct intel_rps_ei up_ei, down_ei;
struct intel_rps_ei ei;
/*
* Protects RPS/RC6 register access and PCU communication.
......@@ -1596,8 +1595,33 @@ struct i915_gpu_error {
*/
unsigned long reset_count;
/**
* flags: Control various stages of the GPU reset
*
* #I915_RESET_BACKOFF - When we start a reset, we want to stop any
* other users acquiring the struct_mutex. To do this we set the
* #I915_RESET_BACKOFF bit in the error flags when we detect a reset
* and then check for that bit before acquiring the struct_mutex (in
* i915_mutex_lock_interruptible()?). I915_RESET_BACKOFF serves a
* secondary role in preventing two concurrent global reset attempts.
*
* #I915_RESET_HANDOFF - To perform the actual GPU reset, we need the
* struct_mutex. We try to acquire the struct_mutex in the reset worker,
* but it may be held by some long running waiter (that we cannot
* interrupt without causing trouble). Once we are ready to do the GPU
* reset, we set the I915_RESET_HANDOFF bit and wakeup any waiters. If
* they already hold the struct_mutex and want to participate they can
* inspect the bit and do the reset directly, otherwise the worker
* waits for the struct_mutex.
*
* #I915_WEDGED - If reset fails and we can no longer use the GPU,
* we set the #I915_WEDGED bit. Prior to command submission, e.g.
* i915_gem_request_alloc(), this bit is checked and the sequence
* aborted (with -EIO reported to userspace) if set.
*/
unsigned long flags;
#define I915_RESET_IN_PROGRESS 0
#define I915_RESET_BACKOFF 0
#define I915_RESET_HANDOFF 1
#define I915_WEDGED (BITS_PER_LONG - 1)
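To make the handoff concrete, a condensed sketch of the waiter-side
participation described above (it mirrors __i915_wait_request_check_and_reset()
and i915_reset() as modified elsewhere in this diff; locking is shown only in
outline):

/* A waiter that already holds struct_mutex can pick up the reset
 * itself instead of waiting for the reset worker to get the lock.
 */
if (i915_reset_handoff(&dev_priv->gpu_error)) {
        __set_current_state(TASK_RUNNING);
        lockdep_assert_held(&dev_priv->drm.struct_mutex);
        i915_reset(dev_priv); /* clears I915_RESET_HANDOFF and wakes
                               * anyone sleeping on that flag */
}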
/**
......@@ -2376,9 +2400,6 @@ struct drm_i915_private {
} sagv_status;
struct {
/* protects DSPARB registers on pre-g4x/vlv/chv */
spinlock_t dsparb_lock;
/*
* Raw watermark latency values:
* in 0.1us units for WM0,
......@@ -2545,6 +2566,11 @@ static inline struct drm_i915_private *guc_to_i915(struct intel_guc *guc)
return container_of(guc, struct drm_i915_private, guc);
}
static inline struct drm_i915_private *huc_to_i915(struct intel_huc *huc)
{
return container_of(huc, struct drm_i915_private, huc);
}
/* Simple iterator over all initialised engines */
#define for_each_engine(engine__, dev_priv__, id__) \
for ((id__) = 0; \
......@@ -3057,14 +3083,12 @@ int intel_irq_install(struct drm_i915_private *dev_priv);
void intel_irq_uninstall(struct drm_i915_private *dev_priv);
extern void intel_uncore_sanitize(struct drm_i915_private *dev_priv);
extern void intel_uncore_early_sanitize(struct drm_i915_private *dev_priv,
bool restore_forcewake);
extern void intel_uncore_init(struct drm_i915_private *dev_priv);
extern bool intel_uncore_unclaimed_mmio(struct drm_i915_private *dev_priv);
extern bool intel_uncore_arm_unclaimed_mmio_detection(struct drm_i915_private *dev_priv);
extern void intel_uncore_fini(struct drm_i915_private *dev_priv);
extern void intel_uncore_forcewake_reset(struct drm_i915_private *dev_priv,
bool restore);
extern void intel_uncore_suspend(struct drm_i915_private *dev_priv);
extern void intel_uncore_resume_early(struct drm_i915_private *dev_priv);
const char *intel_uncore_forcewake_domain_to_str(const enum forcewake_domain_id id);
void intel_uncore_forcewake_get(struct drm_i915_private *dev_priv,
enum forcewake_domains domains);
......@@ -3356,8 +3380,8 @@ int i915_gem_obj_prepare_shmem_read(struct drm_i915_gem_object *obj,
unsigned int *needs_clflush);
int i915_gem_obj_prepare_shmem_write(struct drm_i915_gem_object *obj,
unsigned int *needs_clflush);
#define CLFLUSH_BEFORE 0x1
#define CLFLUSH_AFTER 0x2
#define CLFLUSH_BEFORE BIT(0)
#define CLFLUSH_AFTER BIT(1)
#define CLFLUSH_FLAGS (CLFLUSH_BEFORE | CLFLUSH_AFTER)
static inline void
......@@ -3388,9 +3412,14 @@ i915_gem_find_active_request(struct intel_engine_cs *engine);
void i915_gem_retire_requests(struct drm_i915_private *dev_priv);
static inline bool i915_reset_in_progress(struct i915_gpu_error *error)
static inline bool i915_reset_backoff(struct i915_gpu_error *error)
{
return unlikely(test_bit(I915_RESET_IN_PROGRESS, &error->flags));
return unlikely(test_bit(I915_RESET_BACKOFF, &error->flags));
}
static inline bool i915_reset_handoff(struct i915_gpu_error *error)
{
return unlikely(test_bit(I915_RESET_HANDOFF, &error->flags));
}
static inline bool i915_terminally_wedged(struct i915_gpu_error *error)
......@@ -3398,9 +3427,9 @@ static inline bool i915_terminally_wedged(struct i915_gpu_error *error)
return unlikely(test_bit(I915_WEDGED, &error->flags));
}
static inline bool i915_reset_in_progress_or_wedged(struct i915_gpu_error *error)
static inline bool i915_reset_backoff_or_wedged(struct i915_gpu_error *error)
{
return i915_reset_in_progress(error) | i915_terminally_wedged(error);
return i915_reset_backoff(error) | i915_terminally_wedged(error);
}
static inline u32 i915_reset_count(struct i915_gpu_error *error)
......@@ -3412,6 +3441,7 @@ int i915_gem_reset_prepare(struct drm_i915_private *dev_priv);
void i915_gem_reset(struct drm_i915_private *dev_priv);
void i915_gem_reset_finish(struct drm_i915_private *dev_priv);
void i915_gem_set_wedged(struct drm_i915_private *dev_priv);
bool i915_gem_unset_wedged(struct drm_i915_private *dev_priv);
void i915_gem_init_mmio(struct drm_i915_private *i915);
int __must_check i915_gem_init(struct drm_i915_private *dev_priv);
......@@ -3717,7 +3747,7 @@ static inline bool intel_gmbus_is_forced_bit(struct i2c_adapter *adapter)
extern void intel_i2c_reset(struct drm_i915_private *dev_priv);
/* intel_bios.c */
int intel_bios_init(struct drm_i915_private *dev_priv);
void intel_bios_init(struct drm_i915_private *dev_priv);
bool intel_bios_is_valid_vbt(const void *buf, size_t size);
bool intel_bios_is_tv_present(struct drm_i915_private *dev_priv);
bool intel_bios_is_lvds_present(struct drm_i915_private *dev_priv, u8 *i2c_pin);
......@@ -3880,6 +3910,8 @@ void vlv_phy_reset_lanes(struct intel_encoder *encoder);
int intel_gpu_freq(struct drm_i915_private *dev_priv, int val);
int intel_freq_opcode(struct drm_i915_private *dev_priv, int val);
u64 intel_rc6_residency_us(struct drm_i915_private *dev_priv,
const i915_reg_t reg);
#define I915_READ8(reg) dev_priv->uncore.funcs.mmio_readb(dev_priv, (reg), true)
#define I915_WRITE8(reg, val) dev_priv->uncore.funcs.mmio_writeb(dev_priv, (reg), (val), true)
......@@ -4087,7 +4119,6 @@ __i915_request_irq_complete(const struct drm_i915_gem_request *req)
if (engine->irq_seqno_barrier &&
test_and_clear_bit(ENGINE_IRQ_BREADCRUMB, &engine->irq_posted)) {
struct intel_breadcrumbs *b = &engine->breadcrumbs;
unsigned long flags;
/* The ordering of irq_posted versus applying the barrier
* is crucial. The clearing of the current irq_posted must
......@@ -4109,7 +4140,7 @@ __i915_request_irq_complete(const struct drm_i915_gem_request *req)
* the seqno before we believe it coherent since they see
* irq_posted == false but we are still running).
*/
spin_lock_irqsave(&b->irq_lock, flags);
spin_lock_irq(&b->irq_lock);
if (b->irq_wait && b->irq_wait->tsk != current)
/* Note that if the bottom-half is changed as we
* are sending the wake-up, the new bottom-half will
......@@ -4118,7 +4149,7 @@ __i915_request_irq_complete(const struct drm_i915_gem_request *req)
* ourself.
*/
wake_up_process(b->irq_wait->tsk);
spin_unlock_irqrestore(&b->irq_lock, flags);
spin_unlock_irq(&b->irq_lock);
if (__i915_gem_request_completed(req, seqno))
return true;
......
......@@ -96,8 +96,7 @@ struct drm_i915_gem_object *
i915_gem_batch_pool_get(struct i915_gem_batch_pool *pool,
size_t size)
{
struct drm_i915_gem_object *obj = NULL;
struct drm_i915_gem_object *tmp;
struct drm_i915_gem_object *obj;
struct list_head *list;
int n, ret;
......@@ -112,31 +111,29 @@ i915_gem_batch_pool_get(struct i915_gem_batch_pool *pool,
n = ARRAY_SIZE(pool->cache_list) - 1;
list = &pool->cache_list[n];
list_for_each_entry(tmp, list, batch_pool_link) {
list_for_each_entry(obj, list, batch_pool_link) {
/* The batches are strictly LRU ordered */
if (i915_gem_object_is_active(tmp))
if (i915_gem_object_is_active(obj)) {
if (!reservation_object_test_signaled_rcu(obj->resv,
true))
break;
GEM_BUG_ON(!reservation_object_test_signaled_rcu(tmp->resv,
true));
i915_gem_retire_requests(pool->engine->i915);
GEM_BUG_ON(i915_gem_object_is_active(obj));
}
if (tmp->base.size >= size) {
/* Clear the set of shared fences early */
reservation_object_lock(tmp->resv, NULL);
reservation_object_add_excl_fence(tmp->resv, NULL);
reservation_object_unlock(tmp->resv);
GEM_BUG_ON(!reservation_object_test_signaled_rcu(obj->resv,
true));
obj = tmp;
break;
}
if (obj->base.size >= size)
goto found;
}
if (obj == NULL) {
obj = i915_gem_object_create_internal(pool->engine->i915, size);
if (IS_ERR(obj))
return obj;
}
found:
ret = i915_gem_object_pin_pages(obj);
if (ret)
return ERR_PTR(ret);
......
......@@ -318,7 +318,6 @@ __create_hw_context(struct drm_i915_private *dev_priv,
ctx->ring_size = 4 * PAGE_SIZE;
ctx->desc_template =
default_desc_template(dev_priv, dev_priv->mm.aliasing_ppgtt);
ATOMIC_INIT_NOTIFIER_HEAD(&ctx->status_notifier);
/* GuC requires the ring to be placed above GUC_WOPCM_TOP. If GuC is not
* present or not in use we still need a small bias as ring wraparound
......@@ -934,7 +933,7 @@ int i915_gem_switch_to_kernel_context(struct drm_i915_private *dev_priv)
}
ret = i915_switch_context(req);
i915_add_request_no_flush(req);
i915_add_request(req);
if (ret)
return ret;
}
......
......@@ -158,9 +158,6 @@ struct i915_gem_context {
/** desc_template: invariant fields for the HW context descriptor */
u32 desc_template;
/** status_notifier: list of callbacks for context-switch changes */
struct atomic_notifier_head status_notifier;
/** guilty_count: How many times this context has caused a GPU hang. */
unsigned int guilty_count;
/**
......
......@@ -299,12 +299,12 @@ int i915_gem_evict_for_node(struct i915_address_space *vm,
* those as well to make room for our guard pages.
*/
if (check_color) {
if (vma->node.start + vma->node.size == node->start) {
if (vma->node.color == node->color)
if (node->start + node->size == target->start) {
if (node->color == target->color)
continue;
}
if (vma->node.start == node->start + node->size) {
if (vma->node.color == node->color)
if (node->start == target->start + target->size) {
if (node->color == target->color)
continue;
}
}
......
......@@ -248,7 +248,14 @@ static int fence_update(struct drm_i915_fence_reg *fence,
list_move(&fence->link, &fence->i915->mm.fence_list);
}
/* We only need to update the register itself if the device is awake.
* If the device is currently powered down, we will defer the write
* to the runtime resume, see i915_gem_restore_fences().
*/
if (intel_runtime_pm_get_if_in_use(fence->i915)) {
fence_write(fence, vma);
intel_runtime_pm_put(fence->i915);
}
if (vma) {
if (fence->vma != vma) {
......@@ -278,8 +285,6 @@ i915_vma_put_fence(struct i915_vma *vma)
{
struct drm_i915_fence_reg *fence = vma->fence;
assert_rpm_wakelock_held(vma->vm->i915);
if (!fence)
return 0;
......
......@@ -56,6 +56,9 @@ struct drm_i915_gem_object_ops {
struct sg_table *(*get_pages)(struct drm_i915_gem_object *);
void (*put_pages)(struct drm_i915_gem_object *, struct sg_table *);
int (*pwrite)(struct drm_i915_gem_object *,
const struct drm_i915_gem_pwrite *);
int (*dmabuf_export)(struct drm_i915_gem_object *);
void (*release)(struct drm_i915_gem_object *);
};
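A sketch of a backend supplying the new hook (the helper functions are
hypothetical and the -ENODEV fall-back convention is an assumption; only the
hook's signature comes from the struct above):

static int example_pwrite(struct drm_i915_gem_object *obj,
                          const struct drm_i915_gem_pwrite *args)
{
        /* args->data_ptr, args->offset and args->size describe the
         * user buffer and the destination range within the object */
        if (!example_can_write_directly(obj)) /* hypothetical check */
                return -ENODEV; /* assumed: caller falls back to the
                                 * generic pwrite paths */

        return example_write_backing_store(obj, args); /* hypothetical */
}

static const struct drm_i915_gem_object_ops example_ops = {
        .pwrite = example_pwrite,
};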
......@@ -270,12 +273,6 @@ static inline void i915_gem_object_unlock(struct drm_i915_gem_object *obj)
reservation_object_unlock(obj->resv);
}
static inline bool
i915_gem_object_is_dead(const struct drm_i915_gem_object *obj)
{
return kref_read(&obj->base.refcount) == 0;
}
static inline bool
i915_gem_object_has_struct_page(const struct drm_i915_gem_object *obj)
{
......
......@@ -1012,7 +1012,7 @@ bool __i915_spin_request(const struct drm_i915_gem_request *req,
static bool __i915_wait_request_check_and_reset(struct drm_i915_gem_request *request)
{
if (likely(!i915_reset_in_progress(&request->i915->gpu_error)))
if (likely(!i915_reset_handoff(&request->i915->gpu_error)))
return false;
__set_current_state(TASK_RUNNING);
......
......@@ -267,8 +267,6 @@ int i915_gem_request_await_dma_fence(struct drm_i915_gem_request *req,
void __i915_add_request(struct drm_i915_gem_request *req, bool flush_caches);
#define i915_add_request(req) \
__i915_add_request(req, true)
#define i915_add_request_no_flush(req) \
__i915_add_request(req, false)
void __i915_gem_request_submit(struct drm_i915_gem_request *request);
......@@ -348,6 +346,9 @@ static inline bool i915_spin_request(const struct drm_i915_gem_request *request,
u32 seqno;
seqno = i915_gem_request_global_seqno(request);
if (!seqno)
return 0;
return (__i915_gem_request_started(request, seqno) &&
__i915_spin_request(request, seqno, state, timeout_us));
}
......
......@@ -266,7 +266,7 @@ unsigned long i915_gem_shrink_all(struct drm_i915_private *dev_priv)
I915_SHRINK_ACTIVE);
intel_runtime_pm_put(dev_priv);
rcu_barrier(); /* wait until our RCU delayed slab frees are completed */
synchronize_rcu(); /* wait for our earlier RCU delayed slab frees */
return freed;
}
......
......@@ -66,13 +66,18 @@ static void cancel_userptr(struct work_struct *work)
{
struct i915_mmu_object *mo = container_of(work, typeof(*mo), work);
struct drm_i915_gem_object *obj = mo->obj;
struct drm_device *dev = obj->base.dev;
struct work_struct *active;
/* Cancel any active worker and force us to re-evaluate gup */
mutex_lock(&obj->mm.lock);
active = fetch_and_zero(&obj->userptr.work);
mutex_unlock(&obj->mm.lock);
if (active)
goto out;
i915_gem_object_wait(obj, I915_WAIT_ALL, MAX_SCHEDULE_TIMEOUT, NULL);
mutex_lock(&dev->struct_mutex);
/* Cancel any active worker and force us to re-evaluate gup */
obj->userptr.work = NULL;
mutex_lock(&obj->base.dev->struct_mutex);
/* We are inside a kthread context and can't be interrupted */
if (i915_gem_object_unbind(obj) == 0)
......@@ -83,8 +88,10 @@ static void cancel_userptr(struct work_struct *work)
atomic_read(&obj->mm.pages_pin_count),
obj->pin_display);
mutex_unlock(&obj->base.dev->struct_mutex);
out:
i915_gem_object_put(obj);
mutex_unlock(&dev->struct_mutex);
}
static void add_object(struct i915_mmu_object *mo)
......@@ -145,6 +152,7 @@ static void i915_gem_userptr_mn_invalidate_range_start(struct mmu_notifier *_mn,
del_object(mo);
spin_unlock(&mn->lock);
if (!list_empty(&cancelled))
flush_workqueue(mn->wq);
}
......@@ -541,6 +549,8 @@ __i915_gem_userptr_get_pages_worker(struct work_struct *_work)
}
obj->userptr.work = ERR_CAST(pages);
if (IS_ERR(pages))
__i915_gem_userptr_set_active(obj, false);
}
mutex_unlock(&obj->mm.lock);
......@@ -553,8 +563,7 @@ __i915_gem_userptr_get_pages_worker(struct work_struct *_work)
}
static struct sg_table *
__i915_gem_userptr_get_pages_schedule(struct drm_i915_gem_object *obj,
bool *active)
__i915_gem_userptr_get_pages_schedule(struct drm_i915_gem_object *obj)
{
struct get_pages_work *work;
......@@ -591,7 +600,6 @@ __i915_gem_userptr_get_pages_schedule(struct drm_i915_gem_object *obj,
INIT_WORK(&work->work, __i915_gem_userptr_get_pages_worker);
schedule_work(&work->work);
*active = true;
return ERR_PTR(-EAGAIN);
}
......@@ -599,10 +607,11 @@ static struct sg_table *
i915_gem_userptr_get_pages(struct drm_i915_gem_object *obj)
{
const int num_pages = obj->base.size >> PAGE_SHIFT;
struct mm_struct *mm = obj->userptr.mm->mm;
struct page **pvec;
struct sg_table *pages;
int pinned, ret;
bool active;
int pinned;
/* If userspace should engineer that these pages are replaced in
* the vma between us binding this page into the GTT and completion
......@@ -629,37 +638,39 @@ i915_gem_userptr_get_pages(struct drm_i915_gem_object *obj)
return ERR_PTR(-EAGAIN);
}
/* Let the mmu-notifier know that we have begun and need cancellation */
ret = __i915_gem_userptr_set_active(obj, true);
if (ret)
return ERR_PTR(ret);
pvec = NULL;
pinned = 0;
if (obj->userptr.mm->mm == current->mm) {
pvec = drm_malloc_gfp(num_pages, sizeof(struct page *),
GFP_TEMPORARY);
if (pvec == NULL) {
__i915_gem_userptr_set_active(obj, false);
return ERR_PTR(-ENOMEM);
}
pinned = __get_user_pages_fast(obj->userptr.ptr, num_pages,
!obj->userptr.read_only, pvec);
if (mm == current->mm) {
pvec = drm_malloc_gfp(num_pages, sizeof(struct page *),
GFP_TEMPORARY |
__GFP_NORETRY |
__GFP_NOWARN);
if (pvec) /* defer to worker if malloc fails */
pinned = __get_user_pages_fast(obj->userptr.ptr,
num_pages,
!obj->userptr.read_only,
pvec);
}
active = false;
if (pinned < 0)
pages = ERR_PTR(pinned), pinned = 0;
else if (pinned < num_pages)
pages = __i915_gem_userptr_get_pages_schedule(obj, &active);
else
if (pinned < 0) {
pages = ERR_PTR(pinned);
pinned = 0;
} else if (pinned < num_pages) {
pages = __i915_gem_userptr_get_pages_schedule(obj);
active = pages == ERR_PTR(-EAGAIN);
} else {
pages = __i915_gem_userptr_set_pages(obj, pvec, num_pages);
if (IS_ERR(pages)) {
__i915_gem_userptr_set_active(obj, active);
release_pages(pvec, pinned, 0);
active = !IS_ERR(pages);
}
if (active)
__i915_gem_userptr_set_active(obj, true);
if (IS_ERR(pages))
release_pages(pvec, pinned, 0);
drm_free_large(pvec);
return pages;
}
......
......@@ -59,6 +59,8 @@ struct i915_params i915 __read_mostly = {
.enable_guc_loading = 0,
.enable_guc_submission = 0,
.guc_log_level = -1,
.guc_firmware_path = NULL,
.huc_firmware_path = NULL,
.enable_dp_mst = true,
.inject_load_failure = 0,
.enable_dpcd_backlight = false,
......@@ -230,6 +232,14 @@ module_param_named(guc_log_level, i915.guc_log_level, int, 0400);
MODULE_PARM_DESC(guc_log_level,
"GuC firmware logging level (-1:disabled (default), 0-3:enabled)");
module_param_named_unsafe(guc_firmware_path, i915.guc_firmware_path, charp, 0400);
MODULE_PARM_DESC(guc_firmware_path,
"GuC firmware path to use instead of the default one");
module_param_named_unsafe(huc_firmware_path, i915.huc_firmware_path, charp, 0400);
MODULE_PARM_DESC(huc_firmware_path,
"HuC firmware path to use instead of the default one");
module_param_named_unsafe(enable_dp_mst, i915.enable_dp_mst, bool, 0600);
MODULE_PARM_DESC(enable_dp_mst,
"Enable multi-stream transport (MST) for new DisplayPort sinks. (default: true)");
......
......@@ -46,6 +46,8 @@
func(int, enable_guc_loading); \
func(int, enable_guc_submission); \
func(int, guc_log_level); \
func(char *, guc_firmware_path); \
func(char *, huc_firmware_path); \
func(int, use_mmio_flip); \
func(int, mmio_debug); \
func(int, edp_vswing); \
......
......@@ -1140,8 +1140,6 @@ enum skl_disp_power_wells {
#define VLV_BIAS_CPU_125_SOC_875 (6 << 2)
#define CHV_BIAS_CPU_50_SOC_50 (3 << 2)
#define VLV_CZ_CLOCK_TO_MILLI_SEC 100000
/* vlv2 north clock has */
#define CCK_FUSE_REG 0x8
#define CCK_FUSE_HPLL_FREQ_MASK 0x3
......@@ -7453,7 +7451,8 @@ enum {
#define VLV_RCEDATA _MMIO(0xA0BC)
#define GEN6_RC6pp_THRESHOLD _MMIO(0xA0C0)
#define GEN6_PMINTRMSK _MMIO(0xA168)
#define GEN8_PMINTR_REDIRECT_TO_GUC (1<<31)
#define GEN8_PMINTR_DISABLE_REDIRECT_TO_GUC (1<<31)
#define ARAT_EXPIRED_INTRMSK (1<<9)
#define GEN8_MISC_CTRL0 _MMIO(0xA180)
#define VLV_PWRDWNUPCTL _MMIO(0xA294)
#define GEN9_MEDIA_PG_IDLE_HYSTERESIS _MMIO(0xA0C4)
......
......@@ -42,32 +42,8 @@ static inline struct drm_i915_private *kdev_minor_to_i915(struct device *kdev)
static u32 calc_residency(struct drm_i915_private *dev_priv,
i915_reg_t reg)
{
u64 raw_time; /* 32b value may overflow during fixed point math */
u64 units = 128ULL, div = 100000ULL;
u32 ret;
if (!intel_enable_rc6())
return 0;
intel_runtime_pm_get(dev_priv);
/* On VLV and CHV, residency time is in CZ units rather than 1.28us */
if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
units = 1;
div = dev_priv->czclk_freq;
if (I915_READ(VLV_COUNTER_CONTROL) & VLV_COUNT_RANGE_HIGH)
units <<= 8;
} else if (IS_GEN9_LP(dev_priv)) {
units = 1;
div = 1200; /* 833.33ns */
}
raw_time = I915_READ(reg) * units;
ret = DIV_ROUND_UP_ULL(raw_time, div);
intel_runtime_pm_put(dev_priv);
return ret;
return DIV_ROUND_CLOSEST_ULL(intel_rc6_residency_us(dev_priv, reg),
1000);
}
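With this change all the unit handling lives in intel_rc6_residency_us(),
which returns microseconds, and the sysfs layer only converts to
milliseconds. A quick worked example (raw value illustrative):

/*
 * intel_rc6_residency_us(dev_priv, reg) == 1234567 (us)
 * => calc_residency() returns DIV_ROUND_CLOSEST_ULL(1234567, 1000) == 1235 (ms)
 */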
static ssize_t
......
......@@ -590,7 +590,7 @@ TRACE_EVENT(i915_gem_request_queue,
TP_fast_assign(
__entry->dev = req->i915->drm.primary->index;
__entry->ring = req->engine->id;
__entry->ctx = req->ctx->hw_id;
__entry->ctx = req->fence.context;
__entry->seqno = req->fence.seqno;
__entry->flags = flags;
),
......@@ -637,8 +637,8 @@ DECLARE_EVENT_CLASS(i915_gem_request,
TP_fast_assign(
__entry->dev = req->i915->drm.primary->index;
__entry->ctx = req->ctx->hw_id;
__entry->ring = req->engine->id;
__entry->ctx = req->fence.context;
__entry->seqno = req->fence.seqno;
__entry->global = req->global_seqno;
),
......@@ -681,7 +681,7 @@ DECLARE_EVENT_CLASS(i915_gem_request_hw,
TP_fast_assign(
__entry->dev = req->i915->drm.primary->index;
__entry->ring = req->engine->id;
__entry->ctx = req->ctx->hw_id;
__entry->ctx = req->fence.context;
__entry->seqno = req->fence.seqno;
__entry->global_seqno = req->global_seqno;
__entry->port = port;
......@@ -776,7 +776,7 @@ TRACE_EVENT(i915_gem_request_wait_begin,
TP_fast_assign(
__entry->dev = req->i915->drm.primary->index;
__entry->ring = req->engine->id;
__entry->ctx = req->ctx->hw_id;
__entry->ctx = req->fence.context;
__entry->seqno = req->fence.seqno;
__entry->global = req->global_seqno;
__entry->flags = flags;
......
......@@ -66,6 +66,8 @@
#define ptr_pack_bits(ptr, bits) \
((typeof(ptr))((unsigned long)(ptr) | (bits)))
#define ptr_offset(ptr, member) offsetof(typeof(*(ptr)), member)
#define fetch_and_zero(ptr) ({ \
typeof(*ptr) __T = *(ptr); \
*(ptr) = (typeof(*ptr))0; \
......
......@@ -218,13 +218,9 @@ int intel_vgt_balloon(struct drm_i915_private *dev_priv)
goto err;
}
/*
* No need to partition out the last physical page,
* because it is reserved to the guard page.
*/
if (unmappable_end < ggtt_end - PAGE_SIZE) {
if (unmappable_end < ggtt_end) {
ret = vgt_balloon_space(ggtt, &bl_info.space[3],
unmappable_end, ggtt_end - PAGE_SIZE);
unmappable_end, ggtt_end);
if (ret)
goto err;
}
......
......@@ -1341,6 +1341,7 @@ parse_device_mapping(struct drm_i915_private *dev_priv,
return;
}
/* Common defaults which may be overridden by VBT. */
static void
init_vbt_defaults(struct drm_i915_private *dev_priv)
{
......@@ -1377,6 +1378,18 @@ init_vbt_defaults(struct drm_i915_private *dev_priv)
&dev_priv->vbt.ddi_port_info[port];
info->hdmi_level_shift = HDMI_LEVEL_SHIFT_UNKNOWN;
}
}
/* Defaults to initialize only if there is no VBT. */
static void
init_vbt_missing_defaults(struct drm_i915_private *dev_priv)
{
enum port port;
for (port = PORT_A; port < I915_MAX_PORTS; port++) {
struct ddi_vbt_port_info *info =
&dev_priv->vbt.ddi_port_info[port];
info->supports_dvi = (port != PORT_A && port != PORT_E);
info->supports_hdmi = info->supports_dvi;
......@@ -1462,36 +1475,35 @@ static const struct vbt_header *find_vbt(void __iomem *bios, size_t size)
* intel_bios_init - find VBT and initialize settings from the BIOS
* @dev_priv: i915 device instance
*
* Loads the Video BIOS and checks that the VBT exists. Sets scratch registers
* to appropriate values.
*
* Returns 0 on success, nonzero on failure.
* Parse and initialize settings from the Video BIOS Tables (VBT). If the VBT
* is not found in the ACPI OpRegion, try to find it in the PCI ROM instead.
* Also initialize some defaults if the VBT is not present at all.
*/
int
intel_bios_init(struct drm_i915_private *dev_priv)
void intel_bios_init(struct drm_i915_private *dev_priv)
{
struct pci_dev *pdev = dev_priv->drm.pdev;
const struct vbt_header *vbt = dev_priv->opregion.vbt;
const struct bdb_header *bdb;
u8 __iomem *bios = NULL;
if (HAS_PCH_NOP(dev_priv))
return -ENODEV;
if (HAS_PCH_NOP(dev_priv)) {
DRM_DEBUG_KMS("Skipping VBT init due to disabled display.\n");
return;
}
init_vbt_defaults(dev_priv);
/* If the OpRegion does not have VBT, look in PCI ROM. */
if (!vbt) {
size_t size;
bios = pci_map_rom(pdev, &size);
if (!bios)
return -1;
goto out;
vbt = find_vbt(bios, size);
if (!vbt) {
pci_unmap_rom(pdev, bios);
return -1;
}
if (!vbt)
goto out;
DRM_DEBUG_KMS("Found valid VBT in PCI ROM\n");
}
......@@ -1516,10 +1528,14 @@ intel_bios_init(struct drm_i915_private *dev_priv)
parse_mipi_sequence(dev_priv, bdb);
parse_ddi_ports(dev_priv, bdb);
out:
if (!vbt) {
DRM_INFO("Failed to find VBIOS tables (VBT)\n");
init_vbt_missing_defaults(dev_priv);
}
if (bios)
pci_unmap_rom(pdev, bios);
return 0;
}
/**
......
......@@ -47,12 +47,11 @@ static unsigned int __intel_breadcrumbs_wakeup(struct intel_breadcrumbs *b)
unsigned int intel_engine_wakeup(struct intel_engine_cs *engine)
{
struct intel_breadcrumbs *b = &engine->breadcrumbs;
unsigned long flags;
unsigned int result;
spin_lock_irqsave(&b->irq_lock, flags);
spin_lock_irq(&b->irq_lock);
result = __intel_breadcrumbs_wakeup(b);
spin_unlock_irqrestore(&b->irq_lock, flags);
spin_unlock_irq(&b->irq_lock);
return result;
}
......@@ -86,7 +85,7 @@ static void intel_breadcrumbs_hangcheck(unsigned long data)
return;
}
/* We keep the hangcheck time alive until we disarm the irq, even
/* We keep the hangcheck timer alive until we disarm the irq, even
* if there are no waiters at present.
*
* If the waiter was currently running, assume it hasn't had a chance
......@@ -110,20 +109,18 @@ static void intel_breadcrumbs_fake_irq(unsigned long data)
{
struct intel_engine_cs *engine = (struct intel_engine_cs *)data;
struct intel_breadcrumbs *b = &engine->breadcrumbs;
unsigned long flags;
/*
* The timer persists in case we cannot enable interrupts,
/* The timer persists in case we cannot enable interrupts,
* or if we have previously seen seqno/interrupt incoherency
* ("missed interrupt" syndrome). Here the worker will wake up
* every jiffie in order to kick the oldest waiter to do the
* coherent seqno check.
* ("missed interrupt" syndrome, better known as a "missed breadcrumb").
* Here the worker will wake up every jiffie in order to kick the
* oldest waiter to do the coherent seqno check.
*/
spin_lock_irqsave(&b->irq_lock, flags);
spin_lock_irq(&b->irq_lock);
if (!__intel_breadcrumbs_wakeup(b))
__intel_engine_disarm_breadcrumbs(engine);
spin_unlock_irqrestore(&b->irq_lock, flags);
spin_unlock_irq(&b->irq_lock);
if (!b->irq_armed)
return;
......@@ -168,6 +165,7 @@ void __intel_engine_disarm_breadcrumbs(struct intel_engine_cs *engine)
struct intel_breadcrumbs *b = &engine->breadcrumbs;
lockdep_assert_held(&b->irq_lock);
GEM_BUG_ON(b->irq_wait);
if (b->irq_enabled) {
irq_disable(engine);
......@@ -180,23 +178,31 @@ void __intel_engine_disarm_breadcrumbs(struct intel_engine_cs *engine)
void intel_engine_disarm_breadcrumbs(struct intel_engine_cs *engine)
{
struct intel_breadcrumbs *b = &engine->breadcrumbs;
unsigned long flags;
struct intel_wait *wait, *n, *first;
if (!b->irq_armed)
return;
spin_lock_irqsave(&b->irq_lock, flags);
/* We only disarm the irq when we are idle (all requests completed),
* so if there remains a sleeping waiter, it missed the request
* so if the bottom-half remains asleep, it missed the request
* completion.
*/
if (__intel_breadcrumbs_wakeup(b) & ENGINE_WAKEUP_ASLEEP)
missed_breadcrumb(engine);
spin_lock_irq(&b->rb_lock);
spin_lock(&b->irq_lock);
first = fetch_and_zero(&b->irq_wait);
__intel_engine_disarm_breadcrumbs(engine);
spin_unlock(&b->irq_lock);
rbtree_postorder_for_each_entry_safe(wait, n, &b->waiters, node) {
RB_CLEAR_NODE(&wait->node);
if (wake_up_process(wait->tsk) && wait == first)
missed_breadcrumb(engine);
}
b->waiters = RB_ROOT;
spin_unlock_irqrestore(&b->irq_lock, flags);
spin_unlock_irq(&b->rb_lock);
}
static bool use_fake_irq(const struct intel_breadcrumbs *b)
......@@ -280,9 +286,15 @@ static inline void __intel_breadcrumbs_finish(struct intel_breadcrumbs *b,
struct intel_wait *wait)
{
lockdep_assert_held(&b->rb_lock);
GEM_BUG_ON(b->irq_wait == wait);
/* This request is completed, so remove it from the tree, mark it as
* complete, and *then* wake up the associated task.
* complete, and *then* wake up the associated task. N.B. when the
* task wakes up, it will find the empty rb_node, discern that it
* has already been removed from the tree and skip the serialisation
* of the b->rb_lock and b->irq_lock. This means that the destruction
* of the intel_wait is not serialised with the interrupt handler
* by the waiter - it must instead be serialised by the caller.
*/
rb_erase(&wait->node, &b->waiters);
RB_CLEAR_NODE(&wait->node);
......@@ -297,6 +309,7 @@ static inline void __intel_breadcrumbs_next(struct intel_engine_cs *engine,
spin_lock(&b->irq_lock);
GEM_BUG_ON(!b->irq_armed);
GEM_BUG_ON(!b->irq_wait);
b->irq_wait = to_wait(next);
spin_unlock(&b->irq_lock);
......@@ -372,25 +385,8 @@ static bool __intel_engine_add_wait(struct intel_engine_cs *engine,
rb_link_node(&wait->node, parent, p);
rb_insert_color(&wait->node, &b->waiters);
if (completed) {
struct rb_node *next = rb_next(completed);
GEM_BUG_ON(!next && !first);
if (next && next != &wait->node) {
GEM_BUG_ON(first);
__intel_breadcrumbs_next(engine, next);
}
do {
struct intel_wait *crumb = to_wait(completed);
completed = rb_prev(completed);
__intel_breadcrumbs_finish(b, crumb);
} while (completed);
}
if (first) {
spin_lock(&b->irq_lock);
GEM_BUG_ON(rb_first(&b->waiters) != &wait->node);
b->irq_wait = wait;
/* After assigning ourselves as the new bottom-half, we must
* perform a cursory check to prevent a missed interrupt.
......@@ -403,7 +399,28 @@ static bool __intel_engine_add_wait(struct intel_engine_cs *engine,
__intel_breadcrumbs_enable_irq(b);
spin_unlock(&b->irq_lock);
}
if (completed) {
/* Advance the bottom-half (b->irq_wait) before we wake up
* the waiters who may scribble over their intel_wait
* just as the interrupt handler is dereferencing it via
* b->irq_wait.
*/
if (!first) {
struct rb_node *next = rb_next(completed);
GEM_BUG_ON(next == &wait->node);
__intel_breadcrumbs_next(engine, next);
}
do {
struct intel_wait *crumb = to_wait(completed);
completed = rb_prev(completed);
__intel_breadcrumbs_finish(b, crumb);
} while (completed);
}
GEM_BUG_ON(!b->irq_wait);
GEM_BUG_ON(!b->irq_armed);
GEM_BUG_ON(rb_first(&b->waiters) != &b->irq_wait->node);
return first;
......@@ -505,8 +522,10 @@ void intel_engine_remove_wait(struct intel_engine_cs *engine,
* the tree by the bottom-half to avoid contention on the spinlock
* by the herd.
*/
if (RB_EMPTY_NODE(&wait->node))
if (RB_EMPTY_NODE(&wait->node)) {
GEM_BUG_ON(READ_ONCE(b->irq_wait) == wait);
return;
}
spin_lock_irq(&b->rb_lock);
__intel_engine_remove_wait(engine, wait);
......@@ -643,7 +662,7 @@ void intel_engine_enable_signaling(struct drm_i915_gem_request *request)
/* Note that we may be called from an interrupt handler on another
* device (e.g. nouveau signaling a fence completion causing us
* to submit a request, and so enable signaling). As such,
* we need to make sure that all other users of b->lock protect
* we need to make sure that all other users of b->rb_lock protect
* against interrupts, i.e. use spin_lock_irqsave.
*/
......@@ -822,12 +841,12 @@ bool intel_breadcrumbs_busy(struct intel_engine_cs *engine)
if (b->irq_wait) {
wake_up_process(b->irq_wait->tsk);
busy |= intel_engine_flag(engine);
busy = true;
}
if (rcu_access_pointer(b->first_signal)) {
wake_up_process(b->signaler);
busy |= intel_engine_flag(engine);
busy = true;
}
spin_unlock_irq(&b->rb_lock);
......
......@@ -223,7 +223,7 @@ static unsigned int intel_hpll_vco(struct drm_i915_private *dev_priv)
/* FIXME other chipsets? */
if (IS_GM45(dev_priv))
vco_table = ctg_vco;
else if (IS_G4X(dev_priv))
else if (IS_G45(dev_priv))
vco_table = elk_vco;
else if (IS_I965GM(dev_priv))
vco_table = cl_vco;
......@@ -1470,7 +1470,7 @@ static int intel_max_pixel_rate(struct drm_atomic_state *state)
memcpy(intel_state->min_pixclk, dev_priv->min_pixclk,
sizeof(intel_state->min_pixclk));
for_each_crtc_in_state(state, crtc, cstate, i) {
for_each_new_crtc_in_state(state, crtc, cstate, i) {
int pixel_rate;
crtc_state = to_intel_crtc_state(cstate);
......@@ -1859,7 +1859,7 @@ void intel_init_cdclk_hooks(struct drm_i915_private *dev_priv)
dev_priv->display.get_cdclk = fixed_450mhz_get_cdclk;
else if (IS_GM45(dev_priv))
dev_priv->display.get_cdclk = gm45_get_cdclk;
else if (IS_G4X(dev_priv))
else if (IS_G45(dev_priv))
dev_priv->display.get_cdclk = g33_get_cdclk;
else if (IS_I965GM(dev_priv))
dev_priv->display.get_cdclk = i965gm_get_cdclk;
......
......@@ -465,14 +465,14 @@ static void glk_load_degamma_lut(struct drm_crtc_state *state)
* different values per channel, so this just loads a linear table.
*/
for (i = 0; i < lut_size; i++) {
uint32_t v = (i * ((1 << 16) - 1)) / (lut_size - 1);
uint32_t v = (i * (1 << 16)) / (lut_size - 1);
I915_WRITE(PRE_CSC_GAMC_DATA(pipe), v);
}
/* Clamp values > 1.0. */
while (i++ < 35)
I915_WRITE(PRE_CSC_GAMC_DATA(pipe), (1 << 16) - 1);
I915_WRITE(PRE_CSC_GAMC_DATA(pipe), (1 << 16));
}
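A quick worked check of the changed formula (lut_size = 33 is assumed here
purely for illustration):

/*
 * v(i) = i * (1 << 16) / (lut_size - 1)
 *
 *   v(0)  =     0  (0.0)
 *   v(16) = 32768  (0.5)
 *   v(32) = 65536  (exactly 1.0)
 *
 * The old numerator, (1 << 16) - 1, topped out at 65535, leaving the last
 * entry just short of 1.0; the clamp entries written afterwards likewise
 * move from 65535 to 65536.
 */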
static void glk_load_luts(struct drm_crtc_state *state)
......
......@@ -34,9 +34,8 @@
* low-power state and comes back to normal.
*/
#define I915_CSR_GLK "i915/glk_dmc_ver1_03.bin"
MODULE_FIRMWARE(I915_CSR_GLK);
#define GLK_CSR_VERSION_REQUIRED CSR_VERSION(1, 3)
#define I915_CSR_GLK "i915/glk_dmc_ver1_04.bin"
#define GLK_CSR_VERSION_REQUIRED CSR_VERSION(1, 4)
#define I915_CSR_KBL "i915/kbl_dmc_ver1_01.bin"
MODULE_FIRMWARE(I915_CSR_KBL);
......
......@@ -821,11 +821,11 @@ static struct intel_encoder *
intel_ddi_get_crtc_encoder(struct intel_crtc *crtc)
{
struct drm_device *dev = crtc->base.dev;
struct intel_encoder *intel_encoder, *ret = NULL;
struct intel_encoder *encoder, *ret = NULL;
int num_encoders = 0;
for_each_encoder_on_crtc(dev, &crtc->base, intel_encoder) {
ret = intel_encoder;
for_each_encoder_on_crtc(dev, &crtc->base, encoder) {
ret = encoder;
num_encoders++;
}
......@@ -837,7 +837,7 @@ intel_ddi_get_crtc_encoder(struct intel_crtc *crtc)
return ret;
}
struct intel_encoder *
static struct intel_encoder *
intel_ddi_get_crtc_new_encoder(struct intel_crtc_state *crtc_state)
{
struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
......@@ -850,7 +850,7 @@ intel_ddi_get_crtc_new_encoder(struct intel_crtc_state *crtc_state)
state = crtc_state->base.state;
for_each_connector_in_state(state, connector, connector_state, i) {
for_each_new_connector_in_state(state, connector, connector_state, i) {
if (connector_state->crtc != crtc_state->base.crtc)
continue;
......@@ -1130,12 +1130,12 @@ void intel_ddi_clock_get(struct intel_encoder *encoder,
static bool
hsw_ddi_pll_select(struct intel_crtc *intel_crtc,
struct intel_crtc_state *crtc_state,
struct intel_encoder *intel_encoder)
struct intel_encoder *encoder)
{
struct intel_shared_dpll *pll;
pll = intel_get_shared_dpll(intel_crtc, crtc_state,
intel_encoder);
encoder);
if (!pll)
DRM_DEBUG_DRIVER("failed to find PLL for pipe %c\n",
pipe_name(intel_crtc->pipe));
......@@ -1146,11 +1146,11 @@ hsw_ddi_pll_select(struct intel_crtc *intel_crtc,
static bool
skl_ddi_pll_select(struct intel_crtc *intel_crtc,
struct intel_crtc_state *crtc_state,
struct intel_encoder *intel_encoder)
struct intel_encoder *encoder)
{
struct intel_shared_dpll *pll;
pll = intel_get_shared_dpll(intel_crtc, crtc_state, intel_encoder);
pll = intel_get_shared_dpll(intel_crtc, crtc_state, encoder);
if (pll == NULL) {
DRM_DEBUG_DRIVER("failed to find PLL for pipe %c\n",
pipe_name(intel_crtc->pipe));
......@@ -1163,9 +1163,9 @@ skl_ddi_pll_select(struct intel_crtc *intel_crtc,
static bool
bxt_ddi_pll_select(struct intel_crtc *intel_crtc,
struct intel_crtc_state *crtc_state,
struct intel_encoder *intel_encoder)
struct intel_encoder *encoder)
{
return !!intel_get_shared_dpll(intel_crtc, crtc_state, intel_encoder);
return !!intel_get_shared_dpll(intel_crtc, crtc_state, encoder);
}
/*
......@@ -1179,27 +1179,27 @@ bool intel_ddi_pll_select(struct intel_crtc *intel_crtc,
struct intel_crtc_state *crtc_state)
{
struct drm_i915_private *dev_priv = to_i915(intel_crtc->base.dev);
struct intel_encoder *intel_encoder =
struct intel_encoder *encoder =
intel_ddi_get_crtc_new_encoder(crtc_state);
if (IS_GEN9_BC(dev_priv))
return skl_ddi_pll_select(intel_crtc, crtc_state,
intel_encoder);
encoder);
else if (IS_GEN9_LP(dev_priv))
return bxt_ddi_pll_select(intel_crtc, crtc_state,
intel_encoder);
encoder);
else
return hsw_ddi_pll_select(intel_crtc, crtc_state,
intel_encoder);
encoder);
}
void intel_ddi_set_pipe_settings(const struct intel_crtc_state *crtc_state)
{
struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
struct intel_encoder *intel_encoder = intel_ddi_get_crtc_encoder(crtc);
struct intel_encoder *encoder = intel_ddi_get_crtc_encoder(crtc);
enum transcoder cpu_transcoder = crtc_state->cpu_transcoder;
int type = intel_encoder->type;
int type = encoder->type;
uint32_t temp;
if (type == INTEL_OUTPUT_DP || type == INTEL_OUTPUT_EDP || type == INTEL_OUTPUT_DP_MST) {
......@@ -1244,12 +1244,12 @@ void intel_ddi_set_vc_payload_alloc(const struct intel_crtc_state *crtc_state,
void intel_ddi_enable_transcoder_func(const struct intel_crtc_state *crtc_state)
{
struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
struct intel_encoder *intel_encoder = intel_ddi_get_crtc_encoder(crtc);
struct intel_encoder *encoder = intel_ddi_get_crtc_encoder(crtc);
struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
enum pipe pipe = crtc->pipe;
enum transcoder cpu_transcoder = crtc_state->cpu_transcoder;
enum port port = intel_ddi_get_encoder_port(intel_encoder);
int type = intel_encoder->type;
enum port port = intel_ddi_get_encoder_port(encoder);
int type = encoder->type;
uint32_t temp;
/* Enable TRANS_DDI_FUNC_CTL for the pipe to work in HDMI mode */
......@@ -1321,7 +1321,7 @@ void intel_ddi_enable_transcoder_func(const struct intel_crtc_state *crtc_state)
temp |= DDI_PORT_WIDTH(crtc_state->lane_count);
} else {
WARN(1, "Invalid encoder type %d for pipe %c\n",
intel_encoder->type, pipe_name(pipe));
encoder->type, pipe_name(pipe));
}
I915_WRITE(TRANS_DDI_FUNC_CTL(cpu_transcoder), temp);
......@@ -1342,19 +1342,19 @@ bool intel_ddi_connector_get_hw_state(struct intel_connector *intel_connector)
{
struct drm_device *dev = intel_connector->base.dev;
struct drm_i915_private *dev_priv = to_i915(dev);
struct intel_encoder *intel_encoder = intel_connector->encoder;
struct intel_encoder *encoder = intel_connector->encoder;
int type = intel_connector->base.connector_type;
enum port port = intel_ddi_get_encoder_port(intel_encoder);
enum port port = intel_ddi_get_encoder_port(encoder);
enum pipe pipe = 0;
enum transcoder cpu_transcoder;
uint32_t tmp;
bool ret;
if (!intel_display_power_get_if_enabled(dev_priv,
intel_encoder->power_domain))
encoder->power_domain))
return false;
if (!intel_encoder->get_hw_state(intel_encoder, &pipe)) {
if (!encoder->get_hw_state(encoder, &pipe)) {
ret = false;
goto out;
}
......@@ -1393,7 +1393,7 @@ bool intel_ddi_connector_get_hw_state(struct intel_connector *intel_connector)
}
out:
intel_display_power_put(dev_priv, intel_encoder->power_domain);
intel_display_power_put(dev_priv, encoder->power_domain);
return ret;
}
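
The readout above follows a conditional acquire/release bracket: intel_display_power_get_if_enabled() only succeeds when the power domain is already up, and every exit path funnels through a single label that drops the reference. A standalone model of that shape, with stub helpers standing in for the driver's power-domain API:

	#include <stdbool.h>
	#include <stdio.h>

	/* Toy stand-ins; the real helpers take a drm_i915_private pointer
	 * and an enum intel_display_power_domain. */
	static bool power_get_if_enabled(int domain) { return domain > 0; }
	static void power_put(int domain) { (void)domain; }

	static bool connector_get_hw_state(int domain, bool encoder_active)
	{
		bool ret;

		if (!power_get_if_enabled(domain))
			return false;	/* powered down: nothing to read out */

		if (!encoder_active) {
			ret = false;
			goto out;
		}

		ret = true;	/* the driver matches pipe/transcoder state here */
	out:
		power_put(domain);	/* single release point for all paths */
		return ret;
	}

	int main(void)
	{
		printf("%d\n", connector_get_hw_state(1, true));
		return 0;
	}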
......@@ -1486,8 +1486,8 @@ void intel_ddi_enable_pipe_clock(const struct intel_crtc_state *crtc_state)
{
struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
struct intel_encoder *intel_encoder = intel_ddi_get_crtc_encoder(crtc);
enum port port = intel_ddi_get_encoder_port(intel_encoder);
struct intel_encoder *encoder = intel_ddi_get_crtc_encoder(crtc);
enum port port = intel_ddi_get_encoder_port(encoder);
enum transcoder cpu_transcoder = crtc_state->cpu_transcoder;
if (cpu_transcoder != TRANSCODER_EDP)
......@@ -1762,14 +1762,14 @@ static void intel_ddi_pre_enable_hdmi(struct intel_encoder *encoder,
crtc_state, conn_state);
}
static void intel_ddi_pre_enable(struct intel_encoder *intel_encoder,
static void intel_ddi_pre_enable(struct intel_encoder *encoder,
struct intel_crtc_state *pipe_config,
struct drm_connector_state *conn_state)
{
int type = intel_encoder->type;
int type = encoder->type;
if (type == INTEL_OUTPUT_DP || type == INTEL_OUTPUT_EDP) {
intel_ddi_pre_enable_dp(intel_encoder,
intel_ddi_pre_enable_dp(encoder,
pipe_config->port_clock,
pipe_config->lane_count,
pipe_config->shared_dpll,
......@@ -1777,7 +1777,7 @@ static void intel_ddi_pre_enable(struct intel_encoder *intel_encoder,
INTEL_OUTPUT_DP_MST));
}
if (type == INTEL_OUTPUT_HDMI) {
intel_ddi_pre_enable_hdmi(intel_encoder,
intel_ddi_pre_enable_hdmi(encoder,
pipe_config->has_hdmi_sink,
pipe_config, conn_state,
pipe_config->shared_dpll);
......@@ -1836,11 +1836,11 @@ static void intel_ddi_post_disable(struct intel_encoder *intel_encoder,
}
}
void intel_ddi_fdi_post_disable(struct intel_encoder *intel_encoder,
void intel_ddi_fdi_post_disable(struct intel_encoder *encoder,
struct intel_crtc_state *old_crtc_state,
struct drm_connector_state *old_conn_state)
{
struct drm_i915_private *dev_priv = to_i915(intel_encoder->base.dev);
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
uint32_t val;
/*
......@@ -1853,7 +1853,7 @@ void intel_ddi_fdi_post_disable(struct intel_encoder *intel_encoder,
val &= ~FDI_RX_ENABLE;
I915_WRITE(FDI_RX_CTL(PIPE_A), val);
intel_ddi_post_disable(intel_encoder, old_crtc_state, old_conn_state);
intel_ddi_post_disable(encoder, old_crtc_state, old_conn_state);
val = I915_READ(FDI_RX_MISC(PIPE_A));
val &= ~(FDI_RX_PWRDN_LANE1_MASK | FDI_RX_PWRDN_LANE0_MASK);
......
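
The FDI disable path above is a classic read-modify-write: read FDI_RX_CTL, clear only the enable bit, and write the value back so the remaining bits survive. A compilable toy version of the same shape (the register and bit position are stand-ins, not the hardware's layout):

	#include <stdint.h>
	#include <stdio.h>

	#define FDI_RX_ENABLE	(1u << 31)	/* illustrative bit */

	static uint32_t fake_fdi_rx_ctl = 0xffffffffu;	/* stand-in register */

	static uint32_t reg_read(void) { return fake_fdi_rx_ctl; }
	static void reg_write(uint32_t v) { fake_fdi_rx_ctl = v; }

	int main(void)
	{
		uint32_t val = reg_read();

		val &= ~FDI_RX_ENABLE;	/* clear only the enable bit */
		reg_write(val);

		printf("%#x\n", (unsigned)fake_fdi_rx_ctl);	/* 0x7fffffff */
		return 0;
	}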
......@@ -197,8 +197,10 @@ static void gen9_sseu_info_init(struct drm_i915_private *dev_priv)
IS_GEN9_LP(dev_priv) && sseu_subslice_total(sseu) > 1;
sseu->has_eu_pg = sseu->eu_per_subslice > 2;
if (IS_BROXTON(dev_priv)) {
if (IS_GEN9_LP(dev_priv)) {
#define IS_SS_DISABLED(ss) (!(sseu->subslice_mask & BIT(ss)))
info->has_pooled_eu = hweight8(sseu->subslice_mask) == 3;
/*
* There is a HW issue in 2x6 fused down parts that requires
* Pooled EU to be enabled as a WA. The pool configuration
......@@ -206,9 +208,8 @@ static void gen9_sseu_info_init(struct drm_i915_private *dev_priv)
* doesn't affect if the device has all 3 subslices enabled.
*/
/* WaEnablePooledEuFor2x6:bxt */
info->has_pooled_eu = ((hweight8(sseu->subslice_mask) == 3) ||
(hweight8(sseu->subslice_mask) == 2 &&
INTEL_REVID(dev_priv) < BXT_REVID_C0));
info->has_pooled_eu |= (hweight8(sseu->subslice_mask) == 2 &&
IS_BXT_REVID(dev_priv, 0, BXT_REVID_B_LAST));
sseu->min_eu_in_pool = 0;
if (info->has_pooled_eu) {
......
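
The rewritten workaround check is now additive: has_pooled_eu is set for every GEN9 LP part with all three subslices enabled, then OR-ed with the 2x6 fused-down case that applies up to the last B stepping (WaEnablePooledEuFor2x6). A standalone model of the predicate, with hweight8() replaced by a local popcount and the revision encoding simplified to plain integers:

	#include <stdbool.h>
	#include <stdint.h>
	#include <stdio.h>

	/* popcount stand-in for the kernel's hweight8() */
	static int popcount8(uint8_t v)
	{
		int n = 0;
		for (; v; v &= v - 1)
			n++;
		return n;
	}

	static bool has_pooled_eu(uint8_t subslice_mask, int revid, int revid_b_last)
	{
		/* All three subslices present: pooled EU is always on. */
		bool pooled = popcount8(subslice_mask) == 3;

		/* WA: 2x6 fused-down parts need pooled EU up to and
		 * including the last B stepping. */
		pooled |= popcount8(subslice_mask) == 2 && revid <= revid_b_last;

		return pooled;
	}

	int main(void)
	{
		printf("%d %d\n",
		       has_pooled_eu(0x7, 2, 1),	/* 3 subslices: 1 */
		       has_pooled_eu(0x3, 2, 1));	/* 2 subslices, post-B: 0 */
		return 0;
	}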
......@@ -1188,7 +1188,13 @@ void gen6_disable_pm_irq(struct drm_i915_private *dev_priv, uint32_t mask);
void gen6_reset_rps_interrupts(struct drm_i915_private *dev_priv);
void gen6_enable_rps_interrupts(struct drm_i915_private *dev_priv);
void gen6_disable_rps_interrupts(struct drm_i915_private *dev_priv);
u32 gen6_sanitize_rps_pm_mask(struct drm_i915_private *dev_priv, u32 mask);
static inline u32 gen6_sanitize_rps_pm_mask(const struct drm_i915_private *i915,
u32 mask)
{
return mask & ~i915->rps.pm_intrmsk_mbz;
}
void intel_runtime_pm_disable_interrupts(struct drm_i915_private *dev_priv);
void intel_runtime_pm_enable_interrupts(struct drm_i915_private *dev_priv);
static inline bool intel_irqs_enabled(struct drm_i915_private *dev_priv)
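
gen6_sanitize_rps_pm_mask() becomes a one-line static inline in the header: strip whatever bits the platform marks as must-be-zero before the mask reaches the hardware. A compilable model of the same operation (the struct is pared down and the mbz bit value is made up):

	#include <stdint.h>
	#include <stdio.h>

	struct rps {
		uint32_t pm_intrmsk_mbz;	/* must-be-zero bits for this platform */
	};

	/* AND the requested mask against the complement of the mbz bits,
	 * mirroring the new inline. */
	static inline uint32_t sanitize_rps_pm_mask(const struct rps *rps, uint32_t mask)
	{
		return mask & ~rps->pm_intrmsk_mbz;
	}

	int main(void)
	{
		struct rps rps = { .pm_intrmsk_mbz = 0x80000000u };

		printf("%#x\n", (unsigned)sanitize_rps_pm_mask(&rps, 0xffffffffu));
		return 0;
	}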
......@@ -1239,8 +1245,6 @@ bool intel_ddi_is_audio_enabled(struct drm_i915_private *dev_priv,
struct intel_crtc *intel_crtc);
void intel_ddi_get_config(struct intel_encoder *encoder,
struct intel_crtc_state *pipe_config);
struct intel_encoder *
intel_ddi_get_crtc_new_encoder(struct intel_crtc_state *crtc_state);
void intel_ddi_init_dp_buf_reg(struct intel_encoder *encoder);
void intel_ddi_clock_get(struct intel_encoder *encoder,
......@@ -1250,12 +1254,8 @@ void intel_ddi_set_vc_payload_alloc(const struct intel_crtc_state *crtc_state,
uint32_t ddi_signal_levels(struct intel_dp *intel_dp);
u8 intel_ddi_dp_voltage_max(struct intel_encoder *encoder);
unsigned int intel_fb_align_height(struct drm_i915_private *dev_priv,
unsigned int height,
uint32_t pixel_format,
uint64_t fb_format_modifier);
u32 intel_fb_stride_alignment(const struct drm_i915_private *dev_priv,
uint64_t fb_modifier, uint32_t pixel_format);
unsigned int intel_fb_align_height(const struct drm_framebuffer *fb,
int plane, unsigned int height);
/* intel_audio.c */
void intel_init_audio_hooks(struct drm_i915_private *dev_priv);
......@@ -1269,6 +1269,10 @@ void intel_audio_init(struct drm_i915_private *dev_priv);
void intel_audio_deinit(struct drm_i915_private *dev_priv);
/* intel_cdclk.c */
void skl_init_cdclk(struct drm_i915_private *dev_priv);
void skl_uninit_cdclk(struct drm_i915_private *dev_priv);
void bxt_init_cdclk(struct drm_i915_private *dev_priv);
void bxt_uninit_cdclk(struct drm_i915_private *dev_priv);
void intel_init_cdclk_hooks(struct drm_i915_private *dev_priv);
void intel_update_max_cdclk(struct drm_i915_private *dev_priv);
void intel_update_cdclk(struct drm_i915_private *dev_priv);
......@@ -1380,9 +1384,6 @@ int intel_plane_atomic_set_property(struct drm_plane *plane,
int intel_plane_atomic_calc_changes(struct drm_crtc_state *crtc_state,
struct drm_plane_state *plane_state);
unsigned int intel_tile_height(const struct drm_i915_private *dev_priv,
uint64_t fb_modifier, unsigned int cpp);
void assert_pch_transcoder_disabled(struct drm_i915_private *dev_priv,
enum pipe pipe);
......@@ -1414,14 +1415,10 @@ void intel_prepare_reset(struct drm_i915_private *dev_priv);
void intel_finish_reset(struct drm_i915_private *dev_priv);
void hsw_enable_pc8(struct drm_i915_private *dev_priv);
void hsw_disable_pc8(struct drm_i915_private *dev_priv);
void bxt_init_cdclk(struct drm_i915_private *dev_priv);
void bxt_uninit_cdclk(struct drm_i915_private *dev_priv);
void gen9_sanitize_dc_state(struct drm_i915_private *dev_priv);
void bxt_enable_dc9(struct drm_i915_private *dev_priv);
void bxt_disable_dc9(struct drm_i915_private *dev_priv);
void gen9_enable_dc5(struct drm_i915_private *dev_priv);
void skl_init_cdclk(struct drm_i915_private *dev_priv);
void skl_uninit_cdclk(struct drm_i915_private *dev_priv);
unsigned int skl_cdclk_get_vco(unsigned int freq);
void skl_enable_dc6(struct drm_i915_private *dev_priv);
void skl_disable_dc6(struct drm_i915_private *dev_priv);
......
......@@ -39,7 +39,6 @@ struct intel_dsi_host;
struct intel_dsi {
struct intel_encoder base;
struct drm_panel *panel;
struct intel_dsi_host *dsi_hosts[I915_MAX_PORTS];
/* GPIO Desc for CRC based Panel control */
......@@ -130,11 +129,11 @@ static inline struct intel_dsi *enc_to_intel_dsi(struct drm_encoder *encoder)
return container_of(encoder, struct intel_dsi, base.base);
}
/* intel_dsi.c */
void wait_for_dsi_fifo_empty(struct intel_dsi *intel_dsi, enum port port);
enum mipi_dsi_pixel_format pixel_format_from_register_bits(u32 fmt);
void intel_dsi_exec_vbt_sequence(struct intel_dsi *intel_dsi,
enum mipi_seq seq_id);
/* intel_dsi_pll.c */
bool intel_dsi_pll_is_enabled(struct drm_i915_private *dev_priv);
int intel_compute_dsi_pll(struct intel_encoder *encoder,
struct intel_crtc_state *config);
......@@ -146,7 +145,10 @@ u32 intel_dsi_get_pclk(struct intel_encoder *encoder, int pipe_bpp,
void intel_dsi_reset_clocks(struct intel_encoder *encoder,
enum port port);
struct drm_panel *vbt_panel_init(struct intel_dsi *intel_dsi, u16 panel_id);
enum mipi_dsi_pixel_format pixel_format_from_register_bits(u32 fmt);
/* intel_dsi_vbt.c */
bool intel_dsi_vbt_init(struct intel_dsi *intel_dsi, u16 panel_id);
int intel_dsi_vbt_get_modes(struct intel_dsi *intel_dsi);
void intel_dsi_vbt_exec_sequence(struct intel_dsi *intel_dsi,
enum mipi_seq seq_id);
#endif /* _INTEL_DSI_H */
......@@ -28,7 +28,6 @@
#include <drm/drm_crtc.h>
#include <drm/drm_edid.h>
#include <drm/i915_drm.h>
#include <drm/drm_panel.h>
#include <linux/gpio/consumer.h>
#include <linux/slab.h>
#include <video/mipi_display.h>
......@@ -38,16 +37,6 @@
#include "intel_drv.h"
#include "intel_dsi.h"
struct vbt_panel {
struct drm_panel panel;
struct intel_dsi *intel_dsi;
};
static inline struct vbt_panel *to_vbt_panel(struct drm_panel *panel)
{
return container_of(panel, struct vbt_panel, panel);
}
#define MIPI_TRANSFER_MODE_SHIFT 0
#define MIPI_VIRTUAL_CHANNEL_SHIFT 1
#define MIPI_PORT_SHIFT 3
......@@ -426,7 +415,7 @@ static const char *sequence_name(enum mipi_seq seq_id)
return "(unknown)";
}
void intel_dsi_exec_vbt_sequence(struct intel_dsi *intel_dsi,
void intel_dsi_vbt_exec_sequence(struct intel_dsi *intel_dsi,
enum mipi_seq seq_id)
{
struct drm_i915_private *dev_priv = to_i915(intel_dsi->base.base.dev);
......@@ -492,40 +481,31 @@ void intel_dsi_exec_vbt_sequence(struct intel_dsi *intel_dsi,
}
}
static int vbt_panel_get_modes(struct drm_panel *panel)
int intel_dsi_vbt_get_modes(struct intel_dsi *intel_dsi)
{
struct vbt_panel *vbt_panel = to_vbt_panel(panel);
struct intel_dsi *intel_dsi = vbt_panel->intel_dsi;
struct intel_connector *connector = intel_dsi->attached_connector;
struct drm_device *dev = intel_dsi->base.base.dev;
struct drm_i915_private *dev_priv = to_i915(dev);
struct drm_display_mode *mode;
if (!panel->connector)
return 0;
mode = drm_mode_duplicate(dev, dev_priv->vbt.lfp_lvds_vbt_mode);
if (!mode)
return 0;
mode->type |= DRM_MODE_TYPE_PREFERRED;
drm_mode_probed_add(panel->connector, mode);
drm_mode_probed_add(&connector->base, mode);
return 1;
}
static const struct drm_panel_funcs vbt_panel_funcs = {
.get_modes = vbt_panel_get_modes,
};
struct drm_panel *vbt_panel_init(struct intel_dsi *intel_dsi, u16 panel_id)
bool intel_dsi_vbt_init(struct intel_dsi *intel_dsi, u16 panel_id)
{
struct drm_device *dev = intel_dsi->base.base.dev;
struct drm_i915_private *dev_priv = to_i915(dev);
struct mipi_config *mipi_config = dev_priv->vbt.dsi.config;
struct mipi_pps_data *pps = dev_priv->vbt.dsi.pps;
struct drm_display_mode *mode = dev_priv->vbt.lfp_lvds_vbt_mode;
struct vbt_panel *vbt_panel;
u32 bpp;
u32 tlpx_ns, extra_byte_count, bitrate, tlpx_ui;
u32 ui_num, ui_den;
......@@ -588,7 +568,7 @@ struct drm_panel *vbt_panel_init(struct intel_dsi *intel_dsi, u16 panel_id)
if (mipi_config->target_burst_mode_freq <
computed_ddr) {
DRM_ERROR("Burst mode freq is less than computed\n");
return NULL;
return false;
}
burst_mode_ratio = DIV_ROUND_UP(
......@@ -598,7 +578,7 @@ struct drm_panel *vbt_panel_init(struct intel_dsi *intel_dsi, u16 panel_id)
pclk = DIV_ROUND_UP(pclk * burst_mode_ratio, 100);
} else {
DRM_ERROR("Burst mode target is not set\n");
return NULL;
return false;
}
} else
burst_mode_ratio = 100;
......@@ -809,20 +789,10 @@ struct drm_panel *vbt_panel_init(struct intel_dsi *intel_dsi, u16 panel_id)
intel_dsi->panel_off_delay = pps->panel_off_delay / 10;
intel_dsi->panel_pwr_cycle_delay = pps->panel_power_cycle_delay / 10;
/* This is cheating a bit with the cleanup. */
vbt_panel = devm_kzalloc(dev->dev, sizeof(*vbt_panel), GFP_KERNEL);
if (!vbt_panel)
return NULL;
vbt_panel->intel_dsi = intel_dsi;
drm_panel_init(&vbt_panel->panel);
vbt_panel->panel.funcs = &vbt_panel_funcs;
drm_panel_add(&vbt_panel->panel);
/* a regular driver would get the device in probe */
for_each_dsi_port(port, intel_dsi->ports) {
mipi_dsi_attach(intel_dsi->dsi_hosts[port]->device);
}
return &vbt_panel->panel;
return true;
}
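
The net effect of the refactor: there is no separate panel object to allocate, register, and track; the VBT parser writes straight into struct intel_dsi, and the bool return of intel_dsi_vbt_init() replaces the old NULL-able drm_panel pointer. A hedged standalone model of the new shape (types and values are illustrative, not the driver's):

	#include <stdbool.h>
	#include <stdio.h>
	#include <stdlib.h>

	struct dsi_device {
		int native_mode;	/* stands in for vbt.lfp_lvds_vbt_mode */
	};

	/* Fill in the device directly and report success, instead of
	 * returning a freshly registered adapter object. */
	static bool dsi_vbt_init(struct dsi_device *dsi, int panel_id)
	{
		if (panel_id < 0)
			return false;	/* e.g. the burst-mode sanity checks */

		dsi->native_mode = panel_id;	/* parse the VBT tables here */
		return true;
	}

	/* Callers now ask the device for modes directly, with no
	 * drm_panel_funcs vtable in between. */
	static int dsi_vbt_get_modes(struct dsi_device *dsi)
	{
		printf("adding probed mode %d\n", dsi->native_mode);
		return 1;	/* number of modes added to the connector */
	}

	int main(void)
	{
		struct dsi_device dsi;

		if (!dsi_vbt_init(&dsi, 42))
			return EXIT_FAILURE;
		return dsi_vbt_get_modes(&dsi) == 1 ? EXIT_SUCCESS : EXIT_FAILURE;
	}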
......@@ -87,6 +87,5 @@ uint64_t intel_lr_context_descriptor(struct i915_gem_context *ctx,
/* Execlists */
int intel_sanitize_enable_execlists(struct drm_i915_private *dev_priv,
int enable_execlists);
void intel_execlists_enable_submission(struct drm_i915_private *dev_priv);
#endif /* _INTEL_LRC_H_ */