Commit a18b29f0 authored by Dave Airlie

Merge tag 'drm-intel-next-2014-09-01' of git://anongit.freedesktop.org/drm-intel into drm-next

drm-intel-next-2014-08-22:
- basic code for execlists, the fancy new cmd submission mechanism on gen8. Still
  disabled by default (Ben, Oscar Mateo, Thomas Daniel et al)
- remove the useless usage of console_lock for I915_FBDEV=n (Chris)
- clean up relations between ctx and ppgtt
- clean up ppgtt lifetime handling (Michel Thierry)
- various cursor code improvements from Ville
- execbuffer code cleanups and secure batch fixes (Chris)
- prep work for dev -> dev_priv transition (Chris)
- some of the prep patches for the seqno -> request object transition (Chris)
- various small improvements all over

* tag 'drm-intel-next-2014-09-01' of git://anongit.freedesktop.org/drm-intel: (86 commits)
  drm/i915: fix suspend/resume for GENs w/o runtime PM support
  drm/i915: Update DRIVER_DATE to 20140822
  drm: fix plane rotation when restoring fbdev configuration
  drm/i915/bdw: Disable execlists by default
  drm/i915/bdw: Enable Logical Ring Contexts (hence, Execlists)
  drm/i915/bdw: Document Logical Rings, LR contexts and Execlists
  drm/i915/bdw: Print context state in debugfs
  drm/i915/bdw: Display context backing obj & ringbuffer info in debugfs
  drm/i915/bdw: Display execlists info in debugfs
  drm/i915/bdw: Disable semaphores for Execlists
  drm/i915/bdw: Make sure gpu reset still works with Execlists
  drm/i915/bdw: Don't write PDP in the legacy way when using LRCs
  drm/i915: Track cursor changes as frontbuffer tracking flushes
  drm/i915/bdw: Help out the ctx switch interrupt handler
  drm/i915/bdw: Avoid non-lite-restore preemptions
  drm/i915/bdw: Handle context switch events
  drm/i915/bdw: Two-stage execlist submit process
  drm/i915/bdw: Write the tail pointer, LRC style
  drm/i915/bdw: Implement context switching (somewhat)
  drm/i915/bdw: Emission of requests with logical rings
  ...

Conflicts:
	drivers/gpu/drm/i915/i915_drv.c
parents 04cd2145 604effb7
......@@ -3919,6 +3919,11 @@ int num_ioctls;</synopsis>
!Pdrivers/gpu/drm/i915/i915_cmd_parser.c batch buffer command parser
!Idrivers/gpu/drm/i915/i915_cmd_parser.c
</sect2>
<sect2>
<title>Logical Rings, Logical Ring Contexts and Execlists</title>
!Pdrivers/gpu/drm/i915/intel_lrc.c Logical Rings, Logical Ring Contexts and Execlists
!Idrivers/gpu/drm/i915/intel_lrc.c
</sect2>
</sect1>
</chapter>
</part>
......
......@@ -4175,12 +4175,25 @@ static int drm_mode_crtc_set_obj_prop(struct drm_mode_object *obj,
return ret;
}
static int drm_mode_plane_set_obj_prop(struct drm_mode_object *obj,
struct drm_property *property,
uint64_t value)
/**
* drm_mode_plane_set_obj_prop - set the value of a property
* @plane: drm plane object to set property value for
* @property: property to set
* @value: value the property should be set to
*
* This function sets a given property on a given plane object. It
* calls the driver's ->set_property callback and changes the software state of
* the property if the callback succeeds.
*
* Returns:
* Zero on success, error code on failure.
*/
int drm_mode_plane_set_obj_prop(struct drm_plane *plane,
struct drm_property *property,
uint64_t value)
{
int ret = -EINVAL;
struct drm_plane *plane = obj_to_plane(obj);
struct drm_mode_object *obj = &plane->base;
if (plane->funcs->set_property)
ret = plane->funcs->set_property(plane, property, value);
......@@ -4189,6 +4202,7 @@ static int drm_mode_plane_set_obj_prop(struct drm_mode_object *obj,
return ret;
}
EXPORT_SYMBOL(drm_mode_plane_set_obj_prop);
/**
* drm_mode_getproperty_ioctl - get the current value of an object's property
......@@ -4327,7 +4341,8 @@ int drm_mode_obj_set_property_ioctl(struct drm_device *dev, void *data,
ret = drm_mode_crtc_set_obj_prop(arg_obj, property, arg->value);
break;
case DRM_MODE_OBJECT_PLANE:
ret = drm_mode_plane_set_obj_prop(arg_obj, property, arg->value);
ret = drm_mode_plane_set_obj_prop(obj_to_plane(arg_obj),
property, arg->value);
break;
}
......
......@@ -296,9 +296,9 @@ static bool restore_fbdev_mode(struct drm_fb_helper *fb_helper)
drm_plane_force_disable(plane);
if (dev->mode_config.rotation_property) {
drm_object_property_set_value(&plane->base,
dev->mode_config.rotation_property,
BIT(DRM_ROTATE_0));
drm_mode_plane_set_obj_prop(plane,
dev->mode_config.rotation_property,
BIT(DRM_ROTATE_0));
}
}
......
......@@ -31,6 +31,7 @@ i915-y += i915_cmd_parser.o \
i915_gpu_error.o \
i915_irq.o \
i915_trace_points.o \
intel_lrc.o \
intel_ringbuffer.o \
intel_uncore.o
......
......@@ -842,8 +842,6 @@ static u32 *vmap_batch(struct drm_i915_gem_object *obj)
*/
bool i915_needs_cmd_parser(struct intel_engine_cs *ring)
{
struct drm_i915_private *dev_priv = ring->dev->dev_private;
if (!ring->needs_cmd_parser)
return false;
......@@ -852,7 +850,7 @@ bool i915_needs_cmd_parser(struct intel_engine_cs *ring)
* disabled. That will cause all of the parser's PPGTT checks to
* fail. For now, disable parsing when PPGTT is off.
*/
if (!dev_priv->mm.aliasing_ppgtt)
if (!USES_PPGTT(ring->dev))
return false;
return (i915.enable_cmd_parser == 1);
......
......@@ -333,7 +333,7 @@ static int per_file_stats(int id, void *ptr, void *data)
}
ppgtt = container_of(vma->vm, struct i915_hw_ppgtt, base);
if (ppgtt->ctx && ppgtt->ctx->file_priv != stats->file_priv)
if (ppgtt->file_priv != stats->file_priv)
continue;
if (obj->ring) /* XXX per-vma statistic */
......@@ -703,6 +703,12 @@ static int i915_interrupt_info(struct seq_file *m, void *data)
}
for_each_pipe(pipe) {
if (!intel_display_power_enabled(dev_priv,
POWER_DOMAIN_PIPE(pipe))) {
seq_printf(m, "Pipe %c power disabled\n",
pipe_name(pipe));
continue;
}
seq_printf(m, "Pipe %c IMR:\t%08x\n",
pipe_name(pipe),
I915_READ(GEN8_DE_PIPE_IMR(pipe)));
......@@ -1671,6 +1677,14 @@ static int i915_gem_framebuffer_info(struct seq_file *m, void *data)
return 0;
}
static void describe_ctx_ringbuf(struct seq_file *m,
struct intel_ringbuffer *ringbuf)
{
seq_printf(m, " (ringbuffer, space: %d, head: %u, tail: %u, last head: %d)",
ringbuf->space, ringbuf->head, ringbuf->tail,
ringbuf->last_retired_head);
}
static int i915_context_status(struct seq_file *m, void *unused)
{
struct drm_info_node *node = m->private;
......@@ -1697,16 +1711,168 @@ static int i915_context_status(struct seq_file *m, void *unused)
}
list_for_each_entry(ctx, &dev_priv->context_list, link) {
if (ctx->legacy_hw_ctx.rcs_state == NULL)
if (!i915.enable_execlists &&
ctx->legacy_hw_ctx.rcs_state == NULL)
continue;
seq_puts(m, "HW context ");
describe_ctx(m, ctx);
for_each_ring(ring, dev_priv, i)
for_each_ring(ring, dev_priv, i) {
if (ring->default_context == ctx)
seq_printf(m, "(default context %s) ",
ring->name);
}
if (i915.enable_execlists) {
seq_putc(m, '\n');
for_each_ring(ring, dev_priv, i) {
struct drm_i915_gem_object *ctx_obj =
ctx->engine[i].state;
struct intel_ringbuffer *ringbuf =
ctx->engine[i].ringbuf;
seq_printf(m, "%s: ", ring->name);
if (ctx_obj)
describe_obj(m, ctx_obj);
if (ringbuf)
describe_ctx_ringbuf(m, ringbuf);
seq_putc(m, '\n');
}
} else {
describe_obj(m, ctx->legacy_hw_ctx.rcs_state);
}
seq_putc(m, '\n');
}
mutex_unlock(&dev->struct_mutex);
return 0;
}
static int i915_dump_lrc(struct seq_file *m, void *unused)
{
struct drm_info_node *node = (struct drm_info_node *) m->private;
struct drm_device *dev = node->minor->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_engine_cs *ring;
struct intel_context *ctx;
int ret, i;
if (!i915.enable_execlists) {
seq_printf(m, "Logical Ring Contexts are disabled\n");
return 0;
}
ret = mutex_lock_interruptible(&dev->struct_mutex);
if (ret)
return ret;
list_for_each_entry(ctx, &dev_priv->context_list, link) {
for_each_ring(ring, dev_priv, i) {
struct drm_i915_gem_object *ctx_obj = ctx->engine[i].state;
if (ring->default_context == ctx)
seq_printf(m, "(default context %s) ", ring->name);
continue;
if (ctx_obj) {
struct page *page = i915_gem_object_get_page(ctx_obj, 1);
uint32_t *reg_state = kmap_atomic(page);
int j;
seq_printf(m, "CONTEXT: %s %u\n", ring->name,
intel_execlists_ctx_id(ctx_obj));
for (j = 0; j < 0x600 / sizeof(u32) / 4; j += 4) {
seq_printf(m, "\t[0x%08lx] 0x%08x 0x%08x 0x%08x 0x%08x\n",
i915_gem_obj_ggtt_offset(ctx_obj) + 4096 + (j * 4),
reg_state[j], reg_state[j + 1],
reg_state[j + 2], reg_state[j + 3]);
}
kunmap_atomic(reg_state);
seq_putc(m, '\n');
}
}
}
mutex_unlock(&dev->struct_mutex);
return 0;
}
static int i915_execlists(struct seq_file *m, void *data)
{
struct drm_info_node *node = (struct drm_info_node *)m->private;
struct drm_device *dev = node->minor->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_engine_cs *ring;
u32 status_pointer;
u8 read_pointer;
u8 write_pointer;
u32 status;
u32 ctx_id;
struct list_head *cursor;
int ring_id, i;
int ret;
if (!i915.enable_execlists) {
seq_puts(m, "Logical Ring Contexts are disabled\n");
return 0;
}
ret = mutex_lock_interruptible(&dev->struct_mutex);
if (ret)
return ret;
for_each_ring(ring, dev_priv, ring_id) {
struct intel_ctx_submit_request *head_req = NULL;
int count = 0;
unsigned long flags;
seq_printf(m, "%s\n", ring->name);
status = I915_READ(RING_EXECLIST_STATUS(ring));
ctx_id = I915_READ(RING_EXECLIST_STATUS(ring) + 4);
seq_printf(m, "\tExeclist status: 0x%08X, context: %u\n",
status, ctx_id);
status_pointer = I915_READ(RING_CONTEXT_STATUS_PTR(ring));
seq_printf(m, "\tStatus pointer: 0x%08X\n", status_pointer);
read_pointer = ring->next_context_status_buffer;
write_pointer = status_pointer & 0x07;
if (read_pointer > write_pointer)
write_pointer += 6;
seq_printf(m, "\tRead pointer: 0x%08X, write pointer 0x%08X\n",
read_pointer, write_pointer);
for (i = 0; i < 6; i++) {
status = I915_READ(RING_CONTEXT_STATUS_BUF(ring) + 8*i);
ctx_id = I915_READ(RING_CONTEXT_STATUS_BUF(ring) + 8*i + 4);
seq_printf(m, "\tStatus buffer %d: 0x%08X, context: %u\n",
i, status, ctx_id);
}
spin_lock_irqsave(&ring->execlist_lock, flags);
list_for_each(cursor, &ring->execlist_queue)
count++;
head_req = list_first_entry_or_null(&ring->execlist_queue,
struct intel_ctx_submit_request, execlist_link);
spin_unlock_irqrestore(&ring->execlist_lock, flags);
seq_printf(m, "\t%d requests in queue\n", count);
if (head_req) {
struct drm_i915_gem_object *ctx_obj;
ctx_obj = head_req->ctx->engine[ring_id].state;
seq_printf(m, "\tHead request id: %u\n",
intel_execlists_ctx_id(ctx_obj));
seq_printf(m, "\tHead request tail: %u\n",
head_req->tail);
}
describe_obj(m, ctx->legacy_hw_ctx.rcs_state);
seq_putc(m, '\n');
}
......@@ -1815,7 +1981,13 @@ static int per_file_ctx(int id, void *ptr, void *data)
{
struct intel_context *ctx = ptr;
struct seq_file *m = data;
struct i915_hw_ppgtt *ppgtt = ctx_to_ppgtt(ctx);
struct i915_hw_ppgtt *ppgtt = ctx->ppgtt;
if (!ppgtt) {
seq_printf(m, " no ppgtt for context %d\n",
ctx->user_handle);
return 0;
}
if (i915_gem_context_is_default(ctx))
seq_puts(m, " default context:\n");
......@@ -1875,8 +2047,7 @@ static void gen6_ppgtt_info(struct seq_file *m, struct drm_device *dev)
seq_printf(m, "pd gtt offset: 0x%08x\n", ppgtt->pd_offset);
ppgtt->debug_dump(ppgtt, m);
} else
return;
}
list_for_each_entry_reverse(file, &dev->filelist, lhead) {
struct drm_i915_file_private *file_priv = file->driver_priv;
......@@ -3963,6 +4134,8 @@ static const struct drm_info_list i915_debugfs_list[] = {
{"i915_opregion", i915_opregion, 0},
{"i915_gem_framebuffer", i915_gem_framebuffer_info, 0},
{"i915_context_status", i915_context_status, 0},
{"i915_dump_lrc", i915_dump_lrc, 0},
{"i915_execlists", i915_execlists, 0},
{"i915_gen6_forcewake_count", i915_gen6_forcewake_count_info, 0},
{"i915_swizzle_info", i915_swizzle_info, 0},
{"i915_ppgtt_info", i915_ppgtt_info, 0},
......
......@@ -999,7 +999,7 @@ static int i915_getparam(struct drm_device *dev, void *data,
value = HAS_WT(dev);
break;
case I915_PARAM_HAS_ALIASING_PPGTT:
value = dev_priv->mm.aliasing_ppgtt || USES_FULL_PPGTT(dev);
value = USES_PPGTT(dev);
break;
case I915_PARAM_HAS_WAIT_TIMEOUT:
value = 1;
......@@ -1350,8 +1350,6 @@ static int i915_load_modeset_init(struct drm_device *dev)
if (ret)
goto cleanup_irq;
INIT_WORK(&dev_priv->console_resume_work, intel_console_resume);
intel_modeset_gem_init(dev);
/* Always safe in the mode setting case. */
......@@ -1388,7 +1386,6 @@ static int i915_load_modeset_init(struct drm_device *dev)
i915_gem_cleanup_ringbuffer(dev);
i915_gem_context_fini(dev);
mutex_unlock(&dev->struct_mutex);
WARN_ON(dev_priv->mm.aliasing_ppgtt);
cleanup_irq:
drm_irq_uninstall(dev);
cleanup_gem_stolen:
......@@ -1603,9 +1600,10 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags)
dev->dev_private = dev_priv;
dev_priv->dev = dev;
/* copy initial configuration to dev_priv->info */
/* Setup the write-once "constant" device info */
device_info = (struct intel_device_info *)&dev_priv->info;
*device_info = *info;
memcpy(device_info, info, sizeof(dev_priv->info));
device_info->device_id = dev->pdev->device;
spin_lock_init(&dev_priv->irq_lock);
spin_lock_init(&dev_priv->gpu_error.lock);
......@@ -1817,7 +1815,7 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags)
arch_phys_wc_del(dev_priv->gtt.mtrr);
io_mapping_free(dev_priv->gtt.mappable);
out_gtt:
dev_priv->gtt.base.cleanup(&dev_priv->gtt.base);
i915_global_gtt_cleanup(dev);
out_regs:
intel_uncore_fini(dev);
pci_iounmap(dev->pdev, dev_priv->regs);
......@@ -1864,7 +1862,6 @@ int i915_driver_unload(struct drm_device *dev)
if (drm_core_check_feature(dev, DRIVER_MODESET)) {
intel_fbdev_fini(dev);
intel_modeset_cleanup(dev);
cancel_work_sync(&dev_priv->console_resume_work);
/*
* free the memory space allocated for the child device
......@@ -1897,7 +1894,6 @@ int i915_driver_unload(struct drm_device *dev)
mutex_lock(&dev->struct_mutex);
i915_gem_cleanup_ringbuffer(dev);
i915_gem_context_fini(dev);
WARN_ON(dev_priv->mm.aliasing_ppgtt);
mutex_unlock(&dev->struct_mutex);
i915_gem_cleanup_stolen(dev);
......@@ -1905,8 +1901,6 @@ int i915_driver_unload(struct drm_device *dev)
i915_free_hws(dev);
}
WARN_ON(!list_empty(&dev_priv->vm_list));
drm_vblank_cleanup(dev);
intel_teardown_gmbus(dev);
......@@ -1916,7 +1910,7 @@ int i915_driver_unload(struct drm_device *dev)
destroy_workqueue(dev_priv->wq);
pm_qos_remove_request(&dev_priv->pm_qos);
dev_priv->gtt.base.cleanup(&dev_priv->gtt.base);
i915_global_gtt_cleanup(dev);
intel_uncore_fini(dev);
if (dev_priv->regs != NULL)
......
......@@ -481,6 +481,10 @@ bool i915_semaphore_is_enabled(struct drm_device *dev)
if (i915.semaphores >= 0)
return i915.semaphores;
/* TODO: make semaphores and Execlists play nicely together */
if (i915.enable_execlists)
return false;
/* Until we get further testing... */
if (IS_GEN8(dev))
return false;
......@@ -524,6 +528,10 @@ static void intel_suspend_encoders(struct drm_i915_private *dev_priv)
drm_modeset_unlock_all(dev);
}
static int intel_suspend_complete(struct drm_i915_private *dev_priv);
static int intel_resume_prepare(struct drm_i915_private *dev_priv,
bool rpm_resume);
static int i915_drm_freeze(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
......@@ -591,9 +599,7 @@ static int i915_drm_freeze(struct drm_device *dev)
intel_uncore_forcewake_reset(dev, false);
intel_opregion_fini(dev);
console_lock();
intel_fbdev_set_suspend(dev, FBINFO_STATE_SUSPENDED);
console_unlock();
intel_fbdev_set_suspend(dev, FBINFO_STATE_SUSPENDED, true);
dev_priv->suspend_count++;
......@@ -632,30 +638,20 @@ int i915_suspend(struct drm_device *dev, pm_message_t state)
return 0;
}
void intel_console_resume(struct work_struct *work)
{
struct drm_i915_private *dev_priv =
container_of(work, struct drm_i915_private,
console_resume_work);
struct drm_device *dev = dev_priv->dev;
console_lock();
intel_fbdev_set_suspend(dev, FBINFO_STATE_RUNNING);
console_unlock();
}
static int i915_drm_thaw_early(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
int ret;
if (IS_HASWELL(dev) || IS_BROADWELL(dev))
hsw_disable_pc8(dev_priv);
ret = intel_resume_prepare(dev_priv, false);
if (ret)
DRM_ERROR("Resume prepare failed: %d,Continuing resume\n", ret);
intel_uncore_early_sanitize(dev, true);
intel_uncore_sanitize(dev);
intel_power_domains_init_hw(dev_priv);
return 0;
return ret;
}
static int __i915_drm_thaw(struct drm_device *dev, bool restore_gtt_mappings)
......@@ -714,17 +710,7 @@ static int __i915_drm_thaw(struct drm_device *dev, bool restore_gtt_mappings)
intel_opregion_init(dev);
/*
* The console lock can be pretty contended on resume due
* to all the printk activity. Try to keep it out of the hot
* path of resume if possible.
*/
if (console_trylock()) {
intel_fbdev_set_suspend(dev, FBINFO_STATE_RUNNING);
console_unlock();
} else {
schedule_work(&dev_priv->console_resume_work);
}
intel_fbdev_set_suspend(dev, FBINFO_STATE_RUNNING, false);
mutex_lock(&dev_priv->modeset_restore_lock);
dev_priv->modeset_restore = MODESET_DONE;
......@@ -941,6 +927,7 @@ static int i915_pm_suspend_late(struct device *dev)
struct pci_dev *pdev = to_pci_dev(dev);
struct drm_device *drm_dev = pci_get_drvdata(pdev);
struct drm_i915_private *dev_priv = drm_dev->dev_private;
int ret;
/*
* We have a suspend ordering issue with the snd-hda driver also
......@@ -954,13 +941,16 @@ static int i915_pm_suspend_late(struct device *dev)
if (drm_dev->switch_power_state == DRM_SWITCH_POWER_OFF)
return 0;
if (IS_HASWELL(drm_dev) || IS_BROADWELL(drm_dev))
hsw_enable_pc8(dev_priv);
ret = intel_suspend_complete(dev_priv);
pci_disable_device(pdev);
pci_set_power_state(pdev, PCI_D3hot);
if (ret)
DRM_ERROR("Suspend complete failed: %d\n", ret);
else {
pci_disable_device(pdev);
pci_set_power_state(pdev, PCI_D3hot);
}
return 0;
return ret;
}
static int i915_pm_resume_early(struct device *dev)
......@@ -1016,23 +1006,26 @@ static int i915_pm_poweroff(struct device *dev)
return i915_drm_freeze(drm_dev);
}
static int hsw_runtime_suspend(struct drm_i915_private *dev_priv)
static int hsw_suspend_complete(struct drm_i915_private *dev_priv)
{
hsw_enable_pc8(dev_priv);
return 0;
}
static int snb_runtime_resume(struct drm_i915_private *dev_priv)
static int snb_resume_prepare(struct drm_i915_private *dev_priv,
bool rpm_resume)
{
struct drm_device *dev = dev_priv->dev;
intel_init_pch_refclk(dev);
if (rpm_resume)
intel_init_pch_refclk(dev);
return 0;
}
static int hsw_runtime_resume(struct drm_i915_private *dev_priv)
static int hsw_resume_prepare(struct drm_i915_private *dev_priv,
bool rpm_resume)
{
hsw_disable_pc8(dev_priv);
......@@ -1328,7 +1321,7 @@ static void vlv_check_no_gt_access(struct drm_i915_private *dev_priv)
I915_WRITE(VLV_GTLC_PW_STATUS, VLV_GTLC_ALLOWWAKEERR);
}
static int vlv_runtime_suspend(struct drm_i915_private *dev_priv)
static int vlv_suspend_complete(struct drm_i915_private *dev_priv)
{
u32 mask;
int err;
......@@ -1368,7 +1361,8 @@ static int vlv_runtime_suspend(struct drm_i915_private *dev_priv)
return err;
}
static int vlv_runtime_resume(struct drm_i915_private *dev_priv)
static int vlv_resume_prepare(struct drm_i915_private *dev_priv,
bool rpm_resume)
{
struct drm_device *dev = dev_priv->dev;
int err;
......@@ -1393,8 +1387,10 @@ static int vlv_runtime_resume(struct drm_i915_private *dev_priv)
vlv_check_no_gt_access(dev_priv);
intel_init_clock_gating(dev);
i915_gem_restore_fences(dev);
if (rpm_resume) {
intel_init_clock_gating(dev);
i915_gem_restore_fences(dev);
}
return ret;
}
......@@ -1409,7 +1405,9 @@ static int intel_runtime_suspend(struct device *device)
if (WARN_ON_ONCE(!(dev_priv->rps.enabled && intel_enable_rc6(dev))))
return -ENODEV;
WARN_ON(!HAS_RUNTIME_PM(dev));
if (WARN_ON_ONCE(!HAS_RUNTIME_PM(dev)))
return -ENODEV;
assert_force_wake_inactive(dev_priv);
DRM_DEBUG_KMS("Suspending device\n");
......@@ -1446,17 +1444,7 @@ static int intel_runtime_suspend(struct device *device)
cancel_work_sync(&dev_priv->rps.work);
intel_runtime_pm_disable_interrupts(dev);
if (IS_GEN6(dev)) {
ret = 0;
} else if (IS_HASWELL(dev) || IS_BROADWELL(dev)) {
ret = hsw_runtime_suspend(dev_priv);
} else if (IS_VALLEYVIEW(dev)) {
ret = vlv_runtime_suspend(dev_priv);
} else {
ret = -ENODEV;
WARN_ON(1);
}
ret = intel_suspend_complete(dev_priv);
if (ret) {
DRM_ERROR("Runtime suspend failed, disabling it (%d)\n", ret);
intel_runtime_pm_restore_interrupts(dev);
......@@ -1487,24 +1475,15 @@ static int intel_runtime_resume(struct device *device)
struct drm_i915_private *dev_priv = dev->dev_private;
int ret;
WARN_ON(!HAS_RUNTIME_PM(dev));
if (WARN_ON_ONCE(!HAS_RUNTIME_PM(dev)))
return -ENODEV;
DRM_DEBUG_KMS("Resuming device\n");
intel_opregion_notify_adapter(dev, PCI_D0);
dev_priv->pm.suspended = false;
if (IS_GEN6(dev)) {
ret = snb_runtime_resume(dev_priv);
} else if (IS_HASWELL(dev) || IS_BROADWELL(dev)) {
ret = hsw_runtime_resume(dev_priv);
} else if (IS_VALLEYVIEW(dev)) {
ret = vlv_runtime_resume(dev_priv);
} else {
WARN_ON(1);
ret = -ENODEV;
}
ret = intel_resume_prepare(dev_priv, true);
/*
* No point of rolling back things in case of an error, as the best
* we can do is to hope that things will still work (and disable RPM).
......@@ -1523,6 +1502,48 @@ static int intel_runtime_resume(struct device *device)
return ret;
}
/*
* This function implements the functionality common to the runtime and
* system suspend sequences.
*/
static int intel_suspend_complete(struct drm_i915_private *dev_priv)
{
struct drm_device *dev = dev_priv->dev;
int ret;
if (IS_HASWELL(dev) || IS_BROADWELL(dev))
ret = hsw_suspend_complete(dev_priv);
else if (IS_VALLEYVIEW(dev))
ret = vlv_suspend_complete(dev_priv);
else
ret = 0;
return ret;
}
/*
* This function implements the functionality common to the runtime and
* system resume sequences. The rpm_resume flag selects between the two
* code paths.
*/
static int intel_resume_prepare(struct drm_i915_private *dev_priv,
bool rpm_resume)
{
struct drm_device *dev = dev_priv->dev;
int ret;
if (IS_GEN6(dev))
ret = snb_resume_prepare(dev_priv, rpm_resume);
else if (IS_HASWELL(dev) || IS_BROADWELL(dev))
ret = hsw_resume_prepare(dev_priv, rpm_resume);
else if (IS_VALLEYVIEW(dev))
ret = vlv_resume_prepare(dev_priv, rpm_resume);
else
ret = 0;
return ret;
}
static const struct dev_pm_ops i915_pm_ops = {
.suspend = i915_pm_suspend,
.suspend_late = i915_pm_suspend_late,
......
......@@ -96,50 +96,6 @@
#define GEN6_CONTEXT_ALIGN (64<<10)
#define GEN7_CONTEXT_ALIGN 4096
static void do_ppgtt_cleanup(struct i915_hw_ppgtt *ppgtt)
{
struct drm_device *dev = ppgtt->base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
struct i915_address_space *vm = &ppgtt->base;
if (ppgtt == dev_priv->mm.aliasing_ppgtt ||
(list_empty(&vm->active_list) && list_empty(&vm->inactive_list))) {
ppgtt->base.cleanup(&ppgtt->base);
return;
}
/*
* Make sure vmas are unbound before we take down the drm_mm
*
* FIXME: Proper refcounting should take care of this, this shouldn't be
* needed at all.
*/
if (!list_empty(&vm->active_list)) {
struct i915_vma *vma;
list_for_each_entry(vma, &vm->active_list, mm_list)
if (WARN_ON(list_empty(&vma->vma_link) ||
list_is_singular(&vma->vma_link)))
break;
i915_gem_evict_vm(&ppgtt->base, true);
} else {
i915_gem_retire_requests(dev);
i915_gem_evict_vm(&ppgtt->base, false);
}
ppgtt->base.cleanup(&ppgtt->base);
}
static void ppgtt_release(struct kref *kref)
{
struct i915_hw_ppgtt *ppgtt =
container_of(kref, struct i915_hw_ppgtt, ref);
do_ppgtt_cleanup(ppgtt);
kfree(ppgtt);
}
static size_t get_context_alignment(struct drm_device *dev)
{
if (IS_GEN6(dev))
......@@ -179,24 +135,20 @@ static int get_context_size(struct drm_device *dev)
void i915_gem_context_free(struct kref *ctx_ref)
{
struct intel_context *ctx = container_of(ctx_ref,
typeof(*ctx), ref);
struct i915_hw_ppgtt *ppgtt = NULL;
typeof(*ctx), ref);
if (ctx->legacy_hw_ctx.rcs_state) {
/* We refcount even the aliasing PPGTT to keep the code symmetric */
if (USES_PPGTT(ctx->legacy_hw_ctx.rcs_state->base.dev))
ppgtt = ctx_to_ppgtt(ctx);
}
if (i915.enable_execlists)
intel_lr_context_free(ctx);
i915_ppgtt_put(ctx->ppgtt);
if (ppgtt)
kref_put(&ppgtt->ref, ppgtt_release);
if (ctx->legacy_hw_ctx.rcs_state)
drm_gem_object_unreference(&ctx->legacy_hw_ctx.rcs_state->base);
list_del(&ctx->link);
kfree(ctx);
}
static struct drm_i915_gem_object *
struct drm_i915_gem_object *
i915_gem_alloc_context_obj(struct drm_device *dev, size_t size)
{
struct drm_i915_gem_object *obj;
......@@ -226,29 +178,9 @@ i915_gem_alloc_context_obj(struct drm_device *dev, size_t size)
return obj;
}
static struct i915_hw_ppgtt *
create_vm_for_ctx(struct drm_device *dev, struct intel_context *ctx)
{
struct i915_hw_ppgtt *ppgtt;
int ret;
ppgtt = kzalloc(sizeof(*ppgtt), GFP_KERNEL);
if (!ppgtt)
return ERR_PTR(-ENOMEM);
ret = i915_gem_init_ppgtt(dev, ppgtt);
if (ret) {
kfree(ppgtt);
return ERR_PTR(ret);
}
ppgtt->ctx = ctx;
return ppgtt;
}
static struct intel_context *
__create_hw_context(struct drm_device *dev,
struct drm_i915_file_private *file_priv)
struct drm_i915_file_private *file_priv)
{
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_context *ctx;
......@@ -301,11 +233,9 @@ __create_hw_context(struct drm_device *dev,
*/
static struct intel_context *
i915_gem_create_context(struct drm_device *dev,
struct drm_i915_file_private *file_priv,
bool create_vm)
struct drm_i915_file_private *file_priv)
{
const bool is_global_default_ctx = file_priv == NULL;
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_context *ctx;
int ret = 0;
......@@ -331,34 +261,18 @@ i915_gem_create_context(struct drm_device *dev,
}
}
if (create_vm) {
struct i915_hw_ppgtt *ppgtt = create_vm_for_ctx(dev, ctx);
if (USES_FULL_PPGTT(dev)) {
struct i915_hw_ppgtt *ppgtt = i915_ppgtt_create(dev, file_priv);
if (IS_ERR_OR_NULL(ppgtt)) {
DRM_DEBUG_DRIVER("PPGTT setup failed (%ld)\n",
PTR_ERR(ppgtt));
ret = PTR_ERR(ppgtt);
goto err_unpin;
} else
ctx->vm = &ppgtt->base;
/* This case is reserved for the global default context and
* should only happen once. */
if (is_global_default_ctx) {
if (WARN_ON(dev_priv->mm.aliasing_ppgtt)) {
ret = -EEXIST;
goto err_unpin;
}
dev_priv->mm.aliasing_ppgtt = ppgtt;
}
} else if (USES_PPGTT(dev)) {
/* For platforms which only have aliasing PPGTT, we fake the
* address space and refcounting. */
ctx->vm = &dev_priv->mm.aliasing_ppgtt->base;
kref_get(&dev_priv->mm.aliasing_ppgtt->ref);
} else
ctx->vm = &dev_priv->gtt.base;
ctx->ppgtt = ppgtt;
}
return ctx;
......@@ -417,7 +331,11 @@ int i915_gem_context_init(struct drm_device *dev)
if (WARN_ON(dev_priv->ring[RCS].default_context))
return 0;
if (HAS_HW_CONTEXTS(dev)) {
if (i915.enable_execlists) {
/* NB: intentionally left blank. We will allocate our own
* backing objects as we need them, thank you very much */
dev_priv->hw_context_size = 0;
} else if (HAS_HW_CONTEXTS(dev)) {
dev_priv->hw_context_size = round_up(get_context_size(dev), 4096);
if (dev_priv->hw_context_size > (1<<20)) {
DRM_DEBUG_DRIVER("Disabling HW Contexts; invalid size %d\n",
......@@ -426,18 +344,23 @@ int i915_gem_context_init(struct drm_device *dev)
}
}
ctx = i915_gem_create_context(dev, NULL, USES_PPGTT(dev));
ctx = i915_gem_create_context(dev, NULL);
if (IS_ERR(ctx)) {
DRM_ERROR("Failed to create default global context (error %ld)\n",
PTR_ERR(ctx));
return PTR_ERR(ctx);
}
/* NB: RCS will hold a ref for all rings */
for (i = 0; i < I915_NUM_RINGS; i++)
dev_priv->ring[i].default_context = ctx;
for (i = 0; i < I915_NUM_RINGS; i++) {
struct intel_engine_cs *ring = &dev_priv->ring[i];
DRM_DEBUG_DRIVER("%s context support initialized\n", dev_priv->hw_context_size ? "HW" : "fake");
/* NB: RCS will hold a ref for all rings */
ring->default_context = ctx;
}
DRM_DEBUG_DRIVER("%s context support initialized\n",
i915.enable_execlists ? "LR" :
dev_priv->hw_context_size ? "HW" : "fake");
return 0;
}
......@@ -489,13 +412,6 @@ int i915_gem_context_enable(struct drm_i915_private *dev_priv)
struct intel_engine_cs *ring;
int ret, i;
/* This is the only place the aliasing PPGTT gets enabled, which means
* it has to happen before we bail on reset */
if (dev_priv->mm.aliasing_ppgtt) {
struct i915_hw_ppgtt *ppgtt = dev_priv->mm.aliasing_ppgtt;
ppgtt->enable(ppgtt);
}
/* FIXME: We should make this work, even in reset */
if (i915_reset_in_progress(&dev_priv->gpu_error))
return 0;
......@@ -527,7 +443,7 @@ int i915_gem_context_open(struct drm_device *dev, struct drm_file *file)
idr_init(&file_priv->context_idr);
mutex_lock(&dev->struct_mutex);
ctx = i915_gem_create_context(dev, file_priv, USES_FULL_PPGTT(dev));
ctx = i915_gem_create_context(dev, file_priv);
mutex_unlock(&dev->struct_mutex);
if (IS_ERR(ctx)) {
......@@ -614,7 +530,6 @@ static int do_switch(struct intel_engine_cs *ring,
{
struct drm_i915_private *dev_priv = ring->dev->dev_private;
struct intel_context *from = ring->last_context;
struct i915_hw_ppgtt *ppgtt = ctx_to_ppgtt(to);
u32 hw_flags = 0;
bool uninitialized = false;
int ret, i;
......@@ -642,8 +557,8 @@ static int do_switch(struct intel_engine_cs *ring,
*/
from = ring->last_context;
if (USES_FULL_PPGTT(ring->dev)) {
ret = ppgtt->switch_mm(ppgtt, ring, false);
if (to->ppgtt) {
ret = to->ppgtt->switch_mm(to->ppgtt, ring, false);
if (ret)
goto unpin_out;
}
......@@ -766,9 +681,9 @@ int i915_switch_context(struct intel_engine_cs *ring,
return do_switch(ring, to);
}
static bool hw_context_enabled(struct drm_device *dev)
static bool contexts_enabled(struct drm_device *dev)
{
return to_i915(dev)->hw_context_size;
return i915.enable_execlists || to_i915(dev)->hw_context_size;
}
int i915_gem_context_create_ioctl(struct drm_device *dev, void *data,
......@@ -779,14 +694,14 @@ int i915_gem_context_create_ioctl(struct drm_device *dev, void *data,
struct intel_context *ctx;
int ret;
if (!hw_context_enabled(dev))
if (!contexts_enabled(dev))
return -ENODEV;
ret = i915_mutex_lock_interruptible(dev);
if (ret)
return ret;
ctx = i915_gem_create_context(dev, file_priv, USES_FULL_PPGTT(dev));
ctx = i915_gem_create_context(dev, file_priv);
mutex_unlock(&dev->struct_mutex);
if (IS_ERR(ctx))
return PTR_ERR(ctx);
......
......@@ -34,6 +34,8 @@
#ifndef __I915_GEM_GTT_H__
#define __I915_GEM_GTT_H__
struct drm_i915_file_private;
typedef uint32_t gen6_gtt_pte_t;
typedef uint64_t gen8_gtt_pte_t;
typedef gen8_gtt_pte_t gen8_ppgtt_pde_t;
......@@ -258,7 +260,7 @@ struct i915_hw_ppgtt {
dma_addr_t *gen8_pt_dma_addr[4];
};
struct intel_context *ctx;
struct drm_i915_file_private *file_priv;
int (*enable)(struct i915_hw_ppgtt *ppgtt);
int (*switch_mm)(struct i915_hw_ppgtt *ppgtt,
......@@ -269,10 +271,26 @@ struct i915_hw_ppgtt {
int i915_gem_gtt_init(struct drm_device *dev);
void i915_gem_init_global_gtt(struct drm_device *dev);
void i915_gem_setup_global_gtt(struct drm_device *dev, unsigned long start,
unsigned long mappable_end, unsigned long end);
int i915_gem_init_ppgtt(struct drm_device *dev, struct i915_hw_ppgtt *ppgtt);
int i915_gem_setup_global_gtt(struct drm_device *dev, unsigned long start,
unsigned long mappable_end, unsigned long end);
void i915_global_gtt_cleanup(struct drm_device *dev);
int i915_ppgtt_init(struct drm_device *dev, struct i915_hw_ppgtt *ppgtt);
int i915_ppgtt_init_hw(struct drm_device *dev);
void i915_ppgtt_release(struct kref *kref);
struct i915_hw_ppgtt *i915_ppgtt_create(struct drm_device *dev,
struct drm_i915_file_private *fpriv);
static inline void i915_ppgtt_get(struct i915_hw_ppgtt *ppgtt)
{
if (ppgtt)
kref_get(&ppgtt->ref);
}
static inline void i915_ppgtt_put(struct i915_hw_ppgtt *ppgtt)
{
if (ppgtt)
kref_put(&ppgtt->ref, i915_ppgtt_release);
}
void i915_check_and_clear_faults(struct drm_device *dev);
void i915_gem_suspend_gtt_mappings(struct drm_device *dev);
......
......@@ -376,7 +376,7 @@ i915_gem_set_tiling(struct drm_device *dev, void *data,
if (ret == 0) {
obj->fence_dirty =
obj->fenced_gpu_access ||
obj->last_fenced_seqno ||
obj->fence_reg != I915_FENCE_REG_NONE;
obj->tiling_mode = args->tiling_mode;
......
......@@ -192,10 +192,10 @@ static void print_error_buffers(struct drm_i915_error_state_buf *m,
struct drm_i915_error_buffer *err,
int count)
{
err_printf(m, "%s [%d]:\n", name, count);
err_printf(m, " %s [%d]:\n", name, count);
while (count--) {
err_printf(m, " %08x %8u %02x %02x %x %x",
err_printf(m, " %08x %8u %02x %02x %x %x",
err->gtt_offset,
err->size,
err->read_domains,
......@@ -393,15 +393,17 @@ int i915_error_state_to_str(struct drm_i915_error_state_buf *m,
i915_ring_error_state(m, dev, &error->ring[i]);
}
if (error->active_bo)
for (i = 0; i < error->vm_count; i++) {
err_printf(m, "vm[%d]\n", i);
print_error_buffers(m, "Active",
error->active_bo[0],
error->active_bo_count[0]);
error->active_bo[i],
error->active_bo_count[i]);
if (error->pinned_bo)
print_error_buffers(m, "Pinned",
error->pinned_bo[0],
error->pinned_bo_count[0]);
error->pinned_bo[i],
error->pinned_bo_count[i]);
}
for (i = 0; i < ARRAY_SIZE(error->ring); i++) {
obj = error->ring[i].batchbuffer;
......@@ -644,13 +646,15 @@ i915_error_object_create_sized(struct drm_i915_private *dev_priv,
(src)->base.size>>PAGE_SHIFT)
static void capture_bo(struct drm_i915_error_buffer *err,
struct drm_i915_gem_object *obj)
struct i915_vma *vma)
{
struct drm_i915_gem_object *obj = vma->obj;
err->size = obj->base.size;
err->name = obj->base.name;
err->rseqno = obj->last_read_seqno;
err->wseqno = obj->last_write_seqno;
err->gtt_offset = i915_gem_obj_ggtt_offset(obj);
err->gtt_offset = vma->node.start;
err->read_domains = obj->base.read_domains;
err->write_domain = obj->base.write_domain;
err->fence_reg = obj->fence_reg;
......@@ -674,7 +678,7 @@ static u32 capture_active_bo(struct drm_i915_error_buffer *err,
int i = 0;
list_for_each_entry(vma, head, mm_list) {
capture_bo(err++, vma->obj);
capture_bo(err++, vma);
if (++i == count)
break;
}
......@@ -683,21 +687,27 @@ static u32 capture_active_bo(struct drm_i915_error_buffer *err,
}
static u32 capture_pinned_bo(struct drm_i915_error_buffer *err,
int count, struct list_head *head)
int count, struct list_head *head,
struct i915_address_space *vm)
{
struct drm_i915_gem_object *obj;
int i = 0;
struct drm_i915_error_buffer * const first = err;
struct drm_i915_error_buffer * const last = err + count;
list_for_each_entry(obj, head, global_list) {
if (!i915_gem_obj_is_pinned(obj))
continue;
struct i915_vma *vma;
capture_bo(err++, obj);
if (++i == count)
if (err == last)
break;
list_for_each_entry(vma, &obj->vma_list, vma_link)
if (vma->vm == vm && vma->pin_count > 0) {
capture_bo(err++, vma);
break;
}
}
return i;
return err - first;
}
/* Generate a semi-unique error code. The code is not meant to have meaning, The
......@@ -967,6 +977,12 @@ static void i915_gem_record_rings(struct drm_device *dev,
request = i915_gem_find_active_request(ring);
if (request) {
struct i915_address_space *vm;
vm = request->ctx && request->ctx->ppgtt ?
&request->ctx->ppgtt->base :
&dev_priv->gtt.base;
/* We need to copy these to an anonymous buffer
* as the simplest method to avoid being overwritten
* by userspace.
......@@ -974,9 +990,7 @@ static void i915_gem_record_rings(struct drm_device *dev,
error->ring[i].batchbuffer =
i915_error_object_create(dev_priv,
request->batch_obj,
request->ctx ?
request->ctx->vm :
&dev_priv->gtt.base);
vm);
if (HAS_BROKEN_CS_TLB(dev_priv->dev) &&
ring->scratch.obj)
......@@ -1049,9 +1063,14 @@ static void i915_gem_capture_vm(struct drm_i915_private *dev_priv,
list_for_each_entry(vma, &vm->active_list, mm_list)
i++;
error->active_bo_count[ndx] = i;
list_for_each_entry(obj, &dev_priv->mm.bound_list, global_list)
if (i915_gem_obj_is_pinned(obj))
i++;
list_for_each_entry(obj, &dev_priv->mm.bound_list, global_list) {
list_for_each_entry(vma, &obj->vma_list, vma_link)
if (vma->vm == vm && vma->pin_count > 0) {
i++;
break;
}
}
error->pinned_bo_count[ndx] = i - error->active_bo_count[ndx];
if (i) {
......@@ -1070,7 +1089,7 @@ static void i915_gem_capture_vm(struct drm_i915_private *dev_priv,
error->pinned_bo_count[ndx] =
capture_pinned_bo(pinned_bo,
error->pinned_bo_count[ndx],
&dev_priv->mm.bound_list);
&dev_priv->mm.bound_list, vm);
error->active_bo[ndx] = active_bo;
error->pinned_bo[ndx] = pinned_bo;
}
......@@ -1091,8 +1110,25 @@ static void i915_gem_capture_buffers(struct drm_i915_private *dev_priv,
error->pinned_bo_count = kcalloc(cnt, sizeof(*error->pinned_bo_count),
GFP_ATOMIC);
list_for_each_entry(vm, &dev_priv->vm_list, global_link)
i915_gem_capture_vm(dev_priv, error, vm, i++);
if (error->active_bo == NULL ||
error->pinned_bo == NULL ||
error->active_bo_count == NULL ||
error->pinned_bo_count == NULL) {
kfree(error->active_bo);
kfree(error->active_bo_count);
kfree(error->pinned_bo);
kfree(error->pinned_bo_count);
error->active_bo = NULL;
error->active_bo_count = NULL;
error->pinned_bo = NULL;
error->pinned_bo_count = NULL;
} else {
list_for_each_entry(vm, &dev_priv->vm_list, global_link)
i915_gem_capture_vm(dev_priv, error, vm, i++);
error->vm_count = cnt;
}
}
/* Capture all registers which don't fit into another category. */
......
......@@ -1322,10 +1322,10 @@ static u32 vlv_c0_residency(struct drm_i915_private *dev_priv,
* @dev_priv: DRM device private
*
*/
static u32 vlv_calc_delay_from_C0_counters(struct drm_i915_private *dev_priv)
static int vlv_calc_delay_from_C0_counters(struct drm_i915_private *dev_priv)
{
u32 residency_C0_up = 0, residency_C0_down = 0;
u8 new_delay, adj;
int new_delay, adj;
dev_priv->rps.ei_interrupt_count++;
......@@ -1627,6 +1627,7 @@ static irqreturn_t gen8_gt_irq_handler(struct drm_device *dev,
struct drm_i915_private *dev_priv,
u32 master_ctl)
{
struct intel_engine_cs *ring;
u32 rcs, bcs, vcs;
uint32_t tmp = 0;
irqreturn_t ret = IRQ_NONE;
......@@ -1636,12 +1637,20 @@ static irqreturn_t gen8_gt_irq_handler(struct drm_device *dev,
if (tmp) {
I915_WRITE(GEN8_GT_IIR(0), tmp);
ret = IRQ_HANDLED;
rcs = tmp >> GEN8_RCS_IRQ_SHIFT;
bcs = tmp >> GEN8_BCS_IRQ_SHIFT;
ring = &dev_priv->ring[RCS];
if (rcs & GT_RENDER_USER_INTERRUPT)
notify_ring(dev, &dev_priv->ring[RCS]);
notify_ring(dev, ring);
if (rcs & GT_CONTEXT_SWITCH_INTERRUPT)
intel_execlists_handle_ctx_events(ring);
bcs = tmp >> GEN8_BCS_IRQ_SHIFT;
ring = &dev_priv->ring[BCS];
if (bcs & GT_RENDER_USER_INTERRUPT)
notify_ring(dev, &dev_priv->ring[BCS]);
notify_ring(dev, ring);
if (bcs & GT_CONTEXT_SWITCH_INTERRUPT)
intel_execlists_handle_ctx_events(ring);
} else
DRM_ERROR("The master control interrupt lied (GT0)!\n");
}
......@@ -1651,12 +1660,20 @@ static irqreturn_t gen8_gt_irq_handler(struct drm_device *dev,
if (tmp) {
I915_WRITE(GEN8_GT_IIR(1), tmp);
ret = IRQ_HANDLED;
vcs = tmp >> GEN8_VCS1_IRQ_SHIFT;
ring = &dev_priv->ring[VCS];
if (vcs & GT_RENDER_USER_INTERRUPT)
notify_ring(dev, &dev_priv->ring[VCS]);
notify_ring(dev, ring);
if (vcs & GT_CONTEXT_SWITCH_INTERRUPT)
intel_execlists_handle_ctx_events(ring);
vcs = tmp >> GEN8_VCS2_IRQ_SHIFT;
ring = &dev_priv->ring[VCS2];
if (vcs & GT_RENDER_USER_INTERRUPT)
notify_ring(dev, &dev_priv->ring[VCS2]);
notify_ring(dev, ring);
if (vcs & GT_CONTEXT_SWITCH_INTERRUPT)
intel_execlists_handle_ctx_events(ring);
} else
DRM_ERROR("The master control interrupt lied (GT1)!\n");
}
......@@ -1677,9 +1694,13 @@ static irqreturn_t gen8_gt_irq_handler(struct drm_device *dev,
if (tmp) {
I915_WRITE(GEN8_GT_IIR(3), tmp);
ret = IRQ_HANDLED;
vcs = tmp >> GEN8_VECS_IRQ_SHIFT;
ring = &dev_priv->ring[VECS];
if (vcs & GT_RENDER_USER_INTERRUPT)
notify_ring(dev, &dev_priv->ring[VECS]);
notify_ring(dev, ring);
if (vcs & GT_CONTEXT_SWITCH_INTERRUPT)
intel_execlists_handle_ctx_events(ring);
} else
DRM_ERROR("The master control interrupt lied (GT3)!\n");
}
......@@ -1772,7 +1793,9 @@ static inline void intel_hpd_irq_handler(struct drm_device *dev,
long_hpd = (dig_hotplug_reg >> dig_shift) & PORTB_HOTPLUG_LONG_DETECT;
}
DRM_DEBUG_DRIVER("digital hpd port %d %d\n", port, long_hpd);
DRM_DEBUG_DRIVER("digital hpd port %c - %s\n",
port_name(port),
long_hpd ? "long" : "short");
/* for long HPD pulses we want to have the digital queue happen,
but we still want HPD storm detection to function. */
if (long_hpd) {
......@@ -3781,12 +3804,17 @@ static void gen8_gt_irq_postinstall(struct drm_i915_private *dev_priv)
/* These are interrupts we'll toggle with the ring mask register */
uint32_t gt_interrupts[] = {
GT_RENDER_USER_INTERRUPT << GEN8_RCS_IRQ_SHIFT |
GT_CONTEXT_SWITCH_INTERRUPT << GEN8_RCS_IRQ_SHIFT |
GT_RENDER_L3_PARITY_ERROR_INTERRUPT |
GT_RENDER_USER_INTERRUPT << GEN8_BCS_IRQ_SHIFT,
GT_RENDER_USER_INTERRUPT << GEN8_BCS_IRQ_SHIFT |
GT_CONTEXT_SWITCH_INTERRUPT << GEN8_BCS_IRQ_SHIFT,
GT_RENDER_USER_INTERRUPT << GEN8_VCS1_IRQ_SHIFT |
GT_RENDER_USER_INTERRUPT << GEN8_VCS2_IRQ_SHIFT,
GT_CONTEXT_SWITCH_INTERRUPT << GEN8_VCS1_IRQ_SHIFT |
GT_RENDER_USER_INTERRUPT << GEN8_VCS2_IRQ_SHIFT |
GT_CONTEXT_SWITCH_INTERRUPT << GEN8_VCS2_IRQ_SHIFT,
0,
GT_RENDER_USER_INTERRUPT << GEN8_VECS_IRQ_SHIFT
GT_RENDER_USER_INTERRUPT << GEN8_VECS_IRQ_SHIFT |
GT_CONTEXT_SWITCH_INTERRUPT << GEN8_VECS_IRQ_SHIFT
};
for (i = 0; i < ARRAY_SIZE(gt_interrupts); i++)
......
......@@ -35,6 +35,7 @@ struct i915_params i915 __read_mostly = {
.vbt_sdvo_panel_type = -1,
.enable_rc6 = -1,
.enable_fbc = -1,
.enable_execlists = 0,
.enable_hangcheck = true,
.enable_ppgtt = -1,
.enable_psr = 0,
......@@ -118,6 +119,11 @@ MODULE_PARM_DESC(enable_ppgtt,
"Override PPGTT usage. "
"(-1=auto [default], 0=disabled, 1=aliasing, 2=full)");
module_param_named(enable_execlists, i915.enable_execlists, int, 0400);
MODULE_PARM_DESC(enable_execlists,
"Override execlists usage. "
"(-1=auto, 0=disabled [default], 1=enabled)");
module_param_named(enable_psr, i915.enable_psr, int, 0600);
MODULE_PARM_DESC(enable_psr, "Enable PSR (default: false)");
......
......@@ -272,6 +272,7 @@
#define MI_SEMAPHORE_POLL (1<<15)
#define MI_SEMAPHORE_SAD_GTE_SDD (1<<12)
#define MI_STORE_DWORD_IMM MI_INSTR(0x20, 1)
#define MI_STORE_DWORD_IMM_GEN8 MI_INSTR(0x20, 2)
#define MI_MEM_VIRTUAL (1 << 22) /* 965+ only */
#define MI_STORE_DWORD_INDEX MI_INSTR(0x21, 1)
#define MI_STORE_DWORD_INDEX_SHIFT 2
......@@ -282,6 +283,7 @@
* address/value pairs. Don't overdo it, though, x <= 2^4 must hold!
*/
#define MI_LOAD_REGISTER_IMM(x) MI_INSTR(0x22, 2*(x)-1)
#define MI_LRI_FORCE_POSTED (1<<12)
#define MI_STORE_REGISTER_MEM(x) MI_INSTR(0x24, 2*(x)-1)
#define MI_STORE_REGISTER_MEM_GEN8(x) MI_INSTR(0x24, 3*(x)-1)
#define MI_SRM_LRM_GLOBAL_GTT (1<<22)
......@@ -1085,6 +1087,7 @@ enum punit_power_well {
#define RING_ACTHD_UDW(base) ((base)+0x5c)
#define RING_NOPID(base) ((base)+0x94)
#define RING_IMR(base) ((base)+0xa8)
#define RING_HWSTAM(base) ((base)+0x98)
#define RING_TIMESTAMP(base) ((base)+0x358)
#define TAIL_ADDR 0x001FFFF8
#define HEAD_WRAP_COUNT 0xFFE00000
......@@ -1401,6 +1404,7 @@ enum punit_power_well {
#define GT_BSD_CS_ERROR_INTERRUPT (1 << 15)
#define GT_BSD_USER_INTERRUPT (1 << 12)
#define GT_RENDER_L3_PARITY_ERROR_INTERRUPT_S1 (1 << 11) /* hsw+; rsvd on snb, ivb, vlv */
#define GT_CONTEXT_SWITCH_INTERRUPT (1 << 8)
#define GT_RENDER_L3_PARITY_ERROR_INTERRUPT (1 << 5) /* !snb */
#define GT_RENDER_PIPECTL_NOTIFY_INTERRUPT (1 << 4)
#define GT_RENDER_CS_MASTER_ERROR_INTERRUPT (1 << 3)
......@@ -4124,7 +4128,8 @@ enum punit_power_well {
/* Old style CUR*CNTR flags (desktop 8xx) */
#define CURSOR_ENABLE 0x80000000
#define CURSOR_GAMMA_ENABLE 0x40000000
#define CURSOR_STRIDE_MASK 0x30000000
#define CURSOR_STRIDE_SHIFT 28
#define CURSOR_STRIDE(x) ((ffs(x)-9) << CURSOR_STRIDE_SHIFT) /* 256,512,1k,2k */
#define CURSOR_PIPE_CSC_ENABLE (1<<24)
#define CURSOR_FORMAT_SHIFT 24
#define CURSOR_FORMAT_MASK (0x07 << CURSOR_FORMAT_SHIFT)
......
......@@ -169,14 +169,14 @@ static void intel_prepare_ddi_buffers(struct drm_device *dev, enum port port)
ddi_translations_dp = bdw_ddi_translations_dp;
ddi_translations_edp = bdw_ddi_translations_edp;
ddi_translations_hdmi = bdw_ddi_translations_hdmi;
n_hdmi_entries = ARRAY_SIZE(bdw_ddi_translations_hdmi);
n_hdmi_entries = ARRAY_SIZE(bdw_ddi_translations_hdmi) / 2;
hdmi_800mV_0dB = 7;
} else if (IS_HASWELL(dev)) {
ddi_translations_fdi = hsw_ddi_translations_fdi;
ddi_translations_dp = hsw_ddi_translations_dp;
ddi_translations_edp = hsw_ddi_translations_dp;
ddi_translations_hdmi = hsw_ddi_translations_hdmi;
n_hdmi_entries = ARRAY_SIZE(hsw_ddi_translations_hdmi);
n_hdmi_entries = ARRAY_SIZE(hsw_ddi_translations_hdmi) / 2;
hdmi_800mV_0dB = 6;
} else {
WARN(1, "ddi translation table missing\n");
......@@ -184,7 +184,7 @@ static void intel_prepare_ddi_buffers(struct drm_device *dev, enum port port)
ddi_translations_fdi = bdw_ddi_translations_fdi;
ddi_translations_dp = bdw_ddi_translations_dp;
ddi_translations_hdmi = bdw_ddi_translations_hdmi;
n_hdmi_entries = ARRAY_SIZE(bdw_ddi_translations_hdmi);
n_hdmi_entries = ARRAY_SIZE(bdw_ddi_translations_hdmi) / 2;
hdmi_800mV_0dB = 7;
}
......
......@@ -4059,7 +4059,8 @@ intel_dp_hpd_pulse(struct intel_digital_port *intel_dig_port, bool long_hpd)
if (intel_dig_port->base.type != INTEL_OUTPUT_EDP)
intel_dig_port->base.type = INTEL_OUTPUT_DISPLAYPORT;
DRM_DEBUG_KMS("got hpd irq on port %d - %s\n", intel_dig_port->port,
DRM_DEBUG_KMS("got hpd irq on port %c - %s\n",
port_name(intel_dig_port->port),
long_hpd ? "long" : "short");
power_domain = intel_display_port_power_domain(intel_encoder);
......
......@@ -411,6 +411,7 @@ struct intel_crtc {
uint32_t cursor_addr;
int16_t cursor_width, cursor_height;
uint32_t cursor_cntl;
uint32_t cursor_size;
uint32_t cursor_base;
struct intel_plane_config plane_config;
......@@ -952,7 +953,7 @@ void intel_dvo_init(struct drm_device *dev);
extern int intel_fbdev_init(struct drm_device *dev);
extern void intel_fbdev_initial_config(struct drm_device *dev);
extern void intel_fbdev_fini(struct drm_device *dev);
extern void intel_fbdev_set_suspend(struct drm_device *dev, int state);
extern void intel_fbdev_set_suspend(struct drm_device *dev, int state, bool synchronous);
extern void intel_fbdev_output_poll_changed(struct drm_device *dev);
extern void intel_fbdev_restore_mode(struct drm_device *dev);
#else
......@@ -969,7 +970,7 @@ static inline void intel_fbdev_fini(struct drm_device *dev)
{
}
static inline void intel_fbdev_set_suspend(struct drm_device *dev, int state)
static inline void intel_fbdev_set_suspend(struct drm_device *dev, int state, bool synchronous)
{
}
......
......@@ -26,6 +26,7 @@
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/console.h>
#include <linux/errno.h>
#include <linux/string.h>
#include <linux/mm.h>
......@@ -636,6 +637,15 @@ static bool intel_fbdev_init_bios(struct drm_device *dev,
return false;
}
static void intel_fbdev_suspend_worker(struct work_struct *work)
{
intel_fbdev_set_suspend(container_of(work,
struct drm_i915_private,
fbdev_suspend_work)->dev,
FBINFO_STATE_RUNNING,
true);
}
int intel_fbdev_init(struct drm_device *dev)
{
struct intel_fbdev *ifbdev;
......@@ -662,6 +672,8 @@ int intel_fbdev_init(struct drm_device *dev)
}
dev_priv->fbdev = ifbdev;
INIT_WORK(&dev_priv->fbdev_suspend_work, intel_fbdev_suspend_worker);
drm_fb_helper_single_add_all_connectors(&ifbdev->helper);
return 0;
......@@ -682,12 +694,14 @@ void intel_fbdev_fini(struct drm_device *dev)
if (!dev_priv->fbdev)
return;
flush_work(&dev_priv->fbdev_suspend_work);
intel_fbdev_destroy(dev, dev_priv->fbdev);
kfree(dev_priv->fbdev);
dev_priv->fbdev = NULL;
}
void intel_fbdev_set_suspend(struct drm_device *dev, int state)
void intel_fbdev_set_suspend(struct drm_device *dev, int state, bool synchronous)
{
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_fbdev *ifbdev = dev_priv->fbdev;
......@@ -698,6 +712,33 @@ void intel_fbdev_set_suspend(struct drm_device *dev, int state)
info = ifbdev->helper.fbdev;
if (synchronous) {
/* Flush any pending work to turn the console on, and then
* wait to turn it off. It must be synchronous as we are
* about to suspend or unload the driver.
*
* Note that from within the work-handler, we cannot flush
* ourselves, so only flush outstanding work upon suspend!
*/
if (state != FBINFO_STATE_RUNNING)
flush_work(&dev_priv->fbdev_suspend_work);
console_lock();
} else {
/*
* The console lock can be pretty contended on resume due
* to all the printk activity. Try to keep it out of the hot
* path of resume if possible.
*/
WARN_ON(state != FBINFO_STATE_RUNNING);
if (!console_trylock()) {
/* Don't block our own workqueue as this can
* be run in parallel with other i915.ko tasks.
*/
schedule_work(&dev_priv->fbdev_suspend_work);
return;
}
}
/* On resume from hibernation: If the object is shmemfs backed, it has
* been restored from swap. If the object is stolen however, it will be
* full of whatever garbage was left in there.
......@@ -706,6 +747,7 @@ void intel_fbdev_set_suspend(struct drm_device *dev, int state)
memset_io(info->screen_base, 0, info->screen_size);
fb_set_suspend(info, state);
console_unlock();
}
void intel_fbdev_output_poll_changed(struct drm_device *dev)
......
/*
* Copyright © 2014 Intel Corporation
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice (including the next
* paragraph) shall be included in all copies or substantial portions of the
* Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*/
#ifndef _INTEL_LRC_H_
#define _INTEL_LRC_H_
/* Execlists regs */
#define RING_ELSP(ring) ((ring)->mmio_base+0x230)
#define RING_EXECLIST_STATUS(ring) ((ring)->mmio_base+0x234)
#define RING_CONTEXT_CONTROL(ring) ((ring)->mmio_base+0x244)
#define RING_CONTEXT_STATUS_BUF(ring) ((ring)->mmio_base+0x370)
#define RING_CONTEXT_STATUS_PTR(ring) ((ring)->mmio_base+0x3a0)
/* Logical Rings */
void intel_logical_ring_stop(struct intel_engine_cs *ring);
void intel_logical_ring_cleanup(struct intel_engine_cs *ring);
int intel_logical_rings_init(struct drm_device *dev);
int logical_ring_flush_all_caches(struct intel_ringbuffer *ringbuf);
void intel_logical_ring_advance_and_submit(struct intel_ringbuffer *ringbuf);
/**
* intel_logical_ring_advance() - advance the ringbuffer tail
* @ringbuf: Ringbuffer to advance.
*
* The tail is only updated in our logical ringbuffer struct.
*/
static inline void intel_logical_ring_advance(struct intel_ringbuffer *ringbuf)
{
ringbuf->tail &= ringbuf->size - 1;
}
/**
* intel_logical_ring_emit() - write a DWORD to the ringbuffer.
* @ringbuf: Ringbuffer to write to.
* @data: DWORD to write.
*/
static inline void intel_logical_ring_emit(struct intel_ringbuffer *ringbuf,
u32 data)
{
iowrite32(data, ringbuf->virtual_start + ringbuf->tail);
ringbuf->tail += 4;
}
int intel_logical_ring_begin(struct intel_ringbuffer *ringbuf, int num_dwords);
/* Logical Ring Contexts */
void intel_lr_context_free(struct intel_context *ctx);
int intel_lr_context_deferred_create(struct intel_context *ctx,
struct intel_engine_cs *ring);
/* Execlists */
int intel_sanitize_enable_execlists(struct drm_device *dev, int enable_execlists);
int intel_execlists_submission(struct drm_device *dev, struct drm_file *file,
struct intel_engine_cs *ring,
struct intel_context *ctx,
struct drm_i915_gem_execbuffer2 *args,
struct list_head *vmas,
struct drm_i915_gem_object *batch_obj,
u64 exec_start, u32 flags);
u32 intel_execlists_ctx_id(struct drm_i915_gem_object *ctx_obj);
/**
* struct intel_ctx_submit_request - queued context submission request
* @ctx: Context to submit to the ELSP.
* @ring: Engine to submit it to.
* @tail: how far into the context's ringbuffer this request extends.
* @execlist_link: link in the submission queue.
* @work: workqueue for processing this request in a bottom half.
* @elsp_submitted: no. of times this request has been sent to the ELSP.
*
* The ELSP only accepts two elements at a time, so we queue context/tail
* pairs on a given queue (ring->execlist_queue) until the hardware is
* available. The queue serves a double purpose: we also use it to keep track
* of the up to 2 contexts currently in the hardware (usually one in execution
* and the other queued up by the GPU): We only remove elements from the head
* of the queue when the hardware informs us that an element has been
* completed.
*
* All accesses to the queue are mediated by a spinlock (ring->execlist_lock).
*/
struct intel_ctx_submit_request {
struct intel_context *ctx;
struct intel_engine_cs *ring;
u32 tail;
struct list_head execlist_link;
struct work_struct work;
int elsp_submitted;
};
void intel_execlists_handle_ctx_events(struct intel_engine_cs *ring);
#endif /* _INTEL_LRC_H_ */
......@@ -3719,7 +3719,6 @@ static void gen6_enable_rps(struct drm_device *dev)
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_engine_cs *ring;
u32 rp_state_cap;
u32 gt_perf_status;
u32 rc6vids, pcu_mbox = 0, rc6_mask = 0;
u32 gtfifodbg;
int rc6_mode;
......@@ -3744,7 +3743,6 @@ static void gen6_enable_rps(struct drm_device *dev)
gen6_gt_force_wake_get(dev_priv, FORCEWAKE_ALL);
rp_state_cap = I915_READ(GEN6_RP_STATE_CAP);
gt_perf_status = I915_READ(GEN6_GT_PERF_STATUS);
parse_rp_state_cap(dev_priv, rp_state_cap);
......
......@@ -33,14 +33,24 @@
#include "i915_trace.h"
#include "intel_drv.h"
/* Early gen2 devices have a cacheline of just 32 bytes, using 64 is overkill,
* but keeps the logic simple. Indeed, the whole purpose of this macro is just
* to give some indication as to some of the magic values used in the various
* workarounds!
*/
#define CACHELINE_BYTES 64
bool
intel_ring_initialized(struct intel_engine_cs *ring)
{
struct drm_device *dev = ring->dev;
if (!dev)
return false;
static inline int __ring_space(int head, int tail, int size)
if (i915.enable_execlists) {
struct intel_context *dctx = ring->default_context;
struct intel_ringbuffer *ringbuf = dctx->engine[ring->id].ringbuf;
return ringbuf->obj;
} else
return ring->buffer && ring->buffer->obj;
}
int __intel_ring_space(int head, int tail, int size)
{
int space = head - (tail + I915_RING_FREE_SPACE);
if (space < 0)
......@@ -48,12 +58,13 @@ static inline int __ring_space(int head, int tail, int size)
return space;
}
static inline int ring_space(struct intel_ringbuffer *ringbuf)
int intel_ring_space(struct intel_ringbuffer *ringbuf)
{
return __ring_space(ringbuf->head & HEAD_ADDR, ringbuf->tail, ringbuf->size);
return __intel_ring_space(ringbuf->head & HEAD_ADDR,
ringbuf->tail, ringbuf->size);
}
static bool intel_ring_stopped(struct intel_engine_cs *ring)
bool intel_ring_stopped(struct intel_engine_cs *ring)
{
struct drm_i915_private *dev_priv = ring->dev->dev_private;
return dev_priv->gpu_error.stop_rings & intel_ring_flag(ring);
......@@ -478,7 +489,12 @@ static bool stop_ring(struct intel_engine_cs *ring)
I915_WRITE_MODE(ring, _MASKED_BIT_ENABLE(STOP_RING));
if (wait_for((I915_READ_MODE(ring) & MODE_IDLE) != 0, 1000)) {
DRM_ERROR("%s : timed out trying to stop ring\n", ring->name);
return false;
/* Sometimes we observe that the idle flag is not
* set even though the ring is empty. So double
* check before giving up.
*/
if (I915_READ_HEAD(ring) != I915_READ_TAIL(ring))
return false;
}
}
......@@ -563,7 +579,7 @@ static int init_ring_common(struct intel_engine_cs *ring)
else {
ringbuf->head = I915_READ_HEAD(ring);
ringbuf->tail = I915_READ_TAIL(ring) & TAIL_ADDR;
ringbuf->space = ring_space(ringbuf);
ringbuf->space = intel_ring_space(ringbuf);
ringbuf->last_retired_head = -1;
}
......@@ -575,8 +591,25 @@ static int init_ring_common(struct intel_engine_cs *ring)
return ret;
}
static int
init_pipe_control(struct intel_engine_cs *ring)
void
intel_fini_pipe_control(struct intel_engine_cs *ring)
{
struct drm_device *dev = ring->dev;
if (ring->scratch.obj == NULL)
return;
if (INTEL_INFO(dev)->gen >= 5) {
kunmap(sg_page(ring->scratch.obj->pages->sgl));
i915_gem_object_ggtt_unpin(ring->scratch.obj);
}
drm_gem_object_unreference(&ring->scratch.obj->base);
ring->scratch.obj = NULL;
}
int
intel_init_pipe_control(struct intel_engine_cs *ring)
{
int ret;
......@@ -651,7 +684,7 @@ static int init_render_ring(struct intel_engine_cs *ring)
_MASKED_BIT_ENABLE(GFX_REPLAY_MODE));
if (INTEL_INFO(dev)->gen >= 5) {
ret = init_pipe_control(ring);
ret = intel_init_pipe_control(ring);
if (ret)
return ret;
}
......@@ -686,16 +719,7 @@ static void render_ring_cleanup(struct intel_engine_cs *ring)
dev_priv->semaphore_obj = NULL;
}
if (ring->scratch.obj == NULL)
return;
if (INTEL_INFO(dev)->gen >= 5) {
kunmap(sg_page(ring->scratch.obj->pages->sgl));
i915_gem_object_ggtt_unpin(ring->scratch.obj);
}
drm_gem_object_unreference(&ring->scratch.obj->base);
ring->scratch.obj = NULL;
intel_fini_pipe_control(ring);
}
static int gen8_rcs_signal(struct intel_engine_cs *signaller,
......@@ -1514,7 +1538,7 @@ static int init_phys_status_page(struct intel_engine_cs *ring)
return 0;
}
static void intel_destroy_ringbuffer_obj(struct intel_ringbuffer *ringbuf)
void intel_destroy_ringbuffer_obj(struct intel_ringbuffer *ringbuf)
{
if (!ringbuf->obj)
return;
......@@ -1525,8 +1549,8 @@ static void intel_destroy_ringbuffer_obj(struct intel_ringbuffer *ringbuf)
ringbuf->obj = NULL;
}
static int intel_alloc_ringbuffer_obj(struct drm_device *dev,
struct intel_ringbuffer *ringbuf)
int intel_alloc_ringbuffer_obj(struct drm_device *dev,
struct intel_ringbuffer *ringbuf)
{
struct drm_i915_private *dev_priv = to_i915(dev);
struct drm_i915_gem_object *obj;
......@@ -1588,7 +1612,9 @@ static int intel_init_ring_buffer(struct drm_device *dev,
ring->dev = dev;
INIT_LIST_HEAD(&ring->active_list);
INIT_LIST_HEAD(&ring->request_list);
INIT_LIST_HEAD(&ring->execlist_queue);
ringbuf->size = 32 * PAGE_SIZE;
ringbuf->ring = ring;
memset(ring->semaphore.sync_seqno, 0, sizeof(ring->semaphore.sync_seqno));
init_waitqueue_head(&ring->irq_queue);
......@@ -1671,13 +1697,14 @@ static int intel_ring_wait_request(struct intel_engine_cs *ring, int n)
ringbuf->head = ringbuf->last_retired_head;
ringbuf->last_retired_head = -1;
ringbuf->space = ring_space(ringbuf);
ringbuf->space = intel_ring_space(ringbuf);
if (ringbuf->space >= n)
return 0;
}
list_for_each_entry(request, &ring->request_list, list) {
if (__ring_space(request->tail, ringbuf->tail, ringbuf->size) >= n) {
if (__intel_ring_space(request->tail, ringbuf->tail,
ringbuf->size) >= n) {
seqno = request->seqno;
break;
}
......@@ -1694,7 +1721,7 @@ static int intel_ring_wait_request(struct intel_engine_cs *ring, int n)
ringbuf->head = ringbuf->last_retired_head;
ringbuf->last_retired_head = -1;
ringbuf->space = ring_space(ringbuf);
ringbuf->space = intel_ring_space(ringbuf);
return 0;
}
......@@ -1723,7 +1750,7 @@ static int ring_wait_for_space(struct intel_engine_cs *ring, int n)
trace_i915_ring_wait_begin(ring);
do {
ringbuf->head = I915_READ_HEAD(ring);
ringbuf->space = ring_space(ringbuf);
ringbuf->space = intel_ring_space(ringbuf);
if (ringbuf->space >= n) {
ret = 0;
break;
......@@ -1775,7 +1802,7 @@ static int intel_wrap_ring_buffer(struct intel_engine_cs *ring)
iowrite32(MI_NOOP, virt++);
ringbuf->tail = 0;
ringbuf->space = ring_space(ringbuf);
ringbuf->space = intel_ring_space(ringbuf);
return 0;
}
......@@ -1980,9 +2007,7 @@ gen8_ring_dispatch_execbuffer(struct intel_engine_cs *ring,
u64 offset, u32 len,
unsigned flags)
{
struct drm_i915_private *dev_priv = ring->dev->dev_private;
bool ppgtt = dev_priv->mm.aliasing_ppgtt != NULL &&
!(flags & I915_DISPATCH_SECURE);
bool ppgtt = USES_PPGTT(ring->dev) && !(flags & I915_DISPATCH_SECURE);
int ret;
ret = intel_ring_begin(ring, 4);
......
......@@ -5,6 +5,13 @@
#define I915_CMD_HASH_ORDER 9
/* Early gen2 devices have a cacheline of just 32 bytes, using 64 is overkill,
* but keeps the logic simple. Indeed, the whole purpose of this macro is just
* to give some indication as to some of the magic values used in the various
* workarounds!
*/
#define CACHELINE_BYTES 64
/*
* Gen2 BSpec "1. Programming Environment" / 1.4.4.6 "Ring Buffer Use"
* Gen3 BSpec "vol1c Memory Interface Functions" / 2.3.4.5 "Ring Buffer Use"
......@@ -90,6 +97,15 @@ struct intel_ringbuffer {
struct drm_i915_gem_object *obj;
void __iomem *virtual_start;
struct intel_engine_cs *ring;
/*
* FIXME: This backpointer is an artifact of the history of how the
* execlist patches came into being. It will get removed once the basic
* code has landed.
*/
struct intel_context *FIXME_lrc_ctx;
u32 head;
u32 tail;
int space;
......@@ -214,6 +230,18 @@ struct intel_engine_cs {
unsigned int num_dwords);
} semaphore;
/* Execlists */
spinlock_t execlist_lock;
struct list_head execlist_queue;
u8 next_context_status_buffer;
u32 irq_keep_mask; /* bitmask for interrupts that should not be masked */
int (*emit_request)(struct intel_ringbuffer *ringbuf);
int (*emit_flush)(struct intel_ringbuffer *ringbuf,
u32 invalidate_domains,
u32 flush_domains);
int (*emit_bb_start)(struct intel_ringbuffer *ringbuf,
u64 offset, unsigned flags);
/**
* List of objects currently involved in rendering from the
* ringbuffer.
......@@ -287,11 +315,7 @@ struct intel_engine_cs {
u32 (*get_cmd_length_mask)(u32 cmd_header);
};
static inline bool
intel_ring_initialized(struct intel_engine_cs *ring)
{
return ring->buffer && ring->buffer->obj;
}
bool intel_ring_initialized(struct intel_engine_cs *ring);
static inline unsigned
intel_ring_flag(struct intel_engine_cs *ring)
......@@ -355,6 +379,10 @@ intel_write_status_page(struct intel_engine_cs *ring,
#define I915_GEM_HWS_SCRATCH_INDEX 0x30
#define I915_GEM_HWS_SCRATCH_ADDR (I915_GEM_HWS_SCRATCH_INDEX << MI_STORE_DWORD_INDEX_SHIFT)
void intel_destroy_ringbuffer_obj(struct intel_ringbuffer *ringbuf);
int intel_alloc_ringbuffer_obj(struct drm_device *dev,
struct intel_ringbuffer *ringbuf);
void intel_stop_ring_buffer(struct intel_engine_cs *ring);
void intel_cleanup_ring_buffer(struct intel_engine_cs *ring);
......@@ -372,6 +400,9 @@ static inline void intel_ring_advance(struct intel_engine_cs *ring)
struct intel_ringbuffer *ringbuf = ring->buffer;
ringbuf->tail &= ringbuf->size - 1;
}
int __intel_ring_space(int head, int tail, int size);
int intel_ring_space(struct intel_ringbuffer *ringbuf);
bool intel_ring_stopped(struct intel_engine_cs *ring);
void __intel_ring_advance(struct intel_engine_cs *ring);
int __must_check intel_ring_idle(struct intel_engine_cs *ring);
......@@ -379,6 +410,9 @@ void intel_ring_init_seqno(struct intel_engine_cs *ring, u32 seqno);
int intel_ring_flush_all_caches(struct intel_engine_cs *ring);
int intel_ring_invalidate_all_caches(struct intel_engine_cs *ring);
void intel_fini_pipe_control(struct intel_engine_cs *ring);
int intel_init_pipe_control(struct intel_engine_cs *ring);
int intel_init_render_ring_buffer(struct drm_device *dev);
int intel_init_bsd_ring_buffer(struct drm_device *dev);
int intel_init_bsd2_ring_buffer(struct drm_device *dev);
......
......@@ -1127,6 +1127,9 @@ extern int drm_mode_obj_get_properties_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_priv);
extern int drm_mode_obj_set_property_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_priv);
extern int drm_mode_plane_set_obj_prop(struct drm_plane *plane,
struct drm_property *property,
uint64_t value);
extern void drm_fb_get_bpp_depth(uint32_t format, unsigned int *depth,
int *bpp);
......