Commit ca5a1b9b authored by Dave Airlie

Merge tag 'drm-intel-next-2014-06-20' of git://anongit.freedesktop.org/drm-intel into drm-next

- Accurate frontbuffer tracking and frontbuffer rendering invalidate, flush and
  flip events. This is prep work for proper PSR support and should also be
  useful for DRRS and FBC (an illustrative bit-layout sketch follows these lists).
- Runtime-suspend the hardware on system suspend to support the new S0ix sleep
  states, from Jesse.
- PSR updates for Broadwell (Rodrigo)
- Universal plane support for cursors (Matt Roper), including core drm patches.
- Prefault GTT mappings (Chris)
- Baytrail write-enable PTE bit support (Akash Goel)
- MMIO-based flips (Sourab Gupta) instead of blitter ring flips
- interrupt handling race fixes (Oscar Mateo)

And old, not yet merged features from the previous round:
- RPS/turbo support for CHV (Deepak)
- some other straggling CHV patches (Ville)
- proper universal plane conversion for the primary plane (Matt Roper)
- PPGTT on VLV, from Jesse
- pile of cleanups, little fixes for insane corner cases and improved debug
  support all over
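
For illustration only (not part of this merge): the new frontbuffer tracking keeps one nibble of obj->frontbuffer_bits per pipe, one bit per scanout plane type, via the INTEL_FRONTBUFFER_* macros added to i915_drv.h further down in this diff. Below is a minimal standalone sketch that reproduces that bit layout and prints it in the same 0x%03x form the debugfs output uses; the main()/printf wrapper is purely illustrative.

#include <stdio.h>

#define INTEL_FRONTBUFFER_BITS_PER_PIPE 4
#define INTEL_FRONTBUFFER_PRIMARY(pipe) \
	(1 << (INTEL_FRONTBUFFER_BITS_PER_PIPE * (pipe)))
#define INTEL_FRONTBUFFER_CURSOR(pipe) \
	(1 << (1 + INTEL_FRONTBUFFER_BITS_PER_PIPE * (pipe)))
#define INTEL_FRONTBUFFER_SPRITE(pipe) \
	(1 << (2 + INTEL_FRONTBUFFER_BITS_PER_PIPE * (pipe)))
#define INTEL_FRONTBUFFER_OVERLAY(pipe) \
	(1 << (3 + INTEL_FRONTBUFFER_BITS_PER_PIPE * (pipe)))
#define INTEL_FRONTBUFFER_ALL_MASK(pipe) \
	(0xf << (INTEL_FRONTBUFFER_BITS_PER_PIPE * (pipe)))

int main(void)
{
	int pipe;

	/* Pipes A, B, C: show which frontbuffer_bits each plane type owns. */
	for (pipe = 0; pipe < 3; pipe++)
		printf("pipe %c: primary 0x%03x cursor 0x%03x sprite 0x%03x overlay 0x%03x all 0x%03x\n",
		       'A' + pipe,
		       INTEL_FRONTBUFFER_PRIMARY(pipe),
		       INTEL_FRONTBUFFER_CURSOR(pipe),
		       INTEL_FRONTBUFFER_SPRITE(pipe),
		       INTEL_FRONTBUFFER_OVERLAY(pipe),
		       INTEL_FRONTBUFFER_ALL_MASK(pipe));
	return 0;
}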

* tag 'drm-intel-next-2014-06-20' of git://anongit.freedesktop.org/drm-intel: (99 commits)
  drm/i915: Update DRIVER_DATE to 20140620
  drivers/i915: Fix unnoticed failure of init_ring_common()
  drm/i915: Track frontbuffer invalidation/flushing
  drm/i915: Use new frontbuffer bits to increase pll clock
  drm/i915: don't take runtime PM reference around freeze/thaw
  drm/i915: use runtime irq suspend/resume in freeze/thaw
  drm/i915: Properly track domain of the fbcon fb
  drm/i915: Print obj->frontbuffer_bits in debugfs output
  drm/i915: Introduce accurate frontbuffer tracking
  drm/i915: Drop schedule_back from psr_exit
  drm/i915: Ditch intel_edp_psr_update
  drm/i915: Drop unecessary complexity from psr_inactivate
  drm/i915: Remove ctx->last_ring
  drm/i915/chv: Ack interrupts before handling them (CHV)
  drm/i915/bdw: Ack interrupts before handling them (GEN8)
  drm/i915/vlv: Ack interrupts before handling them (VLV)
  drm/i915: Ack interrupts before handling them (GEN5 - GEN7)
  drm/i915: Don't BUG_ON in i915_gem_obj_offset
  drm/i915: Grab dev->struct_mutex in i915_gem_pageflip_info
  drm/i915: Add some L3 registers to the parser whitelist
  ...

Conflicts:
	drivers/gpu/drm/i915/i915_drv.c
parents c7dbc6c9 34882298
......@@ -426,6 +426,9 @@ static const u32 gen7_render_regs[] = {
GEN7_SO_WRITE_OFFSET(1),
GEN7_SO_WRITE_OFFSET(2),
GEN7_SO_WRITE_OFFSET(3),
GEN7_L3SQCREG1,
GEN7_L3CNTLREG2,
GEN7_L3CNTLREG3,
};
static const u32 gen7_blt_regs[] = {
......
......@@ -170,6 +170,8 @@ describe_obj(struct seq_file *m, struct drm_i915_gem_object *obj)
}
if (obj->ring != NULL)
seq_printf(m, " (%s)", obj->ring->name);
if (obj->frontbuffer_bits)
seq_printf(m, " (frontbuffer: 0x%03x)", obj->frontbuffer_bits);
}
static void describe_ctx(struct seq_file *m, struct intel_context *ctx)
......@@ -515,6 +517,11 @@ static int i915_gem_pageflip_info(struct seq_file *m, void *data)
struct drm_device *dev = node->minor->dev;
unsigned long flags;
struct intel_crtc *crtc;
int ret;
ret = mutex_lock_interruptible(&dev->struct_mutex);
if (ret)
return ret;
for_each_intel_crtc(dev, crtc) {
const char pipe = pipe_name(crtc->pipe);
......@@ -556,6 +563,8 @@ static int i915_gem_pageflip_info(struct seq_file *m, void *data)
spin_unlock_irqrestore(&dev->event_lock, flags);
}
mutex_unlock(&dev->struct_mutex);
return 0;
}
......@@ -1029,7 +1038,8 @@ static int i915_frequency_info(struct seq_file *m, void *unused)
MEMSTAT_VID_SHIFT);
seq_printf(m, "Current P-state: %d\n",
(rgvstat & MEMSTAT_PSTATE_MASK) >> MEMSTAT_PSTATE_SHIFT);
} else if ((IS_GEN6(dev) || IS_GEN7(dev)) && !IS_VALLEYVIEW(dev)) {
} else if (IS_GEN6(dev) || (IS_GEN7(dev) && !IS_VALLEYVIEW(dev)) ||
IS_BROADWELL(dev)) {
u32 gt_perf_status = I915_READ(GEN6_GT_PERF_STATUS);
u32 rp_state_limits = I915_READ(GEN6_RP_STATE_LIMITS);
u32 rp_state_cap = I915_READ(GEN6_RP_STATE_CAP);
......@@ -1048,7 +1058,7 @@ static int i915_frequency_info(struct seq_file *m, void *unused)
reqf = I915_READ(GEN6_RPNSWREQ);
reqf &= ~GEN6_TURBO_DISABLE;
if (IS_HASWELL(dev))
if (IS_HASWELL(dev) || IS_BROADWELL(dev))
reqf >>= 24;
else
reqf >>= 25;
......@@ -1065,7 +1075,7 @@ static int i915_frequency_info(struct seq_file *m, void *unused)
rpdownei = I915_READ(GEN6_RP_CUR_DOWN_EI);
rpcurdown = I915_READ(GEN6_RP_CUR_DOWN);
rpprevdown = I915_READ(GEN6_RP_PREV_DOWN);
if (IS_HASWELL(dev))
if (IS_HASWELL(dev) || IS_BROADWELL(dev))
cagf = (rpstat & HSW_CAGF_MASK) >> HSW_CAGF_SHIFT;
else
cagf = (rpstat & GEN6_CAGF_MASK) >> GEN6_CAGF_SHIFT;
......@@ -1677,9 +1687,6 @@ static int i915_gem_framebuffer_info(struct seq_file *m, void *data)
#ifdef CONFIG_DRM_I915_FBDEV
struct drm_i915_private *dev_priv = dev->dev_private;
int ret = mutex_lock_interruptible(&dev->mode_config.mutex);
if (ret)
return ret;
ifbdev = dev_priv->fbdev;
fb = to_intel_framebuffer(ifbdev->helper.fb);
......@@ -1692,7 +1699,6 @@ static int i915_gem_framebuffer_info(struct seq_file *m, void *data)
atomic_read(&fb->base.refcount.refcount));
describe_obj(m, fb->obj);
seq_putc(m, '\n');
mutex_unlock(&dev->mode_config.mutex);
#endif
mutex_lock(&dev->mode_config.fb_lock);
......@@ -1723,7 +1729,7 @@ static int i915_context_status(struct seq_file *m, void *unused)
struct intel_context *ctx;
int ret, i;
ret = mutex_lock_interruptible(&dev->mode_config.mutex);
ret = mutex_lock_interruptible(&dev->struct_mutex);
if (ret)
return ret;
......@@ -1753,7 +1759,7 @@ static int i915_context_status(struct seq_file *m, void *unused)
seq_putc(m, '\n');
}
mutex_unlock(&dev->mode_config.mutex);
mutex_unlock(&dev->struct_mutex);
return 0;
}
......@@ -1978,10 +1984,12 @@ static int i915_edp_psr_status(struct seq_file *m, void *data)
seq_printf(m, "Sink_Support: %s\n", yesno(dev_priv->psr.sink_support));
seq_printf(m, "Source_OK: %s\n", yesno(dev_priv->psr.source_ok));
seq_printf(m, "Enabled: %s\n", yesno(dev_priv->psr.enabled));
seq_printf(m, "Active: %s\n", yesno(dev_priv->psr.active));
enabled = HAS_PSR(dev) &&
I915_READ(EDP_PSR_CTL(dev)) & EDP_PSR_ENABLE;
seq_printf(m, "Enabled: %s\n", yesno(enabled));
seq_printf(m, "HW Enabled & Active bit: %s\n", yesno(enabled));
if (HAS_PSR(dev))
psrperf = I915_READ(EDP_PSR_PERF_CNT(dev)) &
......@@ -2223,9 +2231,12 @@ static void intel_crtc_info(struct seq_file *m, struct intel_crtc *intel_crtc)
struct drm_crtc *crtc = &intel_crtc->base;
struct intel_encoder *intel_encoder;
seq_printf(m, "\tfb: %d, pos: %dx%d, size: %dx%d\n",
crtc->primary->fb->base.id, crtc->x, crtc->y,
crtc->primary->fb->width, crtc->primary->fb->height);
if (crtc->primary->fb)
seq_printf(m, "\tfb: %d, pos: %dx%d, size: %dx%d\n",
crtc->primary->fb->base.id, crtc->x, crtc->y,
crtc->primary->fb->width, crtc->primary->fb->height);
else
seq_puts(m, "\tprimary plane disabled\n");
for_each_encoder_on_crtc(dev, crtc, intel_encoder)
intel_encoder_info(m, intel_crtc, intel_encoder);
}
......@@ -2929,11 +2940,16 @@ static int pipe_crc_set_source(struct drm_device *dev, enum pipe pipe,
/* real source -> none transition */
if (source == INTEL_PIPE_CRC_SOURCE_NONE) {
struct intel_pipe_crc_entry *entries;
struct intel_crtc *crtc =
to_intel_crtc(dev_priv->pipe_to_crtc_mapping[pipe]);
DRM_DEBUG_DRIVER("stopping CRCs for pipe %c\n",
pipe_name(pipe));
intel_wait_for_vblank(dev, pipe);
drm_modeset_lock(&crtc->base.mutex, NULL);
if (crtc->active)
intel_wait_for_vblank(dev, pipe);
drm_modeset_unlock(&crtc->base.mutex);
spin_lock_irq(&pipe_crc->lock);
entries = pipe_crc->entries;
......@@ -3506,7 +3522,7 @@ i915_max_freq_get(void *data, u64 *val)
struct drm_i915_private *dev_priv = dev->dev_private;
int ret;
if (!(IS_GEN6(dev) || IS_GEN7(dev)))
if (INTEL_INFO(dev)->gen < 6)
return -ENODEV;
flush_delayed_work(&dev_priv->rps.delayed_resume_work);
......@@ -3532,7 +3548,7 @@ i915_max_freq_set(void *data, u64 val)
u32 rp_state_cap, hw_max, hw_min;
int ret;
if (!(IS_GEN6(dev) || IS_GEN7(dev)))
if (INTEL_INFO(dev)->gen < 6)
return -ENODEV;
flush_delayed_work(&dev_priv->rps.delayed_resume_work);
......@@ -3587,7 +3603,7 @@ i915_min_freq_get(void *data, u64 *val)
struct drm_i915_private *dev_priv = dev->dev_private;
int ret;
if (!(IS_GEN6(dev) || IS_GEN7(dev)))
if (INTEL_INFO(dev)->gen < 6)
return -ENODEV;
flush_delayed_work(&dev_priv->rps.delayed_resume_work);
......@@ -3613,7 +3629,7 @@ i915_min_freq_set(void *data, u64 val)
u32 rp_state_cap, hw_max, hw_min;
int ret;
if (!(IS_GEN6(dev) || IS_GEN7(dev)))
if (INTEL_INFO(dev)->gen < 6)
return -ENODEV;
flush_delayed_work(&dev_priv->rps.delayed_resume_work);
......
......@@ -138,7 +138,7 @@ static void i915_free_hws(struct drm_device *dev)
I915_WRITE(HWS_PGA, 0x1ffff000);
}
void i915_kernel_lost_context(struct drm_device * dev)
void i915_kernel_lost_context(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
struct drm_i915_master_private *master_priv;
......@@ -166,7 +166,7 @@ void i915_kernel_lost_context(struct drm_device * dev)
master_priv->sarea_priv->perf_boxes |= I915_BOX_RING_EMPTY;
}
static int i915_dma_cleanup(struct drm_device * dev)
static int i915_dma_cleanup(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
int i;
......@@ -190,7 +190,7 @@ static int i915_dma_cleanup(struct drm_device * dev)
return 0;
}
static int i915_initialize(struct drm_device * dev, drm_i915_init_t * init)
static int i915_initialize(struct drm_device *dev, drm_i915_init_t *init)
{
struct drm_i915_private *dev_priv = dev->dev_private;
struct drm_i915_master_private *master_priv = dev->primary->master->driver_priv;
......@@ -235,7 +235,7 @@ static int i915_initialize(struct drm_device * dev, drm_i915_init_t * init)
return 0;
}
static int i915_dma_resume(struct drm_device * dev)
static int i915_dma_resume(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_engine_cs *ring = LP_RING(dev_priv);
......@@ -359,7 +359,7 @@ static int validate_cmd(int cmd)
return 0;
}
static int i915_emit_cmds(struct drm_device * dev, int *buffer, int dwords)
static int i915_emit_cmds(struct drm_device *dev, int *buffer, int dwords)
{
struct drm_i915_private *dev_priv = dev->dev_private;
int i, ret;
......@@ -369,6 +369,7 @@ static int i915_emit_cmds(struct drm_device * dev, int *buffer, int dwords)
for (i = 0; i < dwords;) {
int sz = validate_cmd(buffer[i]);
if (sz == 0 || i + sz > dwords)
return -EINVAL;
i += sz;
......@@ -453,7 +454,7 @@ static void i915_emit_breadcrumb(struct drm_device *dev)
}
}
static int i915_dispatch_cmdbuffer(struct drm_device * dev,
static int i915_dispatch_cmdbuffer(struct drm_device *dev,
drm_i915_cmdbuffer_t *cmd,
struct drm_clip_rect *cliprects,
void *cmdbuf)
......@@ -487,8 +488,8 @@ static int i915_dispatch_cmdbuffer(struct drm_device * dev,
return 0;
}
static int i915_dispatch_batchbuffer(struct drm_device * dev,
drm_i915_batchbuffer_t * batch,
static int i915_dispatch_batchbuffer(struct drm_device *dev,
drm_i915_batchbuffer_t *batch,
struct drm_clip_rect *cliprects)
{
struct drm_i915_private *dev_priv = dev->dev_private;
......@@ -549,7 +550,7 @@ static int i915_dispatch_batchbuffer(struct drm_device * dev,
return 0;
}
static int i915_dispatch_flip(struct drm_device * dev)
static int i915_dispatch_flip(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
struct drm_i915_master_private *master_priv =
......@@ -755,7 +756,7 @@ static int i915_cmdbuffer(struct drm_device *dev, void *data,
return ret;
}
static int i915_emit_irq(struct drm_device * dev)
static int i915_emit_irq(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
struct drm_i915_master_private *master_priv = dev->primary->master->driver_priv;
......@@ -781,7 +782,7 @@ static int i915_emit_irq(struct drm_device * dev)
return dev_priv->dri1.counter;
}
static int i915_wait_irq(struct drm_device * dev, int irq_nr)
static int i915_wait_irq(struct drm_device *dev, int irq_nr)
{
struct drm_i915_private *dev_priv = dev->dev_private;
struct drm_i915_master_private *master_priv = dev->primary->master->driver_priv;
......@@ -1266,6 +1267,7 @@ static void i915_switcheroo_set_state(struct pci_dev *pdev, enum vga_switcheroo_
{
struct drm_device *dev = pci_get_drvdata(pdev);
pm_message_t pmm = { .event = PM_EVENT_SUSPEND };
if (state == VGA_SWITCHEROO_ON) {
pr_info("switched on\n");
dev->switch_power_state = DRM_SWITCH_POWER_CHANGING;
......@@ -1488,10 +1490,11 @@ static void i915_dump_device_info(struct drm_i915_private *dev_priv)
#define SEP_EMPTY
#define PRINT_FLAG(name) info->name ? #name "," : ""
#define SEP_COMMA ,
DRM_DEBUG_DRIVER("i915 device info: gen=%i, pciid=0x%04x flags="
DRM_DEBUG_DRIVER("i915 device info: gen=%i, pciid=0x%04x rev=0x%02x flags="
DEV_INFO_FOR_EACH_FLAG(PRINT_S, SEP_EMPTY),
info->gen,
dev_priv->dev->pdev->device,
dev_priv->dev->pdev->revision,
DEV_INFO_FOR_EACH_FLAG(PRINT_FLAG, SEP_COMMA));
#undef PRINT_S
#undef SEP_EMPTY
......@@ -1602,6 +1605,7 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags)
spin_lock_init(&dev_priv->backlight_lock);
spin_lock_init(&dev_priv->uncore.lock);
spin_lock_init(&dev_priv->mm.object_stat_lock);
spin_lock_init(&dev_priv->mmio_flip_lock);
mutex_init(&dev_priv->dpio_lock);
mutex_init(&dev_priv->modeset_restore_lock);
......@@ -1929,7 +1933,7 @@ int i915_driver_open(struct drm_device *dev, struct drm_file *file)
* and DMA structures, since the kernel won't be using them, and clea
* up any GEM state.
*/
void i915_driver_lastclose(struct drm_device * dev)
void i915_driver_lastclose(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
......@@ -1950,7 +1954,7 @@ void i915_driver_lastclose(struct drm_device * dev)
i915_dma_cleanup(dev);
}
void i915_driver_preclose(struct drm_device * dev, struct drm_file *file_priv)
void i915_driver_preclose(struct drm_device *dev, struct drm_file *file_priv)
{
mutex_lock(&dev->struct_mutex);
i915_gem_context_close(dev, file_priv);
......@@ -2027,7 +2031,7 @@ int i915_max_ioctl = ARRAY_SIZE(i915_ioctls);
* manage the gtt, we need to claim that all intel devices are agp. For
* otherwise the drm core refuses to initialize the agp support code.
*/
int i915_driver_device_is_agp(struct drm_device * dev)
int i915_driver_device_is_agp(struct drm_device *dev)
{
return 1;
}
......@@ -28,6 +28,7 @@
*/
#include <linux/device.h>
#include <linux/acpi.h>
#include <drm/drmP.h>
#include <drm/i915_drm.h>
#include "i915_drv.h"
......@@ -46,8 +47,6 @@ static struct drm_driver driver;
PIPE_C_OFFSET, PIPE_EDP_OFFSET }, \
.trans_offsets = { TRANSCODER_A_OFFSET, TRANSCODER_B_OFFSET, \
TRANSCODER_C_OFFSET, TRANSCODER_EDP_OFFSET }, \
.dpll_offsets = { DPLL_A_OFFSET, DPLL_B_OFFSET }, \
.dpll_md_offsets = { DPLL_A_MD_OFFSET, DPLL_B_MD_OFFSET }, \
.palette_offsets = { PALETTE_A_OFFSET, PALETTE_B_OFFSET }
#define GEN_CHV_PIPEOFFSETS \
......@@ -55,10 +54,6 @@ static struct drm_driver driver;
CHV_PIPE_C_OFFSET }, \
.trans_offsets = { TRANSCODER_A_OFFSET, TRANSCODER_B_OFFSET, \
CHV_TRANSCODER_C_OFFSET, }, \
.dpll_offsets = { DPLL_A_OFFSET, DPLL_B_OFFSET, \
CHV_DPLL_C_OFFSET }, \
.dpll_md_offsets = { DPLL_A_MD_OFFSET, DPLL_B_MD_OFFSET, \
CHV_DPLL_C_MD_OFFSET }, \
.palette_offsets = { PALETTE_A_OFFSET, PALETTE_B_OFFSET, \
CHV_PALETTE_C_OFFSET }
......@@ -499,8 +494,7 @@ static int i915_drm_freeze(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
struct drm_crtc *crtc;
intel_runtime_pm_get(dev_priv);
pci_power_t opregion_target_state;
/* ignore lid events during suspend */
mutex_lock(&dev_priv->modeset_restore_lock);
......@@ -526,9 +520,9 @@ static int i915_drm_freeze(struct drm_device *dev)
return error;
}
drm_irq_uninstall(dev);
intel_runtime_pm_disable_interrupts(dev);
intel_disable_gt_powersave(dev);
intel_suspend_gt_powersave(dev);
/*
* Disable CRTCs directly since we want to preserve sw state
......@@ -547,8 +541,14 @@ static int i915_drm_freeze(struct drm_device *dev)
i915_save_state(dev);
if (acpi_target_system_state() >= ACPI_STATE_S3)
opregion_target_state = PCI_D3cold;
else
opregion_target_state = PCI_D1;
intel_opregion_notify_adapter(dev, opregion_target_state);
intel_uncore_forcewake_reset(dev, false);
intel_opregion_fini(dev);
intel_uncore_fini(dev);
console_lock();
intel_fbdev_set_suspend(dev, FBINFO_STATE_SUSPENDED);
......@@ -556,6 +556,8 @@ static int i915_drm_freeze(struct drm_device *dev)
dev_priv->suspend_count++;
intel_display_set_init_power(dev_priv, false);
return 0;
}
......@@ -605,7 +607,10 @@ static int i915_drm_thaw_early(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
intel_uncore_early_sanitize(dev);
if (IS_HASWELL(dev) || IS_BROADWELL(dev))
hsw_disable_pc8(dev_priv);
intel_uncore_early_sanitize(dev, true);
intel_uncore_sanitize(dev);
intel_power_domains_init_hw(dev_priv);
......@@ -638,8 +643,7 @@ static int __i915_drm_thaw(struct drm_device *dev, bool restore_gtt_mappings)
}
mutex_unlock(&dev->struct_mutex);
/* We need working interrupts for modeset enabling ... */
drm_irq_install(dev, dev->pdev->irq);
intel_runtime_pm_restore_interrupts(dev);
intel_modeset_init_hw(dev);
......@@ -676,7 +680,8 @@ static int __i915_drm_thaw(struct drm_device *dev, bool restore_gtt_mappings)
dev_priv->modeset_restore = MODESET_DONE;
mutex_unlock(&dev_priv->modeset_restore_lock);
intel_runtime_pm_put(dev_priv);
intel_opregion_notify_adapter(dev, PCI_D0);
return 0;
}
......@@ -885,6 +890,7 @@ static int i915_pm_suspend_late(struct device *dev)
{
struct pci_dev *pdev = to_pci_dev(dev);
struct drm_device *drm_dev = pci_get_drvdata(pdev);
struct drm_i915_private *dev_priv = drm_dev->dev_private;
/*
* We have a suspedn ordering issue with the snd-hda driver also
......@@ -898,6 +904,9 @@ static int i915_pm_suspend_late(struct device *dev)
if (drm_dev->switch_power_state == DRM_SWITCH_POWER_OFF)
return 0;
if (IS_HASWELL(drm_dev) || IS_BROADWELL(drm_dev))
hsw_enable_pc8(dev_priv);
pci_disable_device(pdev);
pci_set_power_state(pdev, PCI_D3hot);
......
......@@ -53,7 +53,7 @@
#define DRIVER_NAME "i915"
#define DRIVER_DESC "Intel Graphics"
#define DRIVER_DATE "20080730"
#define DRIVER_DATE "20140620"
enum pipe {
INVALID_PIPE = -1,
......@@ -552,8 +552,6 @@ struct intel_device_info {
/* Register offsets for the various display pipes and transcoders */
int pipe_offsets[I915_MAX_TRANSCODERS];
int trans_offsets[I915_MAX_TRANSCODERS];
int dpll_offsets[I915_MAX_PIPES];
int dpll_md_offsets[I915_MAX_PIPES];
int palette_offsets[I915_MAX_PIPES];
int cursor_offsets[I915_MAX_PIPES];
};
......@@ -593,7 +591,6 @@ struct intel_context {
bool is_initialized;
uint8_t remap_slice;
struct drm_i915_file_private *file_priv;
struct intel_engine_cs *last_ring;
struct drm_i915_gem_object *obj;
struct i915_ctx_hang_stats hang_stats;
struct i915_address_space *vm;
......@@ -638,6 +635,10 @@ struct i915_drrs {
struct i915_psr {
bool sink_support;
bool source_ok;
bool setup_done;
bool enabled;
bool active;
struct delayed_work work;
};
enum intel_pch {
......@@ -1331,6 +1332,17 @@ struct intel_pipe_crc {
wait_queue_head_t wq;
};
struct i915_frontbuffer_tracking {
struct mutex lock;
/*
* Tracking bits for delayed frontbuffer flushing du to gpu activity or
* scheduled flips.
*/
unsigned busy_bits;
unsigned flip_bits;
};
struct drm_i915_private {
struct drm_device *dev;
struct kmem_cache *slab;
......@@ -1370,6 +1382,9 @@ struct drm_i915_private {
/* protects the irq masks */
spinlock_t irq_lock;
/* protects the mmio flip data */
spinlock_t mmio_flip_lock;
bool display_irqs_enabled;
/* To control wakeup latency, e.g. for irq-driven dp aux transfers. */
......@@ -1473,6 +1488,9 @@ struct drm_i915_private {
bool lvds_downclock_avail;
/* indicates the reduced downclock for LVDS*/
int lvds_downclock;
struct i915_frontbuffer_tracking fb_tracking;
u16 orig_clock;
bool mchbar_need_disable;
......@@ -1590,6 +1608,28 @@ struct drm_i915_gem_object_ops {
void (*release)(struct drm_i915_gem_object *);
};
/*
* Frontbuffer tracking bits. Set in obj->frontbuffer_bits while a gem bo is
* considered to be the frontbuffer for the given plane interface-vise. This
* doesn't mean that the hw necessarily already scans it out, but that any
* rendering (by the cpu or gpu) will land in the frontbuffer eventually.
*
* We have one bit per pipe and per scanout plane type.
*/
#define INTEL_FRONTBUFFER_BITS_PER_PIPE 4
#define INTEL_FRONTBUFFER_BITS \
(INTEL_FRONTBUFFER_BITS_PER_PIPE * I915_MAX_PIPES)
#define INTEL_FRONTBUFFER_PRIMARY(pipe) \
(1 << (INTEL_FRONTBUFFER_BITS_PER_PIPE * (pipe)))
#define INTEL_FRONTBUFFER_CURSOR(pipe) \
(1 << (1 +(INTEL_FRONTBUFFER_BITS_PER_PIPE * (pipe))))
#define INTEL_FRONTBUFFER_SPRITE(pipe) \
(1 << (2 +(INTEL_FRONTBUFFER_BITS_PER_PIPE * (pipe))))
#define INTEL_FRONTBUFFER_OVERLAY(pipe) \
(1 << (3 +(INTEL_FRONTBUFFER_BITS_PER_PIPE * (pipe))))
#define INTEL_FRONTBUFFER_ALL_MASK(pipe) \
(0xf << (INTEL_FRONTBUFFER_BITS_PER_PIPE * (pipe)))
struct drm_i915_gem_object {
struct drm_gem_object base;
......@@ -1659,6 +1699,12 @@ struct drm_i915_gem_object {
unsigned int pin_mappable:1;
unsigned int pin_display:1;
/*
* Is the object to be mapped as read-only to the GPU
* Only honoured if hardware has relevant pte bit
*/
unsigned long gt_ro:1;
/*
* Is the GPU currently using a fence to access this buffer,
*/
......@@ -1671,6 +1717,8 @@ struct drm_i915_gem_object {
unsigned int has_global_gtt_mapping:1;
unsigned int has_dma_mapping:1;
unsigned int frontbuffer_bits:INTEL_FRONTBUFFER_BITS;
struct sg_table *pages;
int pages_pin_count;
......@@ -1717,6 +1765,10 @@ struct drm_i915_gem_object {
};
#define to_intel_bo(x) container_of(x, struct drm_i915_gem_object, base)
void i915_gem_track_fb(struct drm_i915_gem_object *old,
struct drm_i915_gem_object *new,
unsigned frontbuffer_bits);
/**
* Request queue structure.
*
......@@ -1938,10 +1990,8 @@ struct drm_i915_cmd_table {
#define I915_NEED_GFX_HWS(dev) (INTEL_INFO(dev)->need_gfx_hws)
#define HAS_HW_CONTEXTS(dev) (INTEL_INFO(dev)->gen >= 6)
#define HAS_ALIASING_PPGTT(dev) (INTEL_INFO(dev)->gen >= 6 && \
(!IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev)))
#define HAS_PPGTT(dev) (INTEL_INFO(dev)->gen >= 7 \
&& !IS_GEN8(dev))
#define HAS_ALIASING_PPGTT(dev) (INTEL_INFO(dev)->gen >= 6)
#define HAS_PPGTT(dev) (INTEL_INFO(dev)->gen >= 7 && !IS_GEN8(dev))
#define USES_PPGTT(dev) intel_enable_ppgtt(dev, false)
#define USES_FULL_PPGTT(dev) intel_enable_ppgtt(dev, true)
......@@ -2038,6 +2088,7 @@ struct i915_params {
bool reset;
bool disable_display;
bool disable_vtd_wa;
int use_mmio_flip;
};
extern struct i915_params i915 __read_mostly;
......@@ -2082,10 +2133,12 @@ extern void intel_irq_init(struct drm_device *dev);
extern void intel_hpd_init(struct drm_device *dev);
extern void intel_uncore_sanitize(struct drm_device *dev);
extern void intel_uncore_early_sanitize(struct drm_device *dev);
extern void intel_uncore_early_sanitize(struct drm_device *dev,
bool restore_forcewake);
extern void intel_uncore_init(struct drm_device *dev);
extern void intel_uncore_check_errors(struct drm_device *dev);
extern void intel_uncore_fini(struct drm_device *dev);
extern void intel_uncore_forcewake_reset(struct drm_device *dev, bool restore);
void
i915_enable_pipestat(struct drm_i915_private *dev_priv, enum pipe pipe,
......@@ -2233,6 +2286,8 @@ bool i915_gem_retire_requests(struct drm_device *dev);
void i915_gem_retire_requests_ring(struct intel_engine_cs *ring);
int __must_check i915_gem_check_wedge(struct i915_gpu_error *error,
bool interruptible);
int __must_check i915_gem_check_olr(struct intel_engine_cs *ring, u32 seqno);
static inline bool i915_reset_in_progress(struct i915_gpu_error *error)
{
return unlikely(atomic_read(&error->reset_counter)
......@@ -2443,7 +2498,6 @@ i915_gem_object_create_stolen_for_preallocated(struct drm_device *dev,
u32 stolen_offset,
u32 gtt_offset,
u32 size);
void i915_gem_object_release_stolen(struct drm_i915_gem_object *obj);
/* i915_gem_tiling.c */
static inline bool i915_gem_object_needs_bit17_swizzle(struct drm_i915_gem_object *obj)
......@@ -2603,6 +2657,8 @@ int i915_reg_read_ioctl(struct drm_device *dev, void *data,
int i915_get_reset_stats_ioctl(struct drm_device *dev, void *data,
struct drm_file *file);
void intel_notify_mmio_flip(struct intel_engine_cs *ring);
/* overlay */
extern struct intel_overlay_error_state *intel_overlay_capture_error_state(struct drm_device *dev);
extern void intel_overlay_print_error_state(struct drm_i915_error_state_buf *e,
......
......@@ -1095,7 +1095,7 @@ i915_gem_check_wedge(struct i915_gpu_error *error,
* Compare seqno against outstanding lazy request. Emit a request if they are
* equal.
*/
static int
int
i915_gem_check_olr(struct intel_engine_cs *ring, u32 seqno)
{
int ret;
......@@ -1561,14 +1561,29 @@ int i915_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
if (ret)
goto unpin;
obj->fault_mappable = true;
/* Finally, remap it using the new GTT offset */
pfn = dev_priv->gtt.mappable_base + i915_gem_obj_ggtt_offset(obj);
pfn >>= PAGE_SHIFT;
pfn += page_offset;
/* Finally, remap it using the new GTT offset */
ret = vm_insert_pfn(vma, (unsigned long)vmf->virtual_address, pfn);
if (!obj->fault_mappable) {
unsigned long size = min_t(unsigned long,
vma->vm_end - vma->vm_start,
obj->base.size);
int i;
for (i = 0; i < size >> PAGE_SHIFT; i++) {
ret = vm_insert_pfn(vma,
(unsigned long)vma->vm_start + i * PAGE_SIZE,
pfn + i);
if (ret)
break;
}
obj->fault_mappable = true;
} else
ret = vm_insert_pfn(vma,
(unsigned long)vmf->virtual_address,
pfn + page_offset);
unpin:
i915_gem_object_ggtt_unpin(obj);
unlock:
......@@ -1616,22 +1631,6 @@ int i915_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
return ret;
}
void i915_gem_release_all_mmaps(struct drm_i915_private *dev_priv)
{
struct i915_vma *vma;
/*
* Only the global gtt is relevant for gtt memory mappings, so restrict
* list traversal to objects bound into the global address space. Note
* that the active list should be empty, but better safe than sorry.
*/
WARN_ON(!list_empty(&dev_priv->gtt.base.active_list));
list_for_each_entry(vma, &dev_priv->gtt.base.active_list, mm_list)
i915_gem_release_mmap(vma->obj);
list_for_each_entry(vma, &dev_priv->gtt.base.inactive_list, mm_list)
i915_gem_release_mmap(vma->obj);
}
/**
* i915_gem_release_mmap - remove physical page mappings
* @obj: obj in question
......@@ -1657,6 +1656,15 @@ i915_gem_release_mmap(struct drm_i915_gem_object *obj)
obj->fault_mappable = false;
}
void
i915_gem_release_all_mmaps(struct drm_i915_private *dev_priv)
{
struct drm_i915_gem_object *obj;
list_for_each_entry(obj, &dev_priv->mm.bound_list, global_list)
i915_gem_release_mmap(obj);
}
uint32_t
i915_gem_get_gtt_size(struct drm_device *dev, uint32_t size, int tiling_mode)
{
......@@ -2211,6 +2219,8 @@ i915_gem_object_move_to_inactive(struct drm_i915_gem_object *obj)
list_move_tail(&vma->mm_list, &vm->inactive_list);
}
intel_fb_obj_flush(obj, true);
list_del_init(&obj->ring_list);
obj->ring = NULL;
......@@ -3540,6 +3550,8 @@ i915_gem_object_flush_gtt_write_domain(struct drm_i915_gem_object *obj)
old_write_domain = obj->base.write_domain;
obj->base.write_domain = 0;
intel_fb_obj_flush(obj, false);
trace_i915_gem_object_change_domain(obj,
obj->base.read_domains,
old_write_domain);
......@@ -3561,6 +3573,8 @@ i915_gem_object_flush_cpu_write_domain(struct drm_i915_gem_object *obj,
old_write_domain = obj->base.write_domain;
obj->base.write_domain = 0;
intel_fb_obj_flush(obj, false);
trace_i915_gem_object_change_domain(obj,
obj->base.read_domains,
old_write_domain);
......@@ -3614,6 +3628,9 @@ i915_gem_object_set_to_gtt_domain(struct drm_i915_gem_object *obj, bool write)
obj->dirty = 1;
}
if (write)
intel_fb_obj_invalidate(obj, NULL);
trace_i915_gem_object_change_domain(obj,
old_read_domains,
old_write_domain);
......@@ -3950,6 +3967,9 @@ i915_gem_object_set_to_cpu_domain(struct drm_i915_gem_object *obj, bool write)
obj->base.write_domain = I915_GEM_DOMAIN_CPU;
}
if (write)
intel_fb_obj_invalidate(obj, NULL);
trace_i915_gem_object_change_domain(obj,
old_read_domains,
old_write_domain);
......@@ -4438,13 +4458,14 @@ void i915_gem_free_object(struct drm_gem_object *gem_obj)
if (obj->stolen)
i915_gem_object_unpin_pages(obj);
WARN_ON(obj->frontbuffer_bits);
if (WARN_ON(obj->pages_pin_count))
obj->pages_pin_count = 0;
if (discard_backing_storage(obj))
obj->madv = I915_MADV_DONTNEED;
i915_gem_object_put_pages(obj);
i915_gem_object_free_mmap_offset(obj);
i915_gem_object_release_stolen(obj);
BUG_ON(obj->pages);
......@@ -4922,6 +4943,8 @@ i915_gem_load(struct drm_device *dev)
dev_priv->mm.oom_notifier.notifier_call = i915_gem_shrinker_oom;
register_oom_notifier(&dev_priv->mm.oom_notifier);
mutex_init(&dev_priv->fb_tracking.lock);
}
void i915_gem_release(struct drm_device *dev, struct drm_file *file)
......@@ -4983,6 +5006,23 @@ int i915_gem_open(struct drm_device *dev, struct drm_file *file)
return ret;
}
void i915_gem_track_fb(struct drm_i915_gem_object *old,
struct drm_i915_gem_object *new,
unsigned frontbuffer_bits)
{
if (old) {
WARN_ON(!mutex_is_locked(&old->base.dev->struct_mutex));
WARN_ON(!(old->frontbuffer_bits & frontbuffer_bits));
old->frontbuffer_bits &= ~frontbuffer_bits;
}
if (new) {
WARN_ON(!mutex_is_locked(&new->base.dev->struct_mutex));
WARN_ON(new->frontbuffer_bits & frontbuffer_bits);
new->frontbuffer_bits |= frontbuffer_bits;
}
}
static bool mutex_is_locked_by(struct mutex *mutex, struct task_struct *task)
{
if (!mutex_is_locked(mutex))
......@@ -5065,12 +5105,13 @@ unsigned long i915_gem_obj_offset(struct drm_i915_gem_object *o,
vm == &dev_priv->mm.aliasing_ppgtt->base)
vm = &dev_priv->gtt.base;
BUG_ON(list_empty(&o->vma_list));
list_for_each_entry(vma, &o->vma_list, vma_link) {
if (vma->vm == vm)
return vma->node.start;
}
WARN(1, "%s vma for this object not found.\n",
i915_is_ggtt(vm) ? "global" : "ppgtt");
return -1;
}
......
......@@ -606,7 +606,7 @@ static int do_switch(struct intel_engine_cs *ring,
BUG_ON(!i915_gem_obj_is_pinned(from->obj));
}
if (from == to && from->last_ring == ring && !to->remap_slice)
if (from == to && !to->remap_slice)
return 0;
/* Trying to pin first makes error handling easier. */
......@@ -703,7 +703,6 @@ static int do_switch(struct intel_engine_cs *ring,
done:
i915_gem_context_reference(to);
ring->last_context = to;
to->last_ring = ring;
if (uninitialized) {
ret = i915_gem_render_state_init(ring);
......
......@@ -975,10 +975,8 @@ i915_gem_execbuffer_move_to_active(struct list_head *vmas,
if (obj->base.write_domain) {
obj->dirty = 1;
obj->last_write_seqno = intel_ring_get_seqno(ring);
/* check for potential scanout */
if (i915_gem_obj_ggtt_bound(obj) &&
i915_gem_obj_to_ggtt(obj)->pin_count)
intel_mark_fb_busy(obj, ring);
intel_fb_obj_invalidate(obj, ring);
/* update for the implicit flush after a batch */
obj->base.write_domain &= ~I915_GEM_GPU_DOMAINS;
......@@ -1525,7 +1523,7 @@ i915_gem_execbuffer2(struct drm_device *dev, void *data,
ret = i915_gem_do_execbuffer(dev, data, file, args, exec2_list);
if (!ret) {
/* Copy the new buffer offsets back to the user's exec list. */
struct drm_i915_gem_exec_object2 *user_exec_list =
struct drm_i915_gem_exec_object2 __user *user_exec_list =
to_user_ptr(args->buffers_ptr);
int i;
......
......@@ -63,6 +63,12 @@ static int sanitize_enable_ppgtt(struct drm_device *dev, int enable_ppgtt)
}
#endif
/* Early VLV doesn't have this */
if (IS_VALLEYVIEW(dev) && dev->pdev->revision < 0xb) {
DRM_DEBUG_DRIVER("disabling PPGTT on pre-B3 step VLV\n");
return 0;
}
return HAS_ALIASING_PPGTT(dev) ? 1 : 0;
}
......@@ -110,7 +116,7 @@ static inline gen8_ppgtt_pde_t gen8_pde_encode(struct drm_device *dev,
static gen6_gtt_pte_t snb_pte_encode(dma_addr_t addr,
enum i915_cache_level level,
bool valid)
bool valid, u32 unused)
{
gen6_gtt_pte_t pte = valid ? GEN6_PTE_VALID : 0;
pte |= GEN6_PTE_ADDR_ENCODE(addr);
......@@ -132,7 +138,7 @@ static gen6_gtt_pte_t snb_pte_encode(dma_addr_t addr,
static gen6_gtt_pte_t ivb_pte_encode(dma_addr_t addr,
enum i915_cache_level level,
bool valid)
bool valid, u32 unused)
{
gen6_gtt_pte_t pte = valid ? GEN6_PTE_VALID : 0;
pte |= GEN6_PTE_ADDR_ENCODE(addr);
......@@ -156,7 +162,7 @@ static gen6_gtt_pte_t ivb_pte_encode(dma_addr_t addr,
static gen6_gtt_pte_t byt_pte_encode(dma_addr_t addr,
enum i915_cache_level level,
bool valid)
bool valid, u32 flags)
{
gen6_gtt_pte_t pte = valid ? GEN6_PTE_VALID : 0;
pte |= GEN6_PTE_ADDR_ENCODE(addr);
......@@ -164,7 +170,8 @@ static gen6_gtt_pte_t byt_pte_encode(dma_addr_t addr,
/* Mark the page as writeable. Other platforms don't have a
* setting for read-only/writable, so this matches that behavior.
*/
pte |= BYT_PTE_WRITEABLE;
if (!(flags & PTE_READ_ONLY))
pte |= BYT_PTE_WRITEABLE;
if (level != I915_CACHE_NONE)
pte |= BYT_PTE_SNOOPED_BY_CPU_CACHES;
......@@ -174,7 +181,7 @@ static gen6_gtt_pte_t byt_pte_encode(dma_addr_t addr,
static gen6_gtt_pte_t hsw_pte_encode(dma_addr_t addr,
enum i915_cache_level level,
bool valid)
bool valid, u32 unused)
{
gen6_gtt_pte_t pte = valid ? GEN6_PTE_VALID : 0;
pte |= HSW_PTE_ADDR_ENCODE(addr);
......@@ -187,7 +194,7 @@ static gen6_gtt_pte_t hsw_pte_encode(dma_addr_t addr,
static gen6_gtt_pte_t iris_pte_encode(dma_addr_t addr,
enum i915_cache_level level,
bool valid)
bool valid, u32 unused)
{
gen6_gtt_pte_t pte = valid ? GEN6_PTE_VALID : 0;
pte |= HSW_PTE_ADDR_ENCODE(addr);
......@@ -301,7 +308,7 @@ static void gen8_ppgtt_clear_range(struct i915_address_space *vm,
static void gen8_ppgtt_insert_entries(struct i915_address_space *vm,
struct sg_table *pages,
uint64_t start,
enum i915_cache_level cache_level)
enum i915_cache_level cache_level, u32 unused)
{
struct i915_hw_ppgtt *ppgtt =
container_of(vm, struct i915_hw_ppgtt, base);
......@@ -639,7 +646,7 @@ static void gen6_dump_ppgtt(struct i915_hw_ppgtt *ppgtt, struct seq_file *m)
uint32_t pd_entry;
int pte, pde;
scratch_pte = vm->pte_encode(vm->scratch.addr, I915_CACHE_LLC, true);
scratch_pte = vm->pte_encode(vm->scratch.addr, I915_CACHE_LLC, true, 0);
pd_addr = (gen6_gtt_pte_t __iomem *)dev_priv->gtt.gsm +
ppgtt->pd_offset / sizeof(gen6_gtt_pte_t);
......@@ -941,7 +948,7 @@ static void gen6_ppgtt_clear_range(struct i915_address_space *vm,
unsigned first_pte = first_entry % I915_PPGTT_PT_ENTRIES;
unsigned last_pte, i;
scratch_pte = vm->pte_encode(vm->scratch.addr, I915_CACHE_LLC, true);
scratch_pte = vm->pte_encode(vm->scratch.addr, I915_CACHE_LLC, true, 0);
while (num_entries) {
last_pte = first_pte + num_entries;
......@@ -964,7 +971,7 @@ static void gen6_ppgtt_clear_range(struct i915_address_space *vm,
static void gen6_ppgtt_insert_entries(struct i915_address_space *vm,
struct sg_table *pages,
uint64_t start,
enum i915_cache_level cache_level)
enum i915_cache_level cache_level, u32 flags)
{
struct i915_hw_ppgtt *ppgtt =
container_of(vm, struct i915_hw_ppgtt, base);
......@@ -981,7 +988,8 @@ static void gen6_ppgtt_insert_entries(struct i915_address_space *vm,
pt_vaddr[act_pte] =
vm->pte_encode(sg_page_iter_dma_address(&sg_iter),
cache_level, true);
cache_level, true, flags);
if (++act_pte == I915_PPGTT_PT_ENTRIES) {
kunmap_atomic(pt_vaddr);
pt_vaddr = NULL;
......@@ -1218,8 +1226,12 @@ ppgtt_bind_vma(struct i915_vma *vma,
enum i915_cache_level cache_level,
u32 flags)
{
/* Currently applicable only to VLV */
if (vma->obj->gt_ro)
flags |= PTE_READ_ONLY;
vma->vm->insert_entries(vma->vm, vma->obj->pages, vma->node.start,
cache_level);
cache_level, flags);
}
static void ppgtt_unbind_vma(struct i915_vma *vma)
......@@ -1394,7 +1406,7 @@ static inline void gen8_set_pte(void __iomem *addr, gen8_gtt_pte_t pte)
static void gen8_ggtt_insert_entries(struct i915_address_space *vm,
struct sg_table *st,
uint64_t start,
enum i915_cache_level level)
enum i915_cache_level level, u32 unused)
{
struct drm_i915_private *dev_priv = vm->dev->dev_private;
unsigned first_entry = start >> PAGE_SHIFT;
......@@ -1440,7 +1452,7 @@ static void gen8_ggtt_insert_entries(struct i915_address_space *vm,
static void gen6_ggtt_insert_entries(struct i915_address_space *vm,
struct sg_table *st,
uint64_t start,
enum i915_cache_level level)
enum i915_cache_level level, u32 flags)
{
struct drm_i915_private *dev_priv = vm->dev->dev_private;
unsigned first_entry = start >> PAGE_SHIFT;
......@@ -1452,7 +1464,7 @@ static void gen6_ggtt_insert_entries(struct i915_address_space *vm,
for_each_sg_page(st->sgl, &sg_iter, st->nents, 0) {
addr = sg_page_iter_dma_address(&sg_iter);
iowrite32(vm->pte_encode(addr, level, true), &gtt_entries[i]);
iowrite32(vm->pte_encode(addr, level, true, flags), &gtt_entries[i]);
i++;
}
......@@ -1464,7 +1476,7 @@ static void gen6_ggtt_insert_entries(struct i915_address_space *vm,
*/
if (i != 0)
WARN_ON(readl(&gtt_entries[i-1]) !=
vm->pte_encode(addr, level, true));
vm->pte_encode(addr, level, true, flags));
/* This next bit makes the above posting read even more important. We
* want to flush the TLBs only after we're certain all the PTE updates
......@@ -1518,7 +1530,7 @@ static void gen6_ggtt_clear_range(struct i915_address_space *vm,
first_entry, num_entries, max_entries))
num_entries = max_entries;
scratch_pte = vm->pte_encode(vm->scratch.addr, I915_CACHE_LLC, use_scratch);
scratch_pte = vm->pte_encode(vm->scratch.addr, I915_CACHE_LLC, use_scratch, 0);
for (i = 0; i < num_entries; i++)
iowrite32(scratch_pte, &gtt_base[i]);
......@@ -1567,6 +1579,10 @@ static void ggtt_bind_vma(struct i915_vma *vma,
struct drm_i915_private *dev_priv = dev->dev_private;
struct drm_i915_gem_object *obj = vma->obj;
/* Currently applicable only to VLV */
if (obj->gt_ro)
flags |= PTE_READ_ONLY;
/* If there is no aliasing PPGTT, or the caller needs a global mapping,
* or we have a global mapping already but the cacheability flags have
* changed, set the global PTEs.
......@@ -1583,7 +1599,7 @@ static void ggtt_bind_vma(struct i915_vma *vma,
(cache_level != obj->cache_level)) {
vma->vm->insert_entries(vma->vm, obj->pages,
vma->node.start,
cache_level);
cache_level, flags);
obj->has_global_gtt_mapping = 1;
}
}
......@@ -1595,7 +1611,7 @@ static void ggtt_bind_vma(struct i915_vma *vma,
appgtt->base.insert_entries(&appgtt->base,
vma->obj->pages,
vma->node.start,
cache_level);
cache_level, flags);
vma->obj->has_aliasing_ppgtt_mapping = 1;
}
}
......
......@@ -154,6 +154,7 @@ struct i915_vma {
void (*unbind_vma)(struct i915_vma *vma);
/* Map an object into an address space with the given cache flags. */
#define GLOBAL_BIND (1<<0)
#define PTE_READ_ONLY (1<<1)
void (*bind_vma)(struct i915_vma *vma,
enum i915_cache_level cache_level,
u32 flags);
......@@ -197,7 +198,7 @@ struct i915_address_space {
/* FIXME: Need a more generic return type */
gen6_gtt_pte_t (*pte_encode)(dma_addr_t addr,
enum i915_cache_level level,
bool valid); /* Create a valid PTE */
bool valid, u32 flags); /* Create a valid PTE */
void (*clear_range)(struct i915_address_space *vm,
uint64_t start,
uint64_t length,
......@@ -205,7 +206,7 @@ struct i915_address_space {
void (*insert_entries)(struct i915_address_space *vm,
struct sg_table *st,
uint64_t start,
enum i915_cache_level cache_level);
enum i915_cache_level cache_level, u32 flags);
void (*cleanup)(struct i915_address_space *vm);
};
......
......@@ -28,64 +28,13 @@
#include "i915_drv.h"
#include "intel_renderstate.h"
struct i915_render_state {
struct render_state {
const struct intel_renderstate_rodata *rodata;
struct drm_i915_gem_object *obj;
unsigned long ggtt_offset;
void *batch;
u32 size;
u32 len;
u64 ggtt_offset;
int gen;
};
static struct i915_render_state *render_state_alloc(struct drm_device *dev)
{
struct i915_render_state *so;
struct page *page;
int ret;
so = kzalloc(sizeof(*so), GFP_KERNEL);
if (!so)
return ERR_PTR(-ENOMEM);
so->obj = i915_gem_alloc_object(dev, 4096);
if (so->obj == NULL) {
ret = -ENOMEM;
goto free;
}
so->size = 4096;
ret = i915_gem_obj_ggtt_pin(so->obj, 4096, 0);
if (ret)
goto free_gem;
BUG_ON(so->obj->pages->nents != 1);
page = sg_page(so->obj->pages->sgl);
so->batch = kmap(page);
if (!so->batch) {
ret = -ENOMEM;
goto unpin;
}
so->ggtt_offset = i915_gem_obj_ggtt_offset(so->obj);
return so;
unpin:
i915_gem_object_ggtt_unpin(so->obj);
free_gem:
drm_gem_object_unreference(&so->obj->base);
free:
kfree(so);
return ERR_PTR(ret);
}
static void render_state_free(struct i915_render_state *so)
{
kunmap(so->batch);
i915_gem_object_ggtt_unpin(so->obj);
drm_gem_object_unreference(&so->obj->base);
kfree(so);
}
static const struct intel_renderstate_rodata *
render_state_get_rodata(struct drm_device *dev, const int gen)
{
......@@ -101,98 +50,120 @@ render_state_get_rodata(struct drm_device *dev, const int gen)
return NULL;
}
static int render_state_setup(const int gen,
const struct intel_renderstate_rodata *rodata,
struct i915_render_state *so)
static int render_state_init(struct render_state *so, struct drm_device *dev)
{
const u64 goffset = i915_gem_obj_ggtt_offset(so->obj);
u32 reloc_index = 0;
u32 * const d = so->batch;
unsigned int i = 0;
int ret;
if (!rodata || rodata->batch_items * 4 > so->size)
so->gen = INTEL_INFO(dev)->gen;
so->rodata = render_state_get_rodata(dev, so->gen);
if (so->rodata == NULL)
return 0;
if (so->rodata->batch_items * 4 > 4096)
return -EINVAL;
so->obj = i915_gem_alloc_object(dev, 4096);
if (so->obj == NULL)
return -ENOMEM;
ret = i915_gem_obj_ggtt_pin(so->obj, 4096, 0);
if (ret)
goto free_gem;
so->ggtt_offset = i915_gem_obj_ggtt_offset(so->obj);
return 0;
free_gem:
drm_gem_object_unreference(&so->obj->base);
return ret;
}
static int render_state_setup(struct render_state *so)
{
const struct intel_renderstate_rodata *rodata = so->rodata;
unsigned int i = 0, reloc_index = 0;
struct page *page;
u32 *d;
int ret;
ret = i915_gem_object_set_to_cpu_domain(so->obj, true);
if (ret)
return ret;
page = sg_page(so->obj->pages->sgl);
d = kmap(page);
while (i < rodata->batch_items) {
u32 s = rodata->batch[i];
if (reloc_index < rodata->reloc_items &&
i * 4 == rodata->reloc[reloc_index]) {
s += goffset & 0xffffffff;
/* We keep batch offsets max 32bit */
if (gen >= 8) {
if (i * 4 == rodata->reloc[reloc_index]) {
u64 r = s + so->ggtt_offset;
s = lower_32_bits(r);
if (so->gen >= 8) {
if (i + 1 >= rodata->batch_items ||
rodata->batch[i + 1] != 0)
return -EINVAL;
d[i] = s;
i++;
s = (goffset & 0xffffffff00000000ull) >> 32;
d[i++] = s;
s = upper_32_bits(r);
}
reloc_index++;
}
d[i] = s;
i++;
d[i++] = s;
}
kunmap(page);
ret = i915_gem_object_set_to_gtt_domain(so->obj, false);
if (ret)
return ret;
if (rodata->reloc_items != reloc_index) {
DRM_ERROR("not all relocs resolved, %d out of %d\n",
reloc_index, rodata->reloc_items);
if (rodata->reloc[reloc_index] != -1) {
DRM_ERROR("only %d relocs resolved\n", reloc_index);
return -EINVAL;
}
so->len = rodata->batch_items * 4;
return 0;
}
static void render_state_fini(struct render_state *so)
{
i915_gem_object_ggtt_unpin(so->obj);
drm_gem_object_unreference(&so->obj->base);
}
int i915_gem_render_state_init(struct intel_engine_cs *ring)
{
const int gen = INTEL_INFO(ring->dev)->gen;
struct i915_render_state *so;
const struct intel_renderstate_rodata *rodata;
struct render_state so;
int ret;
if (WARN_ON(ring->id != RCS))
return -ENOENT;
rodata = render_state_get_rodata(ring->dev, gen);
if (rodata == NULL)
return 0;
ret = render_state_init(&so, ring->dev);
if (ret)
return ret;
so = render_state_alloc(ring->dev);
if (IS_ERR(so))
return PTR_ERR(so);
if (so.rodata == NULL)
return 0;
ret = render_state_setup(gen, rodata, so);
ret = render_state_setup(&so);
if (ret)
goto out;
ret = ring->dispatch_execbuffer(ring,
i915_gem_obj_ggtt_offset(so->obj),
so->len,
so.ggtt_offset,
so.rodata->batch_items * 4,
I915_DISPATCH_SECURE);
if (ret)
goto out;
i915_vma_move_to_active(i915_gem_obj_to_ggtt(so->obj), ring);
i915_vma_move_to_active(i915_gem_obj_to_ggtt(so.obj), ring);
ret = __i915_add_request(ring, NULL, so->obj, NULL);
ret = __i915_add_request(ring, NULL, so.obj, NULL);
/* __i915_add_request moves object to inactive if it fails */
out:
render_state_free(so);
render_state_fini(&so);
return ret;
}
......@@ -292,9 +292,20 @@ static void i915_gem_object_put_pages_stolen(struct drm_i915_gem_object *obj)
kfree(obj->pages);
}
static void
i915_gem_object_release_stolen(struct drm_i915_gem_object *obj)
{
if (obj->stolen) {
drm_mm_remove_node(obj->stolen);
kfree(obj->stolen);
obj->stolen = NULL;
}
}
static const struct drm_i915_gem_object_ops i915_gem_object_stolen_ops = {
.get_pages = i915_gem_object_get_pages_stolen,
.put_pages = i915_gem_object_put_pages_stolen,
.release = i915_gem_object_release_stolen,
};
static struct drm_i915_gem_object *
......@@ -452,13 +463,3 @@ i915_gem_object_create_stolen_for_preallocated(struct drm_device *dev,
drm_gem_object_unreference(&obj->base);
return NULL;
}
void
i915_gem_object_release_stolen(struct drm_i915_gem_object *obj)
{
if (obj->stolen) {
drm_mm_remove_node(obj->stolen);
kfree(obj->stolen);
obj->stolen = NULL;
}
}
......@@ -48,6 +48,7 @@ struct i915_params i915 __read_mostly = {
.disable_display = 0,
.enable_cmd_parser = 1,
.disable_vtd_wa = 0,
.use_mmio_flip = 0,
};
module_param_named(modeset, i915.modeset, int, 0400);
......@@ -156,3 +157,7 @@ MODULE_PARM_DESC(disable_vtd_wa, "Disable all VT-d workarounds (default: false)"
module_param_named(enable_cmd_parser, i915.enable_cmd_parser, int, 0600);
MODULE_PARM_DESC(enable_cmd_parser,
"Enable command parsing (1=enabled [default], 0=disabled)");
module_param_named(use_mmio_flip, i915.use_mmio_flip, int, 0600);
MODULE_PARM_DESC(use_mmio_flip,
"use MMIO flips (-1=never, 0=driver discretion [default], 1=always)");
......@@ -76,12 +76,12 @@ static const u32 bdw_ddi_translations_edp[] = {
0x00FFFFFF, 0x00000012, /* eDP parameters */
0x00EBAFFF, 0x00020011,
0x00C71FFF, 0x0006000F,
0x00AAAFFF, 0x000E000A,
0x00FFFFFF, 0x00020011,
0x00DB6FFF, 0x0005000F,
0x00BEEFFF, 0x000A000C,
0x00FFFFFF, 0x0005000F,
0x00DB6FFF, 0x000A000C,
0x00FFFFFF, 0x000A000C,
0x00FFFFFF, 0x00140006 /* HDMI parameters 800mV 0dB*/
};
......@@ -89,12 +89,12 @@ static const u32 bdw_ddi_translations_dp[] = {
0x00FFFFFF, 0x0007000E, /* DP parameters */
0x00D75FFF, 0x000E000A,
0x00BEFFFF, 0x00140006,
0x80B2CFFF, 0x001B0002,
0x00FFFFFF, 0x000E000A,
0x00D75FFF, 0x00180004,
0x80CB2FFF, 0x001B0002,
0x00F7DFFF, 0x00180004,
0x80D75FFF, 0x001B0002,
0x80FFFFFF, 0x001B0002,
0x00FFFFFF, 0x00140006 /* HDMI parameters 800mV 0dB*/
};
......
......@@ -358,6 +358,11 @@ struct intel_pipe_wm {
bool sprites_scaled;
};
struct intel_mmio_flip {
u32 seqno;
u32 ring_id;
};
struct intel_crtc {
struct drm_crtc base;
enum pipe pipe;
......@@ -384,7 +389,6 @@ struct intel_crtc {
struct drm_i915_gem_object *cursor_bo;
uint32_t cursor_addr;
int16_t cursor_x, cursor_y;
int16_t cursor_width, cursor_height;
uint32_t cursor_cntl;
uint32_t cursor_base;
......@@ -412,6 +416,7 @@ struct intel_crtc {
wait_queue_head_t vbl_wait;
int scanline_offset;
struct intel_mmio_flip mmio_flip;
};
struct intel_plane_wm_parameters {
......@@ -428,7 +433,6 @@ struct intel_plane {
struct drm_i915_gem_object *obj;
bool can_scale;
int max_downscale;
u32 lut_r[1024], lut_g[1024], lut_b[1024];
int crtc_x, crtc_y;
unsigned int crtc_w, crtc_h;
uint32_t src_x, src_y;
......@@ -537,7 +541,6 @@ struct intel_dp {
unsigned long last_power_cycle;
unsigned long last_power_on;
unsigned long last_backlight_off;
bool psr_setup_done;
bool use_tps3;
struct intel_connector *attached_connector;
......@@ -721,8 +724,33 @@ bool intel_has_pending_fb_unpin(struct drm_device *dev);
int intel_pch_rawclk(struct drm_device *dev);
int valleyview_cur_cdclk(struct drm_i915_private *dev_priv);
void intel_mark_busy(struct drm_device *dev);
void intel_mark_fb_busy(struct drm_i915_gem_object *obj,
struct intel_engine_cs *ring);
void intel_fb_obj_invalidate(struct drm_i915_gem_object *obj,
struct intel_engine_cs *ring);
void intel_frontbuffer_flip_prepare(struct drm_device *dev,
unsigned frontbuffer_bits);
void intel_frontbuffer_flip_complete(struct drm_device *dev,
unsigned frontbuffer_bits);
void intel_frontbuffer_flush(struct drm_device *dev,
unsigned frontbuffer_bits);
/**
* intel_frontbuffer_flip - prepare frontbuffer flip
* @dev: DRM device
* @frontbuffer_bits: frontbuffer plane tracking bits
*
* This function gets called after scheduling a flip on @obj. This is for
* synchronous plane updates which will happen on the next vblank and which will
* not get delayed by pending gpu rendering.
*
* Can be called without any locks held.
*/
static inline
void intel_frontbuffer_flip(struct drm_device *dev,
unsigned frontbuffer_bits)
{
intel_frontbuffer_flush(dev, frontbuffer_bits);
}
void intel_fb_obj_flush(struct drm_i915_gem_object *obj, bool retire);
void intel_mark_idle(struct drm_device *dev);
void intel_crtc_restore_mode(struct drm_crtc *crtc);
void intel_crtc_update_dpms(struct drm_crtc *crtc);
......@@ -831,11 +859,13 @@ void intel_edp_panel_on(struct intel_dp *intel_dp);
void intel_edp_panel_off(struct intel_dp *intel_dp);
void intel_edp_psr_enable(struct intel_dp *intel_dp);
void intel_edp_psr_disable(struct intel_dp *intel_dp);
void intel_edp_psr_update(struct drm_device *dev);
void intel_dp_set_drrs_state(struct drm_device *dev, int refresh_rate);
void intel_edp_psr_exit(struct drm_device *dev);
void intel_edp_psr_init(struct drm_device *dev);
/* intel_dsi.c */
bool intel_dsi_init(struct drm_device *dev);
void intel_dsi_init(struct drm_device *dev);
/* intel_dvo.c */
......@@ -961,6 +991,7 @@ void intel_init_gt_powersave(struct drm_device *dev);
void intel_cleanup_gt_powersave(struct drm_device *dev);
void intel_enable_gt_powersave(struct drm_device *dev);
void intel_disable_gt_powersave(struct drm_device *dev);
void intel_suspend_gt_powersave(struct drm_device *dev);
void intel_reset_gt_powersave(struct drm_device *dev);
void ironlake_teardown_rc6(struct drm_device *dev);
void gen6_update_ring_freq(struct drm_device *dev);
......
......@@ -657,7 +657,7 @@ static const struct drm_connector_funcs intel_dsi_connector_funcs = {
.fill_modes = drm_helper_probe_single_connector_modes,
};
bool intel_dsi_init(struct drm_device *dev)
void intel_dsi_init(struct drm_device *dev)
{
struct intel_dsi *intel_dsi;
struct intel_encoder *intel_encoder;
......@@ -673,29 +673,29 @@ bool intel_dsi_init(struct drm_device *dev)
/* There is no detection method for MIPI so rely on VBT */
if (!dev_priv->vbt.has_mipi)
return false;
return;
if (IS_VALLEYVIEW(dev)) {
dev_priv->mipi_mmio_base = VLV_MIPI_BASE;
} else {
DRM_ERROR("Unsupported Mipi device to reg base");
return;
}
intel_dsi = kzalloc(sizeof(*intel_dsi), GFP_KERNEL);
if (!intel_dsi)
return false;
return;
intel_connector = kzalloc(sizeof(*intel_connector), GFP_KERNEL);
if (!intel_connector) {
kfree(intel_dsi);
return false;
return;
}
intel_encoder = &intel_dsi->base;
encoder = &intel_encoder->base;
intel_dsi->attached_connector = intel_connector;
if (IS_VALLEYVIEW(dev)) {
dev_priv->mipi_mmio_base = VLV_MIPI_BASE;
} else {
DRM_ERROR("Unsupported Mipi device to reg base");
return false;
}
connector = &intel_connector->base;
drm_encoder_init(dev, encoder, &intel_dsi_funcs, DRM_MODE_ENCODER_DSI);
......@@ -753,12 +753,10 @@ bool intel_dsi_init(struct drm_device *dev)
fixed_mode->type |= DRM_MODE_TYPE_PREFERRED;
intel_panel_init(&intel_connector->panel, fixed_mode, NULL);
return true;
return;
err:
drm_encoder_cleanup(&intel_encoder->base);
kfree(intel_dsi);
kfree(intel_connector);
return false;
}
......@@ -143,7 +143,7 @@ static u8 *mipi_exec_send_packet(struct intel_dsi *intel_dsi, u8 *data)
case MIPI_DSI_DCS_LONG_WRITE:
dsi_vc_dcs_write(intel_dsi, vc, data, len);
break;
};
}
data += len;
......@@ -294,7 +294,8 @@ static bool generic_init(struct intel_dsi_device *dsi)
intel_dsi->rst_timer_val = mipi_config->device_reset_timer;
intel_dsi->init_count = mipi_config->master_init_timer;
intel_dsi->bw_timer = mipi_config->dbi_bw_timer;
intel_dsi->video_frmt_cfg_bits = mipi_config->bta_enabled ? DISABLE_VIDEO_BTA : 0;
intel_dsi->video_frmt_cfg_bits =
mipi_config->bta_enabled ? DISABLE_VIDEO_BTA : 0;
switch (intel_dsi->escape_clk_div) {
case 0:
......@@ -351,7 +352,8 @@ static bool generic_init(struct intel_dsi_device *dsi)
*
* prepare count
*/
ths_prepare_ns = max(mipi_config->ths_prepare, mipi_config->tclk_prepare);
ths_prepare_ns = max(mipi_config->ths_prepare,
mipi_config->tclk_prepare);
prepare_cnt = DIV_ROUND_UP(ths_prepare_ns * ui_den, ui_num * 2);
/* exit zero count */
......
......@@ -112,7 +112,15 @@ static struct intel_dvo *intel_attached_dvo(struct drm_connector *connector)
static bool intel_dvo_connector_get_hw_state(struct intel_connector *connector)
{
struct drm_device *dev = connector->base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_dvo *intel_dvo = intel_attached_dvo(&connector->base);
u32 tmp;
tmp = I915_READ(intel_dvo->dev.dvo_reg);
if (!(tmp & DVO_ENABLE))
return false;
return intel_dvo->dev.dev_ops->get_hw_state(&intel_dvo->dev);
}
......
......@@ -43,10 +43,36 @@
#include <drm/i915_drm.h>
#include "i915_drv.h"
static int intel_fbdev_set_par(struct fb_info *info)
{
struct drm_fb_helper *fb_helper = info->par;
struct intel_fbdev *ifbdev =
container_of(fb_helper, struct intel_fbdev, helper);
int ret;
ret = drm_fb_helper_set_par(info);
if (ret == 0) {
/*
* FIXME: fbdev presumes that all callbacks also work from
* atomic contexts and relies on that for emergency oops
* printing. KMS totally doesn't do that and the locking here is
* by far not the only place this goes wrong. Ignore this for
* now until we solve this for real.
*/
mutex_lock(&fb_helper->dev->struct_mutex);
ret = i915_gem_object_set_to_gtt_domain(ifbdev->fb->obj,
true);
mutex_unlock(&fb_helper->dev->struct_mutex);
}
return ret;
}
static struct fb_ops intelfb_ops = {
.owner = THIS_MODULE,
.fb_check_var = drm_fb_helper_check_var,
.fb_set_par = drm_fb_helper_set_par,
.fb_set_par = intel_fbdev_set_par,
.fb_fillrect = cfb_fillrect,
.fb_copyarea = cfb_copyarea,
.fb_imageblit = cfb_imageblit,
......@@ -417,7 +443,7 @@ static bool intel_fb_initial_config(struct drm_fb_helper *fb_helper,
}
crtcs[i] = new_crtc;
DRM_DEBUG_KMS("connector %s on pipe %d [CRTC:%d]: %dx%d%s\n",
DRM_DEBUG_KMS("connector %s on pipe %c [CRTC:%d]: %dx%d%s\n",
connector->name,
pipe_name(to_intel_crtc(encoder->crtc)->pipe),
encoder->crtc->base.id,
......
......@@ -1229,6 +1229,70 @@ static void vlv_hdmi_pre_pll_enable(struct intel_encoder *encoder)
mutex_unlock(&dev_priv->dpio_lock);
}
static void chv_hdmi_pre_pll_enable(struct intel_encoder *encoder)
{
struct intel_digital_port *dport = enc_to_dig_port(&encoder->base);
struct drm_device *dev = encoder->base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_crtc *intel_crtc =
to_intel_crtc(encoder->base.crtc);
enum dpio_channel ch = vlv_dport_to_channel(dport);
enum pipe pipe = intel_crtc->pipe;
u32 val;
mutex_lock(&dev_priv->dpio_lock);
/* program left/right clock distribution */
if (pipe != PIPE_B) {
val = vlv_dpio_read(dev_priv, pipe, _CHV_CMN_DW5_CH0);
val &= ~(CHV_BUFLEFTENA1_MASK | CHV_BUFRIGHTENA1_MASK);
if (ch == DPIO_CH0)
val |= CHV_BUFLEFTENA1_FORCE;
if (ch == DPIO_CH1)
val |= CHV_BUFRIGHTENA1_FORCE;
vlv_dpio_write(dev_priv, pipe, _CHV_CMN_DW5_CH0, val);
} else {
val = vlv_dpio_read(dev_priv, pipe, _CHV_CMN_DW1_CH1);
val &= ~(CHV_BUFLEFTENA2_MASK | CHV_BUFRIGHTENA2_MASK);
if (ch == DPIO_CH0)
val |= CHV_BUFLEFTENA2_FORCE;
if (ch == DPIO_CH1)
val |= CHV_BUFRIGHTENA2_FORCE;
vlv_dpio_write(dev_priv, pipe, _CHV_CMN_DW1_CH1, val);
}
/* program clock channel usage */
val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW8(ch));
val |= CHV_PCS_USEDCLKCHANNEL_OVRRIDE;
if (pipe != PIPE_B)
val &= ~CHV_PCS_USEDCLKCHANNEL;
else
val |= CHV_PCS_USEDCLKCHANNEL;
vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW8(ch), val);
val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW8(ch));
val |= CHV_PCS_USEDCLKCHANNEL_OVRRIDE;
if (pipe != PIPE_B)
val &= ~CHV_PCS_USEDCLKCHANNEL;
else
val |= CHV_PCS_USEDCLKCHANNEL;
vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW8(ch), val);
/*
* This a a bit weird since generally CL
* matches the pipe, but here we need to
* pick the CL based on the port.
*/
val = vlv_dpio_read(dev_priv, pipe, CHV_CMN_DW19(ch));
if (pipe != PIPE_B)
val &= ~CHV_CMN_USEDCLKCHANNEL;
else
val |= CHV_CMN_USEDCLKCHANNEL;
vlv_dpio_write(dev_priv, pipe, CHV_CMN_DW19(ch), val);
mutex_unlock(&dev_priv->dpio_lock);
}
static void vlv_hdmi_post_disable(struct intel_encoder *encoder)
{
struct intel_digital_port *dport = enc_to_dig_port(&encoder->base);
......@@ -1528,6 +1592,7 @@ void intel_hdmi_init(struct drm_device *dev, int hdmi_reg, enum port port)
intel_encoder->get_hw_state = intel_hdmi_get_hw_state;
intel_encoder->get_config = intel_hdmi_get_config;
if (IS_CHERRYVIEW(dev)) {
intel_encoder->pre_pll_enable = chv_hdmi_pre_pll_enable;
intel_encoder->pre_enable = chv_hdmi_pre_enable;
intel_encoder->enable = vlv_enable_hdmi;
intel_encoder->post_disable = chv_hdmi_post_disable;
......
......@@ -415,6 +415,10 @@ static int intel_overlay_release_old_vid(struct intel_overlay *overlay)
}
intel_overlay_release_old_vid_tail(overlay);
i915_gem_track_fb(overlay->old_vid_bo, NULL,
INTEL_FRONTBUFFER_OVERLAY(overlay->crtc->pipe));
return 0;
}
......@@ -686,6 +690,7 @@ static int intel_overlay_do_put_image(struct intel_overlay *overlay,
bool scale_changed = false;
struct drm_device *dev = overlay->dev;
u32 swidth, swidthsw, sheight, ostride;
enum pipe pipe = overlay->crtc->pipe;
BUG_ON(!mutex_is_locked(&dev->struct_mutex));
BUG_ON(!drm_modeset_is_locked(&dev->mode_config.connection_mutex));
......@@ -713,7 +718,7 @@ static int intel_overlay_do_put_image(struct intel_overlay *overlay,
oconfig = OCONF_CC_OUT_8BIT;
if (IS_GEN4(overlay->dev))
oconfig |= OCONF_CSC_MODE_BT709;
oconfig |= overlay->crtc->pipe == 0 ?
oconfig |= pipe == 0 ?
OCONF_PIPE_A : OCONF_PIPE_B;
iowrite32(oconfig, &regs->OCONFIG);
intel_overlay_unmap_regs(overlay, regs);
......@@ -776,9 +781,15 @@ static int intel_overlay_do_put_image(struct intel_overlay *overlay,
if (ret)
goto out_unpin;
i915_gem_track_fb(overlay->vid_bo, new_bo,
INTEL_FRONTBUFFER_OVERLAY(pipe));
overlay->old_vid_bo = overlay->vid_bo;
overlay->vid_bo = new_bo;
intel_frontbuffer_flip(dev,
INTEL_FRONTBUFFER_OVERLAY(pipe));
return 0;
out_unpin:
......
......@@ -28,7 +28,6 @@
struct intel_renderstate_rodata {
const u32 *reloc;
const u32 reloc_items;
const u32 *batch;
const u32 batch_items;
};
......@@ -40,7 +39,6 @@ extern const struct intel_renderstate_rodata gen8_null_state;
#define RO_RENDERSTATE(_g) \
const struct intel_renderstate_rodata gen ## _g ## _null_state = { \
.reloc = gen ## _g ## _null_state_relocs, \
.reloc_items = sizeof(gen ## _g ## _null_state_relocs)/4, \
.batch = gen ## _g ## _null_state_batch, \
.batch_items = sizeof(gen ## _g ## _null_state_batch)/4, \
}
......
......@@ -6,6 +6,7 @@ static const u32 gen6_null_state_relocs[] = {
0x0000002c,
0x000001e0,
0x000001e4,
-1,
};
static const u32 gen6_null_state_batch[] = {
......
......@@ -5,6 +5,7 @@ static const u32 gen7_null_state_relocs[] = {
0x00000010,
0x00000018,
0x000001ec,
-1,
};
static const u32 gen7_null_state_batch[] = {
......
......@@ -5,6 +5,7 @@ static const u32 gen8_null_state_relocs[] = {
0x00000050,
0x00000060,
0x000003ec,
-1,
};
static const u32 gen8_null_state_batch[] = {
......
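
The gen6/7/8 null_state_relocs tables above now end in a -1 sentinel, and render_state_setup() checks that rodata->reloc[reloc_index] has reached that sentinel once the batch has been walked, instead of comparing against the reloc_items count that RO_RENDERSTATE() no longer emits. A minimal standalone sketch of a sentinel-terminated walk over such a table follows; the offsets and names here are invented for illustration.

#include <stdio.h>
#include <stdint.h>

/* Hypothetical relocation offset table, -1 terminated like the
 * null_state_relocs arrays above. */
static const uint32_t example_relocs[] = { 0x10, 0x18, 0x1ec, (uint32_t)-1 };

int main(void)
{
	unsigned int reloc_index = 0;

	while (example_relocs[reloc_index] != (uint32_t)-1) {
		printf("reloc %u patches the batch dword at offset 0x%x\n",
		       reloc_index, example_relocs[reloc_index]);
		reloc_index++;
	}
	printf("%u relocs resolved\n", reloc_index);
	return 0;
}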