Commit 399403c7 authored by Dave Airlie

Merge tag 'drm-intel-next-2013-03-23' of...

Merge tag 'drm-intel-next-2013-03-23' of git://people.freedesktop.org/~danvet/drm-intel into drm-next

Daniel writes:
Highlights:
- Imre's for_each_sg_pages rework (now also with the stolen mem backed
  case fixed with a hack) plus the drm prime sg list coalescing patch from
  Rahul Sharma; a minimal sketch of the new page-iterator pattern follows
  this list. I have some follow-up cleanups pending, already acked by
  Andrew Morton.
- Some prep-work for the crazy no-pch/display-less platform by Ben.
- Some vlv patches, by far not all (Jesse et al).
- Clean up the HDMI/SDVO #define confusion (Paulo)
- gen2-4 vblank fixes from Ville.
- Unclaimed register warning fixes for hsw (Paulo). More still to come ...
- Complete pageflips which have been stuck in a gpu hang, which should
  prevent stuck gl compositors (Ville).
- pm patches for vt-switchless resume (Jesse). Note that the i915 enabling
  is not (yet) included; that took a bit longer to settle. The PM patches are
  acked by Rafael Wysocki.
- Minor fixlets all over from various people.
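
The sg work above all follows one pattern: instead of walking struct scatterlist
entries with for_each_sg() and assuming one PAGE_SIZE page per entry, callers walk
the backing pages with for_each_sg_page() and a struct sg_page_iter, which stays
correct once entries are coalesced into larger chunks. A minimal sketch of that
pattern, assuming the sg_iter.page interface used throughout this series (the
helper name count_backing_pages is illustrative only, not part of the merge):

    #include <linux/scatterlist.h>

    /* Count every backing page of an sg_table, regardless of how many
     * pages each scatterlist entry covers after coalescing. */
    static unsigned int count_backing_pages(struct sg_table *st)
    {
            struct sg_page_iter sg_iter;
            unsigned int count = 0;

            /* the trailing 0 starts the walk at the table's first page */
            for_each_sg_page(st->sgl, &sg_iter, st->nents, 0)
                    count++;

            return count;
    }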

* tag 'drm-intel-next-2013-03-23' of git://people.freedesktop.org/~danvet/drm-intel: (79 commits)
  drm/i915: Implement WaSwitchSolVfFArbitrationPriority
  drm/i915: Set the VIC in AVI infoframe for SDVO
  drm/i915: Kill a strange comment about DPMS functions
  drm/i915: Correct sandybrige overclocking
  drm/i915: Introduce GEN7_FEATURES for device info
  drm/i915: Move num_pipes to intel info
  drm/i915: fixup pd vs pt confusion in gen6 ppgtt code
  style nit: Align function parameter continuation properly.
  drm/i915: VLV doesn't have HDMI on port C
  drm/i915: DSPFW and BLC regs are in the display offset range
  drm/i915: set conservative clock gating values on VLV v2
  drm/i915: fix WaDisablePSDDualDispatchEnable on VLV v2
  drm/i915: add more VLV IDs
  drm/i915: use VLV DIP routines on VLV v2
  drm/i915: add media well to VLV force wake routines v2
  drm/i915: don't use plane pipe select on VLV
  drm: modify pages_to_sg prime helper to create optimized SG table
  drm/i915: use for_each_sg_page for setting up the gtt ptes
  drm/i915: create compact dma scatter lists for gem objects
  drm/i915: handle walking compact dma scatter lists
  ...
parents b6a9b7f6 e3dff585
@@ -105,12 +105,11 @@ drm_clflush_sg(struct sg_table *st)
 {
 #if defined(CONFIG_X86)
 	if (cpu_has_clflush) {
-		struct scatterlist *sg;
-		int i;
+		struct sg_page_iter sg_iter;
 		mb();
-		for_each_sg(st->sgl, sg, st->nents, i)
-			drm_clflush_page(sg_page(sg));
+		for_each_sg_page(st->sgl, &sg_iter, st->nents, 0)
+			drm_clflush_page(sg_iter.page);
 		mb();
 		return;
...
@@ -401,21 +401,17 @@ int drm_prime_fd_to_handle_ioctl(struct drm_device *dev, void *data,
 struct sg_table *drm_prime_pages_to_sg(struct page **pages, int nr_pages)
 {
 	struct sg_table *sg = NULL;
-	struct scatterlist *iter;
-	int i;
 	int ret;
 	sg = kmalloc(sizeof(struct sg_table), GFP_KERNEL);
 	if (!sg)
 		goto out;
-	ret = sg_alloc_table(sg, nr_pages, GFP_KERNEL);
+	ret = sg_alloc_table_from_pages(sg, pages, nr_pages, 0,
+					nr_pages << PAGE_SHIFT, GFP_KERNEL);
 	if (ret)
 		goto out;
-	for_each_sg(sg->sgl, iter, nr_pages, i)
-		sg_set_page(iter, pages[i], PAGE_SIZE, 0);
 	return sg;
 out:
 	kfree(sg);
...
@@ -1452,6 +1452,22 @@ static void i915_dump_device_info(struct drm_i915_private *dev_priv)
 #undef DEV_INFO_SEP
 }
+/**
+ * intel_early_sanitize_regs - clean up BIOS state
+ * @dev: DRM device
+ *
+ * This function must be called before we do any I915_READ or I915_WRITE. Its
+ * purpose is to clean up any state left by the BIOS that may affect us when
+ * reading and/or writing registers.
+ */
+static void intel_early_sanitize_regs(struct drm_device *dev)
+{
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	if (IS_HASWELL(dev))
+		I915_WRITE_NOTRACE(FPGA_DBG, FPGA_DBG_RM_NOCLAIM);
+}
 /**
  * i915_driver_load - setup chip and create an initial config
  * @dev: DRM device
@@ -1542,6 +1558,8 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags)
 		goto put_gmch;
 	}
+	intel_early_sanitize_regs(dev);
 	aperture_size = dev_priv->gtt.mappable_end;
 	dev_priv->gtt.mappable =
@@ -1612,14 +1630,7 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags)
 	mutex_init(&dev_priv->rps.hw_lock);
 	mutex_init(&dev_priv->modeset_restore_lock);
-	if (IS_IVYBRIDGE(dev) || IS_HASWELL(dev))
-		dev_priv->num_pipe = 3;
-	else if (IS_MOBILE(dev) || !IS_GEN2(dev))
-		dev_priv->num_pipe = 2;
-	else
-		dev_priv->num_pipe = 1;
-	ret = drm_vblank_init(dev, dev_priv->num_pipe);
+	ret = drm_vblank_init(dev, INTEL_INFO(dev)->num_pipes);
 	if (ret)
 		goto out_gem_unload;
...
@@ -121,9 +121,7 @@ MODULE_PARM_DESC(i915_enable_ppgtt,
 unsigned int i915_preliminary_hw_support __read_mostly = 0;
 module_param_named(preliminary_hw_support, i915_preliminary_hw_support, int, 0600);
 MODULE_PARM_DESC(preliminary_hw_support,
-		"Enable preliminary hardware support. "
-		"Enable Haswell and ValleyView Support. "
-		"(default: false)");
+		"Enable preliminary hardware support. (default: false)");
 int i915_disable_power_well __read_mostly = 0;
 module_param_named(disable_power_well, i915_disable_power_well, int, 0600);
@@ -143,74 +141,74 @@ extern int intel_agp_enabled;
 	.driver_data = (unsigned long) info }
 static const struct intel_device_info intel_i830_info = {
-	.gen = 2, .is_mobile = 1, .cursor_needs_physical = 1,
+	.gen = 2, .is_mobile = 1, .cursor_needs_physical = 1, .num_pipes = 2,
 	.has_overlay = 1, .overlay_needs_physical = 1,
 };
 static const struct intel_device_info intel_845g_info = {
-	.gen = 2,
+	.gen = 2, .num_pipes = 1,
 	.has_overlay = 1, .overlay_needs_physical = 1,
 };
 static const struct intel_device_info intel_i85x_info = {
-	.gen = 2, .is_i85x = 1, .is_mobile = 1,
+	.gen = 2, .is_i85x = 1, .is_mobile = 1, .num_pipes = 2,
 	.cursor_needs_physical = 1,
 	.has_overlay = 1, .overlay_needs_physical = 1,
 };
 static const struct intel_device_info intel_i865g_info = {
-	.gen = 2,
+	.gen = 2, .num_pipes = 1,
 	.has_overlay = 1, .overlay_needs_physical = 1,
 };
 static const struct intel_device_info intel_i915g_info = {
-	.gen = 3, .is_i915g = 1, .cursor_needs_physical = 1,
+	.gen = 3, .is_i915g = 1, .cursor_needs_physical = 1, .num_pipes = 2,
 	.has_overlay = 1, .overlay_needs_physical = 1,
 };
 static const struct intel_device_info intel_i915gm_info = {
-	.gen = 3, .is_mobile = 1,
+	.gen = 3, .is_mobile = 1, .num_pipes = 2,
 	.cursor_needs_physical = 1,
 	.has_overlay = 1, .overlay_needs_physical = 1,
 	.supports_tv = 1,
 };
 static const struct intel_device_info intel_i945g_info = {
-	.gen = 3, .has_hotplug = 1, .cursor_needs_physical = 1,
+	.gen = 3, .has_hotplug = 1, .cursor_needs_physical = 1, .num_pipes = 2,
 	.has_overlay = 1, .overlay_needs_physical = 1,
 };
 static const struct intel_device_info intel_i945gm_info = {
-	.gen = 3, .is_i945gm = 1, .is_mobile = 1,
+	.gen = 3, .is_i945gm = 1, .is_mobile = 1, .num_pipes = 2,
 	.has_hotplug = 1, .cursor_needs_physical = 1,
 	.has_overlay = 1, .overlay_needs_physical = 1,
 	.supports_tv = 1,
 };
 static const struct intel_device_info intel_i965g_info = {
-	.gen = 4, .is_broadwater = 1,
+	.gen = 4, .is_broadwater = 1, .num_pipes = 2,
 	.has_hotplug = 1,
 	.has_overlay = 1,
 };
 static const struct intel_device_info intel_i965gm_info = {
-	.gen = 4, .is_crestline = 1,
+	.gen = 4, .is_crestline = 1, .num_pipes = 2,
 	.is_mobile = 1, .has_fbc = 1, .has_hotplug = 1,
 	.has_overlay = 1,
 	.supports_tv = 1,
 };
 static const struct intel_device_info intel_g33_info = {
-	.gen = 3, .is_g33 = 1,
+	.gen = 3, .is_g33 = 1, .num_pipes = 2,
 	.need_gfx_hws = 1, .has_hotplug = 1,
 	.has_overlay = 1,
 };
 static const struct intel_device_info intel_g45_info = {
-	.gen = 4, .is_g4x = 1, .need_gfx_hws = 1,
+	.gen = 4, .is_g4x = 1, .need_gfx_hws = 1, .num_pipes = 2,
 	.has_pipe_cxsr = 1, .has_hotplug = 1,
 	.has_bsd_ring = 1,
 };
 static const struct intel_device_info intel_gm45_info = {
-	.gen = 4, .is_g4x = 1,
+	.gen = 4, .is_g4x = 1, .num_pipes = 2,
 	.is_mobile = 1, .need_gfx_hws = 1, .has_fbc = 1,
 	.has_pipe_cxsr = 1, .has_hotplug = 1,
 	.supports_tv = 1,
@@ -218,26 +216,26 @@ static const struct intel_device_info intel_gm45_info = {
 };
 static const struct intel_device_info intel_pineview_info = {
-	.gen = 3, .is_g33 = 1, .is_pineview = 1, .is_mobile = 1,
+	.gen = 3, .is_g33 = 1, .is_pineview = 1, .is_mobile = 1, .num_pipes = 2,
 	.need_gfx_hws = 1, .has_hotplug = 1,
 	.has_overlay = 1,
 };
 static const struct intel_device_info intel_ironlake_d_info = {
-	.gen = 5,
+	.gen = 5, .num_pipes = 2,
 	.need_gfx_hws = 1, .has_hotplug = 1,
 	.has_bsd_ring = 1,
 };
 static const struct intel_device_info intel_ironlake_m_info = {
-	.gen = 5, .is_mobile = 1,
+	.gen = 5, .is_mobile = 1, .num_pipes = 2,
 	.need_gfx_hws = 1, .has_hotplug = 1,
 	.has_fbc = 1,
 	.has_bsd_ring = 1,
 };
 static const struct intel_device_info intel_sandybridge_d_info = {
-	.gen = 6,
+	.gen = 6, .num_pipes = 2,
 	.need_gfx_hws = 1, .has_hotplug = 1,
 	.has_bsd_ring = 1,
 	.has_blt_ring = 1,
@@ -246,7 +244,7 @@ static const struct intel_device_info intel_sandybridge_d_info = {
 };
 static const struct intel_device_info intel_sandybridge_m_info = {
-	.gen = 6, .is_mobile = 1,
+	.gen = 6, .is_mobile = 1, .num_pipes = 2,
 	.need_gfx_hws = 1, .has_hotplug = 1,
 	.has_fbc = 1,
 	.has_bsd_ring = 1,
@@ -255,61 +253,49 @@ static const struct intel_device_info intel_sandybridge_m_info = {
 	.has_force_wake = 1,
 };
+#define GEN7_FEATURES  \
+	.gen = 7, .num_pipes = 3, \
+	.need_gfx_hws = 1, .has_hotplug = 1, \
+	.has_bsd_ring = 1, \
+	.has_blt_ring = 1, \
+	.has_llc = 1, \
+	.has_force_wake = 1
 static const struct intel_device_info intel_ivybridge_d_info = {
-	.is_ivybridge = 1, .gen = 7,
-	.need_gfx_hws = 1, .has_hotplug = 1,
-	.has_bsd_ring = 1,
-	.has_blt_ring = 1,
-	.has_llc = 1,
-	.has_force_wake = 1,
+	GEN7_FEATURES,
+	.is_ivybridge = 1,
 };
 static const struct intel_device_info intel_ivybridge_m_info = {
-	.is_ivybridge = 1, .gen = 7, .is_mobile = 1,
-	.need_gfx_hws = 1, .has_hotplug = 1,
-	.has_fbc = 0,	/* FBC is not enabled on Ivybridge mobile yet */
-	.has_bsd_ring = 1,
-	.has_blt_ring = 1,
-	.has_llc = 1,
-	.has_force_wake = 1,
+	GEN7_FEATURES,
+	.is_ivybridge = 1,
+	.is_mobile = 1,
 };
 static const struct intel_device_info intel_valleyview_m_info = {
-	.gen = 7, .is_mobile = 1,
-	.need_gfx_hws = 1, .has_hotplug = 1,
-	.has_fbc = 0,
-	.has_bsd_ring = 1,
-	.has_blt_ring = 1,
+	GEN7_FEATURES,
+	.is_mobile = 1,
+	.num_pipes = 2,
 	.is_valleyview = 1,
 	.display_mmio_offset = VLV_DISPLAY_BASE,
 };
 static const struct intel_device_info intel_valleyview_d_info = {
-	.gen = 7,
-	.need_gfx_hws = 1, .has_hotplug = 1,
-	.has_fbc = 0,
-	.has_bsd_ring = 1,
-	.has_blt_ring = 1,
+	GEN7_FEATURES,
+	.num_pipes = 2,
 	.is_valleyview = 1,
 	.display_mmio_offset = VLV_DISPLAY_BASE,
 };
 static const struct intel_device_info intel_haswell_d_info = {
-	.is_haswell = 1, .gen = 7,
-	.need_gfx_hws = 1, .has_hotplug = 1,
-	.has_bsd_ring = 1,
-	.has_blt_ring = 1,
-	.has_llc = 1,
-	.has_force_wake = 1,
+	GEN7_FEATURES,
+	.is_haswell = 1,
 };
 static const struct intel_device_info intel_haswell_m_info = {
-	.is_haswell = 1, .gen = 7, .is_mobile = 1,
-	.need_gfx_hws = 1, .has_hotplug = 1,
-	.has_bsd_ring = 1,
-	.has_blt_ring = 1,
-	.has_llc = 1,
-	.has_force_wake = 1,
+	GEN7_FEATURES,
+	.is_haswell = 1,
+	.is_mobile = 1,
 };
 static const struct pci_device_id pciidlist[] = {	/* aka */
@@ -394,6 +380,9 @@ static const struct pci_device_id pciidlist[] = {	/* aka */
 	INTEL_VGA_DEVICE(0x0D16, &intel_haswell_m_info), /* CRW GT2 mobile */
 	INTEL_VGA_DEVICE(0x0D26, &intel_haswell_m_info), /* CRW GT2 mobile */
 	INTEL_VGA_DEVICE(0x0f30, &intel_valleyview_m_info),
+	INTEL_VGA_DEVICE(0x0f31, &intel_valleyview_m_info),
+	INTEL_VGA_DEVICE(0x0f32, &intel_valleyview_m_info),
+	INTEL_VGA_DEVICE(0x0f33, &intel_valleyview_m_info),
 	INTEL_VGA_DEVICE(0x0157, &intel_valleyview_m_info),
 	INTEL_VGA_DEVICE(0x0155, &intel_valleyview_d_info),
 	{0, 0, 0}
@@ -1147,6 +1136,27 @@ ilk_dummy_write(struct drm_i915_private *dev_priv)
 	I915_WRITE_NOTRACE(MI_MODE, 0);
 }
+static void
+hsw_unclaimed_reg_clear(struct drm_i915_private *dev_priv, u32 reg)
+{
+	if (IS_HASWELL(dev_priv->dev) &&
+	    (I915_READ_NOTRACE(FPGA_DBG) & FPGA_DBG_RM_NOCLAIM)) {
+		DRM_ERROR("Unknown unclaimed register before writing to %x\n",
+			  reg);
+		I915_WRITE_NOTRACE(FPGA_DBG, FPGA_DBG_RM_NOCLAIM);
+	}
+}
+static void
+hsw_unclaimed_reg_check(struct drm_i915_private *dev_priv, u32 reg)
+{
+	if (IS_HASWELL(dev_priv->dev) &&
+	    (I915_READ_NOTRACE(FPGA_DBG) & FPGA_DBG_RM_NOCLAIM)) {
+		DRM_ERROR("Unclaimed write to %x\n", reg);
+		I915_WRITE_NOTRACE(FPGA_DBG, FPGA_DBG_RM_NOCLAIM);
+	}
+}
 #define __i915_read(x, y) \
 u##x i915_read##x(struct drm_i915_private *dev_priv, u32 reg) { \
 	u##x val = 0; \
@@ -1183,18 +1193,12 @@ void i915_write##x(struct drm_i915_private *dev_priv, u32 reg, u##x val) { \
 	} \
 	if (IS_GEN5(dev_priv->dev)) \
 		ilk_dummy_write(dev_priv); \
-	if (IS_HASWELL(dev_priv->dev) && (I915_READ_NOTRACE(GEN7_ERR_INT) & ERR_INT_MMIO_UNCLAIMED)) { \
-		DRM_ERROR("Unknown unclaimed register before writing to %x\n", reg); \
-		I915_WRITE_NOTRACE(GEN7_ERR_INT, ERR_INT_MMIO_UNCLAIMED); \
-	} \
+	hsw_unclaimed_reg_clear(dev_priv, reg); \
 	write##y(val, dev_priv->regs + reg); \
 	if (unlikely(__fifo_ret)) { \
 		gen6_gt_check_fifodbg(dev_priv); \
 	} \
-	if (IS_HASWELL(dev_priv->dev) && (I915_READ_NOTRACE(GEN7_ERR_INT) & ERR_INT_MMIO_UNCLAIMED)) { \
-		DRM_ERROR("Unclaimed write to %x\n", reg); \
-		writel(ERR_INT_MMIO_UNCLAIMED, dev_priv->regs + GEN7_ERR_INT); \
-	} \
+	hsw_unclaimed_reg_check(dev_priv, reg); \
 }
 __i915_write(8, b)
 __i915_write(16, w)
...
@@ -93,7 +93,7 @@ enum port {
 			 I915_GEM_DOMAIN_INSTRUCTION | \
 			 I915_GEM_DOMAIN_VERTEX)
-#define for_each_pipe(p) for ((p) = 0; (p) < dev_priv->num_pipe; (p)++)
+#define for_each_pipe(p) for ((p) = 0; (p) < INTEL_INFO(dev)->num_pipes; (p)++)
 #define for_each_encoder_on_crtc(dev, __crtc, intel_encoder) \
 	list_for_each_entry((intel_encoder), &(dev)->mode_config.encoder_list, base.head) \
@@ -243,7 +243,7 @@ struct drm_i915_error_state {
 		int page_count;
 		u32 gtt_offset;
 		u32 *pages[0];
-	} *ringbuffer, *batchbuffer;
+	} *ringbuffer, *batchbuffer, *ctx;
 	struct drm_i915_error_request {
 		long jiffies;
 		u32 seqno;
@@ -341,6 +341,7 @@ struct drm_i915_gt_funcs {
 struct intel_device_info {
 	u32 display_mmio_offset;
+	u8 num_pipes:3;
 	u8 gen;
 	u8 is_mobile:1;
 	u8 is_i85x:1;
@@ -905,7 +906,6 @@ typedef struct drm_i915_private {
 	struct mutex dpio_lock;
 	/** Cached value of IMR to avoid reads in updating the bitfield */
-	u32 pipestat[2];
 	u32 irq_mask;
 	u32 gt_irq_mask;
@@ -913,7 +913,6 @@ typedef struct drm_i915_private {
 	struct work_struct hotplug_work;
 	bool enable_hotplug_processing;
-	int num_pipe;
 	int num_pch_pll;
 	unsigned long cfb_size;
@@ -1340,6 +1339,7 @@ struct drm_i915_file_private {
 #define HAS_PIPE_CONTROL(dev) (INTEL_INFO(dev)->gen >= 5)
 #define HAS_DDI(dev)		(IS_HASWELL(dev))
+#define HAS_POWER_WELL(dev)	(IS_HASWELL(dev))
 #define INTEL_PCH_DEVICE_ID_MASK		0xff00
 #define INTEL_PCH_IBX_DEVICE_ID_TYPE		0x3b00
@@ -1529,17 +1529,12 @@ void i915_gem_lastclose(struct drm_device *dev);
 int __must_check i915_gem_object_get_pages(struct drm_i915_gem_object *obj);
 static inline struct page *i915_gem_object_get_page(struct drm_i915_gem_object *obj, int n)
 {
-	struct scatterlist *sg = obj->pages->sgl;
-	int nents = obj->pages->nents;
-	while (nents > SG_MAX_SINGLE_ALLOC) {
-		if (n < SG_MAX_SINGLE_ALLOC - 1)
-			break;
-		sg = sg_chain_ptr(sg + SG_MAX_SINGLE_ALLOC - 1);
-		n -= SG_MAX_SINGLE_ALLOC - 1;
-		nents -= SG_MAX_SINGLE_ALLOC - 1;
-	}
-	return sg_page(sg+n);
+	struct sg_page_iter sg_iter;
+	for_each_sg_page(obj->pages->sgl, &sg_iter, obj->pages->nents, n)
+		return sg_iter.page;
+	return NULL;
 }
 static inline void i915_gem_object_pin_pages(struct drm_i915_gem_object *obj)
 {
@@ -1901,4 +1896,9 @@ static inline uint32_t i915_vgacntrl_reg(struct drm_device *dev)
 	return VGACNTRL;
 }
+static inline void __user *to_user_ptr(u64 address)
+{
+	return (void __user *)(uintptr_t)address;
+}
 #endif
@@ -411,10 +411,9 @@ i915_gem_shmem_pread(struct drm_device *dev,
 	int obj_do_bit17_swizzling, page_do_bit17_swizzling;
 	int prefaulted = 0;
 	int needs_clflush = 0;
-	struct scatterlist *sg;
-	int i;
+	struct sg_page_iter sg_iter;
-	user_data = (char __user *) (uintptr_t) args->data_ptr;
+	user_data = to_user_ptr(args->data_ptr);
 	remain = args->size;
 	obj_do_bit17_swizzling = i915_gem_object_needs_bit17_swizzle(obj);
@@ -441,11 +440,9 @@ i915_gem_shmem_pread(struct drm_device *dev,
 	offset = args->offset;
-	for_each_sg(obj->pages->sgl, sg, obj->pages->nents, i) {
-		struct page *page;
-		if (i < offset >> PAGE_SHIFT)
-			continue;
+	for_each_sg_page(obj->pages->sgl, &sg_iter, obj->pages->nents,
+			 offset >> PAGE_SHIFT) {
+		struct page *page = sg_iter.page;
 		if (remain <= 0)
 			break;
@@ -460,7 +457,6 @@ i915_gem_shmem_pread(struct drm_device *dev,
 		if ((shmem_page_offset + page_length) > PAGE_SIZE)
 			page_length = PAGE_SIZE - shmem_page_offset;
-		page = sg_page(sg);
 		page_do_bit17_swizzling = obj_do_bit17_swizzling &&
 			(page_to_phys(page) & (1 << 17)) != 0;
@@ -522,7 +518,7 @@ i915_gem_pread_ioctl(struct drm_device *dev, void *data,
 		return 0;
 	if (!access_ok(VERIFY_WRITE,
-		       (char __user *)(uintptr_t)args->data_ptr,
+		       to_user_ptr(args->data_ptr),
 		       args->size))
 		return -EFAULT;
@@ -613,7 +609,7 @@ i915_gem_gtt_pwrite_fast(struct drm_device *dev,
 	if (ret)
 		goto out_unpin;
-	user_data = (char __user *) (uintptr_t) args->data_ptr;
+	user_data = to_user_ptr(args->data_ptr);
 	remain = args->size;
 	offset = obj->gtt_offset + args->offset;
@@ -732,10 +728,9 @@ i915_gem_shmem_pwrite(struct drm_device *dev,
 	int hit_slowpath = 0;
 	int needs_clflush_after = 0;
 	int needs_clflush_before = 0;
-	int i;
-	struct scatterlist *sg;
+	struct sg_page_iter sg_iter;
-	user_data = (char __user *) (uintptr_t) args->data_ptr;
+	user_data = to_user_ptr(args->data_ptr);
 	remain = args->size;
 	obj_do_bit17_swizzling = i915_gem_object_needs_bit17_swizzle(obj);
@@ -768,13 +763,11 @@ i915_gem_shmem_pwrite(struct drm_device *dev,
 	offset = args->offset;
 	obj->dirty = 1;
-	for_each_sg(obj->pages->sgl, sg, obj->pages->nents, i) {
-		struct page *page;
+	for_each_sg_page(obj->pages->sgl, &sg_iter, obj->pages->nents,
+			 offset >> PAGE_SHIFT) {
+		struct page *page = sg_iter.page;
 		int partial_cacheline_write;
-		if (i < offset >> PAGE_SHIFT)
-			continue;
 		if (remain <= 0)
 			break;
@@ -796,7 +789,6 @@ i915_gem_shmem_pwrite(struct drm_device *dev,
 			((shmem_page_offset | page_length)
 				& (boot_cpu_data.x86_clflush_size - 1));
-		page = sg_page(sg);
 		page_do_bit17_swizzling = obj_do_bit17_swizzling &&
 			(page_to_phys(page) & (1 << 17)) != 0;
@@ -867,11 +859,11 @@ i915_gem_pwrite_ioctl(struct drm_device *dev, void *data,
 		return 0;
 	if (!access_ok(VERIFY_READ,
-		       (char __user *)(uintptr_t)args->data_ptr,
+		       to_user_ptr(args->data_ptr),
 		       args->size))
 		return -EFAULT;
-	ret = fault_in_multipages_readable((char __user *)(uintptr_t)args->data_ptr,
+	ret = fault_in_multipages_readable(to_user_ptr(args->data_ptr),
 					   args->size);
 	if (ret)
 		return -EFAULT;
@@ -1633,9 +1625,8 @@ i915_gem_object_is_purgeable(struct drm_i915_gem_object *obj)
 static void
 i915_gem_object_put_pages_gtt(struct drm_i915_gem_object *obj)
 {
-	int page_count = obj->base.size / PAGE_SIZE;
-	struct scatterlist *sg;
-	int ret, i;
+	struct sg_page_iter sg_iter;
+	int ret;
 	BUG_ON(obj->madv == __I915_MADV_PURGED);
@@ -1655,8 +1646,8 @@ i915_gem_object_put_pages_gtt(struct drm_i915_gem_object *obj)
 	if (obj->madv == I915_MADV_DONTNEED)
 		obj->dirty = 0;
-	for_each_sg(obj->pages->sgl, sg, page_count, i) {
-		struct page *page = sg_page(sg);
+	for_each_sg_page(obj->pages->sgl, &sg_iter, obj->pages->nents, 0) {
+		struct page *page = sg_iter.page;
 		if (obj->dirty)
 			set_page_dirty(page);
@@ -1757,7 +1748,9 @@ i915_gem_object_get_pages_gtt(struct drm_i915_gem_object *obj)
 	struct address_space *mapping;
 	struct sg_table *st;
 	struct scatterlist *sg;
+	struct sg_page_iter sg_iter;
 	struct page *page;
+	unsigned long last_pfn = 0;	/* suppress gcc warning */
 	gfp_t gfp;
 	/* Assert that the object is not currently in any GPU domain. As it
@@ -1787,7 +1780,9 @@ i915_gem_object_get_pages_gtt(struct drm_i915_gem_object *obj)
 	gfp = mapping_gfp_mask(mapping);
 	gfp |= __GFP_NORETRY | __GFP_NOWARN | __GFP_NO_KSWAPD;
 	gfp &= ~(__GFP_IO | __GFP_WAIT);
-	for_each_sg(st->sgl, sg, page_count, i) {
+	sg = st->sgl;
+	st->nents = 0;
+	for (i = 0; i < page_count; i++) {
 		page = shmem_read_mapping_page_gfp(mapping, i, gfp);
 		if (IS_ERR(page)) {
 			i915_gem_purge(dev_priv, page_count);
@@ -1810,9 +1805,18 @@ i915_gem_object_get_pages_gtt(struct drm_i915_gem_object *obj)
 			gfp &= ~(__GFP_IO | __GFP_WAIT);
 		}
-		sg_set_page(sg, page, PAGE_SIZE, 0);
+		if (!i || page_to_pfn(page) != last_pfn + 1) {
+			if (i)
+				sg = sg_next(sg);
+			st->nents++;
+			sg_set_page(sg, page, PAGE_SIZE, 0);
+		} else {
+			sg->length += PAGE_SIZE;
+		}
+		last_pfn = page_to_pfn(page);
 	}
+	sg_mark_end(sg);
 	obj->pages = st;
 	if (i915_gem_object_needs_bit17_swizzle(obj))
@@ -1821,8 +1825,9 @@ i915_gem_object_get_pages_gtt(struct drm_i915_gem_object *obj)
 	return 0;
 err_pages:
-	for_each_sg(st->sgl, sg, i, page_count)
-		page_cache_release(sg_page(sg));
+	sg_mark_end(sg);
+	for_each_sg_page(st->sgl, &sg_iter, st->nents, 0)
+		page_cache_release(sg_iter.page);
 	sg_free_table(st);
 	kfree(st);
 	return PTR_ERR(page);
@@ -4010,7 +4015,16 @@ int i915_gem_init(struct drm_device *dev)
 	int ret;
 	mutex_lock(&dev->struct_mutex);
+	if (IS_VALLEYVIEW(dev)) {
+		/* VLVA0 (potential hack), BIOS isn't actually waking us */
+		I915_WRITE(VLV_GTLC_WAKE_CTRL, 1);
+		if (wait_for((I915_READ(VLV_GTLC_PW_STATUS) & 1) == 1, 10))
+			DRM_DEBUG_DRIVER("allow wake ack timed out\n");
+	}
 	i915_gem_init_global_gtt(dev);
 	ret = i915_gem_init_hw(dev);
 	mutex_unlock(&dev->struct_mutex);
 	if (ret) {
@@ -4327,7 +4341,7 @@ i915_gem_phys_pwrite(struct drm_device *dev,
 		     struct drm_file *file_priv)
 {
 	void *vaddr = obj->phys_obj->handle->vaddr + args->offset;
-	char __user *user_data = (char __user *) (uintptr_t) args->data_ptr;
+	char __user *user_data = to_user_ptr(args->data_ptr);
 	if (__copy_from_user_inatomic_nocache(vaddr, user_data, args->size)) {
 		unsigned long unwritten;
...
@@ -62,7 +62,7 @@ static struct sg_table *i915_gem_map_dma_buf(struct dma_buf_attachment *attachme
 	src = obj->pages->sgl;
 	dst = st->sgl;
 	for (i = 0; i < obj->pages->nents; i++) {
-		sg_set_page(dst, sg_page(src), PAGE_SIZE, 0);
+		sg_set_page(dst, sg_page(src), src->length, 0);
 		dst = sg_next(dst);
 		src = sg_next(src);
 	}
@@ -105,7 +105,7 @@ static void *i915_gem_dmabuf_vmap(struct dma_buf *dma_buf)
 {
 	struct drm_i915_gem_object *obj = dma_buf->priv;
 	struct drm_device *dev = obj->base.dev;
-	struct scatterlist *sg;
+	struct sg_page_iter sg_iter;
 	struct page **pages;
 	int ret, i;
@@ -124,14 +124,15 @@ static void *i915_gem_dmabuf_vmap(struct dma_buf *dma_buf)
 	ret = -ENOMEM;
-	pages = drm_malloc_ab(obj->pages->nents, sizeof(struct page *));
+	pages = drm_malloc_ab(obj->base.size >> PAGE_SHIFT, sizeof(*pages));
 	if (pages == NULL)
 		goto error;
-	for_each_sg(obj->pages->sgl, sg, obj->pages->nents, i)
-		pages[i] = sg_page(sg);
+	i = 0;
+	for_each_sg_page(obj->pages->sgl, &sg_iter, obj->pages->nents, 0)
+		pages[i++] = sg_iter.page;
-	obj->dma_buf_vmapping = vmap(pages, obj->pages->nents, 0, PAGE_KERNEL);
+	obj->dma_buf_vmapping = vmap(pages, i, 0, PAGE_KERNEL);
 	drm_free_large(pages);
 	if (!obj->dma_buf_vmapping)
...
@@ -305,7 +305,7 @@ i915_gem_execbuffer_relocate_object(struct drm_i915_gem_object *obj,
 	struct drm_i915_gem_exec_object2 *entry = obj->exec_entry;
 	int remain, ret;
-	user_relocs = (void __user *)(uintptr_t)entry->relocs_ptr;
+	user_relocs = to_user_ptr(entry->relocs_ptr);
 	remain = entry->relocation_count;
 	while (remain) {
@@ -359,8 +359,7 @@ i915_gem_execbuffer_relocate_object_slow(struct drm_i915_gem_object *obj,
 }
 static int
-i915_gem_execbuffer_relocate(struct drm_device *dev,
-			     struct eb_objects *eb)
+i915_gem_execbuffer_relocate(struct eb_objects *eb)
 {
 	struct drm_i915_gem_object *obj;
 	int ret = 0;
@@ -475,7 +474,6 @@ i915_gem_execbuffer_unreserve_object(struct drm_i915_gem_object *obj)
 static int
 i915_gem_execbuffer_reserve(struct intel_ring_buffer *ring,
-			    struct drm_file *file,
 			    struct list_head *objects,
 			    bool *need_relocs)
 {
@@ -618,7 +616,7 @@ i915_gem_execbuffer_relocate_slow(struct drm_device *dev,
 		u64 invalid_offset = (u64)-1;
 		int j;
-		user_relocs = (void __user *)(uintptr_t)exec[i].relocs_ptr;
+		user_relocs = to_user_ptr(exec[i].relocs_ptr);
 		if (copy_from_user(reloc+total, user_relocs,
 				   exec[i].relocation_count * sizeof(*reloc))) {
@@ -663,7 +661,7 @@ i915_gem_execbuffer_relocate_slow(struct drm_device *dev,
 		goto err;
 	need_relocs = (args->flags & I915_EXEC_NO_RELOC) == 0;
-	ret = i915_gem_execbuffer_reserve(ring, file, &eb->objects, &need_relocs);
+	ret = i915_gem_execbuffer_reserve(ring, &eb->objects, &need_relocs);
 	if (ret)
 		goto err;
@@ -736,7 +734,7 @@ validate_exec_list(struct drm_i915_gem_exec_object2 *exec,
 	int relocs_max = INT_MAX / sizeof(struct drm_i915_gem_relocation_entry);
 	for (i = 0; i < count; i++) {
-		char __user *ptr = (char __user *)(uintptr_t)exec[i].relocs_ptr;
+		char __user *ptr = to_user_ptr(exec[i].relocs_ptr);
 		int length; /* limited by fault_in_pages_readable() */
 		if (exec[i].flags & __EXEC_OBJECT_UNKNOWN_FLAGS)
@@ -752,7 +750,11 @@ validate_exec_list(struct drm_i915_gem_exec_object2 *exec,
 		length = exec[i].relocation_count *
 			sizeof(struct drm_i915_gem_relocation_entry);
-		/* we may also need to update the presumed offsets */
+		/*
+		 * We must check that the entire relocation array is safe
+		 * to read, but since we may need to update the presumed
+		 * offsets during execution, check for full write access.
+		 */
 		if (!access_ok(VERIFY_WRITE, ptr, length))
 			return -EFAULT;
@@ -949,9 +951,8 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
 	}
 	if (copy_from_user(cliprects,
-			   (struct drm_clip_rect __user *)(uintptr_t)
-			   args->cliprects_ptr,
-			   sizeof(*cliprects)*args->num_cliprects)) {
+			   to_user_ptr(args->cliprects_ptr),
+			   sizeof(*cliprects)*args->num_cliprects)) {
 		ret = -EFAULT;
 		goto pre_mutex_err;
 	}
@@ -986,13 +987,13 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
 	/* Move the objects en-masse into the GTT, evicting if necessary. */
 	need_relocs = (args->flags & I915_EXEC_NO_RELOC) == 0;
-	ret = i915_gem_execbuffer_reserve(ring, file, &eb->objects, &need_relocs);
+	ret = i915_gem_execbuffer_reserve(ring, &eb->objects, &need_relocs);
 	if (ret)
 		goto err;
 	/* The objects are in their final locations, apply the relocations. */
 	if (need_relocs)
-		ret = i915_gem_execbuffer_relocate(dev, eb);
+		ret = i915_gem_execbuffer_relocate(eb);
 	if (ret) {
 		if (ret == -EFAULT) {
 			ret = i915_gem_execbuffer_relocate_slow(dev, args, file, ring,
@@ -1115,7 +1116,7 @@ i915_gem_execbuffer(struct drm_device *dev, void *data,
 		return -ENOMEM;
 	}
 	ret = copy_from_user(exec_list,
-			     (void __user *)(uintptr_t)args->buffers_ptr,
+			     to_user_ptr(args->buffers_ptr),
 			     sizeof(*exec_list) * args->buffer_count);
 	if (ret != 0) {
 		DRM_DEBUG("copy %d exec entries failed %d\n",
@@ -1154,7 +1155,7 @@ i915_gem_execbuffer(struct drm_device *dev, void *data,
 		for (i = 0; i < args->buffer_count; i++)
 			exec_list[i].offset = exec2_list[i].offset;
 		/* ... and back out to userspace */
-		ret = copy_to_user((void __user *)(uintptr_t)args->buffers_ptr,
+		ret = copy_to_user(to_user_ptr(args->buffers_ptr),
 				   exec_list,
 				   sizeof(*exec_list) * args->buffer_count);
 		if (ret) {
@@ -1195,8 +1196,7 @@ i915_gem_execbuffer2(struct drm_device *dev, void *data,
 		return -ENOMEM;
 	}
 	ret = copy_from_user(exec2_list,
-			     (struct drm_i915_relocation_entry __user *)
-			     (uintptr_t) args->buffers_ptr,
+			     to_user_ptr(args->buffers_ptr),
 			     sizeof(*exec2_list) * args->buffer_count);
 	if (ret != 0) {
 		DRM_DEBUG("copy %d exec entries failed %d\n",
@@ -1208,7 +1208,7 @@ i915_gem_execbuffer2(struct drm_device *dev, void *data,
 	ret = i915_gem_do_execbuffer(dev, data, file, args, exec2_list);
 	if (!ret) {
 		/* Copy the new buffer offsets back to the user's exec list. */
-		ret = copy_to_user((void __user *)(uintptr_t)args->buffers_ptr,
+		ret = copy_to_user(to_user_ptr(args->buffers_ptr),
 				   exec2_list,
 				   sizeof(*exec2_list) * args->buffer_count);
 		if (ret) {
...
@@ -83,7 +83,7 @@ static void gen6_ppgtt_clear_range(struct i915_hw_ppgtt *ppgtt,
 {
 	gtt_pte_t *pt_vaddr;
 	gtt_pte_t scratch_pte;
-	unsigned act_pd = first_entry / I915_PPGTT_PT_ENTRIES;
+	unsigned act_pt = first_entry / I915_PPGTT_PT_ENTRIES;
 	unsigned first_pte = first_entry % I915_PPGTT_PT_ENTRIES;
 	unsigned last_pte, i;
@@ -96,7 +96,7 @@ static void gen6_ppgtt_clear_range(struct i915_hw_ppgtt *ppgtt,
 		if (last_pte > I915_PPGTT_PT_ENTRIES)
 			last_pte = I915_PPGTT_PT_ENTRIES;
-		pt_vaddr = kmap_atomic(ppgtt->pt_pages[act_pd]);
+		pt_vaddr = kmap_atomic(ppgtt->pt_pages[act_pt]);
 		for (i = first_pte; i < last_pte; i++)
 			pt_vaddr[i] = scratch_pte;
@@ -105,7 +105,7 @@ static void gen6_ppgtt_clear_range(struct i915_hw_ppgtt *ppgtt,
 		num_entries -= last_pte - first_pte;
 		first_pte = 0;
-		act_pd++;
+		act_pt++;
 	}
 }
@@ -115,42 +115,27 @@ static void gen6_ppgtt_insert_entries(struct i915_hw_ppgtt *ppgtt,
 				      enum i915_cache_level cache_level)
 {
 	gtt_pte_t *pt_vaddr;
-	unsigned act_pd = first_entry / I915_PPGTT_PT_ENTRIES;
-	unsigned first_pte = first_entry % I915_PPGTT_PT_ENTRIES;
-	unsigned i, j, m, segment_len;
-	dma_addr_t page_addr;
-	struct scatterlist *sg;
-	/* init sg walking */
-	sg = pages->sgl;
-	i = 0;
-	segment_len = sg_dma_len(sg) >> PAGE_SHIFT;
-	m = 0;
-	while (i < pages->nents) {
-		pt_vaddr = kmap_atomic(ppgtt->pt_pages[act_pd]);
-		for (j = first_pte; j < I915_PPGTT_PT_ENTRIES; j++) {
-			page_addr = sg_dma_address(sg) + (m << PAGE_SHIFT);
-			pt_vaddr[j] = gen6_pte_encode(ppgtt->dev, page_addr,
-						      cache_level);
-			/* grab the next page */
-			if (++m == segment_len) {
-				if (++i == pages->nents)
-					break;
-				sg = sg_next(sg);
-				segment_len = sg_dma_len(sg) >> PAGE_SHIFT;
-				m = 0;
-			}
-		}
-		kunmap_atomic(pt_vaddr);
-		first_pte = 0;
-		act_pd++;
+	unsigned act_pt = first_entry / I915_PPGTT_PT_ENTRIES;
+	unsigned act_pte = first_entry % I915_PPGTT_PT_ENTRIES;
+	struct sg_page_iter sg_iter;
+	pt_vaddr = kmap_atomic(ppgtt->pt_pages[act_pt]);
+	for_each_sg_page(pages->sgl, &sg_iter, pages->nents, 0) {
+		dma_addr_t page_addr;
+		page_addr = sg_dma_address(sg_iter.sg) +
+			(sg_iter.sg_pgoffset << PAGE_SHIFT);
+		pt_vaddr[act_pte] = gen6_pte_encode(ppgtt->dev, page_addr,
+						    cache_level);
+		if (++act_pte == I915_PPGTT_PT_ENTRIES) {
+			kunmap_atomic(pt_vaddr);
+			act_pt++;
+			pt_vaddr = kmap_atomic(ppgtt->pt_pages[act_pt]);
+			act_pte = 0;
+		}
 	}
+	kunmap_atomic(pt_vaddr);
 }
 static void gen6_ppgtt_cleanup(struct i915_hw_ppgtt *ppgtt)
@@ -432,21 +417,17 @@ static void gen6_ggtt_insert_entries(struct drm_device *dev,
 				     enum i915_cache_level level)
 {
 	struct drm_i915_private *dev_priv = dev->dev_private;
-	struct scatterlist *sg = st->sgl;
 	gtt_pte_t __iomem *gtt_entries =
 		(gtt_pte_t __iomem *)dev_priv->gtt.gsm + first_entry;
-	int unused, i = 0;
-	unsigned int len, m = 0;
+	int i = 0;
+	struct sg_page_iter sg_iter;
 	dma_addr_t addr;
-	for_each_sg(st->sgl, sg, st->nents, unused) {
-		len = sg_dma_len(sg) >> PAGE_SHIFT;
-		for (m = 0; m < len; m++) {
-			addr = sg_dma_address(sg) + (m << PAGE_SHIFT);
-			iowrite32(gen6_pte_encode(dev, addr, level),
-				  &gtt_entries[i]);
-			i++;
-		}
+	for_each_sg_page(st->sgl, &sg_iter, st->nents, 0) {
+		addr = sg_dma_address(sg_iter.sg) +
+			(sg_iter.sg_pgoffset << PAGE_SHIFT);
+		iowrite32(gen6_pte_encode(dev, addr, level), &gtt_entries[i]);
+		i++;
 	}
 	/* XXX: This serves as a posting read to make sure that the PTE has
@@ -752,7 +733,7 @@ static int gen6_gmch_probe(struct drm_device *dev,
 	pci_read_config_word(dev->pdev, SNB_GMCH_CTRL, &snb_gmch_ctl);
 	gtt_size = gen6_get_total_gtt_size(snb_gmch_ctl);
-	if (IS_GEN7(dev))
+	if (IS_GEN7(dev) && !IS_VALLEYVIEW(dev))
 		*stolen = gen7_get_stolen_size(snb_gmch_ctl);
 	else
 		*stolen = gen6_get_stolen_size(snb_gmch_ctl);
...
@@ -222,8 +222,8 @@ i915_pages_create_for_stolen(struct drm_device *dev,
 	}
 	sg = st->sgl;
-	sg->offset = offset;
-	sg->length = size;
+	/* we set the dummy page here only to make for_each_sg_page work */
+	sg_set_page(sg, dev_priv->gtt.scratch_page, size, offset);
 	sg_dma_address(sg) = (dma_addr_t)dev_priv->mm.stolen_base + offset;
 	sg_dma_len(sg) = size;
...
@@ -473,28 +473,29 @@ i915_gem_swizzle_page(struct page *page)
 void
 i915_gem_object_do_bit_17_swizzle(struct drm_i915_gem_object *obj)
 {
-	struct scatterlist *sg;
-	int page_count = obj->base.size >> PAGE_SHIFT;
+	struct sg_page_iter sg_iter;
 	int i;
 	if (obj->bit_17 == NULL)
 		return;
-	for_each_sg(obj->pages->sgl, sg, page_count, i) {
-		struct page *page = sg_page(sg);
+	i = 0;
+	for_each_sg_page(obj->pages->sgl, &sg_iter, obj->pages->nents, 0) {
+		struct page *page = sg_iter.page;
 		char new_bit_17 = page_to_phys(page) >> 17;
 		if ((new_bit_17 & 0x1) !=
 		    (test_bit(i, obj->bit_17) != 0)) {
 			i915_gem_swizzle_page(page);
 			set_page_dirty(page);
 		}
+		i++;
 	}
 }
 void
 i915_gem_object_save_bit_17_swizzle(struct drm_i915_gem_object *obj)
 {
-	struct scatterlist *sg;
+	struct sg_page_iter sg_iter;
 	int page_count = obj->base.size >> PAGE_SHIFT;
 	int i;
@@ -508,11 +509,12 @@ i915_gem_object_save_bit_17_swizzle(struct drm_i915_gem_object *obj)
 		}
 	}
-	for_each_sg(obj->pages->sgl, sg, page_count, i) {
-		struct page *page = sg_page(sg);
-		if (page_to_phys(page) & (1 << 17))
+	i = 0;
+	for_each_sg_page(obj->pages->sgl, &sg_iter, obj->pages->nents, 0) {
+		if (page_to_phys(sg_iter.page) & (1 << 17))
 			__set_bit(i, obj->bit_17);
 		else
 			__clear_bit(i, obj->bit_17);
+		i++;
 	}
 }
...@@ -121,6 +121,7 @@ ...@@ -121,6 +121,7 @@
#define GAM_ECOCHK 0x4090 #define GAM_ECOCHK 0x4090
#define ECOCHK_SNB_BIT (1<<10) #define ECOCHK_SNB_BIT (1<<10)
#define HSW_ECOCHK_ARB_PRIO_SOL (1<<6)
#define ECOCHK_PPGTT_CACHE64B (0x3<<3) #define ECOCHK_PPGTT_CACHE64B (0x3<<3)
#define ECOCHK_PPGTT_CACHE4B (0x0<<3) #define ECOCHK_PPGTT_CACHE4B (0x0<<3)
...@@ -522,6 +523,9 @@ ...@@ -522,6 +523,9 @@
#define GEN7_ERR_INT 0x44040 #define GEN7_ERR_INT 0x44040
#define ERR_INT_MMIO_UNCLAIMED (1<<13) #define ERR_INT_MMIO_UNCLAIMED (1<<13)
#define FPGA_DBG 0x42300
#define FPGA_DBG_RM_NOCLAIM (1<<31)
#define DERRMR 0x44050 #define DERRMR 0x44050
/* GM45+ chicken bits -- debug workaround bits that may be required /* GM45+ chicken bits -- debug workaround bits that may be required
...@@ -591,6 +595,7 @@ ...@@ -591,6 +595,7 @@
#define I915_USER_INTERRUPT (1<<1) #define I915_USER_INTERRUPT (1<<1)
#define I915_ASLE_INTERRUPT (1<<0) #define I915_ASLE_INTERRUPT (1<<0)
#define I915_BSD_USER_INTERRUPT (1<<25) #define I915_BSD_USER_INTERRUPT (1<<25)
#define DISPLAY_PLANE_FLIP_PENDING(plane) (1<<(11-(plane))) /* A and B only */
#define EIR 0x020b0 #define EIR 0x020b0
#define EMR 0x020b4 #define EMR 0x020b4
#define ESR 0x020b8 #define ESR 0x020b8
...@@ -1676,42 +1681,63 @@ ...@@ -1676,42 +1681,63 @@
#define SDVOC_HOTPLUG_INT_STATUS_I915 (1 << 7) #define SDVOC_HOTPLUG_INT_STATUS_I915 (1 << 7)
#define SDVOB_HOTPLUG_INT_STATUS_I915 (1 << 6) #define SDVOB_HOTPLUG_INT_STATUS_I915 (1 << 6)
/* SDVO port control */ /* SDVO and HDMI port control.
#define SDVOB 0x61140 * The same register may be used for SDVO or HDMI */
#define SDVOC 0x61160 #define GEN3_SDVOB 0x61140
#define SDVO_ENABLE (1 << 31) #define GEN3_SDVOC 0x61160
#define SDVO_PIPE_B_SELECT (1 << 30) #define GEN4_HDMIB GEN3_SDVOB
#define SDVO_STALL_SELECT (1 << 29) #define GEN4_HDMIC GEN3_SDVOC
#define SDVO_INTERRUPT_ENABLE (1 << 26) #define PCH_SDVOB 0xe1140
#define PCH_HDMIB PCH_SDVOB
#define PCH_HDMIC 0xe1150
#define PCH_HDMID 0xe1160
/* Gen 3 SDVO bits: */
#define SDVO_ENABLE (1 << 31)
#define SDVO_PIPE_SEL(pipe) ((pipe) << 30)
#define SDVO_PIPE_SEL_MASK (1 << 30)
#define SDVO_PIPE_B_SELECT (1 << 30)
#define SDVO_STALL_SELECT (1 << 29)
#define SDVO_INTERRUPT_ENABLE (1 << 26)
/** /**
* 915G/GM SDVO pixel multiplier. * 915G/GM SDVO pixel multiplier.
*
* Programmed value is multiplier - 1, up to 5x. * Programmed value is multiplier - 1, up to 5x.
*
* \sa DPLL_MD_UDI_MULTIPLIER_MASK * \sa DPLL_MD_UDI_MULTIPLIER_MASK
*/ */
#define SDVO_PORT_MULTIPLY_MASK (7 << 23) #define SDVO_PORT_MULTIPLY_MASK (7 << 23)
#define SDVO_PORT_MULTIPLY_SHIFT 23 #define SDVO_PORT_MULTIPLY_SHIFT 23
#define SDVO_PHASE_SELECT_MASK (15 << 19) #define SDVO_PHASE_SELECT_MASK (15 << 19)
#define SDVO_PHASE_SELECT_DEFAULT (6 << 19) #define SDVO_PHASE_SELECT_DEFAULT (6 << 19)
#define SDVO_CLOCK_OUTPUT_INVERT (1 << 18) #define SDVO_CLOCK_OUTPUT_INVERT (1 << 18)
#define SDVOC_GANG_MODE (1 << 16) #define SDVOC_GANG_MODE (1 << 16) /* Port C only */
#define SDVO_ENCODING_SDVO (0x0 << 10) #define SDVO_BORDER_ENABLE (1 << 7) /* SDVO only */
#define SDVO_ENCODING_HDMI (0x2 << 10) #define SDVOB_PCIE_CONCURRENCY (1 << 3) /* Port B only */
/** Requird for HDMI operation */ #define SDVO_DETECTED (1 << 2)
#define SDVO_NULL_PACKETS_DURING_VSYNC (1 << 9)
#define SDVO_COLOR_RANGE_16_235 (1 << 8)
#define SDVO_BORDER_ENABLE (1 << 7)
#define SDVO_AUDIO_ENABLE (1 << 6)
/** New with 965, default is to be set */
#define SDVO_VSYNC_ACTIVE_HIGH (1 << 4)
/** New with 965, default is to be set */
#define SDVO_HSYNC_ACTIVE_HIGH (1 << 3)
#define SDVOB_PCIE_CONCURRENCY (1 << 3)
#define SDVO_DETECTED (1 << 2)
/* Bits to be preserved when writing */ /* Bits to be preserved when writing */
#define SDVOB_PRESERVE_MASK ((1 << 17) | (1 << 16) | (1 << 14) | (1 << 26)) #define SDVOB_PRESERVE_MASK ((1 << 17) | (1 << 16) | (1 << 14) | \
#define SDVOC_PRESERVE_MASK ((1 << 17) | (1 << 26)) SDVO_INTERRUPT_ENABLE)
#define SDVOC_PRESERVE_MASK ((1 << 17) | SDVO_INTERRUPT_ENABLE)
/* Gen 4 SDVO/HDMI bits: */
#define SDVO_COLOR_FORMAT_8bpc (0 << 26)
#define SDVO_ENCODING_SDVO (0 << 10)
#define SDVO_ENCODING_HDMI (2 << 10)
#define HDMI_MODE_SELECT_HDMI (1 << 9) /* HDMI only */
#define HDMI_MODE_SELECT_DVI (0 << 9) /* HDMI only */
#define HDMI_COLOR_RANGE_16_235 (1 << 8) /* HDMI only */
#define SDVO_AUDIO_ENABLE (1 << 6)
/* VSYNC/HSYNC bits new with 965, default is to be set */
#define SDVO_VSYNC_ACTIVE_HIGH (1 << 4)
#define SDVO_HSYNC_ACTIVE_HIGH (1 << 3)
/* Gen 5 (IBX) SDVO/HDMI bits: */
#define HDMI_COLOR_FORMAT_12bpc (3 << 26) /* HDMI only */
#define SDVOB_HOTPLUG_ENABLE (1 << 23) /* SDVO only */
/* Gen 6 (CPT) SDVO/HDMI bits: */
#define SDVO_PIPE_SEL_CPT(pipe) ((pipe) << 29)
#define SDVO_PIPE_SEL_MASK_CPT (3 << 29)
/* DVO port control */
#define DVOA 0x61120
...@@ -1898,7 +1924,7 @@
#define PFIT_AUTO_RATIOS (dev_priv->info->display_mmio_offset + 0x61238)
/* Backlight control */
#define BLC_PWM_CTL2 0x61250 /* 965+ only */
#define BLC_PWM_CTL2 (dev_priv->info->display_mmio_offset + 0x61250) /* 965+ only */
#define BLM_PWM_ENABLE (1 << 31)
#define BLM_COMBINATION_MODE (1 << 30) /* gen4 only */
#define BLM_PIPE_SELECT (1 << 29)
...@@ -1917,7 +1943,7 @@
#define BLM_PHASE_IN_COUNT_MASK (0xff << 8)
#define BLM_PHASE_IN_INCR_SHIFT (0)
#define BLM_PHASE_IN_INCR_MASK (0xff << 0)
#define BLC_PWM_CTL 0x61254
#define BLC_PWM_CTL (dev_priv->info->display_mmio_offset + 0x61254)
/*
* This is the most significant 15 bits of the number of backlight cycles in a
* complete cycle of the modulated backlight control.
...@@ -1939,7 +1965,7 @@
#define BACKLIGHT_DUTY_CYCLE_MASK_PNV (0xfffe)
#define BLM_POLARITY_PNV (1 << 0) /* pnv only */
#define BLC_HIST_CTL 0x61260
#define BLC_HIST_CTL (dev_priv->info->display_mmio_offset + 0x61260)
/* New registers for PCH-split platforms. Safe where new bits show up, the
* register layout machtes with gen4 BLC_PWM_CTL[12]. */
...@@ -2776,6 +2802,8 @@
#define DSPFW_HPLL_CURSOR_SHIFT 16
#define DSPFW_HPLL_CURSOR_MASK (0x3f<<16)
#define DSPFW_HPLL_SR_MASK (0x1ff)
#define DSPFW4 (dev_priv->info->display_mmio_offset + 0x70070)
#define DSPFW7 (dev_priv->info->display_mmio_offset + 0x7007c)
/* drain latency register values*/
#define DRAIN_LATENCY_PRECISION_32 32
...@@ -3754,14 +3782,16 @@
#define HSW_VIDEO_DIP_VSC_ECC_B 0x61344
#define HSW_VIDEO_DIP_GCP_B 0x61210
#define HSW_TVIDEO_DIP_CTL(pipe) \
_PIPE(pipe, HSW_VIDEO_DIP_CTL_A, HSW_VIDEO_DIP_CTL_B)
#define HSW_TVIDEO_DIP_CTL(trans) \
_TRANSCODER(trans, HSW_VIDEO_DIP_CTL_A, HSW_VIDEO_DIP_CTL_B)
#define HSW_TVIDEO_DIP_AVI_DATA(pipe) \
_PIPE(pipe, HSW_VIDEO_DIP_AVI_DATA_A, HSW_VIDEO_DIP_AVI_DATA_B)
#define HSW_TVIDEO_DIP_AVI_DATA(trans) \
_TRANSCODER(trans, HSW_VIDEO_DIP_AVI_DATA_A, HSW_VIDEO_DIP_AVI_DATA_B)
#define HSW_TVIDEO_DIP_SPD_DATA(pipe) \
_PIPE(pipe, HSW_VIDEO_DIP_SPD_DATA_A, HSW_VIDEO_DIP_SPD_DATA_B)
#define HSW_TVIDEO_DIP_SPD_DATA(trans) \
_TRANSCODER(trans, HSW_VIDEO_DIP_SPD_DATA_A, HSW_VIDEO_DIP_SPD_DATA_B)
#define HSW_TVIDEO_DIP_GCP(pipe) \
_PIPE(pipe, HSW_VIDEO_DIP_GCP_A, HSW_VIDEO_DIP_GCP_B)
#define HSW_TVIDEO_DIP_GCP(trans) \
_TRANSCODER(trans, HSW_VIDEO_DIP_GCP_A, HSW_VIDEO_DIP_GCP_B)
#define HSW_TVIDEO_DIP_VSC_DATA(trans) \
_TRANSCODER(trans, HSW_VIDEO_DIP_VSC_DATA_A, HSW_VIDEO_DIP_VSC_DATA_B)
#define _TRANS_HTOTAL_B 0xe1000
#define _TRANS_HBLANK_B 0xe1004
...@@ -3976,34 +4006,6 @@
#define FDI_PLL_CTL_1 0xfe000 #define FDI_PLL_CTL_1 0xfe000
#define FDI_PLL_CTL_2 0xfe004 #define FDI_PLL_CTL_2 0xfe004
/* or SDVOB */
#define HDMIB 0xe1140
#define PORT_ENABLE (1 << 31)
#define TRANSCODER(pipe) ((pipe) << 30)
#define TRANSCODER_CPT(pipe) ((pipe) << 29)
#define TRANSCODER_MASK (1 << 30)
#define TRANSCODER_MASK_CPT (3 << 29)
#define COLOR_FORMAT_8bpc (0)
#define COLOR_FORMAT_12bpc (3 << 26)
#define SDVOB_HOTPLUG_ENABLE (1 << 23)
#define SDVO_ENCODING (0)
#define TMDS_ENCODING (2 << 10)
#define NULL_PACKET_VSYNC_ENABLE (1 << 9)
/* CPT */
#define HDMI_MODE_SELECT (1 << 9)
#define DVI_MODE_SELECT (0)
#define SDVOB_BORDER_ENABLE (1 << 7)
#define AUDIO_ENABLE (1 << 6)
#define VSYNC_ACTIVE_HIGH (1 << 4)
#define HSYNC_ACTIVE_HIGH (1 << 3)
#define PORT_DETECTED (1 << 2)
/* PCH SDVOB multiplex with HDMIB */
#define PCH_SDVOB HDMIB
#define HDMIC 0xe1150
#define HDMID 0xe1160
#define PCH_LVDS 0xe1180
#define LVDS_DETECTED (1 << 1)
...@@ -4149,8 +4151,12 @@
#define FORCEWAKE 0xA18C
#define FORCEWAKE_VLV 0x1300b0
#define FORCEWAKE_ACK_VLV 0x1300b4
#define FORCEWAKE_MEDIA_VLV 0x1300b8
#define FORCEWAKE_ACK_MEDIA_VLV 0x1300bc
#define FORCEWAKE_ACK_HSW 0x130044
#define FORCEWAKE_ACK 0x130090
#define VLV_GTLC_WAKE_CTRL 0x130090
#define VLV_GTLC_PW_STATUS 0x130094
#define FORCEWAKE_MT 0xa188 /* multi-threaded */
#define FORCEWAKE_KERNEL 0x1
#define FORCEWAKE_USER 0x2
......
...@@ -209,7 +209,8 @@ static void i915_save_display(struct drm_device *dev)
dev_priv->regfile.saveBLC_PWM_CTL2 = I915_READ(BLC_PWM_PCH_CTL2);
dev_priv->regfile.saveBLC_CPU_PWM_CTL = I915_READ(BLC_PWM_CPU_CTL);
dev_priv->regfile.saveBLC_CPU_PWM_CTL2 = I915_READ(BLC_PWM_CPU_CTL2);
dev_priv->regfile.saveLVDS = I915_READ(PCH_LVDS);
if (HAS_PCH_IBX(dev) || HAS_PCH_CPT(dev))
dev_priv->regfile.saveLVDS = I915_READ(PCH_LVDS);
} else {
dev_priv->regfile.savePP_CONTROL = I915_READ(PP_CONTROL);
dev_priv->regfile.savePFIT_PGM_RATIOS = I915_READ(PFIT_PGM_RATIOS);
...@@ -255,6 +256,7 @@ static void i915_save_display(struct drm_device *dev)
static void i915_restore_display(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
u32 mask = 0xffffffff;
/* Display arbitration */
if (INTEL_INFO(dev)->gen <= 4)
...@@ -267,10 +269,13 @@ static void i915_restore_display(struct drm_device *dev)
if (INTEL_INFO(dev)->gen >= 4 && !HAS_PCH_SPLIT(dev))
I915_WRITE(BLC_PWM_CTL2, dev_priv->regfile.saveBLC_PWM_CTL2);
if (HAS_PCH_SPLIT(dev)) {
I915_WRITE(PCH_LVDS, dev_priv->regfile.saveLVDS);
} else if (IS_MOBILE(dev) && !IS_I830(dev))
I915_WRITE(LVDS, dev_priv->regfile.saveLVDS);
if (drm_core_check_feature(dev, DRIVER_MODESET))
mask = ~LVDS_PORT_EN;
if (HAS_PCH_IBX(dev) || HAS_PCH_CPT(dev))
I915_WRITE(PCH_LVDS, dev_priv->regfile.saveLVDS & mask);
else if (INTEL_INFO(dev)->gen <= 4 && IS_MOBILE(dev) && !IS_I830(dev))
I915_WRITE(LVDS, dev_priv->regfile.saveLVDS & mask);
if (!IS_I830(dev) && !IS_845G(dev) && !HAS_PCH_SPLIT(dev))
I915_WRITE(PFIT_CONTROL, dev_priv->regfile.savePFIT_CONTROL);
......
...@@ -49,7 +49,7 @@ static ssize_t ...@@ -49,7 +49,7 @@ static ssize_t
show_rc6_mask(struct device *kdev, struct device_attribute *attr, char *buf) show_rc6_mask(struct device *kdev, struct device_attribute *attr, char *buf)
{ {
struct drm_minor *dminor = container_of(kdev, struct drm_minor, kdev); struct drm_minor *dminor = container_of(kdev, struct drm_minor, kdev);
return snprintf(buf, PAGE_SIZE, "%x", intel_enable_rc6(dminor->dev)); return snprintf(buf, PAGE_SIZE, "%x\n", intel_enable_rc6(dminor->dev));
} }
static ssize_t static ssize_t
...@@ -57,7 +57,7 @@ show_rc6_ms(struct device *kdev, struct device_attribute *attr, char *buf) ...@@ -57,7 +57,7 @@ show_rc6_ms(struct device *kdev, struct device_attribute *attr, char *buf)
{ {
struct drm_minor *dminor = container_of(kdev, struct drm_minor, kdev); struct drm_minor *dminor = container_of(kdev, struct drm_minor, kdev);
u32 rc6_residency = calc_residency(dminor->dev, GEN6_GT_GFX_RC6); u32 rc6_residency = calc_residency(dminor->dev, GEN6_GT_GFX_RC6);
return snprintf(buf, PAGE_SIZE, "%u", rc6_residency); return snprintf(buf, PAGE_SIZE, "%u\n", rc6_residency);
} }
static ssize_t static ssize_t
...@@ -65,7 +65,7 @@ show_rc6p_ms(struct device *kdev, struct device_attribute *attr, char *buf) ...@@ -65,7 +65,7 @@ show_rc6p_ms(struct device *kdev, struct device_attribute *attr, char *buf)
{ {
struct drm_minor *dminor = container_of(kdev, struct drm_minor, kdev); struct drm_minor *dminor = container_of(kdev, struct drm_minor, kdev);
u32 rc6p_residency = calc_residency(dminor->dev, GEN6_GT_GFX_RC6p); u32 rc6p_residency = calc_residency(dminor->dev, GEN6_GT_GFX_RC6p);
return snprintf(buf, PAGE_SIZE, "%u", rc6p_residency); return snprintf(buf, PAGE_SIZE, "%u\n", rc6p_residency);
} }
static ssize_t static ssize_t
...@@ -73,7 +73,7 @@ show_rc6pp_ms(struct device *kdev, struct device_attribute *attr, char *buf) ...@@ -73,7 +73,7 @@ show_rc6pp_ms(struct device *kdev, struct device_attribute *attr, char *buf)
{ {
struct drm_minor *dminor = container_of(kdev, struct drm_minor, kdev); struct drm_minor *dminor = container_of(kdev, struct drm_minor, kdev);
u32 rc6pp_residency = calc_residency(dminor->dev, GEN6_GT_GFX_RC6pp); u32 rc6pp_residency = calc_residency(dminor->dev, GEN6_GT_GFX_RC6pp);
return snprintf(buf, PAGE_SIZE, "%u", rc6pp_residency); return snprintf(buf, PAGE_SIZE, "%u\n", rc6pp_residency);
} }
static DEVICE_ATTR(rc6_enable, S_IRUGO, show_rc6_mask, NULL); static DEVICE_ATTR(rc6_enable, S_IRUGO, show_rc6_mask, NULL);
...@@ -215,7 +215,7 @@ static ssize_t gt_cur_freq_mhz_show(struct device *kdev, ...@@ -215,7 +215,7 @@ static ssize_t gt_cur_freq_mhz_show(struct device *kdev,
ret = dev_priv->rps.cur_delay * GT_FREQUENCY_MULTIPLIER; ret = dev_priv->rps.cur_delay * GT_FREQUENCY_MULTIPLIER;
mutex_unlock(&dev_priv->rps.hw_lock); mutex_unlock(&dev_priv->rps.hw_lock);
return snprintf(buf, PAGE_SIZE, "%d", ret); return snprintf(buf, PAGE_SIZE, "%d\n", ret);
} }
static ssize_t gt_max_freq_mhz_show(struct device *kdev, struct device_attribute *attr, char *buf) static ssize_t gt_max_freq_mhz_show(struct device *kdev, struct device_attribute *attr, char *buf)
...@@ -229,7 +229,7 @@ static ssize_t gt_max_freq_mhz_show(struct device *kdev, struct device_attribute ...@@ -229,7 +229,7 @@ static ssize_t gt_max_freq_mhz_show(struct device *kdev, struct device_attribute
ret = dev_priv->rps.max_delay * GT_FREQUENCY_MULTIPLIER; ret = dev_priv->rps.max_delay * GT_FREQUENCY_MULTIPLIER;
mutex_unlock(&dev_priv->rps.hw_lock); mutex_unlock(&dev_priv->rps.hw_lock);
return snprintf(buf, PAGE_SIZE, "%d", ret); return snprintf(buf, PAGE_SIZE, "%d\n", ret);
} }
static ssize_t gt_max_freq_mhz_store(struct device *kdev, static ssize_t gt_max_freq_mhz_store(struct device *kdev,
...@@ -280,7 +280,7 @@ static ssize_t gt_min_freq_mhz_show(struct device *kdev, struct device_attribute ...@@ -280,7 +280,7 @@ static ssize_t gt_min_freq_mhz_show(struct device *kdev, struct device_attribute
ret = dev_priv->rps.min_delay * GT_FREQUENCY_MULTIPLIER; ret = dev_priv->rps.min_delay * GT_FREQUENCY_MULTIPLIER;
mutex_unlock(&dev_priv->rps.hw_lock); mutex_unlock(&dev_priv->rps.hw_lock);
return snprintf(buf, PAGE_SIZE, "%d", ret); return snprintf(buf, PAGE_SIZE, "%d\n", ret);
} }
static ssize_t gt_min_freq_mhz_store(struct device *kdev, static ssize_t gt_min_freq_mhz_store(struct device *kdev,
...@@ -355,7 +355,7 @@ static ssize_t gt_rp_mhz_show(struct device *kdev, struct device_attribute *attr ...@@ -355,7 +355,7 @@ static ssize_t gt_rp_mhz_show(struct device *kdev, struct device_attribute *attr
} else { } else {
BUG(); BUG();
} }
return snprintf(buf, PAGE_SIZE, "%d", val); return snprintf(buf, PAGE_SIZE, "%d\n", val);
} }
static const struct attribute *gen6_attrs[] = { static const struct attribute *gen6_attrs[] = {
......
...@@ -1341,15 +1341,15 @@ static void intel_disable_ddi(struct intel_encoder *intel_encoder) ...@@ -1341,15 +1341,15 @@ static void intel_disable_ddi(struct intel_encoder *intel_encoder)
struct drm_i915_private *dev_priv = dev->dev_private; struct drm_i915_private *dev_priv = dev->dev_private;
uint32_t tmp; uint32_t tmp;
tmp = I915_READ(HSW_AUD_PIN_ELD_CP_VLD);
tmp &= ~((AUDIO_OUTPUT_ENABLE_A | AUDIO_ELD_VALID_A) << (pipe * 4));
I915_WRITE(HSW_AUD_PIN_ELD_CP_VLD, tmp);
if (type == INTEL_OUTPUT_EDP) { if (type == INTEL_OUTPUT_EDP) {
struct intel_dp *intel_dp = enc_to_intel_dp(encoder); struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
ironlake_edp_backlight_off(intel_dp); ironlake_edp_backlight_off(intel_dp);
} }
tmp = I915_READ(HSW_AUD_PIN_ELD_CP_VLD);
tmp &= ~((AUDIO_OUTPUT_ENABLE_A | AUDIO_ELD_VALID_A) << (pipe * 4));
I915_WRITE(HSW_AUD_PIN_ELD_CP_VLD, tmp);
} }
int intel_ddi_get_cdclk_freq(struct drm_i915_private *dev_priv) int intel_ddi_get_cdclk_freq(struct drm_i915_private *dev_priv)
...@@ -1537,9 +1537,7 @@ void intel_ddi_init(struct drm_device *dev, enum port port) ...@@ -1537,9 +1537,7 @@ void intel_ddi_init(struct drm_device *dev, enum port port)
intel_dig_port->port_reversal = I915_READ(DDI_BUF_CTL(port)) & intel_dig_port->port_reversal = I915_READ(DDI_BUF_CTL(port)) &
DDI_BUF_PORT_REVERSAL; DDI_BUF_PORT_REVERSAL;
if (hdmi_connector) if (hdmi_connector)
intel_dig_port->hdmi.sdvox_reg = DDI_BUF_CTL(port); intel_dig_port->hdmi.hdmi_reg = DDI_BUF_CTL(port);
else
intel_dig_port->hdmi.sdvox_reg = 0;
intel_dig_port->dp.output_reg = DDI_BUF_CTL(port); intel_dig_port->dp.output_reg = DDI_BUF_CTL(port);
intel_encoder->type = INTEL_OUTPUT_UNKNOWN; intel_encoder->type = INTEL_OUTPUT_UNKNOWN;
......
...@@ -328,29 +328,10 @@ intel_dp_aux_wait_done(struct intel_dp *intel_dp, bool has_aux_irq) ...@@ -328,29 +328,10 @@ intel_dp_aux_wait_done(struct intel_dp *intel_dp, bool has_aux_irq)
struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp); struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
struct drm_device *dev = intel_dig_port->base.base.dev; struct drm_device *dev = intel_dig_port->base.base.dev;
struct drm_i915_private *dev_priv = dev->dev_private; struct drm_i915_private *dev_priv = dev->dev_private;
uint32_t ch_ctl = intel_dp->output_reg + 0x10; uint32_t ch_ctl = intel_dp->aux_ch_ctl_reg;
uint32_t status; uint32_t status;
bool done; bool done;
if (IS_HASWELL(dev)) {
switch (intel_dig_port->port) {
case PORT_A:
ch_ctl = DPA_AUX_CH_CTL;
break;
case PORT_B:
ch_ctl = PCH_DPB_AUX_CH_CTL;
break;
case PORT_C:
ch_ctl = PCH_DPC_AUX_CH_CTL;
break;
case PORT_D:
ch_ctl = PCH_DPD_AUX_CH_CTL;
break;
default:
BUG();
}
}
#define C (((status = I915_READ_NOTRACE(ch_ctl)) & DP_AUX_CH_CTL_SEND_BUSY) == 0) #define C (((status = I915_READ_NOTRACE(ch_ctl)) & DP_AUX_CH_CTL_SEND_BUSY) == 0)
if (has_aux_irq) if (has_aux_irq)
done = wait_event_timeout(dev_priv->gmbus_wait_queue, C, done = wait_event_timeout(dev_priv->gmbus_wait_queue, C,
...@@ -370,11 +351,10 @@ intel_dp_aux_ch(struct intel_dp *intel_dp, ...@@ -370,11 +351,10 @@ intel_dp_aux_ch(struct intel_dp *intel_dp,
uint8_t *send, int send_bytes, uint8_t *send, int send_bytes,
uint8_t *recv, int recv_size) uint8_t *recv, int recv_size)
{ {
uint32_t output_reg = intel_dp->output_reg;
struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp); struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
struct drm_device *dev = intel_dig_port->base.base.dev; struct drm_device *dev = intel_dig_port->base.base.dev;
struct drm_i915_private *dev_priv = dev->dev_private; struct drm_i915_private *dev_priv = dev->dev_private;
uint32_t ch_ctl = output_reg + 0x10; uint32_t ch_ctl = intel_dp->aux_ch_ctl_reg;
uint32_t ch_data = ch_ctl + 4; uint32_t ch_data = ch_ctl + 4;
int i, ret, recv_bytes; int i, ret, recv_bytes;
uint32_t status; uint32_t status;
...@@ -388,29 +368,6 @@ intel_dp_aux_ch(struct intel_dp *intel_dp, ...@@ -388,29 +368,6 @@ intel_dp_aux_ch(struct intel_dp *intel_dp,
*/ */
pm_qos_update_request(&dev_priv->pm_qos, 0); pm_qos_update_request(&dev_priv->pm_qos, 0);
if (IS_HASWELL(dev)) {
switch (intel_dig_port->port) {
case PORT_A:
ch_ctl = DPA_AUX_CH_CTL;
ch_data = DPA_AUX_CH_DATA1;
break;
case PORT_B:
ch_ctl = PCH_DPB_AUX_CH_CTL;
ch_data = PCH_DPB_AUX_CH_DATA1;
break;
case PORT_C:
ch_ctl = PCH_DPC_AUX_CH_CTL;
ch_data = PCH_DPC_AUX_CH_DATA1;
break;
case PORT_D:
ch_ctl = PCH_DPD_AUX_CH_CTL;
ch_data = PCH_DPD_AUX_CH_DATA1;
break;
default:
BUG();
}
}
intel_dp_check_edp(intel_dp); intel_dp_check_edp(intel_dp);
/* The clock divider is based off the hrawclk, /* The clock divider is based off the hrawclk,
* and would like to run at 2MHz. So, take the * and would like to run at 2MHz. So, take the
...@@ -853,7 +810,7 @@ intel_dp_set_m_n(struct drm_crtc *crtc, struct drm_display_mode *mode, ...@@ -853,7 +810,7 @@ intel_dp_set_m_n(struct drm_crtc *crtc, struct drm_display_mode *mode,
intel_link_compute_m_n(intel_crtc->bpp, lane_count, intel_link_compute_m_n(intel_crtc->bpp, lane_count,
target_clock, adjusted_mode->clock, &m_n); target_clock, adjusted_mode->clock, &m_n);
if (IS_HASWELL(dev)) { if (HAS_DDI(dev)) {
I915_WRITE(PIPE_DATA_M1(cpu_transcoder), I915_WRITE(PIPE_DATA_M1(cpu_transcoder),
TU_SIZE(m_n.tu) | m_n.gmch_m); TU_SIZE(m_n.tu) | m_n.gmch_m);
I915_WRITE(PIPE_DATA_N1(cpu_transcoder), m_n.gmch_n); I915_WRITE(PIPE_DATA_N1(cpu_transcoder), m_n.gmch_n);
...@@ -1020,7 +977,7 @@ intel_dp_mode_set(struct drm_encoder *encoder, struct drm_display_mode *mode, ...@@ -1020,7 +977,7 @@ intel_dp_mode_set(struct drm_encoder *encoder, struct drm_display_mode *mode,
intel_dp->DP |= DP_LINK_TRAIN_OFF_CPT; intel_dp->DP |= DP_LINK_TRAIN_OFF_CPT;
} }
if (is_cpu_edp(intel_dp)) if (is_cpu_edp(intel_dp) && !IS_VALLEYVIEW(dev))
ironlake_set_pll_edp(crtc, adjusted_mode->clock); ironlake_set_pll_edp(crtc, adjusted_mode->clock);
} }
...@@ -1384,7 +1341,7 @@ static bool intel_dp_get_hw_state(struct intel_encoder *encoder, ...@@ -1384,7 +1341,7 @@ static bool intel_dp_get_hw_state(struct intel_encoder *encoder,
if (!(tmp & DP_PORT_EN)) if (!(tmp & DP_PORT_EN))
return false; return false;
if (is_cpu_edp(intel_dp) && IS_GEN7(dev)) { if (is_cpu_edp(intel_dp) && IS_GEN7(dev) && !IS_VALLEYVIEW(dev)) {
*pipe = PORT_TO_PIPE_CPT(tmp); *pipe = PORT_TO_PIPE_CPT(tmp);
} else if (!HAS_PCH_CPT(dev) || is_cpu_edp(intel_dp)) { } else if (!HAS_PCH_CPT(dev) || is_cpu_edp(intel_dp)) {
*pipe = PORT_TO_PIPE(tmp); *pipe = PORT_TO_PIPE(tmp);
...@@ -1548,7 +1505,7 @@ intel_dp_pre_emphasis_max(struct intel_dp *intel_dp, uint8_t voltage_swing) ...@@ -1548,7 +1505,7 @@ intel_dp_pre_emphasis_max(struct intel_dp *intel_dp, uint8_t voltage_swing)
{ {
struct drm_device *dev = intel_dp_to_dev(intel_dp); struct drm_device *dev = intel_dp_to_dev(intel_dp);
if (IS_HASWELL(dev)) { if (HAS_DDI(dev)) {
switch (voltage_swing & DP_TRAIN_VOLTAGE_SWING_MASK) { switch (voltage_swing & DP_TRAIN_VOLTAGE_SWING_MASK) {
case DP_TRAIN_VOLTAGE_SWING_400: case DP_TRAIN_VOLTAGE_SWING_400:
return DP_TRAIN_PRE_EMPHASIS_9_5; return DP_TRAIN_PRE_EMPHASIS_9_5;
...@@ -1756,7 +1713,7 @@ intel_dp_set_signal_levels(struct intel_dp *intel_dp, uint32_t *DP) ...@@ -1756,7 +1713,7 @@ intel_dp_set_signal_levels(struct intel_dp *intel_dp, uint32_t *DP)
uint32_t signal_levels, mask; uint32_t signal_levels, mask;
uint8_t train_set = intel_dp->train_set[0]; uint8_t train_set = intel_dp->train_set[0];
if (IS_HASWELL(dev)) { if (HAS_DDI(dev)) {
signal_levels = intel_hsw_signal_levels(train_set); signal_levels = intel_hsw_signal_levels(train_set);
mask = DDI_BUF_EMP_MASK; mask = DDI_BUF_EMP_MASK;
} else if (IS_GEN7(dev) && is_cpu_edp(intel_dp) && !IS_VALLEYVIEW(dev)) { } else if (IS_GEN7(dev) && is_cpu_edp(intel_dp) && !IS_VALLEYVIEW(dev)) {
...@@ -1787,7 +1744,7 @@ intel_dp_set_link_train(struct intel_dp *intel_dp, ...@@ -1787,7 +1744,7 @@ intel_dp_set_link_train(struct intel_dp *intel_dp,
int ret; int ret;
uint32_t temp; uint32_t temp;
if (IS_HASWELL(dev)) { if (HAS_DDI(dev)) {
temp = I915_READ(DP_TP_CTL(port)); temp = I915_READ(DP_TP_CTL(port));
if (dp_train_pat & DP_LINK_SCRAMBLING_DISABLE) if (dp_train_pat & DP_LINK_SCRAMBLING_DISABLE)
...@@ -2311,6 +2268,16 @@ g4x_dp_detect(struct intel_dp *intel_dp) ...@@ -2311,6 +2268,16 @@ g4x_dp_detect(struct intel_dp *intel_dp)
struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp); struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
uint32_t bit; uint32_t bit;
/* Can't disconnect eDP, but you can close the lid... */
if (is_edp(intel_dp)) {
enum drm_connector_status status;
status = intel_panel_detect(dev);
if (status == connector_status_unknown)
status = connector_status_connected;
return status;
}
switch (intel_dig_port->port) { switch (intel_dig_port->port) {
case PORT_B: case PORT_B:
bit = PORTB_HOTPLUG_LIVE_STATUS; bit = PORTB_HOTPLUG_LIVE_STATUS;
...@@ -2844,6 +2811,25 @@ intel_dp_init_connector(struct intel_digital_port *intel_dig_port, ...@@ -2844,6 +2811,25 @@ intel_dp_init_connector(struct intel_digital_port *intel_dig_port,
else else
intel_connector->get_hw_state = intel_connector_get_hw_state; intel_connector->get_hw_state = intel_connector_get_hw_state;
intel_dp->aux_ch_ctl_reg = intel_dp->output_reg + 0x10;
if (HAS_DDI(dev)) {
switch (intel_dig_port->port) {
case PORT_A:
intel_dp->aux_ch_ctl_reg = DPA_AUX_CH_CTL;
break;
case PORT_B:
intel_dp->aux_ch_ctl_reg = PCH_DPB_AUX_CH_CTL;
break;
case PORT_C:
intel_dp->aux_ch_ctl_reg = PCH_DPC_AUX_CH_CTL;
break;
case PORT_D:
intel_dp->aux_ch_ctl_reg = PCH_DPD_AUX_CH_CTL;
break;
default:
BUG();
}
}
/* Set up the DDC bus. */ /* Set up the DDC bus. */
switch (port) { switch (port) {
......
...@@ -347,7 +347,7 @@ struct dip_infoframe { ...@@ -347,7 +347,7 @@ struct dip_infoframe {
} __attribute__((packed)); } __attribute__((packed));
struct intel_hdmi { struct intel_hdmi {
u32 sdvox_reg; u32 hdmi_reg;
int ddc_bus; int ddc_bus;
uint32_t color_range; uint32_t color_range;
bool color_range_auto; bool color_range_auto;
...@@ -366,6 +366,7 @@ struct intel_hdmi { ...@@ -366,6 +366,7 @@ struct intel_hdmi {
struct intel_dp { struct intel_dp {
uint32_t output_reg; uint32_t output_reg;
uint32_t aux_ch_ctl_reg;
uint32_t DP; uint32_t DP;
uint8_t link_configuration[DP_LINK_CONFIGURATION_SIZE]; uint8_t link_configuration[DP_LINK_CONFIGURATION_SIZE];
bool has_audio; bool has_audio;
...@@ -443,7 +444,7 @@ extern void intel_attach_broadcast_rgb_property(struct drm_connector *connector) ...@@ -443,7 +444,7 @@ extern void intel_attach_broadcast_rgb_property(struct drm_connector *connector)
extern void intel_crt_init(struct drm_device *dev); extern void intel_crt_init(struct drm_device *dev);
extern void intel_hdmi_init(struct drm_device *dev, extern void intel_hdmi_init(struct drm_device *dev,
int sdvox_reg, enum port port); int hdmi_reg, enum port port);
extern void intel_hdmi_init_connector(struct intel_digital_port *intel_dig_port, extern void intel_hdmi_init_connector(struct intel_digital_port *intel_dig_port,
struct intel_connector *intel_connector); struct intel_connector *intel_connector);
extern struct intel_hdmi *enc_to_intel_hdmi(struct drm_encoder *encoder); extern struct intel_hdmi *enc_to_intel_hdmi(struct drm_encoder *encoder);
...@@ -695,4 +696,6 @@ extern bool ...@@ -695,4 +696,6 @@ extern bool
intel_ddi_connector_get_hw_state(struct intel_connector *intel_connector); intel_ddi_connector_get_hw_state(struct intel_connector *intel_connector);
extern void intel_ddi_fdi_disable(struct drm_crtc *crtc); extern void intel_ddi_fdi_disable(struct drm_crtc *crtc);
extern void intel_display_handle_reset(struct drm_device *dev);
#endif /* __INTEL_DRV_H__ */ #endif /* __INTEL_DRV_H__ */
...@@ -150,8 +150,6 @@ static int intelfb_create(struct drm_fb_helper *helper, ...@@ -150,8 +150,6 @@ static int intelfb_create(struct drm_fb_helper *helper,
} }
info->screen_size = size; info->screen_size = size;
// memset(info->screen_base, 0, size);
drm_fb_helper_fill_fix(info, fb->pitches[0], fb->depth); drm_fb_helper_fill_fix(info, fb->pitches[0], fb->depth);
drm_fb_helper_fill_var(info, &ifbdev->helper, sizes->fb_width, sizes->fb_height); drm_fb_helper_fill_var(info, &ifbdev->helper, sizes->fb_width, sizes->fb_height);
...@@ -227,7 +225,7 @@ int intel_fbdev_init(struct drm_device *dev) ...@@ -227,7 +225,7 @@ int intel_fbdev_init(struct drm_device *dev)
ifbdev->helper.funcs = &intel_fb_helper_funcs; ifbdev->helper.funcs = &intel_fb_helper_funcs;
ret = drm_fb_helper_init(dev, &ifbdev->helper, ret = drm_fb_helper_init(dev, &ifbdev->helper,
dev_priv->num_pipe, INTEL_INFO(dev)->num_pipes,
INTELFB_CONN_LIMIT); INTELFB_CONN_LIMIT);
if (ret) { if (ret) {
kfree(ifbdev); kfree(ifbdev);
......
...@@ -50,7 +50,7 @@ assert_hdmi_port_disabled(struct intel_hdmi *intel_hdmi) ...@@ -50,7 +50,7 @@ assert_hdmi_port_disabled(struct intel_hdmi *intel_hdmi)
enabled_bits = HAS_DDI(dev) ? DDI_BUF_CTL_ENABLE : SDVO_ENABLE; enabled_bits = HAS_DDI(dev) ? DDI_BUF_CTL_ENABLE : SDVO_ENABLE;
WARN(I915_READ(intel_hdmi->sdvox_reg) & enabled_bits, WARN(I915_READ(intel_hdmi->hdmi_reg) & enabled_bits,
"HDMI port enabled, expecting disabled\n"); "HDMI port enabled, expecting disabled\n");
} }
...@@ -120,13 +120,14 @@ static u32 hsw_infoframe_enable(struct dip_infoframe *frame) ...@@ -120,13 +120,14 @@ static u32 hsw_infoframe_enable(struct dip_infoframe *frame)
} }
} }
static u32 hsw_infoframe_data_reg(struct dip_infoframe *frame, enum pipe pipe) static u32 hsw_infoframe_data_reg(struct dip_infoframe *frame,
enum transcoder cpu_transcoder)
{ {
switch (frame->type) { switch (frame->type) {
case DIP_TYPE_AVI: case DIP_TYPE_AVI:
return HSW_TVIDEO_DIP_AVI_DATA(pipe); return HSW_TVIDEO_DIP_AVI_DATA(cpu_transcoder);
case DIP_TYPE_SPD: case DIP_TYPE_SPD:
return HSW_TVIDEO_DIP_SPD_DATA(pipe); return HSW_TVIDEO_DIP_SPD_DATA(cpu_transcoder);
default: default:
DRM_DEBUG_DRIVER("unknown info frame type %d\n", frame->type); DRM_DEBUG_DRIVER("unknown info frame type %d\n", frame->type);
return 0; return 0;
...@@ -293,8 +294,8 @@ static void hsw_write_infoframe(struct drm_encoder *encoder, ...@@ -293,8 +294,8 @@ static void hsw_write_infoframe(struct drm_encoder *encoder,
struct drm_device *dev = encoder->dev; struct drm_device *dev = encoder->dev;
struct drm_i915_private *dev_priv = dev->dev_private; struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_crtc *intel_crtc = to_intel_crtc(encoder->crtc); struct intel_crtc *intel_crtc = to_intel_crtc(encoder->crtc);
u32 ctl_reg = HSW_TVIDEO_DIP_CTL(intel_crtc->pipe); u32 ctl_reg = HSW_TVIDEO_DIP_CTL(intel_crtc->cpu_transcoder);
u32 data_reg = hsw_infoframe_data_reg(frame, intel_crtc->pipe); u32 data_reg = hsw_infoframe_data_reg(frame, intel_crtc->cpu_transcoder);
unsigned int i, len = DIP_HEADER_SIZE + frame->len; unsigned int i, len = DIP_HEADER_SIZE + frame->len;
u32 val = I915_READ(ctl_reg); u32 val = I915_READ(ctl_reg);
...@@ -568,7 +569,7 @@ static void hsw_set_infoframes(struct drm_encoder *encoder, ...@@ -568,7 +569,7 @@ static void hsw_set_infoframes(struct drm_encoder *encoder,
struct drm_i915_private *dev_priv = encoder->dev->dev_private; struct drm_i915_private *dev_priv = encoder->dev->dev_private;
struct intel_crtc *intel_crtc = to_intel_crtc(encoder->crtc); struct intel_crtc *intel_crtc = to_intel_crtc(encoder->crtc);
struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(encoder); struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(encoder);
u32 reg = HSW_TVIDEO_DIP_CTL(intel_crtc->pipe); u32 reg = HSW_TVIDEO_DIP_CTL(intel_crtc->cpu_transcoder);
u32 val = I915_READ(reg); u32 val = I915_READ(reg);
assert_hdmi_port_disabled(intel_hdmi); assert_hdmi_port_disabled(intel_hdmi);
...@@ -597,40 +598,40 @@ static void intel_hdmi_mode_set(struct drm_encoder *encoder, ...@@ -597,40 +598,40 @@ static void intel_hdmi_mode_set(struct drm_encoder *encoder,
struct drm_i915_private *dev_priv = dev->dev_private; struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_crtc *intel_crtc = to_intel_crtc(encoder->crtc); struct intel_crtc *intel_crtc = to_intel_crtc(encoder->crtc);
struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(encoder); struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(encoder);
u32 sdvox; u32 hdmi_val;
sdvox = SDVO_ENCODING_HDMI; hdmi_val = SDVO_ENCODING_HDMI;
if (!HAS_PCH_SPLIT(dev)) if (!HAS_PCH_SPLIT(dev))
sdvox |= intel_hdmi->color_range; hdmi_val |= intel_hdmi->color_range;
if (adjusted_mode->flags & DRM_MODE_FLAG_PVSYNC) if (adjusted_mode->flags & DRM_MODE_FLAG_PVSYNC)
sdvox |= SDVO_VSYNC_ACTIVE_HIGH; hdmi_val |= SDVO_VSYNC_ACTIVE_HIGH;
if (adjusted_mode->flags & DRM_MODE_FLAG_PHSYNC) if (adjusted_mode->flags & DRM_MODE_FLAG_PHSYNC)
sdvox |= SDVO_HSYNC_ACTIVE_HIGH; hdmi_val |= SDVO_HSYNC_ACTIVE_HIGH;
if (intel_crtc->bpp > 24) if (intel_crtc->bpp > 24)
sdvox |= COLOR_FORMAT_12bpc; hdmi_val |= HDMI_COLOR_FORMAT_12bpc;
else else
sdvox |= COLOR_FORMAT_8bpc; hdmi_val |= SDVO_COLOR_FORMAT_8bpc;
/* Required on CPT */ /* Required on CPT */
if (intel_hdmi->has_hdmi_sink && HAS_PCH_CPT(dev)) if (intel_hdmi->has_hdmi_sink && HAS_PCH_CPT(dev))
sdvox |= HDMI_MODE_SELECT; hdmi_val |= HDMI_MODE_SELECT_HDMI;
if (intel_hdmi->has_audio) { if (intel_hdmi->has_audio) {
DRM_DEBUG_DRIVER("Enabling HDMI audio on pipe %c\n", DRM_DEBUG_DRIVER("Enabling HDMI audio on pipe %c\n",
pipe_name(intel_crtc->pipe)); pipe_name(intel_crtc->pipe));
sdvox |= SDVO_AUDIO_ENABLE; hdmi_val |= SDVO_AUDIO_ENABLE;
sdvox |= SDVO_NULL_PACKETS_DURING_VSYNC; hdmi_val |= HDMI_MODE_SELECT_HDMI;
intel_write_eld(encoder, adjusted_mode); intel_write_eld(encoder, adjusted_mode);
} }
if (HAS_PCH_CPT(dev)) if (HAS_PCH_CPT(dev))
sdvox |= PORT_TRANS_SEL_CPT(intel_crtc->pipe); hdmi_val |= SDVO_PIPE_SEL_CPT(intel_crtc->pipe);
else if (intel_crtc->pipe == PIPE_B) else
sdvox |= SDVO_PIPE_B_SELECT; hdmi_val |= SDVO_PIPE_SEL(intel_crtc->pipe);
I915_WRITE(intel_hdmi->sdvox_reg, sdvox); I915_WRITE(intel_hdmi->hdmi_reg, hdmi_val);
POSTING_READ(intel_hdmi->sdvox_reg); POSTING_READ(intel_hdmi->hdmi_reg);
intel_hdmi->set_infoframes(encoder, adjusted_mode); intel_hdmi->set_infoframes(encoder, adjusted_mode);
} }
...@@ -643,7 +644,7 @@ static bool intel_hdmi_get_hw_state(struct intel_encoder *encoder, ...@@ -643,7 +644,7 @@ static bool intel_hdmi_get_hw_state(struct intel_encoder *encoder,
struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(&encoder->base); struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(&encoder->base);
u32 tmp; u32 tmp;
tmp = I915_READ(intel_hdmi->sdvox_reg); tmp = I915_READ(intel_hdmi->hdmi_reg);
if (!(tmp & SDVO_ENABLE)) if (!(tmp & SDVO_ENABLE))
return false; return false;
...@@ -660,6 +661,7 @@ static void intel_enable_hdmi(struct intel_encoder *encoder) ...@@ -660,6 +661,7 @@ static void intel_enable_hdmi(struct intel_encoder *encoder)
{ {
struct drm_device *dev = encoder->base.dev; struct drm_device *dev = encoder->base.dev;
struct drm_i915_private *dev_priv = dev->dev_private; struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_crtc *intel_crtc = to_intel_crtc(encoder->base.crtc);
struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(&encoder->base); struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(&encoder->base);
u32 temp; u32 temp;
u32 enable_bits = SDVO_ENABLE; u32 enable_bits = SDVO_ENABLE;
...@@ -667,38 +669,32 @@ static void intel_enable_hdmi(struct intel_encoder *encoder) ...@@ -667,38 +669,32 @@ static void intel_enable_hdmi(struct intel_encoder *encoder)
if (intel_hdmi->has_audio) if (intel_hdmi->has_audio)
enable_bits |= SDVO_AUDIO_ENABLE; enable_bits |= SDVO_AUDIO_ENABLE;
temp = I915_READ(intel_hdmi->sdvox_reg); temp = I915_READ(intel_hdmi->hdmi_reg);
/* HW workaround for IBX, we need to move the port to transcoder A /* HW workaround for IBX, we need to move the port to transcoder A
* before disabling it. */ * before disabling it, so restore the transcoder select bit here. */
if (HAS_PCH_IBX(dev)) { if (HAS_PCH_IBX(dev))
struct drm_crtc *crtc = encoder->base.crtc; enable_bits |= SDVO_PIPE_SEL(intel_crtc->pipe);
int pipe = crtc ? to_intel_crtc(crtc)->pipe : -1;
/* Restore the transcoder select bit. */
if (pipe == PIPE_B)
enable_bits |= SDVO_PIPE_B_SELECT;
}
/* HW workaround, need to toggle enable bit off and on for 12bpc, but /* HW workaround, need to toggle enable bit off and on for 12bpc, but
* we do this anyway which shows more stable in testing. * we do this anyway which shows more stable in testing.
*/ */
if (HAS_PCH_SPLIT(dev)) { if (HAS_PCH_SPLIT(dev)) {
I915_WRITE(intel_hdmi->sdvox_reg, temp & ~SDVO_ENABLE); I915_WRITE(intel_hdmi->hdmi_reg, temp & ~SDVO_ENABLE);
POSTING_READ(intel_hdmi->sdvox_reg); POSTING_READ(intel_hdmi->hdmi_reg);
} }
temp |= enable_bits; temp |= enable_bits;
I915_WRITE(intel_hdmi->sdvox_reg, temp); I915_WRITE(intel_hdmi->hdmi_reg, temp);
POSTING_READ(intel_hdmi->sdvox_reg); POSTING_READ(intel_hdmi->hdmi_reg);
/* HW workaround, need to write this twice for issue that may result /* HW workaround, need to write this twice for issue that may result
* in first write getting masked. * in first write getting masked.
*/ */
if (HAS_PCH_SPLIT(dev)) { if (HAS_PCH_SPLIT(dev)) {
I915_WRITE(intel_hdmi->sdvox_reg, temp); I915_WRITE(intel_hdmi->hdmi_reg, temp);
POSTING_READ(intel_hdmi->sdvox_reg); POSTING_READ(intel_hdmi->hdmi_reg);
} }
} }
...@@ -710,7 +706,7 @@ static void intel_disable_hdmi(struct intel_encoder *encoder) ...@@ -710,7 +706,7 @@ static void intel_disable_hdmi(struct intel_encoder *encoder)
u32 temp; u32 temp;
u32 enable_bits = SDVO_ENABLE | SDVO_AUDIO_ENABLE; u32 enable_bits = SDVO_ENABLE | SDVO_AUDIO_ENABLE;
temp = I915_READ(intel_hdmi->sdvox_reg); temp = I915_READ(intel_hdmi->hdmi_reg);
/* HW workaround for IBX, we need to move the port to transcoder A /* HW workaround for IBX, we need to move the port to transcoder A
* before disabling it. */ * before disabling it. */
...@@ -720,12 +716,12 @@ static void intel_disable_hdmi(struct intel_encoder *encoder) ...@@ -720,12 +716,12 @@ static void intel_disable_hdmi(struct intel_encoder *encoder)
if (temp & SDVO_PIPE_B_SELECT) { if (temp & SDVO_PIPE_B_SELECT) {
temp &= ~SDVO_PIPE_B_SELECT; temp &= ~SDVO_PIPE_B_SELECT;
I915_WRITE(intel_hdmi->sdvox_reg, temp); I915_WRITE(intel_hdmi->hdmi_reg, temp);
POSTING_READ(intel_hdmi->sdvox_reg); POSTING_READ(intel_hdmi->hdmi_reg);
/* Again we need to write this twice. */ /* Again we need to write this twice. */
I915_WRITE(intel_hdmi->sdvox_reg, temp); I915_WRITE(intel_hdmi->hdmi_reg, temp);
POSTING_READ(intel_hdmi->sdvox_reg); POSTING_READ(intel_hdmi->hdmi_reg);
/* Transcoder selection bits only update /* Transcoder selection bits only update
* effectively on vblank. */ * effectively on vblank. */
...@@ -740,21 +736,21 @@ static void intel_disable_hdmi(struct intel_encoder *encoder) ...@@ -740,21 +736,21 @@ static void intel_disable_hdmi(struct intel_encoder *encoder)
* we do this anyway which shows more stable in testing. * we do this anyway which shows more stable in testing.
*/ */
if (HAS_PCH_SPLIT(dev)) { if (HAS_PCH_SPLIT(dev)) {
I915_WRITE(intel_hdmi->sdvox_reg, temp & ~SDVO_ENABLE); I915_WRITE(intel_hdmi->hdmi_reg, temp & ~SDVO_ENABLE);
POSTING_READ(intel_hdmi->sdvox_reg); POSTING_READ(intel_hdmi->hdmi_reg);
} }
temp &= ~enable_bits; temp &= ~enable_bits;
I915_WRITE(intel_hdmi->sdvox_reg, temp); I915_WRITE(intel_hdmi->hdmi_reg, temp);
POSTING_READ(intel_hdmi->sdvox_reg); POSTING_READ(intel_hdmi->hdmi_reg);
/* HW workaround, need to write this twice for issue that may result /* HW workaround, need to write this twice for issue that may result
* in first write getting masked. * in first write getting masked.
*/ */
if (HAS_PCH_SPLIT(dev)) { if (HAS_PCH_SPLIT(dev)) {
I915_WRITE(intel_hdmi->sdvox_reg, temp); I915_WRITE(intel_hdmi->hdmi_reg, temp);
POSTING_READ(intel_hdmi->sdvox_reg); POSTING_READ(intel_hdmi->hdmi_reg);
} }
} }
...@@ -782,7 +778,7 @@ bool intel_hdmi_mode_fixup(struct drm_encoder *encoder, ...@@ -782,7 +778,7 @@ bool intel_hdmi_mode_fixup(struct drm_encoder *encoder,
/* See CEA-861-E - 5.1 Default Encoding Parameters */ /* See CEA-861-E - 5.1 Default Encoding Parameters */
if (intel_hdmi->has_hdmi_sink && if (intel_hdmi->has_hdmi_sink &&
drm_match_cea_mode(adjusted_mode) > 1) drm_match_cea_mode(adjusted_mode) > 1)
intel_hdmi->color_range = SDVO_COLOR_RANGE_16_235; intel_hdmi->color_range = HDMI_COLOR_RANGE_16_235;
else else
intel_hdmi->color_range = 0; intel_hdmi->color_range = 0;
} }
...@@ -916,7 +912,7 @@ intel_hdmi_set_property(struct drm_connector *connector, ...@@ -916,7 +912,7 @@ intel_hdmi_set_property(struct drm_connector *connector,
break; break;
case INTEL_BROADCAST_RGB_LIMITED: case INTEL_BROADCAST_RGB_LIMITED:
intel_hdmi->color_range_auto = false; intel_hdmi->color_range_auto = false;
intel_hdmi->color_range = SDVO_COLOR_RANGE_16_235; intel_hdmi->color_range = HDMI_COLOR_RANGE_16_235;
break; break;
default: default:
return -EINVAL; return -EINVAL;
...@@ -1008,13 +1004,13 @@ void intel_hdmi_init_connector(struct intel_digital_port *intel_dig_port,
BUG();
}
if (!HAS_PCH_SPLIT(dev)) {
if (IS_VALLEYVIEW(dev)) {
intel_hdmi->write_infoframe = g4x_write_infoframe;
intel_hdmi->set_infoframes = g4x_set_infoframes;
} else if (IS_VALLEYVIEW(dev)) {
intel_hdmi->write_infoframe = vlv_write_infoframe;
intel_hdmi->set_infoframes = vlv_set_infoframes;
} else if (IS_HASWELL(dev)) {
} else if (!HAS_PCH_SPLIT(dev)) {
intel_hdmi->write_infoframe = g4x_write_infoframe;
intel_hdmi->set_infoframes = g4x_set_infoframes;
} else if (HAS_DDI(dev)) {
intel_hdmi->write_infoframe = hsw_write_infoframe;
intel_hdmi->set_infoframes = hsw_set_infoframes;
} else if (HAS_PCH_IBX(dev)) {
...@@ -1045,7 +1041,7 @@ void intel_hdmi_init_connector(struct intel_digital_port *intel_dig_port, ...@@ -1045,7 +1041,7 @@ void intel_hdmi_init_connector(struct intel_digital_port *intel_dig_port,
} }
} }
void intel_hdmi_init(struct drm_device *dev, int sdvox_reg, enum port port) void intel_hdmi_init(struct drm_device *dev, int hdmi_reg, enum port port)
{ {
struct intel_digital_port *intel_dig_port; struct intel_digital_port *intel_dig_port;
struct intel_encoder *intel_encoder; struct intel_encoder *intel_encoder;
...@@ -1078,7 +1074,7 @@ void intel_hdmi_init(struct drm_device *dev, int sdvox_reg, enum port port) ...@@ -1078,7 +1074,7 @@ void intel_hdmi_init(struct drm_device *dev, int sdvox_reg, enum port port)
intel_encoder->cloneable = false; intel_encoder->cloneable = false;
intel_dig_port->port = port; intel_dig_port->port = port;
intel_dig_port->hdmi.sdvox_reg = sdvox_reg; intel_dig_port->hdmi.hdmi_reg = hdmi_reg;
intel_dig_port->dp.output_reg = 0; intel_dig_port->dp.output_reg = 0;
intel_hdmi_init_connector(intel_dig_port, intel_connector); intel_hdmi_init_connector(intel_dig_port, intel_connector);
......
...@@ -1019,12 +1019,15 @@ static bool intel_lvds_supported(struct drm_device *dev) ...@@ -1019,12 +1019,15 @@ static bool intel_lvds_supported(struct drm_device *dev)
{ {
/* With the introduction of the PCH we gained a dedicated /* With the introduction of the PCH we gained a dedicated
* LVDS presence pin, use it. */ * LVDS presence pin, use it. */
if (HAS_PCH_SPLIT(dev)) if (HAS_PCH_IBX(dev) || HAS_PCH_CPT(dev))
return true; return true;
/* Otherwise LVDS was only attached to mobile products, /* Otherwise LVDS was only attached to mobile products,
* except for the inglorious 830gm */ * except for the inglorious 830gm */
return IS_MOBILE(dev) && !IS_I830(dev); if (INTEL_INFO(dev)->gen <= 4 && IS_MOBILE(dev) && !IS_I830(dev))
return true;
return false;
} }
/** /**
......
...@@ -335,7 +335,7 @@ void intel_panel_enable_backlight(struct drm_device *dev, ...@@ -335,7 +335,7 @@ void intel_panel_enable_backlight(struct drm_device *dev,
if (tmp & BLM_PWM_ENABLE) if (tmp & BLM_PWM_ENABLE)
goto set_level; goto set_level;
if (dev_priv->num_pipe == 3) if (INTEL_INFO(dev)->num_pipes == 3)
tmp &= ~BLM_PIPE_SELECT_IVB; tmp &= ~BLM_PIPE_SELECT_IVB;
else else
tmp &= ~BLM_PIPE_SELECT; tmp &= ~BLM_PIPE_SELECT;
......
...@@ -2631,9 +2631,11 @@ static void gen6_enable_rps(struct drm_device *dev)
if (!ret) {
pcu_mbox = 0;
ret = sandybridge_pcode_read(dev_priv, GEN6_READ_OC_PARAMS, &pcu_mbox);
if (ret && pcu_mbox & (1<<31)) { /* OC supported */
if (!ret && (pcu_mbox & (1<<31))) { /* OC supported */
DRM_DEBUG_DRIVER("overclocking supported, adjusting frequency max from %dMHz to %dMHz\n",
(dev_priv->rps.max_delay & 0xff) * 50,
(pcu_mbox & 0xff) * 50);
dev_priv->rps.max_delay = pcu_mbox & 0xff;
DRM_DEBUG_DRIVER("overclocking supported, adjusting frequency max to %dMHz\n", pcu_mbox * 50);
}
} else {
DRM_DEBUG_DRIVER("Failed to set the min frequency\n");
...@@ -2821,7 +2823,7 @@ static void ironlake_enable_rc6(struct drm_device *dev) ...@@ -2821,7 +2823,7 @@ static void ironlake_enable_rc6(struct drm_device *dev)
ret = intel_ring_idle(ring); ret = intel_ring_idle(ring);
dev_priv->mm.interruptible = was_interruptible; dev_priv->mm.interruptible = was_interruptible;
if (ret) { if (ret) {
DRM_ERROR("failed to enable ironlake power power savings\n"); DRM_ERROR("failed to enable ironlake power savings\n");
ironlake_teardown_rc6(dev); ironlake_teardown_rc6(dev);
return; return;
} }
...@@ -3768,6 +3770,9 @@ static void haswell_init_clock_gating(struct drm_device *dev) ...@@ -3768,6 +3770,9 @@ static void haswell_init_clock_gating(struct drm_device *dev)
I915_WRITE(GEN6_MBCTL, I915_READ(GEN6_MBCTL) | I915_WRITE(GEN6_MBCTL, I915_READ(GEN6_MBCTL) |
GEN6_MBCTL_ENABLE_BOOT_FETCH); GEN6_MBCTL_ENABLE_BOOT_FETCH);
/* WaSwitchSolVfFArbitrationPriority */
I915_WRITE(GAM_ECOCHK, I915_READ(GAM_ECOCHK) | HSW_ECOCHK_ARB_PRIO_SOL);
/* XXX: This is a workaround for early silicon revisions and should be /* XXX: This is a workaround for early silicon revisions and should be
* removed later. * removed later.
*/ */
...@@ -3899,8 +3904,10 @@ static void valleyview_init_clock_gating(struct drm_device *dev) ...@@ -3899,8 +3904,10 @@ static void valleyview_init_clock_gating(struct drm_device *dev)
CHICKEN3_DGMG_REQ_OUT_FIX_DISABLE | CHICKEN3_DGMG_REQ_OUT_FIX_DISABLE |
CHICKEN3_DGMG_DONE_FIX_DISABLE); CHICKEN3_DGMG_DONE_FIX_DISABLE);
/* WaDisablePSDDualDispatchEnable */
I915_WRITE(GEN7_HALF_SLICE_CHICKEN1, I915_WRITE(GEN7_HALF_SLICE_CHICKEN1,
_MASKED_BIT_ENABLE(GEN7_PSD_SINGLE_PORT_DISPATCH_ENABLE)); _MASKED_BIT_ENABLE(GEN7_MAX_PS_THREAD_DEP |
GEN7_PSD_SINGLE_PORT_DISPATCH_ENABLE));
/* Apply the WaDisableRHWOOptimizationForRenderHang workaround. */ /* Apply the WaDisableRHWOOptimizationForRenderHang workaround. */
I915_WRITE(GEN7_COMMON_SLICE_CHICKEN1, I915_WRITE(GEN7_COMMON_SLICE_CHICKEN1,
...@@ -3985,7 +3992,16 @@ static void valleyview_init_clock_gating(struct drm_device *dev) ...@@ -3985,7 +3992,16 @@ static void valleyview_init_clock_gating(struct drm_device *dev)
* Disable clock gating on th GCFG unit to prevent a delay * Disable clock gating on th GCFG unit to prevent a delay
* in the reporting of vblank events. * in the reporting of vblank events.
*/ */
I915_WRITE(VLV_GUNIT_CLOCK_GATE, GCFG_DIS);
I915_WRITE(VLV_GUNIT_CLOCK_GATE, 0xffffffff);
/* Conservative clock gating settings for now */
I915_WRITE(0x9400, 0xffffffff);
I915_WRITE(0x9404, 0xffffffff);
I915_WRITE(0x9408, 0xffffffff);
I915_WRITE(0x940c, 0xffffffff);
I915_WRITE(0x9410, 0xffffffff);
I915_WRITE(0x9414, 0xffffffff);
I915_WRITE(0x9418, 0xffffffff);
} }
static void g4x_init_clock_gating(struct drm_device *dev) static void g4x_init_clock_gating(struct drm_device *dev)
...@@ -4076,7 +4092,7 @@ void intel_set_power_well(struct drm_device *dev, bool enable) ...@@ -4076,7 +4092,7 @@ void intel_set_power_well(struct drm_device *dev, bool enable)
bool is_enabled, enable_requested; bool is_enabled, enable_requested;
uint32_t tmp; uint32_t tmp;
if (!IS_HASWELL(dev)) if (!HAS_POWER_WELL(dev))
return; return;
if (!i915_disable_power_well && !enable) if (!i915_disable_power_well && !enable)
...@@ -4114,7 +4130,7 @@ void intel_init_power_well(struct drm_device *dev) ...@@ -4114,7 +4130,7 @@ void intel_init_power_well(struct drm_device *dev)
{ {
struct drm_i915_private *dev_priv = dev->dev_private; struct drm_i915_private *dev_priv = dev->dev_private;
if (!IS_HASWELL(dev)) if (!HAS_POWER_WELL(dev))
return; return;
/* For now, we need the power well to be always enabled. */ /* For now, we need the power well to be always enabled. */
...@@ -4274,21 +4290,14 @@ static void __gen6_gt_force_wake_reset(struct drm_i915_private *dev_priv) ...@@ -4274,21 +4290,14 @@ static void __gen6_gt_force_wake_reset(struct drm_i915_private *dev_priv)
static void __gen6_gt_force_wake_get(struct drm_i915_private *dev_priv) static void __gen6_gt_force_wake_get(struct drm_i915_private *dev_priv)
{ {
u32 forcewake_ack; if (wait_for_atomic((I915_READ_NOTRACE(FORCEWAKE_ACK) & 1) == 0,
if (IS_HASWELL(dev_priv->dev))
forcewake_ack = FORCEWAKE_ACK_HSW;
else
forcewake_ack = FORCEWAKE_ACK;
if (wait_for_atomic((I915_READ_NOTRACE(forcewake_ack) & 1) == 0,
FORCEWAKE_ACK_TIMEOUT_MS)) FORCEWAKE_ACK_TIMEOUT_MS))
DRM_ERROR("Timed out waiting for forcewake old ack to clear.\n"); DRM_ERROR("Timed out waiting for forcewake old ack to clear.\n");
I915_WRITE_NOTRACE(FORCEWAKE, FORCEWAKE_KERNEL); I915_WRITE_NOTRACE(FORCEWAKE, 1);
POSTING_READ(ECOBUS); /* something from same cacheline, but !FORCEWAKE */ POSTING_READ(ECOBUS); /* something from same cacheline, but !FORCEWAKE */
if (wait_for_atomic((I915_READ_NOTRACE(forcewake_ack) & 1), if (wait_for_atomic((I915_READ_NOTRACE(FORCEWAKE_ACK) & 1),
FORCEWAKE_ACK_TIMEOUT_MS)) FORCEWAKE_ACK_TIMEOUT_MS))
DRM_ERROR("Timed out waiting for forcewake to ack request.\n"); DRM_ERROR("Timed out waiting for forcewake to ack request.\n");
...@@ -4311,7 +4320,7 @@ static void __gen6_gt_force_wake_mt_get(struct drm_i915_private *dev_priv) ...@@ -4311,7 +4320,7 @@ static void __gen6_gt_force_wake_mt_get(struct drm_i915_private *dev_priv)
else else
forcewake_ack = FORCEWAKE_MT_ACK; forcewake_ack = FORCEWAKE_MT_ACK;
if (wait_for_atomic((I915_READ_NOTRACE(forcewake_ack) & 1) == 0, if (wait_for_atomic((I915_READ_NOTRACE(forcewake_ack) & FORCEWAKE_KERNEL) == 0,
FORCEWAKE_ACK_TIMEOUT_MS)) FORCEWAKE_ACK_TIMEOUT_MS))
DRM_ERROR("Timed out waiting for forcewake old ack to clear.\n"); DRM_ERROR("Timed out waiting for forcewake old ack to clear.\n");
...@@ -4319,7 +4328,7 @@ static void __gen6_gt_force_wake_mt_get(struct drm_i915_private *dev_priv) ...@@ -4319,7 +4328,7 @@ static void __gen6_gt_force_wake_mt_get(struct drm_i915_private *dev_priv)
/* something from same cacheline, but !FORCEWAKE_MT */ /* something from same cacheline, but !FORCEWAKE_MT */
POSTING_READ(ECOBUS); POSTING_READ(ECOBUS);
if (wait_for_atomic((I915_READ_NOTRACE(forcewake_ack) & 1), if (wait_for_atomic((I915_READ_NOTRACE(forcewake_ack) & FORCEWAKE_KERNEL),
FORCEWAKE_ACK_TIMEOUT_MS)) FORCEWAKE_ACK_TIMEOUT_MS))
DRM_ERROR("Timed out waiting for forcewake to ack request.\n"); DRM_ERROR("Timed out waiting for forcewake to ack request.\n");
...@@ -4409,15 +4418,22 @@ static void vlv_force_wake_reset(struct drm_i915_private *dev_priv) ...@@ -4409,15 +4418,22 @@ static void vlv_force_wake_reset(struct drm_i915_private *dev_priv)
static void vlv_force_wake_get(struct drm_i915_private *dev_priv) static void vlv_force_wake_get(struct drm_i915_private *dev_priv)
{ {
if (wait_for_atomic((I915_READ_NOTRACE(FORCEWAKE_ACK_VLV) & 1) == 0, if (wait_for_atomic((I915_READ_NOTRACE(FORCEWAKE_ACK_VLV) & FORCEWAKE_KERNEL) == 0,
FORCEWAKE_ACK_TIMEOUT_MS)) FORCEWAKE_ACK_TIMEOUT_MS))
DRM_ERROR("Timed out waiting for forcewake old ack to clear.\n"); DRM_ERROR("Timed out waiting for forcewake old ack to clear.\n");
I915_WRITE_NOTRACE(FORCEWAKE_VLV, _MASKED_BIT_ENABLE(FORCEWAKE_KERNEL)); I915_WRITE_NOTRACE(FORCEWAKE_VLV, _MASKED_BIT_ENABLE(FORCEWAKE_KERNEL));
I915_WRITE_NOTRACE(FORCEWAKE_MEDIA_VLV,
_MASKED_BIT_ENABLE(FORCEWAKE_KERNEL));
if (wait_for_atomic((I915_READ_NOTRACE(FORCEWAKE_ACK_VLV) & 1), if (wait_for_atomic((I915_READ_NOTRACE(FORCEWAKE_ACK_VLV) & FORCEWAKE_KERNEL),
FORCEWAKE_ACK_TIMEOUT_MS)) FORCEWAKE_ACK_TIMEOUT_MS))
DRM_ERROR("Timed out waiting for forcewake to ack request.\n"); DRM_ERROR("Timed out waiting for GT to ack forcewake request.\n");
if (wait_for_atomic((I915_READ_NOTRACE(FORCEWAKE_ACK_MEDIA_VLV) &
FORCEWAKE_KERNEL),
FORCEWAKE_ACK_TIMEOUT_MS))
DRM_ERROR("Timed out waiting for media to ack forcewake request.\n");
__gen6_gt_wait_for_thread_c0(dev_priv); __gen6_gt_wait_for_thread_c0(dev_priv);
} }
...@@ -4425,8 +4441,9 @@ static void vlv_force_wake_get(struct drm_i915_private *dev_priv) ...@@ -4425,8 +4441,9 @@ static void vlv_force_wake_get(struct drm_i915_private *dev_priv)
static void vlv_force_wake_put(struct drm_i915_private *dev_priv) static void vlv_force_wake_put(struct drm_i915_private *dev_priv)
{ {
I915_WRITE_NOTRACE(FORCEWAKE_VLV, _MASKED_BIT_DISABLE(FORCEWAKE_KERNEL)); I915_WRITE_NOTRACE(FORCEWAKE_VLV, _MASKED_BIT_DISABLE(FORCEWAKE_KERNEL));
/* something from same cacheline, but !FORCEWAKE_VLV */ I915_WRITE_NOTRACE(FORCEWAKE_MEDIA_VLV,
POSTING_READ(FORCEWAKE_ACK_VLV); _MASKED_BIT_DISABLE(FORCEWAKE_KERNEL));
/* The below doubles as a POSTING_READ */
gen6_gt_check_fifodbg(dev_priv); gen6_gt_check_fifodbg(dev_priv);
} }
......
...@@ -246,11 +246,11 @@ static void intel_sdvo_write_sdvox(struct intel_sdvo *intel_sdvo, u32 val) ...@@ -246,11 +246,11 @@ static void intel_sdvo_write_sdvox(struct intel_sdvo *intel_sdvo, u32 val)
return; return;
} }
if (intel_sdvo->sdvo_reg == SDVOB) { if (intel_sdvo->sdvo_reg == GEN3_SDVOB)
cval = I915_READ(SDVOC); cval = I915_READ(GEN3_SDVOC);
} else { else
bval = I915_READ(SDVOB); bval = I915_READ(GEN3_SDVOB);
}
/* /*
* Write the registers twice for luck. Sometimes, * Write the registers twice for luck. Sometimes,
* writing them only once doesn't appear to 'stick'. * writing them only once doesn't appear to 'stick'.
...@@ -258,10 +258,10 @@ static void intel_sdvo_write_sdvox(struct intel_sdvo *intel_sdvo, u32 val) ...@@ -258,10 +258,10 @@ static void intel_sdvo_write_sdvox(struct intel_sdvo *intel_sdvo, u32 val)
*/ */
for (i = 0; i < 2; i++) for (i = 0; i < 2; i++)
{ {
I915_WRITE(SDVOB, bval); I915_WRITE(GEN3_SDVOB, bval);
I915_READ(SDVOB); I915_READ(GEN3_SDVOB);
I915_WRITE(SDVOC, cval); I915_WRITE(GEN3_SDVOC, cval);
I915_READ(SDVOC); I915_READ(GEN3_SDVOC);
} }
} }
...@@ -451,7 +451,7 @@ static bool intel_sdvo_write_cmd(struct intel_sdvo *intel_sdvo, u8 cmd, ...@@ -451,7 +451,7 @@ static bool intel_sdvo_write_cmd(struct intel_sdvo *intel_sdvo, u8 cmd,
int i, ret = true; int i, ret = true;
/* Would be simpler to allocate both in one go ? */ /* Would be simpler to allocate both in one go ? */
buf = (u8 *)kzalloc(args_len * 2 + 2, GFP_KERNEL); buf = kzalloc(args_len * 2 + 2, GFP_KERNEL);
if (!buf) if (!buf)
return false; return false;
...@@ -965,6 +965,8 @@ static bool intel_sdvo_set_avi_infoframe(struct intel_sdvo *intel_sdvo, ...@@ -965,6 +965,8 @@ static bool intel_sdvo_set_avi_infoframe(struct intel_sdvo *intel_sdvo,
avi_if.body.avi.ITC_EC_Q_SC |= DIP_AVI_RGB_QUANT_RANGE_FULL; avi_if.body.avi.ITC_EC_Q_SC |= DIP_AVI_RGB_QUANT_RANGE_FULL;
} }
avi_if.body.avi.VIC = drm_match_cea_mode(adjusted_mode);
intel_dip_infoframe_csum(&avi_if); intel_dip_infoframe_csum(&avi_if);
/* sdvo spec says that the ecc is handled by the hw, and it looks like /* sdvo spec says that the ecc is handled by the hw, and it looks like
...@@ -1076,9 +1078,11 @@ static bool intel_sdvo_mode_fixup(struct drm_encoder *encoder, ...@@ -1076,9 +1078,11 @@ static bool intel_sdvo_mode_fixup(struct drm_encoder *encoder,
if (intel_sdvo->color_range_auto) { if (intel_sdvo->color_range_auto) {
/* See CEA-861-E - 5.1 Default Encoding Parameters */ /* See CEA-861-E - 5.1 Default Encoding Parameters */
/* FIXME: This bit is only valid when using TMDS encoding and 8
* bit per color mode. */
if (intel_sdvo->has_hdmi_monitor && if (intel_sdvo->has_hdmi_monitor &&
drm_match_cea_mode(adjusted_mode) > 1) drm_match_cea_mode(adjusted_mode) > 1)
intel_sdvo->color_range = SDVO_COLOR_RANGE_16_235; intel_sdvo->color_range = HDMI_COLOR_RANGE_16_235;
else else
intel_sdvo->color_range = 0; intel_sdvo->color_range = 0;
} }
...@@ -1182,10 +1186,10 @@ static void intel_sdvo_mode_set(struct drm_encoder *encoder, ...@@ -1182,10 +1186,10 @@ static void intel_sdvo_mode_set(struct drm_encoder *encoder,
} else { } else {
sdvox = I915_READ(intel_sdvo->sdvo_reg); sdvox = I915_READ(intel_sdvo->sdvo_reg);
switch (intel_sdvo->sdvo_reg) { switch (intel_sdvo->sdvo_reg) {
case SDVOB: case GEN3_SDVOB:
sdvox &= SDVOB_PRESERVE_MASK; sdvox &= SDVOB_PRESERVE_MASK;
break; break;
case SDVOC: case GEN3_SDVOC:
sdvox &= SDVOC_PRESERVE_MASK; sdvox &= SDVOC_PRESERVE_MASK;
break; break;
} }
...@@ -1193,9 +1197,9 @@ static void intel_sdvo_mode_set(struct drm_encoder *encoder, ...@@ -1193,9 +1197,9 @@ static void intel_sdvo_mode_set(struct drm_encoder *encoder,
} }
if (INTEL_PCH_TYPE(dev) >= PCH_CPT) if (INTEL_PCH_TYPE(dev) >= PCH_CPT)
sdvox |= TRANSCODER_CPT(intel_crtc->pipe); sdvox |= SDVO_PIPE_SEL_CPT(intel_crtc->pipe);
else else
sdvox |= TRANSCODER(intel_crtc->pipe); sdvox |= SDVO_PIPE_SEL(intel_crtc->pipe);
if (intel_sdvo->has_hdmi_audio) if (intel_sdvo->has_hdmi_audio)
sdvox |= SDVO_AUDIO_ENABLE; sdvox |= SDVO_AUDIO_ENABLE;
...@@ -1305,15 +1309,9 @@ static void intel_enable_sdvo(struct intel_encoder *encoder) ...@@ -1305,15 +1309,9 @@ static void intel_enable_sdvo(struct intel_encoder *encoder)
temp = I915_READ(intel_sdvo->sdvo_reg); temp = I915_READ(intel_sdvo->sdvo_reg);
if ((temp & SDVO_ENABLE) == 0) { if ((temp & SDVO_ENABLE) == 0) {
/* HW workaround for IBX, we need to move the port /* HW workaround for IBX, we need to move the port
* to transcoder A before disabling it. */ * to transcoder A before disabling it, so restore it here. */
if (HAS_PCH_IBX(dev)) { if (HAS_PCH_IBX(dev))
struct drm_crtc *crtc = encoder->base.crtc; temp |= SDVO_PIPE_SEL(intel_crtc->pipe);
int pipe = crtc ? to_intel_crtc(crtc)->pipe : -1;
/* Restore the transcoder select bit. */
if (pipe == PIPE_B)
temp |= SDVO_PIPE_B_SELECT;
}
intel_sdvo_write_sdvox(intel_sdvo, temp | SDVO_ENABLE); intel_sdvo_write_sdvox(intel_sdvo, temp | SDVO_ENABLE);
} }
...@@ -1932,7 +1930,9 @@ intel_sdvo_set_property(struct drm_connector *connector, ...@@ -1932,7 +1930,9 @@ intel_sdvo_set_property(struct drm_connector *connector,
break; break;
case INTEL_BROADCAST_RGB_LIMITED: case INTEL_BROADCAST_RGB_LIMITED:
intel_sdvo->color_range_auto = false; intel_sdvo->color_range_auto = false;
intel_sdvo->color_range = SDVO_COLOR_RANGE_16_235; /* FIXME: this bit is only valid when using TMDS
* encoding and 8 bit per color mode. */
intel_sdvo->color_range = HDMI_COLOR_RANGE_16_235;
break; break;
default: default:
return -EINVAL; return -EINVAL;
......
...@@ -1645,6 +1645,11 @@ static int do_register_framebuffer(struct fb_info *fb_info) ...@@ -1645,6 +1645,11 @@ static int do_register_framebuffer(struct fb_info *fb_info)
if (!fb_info->modelist.prev || !fb_info->modelist.next) if (!fb_info->modelist.prev || !fb_info->modelist.next)
INIT_LIST_HEAD(&fb_info->modelist); INIT_LIST_HEAD(&fb_info->modelist);
if (fb_info->skip_vt_switch)
pm_vt_switch_required(fb_info->dev, false);
else
pm_vt_switch_required(fb_info->dev, true);
fb_var_to_videomode(&mode, &fb_info->var); fb_var_to_videomode(&mode, &fb_info->var);
fb_add_videomode(&mode, &fb_info->modelist); fb_add_videomode(&mode, &fb_info->modelist);
registered_fb[i] = fb_info; registered_fb[i] = fb_info;
...@@ -1679,6 +1684,8 @@ static int do_unregister_framebuffer(struct fb_info *fb_info) ...@@ -1679,6 +1684,8 @@ static int do_unregister_framebuffer(struct fb_info *fb_info)
if (ret) if (ret)
return -EINVAL; return -EINVAL;
pm_vt_switch_unregister(fb_info->dev);
unlink_framebuffer(fb_info); unlink_framebuffer(fb_info);
if (fb_info->pixmap.addr && if (fb_info->pixmap.addr &&
(fb_info->pixmap.flags & FB_PIXMAP_DEFAULT)) (fb_info->pixmap.flags & FB_PIXMAP_DEFAULT))
......
...@@ -501,6 +501,8 @@ struct fb_info { ...@@ -501,6 +501,8 @@ struct fb_info {
resource_size_t size; resource_size_t size;
} ranges[0]; } ranges[0];
} *apertures; } *apertures;
bool skip_vt_switch; /* no VT switch on suspend/resume required */
}; };
static inline struct apertures_struct *alloc_apertures(unsigned int max_num) { static inline struct apertures_struct *alloc_apertures(unsigned int max_num) {
......
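The two hunks above give fbdev a per-device opt-out: a driver that can restore its own video state sets the new skip_vt_switch flag, and do_register_framebuffer()/do_unregister_framebuffer() translate that into pm_vt_switch_required()/pm_vt_switch_unregister() calls. A minimal sketch of the driver side, illustrative only and not part of this series (example_fb_probe and the platform device are hypothetical):

#include <linux/fb.h>
#include <linux/platform_device.h>

/* Hypothetical fbdev probe path: this driver restores its own video state on
 * resume, so it asks fbmem not to force a VT switch around suspend/resume. */
static int example_fb_probe(struct platform_device *pdev)
{
	struct fb_info *info = framebuffer_alloc(0, &pdev->dev);

	if (!info)
		return -ENOMEM;

	/* ... set up fbops, var/fix and screen_base as usual ... */

	/* Forwarded by do_register_framebuffer() as
	 * pm_vt_switch_required(info->dev, false). */
	info->skip_vt_switch = true;

	return register_framebuffer(info);
}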
...@@ -34,6 +34,19 @@ ...@@ -34,6 +34,19 @@
extern void (*pm_power_off)(void); extern void (*pm_power_off)(void);
extern void (*pm_power_off_prepare)(void); extern void (*pm_power_off_prepare)(void);
struct device; /* we have a circular dep with device.h */
#ifdef CONFIG_VT_CONSOLE_SLEEP
extern void pm_vt_switch_required(struct device *dev, bool required);
extern void pm_vt_switch_unregister(struct device *dev);
#else
static inline void pm_vt_switch_required(struct device *dev, bool required)
{
}
static inline void pm_vt_switch_unregister(struct device *dev)
{
}
#endif /* CONFIG_VT_CONSOLE_SLEEP */
/* /*
* Device power management * Device power management
*/ */
......
...@@ -4,6 +4,7 @@ ...@@ -4,6 +4,7 @@
* Originally from swsusp. * Originally from swsusp.
*/ */
#include <linux/console.h>
#include <linux/vt_kern.h> #include <linux/vt_kern.h>
#include <linux/kbd_kern.h> #include <linux/kbd_kern.h>
#include <linux/vt.h> #include <linux/vt.h>
...@@ -14,8 +15,120 @@ ...@@ -14,8 +15,120 @@
static int orig_fgconsole, orig_kmsg; static int orig_fgconsole, orig_kmsg;
static DEFINE_MUTEX(vt_switch_mutex);
struct pm_vt_switch {
struct list_head head;
struct device *dev;
bool required;
};
static LIST_HEAD(pm_vt_switch_list);
/**
* pm_vt_switch_required - indicate VT switch at suspend requirements
* @dev: device
* @required: if true, caller needs VT switch at suspend/resume time
*
* The different console drivers may or may not require VT switches across
* suspend/resume, depending on how they handle restoring video state and
* what may be running.
*
* Drivers can indicate support for switchless suspend/resume, which can
* save time and flicker, by using this routine and passing 'false' as
* the argument. If any loaded driver needs VT switching, or the
* no_console_suspend argument has been passed on the command line, VT
* switches will occur.
*/
void pm_vt_switch_required(struct device *dev, bool required)
{
struct pm_vt_switch *entry, *tmp;
mutex_lock(&vt_switch_mutex);
list_for_each_entry(tmp, &pm_vt_switch_list, head) {
if (tmp->dev == dev) {
/* already registered, update requirement */
tmp->required = required;
goto out;
}
}
entry = kmalloc(sizeof(*entry), GFP_KERNEL);
if (!entry)
goto out;
entry->required = required;
entry->dev = dev;
list_add(&entry->head, &pm_vt_switch_list);
out:
mutex_unlock(&vt_switch_mutex);
}
EXPORT_SYMBOL(pm_vt_switch_required);
/**
* pm_vt_switch_unregister - stop tracking a device's VT switching needs
* @dev: device
*
* Remove @dev from the vt switch list.
*/
void pm_vt_switch_unregister(struct device *dev)
{
struct pm_vt_switch *tmp;
mutex_lock(&vt_switch_mutex);
list_for_each_entry(tmp, &pm_vt_switch_list, head) {
if (tmp->dev == dev) {
list_del(&tmp->head);
break;
}
}
mutex_unlock(&vt_switch_mutex);
}
EXPORT_SYMBOL(pm_vt_switch_unregister);
/*
* There are three cases when a VT switch on suspend/resume are required:
* 1) no driver has indicated a requirement one way or another, so preserve
* the old behavior
* 2) console suspend is disabled, we want to see debug messages across
* suspend/resume
* 3) any registered driver indicates it needs a VT switch
*
* If none of these conditions is present, meaning we have at least one driver
* that doesn't need the switch, and none that do, we can avoid it to make
* resume look a little prettier (and suspend too, but that's usually hidden,
* e.g. when closing the lid on a laptop).
*/
static bool pm_vt_switch(void)
{
struct pm_vt_switch *entry;
bool ret = true;
mutex_lock(&vt_switch_mutex);
if (list_empty(&pm_vt_switch_list))
goto out;
if (!console_suspend_enabled)
goto out;
list_for_each_entry(entry, &pm_vt_switch_list, head) {
if (entry->required)
goto out;
}
ret = false;
out:
mutex_unlock(&vt_switch_mutex);
return ret;
}
int pm_prepare_console(void) int pm_prepare_console(void)
{ {
if (!pm_vt_switch())
return 0;
orig_fgconsole = vt_move_to_console(SUSPEND_CONSOLE, 1); orig_fgconsole = vt_move_to_console(SUSPEND_CONSOLE, 1);
if (orig_fgconsole < 0) if (orig_fgconsole < 0)
return 1; return 1;
...@@ -26,6 +139,9 @@ int pm_prepare_console(void) ...@@ -26,6 +139,9 @@ int pm_prepare_console(void)
void pm_restore_console(void) void pm_restore_console(void)
{ {
if (!pm_vt_switch())
return;
if (orig_fgconsole >= 0) { if (orig_fgconsole >= 0) {
vt_move_to_console(orig_fgconsole, 0); vt_move_to_console(orig_fgconsole, 0);
vt_kmsg_redirect(orig_kmsg); vt_kmsg_redirect(orig_kmsg);
......
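Drivers that do not go through fbmem can use the kernel/power/console.c interface above directly. A hedged sketch of how a video driver might register and later drop its "no VT switch needed" requirement (the example_* hooks are hypothetical, not from this series):

#include <linux/device.h>
#include <linux/pm.h>

/* Hypothetical load/unload hooks: declare that this device can suspend and
 * resume without a VT switch, and drop the entry again on teardown. */
static void example_display_load(struct device *dev)
{
	pm_vt_switch_required(dev, false);
}

static void example_display_unload(struct device *dev)
{
	pm_vt_switch_unregister(dev);
}

With this in place, pm_prepare_console()/pm_restore_console() skip the console switch only when every registered device reports that it is not required and console suspend is enabled, per pm_vt_switch() above.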