Commit cd17ef41 authored by Dave Airlie

Merge tag 'drm-intel-next-2013-02-01' of...

Merge tag 'drm-intel-next-2013-02-01' of git://people.freedesktop.org/~danvet/drm-intel into drm-next

Daniel writes:
"Probably the last feature pull for 3.9, there's some fixes outstanding
thought that I'd like to sneak in. And maybe 3.8 takes a bit longer ...
Anyway, highlights of this pull:
- Kill the horrible IS_DISPLAYREG hack to handle the mmio offset movements
  on vlv, big thanks to Ville.
- Dynamic power well support for Haswell, shaves away a bit when only
  using the eDP port on pipe A (Paulo). Plus unclaimed register fixes
  uncovered by this.
- Clarifications of the gpu hang/reset state transitions, hopefully fixing
  a few spurious -EIO deaths in userspace.
- Haswell ELD fixes.
- Some more (pp)gtt cleanups from Ben.
- A few smaller things all over.

Plus all the stuff from the previous rather small pull request:
- Broadcast RGB improvements and reduced color range fixes from Ville.
- Ben is on a "kill legacy gtt code for good" spree, first pile of patches
  included.
- No-relocs and bo lut improvements for faster execbuf from Chris.
- Some refactorings from Imre."

* tag 'drm-intel-next-2013-02-01' of git://people.freedesktop.org/~danvet/drm-intel: (101 commits)
  GPU/i915: Fix acpi_bus_get_device() check in drivers/gpu/drm/i915/intel_opregion.c
  drm/i915: Set the SR01 "screen off" bit in i915_redisable_vga() too
  drm/i915: Kill IS_DISPLAYREG()
  drm/i915: Introduce i915_vgacntrl_reg()
  drm/i915: gen6_gmch_remove can be static
  drm/i915: dynamic Haswell display power well support
  drm/i915: check the power down well on assert_pipe()
  drm/i915: don't send DP "idle" pattern before "normal" on HSW PORT_A
  drm/i915: don't run hsw power well code on !hsw
  drm/i915: kill cargo-culted locking from power well code
  drm/i915: Only run idle processing from i915_gem_retire_requests_worker
  drm/i915: Fix CAGF for HSW
  drm/i915: Reclaim GTT space for failed PPGTT
  drm/i915: remove intel_gtt structure
  drm/i915: Add probe and remove to the gtt ops
  drm/i915: extract hw ppgtt setup/cleanup code
  drm/i915: pte_encode is gen6+
  drm/i915: vfuncs for ppgtt
  drm/i915: vfuncs for gtt_clear_range/insert_entries
  drm/i915: Error state should print /sys/kernel/debug
  ...
parents 67c96400 7d37beaa
@@ -60,7 +60,6 @@ struct intel_gtt_driver {
 };

 static struct _intel_private {
-        struct intel_gtt base;
        const struct intel_gtt_driver *driver;
        struct pci_dev *pcidev; /* device one */
        struct pci_dev *bridge_dev;
@@ -75,7 +74,18 @@ static struct _intel_private {
        struct resource ifp_resource;
        int resource_valid;
        struct page *scratch_page;
+        phys_addr_t scratch_page_dma;
        int refcount;
+        /* Whether i915 needs to use the dmar apis or not. */
+        unsigned int needs_dmar : 1;
+        phys_addr_t gma_bus_addr;
+        /* Size of memory reserved for graphics by the BIOS */
+        unsigned int stolen_size;
+        /* Total number of gtt entries. */
+        unsigned int gtt_total_entries;
+        /* Part of the gtt that is mappable by the cpu, for those chips where
+         * this is not the full gtt. */
+        unsigned int gtt_mappable_entries;
 } intel_private;

 #define INTEL_GTT_GEN   intel_private.driver->gen
@@ -291,15 +301,15 @@ static int intel_gtt_setup_scratch_page(void)
        get_page(page);
        set_pages_uc(page, 1);

-        if (intel_private.base.needs_dmar) {
+        if (intel_private.needs_dmar) {
                dma_addr = pci_map_page(intel_private.pcidev, page, 0,
                                    PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
                if (pci_dma_mapping_error(intel_private.pcidev, dma_addr))
                        return -EINVAL;

-                intel_private.base.scratch_page_dma = dma_addr;
+                intel_private.scratch_page_dma = dma_addr;
        } else
-                intel_private.base.scratch_page_dma = page_to_phys(page);
+                intel_private.scratch_page_dma = page_to_phys(page);

        intel_private.scratch_page = page;
@@ -506,7 +516,7 @@ static unsigned int intel_gtt_total_entries(void)
                /* On previous hardware, the GTT size was just what was
                 * required to map the aperture.
                 */
-                return intel_private.base.gtt_mappable_entries;
+                return intel_private.gtt_mappable_entries;
        }
 }
@@ -546,7 +556,7 @@ static unsigned int intel_gtt_mappable_entries(void)
 static void intel_gtt_teardown_scratch_page(void)
 {
        set_pages_wb(intel_private.scratch_page, 1);
-        pci_unmap_page(intel_private.pcidev, intel_private.base.scratch_page_dma,
+        pci_unmap_page(intel_private.pcidev, intel_private.scratch_page_dma,
                       PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
        put_page(intel_private.scratch_page);
        __free_page(intel_private.scratch_page);
@@ -572,8 +582,8 @@ static int intel_gtt_init(void)
        if (ret != 0)
                return ret;

-        intel_private.base.gtt_mappable_entries = intel_gtt_mappable_entries();
-        intel_private.base.gtt_total_entries = intel_gtt_total_entries();
+        intel_private.gtt_mappable_entries = intel_gtt_mappable_entries();
+        intel_private.gtt_total_entries = intel_gtt_total_entries();

        /* save the PGETBL reg for resume */
        intel_private.PGETBL_save =
@@ -585,10 +595,10 @@ static int intel_gtt_init(void)
        dev_info(&intel_private.bridge_dev->dev,
                 "detected gtt size: %dK total, %dK mappable\n",
-                 intel_private.base.gtt_total_entries * 4,
-                 intel_private.base.gtt_mappable_entries * 4);
+                 intel_private.gtt_total_entries * 4,
+                 intel_private.gtt_mappable_entries * 4);

-        gtt_map_size = intel_private.base.gtt_total_entries * 4;
+        gtt_map_size = intel_private.gtt_total_entries * 4;

        intel_private.gtt = NULL;
        if (INTEL_GTT_GEN < 6 && INTEL_GTT_GEN > 2)
@@ -605,9 +615,9 @@ static int intel_gtt_init(void)
        global_cache_flush();   /* FIXME: ? */

-        intel_private.base.stolen_size = intel_gtt_stolen_size();
+        intel_private.stolen_size = intel_gtt_stolen_size();

-        intel_private.base.needs_dmar = USE_PCI_DMA_API && INTEL_GTT_GEN > 2;
+        intel_private.needs_dmar = USE_PCI_DMA_API && INTEL_GTT_GEN > 2;

        ret = intel_gtt_setup_scratch_page();
        if (ret != 0) {
@@ -622,7 +632,7 @@ static int intel_gtt_init(void)
        pci_read_config_dword(intel_private.pcidev, I915_GMADDR,
                              &gma_addr);
-        intel_private.base.gma_bus_addr = (gma_addr & PCI_BASE_ADDRESS_MEM_MASK);
+        intel_private.gma_bus_addr = (gma_addr & PCI_BASE_ADDRESS_MEM_MASK);

        return 0;
 }
@@ -633,8 +643,7 @@ static int intel_fake_agp_fetch_size(void)
        unsigned int aper_size;
        int i;

-        aper_size = (intel_private.base.gtt_mappable_entries << PAGE_SHIFT)
-                / MB(1);
+        aper_size = (intel_private.gtt_mappable_entries << PAGE_SHIFT) / MB(1);

        for (i = 0; i < num_sizes; i++) {
                if (aper_size == intel_fake_agp_sizes[i].size) {
@@ -778,7 +787,7 @@ static int intel_fake_agp_configure(void)
                return -EIO;

        intel_private.clear_fake_agp = true;
-        agp_bridge->gart_bus_addr = intel_private.base.gma_bus_addr;
+        agp_bridge->gart_bus_addr = intel_private.gma_bus_addr;

        return 0;
 }
...@@ -840,12 +849,9 @@ static int intel_fake_agp_insert_entries(struct agp_memory *mem, ...@@ -840,12 +849,9 @@ static int intel_fake_agp_insert_entries(struct agp_memory *mem,
{ {
int ret = -EINVAL; int ret = -EINVAL;
if (intel_private.base.do_idle_maps)
return -ENODEV;
if (intel_private.clear_fake_agp) { if (intel_private.clear_fake_agp) {
int start = intel_private.base.stolen_size / PAGE_SIZE; int start = intel_private.stolen_size / PAGE_SIZE;
int end = intel_private.base.gtt_mappable_entries; int end = intel_private.gtt_mappable_entries;
intel_gtt_clear_range(start, end - start); intel_gtt_clear_range(start, end - start);
intel_private.clear_fake_agp = false; intel_private.clear_fake_agp = false;
} }
@@ -856,7 +862,7 @@ static int intel_fake_agp_insert_entries(struct agp_memory *mem,
        if (mem->page_count == 0)
                goto out;

-        if (pg_start + mem->page_count > intel_private.base.gtt_total_entries)
+        if (pg_start + mem->page_count > intel_private.gtt_total_entries)
                goto out_err;

        if (type != mem->type)
@@ -868,7 +874,7 @@ static int intel_fake_agp_insert_entries(struct agp_memory *mem,
        if (!mem->is_flushed)
                global_cache_flush();

-        if (intel_private.base.needs_dmar) {
+        if (intel_private.needs_dmar) {
                struct sg_table st;

                ret = intel_gtt_map_memory(mem->pages, mem->page_count, &st);
@@ -894,7 +900,7 @@ void intel_gtt_clear_range(unsigned int first_entry, unsigned int num_entries)
        unsigned int i;

        for (i = first_entry; i < (first_entry + num_entries); i++) {
-                intel_private.driver->write_entry(intel_private.base.scratch_page_dma,
+                intel_private.driver->write_entry(intel_private.scratch_page_dma,
                                                  i, 0);
        }
        readl(intel_private.gtt+i-1);
@@ -907,12 +913,9 @@ static int intel_fake_agp_remove_entries(struct agp_memory *mem,
        if (mem->page_count == 0)
                return 0;

-        if (intel_private.base.do_idle_maps)
-                return -ENODEV;
-
        intel_gtt_clear_range(pg_start, mem->page_count);

-        if (intel_private.base.needs_dmar) {
+        if (intel_private.needs_dmar) {
                intel_gtt_unmap_memory(mem->sg_list, mem->num_sg);
                mem->sg_list = NULL;
                mem->num_sg = 0;
@@ -1069,24 +1072,6 @@ static void i965_write_entry(dma_addr_t addr,
        writel(addr | pte_flags, intel_private.gtt + entry);
 }

-/* Certain Gen5 chipsets require require idling the GPU before
- * unmapping anything from the GTT when VT-d is enabled.
- */
-static inline int needs_idle_maps(void)
-{
-#ifdef CONFIG_INTEL_IOMMU
-        const unsigned short gpu_devid = intel_private.pcidev->device;
-
-        /* Query intel_iommu to see if we need the workaround. Presumably that
-         * was loaded first.
-         */
-        if ((gpu_devid == PCI_DEVICE_ID_INTEL_IRONLAKE_M_HB ||
-             gpu_devid == PCI_DEVICE_ID_INTEL_IRONLAKE_M_IG) &&
-             intel_iommu_gfx_mapped)
-                return 1;
-#endif
-        return 0;
-}
-
 static int i9xx_setup(void)
 {
@@ -1115,9 +1100,6 @@ static int i9xx_setup(void)
                break;
        }

-        if (needs_idle_maps())
-                intel_private.base.do_idle_maps = 1;
-
        intel_i9xx_setup_flush();

        return 0;
@@ -1389,9 +1371,10 @@ int intel_gmch_probe(struct pci_dev *bridge_pdev, struct pci_dev *gpu_pdev,
 }
 EXPORT_SYMBOL(intel_gmch_probe);

-struct intel_gtt *intel_gtt_get(void)
+void intel_gtt_get(size_t *gtt_total, size_t *stolen_size)
 {
-        return &intel_private.base;
+        *gtt_total = intel_private.gtt_total_entries << PAGE_SHIFT;
+        *stolen_size = intel_private.stolen_size;
 }
 EXPORT_SYMBOL(intel_gtt_get);
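
intel_gtt_get() now hands back plain sizes instead of a pointer to the gtt core's private state; a sketch of the new call pattern from a driver's point of view (variable names illustrative, only the function itself comes from this diff):

        size_t gtt_total, stolen_size;

        /* gtt_total comes back in bytes (entries << PAGE_SHIFT);
         * stolen_size is the BIOS-reserved graphics memory in bytes. */
        intel_gtt_get(&gtt_total, &stolen_size);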
...
@@ -1483,9 +1483,11 @@ add_detailed_modes(struct drm_connector *connector, struct edid *edid,
 #define VIDEO_BLOCK     0x02
 #define VENDOR_BLOCK    0x03
 #define SPEAKER_BLOCK   0x04
+#define VIDEO_CAPABILITY_BLOCK  0x07
 #define EDID_BASIC_AUDIO        (1 << 6)
 #define EDID_CEA_YCRCB444       (1 << 5)
 #define EDID_CEA_YCRCB422       (1 << 4)
+#define EDID_CEA_VCDB_QS        (1 << 6)

 /**
  * Search EDID for CEA extension block.
@@ -1901,6 +1903,37 @@ bool drm_detect_monitor_audio(struct edid *edid)
 }
 EXPORT_SYMBOL(drm_detect_monitor_audio);

+/**
+ * drm_rgb_quant_range_selectable - is RGB quantization range selectable?
+ *
+ * Check whether the monitor reports the RGB quantization range selection
+ * as supported. The AVI infoframe can then be used to inform the monitor
+ * which quantization range (full or limited) is used.
+ */
+bool drm_rgb_quant_range_selectable(struct edid *edid)
+{
+        u8 *edid_ext;
+        int i, start, end;
+
+        edid_ext = drm_find_cea_extension(edid);
+        if (!edid_ext)
+                return false;
+
+        if (cea_db_offsets(edid_ext, &start, &end))
+                return false;
+
+        for_each_cea_db(edid_ext, i, start, end) {
+                if (cea_db_tag(&edid_ext[i]) == VIDEO_CAPABILITY_BLOCK &&
+                    cea_db_payload_len(&edid_ext[i]) == 2) {
+                        DRM_DEBUG_KMS("CEA VCDB 0x%02x\n", edid_ext[i + 2]);
+                        return edid_ext[i + 2] & EDID_CEA_VCDB_QS;
+                }
+        }
+
+        return false;
+}
+EXPORT_SYMBOL(drm_rgb_quant_range_selectable);
+
 /**
  * drm_add_display_info - pull display info out if present
  * @edid: EDID data
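
A KMS driver would typically consult the new helper while building the AVI infoframe; a hedged sketch of that decision (the infoframe structure and constants here are stand-ins for illustration, not drm API):

        /* Illustrative only: choose the advertised RGB quantization range. */
        if (drm_rgb_quant_range_selectable(edid))
                avi.quant_range = limited_range ? QUANT_LIMITED : QUANT_FULL;
        else
                avi.quant_range = QUANT_DEFAULT; /* sink decides per CEA-861 */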
...
@@ -16,6 +16,7 @@ i915-y := i915_drv.o i915_dma.o i915_irq.o \
          i915_gem_tiling.o \
          i915_sysfs.o \
          i915_trace_points.o \
+          i915_ums.o \
          intel_display.o \
          intel_crt.o \
          intel_lvds.o \
...
@@ -258,8 +258,9 @@ static int i915_gem_object_info(struct seq_file *m, void* data)
        seq_printf(m, "%u fault mappable objects, %zu bytes\n",
                   count, size);

-        seq_printf(m, "%zu [%zu] gtt total\n",
-                   dev_priv->mm.gtt_total, dev_priv->mm.mappable_gtt_total);
+        seq_printf(m, "%zu [%lu] gtt total\n",
+                   dev_priv->gtt.total,
+                   dev_priv->gtt.mappable_end - dev_priv->gtt.start);

        mutex_unlock(&dev->struct_mutex);
@@ -813,11 +814,11 @@ static int i915_error_state_open(struct inode *inode, struct file *file)
        error_priv->dev = dev;

-        spin_lock_irqsave(&dev_priv->error_lock, flags);
-        error_priv->error = dev_priv->first_error;
+        spin_lock_irqsave(&dev_priv->gpu_error.lock, flags);
+        error_priv->error = dev_priv->gpu_error.first_error;
        if (error_priv->error)
                kref_get(&error_priv->error->ref);
-        spin_unlock_irqrestore(&dev_priv->error_lock, flags);
+        spin_unlock_irqrestore(&dev_priv->gpu_error.lock, flags);

        return single_open(file, i915_error_state, error_priv);
 }
@@ -956,7 +957,7 @@ static int i915_cur_delayinfo(struct seq_file *m, void *unused)
                u32 gt_perf_status = I915_READ(GEN6_GT_PERF_STATUS);
                u32 rp_state_limits = I915_READ(GEN6_RP_STATE_LIMITS);
                u32 rp_state_cap = I915_READ(GEN6_RP_STATE_CAP);
-                u32 rpstat;
+                u32 rpstat, cagf;
                u32 rpupei, rpcurup, rpprevup;
                u32 rpdownei, rpcurdown, rpprevdown;
                int max_freq;
@@ -975,6 +976,11 @@ static int i915_cur_delayinfo(struct seq_file *m, void *unused)
                rpdownei = I915_READ(GEN6_RP_CUR_DOWN_EI);
                rpcurdown = I915_READ(GEN6_RP_CUR_DOWN);
                rpprevdown = I915_READ(GEN6_RP_PREV_DOWN);
+                if (IS_HASWELL(dev))
+                        cagf = (rpstat & HSW_CAGF_MASK) >> HSW_CAGF_SHIFT;
+                else
+                        cagf = (rpstat & GEN6_CAGF_MASK) >> GEN6_CAGF_SHIFT;
+                cagf *= GT_FREQUENCY_MULTIPLIER;

                gen6_gt_force_wake_put(dev_priv);
                mutex_unlock(&dev->struct_mutex);
@@ -987,8 +993,7 @@ static int i915_cur_delayinfo(struct seq_file *m, void *unused)
                           gt_perf_status & 0xff);
                seq_printf(m, "Render p-state limit: %d\n",
                           rp_state_limits & 0xff);
-                seq_printf(m, "CAGF: %dMHz\n", ((rpstat & GEN6_CAGF_MASK) >>
-                                                GEN6_CAGF_SHIFT) * GT_FREQUENCY_MULTIPLIER);
+                seq_printf(m, "CAGF: %dMHz\n", cagf);
                seq_printf(m, "RP CUR UP EI: %dus\n", rpupei &
                           GEN6_CURICONT_MASK);
                seq_printf(m, "RP CUR UP: %dus\n", rpcurup &
@@ -1674,7 +1679,7 @@ i915_wedged_read(struct file *filp,
        len = snprintf(buf, sizeof(buf),
                       "wedged :  %d\n",
-                       atomic_read(&dev_priv->mm.wedged));
+                       atomic_read(&dev_priv->gpu_error.reset_counter));

        if (len > sizeof(buf))
                len = sizeof(buf);
@@ -1729,7 +1734,7 @@ i915_ring_stop_read(struct file *filp,
        int len;

        len = snprintf(buf, sizeof(buf),
-                       "0x%08x\n", dev_priv->stop_rings);
+                       "0x%08x\n", dev_priv->gpu_error.stop_rings);

        if (len > sizeof(buf))
                len = sizeof(buf);
@@ -1765,7 +1770,7 @@ i915_ring_stop_write(struct file *filp,
        if (ret)
                return ret;

-        dev_priv->stop_rings = val;
+        dev_priv->gpu_error.stop_rings = val;
        mutex_unlock(&dev->struct_mutex);

        return cnt;
@@ -1779,6 +1784,102 @@ static const struct file_operations i915_ring_stop_fops = {
        .llseek = default_llseek,
 };

+#define DROP_UNBOUND 0x1
+#define DROP_BOUND 0x2
+#define DROP_RETIRE 0x4
+#define DROP_ACTIVE 0x8
+#define DROP_ALL (DROP_UNBOUND | \
+                  DROP_BOUND | \
+                  DROP_RETIRE | \
+                  DROP_ACTIVE)
+
+static ssize_t
+i915_drop_caches_read(struct file *filp,
+                      char __user *ubuf,
+                      size_t max,
+                      loff_t *ppos)
+{
+        char buf[20];
+        int len;
+
+        len = snprintf(buf, sizeof(buf), "0x%08x\n", DROP_ALL);
+        if (len > sizeof(buf))
+                len = sizeof(buf);
+
+        return simple_read_from_buffer(ubuf, max, ppos, buf, len);
+}
+
+static ssize_t
+i915_drop_caches_write(struct file *filp,
+                       const char __user *ubuf,
+                       size_t cnt,
+                       loff_t *ppos)
+{
+        struct drm_device *dev = filp->private_data;
+        struct drm_i915_private *dev_priv = dev->dev_private;
+        struct drm_i915_gem_object *obj, *next;
+        char buf[20];
+        int val = 0, ret;
+
+        if (cnt > 0) {
+                if (cnt > sizeof(buf) - 1)
+                        return -EINVAL;
+
+                if (copy_from_user(buf, ubuf, cnt))
+                        return -EFAULT;
+                buf[cnt] = 0;
+
+                val = simple_strtoul(buf, NULL, 0);
+        }
+
+        DRM_DEBUG_DRIVER("Dropping caches: 0x%08x\n", val);
+
+        /* No need to check and wait for gpu resets, only libdrm auto-restarts
+         * on ioctls on -EAGAIN. */
+        ret = mutex_lock_interruptible(&dev->struct_mutex);
+        if (ret)
+                return ret;
+
+        if (val & DROP_ACTIVE) {
+                ret = i915_gpu_idle(dev);
+                if (ret)
+                        goto unlock;
+        }
+
+        if (val & (DROP_RETIRE | DROP_ACTIVE))
+                i915_gem_retire_requests(dev);
+
+        if (val & DROP_BOUND) {
+                list_for_each_entry_safe(obj, next, &dev_priv->mm.inactive_list, mm_list)
+                        if (obj->pin_count == 0) {
+                                ret = i915_gem_object_unbind(obj);
+                                if (ret)
+                                        goto unlock;
+                        }
+        }
+
+        if (val & DROP_UNBOUND) {
+                list_for_each_entry_safe(obj, next, &dev_priv->mm.unbound_list, gtt_list)
+                        if (obj->pages_pin_count == 0) {
+                                ret = i915_gem_object_put_pages(obj);
+                                if (ret)
+                                        goto unlock;
+                        }
+        }
+
+unlock:
+        mutex_unlock(&dev->struct_mutex);
+
+        return ret ?: cnt;
+}
+
+static const struct file_operations i915_drop_caches_fops = {
+        .owner = THIS_MODULE,
+        .open = simple_open,
+        .read = i915_drop_caches_read,
+        .write = i915_drop_caches_write,
+        .llseek = default_llseek,
+};
+
 static ssize_t
 i915_max_freq_read(struct file *filp,
                    char __user *ubuf,
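
The new debugfs file takes a bitmask of the DROP_* flags defined above; a small userspace poke, as a sketch (the path assumes DRM minor 0 and debugfs mounted in the usual place):

#include <fcntl.h>
#include <unistd.h>

static void i915_drop_all_caches(void)
{
        /* "0xf" == DROP_ALL from the patch above. */
        int fd = open("/sys/kernel/debug/dri/0/i915_gem_drop_caches", O_WRONLY);

        if (fd < 0)
                return;
        write(fd, "0xf", 3);
        close(fd);
}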
@@ -2175,6 +2276,12 @@ int i915_debugfs_init(struct drm_minor *minor)
        if (ret)
                return ret;

+        ret = i915_debugfs_create(minor->debugfs_root, minor,
+                                  "i915_gem_drop_caches",
+                                  &i915_drop_caches_fops);
+        if (ret)
+                return ret;
+
        ret = i915_debugfs_create(minor->debugfs_root, minor,
                                  "i915_error_state",
                                  &i915_error_state_fops);
@@ -2206,6 +2313,8 @@ void i915_debugfs_cleanup(struct drm_minor *minor)
                                 1, minor);
        drm_debugfs_remove_files((struct drm_info_list *) &i915_cache_sharing_fops,
                                 1, minor);
+        drm_debugfs_remove_files((struct drm_info_list *) &i915_drop_caches_fops,
+                                 1, minor);
        drm_debugfs_remove_files((struct drm_info_list *) &i915_ring_stop_fops,
                                 1, minor);
        drm_debugfs_remove_files((struct drm_info_list *) &i915_error_state_fops,
...
@@ -992,6 +992,12 @@ static int i915_getparam(struct drm_device *dev, void *data,
        case I915_PARAM_HAS_PINNED_BATCHES:
                value = 1;
                break;
+        case I915_PARAM_HAS_EXEC_NO_RELOC:
+                value = 1;
+                break;
+        case I915_PARAM_HAS_EXEC_HANDLE_LUT:
+                value = 1;
+                break;
        default:
                DRM_DEBUG_DRIVER("Unknown parameter %d\n",
                                 param->param);
@@ -1070,7 +1076,7 @@ static int i915_set_status_page(struct drm_device *dev, void *data,
        ring->status_page.gfx_addr = hws->addr & (0x1ffff<<12);

        dev_priv->dri1.gfx_hws_cpu_addr =
-                ioremap_wc(dev_priv->mm.gtt_base_addr + hws->addr, 4096);
+                ioremap_wc(dev_priv->gtt.mappable_base + hws->addr, 4096);
        if (dev_priv->dri1.gfx_hws_cpu_addr == NULL) {
                i915_dma_cleanup(dev);
                ring->status_page.gfx_addr = 0;
@@ -1420,9 +1426,9 @@ static void i915_kick_out_firmware_fb(struct drm_i915_private *dev_priv)
        if (!ap)
                return;

-        ap->ranges[0].base = dev_priv->mm.gtt->gma_bus_addr;
-        ap->ranges[0].size =
-                dev_priv->mm.gtt->gtt_mappable_entries << PAGE_SHIFT;
+        ap->ranges[0].base = dev_priv->gtt.mappable_base;
+        ap->ranges[0].size = dev_priv->gtt.mappable_end - dev_priv->gtt.start;
+
        primary =
                pdev->resource[PCI_ROM_RESOURCE].flags & IORESOURCE_ROM_SHADOW;
@@ -1536,18 +1542,17 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags)
                goto put_gmch;
        }

-        aperture_size = dev_priv->mm.gtt->gtt_mappable_entries << PAGE_SHIFT;
-        dev_priv->mm.gtt_base_addr = dev_priv->mm.gtt->gma_bus_addr;
+        aperture_size = dev_priv->gtt.mappable_end;

-        dev_priv->mm.gtt_mapping =
-                io_mapping_create_wc(dev_priv->mm.gtt_base_addr,
+        dev_priv->gtt.mappable =
+                io_mapping_create_wc(dev_priv->gtt.mappable_base,
                                     aperture_size);
-        if (dev_priv->mm.gtt_mapping == NULL) {
+        if (dev_priv->gtt.mappable == NULL) {
                ret = -EIO;
                goto out_rmmap;
        }

-        i915_mtrr_setup(dev_priv, dev_priv->mm.gtt_base_addr,
+        i915_mtrr_setup(dev_priv, dev_priv->gtt.mappable_base,
                        aperture_size);

        /* The i915 workqueue is primarily used for batched retirement of
@@ -1600,7 +1605,7 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags)
                pci_enable_msi(dev->pdev);

        spin_lock_init(&dev_priv->irq_lock);
-        spin_lock_init(&dev_priv->error_lock);
+        spin_lock_init(&dev_priv->gpu_error.lock);
        spin_lock_init(&dev_priv->rps.lock);
        mutex_init(&dev_priv->dpio_lock);
@@ -1652,15 +1657,15 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags)
 out_mtrrfree:
        if (dev_priv->mm.gtt_mtrr >= 0) {
                mtrr_del(dev_priv->mm.gtt_mtrr,
-                         dev_priv->mm.gtt_base_addr,
+                         dev_priv->gtt.mappable_base,
                         aperture_size);
                dev_priv->mm.gtt_mtrr = -1;
        }
-        io_mapping_free(dev_priv->mm.gtt_mapping);
+        io_mapping_free(dev_priv->gtt.mappable);
 out_rmmap:
        pci_iounmap(dev->pdev, dev_priv->regs);
 put_gmch:
-        i915_gem_gtt_fini(dev);
+        dev_priv->gtt.gtt_remove(dev);
 put_bridge:
        pci_dev_put(dev_priv->bridge_dev);
 free_priv:
@@ -1690,11 +1695,11 @@ int i915_driver_unload(struct drm_device *dev)
        /* Cancel the retire work handler, which should be idle now. */
        cancel_delayed_work_sync(&dev_priv->mm.retire_work);

-        io_mapping_free(dev_priv->mm.gtt_mapping);
+        io_mapping_free(dev_priv->gtt.mappable);
        if (dev_priv->mm.gtt_mtrr >= 0) {
                mtrr_del(dev_priv->mm.gtt_mtrr,
-                         dev_priv->mm.gtt_base_addr,
-                         dev_priv->mm.gtt->gtt_mappable_entries * PAGE_SIZE);
+                         dev_priv->gtt.mappable_base,
+                         dev_priv->gtt.mappable_end);
                dev_priv->mm.gtt_mtrr = -1;
        }
@@ -1720,8 +1725,8 @@ int i915_driver_unload(struct drm_device *dev)
        }

        /* Free error state after interrupts are fully disabled. */
-        del_timer_sync(&dev_priv->hangcheck_timer);
-        cancel_work_sync(&dev_priv->error_work);
+        del_timer_sync(&dev_priv->gpu_error.hangcheck_timer);
+        cancel_work_sync(&dev_priv->gpu_error.work);
        i915_destroy_error_state(dev);

        if (dev->pdev->msi_enabled)
...
@@ -276,6 +276,7 @@ static const struct intel_device_info intel_valleyview_m_info = {
        .has_bsd_ring = 1,
        .has_blt_ring = 1,
        .is_valleyview = 1,
+        .display_mmio_offset = VLV_DISPLAY_BASE,
 };

 static const struct intel_device_info intel_valleyview_d_info = {
@@ -285,6 +286,7 @@ static const struct intel_device_info intel_valleyview_d_info = {
        .has_bsd_ring = 1,
        .has_blt_ring = 1,
        .is_valleyview = 1,
+        .display_mmio_offset = VLV_DISPLAY_BASE,
 };

 static const struct intel_device_info intel_haswell_d_info = {
...@@ -468,6 +470,8 @@ static int i915_drm_freeze(struct drm_device *dev) ...@@ -468,6 +470,8 @@ static int i915_drm_freeze(struct drm_device *dev)
{ {
struct drm_i915_private *dev_priv = dev->dev_private; struct drm_i915_private *dev_priv = dev->dev_private;
intel_set_power_well(dev, true);
drm_kms_helper_poll_disable(dev); drm_kms_helper_poll_disable(dev);
pci_save_state(dev->pdev); pci_save_state(dev->pdev);
@@ -779,9 +783,9 @@ int intel_gpu_reset(struct drm_device *dev)
        }

        /* Also reset the gpu hangman. */
-        if (dev_priv->stop_rings) {
+        if (dev_priv->gpu_error.stop_rings) {
                DRM_DEBUG("Simulated gpu hang, resetting stop_rings\n");
-                dev_priv->stop_rings = 0;
+                dev_priv->gpu_error.stop_rings = 0;
                if (ret == -ENODEV) {
                        DRM_ERROR("Reset not implemented, but ignoring "
                                  "error for simulated gpu hangs\n");
@@ -820,12 +824,12 @@ int i915_reset(struct drm_device *dev)
        i915_gem_reset(dev);

        ret = -ENODEV;
-        if (get_seconds() - dev_priv->last_gpu_reset < 5)
+        if (get_seconds() - dev_priv->gpu_error.last_reset < 5)
                DRM_ERROR("GPU hanging too fast, declaring wedged!\n");
        else
                ret = intel_gpu_reset(dev);

-        dev_priv->last_gpu_reset = get_seconds();
+        dev_priv->gpu_error.last_reset = get_seconds();
        if (ret) {
                DRM_ERROR("Failed to reset chip.\n");
                mutex_unlock(&dev->struct_mutex);
@@ -1115,102 +1119,6 @@ MODULE_LICENSE("GPL and additional rights");
        ((HAS_FORCE_WAKE((dev_priv)->dev)) && \
         ((reg) < 0x40000) &&            \
         ((reg) != FORCEWAKE))

-static bool IS_DISPLAYREG(u32 reg)
-{
-        /*
-         * This should make it easier to transition modules over to the
-         * new register block scheme, since we can do it incrementally.
-         */
-        if (reg >= VLV_DISPLAY_BASE)
-                return false;
-
-        if (reg >= RENDER_RING_BASE &&
-            reg < RENDER_RING_BASE + 0xff)
-                return false;
-        if (reg >= GEN6_BSD_RING_BASE &&
-            reg < GEN6_BSD_RING_BASE + 0xff)
-                return false;
-        if (reg >= BLT_RING_BASE &&
-            reg < BLT_RING_BASE + 0xff)
-                return false;
-
-        if (reg == PGTBL_ER)
-                return false;
-
-        if (reg >= IPEIR_I965 &&
-            reg < HWSTAM)
-                return false;
-
-        if (reg == MI_MODE)
-                return false;
-
-        if (reg == GFX_MODE_GEN7)
-                return false;
-
-        if (reg == RENDER_HWS_PGA_GEN7 ||
-            reg == BSD_HWS_PGA_GEN7 ||
-            reg == BLT_HWS_PGA_GEN7)
-                return false;
-
-        if (reg == GEN6_BSD_SLEEP_PSMI_CONTROL ||
-            reg == GEN6_BSD_RNCID)
-                return false;
-
-        if (reg == GEN6_BLITTER_ECOSKPD)
-                return false;
-
-        if (reg >= 0x4000c &&
-            reg <= 0x4002c)
-                return false;
-
-        if (reg >= 0x4f000 &&
-            reg <= 0x4f08f)
-                return false;
-
-        if (reg >= 0x4f100 &&
-            reg <= 0x4f11f)
-                return false;
-
-        if (reg >= VLV_MASTER_IER &&
-            reg <= GEN6_PMIER)
-                return false;
-
-        if (reg >= FENCE_REG_SANDYBRIDGE_0 &&
-            reg < (FENCE_REG_SANDYBRIDGE_0 + (16*8)))
-                return false;
-
-        if (reg >= VLV_IIR_RW &&
-            reg <= VLV_ISR)
-                return false;
-
-        if (reg == FORCEWAKE_VLV ||
-            reg == FORCEWAKE_ACK_VLV)
-                return false;
-
-        if (reg == GEN6_GDRST)
-                return false;
-
-        switch (reg) {
-        case _3D_CHICKEN3:
-        case IVB_CHICKEN3:
-        case GEN7_COMMON_SLICE_CHICKEN1:
-        case GEN7_L3CNTLREG1:
-        case GEN7_L3_CHICKEN_MODE_REGISTER:
-        case GEN7_ROW_CHICKEN2:
-        case GEN7_L3SQCREG4:
-        case GEN7_SQ_CHICKEN_MBCUNIT_CONFIG:
-        case GEN7_HALF_SLICE_CHICKEN1:
-        case GEN6_MBCTL:
-        case GEN6_UCGCTL2:
-                return false;
-        default:
-                break;
-        }
-
-        return true;
-}
-
 static void
 ilk_dummy_write(struct drm_i915_private *dev_priv)
 {
@@ -1234,8 +1142,6 @@ u##x i915_read##x(struct drm_i915_private *dev_priv, u32 reg) { \
                if (dev_priv->forcewake_count == 0) \
                        dev_priv->gt.force_wake_put(dev_priv); \
                spin_unlock_irqrestore(&dev_priv->gt_lock, irqflags); \
-        } else if (IS_VALLEYVIEW(dev_priv->dev) && IS_DISPLAYREG(reg)) { \
-                val = read##y(dev_priv->regs + reg + 0x180000); \
        } else { \
                val = read##y(dev_priv->regs + reg); \
        } \
@@ -1262,11 +1168,7 @@ void i915_write##x(struct drm_i915_private *dev_priv, u32 reg, u##x val) { \
                DRM_ERROR("Unknown unclaimed register before writing to %x\n", reg); \
                I915_WRITE_NOTRACE(GEN7_ERR_INT, ERR_INT_MMIO_UNCLAIMED); \
        } \
-        if (IS_VALLEYVIEW(dev_priv->dev) && IS_DISPLAYREG(reg)) { \
-                write##y(val, dev_priv->regs + reg + 0x180000); \
-        } else { \
-                write##y(val, dev_priv->regs + reg); \
-        } \
+        write##y(val, dev_priv->regs + reg); \
        if (unlikely(__fifo_ret)) { \
                gen6_gt_check_fifodbg(dev_priv); \
        } \
...
@@ -80,7 +80,7 @@ i915_gem_evict_something(struct drm_device *dev, int min_size,
        if (mappable)
                drm_mm_init_scan_with_range(&dev_priv->mm.gtt_space,
                                            min_size, alignment, cache_level,
-                                            0, dev_priv->mm.gtt_mappable_end);
+                                            0, dev_priv->gtt.mappable_end);
        else
                drm_mm_init_scan(&dev_priv->mm.gtt_space,
                                 min_size, alignment, cache_level);
...
@@ -187,11 +187,11 @@ int i915_gem_init_stolen(struct drm_device *dev)
        if (dev_priv->mm.stolen_base == 0)
                return 0;

-        DRM_DEBUG_KMS("found %d bytes of stolen memory at %08lx\n",
-                      dev_priv->mm.gtt->stolen_size, dev_priv->mm.stolen_base);
+        DRM_DEBUG_KMS("found %zd bytes of stolen memory at %08lx\n",
+                      dev_priv->gtt.stolen_size, dev_priv->mm.stolen_base);

        /* Basic memrange allocator for stolen space */
-        drm_mm_init(&dev_priv->mm.stolen, 0, dev_priv->mm.gtt->stolen_size);
+        drm_mm_init(&dev_priv->mm.stolen, 0, dev_priv->gtt.stolen_size);

        return 0;
 }
@@ -205,7 +205,7 @@ i915_pages_create_for_stolen(struct drm_device *dev,
        struct scatterlist *sg;

        DRM_DEBUG_DRIVER("offset=0x%x, size=%d\n", offset, size);
-        BUG_ON(offset > dev_priv->mm.gtt->stolen_size - size);
+        BUG_ON(offset > dev_priv->gtt.stolen_size - size);

        /* We hide that we have no struct page backing our stolen object
         * by wrapping the contiguous physical allocation with a fake
...
@@ -272,18 +272,7 @@ i915_gem_object_fence_ok(struct drm_i915_gem_object *obj, int tiling_mode)
                        return false;
        }

-        /*
-         * Previous chips need to be aligned to the size of the smallest
-         * fence register that can contain the object.
-         */
-        if (INTEL_INFO(obj->base.dev)->gen == 3)
-                size = 1024*1024;
-        else
-                size = 512*1024;
-
-        while (size < obj->base.size)
-                size <<= 1;
-
+        size = i915_gem_get_gtt_size(obj->base.dev, obj->base.size, tiling_mode);
        if (obj->gtt_space->size != size)
                return false;

@@ -368,15 +357,15 @@ i915_gem_set_tiling(struct drm_device *dev, void *data,
                obj->map_and_fenceable =
                        obj->gtt_space == NULL ||
-                        (obj->gtt_offset + obj->base.size <= dev_priv->mm.gtt_mappable_end &&
+                        (obj->gtt_offset + obj->base.size <= dev_priv->gtt.mappable_end &&
                         i915_gem_object_fence_ok(obj, args->tiling_mode));

                /* Rebind if we need a change of alignment */
                if (!obj->map_and_fenceable) {
                        u32 unfenced_alignment =
-                                i915_gem_get_unfenced_gtt_alignment(dev,
-                                                                    obj->base.size,
-                                                                    args->tiling_mode);
+                                i915_gem_get_gtt_alignment(dev, obj->base.size,
+                                                           args->tiling_mode,
+                                                           false);
                        if (obj->gtt_offset & (unfenced_alignment - 1))
                                ret = i915_gem_object_unbind(obj);
                }
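
For reference, the removed open-coded loop computed the smallest power-of-two fence region covering the object, with a 1MB floor on gen3 (512KB on gen2); i915_gem_get_gtt_size() now centralizes that computation. A standalone rendering of the old logic (function name illustrative):

static u32 legacy_fence_size(int gen, u32 obj_size)
{
        u32 size = (gen == 3) ? 1024*1024 : 512*1024;

        while (size < obj_size)
                size <<= 1;     /* e.g. gen3, 1.5MB object -> 2MB fence */
        return size;
}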
...
@@ -356,8 +356,8 @@ static void notify_ring(struct drm_device *dev,
        wake_up_all(&ring->irq_queue);
        if (i915_enable_hangcheck) {
-                dev_priv->hangcheck_count = 0;
-                mod_timer(&dev_priv->hangcheck_timer,
+                dev_priv->gpu_error.hangcheck_count = 0;
+                mod_timer(&dev_priv->gpu_error.hangcheck_timer,
                          round_jiffies_up(jiffies + DRM_I915_HANGCHECK_JIFFIES));
        }
 }
@@ -862,23 +862,60 @@ static irqreturn_t ironlake_irq_handler(int irq, void *arg)
  */
 static void i915_error_work_func(struct work_struct *work)
 {
-        drm_i915_private_t *dev_priv = container_of(work, drm_i915_private_t,
-                                                    error_work);
+        struct i915_gpu_error *error = container_of(work, struct i915_gpu_error,
+                                                    work);
+        drm_i915_private_t *dev_priv = container_of(error, drm_i915_private_t,
+                                                    gpu_error);
        struct drm_device *dev = dev_priv->dev;
+        struct intel_ring_buffer *ring;
        char *error_event[] = { "ERROR=1", NULL };
        char *reset_event[] = { "RESET=1", NULL };
        char *reset_done_event[] = { "ERROR=0", NULL };
+        int i, ret;

        kobject_uevent_env(&dev->primary->kdev.kobj, KOBJ_CHANGE, error_event);

-        if (atomic_read(&dev_priv->mm.wedged)) {
+        /*
+         * Note that there's only one work item which does gpu resets, so we
+         * need not worry about concurrent gpu resets potentially incrementing
+         * error->reset_counter twice. We only need to take care of another
+         * racing irq/hangcheck declaring the gpu dead for a second time. A
+         * quick check for that is good enough: schedule_work ensures the
+         * correct ordering between hang detection and this work item, and since
+         * the reset in-progress bit is only ever set by code outside of this
+         * work we don't need to worry about any other races.
+         */
+        if (i915_reset_in_progress(error) && !i915_terminally_wedged(error)) {
                DRM_DEBUG_DRIVER("resetting chip\n");
-                kobject_uevent_env(&dev->primary->kdev.kobj, KOBJ_CHANGE, reset_event);
-                if (!i915_reset(dev)) {
-                        atomic_set(&dev_priv->mm.wedged, 0);
-                        kobject_uevent_env(&dev->primary->kdev.kobj, KOBJ_CHANGE, reset_done_event);
+                kobject_uevent_env(&dev->primary->kdev.kobj, KOBJ_CHANGE,
+                                   reset_event);
+
+                ret = i915_reset(dev);
+
+                if (ret == 0) {
+                        /*
+                         * After all the gem state is reset, increment the reset
+                         * counter and wake up everyone waiting for the reset to
+                         * complete.
+                         *
+                         * Since unlock operations are a one-sided barrier only,
+                         * we need to insert a barrier here to order any seqno
+                         * updates before
+                         * the counter increment.
+                         */
+                        smp_mb__before_atomic_inc();
+                        atomic_inc(&dev_priv->gpu_error.reset_counter);
+
+                        kobject_uevent_env(&dev->primary->kdev.kobj,
+                                           KOBJ_CHANGE, reset_done_event);
+                } else {
+                        atomic_set(&error->reset_counter, I915_WEDGED);
                }
-                complete_all(&dev_priv->error_completion);
+
+                for_each_ring(ring, dev_priv, i)
+                        wake_up_all(&ring->irq_queue);
+
+                wake_up_all(&dev_priv->gpu_error.reset_queue);
        }
 }
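
The i915_reset_in_progress()/i915_terminally_wedged() checks above encode the whole hang state in reset_counter: the low bit flags a reset in progress, and the all-ones value marks a terminally wedged GPU. Roughly, as a sketch reconstructed from the flag usage in this series (see i915_drv.h in the tree for the authoritative definitions):

#define I915_RESET_IN_PROGRESS_FLAG     1
#define I915_WEDGED                     0xffffffff

static inline bool i915_reset_in_progress(struct i915_gpu_error *error)
{
        return atomic_read(&error->reset_counter) & I915_RESET_IN_PROGRESS_FLAG;
}

static inline bool i915_terminally_wedged(struct i915_gpu_error *error)
{
        return atomic_read(&error->reset_counter) == I915_WEDGED;
}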
@@ -939,7 +976,7 @@ i915_error_object_create(struct drm_i915_private *dev_priv,
                goto unwind;

        local_irq_save(flags);
-        if (reloc_offset < dev_priv->mm.gtt_mappable_end &&
+        if (reloc_offset < dev_priv->gtt.mappable_end &&
            src->has_global_gtt_mapping) {
                void __iomem *s;
@@ -948,7 +985,7 @@ i915_error_object_create(struct drm_i915_private *dev_priv,
                 * captures what the GPU read.
                 */

-                s = io_mapping_map_atomic_wc(dev_priv->mm.gtt_mapping,
+                s = io_mapping_map_atomic_wc(dev_priv->gtt.mappable,
                                             reloc_offset);
                memcpy_fromio(d, s, PAGE_SIZE);
                io_mapping_unmap_atomic(s);
@@ -1255,9 +1292,9 @@ static void i915_capture_error_state(struct drm_device *dev)
        unsigned long flags;
        int i, pipe;

-        spin_lock_irqsave(&dev_priv->error_lock, flags);
-        error = dev_priv->first_error;
-        spin_unlock_irqrestore(&dev_priv->error_lock, flags);
+        spin_lock_irqsave(&dev_priv->gpu_error.lock, flags);
+        error = dev_priv->gpu_error.first_error;
+        spin_unlock_irqrestore(&dev_priv->gpu_error.lock, flags);
        if (error)
                return;
@@ -1268,7 +1305,8 @@ static void i915_capture_error_state(struct drm_device *dev)
                return;
        }

-        DRM_INFO("capturing error event; look for more information in /debug/dri/%d/i915_error_state\n",
+        DRM_INFO("capturing error event; look for more information in "
+                 "/sys/kernel/debug/dri/%d/i915_error_state\n",
                 dev->primary->index);

        kref_init(&error->ref);
error->overlay = intel_overlay_capture_error_state(dev); error->overlay = intel_overlay_capture_error_state(dev);
error->display = intel_display_capture_error_state(dev); error->display = intel_display_capture_error_state(dev);
spin_lock_irqsave(&dev_priv->error_lock, flags); spin_lock_irqsave(&dev_priv->gpu_error.lock, flags);
if (dev_priv->first_error == NULL) { if (dev_priv->gpu_error.first_error == NULL) {
dev_priv->first_error = error; dev_priv->gpu_error.first_error = error;
error = NULL; error = NULL;
} }
spin_unlock_irqrestore(&dev_priv->error_lock, flags); spin_unlock_irqrestore(&dev_priv->gpu_error.lock, flags);
if (error) if (error)
i915_error_state_free(&error->ref); i915_error_state_free(&error->ref);
@@ -1358,10 +1396,10 @@ void i915_destroy_error_state(struct drm_device *dev)
        struct drm_i915_error_state *error;
        unsigned long flags;

-        spin_lock_irqsave(&dev_priv->error_lock, flags);
-        error = dev_priv->first_error;
-        dev_priv->first_error = NULL;
-        spin_unlock_irqrestore(&dev_priv->error_lock, flags);
+        spin_lock_irqsave(&dev_priv->gpu_error.lock, flags);
+        error = dev_priv->gpu_error.first_error;
+        dev_priv->gpu_error.first_error = NULL;
+        spin_unlock_irqrestore(&dev_priv->gpu_error.lock, flags);

        if (error)
                kref_put(&error->ref, i915_error_state_free);
@@ -1482,17 +1520,18 @@ void i915_handle_error(struct drm_device *dev, bool wedged)
        i915_report_and_clear_eir(dev);

        if (wedged) {
-                INIT_COMPLETION(dev_priv->error_completion);
-                atomic_set(&dev_priv->mm.wedged, 1);
+                atomic_set_mask(I915_RESET_IN_PROGRESS_FLAG,
+                                &dev_priv->gpu_error.reset_counter);

                /*
-                 * Wakeup waiting processes so they don't hang
+                 * Wakeup waiting processes so that the reset work item
+                 * doesn't deadlock trying to grab various locks.
                 */
                for_each_ring(ring, dev_priv, i)
                        wake_up_all(&ring->irq_queue);
        }

-        queue_work(dev_priv->wq, &dev_priv->error_work);
+        queue_work(dev_priv->wq, &dev_priv->gpu_error.work);
 }
 static void i915_pageflip_stall_check(struct drm_device *dev, int pipe)
@@ -1723,7 +1762,7 @@ static bool i915_hangcheck_hung(struct drm_device *dev)
 {
        drm_i915_private_t *dev_priv = dev->dev_private;

-        if (dev_priv->hangcheck_count++ > 1) {
+        if (dev_priv->gpu_error.hangcheck_count++ > 1) {
                bool hung = true;

                DRM_ERROR("Hangcheck timer elapsed... GPU hung\n");
@@ -1782,25 +1821,29 @@ void i915_hangcheck_elapsed(unsigned long data)
                        goto repeat;
                }

-                dev_priv->hangcheck_count = 0;
+                dev_priv->gpu_error.hangcheck_count = 0;
                return;
        }

        i915_get_extra_instdone(dev, instdone);
-        if (memcmp(dev_priv->last_acthd, acthd, sizeof(acthd)) == 0 &&
-            memcmp(dev_priv->prev_instdone, instdone, sizeof(instdone)) == 0) {
+        if (memcmp(dev_priv->gpu_error.last_acthd, acthd,
+                   sizeof(acthd)) == 0 &&
+            memcmp(dev_priv->gpu_error.prev_instdone, instdone,
+                   sizeof(instdone)) == 0) {
                if (i915_hangcheck_hung(dev))
                        return;
        } else {
-                dev_priv->hangcheck_count = 0;
+                dev_priv->gpu_error.hangcheck_count = 0;

-                memcpy(dev_priv->last_acthd, acthd, sizeof(acthd));
-                memcpy(dev_priv->prev_instdone, instdone, sizeof(instdone));
+                memcpy(dev_priv->gpu_error.last_acthd, acthd,
+                       sizeof(acthd));
+                memcpy(dev_priv->gpu_error.prev_instdone, instdone,
+                       sizeof(instdone));
        }

 repeat:
        /* Reset timer case chip hangs without another request being added */
-        mod_timer(&dev_priv->hangcheck_timer,
+        mod_timer(&dev_priv->gpu_error.hangcheck_timer,
                  round_jiffies_up(jiffies + DRM_I915_HANGCHECK_JIFFIES));
 }
@@ -1892,6 +1935,7 @@ static int ironlake_irq_postinstall(struct drm_device *dev)
                           DE_AUX_CHANNEL_A;
        u32 render_irqs;
        u32 hotplug_mask;
+        u32 pch_irq_mask;

        dev_priv->irq_mask = ~display_mask;
@@ -1935,10 +1979,10 @@ static int ironlake_irq_postinstall(struct drm_device *dev)
                                SDE_AUX_MASK);
        }

-        dev_priv->pch_irq_mask = ~hotplug_mask;
+        pch_irq_mask = ~hotplug_mask;

        I915_WRITE(SDEIIR, I915_READ(SDEIIR));
-        I915_WRITE(SDEIMR, dev_priv->pch_irq_mask);
+        I915_WRITE(SDEIMR, pch_irq_mask);
        I915_WRITE(SDEIER, hotplug_mask);
        POSTING_READ(SDEIER);
@@ -1966,6 +2010,7 @@ static int ivybridge_irq_postinstall(struct drm_device *dev)
                DE_AUX_CHANNEL_A_IVB;
        u32 render_irqs;
        u32 hotplug_mask;
+        u32 pch_irq_mask;

        dev_priv->irq_mask = ~display_mask;
@@ -1995,10 +2040,10 @@ static int ivybridge_irq_postinstall(struct drm_device *dev)
                        SDE_PORTD_HOTPLUG_CPT |
                        SDE_GMBUS_CPT |
                        SDE_AUX_MASK_CPT);
-        dev_priv->pch_irq_mask = ~hotplug_mask;
+        pch_irq_mask = ~hotplug_mask;

        I915_WRITE(SDEIIR, I915_READ(SDEIIR));
-        I915_WRITE(SDEIMR, dev_priv->pch_irq_mask);
+        I915_WRITE(SDEIMR, pch_irq_mask);
        I915_WRITE(SDEIER, hotplug_mask);
        POSTING_READ(SDEIER);
@@ -2767,11 +2812,12 @@ void intel_irq_init(struct drm_device *dev)
        struct drm_i915_private *dev_priv = dev->dev_private;

        INIT_WORK(&dev_priv->hotplug_work, i915_hotplug_work_func);
-        INIT_WORK(&dev_priv->error_work, i915_error_work_func);
+        INIT_WORK(&dev_priv->gpu_error.work, i915_error_work_func);
        INIT_WORK(&dev_priv->rps.work, gen6_pm_rps_work);
        INIT_WORK(&dev_priv->l3_parity.error_work, ivybridge_parity_work);

-        setup_timer(&dev_priv->hangcheck_timer, i915_hangcheck_elapsed,
+        setup_timer(&dev_priv->gpu_error.hangcheck_timer,
+                    i915_hangcheck_elapsed,
                    (unsigned long) dev);

        pm_qos_add_request(&dev_priv->pm_qos, PM_QOS_CPU_DMA_LATENCY, PM_QOS_DEFAULT_VALUE);
...
@@ -267,27 +267,27 @@ static bool intel_ironlake_crt_detect_hotplug(struct drm_connector *connector)
                crt->force_hotplug_required = 0;

-                save_adpa = adpa = I915_READ(PCH_ADPA);
+                save_adpa = adpa = I915_READ(crt->adpa_reg);
                DRM_DEBUG_KMS("trigger hotplug detect cycle: adpa=0x%x\n", adpa);

                adpa |= ADPA_CRT_HOTPLUG_FORCE_TRIGGER;
                if (turn_off_dac)
                        adpa &= ~ADPA_DAC_ENABLE;

-                I915_WRITE(PCH_ADPA, adpa);
+                I915_WRITE(crt->adpa_reg, adpa);

-                if (wait_for((I915_READ(PCH_ADPA) & ADPA_CRT_HOTPLUG_FORCE_TRIGGER) == 0,
+                if (wait_for((I915_READ(crt->adpa_reg) & ADPA_CRT_HOTPLUG_FORCE_TRIGGER) == 0,
                             1000))
                        DRM_DEBUG_KMS("timed out waiting for FORCE_TRIGGER");

                if (turn_off_dac) {
-                        I915_WRITE(PCH_ADPA, save_adpa);
-                        POSTING_READ(PCH_ADPA);
+                        I915_WRITE(crt->adpa_reg, save_adpa);
+                        POSTING_READ(crt->adpa_reg);
                }
        }

        /* Check the status to see if both blue and green are on now */
-        adpa = I915_READ(PCH_ADPA);
+        adpa = I915_READ(crt->adpa_reg);
        if ((adpa & ADPA_CRT_HOTPLUG_MONITOR_MASK) != 0)
                ret = true;
        else
@@ -300,26 +300,27 @@ static bool intel_ironlake_crt_detect_hotplug(struct drm_connector *connector)
 static bool valleyview_crt_detect_hotplug(struct drm_connector *connector)
 {
        struct drm_device *dev = connector->dev;
+        struct intel_crt *crt = intel_attached_crt(connector);
        struct drm_i915_private *dev_priv = dev->dev_private;
        u32 adpa;
        bool ret;
        u32 save_adpa;

-        save_adpa = adpa = I915_READ(ADPA);
+        save_adpa = adpa = I915_READ(crt->adpa_reg);
        DRM_DEBUG_KMS("trigger hotplug detect cycle: adpa=0x%x\n", adpa);

        adpa |= ADPA_CRT_HOTPLUG_FORCE_TRIGGER;

-        I915_WRITE(ADPA, adpa);
+        I915_WRITE(crt->adpa_reg, adpa);

-        if (wait_for((I915_READ(ADPA) & ADPA_CRT_HOTPLUG_FORCE_TRIGGER) == 0,
+        if (wait_for((I915_READ(crt->adpa_reg) & ADPA_CRT_HOTPLUG_FORCE_TRIGGER) == 0,
                     1000)) {
                DRM_DEBUG_KMS("timed out waiting for FORCE_TRIGGER");
-                I915_WRITE(ADPA, save_adpa);
+                I915_WRITE(crt->adpa_reg, save_adpa);
        }

        /* Check the status to see if both blue and green are on now */
-        adpa = I915_READ(ADPA);
+        adpa = I915_READ(crt->adpa_reg);
        if ((adpa & ADPA_CRT_HOTPLUG_MONITOR_MASK) != 0)
                ret = true;
        else
@@ -665,11 +666,11 @@ static void intel_crt_reset(struct drm_connector *connector)
        if (HAS_PCH_SPLIT(dev)) {
                u32 adpa;

-                adpa = I915_READ(PCH_ADPA);
+                adpa = I915_READ(crt->adpa_reg);
                adpa &= ~ADPA_CRT_HOTPLUG_MASK;
                adpa |= ADPA_HOTPLUG_BITS;
-                I915_WRITE(PCH_ADPA, adpa);
-                POSTING_READ(PCH_ADPA);
+                I915_WRITE(crt->adpa_reg, adpa);
+                POSTING_READ(crt->adpa_reg);

                DRM_DEBUG_KMS("pch crt adpa set to 0x%x\n", adpa);
                crt->force_hotplug_required = 1;
...
@@ -677,6 +677,7 @@ static void intel_ddi_mode_set(struct drm_encoder *encoder,
 	DRM_DEBUG_KMS("Preparing DDI mode for Haswell on port %c, pipe %c\n",
 		      port_name(port), pipe_name(pipe));
+	intel_crtc->eld_vld = false;
 	if (type == INTEL_OUTPUT_DISPLAYPORT || type == INTEL_OUTPUT_EDP) {
 		struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
@@ -987,7 +988,13 @@ void intel_ddi_enable_pipe_func(struct drm_crtc *crtc)
 	if (cpu_transcoder == TRANSCODER_EDP) {
 		switch (pipe) {
 		case PIPE_A:
-			temp |= TRANS_DDI_EDP_INPUT_A_ONOFF;
+			/* Can only use the always-on power well for eDP when
+			 * not using the panel fitter, and when not using motion
+			 * blur mitigation (which we don't support). */
+			if (dev_priv->pch_pf_size)
+				temp |= TRANS_DDI_EDP_INPUT_A_ONOFF;
+			else
+				temp |= TRANS_DDI_EDP_INPUT_A_ON;
 			break;
 		case PIPE_B:
 			temp |= TRANS_DDI_EDP_INPUT_B_ONOFF;
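This hunk is what lets the dynamic power well code shave power on Haswell: eDP on pipe A may be fed from the always-on well only while the panel fitter is idle; otherwise the switchable on/off well has to be selected. A compilable sketch of that decision, with placeholder encodings rather than the real TRANS_DDI field values:

	#include <stdint.h>
	#include <stdbool.h>

	/* Placeholder encodings; the real TRANS_DDI_EDP_INPUT_* values
	 * live in the driver's register headers. */
	#define EDP_INPUT_A_ON    (2u << 12) /* always-on power well */
	#define EDP_INPUT_A_ONOFF (3u << 12) /* switchable power well */

	/* eDP on pipe A may use the always-on power well only when the
	 * panel fitter is unused (pch_pf_size == 0 in the hunk above). */
	static uint32_t pick_edp_input_a(bool panel_fitter_in_use)
	{
		return panel_fitter_in_use ? EDP_INPUT_A_ONOFF : EDP_INPUT_A_ON;
	}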
@@ -1287,10 +1294,14 @@ static void intel_ddi_post_disable(struct intel_encoder *intel_encoder)
 static void intel_enable_ddi(struct intel_encoder *intel_encoder)
 {
 	struct drm_encoder *encoder = &intel_encoder->base;
+	struct drm_crtc *crtc = encoder->crtc;
+	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+	int pipe = intel_crtc->pipe;
 	struct drm_device *dev = encoder->dev;
 	struct drm_i915_private *dev_priv = dev->dev_private;
 	enum port port = intel_ddi_get_encoder_port(intel_encoder);
 	int type = intel_encoder->type;
+	uint32_t tmp;
 	if (type == INTEL_OUTPUT_HDMI) {
 		/* In HDMI/DVI mode, the port width, and swing/emphasis values
@@ -1303,18 +1314,34 @@ static void intel_enable_ddi(struct intel_encoder *intel_encoder)
 		ironlake_edp_backlight_on(intel_dp);
 	}
+	if (intel_crtc->eld_vld) {
+		tmp = I915_READ(HSW_AUD_PIN_ELD_CP_VLD);
+		tmp |= ((AUDIO_OUTPUT_ENABLE_A | AUDIO_ELD_VALID_A) << (pipe * 4));
+		I915_WRITE(HSW_AUD_PIN_ELD_CP_VLD, tmp);
+	}
 }
 static void intel_disable_ddi(struct intel_encoder *intel_encoder)
 {
 	struct drm_encoder *encoder = &intel_encoder->base;
+	struct drm_crtc *crtc = encoder->crtc;
+	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+	int pipe = intel_crtc->pipe;
 	int type = intel_encoder->type;
+	struct drm_device *dev = encoder->dev;
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	uint32_t tmp;
 	if (type == INTEL_OUTPUT_EDP) {
 		struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
 		ironlake_edp_backlight_off(intel_dp);
 	}
+	tmp = I915_READ(HSW_AUD_PIN_ELD_CP_VLD);
+	tmp &= ~((AUDIO_OUTPUT_ENABLE_A | AUDIO_ELD_VALID_A) << (pipe * 4));
+	I915_WRITE(HSW_AUD_PIN_ELD_CP_VLD, tmp);
 }
 int intel_ddi_get_cdclk_freq(struct drm_i915_private *dev_priv)
...
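The ELD fix above works because each pipe owns a group of bits in HSW_AUD_PIN_ELD_CP_VLD: the pipe A bits shifted left by pipe * 4 address pipe B, C, and so on. A standalone sketch of that bit math, using placeholder bit positions rather than the driver's real defines:

	#include <stdint.h>

	/* Placeholder positions; the real AUDIO_OUTPUT_ENABLE_A /
	 * AUDIO_ELD_VALID_A values come from the driver's register headers. */
	#define AUDIO_ELD_VALID_A     (1u << 0)
	#define AUDIO_OUTPUT_ENABLE_A (1u << 2)

	/* Each pipe owns a 4-bit group, so pipe B's bits are pipe A's
	 * shifted by 4, pipe C's by 8, etc. */
	static uint32_t audio_bits_for_pipe(int pipe)
	{
		return (AUDIO_OUTPUT_ENABLE_A | AUDIO_ELD_VALID_A) << (pipe * 4);
	}

	static uint32_t enable_pipe_audio(uint32_t reg, int pipe)
	{
		return reg | audio_bits_for_pipe(pipe);
	}

	static uint32_t disable_pipe_audio(uint32_t reg, int pipe)
	{
		return reg & ~audio_bits_for_pipe(pipe);
	}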
@@ -763,6 +763,22 @@ intel_dp_mode_fixup(struct drm_encoder *encoder,
 		return false;
 	bpp = adjusted_mode->private_flags & INTEL_MODE_DP_FORCE_6BPC ? 18 : 24;
+	if (intel_dp->color_range_auto) {
+		/*
+		 * See:
+		 * CEA-861-E - 5.1 Default Encoding Parameters
+		 * VESA DisplayPort Ver.1.2a - 5.1.1.1 Video Colorimetry
+		 */
+		if (bpp != 18 && drm_mode_cea_vic(adjusted_mode) > 1)
+			intel_dp->color_range = DP_COLOR_RANGE_16_235;
+		else
+			intel_dp->color_range = 0;
+	}
+	if (intel_dp->color_range)
+		adjusted_mode->private_flags |= INTEL_MODE_LIMITED_COLOR_RANGE;
 	mode_rate = intel_dp_link_required(adjusted_mode->clock, bpp);
 	for (clock = 0; clock <= max_clock; clock++) {
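The automatic rule added here follows CEA-861-E: CEA modes other than VIC 1 (640x480, the one full-range CEA mode) default to limited 16-235 range, everything else stays full range, and the 18bpp (6bpc) case is excluded. A minimal sketch of the predicate, assuming a helper like drm_mode_cea_vic() that returns 0 for non-CEA modes:

	#include <stdbool.h>

	/* "Broadcast RGB = Auto": limited range for CEA modes with
	 * VIC > 1, full range otherwise; never for 6bpc (18bpp) links. */
	static bool want_limited_range(int bpp, int cea_vic)
	{
		return bpp != 18 && cea_vic > 1;
	}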
@@ -967,7 +983,8 @@ intel_dp_mode_set(struct drm_encoder *encoder, struct drm_display_mode *mode,
 	else
 		intel_dp->DP |= DP_PLL_FREQ_270MHZ;
 	} else if (!HAS_PCH_CPT(dev) || is_cpu_edp(intel_dp)) {
-		intel_dp->DP |= intel_dp->color_range;
+		if (!HAS_PCH_SPLIT(dev))
+			intel_dp->DP |= intel_dp->color_range;
 		if (adjusted_mode->flags & DRM_MODE_FLAG_PHSYNC)
 			intel_dp->DP |= DP_SYNC_HS_HIGH;
@@ -1770,14 +1787,18 @@ intel_dp_set_link_train(struct intel_dp *intel_dp,
 	temp &= ~DP_TP_CTL_LINK_TRAIN_MASK;
 	switch (dp_train_pat & DP_TRAINING_PATTERN_MASK) {
 	case DP_TRAINING_PATTERN_DISABLE:
-		temp |= DP_TP_CTL_LINK_TRAIN_IDLE;
-		I915_WRITE(DP_TP_CTL(port), temp);
-		if (wait_for((I915_READ(DP_TP_STATUS(port)) &
-			      DP_TP_STATUS_IDLE_DONE), 1))
-			DRM_ERROR("Timed out waiting for DP idle patterns\n");
-		temp &= ~DP_TP_CTL_LINK_TRAIN_MASK;
+		if (port != PORT_A) {
+			temp |= DP_TP_CTL_LINK_TRAIN_IDLE;
+			I915_WRITE(DP_TP_CTL(port), temp);
+			if (wait_for((I915_READ(DP_TP_STATUS(port)) &
+				      DP_TP_STATUS_IDLE_DONE), 1))
+				DRM_ERROR("Timed out waiting for DP idle patterns\n");
+			temp &= ~DP_TP_CTL_LINK_TRAIN_MASK;
+		}
 		temp |= DP_TP_CTL_LINK_TRAIN_NORMAL;
 		break;
...@@ -2276,16 +2297,17 @@ g4x_dp_detect(struct intel_dp *intel_dp) ...@@ -2276,16 +2297,17 @@ g4x_dp_detect(struct intel_dp *intel_dp)
{ {
struct drm_device *dev = intel_dp_to_dev(intel_dp); struct drm_device *dev = intel_dp_to_dev(intel_dp);
struct drm_i915_private *dev_priv = dev->dev_private; struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
uint32_t bit; uint32_t bit;
switch (intel_dp->output_reg) { switch (intel_dig_port->port) {
case DP_B: case PORT_B:
bit = DPB_HOTPLUG_LIVE_STATUS; bit = DPB_HOTPLUG_LIVE_STATUS;
break; break;
case DP_C: case PORT_C:
bit = DPC_HOTPLUG_LIVE_STATUS; bit = DPC_HOTPLUG_LIVE_STATUS;
break; break;
case DP_D: case PORT_D:
bit = DPD_HOTPLUG_LIVE_STATUS; bit = DPD_HOTPLUG_LIVE_STATUS;
break; break;
default: default:
@@ -2459,10 +2481,21 @@ intel_dp_set_property(struct drm_connector *connector,
 	}
 	if (property == dev_priv->broadcast_rgb_property) {
-		if (val == !!intel_dp->color_range)
-			return 0;
-		intel_dp->color_range = val ? DP_COLOR_RANGE_16_235 : 0;
+		switch (val) {
+		case INTEL_BROADCAST_RGB_AUTO:
+			intel_dp->color_range_auto = true;
+			break;
+		case INTEL_BROADCAST_RGB_FULL:
+			intel_dp->color_range_auto = false;
+			intel_dp->color_range = 0;
+			break;
+		case INTEL_BROADCAST_RGB_LIMITED:
+			intel_dp->color_range_auto = false;
+			intel_dp->color_range = DP_COLOR_RANGE_16_235;
+			break;
+		default:
+			return -EINVAL;
+		}
 		goto done;
 	}
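With the property now tri-state (auto/full/limited), userspace drives it through the ordinary connector property interface. A hedged libdrm sketch; the enum names ("Automatic", "Full", "Limited 16:235") are assumptions based on the driver's property setup, not verified here:

	#include <string.h>
	#include <xf86drm.h>
	#include <xf86drmMode.h>

	/* Find the "Broadcast RGB" property on a connector and set it to
	 * the enum value whose name matches `wanted`. Returns the ioctl
	 * result, or -1 if the property or enum name is not found. */
	static int set_broadcast_rgb(int fd, drmModeConnector *conn,
				     const char *wanted)
	{
		int i, j, ret = -1;

		for (i = 0; i < conn->count_props; i++) {
			drmModePropertyRes *prop =
				drmModeGetProperty(fd, conn->props[i]);
			if (!prop)
				continue;
			if (strcmp(prop->name, "Broadcast RGB") == 0) {
				for (j = 0; j < prop->count_enums; j++) {
					if (strcmp(prop->enums[j].name, wanted))
						continue;
					ret = drmModeConnectorSetProperty(fd,
							conn->connector_id,
							prop->prop_id,
							prop->enums[j].value);
					break;
				}
			}
			drmModeFreeProperty(prop);
		}
		return ret;
	}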
@@ -2603,6 +2636,7 @@ intel_dp_add_properties(struct intel_dp *intel_dp, struct drm_connector *connect
 	intel_attach_force_audio_property(connector);
 	intel_attach_broadcast_rgb_property(connector);
+	intel_dp->color_range_auto = true;
 	if (is_edp(intel_dp)) {
 		drm_mode_create_scaling_mode_property(connector->dev);
...