Commit 4238a417 authored by Linus Torvalds

Merge branch 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/anholt/drm-intel

* 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/anholt/drm-intel: (58 commits)
  drm/i915,intel_agp: Add support for Sandybridge D0
  drm/i915: fix render pipe control notify on sandybridge
  agp/intel: set 40-bit dma mask on Sandybridge
  drm/i915: Remove the conflicting BUG_ON()
  drm/i915/suspend: s/IS_IRONLAKE/HAS_PCH_SPLIT/
  drm/i915/suspend: Flush register writes before busy-waiting.
  i915: disable DAC on Ironlake also when doing CRT load detection.
  drm/i915: wait for actual vblank, not just 20ms
  drm/i915: make sure eDP PLL is enabled at the right time
  drm/i915: fix VGA plane disable for Ironlake+
  drm/i915: eDP mode set sequence corrections
  drm/i915: add panel reset workaround
  drm/i915: Enable RC6 on Ironlake.
  drm/i915/sdvo: Only set is_lvds if we have a valid fixed mode.
  drm/i915: Set up a render context on Ironlake
  drm/i915 invalidate indirect state pointers at end of ring exec
  drm/i915: Wake-up wait_request() from elapsed hang-check (v2)
  drm/i915: Apply i830 errata for cursor alignment
  drm/i915: Only update i845/i865 CURBASE when disabled (v2)
  drm/i915: FBC is updated within set_base() so remove second call in mode_set()
  ...
parents bc584c51 4fefe435
......@@ -819,13 +819,16 @@ static const struct intel_driver_description {
"Sandybridge", NULL, &intel_gen6_driver },
{ PCI_DEVICE_ID_INTEL_SANDYBRIDGE_M_HB, PCI_DEVICE_ID_INTEL_SANDYBRIDGE_M_IG,
"Sandybridge", NULL, &intel_gen6_driver },
{ PCI_DEVICE_ID_INTEL_SANDYBRIDGE_M_HB, PCI_DEVICE_ID_INTEL_SANDYBRIDGE_M_D0_IG,
"Sandybridge", NULL, &intel_gen6_driver },
{ 0, 0, NULL, NULL, NULL }
};
static int __devinit intel_gmch_probe(struct pci_dev *pdev,
struct agp_bridge_data *bridge)
{
int i;
int i, mask;
bridge->driver = NULL;
for (i = 0; intel_agp_chipsets[i].name != NULL; i++) {
......@@ -845,14 +848,19 @@ static int __devinit intel_gmch_probe(struct pci_dev *pdev,
dev_info(&pdev->dev, "Intel %s Chipset\n", intel_agp_chipsets[i].name);
if (bridge->driver->mask_memory == intel_i965_mask_memory) {
if (pci_set_dma_mask(intel_private.pcidev, DMA_BIT_MASK(36)))
dev_err(&intel_private.pcidev->dev,
"set gfx device dma mask 36bit failed!\n");
else
pci_set_consistent_dma_mask(intel_private.pcidev,
DMA_BIT_MASK(36));
}
if (bridge->driver->mask_memory == intel_gen6_mask_memory)
mask = 40;
else if (bridge->driver->mask_memory == intel_i965_mask_memory)
mask = 36;
else
mask = 32;
if (pci_set_dma_mask(intel_private.pcidev, DMA_BIT_MASK(mask)))
dev_err(&intel_private.pcidev->dev,
"set gfx device dma mask %d-bit failed!\n", mask);
else
pci_set_consistent_dma_mask(intel_private.pcidev,
DMA_BIT_MASK(mask));
return 1;
}
......
......@@ -204,6 +204,7 @@
#define PCI_DEVICE_ID_INTEL_SANDYBRIDGE_IG 0x0102
#define PCI_DEVICE_ID_INTEL_SANDYBRIDGE_M_HB 0x0104
#define PCI_DEVICE_ID_INTEL_SANDYBRIDGE_M_IG 0x0106
#define PCI_DEVICE_ID_INTEL_SANDYBRIDGE_M_D0_IG 0x0126
/* cover 915 and 945 variants */
#define IS_I915 (agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_E7221_HB || \
......
......@@ -8,6 +8,7 @@ i915-y := i915_drv.o i915_dma.o i915_irq.o i915_mem.o \
i915_suspend.o \
i915_gem.o \
i915_gem_debug.o \
i915_gem_evict.o \
i915_gem_tiling.o \
i915_trace_points.o \
intel_display.o \
......@@ -18,6 +19,7 @@ i915-y := i915_drv.o i915_dma.o i915_irq.o i915_mem.o \
intel_hdmi.o \
intel_sdvo.o \
intel_modes.o \
intel_panel.o \
intel_i2c.o \
intel_fb.o \
intel_tv.o \
......
......@@ -30,20 +30,17 @@
#include "intel_drv.h"
struct intel_dvo_device {
char *name;
const char *name;
int type;
/* DVOA/B/C output register */
u32 dvo_reg;
/* GPIO register used for i2c bus to control this device */
u32 gpio;
int slave_addr;
struct i2c_adapter *i2c_bus;
const struct intel_dvo_dev_ops *dev_ops;
void *dev_priv;
struct drm_display_mode *panel_fixed_mode;
bool panel_wants_dither;
struct i2c_adapter *i2c_bus;
};
struct intel_dvo_dev_ops {
......
......@@ -467,6 +467,9 @@ static int i915_error_state(struct seq_file *m, void *unused)
}
}
if (error->overlay)
intel_overlay_print_error_state(m, error->overlay);
out:
spin_unlock_irqrestore(&dev_priv->error_lock, flags);
......
......@@ -499,6 +499,13 @@ static int i915_dispatch_batchbuffer(struct drm_device * dev,
}
}
if (IS_G4X(dev) || IS_IRONLAKE(dev)) {
BEGIN_LP_RING(2);
OUT_RING(MI_FLUSH | MI_NO_WRITE_FLUSH | MI_INVALIDATE_ISP);
OUT_RING(MI_NOOP);
ADVANCE_LP_RING();
}
i915_emit_breadcrumb(dev);
return 0;
......
......@@ -181,6 +181,7 @@ static const struct pci_device_id pciidlist[] = { /* aka */
INTEL_VGA_DEVICE(0x0046, &intel_ironlake_m_info),
INTEL_VGA_DEVICE(0x0102, &intel_sandybridge_d_info),
INTEL_VGA_DEVICE(0x0106, &intel_sandybridge_m_info),
INTEL_VGA_DEVICE(0x0126, &intel_sandybridge_m_info),
{0, 0, 0}
};
......
......@@ -113,6 +113,9 @@ struct intel_opregion {
int enabled;
};
struct intel_overlay;
struct intel_overlay_error_state;
struct drm_i915_master_private {
drm_local_map_t *sarea;
struct _drm_i915_sarea *sarea_priv;
......@@ -166,6 +169,7 @@ struct drm_i915_error_state {
u32 purgeable:1;
} *active_bo;
u32 active_bo_count;
struct intel_overlay_error_state *overlay;
};
struct drm_i915_display_funcs {
......@@ -186,8 +190,6 @@ struct drm_i915_display_funcs {
/* clock gating init */
};
struct intel_overlay;
struct intel_device_info {
u8 is_mobile : 1;
u8 is_i8xx : 1;
......@@ -242,6 +244,7 @@ typedef struct drm_i915_private {
struct pci_dev *bridge_dev;
struct intel_ring_buffer render_ring;
struct intel_ring_buffer bsd_ring;
uint32_t next_seqno;
drm_dma_handle_t *status_page_dmah;
void *seqno_page;
......@@ -251,6 +254,7 @@ typedef struct drm_i915_private {
drm_local_map_t hws_map;
struct drm_gem_object *seqno_obj;
struct drm_gem_object *pwrctx;
struct drm_gem_object *renderctx;
struct resource mch_res;
......@@ -285,6 +289,9 @@ typedef struct drm_i915_private {
unsigned int sr01, adpa, ppcr, dvob, dvoc, lvds;
int vblank_pipe;
int num_pipe;
u32 flush_rings;
#define FLUSH_RENDER_RING 0x1
#define FLUSH_BSD_RING 0x2
/* For hangcheck timer */
#define DRM_I915_HANGCHECK_PERIOD 75 /* in jiffies */
......@@ -568,8 +575,6 @@ typedef struct drm_i915_private {
*/
struct delayed_work retire_work;
uint32_t next_gem_seqno;
/**
* Waiting sequence number, if any
*/
......@@ -610,6 +615,8 @@ typedef struct drm_i915_private {
struct sdvo_device_mapping sdvo_mappings[2];
/* indicate whether the LVDS_BORDER should be enabled or not */
unsigned int lvds_border_bits;
/* Panel fitter placement and size for Ironlake+ */
u32 pch_pf_pos, pch_pf_size;
struct drm_crtc *plane_to_crtc_mapping[2];
struct drm_crtc *pipe_to_crtc_mapping[2];
......@@ -669,6 +676,8 @@ struct drm_i915_gem_object {
struct list_head list;
/** This object's place on GPU write list */
struct list_head gpu_write_list;
/** This object's place on eviction list */
struct list_head evict_list;
/**
* This is set if the object is on the active or flushing lists
......@@ -978,6 +987,7 @@ int i915_gem_init_ringbuffer(struct drm_device *dev);
void i915_gem_cleanup_ringbuffer(struct drm_device *dev);
int i915_gem_do_init(struct drm_device *dev, unsigned long start,
unsigned long end);
int i915_gpu_idle(struct drm_device *dev);
int i915_gem_idle(struct drm_device *dev);
uint32_t i915_add_request(struct drm_device *dev,
struct drm_file *file_priv,
......@@ -991,7 +1001,9 @@ int i915_gem_object_set_to_gtt_domain(struct drm_gem_object *obj,
int write);
int i915_gem_object_set_to_display_plane(struct drm_gem_object *obj);
int i915_gem_attach_phys_object(struct drm_device *dev,
struct drm_gem_object *obj, int id);
struct drm_gem_object *obj,
int id,
int align);
void i915_gem_detach_phys_object(struct drm_device *dev,
struct drm_gem_object *obj);
void i915_gem_free_all_phys_object(struct drm_device *dev);
......@@ -1003,6 +1015,11 @@ int i915_gem_object_flush_write_domain(struct drm_gem_object *obj);
void i915_gem_shrinker_init(void);
void i915_gem_shrinker_exit(void);
/* i915_gem_evict.c */
int i915_gem_evict_something(struct drm_device *dev, int min_size, unsigned alignment);
int i915_gem_evict_everything(struct drm_device *dev);
int i915_gem_evict_inactive(struct drm_device *dev);
/* i915_gem_tiling.c */
void i915_gem_detect_bit_6_swizzle(struct drm_device *dev);
void i915_gem_object_do_bit_17_swizzle(struct drm_gem_object *obj);
......@@ -1066,6 +1083,10 @@ extern bool ironlake_set_drps(struct drm_device *dev, u8 val);
extern void intel_detect_pch (struct drm_device *dev);
extern int intel_trans_dp_port_sel (struct drm_crtc *crtc);
/* overlay */
extern struct intel_overlay_error_state *intel_overlay_capture_error_state(struct drm_device *dev);
extern void intel_overlay_print_error_state(struct seq_file *m, struct intel_overlay_error_state *error);
/**
* Lock test for when it's just for synchronization of ring access.
*
......@@ -1092,26 +1113,26 @@ extern int intel_trans_dp_port_sel (struct drm_crtc *crtc);
#define I915_VERBOSE 0
#define BEGIN_LP_RING(n) do { \
drm_i915_private_t *dev_priv = dev->dev_private; \
drm_i915_private_t *dev_priv__ = dev->dev_private; \
if (I915_VERBOSE) \
DRM_DEBUG(" BEGIN_LP_RING %x\n", (int)(n)); \
intel_ring_begin(dev, &dev_priv->render_ring, (n)); \
intel_ring_begin(dev, &dev_priv__->render_ring, (n)); \
} while (0)
#define OUT_RING(x) do { \
drm_i915_private_t *dev_priv = dev->dev_private; \
drm_i915_private_t *dev_priv__ = dev->dev_private; \
if (I915_VERBOSE) \
DRM_DEBUG(" OUT_RING %x\n", (int)(x)); \
intel_ring_emit(dev, &dev_priv->render_ring, x); \
intel_ring_emit(dev, &dev_priv__->render_ring, x); \
} while (0)
#define ADVANCE_LP_RING() do { \
drm_i915_private_t *dev_priv = dev->dev_private; \
drm_i915_private_t *dev_priv__ = dev->dev_private; \
if (I915_VERBOSE) \
DRM_DEBUG("ADVANCE_LP_RING %x\n", \
dev_priv->render_ring.tail); \
intel_ring_advance(dev, &dev_priv->render_ring); \
dev_priv__->render_ring.tail); \
intel_ring_advance(dev, &dev_priv__->render_ring); \
} while(0)
/**
......
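A side note on the dev_priv__ rename in the BEGIN_LP_RING/OUT_RING/ADVANCE_LP_RING macros above: a macro that declares a local variable with a common name can shadow an identically named variable in the calling function, so a macro argument that mentions that name silently binds to the macro's copy instead of the caller's. A minimal stand-alone sketch of that shadowing hazard, in plain C; none of these names (SQUARE_BAD, SQUARE_GOOD, tmp) come from the driver, they are purely illustrative.

#include <stdio.h>

#define SQUARE_BAD(out, x)  do { int tmp = (x); (out) = tmp * tmp; } while (0)
#define SQUARE_GOOD(out, x) do { int tmp__ = (x); (out) = tmp__ * tmp__; } while (0)

int main(void)
{
	int tmp = 3, r1 = 0, r2 = 0;

	/* The caller's "tmp" is shadowed inside SQUARE_BAD, so the argument
	 * expression "tmp" reads the macro's own, not-yet-initialized copy. */
	SQUARE_BAD(r1, tmp);

	/* With a name unlikely to collide, the argument still refers to the
	 * caller's variable and the result is well defined. */
	SQUARE_GOOD(r2, tmp);

	printf("bad=%d good=%d\n", r1, r2);	/* r1 is indeterminate, r2 == 9 */
	return 0;
}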
......@@ -35,6 +35,7 @@
#include <linux/swap.h>
#include <linux/pci.h>
static uint32_t i915_gem_get_gtt_alignment(struct drm_gem_object *obj);
static int i915_gem_object_flush_gpu_write_domain(struct drm_gem_object *obj);
static void i915_gem_object_flush_gtt_write_domain(struct drm_gem_object *obj);
static void i915_gem_object_flush_cpu_write_domain(struct drm_gem_object *obj);
......@@ -48,8 +49,6 @@ static int i915_gem_object_wait_rendering(struct drm_gem_object *obj);
static int i915_gem_object_bind_to_gtt(struct drm_gem_object *obj,
unsigned alignment);
static void i915_gem_clear_fence_reg(struct drm_gem_object *obj);
static int i915_gem_evict_something(struct drm_device *dev, int min_size);
static int i915_gem_evict_from_inactive_list(struct drm_device *dev);
static int i915_gem_phys_pwrite(struct drm_device *dev, struct drm_gem_object *obj,
struct drm_i915_gem_pwrite *args,
struct drm_file *file_priv);
......@@ -58,6 +57,14 @@ static void i915_gem_free_object_tail(struct drm_gem_object *obj);
static LIST_HEAD(shrink_list);
static DEFINE_SPINLOCK(shrink_list_lock);
static inline bool
i915_gem_object_is_inactive(struct drm_i915_gem_object *obj_priv)
{
return obj_priv->gtt_space &&
!obj_priv->active &&
obj_priv->pin_count == 0;
}
int i915_gem_do_init(struct drm_device *dev, unsigned long start,
unsigned long end)
{
......@@ -313,7 +320,8 @@ i915_gem_object_get_pages_or_evict(struct drm_gem_object *obj)
if (ret == -ENOMEM) {
struct drm_device *dev = obj->dev;
ret = i915_gem_evict_something(dev, obj->size);
ret = i915_gem_evict_something(dev, obj->size,
i915_gem_get_gtt_alignment(obj));
if (ret)
return ret;
......@@ -1036,6 +1044,11 @@ i915_gem_set_domain_ioctl(struct drm_device *dev, void *data,
ret = i915_gem_object_set_to_cpu_domain(obj, write_domain != 0);
}
/* Maintain LRU order of "inactive" objects */
if (ret == 0 && i915_gem_object_is_inactive(obj_priv))
list_move_tail(&obj_priv->list, &dev_priv->mm.inactive_list);
drm_gem_object_unreference(obj);
mutex_unlock(&dev->struct_mutex);
return ret;
......@@ -1137,7 +1150,7 @@ int i915_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
struct drm_gem_object *obj = vma->vm_private_data;
struct drm_device *dev = obj->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
drm_i915_private_t *dev_priv = dev->dev_private;
struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
pgoff_t page_offset;
unsigned long pfn;
......@@ -1155,8 +1168,6 @@ int i915_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
if (ret)
goto unlock;
list_add_tail(&obj_priv->list, &dev_priv->mm.inactive_list);
ret = i915_gem_object_set_to_gtt_domain(obj, write);
if (ret)
goto unlock;
......@@ -1169,6 +1180,9 @@ int i915_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
goto unlock;
}
if (i915_gem_object_is_inactive(obj_priv))
list_move_tail(&obj_priv->list, &dev_priv->mm.inactive_list);
pfn = ((dev->agp->base + obj_priv->gtt_offset) >> PAGE_SHIFT) +
page_offset;
......@@ -1363,7 +1377,6 @@ i915_gem_mmap_gtt_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_priv)
{
struct drm_i915_gem_mmap_gtt *args = data;
struct drm_i915_private *dev_priv = dev->dev_private;
struct drm_gem_object *obj;
struct drm_i915_gem_object *obj_priv;
int ret;
......@@ -1409,7 +1422,6 @@ i915_gem_mmap_gtt_ioctl(struct drm_device *dev, void *data,
mutex_unlock(&dev->struct_mutex);
return ret;
}
list_add_tail(&obj_priv->list, &dev_priv->mm.inactive_list);
}
drm_gem_object_unreference(obj);
......@@ -1493,9 +1505,16 @@ i915_gem_object_truncate(struct drm_gem_object *obj)
struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
struct inode *inode;
/* Our goal here is to return as much of the memory as
* is possible back to the system as we are called from OOM.
* To do this we must instruct the shmfs to drop all of its
* backing pages, *now*. Here we mirror the actions taken
* by shmem_delete_inode() to release the backing store.
*/
inode = obj->filp->f_path.dentry->d_inode;
if (inode->i_op->truncate)
inode->i_op->truncate (inode);
truncate_inode_pages(inode->i_mapping, 0);
if (inode->i_op->truncate_range)
inode->i_op->truncate_range(inode, 0, (loff_t)-1);
obj_priv->madv = __I915_MADV_PURGED;
}
......@@ -1887,19 +1906,6 @@ i915_gem_flush(struct drm_device *dev,
flush_domains);
}
static void
i915_gem_flush_ring(struct drm_device *dev,
uint32_t invalidate_domains,
uint32_t flush_domains,
struct intel_ring_buffer *ring)
{
if (flush_domains & I915_GEM_DOMAIN_CPU)
drm_agp_chipset_flush(dev);
ring->flush(dev, ring,
invalidate_domains,
flush_domains);
}
/**
* Ensures that all rendering to the object has completed and the object is
* safe to unbind from the GTT or access from the CPU.
......@@ -1973,8 +1979,6 @@ i915_gem_object_unbind(struct drm_gem_object *obj)
* cause memory corruption through use-after-free.
*/
BUG_ON(obj_priv->active);
/* release the fence reg _after_ flushing */
if (obj_priv->fence_reg != I915_FENCE_REG_NONE)
i915_gem_clear_fence_reg(obj);
......@@ -2010,34 +2014,7 @@ i915_gem_object_unbind(struct drm_gem_object *obj)
return ret;
}
static struct drm_gem_object *
i915_gem_find_inactive_object(struct drm_device *dev, int min_size)
{
drm_i915_private_t *dev_priv = dev->dev_private;
struct drm_i915_gem_object *obj_priv;
struct drm_gem_object *best = NULL;
struct drm_gem_object *first = NULL;
/* Try to find the smallest clean object */
list_for_each_entry(obj_priv, &dev_priv->mm.inactive_list, list) {
struct drm_gem_object *obj = &obj_priv->base;
if (obj->size >= min_size) {
if ((!obj_priv->dirty ||
i915_gem_object_is_purgeable(obj_priv)) &&
(!best || obj->size < best->size)) {
best = obj;
if (best->size == min_size)
return best;
}
if (!first)
first = obj;
}
}
return best ? best : first;
}
static int
int
i915_gpu_idle(struct drm_device *dev)
{
drm_i915_private_t *dev_priv = dev->dev_private;
......@@ -2078,155 +2055,6 @@ i915_gpu_idle(struct drm_device *dev)
return ret;
}
static int
i915_gem_evict_everything(struct drm_device *dev)
{
drm_i915_private_t *dev_priv = dev->dev_private;
int ret;
bool lists_empty;
spin_lock(&dev_priv->mm.active_list_lock);
lists_empty = (list_empty(&dev_priv->mm.inactive_list) &&
list_empty(&dev_priv->mm.flushing_list) &&
list_empty(&dev_priv->render_ring.active_list) &&
(!HAS_BSD(dev)
|| list_empty(&dev_priv->bsd_ring.active_list)));
spin_unlock(&dev_priv->mm.active_list_lock);
if (lists_empty)
return -ENOSPC;
/* Flush everything (on to the inactive lists) and evict */
ret = i915_gpu_idle(dev);
if (ret)
return ret;
BUG_ON(!list_empty(&dev_priv->mm.flushing_list));
ret = i915_gem_evict_from_inactive_list(dev);
if (ret)
return ret;
spin_lock(&dev_priv->mm.active_list_lock);
lists_empty = (list_empty(&dev_priv->mm.inactive_list) &&
list_empty(&dev_priv->mm.flushing_list) &&
list_empty(&dev_priv->render_ring.active_list) &&
(!HAS_BSD(dev)
|| list_empty(&dev_priv->bsd_ring.active_list)));
spin_unlock(&dev_priv->mm.active_list_lock);
BUG_ON(!lists_empty);
return 0;
}
static int
i915_gem_evict_something(struct drm_device *dev, int min_size)
{
drm_i915_private_t *dev_priv = dev->dev_private;
struct drm_gem_object *obj;
int ret;
struct intel_ring_buffer *render_ring = &dev_priv->render_ring;
struct intel_ring_buffer *bsd_ring = &dev_priv->bsd_ring;
for (;;) {
i915_gem_retire_requests(dev);
/* If there's an inactive buffer available now, grab it
* and be done.
*/
obj = i915_gem_find_inactive_object(dev, min_size);
if (obj) {
struct drm_i915_gem_object *obj_priv;
#if WATCH_LRU
DRM_INFO("%s: evicting %p\n", __func__, obj);
#endif
obj_priv = to_intel_bo(obj);
BUG_ON(obj_priv->pin_count != 0);
BUG_ON(obj_priv->active);
/* Wait on the rendering and unbind the buffer. */
return i915_gem_object_unbind(obj);
}
/* If we didn't get anything, but the ring is still processing
* things, wait for the next to finish and hopefully leave us
* a buffer to evict.
*/
if (!list_empty(&render_ring->request_list)) {
struct drm_i915_gem_request *request;
request = list_first_entry(&render_ring->request_list,
struct drm_i915_gem_request,
list);
ret = i915_wait_request(dev,
request->seqno, request->ring);
if (ret)
return ret;
continue;
}
if (HAS_BSD(dev) && !list_empty(&bsd_ring->request_list)) {
struct drm_i915_gem_request *request;
request = list_first_entry(&bsd_ring->request_list,
struct drm_i915_gem_request,
list);
ret = i915_wait_request(dev,
request->seqno, request->ring);
if (ret)
return ret;
continue;
}
/* If we didn't have anything on the request list but there
* are buffers awaiting a flush, emit one and try again.
* When we wait on it, those buffers waiting for that flush
* will get moved to inactive.
*/
if (!list_empty(&dev_priv->mm.flushing_list)) {
struct drm_i915_gem_object *obj_priv;
/* Find an object that we can immediately reuse */
list_for_each_entry(obj_priv, &dev_priv->mm.flushing_list, list) {
obj = &obj_priv->base;
if (obj->size >= min_size)
break;
obj = NULL;
}
if (obj != NULL) {
uint32_t seqno;
i915_gem_flush_ring(dev,
obj->write_domain,
obj->write_domain,
obj_priv->ring);
seqno = i915_add_request(dev, NULL,
obj->write_domain,
obj_priv->ring);
if (seqno == 0)
return -ENOMEM;
continue;
}
}
/* If we didn't do any of the above, there's no single buffer
* large enough to swap out for the new one, so just evict
* everything and start again. (This should be rare.)
*/
if (!list_empty (&dev_priv->mm.inactive_list))
return i915_gem_evict_from_inactive_list(dev);
else
return i915_gem_evict_everything(dev);
}
}
int
i915_gem_object_get_pages(struct drm_gem_object *obj,
gfp_t gfpmask)
......@@ -2666,7 +2494,7 @@ i915_gem_object_bind_to_gtt(struct drm_gem_object *obj, unsigned alignment)
#if WATCH_LRU
DRM_INFO("%s: GTT full, evicting something\n", __func__);
#endif
ret = i915_gem_evict_something(dev, obj->size);
ret = i915_gem_evict_something(dev, obj->size, alignment);
if (ret)
return ret;
......@@ -2684,7 +2512,8 @@ i915_gem_object_bind_to_gtt(struct drm_gem_object *obj, unsigned alignment)
if (ret == -ENOMEM) {
/* first try to clear up some space from the GTT */
ret = i915_gem_evict_something(dev, obj->size);
ret = i915_gem_evict_something(dev, obj->size,
alignment);
if (ret) {
/* now try to shrink everyone else */
if (gfpmask) {
......@@ -2714,7 +2543,7 @@ i915_gem_object_bind_to_gtt(struct drm_gem_object *obj, unsigned alignment)
drm_mm_put_block(obj_priv->gtt_space);
obj_priv->gtt_space = NULL;
ret = i915_gem_evict_something(dev, obj->size);
ret = i915_gem_evict_something(dev, obj->size, alignment);
if (ret)
return ret;
......@@ -2723,6 +2552,9 @@ i915_gem_object_bind_to_gtt(struct drm_gem_object *obj, unsigned alignment)
atomic_inc(&dev->gtt_count);
atomic_add(obj->size, &dev->gtt_memory);
/* keep track of the bound object by adding it to the inactive list */
list_add_tail(&obj_priv->list, &dev_priv->mm.inactive_list);
/* Assert that the object is not currently in any GPU domain. As it
* wasn't in the GTT, there shouldn't be any way it could have been in
* a GPU cache
......@@ -3117,6 +2949,7 @@ static void
i915_gem_object_set_to_gpu_domain(struct drm_gem_object *obj)
{
struct drm_device *dev = obj->dev;
drm_i915_private_t *dev_priv = dev->dev_private;
struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
uint32_t invalidate_domains = 0;
uint32_t flush_domains = 0;
......@@ -3179,6 +3012,13 @@ i915_gem_object_set_to_gpu_domain(struct drm_gem_object *obj)
obj->pending_write_domain = obj->write_domain;
obj->read_domains = obj->pending_read_domains;
if (flush_domains & I915_GEM_GPU_DOMAINS) {
if (obj_priv->ring == &dev_priv->render_ring)
dev_priv->flush_rings |= FLUSH_RENDER_RING;
else if (obj_priv->ring == &dev_priv->bsd_ring)
dev_priv->flush_rings |= FLUSH_BSD_RING;
}
dev->invalidate_domains |= invalidate_domains;
dev->flush_domains |= flush_domains;
#if WATCH_BUF
......@@ -3718,7 +3558,6 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
ring = &dev_priv->render_ring;
}
if (args->buffer_count < 1) {
DRM_ERROR("execbuf with %d buffers\n", args->buffer_count);
return -EINVAL;
......@@ -3892,6 +3731,7 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
*/
dev->invalidate_domains = 0;
dev->flush_domains = 0;
dev_priv->flush_rings = 0;
for (i = 0; i < args->buffer_count; i++) {
struct drm_gem_object *obj = object_list[i];
......@@ -3912,16 +3752,14 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
i915_gem_flush(dev,
dev->invalidate_domains,
dev->flush_domains);
if (dev->flush_domains & I915_GEM_GPU_DOMAINS) {
if (dev_priv->flush_rings & FLUSH_RENDER_RING)
(void)i915_add_request(dev, file_priv,
dev->flush_domains,
&dev_priv->render_ring);
if (HAS_BSD(dev))
(void)i915_add_request(dev, file_priv,
dev->flush_domains,
&dev_priv->bsd_ring);
}
dev->flush_domains,
&dev_priv->render_ring);
if (dev_priv->flush_rings & FLUSH_BSD_RING)
(void)i915_add_request(dev, file_priv,
dev->flush_domains,
&dev_priv->bsd_ring);
}
for (i = 0; i < args->buffer_count; i++) {
......@@ -4192,6 +4030,10 @@ i915_gem_object_pin(struct drm_gem_object *obj, uint32_t alignment)
if (alignment == 0)
alignment = i915_gem_get_gtt_alignment(obj);
if (obj_priv->gtt_offset & (alignment - 1)) {
WARN(obj_priv->pin_count,
"bo is already pinned with incorrect alignment:"
" offset=%x, req.alignment=%x\n",
obj_priv->gtt_offset, alignment);
ret = i915_gem_object_unbind(obj);
if (ret)
return ret;
......@@ -4213,8 +4055,7 @@ i915_gem_object_pin(struct drm_gem_object *obj, uint32_t alignment)
atomic_inc(&dev->pin_count);
atomic_add(obj->size, &dev->pin_memory);
if (!obj_priv->active &&
(obj->write_domain & I915_GEM_GPU_DOMAINS) == 0 &&
!list_empty(&obj_priv->list))
(obj->write_domain & I915_GEM_GPU_DOMAINS) == 0)
list_del_init(&obj_priv->list);
}
i915_verify_inactive(dev, __FILE__, __LINE__);
......@@ -4359,22 +4200,34 @@ i915_gem_busy_ioctl(struct drm_device *dev, void *data,
}
mutex_lock(&dev->struct_mutex);
/* Update the active list for the hardware's current position.
* Otherwise this only updates on a delayed timer or when irqs are
* actually unmasked, and our working set ends up being larger than
* required.
*/
i915_gem_retire_requests(dev);
obj_priv = to_intel_bo(obj);
/* Don't count being on the flushing list against the object being
* done. Otherwise, a buffer left on the flushing list but not getting
* flushed (because nobody's flushing that domain) won't ever return
* unbusy and get reused by libdrm's bo cache. The other expected
* consumer of this interface, OpenGL's occlusion queries, also specs
* that the objects get unbusy "eventually" without any interference.
/* Count all active objects as busy, even if they are currently not used
* by the gpu. Users of this interface expect objects to eventually
* become non-busy without any further actions, therefore emit any
* necessary flushes here.
*/
args->busy = obj_priv->active && obj_priv->last_rendering_seqno != 0;
obj_priv = to_intel_bo(obj);
args->busy = obj_priv->active;
if (args->busy) {
/* Unconditionally flush objects, even when the gpu still uses this
* object. Userspace calling this function indicates that it wants to
* use this buffer rather sooner than later, so issuing the required
* flush earlier is beneficial.
*/
if (obj->write_domain) {
i915_gem_flush(dev, 0, obj->write_domain);
(void)i915_add_request(dev, file_priv, obj->write_domain, obj_priv->ring);
}
/* Update the active list for the hardware's current position.
* Otherwise this only updates on a delayed timer or when irqs
* are actually unmasked, and our working set ends up being
* larger than required.
*/
i915_gem_retire_requests_ring(dev, obj_priv->ring);
args->busy = obj_priv->active;
}
drm_gem_object_unreference(obj);
mutex_unlock(&dev->struct_mutex);
......@@ -4514,30 +4367,6 @@ void i915_gem_free_object(struct drm_gem_object *obj)
i915_gem_free_object_tail(obj);
}
/** Unbinds all inactive objects. */
static int
i915_gem_evict_from_inactive_list(struct drm_device *dev)
{
drm_i915_private_t *dev_priv = dev->dev_private;
while (!list_empty(&dev_priv->mm.inactive_list)) {
struct drm_gem_object *obj;
int ret;
obj = &list_first_entry(&dev_priv->mm.inactive_list,
struct drm_i915_gem_object,
list)->base;
ret = i915_gem_object_unbind(obj);
if (ret != 0) {
DRM_ERROR("Error unbinding object: %d\n", ret);
return ret;
}
}
return 0;
}
int
i915_gem_idle(struct drm_device *dev)
{
......@@ -4562,7 +4391,7 @@ i915_gem_idle(struct drm_device *dev)
/* Under UMS, be paranoid and evict. */
if (!drm_core_check_feature(dev, DRIVER_MODESET)) {
ret = i915_gem_evict_from_inactive_list(dev);
ret = i915_gem_evict_inactive(dev);
if (ret) {
mutex_unlock(&dev->struct_mutex);
return ret;
......@@ -4680,6 +4509,8 @@ i915_gem_init_ringbuffer(struct drm_device *dev)
goto cleanup_render_ring;
}
dev_priv->next_seqno = 1;
return 0;
cleanup_render_ring:
......@@ -4841,7 +4672,7 @@ i915_gem_load(struct drm_device *dev)
* e.g. for cursor + overlay regs
*/
int i915_gem_init_phys_object(struct drm_device *dev,
int id, int size)
int id, int size, int align)
{
drm_i915_private_t *dev_priv = dev->dev_private;
struct drm_i915_gem_phys_object *phys_obj;
......@@ -4856,7 +4687,7 @@ int i915_gem_init_phys_object(struct drm_device *dev,
phys_obj->id = id;
phys_obj->handle = drm_pci_alloc(dev, size, 0);
phys_obj->handle = drm_pci_alloc(dev, size, align);
if (!phys_obj->handle) {
ret = -ENOMEM;
goto kfree_obj;
......@@ -4938,7 +4769,9 @@ void i915_gem_detach_phys_object(struct drm_device *dev,
int
i915_gem_attach_phys_object(struct drm_device *dev,
struct drm_gem_object *obj, int id)
struct drm_gem_object *obj,
int id,
int align)
{
drm_i915_private_t *dev_priv = dev->dev_private;
struct drm_i915_gem_object *obj_priv;
......@@ -4957,11 +4790,10 @@ i915_gem_attach_phys_object(struct drm_device *dev,
i915_gem_detach_phys_object(dev, obj);
}
/* create a new object */
if (!dev_priv->mm.phys_objs[id - 1]) {
ret = i915_gem_init_phys_object(dev, id,
obj->size);
obj->size, align);
if (ret) {
DRM_ERROR("failed to init phys object %d size: %zu\n", id, obj->size);
goto out;
......
/*
* Copyright © 2008-2010 Intel Corporation
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice (including the next
* paragraph) shall be included in all copies or substantial portions of the
* Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
* IN THE SOFTWARE.
*
* Authors:
* Eric Anholt <eric@anholt.net>
* Chris Wilson <chris@chris-wilson.co.uuk>
*
*/
#include "drmP.h"
#include "drm.h"
#include "i915_drv.h"
#include "i915_drm.h"
static struct drm_i915_gem_object *
i915_gem_next_active_object(struct drm_device *dev,
struct list_head **render_iter,
struct list_head **bsd_iter)
{
drm_i915_private_t *dev_priv = dev->dev_private;
struct drm_i915_gem_object *render_obj = NULL, *bsd_obj = NULL;
if (*render_iter != &dev_priv->render_ring.active_list)
render_obj = list_entry(*render_iter,
struct drm_i915_gem_object,
list);
if (HAS_BSD(dev)) {
if (*bsd_iter != &dev_priv->bsd_ring.active_list)
bsd_obj = list_entry(*bsd_iter,
struct drm_i915_gem_object,
list);
if (render_obj == NULL) {
*bsd_iter = (*bsd_iter)->next;
return bsd_obj;
}
if (bsd_obj == NULL) {
*render_iter = (*render_iter)->next;
return render_obj;
}
/* XXX can we handle seqno wrapping? */
if (render_obj->last_rendering_seqno < bsd_obj->last_rendering_seqno) {
*render_iter = (*render_iter)->next;
return render_obj;
} else {
*bsd_iter = (*bsd_iter)->next;
return bsd_obj;
}
} else {
*render_iter = (*render_iter)->next;
return render_obj;
}
}
static bool
mark_free(struct drm_i915_gem_object *obj_priv,
struct list_head *unwind)
{
list_add(&obj_priv->evict_list, unwind);
return drm_mm_scan_add_block(obj_priv->gtt_space);
}
#define i915_for_each_active_object(OBJ, R, B) \
*(R) = dev_priv->render_ring.active_list.next; \
*(B) = dev_priv->bsd_ring.active_list.next; \
while (((OBJ) = i915_gem_next_active_object(dev, (R), (B))) != NULL)
int
i915_gem_evict_something(struct drm_device *dev, int min_size, unsigned alignment)
{
drm_i915_private_t *dev_priv = dev->dev_private;
struct list_head eviction_list, unwind_list;
struct drm_i915_gem_object *obj_priv, *tmp_obj_priv;
struct list_head *render_iter, *bsd_iter;
int ret = 0;
i915_gem_retire_requests(dev);
/* Re-check for free space after retiring requests */
if (drm_mm_search_free(&dev_priv->mm.gtt_space,
min_size, alignment, 0))
return 0;
/*
* The goal is to evict objects and amalgamate space in LRU order.
* The oldest idle objects reside on the inactive list, which is in
* retirement order. The next objects to retire are those on the (per
* ring) active list that do not have an outstanding flush. Once the
* hardware reports completion (the seqno is updated after the
* batchbuffer has been finished) the clean buffer objects would
* be retired to the inactive list. Any dirty objects would be added
* to the tail of the flushing list. So after processing the clean
* active objects we need to emit a MI_FLUSH to retire the flushing
* list, hence the retirement order of the flushing list is in
* advance of the dirty objects on the active lists.
*
* The retirement sequence is thus:
* 1. Inactive objects (already retired)
* 2. Clean active objects
* 3. Flushing list
* 4. Dirty active objects.
*
* On each list, the oldest objects lie at the HEAD with the freshest
* object on the TAIL.
*/
INIT_LIST_HEAD(&unwind_list);
drm_mm_init_scan(&dev_priv->mm.gtt_space, min_size, alignment);
/* First see if there is a large enough contiguous idle region... */
list_for_each_entry(obj_priv, &dev_priv->mm.inactive_list, list) {
if (mark_free(obj_priv, &unwind_list))
goto found;
}
/* Now merge in the soon-to-be-expired objects... */
i915_for_each_active_object(obj_priv, &render_iter, &bsd_iter) {
/* Does the object require an outstanding flush? */
if (obj_priv->base.write_domain || obj_priv->pin_count)
continue;
if (mark_free(obj_priv, &unwind_list))
goto found;
}
/* Finally add anything with a pending flush (in order of retirement) */
list_for_each_entry(obj_priv, &dev_priv->mm.flushing_list, list) {
if (obj_priv->pin_count)
continue;
if (mark_free(obj_priv, &unwind_list))
goto found;
}
i915_for_each_active_object(obj_priv, &render_iter, &bsd_iter) {
if (!obj_priv->base.write_domain || obj_priv->pin_count)
continue;
if (mark_free(obj_priv, &unwind_list))
goto found;
}
/* Nothing found, clean up and bail out! */
list_for_each_entry(obj_priv, &unwind_list, evict_list) {
ret = drm_mm_scan_remove_block(obj_priv->gtt_space);
BUG_ON(ret);
}
/* We expect the caller to unpin, evict all and try again, or give up.
* So calling i915_gem_evict_everything() is unnecessary.
*/
return -ENOSPC;
found:
INIT_LIST_HEAD(&eviction_list);
list_for_each_entry_safe(obj_priv, tmp_obj_priv,
&unwind_list, evict_list) {
if (drm_mm_scan_remove_block(obj_priv->gtt_space)) {
/* drm_mm doesn't allow any other operations while
* scanning, therefore store the objects to be evicted on a
* temporary list. */
list_move(&obj_priv->evict_list, &eviction_list);
}
}
/* Unbinding will emit any required flushes */
list_for_each_entry_safe(obj_priv, tmp_obj_priv,
&eviction_list, evict_list) {
#if WATCH_LRU
DRM_INFO("%s: evicting %p\n", __func__, obj);
#endif
ret = i915_gem_object_unbind(&obj_priv->base);
if (ret)
return ret;
}
/* The just created free hole should be on the top of the free stack
* maintained by drm_mm, so this BUG_ON actually executes in O(1).
* Furthermore all accessed data has just recently been used, so it
* should be really fast, too. */
BUG_ON(!drm_mm_search_free(&dev_priv->mm.gtt_space, min_size,
alignment, 0));
return 0;
}
int
i915_gem_evict_everything(struct drm_device *dev)
{
drm_i915_private_t *dev_priv = dev->dev_private;
int ret;
bool lists_empty;
spin_lock(&dev_priv->mm.active_list_lock);
lists_empty = (list_empty(&dev_priv->mm.inactive_list) &&
list_empty(&dev_priv->mm.flushing_list) &&
list_empty(&dev_priv->render_ring.active_list) &&
(!HAS_BSD(dev)
|| list_empty(&dev_priv->bsd_ring.active_list)));
spin_unlock(&dev_priv->mm.active_list_lock);
if (lists_empty)
return -ENOSPC;
/* Flush everything (on to the inactive lists) and evict */
ret = i915_gpu_idle(dev);
if (ret)
return ret;
BUG_ON(!list_empty(&dev_priv->mm.flushing_list));
ret = i915_gem_evict_inactive(dev);
if (ret)
return ret;
spin_lock(&dev_priv->mm.active_list_lock);
lists_empty = (list_empty(&dev_priv->mm.inactive_list) &&
list_empty(&dev_priv->mm.flushing_list) &&
list_empty(&dev_priv->render_ring.active_list) &&
(!HAS_BSD(dev)
|| list_empty(&dev_priv->bsd_ring.active_list)));
spin_unlock(&dev_priv->mm.active_list_lock);
BUG_ON(!lists_empty);
return 0;
}
/** Unbinds all inactive objects. */
int
i915_gem_evict_inactive(struct drm_device *dev)
{
drm_i915_private_t *dev_priv = dev->dev_private;
while (!list_empty(&dev_priv->mm.inactive_list)) {
struct drm_gem_object *obj;
int ret;
obj = &list_first_entry(&dev_priv->mm.inactive_list,
struct drm_i915_gem_object,
list)->base;
ret = i915_gem_object_unbind(obj);
if (ret != 0) {
DRM_ERROR("Error unbinding object: %d\n", ret);
return ret;
}
}
return 0;
}
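For readers unfamiliar with the two-phase scan that i915_gem_evict_something() now performs, here is a minimal, self-contained sketch of the same idea outside the kernel: candidates are tentatively selected in LRU order (the role drm_mm_scan_add_block() plays above), the selection is unwound if it cannot satisfy the request, and only the confirmed victims are unbound afterwards. All names below (struct buf, unbind, evict_something) are illustrative stand-ins rather than driver code, and the sketch deliberately ignores the contiguity and alignment constraints drm_mm actually enforces.

#include <stddef.h>

struct buf {
	size_t size;
	int pinned;
	int selected;
	struct buf *next;
};

/* Stand-in for i915_gem_object_unbind(): emits flushes and frees space. */
static void unbind(struct buf *b)
{
	(void)b;
}

static int evict_something(struct buf *lru, size_t min_size)
{
	struct buf *b;
	size_t found = 0;

	/* Phase 1: tentatively mark unpinned buffers, oldest first, until
	 * enough space is covered. */
	for (b = lru; b && found < min_size; b = b->next) {
		if (b->pinned)
			continue;
		b->selected = 1;
		found += b->size;
	}

	/* Unwind: not enough space, so clear the marks and evict nothing. */
	if (found < min_size) {
		for (b = lru; b; b = b->next)
			b->selected = 0;
		return -1;	/* -ENOSPC in the real code */
	}

	/* Phase 2: unbind only the confirmed victims. */
	for (b = lru; b; b = b->next) {
		if (b->selected) {
			unbind(b);
			b->selected = 0;
		}
	}
	return 0;
}

int main(void)
{
	struct buf b2 = { 4096, 0, 0, NULL };
	struct buf b1 = { 4096, 1, 0, &b2 };	/* pinned, must be skipped */

	return evict_something(&b1, 4096);	/* evicts only b2 */
}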
......@@ -425,9 +425,11 @@ static struct drm_i915_error_object *
i915_error_object_create(struct drm_device *dev,
struct drm_gem_object *src)
{
drm_i915_private_t *dev_priv = dev->dev_private;
struct drm_i915_error_object *dst;
struct drm_i915_gem_object *src_priv;
int page, page_count;
u32 reloc_offset;
if (src == NULL)
return NULL;
......@@ -442,18 +444,27 @@ i915_error_object_create(struct drm_device *dev,
if (dst == NULL)
return NULL;
reloc_offset = src_priv->gtt_offset;
for (page = 0; page < page_count; page++) {
void *s, *d = kmalloc(PAGE_SIZE, GFP_ATOMIC);
unsigned long flags;
void __iomem *s;
void *d;
d = kmalloc(PAGE_SIZE, GFP_ATOMIC);
if (d == NULL)
goto unwind;
local_irq_save(flags);
s = kmap_atomic(src_priv->pages[page], KM_IRQ0);
memcpy(d, s, PAGE_SIZE);
kunmap_atomic(s, KM_IRQ0);
s = io_mapping_map_atomic_wc(dev_priv->mm.gtt_mapping,
reloc_offset,
KM_IRQ0);
memcpy_fromio(d, s, PAGE_SIZE);
io_mapping_unmap_atomic(s, KM_IRQ0);
local_irq_restore(flags);
dst->pages[page] = d;
reloc_offset += PAGE_SIZE;
}
dst->page_count = page_count;
dst->gtt_offset = src_priv->gtt_offset;
......@@ -489,6 +500,7 @@ i915_error_state_free(struct drm_device *dev,
i915_error_object_free(error->batchbuffer[1]);
i915_error_object_free(error->ringbuffer);
kfree(error->active_bo);
kfree(error->overlay);
kfree(error);
}
......@@ -612,18 +624,57 @@ static void i915_capture_error_state(struct drm_device *dev)
if (batchbuffer[1] == NULL &&
error->acthd >= obj_priv->gtt_offset &&
error->acthd < obj_priv->gtt_offset + obj->size &&
batchbuffer[0] != obj)
error->acthd < obj_priv->gtt_offset + obj->size)
batchbuffer[1] = obj;
count++;
}
/* Scan the other lists for completeness for those bizarre errors. */
if (batchbuffer[0] == NULL || batchbuffer[1] == NULL) {
list_for_each_entry(obj_priv, &dev_priv->mm.flushing_list, list) {
struct drm_gem_object *obj = &obj_priv->base;
if (batchbuffer[0] == NULL &&
bbaddr >= obj_priv->gtt_offset &&
bbaddr < obj_priv->gtt_offset + obj->size)
batchbuffer[0] = obj;
if (batchbuffer[1] == NULL &&
error->acthd >= obj_priv->gtt_offset &&
error->acthd < obj_priv->gtt_offset + obj->size)
batchbuffer[1] = obj;
if (batchbuffer[0] && batchbuffer[1])
break;
}
}
if (batchbuffer[0] == NULL || batchbuffer[1] == NULL) {
list_for_each_entry(obj_priv, &dev_priv->mm.inactive_list, list) {
struct drm_gem_object *obj = &obj_priv->base;
if (batchbuffer[0] == NULL &&
bbaddr >= obj_priv->gtt_offset &&
bbaddr < obj_priv->gtt_offset + obj->size)
batchbuffer[0] = obj;
if (batchbuffer[1] == NULL &&
error->acthd >= obj_priv->gtt_offset &&
error->acthd < obj_priv->gtt_offset + obj->size)
batchbuffer[1] = obj;
if (batchbuffer[0] && batchbuffer[1])
break;
}
}
/* We need to copy these to an anonymous buffer as the simplest
* method to avoid being overwritten by userspace.
*/
error->batchbuffer[0] = i915_error_object_create(dev, batchbuffer[0]);
error->batchbuffer[1] = i915_error_object_create(dev, batchbuffer[1]);
if (batchbuffer[1] != batchbuffer[0])
error->batchbuffer[1] = i915_error_object_create(dev, batchbuffer[1]);
else
error->batchbuffer[1] = NULL;
/* Record the ringbuffer */
error->ringbuffer = i915_error_object_create(dev,
......@@ -667,6 +718,8 @@ static void i915_capture_error_state(struct drm_device *dev)
do_gettimeofday(&error->time);
error->overlay = intel_overlay_capture_error_state(dev);
spin_lock_irqsave(&dev_priv->error_lock, flags);
if (dev_priv->first_error == NULL) {
dev_priv->first_error = error;
......@@ -1251,6 +1304,16 @@ void i915_hangcheck_elapsed(unsigned long data)
&dev_priv->render_ring),
i915_get_tail_request(dev)->seqno)) {
dev_priv->hangcheck_count = 0;
/* Issue a wake-up to catch stuck h/w. */
if (dev_priv->render_ring.waiting_gem_seqno |
dev_priv->bsd_ring.waiting_gem_seqno) {
DRM_ERROR("Hangcheck timer elapsed... GPU idle, missed IRQ.\n");
if (dev_priv->render_ring.waiting_gem_seqno)
DRM_WAKEUP(&dev_priv->render_ring.irq_queue);
if (dev_priv->bsd_ring.waiting_gem_seqno)
DRM_WAKEUP(&dev_priv->bsd_ring.irq_queue);
}
return;
}
......@@ -1318,12 +1381,17 @@ static int ironlake_irq_postinstall(struct drm_device *dev)
I915_WRITE(DEIER, dev_priv->de_irq_enable_reg);
(void) I915_READ(DEIER);
/* user interrupt should be enabled, but masked initially */
/* Gen6 only needs render pipe_control now */
if (IS_GEN6(dev))
render_mask = GT_PIPE_NOTIFY;
dev_priv->gt_irq_mask_reg = ~render_mask;
dev_priv->gt_irq_enable_reg = render_mask;
I915_WRITE(GTIIR, I915_READ(GTIIR));
I915_WRITE(GTIMR, dev_priv->gt_irq_mask_reg);
if (IS_GEN6(dev))
I915_WRITE(GEN6_RENDER_IMR, ~GEN6_RENDER_PIPE_CONTROL_NOTIFY_INTERRUPT);
I915_WRITE(GTIER, dev_priv->gt_irq_enable_reg);
(void) I915_READ(GTIER);
......
......@@ -114,10 +114,6 @@ struct opregion_asle {
#define ASLE_REQ_MSK 0xf
/* response bits of ASLE irq request */
#define ASLE_ALS_ILLUM_FAIL (2<<10)
#define ASLE_BACKLIGHT_FAIL (2<<12)
#define ASLE_PFIT_FAIL (2<<14)
#define ASLE_PWM_FREQ_FAIL (2<<16)
#define ASLE_ALS_ILLUM_FAILED (1<<10)
#define ASLE_BACKLIGHT_FAILED (1<<12)
#define ASLE_PFIT_FAILED (1<<14)
......@@ -155,11 +151,11 @@ static u32 asle_set_backlight(struct drm_device *dev, u32 bclp)
u32 max_backlight, level, shift;
if (!(bclp & ASLE_BCLP_VALID))
return ASLE_BACKLIGHT_FAIL;
return ASLE_BACKLIGHT_FAILED;
bclp &= ASLE_BCLP_MSK;
if (bclp < 0 || bclp > 255)
return ASLE_BACKLIGHT_FAIL;
return ASLE_BACKLIGHT_FAILED;
blc_pwm_ctl = I915_READ(BLC_PWM_CTL);
blc_pwm_ctl2 = I915_READ(BLC_PWM_CTL2);
......@@ -211,7 +207,7 @@ static u32 asle_set_pfit(struct drm_device *dev, u32 pfit)
/* Panel fitting is currently controlled by the X code, so this is a
noop until modesetting support works fully */
if (!(pfit & ASLE_PFIT_VALID))
return ASLE_PFIT_FAIL;
return ASLE_PFIT_FAILED;
return 0;
}
......
......@@ -170,6 +170,7 @@
#define MI_NO_WRITE_FLUSH (1 << 2)
#define MI_SCENE_COUNT (1 << 3) /* just increment scene count */
#define MI_END_SCENE (1 << 4) /* flush binner and incr scene count */
#define MI_INVALIDATE_ISP (1 << 5) /* invalidate indirect state pointers */
#define MI_BATCH_BUFFER_END MI_INSTR(0x0a, 0)
#define MI_REPORT_HEAD MI_INSTR(0x07, 0)
#define MI_OVERLAY_FLIP MI_INSTR(0x11,0)
......@@ -180,6 +181,12 @@
#define MI_DISPLAY_FLIP MI_INSTR(0x14, 2)
#define MI_DISPLAY_FLIP_I915 MI_INSTR(0x14, 1)
#define MI_DISPLAY_FLIP_PLANE(n) ((n) << 20)
#define MI_SET_CONTEXT MI_INSTR(0x18, 0)
#define MI_MM_SPACE_GTT (1<<8)
#define MI_MM_SPACE_PHYSICAL (0<<8)
#define MI_SAVE_EXT_STATE_EN (1<<3)
#define MI_RESTORE_EXT_STATE_EN (1<<2)
#define MI_RESTORE_INHIBIT (1<<0)
#define MI_STORE_DWORD_IMM MI_INSTR(0x20, 1)
#define MI_MEM_VIRTUAL (1 << 22) /* 965+ only */
#define MI_STORE_DWORD_INDEX MI_INSTR(0x21, 1)
......@@ -1099,6 +1106,11 @@
#define DDRMPLL1 0X12c20
#define PEG_BAND_GAP_DATA 0x14d68
/*
* Logical Context regs
*/
#define CCID 0x2180
#define CCID_EN (1<<0)
/*
* Overlay regs
*/
......@@ -2069,6 +2081,7 @@
#define PIPE_DITHER_TYPE_ST01 (1 << 2)
/* Pipe A */
#define PIPEADSL 0x70000
#define DSL_LINEMASK 0x00000fff
#define PIPEACONF 0x70008
#define PIPEACONF_ENABLE (1<<31)
#define PIPEACONF_DISABLE 0
......@@ -2928,6 +2941,7 @@
#define TRANS_DP_VSYNC_ACTIVE_LOW 0
#define TRANS_DP_HSYNC_ACTIVE_HIGH (1<<3)
#define TRANS_DP_HSYNC_ACTIVE_LOW 0
#define TRANS_DP_SYNC_MASK (3<<3)
/* SNB eDP training params */
/* SNB A-stepping */
......
......@@ -34,7 +34,7 @@ static bool i915_pipe_enabled(struct drm_device *dev, enum pipe pipe)
struct drm_i915_private *dev_priv = dev->dev_private;
u32 dpll_reg;
if (IS_IRONLAKE(dev)) {
if (HAS_PCH_SPLIT(dev)) {
dpll_reg = (pipe == PIPE_A) ? PCH_DPLL_A: PCH_DPLL_B;
} else {
dpll_reg = (pipe == PIPE_A) ? DPLL_A: DPLL_B;
......@@ -53,7 +53,7 @@ static void i915_save_palette(struct drm_device *dev, enum pipe pipe)
if (!i915_pipe_enabled(dev, pipe))
return;
if (IS_IRONLAKE(dev))
if (HAS_PCH_SPLIT(dev))
reg = (pipe == PIPE_A) ? LGC_PALETTE_A : LGC_PALETTE_B;
if (pipe == PIPE_A)
......@@ -75,7 +75,7 @@ static void i915_restore_palette(struct drm_device *dev, enum pipe pipe)
if (!i915_pipe_enabled(dev, pipe))
return;
if (IS_IRONLAKE(dev))
if (HAS_PCH_SPLIT(dev))
reg = (pipe == PIPE_A) ? LGC_PALETTE_A : LGC_PALETTE_B;
if (pipe == PIPE_A)
......@@ -239,7 +239,7 @@ static void i915_save_modeset_reg(struct drm_device *dev)
if (drm_core_check_feature(dev, DRIVER_MODESET))
return;
if (IS_IRONLAKE(dev)) {
if (HAS_PCH_SPLIT(dev)) {
dev_priv->savePCH_DREF_CONTROL = I915_READ(PCH_DREF_CONTROL);
dev_priv->saveDISP_ARB_CTL = I915_READ(DISP_ARB_CTL);
}
......@@ -247,7 +247,7 @@ static void i915_save_modeset_reg(struct drm_device *dev)
/* Pipe & plane A info */
dev_priv->savePIPEACONF = I915_READ(PIPEACONF);
dev_priv->savePIPEASRC = I915_READ(PIPEASRC);
if (IS_IRONLAKE(dev)) {
if (HAS_PCH_SPLIT(dev)) {
dev_priv->saveFPA0 = I915_READ(PCH_FPA0);
dev_priv->saveFPA1 = I915_READ(PCH_FPA1);
dev_priv->saveDPLL_A = I915_READ(PCH_DPLL_A);
......@@ -256,7 +256,7 @@ static void i915_save_modeset_reg(struct drm_device *dev)
dev_priv->saveFPA1 = I915_READ(FPA1);
dev_priv->saveDPLL_A = I915_READ(DPLL_A);
}
if (IS_I965G(dev) && !IS_IRONLAKE(dev))
if (IS_I965G(dev) && !HAS_PCH_SPLIT(dev))
dev_priv->saveDPLL_A_MD = I915_READ(DPLL_A_MD);
dev_priv->saveHTOTAL_A = I915_READ(HTOTAL_A);
dev_priv->saveHBLANK_A = I915_READ(HBLANK_A);
......@@ -264,10 +264,10 @@ static void i915_save_modeset_reg(struct drm_device *dev)
dev_priv->saveVTOTAL_A = I915_READ(VTOTAL_A);
dev_priv->saveVBLANK_A = I915_READ(VBLANK_A);
dev_priv->saveVSYNC_A = I915_READ(VSYNC_A);
if (!IS_IRONLAKE(dev))
if (!HAS_PCH_SPLIT(dev))
dev_priv->saveBCLRPAT_A = I915_READ(BCLRPAT_A);
if (IS_IRONLAKE(dev)) {
if (HAS_PCH_SPLIT(dev)) {
dev_priv->savePIPEA_DATA_M1 = I915_READ(PIPEA_DATA_M1);
dev_priv->savePIPEA_DATA_N1 = I915_READ(PIPEA_DATA_N1);
dev_priv->savePIPEA_LINK_M1 = I915_READ(PIPEA_LINK_M1);
......@@ -304,7 +304,7 @@ static void i915_save_modeset_reg(struct drm_device *dev)
/* Pipe & plane B info */
dev_priv->savePIPEBCONF = I915_READ(PIPEBCONF);
dev_priv->savePIPEBSRC = I915_READ(PIPEBSRC);
if (IS_IRONLAKE(dev)) {
if (HAS_PCH_SPLIT(dev)) {
dev_priv->saveFPB0 = I915_READ(PCH_FPB0);
dev_priv->saveFPB1 = I915_READ(PCH_FPB1);
dev_priv->saveDPLL_B = I915_READ(PCH_DPLL_B);
......@@ -313,7 +313,7 @@ static void i915_save_modeset_reg(struct drm_device *dev)
dev_priv->saveFPB1 = I915_READ(FPB1);
dev_priv->saveDPLL_B = I915_READ(DPLL_B);
}
if (IS_I965G(dev) && !IS_IRONLAKE(dev))
if (IS_I965G(dev) && !HAS_PCH_SPLIT(dev))
dev_priv->saveDPLL_B_MD = I915_READ(DPLL_B_MD);
dev_priv->saveHTOTAL_B = I915_READ(HTOTAL_B);
dev_priv->saveHBLANK_B = I915_READ(HBLANK_B);
......@@ -321,10 +321,10 @@ static void i915_save_modeset_reg(struct drm_device *dev)
dev_priv->saveVTOTAL_B = I915_READ(VTOTAL_B);
dev_priv->saveVBLANK_B = I915_READ(VBLANK_B);
dev_priv->saveVSYNC_B = I915_READ(VSYNC_B);
if (!IS_IRONLAKE(dev))
if (!HAS_PCH_SPLIT(dev))
dev_priv->saveBCLRPAT_B = I915_READ(BCLRPAT_B);
if (IS_IRONLAKE(dev)) {
if (HAS_PCH_SPLIT(dev)) {
dev_priv->savePIPEB_DATA_M1 = I915_READ(PIPEB_DATA_M1);
dev_priv->savePIPEB_DATA_N1 = I915_READ(PIPEB_DATA_N1);
dev_priv->savePIPEB_LINK_M1 = I915_READ(PIPEB_LINK_M1);
......@@ -369,7 +369,7 @@ static void i915_restore_modeset_reg(struct drm_device *dev)
if (drm_core_check_feature(dev, DRIVER_MODESET))
return;
if (IS_IRONLAKE(dev)) {
if (HAS_PCH_SPLIT(dev)) {
dpll_a_reg = PCH_DPLL_A;
dpll_b_reg = PCH_DPLL_B;
fpa0_reg = PCH_FPA0;
......@@ -385,7 +385,7 @@ static void i915_restore_modeset_reg(struct drm_device *dev)
fpb1_reg = FPB1;
}
if (IS_IRONLAKE(dev)) {
if (HAS_PCH_SPLIT(dev)) {
I915_WRITE(PCH_DREF_CONTROL, dev_priv->savePCH_DREF_CONTROL);
I915_WRITE(DISP_ARB_CTL, dev_priv->saveDISP_ARB_CTL);
}
......@@ -395,16 +395,20 @@ static void i915_restore_modeset_reg(struct drm_device *dev)
if (dev_priv->saveDPLL_A & DPLL_VCO_ENABLE) {
I915_WRITE(dpll_a_reg, dev_priv->saveDPLL_A &
~DPLL_VCO_ENABLE);
DRM_UDELAY(150);
POSTING_READ(dpll_a_reg);
udelay(150);
}
I915_WRITE(fpa0_reg, dev_priv->saveFPA0);
I915_WRITE(fpa1_reg, dev_priv->saveFPA1);
/* Actually enable it */
I915_WRITE(dpll_a_reg, dev_priv->saveDPLL_A);
DRM_UDELAY(150);
if (IS_I965G(dev) && !IS_IRONLAKE(dev))
POSTING_READ(dpll_a_reg);
udelay(150);
if (IS_I965G(dev) && !HAS_PCH_SPLIT(dev)) {
I915_WRITE(DPLL_A_MD, dev_priv->saveDPLL_A_MD);
DRM_UDELAY(150);
POSTING_READ(DPLL_A_MD);
}
udelay(150);
/* Restore mode */
I915_WRITE(HTOTAL_A, dev_priv->saveHTOTAL_A);
......@@ -413,10 +417,10 @@ static void i915_restore_modeset_reg(struct drm_device *dev)
I915_WRITE(VTOTAL_A, dev_priv->saveVTOTAL_A);
I915_WRITE(VBLANK_A, dev_priv->saveVBLANK_A);
I915_WRITE(VSYNC_A, dev_priv->saveVSYNC_A);
if (!IS_IRONLAKE(dev))
if (!HAS_PCH_SPLIT(dev))
I915_WRITE(BCLRPAT_A, dev_priv->saveBCLRPAT_A);
if (IS_IRONLAKE(dev)) {
if (HAS_PCH_SPLIT(dev)) {
I915_WRITE(PIPEA_DATA_M1, dev_priv->savePIPEA_DATA_M1);
I915_WRITE(PIPEA_DATA_N1, dev_priv->savePIPEA_DATA_N1);
I915_WRITE(PIPEA_LINK_M1, dev_priv->savePIPEA_LINK_M1);
......@@ -460,16 +464,20 @@ static void i915_restore_modeset_reg(struct drm_device *dev)
if (dev_priv->saveDPLL_B & DPLL_VCO_ENABLE) {
I915_WRITE(dpll_b_reg, dev_priv->saveDPLL_B &
~DPLL_VCO_ENABLE);
DRM_UDELAY(150);
POSTING_READ(dpll_b_reg);
udelay(150);
}
I915_WRITE(fpb0_reg, dev_priv->saveFPB0);
I915_WRITE(fpb1_reg, dev_priv->saveFPB1);
/* Actually enable it */
I915_WRITE(dpll_b_reg, dev_priv->saveDPLL_B);
DRM_UDELAY(150);
if (IS_I965G(dev) && !IS_IRONLAKE(dev))
POSTING_READ(dpll_b_reg);
udelay(150);
if (IS_I965G(dev) && !HAS_PCH_SPLIT(dev)) {
I915_WRITE(DPLL_B_MD, dev_priv->saveDPLL_B_MD);
DRM_UDELAY(150);
POSTING_READ(DPLL_B_MD);
}
udelay(150);
/* Restore mode */
I915_WRITE(HTOTAL_B, dev_priv->saveHTOTAL_B);
......@@ -478,10 +486,10 @@ static void i915_restore_modeset_reg(struct drm_device *dev)
I915_WRITE(VTOTAL_B, dev_priv->saveVTOTAL_B);
I915_WRITE(VBLANK_B, dev_priv->saveVBLANK_B);
I915_WRITE(VSYNC_B, dev_priv->saveVSYNC_B);
if (!IS_IRONLAKE(dev))
if (!HAS_PCH_SPLIT(dev))
I915_WRITE(BCLRPAT_B, dev_priv->saveBCLRPAT_B);
if (IS_IRONLAKE(dev)) {
if (HAS_PCH_SPLIT(dev)) {
I915_WRITE(PIPEB_DATA_M1, dev_priv->savePIPEB_DATA_M1);
I915_WRITE(PIPEB_DATA_N1, dev_priv->savePIPEB_DATA_N1);
I915_WRITE(PIPEB_LINK_M1, dev_priv->savePIPEB_LINK_M1);
......@@ -546,14 +554,14 @@ void i915_save_display(struct drm_device *dev)
dev_priv->saveCURSIZE = I915_READ(CURSIZE);
/* CRT state */
if (IS_IRONLAKE(dev)) {
if (HAS_PCH_SPLIT(dev)) {
dev_priv->saveADPA = I915_READ(PCH_ADPA);
} else {
dev_priv->saveADPA = I915_READ(ADPA);
}
/* LVDS state */
if (IS_IRONLAKE(dev)) {
if (HAS_PCH_SPLIT(dev)) {
dev_priv->savePP_CONTROL = I915_READ(PCH_PP_CONTROL);
dev_priv->saveBLC_PWM_CTL = I915_READ(BLC_PWM_PCH_CTL1);
dev_priv->saveBLC_PWM_CTL2 = I915_READ(BLC_PWM_PCH_CTL2);
......@@ -571,10 +579,10 @@ void i915_save_display(struct drm_device *dev)
dev_priv->saveLVDS = I915_READ(LVDS);
}
if (!IS_I830(dev) && !IS_845G(dev) && !IS_IRONLAKE(dev))
if (!IS_I830(dev) && !IS_845G(dev) && !HAS_PCH_SPLIT(dev))
dev_priv->savePFIT_CONTROL = I915_READ(PFIT_CONTROL);
if (IS_IRONLAKE(dev)) {
if (HAS_PCH_SPLIT(dev)) {
dev_priv->savePP_ON_DELAYS = I915_READ(PCH_PP_ON_DELAYS);
dev_priv->savePP_OFF_DELAYS = I915_READ(PCH_PP_OFF_DELAYS);
dev_priv->savePP_DIVISOR = I915_READ(PCH_PP_DIVISOR);
......@@ -602,7 +610,7 @@ void i915_save_display(struct drm_device *dev)
/* Only save FBC state on the platform that supports FBC */
if (I915_HAS_FBC(dev)) {
if (IS_IRONLAKE_M(dev)) {
if (HAS_PCH_SPLIT(dev)) {
dev_priv->saveDPFC_CB_BASE = I915_READ(ILK_DPFC_CB_BASE);
} else if (IS_GM45(dev)) {
dev_priv->saveDPFC_CB_BASE = I915_READ(DPFC_CB_BASE);
......@@ -618,7 +626,7 @@ void i915_save_display(struct drm_device *dev)
dev_priv->saveVGA0 = I915_READ(VGA0);
dev_priv->saveVGA1 = I915_READ(VGA1);
dev_priv->saveVGA_PD = I915_READ(VGA_PD);
if (IS_IRONLAKE(dev))
if (HAS_PCH_SPLIT(dev))
dev_priv->saveVGACNTRL = I915_READ(CPU_VGACNTRL);
else
dev_priv->saveVGACNTRL = I915_READ(VGACNTRL);
......@@ -660,24 +668,24 @@ void i915_restore_display(struct drm_device *dev)
I915_WRITE(CURSIZE, dev_priv->saveCURSIZE);
/* CRT state */
if (IS_IRONLAKE(dev))
if (HAS_PCH_SPLIT(dev))
I915_WRITE(PCH_ADPA, dev_priv->saveADPA);
else
I915_WRITE(ADPA, dev_priv->saveADPA);
/* LVDS state */
if (IS_I965G(dev) && !IS_IRONLAKE(dev))
if (IS_I965G(dev) && !HAS_PCH_SPLIT(dev))
I915_WRITE(BLC_PWM_CTL2, dev_priv->saveBLC_PWM_CTL2);
if (IS_IRONLAKE(dev)) {
if (HAS_PCH_SPLIT(dev)) {
I915_WRITE(PCH_LVDS, dev_priv->saveLVDS);
} else if (IS_MOBILE(dev) && !IS_I830(dev))
I915_WRITE(LVDS, dev_priv->saveLVDS);
if (!IS_I830(dev) && !IS_845G(dev) && !IS_IRONLAKE(dev))
if (!IS_I830(dev) && !IS_845G(dev) && !HAS_PCH_SPLIT(dev))
I915_WRITE(PFIT_CONTROL, dev_priv->savePFIT_CONTROL);
if (IS_IRONLAKE(dev)) {
if (HAS_PCH_SPLIT(dev)) {
I915_WRITE(BLC_PWM_PCH_CTL1, dev_priv->saveBLC_PWM_CTL);
I915_WRITE(BLC_PWM_PCH_CTL2, dev_priv->saveBLC_PWM_CTL2);
I915_WRITE(BLC_PWM_CPU_CTL, dev_priv->saveBLC_CPU_PWM_CTL);
......@@ -708,7 +716,7 @@ void i915_restore_display(struct drm_device *dev)
/* only restore FBC info on the platform that supports FBC*/
if (I915_HAS_FBC(dev)) {
if (IS_IRONLAKE_M(dev)) {
if (HAS_PCH_SPLIT(dev)) {
ironlake_disable_fbc(dev);
I915_WRITE(ILK_DPFC_CB_BASE, dev_priv->saveDPFC_CB_BASE);
} else if (IS_GM45(dev)) {
......@@ -723,14 +731,15 @@ void i915_restore_display(struct drm_device *dev)
}
}
/* VGA state */
if (IS_IRONLAKE(dev))
if (HAS_PCH_SPLIT(dev))
I915_WRITE(CPU_VGACNTRL, dev_priv->saveVGACNTRL);
else
I915_WRITE(VGACNTRL, dev_priv->saveVGACNTRL);
I915_WRITE(VGA0, dev_priv->saveVGA0);
I915_WRITE(VGA1, dev_priv->saveVGA1);
I915_WRITE(VGA_PD, dev_priv->saveVGA_PD);
DRM_UDELAY(150);
POSTING_READ(VGA_PD);
udelay(150);
i915_restore_vga(dev);
}
......@@ -748,7 +757,7 @@ int i915_save_state(struct drm_device *dev)
i915_save_display(dev);
/* Interrupt state */
if (IS_IRONLAKE(dev)) {
if (HAS_PCH_SPLIT(dev)) {
dev_priv->saveDEIER = I915_READ(DEIER);
dev_priv->saveDEIMR = I915_READ(DEIMR);
dev_priv->saveGTIER = I915_READ(GTIER);
......@@ -762,7 +771,7 @@ int i915_save_state(struct drm_device *dev)
dev_priv->saveIMR = I915_READ(IMR);
}
if (IS_IRONLAKE_M(dev))
if (HAS_PCH_SPLIT(dev))
ironlake_disable_drps(dev);
/* Cache mode state */
......@@ -820,7 +829,7 @@ int i915_restore_state(struct drm_device *dev)
i915_restore_display(dev);
/* Interrupt state */
if (IS_IRONLAKE(dev)) {
if (HAS_PCH_SPLIT(dev)) {
I915_WRITE(DEIER, dev_priv->saveDEIER);
I915_WRITE(DEIMR, dev_priv->saveDEIMR);
I915_WRITE(GTIER, dev_priv->saveGTIER);
......@@ -835,7 +844,7 @@ int i915_restore_state(struct drm_device *dev)
/* Clock gating state */
intel_init_clock_gating(dev);
if (IS_IRONLAKE_M(dev))
if (HAS_PCH_SPLIT(dev))
ironlake_enable_drps(dev);
/* Cache mode state */
......
......@@ -160,19 +160,20 @@ static bool intel_ironlake_crt_detect_hotplug(struct drm_connector *connector)
struct drm_i915_private *dev_priv = dev->dev_private;
u32 adpa, temp;
bool ret;
bool turn_off_dac = false;
temp = adpa = I915_READ(PCH_ADPA);
if (HAS_PCH_CPT(dev)) {
/* Disable DAC before force detect */
I915_WRITE(PCH_ADPA, adpa & ~ADPA_DAC_ENABLE);
(void)I915_READ(PCH_ADPA);
} else {
adpa &= ~ADPA_CRT_HOTPLUG_MASK;
/* disable HPD first */
I915_WRITE(PCH_ADPA, adpa);
(void)I915_READ(PCH_ADPA);
}
if (HAS_PCH_SPLIT(dev))
turn_off_dac = true;
adpa &= ~ADPA_CRT_HOTPLUG_MASK;
if (turn_off_dac)
adpa &= ~ADPA_DAC_ENABLE;
/* disable HPD first */
I915_WRITE(PCH_ADPA, adpa);
(void)I915_READ(PCH_ADPA);
adpa |= (ADPA_CRT_HOTPLUG_PERIOD_128 |
ADPA_CRT_HOTPLUG_WARMUP_10MS |
......@@ -185,10 +186,11 @@ static bool intel_ironlake_crt_detect_hotplug(struct drm_connector *connector)
DRM_DEBUG_KMS("pch crt adpa 0x%x", adpa);
I915_WRITE(PCH_ADPA, adpa);
while ((I915_READ(PCH_ADPA) & ADPA_CRT_HOTPLUG_FORCE_TRIGGER) != 0)
;
if (wait_for((I915_READ(PCH_ADPA) & ADPA_CRT_HOTPLUG_FORCE_TRIGGER) == 0,
1000, 1))
DRM_ERROR("timed out waiting for FORCE_TRIGGER");
if (HAS_PCH_CPT(dev)) {
if (turn_off_dac) {
I915_WRITE(PCH_ADPA, temp);
(void)I915_READ(PCH_ADPA);
}
......@@ -237,17 +239,13 @@ static bool intel_crt_detect_hotplug(struct drm_connector *connector)
hotplug_en |= CRT_HOTPLUG_FORCE_DETECT;
for (i = 0; i < tries ; i++) {
unsigned long timeout;
/* turn on the FORCE_DETECT */
I915_WRITE(PORT_HOTPLUG_EN, hotplug_en);
timeout = jiffies + msecs_to_jiffies(1000);
/* wait for FORCE_DETECT to go off */
do {
if (!(I915_READ(PORT_HOTPLUG_EN) &
CRT_HOTPLUG_FORCE_DETECT))
break;
msleep(1);
} while (time_after(timeout, jiffies));
if (wait_for((I915_READ(PORT_HOTPLUG_EN) &
CRT_HOTPLUG_FORCE_DETECT) == 0,
1000, 1))
DRM_ERROR("timed out waiting for FORCE_DETECT to go off");
}
stat = I915_READ(PORT_HOTPLUG_STAT);
......@@ -331,7 +329,7 @@ intel_crt_load_detect(struct drm_crtc *crtc, struct intel_encoder *intel_encoder
I915_WRITE(pipeconf_reg, pipeconf | PIPECONF_FORCE_BORDER);
/* Wait for next vblank to substitute
* border color for Color info */
intel_wait_for_vblank(dev);
intel_wait_for_vblank(dev, pipe);
st00 = I915_READ8(VGA_MSR_WRITE);
status = ((st00 & (1 << 4)) != 0) ?
connector_status_connected :
......@@ -508,17 +506,8 @@ static const struct drm_connector_helper_funcs intel_crt_connector_helper_funcs
.best_encoder = intel_attached_encoder,
};
static void intel_crt_enc_destroy(struct drm_encoder *encoder)
{
struct intel_encoder *intel_encoder = enc_to_intel_encoder(encoder);
intel_i2c_destroy(intel_encoder->ddc_bus);
drm_encoder_cleanup(encoder);
kfree(intel_encoder);
}
static const struct drm_encoder_funcs intel_crt_enc_funcs = {
.destroy = intel_crt_enc_destroy,
.destroy = intel_encoder_destroy,
};
void intel_crt_init(struct drm_device *dev)
......
......@@ -29,6 +29,7 @@
#include <linux/i2c.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/vgaarb.h>
#include "drmP.h"
#include "intel_drv.h"
#include "i915_drm.h"
......@@ -976,14 +977,54 @@ intel_find_pll_g4x_dp(const intel_limit_t *limit, struct drm_crtc *crtc,
return true;
}
void
intel_wait_for_vblank(struct drm_device *dev)
/**
* intel_wait_for_vblank - wait for vblank on a given pipe
* @dev: drm device
* @pipe: pipe to wait for
*
* Wait for vblank to occur on a given pipe. Needed for various bits of
* mode setting code.
*/
void intel_wait_for_vblank(struct drm_device *dev, int pipe)
{
/* Wait for 20ms, i.e. one cycle at 50hz. */
if (in_dbg_master())
mdelay(20); /* The kernel debugger cannot call msleep() */
else
msleep(20);
struct drm_i915_private *dev_priv = dev->dev_private;
int pipestat_reg = (pipe == 0 ? PIPEASTAT : PIPEBSTAT);
/* Wait for vblank interrupt bit to set */
if (wait_for((I915_READ(pipestat_reg) &
PIPE_VBLANK_INTERRUPT_STATUS) == 0,
50, 0))
DRM_DEBUG_KMS("vblank wait timed out\n");
}
/**
* intel_wait_for_vblank_off - wait for vblank after disabling a pipe
* @dev: drm device
* @pipe: pipe to wait for
*
* After disabling a pipe, we can't wait for vblank in the usual way,
* spinning on the vblank interrupt status bit, since we won't actually
* see an interrupt when the pipe is disabled.
*
* So this function waits for the display line value to settle (it
* usually ends up stopping at the start of the next frame).
*/
void intel_wait_for_vblank_off(struct drm_device *dev, int pipe)
{
struct drm_i915_private *dev_priv = dev->dev_private;
int pipedsl_reg = (pipe == 0 ? PIPEADSL : PIPEBDSL);
unsigned long timeout = jiffies + msecs_to_jiffies(100);
u32 last_line;
/* Wait for the display line to settle */
do {
last_line = I915_READ(pipedsl_reg) & DSL_LINEMASK;
mdelay(5);
} while (((I915_READ(pipedsl_reg) & DSL_LINEMASK) != last_line) &&
time_after(timeout, jiffies));
if (time_after(jiffies, timeout))
DRM_DEBUG_KMS("vblank wait timed out\n");
}
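The two helpers above are used in complementary ways through the rest of this diff: enable and mode-set paths keep calling intel_wait_for_vblank(dev, pipe), while the disable paths switch to intel_wait_for_vblank_off(dev, pipe), because a disabled pipe no longer reports vblank interrupt status. A condensed, illustrative sketch of that call pattern (the wrapper below is hypothetical; only the two waits come from this commit):

/* Sketch only, not part of this commit: which wait to use on either
 * side of a pipe enable/disable transition. */
static void example_pipe_wait(struct drm_device *dev, int pipe, bool enabled)
{
	if (enabled)
		intel_wait_for_vblank(dev, pipe);	/* vblank still fires */
	else
		intel_wait_for_vblank_off(dev, pipe);	/* scanline settles */
}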
/* Parameters have changed, update FBC info */
......@@ -1037,7 +1078,6 @@ static void i8xx_enable_fbc(struct drm_crtc *crtc, unsigned long interval)
void i8xx_disable_fbc(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
unsigned long timeout = jiffies + msecs_to_jiffies(1);
u32 fbc_ctl;
if (!I915_HAS_FBC(dev))
......@@ -1052,16 +1092,11 @@ void i8xx_disable_fbc(struct drm_device *dev)
I915_WRITE(FBC_CONTROL, fbc_ctl);
/* Wait for compressing bit to clear */
while (I915_READ(FBC_STATUS) & FBC_STAT_COMPRESSING) {
if (time_after(jiffies, timeout)) {
DRM_DEBUG_DRIVER("FBC idle timed out\n");
break;
}
; /* do nothing */
if (wait_for((I915_READ(FBC_STATUS) & FBC_STAT_COMPRESSING) == 0, 10, 0)) {
DRM_DEBUG_KMS("FBC idle timed out\n");
return;
}
intel_wait_for_vblank(dev);
DRM_DEBUG_KMS("disabled FBC\n");
}
......@@ -1118,7 +1153,6 @@ void g4x_disable_fbc(struct drm_device *dev)
dpfc_ctl = I915_READ(DPFC_CONTROL);
dpfc_ctl &= ~DPFC_CTL_EN;
I915_WRITE(DPFC_CONTROL, dpfc_ctl);
intel_wait_for_vblank(dev);
DRM_DEBUG_KMS("disabled FBC\n");
}
......@@ -1179,7 +1213,6 @@ void ironlake_disable_fbc(struct drm_device *dev)
dpfc_ctl = I915_READ(ILK_DPFC_CONTROL);
dpfc_ctl &= ~DPFC_CTL_EN;
I915_WRITE(ILK_DPFC_CONTROL, dpfc_ctl);
intel_wait_for_vblank(dev);
DRM_DEBUG_KMS("disabled FBC\n");
}
......@@ -1478,7 +1511,7 @@ intel_pipe_set_base_atomic(struct drm_crtc *crtc, struct drm_framebuffer *fb,
if ((IS_I965G(dev) || plane == 0))
intel_update_fbc(crtc, &crtc->mode);
intel_wait_for_vblank(dev);
intel_wait_for_vblank(dev, intel_crtc->pipe);
intel_increase_pllclock(crtc, true);
return 0;
......@@ -1585,20 +1618,18 @@ intel_pipe_set_base(struct drm_crtc *crtc, int x, int y,
Start, Offset, x, y, crtc->fb->pitch);
I915_WRITE(dspstride, crtc->fb->pitch);
if (IS_I965G(dev)) {
I915_WRITE(dspbase, Offset);
I915_READ(dspbase);
I915_WRITE(dspsurf, Start);
I915_READ(dspsurf);
I915_WRITE(dsptileoff, (y << 16) | x);
I915_WRITE(dspbase, Offset);
} else {
I915_WRITE(dspbase, Start + Offset);
I915_READ(dspbase);
}
POSTING_READ(dspbase);
if ((IS_I965G(dev) || plane == 0))
intel_update_fbc(crtc, &crtc->mode);
intel_wait_for_vblank(dev);
intel_wait_for_vblank(dev, pipe);
if (old_fb) {
intel_fb = to_intel_framebuffer(old_fb);
......@@ -1627,54 +1658,6 @@ intel_pipe_set_base(struct drm_crtc *crtc, int x, int y,
return 0;
}
/* Disable the VGA plane that we never use */
static void i915_disable_vga (struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
u8 sr1;
u32 vga_reg;
if (HAS_PCH_SPLIT(dev))
vga_reg = CPU_VGACNTRL;
else
vga_reg = VGACNTRL;
if (I915_READ(vga_reg) & VGA_DISP_DISABLE)
return;
I915_WRITE8(VGA_SR_INDEX, 1);
sr1 = I915_READ8(VGA_SR_DATA);
I915_WRITE8(VGA_SR_DATA, sr1 | (1 << 5));
udelay(100);
I915_WRITE(vga_reg, VGA_DISP_DISABLE);
}
static void ironlake_disable_pll_edp (struct drm_crtc *crtc)
{
struct drm_device *dev = crtc->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
u32 dpa_ctl;
DRM_DEBUG_KMS("\n");
dpa_ctl = I915_READ(DP_A);
dpa_ctl &= ~DP_PLL_ENABLE;
I915_WRITE(DP_A, dpa_ctl);
}
static void ironlake_enable_pll_edp (struct drm_crtc *crtc)
{
struct drm_device *dev = crtc->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
u32 dpa_ctl;
dpa_ctl = I915_READ(DP_A);
dpa_ctl |= DP_PLL_ENABLE;
I915_WRITE(DP_A, dpa_ctl);
udelay(200);
}
static void ironlake_set_pll_edp (struct drm_crtc *crtc, int clock)
{
struct drm_device *dev = crtc->dev;
......@@ -1945,7 +1928,6 @@ static void ironlake_crtc_dpms(struct drm_crtc *crtc, int mode)
int trans_vsync_reg = (pipe == 0) ? TRANS_VSYNC_A : TRANS_VSYNC_B;
int trans_dpll_sel = (pipe == 0) ? 0 : 1;
u32 temp;
int n;
u32 pipe_bpc;
temp = I915_READ(pipeconf_reg);
......@@ -1958,7 +1940,7 @@ static void ironlake_crtc_dpms(struct drm_crtc *crtc, int mode)
case DRM_MODE_DPMS_ON:
case DRM_MODE_DPMS_STANDBY:
case DRM_MODE_DPMS_SUSPEND:
DRM_DEBUG_KMS("crtc %d dpms on\n", pipe);
DRM_DEBUG_KMS("crtc %d/%d dpms on\n", pipe, plane);
if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS)) {
temp = I915_READ(PCH_LVDS);
......@@ -1968,10 +1950,7 @@ static void ironlake_crtc_dpms(struct drm_crtc *crtc, int mode)
}
}
if (HAS_eDP) {
/* enable eDP PLL */
ironlake_enable_pll_edp(crtc);
} else {
if (!HAS_eDP) {
/* enable PCH FDI RX PLL, wait warmup plus DMI latency */
temp = I915_READ(fdi_rx_reg);
......@@ -2005,15 +1984,13 @@ static void ironlake_crtc_dpms(struct drm_crtc *crtc, int mode)
/* Enable panel fitting for LVDS */
if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS)
|| HAS_eDP || intel_pch_has_edp(crtc)) {
temp = I915_READ(pf_ctl_reg);
I915_WRITE(pf_ctl_reg, temp | PF_ENABLE | PF_FILTER_MED_3x3);
/* currently full aspect */
I915_WRITE(pf_win_pos, 0);
I915_WRITE(pf_win_size,
(dev_priv->panel_fixed_mode->hdisplay << 16) |
(dev_priv->panel_fixed_mode->vdisplay));
if (dev_priv->pch_pf_size) {
temp = I915_READ(pf_ctl_reg);
I915_WRITE(pf_ctl_reg, temp | PF_ENABLE | PF_FILTER_MED_3x3);
I915_WRITE(pf_win_pos, dev_priv->pch_pf_pos);
I915_WRITE(pf_win_size, dev_priv->pch_pf_size);
} else
I915_WRITE(pf_ctl_reg, temp & ~PF_ENABLE);
}
/* Enable CPU pipe */
......@@ -2097,9 +2074,10 @@ static void ironlake_crtc_dpms(struct drm_crtc *crtc, int mode)
int reg;
reg = I915_READ(trans_dp_ctl);
reg &= ~TRANS_DP_PORT_SEL_MASK;
reg = TRANS_DP_OUTPUT_ENABLE |
TRANS_DP_ENH_FRAMING;
reg &= ~(TRANS_DP_PORT_SEL_MASK |
TRANS_DP_SYNC_MASK);
reg |= (TRANS_DP_OUTPUT_ENABLE |
TRANS_DP_ENH_FRAMING);
if (crtc->mode.flags & DRM_MODE_FLAG_PHSYNC)
reg |= TRANS_DP_HSYNC_ACTIVE_HIGH;
......@@ -2137,18 +2115,17 @@ static void ironlake_crtc_dpms(struct drm_crtc *crtc, int mode)
I915_WRITE(transconf_reg, temp | TRANS_ENABLE);
I915_READ(transconf_reg);
while ((I915_READ(transconf_reg) & TRANS_STATE_ENABLE) == 0)
;
if (wait_for(I915_READ(transconf_reg) & TRANS_STATE_ENABLE, 10, 0))
DRM_ERROR("failed to enable transcoder\n");
}
intel_crtc_load_lut(crtc);
intel_update_fbc(crtc, &crtc->mode);
break;
break;
case DRM_MODE_DPMS_OFF:
DRM_DEBUG_KMS("crtc %d dpms off\n", pipe);
DRM_DEBUG_KMS("crtc %d/%d dpms off\n", pipe, plane);
drm_vblank_off(dev, pipe);
/* Disable display plane */
......@@ -2164,26 +2141,14 @@ static void ironlake_crtc_dpms(struct drm_crtc *crtc, int mode)
dev_priv->display.disable_fbc)
dev_priv->display.disable_fbc(dev);
i915_disable_vga(dev);
/* disable cpu pipe, disable after all planes disabled */
temp = I915_READ(pipeconf_reg);
if ((temp & PIPEACONF_ENABLE) != 0) {
I915_WRITE(pipeconf_reg, temp & ~PIPEACONF_ENABLE);
I915_READ(pipeconf_reg);
n = 0;
/* wait for cpu pipe off, pipe state */
while ((I915_READ(pipeconf_reg) & I965_PIPECONF_ACTIVE) != 0) {
n++;
if (n < 60) {
udelay(500);
continue;
} else {
DRM_DEBUG_KMS("pipe %d off delay\n",
pipe);
break;
}
}
if (wait_for((I915_READ(pipeconf_reg) & I965_PIPECONF_ACTIVE) == 0, 50, 1))
DRM_ERROR("failed to turn off cpu pipe\n");
} else
DRM_DEBUG_KMS("crtc %d is disabled\n", pipe);
......@@ -2244,20 +2209,10 @@ static void ironlake_crtc_dpms(struct drm_crtc *crtc, int mode)
temp = I915_READ(transconf_reg);
if ((temp & TRANS_ENABLE) != 0) {
I915_WRITE(transconf_reg, temp & ~TRANS_ENABLE);
I915_READ(transconf_reg);
n = 0;
/* wait for PCH transcoder off, transcoder state */
while ((I915_READ(transconf_reg) & TRANS_STATE_ENABLE) != 0) {
n++;
if (n < 60) {
udelay(500);
continue;
} else {
DRM_DEBUG_KMS("transcoder %d off "
"delay\n", pipe);
break;
}
}
if (wait_for((I915_READ(transconf_reg) & TRANS_STATE_ENABLE) == 0, 50, 1))
DRM_ERROR("failed to disable transcoder\n");
}
temp = I915_READ(transconf_reg);
......@@ -2294,10 +2249,6 @@ static void ironlake_crtc_dpms(struct drm_crtc *crtc, int mode)
I915_WRITE(pch_dpll_reg, temp & ~DPLL_VCO_ENABLE);
I915_READ(pch_dpll_reg);
if (HAS_eDP) {
ironlake_disable_pll_edp(crtc);
}
/* Switch from PCDclk to Rawclk */
temp = I915_READ(fdi_rx_reg);
temp &= ~FDI_SEL_PCDCLK;
......@@ -2372,8 +2323,6 @@ static void i9xx_crtc_dpms(struct drm_crtc *crtc, int mode)
case DRM_MODE_DPMS_ON:
case DRM_MODE_DPMS_STANDBY:
case DRM_MODE_DPMS_SUSPEND:
intel_update_watermarks(dev);
/* Enable the DPLL */
temp = I915_READ(dpll_reg);
if ((temp & DPLL_VCO_ENABLE) == 0) {
......@@ -2413,8 +2362,6 @@ static void i9xx_crtc_dpms(struct drm_crtc *crtc, int mode)
intel_crtc_dpms_overlay(intel_crtc, true);
break;
case DRM_MODE_DPMS_OFF:
intel_update_watermarks(dev);
/* Give the overlay scaler a chance to disable if it's on this pipe */
intel_crtc_dpms_overlay(intel_crtc, false);
drm_vblank_off(dev, pipe);
......@@ -2423,9 +2370,6 @@ static void i9xx_crtc_dpms(struct drm_crtc *crtc, int mode)
dev_priv->display.disable_fbc)
dev_priv->display.disable_fbc(dev);
/* Disable the VGA plane that we never use */
i915_disable_vga(dev);
/* Disable display plane */
temp = I915_READ(dspcntr_reg);
if ((temp & DISPLAY_PLANE_ENABLE) != 0) {
......@@ -2435,10 +2379,8 @@ static void i9xx_crtc_dpms(struct drm_crtc *crtc, int mode)
I915_READ(dspbase_reg);
}
if (!IS_I9XX(dev)) {
/* Wait for vblank for the disable to take effect */
intel_wait_for_vblank(dev);
}
/* Wait for vblank for the disable to take effect */
intel_wait_for_vblank_off(dev, pipe);
/* Don't disable pipe A or pipe A PLLs if needed */
if (pipeconf_reg == PIPEACONF &&
......@@ -2453,7 +2395,7 @@ static void i9xx_crtc_dpms(struct drm_crtc *crtc, int mode)
}
/* Wait for vblank for the disable to take effect. */
intel_wait_for_vblank(dev);
intel_wait_for_vblank_off(dev, pipe);
temp = I915_READ(dpll_reg);
if ((temp & DPLL_VCO_ENABLE) != 0) {
......@@ -2469,9 +2411,6 @@ static void i9xx_crtc_dpms(struct drm_crtc *crtc, int mode)
/**
* Sets the power management mode of the pipe and plane.
*
* This code should probably grow support for turning the cursor off and back
* on appropriately at the same time as we're turning the pipe off/on.
*/
static void intel_crtc_dpms(struct drm_crtc *crtc, int mode)
{
......@@ -2482,9 +2421,26 @@ static void intel_crtc_dpms(struct drm_crtc *crtc, int mode)
int pipe = intel_crtc->pipe;
bool enabled;
intel_crtc->dpms_mode = mode;
intel_crtc->cursor_on = mode == DRM_MODE_DPMS_ON;
/* When switching on the display, ensure that SR is disabled
* with multiple pipes prior to enabling to new pipe.
*
* When switching off the display, make sure the cursor is
* properly hidden prior to disabling the pipe.
*/
if (mode == DRM_MODE_DPMS_ON)
intel_update_watermarks(dev);
else
intel_crtc_update_cursor(crtc);
dev_priv->display.dpms(crtc, mode);
intel_crtc->dpms_mode = mode;
if (mode == DRM_MODE_DPMS_ON)
intel_crtc_update_cursor(crtc);
else
intel_update_watermarks(dev);
if (!dev->primary->master)
return;
......@@ -2536,6 +2492,20 @@ void intel_encoder_commit (struct drm_encoder *encoder)
encoder_funcs->dpms(encoder, DRM_MODE_DPMS_ON);
}
void intel_encoder_destroy(struct drm_encoder *encoder)
{
struct intel_encoder *intel_encoder = enc_to_intel_encoder(encoder);
if (intel_encoder->ddc_bus)
intel_i2c_destroy(intel_encoder->ddc_bus);
if (intel_encoder->i2c_bus)
intel_i2c_destroy(intel_encoder->i2c_bus);
drm_encoder_cleanup(encoder);
kfree(intel_encoder);
}
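intel_encoder_destroy is the shared destructor that replaces per-output destroy hooks such as the CRT one removed earlier in this diff; an output opts in simply by pointing its drm_encoder_funcs at it. A sketch of that wiring (the struct name below is illustrative):

/* Sketch only, not part of this commit: reuse of the shared destructor
 * in an output's encoder funcs, as the CRT hunk above does. */
static const struct drm_encoder_funcs example_encoder_funcs = {
	.destroy = intel_encoder_destroy,
};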
static bool intel_crtc_mode_fixup(struct drm_crtc *crtc,
struct drm_display_mode *mode,
struct drm_display_mode *adjusted_mode)
......@@ -2867,7 +2837,7 @@ struct cxsr_latency {
unsigned long cursor_hpll_disable;
};
static struct cxsr_latency cxsr_latency_table[] = {
static const struct cxsr_latency cxsr_latency_table[] = {
{1, 0, 800, 400, 3382, 33382, 3983, 33983}, /* DDR2-400 SC */
{1, 0, 800, 667, 3354, 33354, 3807, 33807}, /* DDR2-667 SC */
{1, 0, 800, 800, 3347, 33347, 3763, 33763}, /* DDR2-800 SC */
......@@ -2905,11 +2875,13 @@ static struct cxsr_latency cxsr_latency_table[] = {
{0, 1, 400, 800, 6042, 36042, 6584, 36584}, /* DDR3-800 SC */
};
static struct cxsr_latency *intel_get_cxsr_latency(int is_desktop, int is_ddr3,
int fsb, int mem)
static const struct cxsr_latency *intel_get_cxsr_latency(int is_desktop,
int is_ddr3,
int fsb,
int mem)
{
const struct cxsr_latency *latency;
int i;
struct cxsr_latency *latency;
if (fsb == 0 || mem == 0)
return NULL;
......@@ -2930,13 +2902,9 @@ static struct cxsr_latency *intel_get_cxsr_latency(int is_desktop, int is_ddr3,
static void pineview_disable_cxsr(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
u32 reg;
/* deactivate cxsr */
reg = I915_READ(DSPFW3);
reg &= ~(PINEVIEW_SELF_REFRESH_EN);
I915_WRITE(DSPFW3, reg);
DRM_INFO("Big FIFO is disabled\n");
I915_WRITE(DSPFW3, I915_READ(DSPFW3) & ~PINEVIEW_SELF_REFRESH_EN);
}
/*
......@@ -3024,12 +2992,12 @@ static void pineview_update_wm(struct drm_device *dev, int planea_clock,
int pixel_size)
{
struct drm_i915_private *dev_priv = dev->dev_private;
const struct cxsr_latency *latency;
u32 reg;
unsigned long wm;
struct cxsr_latency *latency;
int sr_clock;
latency = intel_get_cxsr_latency(IS_PINEVIEW_G(dev), dev_priv->is_ddr3,
latency = intel_get_cxsr_latency(IS_PINEVIEW_G(dev), dev_priv->is_ddr3,
dev_priv->fsb_freq, dev_priv->mem_freq);
if (!latency) {
DRM_DEBUG_KMS("Unknown FSB/MEM found, disable CxSR\n");
......@@ -3075,9 +3043,8 @@ static void pineview_update_wm(struct drm_device *dev, int planea_clock,
DRM_DEBUG_KMS("DSPFW3 register is %x\n", reg);
/* activate cxsr */
reg = I915_READ(DSPFW3);
reg |= PINEVIEW_SELF_REFRESH_EN;
I915_WRITE(DSPFW3, reg);
I915_WRITE(DSPFW3,
I915_READ(DSPFW3) | PINEVIEW_SELF_REFRESH_EN);
DRM_DEBUG_KMS("Self-refresh is enabled\n");
} else {
pineview_disable_cxsr(dev);
......@@ -3354,12 +3321,11 @@ static void ironlake_update_wm(struct drm_device *dev, int planea_clock,
int line_count;
int planea_htotal = 0, planeb_htotal = 0;
struct drm_crtc *crtc;
struct intel_crtc *intel_crtc;
/* Need htotal for all active display plane */
list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
intel_crtc = to_intel_crtc(crtc);
if (crtc->enabled) {
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
if (intel_crtc->dpms_mode == DRM_MODE_DPMS_ON) {
if (intel_crtc->plane == 0)
planea_htotal = crtc->mode.htotal;
else
......@@ -3519,7 +3485,6 @@ static void intel_update_watermarks(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
struct drm_crtc *crtc;
struct intel_crtc *intel_crtc;
int sr_hdisplay = 0;
unsigned long planea_clock = 0, planeb_clock = 0, sr_clock = 0;
int enabled = 0, pixel_size = 0;
......@@ -3530,8 +3495,8 @@ static void intel_update_watermarks(struct drm_device *dev)
/* Get the clock config from both planes */
list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
intel_crtc = to_intel_crtc(crtc);
if (crtc->enabled) {
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
if (intel_crtc->dpms_mode == DRM_MODE_DPMS_ON) {
enabled++;
if (intel_crtc->plane == 0) {
DRM_DEBUG_KMS("plane A (pipe %d) clock: %d\n",
......@@ -3966,9 +3931,7 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc,
dpll_reg = pch_dpll_reg;
}
if (is_edp) {
ironlake_disable_pll_edp(crtc);
} else if ((dpll & DPLL_VCO_ENABLE)) {
if (!is_edp) {
I915_WRITE(fp_reg, fp);
I915_WRITE(dpll_reg, dpll & ~DPLL_VCO_ENABLE);
I915_READ(dpll_reg);
......@@ -4167,7 +4130,7 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc,
I915_WRITE(pipeconf_reg, pipeconf);
I915_READ(pipeconf_reg);
intel_wait_for_vblank(dev);
intel_wait_for_vblank(dev, pipe);
if (IS_IRONLAKE(dev)) {
/* enable address swizzle for tiling buffer */
......@@ -4180,9 +4143,6 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc,
/* Flush the plane changes */
ret = intel_pipe_set_base(crtc, x, y, old_fb);
if ((IS_I965G(dev) || plane == 0))
intel_update_fbc(crtc, &crtc->mode);
intel_update_watermarks(dev);
drm_vblank_post_modeset(dev, pipe);
......@@ -4216,6 +4176,62 @@ void intel_crtc_load_lut(struct drm_crtc *crtc)
}
}
static void i845_update_cursor(struct drm_crtc *crtc, u32 base)
{
struct drm_device *dev = crtc->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
bool visible = base != 0;
u32 cntl;
if (intel_crtc->cursor_visible == visible)
return;
cntl = I915_READ(CURACNTR);
if (visible) {
/* On these chipsets we can only modify the base whilst
* the cursor is disabled.
*/
I915_WRITE(CURABASE, base);
cntl &= ~(CURSOR_FORMAT_MASK);
/* XXX width must be 64, stride 256 => 0x00 << 28 */
cntl |= CURSOR_ENABLE |
CURSOR_GAMMA_ENABLE |
CURSOR_FORMAT_ARGB;
} else
cntl &= ~(CURSOR_ENABLE | CURSOR_GAMMA_ENABLE);
I915_WRITE(CURACNTR, cntl);
intel_crtc->cursor_visible = visible;
}
static void i9xx_update_cursor(struct drm_crtc *crtc, u32 base)
{
struct drm_device *dev = crtc->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
int pipe = intel_crtc->pipe;
bool visible = base != 0;
if (intel_crtc->cursor_visible != visible) {
uint32_t cntl = I915_READ(pipe == 0 ? CURACNTR : CURBCNTR);
if (base) {
cntl &= ~(CURSOR_MODE | MCURSOR_PIPE_SELECT);
cntl |= CURSOR_MODE_64_ARGB_AX | MCURSOR_GAMMA_ENABLE;
cntl |= pipe << 28; /* Connect to correct pipe */
} else {
cntl &= ~(CURSOR_MODE | MCURSOR_GAMMA_ENABLE);
cntl |= CURSOR_MODE_DISABLE;
}
I915_WRITE(pipe == 0 ? CURACNTR : CURBCNTR, cntl);
intel_crtc->cursor_visible = visible;
}
/* and commit changes on next vblank */
I915_WRITE(pipe == 0 ? CURABASE : CURBBASE, base);
}
/* If no part of the cursor is visible on the framebuffer, then the GPU may hang... */
static void intel_crtc_update_cursor(struct drm_crtc *crtc)
{
......@@ -4225,12 +4241,12 @@ static void intel_crtc_update_cursor(struct drm_crtc *crtc)
int pipe = intel_crtc->pipe;
int x = intel_crtc->cursor_x;
int y = intel_crtc->cursor_y;
uint32_t base, pos;
u32 base, pos;
bool visible;
pos = 0;
if (crtc->fb) {
if (intel_crtc->cursor_on && crtc->fb) {
base = intel_crtc->cursor_addr;
if (x > (int) crtc->fb->width)
base = 0;
......@@ -4259,37 +4275,14 @@ static void intel_crtc_update_cursor(struct drm_crtc *crtc)
pos |= y << CURSOR_Y_SHIFT;
visible = base != 0;
if (!visible && !intel_crtc->cursor_visble)
if (!visible && !intel_crtc->cursor_visible)
return;
I915_WRITE(pipe == 0 ? CURAPOS : CURBPOS, pos);
if (intel_crtc->cursor_visble != visible) {
uint32_t cntl = I915_READ(pipe == 0 ? CURACNTR : CURBCNTR);
if (base) {
/* Hooray for CUR*CNTR differences */
if (IS_MOBILE(dev) || IS_I9XX(dev)) {
cntl &= ~(CURSOR_MODE | MCURSOR_PIPE_SELECT);
cntl |= CURSOR_MODE_64_ARGB_AX | MCURSOR_GAMMA_ENABLE;
cntl |= pipe << 28; /* Connect to correct pipe */
} else {
cntl &= ~(CURSOR_FORMAT_MASK);
cntl |= CURSOR_ENABLE;
cntl |= CURSOR_FORMAT_ARGB | CURSOR_GAMMA_ENABLE;
}
} else {
if (IS_MOBILE(dev) || IS_I9XX(dev)) {
cntl &= ~(CURSOR_MODE | MCURSOR_GAMMA_ENABLE);
cntl |= CURSOR_MODE_DISABLE;
} else {
cntl &= ~(CURSOR_ENABLE | CURSOR_GAMMA_ENABLE);
}
}
I915_WRITE(pipe == 0 ? CURACNTR : CURBCNTR, cntl);
intel_crtc->cursor_visble = visible;
}
/* and commit changes on next vblank */
I915_WRITE(pipe == 0 ? CURABASE : CURBBASE, base);
if (IS_845G(dev) || IS_I865G(dev))
i845_update_cursor(crtc, base);
else
i9xx_update_cursor(crtc, base);
if (visible)
intel_mark_busy(dev, to_intel_framebuffer(crtc->fb)->obj);
......@@ -4354,8 +4347,10 @@ static int intel_crtc_cursor_set(struct drm_crtc *crtc,
addr = obj_priv->gtt_offset;
} else {
int align = IS_I830(dev) ? 16 * 1024 : 256;
ret = i915_gem_attach_phys_object(dev, bo,
(intel_crtc->pipe == 0) ? I915_GEM_PHYS_CURSOR_0 : I915_GEM_PHYS_CURSOR_1);
(intel_crtc->pipe == 0) ? I915_GEM_PHYS_CURSOR_0 : I915_GEM_PHYS_CURSOR_1,
align);
if (ret) {
DRM_ERROR("failed to attach phys object\n");
goto fail_locked;
......@@ -4544,7 +4539,7 @@ struct drm_crtc *intel_get_load_detect_pipe(struct intel_encoder *intel_encoder,
encoder_funcs->commit(encoder);
}
/* let the connector get through one full cycle before testing */
intel_wait_for_vblank(dev);
intel_wait_for_vblank(dev, intel_crtc->pipe);
return crtc;
}
......@@ -4749,7 +4744,7 @@ static void intel_increase_pllclock(struct drm_crtc *crtc, bool schedule)
dpll &= ~DISPLAY_RATE_SELECT_FPA1;
I915_WRITE(dpll_reg, dpll);
dpll = I915_READ(dpll_reg);
intel_wait_for_vblank(dev);
intel_wait_for_vblank(dev, pipe);
dpll = I915_READ(dpll_reg);
if (dpll & DISPLAY_RATE_SELECT_FPA1)
DRM_DEBUG_DRIVER("failed to upclock LVDS!\n");
......@@ -4793,7 +4788,7 @@ static void intel_decrease_pllclock(struct drm_crtc *crtc)
dpll |= DISPLAY_RATE_SELECT_FPA1;
I915_WRITE(dpll_reg, dpll);
dpll = I915_READ(dpll_reg);
intel_wait_for_vblank(dev);
intel_wait_for_vblank(dev, pipe);
dpll = I915_READ(dpll_reg);
if (!(dpll & DISPLAY_RATE_SELECT_FPA1))
DRM_DEBUG_DRIVER("failed to downclock LVDS!\n");
......@@ -5083,14 +5078,16 @@ static int intel_crtc_page_flip(struct drm_crtc *crtc,
work->pending_flip_obj = obj;
if (intel_crtc->plane)
flip_mask = I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT;
flip_mask = MI_WAIT_FOR_PLANE_B_FLIP;
else
flip_mask = I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT;
flip_mask = MI_WAIT_FOR_PLANE_A_FLIP;
/* Wait for any previous flip to finish */
if (IS_GEN3(dev))
while (I915_READ(ISR) & flip_mask)
;
if (IS_GEN3(dev) || IS_GEN2(dev)) {
BEGIN_LP_RING(2);
OUT_RING(MI_WAIT_FOR_EVENT | flip_mask);
OUT_RING(0);
ADVANCE_LP_RING();
}
/* Offset into the new buffer for cases of shared fbs between CRTCs */
offset = obj_priv->gtt_offset;
......@@ -5104,12 +5101,18 @@ static int intel_crtc_page_flip(struct drm_crtc *crtc,
OUT_RING(offset | obj_priv->tiling_mode);
pipesrc = I915_READ(pipesrc_reg);
OUT_RING(pipesrc & 0x0fff0fff);
} else {
} else if (IS_GEN3(dev)) {
OUT_RING(MI_DISPLAY_FLIP_I915 |
MI_DISPLAY_FLIP_PLANE(intel_crtc->plane));
OUT_RING(fb->pitch);
OUT_RING(offset);
OUT_RING(MI_NOOP);
} else {
OUT_RING(MI_DISPLAY_FLIP |
MI_DISPLAY_FLIP_PLANE(intel_crtc->plane));
OUT_RING(fb->pitch);
OUT_RING(offset);
OUT_RING(MI_NOOP);
}
ADVANCE_LP_RING();
......@@ -5432,37 +5435,37 @@ static const struct drm_mode_config_funcs intel_mode_funcs = {
};
static struct drm_gem_object *
intel_alloc_power_context(struct drm_device *dev)
intel_alloc_context_page(struct drm_device *dev)
{
struct drm_gem_object *pwrctx;
struct drm_gem_object *ctx;
int ret;
pwrctx = i915_gem_alloc_object(dev, 4096);
if (!pwrctx) {
ctx = i915_gem_alloc_object(dev, 4096);
if (!ctx) {
DRM_DEBUG("failed to alloc power context, RC6 disabled\n");
return NULL;
}
mutex_lock(&dev->struct_mutex);
ret = i915_gem_object_pin(pwrctx, 4096);
ret = i915_gem_object_pin(ctx, 4096);
if (ret) {
DRM_ERROR("failed to pin power context: %d\n", ret);
goto err_unref;
}
ret = i915_gem_object_set_to_gtt_domain(pwrctx, 1);
ret = i915_gem_object_set_to_gtt_domain(ctx, 1);
if (ret) {
DRM_ERROR("failed to set-domain on power context: %d\n", ret);
goto err_unpin;
}
mutex_unlock(&dev->struct_mutex);
return pwrctx;
return ctx;
err_unpin:
i915_gem_object_unpin(pwrctx);
i915_gem_object_unpin(ctx);
err_unref:
drm_gem_object_unreference(pwrctx);
drm_gem_object_unreference(ctx);
mutex_unlock(&dev->struct_mutex);
return NULL;
}
......@@ -5494,7 +5497,6 @@ void ironlake_enable_drps(struct drm_device *dev)
struct drm_i915_private *dev_priv = dev->dev_private;
u32 rgvmodectl = I915_READ(MEMMODECTL);
u8 fmax, fmin, fstart, vstart;
int i = 0;
/* 100ms RC evaluation intervals */
I915_WRITE(RCUPEI, 100000);
......@@ -5538,13 +5540,8 @@ void ironlake_enable_drps(struct drm_device *dev)
rgvmodectl |= MEMMODE_SWMODE_EN;
I915_WRITE(MEMMODECTL, rgvmodectl);
while (I915_READ(MEMSWCTL) & MEMCTL_CMD_STS) {
if (i++ > 100) {
DRM_ERROR("stuck trying to change perf mode\n");
break;
}
msleep(1);
}
if (wait_for((I915_READ(MEMSWCTL) & MEMCTL_CMD_STS) == 0, 1, 0))
DRM_ERROR("stuck trying to change perf mode\n");
msleep(1);
ironlake_set_drps(dev, fstart);
......@@ -5725,7 +5722,8 @@ void intel_init_clock_gating(struct drm_device *dev)
ILK_DPFC_DIS2 |
ILK_CLK_FBC);
}
return;
if (IS_GEN6(dev))
return;
} else if (IS_G4X(dev)) {
uint32_t dspclk_gate;
I915_WRITE(RENCLK_GATE_D1, 0);
......@@ -5768,6 +5766,31 @@ void intel_init_clock_gating(struct drm_device *dev)
* GPU can automatically power down the render unit if given a page
* to save state.
*/
if (IS_IRONLAKE_M(dev)) {
if (dev_priv->renderctx == NULL)
dev_priv->renderctx = intel_alloc_context_page(dev);
if (dev_priv->renderctx) {
struct drm_i915_gem_object *obj_priv;
obj_priv = to_intel_bo(dev_priv->renderctx);
if (obj_priv) {
BEGIN_LP_RING(4);
OUT_RING(MI_SET_CONTEXT);
OUT_RING(obj_priv->gtt_offset |
MI_MM_SPACE_GTT |
MI_SAVE_EXT_STATE_EN |
MI_RESTORE_EXT_STATE_EN |
MI_RESTORE_INHIBIT);
OUT_RING(MI_NOOP);
OUT_RING(MI_FLUSH);
ADVANCE_LP_RING();
}
} else {
DRM_DEBUG_KMS("Failed to allocate render context."
"Disable RC6\n");
return;
}
}
if (I915_HAS_RC6(dev) && drm_core_check_feature(dev, DRIVER_MODESET)) {
struct drm_i915_gem_object *obj_priv = NULL;
......@@ -5776,7 +5799,7 @@ void intel_init_clock_gating(struct drm_device *dev)
} else {
struct drm_gem_object *pwrctx;
pwrctx = intel_alloc_power_context(dev);
pwrctx = intel_alloc_context_page(dev);
if (pwrctx) {
dev_priv->pwrctx = pwrctx;
obj_priv = to_intel_bo(pwrctx);
......@@ -5948,6 +5971,29 @@ static void intel_init_quirks(struct drm_device *dev)
}
}
/* Disable the VGA plane that we never use */
static void i915_disable_vga(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
u8 sr1;
u32 vga_reg;
if (HAS_PCH_SPLIT(dev))
vga_reg = CPU_VGACNTRL;
else
vga_reg = VGACNTRL;
vga_get_uninterruptible(dev->pdev, VGA_RSRC_LEGACY_IO);
outb(1, VGA_SR_INDEX);
sr1 = inb(VGA_SR_DATA);
outb(sr1 | 1<<5, VGA_SR_DATA);
vga_put(dev->pdev, VGA_RSRC_LEGACY_IO);
udelay(300);
I915_WRITE(vga_reg, VGA_DISP_DISABLE);
POSTING_READ(vga_reg);
}
void intel_modeset_init(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
......@@ -5996,6 +6042,9 @@ void intel_modeset_init(struct drm_device *dev)
intel_init_clock_gating(dev);
/* Just disable it once at startup */
i915_disable_vga(dev);
if (IS_IRONLAKE_M(dev)) {
ironlake_enable_drps(dev);
intel_init_emon(dev);
......@@ -6034,6 +6083,16 @@ void intel_modeset_cleanup(struct drm_device *dev)
if (dev_priv->display.disable_fbc)
dev_priv->display.disable_fbc(dev);
if (dev_priv->renderctx) {
struct drm_i915_gem_object *obj_priv;
obj_priv = to_intel_bo(dev_priv->renderctx);
I915_WRITE(CCID, obj_priv->gtt_offset &~ CCID_EN);
I915_READ(CCID);
i915_gem_object_unpin(dev_priv->renderctx);
drm_gem_object_unreference(dev_priv->renderctx);
}
if (dev_priv->pwrctx) {
struct drm_i915_gem_object *obj_priv;
......
......@@ -42,10 +42,11 @@
#define DP_LINK_CONFIGURATION_SIZE 9
#define IS_eDP(i) ((i)->type == INTEL_OUTPUT_EDP)
#define IS_PCH_eDP(dp_priv) ((dp_priv)->is_pch_edp)
#define IS_eDP(i) ((i)->base.type == INTEL_OUTPUT_EDP)
#define IS_PCH_eDP(i) ((i)->is_pch_edp)
struct intel_dp_priv {
struct intel_dp {
struct intel_encoder base;
uint32_t output_reg;
uint32_t DP;
uint8_t link_configuration[DP_LINK_CONFIGURATION_SIZE];
......@@ -54,40 +55,39 @@ struct intel_dp_priv {
uint8_t link_bw;
uint8_t lane_count;
uint8_t dpcd[4];
struct intel_encoder *intel_encoder;
struct i2c_adapter adapter;
struct i2c_algo_dp_aux_data algo;
bool is_pch_edp;
};
static void
intel_dp_link_train(struct intel_encoder *intel_encoder, uint32_t DP,
uint8_t link_configuration[DP_LINK_CONFIGURATION_SIZE]);
static struct intel_dp *enc_to_intel_dp(struct drm_encoder *encoder)
{
return container_of(enc_to_intel_encoder(encoder), struct intel_dp, base);
}
static void
intel_dp_link_down(struct intel_encoder *intel_encoder, uint32_t DP);
static void intel_dp_link_train(struct intel_dp *intel_dp);
static void intel_dp_link_down(struct intel_dp *intel_dp);
void
intel_edp_link_config (struct intel_encoder *intel_encoder,
int *lane_num, int *link_bw)
int *lane_num, int *link_bw)
{
struct intel_dp_priv *dp_priv = intel_encoder->dev_priv;
struct intel_dp *intel_dp = container_of(intel_encoder, struct intel_dp, base);
*lane_num = dp_priv->lane_count;
if (dp_priv->link_bw == DP_LINK_BW_1_62)
*lane_num = intel_dp->lane_count;
if (intel_dp->link_bw == DP_LINK_BW_1_62)
*link_bw = 162000;
else if (dp_priv->link_bw == DP_LINK_BW_2_7)
else if (intel_dp->link_bw == DP_LINK_BW_2_7)
*link_bw = 270000;
}
static int
intel_dp_max_lane_count(struct intel_encoder *intel_encoder)
intel_dp_max_lane_count(struct intel_dp *intel_dp)
{
struct intel_dp_priv *dp_priv = intel_encoder->dev_priv;
int max_lane_count = 4;
if (dp_priv->dpcd[0] >= 0x11) {
max_lane_count = dp_priv->dpcd[2] & 0x1f;
if (intel_dp->dpcd[0] >= 0x11) {
max_lane_count = intel_dp->dpcd[2] & 0x1f;
switch (max_lane_count) {
case 1: case 2: case 4:
break;
......@@ -99,10 +99,9 @@ intel_dp_max_lane_count(struct intel_encoder *intel_encoder)
}
static int
intel_dp_max_link_bw(struct intel_encoder *intel_encoder)
intel_dp_max_link_bw(struct intel_dp *intel_dp)
{
struct intel_dp_priv *dp_priv = intel_encoder->dev_priv;
int max_link_bw = dp_priv->dpcd[1];
int max_link_bw = intel_dp->dpcd[1];
switch (max_link_bw) {
case DP_LINK_BW_1_62:
......@@ -126,13 +125,11 @@ intel_dp_link_clock(uint8_t link_bw)
/* I think this is a fiction */
static int
intel_dp_link_required(struct drm_device *dev,
struct intel_encoder *intel_encoder, int pixel_clock)
intel_dp_link_required(struct drm_device *dev, struct intel_dp *intel_dp, int pixel_clock)
{
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_dp_priv *dp_priv = intel_encoder->dev_priv;
if (IS_eDP(intel_encoder) || IS_PCH_eDP(dp_priv))
if (IS_eDP(intel_dp) || IS_PCH_eDP(intel_dp))
return (pixel_clock * dev_priv->edp_bpp) / 8;
else
return pixel_clock * 3;
......@@ -149,14 +146,13 @@ intel_dp_mode_valid(struct drm_connector *connector,
struct drm_display_mode *mode)
{
struct drm_encoder *encoder = intel_attached_encoder(connector);
struct intel_encoder *intel_encoder = enc_to_intel_encoder(encoder);
struct intel_dp_priv *dp_priv = intel_encoder->dev_priv;
struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
struct drm_device *dev = connector->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
int max_link_clock = intel_dp_link_clock(intel_dp_max_link_bw(intel_encoder));
int max_lanes = intel_dp_max_lane_count(intel_encoder);
int max_link_clock = intel_dp_link_clock(intel_dp_max_link_bw(intel_dp));
int max_lanes = intel_dp_max_lane_count(intel_dp);
if ((IS_eDP(intel_encoder) || IS_PCH_eDP(dp_priv)) &&
if ((IS_eDP(intel_dp) || IS_PCH_eDP(intel_dp)) &&
dev_priv->panel_fixed_mode) {
if (mode->hdisplay > dev_priv->panel_fixed_mode->hdisplay)
return MODE_PANEL;
......@@ -167,8 +163,8 @@ intel_dp_mode_valid(struct drm_connector *connector,
/* only refuse the mode on non-eDP since we have seen some weird eDP panels
which are outside spec tolerances but somehow work by magic */
if (!IS_eDP(intel_encoder) &&
(intel_dp_link_required(connector->dev, intel_encoder, mode->clock)
if (!IS_eDP(intel_dp) &&
(intel_dp_link_required(connector->dev, intel_dp, mode->clock)
> intel_dp_max_data_rate(max_link_clock, max_lanes)))
return MODE_CLOCK_HIGH;
......@@ -232,13 +228,12 @@ intel_hrawclk(struct drm_device *dev)
}
static int
intel_dp_aux_ch(struct intel_encoder *intel_encoder,
intel_dp_aux_ch(struct intel_dp *intel_dp,
uint8_t *send, int send_bytes,
uint8_t *recv, int recv_size)
{
struct intel_dp_priv *dp_priv = intel_encoder->dev_priv;
uint32_t output_reg = dp_priv->output_reg;
struct drm_device *dev = intel_encoder->enc.dev;
uint32_t output_reg = intel_dp->output_reg;
struct drm_device *dev = intel_dp->base.enc.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
uint32_t ch_ctl = output_reg + 0x10;
uint32_t ch_data = ch_ctl + 4;
......@@ -253,7 +248,7 @@ intel_dp_aux_ch(struct intel_encoder *intel_encoder,
* and would like to run at 2MHz. So, take the
* hrawclk value and divide by 2 and use that
*/
if (IS_eDP(intel_encoder)) {
if (IS_eDP(intel_dp)) {
if (IS_GEN6(dev))
aux_clock_divider = 200; /* SNB eDP input clock at 400MHz */
else
......@@ -344,7 +339,7 @@ intel_dp_aux_ch(struct intel_encoder *intel_encoder,
/* Write data to the aux channel in native mode */
static int
intel_dp_aux_native_write(struct intel_encoder *intel_encoder,
intel_dp_aux_native_write(struct intel_dp *intel_dp,
uint16_t address, uint8_t *send, int send_bytes)
{
int ret;
......@@ -361,7 +356,7 @@ intel_dp_aux_native_write(struct intel_encoder *intel_encoder,
memcpy(&msg[4], send, send_bytes);
msg_bytes = send_bytes + 4;
for (;;) {
ret = intel_dp_aux_ch(intel_encoder, msg, msg_bytes, &ack, 1);
ret = intel_dp_aux_ch(intel_dp, msg, msg_bytes, &ack, 1);
if (ret < 0)
return ret;
if ((ack & AUX_NATIVE_REPLY_MASK) == AUX_NATIVE_REPLY_ACK)
......@@ -376,15 +371,15 @@ intel_dp_aux_native_write(struct intel_encoder *intel_encoder,
/* Write a single byte to the aux channel in native mode */
static int
intel_dp_aux_native_write_1(struct intel_encoder *intel_encoder,
intel_dp_aux_native_write_1(struct intel_dp *intel_dp,
uint16_t address, uint8_t byte)
{
return intel_dp_aux_native_write(intel_encoder, address, &byte, 1);
return intel_dp_aux_native_write(intel_dp, address, &byte, 1);
}
/* read bytes from a native aux channel */
static int
intel_dp_aux_native_read(struct intel_encoder *intel_encoder,
intel_dp_aux_native_read(struct intel_dp *intel_dp,
uint16_t address, uint8_t *recv, int recv_bytes)
{
uint8_t msg[4];
......@@ -403,7 +398,7 @@ intel_dp_aux_native_read(struct intel_encoder *intel_encoder,
reply_bytes = recv_bytes + 1;
for (;;) {
ret = intel_dp_aux_ch(intel_encoder, msg, msg_bytes,
ret = intel_dp_aux_ch(intel_dp, msg, msg_bytes,
reply, reply_bytes);
if (ret == 0)
return -EPROTO;
......@@ -426,10 +421,9 @@ intel_dp_i2c_aux_ch(struct i2c_adapter *adapter, int mode,
uint8_t write_byte, uint8_t *read_byte)
{
struct i2c_algo_dp_aux_data *algo_data = adapter->algo_data;
struct intel_dp_priv *dp_priv = container_of(adapter,
struct intel_dp_priv,
adapter);
struct intel_encoder *intel_encoder = dp_priv->intel_encoder;
struct intel_dp *intel_dp = container_of(adapter,
struct intel_dp,
adapter);
uint16_t address = algo_data->address;
uint8_t msg[5];
uint8_t reply[2];
......@@ -468,7 +462,7 @@ intel_dp_i2c_aux_ch(struct i2c_adapter *adapter, int mode,
}
for (;;) {
ret = intel_dp_aux_ch(intel_encoder,
ret = intel_dp_aux_ch(intel_dp,
msg, msg_bytes,
reply, reply_bytes);
if (ret < 0) {
......@@ -496,57 +490,42 @@ intel_dp_i2c_aux_ch(struct i2c_adapter *adapter, int mode,
}
static int
intel_dp_i2c_init(struct intel_encoder *intel_encoder,
intel_dp_i2c_init(struct intel_dp *intel_dp,
struct intel_connector *intel_connector, const char *name)
{
struct intel_dp_priv *dp_priv = intel_encoder->dev_priv;
DRM_DEBUG_KMS("i2c_init %s\n", name);
dp_priv->algo.running = false;
dp_priv->algo.address = 0;
dp_priv->algo.aux_ch = intel_dp_i2c_aux_ch;
memset(&dp_priv->adapter, '\0', sizeof (dp_priv->adapter));
dp_priv->adapter.owner = THIS_MODULE;
dp_priv->adapter.class = I2C_CLASS_DDC;
strncpy (dp_priv->adapter.name, name, sizeof(dp_priv->adapter.name) - 1);
dp_priv->adapter.name[sizeof(dp_priv->adapter.name) - 1] = '\0';
dp_priv->adapter.algo_data = &dp_priv->algo;
dp_priv->adapter.dev.parent = &intel_connector->base.kdev;
return i2c_dp_aux_add_bus(&dp_priv->adapter);
intel_dp->algo.running = false;
intel_dp->algo.address = 0;
intel_dp->algo.aux_ch = intel_dp_i2c_aux_ch;
memset(&intel_dp->adapter, '\0', sizeof (intel_dp->adapter));
intel_dp->adapter.owner = THIS_MODULE;
intel_dp->adapter.class = I2C_CLASS_DDC;
strncpy (intel_dp->adapter.name, name, sizeof(intel_dp->adapter.name) - 1);
intel_dp->adapter.name[sizeof(intel_dp->adapter.name) - 1] = '\0';
intel_dp->adapter.algo_data = &intel_dp->algo;
intel_dp->adapter.dev.parent = &intel_connector->base.kdev;
return i2c_dp_aux_add_bus(&intel_dp->adapter);
}
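Once i2c_dp_aux_add_bus() has registered the adapter, the DP AUX channel behaves like an ordinary i2c bus, so EDID probing can go through the generic DRM helper. A hedged sketch of that use (drm_get_edid() is the stock DRM helper, not something this commit adds; the wrapper name is illustrative):

/* Sketch only, not part of this commit: EDID read over the DP AUX
 * channel via the adapter registered above. */
static struct edid *example_dp_get_edid(struct drm_connector *connector,
					struct intel_dp *intel_dp)
{
	return drm_get_edid(connector, &intel_dp->adapter);
}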
static bool
intel_dp_mode_fixup(struct drm_encoder *encoder, struct drm_display_mode *mode,
struct drm_display_mode *adjusted_mode)
{
struct intel_encoder *intel_encoder = enc_to_intel_encoder(encoder);
struct intel_dp_priv *dp_priv = intel_encoder->dev_priv;
struct drm_device *dev = encoder->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
int lane_count, clock;
int max_lane_count = intel_dp_max_lane_count(intel_encoder);
int max_clock = intel_dp_max_link_bw(intel_encoder) == DP_LINK_BW_2_7 ? 1 : 0;
int max_lane_count = intel_dp_max_lane_count(intel_dp);
int max_clock = intel_dp_max_link_bw(intel_dp) == DP_LINK_BW_2_7 ? 1 : 0;
static int bws[2] = { DP_LINK_BW_1_62, DP_LINK_BW_2_7 };
if ((IS_eDP(intel_encoder) || IS_PCH_eDP(dp_priv)) &&
if ((IS_eDP(intel_dp) || IS_PCH_eDP(intel_dp)) &&
dev_priv->panel_fixed_mode) {
struct drm_display_mode *fixed_mode = dev_priv->panel_fixed_mode;
adjusted_mode->hdisplay = fixed_mode->hdisplay;
adjusted_mode->hsync_start = fixed_mode->hsync_start;
adjusted_mode->hsync_end = fixed_mode->hsync_end;
adjusted_mode->htotal = fixed_mode->htotal;
adjusted_mode->vdisplay = fixed_mode->vdisplay;
adjusted_mode->vsync_start = fixed_mode->vsync_start;
adjusted_mode->vsync_end = fixed_mode->vsync_end;
adjusted_mode->vtotal = fixed_mode->vtotal;
adjusted_mode->clock = fixed_mode->clock;
drm_mode_set_crtcinfo(adjusted_mode, CRTC_INTERLACE_HALVE_V);
intel_fixed_panel_mode(dev_priv->panel_fixed_mode, adjusted_mode);
intel_pch_panel_fitting(dev, DRM_MODE_SCALE_FULLSCREEN,
mode, adjusted_mode);
/*
* the mode->clock is used to calculate the Data&Link M/N
* of the pipe. For the eDP the fixed clock should be used.
......@@ -558,31 +537,33 @@ intel_dp_mode_fixup(struct drm_encoder *encoder, struct drm_display_mode *mode,
for (clock = 0; clock <= max_clock; clock++) {
int link_avail = intel_dp_max_data_rate(intel_dp_link_clock(bws[clock]), lane_count);
if (intel_dp_link_required(encoder->dev, intel_encoder, mode->clock)
if (intel_dp_link_required(encoder->dev, intel_dp, mode->clock)
<= link_avail) {
dp_priv->link_bw = bws[clock];
dp_priv->lane_count = lane_count;
adjusted_mode->clock = intel_dp_link_clock(dp_priv->link_bw);
intel_dp->link_bw = bws[clock];
intel_dp->lane_count = lane_count;
adjusted_mode->clock = intel_dp_link_clock(intel_dp->link_bw);
DRM_DEBUG_KMS("Display port link bw %02x lane "
"count %d clock %d\n",
dp_priv->link_bw, dp_priv->lane_count,
intel_dp->link_bw, intel_dp->lane_count,
adjusted_mode->clock);
return true;
}
}
}
if (IS_eDP(intel_encoder) || IS_PCH_eDP(dp_priv)) {
if (IS_eDP(intel_dp) || IS_PCH_eDP(intel_dp)) {
/* okay we failed just pick the highest */
dp_priv->lane_count = max_lane_count;
dp_priv->link_bw = bws[max_clock];
adjusted_mode->clock = intel_dp_link_clock(dp_priv->link_bw);
intel_dp->lane_count = max_lane_count;
intel_dp->link_bw = bws[max_clock];
adjusted_mode->clock = intel_dp_link_clock(intel_dp->link_bw);
DRM_DEBUG_KMS("Force picking display port link bw %02x lane "
"count %d clock %d\n",
dp_priv->link_bw, dp_priv->lane_count,
intel_dp->link_bw, intel_dp->lane_count,
adjusted_mode->clock);
return true;
}
return false;
}
......@@ -626,17 +607,14 @@ bool intel_pch_has_edp(struct drm_crtc *crtc)
struct drm_encoder *encoder;
list_for_each_entry(encoder, &mode_config->encoder_list, head) {
struct intel_encoder *intel_encoder;
struct intel_dp_priv *dp_priv;
struct intel_dp *intel_dp;
if (!encoder || encoder->crtc != crtc)
if (encoder->crtc != crtc)
continue;
intel_encoder = enc_to_intel_encoder(encoder);
dp_priv = intel_encoder->dev_priv;
if (intel_encoder->type == INTEL_OUTPUT_DISPLAYPORT)
return dp_priv->is_pch_edp;
intel_dp = enc_to_intel_dp(encoder);
if (intel_dp->base.type == INTEL_OUTPUT_DISPLAYPORT)
return intel_dp->is_pch_edp;
}
return false;
}
......@@ -657,18 +635,15 @@ intel_dp_set_m_n(struct drm_crtc *crtc, struct drm_display_mode *mode,
* Find the lane count in the intel_encoder private
*/
list_for_each_entry(encoder, &mode_config->encoder_list, head) {
struct intel_encoder *intel_encoder;
struct intel_dp_priv *dp_priv;
struct intel_dp *intel_dp;
if (encoder->crtc != crtc)
continue;
intel_encoder = enc_to_intel_encoder(encoder);
dp_priv = intel_encoder->dev_priv;
if (intel_encoder->type == INTEL_OUTPUT_DISPLAYPORT) {
lane_count = dp_priv->lane_count;
if (IS_PCH_eDP(dp_priv))
intel_dp = enc_to_intel_dp(encoder);
if (intel_dp->base.type == INTEL_OUTPUT_DISPLAYPORT) {
lane_count = intel_dp->lane_count;
if (IS_PCH_eDP(intel_dp))
bpp = dev_priv->edp_bpp;
break;
}
......@@ -724,107 +699,114 @@ intel_dp_mode_set(struct drm_encoder *encoder, struct drm_display_mode *mode,
struct drm_display_mode *adjusted_mode)
{
struct drm_device *dev = encoder->dev;
struct intel_encoder *intel_encoder = enc_to_intel_encoder(encoder);
struct intel_dp_priv *dp_priv = intel_encoder->dev_priv;
struct drm_crtc *crtc = intel_encoder->enc.crtc;
struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
struct drm_crtc *crtc = intel_dp->base.enc.crtc;
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
dp_priv->DP = (DP_VOLTAGE_0_4 |
intel_dp->DP = (DP_VOLTAGE_0_4 |
DP_PRE_EMPHASIS_0);
if (adjusted_mode->flags & DRM_MODE_FLAG_PHSYNC)
dp_priv->DP |= DP_SYNC_HS_HIGH;
intel_dp->DP |= DP_SYNC_HS_HIGH;
if (adjusted_mode->flags & DRM_MODE_FLAG_PVSYNC)
dp_priv->DP |= DP_SYNC_VS_HIGH;
intel_dp->DP |= DP_SYNC_VS_HIGH;
if (HAS_PCH_CPT(dev) && !IS_eDP(intel_encoder))
dp_priv->DP |= DP_LINK_TRAIN_OFF_CPT;
if (HAS_PCH_CPT(dev) && !IS_eDP(intel_dp))
intel_dp->DP |= DP_LINK_TRAIN_OFF_CPT;
else
dp_priv->DP |= DP_LINK_TRAIN_OFF;
intel_dp->DP |= DP_LINK_TRAIN_OFF;
switch (dp_priv->lane_count) {
switch (intel_dp->lane_count) {
case 1:
dp_priv->DP |= DP_PORT_WIDTH_1;
intel_dp->DP |= DP_PORT_WIDTH_1;
break;
case 2:
dp_priv->DP |= DP_PORT_WIDTH_2;
intel_dp->DP |= DP_PORT_WIDTH_2;
break;
case 4:
dp_priv->DP |= DP_PORT_WIDTH_4;
intel_dp->DP |= DP_PORT_WIDTH_4;
break;
}
if (dp_priv->has_audio)
dp_priv->DP |= DP_AUDIO_OUTPUT_ENABLE;
if (intel_dp->has_audio)
intel_dp->DP |= DP_AUDIO_OUTPUT_ENABLE;
memset(dp_priv->link_configuration, 0, DP_LINK_CONFIGURATION_SIZE);
dp_priv->link_configuration[0] = dp_priv->link_bw;
dp_priv->link_configuration[1] = dp_priv->lane_count;
memset(intel_dp->link_configuration, 0, DP_LINK_CONFIGURATION_SIZE);
intel_dp->link_configuration[0] = intel_dp->link_bw;
intel_dp->link_configuration[1] = intel_dp->lane_count;
/*
* Check for DPCD version 1.1 or newer and enhanced framing support
*/
if (dp_priv->dpcd[0] >= 0x11 && (dp_priv->dpcd[2] & DP_ENHANCED_FRAME_CAP)) {
dp_priv->link_configuration[1] |= DP_LANE_COUNT_ENHANCED_FRAME_EN;
dp_priv->DP |= DP_ENHANCED_FRAMING;
if (intel_dp->dpcd[0] >= 0x11 && (intel_dp->dpcd[2] & DP_ENHANCED_FRAME_CAP)) {
intel_dp->link_configuration[1] |= DP_LANE_COUNT_ENHANCED_FRAME_EN;
intel_dp->DP |= DP_ENHANCED_FRAMING;
}
/* CPT DP's pipe select is decided in TRANS_DP_CTL */
if (intel_crtc->pipe == 1 && !HAS_PCH_CPT(dev))
dp_priv->DP |= DP_PIPEB_SELECT;
intel_dp->DP |= DP_PIPEB_SELECT;
if (IS_eDP(intel_encoder)) {
if (IS_eDP(intel_dp)) {
/* don't miss the required settings for eDP */
dp_priv->DP |= DP_PLL_ENABLE;
intel_dp->DP |= DP_PLL_ENABLE;
if (adjusted_mode->clock < 200000)
dp_priv->DP |= DP_PLL_FREQ_160MHZ;
intel_dp->DP |= DP_PLL_FREQ_160MHZ;
else
dp_priv->DP |= DP_PLL_FREQ_270MHZ;
intel_dp->DP |= DP_PLL_FREQ_270MHZ;
}
}
static void ironlake_edp_panel_on (struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
unsigned long timeout = jiffies + msecs_to_jiffies(5000);
u32 pp, pp_status;
u32 pp;
pp_status = I915_READ(PCH_PP_STATUS);
if (pp_status & PP_ON)
if (I915_READ(PCH_PP_STATUS) & PP_ON)
return;
pp = I915_READ(PCH_PP_CONTROL);
/* ILK workaround: disable reset around power sequence */
pp &= ~PANEL_POWER_RESET;
I915_WRITE(PCH_PP_CONTROL, pp);
POSTING_READ(PCH_PP_CONTROL);
pp |= PANEL_UNLOCK_REGS | POWER_TARGET_ON;
I915_WRITE(PCH_PP_CONTROL, pp);
do {
pp_status = I915_READ(PCH_PP_STATUS);
} while (((pp_status & PP_ON) == 0) && !time_after(jiffies, timeout));
if (time_after(jiffies, timeout))
DRM_DEBUG_KMS("panel on wait timed out: 0x%08x\n", pp_status);
if (wait_for(I915_READ(PCH_PP_STATUS) & PP_ON, 5000, 10))
DRM_ERROR("panel on wait timed out: 0x%08x\n",
I915_READ(PCH_PP_STATUS));
pp &= ~(PANEL_UNLOCK_REGS | EDP_FORCE_VDD);
pp |= PANEL_POWER_RESET; /* restore panel reset bit */
I915_WRITE(PCH_PP_CONTROL, pp);
POSTING_READ(PCH_PP_CONTROL);
}
static void ironlake_edp_panel_off (struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
unsigned long timeout = jiffies + msecs_to_jiffies(5000);
u32 pp, pp_status;
u32 pp;
pp = I915_READ(PCH_PP_CONTROL);
/* ILK workaround: disable reset around power sequence */
pp &= ~PANEL_POWER_RESET;
I915_WRITE(PCH_PP_CONTROL, pp);
POSTING_READ(PCH_PP_CONTROL);
pp &= ~POWER_TARGET_ON;
I915_WRITE(PCH_PP_CONTROL, pp);
do {
pp_status = I915_READ(PCH_PP_STATUS);
} while ((pp_status & PP_ON) && !time_after(jiffies, timeout));
if (time_after(jiffies, timeout))
DRM_DEBUG_KMS("panel off wait timed out\n");
if (wait_for((I915_READ(PCH_PP_STATUS) & PP_ON) == 0, 5000, 10))
DRM_ERROR("panel off wait timed out: 0x%08x\n",
I915_READ(PCH_PP_STATUS));
/* Make sure VDD is enabled so DP AUX will work */
pp |= EDP_FORCE_VDD;
pp |= EDP_FORCE_VDD | PANEL_POWER_RESET; /* restore panel reset bit */
I915_WRITE(PCH_PP_CONTROL, pp);
POSTING_READ(PCH_PP_CONTROL);
}
static void ironlake_edp_backlight_on (struct drm_device *dev)
......@@ -849,33 +831,87 @@ static void ironlake_edp_backlight_off (struct drm_device *dev)
I915_WRITE(PCH_PP_CONTROL, pp);
}
static void ironlake_edp_pll_on(struct drm_encoder *encoder)
{
struct drm_device *dev = encoder->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
u32 dpa_ctl;
DRM_DEBUG_KMS("\n");
dpa_ctl = I915_READ(DP_A);
dpa_ctl &= ~DP_PLL_ENABLE;
I915_WRITE(DP_A, dpa_ctl);
}
static void ironlake_edp_pll_off(struct drm_encoder *encoder)
{
struct drm_device *dev = encoder->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
u32 dpa_ctl;
dpa_ctl = I915_READ(DP_A);
dpa_ctl |= DP_PLL_ENABLE;
I915_WRITE(DP_A, dpa_ctl);
udelay(200);
}
static void intel_dp_prepare(struct drm_encoder *encoder)
{
struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
struct drm_device *dev = encoder->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
uint32_t dp_reg = I915_READ(intel_dp->output_reg);
if (IS_eDP(intel_dp)) {
ironlake_edp_backlight_off(dev);
ironlake_edp_panel_on(dev);
ironlake_edp_pll_on(encoder);
}
if (dp_reg & DP_PORT_EN)
intel_dp_link_down(intel_dp);
}
static void intel_dp_commit(struct drm_encoder *encoder)
{
struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
struct drm_device *dev = encoder->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
uint32_t dp_reg = I915_READ(intel_dp->output_reg);
if (!(dp_reg & DP_PORT_EN)) {
intel_dp_link_train(intel_dp);
}
if (IS_eDP(intel_dp) || IS_PCH_eDP(intel_dp))
ironlake_edp_backlight_on(dev);
}
static void
intel_dp_dpms(struct drm_encoder *encoder, int mode)
{
struct intel_encoder *intel_encoder = enc_to_intel_encoder(encoder);
struct intel_dp_priv *dp_priv = intel_encoder->dev_priv;
struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
struct drm_device *dev = encoder->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
uint32_t dp_reg = I915_READ(dp_priv->output_reg);
uint32_t dp_reg = I915_READ(intel_dp->output_reg);
if (mode != DRM_MODE_DPMS_ON) {
if (dp_reg & DP_PORT_EN) {
intel_dp_link_down(intel_encoder, dp_priv->DP);
if (IS_eDP(intel_encoder) || IS_PCH_eDP(dp_priv)) {
ironlake_edp_backlight_off(dev);
ironlake_edp_panel_off(dev);
}
if (IS_eDP(intel_dp) || IS_PCH_eDP(intel_dp)) {
ironlake_edp_backlight_off(dev);
ironlake_edp_panel_off(dev);
}
if (dp_reg & DP_PORT_EN)
intel_dp_link_down(intel_dp);
if (IS_eDP(intel_dp) || IS_PCH_eDP(intel_dp))
ironlake_edp_pll_off(encoder);
} else {
if (!(dp_reg & DP_PORT_EN)) {
intel_dp_link_train(intel_encoder, dp_priv->DP, dp_priv->link_configuration);
if (IS_eDP(intel_encoder) || IS_PCH_eDP(dp_priv)) {
if (IS_eDP(intel_dp) || IS_PCH_eDP(intel_dp))
ironlake_edp_panel_on(dev);
intel_dp_link_train(intel_dp);
if (IS_eDP(intel_dp) || IS_PCH_eDP(intel_dp))
ironlake_edp_backlight_on(dev);
}
}
}
dp_priv->dpms_mode = mode;
intel_dp->dpms_mode = mode;
}
/*
......@@ -883,12 +919,12 @@ intel_dp_dpms(struct drm_encoder *encoder, int mode)
* link status information
*/
static bool
intel_dp_get_link_status(struct intel_encoder *intel_encoder,
intel_dp_get_link_status(struct intel_dp *intel_dp,
uint8_t link_status[DP_LINK_STATUS_SIZE])
{
int ret;
ret = intel_dp_aux_native_read(intel_encoder,
ret = intel_dp_aux_native_read(intel_dp,
DP_LANE0_1_STATUS,
link_status, DP_LINK_STATUS_SIZE);
if (ret != DP_LINK_STATUS_SIZE)
......@@ -965,7 +1001,7 @@ intel_dp_pre_emphasis_max(uint8_t voltage_swing)
}
static void
intel_get_adjust_train(struct intel_encoder *intel_encoder,
intel_get_adjust_train(struct intel_dp *intel_dp,
uint8_t link_status[DP_LINK_STATUS_SIZE],
int lane_count,
uint8_t train_set[4])
......@@ -1101,27 +1137,27 @@ intel_channel_eq_ok(uint8_t link_status[DP_LINK_STATUS_SIZE], int lane_count)
}
static bool
intel_dp_set_link_train(struct intel_encoder *intel_encoder,
intel_dp_set_link_train(struct intel_dp *intel_dp,
uint32_t dp_reg_value,
uint8_t dp_train_pat,
uint8_t train_set[4],
bool first)
{
struct drm_device *dev = intel_encoder->enc.dev;
struct drm_device *dev = intel_dp->base.enc.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_dp_priv *dp_priv = intel_encoder->dev_priv;
struct intel_crtc *intel_crtc = to_intel_crtc(intel_dp->base.enc.crtc);
int ret;
I915_WRITE(dp_priv->output_reg, dp_reg_value);
POSTING_READ(dp_priv->output_reg);
I915_WRITE(intel_dp->output_reg, dp_reg_value);
POSTING_READ(intel_dp->output_reg);
if (first)
intel_wait_for_vblank(dev);
intel_wait_for_vblank(dev, intel_crtc->pipe);
intel_dp_aux_native_write_1(intel_encoder,
intel_dp_aux_native_write_1(intel_dp,
DP_TRAINING_PATTERN_SET,
dp_train_pat);
ret = intel_dp_aux_native_write(intel_encoder,
ret = intel_dp_aux_native_write(intel_dp,
DP_TRAINING_LANE0_SET, train_set, 4);
if (ret != 4)
return false;
......@@ -1130,12 +1166,10 @@ intel_dp_set_link_train(struct intel_encoder *intel_encoder,
}
static void
intel_dp_link_train(struct intel_encoder *intel_encoder, uint32_t DP,
uint8_t link_configuration[DP_LINK_CONFIGURATION_SIZE])
intel_dp_link_train(struct intel_dp *intel_dp)
{
struct drm_device *dev = intel_encoder->enc.dev;
struct drm_device *dev = intel_dp->base.enc.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_dp_priv *dp_priv = intel_encoder->dev_priv;
uint8_t train_set[4];
uint8_t link_status[DP_LINK_STATUS_SIZE];
int i;
......@@ -1145,13 +1179,15 @@ intel_dp_link_train(struct intel_encoder *intel_encoder, uint32_t DP,
bool first = true;
int tries;
u32 reg;
uint32_t DP = intel_dp->DP;
/* Write the link configuration data */
intel_dp_aux_native_write(intel_encoder, DP_LINK_BW_SET,
link_configuration, DP_LINK_CONFIGURATION_SIZE);
intel_dp_aux_native_write(intel_dp, DP_LINK_BW_SET,
intel_dp->link_configuration,
DP_LINK_CONFIGURATION_SIZE);
DP |= DP_PORT_EN;
if (HAS_PCH_CPT(dev) && !IS_eDP(intel_encoder))
if (HAS_PCH_CPT(dev) && !IS_eDP(intel_dp))
DP &= ~DP_LINK_TRAIN_MASK_CPT;
else
DP &= ~DP_LINK_TRAIN_MASK;
......@@ -1162,39 +1198,39 @@ intel_dp_link_train(struct intel_encoder *intel_encoder, uint32_t DP,
for (;;) {
/* Use train_set[0] to set the voltage and pre emphasis values */
uint32_t signal_levels;
if (IS_GEN6(dev) && IS_eDP(intel_encoder)) {
if (IS_GEN6(dev) && IS_eDP(intel_dp)) {
signal_levels = intel_gen6_edp_signal_levels(train_set[0]);
DP = (DP & ~EDP_LINK_TRAIN_VOL_EMP_MASK_SNB) | signal_levels;
} else {
signal_levels = intel_dp_signal_levels(train_set[0], dp_priv->lane_count);
signal_levels = intel_dp_signal_levels(train_set[0], intel_dp->lane_count);
DP = (DP & ~(DP_VOLTAGE_MASK|DP_PRE_EMPHASIS_MASK)) | signal_levels;
}
if (HAS_PCH_CPT(dev) && !IS_eDP(intel_encoder))
if (HAS_PCH_CPT(dev) && !IS_eDP(intel_dp))
reg = DP | DP_LINK_TRAIN_PAT_1_CPT;
else
reg = DP | DP_LINK_TRAIN_PAT_1;
if (!intel_dp_set_link_train(intel_encoder, reg,
if (!intel_dp_set_link_train(intel_dp, reg,
DP_TRAINING_PATTERN_1, train_set, first))
break;
first = false;
/* Set training pattern 1 */
udelay(100);
if (!intel_dp_get_link_status(intel_encoder, link_status))
if (!intel_dp_get_link_status(intel_dp, link_status))
break;
if (intel_clock_recovery_ok(link_status, dp_priv->lane_count)) {
if (intel_clock_recovery_ok(link_status, intel_dp->lane_count)) {
clock_recovery = true;
break;
}
/* Check to see if we've tried the max voltage */
for (i = 0; i < dp_priv->lane_count; i++)
for (i = 0; i < intel_dp->lane_count; i++)
if ((train_set[i] & DP_TRAIN_MAX_SWING_REACHED) == 0)
break;
if (i == dp_priv->lane_count)
if (i == intel_dp->lane_count)
break;
/* Check to see if we've tried the same voltage 5 times */
......@@ -1207,7 +1243,7 @@ intel_dp_link_train(struct intel_encoder *intel_encoder, uint32_t DP,
voltage = train_set[0] & DP_TRAIN_VOLTAGE_SWING_MASK;
/* Compute new train_set as requested by target */
intel_get_adjust_train(intel_encoder, link_status, dp_priv->lane_count, train_set);
intel_get_adjust_train(intel_dp, link_status, intel_dp->lane_count, train_set);
}
/* channel equalization */
......@@ -1217,30 +1253,30 @@ intel_dp_link_train(struct intel_encoder *intel_encoder, uint32_t DP,
/* Use train_set[0] to set the voltage and pre emphasis values */
uint32_t signal_levels;
if (IS_GEN6(dev) && IS_eDP(intel_encoder)) {
if (IS_GEN6(dev) && IS_eDP(intel_dp)) {
signal_levels = intel_gen6_edp_signal_levels(train_set[0]);
DP = (DP & ~EDP_LINK_TRAIN_VOL_EMP_MASK_SNB) | signal_levels;
} else {
signal_levels = intel_dp_signal_levels(train_set[0], dp_priv->lane_count);
signal_levels = intel_dp_signal_levels(train_set[0], intel_dp->lane_count);
DP = (DP & ~(DP_VOLTAGE_MASK|DP_PRE_EMPHASIS_MASK)) | signal_levels;
}
if (HAS_PCH_CPT(dev) && !IS_eDP(intel_encoder))
if (HAS_PCH_CPT(dev) && !IS_eDP(intel_dp))
reg = DP | DP_LINK_TRAIN_PAT_2_CPT;
else
reg = DP | DP_LINK_TRAIN_PAT_2;
/* channel eq pattern */
if (!intel_dp_set_link_train(intel_encoder, reg,
if (!intel_dp_set_link_train(intel_dp, reg,
DP_TRAINING_PATTERN_2, train_set,
false))
break;
udelay(400);
if (!intel_dp_get_link_status(intel_encoder, link_status))
if (!intel_dp_get_link_status(intel_dp, link_status))
break;
if (intel_channel_eq_ok(link_status, dp_priv->lane_count)) {
if (intel_channel_eq_ok(link_status, intel_dp->lane_count)) {
channel_eq = true;
break;
}
......@@ -1250,53 +1286,53 @@ intel_dp_link_train(struct intel_encoder *intel_encoder, uint32_t DP,
break;
/* Compute new train_set as requested by target */
intel_get_adjust_train(intel_encoder, link_status, dp_priv->lane_count, train_set);
intel_get_adjust_train(intel_dp, link_status, intel_dp->lane_count, train_set);
++tries;
}
if (HAS_PCH_CPT(dev) && !IS_eDP(intel_encoder))
if (HAS_PCH_CPT(dev) && !IS_eDP(intel_dp))
reg = DP | DP_LINK_TRAIN_OFF_CPT;
else
reg = DP | DP_LINK_TRAIN_OFF;
I915_WRITE(dp_priv->output_reg, reg);
POSTING_READ(dp_priv->output_reg);
intel_dp_aux_native_write_1(intel_encoder,
I915_WRITE(intel_dp->output_reg, reg);
POSTING_READ(intel_dp->output_reg);
intel_dp_aux_native_write_1(intel_dp,
DP_TRAINING_PATTERN_SET, DP_TRAINING_PATTERN_DISABLE);
}
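Editorial note: the clock-recovery loop earlier in this function keeps raising the voltage swing until either the sink reports lock or every lane already has DP_TRAIN_MAX_SWING_REACHED set, at which point training gives up. Below is a small standalone sketch of that per-lane bit test; the helper name and sample values are invented for illustration, and the flag value is taken from drm_dp_helper.h to the best of my knowledge.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define DP_TRAIN_MAX_SWING_REACHED (1 << 2)     /* per-lane flag from the DP spec */

/* True when every lane already drives the maximum voltage swing,
 * i.e. clock recovery has nothing left to try. */
static bool all_lanes_at_max_swing(const uint8_t *train_set, int lane_count)
{
        int i;

        for (i = 0; i < lane_count; i++)
                if ((train_set[i] & DP_TRAIN_MAX_SWING_REACHED) == 0)
                        return false;
        return true;
}

int main(void)
{
        uint8_t train_set[4] = { 0x04, 0x04, 0x00, 0x04 };      /* lane 2 still has headroom */

        printf("give up? %d\n", all_lanes_at_max_swing(train_set, 4));
        return 0;
}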
static void
intel_dp_link_down(struct intel_encoder *intel_encoder, uint32_t DP)
intel_dp_link_down(struct intel_dp *intel_dp)
{
struct drm_device *dev = intel_encoder->enc.dev;
struct drm_device *dev = intel_dp->base.enc.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_dp_priv *dp_priv = intel_encoder->dev_priv;
uint32_t DP = intel_dp->DP;
DRM_DEBUG_KMS("\n");
if (IS_eDP(intel_encoder)) {
if (IS_eDP(intel_dp)) {
DP &= ~DP_PLL_ENABLE;
I915_WRITE(dp_priv->output_reg, DP);
POSTING_READ(dp_priv->output_reg);
I915_WRITE(intel_dp->output_reg, DP);
POSTING_READ(intel_dp->output_reg);
udelay(100);
}
if (HAS_PCH_CPT(dev) && !IS_eDP(intel_encoder)) {
if (HAS_PCH_CPT(dev) && !IS_eDP(intel_dp)) {
DP &= ~DP_LINK_TRAIN_MASK_CPT;
I915_WRITE(dp_priv->output_reg, DP | DP_LINK_TRAIN_PAT_IDLE_CPT);
POSTING_READ(dp_priv->output_reg);
I915_WRITE(intel_dp->output_reg, DP | DP_LINK_TRAIN_PAT_IDLE_CPT);
POSTING_READ(intel_dp->output_reg);
} else {
DP &= ~DP_LINK_TRAIN_MASK;
I915_WRITE(dp_priv->output_reg, DP | DP_LINK_TRAIN_PAT_IDLE);
POSTING_READ(dp_priv->output_reg);
I915_WRITE(intel_dp->output_reg, DP | DP_LINK_TRAIN_PAT_IDLE);
POSTING_READ(intel_dp->output_reg);
}
udelay(17000);
if (IS_eDP(intel_encoder))
if (IS_eDP(intel_dp))
DP |= DP_LINK_TRAIN_OFF;
I915_WRITE(dp_priv->output_reg, DP & ~DP_PORT_EN);
POSTING_READ(dp_priv->output_reg);
I915_WRITE(intel_dp->output_reg, DP & ~DP_PORT_EN);
POSTING_READ(intel_dp->output_reg);
}
/*
......@@ -1309,41 +1345,39 @@ intel_dp_link_down(struct intel_encoder *intel_encoder, uint32_t DP)
*/
static void
intel_dp_check_link_status(struct intel_encoder *intel_encoder)
intel_dp_check_link_status(struct intel_dp *intel_dp)
{
struct intel_dp_priv *dp_priv = intel_encoder->dev_priv;
uint8_t link_status[DP_LINK_STATUS_SIZE];
if (!intel_encoder->enc.crtc)
if (!intel_dp->base.enc.crtc)
return;
if (!intel_dp_get_link_status(intel_encoder, link_status)) {
intel_dp_link_down(intel_encoder, dp_priv->DP);
if (!intel_dp_get_link_status(intel_dp, link_status)) {
intel_dp_link_down(intel_dp);
return;
}
if (!intel_channel_eq_ok(link_status, dp_priv->lane_count))
intel_dp_link_train(intel_encoder, dp_priv->DP, dp_priv->link_configuration);
if (!intel_channel_eq_ok(link_status, intel_dp->lane_count))
intel_dp_link_train(intel_dp);
}
static enum drm_connector_status
ironlake_dp_detect(struct drm_connector *connector)
{
struct drm_encoder *encoder = intel_attached_encoder(connector);
struct intel_encoder *intel_encoder = enc_to_intel_encoder(encoder);
struct intel_dp_priv *dp_priv = intel_encoder->dev_priv;
struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
enum drm_connector_status status;
status = connector_status_disconnected;
if (intel_dp_aux_native_read(intel_encoder,
0x000, dp_priv->dpcd,
sizeof (dp_priv->dpcd)) == sizeof (dp_priv->dpcd))
if (intel_dp_aux_native_read(intel_dp,
0x000, intel_dp->dpcd,
sizeof (intel_dp->dpcd)) == sizeof (intel_dp->dpcd))
{
if (dp_priv->dpcd[0] != 0)
if (intel_dp->dpcd[0] != 0)
status = connector_status_connected;
}
DRM_DEBUG_KMS("DPCD: %hx%hx%hx%hx\n", dp_priv->dpcd[0],
dp_priv->dpcd[1], dp_priv->dpcd[2], dp_priv->dpcd[3]);
DRM_DEBUG_KMS("DPCD: %hx%hx%hx%hx\n", intel_dp->dpcd[0],
intel_dp->dpcd[1], intel_dp->dpcd[2], intel_dp->dpcd[3]);
return status;
}
......@@ -1357,19 +1391,18 @@ static enum drm_connector_status
intel_dp_detect(struct drm_connector *connector)
{
struct drm_encoder *encoder = intel_attached_encoder(connector);
struct intel_encoder *intel_encoder = enc_to_intel_encoder(encoder);
struct drm_device *dev = intel_encoder->enc.dev;
struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
struct drm_device *dev = intel_dp->base.enc.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_dp_priv *dp_priv = intel_encoder->dev_priv;
uint32_t temp, bit;
enum drm_connector_status status;
dp_priv->has_audio = false;
intel_dp->has_audio = false;
if (HAS_PCH_SPLIT(dev))
return ironlake_dp_detect(connector);
switch (dp_priv->output_reg) {
switch (intel_dp->output_reg) {
case DP_B:
bit = DPB_HOTPLUG_INT_STATUS;
break;
......@@ -1389,11 +1422,11 @@ intel_dp_detect(struct drm_connector *connector)
return connector_status_disconnected;
status = connector_status_disconnected;
if (intel_dp_aux_native_read(intel_encoder,
0x000, dp_priv->dpcd,
sizeof (dp_priv->dpcd)) == sizeof (dp_priv->dpcd))
if (intel_dp_aux_native_read(intel_dp,
0x000, intel_dp->dpcd,
sizeof (intel_dp->dpcd)) == sizeof (intel_dp->dpcd))
{
if (dp_priv->dpcd[0] != 0)
if (intel_dp->dpcd[0] != 0)
status = connector_status_connected;
}
return status;
......@@ -1402,18 +1435,17 @@ intel_dp_detect(struct drm_connector *connector)
static int intel_dp_get_modes(struct drm_connector *connector)
{
struct drm_encoder *encoder = intel_attached_encoder(connector);
struct intel_encoder *intel_encoder = enc_to_intel_encoder(encoder);
struct drm_device *dev = intel_encoder->enc.dev;
struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
struct drm_device *dev = intel_dp->base.enc.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_dp_priv *dp_priv = intel_encoder->dev_priv;
int ret;
/* We should parse the EDID data and find out if it has an audio sink
*/
ret = intel_ddc_get_modes(connector, intel_encoder->ddc_bus);
ret = intel_ddc_get_modes(connector, intel_dp->base.ddc_bus);
if (ret) {
if ((IS_eDP(intel_encoder) || IS_PCH_eDP(dp_priv)) &&
if ((IS_eDP(intel_dp) || IS_PCH_eDP(intel_dp)) &&
!dev_priv->panel_fixed_mode) {
struct drm_display_mode *newmode;
list_for_each_entry(newmode, &connector->probed_modes,
......@@ -1430,7 +1462,7 @@ static int intel_dp_get_modes(struct drm_connector *connector)
}
/* if eDP has no EDID, try to use fixed panel mode from VBT */
if (IS_eDP(intel_encoder) || IS_PCH_eDP(dp_priv)) {
if (IS_eDP(intel_dp) || IS_PCH_eDP(intel_dp)) {
if (dev_priv->panel_fixed_mode != NULL) {
struct drm_display_mode *mode;
mode = drm_mode_duplicate(dev, dev_priv->panel_fixed_mode);
......@@ -1452,9 +1484,9 @@ intel_dp_destroy (struct drm_connector *connector)
static const struct drm_encoder_helper_funcs intel_dp_helper_funcs = {
.dpms = intel_dp_dpms,
.mode_fixup = intel_dp_mode_fixup,
.prepare = intel_encoder_prepare,
.prepare = intel_dp_prepare,
.mode_set = intel_dp_mode_set,
.commit = intel_encoder_commit,
.commit = intel_dp_commit,
};
static const struct drm_connector_funcs intel_dp_connector_funcs = {
......@@ -1470,27 +1502,17 @@ static const struct drm_connector_helper_funcs intel_dp_connector_helper_funcs =
.best_encoder = intel_attached_encoder,
};
static void intel_dp_enc_destroy(struct drm_encoder *encoder)
{
struct intel_encoder *intel_encoder = enc_to_intel_encoder(encoder);
if (intel_encoder->i2c_bus)
intel_i2c_destroy(intel_encoder->i2c_bus);
drm_encoder_cleanup(encoder);
kfree(intel_encoder);
}
static const struct drm_encoder_funcs intel_dp_enc_funcs = {
.destroy = intel_dp_enc_destroy,
.destroy = intel_encoder_destroy,
};
void
intel_dp_hot_plug(struct intel_encoder *intel_encoder)
{
struct intel_dp_priv *dp_priv = intel_encoder->dev_priv;
struct intel_dp *intel_dp = container_of(intel_encoder, struct intel_dp, base);
if (dp_priv->dpms_mode == DRM_MODE_DPMS_ON)
intel_dp_check_link_status(intel_encoder);
if (intel_dp->dpms_mode == DRM_MODE_DPMS_ON)
intel_dp_check_link_status(intel_dp);
}
/* Return which DP Port should be selected for Transcoder DP control */
......@@ -1500,18 +1522,18 @@ intel_trans_dp_port_sel (struct drm_crtc *crtc)
struct drm_device *dev = crtc->dev;
struct drm_mode_config *mode_config = &dev->mode_config;
struct drm_encoder *encoder;
struct intel_encoder *intel_encoder = NULL;
list_for_each_entry(encoder, &mode_config->encoder_list, head) {
struct intel_dp *intel_dp;
if (encoder->crtc != crtc)
continue;
intel_encoder = enc_to_intel_encoder(encoder);
if (intel_encoder->type == INTEL_OUTPUT_DISPLAYPORT) {
struct intel_dp_priv *dp_priv = intel_encoder->dev_priv;
return dp_priv->output_reg;
}
intel_dp = enc_to_intel_dp(encoder);
if (intel_dp->base.type == INTEL_OUTPUT_DISPLAYPORT)
return intel_dp->output_reg;
}
return -1;
}
......@@ -1540,30 +1562,28 @@ intel_dp_init(struct drm_device *dev, int output_reg)
{
struct drm_i915_private *dev_priv = dev->dev_private;
struct drm_connector *connector;
struct intel_dp *intel_dp;
struct intel_encoder *intel_encoder;
struct intel_connector *intel_connector;
struct intel_dp_priv *dp_priv;
const char *name = NULL;
int type;
intel_encoder = kcalloc(sizeof(struct intel_encoder) +
sizeof(struct intel_dp_priv), 1, GFP_KERNEL);
if (!intel_encoder)
intel_dp = kzalloc(sizeof(struct intel_dp), GFP_KERNEL);
if (!intel_dp)
return;
intel_connector = kzalloc(sizeof(struct intel_connector), GFP_KERNEL);
if (!intel_connector) {
kfree(intel_encoder);
kfree(intel_dp);
return;
}
intel_encoder = &intel_dp->base;
dp_priv = (struct intel_dp_priv *)(intel_encoder + 1);
if (HAS_PCH_SPLIT(dev) && (output_reg == PCH_DP_D))
if (HAS_PCH_SPLIT(dev) && output_reg == PCH_DP_D)
if (intel_dpd_is_edp(dev))
dp_priv->is_pch_edp = true;
intel_dp->is_pch_edp = true;
if (output_reg == DP_A || IS_PCH_eDP(dp_priv)) {
if (output_reg == DP_A || IS_PCH_eDP(intel_dp)) {
type = DRM_MODE_CONNECTOR_eDP;
intel_encoder->type = INTEL_OUTPUT_EDP;
} else {
......@@ -1584,18 +1604,16 @@ intel_dp_init(struct drm_device *dev, int output_reg)
else if (output_reg == DP_D || output_reg == PCH_DP_D)
intel_encoder->clone_mask = (1 << INTEL_DP_D_CLONE_BIT);
if (IS_eDP(intel_encoder))
if (IS_eDP(intel_dp))
intel_encoder->clone_mask = (1 << INTEL_EDP_CLONE_BIT);
intel_encoder->crtc_mask = (1 << 0) | (1 << 1);
connector->interlace_allowed = true;
connector->doublescan_allowed = 0;
dp_priv->intel_encoder = intel_encoder;
dp_priv->output_reg = output_reg;
dp_priv->has_audio = false;
dp_priv->dpms_mode = DRM_MODE_DPMS_ON;
intel_encoder->dev_priv = dp_priv;
intel_dp->output_reg = output_reg;
intel_dp->has_audio = false;
intel_dp->dpms_mode = DRM_MODE_DPMS_ON;
drm_encoder_init(dev, &intel_encoder->enc, &intel_dp_enc_funcs,
DRM_MODE_ENCODER_TMDS);
......@@ -1630,12 +1648,12 @@ intel_dp_init(struct drm_device *dev, int output_reg)
break;
}
intel_dp_i2c_init(intel_encoder, intel_connector, name);
intel_dp_i2c_init(intel_dp, intel_connector, name);
intel_encoder->ddc_bus = &dp_priv->adapter;
intel_encoder->ddc_bus = &intel_dp->adapter;
intel_encoder->hot_plug = intel_dp_hot_plug;
if (output_reg == DP_A || IS_PCH_eDP(dp_priv)) {
if (output_reg == DP_A || IS_PCH_eDP(intel_dp)) {
/* initialize panel mode from VBT if available for eDP */
if (dev_priv->lfp_lvds_vbt_mode) {
dev_priv->panel_fixed_mode =
......
......@@ -32,6 +32,20 @@
#include "drm_crtc.h"
#include "drm_crtc_helper.h"
#define wait_for(COND, MS, W) ({ \
unsigned long timeout__ = jiffies + msecs_to_jiffies(MS); \
int ret__ = 0; \
while (! (COND)) { \
if (time_after(jiffies, timeout__)) { \
ret__ = -ETIMEDOUT; \
break; \
} \
if (W) msleep(W); \
} \
ret__; \
})
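Editorial note: the wait_for() macro above is what later hunks use to replace open-coded status-register polling with a bounded wait. As a rough illustration only, here is a self-contained userspace sketch of the same poll-with-timeout idiom; poll_until(), read_status() and the timings are invented for the example and are not part of the driver.

#define _DEFAULT_SOURCE
#include <stdio.h>
#include <time.h>
#include <unistd.h>

static int fake_status;                         /* stands in for a hardware status register */
static int read_status(void) { return fake_status; }

/* Poll cond() until it is true, giving up after timeout_ms and sleeping
 * sleep_ms between reads -- the same contract as wait_for(COND, MS, W). */
static int poll_until(int (*cond)(void), int timeout_ms, int sleep_ms)
{
        struct timespec start, now;

        clock_gettime(CLOCK_MONOTONIC, &start);
        while (!cond()) {
                clock_gettime(CLOCK_MONOTONIC, &now);
                long elapsed_ms = (now.tv_sec - start.tv_sec) * 1000 +
                                  (now.tv_nsec - start.tv_nsec) / 1000000;
                if (elapsed_ms > timeout_ms)
                        return -1;              /* the macro returns -ETIMEDOUT */
                if (sleep_ms)
                        usleep(sleep_ms * 1000);
        }
        return 0;
}

int main(void)
{
        fake_status = 1;                        /* pretend the condition is already met */
        printf("poll result: %d\n", poll_until(read_status, 1000, 10));
        return 0;
}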
/*
* Display related stuff
*/
......@@ -102,7 +116,6 @@ struct intel_encoder {
struct i2c_adapter *ddc_bus;
bool load_detect_temp;
bool needs_tv_clock;
void *dev_priv;
void (*hot_plug)(struct intel_encoder *);
int crtc_mask;
int clone_mask;
......@@ -110,7 +123,6 @@ struct intel_encoder {
struct intel_connector {
struct drm_connector base;
void *dev_priv;
};
struct intel_crtc;
......@@ -156,7 +168,7 @@ struct intel_crtc {
uint32_t cursor_addr;
int16_t cursor_x, cursor_y;
int16_t cursor_width, cursor_height;
bool cursor_visble;
bool cursor_visible, cursor_on;
};
#define to_intel_crtc(x) container_of(x, struct intel_crtc, base)
......@@ -188,10 +200,18 @@ extern bool intel_dpd_is_edp(struct drm_device *dev);
extern void intel_edp_link_config (struct intel_encoder *, int *, int *);
extern void intel_fixed_panel_mode(struct drm_display_mode *fixed_mode,
struct drm_display_mode *adjusted_mode);
extern void intel_pch_panel_fitting(struct drm_device *dev,
int fitting_mode,
struct drm_display_mode *mode,
struct drm_display_mode *adjusted_mode);
extern int intel_panel_fitter_pipe (struct drm_device *dev);
extern void intel_crtc_load_lut(struct drm_crtc *crtc);
extern void intel_encoder_prepare (struct drm_encoder *encoder);
extern void intel_encoder_commit (struct drm_encoder *encoder);
extern void intel_encoder_destroy(struct drm_encoder *encoder);
extern struct drm_encoder *intel_attached_encoder(struct drm_connector *connector);
......@@ -199,7 +219,8 @@ extern struct drm_display_mode *intel_crtc_mode_get(struct drm_device *dev,
struct drm_crtc *crtc);
int intel_get_pipe_from_crtc_id(struct drm_device *dev, void *data,
struct drm_file *file_priv);
extern void intel_wait_for_vblank(struct drm_device *dev);
extern void intel_wait_for_vblank_off(struct drm_device *dev, int pipe);
extern void intel_wait_for_vblank(struct drm_device *dev, int pipe);
extern struct drm_crtc *intel_get_crtc_from_pipe(struct drm_device *dev, int pipe);
extern struct drm_crtc *intel_get_load_detect_pipe(struct intel_encoder *intel_encoder,
struct drm_connector *connector,
......
......@@ -38,7 +38,7 @@
#define CH7xxx_ADDR 0x76
#define TFP410_ADDR 0x38
static struct intel_dvo_device intel_dvo_devices[] = {
static const struct intel_dvo_device intel_dvo_devices[] = {
{
.type = INTEL_DVO_CHIP_TMDS,
.name = "sil164",
......@@ -77,20 +77,33 @@ static struct intel_dvo_device intel_dvo_devices[] = {
}
};
struct intel_dvo {
struct intel_encoder base;
struct intel_dvo_device dev;
struct drm_display_mode *panel_fixed_mode;
bool panel_wants_dither;
};
static struct intel_dvo *enc_to_intel_dvo(struct drm_encoder *encoder)
{
return container_of(enc_to_intel_encoder(encoder), struct intel_dvo, base);
}
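Editorial note: enc_to_intel_dvo() shows the pattern this whole series converges on. Each output type now embeds struct intel_encoder as a member called base and recovers its own struct from the base pointer with container_of(), instead of chasing the old void *dev_priv. For readers unfamiliar with the idiom, here is a minimal standalone sketch; the struct and function names are made up, and the container_of() shown is a simplified userspace equivalent of the kernel macro.

#include <stddef.h>
#include <stdio.h>

/* Simplified userspace equivalent of the kernel's container_of(). */
#define container_of(ptr, type, member) \
        ((type *)((char *)(ptr) - offsetof(type, member)))

struct encoder { int id; };             /* stands in for struct intel_encoder */

struct dvo_output {                     /* stands in for struct intel_dvo */
        struct encoder base;            /* embedded base object */
        int dvo_reg;                    /* per-output state, formerly hung off dev_priv */
};

/* Given only the base pointer, recover the containing output. */
static struct dvo_output *to_dvo(struct encoder *enc)
{
        return container_of(enc, struct dvo_output, base);
}

int main(void)
{
        struct dvo_output out = { .base = { .id = 1 }, .dvo_reg = 0x61140 };
        struct encoder *enc = &out.base;        /* what the core hands back */

        printf("dvo_reg = 0x%x\n", to_dvo(enc)->dvo_reg);
        return 0;
}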
static void intel_dvo_dpms(struct drm_encoder *encoder, int mode)
{
struct drm_i915_private *dev_priv = encoder->dev->dev_private;
struct intel_encoder *intel_encoder = enc_to_intel_encoder(encoder);
struct intel_dvo_device *dvo = intel_encoder->dev_priv;
u32 dvo_reg = dvo->dvo_reg;
struct intel_dvo *intel_dvo = enc_to_intel_dvo(encoder);
u32 dvo_reg = intel_dvo->dev.dvo_reg;
u32 temp = I915_READ(dvo_reg);
if (mode == DRM_MODE_DPMS_ON) {
I915_WRITE(dvo_reg, temp | DVO_ENABLE);
I915_READ(dvo_reg);
dvo->dev_ops->dpms(dvo, mode);
intel_dvo->dev.dev_ops->dpms(&intel_dvo->dev, mode);
} else {
dvo->dev_ops->dpms(dvo, mode);
intel_dvo->dev.dev_ops->dpms(&intel_dvo->dev, mode);
I915_WRITE(dvo_reg, temp & ~DVO_ENABLE);
I915_READ(dvo_reg);
}
......@@ -100,38 +113,36 @@ static int intel_dvo_mode_valid(struct drm_connector *connector,
struct drm_display_mode *mode)
{
struct drm_encoder *encoder = intel_attached_encoder(connector);
struct intel_encoder *intel_encoder = enc_to_intel_encoder(encoder);
struct intel_dvo_device *dvo = intel_encoder->dev_priv;
struct intel_dvo *intel_dvo = enc_to_intel_dvo(encoder);
if (mode->flags & DRM_MODE_FLAG_DBLSCAN)
return MODE_NO_DBLESCAN;
/* XXX: Validate clock range */
if (dvo->panel_fixed_mode) {
if (mode->hdisplay > dvo->panel_fixed_mode->hdisplay)
if (intel_dvo->panel_fixed_mode) {
if (mode->hdisplay > intel_dvo->panel_fixed_mode->hdisplay)
return MODE_PANEL;
if (mode->vdisplay > dvo->panel_fixed_mode->vdisplay)
if (mode->vdisplay > intel_dvo->panel_fixed_mode->vdisplay)
return MODE_PANEL;
}
return dvo->dev_ops->mode_valid(dvo, mode);
return intel_dvo->dev.dev_ops->mode_valid(&intel_dvo->dev, mode);
}
static bool intel_dvo_mode_fixup(struct drm_encoder *encoder,
struct drm_display_mode *mode,
struct drm_display_mode *adjusted_mode)
{
struct intel_encoder *intel_encoder = enc_to_intel_encoder(encoder);
struct intel_dvo_device *dvo = intel_encoder->dev_priv;
struct intel_dvo *intel_dvo = enc_to_intel_dvo(encoder);
/* If we have timings from the BIOS for the panel, put them in
* to the adjusted mode. The CRTC will be set up for this mode,
* with the panel scaling set up to source from the H/VDisplay
* of the original mode.
*/
if (dvo->panel_fixed_mode != NULL) {
#define C(x) adjusted_mode->x = dvo->panel_fixed_mode->x
if (intel_dvo->panel_fixed_mode != NULL) {
#define C(x) adjusted_mode->x = intel_dvo->panel_fixed_mode->x
C(hdisplay);
C(hsync_start);
C(hsync_end);
......@@ -145,8 +156,8 @@ static bool intel_dvo_mode_fixup(struct drm_encoder *encoder,
#undef C
}
if (dvo->dev_ops->mode_fixup)
return dvo->dev_ops->mode_fixup(dvo, mode, adjusted_mode);
if (intel_dvo->dev.dev_ops->mode_fixup)
return intel_dvo->dev.dev_ops->mode_fixup(&intel_dvo->dev, mode, adjusted_mode);
return true;
}
......@@ -158,11 +169,10 @@ static void intel_dvo_mode_set(struct drm_encoder *encoder,
struct drm_device *dev = encoder->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_crtc *intel_crtc = to_intel_crtc(encoder->crtc);
struct intel_encoder *intel_encoder = enc_to_intel_encoder(encoder);
struct intel_dvo_device *dvo = intel_encoder->dev_priv;
struct intel_dvo *intel_dvo = enc_to_intel_dvo(encoder);
int pipe = intel_crtc->pipe;
u32 dvo_val;
u32 dvo_reg = dvo->dvo_reg, dvo_srcdim_reg;
u32 dvo_reg = intel_dvo->dev.dvo_reg, dvo_srcdim_reg;
int dpll_reg = (pipe == 0) ? DPLL_A : DPLL_B;
switch (dvo_reg) {
......@@ -178,7 +188,7 @@ static void intel_dvo_mode_set(struct drm_encoder *encoder,
break;
}
dvo->dev_ops->mode_set(dvo, mode, adjusted_mode);
intel_dvo->dev.dev_ops->mode_set(&intel_dvo->dev, mode, adjusted_mode);
/* Save the data order, since I don't know what it should be set to. */
dvo_val = I915_READ(dvo_reg) &
......@@ -214,40 +224,38 @@ static void intel_dvo_mode_set(struct drm_encoder *encoder,
static enum drm_connector_status intel_dvo_detect(struct drm_connector *connector)
{
struct drm_encoder *encoder = intel_attached_encoder(connector);
struct intel_encoder *intel_encoder = enc_to_intel_encoder(encoder);
struct intel_dvo_device *dvo = intel_encoder->dev_priv;
struct intel_dvo *intel_dvo = enc_to_intel_dvo(encoder);
return dvo->dev_ops->detect(dvo);
return intel_dvo->dev.dev_ops->detect(&intel_dvo->dev);
}
static int intel_dvo_get_modes(struct drm_connector *connector)
{
struct drm_encoder *encoder = intel_attached_encoder(connector);
struct intel_encoder *intel_encoder = enc_to_intel_encoder(encoder);
struct intel_dvo_device *dvo = intel_encoder->dev_priv;
struct intel_dvo *intel_dvo = enc_to_intel_dvo(encoder);
/* We should probably have an i2c driver get_modes function for those
* devices which will have a fixed set of modes determined by the chip
* (TV-out, for example), but for now with just TMDS and LVDS,
* that's not the case.
*/
intel_ddc_get_modes(connector, intel_encoder->ddc_bus);
intel_ddc_get_modes(connector, intel_dvo->base.ddc_bus);
if (!list_empty(&connector->probed_modes))
return 1;
if (dvo->panel_fixed_mode != NULL) {
if (intel_dvo->panel_fixed_mode != NULL) {
struct drm_display_mode *mode;
mode = drm_mode_duplicate(connector->dev, dvo->panel_fixed_mode);
mode = drm_mode_duplicate(connector->dev, intel_dvo->panel_fixed_mode);
if (mode) {
drm_mode_probed_add(connector, mode);
return 1;
}
}
return 0;
}
static void intel_dvo_destroy (struct drm_connector *connector)
static void intel_dvo_destroy(struct drm_connector *connector)
{
drm_sysfs_connector_remove(connector);
drm_connector_cleanup(connector);
......@@ -277,28 +285,20 @@ static const struct drm_connector_helper_funcs intel_dvo_connector_helper_funcs
static void intel_dvo_enc_destroy(struct drm_encoder *encoder)
{
struct intel_encoder *intel_encoder = enc_to_intel_encoder(encoder);
struct intel_dvo_device *dvo = intel_encoder->dev_priv;
if (dvo) {
if (dvo->dev_ops->destroy)
dvo->dev_ops->destroy(dvo);
if (dvo->panel_fixed_mode)
kfree(dvo->panel_fixed_mode);
}
if (intel_encoder->i2c_bus)
intel_i2c_destroy(intel_encoder->i2c_bus);
if (intel_encoder->ddc_bus)
intel_i2c_destroy(intel_encoder->ddc_bus);
drm_encoder_cleanup(encoder);
kfree(intel_encoder);
struct intel_dvo *intel_dvo = enc_to_intel_dvo(encoder);
if (intel_dvo->dev.dev_ops->destroy)
intel_dvo->dev.dev_ops->destroy(&intel_dvo->dev);
kfree(intel_dvo->panel_fixed_mode);
intel_encoder_destroy(encoder);
}
static const struct drm_encoder_funcs intel_dvo_enc_funcs = {
.destroy = intel_dvo_enc_destroy,
};
/**
* Attempts to get a fixed panel timing for LVDS (currently only the i830).
*
......@@ -306,15 +306,13 @@ static const struct drm_encoder_funcs intel_dvo_enc_funcs = {
* chip being on DVOB/C and having multiple pipes.
*/
static struct drm_display_mode *
intel_dvo_get_current_mode (struct drm_connector *connector)
intel_dvo_get_current_mode(struct drm_connector *connector)
{
struct drm_device *dev = connector->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
struct drm_encoder *encoder = intel_attached_encoder(connector);
struct intel_encoder *intel_encoder = enc_to_intel_encoder(encoder);
struct intel_dvo_device *dvo = intel_encoder->dev_priv;
uint32_t dvo_reg = dvo->dvo_reg;
uint32_t dvo_val = I915_READ(dvo_reg);
struct intel_dvo *intel_dvo = enc_to_intel_dvo(encoder);
uint32_t dvo_val = I915_READ(intel_dvo->dev.dvo_reg);
struct drm_display_mode *mode = NULL;
/* If the DVO port is active, that'll be the LVDS, so we can pull out
......@@ -327,7 +325,6 @@ intel_dvo_get_current_mode (struct drm_connector *connector)
crtc = intel_get_crtc_from_pipe(dev, pipe);
if (crtc) {
mode = intel_crtc_mode_get(dev, crtc);
if (mode) {
mode->type |= DRM_MODE_TYPE_PREFERRED;
if (dvo_val & DVO_HSYNC_ACTIVE_HIGH)
......@@ -337,28 +334,32 @@ intel_dvo_get_current_mode (struct drm_connector *connector)
}
}
}
return mode;
}
void intel_dvo_init(struct drm_device *dev)
{
struct intel_encoder *intel_encoder;
struct intel_dvo *intel_dvo;
struct intel_connector *intel_connector;
struct intel_dvo_device *dvo;
struct i2c_adapter *i2cbus = NULL;
int ret = 0;
int i;
int encoder_type = DRM_MODE_ENCODER_NONE;
intel_encoder = kzalloc (sizeof(struct intel_encoder), GFP_KERNEL);
if (!intel_encoder)
intel_dvo = kzalloc(sizeof(struct intel_dvo), GFP_KERNEL);
if (!intel_dvo)
return;
intel_connector = kzalloc(sizeof(struct intel_connector), GFP_KERNEL);
if (!intel_connector) {
kfree(intel_encoder);
kfree(intel_dvo);
return;
}
intel_encoder = &intel_dvo->base;
/* Set up the DDC bus */
intel_encoder->ddc_bus = intel_i2c_create(dev, GPIOD, "DVODDC_D");
if (!intel_encoder->ddc_bus)
......@@ -367,10 +368,9 @@ void intel_dvo_init(struct drm_device *dev)
/* Now, try to find a controller */
for (i = 0; i < ARRAY_SIZE(intel_dvo_devices); i++) {
struct drm_connector *connector = &intel_connector->base;
const struct intel_dvo_device *dvo = &intel_dvo_devices[i];
int gpio;
dvo = &intel_dvo_devices[i];
/* Allow the I2C driver info to specify the GPIO to be used in
* special cases, but otherwise default to what's defined
* in the spec.
......@@ -393,11 +393,8 @@ void intel_dvo_init(struct drm_device *dev)
continue;
}
if (dvo->dev_ops!= NULL)
ret = dvo->dev_ops->init(dvo, i2cbus);
else
ret = false;
intel_dvo->dev = *dvo;
ret = dvo->dev_ops->init(&intel_dvo->dev, i2cbus);
if (!ret)
continue;
......@@ -429,9 +426,6 @@ void intel_dvo_init(struct drm_device *dev)
connector->interlace_allowed = false;
connector->doublescan_allowed = false;
intel_encoder->dev_priv = dvo;
intel_encoder->i2c_bus = i2cbus;
drm_encoder_init(dev, &intel_encoder->enc,
&intel_dvo_enc_funcs, encoder_type);
drm_encoder_helper_add(&intel_encoder->enc,
......@@ -447,9 +441,9 @@ void intel_dvo_init(struct drm_device *dev)
* headers, likely), so for now, just get the current
* mode being output through DVO.
*/
dvo->panel_fixed_mode =
intel_dvo->panel_fixed_mode =
intel_dvo_get_current_mode(connector);
dvo->panel_wants_dither = true;
intel_dvo->panel_wants_dither = true;
}
drm_sysfs_connector_add(connector);
......@@ -461,6 +455,6 @@ void intel_dvo_init(struct drm_device *dev)
if (i2cbus != NULL)
intel_i2c_destroy(i2cbus);
free_intel:
kfree(intel_encoder);
kfree(intel_dvo);
kfree(intel_connector);
}
......@@ -37,11 +37,17 @@
#include "i915_drm.h"
#include "i915_drv.h"
struct intel_hdmi_priv {
struct intel_hdmi {
struct intel_encoder base;
u32 sdvox_reg;
bool has_hdmi_sink;
};
static struct intel_hdmi *enc_to_intel_hdmi(struct drm_encoder *encoder)
{
return container_of(enc_to_intel_encoder(encoder), struct intel_hdmi, base);
}
static void intel_hdmi_mode_set(struct drm_encoder *encoder,
struct drm_display_mode *mode,
struct drm_display_mode *adjusted_mode)
......@@ -50,8 +56,7 @@ static void intel_hdmi_mode_set(struct drm_encoder *encoder,
struct drm_i915_private *dev_priv = dev->dev_private;
struct drm_crtc *crtc = encoder->crtc;
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
struct intel_encoder *intel_encoder = enc_to_intel_encoder(encoder);
struct intel_hdmi_priv *hdmi_priv = intel_encoder->dev_priv;
struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(encoder);
u32 sdvox;
sdvox = SDVO_ENCODING_HDMI | SDVO_BORDER_ENABLE;
......@@ -60,7 +65,7 @@ static void intel_hdmi_mode_set(struct drm_encoder *encoder,
if (adjusted_mode->flags & DRM_MODE_FLAG_PHSYNC)
sdvox |= SDVO_HSYNC_ACTIVE_HIGH;
if (hdmi_priv->has_hdmi_sink) {
if (intel_hdmi->has_hdmi_sink) {
sdvox |= SDVO_AUDIO_ENABLE;
if (HAS_PCH_CPT(dev))
sdvox |= HDMI_MODE_SELECT;
......@@ -73,26 +78,25 @@ static void intel_hdmi_mode_set(struct drm_encoder *encoder,
sdvox |= SDVO_PIPE_B_SELECT;
}
I915_WRITE(hdmi_priv->sdvox_reg, sdvox);
POSTING_READ(hdmi_priv->sdvox_reg);
I915_WRITE(intel_hdmi->sdvox_reg, sdvox);
POSTING_READ(intel_hdmi->sdvox_reg);
}
static void intel_hdmi_dpms(struct drm_encoder *encoder, int mode)
{
struct drm_device *dev = encoder->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_encoder *intel_encoder = enc_to_intel_encoder(encoder);
struct intel_hdmi_priv *hdmi_priv = intel_encoder->dev_priv;
struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(encoder);
u32 temp;
temp = I915_READ(hdmi_priv->sdvox_reg);
temp = I915_READ(intel_hdmi->sdvox_reg);
/* HW workaround, need to toggle enable bit off and on for 12bpc, but
* we do this in all cases, as it has proved more stable in testing.
*/
if (HAS_PCH_SPLIT(dev)) {
I915_WRITE(hdmi_priv->sdvox_reg, temp & ~SDVO_ENABLE);
POSTING_READ(hdmi_priv->sdvox_reg);
I915_WRITE(intel_hdmi->sdvox_reg, temp & ~SDVO_ENABLE);
POSTING_READ(intel_hdmi->sdvox_reg);
}
if (mode != DRM_MODE_DPMS_ON) {
......@@ -101,15 +105,15 @@ static void intel_hdmi_dpms(struct drm_encoder *encoder, int mode)
temp |= SDVO_ENABLE;
}
I915_WRITE(hdmi_priv->sdvox_reg, temp);
POSTING_READ(hdmi_priv->sdvox_reg);
I915_WRITE(intel_hdmi->sdvox_reg, temp);
POSTING_READ(intel_hdmi->sdvox_reg);
/* HW workaround, need to write this twice to work around an issue that
* may result in the first write getting masked.
*/
if (HAS_PCH_SPLIT(dev)) {
I915_WRITE(hdmi_priv->sdvox_reg, temp);
POSTING_READ(hdmi_priv->sdvox_reg);
I915_WRITE(intel_hdmi->sdvox_reg, temp);
POSTING_READ(intel_hdmi->sdvox_reg);
}
}
......@@ -138,19 +142,17 @@ static enum drm_connector_status
intel_hdmi_detect(struct drm_connector *connector)
{
struct drm_encoder *encoder = intel_attached_encoder(connector);
struct intel_encoder *intel_encoder = enc_to_intel_encoder(encoder);
struct intel_hdmi_priv *hdmi_priv = intel_encoder->dev_priv;
struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(encoder);
struct edid *edid = NULL;
enum drm_connector_status status = connector_status_disconnected;
hdmi_priv->has_hdmi_sink = false;
edid = drm_get_edid(connector,
intel_encoder->ddc_bus);
intel_hdmi->has_hdmi_sink = false;
edid = drm_get_edid(connector, intel_hdmi->base.ddc_bus);
if (edid) {
if (edid->input & DRM_EDID_INPUT_DIGITAL) {
status = connector_status_connected;
hdmi_priv->has_hdmi_sink = drm_detect_hdmi_monitor(edid);
intel_hdmi->has_hdmi_sink = drm_detect_hdmi_monitor(edid);
}
connector->display_info.raw_edid = NULL;
kfree(edid);
......@@ -162,13 +164,13 @@ intel_hdmi_detect(struct drm_connector *connector)
static int intel_hdmi_get_modes(struct drm_connector *connector)
{
struct drm_encoder *encoder = intel_attached_encoder(connector);
struct intel_encoder *intel_encoder = enc_to_intel_encoder(encoder);
struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(encoder);
/* We should parse the EDID data and find out if it's an HDMI sink so
* we can send audio to it.
*/
return intel_ddc_get_modes(connector, intel_encoder->ddc_bus);
return intel_ddc_get_modes(connector, intel_hdmi->base.ddc_bus);
}
static void intel_hdmi_destroy(struct drm_connector *connector)
......@@ -199,18 +201,8 @@ static const struct drm_connector_helper_funcs intel_hdmi_connector_helper_funcs
.best_encoder = intel_attached_encoder,
};
static void intel_hdmi_enc_destroy(struct drm_encoder *encoder)
{
struct intel_encoder *intel_encoder = enc_to_intel_encoder(encoder);
if (intel_encoder->i2c_bus)
intel_i2c_destroy(intel_encoder->i2c_bus);
drm_encoder_cleanup(encoder);
kfree(intel_encoder);
}
static const struct drm_encoder_funcs intel_hdmi_enc_funcs = {
.destroy = intel_hdmi_enc_destroy,
.destroy = intel_encoder_destroy,
};
void intel_hdmi_init(struct drm_device *dev, int sdvox_reg)
......@@ -219,21 +211,19 @@ void intel_hdmi_init(struct drm_device *dev, int sdvox_reg)
struct drm_connector *connector;
struct intel_encoder *intel_encoder;
struct intel_connector *intel_connector;
struct intel_hdmi_priv *hdmi_priv;
struct intel_hdmi *intel_hdmi;
intel_encoder = kcalloc(sizeof(struct intel_encoder) +
sizeof(struct intel_hdmi_priv), 1, GFP_KERNEL);
if (!intel_encoder)
intel_hdmi = kzalloc(sizeof(struct intel_hdmi), GFP_KERNEL);
if (!intel_hdmi)
return;
intel_connector = kzalloc(sizeof(struct intel_connector), GFP_KERNEL);
if (!intel_connector) {
kfree(intel_encoder);
kfree(intel_hdmi);
return;
}
hdmi_priv = (struct intel_hdmi_priv *)(intel_encoder + 1);
intel_encoder = &intel_hdmi->base;
connector = &intel_connector->base;
drm_connector_init(dev, connector, &intel_hdmi_connector_funcs,
DRM_MODE_CONNECTOR_HDMIA);
......@@ -274,8 +264,7 @@ void intel_hdmi_init(struct drm_device *dev, int sdvox_reg)
if (!intel_encoder->ddc_bus)
goto err_connector;
hdmi_priv->sdvox_reg = sdvox_reg;
intel_encoder->dev_priv = hdmi_priv;
intel_hdmi->sdvox_reg = sdvox_reg;
drm_encoder_init(dev, &intel_encoder->enc, &intel_hdmi_enc_funcs,
DRM_MODE_ENCODER_TMDS);
......@@ -298,7 +287,7 @@ void intel_hdmi_init(struct drm_device *dev, int sdvox_reg)
err_connector:
drm_connector_cleanup(connector);
kfree(intel_encoder);
kfree(intel_hdmi);
kfree(intel_connector);
return;
......
......@@ -41,12 +41,18 @@
#include <linux/acpi.h>
/* Private structure for the integrated LVDS support */
struct intel_lvds_priv {
struct intel_lvds {
struct intel_encoder base;
int fitting_mode;
u32 pfit_control;
u32 pfit_pgm_ratios;
};
static struct intel_lvds *enc_to_intel_lvds(struct drm_encoder *encoder)
{
return container_of(enc_to_intel_encoder(encoder), struct intel_lvds, base);
}
/**
* Sets the backlight level.
*
......@@ -90,7 +96,7 @@ static u32 intel_lvds_get_max_backlight(struct drm_device *dev)
static void intel_lvds_set_power(struct drm_device *dev, bool on)
{
struct drm_i915_private *dev_priv = dev->dev_private;
u32 pp_status, ctl_reg, status_reg, lvds_reg;
u32 ctl_reg, status_reg, lvds_reg;
if (HAS_PCH_SPLIT(dev)) {
ctl_reg = PCH_PP_CONTROL;
......@@ -108,9 +114,8 @@ static void intel_lvds_set_power(struct drm_device *dev, bool on)
I915_WRITE(ctl_reg, I915_READ(ctl_reg) |
POWER_TARGET_ON);
do {
pp_status = I915_READ(status_reg);
} while ((pp_status & PP_ON) == 0);
if (wait_for(I915_READ(status_reg) & PP_ON, 1000, 0))
DRM_ERROR("timed out waiting to enable LVDS pipe");
intel_lvds_set_backlight(dev, dev_priv->backlight_duty_cycle);
} else {
......@@ -118,9 +123,8 @@ static void intel_lvds_set_power(struct drm_device *dev, bool on)
I915_WRITE(ctl_reg, I915_READ(ctl_reg) &
~POWER_TARGET_ON);
do {
pp_status = I915_READ(status_reg);
} while (pp_status & PP_ON);
if (wait_for((I915_READ(status_reg) & PP_ON) == 0, 1000, 0))
DRM_ERROR("timed out waiting for LVDS pipe to turn off");
I915_WRITE(lvds_reg, I915_READ(lvds_reg) & ~LVDS_PORT_EN);
POSTING_READ(lvds_reg);
......@@ -219,9 +223,8 @@ static bool intel_lvds_mode_fixup(struct drm_encoder *encoder,
struct drm_device *dev = encoder->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_crtc *intel_crtc = to_intel_crtc(encoder->crtc);
struct intel_lvds *intel_lvds = enc_to_intel_lvds(encoder);
struct drm_encoder *tmp_encoder;
struct intel_encoder *intel_encoder = enc_to_intel_encoder(encoder);
struct intel_lvds_priv *lvds_priv = intel_encoder->dev_priv;
u32 pfit_control = 0, pfit_pgm_ratios = 0, border = 0;
/* Should never happen!! */
......@@ -241,26 +244,20 @@ static bool intel_lvds_mode_fixup(struct drm_encoder *encoder,
/* If we don't have a panel mode, there is nothing we can do */
if (dev_priv->panel_fixed_mode == NULL)
return true;
/*
* We have timings from the BIOS for the panel, put them in
* to the adjusted mode. The CRTC will be set up for this mode,
* with the panel scaling set up to source from the H/VDisplay
* of the original mode.
*/
adjusted_mode->hdisplay = dev_priv->panel_fixed_mode->hdisplay;
adjusted_mode->hsync_start =
dev_priv->panel_fixed_mode->hsync_start;
adjusted_mode->hsync_end =
dev_priv->panel_fixed_mode->hsync_end;
adjusted_mode->htotal = dev_priv->panel_fixed_mode->htotal;
adjusted_mode->vdisplay = dev_priv->panel_fixed_mode->vdisplay;
adjusted_mode->vsync_start =
dev_priv->panel_fixed_mode->vsync_start;
adjusted_mode->vsync_end =
dev_priv->panel_fixed_mode->vsync_end;
adjusted_mode->vtotal = dev_priv->panel_fixed_mode->vtotal;
adjusted_mode->clock = dev_priv->panel_fixed_mode->clock;
drm_mode_set_crtcinfo(adjusted_mode, CRTC_INTERLACE_HALVE_V);
intel_fixed_panel_mode(dev_priv->panel_fixed_mode, adjusted_mode);
if (HAS_PCH_SPLIT(dev)) {
intel_pch_panel_fitting(dev, intel_lvds->fitting_mode,
mode, adjusted_mode);
return true;
}
/* Make sure pre-965s set dither correctly */
if (!IS_I965G(dev)) {
......@@ -273,10 +270,6 @@ static bool intel_lvds_mode_fixup(struct drm_encoder *encoder,
adjusted_mode->vdisplay == mode->vdisplay)
goto out;
/* full screen scale for now */
if (HAS_PCH_SPLIT(dev))
goto out;
/* 965+ wants fuzzy fitting */
if (IS_I965G(dev))
pfit_control |= ((intel_crtc->pipe << PFIT_PIPE_SHIFT) |
......@@ -288,12 +281,10 @@ static bool intel_lvds_mode_fixup(struct drm_encoder *encoder,
* to register description and PRM.
* Change the value here to see the borders for debugging
*/
if (!HAS_PCH_SPLIT(dev)) {
I915_WRITE(BCLRPAT_A, 0);
I915_WRITE(BCLRPAT_B, 0);
}
I915_WRITE(BCLRPAT_A, 0);
I915_WRITE(BCLRPAT_B, 0);
switch (lvds_priv->fitting_mode) {
switch (intel_lvds->fitting_mode) {
case DRM_MODE_SCALE_CENTER:
/*
* For centered modes, we have to calculate border widths &
......@@ -378,8 +369,8 @@ static bool intel_lvds_mode_fixup(struct drm_encoder *encoder,
}
out:
lvds_priv->pfit_control = pfit_control;
lvds_priv->pfit_pgm_ratios = pfit_pgm_ratios;
intel_lvds->pfit_control = pfit_control;
intel_lvds->pfit_pgm_ratios = pfit_pgm_ratios;
dev_priv->lvds_border_bits = border;
/*
......@@ -427,8 +418,7 @@ static void intel_lvds_mode_set(struct drm_encoder *encoder,
{
struct drm_device *dev = encoder->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_encoder *intel_encoder = enc_to_intel_encoder(encoder);
struct intel_lvds_priv *lvds_priv = intel_encoder->dev_priv;
struct intel_lvds *intel_lvds = enc_to_intel_lvds(encoder);
/*
* The LVDS pin pair will already have been turned on in the
......@@ -444,8 +434,8 @@ static void intel_lvds_mode_set(struct drm_encoder *encoder,
* screen. Should be enabled before the pipe is enabled, according to
* register description and PRM.
*/
I915_WRITE(PFIT_PGM_RATIOS, lvds_priv->pfit_pgm_ratios);
I915_WRITE(PFIT_CONTROL, lvds_priv->pfit_control);
I915_WRITE(PFIT_PGM_RATIOS, intel_lvds->pfit_pgm_ratios);
I915_WRITE(PFIT_CONTROL, intel_lvds->pfit_control);
}
/**
......@@ -600,18 +590,17 @@ static int intel_lvds_set_property(struct drm_connector *connector,
connector->encoder) {
struct drm_crtc *crtc = connector->encoder->crtc;
struct drm_encoder *encoder = connector->encoder;
struct intel_encoder *intel_encoder = enc_to_intel_encoder(encoder);
struct intel_lvds_priv *lvds_priv = intel_encoder->dev_priv;
struct intel_lvds *intel_lvds = enc_to_intel_lvds(encoder);
if (value == DRM_MODE_SCALE_NONE) {
DRM_DEBUG_KMS("no scaling not supported\n");
return 0;
}
if (lvds_priv->fitting_mode == value) {
if (intel_lvds->fitting_mode == value) {
/* the LVDS scaling property is not changed */
return 0;
}
lvds_priv->fitting_mode = value;
intel_lvds->fitting_mode = value;
if (crtc && crtc->enabled) {
/*
* If the CRTC is enabled, the display will be changed
......@@ -647,19 +636,8 @@ static const struct drm_connector_funcs intel_lvds_connector_funcs = {
.destroy = intel_lvds_destroy,
};
static void intel_lvds_enc_destroy(struct drm_encoder *encoder)
{
struct intel_encoder *intel_encoder = enc_to_intel_encoder(encoder);
if (intel_encoder->ddc_bus)
intel_i2c_destroy(intel_encoder->ddc_bus);
drm_encoder_cleanup(encoder);
kfree(intel_encoder);
}
static const struct drm_encoder_funcs intel_lvds_enc_funcs = {
.destroy = intel_lvds_enc_destroy,
.destroy = intel_encoder_destroy,
};
static int __init intel_no_lvds_dmi_callback(const struct dmi_system_id *id)
......@@ -843,13 +821,13 @@ static int lvds_is_present_in_vbt(struct drm_device *dev)
void intel_lvds_init(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_lvds *intel_lvds;
struct intel_encoder *intel_encoder;
struct intel_connector *intel_connector;
struct drm_connector *connector;
struct drm_encoder *encoder;
struct drm_display_mode *scan; /* *modes, *bios_mode; */
struct drm_crtc *crtc;
struct intel_lvds_priv *lvds_priv;
u32 lvds;
int pipe, gpio = GPIOC;
......@@ -872,20 +850,20 @@ void intel_lvds_init(struct drm_device *dev)
gpio = PCH_GPIOC;
}
intel_encoder = kzalloc(sizeof(struct intel_encoder) +
sizeof(struct intel_lvds_priv), GFP_KERNEL);
if (!intel_encoder) {
intel_lvds = kzalloc(sizeof(struct intel_lvds), GFP_KERNEL);
if (!intel_lvds) {
return;
}
intel_connector = kzalloc(sizeof(struct intel_connector), GFP_KERNEL);
if (!intel_connector) {
kfree(intel_encoder);
kfree(intel_lvds);
return;
}
connector = &intel_connector->base;
intel_encoder = &intel_lvds->base;
encoder = &intel_encoder->enc;
connector = &intel_connector->base;
drm_connector_init(dev, &intel_connector->base, &intel_lvds_connector_funcs,
DRM_MODE_CONNECTOR_LVDS);
......@@ -905,8 +883,6 @@ void intel_lvds_init(struct drm_device *dev)
connector->interlace_allowed = false;
connector->doublescan_allowed = false;
lvds_priv = (struct intel_lvds_priv *)(intel_encoder + 1);
intel_encoder->dev_priv = lvds_priv;
/* create the scaling mode property */
drm_mode_create_scaling_mode_property(dev);
/*
......@@ -916,7 +892,7 @@ void intel_lvds_init(struct drm_device *dev)
drm_connector_attach_property(&intel_connector->base,
dev->mode_config.scaling_mode_property,
DRM_MODE_SCALE_ASPECT);
lvds_priv->fitting_mode = DRM_MODE_SCALE_ASPECT;
intel_lvds->fitting_mode = DRM_MODE_SCALE_ASPECT;
/*
* LVDS discovery:
* 1) check for EDID on DDC
......@@ -1024,6 +1000,6 @@ void intel_lvds_init(struct drm_device *dev)
intel_i2c_destroy(intel_encoder->ddc_bus);
drm_connector_cleanup(connector);
drm_encoder_cleanup(encoder);
kfree(intel_encoder);
kfree(intel_lvds);
kfree(intel_connector);
}
......@@ -1367,7 +1367,8 @@ void intel_setup_overlay(struct drm_device *dev)
overlay->flip_addr = overlay->reg_bo->gtt_offset;
} else {
ret = i915_gem_attach_phys_object(dev, reg_bo,
I915_GEM_PHYS_OVERLAY_REGS);
I915_GEM_PHYS_OVERLAY_REGS,
0);
if (ret) {
DRM_ERROR("failed to attach phys overlay regs\n");
goto out_free_bo;
......@@ -1416,3 +1417,99 @@ void intel_cleanup_overlay(struct drm_device *dev)
kfree(dev_priv->overlay);
}
}
struct intel_overlay_error_state {
struct overlay_registers regs;
unsigned long base;
u32 dovsta;
u32 isr;
};
struct intel_overlay_error_state *
intel_overlay_capture_error_state(struct drm_device *dev)
{
drm_i915_private_t *dev_priv = dev->dev_private;
struct intel_overlay *overlay = dev_priv->overlay;
struct intel_overlay_error_state *error;
struct overlay_registers __iomem *regs;
if (!overlay || !overlay->active)
return NULL;
error = kmalloc(sizeof(*error), GFP_ATOMIC);
if (error == NULL)
return NULL;
error->dovsta = I915_READ(DOVSTA);
error->isr = I915_READ(ISR);
if (OVERLAY_NONPHYSICAL(overlay->dev))
error->base = (long) overlay->reg_bo->gtt_offset;
else
error->base = (long) overlay->reg_bo->phys_obj->handle->vaddr;
regs = intel_overlay_map_regs_atomic(overlay);
if (!regs)
goto err;
memcpy_fromio(&error->regs, regs, sizeof(struct overlay_registers));
intel_overlay_unmap_regs_atomic(overlay);
return error;
err:
kfree(error);
return NULL;
}
void
intel_overlay_print_error_state(struct seq_file *m, struct intel_overlay_error_state *error)
{
seq_printf(m, "Overlay, status: 0x%08x, interrupt: 0x%08x\n",
error->dovsta, error->isr);
seq_printf(m, " Register file at 0x%08lx:\n",
error->base);
#define P(x) seq_printf(m, " " #x ": 0x%08x\n", error->regs.x)
P(OBUF_0Y);
P(OBUF_1Y);
P(OBUF_0U);
P(OBUF_0V);
P(OBUF_1U);
P(OBUF_1V);
P(OSTRIDE);
P(YRGB_VPH);
P(UV_VPH);
P(HORZ_PH);
P(INIT_PHS);
P(DWINPOS);
P(DWINSZ);
P(SWIDTH);
P(SWIDTHSW);
P(SHEIGHT);
P(YRGBSCALE);
P(UVSCALE);
P(OCLRC0);
P(OCLRC1);
P(DCLRKV);
P(DCLRKM);
P(SCLRKVH);
P(SCLRKVL);
P(SCLRKEN);
P(OCONFIG);
P(OCMD);
P(OSTART_0Y);
P(OSTART_1Y);
P(OSTART_0U);
P(OSTART_0V);
P(OSTART_1U);
P(OSTART_1V);
P(OTILEOFF_0Y);
P(OTILEOFF_1Y);
P(OTILEOFF_0U);
P(OTILEOFF_0V);
P(OTILEOFF_1U);
P(OTILEOFF_1V);
P(FASTHSCALE);
P(UVSCALEV);
#undef P
}
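Editorial note: the local P() macro above uses the preprocessor's stringizing operator so the register name printed can never drift from the field actually read. A tiny self-contained sketch of the same idiom, with an invented three-field struct:

#include <stdio.h>

struct regs {
        unsigned int OBUF_0Y;
        unsigned int OSTRIDE;
        unsigned int OCMD;
};

int main(void)
{
        struct regs r = { 0x00001000, 0x00000500, 0x00000003 };

        /* #x stringizes the field name, so the label and the value read
         * always stay in sync. */
#define P(x) printf("  " #x ": 0x%08x\n", r.x)
        P(OBUF_0Y);
        P(OSTRIDE);
        P(OCMD);
#undef P
        return 0;
}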
/*
* Copyright © 2006-2010 Intel Corporation
* Copyright (c) 2006 Dave Airlie <airlied@linux.ie>
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice (including the next
* paragraph) shall be included in all copies or substantial portions of the
* Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*
* Authors:
* Eric Anholt <eric@anholt.net>
* Dave Airlie <airlied@linux.ie>
* Jesse Barnes <jesse.barnes@intel.com>
* Chris Wilson <chris@chris-wilson.co.uk>
*/
#include "intel_drv.h"
void
intel_fixed_panel_mode(struct drm_display_mode *fixed_mode,
struct drm_display_mode *adjusted_mode)
{
adjusted_mode->hdisplay = fixed_mode->hdisplay;
adjusted_mode->hsync_start = fixed_mode->hsync_start;
adjusted_mode->hsync_end = fixed_mode->hsync_end;
adjusted_mode->htotal = fixed_mode->htotal;
adjusted_mode->vdisplay = fixed_mode->vdisplay;
adjusted_mode->vsync_start = fixed_mode->vsync_start;
adjusted_mode->vsync_end = fixed_mode->vsync_end;
adjusted_mode->vtotal = fixed_mode->vtotal;
adjusted_mode->clock = fixed_mode->clock;
drm_mode_set_crtcinfo(adjusted_mode, CRTC_INTERLACE_HALVE_V);
}
/* adjusted_mode has been preset to be the panel's fixed mode */
void
intel_pch_panel_fitting(struct drm_device *dev,
int fitting_mode,
struct drm_display_mode *mode,
struct drm_display_mode *adjusted_mode)
{
struct drm_i915_private *dev_priv = dev->dev_private;
int x, y, width, height;
x = y = width = height = 0;
/* Native modes don't need fitting */
if (adjusted_mode->hdisplay == mode->hdisplay &&
adjusted_mode->vdisplay == mode->vdisplay)
goto done;
switch (fitting_mode) {
case DRM_MODE_SCALE_CENTER:
width = mode->hdisplay;
height = mode->vdisplay;
x = (adjusted_mode->hdisplay - width + 1)/2;
y = (adjusted_mode->vdisplay - height + 1)/2;
break;
case DRM_MODE_SCALE_ASPECT:
/* Scale but preserve the aspect ratio */
{
u32 scaled_width = adjusted_mode->hdisplay * mode->vdisplay;
u32 scaled_height = mode->hdisplay * adjusted_mode->vdisplay;
if (scaled_width > scaled_height) { /* pillar */
width = scaled_height / mode->vdisplay;
x = (adjusted_mode->hdisplay - width + 1) / 2;
y = 0;
height = adjusted_mode->vdisplay;
} else if (scaled_width < scaled_height) { /* letter */
height = scaled_width / mode->hdisplay;
y = (adjusted_mode->vdisplay - height + 1) / 2;
x = 0;
width = adjusted_mode->hdisplay;
} else {
x = y = 0;
width = adjusted_mode->hdisplay;
height = adjusted_mode->vdisplay;
}
}
break;
default:
case DRM_MODE_SCALE_FULLSCREEN:
x = y = 0;
width = adjusted_mode->hdisplay;
height = adjusted_mode->vdisplay;
break;
}
done:
dev_priv->pch_pf_pos = (x << 16) | y;
dev_priv->pch_pf_size = (width << 16) | height;
}
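Editorial note: the DRM_MODE_SCALE_ASPECT branch above picks pillarbox versus letterbox by cross-multiplying the two aspect ratios and then centres the result. Below is a standalone sketch of the same arithmetic, run on a hypothetical 1024x768 mode scaled onto a 1920x1080 panel; the function name and sample sizes are invented for the example.

#include <stdio.h>
#include <stdint.h>

/* Compute the centred, aspect-preserving fit of src into dst,
 * mirroring the DRM_MODE_SCALE_ASPECT branch above. */
static void aspect_fit(int src_w, int src_h, int dst_w, int dst_h)
{
        uint32_t scaled_width = (uint32_t)dst_w * src_h;
        uint32_t scaled_height = (uint32_t)src_w * dst_h;
        int x, y, width, height;

        if (scaled_width > scaled_height) {             /* pillarbox */
                width = scaled_height / src_h;
                x = (dst_w - width + 1) / 2;
                y = 0;
                height = dst_h;
        } else if (scaled_width < scaled_height) {      /* letterbox */
                height = scaled_width / src_w;
                y = (dst_h - height + 1) / 2;
                x = 0;
                width = dst_w;
        } else {                                        /* same aspect ratio */
                x = y = 0;
                width = dst_w;
                height = dst_h;
        }
        printf("pos=(%d,%d) size=%dx%d\n", x, y, width, height);
}

int main(void)
{
        aspect_fit(1024, 768, 1920, 1080);      /* expect a pillarboxed 1440x1080 */
        return 0;
}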
......@@ -33,18 +33,35 @@
#include "i915_drm.h"
#include "i915_trace.h"
static u32 i915_gem_get_seqno(struct drm_device *dev)
{
drm_i915_private_t *dev_priv = dev->dev_private;
u32 seqno;
seqno = dev_priv->next_seqno;
/* reserve 0 for non-seqno */
if (++dev_priv->next_seqno == 0)
dev_priv->next_seqno = 1;
return seqno;
}
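Editorial note: i915_gem_get_seqno() centralises sequence-number allocation in dev_priv and reserves 0 to mean "no seqno", even across the 32-bit wrap. A minimal userspace sketch of that allocator follows; struct fake_dev is invented for the example.

#include <stdio.h>
#include <stdint.h>

struct fake_dev { uint32_t next_seqno; };       /* stands in for dev_priv */

/* Hand out the current seqno and advance, skipping 0 on wrap-around
 * so that 0 can always mean "no seqno". */
static uint32_t get_seqno(struct fake_dev *dev)
{
        uint32_t seqno = dev->next_seqno;

        if (++dev->next_seqno == 0)
                dev->next_seqno = 1;
        return seqno;
}

int main(void)
{
        struct fake_dev dev = { .next_seqno = 0xffffffffu };

        printf("%u\n", get_seqno(&dev));        /* 4294967295 */
        printf("%u\n", get_seqno(&dev));        /* 1, never 0 */
        return 0;
}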
static void
render_ring_flush(struct drm_device *dev,
struct intel_ring_buffer *ring,
u32 invalidate_domains,
u32 flush_domains)
{
drm_i915_private_t *dev_priv = dev->dev_private;
u32 cmd;
#if WATCH_EXEC
DRM_INFO("%s: invalidate %08x flush %08x\n", __func__,
invalidate_domains, flush_domains);
#endif
u32 cmd;
trace_i915_gem_request_flush(dev, ring->next_seqno,
trace_i915_gem_request_flush(dev, dev_priv->next_seqno,
invalidate_domains, flush_domains);
if ((invalidate_domains | flush_domains) & I915_GEM_GPU_DOMAINS) {
......@@ -233,9 +250,10 @@ render_ring_add_request(struct drm_device *dev,
struct drm_file *file_priv,
u32 flush_domains)
{
u32 seqno;
drm_i915_private_t *dev_priv = dev->dev_private;
seqno = intel_ring_get_seqno(dev, ring);
u32 seqno;
seqno = i915_gem_get_seqno(dev);
if (IS_GEN6(dev)) {
BEGIN_LP_RING(6);
......@@ -405,7 +423,9 @@ bsd_ring_add_request(struct drm_device *dev,
u32 flush_domains)
{
u32 seqno;
seqno = intel_ring_get_seqno(dev, ring);
seqno = i915_gem_get_seqno(dev);
intel_ring_begin(dev, ring, 4);
intel_ring_emit(dev, ring, MI_STORE_DWORD_INDEX);
intel_ring_emit(dev, ring,
......@@ -479,7 +499,7 @@ render_ring_dispatch_gem_execbuffer(struct drm_device *dev,
exec_start = (uint32_t) exec_offset + exec->batch_start_offset;
exec_len = (uint32_t) exec->batch_len;
trace_i915_gem_request_submit(dev, dev_priv->mm.next_gem_seqno + 1);
trace_i915_gem_request_submit(dev, dev_priv->next_seqno + 1);
count = nbox ? nbox : 1;
......@@ -515,7 +535,16 @@ render_ring_dispatch_gem_execbuffer(struct drm_device *dev,
intel_ring_advance(dev, ring);
}
if (IS_G4X(dev) || IS_IRONLAKE(dev)) {
intel_ring_begin(dev, ring, 2);
intel_ring_emit(dev, ring, MI_FLUSH |
MI_NO_WRITE_FLUSH |
MI_INVALIDATE_ISP );
intel_ring_emit(dev, ring, MI_NOOP);
intel_ring_advance(dev, ring);
}
/* XXX breadcrumb */
return 0;
}
......@@ -588,9 +617,10 @@ static int init_status_page(struct drm_device *dev,
int intel_init_ring_buffer(struct drm_device *dev,
struct intel_ring_buffer *ring)
{
int ret;
struct drm_i915_gem_object *obj_priv;
struct drm_gem_object *obj;
int ret;
ring->dev = dev;
if (I915_NEED_GFX_HWS(dev)) {
......@@ -603,16 +633,14 @@ int intel_init_ring_buffer(struct drm_device *dev,
if (obj == NULL) {
DRM_ERROR("Failed to allocate ringbuffer\n");
ret = -ENOMEM;
goto cleanup;
goto err_hws;
}
ring->gem_object = obj;
ret = i915_gem_object_pin(obj, ring->alignment);
if (ret != 0) {
drm_gem_object_unreference(obj);
goto cleanup;
}
if (ret)
goto err_unref;
obj_priv = to_intel_bo(obj);
ring->map.size = ring->size;
......@@ -624,18 +652,14 @@ int intel_init_ring_buffer(struct drm_device *dev,
drm_core_ioremap_wc(&ring->map, dev);
if (ring->map.handle == NULL) {
DRM_ERROR("Failed to map ringbuffer.\n");
i915_gem_object_unpin(obj);
drm_gem_object_unreference(obj);
ret = -EINVAL;
goto cleanup;
goto err_unpin;
}
ring->virtual_start = ring->map.handle;
ret = ring->init(dev, ring);
if (ret != 0) {
intel_cleanup_ring_buffer(dev, ring);
return ret;
}
if (ret)
goto err_unmap;
if (!drm_core_check_feature(dev, DRIVER_MODESET))
i915_kernel_lost_context(dev);
......@@ -649,7 +673,15 @@ int intel_init_ring_buffer(struct drm_device *dev,
INIT_LIST_HEAD(&ring->active_list);
INIT_LIST_HEAD(&ring->request_list);
return ret;
cleanup:
err_unmap:
drm_core_ioremapfree(&ring->map, dev);
err_unpin:
i915_gem_object_unpin(obj);
err_unref:
drm_gem_object_unreference(obj);
ring->gem_object = NULL;
err_hws:
cleanup_status_page(dev, ring);
return ret;
}
......@@ -682,9 +714,11 @@ int intel_wrap_ring_buffer(struct drm_device *dev,
}
virt = (unsigned int *)(ring->virtual_start + ring->tail);
rem /= 4;
while (rem--)
rem /= 8;
while (rem--) {
*virt++ = MI_NOOP;
*virt++ = MI_NOOP;
}
ring->tail = 0;
ring->space = ring->head - 8;
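Editorial note: when a request will not fit in the bytes left before the end of the ring, the remainder is padded with MI_NOOP dwords and the tail wraps to 0; the hunk above merely fills two dwords per loop iteration (rem is now counted in 8-byte units). A self-contained sketch of that padding step on a toy ring follows; the ring size and offsets are arbitrary.

#include <stdio.h>
#include <stdint.h>
#include <string.h>

#define RING_SIZE 64                    /* toy power-of-two ring, in bytes */
#define MI_NOOP   0x00000000u           /* the hardware MI_NOOP is an all-zero dword */

struct toy_ring {
        uint32_t buf[RING_SIZE / 4];
        unsigned int tail;              /* byte offset of the next write */
};

/* Pad from tail to the end of the ring with NOOPs, then wrap tail to 0,
 * mirroring what intel_wrap_ring_buffer() does above. */
static void wrap_ring(struct toy_ring *ring)
{
        unsigned int rem = RING_SIZE - ring->tail;      /* dead bytes at the end */
        uint32_t *virt = ring->buf + ring->tail / 4;

        rem /= 8;                       /* two dwords written per iteration */
        while (rem--) {
                *virt++ = MI_NOOP;
                *virt++ = MI_NOOP;
        }
        ring->tail = 0;
}

int main(void)
{
        struct toy_ring ring;

        memset(ring.buf, 0xff, sizeof(ring.buf));
        ring.tail = 40;                 /* 24 bytes of unusable space remain */
        wrap_ring(&ring);
        printf("tail=%u last dword=0x%08x\n", ring.tail, ring.buf[RING_SIZE / 4 - 1]);
        return 0;
}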
......@@ -729,21 +763,14 @@ void intel_ring_begin(struct drm_device *dev,
intel_wrap_ring_buffer(dev, ring);
if (unlikely(ring->space < n))
intel_wait_ring_buffer(dev, ring, n);
}
void intel_ring_emit(struct drm_device *dev,
struct intel_ring_buffer *ring, unsigned int data)
{
unsigned int *virt = ring->virtual_start + ring->tail;
*virt = data;
ring->tail += 4;
ring->tail &= ring->size - 1;
ring->space -= 4;
ring->space -= n;
}
void intel_ring_advance(struct drm_device *dev,
struct intel_ring_buffer *ring)
{
ring->tail &= ring->size - 1;
ring->advance_ring(dev, ring);
}
......@@ -762,18 +789,6 @@ void intel_fill_struct(struct drm_device *dev,
intel_ring_advance(dev, ring);
}
u32 intel_ring_get_seqno(struct drm_device *dev,
struct intel_ring_buffer *ring)
{
u32 seqno;
seqno = ring->next_seqno;
/* reserve 0 for non-seqno */
if (++ring->next_seqno == 0)
ring->next_seqno = 1;
return seqno;
}
struct intel_ring_buffer render_ring = {
.name = "render ring",
.regs = {
......@@ -791,7 +806,6 @@ struct intel_ring_buffer render_ring = {
.head = 0,
.tail = 0,
.space = 0,
.next_seqno = 1,
.user_irq_refcount = 0,
.irq_gem_seqno = 0,
.waiting_gem_seqno = 0,
......@@ -830,7 +844,6 @@ struct intel_ring_buffer bsd_ring = {
.head = 0,
.tail = 0,
.space = 0,
.next_seqno = 1,
.user_irq_refcount = 0,
.irq_gem_seqno = 0,
.waiting_gem_seqno = 0,
......
......@@ -26,7 +26,6 @@ struct intel_ring_buffer {
unsigned int head;
unsigned int tail;
unsigned int space;
u32 next_seqno;
struct intel_hw_status_page status_page;
u32 irq_gem_seqno; /* last seq seen at irq time */
......@@ -106,8 +105,16 @@ int intel_wrap_ring_buffer(struct drm_device *dev,
struct intel_ring_buffer *ring);
void intel_ring_begin(struct drm_device *dev,
struct intel_ring_buffer *ring, int n);
void intel_ring_emit(struct drm_device *dev,
struct intel_ring_buffer *ring, u32 data);
static inline void intel_ring_emit(struct drm_device *dev,
struct intel_ring_buffer *ring,
unsigned int data)
{
unsigned int *virt = ring->virtual_start + ring->tail;
*virt = data;
ring->tail += 4;
}
void intel_fill_struct(struct drm_device *dev,
struct intel_ring_buffer *ring,
void *data,
......
This source diff could not be displayed because it is too large.
......@@ -312,7 +312,7 @@ struct intel_sdvo_set_target_input_args {
# define SDVO_CLOCK_RATE_MULT_4X (1 << 3)
#define SDVO_CMD_GET_SUPPORTED_TV_FORMATS 0x27
/** 5 bytes of bit flags for TV formats shared by all TV format functions */
/** 6 bytes of bit flags for TV formats shared by all TV format functions */
struct intel_sdvo_tv_format {
unsigned int ntsc_m:1;
unsigned int ntsc_j:1;
......@@ -596,32 +596,32 @@ struct intel_sdvo_enhancements_reply {
unsigned int overscan_h:1;
unsigned int overscan_v:1;
unsigned int position_h:1;
unsigned int position_v:1;
unsigned int hpos:1;
unsigned int vpos:1;
unsigned int sharpness:1;
unsigned int dot_crawl:1;
unsigned int dither:1;
unsigned int max_tv_chroma_filter:1;
unsigned int max_tv_luma_filter:1;
unsigned int tv_chroma_filter:1;
unsigned int tv_luma_filter:1;
} __attribute__((packed));
/* Picture enhancement limits below are dependent on the current TV format,
* and thus need to be queried and set after it.
*/
#define SDVO_CMD_GET_MAX_FLICKER_FITER 0x4d
#define SDVO_CMD_GET_MAX_ADAPTIVE_FLICKER_FITER 0x7b
#define SDVO_CMD_GET_MAX_2D_FLICKER_FITER 0x52
#define SDVO_CMD_GET_MAX_FLICKER_FILTER 0x4d
#define SDVO_CMD_GET_MAX_FLICKER_FILTER_ADAPTIVE 0x7b
#define SDVO_CMD_GET_MAX_FLICKER_FILTER_2D 0x52
#define SDVO_CMD_GET_MAX_SATURATION 0x55
#define SDVO_CMD_GET_MAX_HUE 0x58
#define SDVO_CMD_GET_MAX_BRIGHTNESS 0x5b
#define SDVO_CMD_GET_MAX_CONTRAST 0x5e
#define SDVO_CMD_GET_MAX_OVERSCAN_H 0x61
#define SDVO_CMD_GET_MAX_OVERSCAN_V 0x64
#define SDVO_CMD_GET_MAX_POSITION_H 0x67
#define SDVO_CMD_GET_MAX_POSITION_V 0x6a
#define SDVO_CMD_GET_MAX_SHARPNESS_V 0x6d
#define SDVO_CMD_GET_MAX_TV_CHROMA 0x74
#define SDVO_CMD_GET_MAX_TV_LUMA 0x77
#define SDVO_CMD_GET_MAX_HPOS 0x67
#define SDVO_CMD_GET_MAX_VPOS 0x6a
#define SDVO_CMD_GET_MAX_SHARPNESS 0x6d
#define SDVO_CMD_GET_MAX_TV_CHROMA_FILTER 0x74
#define SDVO_CMD_GET_MAX_TV_LUMA_FILTER 0x77
struct intel_sdvo_enhancement_limits_reply {
u16 max_value;
u16 default_value;
......@@ -638,10 +638,10 @@ struct intel_sdvo_enhancement_limits_reply {
#define SDVO_CMD_GET_FLICKER_FILTER 0x4e
#define SDVO_CMD_SET_FLICKER_FILTER 0x4f
#define SDVO_CMD_GET_ADAPTIVE_FLICKER_FITER 0x50
#define SDVO_CMD_SET_ADAPTIVE_FLICKER_FITER 0x51
#define SDVO_CMD_GET_2D_FLICKER_FITER 0x53
#define SDVO_CMD_SET_2D_FLICKER_FITER 0x54
#define SDVO_CMD_GET_FLICKER_FILTER_ADAPTIVE 0x50
#define SDVO_CMD_SET_FLICKER_FILTER_ADAPTIVE 0x51
#define SDVO_CMD_GET_FLICKER_FILTER_2D 0x53
#define SDVO_CMD_SET_FLICKER_FILTER_2D 0x54
#define SDVO_CMD_GET_SATURATION 0x56
#define SDVO_CMD_SET_SATURATION 0x57
#define SDVO_CMD_GET_HUE 0x59
......@@ -654,16 +654,16 @@ struct intel_sdvo_enhancement_limits_reply {
#define SDVO_CMD_SET_OVERSCAN_H 0x63
#define SDVO_CMD_GET_OVERSCAN_V 0x65
#define SDVO_CMD_SET_OVERSCAN_V 0x66
#define SDVO_CMD_GET_POSITION_H 0x68
#define SDVO_CMD_SET_POSITION_H 0x69
#define SDVO_CMD_GET_POSITION_V 0x6b
#define SDVO_CMD_SET_POSITION_V 0x6c
#define SDVO_CMD_GET_HPOS 0x68
#define SDVO_CMD_SET_HPOS 0x69
#define SDVO_CMD_GET_VPOS 0x6b
#define SDVO_CMD_SET_VPOS 0x6c
#define SDVO_CMD_GET_SHARPNESS 0x6e
#define SDVO_CMD_SET_SHARPNESS 0x6f
#define SDVO_CMD_GET_TV_CHROMA 0x75
#define SDVO_CMD_SET_TV_CHROMA 0x76
#define SDVO_CMD_GET_TV_LUMA 0x78
#define SDVO_CMD_SET_TV_LUMA 0x79
#define SDVO_CMD_GET_TV_CHROMA_FILTER 0x75
#define SDVO_CMD_SET_TV_CHROMA_FILTER 0x76
#define SDVO_CMD_GET_TV_LUMA_FILTER 0x78
#define SDVO_CMD_SET_TV_LUMA_FILTER 0x79
struct intel_sdvo_enhancements_arg {
u16 value;
}__attribute__((packed));
......
......@@ -44,7 +44,9 @@ enum tv_margin {
};
/** Private structure for the integrated TV support */
struct intel_tv_priv {
struct intel_tv {
struct intel_encoder base;
int type;
char *tv_format;
int margin[4];
......@@ -896,6 +898,11 @@ static const struct tv_mode tv_modes[] = {
},
};
static struct intel_tv *enc_to_intel_tv(struct drm_encoder *encoder)
{
return container_of(enc_to_intel_encoder(encoder), struct intel_tv, base);
}
static void
intel_tv_dpms(struct drm_encoder *encoder, int mode)
{
......@@ -929,19 +936,17 @@ intel_tv_mode_lookup (char *tv_format)
}
static const struct tv_mode *
intel_tv_mode_find (struct intel_encoder *intel_encoder)
intel_tv_mode_find (struct intel_tv *intel_tv)
{
struct intel_tv_priv *tv_priv = intel_encoder->dev_priv;
return intel_tv_mode_lookup(tv_priv->tv_format);
return intel_tv_mode_lookup(intel_tv->tv_format);
}
static enum drm_mode_status
intel_tv_mode_valid(struct drm_connector *connector, struct drm_display_mode *mode)
{
struct drm_encoder *encoder = intel_attached_encoder(connector);
struct intel_encoder *intel_encoder = enc_to_intel_encoder(encoder);
const struct tv_mode *tv_mode = intel_tv_mode_find(intel_encoder);
struct intel_tv *intel_tv = enc_to_intel_tv(encoder);
const struct tv_mode *tv_mode = intel_tv_mode_find(intel_tv);
/* Ensure TV refresh is close to desired refresh */
if (tv_mode && abs(tv_mode->refresh - drm_mode_vrefresh(mode) * 1000)
......@@ -957,8 +962,8 @@ intel_tv_mode_fixup(struct drm_encoder *encoder, struct drm_display_mode *mode,
{
struct drm_device *dev = encoder->dev;
struct drm_mode_config *drm_config = &dev->mode_config;
struct intel_encoder *intel_encoder = enc_to_intel_encoder(encoder);
const struct tv_mode *tv_mode = intel_tv_mode_find (intel_encoder);
struct intel_tv *intel_tv = enc_to_intel_tv(encoder);
const struct tv_mode *tv_mode = intel_tv_mode_find(intel_tv);
struct drm_encoder *other_encoder;
if (!tv_mode)
......@@ -983,9 +988,8 @@ intel_tv_mode_set(struct drm_encoder *encoder, struct drm_display_mode *mode,
struct drm_i915_private *dev_priv = dev->dev_private;
struct drm_crtc *crtc = encoder->crtc;
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
struct intel_encoder *intel_encoder = enc_to_intel_encoder(encoder);
struct intel_tv_priv *tv_priv = intel_encoder->dev_priv;
const struct tv_mode *tv_mode = intel_tv_mode_find(intel_encoder);
struct intel_tv *intel_tv = enc_to_intel_tv(encoder);
const struct tv_mode *tv_mode = intel_tv_mode_find(intel_tv);
u32 tv_ctl;
u32 hctl1, hctl2, hctl3;
u32 vctl1, vctl2, vctl3, vctl4, vctl5, vctl6, vctl7;
......@@ -1001,7 +1005,7 @@ intel_tv_mode_set(struct drm_encoder *encoder, struct drm_display_mode *mode,
tv_ctl = I915_READ(TV_CTL);
tv_ctl &= TV_CTL_SAVE;
switch (tv_priv->type) {
switch (intel_tv->type) {
default:
case DRM_MODE_CONNECTOR_Unknown:
case DRM_MODE_CONNECTOR_Composite:
......@@ -1154,11 +1158,11 @@ intel_tv_mode_set(struct drm_encoder *encoder, struct drm_display_mode *mode,
/* Wait for vblank for the disable to take effect */
if (!IS_I9XX(dev))
intel_wait_for_vblank(dev);
intel_wait_for_vblank(dev, intel_crtc->pipe);
I915_WRITE(pipeconf_reg, pipeconf & ~PIPEACONF_ENABLE);
/* Wait for vblank for the disable to take effect. */
intel_wait_for_vblank(dev);
intel_wait_for_vblank(dev, intel_crtc->pipe);
/* Filter ctl must be set before TV_WIN_SIZE */
I915_WRITE(TV_FILTER_CTL_1, TV_AUTO_SCALE);
......@@ -1168,12 +1172,12 @@ intel_tv_mode_set(struct drm_encoder *encoder, struct drm_display_mode *mode,
else
ysize = 2*tv_mode->nbr_end + 1;
xpos += tv_priv->margin[TV_MARGIN_LEFT];
ypos += tv_priv->margin[TV_MARGIN_TOP];
xsize -= (tv_priv->margin[TV_MARGIN_LEFT] +
tv_priv->margin[TV_MARGIN_RIGHT]);
ysize -= (tv_priv->margin[TV_MARGIN_TOP] +
tv_priv->margin[TV_MARGIN_BOTTOM]);
xpos += intel_tv->margin[TV_MARGIN_LEFT];
ypos += intel_tv->margin[TV_MARGIN_TOP];
xsize -= (intel_tv->margin[TV_MARGIN_LEFT] +
intel_tv->margin[TV_MARGIN_RIGHT]);
ysize -= (intel_tv->margin[TV_MARGIN_TOP] +
intel_tv->margin[TV_MARGIN_BOTTOM]);
I915_WRITE(TV_WIN_POS, (xpos<<16)|ypos);
I915_WRITE(TV_WIN_SIZE, (xsize<<16)|ysize);
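For reference, the two writes above pack an (x, y) pair into a single 32-bit register, X in bits 31:16 and Y in bits 15:0. A minimal standalone sketch of that packing, with made-up values (pack_win is an illustrative helper, not a function from the driver):

#include <stdint.h>
#include <stdio.h>

/* Same layout as the TV_WIN_POS / TV_WIN_SIZE writes above:
 * x in the high 16 bits, y in the low 16 bits. */
static uint32_t pack_win(uint16_t x, uint16_t y)
{
	return ((uint32_t)x << 16) | y;
}

int main(void)
{
	/* Placeholder numbers; the real xpos/ypos come from the mode timings
	 * and the connector margins computed just above. */
	printf("TV_WIN_POS  = 0x%08x\n", pack_win(54, 36));
	printf("TV_WIN_SIZE = 0x%08x\n", pack_win(720, 480));
	return 0;
}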
......@@ -1222,11 +1226,12 @@ static const struct drm_display_mode reported_modes[] = {
* \return false if TV is disconnected.
*/
static int
intel_tv_detect_type (struct drm_crtc *crtc, struct intel_encoder *intel_encoder)
intel_tv_detect_type (struct intel_tv *intel_tv)
{
struct drm_encoder *encoder = &intel_encoder->enc;
struct drm_encoder *encoder = &intel_tv->base.enc;
struct drm_device *dev = encoder->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_crtc *intel_crtc = to_intel_crtc(encoder->crtc);
unsigned long irqflags;
u32 tv_ctl, save_tv_ctl;
u32 tv_dac, save_tv_dac;
......@@ -1263,11 +1268,11 @@ intel_tv_detect_type (struct drm_crtc *crtc, struct intel_encoder *intel_encoder
DAC_C_0_7_V);
I915_WRITE(TV_CTL, tv_ctl);
I915_WRITE(TV_DAC, tv_dac);
intel_wait_for_vblank(dev);
intel_wait_for_vblank(dev, intel_crtc->pipe);
tv_dac = I915_READ(TV_DAC);
I915_WRITE(TV_DAC, save_tv_dac);
I915_WRITE(TV_CTL, save_tv_ctl);
intel_wait_for_vblank(dev);
intel_wait_for_vblank(dev, intel_crtc->pipe);
/*
* A B C
* 0 1 1 Composite
......@@ -1304,12 +1309,11 @@ intel_tv_detect_type (struct drm_crtc *crtc, struct intel_encoder *intel_encoder
static void intel_tv_find_better_format(struct drm_connector *connector)
{
struct drm_encoder *encoder = intel_attached_encoder(connector);
struct intel_encoder *intel_encoder = enc_to_intel_encoder(encoder);
struct intel_tv_priv *tv_priv = intel_encoder->dev_priv;
const struct tv_mode *tv_mode = intel_tv_mode_find(intel_encoder);
struct intel_tv *intel_tv = enc_to_intel_tv(encoder);
const struct tv_mode *tv_mode = intel_tv_mode_find(intel_tv);
int i;
if ((tv_priv->type == DRM_MODE_CONNECTOR_Component) ==
if ((intel_tv->type == DRM_MODE_CONNECTOR_Component) ==
tv_mode->component_only)
return;
......@@ -1317,12 +1321,12 @@ static void intel_tv_find_better_format(struct drm_connector *connector)
for (i = 0; i < sizeof(tv_modes) / sizeof(*tv_modes); i++) {
tv_mode = tv_modes + i;
if ((tv_priv->type == DRM_MODE_CONNECTOR_Component) ==
if ((intel_tv->type == DRM_MODE_CONNECTOR_Component) ==
tv_mode->component_only)
break;
}
tv_priv->tv_format = tv_mode->name;
intel_tv->tv_format = tv_mode->name;
drm_connector_property_set_value(connector,
connector->dev->mode_config.tv_mode_property, i);
}
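As an aside, the loop bound here is the open-coded form of the ARRAY_SIZE() idiom that intel_tv_set_property() uses further down; both count the elements of a fixed-size array. A trivial standalone illustration (the array contents are placeholders):

#include <stdio.h>

#define ARRAY_SIZE(a) (sizeof(a) / sizeof((a)[0]))

int main(void)
{
	static const int refresh[] = { 59940, 50000, 60000 };	/* made-up values */

	/* Both expressions evaluate to 3 at compile time. */
	printf("%zu %zu\n", sizeof(refresh) / sizeof(*refresh), ARRAY_SIZE(refresh));
	return 0;
}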
......@@ -1336,31 +1340,31 @@ static void intel_tv_find_better_format(struct drm_connector *connector)
static enum drm_connector_status
intel_tv_detect(struct drm_connector *connector)
{
struct drm_crtc *crtc;
struct drm_display_mode mode;
struct drm_encoder *encoder = intel_attached_encoder(connector);
struct intel_encoder *intel_encoder = enc_to_intel_encoder(encoder);
struct intel_tv_priv *tv_priv = intel_encoder->dev_priv;
int dpms_mode;
int type = tv_priv->type;
struct intel_tv *intel_tv = enc_to_intel_tv(encoder);
int type;
mode = reported_modes[0];
drm_mode_set_crtcinfo(&mode, CRTC_INTERLACE_HALVE_V);
if (encoder->crtc && encoder->crtc->enabled) {
type = intel_tv_detect_type(encoder->crtc, intel_encoder);
type = intel_tv_detect_type(intel_tv);
} else {
crtc = intel_get_load_detect_pipe(intel_encoder, connector,
struct drm_crtc *crtc;
int dpms_mode;
crtc = intel_get_load_detect_pipe(&intel_tv->base, connector,
&mode, &dpms_mode);
if (crtc) {
type = intel_tv_detect_type(crtc, intel_encoder);
intel_release_load_detect_pipe(intel_encoder, connector,
type = intel_tv_detect_type(intel_tv);
intel_release_load_detect_pipe(&intel_tv->base, connector,
dpms_mode);
} else
type = -1;
}
tv_priv->type = type;
intel_tv->type = type;
if (type < 0)
return connector_status_disconnected;
......@@ -1391,8 +1395,8 @@ intel_tv_chose_preferred_modes(struct drm_connector *connector,
struct drm_display_mode *mode_ptr)
{
struct drm_encoder *encoder = intel_attached_encoder(connector);
struct intel_encoder *intel_encoder = enc_to_intel_encoder(encoder);
const struct tv_mode *tv_mode = intel_tv_mode_find(intel_encoder);
struct intel_tv *intel_tv = enc_to_intel_tv(encoder);
const struct tv_mode *tv_mode = intel_tv_mode_find(intel_tv);
if (tv_mode->nbr_end < 480 && mode_ptr->vdisplay == 480)
mode_ptr->type |= DRM_MODE_TYPE_PREFERRED;
......@@ -1417,8 +1421,8 @@ intel_tv_get_modes(struct drm_connector *connector)
{
struct drm_display_mode *mode_ptr;
struct drm_encoder *encoder = intel_attached_encoder(connector);
struct intel_encoder *intel_encoder = enc_to_intel_encoder(encoder);
const struct tv_mode *tv_mode = intel_tv_mode_find(intel_encoder);
struct intel_tv *intel_tv = enc_to_intel_tv(encoder);
const struct tv_mode *tv_mode = intel_tv_mode_find(intel_tv);
int j, count = 0;
u64 tmp;
......@@ -1483,8 +1487,7 @@ intel_tv_set_property(struct drm_connector *connector, struct drm_property *prop
{
struct drm_device *dev = connector->dev;
struct drm_encoder *encoder = intel_attached_encoder(connector);
struct intel_encoder *intel_encoder = enc_to_intel_encoder(encoder);
struct intel_tv_priv *tv_priv = intel_encoder->dev_priv;
struct intel_tv *intel_tv = enc_to_intel_tv(encoder);
struct drm_crtc *crtc = encoder->crtc;
int ret = 0;
bool changed = false;
......@@ -1494,30 +1497,30 @@ intel_tv_set_property(struct drm_connector *connector, struct drm_property *prop
goto out;
if (property == dev->mode_config.tv_left_margin_property &&
tv_priv->margin[TV_MARGIN_LEFT] != val) {
tv_priv->margin[TV_MARGIN_LEFT] = val;
intel_tv->margin[TV_MARGIN_LEFT] != val) {
intel_tv->margin[TV_MARGIN_LEFT] = val;
changed = true;
} else if (property == dev->mode_config.tv_right_margin_property &&
tv_priv->margin[TV_MARGIN_RIGHT] != val) {
tv_priv->margin[TV_MARGIN_RIGHT] = val;
intel_tv->margin[TV_MARGIN_RIGHT] != val) {
intel_tv->margin[TV_MARGIN_RIGHT] = val;
changed = true;
} else if (property == dev->mode_config.tv_top_margin_property &&
tv_priv->margin[TV_MARGIN_TOP] != val) {
tv_priv->margin[TV_MARGIN_TOP] = val;
intel_tv->margin[TV_MARGIN_TOP] != val) {
intel_tv->margin[TV_MARGIN_TOP] = val;
changed = true;
} else if (property == dev->mode_config.tv_bottom_margin_property &&
tv_priv->margin[TV_MARGIN_BOTTOM] != val) {
tv_priv->margin[TV_MARGIN_BOTTOM] = val;
intel_tv->margin[TV_MARGIN_BOTTOM] != val) {
intel_tv->margin[TV_MARGIN_BOTTOM] = val;
changed = true;
} else if (property == dev->mode_config.tv_mode_property) {
if (val >= ARRAY_SIZE(tv_modes)) {
ret = -EINVAL;
goto out;
}
if (!strcmp(tv_priv->tv_format, tv_modes[val].name))
if (!strcmp(intel_tv->tv_format, tv_modes[val].name))
goto out;
tv_priv->tv_format = tv_modes[val].name;
intel_tv->tv_format = tv_modes[val].name;
changed = true;
} else {
ret = -EINVAL;
......@@ -1553,16 +1556,8 @@ static const struct drm_connector_helper_funcs intel_tv_connector_helper_funcs =
.best_encoder = intel_attached_encoder,
};
static void intel_tv_enc_destroy(struct drm_encoder *encoder)
{
struct intel_encoder *intel_encoder = enc_to_intel_encoder(encoder);
drm_encoder_cleanup(encoder);
kfree(intel_encoder);
}
static const struct drm_encoder_funcs intel_tv_enc_funcs = {
.destroy = intel_tv_enc_destroy,
.destroy = intel_encoder_destroy,
};
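The per-output destroy hook is dropped here in favour of the shared intel_encoder_destroy callback; its body is not part of this excerpt, but the pattern it relies on — freeing the whole subclass through a pointer to the embedded base, which is only valid because the base is the first member — can be sketched in isolation (all type and function names below are stand-ins, not driver code):

#include <assert.h>
#include <stdlib.h>

/* Stand-ins: "base" plays the role of struct intel_encoder, "tv" the role
 * of struct intel_tv with the base embedded as its first member. */
struct base { int type; };
struct tv   { struct base base; int margin[4]; };

/* One destroy routine can serve every subclass: a pointer to the first
 * member is also a pointer to the start of the whole allocation. */
static void destroy(struct base *b)
{
	free(b);
}

int main(void)
{
	struct tv *t = calloc(1, sizeof(*t));

	if (!t)
		return 1;
	assert((void *)&t->base == (void *)t);	/* offset 0, so free(&t->base) frees *t */
	destroy(&t->base);
	return 0;
}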
/*
......@@ -1606,9 +1601,9 @@ intel_tv_init(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
struct drm_connector *connector;
struct intel_tv *intel_tv;
struct intel_encoder *intel_encoder;
struct intel_connector *intel_connector;
struct intel_tv_priv *tv_priv;
u32 tv_dac_on, tv_dac_off, save_tv_dac;
char **tv_format_names;
int i, initial_mode = 0;
......@@ -1647,18 +1642,18 @@ intel_tv_init(struct drm_device *dev)
(tv_dac_off & TVDAC_STATE_CHG_EN) != 0)
return;
intel_encoder = kzalloc(sizeof(struct intel_encoder) +
sizeof(struct intel_tv_priv), GFP_KERNEL);
if (!intel_encoder) {
intel_tv = kzalloc(sizeof(struct intel_tv), GFP_KERNEL);
if (!intel_tv) {
return;
}
intel_connector = kzalloc(sizeof(struct intel_connector), GFP_KERNEL);
if (!intel_connector) {
kfree(intel_encoder);
kfree(intel_tv);
return;
}
intel_encoder = &intel_tv->base;
connector = &intel_connector->base;
drm_connector_init(dev, connector, &intel_tv_connector_funcs,
......@@ -1668,22 +1663,20 @@ intel_tv_init(struct drm_device *dev)
DRM_MODE_ENCODER_TVDAC);
drm_mode_connector_attach_encoder(&intel_connector->base, &intel_encoder->enc);
tv_priv = (struct intel_tv_priv *)(intel_encoder + 1);
intel_encoder->type = INTEL_OUTPUT_TVOUT;
intel_encoder->crtc_mask = (1 << 0) | (1 << 1);
intel_encoder->clone_mask = (1 << INTEL_TV_CLONE_BIT);
intel_encoder->enc.possible_crtcs = ((1 << 0) | (1 << 1));
intel_encoder->enc.possible_clones = (1 << INTEL_OUTPUT_TVOUT);
intel_encoder->dev_priv = tv_priv;
tv_priv->type = DRM_MODE_CONNECTOR_Unknown;
intel_tv->type = DRM_MODE_CONNECTOR_Unknown;
/* BIOS margin values */
tv_priv->margin[TV_MARGIN_LEFT] = 54;
tv_priv->margin[TV_MARGIN_TOP] = 36;
tv_priv->margin[TV_MARGIN_RIGHT] = 46;
tv_priv->margin[TV_MARGIN_BOTTOM] = 37;
intel_tv->margin[TV_MARGIN_LEFT] = 54;
intel_tv->margin[TV_MARGIN_TOP] = 36;
intel_tv->margin[TV_MARGIN_RIGHT] = 46;
intel_tv->margin[TV_MARGIN_BOTTOM] = 37;
tv_priv->tv_format = kstrdup(tv_modes[initial_mode].name, GFP_KERNEL);
intel_tv->tv_format = kstrdup(tv_modes[initial_mode].name, GFP_KERNEL);
drm_encoder_helper_add(&intel_encoder->enc, &intel_tv_helper_funcs);
drm_connector_helper_add(connector, &intel_tv_connector_helper_funcs);
......@@ -1703,16 +1696,16 @@ intel_tv_init(struct drm_device *dev)
initial_mode);
drm_connector_attach_property(connector,
dev->mode_config.tv_left_margin_property,
tv_priv->margin[TV_MARGIN_LEFT]);
intel_tv->margin[TV_MARGIN_LEFT]);
drm_connector_attach_property(connector,
dev->mode_config.tv_top_margin_property,
tv_priv->margin[TV_MARGIN_TOP]);
intel_tv->margin[TV_MARGIN_TOP]);
drm_connector_attach_property(connector,
dev->mode_config.tv_right_margin_property,
tv_priv->margin[TV_MARGIN_RIGHT]);
intel_tv->margin[TV_MARGIN_RIGHT]);
drm_connector_attach_property(connector,
dev->mode_config.tv_bottom_margin_property,
tv_priv->margin[TV_MARGIN_BOTTOM]);
intel_tv->margin[TV_MARGIN_BOTTOM]);
out:
drm_sysfs_connector_add(connector);
}
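Taken together, the intel_tv.c hunks above replace the old scheme — a struct intel_encoder allocated with a trailing intel_tv_priv blob reached through dev_priv — with a single struct intel_tv that embeds the encoder and is recovered from the generic pointer via container_of(). A minimal standalone sketch of that downcast, using simplified stand-in types and with the kernel's container_of() spelled out so it compiles outside the kernel:

#include <stddef.h>
#include <stdio.h>

/* Userspace spelling of the kernel's container_of(): recover the enclosing
 * structure from the address of one of its members. */
#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

/* Stand-ins for struct drm_encoder, struct intel_encoder and struct intel_tv. */
struct drm_encoder_s   { int id; };
struct intel_encoder_s { struct drm_encoder_s enc; };
struct intel_tv_s      { struct intel_encoder_s base; const char *tv_format; };

/* Mirrors enc_to_intel_tv(): drm_encoder -> intel_encoder -> intel_tv. */
static struct intel_tv_s *enc_to_tv(struct drm_encoder_s *encoder)
{
	struct intel_encoder_s *ie = container_of(encoder, struct intel_encoder_s, enc);

	return container_of(ie, struct intel_tv_s, base);
}

int main(void)
{
	struct intel_tv_s tv = { .tv_format = "NTSC-M" };

	printf("%s\n", enc_to_tv(&tv.base.enc)->tv_format);
	return 0;
}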