Commit 9c19415c authored by Dave Airlie


Merge tag 'drm-intel-next-2012-06-04' of git://people.freedesktop.org/~danvet/drm-intel into drm-core-next

Daniel Vetter writes:
rc2 is out the door so I figured I'd annoy you with the first -next
pull request for 3.6 already. Highlights:
- new wait_rendering_timeout interface (Ben)
- l3 cache remapping and error uevent support (Ben)
- even more infoframes work from Paulo
- gen4 hotplug rework from Chris
- prep work to make Laurent Pinchart's original mode constification for
 connector->mode_fixup possible

QA reported a few new bugs this time around, but no regressions afaict. For
3.5 the only thing I'm aware of is the edp vdd dmesg spam Linus originally
reported - it looks like that might have been introduced in 3.5. But
somehow my brain is routinely offline when I work on that issue, so things
seem to take forever (and atm I'm at patch v4 for that little problem).

* tag 'drm-intel-next-2012-06-04' of git://people.freedesktop.org/~danvet/drm-intel: (39 commits)
  drm/i915: add min freq control to debugfs
  drm/i915: don't chnage the original mode in dp_mode_fixup
  drm/i915: adjusted_mode->clock in the dp mode_fixup
  drm/i915: compute the target_clock for edp directly
  drm/i915: extract object active state flushing code
  drm/i915: clarify IBX dp workaround
  drm/i915: simplify sysfs setup code
  drm/i915: initialize the parity work only once
  drm/i915: ivybridge_handle_parity_error should be static
  drm/i915: l3 parity sysfs interface
  drm/i915: remap l3 on hw init
  drm/i915: enable parity error interrupts
  drm/i915: Dynamic Parity Detection handling
  drm/i915: s/mdelay/msleep/ in the sdvo detect function
  drm/i915: reuse the sdvo tv clock adjustment in ilk mode_set
  drm/i915: there's no cxsr on ilk
  drm/i915: add some barriers when changing DIPs
  drm/i915: remove comment about HSW HDMI DIPs
  drm/i915: don't set SDVO_BORDER_ENABLE when we're HDMI
  drm/i915: don't write 0 to DIP control at HDMI init
  ...
parents fb09185a 1523c310
......@@ -1764,6 +1764,64 @@ static const struct file_operations i915_max_freq_fops = {
.llseek = default_llseek,
};
static ssize_t
i915_min_freq_read(struct file *filp, char __user *ubuf, size_t max,
loff_t *ppos)
{
struct drm_device *dev = filp->private_data;
drm_i915_private_t *dev_priv = dev->dev_private;
char buf[80];
int len;
len = snprintf(buf, sizeof(buf),
"min freq: %d\n", dev_priv->min_delay * 50);
if (len > sizeof(buf))
len = sizeof(buf);
return simple_read_from_buffer(ubuf, max, ppos, buf, len);
}
static ssize_t
i915_min_freq_write(struct file *filp, const char __user *ubuf, size_t cnt,
loff_t *ppos)
{
struct drm_device *dev = filp->private_data;
struct drm_i915_private *dev_priv = dev->dev_private;
char buf[20];
int val = 1;
if (cnt > 0) {
if (cnt > sizeof(buf) - 1)
return -EINVAL;
if (copy_from_user(buf, ubuf, cnt))
return -EFAULT;
buf[cnt] = 0;
val = simple_strtoul(buf, NULL, 0);
}
DRM_DEBUG_DRIVER("Manually setting min freq to %d\n", val);
/*
* Turbo will still be enabled, but won't go below the set value.
*/
dev_priv->min_delay = val / 50;
gen6_set_rps(dev, val / 50);
return cnt;
}
static const struct file_operations i915_min_freq_fops = {
.owner = THIS_MODULE,
.open = simple_open,
.read = i915_min_freq_read,
.write = i915_min_freq_write,
.llseek = default_llseek,
};
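
For orientation, the new knob lands next to the existing i915_max_freq file as <debugfs>/dri/<minor>/i915_min_freq. A minimal userspace sketch of poking it (the /sys/kernel/debug mount point and DRM minor 0 are assumptions, adjust for your setup; this is not part of the patch):

/* Hypothetical test helper: query and then raise the minimum GPU frequency
 * through the debugfs file added above. */
#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>

int main(void)
{
	const char *path = "/sys/kernel/debug/dri/0/i915_min_freq"; /* assumed path */
	char buf[32];
	ssize_t len;
	int fd;

	fd = open(path, O_RDWR);	/* needs root */
	if (fd < 0) {
		perror("open");
		return 1;
	}

	/* The read handler reports "min freq: <MHz>", i.e. min_delay * 50. */
	len = read(fd, buf, sizeof(buf) - 1);
	if (len > 0) {
		buf[len] = '\0';
		fputs(buf, stdout);
	}

	/* Ask for a 500 MHz floor; the write handler divides by 50 internally. */
	if (write(fd, "500", strlen("500")) < 0)
		perror("write");

	close(fd);
	return 0;
}

The value written is interpreted in MHz and converted to the internal delay units by the val / 50 division in i915_min_freq_write(), mirroring the existing i915_max_freq handling.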
static ssize_t
i915_cache_sharing_read(struct file *filp,
char __user *ubuf,
......@@ -1996,6 +2054,12 @@ int i915_debugfs_init(struct drm_minor *minor)
if (ret)
return ret;
ret = i915_debugfs_create(minor->debugfs_root, minor,
"i915_min_freq",
&i915_min_freq_fops);
if (ret)
return ret;
ret = i915_debugfs_create(minor->debugfs_root, minor,
"i915_cache_sharing",
&i915_cache_sharing_fops);
......@@ -2028,6 +2092,8 @@ void i915_debugfs_cleanup(struct drm_minor *minor)
1, minor);
drm_debugfs_remove_files((struct drm_info_list *) &i915_max_freq_fops,
1, minor);
drm_debugfs_remove_files((struct drm_info_list *) &i915_min_freq_fops,
1, minor);
drm_debugfs_remove_files((struct drm_info_list *) &i915_cache_sharing_fops,
1, minor);
drm_debugfs_remove_files((struct drm_info_list *) &i915_ring_stop_fops,
......
......@@ -1803,6 +1803,7 @@ struct drm_ioctl_desc i915_ioctls[] = {
DRM_IOCTL_DEF_DRV(I915_OVERLAY_ATTRS, intel_overlay_attrs, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED),
DRM_IOCTL_DEF_DRV(I915_SET_SPRITE_COLORKEY, intel_sprite_set_colorkey, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED),
DRM_IOCTL_DEF_DRV(I915_GET_SPRITE_COLORKEY, intel_sprite_get_colorkey, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED),
DRM_IOCTL_DEF_DRV(I915_GEM_WAIT, i915_gem_wait_ioctl, DRM_AUTH|DRM_UNLOCKED),
};
int i915_max_ioctl = DRM_ARRAY_SIZE(i915_ioctls);
......
......@@ -657,6 +657,8 @@ typedef struct drm_i915_private {
/** PPGTT used for aliasing the PPGTT with the GTT */
struct i915_hw_ppgtt *aliasing_ppgtt;
u32 *l3_remap_info;
struct shrinker inactive_shrinker;
/**
......@@ -817,6 +819,8 @@ typedef struct drm_i915_private {
struct drm_property *broadcast_rgb_property;
struct drm_property *force_audio_property;
struct work_struct parity_error_work;
} drm_i915_private_t;
/* Iterate over initialised rings */
......@@ -1237,6 +1241,8 @@ int i915_gem_get_tiling(struct drm_device *dev, void *data,
struct drm_file *file_priv);
int i915_gem_get_aperture_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_priv);
int i915_gem_wait_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_priv);
void i915_gem_load(struct drm_device *dev);
int i915_gem_init_object(struct drm_gem_object *obj);
int __must_check i915_gem_flush_ring(struct intel_ring_buffer *ring,
......@@ -1315,6 +1321,7 @@ int __must_check i915_gem_object_set_domain(struct drm_i915_gem_object *obj,
int __must_check i915_gem_object_finish_gpu(struct drm_i915_gem_object *obj);
int __must_check i915_gem_init(struct drm_device *dev);
int __must_check i915_gem_init_hw(struct drm_device *dev);
void i915_gem_l3_remap(struct drm_device *dev);
void i915_gem_init_swizzling(struct drm_device *dev);
void i915_gem_init_ppgtt(struct drm_device *dev);
void i915_gem_cleanup_ringbuffer(struct drm_device *dev);
......@@ -1323,8 +1330,8 @@ int __must_check i915_gem_idle(struct drm_device *dev);
int __must_check i915_add_request(struct intel_ring_buffer *ring,
struct drm_file *file,
struct drm_i915_gem_request *request);
int __must_check i915_wait_request(struct intel_ring_buffer *ring,
uint32_t seqno);
int __must_check i915_wait_seqno(struct intel_ring_buffer *ring,
uint32_t seqno);
int i915_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf);
int __must_check
i915_gem_object_set_to_gtt_domain(struct drm_i915_gem_object *obj,
......
......@@ -1899,34 +1899,82 @@ i915_gem_check_olr(struct intel_ring_buffer *ring, u32 seqno)
return ret;
}
/**
* __wait_seqno - wait until execution of seqno has finished
* @ring: the ring expected to report seqno
* @seqno: the seqno to wait for
* @interruptible: do an interruptible wait (normally yes)
* @timeout: in - how long to wait (NULL forever); out - how much time remaining
*
* Returns 0 if the seqno was found within the allotted time. Else returns the
* errno with the remaining time filled into the timeout argument.
*/
static int __wait_seqno(struct intel_ring_buffer *ring, u32 seqno,
bool interruptible)
bool interruptible, struct timespec *timeout)
{
drm_i915_private_t *dev_priv = ring->dev->dev_private;
int ret = 0;
struct timespec before, now, wait_time={1,0};
unsigned long timeout_jiffies;
long end;
bool wait_forever = true;
if (i915_seqno_passed(ring->get_seqno(ring), seqno))
return 0;
trace_i915_gem_request_wait_begin(ring, seqno);
if (timeout != NULL) {
wait_time = *timeout;
wait_forever = false;
}
timeout_jiffies = timespec_to_jiffies(&wait_time);
if (WARN_ON(!ring->irq_get(ring)))
return -ENODEV;
/* Record current time in case interrupted by signal, or wedged */
getrawmonotonic(&before);
#define EXIT_COND \
(i915_seqno_passed(ring->get_seqno(ring), seqno) || \
atomic_read(&dev_priv->mm.wedged))
do {
if (interruptible)
end = wait_event_interruptible_timeout(ring->irq_queue,
EXIT_COND,
timeout_jiffies);
else
end = wait_event_timeout(ring->irq_queue, EXIT_COND,
timeout_jiffies);
if (interruptible)
ret = wait_event_interruptible(ring->irq_queue,
EXIT_COND);
else
wait_event(ring->irq_queue, EXIT_COND);
if (atomic_read(&dev_priv->mm.wedged))
end = -EAGAIN;
} while (end == 0 && wait_forever);
getrawmonotonic(&now);
ring->irq_put(ring);
trace_i915_gem_request_wait_end(ring, seqno);
#undef EXIT_COND
return ret;
if (timeout) {
struct timespec sleep_time = timespec_sub(now, before);
*timeout = timespec_sub(*timeout, sleep_time);
}
switch (end) {
case -EAGAIN: /* Wedged */
case -ERESTARTSYS: /* Signal */
return (int)end;
case 0: /* Timeout */
if (timeout)
set_normalized_timespec(timeout, 0, 0);
return -ETIME;
default: /* Completed */
WARN_ON(end < 0); /* We're not aware of other errors */
return 0;
}
}
/**
......@@ -1934,8 +1982,7 @@ static int __wait_seqno(struct intel_ring_buffer *ring, u32 seqno,
* request and object lists appropriately for that event.
*/
int
i915_wait_request(struct intel_ring_buffer *ring,
uint32_t seqno)
i915_wait_seqno(struct intel_ring_buffer *ring, uint32_t seqno)
{
drm_i915_private_t *dev_priv = ring->dev->dev_private;
int ret = 0;
......@@ -1950,9 +1997,7 @@ i915_wait_request(struct intel_ring_buffer *ring,
if (ret)
return ret;
ret = __wait_seqno(ring, seqno, dev_priv->mm.interruptible);
if (atomic_read(&dev_priv->mm.wedged))
ret = -EAGAIN;
ret = __wait_seqno(ring, seqno, dev_priv->mm.interruptible, NULL);
return ret;
}
......@@ -1975,7 +2020,7 @@ i915_gem_object_wait_rendering(struct drm_i915_gem_object *obj)
* it.
*/
if (obj->active) {
ret = i915_wait_request(obj->ring, obj->last_rendering_seqno);
ret = i915_wait_seqno(obj->ring, obj->last_rendering_seqno);
if (ret)
return ret;
i915_gem_retire_requests_ring(obj->ring);
......@@ -1984,6 +2029,110 @@ i915_gem_object_wait_rendering(struct drm_i915_gem_object *obj)
return 0;
}
/**
* Ensures that an object will eventually get non-busy by flushing any required
* write domains, emitting any outstanding lazy request and retiring any
* completed requests.
*/
static int
i915_gem_object_flush_active(struct drm_i915_gem_object *obj)
{
int ret;
if (obj->active) {
ret = i915_gem_object_flush_gpu_write_domain(obj);
if (ret)
return ret;
ret = i915_gem_check_olr(obj->ring,
obj->last_rendering_seqno);
if (ret)
return ret;
i915_gem_retire_requests_ring(obj->ring);
}
return 0;
}
/**
* i915_gem_wait_ioctl - implements DRM_IOCTL_I915_GEM_WAIT
* @DRM_IOCTL_ARGS: standard ioctl arguments
*
* Returns 0 if successful, else an error is returned with the remaining time in
* the timeout parameter.
* -ETIME: object is still busy after timeout
* -ERESTARTSYS: signal interrupted the wait
* -ENOENT: object doesn't exist
* Also possible, but rare:
* -EAGAIN: GPU wedged
* -ENOMEM: out of memory
* -ENODEV: Internal IRQ fail
* -E?: The add request failed
*
* The wait ioctl with a timeout of 0 reimplements the busy ioctl. With any
* non-zero timeout parameter the wait ioctl will wait for the given number of
* nanoseconds on an object becoming unbusy. Since the wait itself does so
* without holding struct_mutex the object may become re-busied before this
* function completes. A similar but shorter race condition exists in the busy
* ioctl.
*/
int
i915_gem_wait_ioctl(struct drm_device *dev, void *data, struct drm_file *file)
{
struct drm_i915_gem_wait *args = data;
struct drm_i915_gem_object *obj;
struct intel_ring_buffer *ring = NULL;
struct timespec timeout;
u32 seqno = 0;
int ret = 0;
timeout = ns_to_timespec(args->timeout_ns);
ret = i915_mutex_lock_interruptible(dev);
if (ret)
return ret;
obj = to_intel_bo(drm_gem_object_lookup(dev, file, args->bo_handle));
if (&obj->base == NULL) {
mutex_unlock(&dev->struct_mutex);
return -ENOENT;
}
/* Need to make sure the object gets inactive eventually. */
ret = i915_gem_object_flush_active(obj);
if (ret)
goto out;
if (obj->active) {
seqno = obj->last_rendering_seqno;
ring = obj->ring;
}
if (seqno == 0)
goto out;
/* Do this after OLR check to make sure we make forward progress polling
* on this IOCTL with a 0 timeout (like busy ioctl)
*/
if (!args->timeout_ns) {
ret = -ETIME;
goto out;
}
drm_gem_object_unreference(&obj->base);
mutex_unlock(&dev->struct_mutex);
ret = __wait_seqno(ring, seqno, true, &timeout);
WARN_ON(!timespec_valid(&timeout));
args->timeout_ns = timespec_to_ns(&timeout);
return ret;
out:
drm_gem_object_unreference(&obj->base);
mutex_unlock(&dev->struct_mutex);
return ret;
}
/**
* i915_gem_object_sync - sync an object to a ring.
*
......@@ -2160,7 +2309,7 @@ static int i915_ring_idle(struct intel_ring_buffer *ring)
return ret;
}
return i915_wait_request(ring, i915_gem_next_request_seqno(ring));
return i915_wait_seqno(ring, i915_gem_next_request_seqno(ring));
}
int i915_gpu_idle(struct drm_device *dev)
......@@ -2364,7 +2513,7 @@ i915_gem_object_flush_fence(struct drm_i915_gem_object *obj)
}
if (obj->last_fenced_seqno) {
ret = i915_wait_request(obj->ring, obj->last_fenced_seqno);
ret = i915_wait_seqno(obj->ring, obj->last_fenced_seqno);
if (ret)
return ret;
......@@ -3030,7 +3179,7 @@ i915_gem_ring_throttle(struct drm_device *dev, struct drm_file *file)
if (seqno == 0)
return 0;
ret = __wait_seqno(ring, seqno, true);
ret = __wait_seqno(ring, seqno, true, NULL);
if (ret == 0)
queue_delayed_work(dev_priv->wq, &dev_priv->mm.retire_work, 0);
......@@ -3199,30 +3348,9 @@ i915_gem_busy_ioctl(struct drm_device *dev, void *data,
* become non-busy without any further actions, therefore emit any
* necessary flushes here.
*/
args->busy = obj->active;
if (args->busy) {
/* Unconditionally flush objects, even when the gpu still uses this
* object. Userspace calling this function indicates that it wants to
* use this buffer rather sooner than later, so issuing the required
* flush earlier is beneficial.
*/
if (obj->base.write_domain & I915_GEM_GPU_DOMAINS) {
ret = i915_gem_flush_ring(obj->ring,
0, obj->base.write_domain);
} else {
ret = i915_gem_check_olr(obj->ring,
obj->last_rendering_seqno);
}
ret = i915_gem_object_flush_active(obj);
/* Update the active list for the hardware's current position.
* Otherwise this only updates on a delayed timer or when irqs
* are actually unmasked, and our working set ends up being
* larger than required.
*/
i915_gem_retire_requests_ring(obj->ring);
args->busy = obj->active;
}
args->busy = obj->active;
drm_gem_object_unreference(&obj->base);
unlock:
......@@ -3435,6 +3563,38 @@ i915_gem_idle(struct drm_device *dev)
return 0;
}
void i915_gem_l3_remap(struct drm_device *dev)
{
drm_i915_private_t *dev_priv = dev->dev_private;
u32 misccpctl;
int i;
if (!IS_IVYBRIDGE(dev))
return;
if (!dev_priv->mm.l3_remap_info)
return;
misccpctl = I915_READ(GEN7_MISCCPCTL);
I915_WRITE(GEN7_MISCCPCTL, misccpctl & ~GEN7_DOP_CLOCK_GATE_ENABLE);
POSTING_READ(GEN7_MISCCPCTL);
for (i = 0; i < GEN7_L3LOG_SIZE; i += 4) {
u32 remap = I915_READ(GEN7_L3LOG_BASE + i);
if (remap && remap != dev_priv->mm.l3_remap_info[i/4])
DRM_DEBUG("0x%x was already programmed to %x\n",
GEN7_L3LOG_BASE + i, remap);
if (remap && !dev_priv->mm.l3_remap_info[i/4])
DRM_DEBUG_DRIVER("Clearing remapped register\n");
I915_WRITE(GEN7_L3LOG_BASE + i, dev_priv->mm.l3_remap_info[i/4]);
}
/* Make sure all the writes land before disabling dop clock gating */
POSTING_READ(GEN7_L3LOG_BASE);
I915_WRITE(GEN7_MISCCPCTL, misccpctl);
}
void i915_gem_init_swizzling(struct drm_device *dev)
{
drm_i915_private_t *dev_priv = dev->dev_private;
......@@ -3524,6 +3684,8 @@ i915_gem_init_hw(struct drm_device *dev)
drm_i915_private_t *dev_priv = dev->dev_private;
int ret;
i915_gem_l3_remap(dev);
i915_gem_init_swizzling(dev);
ret = intel_init_render_ring_buffer(dev);
......
......@@ -375,6 +375,86 @@ static void gen6_pm_rps_work(struct work_struct *work)
mutex_unlock(&dev_priv->dev->struct_mutex);
}
/**
* ivybridge_parity_work - Workqueue called when a parity error interrupt
* occurs.
* @work: workqueue struct
*
* Doesn't actually do anything except notify userspace. As a consequence of
* this event, userspace should try to remap the bad rows, since statistically
* the same row is likely to go bad again.
*/
static void ivybridge_parity_work(struct work_struct *work)
{
drm_i915_private_t *dev_priv = container_of(work, drm_i915_private_t,
parity_error_work);
u32 error_status, row, bank, subbank;
char *parity_event[5];
uint32_t misccpctl;
unsigned long flags;
/* We must turn off DOP level clock gating to access the L3 registers.
* In order to prevent a get/put style interface, acquire struct mutex
* any time we access those registers.
*/
mutex_lock(&dev_priv->dev->struct_mutex);
misccpctl = I915_READ(GEN7_MISCCPCTL);
I915_WRITE(GEN7_MISCCPCTL, misccpctl & ~GEN7_DOP_CLOCK_GATE_ENABLE);
POSTING_READ(GEN7_MISCCPCTL);
error_status = I915_READ(GEN7_L3CDERRST1);
row = GEN7_PARITY_ERROR_ROW(error_status);
bank = GEN7_PARITY_ERROR_BANK(error_status);
subbank = GEN7_PARITY_ERROR_SUBBANK(error_status);
I915_WRITE(GEN7_L3CDERRST1, GEN7_PARITY_ERROR_VALID |
GEN7_L3CDERRST1_ENABLE);
POSTING_READ(GEN7_L3CDERRST1);
I915_WRITE(GEN7_MISCCPCTL, misccpctl);
spin_lock_irqsave(&dev_priv->irq_lock, flags);
dev_priv->gt_irq_mask &= ~GT_GEN7_L3_PARITY_ERROR_INTERRUPT;
I915_WRITE(GTIMR, dev_priv->gt_irq_mask);
spin_unlock_irqrestore(&dev_priv->irq_lock, flags);
mutex_unlock(&dev_priv->dev->struct_mutex);
parity_event[0] = "L3_PARITY_ERROR=1";
parity_event[1] = kasprintf(GFP_KERNEL, "ROW=%d", row);
parity_event[2] = kasprintf(GFP_KERNEL, "BANK=%d", bank);
parity_event[3] = kasprintf(GFP_KERNEL, "SUBBANK=%d", subbank);
parity_event[4] = NULL;
kobject_uevent_env(&dev_priv->dev->primary->kdev.kobj,
KOBJ_CHANGE, parity_event);
DRM_DEBUG("Parity error: Row = %d, Bank = %d, Sub bank = %d.\n",
row, bank, subbank);
kfree(parity_event[3]);
kfree(parity_event[2]);
kfree(parity_event[1]);
}
static void ivybridge_handle_parity_error(struct drm_device *dev)
{
drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
unsigned long flags;
if (!IS_IVYBRIDGE(dev))
return;
spin_lock_irqsave(&dev_priv->irq_lock, flags);
dev_priv->gt_irq_mask |= GT_GEN7_L3_PARITY_ERROR_INTERRUPT;
I915_WRITE(GTIMR, dev_priv->gt_irq_mask);
spin_unlock_irqrestore(&dev_priv->irq_lock, flags);
queue_work(dev_priv->wq, &dev_priv->parity_error_work);
}
static void snb_gt_irq_handler(struct drm_device *dev,
struct drm_i915_private *dev_priv,
u32 gt_iir)
......@@ -394,6 +474,9 @@ static void snb_gt_irq_handler(struct drm_device *dev,
DRM_ERROR("GT error interrupt 0x%08x\n", gt_iir);
i915_handle_error(dev, false);
}
if (gt_iir & GT_GEN7_L3_PARITY_ERROR_INTERRUPT)
ivybridge_handle_parity_error(dev);
}
static void gen6_queue_rps_work(struct drm_i915_private *dev_priv,
......@@ -1649,7 +1732,6 @@ static void ironlake_irq_preinstall(struct drm_device *dev)
atomic_set(&dev_priv->irq_received, 0);
I915_WRITE(HWSTAM, 0xeffe);
/* XXX hotplug from PCH */
......@@ -1812,13 +1894,13 @@ static int ivybridge_irq_postinstall(struct drm_device *dev)
DE_PIPEA_VBLANK_IVB);
POSTING_READ(DEIER);
dev_priv->gt_irq_mask = ~0;
dev_priv->gt_irq_mask = ~GT_GEN7_L3_PARITY_ERROR_INTERRUPT;
I915_WRITE(GTIIR, I915_READ(GTIIR));
I915_WRITE(GTIMR, dev_priv->gt_irq_mask);
render_irqs = GT_USER_INTERRUPT | GEN6_BSD_USER_INTERRUPT |
GEN6_BLITTER_USER_INTERRUPT;
GEN6_BLITTER_USER_INTERRUPT | GT_GEN7_L3_PARITY_ERROR_INTERRUPT;
I915_WRITE(GTIER, render_irqs);
POSTING_READ(GTIER);
......@@ -2167,9 +2249,9 @@ static int i915_irq_postinstall(struct drm_device *dev)
hotplug_en |= HDMIC_HOTPLUG_INT_EN;
if (dev_priv->hotplug_supported_mask & HDMID_HOTPLUG_INT_STATUS)
hotplug_en |= HDMID_HOTPLUG_INT_EN;
if (dev_priv->hotplug_supported_mask & SDVOC_HOTPLUG_INT_STATUS)
if (dev_priv->hotplug_supported_mask & SDVOC_HOTPLUG_INT_STATUS_I915)
hotplug_en |= SDVOC_HOTPLUG_INT_EN;
if (dev_priv->hotplug_supported_mask & SDVOB_HOTPLUG_INT_STATUS)
if (dev_priv->hotplug_supported_mask & SDVOB_HOTPLUG_INT_STATUS_I915)
hotplug_en |= SDVOB_HOTPLUG_INT_EN;
if (dev_priv->hotplug_supported_mask & CRT_HOTPLUG_INT_STATUS) {
hotplug_en |= CRT_HOTPLUG_INT_EN;
......@@ -2329,10 +2411,8 @@ static void i965_irq_preinstall(struct drm_device * dev)
atomic_set(&dev_priv->irq_received, 0);
if (I915_HAS_HOTPLUG(dev)) {
I915_WRITE(PORT_HOTPLUG_EN, 0);
I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT));
}
I915_WRITE(PORT_HOTPLUG_EN, 0);
I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT));
I915_WRITE(HWSTAM, 0xeffe);
for_each_pipe(pipe)
......@@ -2345,11 +2425,13 @@ static void i965_irq_preinstall(struct drm_device * dev)
static int i965_irq_postinstall(struct drm_device *dev)
{
drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
u32 hotplug_en;
u32 enable_mask;
u32 error_mask;
/* Unmask the interrupts that we always want on. */
dev_priv->irq_mask = ~(I915_ASLE_INTERRUPT |
I915_DISPLAY_PORT_INTERRUPT |
I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT |
......@@ -2365,13 +2447,6 @@ static int i965_irq_postinstall(struct drm_device *dev)
dev_priv->pipestat[0] = 0;
dev_priv->pipestat[1] = 0;
if (I915_HAS_HOTPLUG(dev)) {
/* Enable in IER... */
enable_mask |= I915_DISPLAY_PORT_INTERRUPT;
/* and unmask in IMR */
dev_priv->irq_mask &= ~I915_DISPLAY_PORT_INTERRUPT;
}
/*
* Enable some error detection, note the instruction error mask
* bit is reserved, so we leave it masked.
......@@ -2391,36 +2466,40 @@ static int i965_irq_postinstall(struct drm_device *dev)
I915_WRITE(IER, enable_mask);
POSTING_READ(IER);
if (I915_HAS_HOTPLUG(dev)) {
u32 hotplug_en = I915_READ(PORT_HOTPLUG_EN);
/* Note HDMI and DP share bits */
if (dev_priv->hotplug_supported_mask & HDMIB_HOTPLUG_INT_STATUS)
hotplug_en |= HDMIB_HOTPLUG_INT_EN;
if (dev_priv->hotplug_supported_mask & HDMIC_HOTPLUG_INT_STATUS)
hotplug_en |= HDMIC_HOTPLUG_INT_EN;
if (dev_priv->hotplug_supported_mask & HDMID_HOTPLUG_INT_STATUS)
hotplug_en |= HDMID_HOTPLUG_INT_EN;
if (dev_priv->hotplug_supported_mask & SDVOC_HOTPLUG_INT_STATUS)
/* Note HDMI and DP share hotplug bits */
hotplug_en = 0;
if (dev_priv->hotplug_supported_mask & HDMIB_HOTPLUG_INT_STATUS)
hotplug_en |= HDMIB_HOTPLUG_INT_EN;
if (dev_priv->hotplug_supported_mask & HDMIC_HOTPLUG_INT_STATUS)
hotplug_en |= HDMIC_HOTPLUG_INT_EN;
if (dev_priv->hotplug_supported_mask & HDMID_HOTPLUG_INT_STATUS)
hotplug_en |= HDMID_HOTPLUG_INT_EN;
if (IS_G4X(dev)) {
if (dev_priv->hotplug_supported_mask & SDVOC_HOTPLUG_INT_STATUS_G4X)
hotplug_en |= SDVOC_HOTPLUG_INT_EN;
if (dev_priv->hotplug_supported_mask & SDVOB_HOTPLUG_INT_STATUS)
if (dev_priv->hotplug_supported_mask & SDVOB_HOTPLUG_INT_STATUS_G4X)
hotplug_en |= SDVOB_HOTPLUG_INT_EN;
if (dev_priv->hotplug_supported_mask & CRT_HOTPLUG_INT_STATUS) {
hotplug_en |= CRT_HOTPLUG_INT_EN;
} else {
if (dev_priv->hotplug_supported_mask & SDVOC_HOTPLUG_INT_STATUS_I965)
hotplug_en |= SDVOC_HOTPLUG_INT_EN;
if (dev_priv->hotplug_supported_mask & SDVOB_HOTPLUG_INT_STATUS_I965)
hotplug_en |= SDVOB_HOTPLUG_INT_EN;
}
if (dev_priv->hotplug_supported_mask & CRT_HOTPLUG_INT_STATUS) {
hotplug_en |= CRT_HOTPLUG_INT_EN;
/* Programming the CRT detection parameters tends
to generate a spurious hotplug event about three
seconds later. So just do it once.
*/
if (IS_G4X(dev))
hotplug_en |= CRT_HOTPLUG_ACTIVATION_PERIOD_64;
hotplug_en |= CRT_HOTPLUG_VOLTAGE_COMPARE_50;
}
/* Programming the CRT detection parameters tends
to generate a spurious hotplug event about three
seconds later. So just do it once.
*/
if (IS_G4X(dev))
hotplug_en |= CRT_HOTPLUG_ACTIVATION_PERIOD_64;
hotplug_en |= CRT_HOTPLUG_VOLTAGE_COMPARE_50;
}
/* Ignore TV since it's buggy */
/* Ignore TV since it's buggy */
I915_WRITE(PORT_HOTPLUG_EN, hotplug_en);
}
I915_WRITE(PORT_HOTPLUG_EN, hotplug_en);
intel_opregion_enable_asle(dev);
......@@ -2478,8 +2557,7 @@ static irqreturn_t i965_irq_handler(DRM_IRQ_ARGS)
ret = IRQ_HANDLED;
/* Consume port. Then clear IIR or we'll miss events */
if ((I915_HAS_HOTPLUG(dev)) &&
(iir & I915_DISPLAY_PORT_INTERRUPT)) {
if (iir & I915_DISPLAY_PORT_INTERRUPT) {
u32 hotplug_status = I915_READ(PORT_HOTPLUG_STAT);
DRM_DEBUG_DRIVER("hotplug event received, stat 0x%08x\n",
......@@ -2552,10 +2630,8 @@ static void i965_irq_uninstall(struct drm_device * dev)
if (!dev_priv)
return;
if (I915_HAS_HOTPLUG(dev)) {
I915_WRITE(PORT_HOTPLUG_EN, 0);
I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT));
}
I915_WRITE(PORT_HOTPLUG_EN, 0);
I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT));
I915_WRITE(HWSTAM, 0xffffffff);
for_each_pipe(pipe)
......@@ -2576,6 +2652,7 @@ void intel_irq_init(struct drm_device *dev)
INIT_WORK(&dev_priv->hotplug_work, i915_hotplug_work_func);
INIT_WORK(&dev_priv->error_work, i915_error_work_func);
INIT_WORK(&dev_priv->rps_work, gen6_pm_rps_work);
INIT_WORK(&dev_priv->parity_error_work, ivybridge_parity_work);
dev->driver->get_vblank_counter = i915_get_vblank_counter;
dev->max_vblank_count = 0xffffff; /* only 24 bits of frame count */
......
......@@ -1566,20 +1566,34 @@
#define CRT_HOTPLUG_DETECT_VOLTAGE_475MV (1 << 2)
#define PORT_HOTPLUG_STAT 0x61114
#define HDMIB_HOTPLUG_INT_STATUS (1 << 29)
#define DPB_HOTPLUG_INT_STATUS (1 << 29)
#define HDMIC_HOTPLUG_INT_STATUS (1 << 28)
#define DPC_HOTPLUG_INT_STATUS (1 << 28)
#define HDMID_HOTPLUG_INT_STATUS (1 << 27)
#define DPD_HOTPLUG_INT_STATUS (1 << 27)
/* HDMI/DP bits are gen4+ */
#define DPB_HOTPLUG_LIVE_STATUS (1 << 29)
#define DPC_HOTPLUG_LIVE_STATUS (1 << 28)
#define DPD_HOTPLUG_LIVE_STATUS (1 << 27)
#define DPD_HOTPLUG_INT_STATUS (3 << 21)
#define DPC_HOTPLUG_INT_STATUS (3 << 19)
#define DPB_HOTPLUG_INT_STATUS (3 << 17)
/* HDMI bits are shared with the DP bits */
#define HDMIB_HOTPLUG_LIVE_STATUS (1 << 29)
#define HDMIC_HOTPLUG_LIVE_STATUS (1 << 28)
#define HDMID_HOTPLUG_LIVE_STATUS (1 << 27)
#define HDMID_HOTPLUG_INT_STATUS (3 << 21)
#define HDMIC_HOTPLUG_INT_STATUS (3 << 19)
#define HDMIB_HOTPLUG_INT_STATUS (3 << 17)
/* CRT/TV common between gen3+ */
#define CRT_HOTPLUG_INT_STATUS (1 << 11)
#define TV_HOTPLUG_INT_STATUS (1 << 10)
#define CRT_HOTPLUG_MONITOR_MASK (3 << 8)
#define CRT_HOTPLUG_MONITOR_COLOR (3 << 8)
#define CRT_HOTPLUG_MONITOR_MONO (2 << 8)
#define CRT_HOTPLUG_MONITOR_NONE (0 << 8)
#define SDVOC_HOTPLUG_INT_STATUS (1 << 7)
#define SDVOB_HOTPLUG_INT_STATUS (1 << 6)
/* SDVO is different across gen3/4 */
#define SDVOC_HOTPLUG_INT_STATUS_G4X (1 << 3)
#define SDVOB_HOTPLUG_INT_STATUS_G4X (1 << 2)
#define SDVOC_HOTPLUG_INT_STATUS_I965 (3 << 4)
#define SDVOB_HOTPLUG_INT_STATUS_I965 (3 << 2)
#define SDVOC_HOTPLUG_INT_STATUS_I915 (1 << 7)
#define SDVOB_HOTPLUG_INT_STATUS_I915 (1 << 6)
/* SDVO port control */
#define SDVOB 0x61140
......@@ -1711,8 +1725,10 @@
#define VIDEO_DIP_PORT_C (2 << 29)
#define VIDEO_DIP_PORT_D (3 << 29)
#define VIDEO_DIP_PORT_MASK (3 << 29)
#define VIDEO_DIP_ENABLE_GCP (1 << 25)
#define VIDEO_DIP_ENABLE_AVI (1 << 21)
#define VIDEO_DIP_ENABLE_VENDOR (2 << 21)
#define VIDEO_DIP_ENABLE_GAMUT (4 << 21)
#define VIDEO_DIP_ENABLE_SPD (8 << 21)
#define VIDEO_DIP_SELECT_AVI (0 << 19)
#define VIDEO_DIP_SELECT_VENDOR (1 << 19)
......@@ -1723,7 +1739,11 @@
#define VIDEO_DIP_FREQ_2VSYNC (2 << 16)
#define VIDEO_DIP_FREQ_MASK (3 << 16)
/* HSW and later: */
#define VIDEO_DIP_ENABLE_VSC_HSW (1 << 20)
#define VIDEO_DIP_ENABLE_GCP_HSW (1 << 16)
#define VIDEO_DIP_ENABLE_AVI_HSW (1 << 12)
#define VIDEO_DIP_ENABLE_VS_HSW (1 << 8)
#define VIDEO_DIP_ENABLE_GMP_HSW (1 << 4)
#define VIDEO_DIP_ENABLE_SPD_HSW (1 << 0)
/* Panel power sequencing */
......@@ -4111,6 +4131,26 @@
#define GEN6_RC6 3
#define GEN6_RC7 4
#define GEN7_MISCCPCTL (0x9424)
#define GEN7_DOP_CLOCK_GATE_ENABLE (1<<0)
/* IVYBRIDGE DPF */
#define GEN7_L3CDERRST1 0xB008 /* L3CD Error Status 1 */
#define GEN7_L3CDERRST1_ROW_MASK (0x7ff<<14)
#define GEN7_PARITY_ERROR_VALID (1<<13)
#define GEN7_L3CDERRST1_BANK_MASK (3<<11)
#define GEN7_L3CDERRST1_SUBBANK_MASK (7<<8)
#define GEN7_PARITY_ERROR_ROW(reg) \
((reg & GEN7_L3CDERRST1_ROW_MASK) >> 14)
#define GEN7_PARITY_ERROR_BANK(reg) \
((reg & GEN7_L3CDERRST1_BANK_MASK) >> 11)
#define GEN7_PARITY_ERROR_SUBBANK(reg) \
((reg & GEN7_L3CDERRST1_SUBBANK_MASK) >> 8)
#define GEN7_L3CDERRST1_ENABLE (1<<7)
#define GEN7_L3LOG_BASE 0xB070
#define GEN7_L3LOG_SIZE 0x80
#define G4X_AUD_VID_DID 0x62020
#define INTEL_AUDIO_DEVCL 0x808629FB
#define INTEL_AUDIO_DEVBLC 0x80862801
......
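The new ROW/BANK/SUBBANK helpers above are plain mask-and-shift on GEN7_L3CDERRST1. A tiny standalone decode, with the register definitions copied from the hunk above; the sample status word is fabricated purely to exercise the macros, it is not a real register dump:

/* Standalone check of the field extraction; the sample value is invented. */
#include <stdint.h>
#include <stdio.h>

#define GEN7_L3CDERRST1_ROW_MASK	(0x7ff << 14)
#define GEN7_PARITY_ERROR_VALID		(1 << 13)
#define GEN7_L3CDERRST1_BANK_MASK	(3 << 11)
#define GEN7_L3CDERRST1_SUBBANK_MASK	(7 << 8)
#define GEN7_PARITY_ERROR_ROW(reg)	(((reg) & GEN7_L3CDERRST1_ROW_MASK) >> 14)
#define GEN7_PARITY_ERROR_BANK(reg)	(((reg) & GEN7_L3CDERRST1_BANK_MASK) >> 11)
#define GEN7_PARITY_ERROR_SUBBANK(reg)	(((reg) & GEN7_L3CDERRST1_SUBBANK_MASK) >> 8)

int main(void)
{
	/* Made-up status: row 42, bank 2, subbank 5, valid bit set. */
	uint32_t error_status = (42 << 14) | (2 << 11) | (5 << 8) |
				GEN7_PARITY_ERROR_VALID;

	printf("row=%u bank=%u subbank=%u valid=%u\n",
	       GEN7_PARITY_ERROR_ROW(error_status),
	       GEN7_PARITY_ERROR_BANK(error_status),
	       GEN7_PARITY_ERROR_SUBBANK(error_status),
	       !!(error_status & GEN7_PARITY_ERROR_VALID));
	/* Prints: row=42 bank=2 subbank=5 valid=1 */
	return 0;
}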
......@@ -29,6 +29,7 @@
#include <linux/module.h>
#include <linux/stat.h>
#include <linux/sysfs.h>
#include "intel_drv.h"
#include "i915_drv.h"
static u32 calc_residency(struct drm_device *dev, const u32 reg)
......@@ -92,20 +93,134 @@ static struct attribute_group rc6_attr_group = {
.attrs = rc6_attrs
};
void i915_setup_sysfs(struct drm_device *dev)
static int l3_access_valid(struct drm_device *dev, loff_t offset)
{
if (!IS_IVYBRIDGE(dev))
return -EPERM;
if (offset % 4 != 0)
return -EINVAL;
if (offset >= GEN7_L3LOG_SIZE)
return -ENXIO;
return 0;
}
static ssize_t
i915_l3_read(struct file *filp, struct kobject *kobj,
struct bin_attribute *attr, char *buf,
loff_t offset, size_t count)
{
struct device *dev = container_of(kobj, struct device, kobj);
struct drm_minor *dminor = container_of(dev, struct drm_minor, kdev);
struct drm_device *drm_dev = dminor->dev;
struct drm_i915_private *dev_priv = drm_dev->dev_private;
uint32_t misccpctl;
int i, ret;
ret = l3_access_valid(drm_dev, offset);
if (ret)
return ret;
ret = i915_mutex_lock_interruptible(drm_dev);
if (ret)
return ret;
misccpctl = I915_READ(GEN7_MISCCPCTL);
I915_WRITE(GEN7_MISCCPCTL, misccpctl & ~GEN7_DOP_CLOCK_GATE_ENABLE);
for (i = offset; count >= 4 && i < GEN7_L3LOG_SIZE; i += 4, count -= 4)
*((uint32_t *)(&buf[i])) = I915_READ(GEN7_L3LOG_BASE + i);
I915_WRITE(GEN7_MISCCPCTL, misccpctl);
mutex_unlock(&drm_dev->struct_mutex);
return i - offset;
}
static ssize_t
i915_l3_write(struct file *filp, struct kobject *kobj,
struct bin_attribute *attr, char *buf,
loff_t offset, size_t count)
{
struct device *dev = container_of(kobj, struct device, kobj);
struct drm_minor *dminor = container_of(dev, struct drm_minor, kdev);
struct drm_device *drm_dev = dminor->dev;
struct drm_i915_private *dev_priv = drm_dev->dev_private;
u32 *temp = NULL; /* Just here to make handling failures easy */
int ret;
/* ILK doesn't have any residency information */
if (INTEL_INFO(dev)->gen < 6)
return;
ret = l3_access_valid(drm_dev, offset);
if (ret)
return ret;
ret = sysfs_merge_group(&dev->primary->kdev.kobj, &rc6_attr_group);
ret = i915_mutex_lock_interruptible(drm_dev);
if (ret)
DRM_ERROR("sysfs setup failed\n");
return ret;
if (!dev_priv->mm.l3_remap_info) {
temp = kzalloc(GEN7_L3LOG_SIZE, GFP_KERNEL);
if (!temp) {
mutex_unlock(&drm_dev->struct_mutex);
return -ENOMEM;
}
}
ret = i915_gpu_idle(drm_dev);
if (ret) {
kfree(temp);
mutex_unlock(&drm_dev->struct_mutex);
return ret;
}
/* TODO: Ideally we really want a GPU reset here to make sure errors
* aren't propagated. Since I cannot find a stable way to reset the GPU
* at this point it is left as a TODO.
*/
if (temp)
dev_priv->mm.l3_remap_info = temp;
memcpy(dev_priv->mm.l3_remap_info + (offset/4),
buf + (offset/4),
count);
i915_gem_l3_remap(drm_dev);
mutex_unlock(&drm_dev->struct_mutex);
return count;
}
static struct bin_attribute dpf_attrs = {
.attr = {.name = "l3_parity", .mode = (S_IRUSR | S_IWUSR)},
.size = GEN7_L3LOG_SIZE,
.read = i915_l3_read,
.write = i915_l3_write,
.mmap = NULL
};
void i915_setup_sysfs(struct drm_device *dev)
{
int ret;
if (INTEL_INFO(dev)->gen >= 6) {
ret = sysfs_merge_group(&dev->primary->kdev.kobj,
&rc6_attr_group);
if (ret)
DRM_ERROR("RC6 residency sysfs setup failed\n");
}
if (IS_IVYBRIDGE(dev)) {
ret = device_create_bin_file(&dev->primary->kdev, &dpf_attrs);
if (ret)
DRM_ERROR("l3 parity sysfs setup failed\n");
}
}
void i915_teardown_sysfs(struct drm_device *dev)
{
device_remove_bin_file(&dev->primary->kdev, &dpf_attrs);
sysfs_unmerge_group(&dev->primary->kdev.kobj, &rc6_attr_group);
}
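
Putting the pieces together: the interrupt path in i915_irq.c emits a udev change event with L3_PARITY_ERROR=1 plus ROW/BANK/SUBBANK keys, and this binary sysfs attribute is the channel userspace uses to push replacement remap words back, which i915_gem_l3_remap() then programs into GEN7_L3LOG_BASE. A rough sketch of the read-modify-write side (the /sys/class/drm/card0/l3_parity path and the remap word value are illustrative assumptions, not something this series specifies):

/* Illustrative only: read the 128-byte L3 remap table exposed by the new
 * l3_parity attribute and write back one modified 32-bit slot. */
#include <fcntl.h>
#include <stdint.h>
#include <stdio.h>
#include <unistd.h>

#define L3LOG_SIZE 0x80	/* mirrors GEN7_L3LOG_SIZE in the patch */

int main(void)
{
	uint32_t table[L3LOG_SIZE / 4];
	int fd = open("/sys/class/drm/card0/l3_parity", O_RDWR); /* assumed path */

	if (fd < 0) {
		perror("open");
		return 1;
	}
	if (pread(fd, table, sizeof(table), 0) != sizeof(table)) {
		perror("pread");
		close(fd);
		return 1;
	}

	/* Suppose a uevent reported a bad row: store a remap entry in slot 0.
	 * The actual bit layout is hardware specific and not defined here. */
	table[0] = 0x1;	/* placeholder value, purely illustrative */

	if (pwrite(fd, &table[0], sizeof(table[0]), 0) != sizeof(table[0]))
		perror("pwrite");

	close(fd);
	return 0;
}

Note that l3_access_valid() rejects the access with -EPERM on anything but Ivybridge, and writes must be 4-byte aligned within GEN7_L3LOG_SIZE, so the sketch only makes sense on that hardware and with root privileges.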
......@@ -311,9 +311,33 @@ DEFINE_EVENT(i915_gem_request, i915_gem_request_retire,
TP_ARGS(ring, seqno)
);
DEFINE_EVENT(i915_gem_request, i915_gem_request_wait_begin,
TRACE_EVENT(i915_gem_request_wait_begin,
TP_PROTO(struct intel_ring_buffer *ring, u32 seqno),
TP_ARGS(ring, seqno)
TP_ARGS(ring, seqno),
TP_STRUCT__entry(
__field(u32, dev)
__field(u32, ring)
__field(u32, seqno)
__field(bool, blocking)
),
/* NB: the blocking information is racy since mutex_is_locked
* doesn't check that the current thread holds the lock. The only
* other option would be to pass the boolean information of whether
* or not the class was blocking down through the stack which is
* less desirable.
*/
TP_fast_assign(
__entry->dev = ring->dev->primary->index;
__entry->ring = ring->id;
__entry->seqno = seqno;
__entry->blocking = mutex_is_locked(&ring->dev->struct_mutex);
),
TP_printk("dev=%u, ring=%u, seqno=%u, blocking=%s",
__entry->dev, __entry->ring, __entry->seqno,
__entry->blocking ? "yes (NB)" : "no")
);
DEFINE_EVENT(i915_gem_request, i915_gem_request_wait_end,
......
......@@ -726,8 +726,7 @@ void intel_ddi_mode_set(struct drm_encoder *encoder,
I915_WRITE(DDI_FUNC_CTL(pipe), temp);
intel_hdmi_set_avi_infoframe(encoder, adjusted_mode);
intel_hdmi_set_spd_infoframe(encoder);
intel_hdmi->set_infoframes(encoder, adjusted_mode);
}
void intel_ddi_dpms(struct drm_encoder *encoder, int mode)
......
......@@ -4405,25 +4405,10 @@ static int ironlake_crtc_mode_set(struct drm_crtc *crtc,
&clock,
&reduced_clock);
}
/* SDVO TV has fixed PLL values depend on its clock range,
this mirrors vbios setting. */
if (is_sdvo && is_tv) {
if (adjusted_mode->clock >= 100000
&& adjusted_mode->clock < 140500) {
clock.p1 = 2;
clock.p2 = 10;
clock.n = 3;
clock.m1 = 16;
clock.m2 = 8;
} else if (adjusted_mode->clock >= 140500
&& adjusted_mode->clock <= 200000) {
clock.p1 = 1;
clock.p2 = 10;
clock.n = 6;
clock.m1 = 12;
clock.m2 = 8;
}
}
if (is_sdvo && is_tv)
i9xx_adjust_sdvo_tv_clock(adjusted_mode, &clock);
/* FDI link */
pixel_multiplier = intel_mode_get_pixel_multiplier(adjusted_mode);
......@@ -4431,16 +4416,8 @@ static int ironlake_crtc_mode_set(struct drm_crtc *crtc,
/* CPU eDP doesn't require FDI link, so just set DP M/N
according to current link config */
if (is_cpu_edp) {
target_clock = mode->clock;
intel_edp_link_config(edp_encoder, &lane, &link_bw);
} else {
/* [e]DP over FDI requires target mode clock
instead of link clock */
if (is_dp)
target_clock = mode->clock;
else
target_clock = adjusted_mode->clock;
/* FDI is a binary signal running at ~2.7GHz, encoding
* each output octet as 10 bits. The actual frequency
* is stored as a divider into a 100MHz clock, and the
......@@ -4451,6 +4428,14 @@ static int ironlake_crtc_mode_set(struct drm_crtc *crtc,
link_bw = intel_fdi_link_freq(dev) * MHz(100)/KHz(1)/10;
}
/* [e]DP over FDI requires target mode clock instead of link clock. */
if (edp_encoder)
target_clock = intel_edp_target_clock(edp_encoder, mode);
else if (is_dp)
target_clock = mode->clock;
else
target_clock = adjusted_mode->clock;
/* determine panel color depth */
temp = I915_READ(PIPECONF(pipe));
temp &= ~PIPE_BPC_MASK;
......@@ -4662,16 +4647,8 @@ static int ironlake_crtc_mode_set(struct drm_crtc *crtc,
if (is_lvds && has_reduced_clock && i915_powersave) {
I915_WRITE(intel_crtc->pch_pll->fp1_reg, fp2);
intel_crtc->lowfreq_avail = true;
if (HAS_PIPE_CXSR(dev)) {
DRM_DEBUG_KMS("enabling CxSR downclocking\n");
pipeconf |= PIPECONF_CXSR_DOWNCLOCK;
}
} else {
I915_WRITE(intel_crtc->pch_pll->fp1_reg, fp);
if (HAS_PIPE_CXSR(dev)) {
DRM_DEBUG_KMS("disabling CxSR downclocking\n");
pipeconf &= ~PIPECONF_CXSR_DOWNCLOCK;
}
}
}
......
......@@ -155,6 +155,18 @@ intel_edp_link_config(struct intel_encoder *intel_encoder,
*link_bw = 270000;
}
int
intel_edp_target_clock(struct intel_encoder *intel_encoder,
struct drm_display_mode *mode)
{
struct intel_dp *intel_dp = container_of(intel_encoder, struct intel_dp, base);
if (intel_dp->panel_fixed_mode)
return intel_dp->panel_fixed_mode->clock;
else
return mode->clock;
}
static int
intel_dp_max_lane_count(struct intel_dp *intel_dp)
{
......@@ -225,7 +237,7 @@ intel_dp_max_data_rate(int max_link_clock, int max_lanes)
static bool
intel_dp_adjust_dithering(struct intel_dp *intel_dp,
struct drm_display_mode *mode,
struct drm_display_mode *adjusted_mode)
bool adjust_mode)
{
int max_link_clock = intel_dp_link_clock(intel_dp_max_link_bw(intel_dp));
int max_lanes = intel_dp_max_lane_count(intel_dp);
......@@ -239,8 +251,8 @@ intel_dp_adjust_dithering(struct intel_dp *intel_dp,
if (mode_rate > max_rate)
return false;
if (adjusted_mode)
adjusted_mode->private_flags
if (adjust_mode)
mode->private_flags
|= INTEL_MODE_DP_FORCE_6BPC;
return true;
......@@ -263,7 +275,7 @@ intel_dp_mode_valid(struct drm_connector *connector,
return MODE_PANEL;
}
if (!intel_dp_adjust_dithering(intel_dp, mode, NULL))
if (!intel_dp_adjust_dithering(intel_dp, mode, false))
return MODE_CLOCK_HIGH;
if (mode->clock < 10000)
......@@ -706,25 +718,20 @@ intel_dp_mode_fixup(struct drm_encoder *encoder, struct drm_display_mode *mode,
intel_fixed_panel_mode(intel_dp->panel_fixed_mode, adjusted_mode);
intel_pch_panel_fitting(dev, DRM_MODE_SCALE_FULLSCREEN,
mode, adjusted_mode);
/*
* the mode->clock is used to calculate the Data&Link M/N
* of the pipe. For the eDP the fixed clock should be used.
*/
mode->clock = intel_dp->panel_fixed_mode->clock;
}
if (mode->flags & DRM_MODE_FLAG_DBLCLK)
if (adjusted_mode->flags & DRM_MODE_FLAG_DBLCLK)
return false;
DRM_DEBUG_KMS("DP link computation with max lane count %i "
"max bw %02x pixel clock %iKHz\n",
max_lane_count, bws[max_clock], mode->clock);
max_lane_count, bws[max_clock], adjusted_mode->clock);
if (!intel_dp_adjust_dithering(intel_dp, mode, adjusted_mode))
if (!intel_dp_adjust_dithering(intel_dp, adjusted_mode, true))
return false;
bpp = adjusted_mode->private_flags & INTEL_MODE_DP_FORCE_6BPC ? 18 : 24;
mode_rate = intel_dp_link_required(mode->clock, bpp);
mode_rate = intel_dp_link_required(adjusted_mode->clock, bpp);
for (lane_count = 1; lane_count <= max_lane_count; lane_count <<= 1) {
for (clock = 0; clock <= max_clock; clock++) {
......@@ -1922,7 +1929,7 @@ intel_dp_link_down(struct intel_dp *intel_dp)
DP |= DP_LINK_TRAIN_OFF;
}
if (!HAS_PCH_CPT(dev) &&
if (HAS_PCH_IBX(dev) &&
I915_READ(intel_dp->output_reg) & DP_PIPEB_SELECT) {
struct drm_crtc *crtc = intel_dp->base.base.crtc;
......@@ -2099,25 +2106,23 @@ g4x_dp_detect(struct intel_dp *intel_dp)
{
struct drm_device *dev = intel_dp->base.base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
uint32_t temp, bit;
uint32_t bit;
switch (intel_dp->output_reg) {
case DP_B:
bit = DPB_HOTPLUG_INT_STATUS;
bit = DPB_HOTPLUG_LIVE_STATUS;
break;
case DP_C:
bit = DPC_HOTPLUG_INT_STATUS;
bit = DPC_HOTPLUG_LIVE_STATUS;
break;
case DP_D:
bit = DPD_HOTPLUG_INT_STATUS;
bit = DPD_HOTPLUG_LIVE_STATUS;
break;
default:
return connector_status_unknown;
}
temp = I915_READ(PORT_HOTPLUG_STAT);
if ((temp & bit) == 0)
if ((I915_READ(PORT_HOTPLUG_STAT) & bit) == 0)
return connector_status_disconnected;
return intel_dp_detect_dpcd(intel_dp);
......@@ -2520,19 +2525,19 @@ intel_dp_init(struct drm_device *dev, int output_reg)
case DP_B:
case PCH_DP_B:
dev_priv->hotplug_supported_mask |=
HDMIB_HOTPLUG_INT_STATUS;
DPB_HOTPLUG_INT_STATUS;
name = "DPDDC-B";
break;
case DP_C:
case PCH_DP_C:
dev_priv->hotplug_supported_mask |=
HDMIC_HOTPLUG_INT_STATUS;
DPC_HOTPLUG_INT_STATUS;
name = "DPDDC-C";
break;
case DP_D:
case PCH_DP_D:
dev_priv->hotplug_supported_mask |=
HDMID_HOTPLUG_INT_STATUS;
DPD_HOTPLUG_INT_STATUS;
name = "DPDDC-D";
break;
}
......
......@@ -301,6 +301,8 @@ struct intel_hdmi {
enum hdmi_force_audio force_audio;
void (*write_infoframe)(struct drm_encoder *encoder,
struct dip_infoframe *frame);
void (*set_infoframes)(struct drm_encoder *encoder,
struct drm_display_mode *adjusted_mode);
};
static inline struct drm_crtc *
......@@ -343,9 +345,6 @@ extern void intel_attach_broadcast_rgb_property(struct drm_connector *connector)
extern void intel_crt_init(struct drm_device *dev);
extern void intel_hdmi_init(struct drm_device *dev, int sdvox_reg);
extern struct intel_hdmi *enc_to_intel_hdmi(struct drm_encoder *encoder);
extern void intel_hdmi_set_avi_infoframe(struct drm_encoder *encoder,
struct drm_display_mode *adjusted_mode);
extern void intel_hdmi_set_spd_infoframe(struct drm_encoder *encoder);
extern void intel_dip_infoframe_csum(struct dip_infoframe *avi_if);
extern bool intel_sdvo_init(struct drm_device *dev, uint32_t sdvo_reg,
bool is_sdvob);
......@@ -360,6 +359,8 @@ intel_dp_set_m_n(struct drm_crtc *crtc, struct drm_display_mode *mode,
struct drm_display_mode *adjusted_mode);
extern bool intel_dpd_is_edp(struct drm_device *dev);
extern void intel_edp_link_config(struct intel_encoder *, int *, int *);
extern int intel_edp_target_clock(struct intel_encoder *,
struct drm_display_mode *mode);
extern bool intel_encoder_is_pch_edp(struct drm_encoder *encoder);
extern int intel_plane_init(struct drm_device *dev, enum pipe pipe);
extern void intel_flush_display_plane(struct drm_i915_private *dev_priv,
......@@ -372,7 +373,7 @@ extern void intel_fixed_panel_mode(struct drm_display_mode *fixed_mode,
struct drm_display_mode *adjusted_mode);
extern void intel_pch_panel_fitting(struct drm_device *dev,
int fitting_mode,
struct drm_display_mode *mode,
const struct drm_display_mode *mode,
struct drm_display_mode *adjusted_mode);
extern u32 intel_panel_get_max_backlight(struct drm_device *dev);
extern u32 intel_panel_get_backlight(struct drm_device *dev);
......
......@@ -121,36 +121,31 @@ static void g4x_write_infoframe(struct drm_encoder *encoder,
uint32_t *data = (uint32_t *)frame;
struct drm_device *dev = encoder->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(encoder);
u32 val = I915_READ(VIDEO_DIP_CTL);
unsigned i, len = DIP_HEADER_SIZE + frame->len;
val &= ~VIDEO_DIP_PORT_MASK;
if (intel_hdmi->sdvox_reg == SDVOB)
val |= VIDEO_DIP_PORT_B;
else if (intel_hdmi->sdvox_reg == SDVOC)
val |= VIDEO_DIP_PORT_C;
else
return;
WARN(!(val & VIDEO_DIP_ENABLE), "Writing DIP with CTL reg disabled\n");
val &= ~(VIDEO_DIP_SELECT_MASK | 0xf); /* clear DIP data offset */
val |= g4x_infoframe_index(frame);
val &= ~g4x_infoframe_enable(frame);
val |= VIDEO_DIP_ENABLE;
I915_WRITE(VIDEO_DIP_CTL, val);
mmiowb();
for (i = 0; i < len; i += 4) {
I915_WRITE(VIDEO_DIP_DATA, *data);
data++;
}
mmiowb();
val |= g4x_infoframe_enable(frame);
val &= ~VIDEO_DIP_FREQ_MASK;
val |= VIDEO_DIP_FREQ_VSYNC;
I915_WRITE(VIDEO_DIP_CTL, val);
POSTING_READ(VIDEO_DIP_CTL);
}
static void ibx_write_infoframe(struct drm_encoder *encoder,
......@@ -160,46 +155,32 @@ static void ibx_write_infoframe(struct drm_encoder *encoder,
struct drm_device *dev = encoder->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_crtc *intel_crtc = to_intel_crtc(encoder->crtc);
struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(encoder);
int reg = TVIDEO_DIP_CTL(intel_crtc->pipe);
unsigned i, len = DIP_HEADER_SIZE + frame->len;
u32 val = I915_READ(reg);
val &= ~VIDEO_DIP_PORT_MASK;
switch (intel_hdmi->sdvox_reg) {
case HDMIB:
val |= VIDEO_DIP_PORT_B;
break;
case HDMIC:
val |= VIDEO_DIP_PORT_C;
break;
case HDMID:
val |= VIDEO_DIP_PORT_D;
break;
default:
return;
}
intel_wait_for_vblank(dev, intel_crtc->pipe);
WARN(!(val & VIDEO_DIP_ENABLE), "Writing DIP with CTL reg disabled\n");
val &= ~(VIDEO_DIP_SELECT_MASK | 0xf); /* clear DIP data offset */
val |= g4x_infoframe_index(frame);
val &= ~g4x_infoframe_enable(frame);
val |= VIDEO_DIP_ENABLE;
I915_WRITE(reg, val);
mmiowb();
for (i = 0; i < len; i += 4) {
I915_WRITE(TVIDEO_DIP_DATA(intel_crtc->pipe), *data);
data++;
}
mmiowb();
val |= g4x_infoframe_enable(frame);
val &= ~VIDEO_DIP_FREQ_MASK;
val |= VIDEO_DIP_FREQ_VSYNC;
I915_WRITE(reg, val);
POSTING_READ(reg);
}
static void cpt_write_infoframe(struct drm_encoder *encoder,
......@@ -213,32 +194,31 @@ static void cpt_write_infoframe(struct drm_encoder *encoder,
unsigned i, len = DIP_HEADER_SIZE + frame->len;
u32 val = I915_READ(reg);
intel_wait_for_vblank(dev, intel_crtc->pipe);
WARN(!(val & VIDEO_DIP_ENABLE), "Writing DIP with CTL reg disabled\n");
val &= ~(VIDEO_DIP_SELECT_MASK | 0xf); /* clear DIP data offset */
val |= g4x_infoframe_index(frame);
/* The DIP control register spec says that we need to update the AVI
* infoframe without clearing its enable bit */
if (frame->type == DIP_TYPE_AVI)
val |= VIDEO_DIP_ENABLE_AVI;
else
if (frame->type != DIP_TYPE_AVI)
val &= ~g4x_infoframe_enable(frame);
val |= VIDEO_DIP_ENABLE;
I915_WRITE(reg, val);
mmiowb();
for (i = 0; i < len; i += 4) {
I915_WRITE(TVIDEO_DIP_DATA(intel_crtc->pipe), *data);
data++;
}
mmiowb();
val |= g4x_infoframe_enable(frame);
val &= ~VIDEO_DIP_FREQ_MASK;
val |= VIDEO_DIP_FREQ_VSYNC;
I915_WRITE(reg, val);
POSTING_READ(reg);
}
static void vlv_write_infoframe(struct drm_encoder *encoder,
......@@ -252,26 +232,28 @@ static void vlv_write_infoframe(struct drm_encoder *encoder,
unsigned i, len = DIP_HEADER_SIZE + frame->len;
u32 val = I915_READ(reg);
intel_wait_for_vblank(dev, intel_crtc->pipe);
WARN(!(val & VIDEO_DIP_ENABLE), "Writing DIP with CTL reg disabled\n");
val &= ~(VIDEO_DIP_SELECT_MASK | 0xf); /* clear DIP data offset */
val |= g4x_infoframe_index(frame);
val &= ~g4x_infoframe_enable(frame);
val |= VIDEO_DIP_ENABLE;
I915_WRITE(reg, val);
mmiowb();
for (i = 0; i < len; i += 4) {
I915_WRITE(VLV_TVIDEO_DIP_DATA(intel_crtc->pipe), *data);
data++;
}
mmiowb();
val |= g4x_infoframe_enable(frame);
val &= ~VIDEO_DIP_FREQ_MASK;
val |= VIDEO_DIP_FREQ_VSYNC;
I915_WRITE(reg, val);
POSTING_READ(reg);
}
static void hsw_write_infoframe(struct drm_encoder *encoder,
......@@ -289,18 +271,19 @@ static void hsw_write_infoframe(struct drm_encoder *encoder,
if (data_reg == 0)
return;
intel_wait_for_vblank(dev, intel_crtc->pipe);
val &= ~hsw_infoframe_enable(frame);
I915_WRITE(ctl_reg, val);
mmiowb();
for (i = 0; i < len; i += 4) {
I915_WRITE(data_reg + i, *data);
data++;
}
mmiowb();
val |= hsw_infoframe_enable(frame);
I915_WRITE(ctl_reg, val);
POSTING_READ(ctl_reg);
}
static void intel_set_infoframe(struct drm_encoder *encoder,
......@@ -308,14 +291,11 @@ static void intel_set_infoframe(struct drm_encoder *encoder,
{
struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(encoder);
if (!intel_hdmi->has_hdmi_sink)
return;
intel_dip_infoframe_csum(frame);
intel_hdmi->write_infoframe(encoder, frame);
}
void intel_hdmi_set_avi_infoframe(struct drm_encoder *encoder,
static void intel_hdmi_set_avi_infoframe(struct drm_encoder *encoder,
struct drm_display_mode *adjusted_mode)
{
struct dip_infoframe avi_if = {
......@@ -330,7 +310,7 @@ void intel_hdmi_set_avi_infoframe(struct drm_encoder *encoder,
intel_set_infoframe(encoder, &avi_if);
}
void intel_hdmi_set_spd_infoframe(struct drm_encoder *encoder)
static void intel_hdmi_set_spd_infoframe(struct drm_encoder *encoder)
{
struct dip_infoframe spd_if;
......@@ -345,6 +325,213 @@ void intel_hdmi_set_spd_infoframe(struct drm_encoder *encoder)
intel_set_infoframe(encoder, &spd_if);
}
static void g4x_set_infoframes(struct drm_encoder *encoder,
struct drm_display_mode *adjusted_mode)
{
struct drm_i915_private *dev_priv = encoder->dev->dev_private;
struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(encoder);
u32 reg = VIDEO_DIP_CTL;
u32 val = I915_READ(reg);
u32 port;
/* If the registers were not initialized yet, they might be zeroes,
* which means we're selecting the AVI DIP and we're setting its
* frequency to once. This seems to really confuse the HW and make
* things stop working (the register spec says the AVI always needs to
* be sent every VSync). So here we avoid writing to the register more
* than we need and also explicitly select the AVI DIP and explicitly
* set its frequency to every VSync. Avoiding to write it twice seems to
* be enough to solve the problem, but being defensive shouldn't hurt us
* either. */
val |= VIDEO_DIP_SELECT_AVI | VIDEO_DIP_FREQ_VSYNC;
if (!intel_hdmi->has_hdmi_sink) {
if (!(val & VIDEO_DIP_ENABLE))
return;
val &= ~VIDEO_DIP_ENABLE;
I915_WRITE(reg, val);
POSTING_READ(reg);
return;
}
switch (intel_hdmi->sdvox_reg) {
case SDVOB:
port = VIDEO_DIP_PORT_B;
break;
case SDVOC:
port = VIDEO_DIP_PORT_C;
break;
default:
return;
}
if (port != (val & VIDEO_DIP_PORT_MASK)) {
if (val & VIDEO_DIP_ENABLE) {
val &= ~VIDEO_DIP_ENABLE;
I915_WRITE(reg, val);
POSTING_READ(reg);
}
val &= ~VIDEO_DIP_PORT_MASK;
val |= port;
}
val |= VIDEO_DIP_ENABLE;
val &= ~VIDEO_DIP_ENABLE_VENDOR;
I915_WRITE(reg, val);
POSTING_READ(reg);
intel_hdmi_set_avi_infoframe(encoder, adjusted_mode);
intel_hdmi_set_spd_infoframe(encoder);
}
static void ibx_set_infoframes(struct drm_encoder *encoder,
struct drm_display_mode *adjusted_mode)
{
struct drm_i915_private *dev_priv = encoder->dev->dev_private;
struct intel_crtc *intel_crtc = to_intel_crtc(encoder->crtc);
struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(encoder);
u32 reg = TVIDEO_DIP_CTL(intel_crtc->pipe);
u32 val = I915_READ(reg);
u32 port;
/* See the big comment in g4x_set_infoframes() */
val |= VIDEO_DIP_SELECT_AVI | VIDEO_DIP_FREQ_VSYNC;
if (!intel_hdmi->has_hdmi_sink) {
if (!(val & VIDEO_DIP_ENABLE))
return;
val &= ~VIDEO_DIP_ENABLE;
I915_WRITE(reg, val);
POSTING_READ(reg);
return;
}
switch (intel_hdmi->sdvox_reg) {
case HDMIB:
port = VIDEO_DIP_PORT_B;
break;
case HDMIC:
port = VIDEO_DIP_PORT_C;
break;
case HDMID:
port = VIDEO_DIP_PORT_D;
break;
default:
return;
}
if (port != (val & VIDEO_DIP_PORT_MASK)) {
if (val & VIDEO_DIP_ENABLE) {
val &= ~VIDEO_DIP_ENABLE;
I915_WRITE(reg, val);
POSTING_READ(reg);
}
val &= ~VIDEO_DIP_PORT_MASK;
val |= port;
}
val |= VIDEO_DIP_ENABLE;
val &= ~(VIDEO_DIP_ENABLE_VENDOR | VIDEO_DIP_ENABLE_GAMUT |
VIDEO_DIP_ENABLE_GCP);
I915_WRITE(reg, val);
POSTING_READ(reg);
intel_hdmi_set_avi_infoframe(encoder, adjusted_mode);
intel_hdmi_set_spd_infoframe(encoder);
}
static void cpt_set_infoframes(struct drm_encoder *encoder,
struct drm_display_mode *adjusted_mode)
{
struct drm_i915_private *dev_priv = encoder->dev->dev_private;
struct intel_crtc *intel_crtc = to_intel_crtc(encoder->crtc);
struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(encoder);
u32 reg = TVIDEO_DIP_CTL(intel_crtc->pipe);
u32 val = I915_READ(reg);
/* See the big comment in g4x_set_infoframes() */
val |= VIDEO_DIP_SELECT_AVI | VIDEO_DIP_FREQ_VSYNC;
if (!intel_hdmi->has_hdmi_sink) {
if (!(val & VIDEO_DIP_ENABLE))
return;
val &= ~(VIDEO_DIP_ENABLE | VIDEO_DIP_ENABLE_AVI);
I915_WRITE(reg, val);
POSTING_READ(reg);
return;
}
/* Set both together, unset both together: see the spec. */
val |= VIDEO_DIP_ENABLE | VIDEO_DIP_ENABLE_AVI;
val &= ~(VIDEO_DIP_ENABLE_VENDOR | VIDEO_DIP_ENABLE_GAMUT |
VIDEO_DIP_ENABLE_GCP);
I915_WRITE(reg, val);
POSTING_READ(reg);
intel_hdmi_set_avi_infoframe(encoder, adjusted_mode);
intel_hdmi_set_spd_infoframe(encoder);
}
static void vlv_set_infoframes(struct drm_encoder *encoder,
struct drm_display_mode *adjusted_mode)
{
struct drm_i915_private *dev_priv = encoder->dev->dev_private;
struct intel_crtc *intel_crtc = to_intel_crtc(encoder->crtc);
struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(encoder);
u32 reg = VLV_TVIDEO_DIP_CTL(intel_crtc->pipe);
u32 val = I915_READ(reg);
/* See the big comment in g4x_set_infoframes() */
val |= VIDEO_DIP_SELECT_AVI | VIDEO_DIP_FREQ_VSYNC;
if (!intel_hdmi->has_hdmi_sink) {
if (!(val & VIDEO_DIP_ENABLE))
return;
val &= ~VIDEO_DIP_ENABLE;
I915_WRITE(reg, val);
POSTING_READ(reg);
return;
}
val |= VIDEO_DIP_ENABLE;
val &= ~(VIDEO_DIP_ENABLE_VENDOR | VIDEO_DIP_ENABLE_GAMUT |
VIDEO_DIP_ENABLE_GCP);
I915_WRITE(reg, val);
POSTING_READ(reg);
intel_hdmi_set_avi_infoframe(encoder, adjusted_mode);
intel_hdmi_set_spd_infoframe(encoder);
}
static void hsw_set_infoframes(struct drm_encoder *encoder,
struct drm_display_mode *adjusted_mode)
{
struct drm_i915_private *dev_priv = encoder->dev->dev_private;
struct intel_crtc *intel_crtc = to_intel_crtc(encoder->crtc);
struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(encoder);
u32 reg = HSW_TVIDEO_DIP_CTL(intel_crtc->pipe);
u32 val = I915_READ(reg);
if (!intel_hdmi->has_hdmi_sink) {
I915_WRITE(reg, 0);
POSTING_READ(reg);
return;
}
val &= ~(VIDEO_DIP_ENABLE_VSC_HSW | VIDEO_DIP_ENABLE_GCP_HSW |
VIDEO_DIP_ENABLE_VS_HSW | VIDEO_DIP_ENABLE_GMP_HSW);
I915_WRITE(reg, val);
POSTING_READ(reg);
intel_hdmi_set_avi_infoframe(encoder, adjusted_mode);
intel_hdmi_set_spd_infoframe(encoder);
}
static void intel_hdmi_mode_set(struct drm_encoder *encoder,
struct drm_display_mode *mode,
struct drm_display_mode *adjusted_mode)
......@@ -355,7 +542,7 @@ static void intel_hdmi_mode_set(struct drm_encoder *encoder,
struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(encoder);
u32 sdvox;
sdvox = SDVO_ENCODING_HDMI | SDVO_BORDER_ENABLE;
sdvox = SDVO_ENCODING_HDMI;
if (!HAS_PCH_SPLIT(dev))
sdvox |= intel_hdmi->color_range;
if (adjusted_mode->flags & DRM_MODE_FLAG_PVSYNC)
......@@ -388,8 +575,7 @@ static void intel_hdmi_mode_set(struct drm_encoder *encoder,
I915_WRITE(intel_hdmi->sdvox_reg, sdvox);
POSTING_READ(intel_hdmi->sdvox_reg);
intel_hdmi_set_avi_infoframe(encoder, adjusted_mode);
intel_hdmi_set_spd_infoframe(encoder);
intel_hdmi->set_infoframes(encoder, adjusted_mode);
}
static void intel_hdmi_dpms(struct drm_encoder *encoder, int mode)
......@@ -452,6 +638,27 @@ static bool intel_hdmi_mode_fixup(struct drm_encoder *encoder,
return true;
}
static bool g4x_hdmi_connected(struct intel_hdmi *intel_hdmi)
{
struct drm_device *dev = intel_hdmi->base.base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
uint32_t bit;
switch (intel_hdmi->sdvox_reg) {
case SDVOB:
bit = HDMIB_HOTPLUG_LIVE_STATUS;
break;
case SDVOC:
bit = HDMIC_HOTPLUG_LIVE_STATUS;
break;
default:
bit = 0;
break;
}
return I915_READ(PORT_HOTPLUG_STAT) & bit;
}
static enum drm_connector_status
intel_hdmi_detect(struct drm_connector *connector, bool force)
{
......@@ -460,6 +667,9 @@ intel_hdmi_detect(struct drm_connector *connector, bool force)
struct edid *edid;
enum drm_connector_status status = connector_status_disconnected;
if (IS_G4X(connector->dev) && !g4x_hdmi_connected(intel_hdmi))
return status;
intel_hdmi->has_hdmi_sink = false;
intel_hdmi->has_audio = false;
edid = drm_get_edid(connector,
......@@ -633,7 +843,6 @@ void intel_hdmi_init(struct drm_device *dev, int sdvox_reg)
struct intel_encoder *intel_encoder;
struct intel_connector *intel_connector;
struct intel_hdmi *intel_hdmi;
int i;
intel_hdmi = kzalloc(sizeof(struct intel_hdmi), GFP_KERNEL);
if (!intel_hdmi)
......@@ -710,26 +919,19 @@ void intel_hdmi_init(struct drm_device *dev, int sdvox_reg)
if (!HAS_PCH_SPLIT(dev)) {
intel_hdmi->write_infoframe = g4x_write_infoframe;
I915_WRITE(VIDEO_DIP_CTL, 0);
intel_hdmi->set_infoframes = g4x_set_infoframes;
} else if (IS_VALLEYVIEW(dev)) {
intel_hdmi->write_infoframe = vlv_write_infoframe;
for_each_pipe(i)
I915_WRITE(VLV_TVIDEO_DIP_CTL(i), 0);
intel_hdmi->set_infoframes = vlv_set_infoframes;
} else if (IS_HASWELL(dev)) {
/* FIXME: Haswell has a new set of DIP frame registers, but we are
* just doing the minimal required for HDMI to work at this stage.
*/
intel_hdmi->write_infoframe = hsw_write_infoframe;
for_each_pipe(i)
I915_WRITE(HSW_TVIDEO_DIP_CTL(i), 0);
intel_hdmi->set_infoframes = hsw_set_infoframes;
} else if (HAS_PCH_IBX(dev)) {
intel_hdmi->write_infoframe = ibx_write_infoframe;
for_each_pipe(i)
I915_WRITE(TVIDEO_DIP_CTL(i), 0);
intel_hdmi->set_infoframes = ibx_set_infoframes;
} else {
intel_hdmi->write_infoframe = cpt_write_infoframe;
for_each_pipe(i)
I915_WRITE(TVIDEO_DIP_CTL(i), 0);
intel_hdmi->set_infoframes = cpt_set_infoframes;
}
if (IS_HASWELL(dev))
......
......@@ -226,7 +226,7 @@ static int intel_overlay_do_wait_request(struct intel_overlay *overlay,
}
overlay->last_flip_req = request->seqno;
overlay->flip_tail = tail;
ret = i915_wait_request(ring, overlay->last_flip_req);
ret = i915_wait_seqno(ring, overlay->last_flip_req);
if (ret)
return ret;
i915_gem_retire_requests(dev);
......@@ -452,7 +452,7 @@ static int intel_overlay_recover_from_interrupt(struct intel_overlay *overlay)
if (overlay->last_flip_req == 0)
return 0;
ret = i915_wait_request(ring, overlay->last_flip_req);
ret = i915_wait_seqno(ring, overlay->last_flip_req);
if (ret)
return ret;
i915_gem_retire_requests(dev);
......
......@@ -56,7 +56,7 @@ intel_fixed_panel_mode(struct drm_display_mode *fixed_mode,
void
intel_pch_panel_fitting(struct drm_device *dev,
int fitting_mode,
struct drm_display_mode *mode,
const struct drm_display_mode *mode,
struct drm_display_mode *adjusted_mode)
{
struct drm_i915_private *dev_priv = dev->dev_private;
......
......@@ -438,6 +438,9 @@ static int init_render_ring(struct intel_ring_buffer *ring)
if (INTEL_INFO(dev)->gen >= 6)
I915_WRITE(INSTPM, _MASKED_BIT_ENABLE(INSTPM_FORCE_ORDERING));
if (IS_IVYBRIDGE(dev))
I915_WRITE_IMR(ring, ~GEN6_RENDER_L3_PARITY_ERROR);
return ret;
}
......@@ -825,7 +828,11 @@ gen6_ring_get_irq(struct intel_ring_buffer *ring)
spin_lock_irqsave(&dev_priv->irq_lock, flags);
if (ring->irq_refcount++ == 0) {
I915_WRITE_IMR(ring, ~ring->irq_enable_mask);
if (IS_IVYBRIDGE(dev) && ring->id == RCS)
I915_WRITE_IMR(ring, ~(ring->irq_enable_mask |
GEN6_RENDER_L3_PARITY_ERROR));
else
I915_WRITE_IMR(ring, ~ring->irq_enable_mask);
dev_priv->gt_irq_mask &= ~ring->irq_enable_mask;
I915_WRITE(GTIMR, dev_priv->gt_irq_mask);
POSTING_READ(GTIMR);
......@@ -844,7 +851,10 @@ gen6_ring_put_irq(struct intel_ring_buffer *ring)
spin_lock_irqsave(&dev_priv->irq_lock, flags);
if (--ring->irq_refcount == 0) {
I915_WRITE_IMR(ring, ~0);
if (IS_IVYBRIDGE(dev) && ring->id == RCS)
I915_WRITE_IMR(ring, ~GEN6_RENDER_L3_PARITY_ERROR);
else
I915_WRITE_IMR(ring, ~0);
dev_priv->gt_irq_mask |= ring->irq_enable_mask;
I915_WRITE(GTIMR, dev_priv->gt_irq_mask);
POSTING_READ(GTIMR);
......@@ -1100,7 +1110,7 @@ static int intel_ring_wait_seqno(struct intel_ring_buffer *ring, u32 seqno)
was_interruptible = dev_priv->mm.interruptible;
dev_priv->mm.interruptible = false;
ret = i915_wait_request(ring, seqno);
ret = i915_wait_seqno(ring, seqno);
dev_priv->mm.interruptible = was_interruptible;
if (!ret)
......
......@@ -140,9 +140,6 @@ struct intel_sdvo {
/* DDC bus used by this SDVO encoder */
uint8_t ddc_bus;
/* Input timings for adjusted_mode */
struct intel_sdvo_dtd input_dtd;
};
struct intel_sdvo_connector {
......@@ -953,11 +950,15 @@ intel_sdvo_set_output_timings_from_mode(struct intel_sdvo *intel_sdvo,
return true;
}
/* Asks the sdvo controller for the preferred input mode given the output mode.
* Unfortunately we have to set up the full output mode to do that. */
static bool
intel_sdvo_set_input_timings_for_mode(struct intel_sdvo *intel_sdvo,
struct drm_display_mode *mode,
struct drm_display_mode *adjusted_mode)
intel_sdvo_get_preferred_input_mode(struct intel_sdvo *intel_sdvo,
struct drm_display_mode *mode,
struct drm_display_mode *adjusted_mode)
{
struct intel_sdvo_dtd input_dtd;
/* Reset the input timing to the screen. Assume always input 0. */
if (!intel_sdvo_set_target_input(intel_sdvo))
return false;
......@@ -969,10 +970,10 @@ intel_sdvo_set_input_timings_for_mode(struct intel_sdvo *intel_sdvo,
return false;
if (!intel_sdvo_get_preferred_input_timing(intel_sdvo,
&intel_sdvo->input_dtd))
&input_dtd))
return false;
intel_sdvo_get_mode_from_dtd(adjusted_mode, &intel_sdvo->input_dtd);
intel_sdvo_get_mode_from_dtd(adjusted_mode, &input_dtd);
return true;
}
......@@ -993,17 +994,17 @@ static bool intel_sdvo_mode_fixup(struct drm_encoder *encoder,
if (!intel_sdvo_set_output_timings_from_mode(intel_sdvo, mode))
return false;
(void) intel_sdvo_set_input_timings_for_mode(intel_sdvo,
mode,
adjusted_mode);
(void) intel_sdvo_get_preferred_input_mode(intel_sdvo,
mode,
adjusted_mode);
} else if (intel_sdvo->is_lvds) {
if (!intel_sdvo_set_output_timings_from_mode(intel_sdvo,
intel_sdvo->sdvo_lvds_fixed_mode))
return false;
(void) intel_sdvo_set_input_timings_for_mode(intel_sdvo,
mode,
adjusted_mode);
(void) intel_sdvo_get_preferred_input_mode(intel_sdvo,
mode,
adjusted_mode);
}
/* Make the CRTC code factor in the SDVO pixel multiplier. The
......@@ -1057,7 +1058,9 @@ static void intel_sdvo_mode_set(struct drm_encoder *encoder,
intel_sdvo->sdvo_lvds_fixed_mode);
else
intel_sdvo_get_dtd_from_mode(&output_dtd, mode);
(void) intel_sdvo_set_output_timing(intel_sdvo, &output_dtd);
if (!intel_sdvo_set_output_timing(intel_sdvo, &output_dtd))
DRM_INFO("Setting output timings on %s failed\n",
SDVO_NAME(intel_sdvo));
/* Set the input timing to the screen. Assume always input 0. */
if (!intel_sdvo_set_target_input(intel_sdvo))
......@@ -1079,7 +1082,9 @@ static void intel_sdvo_mode_set(struct drm_encoder *encoder,
* adjusted_mode.
*/
intel_sdvo_get_dtd_from_mode(&input_dtd, adjusted_mode);
(void) intel_sdvo_set_input_timing(intel_sdvo, &input_dtd);
if (!intel_sdvo_set_input_timing(intel_sdvo, &input_dtd))
DRM_INFO("Setting input timings on %s failed\n",
SDVO_NAME(intel_sdvo));
switch (pixel_multiplier) {
default:
......@@ -1376,7 +1381,7 @@ intel_sdvo_detect(struct drm_connector *connector, bool force)
/* add 30ms delay when the output type might be TV */
if (intel_sdvo->caps.output_flags & SDVO_TV_MASK)
mdelay(30);
msleep(30);
if (!intel_sdvo_read_response(intel_sdvo, &response, 2))
return connector_status_unknown;
......@@ -2521,6 +2526,7 @@ bool intel_sdvo_init(struct drm_device *dev, uint32_t sdvo_reg, bool is_sdvob)
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_encoder *intel_encoder;
struct intel_sdvo *intel_sdvo;
u32 hotplug_mask;
int i;
intel_sdvo = kzalloc(sizeof(struct intel_sdvo), GFP_KERNEL);
......@@ -2552,10 +2558,18 @@ bool intel_sdvo_init(struct drm_device *dev, uint32_t sdvo_reg, bool is_sdvob)
}
}
if (intel_sdvo->is_sdvob)
dev_priv->hotplug_supported_mask |= SDVOB_HOTPLUG_INT_STATUS;
else
dev_priv->hotplug_supported_mask |= SDVOC_HOTPLUG_INT_STATUS;
hotplug_mask = 0;
if (IS_G4X(dev)) {
hotplug_mask = intel_sdvo->is_sdvob ?
SDVOB_HOTPLUG_INT_STATUS_G4X : SDVOC_HOTPLUG_INT_STATUS_G4X;
} else if (IS_GEN4(dev)) {
hotplug_mask = intel_sdvo->is_sdvob ?
SDVOB_HOTPLUG_INT_STATUS_I965 : SDVOC_HOTPLUG_INT_STATUS_I965;
} else {
hotplug_mask = intel_sdvo->is_sdvob ?
SDVOB_HOTPLUG_INT_STATUS_I915 : SDVOC_HOTPLUG_INT_STATUS_I915;
}
dev_priv->hotplug_supported_mask |= hotplug_mask;
drm_encoder_helper_add(&intel_encoder->base, &intel_sdvo_helper_funcs);
......
......@@ -200,6 +200,7 @@ typedef struct _drm_i915_sarea {
#define DRM_I915_GEM_EXECBUFFER2 0x29
#define DRM_I915_GET_SPRITE_COLORKEY 0x2a
#define DRM_I915_SET_SPRITE_COLORKEY 0x2b
#define DRM_I915_GEM_WAIT 0x2c
#define DRM_IOCTL_I915_INIT DRM_IOW( DRM_COMMAND_BASE + DRM_I915_INIT, drm_i915_init_t)
#define DRM_IOCTL_I915_FLUSH DRM_IO ( DRM_COMMAND_BASE + DRM_I915_FLUSH)
......@@ -243,6 +244,7 @@ typedef struct _drm_i915_sarea {
#define DRM_IOCTL_I915_OVERLAY_ATTRS DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_OVERLAY_ATTRS, struct drm_intel_overlay_attrs)
#define DRM_IOCTL_I915_SET_SPRITE_COLORKEY DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_SET_SPRITE_COLORKEY, struct drm_intel_sprite_colorkey)
#define DRM_IOCTL_I915_GET_SPRITE_COLORKEY DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_SET_SPRITE_COLORKEY, struct drm_intel_sprite_colorkey)
#define DRM_IOCTL_I915_GEM_WAIT DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_GEM_WAIT, struct drm_i915_gem_wait)
/* Allow drivers to submit batchbuffers directly to hardware, relying
* on the security mechanisms provided by hardware.
......@@ -886,4 +888,12 @@ struct drm_intel_sprite_colorkey {
__u32 flags;
};
struct drm_i915_gem_wait {
/** Handle of BO we shall wait on */
__u32 bo_handle;
__u32 flags;
/** Number of nanoseconds to wait. Returns the time remaining. */
__u64 timeout_ns;
};
#endif /* _I915_DRM_H_ */
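
To round out the uapi addition, here is a hedged sketch of a consumer: open the card node, create a scratch buffer object, and wait on it with a one-second budget via the new ioctl. The device path, the use of libdrm's drmIoctl() wrapper, and the scratch-object dance are assumptions for illustration; only the struct layout and error semantics come from the patch itself.

/* Hypothetical consumer of the new ioctl: create a scratch bo and wait up to
 * one second for it to go idle. Error semantics follow the kerneldoc above. */
#include <fcntl.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <xf86drm.h>		/* drmIoctl(), from libdrm */
#include "i915_drm.h"		/* the struct/ioctl numbers added in this patch */

static int wait_bo_idle(int fd, uint32_t handle)
{
	struct drm_i915_gem_wait wait;

	memset(&wait, 0, sizeof(wait));
	wait.bo_handle = handle;
	wait.timeout_ns = 1000000000ull;	/* 1 s; the kernel writes back what's left */

	if (drmIoctl(fd, DRM_IOCTL_I915_GEM_WAIT, &wait) != 0) {
		perror("DRM_IOCTL_I915_GEM_WAIT");	/* ETIME means still busy */
		return -1;
	}
	printf("object idle, %lld ns of the budget left\n",
	       (long long)wait.timeout_ns);
	return 0;
}

int main(void)
{
	struct drm_i915_gem_create create = { .size = 4096 };
	int fd = open("/dev/dri/card0", O_RDWR);	/* assumed device node */

	if (fd < 0 || drmIoctl(fd, DRM_IOCTL_I915_GEM_CREATE, &create) != 0)
		return 1;
	wait_bo_idle(fd, create.handle);
	close(fd);
	return 0;
}

A zero timeout_ns degenerates into a busy query (the kernel returns -ETIME immediately if the object is still active), matching the "reimplements the busy ioctl" note in the i915_gem_wait_ioctl kerneldoc.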