Commit f3c58cee authored by Dave Airlie

Merge branch 'for-linux-next' of git://people.freedesktop.org/~danvet/drm-intel into drm-next

Daniel writes:
A few Intel fixes for smaller issues and one revert for an SDV hack which
we've wanted to kill anyway, plus two drm patches included for your
convenience; both are regression fixes for my own screw-ups.

+ both fixes for stolen mem handling.

* 'for-linux-next' of git://people.freedesktop.org/~danvet/drm-intel:
  drm/i915: clear the stolen fb before resuming
  Revert "drm/i915: Calculate correct stolen size for GEN7+"
  drm/i915: hsw: fix link training for eDP on port-A
  Revert "drm/i915: revert eDP bpp clamping code changes"
  drm: don't check modeset locks in panic handler
  drm/i915: Fix pipe enabled mask for pipe C in WM calculations
  drm/mm: fix dump table BUG
  drm/i915: Always normalize return timeout for wait_timeout_ioctl
parents 307b9c02 1ffc5289
@@ -78,6 +78,10 @@ void drm_warn_on_modeset_not_all_locked(struct drm_device *dev)
 {
        struct drm_crtc *crtc;
 
+       /* Locking is currently fubar in the panic handler. */
+       if (oops_in_progress)
+               return;
+
        list_for_each_entry(crtc, &dev->mode_config.crtc_list, head)
                WARN_ON(!mutex_is_locked(&crtc->mutex));
...
@@ -755,33 +755,35 @@ void drm_mm_debug_table(struct drm_mm *mm, const char *prefix)
 EXPORT_SYMBOL(drm_mm_debug_table);
 
 #if defined(CONFIG_DEBUG_FS)
-int drm_mm_dump_table(struct seq_file *m, struct drm_mm *mm)
+static unsigned long drm_mm_dump_hole(struct seq_file *m, struct drm_mm_node *entry)
 {
-       struct drm_mm_node *entry;
-       unsigned long total_used = 0, total_free = 0, total = 0;
        unsigned long hole_start, hole_end, hole_size;
 
-       hole_start = drm_mm_hole_node_start(&mm->head_node);
-       hole_end = drm_mm_hole_node_end(&mm->head_node);
-       hole_size = hole_end - hole_start;
-       if (hole_size)
+       if (entry->hole_follows) {
+               hole_start = drm_mm_hole_node_start(entry);
+               hole_end = drm_mm_hole_node_end(entry);
+               hole_size = hole_end - hole_start;
                seq_printf(m, "0x%08lx-0x%08lx: 0x%08lx: free\n",
                                hole_start, hole_end, hole_size);
-       total_free += hole_size;
+               return hole_size;
+       }
+
+       return 0;
+}
+
+int drm_mm_dump_table(struct seq_file *m, struct drm_mm *mm)
+{
+       struct drm_mm_node *entry;
+       unsigned long total_used = 0, total_free = 0, total = 0;
+
+       total_free += drm_mm_dump_hole(m, &mm->head_node);
 
        drm_mm_for_each_node(entry, mm) {
                seq_printf(m, "0x%08lx-0x%08lx: 0x%08lx: used\n",
                                entry->start, entry->start + entry->size,
                                entry->size);
                total_used += entry->size;
-               if (entry->hole_follows) {
-                       hole_start = drm_mm_hole_node_start(entry);
-                       hole_end = drm_mm_hole_node_end(entry);
-                       hole_size = hole_end - hole_start;
-                       seq_printf(m, "0x%08lx-0x%08lx: 0x%08lx: free\n",
-                                       hole_start, hole_end, hole_size);
-                       total_free += hole_size;
-               }
+               total_free += drm_mm_dump_hole(m, entry);
        }
        total = total_free + total_used;
...
@@ -1045,6 +1045,8 @@ static int __wait_seqno(struct intel_ring_buffer *ring, u32 seqno,
 
        if (timeout) {
                struct timespec sleep_time = timespec_sub(now, before);
                *timeout = timespec_sub(*timeout, sleep_time);
+               if (!timespec_valid(timeout)) /* i.e. negative time remains */
+                       set_normalized_timespec(timeout, 0, 0);
        }
 
        switch (end) {
@@ -1053,8 +1055,6 @@ static int __wait_seqno(struct intel_ring_buffer *ring, u32 seqno,
        case -ERESTARTSYS: /* Signal */
                return (int)end;
        case 0: /* Timeout */
-               if (timeout)
-                       set_normalized_timespec(timeout, 0, 0);
                return -ETIME;
        default: /* Completed */
                WARN_ON(end < 0); /* We're not aware of other errors */
@@ -2377,10 +2377,8 @@ i915_gem_wait_ioctl(struct drm_device *dev, void *data, struct drm_file *file)
        mutex_unlock(&dev->struct_mutex);
 
        ret = __wait_seqno(ring, seqno, reset_counter, true, timeout);
-       if (timeout) {
-               WARN_ON(!timespec_valid(timeout));
+       if (timeout)
                args->timeout_ns = timespec_to_ns(timeout);
-       }
 
        return ret;
 
 out:
...
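For context on the wait_timeout change above, here is a hedged userspace sketch (not part of this series) of how DRM_IOCTL_I915_GEM_WAIT is typically driven: the kernel writes the remaining time back into timeout_ns, and with this fix the written-back value is normalized so it can no longer be negative. The helper name and error handling are illustrative only.

#include <stdint.h>
#include <stdio.h>
#include <xf86drm.h>        /* drmIoctl(), from libdrm */
#include <drm/i915_drm.h>   /* struct drm_i915_gem_wait, DRM_IOCTL_I915_GEM_WAIT */

/* Illustrative helper: wait on a GEM buffer object with a timeout. */
static int wait_on_bo(int fd, uint32_t handle, int64_t timeout_ns)
{
        struct drm_i915_gem_wait wait = {
                .bo_handle  = handle,
                .timeout_ns = timeout_ns,
        };
        int ret = drmIoctl(fd, DRM_IOCTL_I915_GEM_WAIT, &wait);

        /* The kernel updates timeout_ns with the time remaining; after the
         * fix above it is never negative, even when the wait times out
         * (drmIoctl() returns -1 with errno == ETIME). */
        printf("remaining: %lld ns\n", (long long)wait.timeout_ns);
        return ret;
}
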
@@ -709,15 +709,6 @@ static inline size_t gen6_get_stolen_size(u16 snb_gmch_ctl)
        return snb_gmch_ctl << 25; /* 32 MB units */
 }
 
-static inline size_t gen7_get_stolen_size(u16 snb_gmch_ctl)
-{
-       static const int stolen_decoder[] = {
-               0, 0, 0, 0, 0, 32, 48, 64, 128, 256, 96, 160, 224, 352};
-       snb_gmch_ctl >>= IVB_GMCH_GMS_SHIFT;
-       snb_gmch_ctl &= IVB_GMCH_GMS_MASK;
-       return stolen_decoder[snb_gmch_ctl] << 20;
-}
-
 static int gen6_gmch_probe(struct drm_device *dev,
                           size_t *gtt_total,
                           size_t *stolen,
@@ -747,11 +738,7 @@ static int gen6_gmch_probe(struct drm_device *dev,
        pci_read_config_word(dev->pdev, SNB_GMCH_CTRL, &snb_gmch_ctl);
        gtt_size = gen6_get_total_gtt_size(snb_gmch_ctl);
 
-       if (IS_GEN7(dev) && !IS_VALLEYVIEW(dev))
-               *stolen = gen7_get_stolen_size(snb_gmch_ctl);
-       else
-               *stolen = gen6_get_stolen_size(snb_gmch_ctl);
+       *stolen = gen6_get_stolen_size(snb_gmch_ctl);
 
        *gtt_total = (gtt_size / sizeof(gen6_gtt_pte_t)) << PAGE_SHIFT;
 
        /* For Modern GENs the PTEs and register space are split in the BAR */
...
...@@ -46,8 +46,6 @@ ...@@ -46,8 +46,6 @@
#define SNB_GMCH_GGMS_MASK 0x3 #define SNB_GMCH_GGMS_MASK 0x3
#define SNB_GMCH_GMS_SHIFT 3 /* Graphics Mode Select */ #define SNB_GMCH_GMS_SHIFT 3 /* Graphics Mode Select */
#define SNB_GMCH_GMS_MASK 0x1f #define SNB_GMCH_GMS_MASK 0x1f
#define IVB_GMCH_GMS_SHIFT 4
#define IVB_GMCH_GMS_MASK 0xf
/* PCI config space */ /* PCI config space */
......
@@ -1265,6 +1265,8 @@ static void intel_ddi_pre_enable(struct intel_encoder *intel_encoder)
                intel_dp_sink_dpms(intel_dp, DRM_MODE_DPMS_ON);
                intel_dp_start_link_train(intel_dp);
                intel_dp_complete_link_train(intel_dp);
+               if (port != PORT_A)
+                       intel_dp_stop_link_train(intel_dp);
        }
 }
 
@@ -1326,6 +1328,9 @@ static void intel_enable_ddi(struct intel_encoder *intel_encoder)
        } else if (type == INTEL_OUTPUT_EDP) {
                struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
 
+               if (port == PORT_A)
+                       intel_dp_stop_link_train(intel_dp);
+
                ironlake_edp_backlight_on(intel_dp);
        }
...
@@ -702,6 +702,9 @@ intel_dp_compute_config(struct intel_encoder *encoder,
        /* Walk through all bpp values. Luckily they're all nicely spaced with 2
         * bpc in between. */
        bpp = min_t(int, 8*3, pipe_config->pipe_bpp);
+       if (is_edp(intel_dp) && dev_priv->edp.bpp)
+               bpp = min_t(int, bpp, dev_priv->edp.bpp);
+
        for (; bpp >= 6*3; bpp -= 2*3) {
                mode_rate = intel_dp_link_required(target_clock, bpp);
 
@@ -739,6 +742,7 @@ intel_dp_compute_config(struct intel_encoder *encoder,
        intel_dp->link_bw = bws[clock];
        intel_dp->lane_count = lane_count;
        adjusted_mode->clock = drm_dp_bw_code_to_link_rate(intel_dp->link_bw);
+       pipe_config->pipe_bpp = bpp;
        pipe_config->pixel_target_clock = target_clock;
 
        DRM_DEBUG_KMS("DP link bw %02x lane count %d clock %d bpp %d\n",
@@ -751,20 +755,6 @@ intel_dp_compute_config(struct intel_encoder *encoder,
                               target_clock, adjusted_mode->clock,
                               &pipe_config->dp_m_n);
 
-       /*
-        * XXX: We have a strange regression where using the vbt edp bpp value
-        * for the link bw computation results in black screens, the panel only
-        * works when we do the computation at the usual 24bpp (but still
-        * requires us to use 18bpp). Until that's fully debugged, stay
-        * bug-for-bug compatible with the old code.
-        */
-       if (is_edp(intel_dp) && dev_priv->edp.bpp) {
-               DRM_DEBUG_KMS("clamping display bpc (was %d) to eDP (%d)\n",
-                             bpp, dev_priv->edp.bpp);
-               bpp = min_t(int, bpp, dev_priv->edp.bpp);
-       }
-
-       pipe_config->pipe_bpp = bpp;
 
        return true;
 }
 
@@ -1389,6 +1379,7 @@ static void intel_enable_dp(struct intel_encoder *encoder)
        ironlake_edp_panel_on(intel_dp);
        ironlake_edp_panel_vdd_off(intel_dp, true);
        intel_dp_complete_link_train(intel_dp);
+       intel_dp_stop_link_train(intel_dp);
        ironlake_edp_backlight_on(intel_dp);
 }
 
@@ -1711,10 +1702,9 @@ intel_dp_set_link_train(struct intel_dp *intel_dp,
        struct drm_i915_private *dev_priv = dev->dev_private;
        enum port port = intel_dig_port->port;
        int ret;
-       uint32_t temp;
 
        if (HAS_DDI(dev)) {
-               temp = I915_READ(DP_TP_CTL(port));
+               uint32_t temp = I915_READ(DP_TP_CTL(port));
 
                if (dp_train_pat & DP_LINK_SCRAMBLING_DISABLE)
                        temp |= DP_TP_CTL_SCRAMBLE_DISABLE;
@@ -1724,18 +1714,6 @@ intel_dp_set_link_train(struct intel_dp *intel_dp,
                temp &= ~DP_TP_CTL_LINK_TRAIN_MASK;
                switch (dp_train_pat & DP_TRAINING_PATTERN_MASK) {
                case DP_TRAINING_PATTERN_DISABLE:
-
-                       if (port != PORT_A) {
-                               temp |= DP_TP_CTL_LINK_TRAIN_IDLE;
-                               I915_WRITE(DP_TP_CTL(port), temp);
-
-                               if (wait_for((I915_READ(DP_TP_STATUS(port)) &
-                                             DP_TP_STATUS_IDLE_DONE), 1))
-                                       DRM_ERROR("Timed out waiting for DP idle patterns\n");
-
-                               temp &= ~DP_TP_CTL_LINK_TRAIN_MASK;
-                       }
-
                        temp |= DP_TP_CTL_LINK_TRAIN_NORMAL;
 
                        break;
@@ -1811,6 +1789,37 @@ intel_dp_set_link_train(struct intel_dp *intel_dp,
        return true;
 }
 
+static void intel_dp_set_idle_link_train(struct intel_dp *intel_dp)
+{
+       struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
+       struct drm_device *dev = intel_dig_port->base.base.dev;
+       struct drm_i915_private *dev_priv = dev->dev_private;
+       enum port port = intel_dig_port->port;
+       uint32_t val;
+
+       if (!HAS_DDI(dev))
+               return;
+
+       val = I915_READ(DP_TP_CTL(port));
+       val &= ~DP_TP_CTL_LINK_TRAIN_MASK;
+       val |= DP_TP_CTL_LINK_TRAIN_IDLE;
+       I915_WRITE(DP_TP_CTL(port), val);
+
+       /*
+        * On PORT_A we can have only eDP in SST mode. There the only reason
+        * we need to set idle transmission mode is to work around a HW issue
+        * where we enable the pipe while not in idle link-training mode.
+        * In this case there is requirement to wait for a minimum number of
+        * idle patterns to be sent.
+        */
+       if (port == PORT_A)
+               return;
+
+       if (wait_for((I915_READ(DP_TP_STATUS(port)) & DP_TP_STATUS_IDLE_DONE),
+                    1))
+               DRM_ERROR("Timed out waiting for DP idle patterns\n");
+}
+
 /* Enable corresponding port and start training pattern 1 */
 void
 intel_dp_start_link_train(struct intel_dp *intel_dp)
@@ -1953,10 +1962,19 @@ intel_dp_complete_link_train(struct intel_dp *intel_dp)
                ++tries;
        }
 
+       intel_dp_set_idle_link_train(intel_dp);
+
+       intel_dp->DP = DP;
+
        if (channel_eq)
                DRM_DEBUG_KMS("Channel EQ done. DP Training successfull\n");
 
-       intel_dp_set_link_train(intel_dp, DP, DP_TRAINING_PATTERN_DISABLE);
+}
+
+void intel_dp_stop_link_train(struct intel_dp *intel_dp)
+{
+       intel_dp_set_link_train(intel_dp, intel_dp->DP,
+                               DP_TRAINING_PATTERN_DISABLE);
 }
 
 static void
@@ -2164,6 +2182,7 @@ intel_dp_check_link_status(struct intel_dp *intel_dp)
                              drm_get_encoder_name(&intel_encoder->base));
                intel_dp_start_link_train(intel_dp);
                intel_dp_complete_link_train(intel_dp);
+               intel_dp_stop_link_train(intel_dp);
        }
 }
...
@@ -499,6 +499,7 @@ extern void intel_dp_init_connector(struct intel_digital_port *intel_dig_port,
 extern void intel_dp_init_link_config(struct intel_dp *intel_dp);
 extern void intel_dp_start_link_train(struct intel_dp *intel_dp);
 extern void intel_dp_complete_link_train(struct intel_dp *intel_dp);
+extern void intel_dp_stop_link_train(struct intel_dp *intel_dp);
 extern void intel_dp_sink_dpms(struct intel_dp *intel_dp, int mode);
 extern void intel_dp_encoder_destroy(struct drm_encoder *encoder);
 extern void intel_dp_check_link_status(struct intel_dp *intel_dp);
...
@@ -262,10 +262,22 @@ void intel_fbdev_fini(struct drm_device *dev)
 void intel_fbdev_set_suspend(struct drm_device *dev, int state)
 {
        drm_i915_private_t *dev_priv = dev->dev_private;
-       if (!dev_priv->fbdev)
+       struct intel_fbdev *ifbdev = dev_priv->fbdev;
+       struct fb_info *info;
+
+       if (!ifbdev)
                return;
 
-       fb_set_suspend(dev_priv->fbdev->helper.fbdev, state);
+       info = ifbdev->helper.fbdev;
+
+       /* On resume from hibernation: If the object is shmemfs backed, it has
+        * been restored from swap. If the object is stolen however, it will be
+        * full of whatever garbage was left in there.
+        */
+       if (!state && ifbdev->ifb.obj->stolen)
+               memset_io(info->screen_base, 0, info->screen_size);
+
+       fb_set_suspend(info, state);
 }
 
 MODULE_LICENSE("GPL and additional rights");
...
@@ -1301,17 +1301,17 @@ static void valleyview_update_wm(struct drm_device *dev)
        vlv_update_drain_latency(dev);
 
-       if (g4x_compute_wm0(dev, 0,
+       if (g4x_compute_wm0(dev, PIPE_A,
                            &valleyview_wm_info, latency_ns,
                            &valleyview_cursor_wm_info, latency_ns,
                            &planea_wm, &cursora_wm))
-               enabled |= 1;
+               enabled |= 1 << PIPE_A;
 
-       if (g4x_compute_wm0(dev, 1,
+       if (g4x_compute_wm0(dev, PIPE_B,
                            &valleyview_wm_info, latency_ns,
                            &valleyview_cursor_wm_info, latency_ns,
                            &planeb_wm, &cursorb_wm))
-               enabled |= 2;
+               enabled |= 1 << PIPE_B;
 
        if (single_plane_enabled(enabled) &&
            g4x_compute_srwm(dev, ffs(enabled) - 1,
@@ -1357,17 +1357,17 @@ static void g4x_update_wm(struct drm_device *dev)
        int plane_sr, cursor_sr;
        unsigned int enabled = 0;
 
-       if (g4x_compute_wm0(dev, 0,
+       if (g4x_compute_wm0(dev, PIPE_A,
                            &g4x_wm_info, latency_ns,
                            &g4x_cursor_wm_info, latency_ns,
                            &planea_wm, &cursora_wm))
-               enabled |= 1;
+               enabled |= 1 << PIPE_A;
 
-       if (g4x_compute_wm0(dev, 1,
+       if (g4x_compute_wm0(dev, PIPE_B,
                            &g4x_wm_info, latency_ns,
                            &g4x_cursor_wm_info, latency_ns,
                            &planeb_wm, &cursorb_wm))
-               enabled |= 2;
+               enabled |= 1 << PIPE_B;
 
        if (single_plane_enabled(enabled) &&
            g4x_compute_srwm(dev, ffs(enabled) - 1,
@@ -1716,7 +1716,7 @@ static void ironlake_update_wm(struct drm_device *dev)
        unsigned int enabled;
 
        enabled = 0;
-       if (g4x_compute_wm0(dev, 0,
+       if (g4x_compute_wm0(dev, PIPE_A,
                            &ironlake_display_wm_info,
                            ILK_LP0_PLANE_LATENCY,
                            &ironlake_cursor_wm_info,
@@ -1727,10 +1727,10 @@ static void ironlake_update_wm(struct drm_device *dev)
                DRM_DEBUG_KMS("FIFO watermarks For pipe A -"
                              " plane %d, " "cursor: %d\n",
                              plane_wm, cursor_wm);
-               enabled |= 1;
+               enabled |= 1 << PIPE_A;
        }
 
-       if (g4x_compute_wm0(dev, 1,
+       if (g4x_compute_wm0(dev, PIPE_B,
                            &ironlake_display_wm_info,
                            ILK_LP0_PLANE_LATENCY,
                            &ironlake_cursor_wm_info,
@@ -1741,7 +1741,7 @@ static void ironlake_update_wm(struct drm_device *dev)
                DRM_DEBUG_KMS("FIFO watermarks For pipe B -"
                              " plane %d, cursor: %d\n",
                              plane_wm, cursor_wm);
-               enabled |= 2;
+               enabled |= 1 << PIPE_B;
        }
 
        /*
@@ -1801,7 +1801,7 @@ static void sandybridge_update_wm(struct drm_device *dev)
        unsigned int enabled;
 
        enabled = 0;
-       if (g4x_compute_wm0(dev, 0,
+       if (g4x_compute_wm0(dev, PIPE_A,
                            &sandybridge_display_wm_info, latency,
                            &sandybridge_cursor_wm_info, latency,
                            &plane_wm, &cursor_wm)) {
@@ -1812,10 +1812,10 @@ static void sandybridge_update_wm(struct drm_device *dev)
                DRM_DEBUG_KMS("FIFO watermarks For pipe A -"
                              " plane %d, " "cursor: %d\n",
                              plane_wm, cursor_wm);
-               enabled |= 1;
+               enabled |= 1 << PIPE_A;
        }
 
-       if (g4x_compute_wm0(dev, 1,
+       if (g4x_compute_wm0(dev, PIPE_B,
                            &sandybridge_display_wm_info, latency,
                            &sandybridge_cursor_wm_info, latency,
                            &plane_wm, &cursor_wm)) {
@@ -1826,7 +1826,7 @@ static void sandybridge_update_wm(struct drm_device *dev)
                DRM_DEBUG_KMS("FIFO watermarks For pipe B -"
                              " plane %d, cursor: %d\n",
                              plane_wm, cursor_wm);
-               enabled |= 2;
+               enabled |= 1 << PIPE_B;
        }
 
        /*
@@ -1904,7 +1904,7 @@ static void ivybridge_update_wm(struct drm_device *dev)
        unsigned int enabled;
 
        enabled = 0;
-       if (g4x_compute_wm0(dev, 0,
+       if (g4x_compute_wm0(dev, PIPE_A,
                            &sandybridge_display_wm_info, latency,
                            &sandybridge_cursor_wm_info, latency,
                            &plane_wm, &cursor_wm)) {
@@ -1915,10 +1915,10 @@ static void ivybridge_update_wm(struct drm_device *dev)
                DRM_DEBUG_KMS("FIFO watermarks For pipe A -"
                              " plane %d, " "cursor: %d\n",
                              plane_wm, cursor_wm);
-               enabled |= 1;
+               enabled |= 1 << PIPE_A;
        }
 
-       if (g4x_compute_wm0(dev, 1,
+       if (g4x_compute_wm0(dev, PIPE_B,
                            &sandybridge_display_wm_info, latency,
                            &sandybridge_cursor_wm_info, latency,
                            &plane_wm, &cursor_wm)) {
@@ -1929,10 +1929,10 @@ static void ivybridge_update_wm(struct drm_device *dev)
                DRM_DEBUG_KMS("FIFO watermarks For pipe B -"
                              " plane %d, cursor: %d\n",
                              plane_wm, cursor_wm);
-               enabled |= 2;
+               enabled |= 1 << PIPE_B;
        }
 
-       if (g4x_compute_wm0(dev, 2,
+       if (g4x_compute_wm0(dev, PIPE_C,
                            &sandybridge_display_wm_info, latency,
                            &sandybridge_cursor_wm_info, latency,
                            &plane_wm, &cursor_wm)) {
@@ -1943,7 +1943,7 @@ static void ivybridge_update_wm(struct drm_device *dev)
                DRM_DEBUG_KMS("FIFO watermarks For pipe C -"
                              " plane %d, cursor: %d\n",
                              plane_wm, cursor_wm);
-               enabled |= 3;
+               enabled |= 1 << PIPE_C;
        }
 
        /*
...
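A note on the watermark change above: "enabled" is a bitmask indexed by pipe, so each pipe contributes 1 << pipe; the old literal 3 for pipe C set the bits for pipes A and B instead of bit 2, which is what the fix corrects. A minimal standalone sketch of the intended encoding (the enum values mirror the kernel's enum pipe; the helper name is illustrative only):

#include <stdio.h>

enum pipe { PIPE_A = 0, PIPE_B, PIPE_C };

/* Illustrative only: the per-pipe bit in the "enabled" watermark mask. */
static unsigned int pipe_bit(enum pipe pipe)
{
        return 1u << pipe;      /* PIPE_C -> 0x4; the old literal 3 was A|B */
}

int main(void)
{
        printf("A=0x%x B=0x%x C=0x%x\n",
               pipe_bit(PIPE_A), pipe_bit(PIPE_B), pipe_bit(PIPE_C));
        return 0;
}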