Commit 5b726e06 authored by Linus Torvalds

Merge tag 'drm-intel-fixes-2015-12-23' of git://anongit.freedesktop.org/drm-intel

Pull i915 drm fixes from Jani Nikula:
 "Here's a batch of i915 fixes all around.  It may be slightly bigger
  than one would hope for at this stage, but they've all been through
  testing in our -next before being picked up for v4.4.  Also, I missed
  Dave's fixes pull earlier today just because I wanted an extra testing
  round on this.  So I'm fairly confident.

  Wishing you all the things it is customary to wish this time of the
  year"

* tag 'drm-intel-fixes-2015-12-23' of git://anongit.freedesktop.org/drm-intel:
  drm/i915: Correct max delay for HDMI hotplug live status checking
  drm/i915: mdelay(10) considered harmful
  drm/i915: Kill intel_crtc->cursor_bo
  drm/i915: Workaround CHV pipe C cursor fail
  drm/i915: Only spin whilst waiting on the current request
  drm/i915: Limit the busy wait on requests to 5us not 10ms!
  drm/i915: Break busywaiting for requests on pending signals
  drm/i915: Disable primary plane if we fail to reconstruct BIOS fb (v2)
  drm/i915: Set the map-and-fenceable flag for preallocated objects
  drm/i915: Drop the broken cursor base==0 special casing
parents 2bfd43d8 a98728e0
@@ -2193,8 +2193,17 @@ struct drm_i915_gem_request {
struct drm_i915_private *i915;
struct intel_engine_cs *ring;
-/** GEM sequence number associated with this request. */
-uint32_t seqno;
+/** GEM sequence number associated with the previous request,
+ * when the HWS breadcrumb is equal to this the GPU is processing
+ * this request.
+ */
+u32 previous_seqno;
+/** GEM sequence number associated with this request,
+ * when the HWS breadcrumb is equal or greater than this the GPU
+ * has finished processing this request.
+ */
+u32 seqno;
/** Position in the ringbuffer of the start of the request */
u32 head;
@@ -2839,6 +2848,7 @@ i915_gem_object_ggtt_pin(struct drm_i915_gem_object *obj,
int i915_vma_bind(struct i915_vma *vma, enum i915_cache_level cache_level,
u32 flags);
+void __i915_vma_set_map_and_fenceable(struct i915_vma *vma);
int __must_check i915_vma_unbind(struct i915_vma *vma);
/*
* BEWARE: Do not use the function below unless you can _absolutely_
@@ -2910,15 +2920,17 @@ i915_seqno_passed(uint32_t seq1, uint32_t seq2)
return (int32_t)(seq1 - seq2) >= 0;
}
-static inline bool i915_gem_request_completed(struct drm_i915_gem_request *req,
+static inline bool i915_gem_request_started(struct drm_i915_gem_request *req,
bool lazy_coherency)
{
-u32 seqno;
-BUG_ON(req == NULL);
-seqno = req->ring->get_seqno(req->ring, lazy_coherency);
+u32 seqno = req->ring->get_seqno(req->ring, lazy_coherency);
+return i915_seqno_passed(seqno, req->previous_seqno);
+}
+static inline bool i915_gem_request_completed(struct drm_i915_gem_request *req,
+bool lazy_coherency)
+{
+u32 seqno = req->ring->get_seqno(req->ring, lazy_coherency);
return i915_seqno_passed(seqno, req->seqno);
}
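The started/completed pair above leans on i915_seqno_passed(), which stays correct even when the 32-bit seqno counter wraps because the unsigned difference is reinterpreted as signed. A minimal userspace sketch of that behaviour (values and the skipped-zero convention are illustrative, not taken from the driver):

```c
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Same trick as i915_seqno_passed(): the unsigned difference is cast to
 * signed, so "has seq1 reached seq2?" stays correct across a u32 wrap. */
static bool seqno_passed(uint32_t seq1, uint32_t seq2)
{
	return (int32_t)(seq1 - seq2) >= 0;
}

int main(void)
{
	/* Illustrative request straddling a wrap: the previous request got
	 * seqno 0xffffffff, this one got seqno 1. */
	uint32_t previous_seqno = 0xffffffffu, seqno = 1;
	uint32_t hws = 0xffffffffu;	/* breadcrumb the GPU has written so far */

	printf("started=%d completed=%d\n",
	       seqno_passed(hws, previous_seqno),	/* 1: GPU is on this request */
	       seqno_passed(hws, seqno));		/* 0: not finished yet */

	hws = 1;	/* GPU writes this request's breadcrumb */
	printf("started=%d completed=%d\n",
	       seqno_passed(hws, previous_seqno),	/* 1 */
	       seqno_passed(hws, seqno));		/* 1 */
	return 0;
}
```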
@@ -1146,23 +1146,74 @@ static bool missed_irq(struct drm_i915_private *dev_priv,
return test_bit(ring->id, &dev_priv->gpu_error.missed_irq_rings);
}
-static int __i915_spin_request(struct drm_i915_gem_request *req)
+static unsigned long local_clock_us(unsigned *cpu)
{
+unsigned long t;
+/* Cheaply and approximately convert from nanoseconds to microseconds.
+ * The result and subsequent calculations are also defined in the same
+ * approximate microseconds units. The principal source of timing
+ * error here is from the simple truncation.
+ *
+ * Note that local_clock() is only defined wrt to the current CPU;
+ * the comparisons are no longer valid if we switch CPUs. Instead of
+ * blocking preemption for the entire busywait, we can detect the CPU
+ * switch and use that as indicator of system load and a reason to
+ * stop busywaiting, see busywait_stop().
+ */
+*cpu = get_cpu();
+t = local_clock() >> 10;
+put_cpu();
+return t;
+}
+static bool busywait_stop(unsigned long timeout, unsigned cpu)
+{
+unsigned this_cpu;
+if (time_after(local_clock_us(&this_cpu), timeout))
+return true;
+return this_cpu != cpu;
+}
+static int __i915_spin_request(struct drm_i915_gem_request *req, int state)
+{
unsigned long timeout;
+unsigned cpu;
/* When waiting for high frequency requests, e.g. during synchronous
* rendering split between the CPU and GPU, the finite amount of time
* required to set up the irq and wait upon it limits the response
* rate. By busywaiting on the request completion for a short while we
* can service the high frequency waits as quick as possible. However,
* if it is a slow request, we want to sleep as quickly as possible.
* The tradeoff between waiting and sleeping is roughly the time it
* takes to sleep on a request, on the order of a microsecond.
*/
-if (i915_gem_request_get_ring(req)->irq_refcount)
+if (req->ring->irq_refcount)
return -EBUSY;
-timeout = jiffies + 1;
+/* Only spin if we know the GPU is processing this request */
+if (!i915_gem_request_started(req, true))
+return -EAGAIN;
+timeout = local_clock_us(&cpu) + 5;
while (!need_resched()) {
if (i915_gem_request_completed(req, true))
return 0;
-if (time_after_eq(jiffies, timeout))
+if (signal_pending_state(state, current))
break;
+if (busywait_stop(timeout, cpu))
+break;
cpu_relax_lowlatency();
}
if (i915_gem_request_completed(req, false))
return 0;
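The rewritten spin loop above bounds the busywait by a small wall-clock budget instead of a whole jiffy (10ms at HZ=100), and gives up early on need_resched(), a pending signal, or a CPU migration. A rough userspace analogue of the time-budgeted polling part (the helper names and 5us budget are illustrative; clock_gettime() stands in for local_clock()):

```c
#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>
#include <time.h>

/* Monotonic time in microseconds, standing in for local_clock() >> 10. */
static unsigned long clock_us(void)
{
	struct timespec ts;

	clock_gettime(CLOCK_MONOTONIC, &ts);
	return (unsigned long)ts.tv_sec * 1000000ul + ts.tv_nsec / 1000;
}

/* Poll a cheap completion check for at most budget_us microseconds.
 * The kernel loop above additionally breaks out on need_resched(), a
 * pending signal and a CPU switch, since a busywait is only worthwhile
 * while it is cheaper than setting up an interrupt and sleeping. */
static bool spin_for(bool (*done)(void *), void *arg, unsigned long budget_us)
{
	unsigned long timeout = clock_us() + budget_us;

	while (clock_us() <= timeout) {
		if (done(arg))
			return true;
	}
	return false;
}

static bool never_done(void *arg)
{
	(void)arg;
	return false;
}

int main(void)
{
	/* Spins for roughly 5us and then gives up, mirroring the budget above. */
	printf("%d\n", spin_for(never_done, NULL, 5));
	return 0;
}
```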
@@ -1197,6 +1248,7 @@ int __i915_wait_request(struct drm_i915_gem_request *req,
struct drm_i915_private *dev_priv = dev->dev_private;
const bool irq_test_in_progress =
ACCESS_ONCE(dev_priv->gpu_error.test_irq_rings) & intel_ring_flag(ring);
+int state = interruptible ? TASK_INTERRUPTIBLE : TASK_UNINTERRUPTIBLE;
DEFINE_WAIT(wait);
unsigned long timeout_expire;
s64 before, now;
@@ -1229,7 +1281,7 @@ int __i915_wait_request(struct drm_i915_gem_request *req,
before = ktime_get_raw_ns();
/* Optimistic spin for the next jiffie before touching IRQs */
-ret = __i915_spin_request(req);
+ret = __i915_spin_request(req, state);
if (ret == 0)
goto out;
@@ -1241,8 +1293,7 @@ int __i915_wait_request(struct drm_i915_gem_request *req,
for (;;) {
struct timer_list timer;
-prepare_to_wait(&ring->irq_queue, &wait,
-interruptible ? TASK_INTERRUPTIBLE : TASK_UNINTERRUPTIBLE);
+prepare_to_wait(&ring->irq_queue, &wait, state);
/* We need to check whether any gpu reset happened in between
* the caller grabbing the seqno and now ... */
@@ -1260,7 +1311,7 @@ int __i915_wait_request(struct drm_i915_gem_request *req,
break;
}
-if (interruptible && signal_pending(current)) {
+if (signal_pending_state(state, current)) {
ret = -ERESTARTSYS;
break;
}
@@ -2554,6 +2605,7 @@ void __i915_add_request(struct drm_i915_gem_request *request,
request->batch_obj = obj;
request->emitted_jiffies = jiffies;
+request->previous_seqno = ring->last_submitted_seqno;
ring->last_submitted_seqno = request->seqno;
list_add_tail(&request->list, &ring->request_list);
@@ -4080,6 +4132,29 @@ i915_vma_misplaced(struct i915_vma *vma, uint32_t alignment, uint64_t flags)
return false;
}
+void __i915_vma_set_map_and_fenceable(struct i915_vma *vma)
+{
+struct drm_i915_gem_object *obj = vma->obj;
+bool mappable, fenceable;
+u32 fence_size, fence_alignment;
+fence_size = i915_gem_get_gtt_size(obj->base.dev,
+obj->base.size,
+obj->tiling_mode);
+fence_alignment = i915_gem_get_gtt_alignment(obj->base.dev,
+obj->base.size,
+obj->tiling_mode,
+true);
+fenceable = (vma->node.size == fence_size &&
+(vma->node.start & (fence_alignment - 1)) == 0);
+mappable = (vma->node.start + fence_size <=
+to_i915(obj->base.dev)->gtt.mappable_end);
+obj->map_and_fenceable = mappable && fenceable;
+}
static int
i915_gem_object_do_pin(struct drm_i915_gem_object *obj,
struct i915_address_space *vm,
@@ -4147,25 +4222,7 @@ i915_gem_object_do_pin(struct drm_i915_gem_object *obj,
if (ggtt_view && ggtt_view->type == I915_GGTT_VIEW_NORMAL &&
(bound ^ vma->bound) & GLOBAL_BIND) {
-bool mappable, fenceable;
-u32 fence_size, fence_alignment;
-fence_size = i915_gem_get_gtt_size(obj->base.dev,
-obj->base.size,
-obj->tiling_mode);
-fence_alignment = i915_gem_get_gtt_alignment(obj->base.dev,
-obj->base.size,
-obj->tiling_mode,
-true);
-fenceable = (vma->node.size == fence_size &&
-(vma->node.start & (fence_alignment - 1)) == 0);
-mappable = (vma->node.start + fence_size <=
-dev_priv->gtt.mappable_end);
-obj->map_and_fenceable = mappable && fenceable;
+__i915_vma_set_map_and_fenceable(vma);
WARN_ON(flags & PIN_MAPPABLE && !obj->map_and_fenceable);
}
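The fenceable test in the new helper relies on fence_alignment being a power of two, so masking with (fence_alignment - 1) is a cheap way to check that the node start is aligned. A small illustrative check (the values are made up):

```c
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* For a power-of-two alignment, (start & (alignment - 1)) == 0 is
 * equivalent to start % alignment == 0, which is the test the helper
 * applies to vma->node.start. */
static bool is_aligned(uint64_t start, uint64_t alignment)
{
	return (start & (alignment - 1)) == 0;
}

int main(void)
{
	uint64_t fence_alignment = 4096;	/* illustrative */

	printf("%d %d\n",
	       is_aligned(0x10000, fence_alignment),	/* 1 */
	       is_aligned(0x10800, fence_alignment));	/* 0 */
	return 0;
}
```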
@@ -2676,6 +2676,7 @@ static int i915_gem_setup_global_gtt(struct drm_device *dev,
return ret;
}
vma->bound |= GLOBAL_BIND;
+__i915_vma_set_map_and_fenceable(vma);
list_add_tail(&vma->mm_list, &ggtt_vm->inactive_list);
}
@@ -687,6 +687,7 @@ i915_gem_object_create_stolen_for_preallocated(struct drm_device *dev,
}
vma->bound |= GLOBAL_BIND;
+__i915_vma_set_map_and_fenceable(vma);
list_add_tail(&vma->mm_list, &ggtt->inactive_list);
}
@@ -116,6 +116,7 @@ static void skylake_pfit_enable(struct intel_crtc *crtc);
static void ironlake_pfit_disable(struct intel_crtc *crtc, bool force);
static void ironlake_pfit_enable(struct intel_crtc *crtc);
static void intel_modeset_setup_hw_state(struct drm_device *dev);
+static void intel_pre_disable_primary(struct drm_crtc *crtc);
typedef struct {
int min, max;
@@ -2607,6 +2608,8 @@ intel_find_initial_plane_obj(struct intel_crtc *intel_crtc,
struct drm_i915_gem_object *obj;
struct drm_plane *primary = intel_crtc->base.primary;
struct drm_plane_state *plane_state = primary->state;
+struct drm_crtc_state *crtc_state = intel_crtc->base.state;
+struct intel_plane *intel_plane = to_intel_plane(primary);
struct drm_framebuffer *fb;
if (!plane_config->fb)
@@ -2643,6 +2646,18 @@ intel_find_initial_plane_obj(struct intel_crtc *intel_crtc,
}
}
+/*
+ * We've failed to reconstruct the BIOS FB. Current display state
+ * indicates that the primary plane is visible, but has a NULL FB,
+ * which will lead to problems later if we don't fix it up. The
+ * simplest solution is to just disable the primary plane now and
+ * pretend the BIOS never had it enabled.
+ */
+to_intel_plane_state(plane_state)->visible = false;
+crtc_state->plane_mask &= ~(1 << drm_plane_index(primary));
+intel_pre_disable_primary(&intel_crtc->base);
+intel_plane->disable_plane(primary, &intel_crtc->base);
+return;
valid_fb:
@@ -9910,14 +9925,14 @@ static bool haswell_get_pipe_config(struct intel_crtc *crtc,
return true;
}
-static void i845_update_cursor(struct drm_crtc *crtc, u32 base)
+static void i845_update_cursor(struct drm_crtc *crtc, u32 base, bool on)
{
struct drm_device *dev = crtc->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
uint32_t cntl = 0, size = 0;
-if (base) {
+if (on) {
unsigned int width = intel_crtc->base.cursor->state->crtc_w;
unsigned int height = intel_crtc->base.cursor->state->crtc_h;
unsigned int stride = roundup_pow_of_two(width) * 4;
@@ -9972,16 +9987,15 @@ static void i845_update_cursor(struct drm_crtc *crtc, u32 base)
}
}
-static void i9xx_update_cursor(struct drm_crtc *crtc, u32 base)
+static void i9xx_update_cursor(struct drm_crtc *crtc, u32 base, bool on)
{
struct drm_device *dev = crtc->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
int pipe = intel_crtc->pipe;
-uint32_t cntl;
+uint32_t cntl = 0;
-cntl = 0;
-if (base) {
+if (on) {
cntl = MCURSOR_GAMMA_ENABLE;
switch (intel_crtc->base.cursor->state->crtc_w) {
case 64:
@@ -10032,18 +10046,17 @@ static void intel_crtc_update_cursor(struct drm_crtc *crtc,
int y = cursor_state->crtc_y;
u32 base = 0, pos = 0;
-if (on)
base = intel_crtc->cursor_addr;
if (x >= intel_crtc->config->pipe_src_w)
-base = 0;
+on = false;
if (y >= intel_crtc->config->pipe_src_h)
-base = 0;
+on = false;
if (x < 0) {
if (x + cursor_state->crtc_w <= 0)
-base = 0;
+on = false;
pos |= CURSOR_POS_SIGN << CURSOR_X_SHIFT;
x = -x;
@@ -10052,16 +10065,13 @@ static void intel_crtc_update_cursor(struct drm_crtc *crtc,
if (y < 0) {
if (y + cursor_state->crtc_h <= 0)
-base = 0;
+on = false;
pos |= CURSOR_POS_SIGN << CURSOR_Y_SHIFT;
y = -y;
}
pos |= y << CURSOR_Y_SHIFT;
-if (base == 0 && intel_crtc->cursor_base == 0)
-return;
I915_WRITE(CURPOS(pipe), pos);
/* ILK+ do this automagically */
@@ -10072,9 +10082,9 @@ static void intel_crtc_update_cursor(struct drm_crtc *crtc,
}
if (IS_845G(dev) || IS_I865G(dev))
-i845_update_cursor(crtc, base);
+i845_update_cursor(crtc, base, on);
else
-i9xx_update_cursor(crtc, base);
+i9xx_update_cursor(crtc, base, on);
}
static bool cursor_size_ok(struct drm_device *dev,
@@ -13718,6 +13728,7 @@ intel_check_cursor_plane(struct drm_plane *plane,
struct drm_crtc *crtc = crtc_state->base.crtc;
struct drm_framebuffer *fb = state->base.fb;
struct drm_i915_gem_object *obj = intel_fb_obj(fb);
+enum pipe pipe = to_intel_plane(plane)->pipe;
unsigned stride;
int ret;
@@ -13751,6 +13762,22 @@ intel_check_cursor_plane(struct drm_plane *plane,
return -EINVAL;
}
+/*
+ * There's something wrong with the cursor on CHV pipe C.
+ * If it straddles the left edge of the screen then
+ * moving it away from the edge or disabling it often
+ * results in a pipe underrun, and often that can lead to
+ * dead pipe (constant underrun reported, and it scans
+ * out just a solid color). To recover from that, the
+ * display power well must be turned off and on again.
+ * Refuse the put the cursor into that compromised position.
+ */
+if (IS_CHERRYVIEW(plane->dev) && pipe == PIPE_C &&
+state->visible && state->base.crtc_x < 0) {
+DRM_DEBUG_KMS("CHV cursor C not allowed to straddle the left screen edge\n");
+return -EINVAL;
+}
return 0;
}
@@ -13774,9 +13801,6 @@ intel_commit_cursor_plane(struct drm_plane *plane,
crtc = crtc ? crtc : plane->crtc;
intel_crtc = to_intel_crtc(crtc);
-if (intel_crtc->cursor_bo == obj)
-goto update;
if (!obj)
addr = 0;
else if (!INTEL_INFO(dev)->cursor_needs_physical)
@@ -13785,9 +13809,7 @@ intel_commit_cursor_plane(struct drm_plane *plane,
addr = obj->phys_handle->busaddr;
intel_crtc->cursor_addr = addr;
-intel_crtc->cursor_bo = obj;
-update:
if (crtc->state->active)
intel_crtc_update_cursor(crtc, state->visible);
}
@@ -550,7 +550,6 @@ struct intel_crtc {
int adjusted_x;
int adjusted_y;
-struct drm_i915_gem_object *cursor_bo;
uint32_t cursor_addr;
uint32_t cursor_cntl;
uint32_t cursor_size;
@@ -1374,17 +1374,18 @@ intel_hdmi_detect(struct drm_connector *connector, bool force)
struct intel_hdmi *intel_hdmi = intel_attached_hdmi(connector);
struct drm_i915_private *dev_priv = to_i915(connector->dev);
bool live_status = false;
-unsigned int retry = 3;
+unsigned int try;
DRM_DEBUG_KMS("[CONNECTOR:%d:%s]\n",
connector->base.id, connector->name);
intel_display_power_get(dev_priv, POWER_DOMAIN_GMBUS);
-while (!live_status && --retry) {
+for (try = 0; !live_status && try < 4; try++) {
+if (try)
+msleep(10);
live_status = intel_digital_port_connected(dev_priv,
hdmi_to_dig_port(intel_hdmi));
-mdelay(10);
}
if (!live_status)