Commit 7cd9beb3 authored by Linus Torvalds

Merge branch 'drm-fixes' of git://people.freedesktop.org/~airlied/linux

Pull drm fixes from Dave Airlie:
 "Misc i915, vmwgfx and radeon fixes along with a fix for one of those
  recursive sleep mutex debug cases in the mst code"

* 'drm-fixes' of git://people.freedesktop.org/~airlied/linux:
  drm/vmwgfx: Fix an issue with the device losing its irq line on module unload
  drm/vmwgfx: Correctly NULLify dma buffer pointer on failure
  drm/vmwgfx: Reorder device takedown somewhat
  drm/vmwgfx: Fix a couple of lock dependency violations
  drm/radeon: drop setting UPLL to sleep mode
  drm/radeon: fix wait to actually occur after the signaling callback
  drm/i915: Prevent TLB error on first execution on SNB
  drm/i915: Do both mt and gen6 style forcewake reset on ivb probe
  drm/i915: Make WAIT_IOCTL negative timeouts be indefinite again
  drm/i915: use in_interrupt() not in_irq() to check context
  drm/mst: fix recursive sleep warning on qlock
  drm: Don't assign fbs for universal cursor support to files
parents 60b3e7bd e2cdcafa
@@ -43,9 +43,10 @@
 #include "drm_crtc_internal.h"
 #include "drm_internal.h"
 
-static struct drm_framebuffer *add_framebuffer_internal(struct drm_device *dev,
-							 struct drm_mode_fb_cmd2 *r,
-							 struct drm_file *file_priv);
+static struct drm_framebuffer *
+internal_framebuffer_create(struct drm_device *dev,
+			    struct drm_mode_fb_cmd2 *r,
+			    struct drm_file *file_priv);
 
 /* Avoid boilerplate. I'm tired of typing. */
 #define DRM_ENUM_NAME_FN(fnname, list)				\
@@ -2908,13 +2909,11 @@ static int drm_mode_cursor_universal(struct drm_crtc *crtc,
 	 */
 	if (req->flags & DRM_MODE_CURSOR_BO) {
 		if (req->handle) {
-			fb = add_framebuffer_internal(dev, &fbreq, file_priv);
+			fb = internal_framebuffer_create(dev, &fbreq, file_priv);
 			if (IS_ERR(fb)) {
 				DRM_DEBUG_KMS("failed to wrap cursor buffer in drm framebuffer\n");
 				return PTR_ERR(fb);
 			}
-
-			drm_framebuffer_reference(fb);
 		} else {
 			fb = NULL;
 		}
@@ -3267,9 +3266,10 @@ static int framebuffer_check(const struct drm_mode_fb_cmd2 *r)
 	return 0;
 }
 
-static struct drm_framebuffer *add_framebuffer_internal(struct drm_device *dev,
-							 struct drm_mode_fb_cmd2 *r,
-							 struct drm_file *file_priv)
+static struct drm_framebuffer *
+internal_framebuffer_create(struct drm_device *dev,
+			    struct drm_mode_fb_cmd2 *r,
+			    struct drm_file *file_priv)
 {
 	struct drm_mode_config *config = &dev->mode_config;
 	struct drm_framebuffer *fb;
@@ -3301,12 +3301,6 @@ static struct drm_framebuffer *add_framebuffer_internal(struct drm_device *dev,
 		return fb;
 	}
 
-	mutex_lock(&file_priv->fbs_lock);
-	r->fb_id = fb->base.id;
-	list_add(&fb->filp_head, &file_priv->fbs);
-	DRM_DEBUG_KMS("[FB:%d]\n", fb->base.id);
-	mutex_unlock(&file_priv->fbs_lock);
-
 	return fb;
 }
 
@@ -3328,15 +3322,24 @@ static struct drm_framebuffer *add_framebuffer_internal(struct drm_device *dev,
 int drm_mode_addfb2(struct drm_device *dev,
 		    void *data, struct drm_file *file_priv)
 {
+	struct drm_mode_fb_cmd2 *r = data;
 	struct drm_framebuffer *fb;
 
 	if (!drm_core_check_feature(dev, DRIVER_MODESET))
 		return -EINVAL;
 
-	fb = add_framebuffer_internal(dev, data, file_priv);
+	fb = internal_framebuffer_create(dev, r, file_priv);
 	if (IS_ERR(fb))
 		return PTR_ERR(fb);
 
+	/* Transfer ownership to the filp for reaping on close */
+
+	DRM_DEBUG_KMS("[FB:%d]\n", fb->base.id);
+	mutex_lock(&file_priv->fbs_lock);
+	r->fb_id = fb->base.id;
+	list_add(&fb->filp_head, &file_priv->fbs);
+	mutex_unlock(&file_priv->fbs_lock);
+
 	return 0;
 }
......
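The net effect of the drm_crtc.c changes: internal_framebuffer_create() now returns a framebuffer with exactly one reference and no longer publishes it on the file's fb list; only drm_mode_addfb2() does the publishing, so a framebuffer created internally for a universal-plane cursor stays private to the cursor code (which is why the compensating drm_framebuffer_reference() call could be dropped). A toy userspace sketch of that single-reference ownership rule (illustrative names, not kernel API):

#include <stdio.h>
#include <stdlib.h>

struct fb { int refcount; int id; };

/* create() hands out exactly one reference */
static struct fb *fb_create(int id)
{
	struct fb *fb = malloc(sizeof(*fb));
	fb->refcount = 1;
	fb->id = id;
	return fb;
}

static void fb_unref(struct fb *fb)
{
	if (--fb->refcount == 0) {
		printf("fb %d freed\n", fb->id);
		free(fb);
	}
}

int main(void)
{
	/* cursor path: the reference stays private and is dropped by the
	 * cursor code itself - no extra ref, no file-list entry */
	struct fb *cursor_fb = fb_create(1);
	fb_unref(cursor_fb);

	/* addfb2 path: the same single reference is handed over to the
	 * file's fb list and reaped when the file closes */
	struct fb *user_fb = fb_create(2);
	printf("fb %d now owned by the filp list\n", user_fb->id);
	fb_unref(user_fb);	/* stands in for close-time reaping */
	return 0;
}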
@@ -733,10 +733,14 @@ static bool check_txmsg_state(struct drm_dp_mst_topology_mgr *mgr,
 			      struct drm_dp_sideband_msg_tx *txmsg)
 {
 	bool ret;
-	mutex_lock(&mgr->qlock);
+
+	/*
+	 * All updates to txmsg->state are protected by mgr->qlock, and the two
+	 * cases we check here are terminal states. For those the barriers
+	 * provided by the wake_up/wait_event pair are enough.
+	 */
 	ret = (txmsg->state == DRM_DP_SIDEBAND_TX_RX ||
 	       txmsg->state == DRM_DP_SIDEBAND_TX_TIMEOUT);
-	mutex_unlock(&mgr->qlock);
 	return ret;
 }
@@ -1363,12 +1367,13 @@ static int process_single_tx_qlock(struct drm_dp_mst_topology_mgr *mgr,
 	return 0;
 }
 
+/* must be called holding qlock */
 static void process_single_down_tx_qlock(struct drm_dp_mst_topology_mgr *mgr)
 {
 	struct drm_dp_sideband_msg_tx *txmsg;
 	int ret;
 
+	WARN_ON(!mutex_is_locked(&mgr->qlock));
+
 	/* construct a chunk from the first msg in the tx_msg queue */
 	if (list_empty(&mgr->tx_msg_downq)) {
 		mgr->tx_down_in_progress = false;
......
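The check_txmsg_state() hunk is the actual recursive-sleep fix: wait_event_timeout() puts the task into a sleeping state before it evaluates its condition, so a condition that takes mgr->qlock (a sleeping mutex) sleeps inside a sleep and trips the debug warning. A sketch of the before/after shape of the wait (illustrative; check_txmsg_state_locked() is a made-up stand-in for the old locking helper):

/* BAD (old): the condition takes a mutex while the task is already
 * marked TASK_UNINTERRUPTIBLE -> "recursive sleep" debug warning */
wait_event_timeout(mgr->tx_waitq,
		   check_txmsg_state_locked(mgr, txmsg),	/* took qlock */
		   4 * HZ);

/* GOOD (new): read the two terminal states locklessly; the
 * wake_up()/wait_event() pair supplies the needed ordering */
wait_event_timeout(mgr->tx_waitq,
		   txmsg->state == DRM_DP_SIDEBAND_TX_RX ||
		   txmsg->state == DRM_DP_SIDEBAND_TX_TIMEOUT,
		   4 * HZ);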
@@ -2936,9 +2936,9 @@ i915_gem_wait_ioctl(struct drm_device *dev, void *data, struct drm_file *file)
 	req = obj->last_read_req;
 
 	/* Do this after OLR check to make sure we make forward progress polling
-	 * on this IOCTL with a timeout <=0 (like busy ioctl)
+	 * on this IOCTL with a timeout == 0 (like busy ioctl)
 	 */
-	if (args->timeout_ns <= 0) {
+	if (args->timeout_ns == 0) {
 		ret = -ETIME;
 		goto out;
 	}
@@ -2948,7 +2948,8 @@ i915_gem_wait_ioctl(struct drm_device *dev, void *data, struct drm_file *file)
 	i915_gem_request_reference(req);
 	mutex_unlock(&dev->struct_mutex);
 
-	ret = __i915_wait_request(req, reset_counter, true, &args->timeout_ns,
+	ret = __i915_wait_request(req, reset_counter, true,
+				  args->timeout_ns > 0 ? &args->timeout_ns : NULL,
 				  file->driver_priv);
 	mutex_lock(&dev->struct_mutex);
 	i915_gem_request_unreference(req);
@@ -4792,6 +4793,9 @@ i915_gem_init_hw(struct drm_device *dev)
 	if (INTEL_INFO(dev)->gen < 6 && !intel_enable_gtt())
 		return -EIO;
 
+	/* Double layer security blanket, see i915_gem_init() */
+	intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL);
+
 	if (dev_priv->ellc_size)
 		I915_WRITE(HSW_IDICR, I915_READ(HSW_IDICR) | IDIHASHMSK(0xf));
@@ -4824,7 +4828,7 @@ i915_gem_init_hw(struct drm_device *dev)
 	for_each_ring(ring, dev_priv, i) {
 		ret = ring->init_hw(ring);
 		if (ret)
-			return ret;
+			goto out;
 	}
 
 	for (i = 0; i < NUM_L3_SLICES(dev); i++)
@@ -4841,9 +4845,11 @@ i915_gem_init_hw(struct drm_device *dev)
 		DRM_ERROR("Context enable failed %d\n", ret);
 		i915_gem_cleanup_ringbuffer(dev);
 
-		return ret;
+		goto out;
 	}
 
+out:
+	intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);
 	return ret;
 }
@@ -4877,6 +4883,14 @@ int i915_gem_init(struct drm_device *dev)
 		dev_priv->gt.stop_ring = intel_logical_ring_stop;
 	}
 
+	/* This is just a security blanket to placate dragons.
+	 * On some systems, we very sporadically observe that the first TLBs
+	 * used by the CS may be stale, despite us poking the TLB reset. If
+	 * we hold the forcewake during initialisation these problems
+	 * just magically go away.
+	 */
+	intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL);
+
 	ret = i915_gem_init_userptr(dev);
 	if (ret)
 		goto out_unlock;
@@ -4903,6 +4917,7 @@ int i915_gem_init(struct drm_device *dev)
 	}
 
 out_unlock:
+	intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);
 	mutex_unlock(&dev->struct_mutex);
 
 	return ret;
......
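Taken together, the two wait-ioctl hunks restore the WAIT_IOCTL timeout semantics that treating every timeout <= 0 as an instant -ETIME had broken. A compact restatement of the contract exactly as the new code implements it:

/* timeout_ns == 0: poll only - if the object is still busy, fail fast */
if (args->timeout_ns == 0)
	return -ETIME;

/* timeout_ns > 0: bounded wait; the remaining time is written back
 * through the pointer.  timeout_ns < 0: wait indefinitely, signalled
 * to __i915_wait_request() by passing a NULL timeout pointer. */
ret = __i915_wait_request(req, reset_counter, true,
			  args->timeout_ns > 0 ? &args->timeout_ns : NULL,
			  file->driver_priv);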
@@ -9716,7 +9716,7 @@ void intel_check_page_flip(struct drm_device *dev, int pipe)
 	struct drm_crtc *crtc = dev_priv->pipe_to_crtc_mapping[pipe];
 	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
 
-	WARN_ON(!in_irq());
+	WARN_ON(!in_interrupt());
 
 	if (crtc == NULL)
 		return;
......
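For reference, the distinction the one-line intel_display.c change relies on, as I understand the context predicates of this era's preempt_mask.h (worth double-checking against the header); presumably the flip check can now run from softirq context, where the stricter assertion fired spuriously:

/* in_irq():       true only in hard-IRQ context
 * in_softirq():   true in softirq context (or with BHs disabled)
 * in_interrupt(): hardirq || softirq || NMI */
WARN_ON(!in_interrupt());	/* still rules out plain process context */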
@@ -1048,8 +1048,14 @@ static void intel_uncore_fw_domains_init(struct drm_device *dev)
 
 		/* We need to init first for ECOBUS access and then
 		 * determine later if we want to reinit, in case of MT access is
-		 * not working
+		 * not working. In this stage we don't know which flavour this
+		 * ivb is, so it is better to reset also the gen6 fw registers
+		 * before the ecobus check.
 		 */
+
+		__raw_i915_write32(dev_priv, FORCEWAKE, 0);
+		__raw_posting_read(dev_priv, ECOBUS);
+
 		fw_domain_init(dev_priv, FW_DOMAIN_ID_RENDER,
 			       FORCEWAKE_MT, FORCEWAKE_MT_ACK);
......
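The posting read is the load-bearing detail here: MMIO writes may be posted (buffered) on the way to the device, so the driver reads a register back to force the gen6-style FORCEWAKE clear to land before the ECOBUS probe decides between MT and gen6 forcewake. The generic idiom, with made-up register names for illustration:

/* generic MMIO posting-read idiom (illustrative register names) */
writel(0, regs + REG_FORCEWAKE);	/* write may sit in a posting buffer */
(void)readl(regs + REG_STATUS);		/* a read from the same device flushes
					 * all posted writes ahead of it */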
@@ -1030,37 +1030,59 @@ static inline bool radeon_test_signaled(struct radeon_fence *fence)
 	return test_bit(FENCE_FLAG_SIGNALED_BIT, &fence->base.flags);
 }
 
+struct radeon_wait_cb {
+	struct fence_cb base;
+	struct task_struct *task;
+};
+
+static void
+radeon_fence_wait_cb(struct fence *fence, struct fence_cb *cb)
+{
+	struct radeon_wait_cb *wait =
+		container_of(cb, struct radeon_wait_cb, base);
+
+	wake_up_process(wait->task);
+}
+
 static signed long radeon_fence_default_wait(struct fence *f, bool intr,
 					     signed long t)
 {
 	struct radeon_fence *fence = to_radeon_fence(f);
 	struct radeon_device *rdev = fence->rdev;
-	bool signaled;
+	struct radeon_wait_cb cb;
+
+	cb.task = current;
 
-	fence_enable_sw_signaling(&fence->base);
+	if (fence_add_callback(f, &cb.base, radeon_fence_wait_cb))
+		return t;
+
+	while (t > 0) {
+		if (intr)
+			set_current_state(TASK_INTERRUPTIBLE);
+		else
+			set_current_state(TASK_UNINTERRUPTIBLE);
+
+		/*
+		 * radeon_test_signaled must be called after
+		 * set_current_state to prevent a race with wake_up_process
+		 */
+		if (radeon_test_signaled(fence))
+			break;
+
+		if (rdev->needs_reset) {
+			t = -EDEADLK;
+			break;
+		}
+
+		t = schedule_timeout(t);
+
+		if (t > 0 && intr && signal_pending(current))
+			t = -ERESTARTSYS;
+	}
+
+	__set_current_state(TASK_RUNNING);
+	fence_remove_callback(f, &cb.base);
 
-	/*
-	 * This function has to return -EDEADLK, but cannot hold
-	 * exclusive_lock during the wait because some callers
-	 * may already hold it. This means checking needs_reset without
-	 * lock, and not fiddling with any gpu internals.
-	 *
-	 * The callback installed with fence_enable_sw_signaling will
-	 * run before our wait_event_*timeout call, so we will see
-	 * both the signaled fence and the changes to needs_reset.
-	 */
-	if (intr)
-		t = wait_event_interruptible_timeout(rdev->fence_queue,
-			((signaled = radeon_test_signaled(fence)) ||
-			 rdev->needs_reset), t);
-	else
-		t = wait_event_timeout(rdev->fence_queue,
-			((signaled = radeon_test_signaled(fence)) ||
-			 rdev->needs_reset), t);
-
-	if (t > 0 && !signaled)
-		return -EDEADLK;
 	return t;
 }
......
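The replacement wait loop is the canonical manual-wait pattern, and the ordering called out in the in-line comment is the whole fix: the fence callback wakes the task with wake_up_process(), so the task must set its sleep state before re-checking the condition. With the opposite order, a wakeup arriving between the check and the state change would be lost and the waiter would sleep for the full timeout. The generic shape (a sketch, not driver source):

for (;;) {
	set_current_state(TASK_UNINTERRUPTIBLE);	/* 1: mark as sleeping    */
	if (condition)					/* 2: re-check afterwards */
		break;					/* wakeup cannot be lost  */
	timeout = schedule_timeout(timeout);		/* 3: actually sleep      */
	if (timeout <= 0)
		break;					/* timed out              */
}
__set_current_state(TASK_RUNNING);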
@@ -7130,8 +7130,7 @@ int si_set_uvd_clocks(struct radeon_device *rdev, u32 vclk, u32 dclk)
 	WREG32_P(CG_UPLL_FUNC_CNTL, UPLL_BYPASS_EN_MASK, ~UPLL_BYPASS_EN_MASK);
 
 	if (!vclk || !dclk) {
-		/* keep the Bypass mode, put PLL to sleep */
-		WREG32_P(CG_UPLL_FUNC_CNTL, UPLL_SLEEP_MASK, ~UPLL_SLEEP_MASK);
+		/* keep the Bypass mode */
 		return 0;
 	}
@@ -7147,8 +7146,7 @@ int si_set_uvd_clocks(struct radeon_device *rdev, u32 vclk, u32 dclk)
 	/* set VCO_MODE to 1 */
 	WREG32_P(CG_UPLL_FUNC_CNTL, UPLL_VCO_MODE_MASK, ~UPLL_VCO_MODE_MASK);
 
-	/* toggle UPLL_SLEEP to 1 then back to 0 */
-	WREG32_P(CG_UPLL_FUNC_CNTL, UPLL_SLEEP_MASK, ~UPLL_SLEEP_MASK);
+	/* disable sleep mode */
 	WREG32_P(CG_UPLL_FUNC_CNTL, 0, ~UPLL_SLEEP_MASK);
 
 	/* deassert UPLL_RESET */
......
@@ -725,32 +725,6 @@ static int vmw_driver_load(struct drm_device *dev, unsigned long chipset)
 		goto out_err1;
 	}
 
-	ret = ttm_bo_init_mm(&dev_priv->bdev, TTM_PL_VRAM,
-			     (dev_priv->vram_size >> PAGE_SHIFT));
-	if (unlikely(ret != 0)) {
-		DRM_ERROR("Failed initializing memory manager for VRAM.\n");
-		goto out_err2;
-	}
-
-	dev_priv->has_gmr = true;
-	if (((dev_priv->capabilities & (SVGA_CAP_GMR | SVGA_CAP_GMR2)) == 0) ||
-	    refuse_dma || ttm_bo_init_mm(&dev_priv->bdev, VMW_PL_GMR,
-					 VMW_PL_GMR) != 0) {
-		DRM_INFO("No GMR memory available. "
-			 "Graphics memory resources are very limited.\n");
-		dev_priv->has_gmr = false;
-	}
-
-	if (dev_priv->capabilities & SVGA_CAP_GBOBJECTS) {
-		dev_priv->has_mob = true;
-		if (ttm_bo_init_mm(&dev_priv->bdev, VMW_PL_MOB,
-				   VMW_PL_MOB) != 0) {
-			DRM_INFO("No MOB memory available. "
-				 "3D will be disabled.\n");
-			dev_priv->has_mob = false;
-		}
-	}
-
 	dev_priv->mmio_mtrr = arch_phys_wc_add(dev_priv->mmio_start,
 					       dev_priv->mmio_size);
@@ -813,6 +787,33 @@ static int vmw_driver_load(struct drm_device *dev, unsigned long chipset)
 		goto out_no_fman;
 	}
 
+	ret = ttm_bo_init_mm(&dev_priv->bdev, TTM_PL_VRAM,
+			     (dev_priv->vram_size >> PAGE_SHIFT));
+	if (unlikely(ret != 0)) {
+		DRM_ERROR("Failed initializing memory manager for VRAM.\n");
+		goto out_no_vram;
+	}
+
+	dev_priv->has_gmr = true;
+	if (((dev_priv->capabilities & (SVGA_CAP_GMR | SVGA_CAP_GMR2)) == 0) ||
+	    refuse_dma || ttm_bo_init_mm(&dev_priv->bdev, VMW_PL_GMR,
+					 VMW_PL_GMR) != 0) {
+		DRM_INFO("No GMR memory available. "
+			 "Graphics memory resources are very limited.\n");
+		dev_priv->has_gmr = false;
+	}
+
+	if (dev_priv->capabilities & SVGA_CAP_GBOBJECTS) {
+		dev_priv->has_mob = true;
+		if (ttm_bo_init_mm(&dev_priv->bdev, VMW_PL_MOB,
+				   VMW_PL_MOB) != 0) {
+			DRM_INFO("No MOB memory available. "
+				 "3D will be disabled.\n");
+			dev_priv->has_mob = false;
+		}
+	}
+
 	vmw_kms_save_vga(dev_priv);
 
 	/* Start kms and overlay systems, needs fifo. */
@@ -838,6 +839,12 @@ static int vmw_driver_load(struct drm_device *dev, unsigned long chipset)
 	vmw_kms_close(dev_priv);
 out_no_kms:
 	vmw_kms_restore_vga(dev_priv);
+	if (dev_priv->has_mob)
+		(void) ttm_bo_clean_mm(&dev_priv->bdev, VMW_PL_MOB);
+	if (dev_priv->has_gmr)
+		(void) ttm_bo_clean_mm(&dev_priv->bdev, VMW_PL_GMR);
+	(void)ttm_bo_clean_mm(&dev_priv->bdev, TTM_PL_VRAM);
+out_no_vram:
 	vmw_fence_manager_takedown(dev_priv->fman);
 out_no_fman:
 	if (dev_priv->capabilities & SVGA_CAP_IRQMASK)
@@ -853,12 +860,6 @@ static int vmw_driver_load(struct drm_device *dev, unsigned long chipset)
 	iounmap(dev_priv->mmio_virt);
 out_err3:
 	arch_phys_wc_del(dev_priv->mmio_mtrr);
-	if (dev_priv->has_mob)
-		(void) ttm_bo_clean_mm(&dev_priv->bdev, VMW_PL_MOB);
-	if (dev_priv->has_gmr)
-		(void) ttm_bo_clean_mm(&dev_priv->bdev, VMW_PL_GMR);
-	(void)ttm_bo_clean_mm(&dev_priv->bdev, TTM_PL_VRAM);
-out_err2:
 	(void)ttm_bo_device_release(&dev_priv->bdev);
 out_err1:
 	vmw_ttm_global_release(dev_priv);
@@ -887,6 +888,13 @@ static int vmw_driver_unload(struct drm_device *dev)
 	}
 	vmw_kms_close(dev_priv);
 	vmw_overlay_close(dev_priv);
+
+	if (dev_priv->has_mob)
+		(void) ttm_bo_clean_mm(&dev_priv->bdev, VMW_PL_MOB);
+	if (dev_priv->has_gmr)
+		(void)ttm_bo_clean_mm(&dev_priv->bdev, VMW_PL_GMR);
+	(void)ttm_bo_clean_mm(&dev_priv->bdev, TTM_PL_VRAM);
+
 	vmw_fence_manager_takedown(dev_priv->fman);
 	if (dev_priv->capabilities & SVGA_CAP_IRQMASK)
 		drm_irq_uninstall(dev_priv->dev);
@@ -898,11 +906,6 @@ static int vmw_driver_unload(struct drm_device *dev)
 	ttm_object_device_release(&dev_priv->tdev);
 	iounmap(dev_priv->mmio_virt);
 	arch_phys_wc_del(dev_priv->mmio_mtrr);
-	if (dev_priv->has_mob)
-		(void) ttm_bo_clean_mm(&dev_priv->bdev, VMW_PL_MOB);
-	if (dev_priv->has_gmr)
-		(void)ttm_bo_clean_mm(&dev_priv->bdev, VMW_PL_GMR);
-	(void)ttm_bo_clean_mm(&dev_priv->bdev, TTM_PL_VRAM);
 	(void)ttm_bo_device_release(&dev_priv->bdev);
 
 	vmw_ttm_global_release(dev_priv);
@@ -1235,6 +1238,7 @@ static void vmw_remove(struct pci_dev *pdev)
 {
 	struct drm_device *dev = pci_get_drvdata(pdev);
 
+	pci_disable_device(pdev);
 	drm_put_dev(dev);
 }
......
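The reordering makes init and teardown strictly symmetric: the TTM memory managers are now brought up last in vmw_driver_load() (after the fence manager) and torn down first, both on the error path (note the new out_no_vram label) and in vmw_driver_unload(), and vmw_remove() disables the PCI device before drm_put_dev(), which is the irq-line fix named in the merge summary. A self-contained sketch of the LIFO unwind shape the probe path follows (made-up step names):

#include <stdio.h>

static int init_a(void) { puts("init a"); return 0; }
static int init_b(void) { puts("init b"); return 0; }
static int init_c(void) { puts("init c"); return -1; }	/* fails */
static void fini_b(void) { puts("fini b"); }
static void fini_a(void) { puts("fini a"); }

static int probe(void)
{
	int ret;

	if ((ret = init_a()))
		return ret;
	if ((ret = init_b()))	/* depends on a */
		goto out_a;
	if ((ret = init_c()))	/* depends on b */
		goto out_b;
	return 0;

out_b:				/* unwind strictly in reverse order */
	fini_b();
out_a:
	fini_a();
	return ret;
}

int main(void) { return probe() ? 1 : 0; }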
@@ -890,7 +890,8 @@ static int vmw_translate_mob_ptr(struct vmw_private *dev_priv,
 	ret = vmw_user_dmabuf_lookup(sw_context->fp->tfile, handle, &vmw_bo);
 	if (unlikely(ret != 0)) {
 		DRM_ERROR("Could not find or use MOB buffer.\n");
-		return -EINVAL;
+		ret = -EINVAL;
+		goto out_no_reloc;
 	}
 	bo = &vmw_bo->base;
@@ -914,7 +915,7 @@ static int vmw_translate_mob_ptr(struct vmw_private *dev_priv,
 
 out_no_reloc:
 	vmw_dmabuf_unreference(&vmw_bo);
-	vmw_bo_p = NULL;
+	*vmw_bo_p = NULL;
 	return ret;
 }
@@ -951,7 +952,8 @@ static int vmw_translate_guest_ptr(struct vmw_private *dev_priv,
 	ret = vmw_user_dmabuf_lookup(sw_context->fp->tfile, handle, &vmw_bo);
 	if (unlikely(ret != 0)) {
 		DRM_ERROR("Could not find or use GMR region.\n");
-		return -EINVAL;
+		ret = -EINVAL;
+		goto out_no_reloc;
 	}
 	bo = &vmw_bo->base;
@@ -974,7 +976,7 @@ static int vmw_translate_guest_ptr(struct vmw_private *dev_priv,
 
 out_no_reloc:
 	vmw_dmabuf_unreference(&vmw_bo);
-	vmw_bo_p = NULL;
+	*vmw_bo_p = NULL;
 	return ret;
 }
@@ -2780,13 +2782,11 @@ int vmw_execbuf_ioctl(struct drm_device *dev, void *data,
 				  NULL, arg->command_size, arg->throttle_us,
 				  (void __user *)(unsigned long)arg->fence_rep,
 				  NULL);
+	ttm_read_unlock(&dev_priv->reservation_sem);
 	if (unlikely(ret != 0))
-		goto out_unlock;
+		return ret;
 
 	vmw_kms_cursor_post_execbuf(dev_priv);
 
-out_unlock:
-	ttm_read_unlock(&dev_priv->reservation_sem);
-	return ret;
+	return 0;
 }
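Two distinct bugs are fixed in the translate helpers above: the early return on lookup failure skipped the out_no_reloc cleanup entirely, and the cleanup path itself assigned NULL to the parameter (vmw_bo_p) rather than through it (*vmw_bo_p), leaving the caller's pointer dangling. A runnable toy version of the corrected out-parameter discipline (hypothetical names):

#include <stdio.h>
#include <stdlib.h>

struct buf { int handle; };

/* hypothetical lookup helper, for illustration only */
static int lookup_buf(int handle, struct buf **out)
{
	struct buf *b = NULL;
	int ret = 0;

	if (handle < 0) {
		ret = -22;		/* -EINVAL */
		goto out_err;		/* don't return early: fall through
					 * so the out-param gets NULLed */
	}
	b = malloc(sizeof(*b));
	b->handle = handle;
	*out = b;
	return 0;

out_err:
	free(b);
	*out = NULL;	/* "*out = NULL", not "out = NULL": assigning the
			 * parameter only changes the local copy */
	return ret;
}

int main(void)
{
	struct buf *b;

	if (lookup_buf(-1, &b) != 0)
		printf("lookup failed, b == %p (safe to test)\n", (void *)b);
	return 0;
}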
@@ -2033,23 +2033,17 @@ int vmw_kms_update_layout_ioctl(struct drm_device *dev, void *data,
 	int i;
 	struct drm_mode_config *mode_config = &dev->mode_config;
 
-	ret = ttm_read_lock(&dev_priv->reservation_sem, true);
-	if (unlikely(ret != 0))
-		return ret;
-
 	if (!arg->num_outputs) {
 		struct drm_vmw_rect def_rect = {0, 0, 800, 600};
 		vmw_du_update_layout(dev_priv, 1, &def_rect);
-		goto out_unlock;
+		return 0;
 	}
 
 	rects_size = arg->num_outputs * sizeof(struct drm_vmw_rect);
 	rects = kcalloc(arg->num_outputs, sizeof(struct drm_vmw_rect),
 			GFP_KERNEL);
-	if (unlikely(!rects)) {
-		ret = -ENOMEM;
-		goto out_unlock;
-	}
+	if (unlikely(!rects))
+		return -ENOMEM;
 
 	user_rects = (void __user *)(unsigned long)arg->rects;
 	ret = copy_from_user(rects, user_rects, rects_size);
@@ -2074,7 +2068,5 @@ int vmw_kms_update_layout_ioctl(struct drm_device *dev, void *data,
 
 out_free:
 	kfree(rects);
-out_unlock:
-	ttm_read_unlock(&dev_priv->reservation_sem);
 	return ret;
 }