Commit 9e9a928e authored by Linus Torvalds

Merge branch 'drm-fixes' of git://people.freedesktop.org/~airlied/linux

Pull drm fixes from Dave Airlie:
 "All fairly small: radeon stability and a panic path fix.

  Mostly radeon fixes: a suspend/resume fix, stability fixes for the CIK
  chipsets, and a locking-check avoidance patch for a panic-time
  regression"

* 'drm-fixes' of git://people.freedesktop.org/~airlied/linux:
  drm/radeon: use the CP DMA on CIK
  drm/radeon: sync page table updates
  drm/radeon: fix vm buffer size estimation
  drm/crtc-helper: skip locking checks in panicking path
  drm/radeon/dpm: resume fixes for some systems
parents d2cfd310 0a4ae727
drivers/gpu/drm/drm_crtc_helper.c
@@ -29,6 +29,7 @@
* Jesse Barnes <jesse.barnes@intel.com>
*/
#include <linux/kernel.h>
#include <linux/export.h>
#include <linux/moduleparam.h>
@@ -88,7 +89,13 @@ bool drm_helper_encoder_in_use(struct drm_encoder *encoder)
struct drm_connector *connector;
struct drm_device *dev = encoder->dev;
/*
* We can expect this mutex to be locked if we are not panicking.
* Locking is currently fubar in the panic handler.
*/
if (!oops_in_progress)
WARN_ON(!mutex_is_locked(&dev->mode_config.mutex));
list_for_each_entry(connector, &dev->mode_config.connector_list, head)
if (connector->encoder == encoder)
return true;
@@ -112,7 +119,13 @@ bool drm_helper_crtc_in_use(struct drm_crtc *crtc)
struct drm_encoder *encoder;
struct drm_device *dev = crtc->dev;
/*
* We can expect this mutex to be locked if we are not panicking.
* Locking is currently fubar in the panic handler.
*/
if (!oops_in_progress)
WARN_ON(!mutex_is_locked(&dev->mode_config.mutex));
list_for_each_entry(encoder, &dev->mode_config.encoder_list, head)
if (encoder->crtc == crtc && drm_helper_encoder_in_use(encoder))
return true;
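The drm_crtc_helper change above only asserts that mode_config.mutex is held when the kernel is not in the middle of an oops, since locking state cannot be trusted in the panic path. Below is a minimal, self-contained C sketch of that guard pattern; oops_in_progress, the mutex state and WARN_ON are stubbed stand-ins for the kernel primitives, not the DRM code itself.

/* Illustrative userspace sketch of the "skip lock assertions while
 * panicking" pattern; the kernel's oops_in_progress flag, mutex helpers
 * and WARN_ON are replaced by simple stand-ins. */
#include <stdbool.h>
#include <stdio.h>

static int oops_in_progress;            /* set non-zero by the panic path */
static bool mode_config_mutex_locked;   /* stand-in for mutex_is_locked() */

#define WARN_ON(cond) \
    do { if (cond) fprintf(stderr, "WARN_ON(%s)\n", #cond); } while (0)

static void assert_mode_config_locked(void)
{
    /*
     * Locking state is unreliable in the panic handler, so only
     * complain about a missing lock when we are not panicking.
     */
    if (!oops_in_progress)
        WARN_ON(!mode_config_mutex_locked);
}

int main(void)
{
    assert_mode_config_locked();   /* warns: lock not held */
    oops_in_progress = 1;
    assert_mode_config_locked();   /* silent: check skipped while panicking */
    return 0;
}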
drivers/gpu/drm/radeon/atombios_crtc.c
@@ -270,8 +270,6 @@ void atombios_crtc_dpms(struct drm_crtc *crtc, int mode)
switch (mode) {
case DRM_MODE_DPMS_ON:
radeon_crtc->enabled = true;
/* adjust pm to dpms changes BEFORE enabling crtcs */
radeon_pm_compute_clocks(rdev);
atombios_enable_crtc(crtc, ATOM_ENABLE);
if (ASIC_IS_DCE3(rdev) && !ASIC_IS_DCE6(rdev))
atombios_enable_crtc_memreq(crtc, ATOM_ENABLE);
@@ -289,10 +287,10 @@ void atombios_crtc_dpms(struct drm_crtc *crtc, int mode)
atombios_enable_crtc_memreq(crtc, ATOM_DISABLE);
atombios_enable_crtc(crtc, ATOM_DISABLE);
radeon_crtc->enabled = false;
/* adjust pm to dpms changes AFTER disabling crtcs */
radeon_pm_compute_clocks(rdev);
break;
}
/* adjust pm to dpms */
radeon_pm_compute_clocks(rdev);
}
static void
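The two atombios_crtc_dpms() hunks above replace the per-case calls to radeon_pm_compute_clocks() (before enabling, after disabling) with a single call after the switch, so every DPMS transition is covered. A simplified sketch of that control-flow shape follows; the types and helpers are illustrative stand-ins, not the radeon driver's API.

/* Simplified sketch of the control-flow change: recompute power-
 * management clocks once after the switch instead of inside the
 * individual DPMS cases.  All names here are stand-ins. */
#include <stdio.h>

enum dpms_mode { DPMS_ON, DPMS_STANDBY, DPMS_SUSPEND, DPMS_OFF };

struct crtc { int enabled; };

static void enable_crtc(struct crtc *c)  { c->enabled = 1; }
static void disable_crtc(struct crtc *c) { c->enabled = 0; }
static void compute_pm_clocks(struct crtc *c)
{
    printf("recomputing clocks, crtc %s\n", c->enabled ? "on" : "off");
}

static void crtc_dpms(struct crtc *c, enum dpms_mode mode)
{
    switch (mode) {
    case DPMS_ON:
        enable_crtc(c);
        break;
    case DPMS_STANDBY:
    case DPMS_SUSPEND:
    case DPMS_OFF:
        disable_crtc(c);
        break;
    }
    /* adjust pm to dpms: one call site covers every transition */
    compute_pm_clocks(c);
}

int main(void)
{
    struct crtc c = { .enabled = 0 };
    crtc_dpms(&c, DPMS_ON);
    crtc_dpms(&c, DPMS_OFF);
    return 0;
}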
drivers/gpu/drm/radeon/radeon_asic.c
@@ -2049,8 +2049,8 @@ static struct radeon_asic ci_asic = {
.blit_ring_index = RADEON_RING_TYPE_GFX_INDEX,
.dma = &cik_copy_dma,
.dma_ring_index = R600_RING_TYPE_DMA_INDEX,
.copy = &cik_copy_dma,
.copy_ring_index = R600_RING_TYPE_DMA_INDEX,
.copy = &cik_copy_cpdma,
.copy_ring_index = RADEON_RING_TYPE_GFX_INDEX,
},
.surface = {
.set_reg = r600_set_surface_reg,
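The ci_asic entry above switches the default buffer-copy callback for CIK from the sDMA engine to CP DMA on the graphics ring. The following sketch only illustrates the general ops-table idea behind that one-line swap, using hypothetical struct and function names rather than radeon's real definitions.

/* Illustrative ops table: a per-ASIC struct selects which engine
 * services buffer copies and which ring those copies run on.  The
 * names are hypothetical stand-ins for the radeon .copy block. */
#include <stdint.h>
#include <stdio.h>

struct copy_ops {
    int (*copy)(uint64_t src, uint64_t dst, unsigned num_pages);
    int copy_ring_index;
};

static int copy_via_cpdma(uint64_t src, uint64_t dst, unsigned num_pages)
{
    (void)src; (void)dst;
    printf("CP DMA copy of %u pages on the GFX ring\n", num_pages);
    return 0;
}

/* The fix above amounts to this: point .copy at the CP DMA path and
 * schedule default copies on the GFX ring instead of the sDMA ring. */
static const struct copy_ops ci_copy_ops = {
    .copy            = copy_via_cpdma,
    .copy_ring_index = 0,   /* stand-in for RADEON_RING_TYPE_GFX_INDEX */
};

int main(void)
{
    return ci_copy_ops.copy(0x1000, 0x2000, 16);
}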
drivers/gpu/drm/radeon/radeon_device.c
@@ -1558,6 +1558,10 @@ int radeon_resume_kms(struct drm_device *dev, bool resume, bool fbcon)
drm_kms_helper_poll_enable(dev);
/* set the power state here in case we are a PX system or headless */
if ((rdev->pm.pm_method == PM_METHOD_DPM) && rdev->pm.dpm_enabled)
radeon_pm_compute_clocks(rdev);
if (fbcon) {
radeon_fbdev_set_suspend(rdev, 0);
console_unlock();
drivers/gpu/drm/radeon/radeon_pm.c
@@ -1104,7 +1104,6 @@ static void radeon_pm_resume_dpm(struct radeon_device *rdev)
if (ret)
goto dpm_resume_fail;
rdev->pm.dpm_enabled = true;
radeon_pm_compute_clocks(rdev);
return;
dpm_resume_fail:
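Taken together, the radeon_device.c and radeon_pm.c hunks defer the power-state recalculation from the DPM resume helper to the end of radeon_resume_kms(), and only perform it when DPM actually came up. A simplified sketch of that ordering and guard follows; the structures and helpers are stand-ins, not radeon's API.

/* Simplified sketch of the resume-ordering change: recompute the power
 * state once at the end of resume, after display state and output
 * polling are restored, and only when dynamic PM actually came up. */
#include <stdbool.h>
#include <stdio.h>

enum pm_method { PM_METHOD_PROFILE, PM_METHOD_DPM };

struct device_pm {
    enum pm_method method;
    bool dpm_enabled;
};

static void compute_clocks(struct device_pm *pm)
{
    printf("recomputing power state (dpm_enabled=%d)\n", pm->dpm_enabled);
}

static void resume(struct device_pm *pm, bool fbcon)
{
    /* ... restore rings, modeset state, re-enable output polling ... */

    /* set the power state here in case we are a PX system or headless */
    if (pm->method == PM_METHOD_DPM && pm->dpm_enabled)
        compute_clocks(pm);

    if (fbcon) {
        /* wake the fbdev console last */
    }
}

int main(void)
{
    struct device_pm pm = { .method = PM_METHOD_DPM, .dpm_enabled = true };
    resume(&pm, true);
    return 0;
}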
drivers/gpu/drm/radeon/radeon_vm.c
@@ -132,7 +132,7 @@ struct radeon_cs_reloc *radeon_vm_get_bos(struct radeon_device *rdev,
struct radeon_cs_reloc *list;
unsigned i, idx;
list = kmalloc_array(vm->max_pde_used + 1,
list = kmalloc_array(vm->max_pde_used + 2,
sizeof(struct radeon_cs_reloc), GFP_KERNEL);
if (!list)
return NULL;
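The kmalloc_array change above grows the allocation from max_pde_used + 1 to max_pde_used + 2 entries. Assuming the returned list holds the page directory itself plus one reloc entry per page table with indices 0 through max_pde_used (an assumption based on the surrounding code, not stated in this excerpt), the size arithmetic works out as in this small sketch:

/* Hedged sketch of the arithmetic behind the +2: one slot for the page
 * directory plus (max_pde_used + 1) slots for the page tables gives
 * max_pde_used + 2 entries in total. */
#include <stdio.h>

static unsigned bo_list_entries(unsigned max_pde_used)
{
    unsigned page_directory = 1;
    unsigned page_tables    = max_pde_used + 1;  /* indices 0..max_pde_used */

    return page_directory + page_tables;
}

int main(void)
{
    printf("%u\n", bo_list_entries(7));  /* 9, i.e. max_pde_used + 2 */
    return 0;
}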
@@ -585,7 +585,8 @@ int radeon_vm_update_page_directory(struct radeon_device *rdev,
{
static const uint32_t incr = RADEON_VM_PTE_COUNT * 8;
uint64_t pd_addr = radeon_bo_gpu_offset(vm->page_directory);
struct radeon_bo *pd = vm->page_directory;
uint64_t pd_addr = radeon_bo_gpu_offset(pd);
uint64_t last_pde = ~0, last_pt = ~0;
unsigned count = 0, pt_idx, ndw;
struct radeon_ib ib;
@@ -642,6 +643,7 @@ int radeon_vm_update_page_directory(struct radeon_device *rdev,
incr, R600_PTE_VALID);
if (ib.length_dw != 0) {
radeon_semaphore_sync_to(ib.semaphore, pd->tbo.sync_obj);
radeon_semaphore_sync_to(ib.semaphore, vm->last_id_use);
r = radeon_ib_schedule(rdev, &ib, NULL);
if (r) {
@@ -689,15 +691,18 @@ static void radeon_vm_update_ptes(struct radeon_device *rdev,
/* walk over the address space and update the page tables */
for (addr = start; addr < end; ) {
uint64_t pt_idx = addr >> RADEON_VM_BLOCK_SIZE;
struct radeon_bo *pt = vm->page_tables[pt_idx].bo;
unsigned nptes;
uint64_t pte;
radeon_semaphore_sync_to(ib->semaphore, pt->tbo.sync_obj);
if ((addr & ~mask) == (end & ~mask))
nptes = end - addr;
else
nptes = RADEON_VM_PTE_COUNT - (addr & mask);
pte = radeon_bo_gpu_offset(vm->page_tables[pt_idx].bo);
pte = radeon_bo_gpu_offset(pt);
pte += (addr & mask) * 8;
if ((last_pte + 8 * count) != pte) {
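The radeon_vm.c hunks above make the page-table update IB wait on the fence last attached to the page directory and to each page table buffer (their tbo.sync_obj) before the update is scheduled. The sketch below shows only the general "sync to the buffer's last fence before queuing new work" idea, with stand-in types rather than radeon's fence and IB structures.

/* Illustrative sketch of the synchronization pattern added above:
 * before scheduling a command buffer that rewrites a page table, make
 * it wait on whatever fence last touched that table's backing buffer. */
#include <stdbool.h>
#include <stdio.h>

struct fence   { int seq; bool signalled; };
struct buffer  { struct fence *last_fence; };
struct cmdbuf  { struct fence *wait_on[8]; unsigned nwaits; };

static void sync_to(struct cmdbuf *ib, struct fence *f)
{
    if (f && !f->signalled && ib->nwaits < 8)
        ib->wait_on[ib->nwaits++] = f;   /* GPU waits before executing ib */
}

static void update_page_table(struct cmdbuf *ib, struct buffer *pt)
{
    sync_to(ib, pt->last_fence);         /* don't race prior users of pt */
    /* ... emit the PTE writes into ib ... */
}

int main(void)
{
    struct fence move  = { .seq = 42, .signalled = false };
    struct buffer pt   = { .last_fence = &move };
    struct cmdbuf ib   = { .nwaits = 0 };

    update_page_table(&ib, &pt);
    printf("ib waits on %u fence(s)\n", ib.nwaits);
    return 0;
}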