Commit 80e06794 authored by Linus Torvalds

Merge branch 'drm-fixes' of git://people.freedesktop.org/~airlied/linux

Pull drm fixes from Dave Airlie:
 "Mostly quiet now:

  i915:
    fixing userspace visible issues, all stable marked

  radeon:
    one more pll fix, two crashers, one suspend/resume regression"

* 'drm-fixes' of git://people.freedesktop.org/~airlied/linux:
  drm/radeon: Resume fbcon last
  drm/radeon: only allocate necessary size for vm bo list
  drm/radeon: don't allow RADEON_GEM_DOMAIN_CPU for command submission
  drm/radeon: avoid crash if VM command submission isn't available
  drm/radeon: lower the ref * post PLL maximum once more
  drm/i915: Prevent negative relocation deltas from wrapping
  drm/i915: Only copy back the modified fields to userspace from execbuffer
  drm/i915: Fix dynamic allocation of physical handles
parents 9f12600f 18ee37a4
@@ -1833,7 +1833,6 @@ int i915_driver_unload(struct drm_device *dev)
 	flush_workqueue(dev_priv->wq);
 	mutex_lock(&dev->struct_mutex);
-	i915_gem_free_all_phys_object(dev);
 	i915_gem_cleanup_ringbuffer(dev);
 	i915_gem_context_fini(dev);
 	WARN_ON(dev_priv->mm.aliasing_ppgtt);
...
@@ -242,18 +242,6 @@ struct intel_ddi_plls {
 #define WATCH_LISTS	0
 #define WATCH_GTT	0
-#define I915_GEM_PHYS_CURSOR_0 1
-#define I915_GEM_PHYS_CURSOR_1 2
-#define I915_GEM_PHYS_OVERLAY_REGS 3
-#define I915_MAX_PHYS_OBJECT (I915_GEM_PHYS_OVERLAY_REGS)
-
-struct drm_i915_gem_phys_object {
-	int id;
-	struct page **page_list;
-	drm_dma_handle_t *handle;
-	struct drm_i915_gem_object *cur_obj;
-};
 struct opregion_header;
 struct opregion_acpi;
 struct opregion_swsci;

@@ -1187,9 +1175,6 @@ struct i915_gem_mm {
 	/** Bit 6 swizzling required for Y tiling */
 	uint32_t bit_6_swizzle_y;
-	/* storage for physical objects */
-	struct drm_i915_gem_phys_object *phys_objs[I915_MAX_PHYS_OBJECT];
 	/* accounting, useful for userland debugging */
 	spinlock_t object_stat_lock;
 	size_t object_memory;

@@ -1769,7 +1754,7 @@ struct drm_i915_gem_object {
 	struct drm_file *pin_filp;
 	/** for phy allocated objects */
-	struct drm_i915_gem_phys_object *phys_obj;
+	drm_dma_handle_t *phys_handle;
 };

 #define to_intel_bo(x) container_of(x, struct drm_i915_gem_object, base)

@@ -2204,10 +2189,12 @@ void i915_gem_vma_destroy(struct i915_vma *vma);
 #define PIN_MAPPABLE 0x1
 #define PIN_NONBLOCK 0x2
 #define PIN_GLOBAL 0x4
+#define PIN_OFFSET_BIAS 0x8
+#define PIN_OFFSET_MASK (~4095)
 int __must_check i915_gem_object_pin(struct drm_i915_gem_object *obj,
				     struct i915_address_space *vm,
				     uint32_t alignment,
-				     unsigned flags);
+				     uint64_t flags);
 int __must_check i915_vma_unbind(struct i915_vma *vma);
 int i915_gem_object_put_pages(struct drm_i915_gem_object *obj);
 void i915_gem_release_all_mmaps(struct drm_i915_private *dev_priv);

@@ -2334,13 +2321,8 @@ i915_gem_object_pin_to_display_plane(struct drm_i915_gem_object *obj,
				     u32 alignment,
				     struct intel_ring_buffer *pipelined);
 void i915_gem_object_unpin_from_display_plane(struct drm_i915_gem_object *obj);
-int i915_gem_attach_phys_object(struct drm_device *dev,
-				struct drm_i915_gem_object *obj,
-				int id,
-				int align);
-void i915_gem_detach_phys_object(struct drm_device *dev,
-				struct drm_i915_gem_object *obj);
-void i915_gem_free_all_phys_object(struct drm_device *dev);
+int i915_gem_object_attach_phys(struct drm_i915_gem_object *obj,
+				int align);
 int i915_gem_open(struct drm_device *dev, struct drm_file *file);
 void i915_gem_release(struct drm_device *dev, struct drm_file *file);

@@ -2465,6 +2447,8 @@ int __must_check i915_gem_evict_something(struct drm_device *dev,
					  int min_size,
					  unsigned alignment,
					  unsigned cache_level,
+					  unsigned long start,
+					  unsigned long end,
					  unsigned flags);
 int i915_gem_evict_vm(struct i915_address_space *vm, bool do_idle);
 int i915_gem_evict_everything(struct drm_device *dev);
...
This diff is collapsed.
@@ -68,9 +68,9 @@ mark_free(struct i915_vma *vma, struct list_head *unwind)
 int
 i915_gem_evict_something(struct drm_device *dev, struct i915_address_space *vm,
			 int min_size, unsigned alignment, unsigned cache_level,
+			 unsigned long start, unsigned long end,
			 unsigned flags)
 {
-	struct drm_i915_private *dev_priv = dev->dev_private;
 	struct list_head eviction_list, unwind_list;
 	struct i915_vma *vma;
 	int ret = 0;

@@ -102,11 +102,10 @@ i915_gem_evict_something(struct drm_device *dev, struct i915_address_space *vm,
	 */
 	INIT_LIST_HEAD(&unwind_list);
-	if (flags & PIN_MAPPABLE) {
-		BUG_ON(!i915_is_ggtt(vm));
+	if (start != 0 || end != vm->total) {
 		drm_mm_init_scan_with_range(&vm->mm, min_size,
-					    alignment, cache_level, 0,
-					    dev_priv->gtt.mappable_end);
+					    alignment, cache_level,
+					    start, end);
 	} else
 		drm_mm_init_scan(&vm->mm, min_size, alignment, cache_level);
...
@@ -35,6 +35,9 @@
 #define  __EXEC_OBJECT_HAS_PIN (1<<31)
 #define  __EXEC_OBJECT_HAS_FENCE (1<<30)
+#define  __EXEC_OBJECT_NEEDS_BIAS (1<<28)
+
+#define BATCH_OFFSET_BIAS (256*1024)

 struct eb_vmas {
 	struct list_head vmas;

@@ -545,7 +548,7 @@ i915_gem_execbuffer_reserve_vma(struct i915_vma *vma,
 	struct drm_i915_gem_exec_object2 *entry = vma->exec_entry;
 	bool has_fenced_gpu_access = INTEL_INFO(ring->dev)->gen < 4;
 	bool need_fence;
-	unsigned flags;
+	uint64_t flags;
 	int ret;

 	flags = 0;

@@ -559,6 +562,8 @@ i915_gem_execbuffer_reserve_vma(struct i915_vma *vma,
 	if (entry->flags & EXEC_OBJECT_NEEDS_GTT)
 		flags |= PIN_GLOBAL;
+	if (entry->flags & __EXEC_OBJECT_NEEDS_BIAS)
+		flags |= BATCH_OFFSET_BIAS | PIN_OFFSET_BIAS;

 	ret = i915_gem_object_pin(obj, vma->vm, entry->alignment, flags);
 	if (ret)

@@ -592,6 +597,36 @@ i915_gem_execbuffer_reserve_vma(struct i915_vma *vma,
 	return 0;
 }

+static bool
+eb_vma_misplaced(struct i915_vma *vma, bool has_fenced_gpu_access)
+{
+	struct drm_i915_gem_exec_object2 *entry = vma->exec_entry;
+	struct drm_i915_gem_object *obj = vma->obj;
+	bool need_fence, need_mappable;
+
+	need_fence =
+		has_fenced_gpu_access &&
+		entry->flags & EXEC_OBJECT_NEEDS_FENCE &&
+		obj->tiling_mode != I915_TILING_NONE;
+	need_mappable = need_fence || need_reloc_mappable(vma);
+
+	WARN_ON((need_mappable || need_fence) &&
+	       !i915_is_ggtt(vma->vm));
+
+	if (entry->alignment &&
+	    vma->node.start & (entry->alignment - 1))
+		return true;
+
+	if (need_mappable && !obj->map_and_fenceable)
+		return true;
+
+	if (entry->flags & __EXEC_OBJECT_NEEDS_BIAS &&
+	    vma->node.start < BATCH_OFFSET_BIAS)
+		return true;
+
+	return false;
+}
+
 static int
 i915_gem_execbuffer_reserve(struct intel_ring_buffer *ring,
			    struct list_head *vmas,
@@ -653,26 +688,10 @@ i915_gem_execbuffer_reserve(struct intel_ring_buffer *ring,
 		/* Unbind any ill-fitting objects or pin. */
 		list_for_each_entry(vma, vmas, exec_list) {
-			struct drm_i915_gem_exec_object2 *entry = vma->exec_entry;
-			bool need_fence, need_mappable;
-
-			obj = vma->obj;
-
 			if (!drm_mm_node_allocated(&vma->node))
 				continue;

-			need_fence =
-				has_fenced_gpu_access &&
-				entry->flags & EXEC_OBJECT_NEEDS_FENCE &&
-				obj->tiling_mode != I915_TILING_NONE;
-			need_mappable = need_fence || need_reloc_mappable(vma);
-
-			WARN_ON((need_mappable || need_fence) &&
-			       !i915_is_ggtt(vma->vm));
-
-			if ((entry->alignment &&
-			     vma->node.start & (entry->alignment - 1)) ||
-			    (need_mappable && !obj->map_and_fenceable))
+			if (eb_vma_misplaced(vma, has_fenced_gpu_access))
 				ret = i915_vma_unbind(vma);
 			else
 				ret = i915_gem_execbuffer_reserve_vma(vma, ring, need_relocs);

@@ -773,9 +792,9 @@ i915_gem_execbuffer_relocate_slow(struct drm_device *dev,
 		 * relocations were valid.
 		 */
 		for (j = 0; j < exec[i].relocation_count; j++) {
-			if (copy_to_user(&user_relocs[j].presumed_offset,
-					 &invalid_offset,
-					 sizeof(invalid_offset))) {
+			if (__copy_to_user(&user_relocs[j].presumed_offset,
+					   &invalid_offset,
+					   sizeof(invalid_offset))) {
 				ret = -EFAULT;
 				mutex_lock(&dev->struct_mutex);
 				goto err;

@@ -999,6 +1018,25 @@ i915_reset_gen7_sol_offsets(struct drm_device *dev,
 	return 0;
 }

+static struct drm_i915_gem_object *
+eb_get_batch(struct eb_vmas *eb)
+{
+	struct i915_vma *vma = list_entry(eb->vmas.prev, typeof(*vma), exec_list);
+
+	/*
+	 * SNA is doing fancy tricks with compressing batch buffers, which leads
+	 * to negative relocation deltas. Usually that works out ok since the
+	 * relocate address is still positive, except when the batch is placed
+	 * very low in the GTT. Ensure this doesn't happen.
+	 *
+	 * Note that actual hangs have only been observed on gen7, but for
+	 * paranoia do it everywhere.
+	 */
+	vma->exec_entry->flags |= __EXEC_OBJECT_NEEDS_BIAS;
+
+	return vma->obj;
+}
+
 static int
 i915_gem_do_execbuffer(struct drm_device *dev, void *data,
		       struct drm_file *file,

@@ -1153,7 +1191,7 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
 		goto err;

 	/* take note of the batch buffer before we might reorder the lists */
-	batch_obj = list_entry(eb->vmas.prev, struct i915_vma, exec_list)->obj;
+	batch_obj = eb_get_batch(eb);

 	/* Move the objects en-masse into the GTT, evicting if necessary. */
 	need_relocs = (args->flags & I915_EXEC_NO_RELOC) == 0;
@@ -1355,18 +1393,21 @@ i915_gem_execbuffer(struct drm_device *dev, void *data,
 	ret = i915_gem_do_execbuffer(dev, data, file, &exec2, exec2_list);
 	if (!ret) {
+		struct drm_i915_gem_exec_object __user *user_exec_list =
+			to_user_ptr(args->buffers_ptr);
+
 		/* Copy the new buffer offsets back to the user's exec list. */
-		for (i = 0; i < args->buffer_count; i++)
-			exec_list[i].offset = exec2_list[i].offset;
-
-		/* ... and back out to userspace */
-		ret = copy_to_user(to_user_ptr(args->buffers_ptr),
-				   exec_list,
-				   sizeof(*exec_list) * args->buffer_count);
-		if (ret) {
-			ret = -EFAULT;
-			DRM_DEBUG("failed to copy %d exec entries "
-				  "back to user (%d)\n",
-				  args->buffer_count, ret);
+		for (i = 0; i < args->buffer_count; i++) {
+			ret = __copy_to_user(&user_exec_list[i].offset,
+					     &exec2_list[i].offset,
+					     sizeof(user_exec_list[i].offset));
+			if (ret) {
+				ret = -EFAULT;
+				DRM_DEBUG("failed to copy %d exec entries "
+					  "back to user (%d)\n",
+					  args->buffer_count, ret);
+				break;
+			}
 		}
 	}

@@ -1412,14 +1453,21 @@ i915_gem_execbuffer2(struct drm_device *dev, void *data,
 	ret = i915_gem_do_execbuffer(dev, data, file, args, exec2_list);
 	if (!ret) {
 		/* Copy the new buffer offsets back to the user's exec list. */
-		ret = copy_to_user(to_user_ptr(args->buffers_ptr),
-				   exec2_list,
-				   sizeof(*exec2_list) * args->buffer_count);
-		if (ret) {
-			ret = -EFAULT;
-			DRM_DEBUG("failed to copy %d exec entries "
-				  "back to user (%d)\n",
-				  args->buffer_count, ret);
+		struct drm_i915_gem_exec_object2 *user_exec_list =
+				   to_user_ptr(args->buffers_ptr);
+		int i;
+
+		for (i = 0; i < args->buffer_count; i++) {
+			ret = __copy_to_user(&user_exec_list[i].offset,
+					     &exec2_list[i].offset,
+					     sizeof(user_exec_list[i].offset));
+			if (ret) {
+				ret = -EFAULT;
+				DRM_DEBUG("failed to copy %d exec entries "
+					  "back to user\n",
+					  args->buffer_count);
+				break;
+			}
 		}
 	}
...
@@ -1089,7 +1089,9 @@ static int gen6_ppgtt_allocate_page_directories(struct i915_hw_ppgtt *ppgtt)
 	if (ret == -ENOSPC && !retried) {
 		ret = i915_gem_evict_something(dev, &dev_priv->gtt.base,
					       GEN6_PD_SIZE, GEN6_PD_ALIGN,
-					       I915_CACHE_NONE, 0);
+					       I915_CACHE_NONE,
+					       0, dev_priv->gtt.base.total,
+					       0);
 		if (ret)
 			return ret;
...
@@ -7825,14 +7825,12 @@ static int intel_crtc_cursor_set(struct drm_crtc *crtc,
 		addr = i915_gem_obj_ggtt_offset(obj);
 	} else {
 		int align = IS_I830(dev) ? 16 * 1024 : 256;
-		ret = i915_gem_attach_phys_object(dev, obj,
-						  (intel_crtc->pipe == 0) ? I915_GEM_PHYS_CURSOR_0 : I915_GEM_PHYS_CURSOR_1,
-						  align);
+		ret = i915_gem_object_attach_phys(obj, align);
 		if (ret) {
 			DRM_DEBUG_KMS("failed to attach phys object\n");
 			goto fail_locked;
 		}
-		addr = obj->phys_obj->handle->busaddr;
+		addr = obj->phys_handle->busaddr;
 	}

 	if (IS_GEN2(dev))

@@ -7840,10 +7838,7 @@ static int intel_crtc_cursor_set(struct drm_crtc *crtc,
 finish:
 	if (intel_crtc->cursor_bo) {
-		if (INTEL_INFO(dev)->cursor_needs_physical) {
-			if (intel_crtc->cursor_bo != obj)
-				i915_gem_detach_phys_object(dev, intel_crtc->cursor_bo);
-		} else
+		if (!INTEL_INFO(dev)->cursor_needs_physical)
 			i915_gem_object_unpin_from_display_plane(intel_crtc->cursor_bo);
 		drm_gem_object_unreference(&intel_crtc->cursor_bo->base);
 	}
...
@@ -193,7 +193,7 @@ intel_overlay_map_regs(struct intel_overlay *overlay)
 	struct overlay_registers __iomem *regs;

 	if (OVERLAY_NEEDS_PHYSICAL(overlay->dev))
-		regs = (struct overlay_registers __iomem *)overlay->reg_bo->phys_obj->handle->vaddr;
+		regs = (struct overlay_registers __iomem *)overlay->reg_bo->phys_handle->vaddr;
 	else
 		regs = io_mapping_map_wc(dev_priv->gtt.mappable,
					 i915_gem_obj_ggtt_offset(overlay->reg_bo));

@@ -1340,14 +1340,12 @@ void intel_setup_overlay(struct drm_device *dev)
 	overlay->reg_bo = reg_bo;

 	if (OVERLAY_NEEDS_PHYSICAL(dev)) {
-		ret = i915_gem_attach_phys_object(dev, reg_bo,
-						  I915_GEM_PHYS_OVERLAY_REGS,
-						  PAGE_SIZE);
+		ret = i915_gem_object_attach_phys(reg_bo, PAGE_SIZE);
 		if (ret) {
 			DRM_ERROR("failed to attach phys overlay regs\n");
 			goto out_free_bo;
 		}
-		overlay->flip_addr = reg_bo->phys_obj->handle->busaddr;
+		overlay->flip_addr = reg_bo->phys_handle->busaddr;
 	} else {
 		ret = i915_gem_obj_ggtt_pin(reg_bo, PAGE_SIZE, PIN_MAPPABLE);
 		if (ret) {

@@ -1428,7 +1426,7 @@ intel_overlay_map_regs_atomic(struct intel_overlay *overlay)
 		/* Cast to make sparse happy, but it's wc memory anyway, so
		 * equivalent to the wc io mapping on X86. */
 		regs = (struct overlay_registers __iomem *)
-			overlay->reg_bo->phys_obj->handle->vaddr;
+			overlay->reg_bo->phys_handle->vaddr;
 	else
 		regs = io_mapping_map_atomic_wc(dev_priv->gtt.mappable,
						i915_gem_obj_ggtt_offset(overlay->reg_bo));

@@ -1462,7 +1460,7 @@ intel_overlay_capture_error_state(struct drm_device *dev)
 	error->dovsta = I915_READ(DOVSTA);
 	error->isr = I915_READ(ISR);
 	if (OVERLAY_NEEDS_PHYSICAL(overlay->dev))
-		error->base = (__force long)overlay->reg_bo->phys_obj->handle->vaddr;
+		error->base = (__force long)overlay->reg_bo->phys_handle->vaddr;
 	else
 		error->base = i915_gem_obj_ggtt_offset(overlay->reg_bo);
...
@@ -152,6 +152,12 @@ static int radeon_cs_parser_relocs(struct radeon_cs_parser *p)
 		uint32_t domain = r->write_domain ?
 			r->write_domain : r->read_domains;

+		if (domain & RADEON_GEM_DOMAIN_CPU) {
+			DRM_ERROR("RADEON_GEM_DOMAIN_CPU is not valid "
+				  "for command submission\n");
+			return -EINVAL;
+		}
+
 		p->relocs[i].domain = domain;
 		if (domain == RADEON_GEM_DOMAIN_VRAM)
 			domain |= RADEON_GEM_DOMAIN_GTT;

@@ -342,10 +348,17 @@ int radeon_cs_parser_init(struct radeon_cs_parser *p, void *data)
 			return -EINVAL;

 		/* we only support VM on some SI+ rings */
-		if ((p->rdev->asic->ring[p->ring]->cs_parse == NULL) &&
-		    ((p->cs_flags & RADEON_CS_USE_VM) == 0)) {
-			DRM_ERROR("Ring %d requires VM!\n", p->ring);
-			return -EINVAL;
+		if ((p->cs_flags & RADEON_CS_USE_VM) == 0) {
+			if (p->rdev->asic->ring[p->ring]->cs_parse == NULL) {
+				DRM_ERROR("Ring %d requires VM!\n", p->ring);
+				return -EINVAL;
+			}
+		} else {
+			if (p->rdev->asic->ring[p->ring]->ib_parse == NULL) {
+				DRM_ERROR("VM not supported on ring %d!\n",
+					  p->ring);
+				return -EINVAL;
+			}
 		}
 	}
...
@@ -1533,11 +1533,6 @@ int radeon_resume_kms(struct drm_device *dev, bool resume, bool fbcon)
 	radeon_restore_bios_scratch_regs(rdev);

-	if (fbcon) {
-		radeon_fbdev_set_suspend(rdev, 0);
-		console_unlock();
-	}
-
 	/* init dig PHYs, disp eng pll */
 	if (rdev->is_atom_bios) {
 		radeon_atom_encoder_init(rdev);

@@ -1562,6 +1557,12 @@ int radeon_resume_kms(struct drm_device *dev, bool resume, bool fbcon)
 	}

 	drm_kms_helper_poll_enable(dev);

+	if (fbcon) {
+		radeon_fbdev_set_suspend(rdev, 0);
+		console_unlock();
+	}
+
 	return 0;
 }
...
@@ -862,7 +862,7 @@ static void avivo_get_fb_ref_div(unsigned nom, unsigned den, unsigned post_div,
				 unsigned *fb_div, unsigned *ref_div)
 {
 	/* limit reference * post divider to a maximum */
-	ref_div_max = min(128 / post_div, ref_div_max);
+	ref_div_max = max(min(100 / post_div, ref_div_max), 1u);

 	/* get matching reference and feedback divider */
 	*ref_div = min(max(DIV_ROUND_CLOSEST(den, post_div), 1u), ref_div_max);
...
@@ -130,10 +130,10 @@ struct radeon_cs_reloc *radeon_vm_get_bos(struct radeon_device *rdev,
					  struct list_head *head)
 {
 	struct radeon_cs_reloc *list;
-	unsigned i, idx, size;
+	unsigned i, idx;

-	size = (radeon_vm_num_pdes(rdev) + 1) * sizeof(struct radeon_cs_reloc);
-	list = kmalloc(size, GFP_KERNEL);
+	list = kmalloc_array(vm->max_pde_used + 1,
+			     sizeof(struct radeon_cs_reloc), GFP_KERNEL);
 	if (!list)
 		return NULL;
...