Commit 576f0586 authored by Chris Wilson

drm/i915: Flush extra hard after writing relocations through the GTT

Recently discovered in commit bdae33b8 ("drm/i915: Use maximum write
flush for pwrite_gtt") was that we need to do a full write barrier
before changing the GGTT PTE, to ensure that our indirect writes through
the GTT land before the PTE is changed (and the writes would otherwise end
up in a different page). That also applies to our GGTT relocation path.
Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Cc: stable@vger.kernel.org
Reviewed-by: Prathap Kumar Valsan <prathap.kumar.valsan@intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20190730112151.5633-4-chris@chris-wilson.co.uk
parent 51fbd8de
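
To illustrate the ordering the patch enforces, here is a minimal sketch (not the kernel code itself) of how a relocation written through the GTT aperture must be flushed before the PTE is repointed. intel_gt_flush_ggtt_writes(), ggtt->vm.insert_page() and i915_gem_object_get_dma_address() are taken from the diff below; the wrapper function and the exact reloc_cache fields it touches are illustrative assumptions only.

/*
 * Sketch only: the wrapper and its field usage are assumptions; the
 * flush and insert_page calls mirror the diff below.
 */
static void reloc_move_to_next_page(struct reloc_cache *cache,
				    struct drm_i915_gem_object *obj,
				    unsigned long page)
{
	struct i915_ggtt *ggtt = cache_to_ggtt(cache);

	/*
	 * Earlier relocation writes went through the GTT aperture into the
	 * page the PTE currently maps. A plain wmb() only orders the CPU
	 * stores; it does not guarantee the write-combining buffers have
	 * drained to memory, so a straggling write could land in whatever
	 * page the PTE points at next. Use the heavyweight GGTT flush first.
	 */
	intel_gt_flush_ggtt_writes(ggtt->vm.gt);

	/* Only now is it safe to repoint the PTE at the new target page. */
	ggtt->vm.insert_page(&ggtt->vm,
			     i915_gem_object_get_dma_address(obj, page),
			     cache->node.start, I915_CACHE_NONE, 0);
}
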
@@ -1014,11 +1014,12 @@ static void reloc_cache_reset(struct reloc_cache *cache)
 		kunmap_atomic(vaddr);
 		i915_gem_object_finish_access((struct drm_i915_gem_object *)cache->node.mm);
 	} else {
-		wmb();
+		struct i915_ggtt *ggtt = cache_to_ggtt(cache);
+
+		intel_gt_flush_ggtt_writes(ggtt->vm.gt);
 		io_mapping_unmap_atomic((void __iomem *)vaddr);
-		if (cache->node.allocated) {
-			struct i915_ggtt *ggtt = cache_to_ggtt(cache);
 
+		if (cache->node.allocated) {
 			ggtt->vm.clear_range(&ggtt->vm,
 					     cache->node.start,
 					     cache->node.size);
@@ -1073,6 +1074,7 @@ static void *reloc_iomap(struct drm_i915_gem_object *obj,
 	void *vaddr;
 
 	if (cache->vaddr) {
+		intel_gt_flush_ggtt_writes(ggtt->vm.gt);
 		io_mapping_unmap_atomic((void __force __iomem *) unmask_page(cache->vaddr));
 	} else {
 		struct i915_vma *vma;
@@ -1114,7 +1116,6 @@ static void *reloc_iomap(struct drm_i915_gem_object *obj,
 
 	offset = cache->node.start;
 	if (cache->node.allocated) {
-		wmb();
 		ggtt->vm.insert_page(&ggtt->vm,
 				     i915_gem_object_get_dma_address(obj, page),
 				     offset, I915_CACHE_NONE, 0);