Commit 51335df9 authored by David Herrmann, committed by Dave Airlie

drm/vma: provide drm_vma_node_unmap() helper

Instead of unmapping the nodes in TTM and GEM users manually, we provide
a generic wrapper which does the correct thing for all vma-nodes.

v2: remove bdev->dev_mapping test in ttm_bo_unmap_virtual_unlocked() as
ttm_mem_io_free_vm() does nothing in that case (io_reserved_vm is 0).
v4: Fix docbook comments
v5: use drm_vma_node_size()

Cc: Dave Airlie <airlied@redhat.com>
Cc: Maarten Lankhorst <maarten.lankhorst@canonical.com>
Cc: Thomas Hellstrom <thellstrom@vmware.com>
Signed-off-by: David Herrmann <dh.herrmann@gmail.com>
Reviewed-by: Daniel Vetter <daniel.vetter@ffwll.ch>
Signed-off-by: Dave Airlie <airlied@gmail.com>
parent 72525b3f
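
As a quick illustration of the new helper (not part of this commit), a hypothetical driver's mmap-teardown path collapses to a single call; struct my_gem_object and its fields below are invented for the sketch, only drm_vma_node_unmap() and the dev_mapping pointer come from this patch and its callers:

/* Hypothetical driver code, a minimal sketch only. */
static void my_driver_release_mmap(struct my_gem_object *obj)
{
        /* Tear down all userspace CPU mappings of the object's fake offset.
         * The helper is a no-op if no offset was allocated or if the
         * address_space pointer is NULL, so no extra checks are needed. */
        drm_vma_node_unmap(&obj->vma_node, obj->dev->dev_mapping);
}
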
@@ -1427,11 +1427,7 @@ i915_gem_release_mmap(struct drm_i915_gem_object *obj)
 	if (!obj->fault_mappable)
 		return;
-	if (obj->base.dev->dev_mapping)
-		unmap_mapping_range(obj->base.dev->dev_mapping,
-				    (loff_t)drm_vma_node_offset_addr(&obj->base.vma_node),
-				    obj->base.size, 1);
+	drm_vma_node_unmap(&obj->base.vma_node, obj->base.dev->dev_mapping);
 	obj->fault_mappable = false;
 }
...
@@ -1488,17 +1488,8 @@ bool ttm_mem_reg_is_pci(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem)
 void ttm_bo_unmap_virtual_locked(struct ttm_buffer_object *bo)
 {
 	struct ttm_bo_device *bdev = bo->bdev;
-	loff_t offset, holelen;
-	if (!bdev->dev_mapping)
-		return;
-	if (drm_vma_node_has_offset(&bo->vma_node)) {
-		offset = (loff_t) drm_vma_node_offset_addr(&bo->vma_node);
-		holelen = ((loff_t) bo->mem.num_pages) << PAGE_SHIFT;
-		unmap_mapping_range(bdev->dev_mapping, offset, holelen, 1);
-	}
+	drm_vma_node_unmap(&bo->vma_node, bdev->dev_mapping);
 	ttm_mem_io_free_vm(bo);
 }
...
@@ -24,6 +24,7 @@
  */
 #include <drm/drm_mm.h>
+#include <linux/mm.h>
 #include <linux/module.h>
 #include <linux/rbtree.h>
 #include <linux/spinlock.h>
@@ -199,4 +200,25 @@ static inline __u64 drm_vma_node_offset_addr(struct drm_vma_offset_node *node)
 	return ((__u64)node->vm_node.start) << PAGE_SHIFT;
 }
+/**
+ * drm_vma_node_unmap() - Unmap offset node
+ * @node: Offset node
+ * @file_mapping: Address space to unmap @node from
+ *
+ * Unmap all userspace mappings for a given offset node. The mappings must be
+ * associated with the @file_mapping address-space. If no offset exists or
+ * the address-space is invalid, nothing is done.
+ *
+ * This call is unlocked. The caller must guarantee that drm_vma_offset_remove()
+ * is not called on this node concurrently.
+ */
+static inline void drm_vma_node_unmap(struct drm_vma_offset_node *node,
+				      struct address_space *file_mapping)
+{
+	if (file_mapping && drm_vma_node_has_offset(node))
+		unmap_mapping_range(file_mapping,
+				    drm_vma_node_offset_addr(node),
+				    drm_vma_node_size(node) << PAGE_SHIFT, 1);
+}
 #endif /* __DRM_VMA_MANAGER_H__ */
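
For context on the locking note in the new kerneldoc (again an illustration, not part of this commit), a hypothetical object-free path would unmap userspace mappings before dropping the offset, since the two must not run concurrently on the same node; my_gem_object, obj->dev and obj->mgr are invented names, only the two drm_vma_* helpers are real:

/* Hypothetical free path, a sketch only. */
static void my_gem_free_object(struct my_gem_object *obj)
{
        /* Kill userspace mappings first, then release the mmap offset;
         * per the comment above, these must not race on the same node. */
        drm_vma_node_unmap(&obj->vma_node, obj->dev->dev_mapping);
        drm_vma_offset_remove(obj->mgr, &obj->vma_node);
}
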