Commit 267d51d7 authored by Andrey Grodzovsky

drm/ttm: Remap all page faults to per process dummy page.

On device removal, reroute all CPU mappings to the dummy page.

v3:
Remove the loop to find the DRM file and instead access it
via vma->vm_file->private_data. Move dummy page installation
into a separate function.

v4:
Map the entire BO's VA space to an on-demand allocated dummy page
on the first fault for that BO.

v5: Remove duplicate return.

v6: Polish ttm_bo_vm_dummy_page, remove superfluous code.
Signed-off-by: Andrey Grodzovsky <andrey.grodzovsky@amd.com>
Reviewed-by: Christian König <christian.koenig@amd.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20210512142648.666476-2-andrey.grodzovsky@amd.com
parent e52e4a31
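
For context (an illustrative sketch, not part of the patch): TTM BOs reach
ttm_bo_vm_fault() through the vm_operations_struct a driver installs at mmap
time, so any driver whose .fault hook is the default TTM handler picks up the
dummy-page fallback below with no driver changes. The table name
mydrv_ttm_vm_ops is hypothetical; the four helpers are existing TTM exports.

static const struct vm_operations_struct mydrv_ttm_vm_ops = {
	.fault = ttm_bo_vm_fault,	/* gains the unplug fallback below */
	.open = ttm_bo_vm_open,
	.close = ttm_bo_vm_close,
	.access = ttm_bo_vm_access,
};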
@@ -34,6 +34,8 @@
 #include <drm/ttm/ttm_bo_driver.h>
 #include <drm/ttm/ttm_placement.h>
 #include <drm/drm_vma_manager.h>
+#include <drm/drm_drv.h>
+#include <drm/drm_managed.h>
 #include <linux/mm.h>
 #include <linux/pfn_t.h>
 #include <linux/rbtree.h>
@@ -380,19 +382,69 @@ vm_fault_t ttm_bo_vm_fault_reserved(struct vm_fault *vmf,
 }
 EXPORT_SYMBOL(ttm_bo_vm_fault_reserved);
 
+static void ttm_bo_release_dummy_page(struct drm_device *dev, void *res)
+{
+	struct page *dummy_page = (struct page *)res;
+
+	__free_page(dummy_page);
+}
+
+vm_fault_t ttm_bo_vm_dummy_page(struct vm_fault *vmf, pgprot_t prot)
+{
+	struct vm_area_struct *vma = vmf->vma;
+	struct ttm_buffer_object *bo = vma->vm_private_data;
+	struct drm_device *ddev = bo->base.dev;
+	vm_fault_t ret = VM_FAULT_NOPAGE;
+	unsigned long address;
+	unsigned long pfn;
+	struct page *page;
+
+	/* Allocate new dummy page to map all the VA range in this VMA to it */
+	page = alloc_page(GFP_KERNEL | __GFP_ZERO);
+	if (!page)
+		return VM_FAULT_OOM;
+
+	/* Set the page to be freed using drmm release action */
+	if (drmm_add_action_or_reset(ddev, ttm_bo_release_dummy_page, page))
+		return VM_FAULT_OOM;
+
+	pfn = page_to_pfn(page);
+
+	/* Prefault the entire VMA range right away to avoid further faults */
+	for (address = vma->vm_start; address < vma->vm_end;
+	     address += PAGE_SIZE) {
+		if (vma->vm_flags & VM_MIXEDMAP)
+			ret = vmf_insert_mixed_prot(vma, address,
+						    __pfn_to_pfn_t(pfn, PFN_DEV),
+						    prot);
+		else
+			ret = vmf_insert_pfn_prot(vma, address, pfn, prot);
+	}
+
+	return ret;
+}
+EXPORT_SYMBOL(ttm_bo_vm_dummy_page);
+
 vm_fault_t ttm_bo_vm_fault(struct vm_fault *vmf)
 {
 	struct vm_area_struct *vma = vmf->vma;
 	pgprot_t prot;
 	struct ttm_buffer_object *bo = vma->vm_private_data;
+	struct drm_device *ddev = bo->base.dev;
 	vm_fault_t ret;
+	int idx;
 
 	ret = ttm_bo_vm_reserve(bo, vmf);
 	if (ret)
 		return ret;
 
 	prot = vma->vm_page_prot;
-	ret = ttm_bo_vm_fault_reserved(vmf, prot, TTM_BO_VM_NUM_PREFAULT, 1);
+	if (drm_dev_enter(ddev, &idx)) {
+		ret = ttm_bo_vm_fault_reserved(vmf, prot, TTM_BO_VM_NUM_PREFAULT, 1);
+		drm_dev_exit(idx);
+	} else {
+		ret = ttm_bo_vm_dummy_page(vmf, prot);
+	}
 	if (ret == VM_FAULT_RETRY && !(vmf->flags & FAULT_FLAG_RETRY_NOWAIT))
 		return ret;
@@ -620,4 +620,6 @@ int ttm_bo_vm_access(struct vm_area_struct *vma, unsigned long addr,
 		     void *buf, int len, int write);
 bool ttm_bo_delayed_delete(struct ttm_device *bdev, bool remove_all);
 
+vm_fault_t ttm_bo_vm_dummy_page(struct vm_fault *vmf, pgprot_t prot);
+
 #endif
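
Usage note (a sketch under stated assumptions, not code from this patch): a
driver that implements its own fault handler instead of using
ttm_bo_vm_fault() can reproduce the same unplug behaviour by bracketing its
hardware path with drm_dev_enter()/drm_dev_exit() and falling back to the new
helper once the device is gone. mydrv_ttm_fault is a hypothetical driver
function; everything it calls is exported above or by dma-resv.

static vm_fault_t mydrv_ttm_fault(struct vm_fault *vmf)
{
	struct ttm_buffer_object *bo = vmf->vma->vm_private_data;
	struct drm_device *ddev = bo->base.dev;
	pgprot_t prot = vmf->vma->vm_page_prot;
	vm_fault_t ret;
	int idx;

	/* Serialize against eviction/moves; may return VM_FAULT_RETRY. */
	ret = ttm_bo_vm_reserve(bo, vmf);
	if (ret)
		return ret;

	if (drm_dev_enter(ddev, &idx)) {
		/* Device still present: fault in the real BO pages. */
		ret = ttm_bo_vm_fault_reserved(vmf, prot,
					       TTM_BO_VM_NUM_PREFAULT, 1);
		drm_dev_exit(idx);
	} else {
		/* Device unplugged: map the whole VMA to the dummy page. */
		ret = ttm_bo_vm_dummy_page(vmf, prot);
	}

	if (ret == VM_FAULT_RETRY && !(vmf->flags & FAULT_FLAG_RETRY_NOWAIT))
		return ret;

	dma_resv_unlock(bo->base.resv);
	return ret;
}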