Commit a095c60b authored by Dave Airlie

Merge tag 'ttm-next-2014-01-13' of git://people.freedesktop.org/~thomash/linux into drm-next

Some code cleanup by Rashika Kheria, and VM changes for TTM:

- Use VM_PFNMAP instead of VM_MIXEDMAP where possible, for performance.
- Refuse to fault imported pages; an initial step towards supporting
  dma-bufs better from within TTM.
- Correctly set the page mapping and index members. These are needed in
  various places in the vm subsystem that we are not using yet but plan
  to use soon: for example unmap_mapping_range() keeping COW pages,
  fbdefio-style dirty tracking, and also PCI memory. (A condensed sketch
  of the resulting mmap/fault logic follows the shortlog below.)

ttm-next 2014-01-13 pull request

* tag 'ttm-next-2014-01-13' of git://people.freedesktop.org/~thomash/linux:
  drivers: gpu: Remove unused function in ttm_lock.c
  drivers: gpu: Mark function as static in ttm_bo_util.c
  drivers: gpu: Mark function as static in ttm_bo.c
  drm/ttm: Correctly set page mapping and -index members
  drm/ttm: Refuse to fault (prime-) imported pages
  drm/ttm: Use VM_PFNMAP for shared bo maps
parents 99d4a8ae 52028704
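
Taken together, the VM-facing hunks below reduce to roughly the following
selection logic. This is a condensed sketch against the 3.13-era APIs shown
in the diffs (vm_insert_pfn()/vm_insert_mixed() still took raw pfns), not
verbatim kernel code; the ttm_sketch_* names are invented for illustration.

#include <linux/mm.h>
#include <drm/ttm/ttm_bo_api.h>
#include <drm/ttm/ttm_bo_driver.h>

/* mmap time: prefer VM_PFNMAP (no per-page administration); keep
 * VM_MIXEDMAP only for private VMAs, which need COW. */
static void ttm_sketch_set_vma_flags(struct vm_area_struct *vma)
{
	vma->vm_flags |= (vma->vm_flags & VM_SHARED) ? VM_PFNMAP : VM_MIXEDMAP;
	vma->vm_flags |= VM_IO | VM_DONTEXPAND | VM_DONTDUMP;
}

/* fault time: refuse imported (prime) pages, then insert the PTE with
 * the primitive matching the VMA type chosen above. */
static int ttm_sketch_fault_one(struct ttm_buffer_object *bo,
				struct vm_area_struct *vma,
				unsigned long address, unsigned long pfn)
{
	int ret;

	/* Imported buffers must be mapped via the exporter instead. */
	if (bo->ttm && (bo->ttm->page_flags & TTM_PAGE_FLAG_SG))
		return VM_FAULT_SIGBUS;

	if (vma->vm_flags & VM_MIXEDMAP)
		ret = vm_insert_mixed(vma, address, pfn);
	else
		ret = vm_insert_pfn(vma, address, pfn);

	/* -EBUSY: somebody beat us to the PTE; that is not an error. */
	return (ret == 0 || ret == -EBUSY) ? VM_FAULT_NOPAGE : VM_FAULT_OOM;
}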
--- a/drivers/gpu/drm/ttm/ttm_bo.c
+++ b/drivers/gpu/drm/ttm/ttm_bo.c
@@ -957,7 +957,7 @@ int ttm_bo_mem_space(struct ttm_buffer_object *bo,
 }
 EXPORT_SYMBOL(ttm_bo_mem_space);
 
-int ttm_bo_move_buffer(struct ttm_buffer_object *bo,
+static int ttm_bo_move_buffer(struct ttm_buffer_object *bo,
 			struct ttm_placement *placement,
 			bool interruptible,
 			bool no_wait_gpu)
--- a/drivers/gpu/drm/ttm/ttm_bo_util.c
+++ b/drivers/gpu/drm/ttm/ttm_bo_util.c
@@ -187,7 +187,7 @@ void ttm_mem_io_free_vm(struct ttm_buffer_object *bo)
 	}
 }
 
-int ttm_mem_reg_ioremap(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem,
+static int ttm_mem_reg_ioremap(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem,
 			void **virtual)
 {
 	struct ttm_mem_type_manager *man = &bdev->man[mem->mem_type];
@@ -219,7 +219,7 @@ int ttm_mem_reg_ioremap(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem,
 	return 0;
 }
 
-void ttm_mem_reg_iounmap(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem,
+static void ttm_mem_reg_iounmap(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem,
 			 void *virtual)
 {
 	struct ttm_mem_type_manager *man;
--- a/drivers/gpu/drm/ttm/ttm_bo_vm.c
+++ b/drivers/gpu/drm/ttm/ttm_bo_vm.c
@@ -132,6 +132,15 @@ static int ttm_bo_vm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
 			return VM_FAULT_NOPAGE;
 	}
 
+	/*
+	 * Refuse to fault imported pages. This should be handled
+	 * (if at all) by redirecting mmap to the exporter.
+	 */
+	if (bo->ttm && (bo->ttm->page_flags & TTM_PAGE_FLAG_SG)) {
+		retval = VM_FAULT_SIGBUS;
+		goto out_unlock;
+	}
+
 	if (bdev->driver->fault_reserve_notify) {
 		ret = bdev->driver->fault_reserve_notify(bo);
 		switch (ret) {
@@ -217,10 +226,17 @@ static int ttm_bo_vm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
 		} else if (unlikely(!page)) {
 			break;
 		}
+		page->mapping = vma->vm_file->f_mapping;
+		page->index = drm_vma_node_start(&bo->vma_node) +
+			page_offset;
 		pfn = page_to_pfn(page);
 	}
 
-	ret = vm_insert_mixed(&cvma, address, pfn);
+	if (vma->vm_flags & VM_MIXEDMAP)
+		ret = vm_insert_mixed(&cvma, address, pfn);
+	else
+		ret = vm_insert_pfn(&cvma, address, pfn);
+
 	/*
 	 * Somebody beat us to this PTE or prefaulting to
 	 * an already populated PTE, or prefaulting error.
@@ -250,6 +266,8 @@ static void ttm_bo_vm_open(struct vm_area_struct *vma)
 	struct ttm_buffer_object *bo =
 	    (struct ttm_buffer_object *)vma->vm_private_data;
 
+	WARN_ON(bo->bdev->dev_mapping != vma->vm_file->f_mapping);
+
 	(void)ttm_bo_reference(bo);
 }
 
@@ -319,7 +337,14 @@ int ttm_bo_mmap(struct file *filp, struct vm_area_struct *vma,
 	 */
 
 	vma->vm_private_data = bo;
-	vma->vm_flags |= VM_IO | VM_MIXEDMAP | VM_DONTEXPAND | VM_DONTDUMP;
+
+	/*
+	 * PFNMAP is faster than MIXEDMAP due to reduced page
+	 * administration. So use MIXEDMAP only if private VMA, where
+	 * we need to support COW.
+	 */
+	vma->vm_flags |= (vma->vm_flags & VM_SHARED) ? VM_PFNMAP : VM_MIXEDMAP;
+	vma->vm_flags |= VM_IO | VM_DONTEXPAND | VM_DONTDUMP;
 	return 0;
 out_unref:
 	ttm_bo_unref(&bo);
@@ -334,7 +359,8 @@ int ttm_fbdev_mmap(struct vm_area_struct *vma, struct ttm_buffer_object *bo)
 
 	vma->vm_ops = &ttm_bo_vm_ops;
 	vma->vm_private_data = ttm_bo_reference(bo);
-	vma->vm_flags |= VM_IO | VM_MIXEDMAP | VM_DONTEXPAND;
+	vma->vm_flags |= (vma->vm_flags & VM_SHARED) ? VM_PFNMAP : VM_MIXEDMAP;
+	vma->vm_flags |= VM_IO | VM_DONTEXPAND;
 	return 0;
 }
 EXPORT_SYMBOL(ttm_fbdev_mmap);
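
The flag selection in ttm_bo_mmap() applies to every TTM driver, since the
function is reached through a driver's file_operations.mmap hook. Roughly how
such a hook forwarded to TTM in drivers of that era (compare radeon_mmap() and
vmw_mmap()); the my_* names here are hypothetical:

#include <drm/drmP.h>
#include <drm/ttm/ttm_bo_api.h>
#include <drm/ttm/ttm_bo_driver.h>

/* Hypothetical driver-private structure embedding the TTM device. */
struct my_drm_device {
	struct ttm_bo_device bdev;
};

static int my_drm_mmap(struct file *filp, struct vm_area_struct *vma)
{
	struct drm_file *file_priv = filp->private_data;
	struct my_drm_device *mdev = file_priv->minor->dev->dev_private;

	/* ttm_bo_mmap() resolves the bo behind vma->vm_pgoff and now
	 * sets VM_PFNMAP or VM_MIXEDMAP as in the hunks above. */
	return ttm_bo_mmap(filp, vma, &mdev->bdev);
}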
--- a/drivers/gpu/drm/ttm/ttm_lock.c
+++ b/drivers/gpu/drm/ttm/ttm_lock.c
@@ -186,14 +186,6 @@ int ttm_write_lock(struct ttm_lock *lock, bool interruptible)
 }
 EXPORT_SYMBOL(ttm_write_lock);
 
-void ttm_write_lock_downgrade(struct ttm_lock *lock)
-{
-	spin_lock(&lock->lock);
-	lock->rw = 1;
-	wake_up_all(&lock->queue);
-	spin_unlock(&lock->lock);
-}
-
 static int __ttm_vt_unlock(struct ttm_lock *lock)
 {
 	int ret = 0;
--- a/drivers/gpu/drm/ttm/ttm_tt.c
+++ b/drivers/gpu/drm/ttm/ttm_tt.c
@@ -170,9 +170,8 @@ void ttm_tt_destroy(struct ttm_tt *ttm)
 		ttm_tt_unbind(ttm);
 	}
 
-	if (ttm->state == tt_unbound) {
-		ttm->bdev->driver->ttm_tt_unpopulate(ttm);
-	}
+	if (ttm->state == tt_unbound)
+		ttm_tt_unpopulate(ttm);
 
 	if (!(ttm->page_flags & TTM_PAGE_FLAG_PERSISTENT_SWAP) &&
 	    ttm->swap_storage)
@@ -362,7 +361,7 @@ int ttm_tt_swapout(struct ttm_tt *ttm, struct file *persistent_swap_storage)
 		page_cache_release(to_page);
 	}
 
-	ttm->bdev->driver->ttm_tt_unpopulate(ttm);
+	ttm_tt_unpopulate(ttm);
 	ttm->swap_storage = swap_storage;
 	ttm->page_flags |= TTM_PAGE_FLAG_SWAPPED;
 	if (persistent_swap_storage)
@@ -375,3 +374,23 @@ int ttm_tt_swapout(struct ttm_tt *ttm, struct file *persistent_swap_storage)
 
 	return ret;
 }
+
+static void ttm_tt_clear_mapping(struct ttm_tt *ttm)
+{
+	pgoff_t i;
+	struct page **page = ttm->pages;
+
+	for (i = 0; i < ttm->num_pages; ++i) {
+		(*page)->mapping = NULL;
+		(*page++)->index = 0;
+	}
+}
+
+void ttm_tt_unpopulate(struct ttm_tt *ttm)
+{
+	if (ttm->state == tt_unpopulated)
+		return;
+
+	ttm_tt_clear_mapping(ttm);
+	ttm->bdev->driver->ttm_tt_unpopulate(ttm);
+}
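
The new ttm_tt_unpopulate() wrapper clears the mapping and index members
before calling into the driver, so driver backends need no change. For
reference, a minimal sketch of such a backend, assuming it delegates to TTM's
generic page pool via ttm_pool_unpopulate() as radeon and nouveau did; the
my_ name is hypothetical:

#include <drm/ttm/ttm_bo_driver.h>
#include <drm/ttm/ttm_page_alloc.h>

/* Hypothetical backend; reached via the new ttm_tt_unpopulate()
 * wrapper, which has already cleared page->mapping/page->index. */
static void my_ttm_tt_unpopulate(struct ttm_tt *ttm)
{
	ttm_pool_unpopulate(ttm);	/* return pages to the pool */
}

A driver wires this up through the .ttm_tt_unpopulate member of its
struct ttm_bo_driver.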
--- a/include/drm/ttm/ttm_bo_driver.h
+++ b/include/drm/ttm/ttm_bo_driver.h
@@ -681,6 +681,15 @@ extern int ttm_tt_set_placement_caching(struct ttm_tt *ttm, uint32_t placement);
 extern int ttm_tt_swapout(struct ttm_tt *ttm,
 			  struct file *persistent_swap_storage);
 
+/**
+ * ttm_tt_unpopulate - free pages from a ttm
+ *
+ * @ttm: Pointer to the ttm_tt structure
+ *
+ * Calls the driver method to free all pages from a ttm
+ */
+extern void ttm_tt_unpopulate(struct ttm_tt *ttm);
+
 /*
  * ttm_bo.c
  */