Commit a987fcaa authored by Thomas Hellstrom, committed by Dave Airlie

ttm: Make parts of a struct ttm_bo_device global.

Common resources, like memory accounting and swap lists, should be
global and not per device. Introduce a struct ttm_bo_global to
accommodate this, and register it with sysfs. Add a small sysfs interface
to return the number of active buffer objects.
Signed-off-by: Thomas Hellstrom <thellstrom@vmware.com>
Signed-off-by: Dave Airlie <airlied@linux.ie>
parent 5fd9cbad
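
The ttm_bo.c hunk that actually creates the sysfs entry is collapsed further down, so the following is only a minimal sketch of what a read-only attribute returning the number of active buffer objects could look like, assuming it is named after the new atomic_t bo_count member and hooked into the kobject embedded in struct ttm_bo_global (the identifiers below are illustrative, not taken from the collapsed hunk):

static struct attribute ttm_bo_count = {
	.name = "bo_count",
	.mode = S_IRUGO
};

static ssize_t ttm_bo_global_show(struct kobject *kobj,
				  struct attribute *attr, char *buffer)
{
	struct ttm_bo_global *glob =
		container_of(kobj, struct ttm_bo_global, kobj);

	/* bo_count is expected to be incremented on buffer object creation
	 * and decremented on destruction, so this reads the live total. */
	return snprintf(buffer, PAGE_SIZE, "%lu\n",
			(unsigned long)atomic_read(&glob->bo_count));
}

Reading the file then reports the buffer object count for the whole TTM subsystem rather than for a single device.
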
@@ -37,6 +37,7 @@
* TTM.
*/
struct radeon_mman {
+ struct ttm_bo_global_ref bo_global_ref;
struct ttm_global_reference mem_global_ref;
bool mem_global_referenced;
struct ttm_bo_device bdev;
......
@@ -77,9 +77,25 @@ static int radeon_ttm_global_init(struct radeon_device *rdev)
global_ref->release = &radeon_ttm_mem_global_release;
r = ttm_global_item_ref(global_ref);
if (r != 0) {
DRM_ERROR("Failed referencing a global TTM memory object.\n");
DRM_ERROR("Failed setting up TTM memory accounting "
"subsystem.\n");
return r;
}
+ rdev->mman.bo_global_ref.mem_glob =
+ rdev->mman.mem_global_ref.object;
+ global_ref = &rdev->mman.bo_global_ref.ref;
+ global_ref->global_type = TTM_GLOBAL_TTM_BO;
+ global_ref->size = sizeof(struct ttm_mem_global);
+ global_ref->init = &ttm_bo_global_init;
+ global_ref->release = &ttm_bo_global_release;
+ r = ttm_global_item_ref(global_ref);
+ if (r != 0) {
+ DRM_ERROR("Failed setting up TTM BO subsystem.\n");
+ ttm_global_item_unref(&rdev->mman.mem_global_ref);
+ return r;
+ }
rdev->mman.mem_global_referenced = true;
return 0;
}
@@ -87,6 +103,7 @@ static int radeon_ttm_global_init(struct radeon_device *rdev)
static void radeon_ttm_global_fini(struct radeon_device *rdev)
{
if (rdev->mman.mem_global_referenced) {
+ ttm_global_item_unref(&rdev->mman.bo_global_ref.ref);
ttm_global_item_unref(&rdev->mman.mem_global_ref);
rdev->mman.mem_global_referenced = false;
}
@@ -286,9 +303,11 @@ static int radeon_move_vram_ram(struct ttm_buffer_object *bo,
r = ttm_bo_move_ttm(bo, true, no_wait, new_mem);
out_cleanup:
if (tmp_mem.mm_node) {
- spin_lock(&rdev->mman.bdev.lru_lock);
+ struct ttm_bo_global *glob = rdev->mman.bdev.glob;
+ spin_lock(&glob->lru_lock);
drm_mm_put_block(tmp_mem.mm_node);
- spin_unlock(&rdev->mman.bdev.lru_lock);
+ spin_unlock(&glob->lru_lock);
return r;
}
return r;
@@ -323,9 +342,11 @@ static int radeon_move_ram_vram(struct ttm_buffer_object *bo,
}
out_cleanup:
if (tmp_mem.mm_node) {
- spin_lock(&rdev->mman.bdev.lru_lock);
+ struct ttm_bo_global *glob = rdev->mman.bdev.glob;
+ spin_lock(&glob->lru_lock);
drm_mm_put_block(tmp_mem.mm_node);
- spin_unlock(&rdev->mman.bdev.lru_lock);
+ spin_unlock(&glob->lru_lock);
return r;
}
return r;
@@ -441,7 +462,7 @@ int radeon_ttm_init(struct radeon_device *rdev)
}
/* No others user of address space so set it to 0 */
r = ttm_bo_device_init(&rdev->mman.bdev,
- rdev->mman.mem_global_ref.object,
+ rdev->mman.bo_global_ref.ref.object,
&radeon_bo_driver, DRM_FILE_PAGE_OFFSET);
if (r) {
DRM_ERROR("failed initializing buffer object driver(%d).\n", r);
......
This diff is collapsed.
@@ -41,9 +41,9 @@ void ttm_bo_free_old_node(struct ttm_buffer_object *bo)
struct ttm_mem_reg *old_mem = &bo->mem;
if (old_mem->mm_node) {
- spin_lock(&bo->bdev->lru_lock);
+ spin_lock(&bo->glob->lru_lock);
drm_mm_put_block(old_mem->mm_node);
- spin_unlock(&bo->bdev->lru_lock);
+ spin_unlock(&bo->glob->lru_lock);
}
old_mem->mm_node = NULL;
}
......
@@ -166,7 +166,7 @@ static void ttm_tt_free_user_pages(struct ttm_tt *ttm)
set_page_dirty_lock(page);
ttm->pages[i] = NULL;
- ttm_mem_global_free(ttm->bdev->mem_glob, PAGE_SIZE);
+ ttm_mem_global_free(ttm->glob->mem_glob, PAGE_SIZE);
put_page(page);
}
ttm->state = tt_unpopulated;
@@ -177,8 +177,7 @@ static void ttm_tt_free_user_pages(struct ttm_tt *ttm)
static struct page *__ttm_tt_get_page(struct ttm_tt *ttm, int index)
{
struct page *p;
- struct ttm_bo_device *bdev = ttm->bdev;
- struct ttm_mem_global *mem_glob = bdev->mem_glob;
+ struct ttm_mem_global *mem_glob = ttm->glob->mem_glob;
int ret;
while (NULL == (p = ttm->pages[index])) {
@@ -348,7 +347,7 @@ static void ttm_tt_free_alloced_pages(struct ttm_tt *ttm)
printk(KERN_ERR TTM_PFX
"Erroneous page count. "
"Leaking pages.\n");
- ttm_mem_global_free_page(ttm->bdev->mem_glob,
+ ttm_mem_global_free_page(ttm->glob->mem_glob,
cur_page);
__free_page(cur_page);
}
@@ -394,7 +393,7 @@ int ttm_tt_set_user(struct ttm_tt *ttm,
struct mm_struct *mm = tsk->mm;
int ret;
int write = (ttm->page_flags & TTM_PAGE_FLAG_WRITE) != 0;
- struct ttm_mem_global *mem_glob = ttm->bdev->mem_glob;
+ struct ttm_mem_global *mem_glob = ttm->glob->mem_glob;
BUG_ON(num_pages != ttm->num_pages);
BUG_ON((ttm->page_flags & TTM_PAGE_FLAG_USER) == 0);
@@ -439,8 +438,7 @@ struct ttm_tt *ttm_tt_create(struct ttm_bo_device *bdev, unsigned long size,
if (!ttm)
return NULL;
- ttm->bdev = bdev;
+ ttm->glob = bdev->glob;
ttm->num_pages = (size + PAGE_SIZE - 1) >> PAGE_SHIFT;
ttm->first_himem_page = ttm->num_pages;
ttm->last_lomem_page = -1;
......
@@ -155,6 +155,7 @@ struct ttm_buffer_object {
* Members constant at init.
*/
+ struct ttm_bo_global *glob;
struct ttm_bo_device *bdev;
unsigned long buffer_start;
enum ttm_bo_type type;
......
@@ -32,6 +32,7 @@
#include "ttm/ttm_bo_api.h"
#include "ttm/ttm_memory.h"
#include "ttm/ttm_module.h"
#include "drm_mm.h"
#include "linux/workqueue.h"
#include "linux/fs.h"
@@ -160,7 +161,7 @@ struct ttm_tt {
long last_lomem_page;
uint32_t page_flags;
unsigned long num_pages;
- struct ttm_bo_device *bdev;
+ struct ttm_bo_global *glob;
struct ttm_backend *be;
struct task_struct *tsk;
unsigned long start;
@@ -355,24 +356,73 @@ struct ttm_bo_driver {
void *(*sync_obj_ref) (void *sync_obj);
};
- #define TTM_NUM_MEM_TYPES 8
+ /**
+ * struct ttm_bo_global_ref - Argument to initialize a struct ttm_bo_global.
+ */
+ struct ttm_bo_global_ref {
+ struct ttm_global_reference ref;
+ struct ttm_mem_global *mem_glob;
+ };
- #define TTM_BO_PRIV_FLAG_MOVING 0 /* Buffer object is moving and needs
- idling before CPU mapping */
- #define TTM_BO_PRIV_FLAG_MAX 1
/**
- * struct ttm_bo_device - Buffer object driver device-specific data.
+ * struct ttm_bo_global - Buffer object driver global data.
*
* @mem_glob: Pointer to a struct ttm_mem_global object for accounting.
- * @driver: Pointer to a struct ttm_bo_driver struct setup by the driver.
- * @count: Current number of buffer object.
- * @pages: Current number of pinned pages.
* @dummy_read_page: Pointer to a dummy page used for mapping requests
* of unpopulated pages.
- * @shrink: A shrink callback object used for buffre object swap.
+ * @shrink: A shrink callback object used for buffer object swap.
* @ttm_bo_extra_size: Extra size (sizeof(struct ttm_buffer_object) excluded)
* used by a buffer object. This is excluding page arrays and backing pages.
* @ttm_bo_size: This is @ttm_bo_extra_size + sizeof(struct ttm_buffer_object).
+ * @device_list_mutex: Mutex protecting the device list.
+ * This mutex is held while traversing the device list for pm options.
+ * @lru_lock: Spinlock protecting the bo subsystem lru lists.
+ * @device_list: List of buffer object devices.
+ * @swap_lru: Lru list of buffer objects used for swapping.
+ */
+ struct ttm_bo_global {
+ /**
+ * Constant after init.
+ */
+ struct kobject kobj;
+ struct ttm_mem_global *mem_glob;
+ struct page *dummy_read_page;
+ struct ttm_mem_shrink shrink;
+ size_t ttm_bo_extra_size;
+ size_t ttm_bo_size;
+ struct mutex device_list_mutex;
+ spinlock_t lru_lock;
+ /**
+ * Protected by device_list_mutex.
+ */
+ struct list_head device_list;
+ /**
+ * Protected by the lru_lock.
+ */
+ struct list_head swap_lru;
+ /**
+ * Internal protection.
+ */
+ atomic_t bo_count;
+ };
+ #define TTM_NUM_MEM_TYPES 8
+ #define TTM_BO_PRIV_FLAG_MOVING 0 /* Buffer object is moving and needs
+ idling before CPU mapping */
+ #define TTM_BO_PRIV_FLAG_MAX 1
+ /**
+ * struct ttm_bo_device - Buffer object driver device-specific data.
+ *
+ * @driver: Pointer to a struct ttm_bo_driver struct setup by the driver.
* @man: An array of mem_type_managers.
* @addr_space_mm: Range manager for the device address space.
* lru_lock: Spinlock that protects the buffer+device lru lists and
@@ -390,32 +440,21 @@ struct ttm_bo_device {
/*
* Constant after bo device init / atomic.
*/
- struct ttm_mem_global *mem_glob;
+ struct list_head device_list;
+ struct ttm_bo_global *glob;
struct ttm_bo_driver *driver;
- struct page *dummy_read_page;
- struct ttm_mem_shrink shrink;
- size_t ttm_bo_extra_size;
- size_t ttm_bo_size;
rwlock_t vm_lock;
+ struct ttm_mem_type_manager man[TTM_NUM_MEM_TYPES];
/*
* Protected by the vm lock.
*/
- struct ttm_mem_type_manager man[TTM_NUM_MEM_TYPES];
struct rb_root addr_space_rb;
struct drm_mm addr_space_mm;
/*
- * Might want to change this to one lock per manager.
- */
- spinlock_t lru_lock;
- /*
- * Protected by the lru lock.
+ * Protected by the global:lru lock.
*/
struct list_head ddestroy;
- struct list_head swap_lru;
/*
* Protected by load / firstopen / lastclose /unload sync.
@@ -629,6 +668,9 @@ extern int ttm_bo_pci_offset(struct ttm_bo_device *bdev,
unsigned long *bus_offset,
unsigned long *bus_size);
+ extern void ttm_bo_global_release(struct ttm_global_reference *ref);
+ extern int ttm_bo_global_init(struct ttm_global_reference *ref);
extern int ttm_bo_device_release(struct ttm_bo_device *bdev);
/**
@@ -646,7 +688,7 @@ extern int ttm_bo_device_release(struct ttm_bo_device *bdev);
* !0: Failure.
*/
extern int ttm_bo_device_init(struct ttm_bo_device *bdev,
- struct ttm_mem_global *mem_glob,
+ struct ttm_bo_global *glob,
struct ttm_bo_driver *driver,
uint64_t file_page_offset);
......
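
Taken together with the radeon hunks above, the driver-facing pattern after this change is: take a TTM_GLOBAL_TTM_MEM reference, chain a TTM_GLOBAL_TTM_BO reference off its object, and hand the resulting struct ttm_bo_global to ttm_bo_device_init(). A condensed sketch of that sequence, mirroring radeon_ttm_global_init()/radeon_ttm_init(); the mydrv_* identifiers and the surrounding device structure are placeholders, not part of this patch:

static int mydrv_ttm_global_and_device_init(struct mydrv_device *dev)
{
	struct ttm_global_reference *ref;
	int r;

	/* 1. Reference the global memory accounting object. */
	ref = &dev->mem_global_ref;
	ref->global_type = TTM_GLOBAL_TTM_MEM;
	ref->size = sizeof(struct ttm_mem_global);
	ref->init = &mydrv_mem_global_init;		/* driver callback */
	ref->release = &mydrv_mem_global_release;	/* driver callback */
	r = ttm_global_item_ref(ref);
	if (r != 0)
		return r;

	/* 2. Reference the new global buffer object state, built on top
	 *    of the memory accounting object. */
	dev->bo_global_ref.mem_glob = dev->mem_global_ref.object;
	ref = &dev->bo_global_ref.ref;
	ref->global_type = TTM_GLOBAL_TTM_BO;
	ref->size = sizeof(struct ttm_mem_global);
	ref->init = &ttm_bo_global_init;
	ref->release = &ttm_bo_global_release;
	r = ttm_global_item_ref(ref);
	if (r != 0) {
		ttm_global_item_unref(&dev->mem_global_ref);
		return r;
	}

	/* 3. Per-device init now takes the struct ttm_bo_global pointer
	 *    instead of a struct ttm_mem_global pointer. */
	return ttm_bo_device_init(&dev->bdev,
				  dev->bo_global_ref.ref.object,
				  &mydrv_bo_driver, MYDRV_FILE_PAGE_OFFSET);
}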