Commit 5fd9cbad authored by Thomas Hellstrom's avatar Thomas Hellstrom Committed by Dave Airlie

drm/ttm: Memory accounting rework.

Use inclusive zones to simplify accounting and its sysfs representation.
Use DMA32 accounting where applicable.

Add a sysfs interface to make the heuristically determined limits
readable and configurable.
Signed-off-by: Thomas Hellstrom <thellstrom@vmware.com>
Signed-off-by: Dave Airlie <airlied@linux.ie>
parent e9840be8
...@@ -70,7 +70,7 @@ static void ttm_bo_release_list(struct kref *list_kref) ...@@ -70,7 +70,7 @@ static void ttm_bo_release_list(struct kref *list_kref)
if (bo->destroy) if (bo->destroy)
bo->destroy(bo); bo->destroy(bo);
else { else {
ttm_mem_global_free(bdev->mem_glob, bo->acc_size, false); ttm_mem_global_free(bdev->mem_glob, bo->acc_size);
kfree(bo); kfree(bo);
} }
} }
...@@ -1065,14 +1065,14 @@ int ttm_buffer_object_create(struct ttm_bo_device *bdev, ...@@ -1065,14 +1065,14 @@ int ttm_buffer_object_create(struct ttm_bo_device *bdev,
size_t acc_size = size_t acc_size =
ttm_bo_size(bdev, (size + PAGE_SIZE - 1) >> PAGE_SHIFT); ttm_bo_size(bdev, (size + PAGE_SIZE - 1) >> PAGE_SHIFT);
ret = ttm_mem_global_alloc(mem_glob, acc_size, false, false, false); ret = ttm_mem_global_alloc(mem_glob, acc_size, false, false);
if (unlikely(ret != 0)) if (unlikely(ret != 0))
return ret; return ret;
bo = kzalloc(sizeof(*bo), GFP_KERNEL); bo = kzalloc(sizeof(*bo), GFP_KERNEL);
if (unlikely(bo == NULL)) { if (unlikely(bo == NULL)) {
ttm_mem_global_free(mem_glob, acc_size, false); ttm_mem_global_free(mem_glob, acc_size);
return -ENOMEM; return -ENOMEM;
} }
......
...@@ -71,7 +71,7 @@ int ttm_global_item_ref(struct ttm_global_reference *ref) ...@@ -71,7 +71,7 @@ int ttm_global_item_ref(struct ttm_global_reference *ref)
mutex_lock(&item->mutex); mutex_lock(&item->mutex);
if (item->refcount == 0) { if (item->refcount == 0) {
item->object = kmalloc(ref->size, GFP_KERNEL); item->object = kzalloc(ref->size, GFP_KERNEL);
if (unlikely(item->object == NULL)) { if (unlikely(item->object == NULL)) {
ret = -ENOMEM; ret = -ENOMEM;
goto out_err; goto out_err;
...@@ -89,7 +89,6 @@ int ttm_global_item_ref(struct ttm_global_reference *ref) ...@@ -89,7 +89,6 @@ int ttm_global_item_ref(struct ttm_global_reference *ref)
mutex_unlock(&item->mutex); mutex_unlock(&item->mutex);
return 0; return 0;
out_err: out_err:
kfree(item->object);
mutex_unlock(&item->mutex); mutex_unlock(&item->mutex);
item->object = NULL; item->object = NULL;
return ret; return ret;
...@@ -105,7 +104,6 @@ void ttm_global_item_unref(struct ttm_global_reference *ref) ...@@ -105,7 +104,6 @@ void ttm_global_item_unref(struct ttm_global_reference *ref)
BUG_ON(ref->object != item->object); BUG_ON(ref->object != item->object);
if (--item->refcount == 0) { if (--item->refcount == 0) {
ref->release(ref); ref->release(ref);
kfree(item->object);
item->object = NULL; item->object = NULL;
} }
mutex_unlock(&item->mutex); mutex_unlock(&item->mutex);
......
This diff is collapsed.
...@@ -166,7 +166,7 @@ static void ttm_tt_free_user_pages(struct ttm_tt *ttm) ...@@ -166,7 +166,7 @@ static void ttm_tt_free_user_pages(struct ttm_tt *ttm)
set_page_dirty_lock(page); set_page_dirty_lock(page);
ttm->pages[i] = NULL; ttm->pages[i] = NULL;
ttm_mem_global_free(ttm->bdev->mem_glob, PAGE_SIZE, false); ttm_mem_global_free(ttm->bdev->mem_glob, PAGE_SIZE);
put_page(page); put_page(page);
} }
ttm->state = tt_unpopulated; ttm->state = tt_unpopulated;
...@@ -187,22 +187,15 @@ static struct page *__ttm_tt_get_page(struct ttm_tt *ttm, int index) ...@@ -187,22 +187,15 @@ static struct page *__ttm_tt_get_page(struct ttm_tt *ttm, int index)
if (!p) if (!p)
return NULL; return NULL;
if (PageHighMem(p)) { ret = ttm_mem_global_alloc_page(mem_glob, p, false, false);
ret =
ttm_mem_global_alloc(mem_glob, PAGE_SIZE,
false, false, true);
if (unlikely(ret != 0)) if (unlikely(ret != 0))
goto out_err; goto out_err;
if (PageHighMem(p))
ttm->pages[--ttm->first_himem_page] = p; ttm->pages[--ttm->first_himem_page] = p;
} else { else
ret =
ttm_mem_global_alloc(mem_glob, PAGE_SIZE,
false, false, false);
if (unlikely(ret != 0))
goto out_err;
ttm->pages[++ttm->last_lomem_page] = p; ttm->pages[++ttm->last_lomem_page] = p;
} }
}
return p; return p;
out_err: out_err:
put_page(p); put_page(p);
...@@ -355,8 +348,8 @@ static void ttm_tt_free_alloced_pages(struct ttm_tt *ttm) ...@@ -355,8 +348,8 @@ static void ttm_tt_free_alloced_pages(struct ttm_tt *ttm)
printk(KERN_ERR TTM_PFX printk(KERN_ERR TTM_PFX
"Erroneous page count. " "Erroneous page count. "
"Leaking pages.\n"); "Leaking pages.\n");
ttm_mem_global_free(ttm->bdev->mem_glob, PAGE_SIZE, ttm_mem_global_free_page(ttm->bdev->mem_glob,
PageHighMem(cur_page)); cur_page);
__free_page(cur_page); __free_page(cur_page);
} }
} }
...@@ -411,7 +404,7 @@ int ttm_tt_set_user(struct ttm_tt *ttm, ...@@ -411,7 +404,7 @@ int ttm_tt_set_user(struct ttm_tt *ttm,
*/ */
ret = ttm_mem_global_alloc(mem_glob, num_pages * PAGE_SIZE, ret = ttm_mem_global_alloc(mem_glob, num_pages * PAGE_SIZE,
false, false, false); false, false);
if (unlikely(ret != 0)) if (unlikely(ret != 0))
return ret; return ret;
...@@ -422,7 +415,7 @@ int ttm_tt_set_user(struct ttm_tt *ttm, ...@@ -422,7 +415,7 @@ int ttm_tt_set_user(struct ttm_tt *ttm,
if (ret != num_pages && write) { if (ret != num_pages && write) {
ttm_tt_free_user_pages(ttm); ttm_tt_free_user_pages(ttm);
ttm_mem_global_free(mem_glob, num_pages * PAGE_SIZE, false); ttm_mem_global_free(mem_glob, num_pages * PAGE_SIZE);
return -ENOMEM; return -ENOMEM;
} }
......
...@@ -32,6 +32,7 @@ ...@@ -32,6 +32,7 @@
#include <linux/spinlock.h> #include <linux/spinlock.h>
#include <linux/wait.h> #include <linux/wait.h>
#include <linux/errno.h> #include <linux/errno.h>
#include <linux/kobject.h>
/** /**
* struct ttm_mem_shrink - callback to shrink TTM memory usage. * struct ttm_mem_shrink - callback to shrink TTM memory usage.
...@@ -60,34 +61,33 @@ struct ttm_mem_shrink { ...@@ -60,34 +61,33 @@ struct ttm_mem_shrink {
* @queue: Wait queue for processes suspended waiting for memory. * @queue: Wait queue for processes suspended waiting for memory.
* @lock: Lock to protect the @shrink - and the memory accounting members, * @lock: Lock to protect the @shrink - and the memory accounting members,
* that is, essentially the whole structure with some exceptions. * that is, essentially the whole structure with some exceptions.
* @emer_memory: Lowmem memory limit available for root. * @zones: Array of pointers to accounting zones.
* @max_memory: Lowmem memory limit available for non-root. * @num_zones: Number of populated entries in the @zones array.
* @swap_limit: Lowmem memory limit where the shrink workqueue kicks in. * @zone_kernel: Pointer to the kernel zone.
* @used_memory: Currently used lowmem memory. * @zone_highmem: Pointer to the highmem zone if there is one.
* @used_total_memory: Currently used total (lowmem + highmem) memory. * @zone_dma32: Pointer to the dma32 zone if there is one.
* @total_memory_swap_limit: Total memory limit where the shrink workqueue
* kicks in.
* @max_total_memory: Total memory available to non-root processes.
* @emer_total_memory: Total memory available to root processes.
* *
* Note that this structure is not per device. It should be global for all * Note that this structure is not per device. It should be global for all
* graphics devices. * graphics devices.
*/ */
#define TTM_MEM_MAX_ZONES 2
struct ttm_mem_zone;
struct ttm_mem_global { struct ttm_mem_global {
struct kobject kobj;
struct ttm_mem_shrink *shrink; struct ttm_mem_shrink *shrink;
struct workqueue_struct *swap_queue; struct workqueue_struct *swap_queue;
struct work_struct work; struct work_struct work;
wait_queue_head_t queue; wait_queue_head_t queue;
spinlock_t lock; spinlock_t lock;
uint64_t emer_memory; struct ttm_mem_zone *zones[TTM_MEM_MAX_ZONES];
uint64_t max_memory; unsigned int num_zones;
uint64_t swap_limit; struct ttm_mem_zone *zone_kernel;
uint64_t used_memory; #ifdef CONFIG_HIGHMEM
uint64_t used_total_memory; struct ttm_mem_zone *zone_highmem;
uint64_t total_memory_swap_limit; #else
uint64_t max_total_memory; struct ttm_mem_zone *zone_dma32;
uint64_t emer_total_memory; #endif
}; };
/** /**
...@@ -146,8 +146,13 @@ static inline void ttm_mem_unregister_shrink(struct ttm_mem_global *glob, ...@@ -146,8 +146,13 @@ static inline void ttm_mem_unregister_shrink(struct ttm_mem_global *glob,
extern int ttm_mem_global_init(struct ttm_mem_global *glob); extern int ttm_mem_global_init(struct ttm_mem_global *glob);
extern void ttm_mem_global_release(struct ttm_mem_global *glob); extern void ttm_mem_global_release(struct ttm_mem_global *glob);
extern int ttm_mem_global_alloc(struct ttm_mem_global *glob, uint64_t memory, extern int ttm_mem_global_alloc(struct ttm_mem_global *glob, uint64_t memory,
bool no_wait, bool interruptible, bool himem); bool no_wait, bool interruptible);
extern void ttm_mem_global_free(struct ttm_mem_global *glob, extern void ttm_mem_global_free(struct ttm_mem_global *glob,
uint64_t amount, bool himem); uint64_t amount);
extern int ttm_mem_global_alloc_page(struct ttm_mem_global *glob,
struct page *page,
bool no_wait, bool interruptible);
extern void ttm_mem_global_free_page(struct ttm_mem_global *glob,
struct page *page);
extern size_t ttm_round_pot(size_t size); extern size_t ttm_round_pot(size_t size);
#endif #endif
...@@ -32,6 +32,7 @@ ...@@ -32,6 +32,7 @@
#define _TTM_MODULE_H_ #define _TTM_MODULE_H_
#include <linux/kernel.h> #include <linux/kernel.h>
struct kobject;
#define TTM_PFX "[TTM]" #define TTM_PFX "[TTM]"
...@@ -54,5 +55,6 @@ extern void ttm_global_init(void); ...@@ -54,5 +55,6 @@ extern void ttm_global_init(void);
extern void ttm_global_release(void); extern void ttm_global_release(void);
extern int ttm_global_item_ref(struct ttm_global_reference *ref); extern int ttm_global_item_ref(struct ttm_global_reference *ref);
extern void ttm_global_item_unref(struct ttm_global_reference *ref); extern void ttm_global_item_unref(struct ttm_global_reference *ref);
extern struct kobject *ttm_get_kobj(void);
#endif /* _TTM_MODULE_H_ */ #endif /* _TTM_MODULE_H_ */
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment