Commit 7c98501a authored by Matthew Auld, committed by Chris Wilson

drm/i915/region: support volatile objects

Volatile objects are marked as DONTNEED while pinned, therefore once
unpinned the backing store can be discarded. This is limited to kernel
internal objects.
Signed-off-by: Matthew Auld <matthew.auld@intel.com>
Signed-off-by: CQ Tang <cq.tang@intel.com>
Cc: Joonas Lahtinen <joonas.lahtinen@linux.intel.com>
Cc: Abdiel Janulgue <abdiel.janulgue@linux.intel.com>
Reviewed-by: Chris Wilson <chris@chris-wilson.co.uk>
Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Link: https://patchwork.freedesktop.org/patch/msgid/20191008160116.18379-4-matthew.auld@intel.com
parent 2f0b97ca
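
For reviewers following along, the intended lifecycle looks roughly like the sketch below. The function name is hypothetical and error handling is reduced to the essentials; the create/pin/unpin/put calls are the existing i915 APIs.

static int example_volatile_lifecycle(struct drm_i915_private *i915)
{
	struct drm_i915_gem_object *obj;
	int err;

	/* Kernel internal objects are now volatile from birth. */
	obj = i915_gem_object_create_internal(i915, SZ_64K);
	if (IS_ERR(obj))
		return PTR_ERR(obj);

	/*
	 * Pinning populates the backing store; because the object is
	 * volatile, __i915_gem_object_set_pages() marks the pages as
	 * DONTNEED while they remain pinned.
	 */
	err = i915_gem_object_pin_pages(obj);
	if (err)
		goto out_put;

	/* ... use the contents; they are only valid while pinned ... */

	/*
	 * After unpinning, the shrinker is free to reap the pages, so the
	 * caller must treat the contents as lost and repopulate them on
	 * the next pin.
	 */
	i915_gem_object_unpin_pages(obj);

out_put:
	i915_gem_object_put(obj);
	return err;
}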
@@ -117,13 +117,6 @@ static int i915_gem_object_get_pages_internal(struct drm_i915_gem_object *obj)
 		goto err;
 	}
 
-	/* Mark the pages as dontneed whilst they are still pinned. As soon
-	 * as they are unpinned they are allowed to be reaped by the shrinker,
-	 * and the caller is expected to repopulate - the contents of this
-	 * object are only valid whilst active and pinned.
-	 */
-	obj->mm.madv = I915_MADV_DONTNEED;
-
 	__i915_gem_object_set_pages(obj, st, sg_page_sizes);
 
 	return 0;
@@ -143,7 +136,6 @@ static void i915_gem_object_put_pages_internal(struct drm_i915_gem_object *obj,
 	internal_free_pages(pages);
 
 	obj->mm.dirty = false;
-	obj->mm.madv = I915_MADV_WILLNEED;
 }
 
 static const struct drm_i915_gem_object_ops i915_gem_object_internal_ops = {
@@ -188,6 +180,15 @@ i915_gem_object_create_internal(struct drm_i915_private *i915,
 	drm_gem_private_object_init(&i915->drm, &obj->base, size);
 	i915_gem_object_init(obj, &i915_gem_object_internal_ops);
 
+	/*
+	 * Mark the object as volatile, such that the pages are marked as
+	 * dontneed whilst they are still pinned. As soon as they are unpinned
+	 * they are allowed to be reaped by the shrinker, and the caller is
+	 * expected to repopulate - the contents of this object are only valid
+	 * whilst active and pinned.
+	 */
+	i915_gem_object_set_volatile(obj);
+
 	obj->read_domains = I915_GEM_DOMAIN_CPU;
 	obj->write_domain = I915_GEM_DOMAIN_CPU;
@@ -145,6 +145,18 @@ i915_gem_object_is_contiguous(const struct drm_i915_gem_object *obj)
 	return obj->flags & I915_BO_ALLOC_CONTIGUOUS;
 }
 
+static inline bool
+i915_gem_object_is_volatile(const struct drm_i915_gem_object *obj)
+{
+	return obj->flags & I915_BO_ALLOC_VOLATILE;
+}
+
+static inline void
+i915_gem_object_set_volatile(struct drm_i915_gem_object *obj)
+{
+	obj->flags |= I915_BO_ALLOC_VOLATILE;
+}
+
 static inline bool
 i915_gem_object_type_has(const struct drm_i915_gem_object *obj,
 			 unsigned long flags)
@@ -121,7 +121,8 @@ struct drm_i915_gem_object {
 	unsigned long flags;
 #define I915_BO_ALLOC_CONTIGUOUS BIT(0)
-#define I915_BO_ALLOC_FLAGS (I915_BO_ALLOC_CONTIGUOUS)
+#define I915_BO_ALLOC_VOLATILE BIT(1)
+#define I915_BO_ALLOC_FLAGS (I915_BO_ALLOC_CONTIGUOUS | I915_BO_ALLOC_VOLATILE)
 
 	/*
 	 * Is the object to be mapped as read-only to the GPU
@@ -172,6 +173,12 @@ struct drm_i915_gem_object {
 	 * List of memory region blocks allocated for this object.
 	 */
 	struct list_head blocks;
+	/**
+	 * Element within memory_region->objects or region->purgeable
+	 * if the object is marked as DONTNEED. Access is protected by
+	 * region->obj_lock.
+	 */
+	struct list_head region_link;
 
 	struct sg_table *pages;
 	void *mapping;
@@ -18,6 +18,9 @@ void __i915_gem_object_set_pages(struct drm_i915_gem_object *obj,
 	lockdep_assert_held(&obj->mm.lock);
 
+	if (i915_gem_object_is_volatile(obj))
+		obj->mm.madv = I915_MADV_DONTNEED;
+
 	/* Make the pages coherent with the GPU (flushing any swapin). */
 	if (obj->cache_dirty) {
 		obj->write_domain = 0;
@@ -160,6 +163,9 @@ __i915_gem_object_unset_pages(struct drm_i915_gem_object *obj)
 	if (IS_ERR_OR_NULL(pages))
 		return pages;
 
+	if (i915_gem_object_is_volatile(obj))
+		obj->mm.madv = I915_MADV_WILLNEED;
+
 	i915_gem_object_make_unshrinkable(obj);
 
 	if (obj->mm.mapping) {
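Hoisting the madv bookkeeping into __i915_gem_object_set_pages() and __i915_gem_object_unset_pages() means every volatile object is DONTNEED for exactly the lifetime of its pages, rather than each backend setting it by hand. A consumer can then key purgeability off madv alone; a minimal sketch follows (the helper is hypothetical, and real shrinker code would of course hold the relevant locks):

static bool object_is_purgeable(const struct drm_i915_gem_object *obj)
{
	/*
	 * A volatile object is DONTNEED for as long as it has pages, so
	 * once its pin count drops to zero it may be reaped without
	 * writing anything back.
	 */
	return obj->mm.madv == I915_MADV_DONTNEED;
}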
@@ -107,11 +107,26 @@ void i915_gem_object_init_memory_region(struct drm_i915_gem_object *obj,
 	INIT_LIST_HEAD(&obj->mm.blocks);
 	obj->mm.region = intel_memory_region_get(mem);
 
 	obj->flags |= flags;
+
+	mutex_lock(&mem->objects.lock);
+
+	if (obj->flags & I915_BO_ALLOC_VOLATILE)
+		list_add(&obj->mm.region_link, &mem->objects.purgeable);
+	else
+		list_add(&obj->mm.region_link, &mem->objects.list);
+
+	mutex_unlock(&mem->objects.lock);
 }
 
 void i915_gem_object_release_memory_region(struct drm_i915_gem_object *obj)
 {
-	intel_memory_region_put(obj->mm.region);
+	struct intel_memory_region *mem = obj->mm.region;
+
+	mutex_lock(&mem->objects.lock);
+	list_del(&obj->mm.region_link);
+	mutex_unlock(&mem->objects.lock);
+
+	intel_memory_region_put(mem);
 }
 
 struct drm_i915_gem_object *
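Keeping volatile objects on a separate per-region purgeable list lets an eviction pass find candidates without scanning every object in the region. A hypothetical walk (not part of this patch) would take objects.lock, as the kerneldoc for region_link requires:

static void region_scan_purgeable(struct intel_memory_region *mem)
{
	struct drm_i915_gem_object *obj;

	mutex_lock(&mem->objects.lock);
	list_for_each_entry(obj, &mem->objects.purgeable, mm.region_link) {
		/* Each entry is a candidate for reaping once unpinned. */
	}
	mutex_unlock(&mem->objects.lock);
}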
@@ -115,8 +115,6 @@ static int get_huge_pages(struct drm_i915_gem_object *obj)
 	if (i915_gem_gtt_prepare_pages(obj, st))
 		goto err;
 
-	obj->mm.madv = I915_MADV_DONTNEED;
-
 	GEM_BUG_ON(sg_page_sizes != obj->mm.page_mask);
 	__i915_gem_object_set_pages(obj, st, sg_page_sizes);
@@ -137,7 +135,6 @@ static void put_huge_pages(struct drm_i915_gem_object *obj,
 	huge_pages_free_pages(pages);
 
 	obj->mm.dirty = false;
-	obj->mm.madv = I915_MADV_WILLNEED;
 }
@@ -170,6 +167,8 @@ huge_pages_object(struct drm_i915_private *i915,
 	drm_gem_private_object_init(&i915->drm, &obj->base, size);
 	i915_gem_object_init(obj, &huge_page_ops);
 
+	i915_gem_object_set_volatile(obj);
+
 	obj->write_domain = I915_GEM_DOMAIN_CPU;
 	obj->read_domains = I915_GEM_DOMAIN_CPU;
 	obj->cache_level = I915_CACHE_NONE;
@@ -229,8 +228,6 @@ static int fake_get_huge_pages(struct drm_i915_gem_object *obj)
 	i915_sg_trim(st);
 
-	obj->mm.madv = I915_MADV_DONTNEED;
-
 	__i915_gem_object_set_pages(obj, st, sg_page_sizes);
 
 	return 0;
@@ -263,8 +260,6 @@ static int fake_get_huge_pages_single(struct drm_i915_gem_object *obj)
 	sg_dma_len(sg) = obj->base.size;
 	sg_dma_address(sg) = page_size;
 
-	obj->mm.madv = I915_MADV_DONTNEED;
-
 	__i915_gem_object_set_pages(obj, st, sg->length);
 
 	return 0;
@@ -283,7 +278,6 @@ static void fake_put_huge_pages(struct drm_i915_gem_object *obj,
 {
 	fake_free_huge_pages(obj, pages);
 	obj->mm.dirty = false;
-	obj->mm.madv = I915_MADV_WILLNEED;
 }
 
 static const struct drm_i915_gem_object_ops fake_ops = {
@@ -323,6 +317,8 @@ fake_huge_pages_object(struct drm_i915_private *i915, u64 size, bool single)
 	else
 		i915_gem_object_init(obj, &fake_ops);
 
+	i915_gem_object_set_volatile(obj);
+
 	obj->write_domain = I915_GEM_DOMAIN_CPU;
 	obj->read_domains = I915_GEM_DOMAIN_CPU;
 	obj->cache_level = I915_CACHE_NONE;
@@ -152,6 +152,10 @@ intel_memory_region_create(struct drm_i915_private *i915,
 	mem->min_page_size = min_page_size;
 	mem->ops = ops;
 
+	mutex_init(&mem->objects.lock);
+	INIT_LIST_HEAD(&mem->objects.list);
+	INIT_LIST_HEAD(&mem->objects.purgeable);
+
 	mutex_init(&mem->mm_lock);
 
 	if (ops->init) {
@@ -177,6 +181,7 @@ static void __intel_memory_region_destroy(struct kref *kref)
 	mem->ops->release(mem);
 
 	mutex_destroy(&mem->mm_lock);
+	mutex_destroy(&mem->objects.lock);
 	kfree(mem);
 }
@@ -52,6 +52,12 @@ struct intel_memory_region {
 	unsigned int type;
 	unsigned int instance;
 	unsigned int id;
+
+	struct {
+		struct mutex lock; /* Protects access to objects */
+		struct list_head list;
+		struct list_head purgeable;
+	} objects;
 };
 
 int intel_memory_region_init_buddy(struct intel_memory_region *mem);
@@ -82,8 +82,6 @@ static int fake_get_pages(struct drm_i915_gem_object *obj)
 	}
 	GEM_BUG_ON(rem);
 
-	obj->mm.madv = I915_MADV_DONTNEED;
-
 	__i915_gem_object_set_pages(obj, pages, sg_page_sizes);
 
 	return 0;
@@ -95,7 +93,6 @@ static void fake_put_pages(struct drm_i915_gem_object *obj,
 {
 	fake_free_pages(obj, pages);
 	obj->mm.dirty = false;
-	obj->mm.madv = I915_MADV_WILLNEED;
 }
 
 static const struct drm_i915_gem_object_ops fake_ops = {
@@ -122,6 +119,8 @@ fake_dma_object(struct drm_i915_private *i915, u64 size)
 	drm_gem_private_object_init(&i915->drm, &obj->base, size);
 	i915_gem_object_init(obj, &fake_ops);
 
+	i915_gem_object_set_volatile(obj);
+
 	obj->write_domain = I915_GEM_DOMAIN_CPU;
 	obj->read_domains = I915_GEM_DOMAIN_CPU;
 	obj->cache_level = I915_CACHE_NONE;