Commit 13f1bfd3 authored by Chris Wilson

drm/i915: Make object/vma allocation caches global

As our allocations are not device specific, we can move our slab caches
to a global scope.
Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Reviewed-by: Tvrtko Ursulin <tvrtko.ursulin@intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20190228102035.5857-2-chris@chris-wilson.co.uk
parent 32eb6bcf
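In outline: the per-device slab caches (dev_priv->objects, dev_priv->vmas and dev_priv->luts) become file-local globals with init/shrink/exit hooks wired into i915_globals_init(), so the allocation helpers no longer take a dev_priv pointer. A condensed sketch of the new object-cache side, distilled from the i915_gem_object.c hunks below (the vma and context-LUT caches follow the same shape):

#include <linux/init.h>		/* __init */
#include <linux/slab.h>		/* KMEM_CACHE(), kmem_cache_*() */

#include "i915_gem_object.h"	/* struct drm_i915_gem_object */

/* One cache shared by every i915 device, owned by this file. */
static struct i915_global_object {
	struct kmem_cache *slab_objects;
} global;

struct drm_i915_gem_object *i915_gem_object_alloc(void)
{
	/* No dev_priv argument any more. */
	return kmem_cache_zalloc(global.slab_objects, GFP_KERNEL);
}

void i915_gem_object_free(struct drm_i915_gem_object *obj)
{
	kmem_cache_free(global.slab_objects, obj);
}

int __init i915_global_objects_init(void)
{
	global.slab_objects =
		KMEM_CACHE(drm_i915_gem_object, SLAB_HWCACHE_ALIGN);
	if (!global.slab_objects)
		return -ENOMEM;
	return 0;
}

void i915_global_objects_shrink(void)
{
	kmem_cache_shrink(global.slab_objects);
}

void i915_global_objects_exit(void)
{
	kmem_cache_destroy(global.slab_objects);
}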
@@ -153,7 +153,7 @@ static struct drm_i915_gem_object *vgpu_create_gem(struct drm_device *dev,
struct drm_i915_private *dev_priv = to_i915(dev);
struct drm_i915_gem_object *obj;
obj = i915_gem_object_alloc(dev_priv);
obj = i915_gem_object_alloc();
if (obj == NULL)
return NULL;
@@ -1470,10 +1470,6 @@ struct intel_cdclk_state {
struct drm_i915_private {
struct drm_device drm;
struct kmem_cache *objects;
struct kmem_cache *vmas;
struct kmem_cache *luts;
const struct intel_device_info __info; /* Use INTEL_INFO() to access. */
struct intel_runtime_info __runtime; /* Use RUNTIME_INFO() to access. */
struct intel_driver_caps caps;
@@ -2802,8 +2798,6 @@ void i915_gem_load_init_fences(struct drm_i915_private *dev_priv);
int i915_gem_freeze(struct drm_i915_private *dev_priv);
int i915_gem_freeze_late(struct drm_i915_private *dev_priv);
void *i915_gem_object_alloc(struct drm_i915_private *dev_priv);
void i915_gem_object_free(struct drm_i915_gem_object *obj);
void i915_gem_object_init(struct drm_i915_gem_object *obj,
const struct drm_i915_gem_object_ops *ops);
struct drm_i915_gem_object *
@@ -624,17 +624,6 @@ i915_gem_phys_pwrite(struct drm_i915_gem_object *obj,
return 0;
}
void *i915_gem_object_alloc(struct drm_i915_private *dev_priv)
{
return kmem_cache_zalloc(dev_priv->objects, GFP_KERNEL);
}
void i915_gem_object_free(struct drm_i915_gem_object *obj)
{
struct drm_i915_private *dev_priv = to_i915(obj->base.dev);
kmem_cache_free(dev_priv->objects, obj);
}
static int
i915_gem_create(struct drm_file *file,
struct drm_i915_private *dev_priv,
@@ -2895,10 +2884,6 @@ static void shrink_caches(struct drm_i915_private *i915)
* filled slabs to prioritise allocating from the mostly full slabs,
* with the aim of reducing fragmentation.
*/
kmem_cache_shrink(i915->luts);
kmem_cache_shrink(i915->vmas);
kmem_cache_shrink(i915->objects);
i915_globals_park();
}
@@ -3094,7 +3079,7 @@ void i915_gem_close_object(struct drm_gem_object *gem, struct drm_file *file)
list_del(&lut->obj_link);
list_del(&lut->ctx_link);
kmem_cache_free(i915->luts, lut);
i915_lut_handle_free(lut);
__i915_gem_object_release_unless_active(obj);
}
@@ -4199,7 +4184,7 @@ i915_gem_object_create(struct drm_i915_private *dev_priv, u64 size)
if (overflows_type(size, obj->base.size))
return ERR_PTR(-E2BIG);
obj = i915_gem_object_alloc(dev_priv);
obj = i915_gem_object_alloc();
if (obj == NULL)
return ERR_PTR(-ENOMEM);
@@ -5225,19 +5210,7 @@ static void i915_gem_init__mm(struct drm_i915_private *i915)
int i915_gem_init_early(struct drm_i915_private *dev_priv)
{
int err = -ENOMEM;
dev_priv->objects = KMEM_CACHE(drm_i915_gem_object, SLAB_HWCACHE_ALIGN);
if (!dev_priv->objects)
goto err_out;
dev_priv->vmas = KMEM_CACHE(i915_vma, SLAB_HWCACHE_ALIGN);
if (!dev_priv->vmas)
goto err_objects;
dev_priv->luts = KMEM_CACHE(i915_lut_handle, 0);
if (!dev_priv->luts)
goto err_vmas;
int err;
INIT_LIST_HEAD(&dev_priv->gt.active_rings);
INIT_LIST_HEAD(&dev_priv->gt.closed_vma);
@@ -5262,13 +5235,6 @@ int i915_gem_init_early(struct drm_i915_private *dev_priv)
DRM_NOTE("Unable to create a private tmpfs mount, hugepage support will be disabled(%d).\n", err);
return 0;
err_vmas:
kmem_cache_destroy(dev_priv->vmas);
err_objects:
kmem_cache_destroy(dev_priv->objects);
err_out:
return err;
}
void i915_gem_cleanup_early(struct drm_i915_private *dev_priv)
@@ -5280,13 +5246,6 @@ void i915_gem_cleanup_early(struct drm_i915_private *dev_priv)
cleanup_srcu_struct(&dev_priv->gpu_error.reset_backoff_srcu);
kmem_cache_destroy(dev_priv->luts);
kmem_cache_destroy(dev_priv->vmas);
kmem_cache_destroy(dev_priv->objects);
/* And ensure that our DESTROY_BY_RCU slabs are truly destroyed */
rcu_barrier();
i915_gemfs_fini(dev_priv);
}
@@ -94,6 +94,20 @@
#define ALL_L3_SLICES(dev) (1 << NUM_L3_SLICES(dev)) - 1
static struct i915_global_context {
struct kmem_cache *slab_luts;
} global;
struct i915_lut_handle *i915_lut_handle_alloc(void)
{
return kmem_cache_alloc(global.slab_luts, GFP_KERNEL);
}
void i915_lut_handle_free(struct i915_lut_handle *lut)
{
return kmem_cache_free(global.slab_luts, lut);
}
static void lut_close(struct i915_gem_context *ctx)
{
struct i915_lut_handle *lut, *ln;
@@ -102,7 +116,7 @@ static void lut_close(struct i915_gem_context *ctx)
list_for_each_entry_safe(lut, ln, &ctx->handles_list, ctx_link) {
list_del(&lut->obj_link);
kmem_cache_free(ctx->i915->luts, lut);
i915_lut_handle_free(lut);
}
rcu_read_lock();
@@ -1408,3 +1422,22 @@ int __i915_gem_context_pin_hw_id(struct i915_gem_context *ctx)
#include "selftests/mock_context.c"
#include "selftests/i915_gem_context.c"
#endif
int __init i915_global_context_init(void)
{
global.slab_luts = KMEM_CACHE(i915_lut_handle, 0);
if (!global.slab_luts)
return -ENOMEM;
return 0;
}
void i915_global_context_shrink(void)
{
kmem_cache_shrink(global.slab_luts);
}
void i915_global_context_exit(void)
{
kmem_cache_destroy(global.slab_luts);
}
@@ -32,6 +32,7 @@
#include "i915_gem.h"
#include "i915_scheduler.h"
#include "intel_device_info.h"
#include "intel_ringbuffer.h"
struct pid;
@@ -407,4 +408,11 @@ void intel_context_init(struct intel_context *ce,
struct i915_gem_context *ctx,
struct intel_engine_cs *engine);
struct i915_lut_handle *i915_lut_handle_alloc(void);
void i915_lut_handle_free(struct i915_lut_handle *lut);
int i915_global_context_init(void);
void i915_global_context_shrink(void);
void i915_global_context_exit(void);
#endif /* !__I915_GEM_CONTEXT_H__ */
@@ -300,7 +300,7 @@ struct drm_gem_object *i915_gem_prime_import(struct drm_device *dev,
get_dma_buf(dma_buf);
obj = i915_gem_object_alloc(to_i915(dev));
obj = i915_gem_object_alloc();
if (obj == NULL) {
ret = -ENOMEM;
goto fail_detach;
@@ -854,7 +854,7 @@ static int eb_lookup_vmas(struct i915_execbuffer *eb)
goto err_obj;
}
lut = kmem_cache_alloc(eb->i915->luts, GFP_KERNEL);
lut = i915_lut_handle_alloc();
if (unlikely(!lut)) {
err = -ENOMEM;
goto err_obj;
@@ -862,7 +862,7 @@ static int eb_lookup_vmas(struct i915_execbuffer *eb)
err = radix_tree_insert(handles_vma, handle, vma);
if (unlikely(err)) {
kmem_cache_free(eb->i915->luts, lut);
i915_lut_handle_free(lut);
goto err_obj;
}
@@ -1913,7 +1913,7 @@ static struct i915_vma *pd_vma_create(struct gen6_hw_ppgtt *ppgtt, int size)
GEM_BUG_ON(!IS_ALIGNED(size, I915_GTT_PAGE_SIZE));
GEM_BUG_ON(size > ggtt->vm.total);
vma = kmem_cache_zalloc(i915->vmas, GFP_KERNEL);
vma = i915_vma_alloc();
if (!vma)
return ERR_PTR(-ENOMEM);
@@ -193,7 +193,7 @@ i915_gem_object_create_internal(struct drm_i915_private *i915,
if (overflows_type(size, obj->base.size))
return ERR_PTR(-E2BIG);
obj = i915_gem_object_alloc(i915);
obj = i915_gem_object_alloc();
if (!obj)
return ERR_PTR(-ENOMEM);
@@ -25,6 +25,20 @@
#include "i915_drv.h"
#include "i915_gem_object.h"
static struct i915_global_object {
struct kmem_cache *slab_objects;
} global;
struct drm_i915_gem_object *i915_gem_object_alloc(void)
{
return kmem_cache_zalloc(global.slab_objects, GFP_KERNEL);
}
void i915_gem_object_free(struct drm_i915_gem_object *obj)
{
return kmem_cache_free(global.slab_objects, obj);
}
/**
* Mark up the object's coherency levels for a given cache_level
* @obj: #drm_i915_gem_object
@@ -46,3 +60,23 @@ void i915_gem_object_set_cache_coherency(struct drm_i915_gem_object *obj,
obj->cache_dirty =
!(obj->cache_coherent & I915_BO_CACHE_COHERENT_FOR_WRITE);
}
int __init i915_global_objects_init(void)
{
global.slab_objects =
KMEM_CACHE(drm_i915_gem_object, SLAB_HWCACHE_ALIGN);
if (!global.slab_objects)
return -ENOMEM;
return 0;
}
void i915_global_objects_shrink(void)
{
kmem_cache_shrink(global.slab_objects);
}
void i915_global_objects_exit(void)
{
kmem_cache_destroy(global.slab_objects);
}
@@ -304,6 +304,9 @@ to_intel_bo(struct drm_gem_object *gem)
return container_of(gem, struct drm_i915_gem_object, base);
}
struct drm_i915_gem_object *i915_gem_object_alloc(void);
void i915_gem_object_free(struct drm_i915_gem_object *obj);
/**
* i915_gem_object_lookup_rcu - look up a temporary GEM object from its handle
* @filp: DRM file private data
@@ -499,5 +502,8 @@ void i915_gem_object_set_cache_coherency(struct drm_i915_gem_object *obj,
unsigned int cache_level);
void i915_gem_object_flush_if_display(struct drm_i915_gem_object *obj);
#endif
int i915_global_objects_init(void);
void i915_global_objects_shrink(void);
void i915_global_objects_exit(void);
#endif
@@ -565,7 +565,7 @@ _i915_gem_object_create_stolen(struct drm_i915_private *dev_priv,
struct drm_i915_gem_object *obj;
unsigned int cache_level;
obj = i915_gem_object_alloc(dev_priv);
obj = i915_gem_object_alloc();
if (obj == NULL)
return NULL;
@@ -795,7 +795,7 @@ i915_gem_userptr_ioctl(struct drm_device *dev,
return -ENODEV;
}
obj = i915_gem_object_alloc(dev_priv);
obj = i915_gem_object_alloc();
if (obj == NULL)
return -ENOMEM;
@@ -8,9 +8,12 @@
#include <linux/workqueue.h>
#include "i915_active.h"
#include "i915_gem_context.h"
#include "i915_gem_object.h"
#include "i915_globals.h"
#include "i915_request.h"
#include "i915_scheduler.h"
#include "i915_vma.h"
int __init i915_globals_init(void)
{
@@ -20,18 +23,36 @@ int __init i915_globals_init(void)
if (err)
return err;
err = i915_global_request_init();
err = i915_global_context_init();
if (err)
goto err_active;
err = i915_global_objects_init();
if (err)
goto err_context;
err = i915_global_request_init();
if (err)
goto err_objects;
err = i915_global_scheduler_init();
if (err)
goto err_request;
err = i915_global_vma_init();
if (err)
goto err_scheduler;
return 0;
err_scheduler:
i915_global_scheduler_exit();
err_request:
i915_global_request_exit();
err_objects:
i915_global_objects_exit();
err_context:
i915_global_context_exit();
err_active:
i915_global_active_exit();
return err;
@@ -45,8 +66,11 @@ static void i915_globals_shrink(void)
* with the aim of reducing fragmentation.
*/
i915_global_active_shrink();
i915_global_context_shrink();
i915_global_objects_shrink();
i915_global_request_shrink();
i915_global_scheduler_shrink();
i915_global_vma_shrink();
}
static atomic_t active;
@@ -104,8 +128,11 @@ void __exit i915_globals_exit(void)
rcu_barrier();
flush_scheduled_work();
i915_global_vma_exit();
i915_global_scheduler_exit();
i915_global_request_exit();
i915_global_objects_exit();
i915_global_context_exit();
i915_global_active_exit();
/* And ensure that our DESTROY_BY_RCU slabs are truly destroyed */
@@ -30,6 +30,20 @@
#include <drm/drm_gem.h>
static struct i915_global_vma {
struct kmem_cache *slab_vmas;
} global;
struct i915_vma *i915_vma_alloc(void)
{
return kmem_cache_zalloc(global.slab_vmas, GFP_KERNEL);
}
void i915_vma_free(struct i915_vma *vma)
{
return kmem_cache_free(global.slab_vmas, vma);
}
#if IS_ENABLED(CONFIG_DRM_I915_ERRLOG_GEM) && IS_ENABLED(CONFIG_DRM_DEBUG_MM)
#include <linux/stackdepot.h>
@@ -115,7 +129,7 @@ vma_create(struct drm_i915_gem_object *obj,
/* The aliasing_ppgtt should never be used directly! */
GEM_BUG_ON(vm == &vm->i915->mm.aliasing_ppgtt->vm);
vma = kmem_cache_zalloc(vm->i915->vmas, GFP_KERNEL);
vma = i915_vma_alloc();
if (vma == NULL)
return ERR_PTR(-ENOMEM);
@@ -190,7 +204,7 @@ vma_create(struct drm_i915_gem_object *obj,
cmp = i915_vma_compare(pos, vm, view);
if (cmp == 0) {
spin_unlock(&obj->vma.lock);
kmem_cache_free(vm->i915->vmas, vma);
i915_vma_free(vma);
return pos;
}
@@ -222,7 +236,7 @@ vma_create(struct drm_i915_gem_object *obj,
return vma;
err_vma:
kmem_cache_free(vm->i915->vmas, vma);
i915_vma_free(vma);
return ERR_PTR(-E2BIG);
}
@@ -803,8 +817,6 @@ void i915_vma_reopen(struct i915_vma *vma)
static void __i915_vma_destroy(struct i915_vma *vma)
{
struct drm_i915_private *i915 = vma->vm->i915;
GEM_BUG_ON(vma->node.allocated);
GEM_BUG_ON(vma->fence);
@@ -825,7 +837,7 @@ static void __i915_vma_destroy(struct i915_vma *vma)
i915_active_fini(&vma->active);
kmem_cache_free(i915->vmas, vma);
i915_vma_free(vma);
}
void i915_vma_destroy(struct i915_vma *vma)
@@ -1041,3 +1053,22 @@ int i915_vma_unbind(struct i915_vma *vma)
#if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
#include "selftests/i915_vma.c"
#endif
int __init i915_global_vma_init(void)
{
global.slab_vmas = KMEM_CACHE(i915_vma, SLAB_HWCACHE_ALIGN);
if (!global.slab_vmas)
return -ENOMEM;
return 0;
}
void i915_global_vma_shrink(void)
{
kmem_cache_shrink(global.slab_vmas);
}
void i915_global_vma_exit(void)
{
kmem_cache_destroy(global.slab_vmas);
}
@@ -440,4 +440,11 @@ void i915_vma_parked(struct drm_i915_private *i915);
list_for_each_entry(V, &(OBJ)->vma.list, obj_link) \
for_each_until(!i915_vma_is_ggtt(V))
struct i915_vma *i915_vma_alloc(void);
void i915_vma_free(struct i915_vma *vma);
int i915_global_vma_init(void);
void i915_global_vma_shrink(void);
void i915_global_vma_exit(void);
#endif
@@ -122,7 +122,7 @@ huge_gem_object(struct drm_i915_private *i915,
if (overflows_type(dma_size, obj->base.size))
return ERR_PTR(-E2BIG);
obj = i915_gem_object_alloc(i915);
obj = i915_gem_object_alloc();
if (!obj)
return ERR_PTR(-ENOMEM);
@@ -171,7 +171,7 @@ huge_pages_object(struct drm_i915_private *i915,
if (overflows_type(size, obj->base.size))
return ERR_PTR(-E2BIG);
obj = i915_gem_object_alloc(i915);
obj = i915_gem_object_alloc();
if (!obj)
return ERR_PTR(-ENOMEM);
@@ -320,7 +320,7 @@ fake_huge_pages_object(struct drm_i915_private *i915, u64 size, bool single)
if (overflows_type(size, obj->base.size))
return ERR_PTR(-E2BIG);
obj = i915_gem_object_alloc(i915);
obj = i915_gem_object_alloc();
if (!obj)
return ERR_PTR(-ENOMEM);
@@ -120,7 +120,7 @@ fake_dma_object(struct drm_i915_private *i915, u64 size)
if (overflows_type(size, obj->base.size))
return ERR_PTR(-E2BIG);
obj = i915_gem_object_alloc(i915);
obj = i915_gem_object_alloc();
if (!obj)
goto err;
@@ -79,9 +79,6 @@ static void mock_device_release(struct drm_device *dev)
destroy_workqueue(i915->wq);
kmem_cache_destroy(i915->vmas);
kmem_cache_destroy(i915->objects);
i915_gemfs_fini(i915);
drm_mode_config_cleanup(&i915->drm);
@@ -200,14 +197,6 @@ struct drm_i915_private *mock_gem_device(void)
i915->gt.awake = true;
i915->objects = KMEM_CACHE(mock_object, SLAB_HWCACHE_ALIGN);
if (!i915->objects)
goto err_wq;
i915->vmas = KMEM_CACHE(i915_vma, SLAB_HWCACHE_ALIGN);
if (!i915->vmas)
goto err_objects;
i915_timelines_init(i915);
INIT_LIST_HEAD(&i915->gt.active_rings);
@@ -237,10 +226,6 @@ struct drm_i915_private *mock_gem_device(void)
err_unlock:
mutex_unlock(&i915->drm.struct_mutex);
i915_timelines_fini(i915);
kmem_cache_destroy(i915->vmas);
err_objects:
kmem_cache_destroy(i915->objects);
err_wq:
destroy_workqueue(i915->wq);
err_drv:
drm_mode_config_cleanup(&i915->drm);