Commit 213d5092 authored by Thomas Hellström, committed by Maarten Lankhorst

drm/i915/ttm: Introduce a TTM i915 gem object backend

The most logical place to introduce TTM buffer objects is as an i915
gem object backend. We need to add some ops to account for added
functionality like delayed delete and LRU list manipulation.

Initially we support only LMEM and SYSTEM memory, but SYSTEM
(which in this case means evicted LMEM objects) is not
visible to i915 GEM yet. The plan is to move the i915 gem system region
over to the TTM system memory type in upcoming patches.

We set up GPU bindings directly both from LMEM and from the system region,
as there is no need to use the legacy TTM_TT memory type. We reserve
that for future porting of GGTT bindings to TTM.

Remove the old lmem backend.
Signed-off-by: Thomas Hellström <thomas.hellstrom@linux.intel.com>
Reviewed-by: Matthew Auld <matthew.auld@intel.com>
Signed-off-by: Maarten Lankhorst <maarten.lankhorst@linux.intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20210610070152.572423-2-thomas.hellstrom@linux.intel.com
parent 1bd8a7dc
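For orientation before the diff: the "added functionality" mentioned in the commit message surfaces as two new drm_i915_gem_object_ops hooks, adjust_lru and delayed_free. A minimal sketch of how the TTM backend plausibly wires them up follows — the i915_ttm_* callback names are assumptions here, since the new gem/i915_gem_ttm.c itself is collapsed in this view:

/* Sketch only; the authoritative table is in the collapsed i915_gem_ttm.c. */
static const struct drm_i915_gem_object_ops i915_gem_ttm_obj_ops = {
	.name = "i915_gem_object_ttm",
	.flags = I915_GEM_OBJECT_HAS_IOMEM,

	.get_pages = i915_ttm_get_pages,	/* allocate via TTM placements */
	.put_pages = i915_ttm_put_pages,
	.adjust_lru = i915_ttm_adjust_lru,	/* new hook, see below */
	.delayed_free = i915_ttm_delayed_free,	/* new hook, see below */
};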
drivers/gpu/drm/i915/Makefile:
@@ -155,6 +155,7 @@ gem-y += \
	gem/i915_gem_stolen.o \
	gem/i915_gem_throttle.o \
	gem/i915_gem_tiling.o \
+	gem/i915_gem_ttm.o \
	gem/i915_gem_userptr.o \
	gem/i915_gem_wait.o \
	gem/i915_gemfs.o
drivers/gpu/drm/i915/gem/i915_gem_create.c:
@@ -85,13 +85,10 @@ i915_gem_setup(struct drm_i915_gem_object *obj, u64 size)
return -E2BIG;
	/*
-	 * For now resort to CPU based clearing for device local-memory, in the
-	 * near future this will use the blitter engine for accelerated, GPU
-	 * based clearing.
+	 * I915_BO_ALLOC_USER will make sure the object is cleared before
+	 * any user access.
	 */
-	flags = 0;
-	if (mr->type == INTEL_MEMORY_LOCAL)
-		flags = I915_BO_ALLOC_CPU_CLEAR;
+	flags = I915_BO_ALLOC_USER;
ret = mr->ops->init_object(mr, obj, size, flags);
if (ret)
drivers/gpu/drm/i915/gem/i915_gem_lmem.c:
@@ -4,74 +4,10 @@
*/
#include "intel_memory_region.h"
#include "intel_region_ttm.h"
#include "gem/i915_gem_region.h"
#include "gem/i915_gem_lmem.h"
#include "i915_drv.h"
-static void lmem_put_pages(struct drm_i915_gem_object *obj,
-			   struct sg_table *pages)
-{
-	intel_region_ttm_node_free(obj->mm.region, obj->mm.st_mm_node);
-	obj->mm.dirty = false;
-	sg_free_table(pages);
-	kfree(pages);
-}
-
-static int lmem_get_pages(struct drm_i915_gem_object *obj)
-{
-	unsigned int flags;
-	struct sg_table *pages;
-
-	flags = I915_ALLOC_MIN_PAGE_SIZE;
-	if (obj->flags & I915_BO_ALLOC_CONTIGUOUS)
-		flags |= I915_ALLOC_CONTIGUOUS;
-
-	obj->mm.st_mm_node = intel_region_ttm_node_alloc(obj->mm.region,
-							 obj->base.size,
-							 flags);
-	if (IS_ERR(obj->mm.st_mm_node))
-		return PTR_ERR(obj->mm.st_mm_node);
-
-	/* Range manager is always contigous */
-	if (obj->mm.region->is_range_manager)
-		obj->flags |= I915_BO_ALLOC_CONTIGUOUS;
-
-	pages = intel_region_ttm_node_to_st(obj->mm.region, obj->mm.st_mm_node);
-	if (IS_ERR(pages)) {
-		intel_region_ttm_node_free(obj->mm.region, obj->mm.st_mm_node);
-		return PTR_ERR(pages);
-	}
-
-	__i915_gem_object_set_pages(obj, pages, i915_sg_dma_sizes(pages->sgl));
-
-	if (obj->flags & I915_BO_ALLOC_CPU_CLEAR) {
-		void __iomem *vaddr =
-			i915_gem_object_lmem_io_map(obj, 0, obj->base.size);
-
-		if (!vaddr) {
-			struct sg_table *pages =
-				__i915_gem_object_unset_pages(obj);
-
-			if (!IS_ERR_OR_NULL(pages))
-				lmem_put_pages(obj, pages);
-		}
-
-		memset_io(vaddr, 0, obj->base.size);
-		io_mapping_unmap(vaddr);
-	}
-
-	return 0;
-}
-
-const struct drm_i915_gem_object_ops i915_gem_lmem_obj_ops = {
-	.name = "i915_gem_object_lmem",
-	.flags = I915_GEM_OBJECT_HAS_IOMEM,
-
-	.get_pages = lmem_get_pages,
-	.put_pages = lmem_put_pages,
-	.release = i915_gem_object_release_memory_region,
-};
void __iomem *
i915_gem_object_lmem_io_map(struct drm_i915_gem_object *obj,
unsigned long n,
@@ -103,23 +39,3 @@ i915_gem_object_create_lmem(struct drm_i915_private *i915,
return i915_gem_object_create_region(i915->mm.regions[INTEL_REGION_LMEM],
size, flags);
}
-int __i915_gem_lmem_object_init(struct intel_memory_region *mem,
-				struct drm_i915_gem_object *obj,
-				resource_size_t size,
-				unsigned int flags)
-{
-	static struct lock_class_key lock_class;
-	struct drm_i915_private *i915 = mem->i915;
-
-	drm_gem_private_object_init(&i915->drm, &obj->base, size);
-	i915_gem_object_init(obj, &i915_gem_lmem_obj_ops, &lock_class, flags);
-
-	obj->read_domains = I915_GEM_DOMAIN_WC | I915_GEM_DOMAIN_GTT;
-
-	i915_gem_object_set_cache_coherency(obj, I915_CACHE_NONE);
-
-	i915_gem_object_init_memory_region(obj, mem);
-
-	return 0;
-}
drivers/gpu/drm/i915/gem/i915_gem_lmem.h:
@@ -26,9 +26,4 @@ i915_gem_object_create_lmem(struct drm_i915_private *i915,
resource_size_t size,
unsigned int flags);
-int __i915_gem_lmem_object_init(struct intel_memory_region *mem,
-				struct drm_i915_gem_object *obj,
-				resource_size_t size,
-				unsigned int flags);
#endif /* !__I915_GEM_LMEM_H */
drivers/gpu/drm/i915/gem/i915_gem_object.c:
@@ -172,7 +172,7 @@ static void i915_gem_close_object(struct drm_gem_object *gem, struct drm_file *f
}
}
-static void __i915_gem_free_object_rcu(struct rcu_head *head)
+void __i915_gem_free_object_rcu(struct rcu_head *head)
{
struct drm_i915_gem_object *obj =
container_of(head, typeof(*obj), rcu);
@@ -208,12 +208,8 @@ static void __i915_gem_object_free_mmaps(struct drm_i915_gem_object *obj)
}
}
-static void __i915_gem_free_objects(struct drm_i915_private *i915,
-				    struct llist_node *freed)
+void __i915_gem_free_object(struct drm_i915_gem_object *obj)
{
-	struct drm_i915_gem_object *obj, *on;
-
-	llist_for_each_entry_safe(obj, on, freed, freed) {
trace_i915_gem_object_destroy(obj);
if (!list_empty(&obj->vma.list)) {
@@ -261,6 +257,20 @@ static void __i915_gem_free_objects(struct drm_i915_private *i915,
if (obj->shares_resv_from)
i915_vm_resv_put(obj->shares_resv_from);
+}
+
+static void __i915_gem_free_objects(struct drm_i915_private *i915,
+				    struct llist_node *freed)
+{
+	struct drm_i915_gem_object *obj, *on;
+
+	llist_for_each_entry_safe(obj, on, freed, freed) {
+		might_sleep();
+
+		if (obj->ops->delayed_free) {
+			obj->ops->delayed_free(obj);
+			continue;
+		}
+
+		__i915_gem_free_object(obj);
/* But keep the pointer alive for RCU-protected lookups */
call_rcu(&obj->rcu, __i915_gem_free_object_rcu);
@@ -318,6 +328,7 @@ static void i915_gem_free_object(struct drm_gem_object *gem_obj)
* worker and performing frees directly from subsequent allocations for
* crude but effective memory throttling.
*/
if (llist_add(&obj->freed, &i915->mm.free_list))
queue_work(i915->wq, &i915->mm.free_work);
}
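A backend's delayed_free hook takes over the final free; for the TTM backend the point is to route it through TTM's delayed-destroy machinery, which may have to wait for the GPU. A plausible sketch — the callback name follows the obj->ttm.created field added by this patch, and this is not the verbatim backend code:

static void i915_ttm_delayed_free(struct drm_i915_gem_object *obj)
{
	if (obj->ttm.created) {
		/* Last ref: let TTM's delayed destroy do the real work. */
		ttm_bo_put(i915_gem_to_ttm(obj));
	} else {
		/* TTM bo was never initialized; free immediately as before. */
		__i915_gem_free_object(obj);
		call_rcu(&obj->rcu, __i915_gem_free_object_rcu);
	}
}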
@@ -410,6 +421,42 @@ int i915_gem_object_read_from_page(struct drm_i915_gem_object *obj, u64 offset,
return 0;
}
+/**
+ * i915_gem_object_evictable - Whether object is likely evictable after unbind.
+ * @obj: The object to check
+ *
+ * This function checks whether the object is likely evictable after unbind.
+ * If the object is not locked when checking, the result is only advisory.
+ * If the object is locked when checking, and the function returns true,
+ * then an eviction should indeed be possible. But since unlocked vma
+ * unpinning and unbinding are currently possible, the object can actually
+ * become evictable even if this function returns false.
+ *
+ * Return: true if the object may be evictable. False otherwise.
+ */
+bool i915_gem_object_evictable(struct drm_i915_gem_object *obj)
+{
+	struct i915_vma *vma;
+	int pin_count = atomic_read(&obj->mm.pages_pin_count);
+
+	if (!pin_count)
+		return true;
+
+	spin_lock(&obj->vma.lock);
+	list_for_each_entry(vma, &obj->vma.list, obj_link) {
+		if (i915_vma_is_pinned(vma)) {
+			spin_unlock(&obj->vma.lock);
+			return false;
+		}
+
+		if (atomic_read(&vma->pages_count))
+			pin_count--;
+	}
+	spin_unlock(&obj->vma.lock);
+	GEM_WARN_ON(pin_count < 0);
+
+	return pin_count == 0;
+}
void i915_gem_init__objects(struct drm_i915_private *i915)
{
INIT_WORK(&i915->mm.free_work, __i915_gem_free_work);
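i915_gem_object_evictable() above is exported (see the header hunk below) so the TTM backend can consult it; a natural consumer would be TTM's eviction_valuable() device callback, roughly as in this sketch (the callback name is an assumption):

static bool i915_ttm_eviction_valuable(struct ttm_buffer_object *bo,
				       const struct ttm_place *place)
{
	struct drm_i915_gem_object *obj = i915_ttm_to_gem(bo);

	/* Skip objects that are pinned or whose pages are in active use. */
	return i915_gem_object_evictable(obj);
}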
drivers/gpu/drm/i915/gem/i915_gem_object.h:
@@ -200,6 +200,9 @@ static inline bool i915_gem_object_trylock(struct drm_i915_gem_object *obj)
static inline void i915_gem_object_unlock(struct drm_i915_gem_object *obj)
{
+	if (obj->ops->adjust_lru)
+		obj->ops->adjust_lru(obj);
+
dma_resv_unlock(obj->base.resv);
}
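Calling adjust_lru on every unlock lets a backend refresh the object's eviction ordering after use. For the TTM backend this plausibly maps onto TTM's per-device LRU; a simplified sketch, assuming the callback name and the ttm_bo_move_to_lru_tail()/bdev->lru_lock API of this kernel era:

static void i915_ttm_adjust_lru(struct drm_i915_gem_object *obj)
{
	struct ttm_buffer_object *bo = i915_gem_to_ttm(obj);

	/*
	 * A fuller version would also reorder the bo based on
	 * obj->mm.madv, so DONTNEED objects are considered first.
	 */
	spin_lock(&bo->bdev->lru_lock);
	ttm_bo_move_to_lru_tail(bo, &bo->mem, NULL);
	spin_unlock(&bo->bdev->lru_lock);
}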
@@ -587,6 +590,12 @@ int i915_gem_object_read_from_page(struct drm_i915_gem_object *obj, u64 offset,
bool i915_gem_object_is_shmem(const struct drm_i915_gem_object *obj);
+void __i915_gem_free_object_rcu(struct rcu_head *head);
+void __i915_gem_free_object(struct drm_i915_gem_object *obj);
+
+bool i915_gem_object_evictable(struct drm_i915_gem_object *obj);
#ifdef CONFIG_MMU_NOTIFIER
static inline bool
i915_gem_object_is_userptr(struct drm_i915_gem_object *obj)
drivers/gpu/drm/i915/gem/i915_gem_object_types.h:
@@ -63,6 +63,20 @@ struct drm_i915_gem_object_ops {
const struct drm_i915_gem_pwrite *arg);
int (*dmabuf_export)(struct drm_i915_gem_object *obj);
+	/**
+	 * adjust_lru - notify that the madvise value was updated
+	 * @obj: The gem object
+	 *
+	 * The madvise value may have been updated, or the object may have
+	 * been recently referenced, so act accordingly (perhaps by changing
+	 * an LRU list, etc.).
+	 */
+	void (*adjust_lru)(struct drm_i915_gem_object *obj);
+
+	/**
+	 * delayed_free - Override the default delayed free implementation
+	 */
+	void (*delayed_free)(struct drm_i915_gem_object *obj);
+
void (*release)(struct drm_i915_gem_object *obj);
const char *name; /* friendly name for debug, e.g. lockdep classes */
@@ -187,12 +201,14 @@ struct drm_i915_gem_object {
#define I915_BO_ALLOC_VOLATILE BIT(1)
#define I915_BO_ALLOC_STRUCT_PAGE BIT(2)
#define I915_BO_ALLOC_CPU_CLEAR BIT(3)
+#define I915_BO_ALLOC_USER BIT(4)
#define I915_BO_ALLOC_FLAGS (I915_BO_ALLOC_CONTIGUOUS | \
I915_BO_ALLOC_VOLATILE | \
I915_BO_ALLOC_STRUCT_PAGE | \
-			     I915_BO_ALLOC_CPU_CLEAR)
-#define I915_BO_READONLY BIT(4)
-#define I915_TILING_QUIRK_BIT 5 /* unknown swizzling; do not release! */
+			     I915_BO_ALLOC_CPU_CLEAR | \
+			     I915_BO_ALLOC_USER)
+#define I915_BO_READONLY BIT(5)
+#define I915_TILING_QUIRK_BIT 6 /* unknown swizzling; do not release! */
/*
* Is the object to be mapped as read-only to the GPU
@@ -310,6 +326,11 @@ struct drm_i915_gem_object {
bool dirty:1;
} mm;
+	struct {
+		struct sg_table *cached_io_st;
+		bool created:1;
+	} ttm;
+
/** Record of address bit 17 of each page at last unbind. */
unsigned long *bit_17;
drivers/gpu/drm/i915/gem/i915_gem_region.c:
@@ -18,11 +18,7 @@ void i915_gem_object_init_memory_region(struct drm_i915_gem_object *obj,
mutex_lock(&mem->objects.lock);
-	if (obj->flags & I915_BO_ALLOC_VOLATILE)
-		list_add(&obj->mm.region_link, &mem->objects.purgeable);
-	else
list_add(&obj->mm.region_link, &mem->objects.list);
mutex_unlock(&mem->objects.lock);
}
drivers/gpu/drm/i915/gem/i915_gem_ttm.c (new file; diff collapsed in this view)

drivers/gpu/drm/i915/gem/i915_gem_ttm.h (new file):
/* SPDX-License-Identifier: MIT */
/*
* Copyright © 2021 Intel Corporation
*/
#ifndef _I915_GEM_TTM_H_
#define _I915_GEM_TTM_H_
#include "gem/i915_gem_object_types.h"
/**
* i915_gem_to_ttm - Convert a struct drm_i915_gem_object to a
* struct ttm_buffer_object.
* @obj: Pointer to the gem object.
*
* Return: Pointer to the embedded struct ttm_buffer_object.
*/
static inline struct ttm_buffer_object *
i915_gem_to_ttm(struct drm_i915_gem_object *obj)
{
return &obj->__do_not_access;
}
/*
* i915 ttm gem object destructor. Internal use only.
*/
void i915_ttm_bo_destroy(struct ttm_buffer_object *bo);
/**
* i915_ttm_to_gem - Convert a struct ttm_buffer_object to an embedding
* struct drm_i915_gem_object.
*
 * Return: Pointer to the embedding struct drm_i915_gem_object, or NULL
 * if the object was not an i915 ttm object.
*/
static inline struct drm_i915_gem_object *
i915_ttm_to_gem(struct ttm_buffer_object *bo)
{
if (GEM_WARN_ON(bo->destroy != i915_ttm_bo_destroy))
return NULL;
return container_of(bo, struct drm_i915_gem_object, __do_not_access);
}
int __i915_gem_ttm_object_init(struct intel_memory_region *mem,
struct drm_i915_gem_object *obj,
resource_size_t size,
unsigned int flags);
#endif
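The destructor declared above is what TTM invokes once the last bo reference is dropped, possibly long after the GEM-side free thanks to delayed destroy. A sketch of its likely shape, using only helpers visible in this patch (the real body is in the collapsed i915_gem_ttm.c):

void i915_ttm_bo_destroy(struct ttm_buffer_object *bo)
{
	struct drm_i915_gem_object *obj = i915_ttm_to_gem(bo);

	i915_gem_object_release_memory_region(obj);

	/* Only free if init got far enough to hand ownership to TTM. */
	if (obj->ttm.created)
		call_rcu(&obj->rcu, __i915_gem_free_object_rcu);
}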
drivers/gpu/drm/i915/gt/intel_region_lmem.c:
@@ -9,6 +9,7 @@
#include "intel_region_ttm.h"
#include "gem/i915_gem_lmem.h"
#include "gem/i915_gem_region.h"
#include "gem/i915_gem_ttm.h"
#include "intel_region_lmem.h"
static int init_fake_lmem_bar(struct intel_memory_region *mem)
@@ -107,7 +108,7 @@ region_lmem_init(struct intel_memory_region *mem)
static const struct intel_memory_region_ops intel_region_lmem_ops = {
.init = region_lmem_init,
.release = region_lmem_release,
-	.init_object = __i915_gem_lmem_object_init,
+	.init_object = __i915_gem_ttm_object_init,
};
struct intel_memory_region *
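__i915_gem_ttm_object_init() replaces the removed lmem init as the region's init_object hook. A condensed sketch of what it has to do — GEM init as before, then handing the backing store to TTM via ttm_bo_init(); i915_gem_ttm_obj_ops and i915_sys_placement are assumed names, and the real function in the collapsed i915_gem_ttm.c handles more details:

int __i915_gem_ttm_object_init(struct intel_memory_region *mem,
			       struct drm_i915_gem_object *obj,
			       resource_size_t size,
			       unsigned int flags)
{
	static struct lock_class_key lock_class;
	struct drm_i915_private *i915 = mem->i915;
	enum ttm_bo_type bo_type;
	int ret;

	drm_gem_private_object_init(&i915->drm, &obj->base, size);
	i915_gem_object_init(obj, &i915_gem_ttm_obj_ops, &lock_class, flags);
	i915_gem_object_init_memory_region(obj, mem);

	/* I915_BO_ALLOC_USER objects must be cleared before user access. */
	bo_type = (obj->flags & I915_BO_ALLOC_USER) ? ttm_bo_type_device :
		ttm_bo_type_kernel;

	/*
	 * On failure ttm_bo_init() calls i915_ttm_bo_destroy(), but the
	 * caller still owns the object, so the destructor must not free
	 * it until obj->ttm.created has been set.
	 */
	ret = ttm_bo_init(&i915->bdev, i915_gem_to_ttm(obj), size, bo_type,
			  &i915_sys_placement, 0, true, NULL, NULL,
			  i915_ttm_bo_destroy);
	if (!ret)
		obj->ttm.created = true;

	return ret;
}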
drivers/gpu/drm/i915/i915_gem.c:
@@ -1005,8 +1005,11 @@ i915_gem_madvise_ioctl(struct drm_device *dev, void *data,
}
}
-	if (obj->mm.madv != __I915_MADV_PURGED)
+	if (obj->mm.madv != __I915_MADV_PURGED) {
 		obj->mm.madv = args->madv;
+
+		if (obj->ops->adjust_lru)
+			obj->ops->adjust_lru(obj);
+	}
if (i915_gem_object_has_pages(obj)) {
unsigned long flags;
drivers/gpu/drm/i915/intel_memory_region.c:
@@ -149,7 +149,6 @@ intel_memory_region_create(struct drm_i915_private *i915,
mutex_init(&mem->objects.lock);
INIT_LIST_HEAD(&mem->objects.list);
-	INIT_LIST_HEAD(&mem->objects.purgeable);
INIT_LIST_HEAD(&mem->reserved);
mutex_init(&mem->mm_lock);
drivers/gpu/drm/i915/intel_memory_region.h:
@@ -101,7 +101,6 @@ struct intel_memory_region {
struct {
struct mutex lock; /* Protects access to objects */
struct list_head list;
-		struct list_head purgeable;
} objects;
size_t chunk_size;
drivers/gpu/drm/i915/intel_region_ttm.c:
@@ -11,6 +11,7 @@
#include "intel_region_ttm.h"
#include "gem/i915_gem_ttm.h" /* For the funcs/ops export only */
/**
* DOC: TTM support structure
*
@@ -20,9 +21,6 @@
* i915 GEM regions to TTM memory types and resource managers.
*/
-/* A Zero-initialized driver for now. We don't have a TTM backend yet. */
-static struct ttm_device_funcs i915_ttm_bo_driver;
/**
* intel_region_ttm_device_init - Initialize a TTM device
* @dev_priv: Pointer to an i915 device private structure.
@@ -33,7 +31,7 @@ int intel_region_ttm_device_init(struct drm_i915_private *dev_priv)
{
struct drm_device *drm = &dev_priv->drm;
-	return ttm_device_init(&dev_priv->bdev, &i915_ttm_bo_driver,
+	return ttm_device_init(&dev_priv->bdev, i915_ttm_driver(),
drm->dev, drm->anon_inode->i_mapping,
drm->vma_offset_manager, false, false);
}
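i915_ttm_driver() replaces the zero-initialized placeholder by exporting the now-real ttm_device_funcs table from the gem backend. A sketch — the callback names are assumptions, and the populated table lives in the collapsed i915_gem_ttm.c:

static struct ttm_device_funcs i915_ttm_bo_driver = {
	.ttm_tt_create = i915_ttm_tt_create,
	.ttm_tt_unpopulate = i915_ttm_tt_unpopulate,
	.ttm_tt_destroy = i915_ttm_tt_destroy,
	.eviction_valuable = i915_ttm_eviction_valuable,
	.evict_flags = i915_ttm_evict_flags,
	.move = i915_ttm_move,
	.swap_notify = i915_ttm_swap_notify,
	.delete_mem_notify = i915_ttm_delete_mem_notify,
};

struct ttm_device_funcs *i915_ttm_driver(void)
{
	return &i915_ttm_bo_driver;
}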
@@ -177,6 +175,7 @@ struct sg_table *intel_region_ttm_node_to_st(struct intel_memory_region *mem,
mem->region.start);
}
+#ifdef CONFIG_DRM_I915_SELFTEST
/**
* intel_region_ttm_node_alloc - Allocate memory resources from a region
* @mem: The memory region,
@@ -224,3 +223,4 @@
ret = -ENXIO;
return ret ? ERR_PTR(ret) : res;
}
+#endif
drivers/gpu/drm/i915/intel_region_ttm.h:
@@ -12,6 +12,7 @@
struct drm_i915_private;
struct intel_memory_region;
struct ttm_resource;
+struct ttm_device_funcs;
int intel_region_ttm_device_init(struct drm_i915_private *dev_priv);
@@ -24,11 +25,15 @@ void intel_region_ttm_fini(struct intel_memory_region *mem);
struct sg_table *intel_region_ttm_node_to_st(struct intel_memory_region *mem,
struct ttm_resource *res);
-void intel_region_ttm_node_free(struct intel_memory_region *mem,
-				struct ttm_resource *node);
+struct ttm_device_funcs *i915_ttm_driver(void);
+
+#ifdef CONFIG_DRM_I915_SELFTEST
struct ttm_resource *
intel_region_ttm_node_alloc(struct intel_memory_region *mem,
			    resource_size_t size,
			    unsigned int flags);
+
+void intel_region_ttm_node_free(struct intel_memory_region *mem,
+				struct ttm_resource *node);
+#endif
#endif /* _INTEL_REGION_TTM_H_ */