Commit 0e9d217b authored by Joonas Lahtinen

Merge tag 'topic/i915-ttm-2021-06-11' of git://anongit.freedesktop.org/drm/drm-misc into drm-intel-gt-next

drm-misc and drm-intel pull request for topic/i915-ttm:
- Convert i915 lmem handling to ttm.
- Add a patch to temporarily add a driver_private member to vma_node.
- Use this to allow mixed object mmap handling for i915.
Signed-off-by: Joonas Lahtinen <joonas.lahtinen@linux.intel.com>
From: Maarten Lankhorst <maarten.lankhorst@linux.intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/eb71ee2d-3413-6ca8-0b7c-a58695f00b77@linux.intel.com
parents c649432e cf3e3e86
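
The heart of the mixed mmap handling is a dispatch on the new driver_private pointer in the vma offset node. A condensed sketch of that dispatch (the helper name lookup_object is invented for illustration; the real logic is in the i915_gem_mmap() hunk below):

	static struct drm_i915_gem_object *
	lookup_object(struct drm_vma_offset_node *node)
	{
		/* TTM objects mark their node; legacy objects leave it NULL. */
		if (!node->driver_private) {
			struct i915_mmap_offset *mmo =
				container_of(node, struct i915_mmap_offset, vma_node);

			return i915_gem_object_get_rcu(mmo->obj);
		}

		/* TTM path: the node is the object's own base.vma_node. */
		return i915_gem_object_get_rcu(container_of(node,
							    struct drm_i915_gem_object,
							    base.vma_node));
	}
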
......@@ -1148,15 +1148,6 @@ int drm_gem_mmap(struct file *filp, struct vm_area_struct *vma)
return -EACCES;
}
if (node->readonly) {
if (vma->vm_flags & VM_WRITE) {
drm_gem_object_put(obj);
return -EINVAL;
}
vma->vm_flags &= ~VM_MAYWRITE;
}
ret = drm_gem_mmap_obj(obj, drm_vma_node_size(node) << PAGE_SHIFT,
vma);
......
......@@ -155,6 +155,7 @@ gem-y += \
gem/i915_gem_stolen.o \
gem/i915_gem_throttle.o \
gem/i915_gem_tiling.o \
gem/i915_gem_ttm.o \
gem/i915_gem_userptr.o \
gem/i915_gem_wait.o \
gem/i915_gemfs.o
......
......@@ -11771,7 +11771,7 @@ intel_user_framebuffer_create(struct drm_device *dev,
/* object is backed with LMEM for discrete */
i915 = to_i915(obj->base.dev);
if (HAS_LMEM(i915) && !i915_gem_object_is_lmem(obj)) {
if (HAS_LMEM(i915) && !i915_gem_object_validates_to_lmem(obj)) {
/* object is "remote", not in local memory */
i915_gem_object_put(obj);
return ERR_PTR(-EREMOTE);
......
......@@ -85,13 +85,10 @@ i915_gem_setup(struct drm_i915_gem_object *obj, u64 size)
return -E2BIG;
/*
* For now resort to CPU based clearing for device local-memory, in the
* near future this will use the blitter engine for accelerated, GPU
* based clearing.
* I915_BO_ALLOC_USER will make sure the object is cleared before
* any user access.
*/
flags = 0;
if (mr->type == INTEL_MEMORY_LOCAL)
flags = I915_BO_ALLOC_CPU_CLEAR;
flags = I915_BO_ALLOC_USER;
ret = mr->ops->init_object(mr, obj, size, flags);
if (ret)
......
......@@ -4,74 +4,10 @@
*/
#include "intel_memory_region.h"
#include "intel_region_ttm.h"
#include "gem/i915_gem_region.h"
#include "gem/i915_gem_lmem.h"
#include "i915_drv.h"
static void lmem_put_pages(struct drm_i915_gem_object *obj,
struct sg_table *pages)
{
intel_region_ttm_node_free(obj->mm.region, obj->mm.st_mm_node);
obj->mm.dirty = false;
sg_free_table(pages);
kfree(pages);
}
static int lmem_get_pages(struct drm_i915_gem_object *obj)
{
unsigned int flags;
struct sg_table *pages;
flags = I915_ALLOC_MIN_PAGE_SIZE;
if (obj->flags & I915_BO_ALLOC_CONTIGUOUS)
flags |= I915_ALLOC_CONTIGUOUS;
obj->mm.st_mm_node = intel_region_ttm_node_alloc(obj->mm.region,
obj->base.size,
flags);
if (IS_ERR(obj->mm.st_mm_node))
return PTR_ERR(obj->mm.st_mm_node);
/* Range manager is always contiguous */
if (obj->mm.region->is_range_manager)
obj->flags |= I915_BO_ALLOC_CONTIGUOUS;
pages = intel_region_ttm_node_to_st(obj->mm.region, obj->mm.st_mm_node);
if (IS_ERR(pages)) {
intel_region_ttm_node_free(obj->mm.region, obj->mm.st_mm_node);
return PTR_ERR(pages);
}
__i915_gem_object_set_pages(obj, pages, i915_sg_dma_sizes(pages->sgl));
if (obj->flags & I915_BO_ALLOC_CPU_CLEAR) {
void __iomem *vaddr =
i915_gem_object_lmem_io_map(obj, 0, obj->base.size);
if (!vaddr) {
struct sg_table *pages =
__i915_gem_object_unset_pages(obj);
if (!IS_ERR_OR_NULL(pages))
		lmem_put_pages(obj, pages);

	/* Bail out before memset_io() touches the NULL mapping. */
	return -ENOMEM;
}
memset_io(vaddr, 0, obj->base.size);
io_mapping_unmap(vaddr);
}
return 0;
}
const struct drm_i915_gem_object_ops i915_gem_lmem_obj_ops = {
.name = "i915_gem_object_lmem",
.flags = I915_GEM_OBJECT_HAS_IOMEM,
.get_pages = lmem_get_pages,
.put_pages = lmem_put_pages,
.release = i915_gem_object_release_memory_region,
};
void __iomem *
i915_gem_object_lmem_io_map(struct drm_i915_gem_object *obj,
unsigned long n,
......@@ -87,10 +23,50 @@ i915_gem_object_lmem_io_map(struct drm_i915_gem_object *obj,
return io_mapping_map_wc(&obj->mm.region->iomap, offset, size);
}
/**
* i915_gem_object_validates_to_lmem - Whether the object is resident in
* lmem when pages are present.
* @obj: The object to check.
*
* A migratable object's residency may change from under us if the object is
* not pinned or locked. This function is intended to be used to check whether
* the object can only reside in lmem when pages are present.
*
* Return: Whether the object is always resident in lmem when pages are
* present.
*/
bool i915_gem_object_validates_to_lmem(struct drm_i915_gem_object *obj)
{
struct intel_memory_region *mr = READ_ONCE(obj->mm.region);
return !i915_gem_object_migratable(obj) &&
mr && (mr->type == INTEL_MEMORY_LOCAL ||
mr->type == INTEL_MEMORY_STOLEN_LOCAL);
}
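/*
 * Usage sketch (hypothetical caller): because the result is stable even
 * without the object lock, this suits unlocked checks such as the
 * framebuffer creation hunk above:
 *
 *	if (HAS_LMEM(i915) && !i915_gem_object_validates_to_lmem(obj))
 *		return ERR_PTR(-EREMOTE);
 */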
/**
* i915_gem_object_is_lmem - Whether the object is resident in
* lmem
* @obj: The object to check.
*
* Even if an object is allowed to migrate and change memory region,
* this function checks whether it will always be present in lmem when
* valid *or* if that's not the case, whether it's currently resident in lmem.
* For migratable and evictable objects, the latter only makes sense when
* the object is locked.
*
* Return: Whether the object is migratable but currently resident in lmem, or
* is not migratable and will be present in lmem when valid.
*/
bool i915_gem_object_is_lmem(struct drm_i915_gem_object *obj)
{
struct intel_memory_region *mr = obj->mm.region;
struct intel_memory_region *mr = READ_ONCE(obj->mm.region);
#ifdef CONFIG_LOCKDEP
if (i915_gem_object_migratable(obj) &&
i915_gem_object_evictable(obj))
assert_object_held(obj);
#endif
return mr && (mr->type == INTEL_MEMORY_LOCAL ||
mr->type == INTEL_MEMORY_STOLEN_LOCAL);
}
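/*
 * Usage sketch (hypothetical caller): for a migratable and evictable
 * object the answer is only stable while the object lock is held:
 *
 *	if (!i915_gem_object_lock(obj, NULL)) {
 *		bool lmem = i915_gem_object_is_lmem(obj);
 *
 *		i915_gem_object_unlock(obj);
 *	}
 */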
......@@ -103,23 +79,3 @@ i915_gem_object_create_lmem(struct drm_i915_private *i915,
return i915_gem_object_create_region(i915->mm.regions[INTEL_REGION_LMEM],
size, flags);
}
int __i915_gem_lmem_object_init(struct intel_memory_region *mem,
struct drm_i915_gem_object *obj,
resource_size_t size,
unsigned int flags)
{
static struct lock_class_key lock_class;
struct drm_i915_private *i915 = mem->i915;
drm_gem_private_object_init(&i915->drm, &obj->base, size);
i915_gem_object_init(obj, &i915_gem_lmem_obj_ops, &lock_class, flags);
obj->read_domains = I915_GEM_DOMAIN_WC | I915_GEM_DOMAIN_GTT;
i915_gem_object_set_cache_coherency(obj, I915_CACHE_NONE);
i915_gem_object_init_memory_region(obj, mem);
return 0;
}
......@@ -26,9 +26,4 @@ i915_gem_object_create_lmem(struct drm_i915_private *i915,
resource_size_t size,
unsigned int flags);
int __i915_gem_lmem_object_init(struct intel_memory_region *mem,
struct drm_i915_gem_object *obj,
resource_size_t size,
unsigned int flags);
#endif /* !__I915_GEM_LMEM_H */
......@@ -19,6 +19,7 @@
#include "i915_gem_mman.h"
#include "i915_trace.h"
#include "i915_user_extensions.h"
#include "i915_gem_ttm.h"
#include "i915_vma.h"
static inline bool
......@@ -623,6 +624,8 @@ mmap_offset_attach(struct drm_i915_gem_object *obj,
struct i915_mmap_offset *mmo;
int err;
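	/* Objects that provide their own mmap_offset/mmap_ops hooks (the TTM
	 * backend) are routed away in __assign_mmap_offset and must never
	 * reach this legacy attach path. */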
GEM_BUG_ON(obj->ops->mmap_offset || obj->ops->mmap_ops);
mmo = lookup_mmo(obj, mmap_type);
if (mmo)
goto out;
......@@ -665,40 +668,47 @@ mmap_offset_attach(struct drm_i915_gem_object *obj,
}
static int
__assign_mmap_offset(struct drm_file *file,
u32 handle,
__assign_mmap_offset(struct drm_i915_gem_object *obj,
enum i915_mmap_type mmap_type,
u64 *offset)
u64 *offset, struct drm_file *file)
{
struct drm_i915_gem_object *obj;
struct i915_mmap_offset *mmo;
int err;
obj = i915_gem_object_lookup(file, handle);
if (!obj)
return -ENOENT;
if (i915_gem_object_never_mmap(obj))
return -ENODEV;
if (i915_gem_object_never_mmap(obj)) {
err = -ENODEV;
		goto out;
	}
if (obj->ops->mmap_offset) {
*offset = obj->ops->mmap_offset(obj);
return 0;
}
if (mmap_type != I915_MMAP_TYPE_GTT &&
!i915_gem_object_has_struct_page(obj) &&
!i915_gem_object_type_has(obj, I915_GEM_OBJECT_HAS_IOMEM)) {
err = -ENODEV;
goto out;
}
!i915_gem_object_type_has(obj, I915_GEM_OBJECT_HAS_IOMEM))
return -ENODEV;
mmo = mmap_offset_attach(obj, mmap_type, file);
if (IS_ERR(mmo)) {
err = PTR_ERR(mmo);
goto out;
}
if (IS_ERR(mmo))
return PTR_ERR(mmo);
*offset = drm_vma_node_offset_addr(&mmo->vma_node);
	err = 0;
out:
	i915_gem_object_put(obj);
	return err;
	return 0;
}
static int
__assign_mmap_offset_handle(struct drm_file *file,
u32 handle,
enum i915_mmap_type mmap_type,
u64 *offset)
{
struct drm_i915_gem_object *obj;
int err;
obj = i915_gem_object_lookup(file, handle);
if (!obj)
return -ENOENT;
err = __assign_mmap_offset(obj, mmap_type, offset, file);
i915_gem_object_put(obj);
return err;
}
......@@ -718,7 +728,7 @@ i915_gem_dumb_mmap_offset(struct drm_file *file,
else
mmap_type = I915_MMAP_TYPE_GTT;
return __assign_mmap_offset(file, handle, mmap_type, offset);
return __assign_mmap_offset_handle(file, handle, mmap_type, offset);
}
/**
......@@ -786,7 +796,7 @@ i915_gem_mmap_offset_ioctl(struct drm_device *dev, void *data,
return -EINVAL;
}
return __assign_mmap_offset(file, args->handle, type, &args->offset);
return __assign_mmap_offset_handle(file, args->handle, type, &args->offset);
}
static void vm_open(struct vm_area_struct *vma)
......@@ -890,8 +900,18 @@ int i915_gem_mmap(struct file *filp, struct vm_area_struct *vma)
* destroyed and will be invalid when the vma manager lock
* is released.
*/
mmo = container_of(node, struct i915_mmap_offset, vma_node);
obj = i915_gem_object_get_rcu(mmo->obj);
if (!node->driver_private) {
mmo = container_of(node, struct i915_mmap_offset, vma_node);
obj = i915_gem_object_get_rcu(mmo->obj);
GEM_BUG_ON(obj && obj->ops->mmap_ops);
} else {
obj = i915_gem_object_get_rcu
(container_of(node, struct drm_i915_gem_object,
base.vma_node));
GEM_BUG_ON(obj && !obj->ops->mmap_ops);
}
}
drm_vma_offset_unlock_lookup(dev->vma_offset_manager);
rcu_read_unlock();
......@@ -913,7 +933,9 @@ int i915_gem_mmap(struct file *filp, struct vm_area_struct *vma)
}
vma->vm_flags |= VM_PFNMAP | VM_DONTEXPAND | VM_DONTDUMP;
vma->vm_private_data = mmo;
if (i915_gem_object_has_iomem(obj))
vma->vm_flags |= VM_IO;
/*
* We keep the ref on mmo->obj, not vm_file, but we require
......@@ -927,6 +949,15 @@ int i915_gem_mmap(struct file *filp, struct vm_area_struct *vma)
/* Drop the initial creation reference, the vma is now holding one. */
fput(anon);
if (obj->ops->mmap_ops) {
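		/* TTM-backed object: hand the vma to the backend's own
		 * vm_ops, using the driver_private cookie stashed in the
		 * vma node instead of an i915_mmap_offset. */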
vma->vm_page_prot = pgprot_decrypted(vm_get_page_prot(vma->vm_flags));
vma->vm_ops = obj->ops->mmap_ops;
vma->vm_private_data = node->driver_private;
return 0;
}
vma->vm_private_data = mmo;
switch (mmo->mmap_type) {
case I915_MMAP_TYPE_WC:
vma->vm_page_prot =
......
......@@ -172,7 +172,7 @@ static void i915_gem_close_object(struct drm_gem_object *gem, struct drm_file *f
}
}
static void __i915_gem_free_object_rcu(struct rcu_head *head)
void __i915_gem_free_object_rcu(struct rcu_head *head)
{
struct drm_i915_gem_object *obj =
container_of(head, typeof(*obj), rcu);
......@@ -208,59 +208,69 @@ static void __i915_gem_object_free_mmaps(struct drm_i915_gem_object *obj)
}
}
static void __i915_gem_free_objects(struct drm_i915_private *i915,
struct llist_node *freed)
void __i915_gem_free_object(struct drm_i915_gem_object *obj)
{
struct drm_i915_gem_object *obj, *on;
trace_i915_gem_object_destroy(obj);
llist_for_each_entry_safe(obj, on, freed, freed) {
trace_i915_gem_object_destroy(obj);
if (!list_empty(&obj->vma.list)) {
struct i915_vma *vma;
if (!list_empty(&obj->vma.list)) {
struct i915_vma *vma;
/*
* Note that the vma keeps an object reference while
* it is active, so it *should* not sleep while we
* destroy it. Our debug code errs, insisting it *might*.
* For the moment, play along.
*/
spin_lock(&obj->vma.lock);
while ((vma = list_first_entry_or_null(&obj->vma.list,
struct i915_vma,
obj_link))) {
GEM_BUG_ON(vma->obj != obj);
spin_unlock(&obj->vma.lock);
__i915_vma_put(vma);
/*
* Note that the vma keeps an object reference while
* it is active, so it *should* not sleep while we
* destroy it. Our debug code errs, insisting it *might*.
* For the moment, play along.
*/
spin_lock(&obj->vma.lock);
while ((vma = list_first_entry_or_null(&obj->vma.list,
struct i915_vma,
obj_link))) {
GEM_BUG_ON(vma->obj != obj);
spin_unlock(&obj->vma.lock);
}
spin_unlock(&obj->vma.lock);
}
__i915_vma_put(vma);
__i915_gem_object_free_mmaps(obj);
spin_lock(&obj->vma.lock);
}
spin_unlock(&obj->vma.lock);
}
GEM_BUG_ON(!list_empty(&obj->lut_list));
__i915_gem_object_free_mmaps(obj);
atomic_set(&obj->mm.pages_pin_count, 0);
__i915_gem_object_put_pages(obj);
GEM_BUG_ON(i915_gem_object_has_pages(obj));
bitmap_free(obj->bit_17);
GEM_BUG_ON(!list_empty(&obj->lut_list));
if (obj->base.import_attach)
drm_prime_gem_destroy(&obj->base, NULL);
atomic_set(&obj->mm.pages_pin_count, 0);
__i915_gem_object_put_pages(obj);
GEM_BUG_ON(i915_gem_object_has_pages(obj));
bitmap_free(obj->bit_17);
drm_gem_free_mmap_offset(&obj->base);
if (obj->base.import_attach)
drm_prime_gem_destroy(&obj->base, NULL);
if (obj->ops->release)
obj->ops->release(obj);
drm_gem_free_mmap_offset(&obj->base);
if (obj->mm.n_placements > 1)
kfree(obj->mm.placements);
if (obj->ops->release)
obj->ops->release(obj);
if (obj->shares_resv_from)
i915_vm_resv_put(obj->shares_resv_from);
}
if (obj->mm.n_placements > 1)
kfree(obj->mm.placements);
static void __i915_gem_free_objects(struct drm_i915_private *i915,
struct llist_node *freed)
{
struct drm_i915_gem_object *obj, *on;
if (obj->shares_resv_from)
i915_vm_resv_put(obj->shares_resv_from);
llist_for_each_entry_safe(obj, on, freed, freed) {
might_sleep();
if (obj->ops->delayed_free) {
obj->ops->delayed_free(obj);
continue;
}
__i915_gem_free_object(obj);
/* But keep the pointer alive for RCU-protected lookups */
call_rcu(&obj->rcu, __i915_gem_free_object_rcu);
......@@ -318,6 +328,7 @@ static void i915_gem_free_object(struct drm_gem_object *gem_obj)
* worker and performing frees directly from subsequent allocations for
* crude but effective memory throttling.
*/
if (llist_add(&obj->freed, &i915->mm.free_list))
queue_work(i915->wq, &i915->mm.free_work);
}
......@@ -410,6 +421,60 @@ int i915_gem_object_read_from_page(struct drm_i915_gem_object *obj, u64 offset,
return 0;
}
/**
* i915_gem_object_evictable - Whether object is likely evictable after unbind.
* @obj: The object to check
*
* This function checks whether the object is likely evictable after unbind.
* If the object is not locked when checking, the result is only advisory.
* If the object is locked when checking, and the function returns true,
* then an eviction should indeed be possible. But since unlocked vma
* unpinning and unbinding is currently possible, the object can actually
* become evictable even if this function returns false.
*
* Return: true if the object may be evictable. False otherwise.
*/
bool i915_gem_object_evictable(struct drm_i915_gem_object *obj)
{
struct i915_vma *vma;
int pin_count = atomic_read(&obj->mm.pages_pin_count);
if (!pin_count)
return true;
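	/* Each vma that still holds pages accounts for one pages_pin_count
	 * reference; an unpinned vma can be unbound, dropping its reference.
	 * So the object is evictable iff unbinding the unpinned vmas would
	 * bring the pin count down to zero. */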
spin_lock(&obj->vma.lock);
list_for_each_entry(vma, &obj->vma.list, obj_link) {
if (i915_vma_is_pinned(vma)) {
spin_unlock(&obj->vma.lock);
return false;
}
if (atomic_read(&vma->pages_count))
pin_count--;
}
spin_unlock(&obj->vma.lock);
GEM_WARN_ON(pin_count < 0);
return pin_count == 0;
}
/**
* i915_gem_object_migratable - Whether the object is migratable out of the
* current region.
* @obj: Pointer to the object.
*
* Return: Whether the object is allowed to be resident in other
* regions than the current while pages are present.
*/
bool i915_gem_object_migratable(struct drm_i915_gem_object *obj)
{
struct intel_memory_region *mr = READ_ONCE(obj->mm.region);
if (!mr)
return false;
return obj->mm.n_placements > 1;
}
void i915_gem_init__objects(struct drm_i915_private *i915)
{
INIT_WORK(&i915->mm.free_work, __i915_gem_free_work);
......
......@@ -200,6 +200,9 @@ static inline bool i915_gem_object_trylock(struct drm_i915_gem_object *obj)
static inline void i915_gem_object_unlock(struct drm_i915_gem_object *obj)
{
if (obj->ops->adjust_lru)
obj->ops->adjust_lru(obj);
dma_resv_unlock(obj->base.resv);
}
......@@ -339,14 +342,14 @@ struct scatterlist *
__i915_gem_object_get_sg(struct drm_i915_gem_object *obj,
struct i915_gem_object_page_iter *iter,
unsigned int n,
unsigned int *offset, bool allow_alloc);
unsigned int *offset, bool allow_alloc, bool dma);
static inline struct scatterlist *
i915_gem_object_get_sg(struct drm_i915_gem_object *obj,
unsigned int n,
unsigned int *offset, bool allow_alloc)
{
return __i915_gem_object_get_sg(obj, &obj->mm.get_page, n, offset, allow_alloc);
return __i915_gem_object_get_sg(obj, &obj->mm.get_page, n, offset, allow_alloc, false);
}
static inline struct scatterlist *
......@@ -354,7 +357,7 @@ i915_gem_object_get_sg_dma(struct drm_i915_gem_object *obj,
unsigned int n,
unsigned int *offset, bool allow_alloc)
{
return __i915_gem_object_get_sg(obj, &obj->mm.get_dma_page, n, offset, allow_alloc);
return __i915_gem_object_get_sg(obj, &obj->mm.get_dma_page, n, offset, allow_alloc, true);
}
struct page *
......@@ -587,6 +590,16 @@ int i915_gem_object_read_from_page(struct drm_i915_gem_object *obj, u64 offset,
bool i915_gem_object_is_shmem(const struct drm_i915_gem_object *obj);
void __i915_gem_free_object_rcu(struct rcu_head *head);
void __i915_gem_free_object(struct drm_i915_gem_object *obj);
bool i915_gem_object_evictable(struct drm_i915_gem_object *obj);
bool i915_gem_object_migratable(struct drm_i915_gem_object *obj);
bool i915_gem_object_validates_to_lmem(struct drm_i915_gem_object *obj);
#ifdef CONFIG_MMU_NOTIFIER
static inline bool
i915_gem_object_is_userptr(struct drm_i915_gem_object *obj)
......
......@@ -61,10 +61,26 @@ struct drm_i915_gem_object_ops {
const struct drm_i915_gem_pread *arg);
int (*pwrite)(struct drm_i915_gem_object *obj,
const struct drm_i915_gem_pwrite *arg);
u64 (*mmap_offset)(struct drm_i915_gem_object *obj);
int (*dmabuf_export)(struct drm_i915_gem_object *obj);
/**
* adjust_lru - notify that the madvise value was updated
* @obj: The gem object
*
* The madvise value may have been updated, or the object may have been
* recently referenced, so act accordingly (perhaps by changing an LRU list, etc.).
*/
void (*adjust_lru)(struct drm_i915_gem_object *obj);
/**
* delayed_free - Override the default delayed free implementation
*/
void (*delayed_free)(struct drm_i915_gem_object *obj);
void (*release)(struct drm_i915_gem_object *obj);
const struct vm_operations_struct *mmap_ops;
const char *name; /* friendly name for debug, e.g. lockdep classes */
};
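A backend then wires these hooks up roughly as follows (a hypothetical sketch; the real table lives in the collapsed gem/i915_gem_ttm.c, and the handler names here are assumptions):

	static const struct drm_i915_gem_object_ops i915_gem_ttm_obj_ops = {
		.name = "i915_gem_object_ttm",
		.flags = I915_GEM_OBJECT_HAS_IOMEM,
		.get_pages = i915_ttm_get_pages,
		.put_pages = i915_ttm_put_pages,
		.adjust_lru = i915_ttm_adjust_lru,
		.delayed_free = i915_ttm_delayed_free,
		.mmap_offset = i915_ttm_mmap_offset,
		.mmap_ops = &vm_ops_ttm,
	};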
......@@ -187,12 +203,14 @@ struct drm_i915_gem_object {
#define I915_BO_ALLOC_VOLATILE BIT(1)
#define I915_BO_ALLOC_STRUCT_PAGE BIT(2)
#define I915_BO_ALLOC_CPU_CLEAR BIT(3)
#define I915_BO_ALLOC_USER BIT(4)
#define I915_BO_ALLOC_FLAGS (I915_BO_ALLOC_CONTIGUOUS | \
I915_BO_ALLOC_VOLATILE | \
I915_BO_ALLOC_STRUCT_PAGE | \
I915_BO_ALLOC_CPU_CLEAR)
#define I915_BO_READONLY BIT(4)
#define I915_TILING_QUIRK_BIT 5 /* unknown swizzling; do not release! */
I915_BO_ALLOC_CPU_CLEAR | \
I915_BO_ALLOC_USER)
#define I915_BO_READONLY BIT(5)
#define I915_TILING_QUIRK_BIT 6 /* unknown swizzling; do not release! */
/*
* Is the object to be mapped as read-only to the GPU
......@@ -310,6 +328,12 @@ struct drm_i915_gem_object {
bool dirty:1;
} mm;
struct {
struct sg_table *cached_io_st; /* cached sg-table of the io memory */
struct i915_gem_object_page_iter get_io_page; /* page lookup iterator for the io memory */
bool created:1; /* whether the embedded ttm bo has been initialized */
} ttm;
/** Record of address bit 17 of each page at last unbind. */
unsigned long *bit_17;
......
......@@ -467,9 +467,8 @@ __i915_gem_object_get_sg(struct drm_i915_gem_object *obj,
struct i915_gem_object_page_iter *iter,
unsigned int n,
unsigned int *offset,
bool allow_alloc)
bool allow_alloc, bool dma)
{
const bool dma = iter == &obj->mm.get_dma_page;
struct scatterlist *sg;
unsigned int idx, count;
......
......@@ -18,11 +18,7 @@ void i915_gem_object_init_memory_region(struct drm_i915_gem_object *obj,
mutex_lock(&mem->objects.lock);
if (obj->flags & I915_BO_ALLOC_VOLATILE)
list_add(&obj->mm.region_link, &mem->objects.purgeable);
else
list_add(&obj->mm.region_link, &mem->objects.list);
list_add(&obj->mm.region_link, &mem->objects.list);
mutex_unlock(&mem->objects.lock);
}
......
This diff is collapsed (the new file gem/i915_gem_ttm.c, which implements the TTM-backed object, is not expanded here).
/* SPDX-License-Identifier: MIT */
/*
* Copyright © 2021 Intel Corporation
*/
#ifndef _I915_GEM_TTM_H_
#define _I915_GEM_TTM_H_
#include "gem/i915_gem_object_types.h"
/**
* i915_gem_to_ttm - Convert a struct drm_i915_gem_object to a
* struct ttm_buffer_object.
* @obj: Pointer to the gem object.
*
* Return: Pointer to the embedded struct ttm_buffer_object.
*/
static inline struct ttm_buffer_object *
i915_gem_to_ttm(struct drm_i915_gem_object *obj)
{
return &obj->__do_not_access;
}
/*
* i915 ttm gem object destructor. Internal use only.
*/
void i915_ttm_bo_destroy(struct ttm_buffer_object *bo);
/**
* i915_ttm_to_gem - Convert a struct ttm_buffer_object to an embedding
* struct drm_i915_gem_object.
* @bo: Pointer to the ttm buffer object.
*
* Return: Pointer to the embedding struct drm_i915_gem_object, or NULL
* if the object was not an i915 ttm object.
*/
static inline struct drm_i915_gem_object *
i915_ttm_to_gem(struct ttm_buffer_object *bo)
{
if (GEM_WARN_ON(bo->destroy != i915_ttm_bo_destroy))
return NULL;
return container_of(bo, struct drm_i915_gem_object, __do_not_access);
}
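/*
 * Usage sketch: TTM callbacks receive bare ttm_buffer_objects and may see
 * bos that are not i915 objects (for example TTM's transfer/"ghost"
 * objects), hence the NULL check:
 *
 *	struct drm_i915_gem_object *obj = i915_ttm_to_gem(bo);
 *
 *	if (!obj)
 *		return;
 */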
int __i915_gem_ttm_object_init(struct intel_memory_region *mem,
struct drm_i915_gem_object *obj,
resource_size_t size,
unsigned int flags);
#endif
......@@ -578,16 +578,17 @@ static bool assert_mmap_offset(struct drm_i915_private *i915,
int expected)
{
struct drm_i915_gem_object *obj;
struct i915_mmap_offset *mmo;
u64 offset;
int ret;
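	/* Object creation may itself fail (e.g. with -E2BIG for an oversized
	 * object); that counts as a pass only when it matches a non-zero
	 * expected error. */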
obj = i915_gem_object_create_internal(i915, size);
if (IS_ERR(obj))
return false;
return expected && expected == PTR_ERR(obj);
mmo = mmap_offset_attach(obj, I915_MMAP_OFFSET_GTT, NULL);
ret = __assign_mmap_offset(obj, I915_MMAP_TYPE_GTT, &offset, NULL);
i915_gem_object_put(obj);
return PTR_ERR_OR_ZERO(mmo) == expected;
return ret == expected;
}
static void disable_retire_worker(struct drm_i915_private *i915)
......@@ -622,8 +623,8 @@ static int igt_mmap_offset_exhaustion(void *arg)
struct drm_mm *mm = &i915->drm.vma_offset_manager->vm_addr_space_mm;
struct drm_i915_gem_object *obj;
struct drm_mm_node *hole, *next;
struct i915_mmap_offset *mmo;
int loop, err = 0;
u64 offset;
/* Disable background reaper */
disable_retire_worker(i915);
......@@ -684,13 +685,13 @@ static int igt_mmap_offset_exhaustion(void *arg)
obj = i915_gem_object_create_internal(i915, PAGE_SIZE);
if (IS_ERR(obj)) {
err = PTR_ERR(obj);
pr_err("Unable to create object for reclaimed hole\n");
goto out;
}
mmo = mmap_offset_attach(obj, I915_MMAP_OFFSET_GTT, NULL);
if (IS_ERR(mmo)) {
err = __assign_mmap_offset(obj, I915_MMAP_TYPE_GTT, &offset, NULL);
if (err) {
pr_err("Unable to insert object into reclaimed hole\n");
err = PTR_ERR(mmo);
goto err_obj;
}
......@@ -865,10 +866,10 @@ static int __igt_mmap(struct drm_i915_private *i915,
struct drm_i915_gem_object *obj,
enum i915_mmap_type type)
{
struct i915_mmap_offset *mmo;
struct vm_area_struct *area;
unsigned long addr;
int err, i;
u64 offset;
if (!can_mmap(obj, type))
return 0;
......@@ -879,11 +880,11 @@ static int __igt_mmap(struct drm_i915_private *i915,
if (err)
return err;
mmo = mmap_offset_attach(obj, type, NULL);
if (IS_ERR(mmo))
return PTR_ERR(mmo);
err = __assign_mmap_offset(obj, type, &offset, NULL);
if (err)
return err;
addr = igt_mmap_node(i915, &mmo->vma_node, 0, PROT_WRITE, MAP_SHARED);
addr = igt_mmap_offset(i915, offset, obj->base.size, PROT_WRITE, MAP_SHARED);
if (IS_ERR_VALUE(addr))
return addr;
......@@ -897,13 +898,6 @@ static int __igt_mmap(struct drm_i915_private *i915,
goto out_unmap;
}
if (area->vm_private_data != mmo) {
pr_err("%s: vm_area_struct did not point back to our mmap_offset object!\n",
obj->mm.region->name);
err = -EINVAL;
goto out_unmap;
}
for (i = 0; i < obj->base.size / sizeof(u32); i++) {
u32 __user *ux = u64_to_user_ptr((u64)(addr + i * sizeof(*ux)));
u32 x;
......@@ -961,7 +955,7 @@ static int igt_mmap(void *arg)
struct drm_i915_gem_object *obj;
int err;
obj = i915_gem_object_create_region(mr, sizes[i], 0);
obj = i915_gem_object_create_region(mr, sizes[i], I915_BO_ALLOC_USER);
if (obj == ERR_PTR(-ENODEV))
continue;
......@@ -1004,12 +998,12 @@ static int __igt_mmap_access(struct drm_i915_private *i915,
struct drm_i915_gem_object *obj,
enum i915_mmap_type type)
{
struct i915_mmap_offset *mmo;
unsigned long __user *ptr;
unsigned long A, B;
unsigned long x, y;
unsigned long addr;
int err;
u64 offset;
memset(&A, 0xAA, sizeof(A));
memset(&B, 0xBB, sizeof(B));
......@@ -1017,11 +1011,11 @@ static int __igt_mmap_access(struct drm_i915_private *i915,
if (!can_mmap(obj, type) || !can_access(obj))
return 0;
mmo = mmap_offset_attach(obj, type, NULL);
if (IS_ERR(mmo))
return PTR_ERR(mmo);
err = __assign_mmap_offset(obj, type, &offset, NULL);
if (err)
return err;
addr = igt_mmap_node(i915, &mmo->vma_node, 0, PROT_WRITE, MAP_SHARED);
addr = igt_mmap_offset(i915, offset, obj->base.size, PROT_WRITE, MAP_SHARED);
if (IS_ERR_VALUE(addr))
return addr;
ptr = (unsigned long __user *)addr;
......@@ -1081,7 +1075,7 @@ static int igt_mmap_access(void *arg)
struct drm_i915_gem_object *obj;
int err;
obj = i915_gem_object_create_region(mr, PAGE_SIZE, 0);
obj = i915_gem_object_create_region(mr, PAGE_SIZE, I915_BO_ALLOC_USER);
if (obj == ERR_PTR(-ENODEV))
continue;
......@@ -1111,11 +1105,11 @@ static int __igt_mmap_gpu(struct drm_i915_private *i915,
enum i915_mmap_type type)
{
struct intel_engine_cs *engine;
struct i915_mmap_offset *mmo;
unsigned long addr;
u32 __user *ux;
u32 bbe;
int err;
u64 offset;
/*
* Verify that the mmap access into the backing store aligns with
......@@ -1132,11 +1126,11 @@ static int __igt_mmap_gpu(struct drm_i915_private *i915,
if (err)
return err;
mmo = mmap_offset_attach(obj, type, NULL);
if (IS_ERR(mmo))
return PTR_ERR(mmo);
err = __assign_mmap_offset(obj, type, &offset, NULL);
if (err)
return err;
addr = igt_mmap_node(i915, &mmo->vma_node, 0, PROT_WRITE, MAP_SHARED);
addr = igt_mmap_offset(i915, offset, obj->base.size, PROT_WRITE, MAP_SHARED);
if (IS_ERR_VALUE(addr))
return addr;
......@@ -1226,7 +1220,7 @@ static int igt_mmap_gpu(void *arg)
struct drm_i915_gem_object *obj;
int err;
obj = i915_gem_object_create_region(mr, PAGE_SIZE, 0);
obj = i915_gem_object_create_region(mr, PAGE_SIZE, I915_BO_ALLOC_USER);
if (obj == ERR_PTR(-ENODEV))
continue;
......@@ -1303,18 +1297,18 @@ static int __igt_mmap_revoke(struct drm_i915_private *i915,
struct drm_i915_gem_object *obj,
enum i915_mmap_type type)
{
struct i915_mmap_offset *mmo;
unsigned long addr;
int err;
u64 offset;
if (!can_mmap(obj, type))
return 0;
mmo = mmap_offset_attach(obj, type, NULL);
if (IS_ERR(mmo))
return PTR_ERR(mmo);
err = __assign_mmap_offset(obj, type, &offset, NULL);
if (err)
return err;
addr = igt_mmap_node(i915, &mmo->vma_node, 0, PROT_WRITE, MAP_SHARED);
addr = igt_mmap_offset(i915, offset, obj->base.size, PROT_WRITE, MAP_SHARED);
if (IS_ERR_VALUE(addr))
return addr;
......@@ -1350,10 +1344,20 @@ static int __igt_mmap_revoke(struct drm_i915_private *i915,
}
}
err = check_absent(addr, obj->base.size);
if (err) {
pr_err("%s: was not absent\n", obj->mm.region->name);
goto out_unmap;
if (!obj->ops->mmap_ops) {
err = check_absent(addr, obj->base.size);
if (err) {
pr_err("%s: was not absent\n", obj->mm.region->name);
goto out_unmap;
}
} else {
/* ttm allows access to evicted regions by design */
err = check_present(addr, obj->base.size);
if (err) {
pr_err("%s: was not present\n", obj->mm.region->name);
goto out_unmap;
}
}
out_unmap:
......@@ -1371,7 +1375,7 @@ static int igt_mmap_revoke(void *arg)
struct drm_i915_gem_object *obj;
int err;
obj = i915_gem_object_create_region(mr, PAGE_SIZE, 0);
obj = i915_gem_object_create_region(mr, PAGE_SIZE, I915_BO_ALLOC_USER);
if (obj == ERR_PTR(-ENODEV))
continue;
......
......@@ -9,6 +9,7 @@
#include "intel_region_ttm.h"
#include "gem/i915_gem_lmem.h"
#include "gem/i915_gem_region.h"
#include "gem/i915_gem_ttm.h"
#include "intel_region_lmem.h"
static int init_fake_lmem_bar(struct intel_memory_region *mem)
......@@ -107,7 +108,7 @@ region_lmem_init(struct intel_memory_region *mem)
static const struct intel_memory_region_ops intel_region_lmem_ops = {
.init = region_lmem_init,
.release = region_lmem_release,
.init_object = __i915_gem_lmem_object_init,
.init_object = __i915_gem_ttm_object_init,
};
struct intel_memory_region *
......
......@@ -1005,8 +1005,11 @@ i915_gem_madvise_ioctl(struct drm_device *dev, void *data,
}
}
if (obj->mm.madv != __I915_MADV_PURGED)
if (obj->mm.madv != __I915_MADV_PURGED) {
obj->mm.madv = args->madv;
if (obj->ops->adjust_lru)
obj->ops->adjust_lru(obj);
}
if (i915_gem_object_has_pages(obj)) {
unsigned long flags;
......
......@@ -149,7 +149,6 @@ intel_memory_region_create(struct drm_i915_private *i915,
mutex_init(&mem->objects.lock);
INIT_LIST_HEAD(&mem->objects.list);
INIT_LIST_HEAD(&mem->objects.purgeable);
INIT_LIST_HEAD(&mem->reserved);
mutex_init(&mem->mm_lock);
......
......@@ -101,7 +101,6 @@ struct intel_memory_region {
struct {
struct mutex lock; /* Protects access to objects */
struct list_head list;
struct list_head purgeable;
} objects;
size_t chunk_size;
......
......@@ -11,6 +11,7 @@
#include "intel_region_ttm.h"
#include "gem/i915_gem_ttm.h" /* For the funcs/ops export only */
/**
* DOC: TTM support structure
*
......@@ -20,9 +21,6 @@
* i915 GEM regions to TTM memory types and resource managers.
*/
/* A Zero-initialized driver for now. We don't have a TTM backend yet. */
static struct ttm_device_funcs i915_ttm_bo_driver;
/**
* intel_region_ttm_device_init - Initialize a TTM device
* @dev_priv: Pointer to an i915 device private structure.
......@@ -33,7 +31,7 @@ int intel_region_ttm_device_init(struct drm_i915_private *dev_priv)
{
struct drm_device *drm = &dev_priv->drm;
return ttm_device_init(&dev_priv->bdev, &i915_ttm_bo_driver,
return ttm_device_init(&dev_priv->bdev, i915_ttm_driver(),
drm->dev, drm->anon_inode->i_mapping,
drm->vma_offset_manager, false, false);
}
......@@ -177,6 +175,7 @@ struct sg_table *intel_region_ttm_node_to_st(struct intel_memory_region *mem,
mem->region.start);
}
#ifdef CONFIG_DRM_I915_SELFTEST
/**
* intel_region_ttm_node_alloc - Allocate memory resources from a region
* @mem: The memory region,
......@@ -224,3 +223,4 @@ intel_region_ttm_node_alloc(struct intel_memory_region *mem,
ret = -ENXIO;
return ret ? ERR_PTR(ret) : res;
}
#endif
......@@ -12,6 +12,7 @@
struct drm_i915_private;
struct intel_memory_region;
struct ttm_resource;
struct ttm_device_funcs;
int intel_region_ttm_device_init(struct drm_i915_private *dev_priv);
......@@ -24,11 +25,15 @@ void intel_region_ttm_fini(struct intel_memory_region *mem);
struct sg_table *intel_region_ttm_node_to_st(struct intel_memory_region *mem,
struct ttm_resource *res);
void intel_region_ttm_node_free(struct intel_memory_region *mem,
struct ttm_resource *node);
struct ttm_device_funcs *i915_ttm_driver(void);
#ifdef CONFIG_DRM_I915_SELFTEST
struct ttm_resource *
intel_region_ttm_node_alloc(struct intel_memory_region *mem,
resource_size_t size,
unsigned int flags);
void intel_region_ttm_node_free(struct intel_memory_region *mem,
struct ttm_resource *node);
#endif
#endif /* _INTEL_REGION_TTM_H_ */
......@@ -9,15 +9,28 @@
#include "i915_drv.h"
#include "igt_mmap.h"
unsigned long igt_mmap_node(struct drm_i915_private *i915,
struct drm_vma_offset_node *node,
unsigned long addr,
unsigned long prot,
unsigned long flags)
unsigned long igt_mmap_offset(struct drm_i915_private *i915,
u64 offset,
unsigned long size,
unsigned long prot,
unsigned long flags)
{
struct drm_vma_offset_node *node;
struct file *file;
unsigned long addr;
int err;
/* no need to refcount, we own this object */
drm_vma_offset_lock_lookup(i915->drm.vma_offset_manager);
node = drm_vma_offset_exact_lookup_locked(i915->drm.vma_offset_manager,
offset / PAGE_SIZE, size / PAGE_SIZE);
drm_vma_offset_unlock_lookup(i915->drm.vma_offset_manager);
if (GEM_WARN_ON(!node)) {
pr_info("Failed to lookup %llx\n", offset);
return -ENOENT;
}
/* Pretend to open("/dev/dri/card0") */
file = mock_drm_getfile(i915->drm.primary, O_RDWR);
if (IS_ERR(file))
......@@ -29,7 +42,7 @@ unsigned long igt_mmap_node(struct drm_i915_private *i915,
goto out_file;
}
addr = vm_mmap(file, addr, drm_vma_node_size(node) << PAGE_SHIFT,
addr = vm_mmap(file, 0, drm_vma_node_size(node) << PAGE_SHIFT,
prot, flags, drm_vma_node_offset_addr(node));
drm_vma_node_revoke(node, file->private_data);
......
......@@ -7,13 +7,15 @@
#ifndef IGT_MMAP_H
#define IGT_MMAP_H
#include <linux/types.h>
struct drm_i915_private;
struct drm_vma_offset_node;
unsigned long igt_mmap_node(struct drm_i915_private *i915,
struct drm_vma_offset_node *node,
unsigned long addr,
unsigned long prot,
unsigned long flags);
unsigned long igt_mmap_offset(struct drm_i915_private *i915,
u64 offset,
unsigned long size,
unsigned long prot,
unsigned long flags);
#endif /* IGT_MMAP_H */
......@@ -53,7 +53,7 @@ struct drm_vma_offset_node {
rwlock_t vm_lock;
struct drm_mm_node vm_node;
struct rb_root vm_files;
bool readonly:1;
void *driver_private;
};
struct drm_vma_offset_manager {
......