Commit d1487389 authored by Thomas Hellström, committed by Matthew Auld

drm/i915/ttm: Initialize the ttm device and memory managers

Temporarily remove the buddy allocator and related selftests
and hook up the TTM range manager for i915 regions.

Also modify the mock region selftests somewhat to account for a
fragmenting manager.
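
A condensed sketch of the resulting allocation path, assembled from the
lmem and mock get_pages implementations in this patch (error handling and
the optional CPU-clear step omitted):

	flags = I915_ALLOC_MIN_PAGE_SIZE;
	obj->mm.st_mm_node = intel_region_ttm_node_alloc(obj->mm.region,
							 obj->base.size,
							 flags);
	pages = intel_region_ttm_node_to_st(obj->mm.region, obj->mm.st_mm_node);
	__i915_gem_object_set_pages(obj, pages, i915_sg_dma_sizes(pages->sgl));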
Signed-off-by: Thomas Hellström <thomas.hellstrom@linux.intel.com>
Reviewed-by: Matthew Auld <matthew.auld@intel.com>
Signed-off-by: Matthew Auld <matthew.auld@intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20210602083818.241793-2-thomas.hellstrom@linux.intel.com
parent 0e4fe0c9
......@@ -26,6 +26,7 @@ config DRM_I915
select SND_HDA_I915 if SND_HDA_CORE
select CEC_CORE if CEC_NOTIFIER
select VMAP_PFN
select DRM_TTM
help
Choose this option if you have a system that has "Intel Graphics
Media Accelerator" or "HD Graphics" integrated graphics,
......
......@@ -50,6 +50,7 @@ i915-y += i915_drv.o \
intel_memory_region.o \
intel_pch.o \
intel_pm.o \
intel_region_ttm.o \
intel_runtime_pm.o \
intel_sideband.o \
intel_step.o \
......@@ -160,7 +161,6 @@ gem-y += \
i915-y += \
$(gem-y) \
i915_active.o \
i915_buddy.o \
i915_cmd_parser.o \
i915_gem_evict.o \
i915_gem_gtt.o \
......
......@@ -4,16 +4,71 @@
*/
#include "intel_memory_region.h"
#include "intel_region_ttm.h"
#include "gem/i915_gem_region.h"
#include "gem/i915_gem_lmem.h"
#include "i915_drv.h"
static void lmem_put_pages(struct drm_i915_gem_object *obj,
struct sg_table *pages)
{
intel_region_ttm_node_free(obj->mm.region, obj->mm.st_mm_node);
obj->mm.dirty = false;
sg_free_table(pages);
kfree(pages);
}
static int lmem_get_pages(struct drm_i915_gem_object *obj)
{
unsigned int flags;
struct sg_table *pages;
flags = I915_ALLOC_MIN_PAGE_SIZE;
if (obj->flags & I915_BO_ALLOC_CONTIGUOUS)
flags |= I915_ALLOC_CONTIGUOUS;
obj->mm.st_mm_node = intel_region_ttm_node_alloc(obj->mm.region,
obj->base.size,
flags);
if (IS_ERR(obj->mm.st_mm_node))
return PTR_ERR(obj->mm.st_mm_node);
/* The range manager is always contiguous */
if (obj->mm.region->is_range_manager)
obj->flags |= I915_BO_ALLOC_CONTIGUOUS;
pages = intel_region_ttm_node_to_st(obj->mm.region, obj->mm.st_mm_node);
if (IS_ERR(pages)) {
intel_region_ttm_node_free(obj->mm.region, obj->mm.st_mm_node);
return PTR_ERR(pages);
}
__i915_gem_object_set_pages(obj, pages, i915_sg_dma_sizes(pages->sgl));
if (obj->flags & I915_BO_ALLOC_CPU_CLEAR) {
void __iomem *vaddr =
i915_gem_object_lmem_io_map(obj, 0, obj->base.size);
if (!vaddr) {
struct sg_table *pages =
__i915_gem_object_unset_pages(obj);
if (!IS_ERR_OR_NULL(pages))
lmem_put_pages(obj, pages);
return -ENOMEM;
}
memset_io(vaddr, 0, obj->base.size);
io_mapping_unmap(vaddr);
}
return 0;
}
const struct drm_i915_gem_object_ops i915_gem_lmem_obj_ops = {
.name = "i915_gem_object_lmem",
.flags = I915_GEM_OBJECT_HAS_IOMEM,
.get_pages = i915_gem_object_get_pages_buddy,
.put_pages = i915_gem_object_put_pages_buddy,
.get_pages = lmem_get_pages,
.put_pages = lmem_put_pages,
.release = i915_gem_object_release_memory_region,
};
......
......@@ -235,10 +235,12 @@ struct drm_i915_gem_object {
* Memory region for this object.
*/
struct intel_memory_region *region;
/**
* List of memory region blocks allocated for this object.
* Memory manager node allocated for this object.
*/
struct list_head blocks;
void *st_mm_node;
/**
* Element within memory_region->objects or region->purgeable
* if the object is marked as DONTNEED. Access is protected by
......
......@@ -475,7 +475,8 @@ __i915_gem_object_get_sg(struct drm_i915_gem_object *obj,
might_sleep();
GEM_BUG_ON(n >= obj->base.size >> PAGE_SHIFT);
GEM_BUG_ON(!i915_gem_object_has_pinned_pages(obj));
if (!i915_gem_object_has_pinned_pages(obj))
assert_object_held(obj);
/* As we iterate forward through the sg, we record each entry in a
* radixtree for quick repeated (backwards) lookups. If we have seen
......
......@@ -8,129 +8,9 @@
#include "i915_drv.h"
#include "i915_trace.h"
void
i915_gem_object_put_pages_buddy(struct drm_i915_gem_object *obj,
struct sg_table *pages)
{
__intel_memory_region_put_pages_buddy(obj->mm.region, &obj->mm.blocks);
obj->mm.dirty = false;
sg_free_table(pages);
kfree(pages);
}
int
i915_gem_object_get_pages_buddy(struct drm_i915_gem_object *obj)
{
const u64 max_segment = i915_sg_segment_size();
struct intel_memory_region *mem = obj->mm.region;
struct list_head *blocks = &obj->mm.blocks;
resource_size_t size = obj->base.size;
resource_size_t prev_end;
struct i915_buddy_block *block;
unsigned int flags;
struct sg_table *st;
struct scatterlist *sg;
unsigned int sg_page_sizes;
int ret;
st = kmalloc(sizeof(*st), GFP_KERNEL);
if (!st)
return -ENOMEM;
if (sg_alloc_table(st, size >> PAGE_SHIFT, GFP_KERNEL)) {
kfree(st);
return -ENOMEM;
}
flags = I915_ALLOC_MIN_PAGE_SIZE;
if (obj->flags & I915_BO_ALLOC_CONTIGUOUS)
flags |= I915_ALLOC_CONTIGUOUS;
ret = __intel_memory_region_get_pages_buddy(mem, size, flags, blocks);
if (ret)
goto err_free_sg;
GEM_BUG_ON(list_empty(blocks));
sg = st->sgl;
st->nents = 0;
sg_page_sizes = 0;
prev_end = (resource_size_t)-1;
list_for_each_entry(block, blocks, link) {
u64 block_size, offset;
block_size = min_t(u64, size,
i915_buddy_block_size(&mem->mm, block));
offset = i915_buddy_block_offset(block);
while (block_size) {
u64 len;
if (offset != prev_end || sg->length >= max_segment) {
if (st->nents) {
sg_page_sizes |= sg->length;
sg = __sg_next(sg);
}
sg_dma_address(sg) = mem->region.start + offset;
sg_dma_len(sg) = 0;
sg->length = 0;
st->nents++;
}
len = min(block_size, max_segment - sg->length);
sg->length += len;
sg_dma_len(sg) += len;
offset += len;
block_size -= len;
prev_end = offset;
}
}
sg_page_sizes |= sg->length;
sg_mark_end(sg);
i915_sg_trim(st);
/* Intended for kernel internal use only */
if (obj->flags & I915_BO_ALLOC_CPU_CLEAR) {
struct scatterlist *sg;
unsigned long i;
for_each_sg(st->sgl, sg, st->nents, i) {
unsigned int length;
void __iomem *vaddr;
dma_addr_t daddr;
daddr = sg_dma_address(sg);
daddr -= mem->region.start;
length = sg_dma_len(sg);
vaddr = io_mapping_map_wc(&mem->iomap, daddr, length);
memset64((void __force *)vaddr, 0, length / sizeof(u64));
io_mapping_unmap(vaddr);
}
wmb();
}
__i915_gem_object_set_pages(obj, st, sg_page_sizes);
return 0;
err_free_sg:
sg_free_table(st);
kfree(st);
return ret;
}
void i915_gem_object_init_memory_region(struct drm_i915_gem_object *obj,
struct intel_memory_region *mem)
{
INIT_LIST_HEAD(&obj->mm.blocks);
obj->mm.region = intel_memory_region_get(mem);
if (obj->base.size <= mem->min_page_size)
......
......@@ -12,10 +12,6 @@ struct intel_memory_region;
struct drm_i915_gem_object;
struct sg_table;
int i915_gem_object_get_pages_buddy(struct drm_i915_gem_object *obj);
void i915_gem_object_put_pages_buddy(struct drm_i915_gem_object *obj,
struct sg_table *pages);
void i915_gem_object_init_memory_region(struct drm_i915_gem_object *obj,
struct intel_memory_region *mem);
void i915_gem_object_release_memory_region(struct drm_i915_gem_object *obj);
......
......@@ -628,11 +628,13 @@ static const struct intel_memory_region_ops shmem_region_ops = {
.init_object = shmem_object_init,
};
struct intel_memory_region *i915_gem_shmem_setup(struct drm_i915_private *i915)
struct intel_memory_region *i915_gem_shmem_setup(struct drm_i915_private *i915,
u16 type, u16 instance)
{
return intel_memory_region_create(i915, 0,
totalram_pages() << PAGE_SHIFT,
PAGE_SIZE, 0,
type, instance,
&shmem_region_ops);
}
......
......@@ -772,7 +772,8 @@ static const struct intel_memory_region_ops i915_region_stolen_lmem_ops = {
};
struct intel_memory_region *
i915_gem_stolen_lmem_setup(struct drm_i915_private *i915)
i915_gem_stolen_lmem_setup(struct drm_i915_private *i915, u16 type,
u16 instance)
{
struct intel_uncore *uncore = &i915->uncore;
struct pci_dev *pdev = to_pci_dev(i915->drm.dev);
......@@ -790,6 +791,7 @@ i915_gem_stolen_lmem_setup(struct drm_i915_private *i915)
mem = intel_memory_region_create(i915, lmem_base, lmem_size,
I915_GTT_PAGE_SIZE_4K, io_start,
type, instance,
&i915_region_stolen_lmem_ops);
if (IS_ERR(mem))
return mem;
......@@ -811,14 +813,15 @@ i915_gem_stolen_lmem_setup(struct drm_i915_private *i915)
}
struct intel_memory_region*
i915_gem_stolen_smem_setup(struct drm_i915_private *i915)
i915_gem_stolen_smem_setup(struct drm_i915_private *i915, u16 type,
u16 instance)
{
struct intel_memory_region *mem;
mem = intel_memory_region_create(i915,
intel_graphics_stolen_res.start,
resource_size(&intel_graphics_stolen_res),
PAGE_SIZE, 0,
PAGE_SIZE, 0, type, instance,
&i915_region_stolen_smem_ops);
if (IS_ERR(mem))
return mem;
......@@ -826,7 +829,6 @@ i915_gem_stolen_smem_setup(struct drm_i915_private *i915)
intel_memory_region_set_name(mem, "stolen-system");
mem->private = true;
return mem;
}
......
......@@ -21,8 +21,13 @@ int i915_gem_stolen_insert_node_in_range(struct drm_i915_private *dev_priv,
u64 end);
void i915_gem_stolen_remove_node(struct drm_i915_private *dev_priv,
struct drm_mm_node *node);
struct intel_memory_region *i915_gem_stolen_smem_setup(struct drm_i915_private *i915);
struct intel_memory_region *i915_gem_stolen_lmem_setup(struct drm_i915_private *i915);
struct intel_memory_region *
i915_gem_stolen_smem_setup(struct drm_i915_private *i915, u16 type,
u16 instance);
struct intel_memory_region *
i915_gem_stolen_lmem_setup(struct drm_i915_private *i915, u16 type,
u16 instance);
struct drm_i915_gem_object *
i915_gem_object_create_stolen(struct drm_i915_private *dev_priv,
resource_size_t size);
......
......@@ -68,8 +68,6 @@ int intel_gt_probe_lmem(struct intel_gt *gt)
id = INTEL_REGION_LMEM;
mem->id = id;
mem->type = INTEL_MEMORY_LOCAL;
mem->instance = 0;
intel_memory_region_set_name(mem, "local%u", mem->instance);
......
......@@ -5,6 +5,8 @@
#include "i915_drv.h"
#include "intel_memory_region.h"
#include "intel_region_lmem.h"
#include "intel_region_ttm.h"
#include "gem/i915_gem_lmem.h"
#include "gem/i915_gem_region.h"
#include "intel_region_lmem.h"
......@@ -66,9 +68,9 @@ static void release_fake_lmem_bar(struct intel_memory_region *mem)
static void
region_lmem_release(struct intel_memory_region *mem)
{
release_fake_lmem_bar(mem);
intel_region_ttm_fini(mem);
io_mapping_fini(&mem->iomap);
intel_memory_region_release_buddy(mem);
release_fake_lmem_bar(mem);
}
static int
......@@ -83,12 +85,21 @@ region_lmem_init(struct intel_memory_region *mem)
if (!io_mapping_init_wc(&mem->iomap,
mem->io_start,
resource_size(&mem->region)))
return -EIO;
resource_size(&mem->region))) {
ret = -EIO;
goto out_no_io;
}
ret = intel_memory_region_init_buddy(mem);
ret = intel_region_ttm_init(mem);
if (ret)
io_mapping_fini(&mem->iomap);
goto out_no_buddy;
return 0;
out_no_buddy:
io_mapping_fini(&mem->iomap);
out_no_io:
release_fake_lmem_bar(mem);
return ret;
}
......@@ -127,6 +138,8 @@ intel_gt_setup_fake_lmem(struct intel_gt *gt)
mappable_end,
PAGE_SIZE,
io_start,
INTEL_MEMORY_LOCAL,
0,
&intel_region_lmem_ops);
if (!IS_ERR(mem)) {
drm_info(&i915->drm, "Intel graphics fake LMEM: %pR\n",
......@@ -198,6 +211,8 @@ static struct intel_memory_region *setup_lmem(struct intel_gt *gt)
lmem_size,
I915_GTT_PAGE_SIZE_4K,
io_start,
INTEL_MEMORY_LOCAL,
0,
&intel_region_lmem_ops);
if (IS_ERR(mem))
return mem;
......
// SPDX-License-Identifier: MIT
/*
* Copyright © 2019 Intel Corporation
*/
#include <linux/kmemleak.h>
#include <linux/slab.h>
#include "i915_buddy.h"
#include "i915_gem.h"
#include "i915_globals.h"
#include "i915_utils.h"
static struct i915_global_block {
struct i915_global base;
struct kmem_cache *slab_blocks;
} global;
static void i915_global_buddy_shrink(void)
{
kmem_cache_shrink(global.slab_blocks);
}
static void i915_global_buddy_exit(void)
{
kmem_cache_destroy(global.slab_blocks);
}
static struct i915_global_block global = { {
.shrink = i915_global_buddy_shrink,
.exit = i915_global_buddy_exit,
} };
int __init i915_global_buddy_init(void)
{
global.slab_blocks = KMEM_CACHE(i915_buddy_block, SLAB_HWCACHE_ALIGN);
if (!global.slab_blocks)
return -ENOMEM;
i915_global_register(&global.base);
return 0;
}
static struct i915_buddy_block *i915_block_alloc(struct i915_buddy_block *parent,
unsigned int order,
u64 offset)
{
struct i915_buddy_block *block;
GEM_BUG_ON(order > I915_BUDDY_MAX_ORDER);
block = kmem_cache_zalloc(global.slab_blocks, GFP_KERNEL);
if (!block)
return NULL;
block->header = offset;
block->header |= order;
block->parent = parent;
GEM_BUG_ON(block->header & I915_BUDDY_HEADER_UNUSED);
return block;
}
static void i915_block_free(struct i915_buddy_block *block)
{
kmem_cache_free(global.slab_blocks, block);
}
static void mark_allocated(struct i915_buddy_block *block)
{
block->header &= ~I915_BUDDY_HEADER_STATE;
block->header |= I915_BUDDY_ALLOCATED;
list_del(&block->link);
}
static void mark_free(struct i915_buddy_mm *mm,
struct i915_buddy_block *block)
{
block->header &= ~I915_BUDDY_HEADER_STATE;
block->header |= I915_BUDDY_FREE;
list_add(&block->link,
&mm->free_list[i915_buddy_block_order(block)]);
}
static void mark_split(struct i915_buddy_block *block)
{
block->header &= ~I915_BUDDY_HEADER_STATE;
block->header |= I915_BUDDY_SPLIT;
list_del(&block->link);
}
int i915_buddy_init(struct i915_buddy_mm *mm, u64 size, u64 chunk_size)
{
unsigned int i;
u64 offset;
if (size < chunk_size)
return -EINVAL;
if (chunk_size < PAGE_SIZE)
return -EINVAL;
if (!is_power_of_2(chunk_size))
return -EINVAL;
size = round_down(size, chunk_size);
mm->size = size;
mm->chunk_size = chunk_size;
mm->max_order = ilog2(size) - ilog2(chunk_size);
GEM_BUG_ON(mm->max_order > I915_BUDDY_MAX_ORDER);
mm->free_list = kmalloc_array(mm->max_order + 1,
sizeof(struct list_head),
GFP_KERNEL);
if (!mm->free_list)
return -ENOMEM;
for (i = 0; i <= mm->max_order; ++i)
INIT_LIST_HEAD(&mm->free_list[i]);
mm->n_roots = hweight64(size);
mm->roots = kmalloc_array(mm->n_roots,
sizeof(struct i915_buddy_block *),
GFP_KERNEL);
if (!mm->roots)
goto out_free_list;
offset = 0;
i = 0;
/*
* Split into power-of-two blocks, in case we are given a size that is
* not itself a power-of-two.
*/
do {
struct i915_buddy_block *root;
unsigned int order;
u64 root_size;
root_size = rounddown_pow_of_two(size);
order = ilog2(root_size) - ilog2(chunk_size);
root = i915_block_alloc(NULL, order, offset);
if (!root)
goto out_free_roots;
mark_free(mm, root);
GEM_BUG_ON(i > mm->max_order);
GEM_BUG_ON(i915_buddy_block_size(mm, root) < chunk_size);
mm->roots[i] = root;
offset += root_size;
size -= root_size;
i++;
} while (size);
return 0;
out_free_roots:
while (i--)
i915_block_free(mm->roots[i]);
kfree(mm->roots);
out_free_list:
kfree(mm->free_list);
return -ENOMEM;
}
void i915_buddy_fini(struct i915_buddy_mm *mm)
{
int i;
for (i = 0; i < mm->n_roots; ++i) {
GEM_WARN_ON(!i915_buddy_block_is_free(mm->roots[i]));
i915_block_free(mm->roots[i]);
}
kfree(mm->roots);
kfree(mm->free_list);
}
static int split_block(struct i915_buddy_mm *mm,
struct i915_buddy_block *block)
{
unsigned int block_order = i915_buddy_block_order(block) - 1;
u64 offset = i915_buddy_block_offset(block);
GEM_BUG_ON(!i915_buddy_block_is_free(block));
GEM_BUG_ON(!i915_buddy_block_order(block));
block->left = i915_block_alloc(block, block_order, offset);
if (!block->left)
return -ENOMEM;
block->right = i915_block_alloc(block, block_order,
offset + (mm->chunk_size << block_order));
if (!block->right) {
i915_block_free(block->left);
return -ENOMEM;
}
mark_free(mm, block->left);
mark_free(mm, block->right);
mark_split(block);
return 0;
}
static struct i915_buddy_block *
get_buddy(struct i915_buddy_block *block)
{
struct i915_buddy_block *parent;
parent = block->parent;
if (!parent)
return NULL;
if (parent->left == block)
return parent->right;
return parent->left;
}
static void __i915_buddy_free(struct i915_buddy_mm *mm,
struct i915_buddy_block *block)
{
struct i915_buddy_block *parent;
while ((parent = block->parent)) {
struct i915_buddy_block *buddy;
buddy = get_buddy(block);
if (!i915_buddy_block_is_free(buddy))
break;
list_del(&buddy->link);
i915_block_free(block);
i915_block_free(buddy);
block = parent;
}
mark_free(mm, block);
}
void i915_buddy_free(struct i915_buddy_mm *mm,
struct i915_buddy_block *block)
{
GEM_BUG_ON(!i915_buddy_block_is_allocated(block));
__i915_buddy_free(mm, block);
}
void i915_buddy_free_list(struct i915_buddy_mm *mm, struct list_head *objects)
{
struct i915_buddy_block *block, *on;
list_for_each_entry_safe(block, on, objects, link) {
i915_buddy_free(mm, block);
cond_resched();
}
INIT_LIST_HEAD(objects);
}
/*
* Allocate power-of-two block. The order value here translates to:
*
* 0 = 2^0 * mm->chunk_size
* 1 = 2^1 * mm->chunk_size
* 2 = 2^2 * mm->chunk_size
* ...
*/
struct i915_buddy_block *
i915_buddy_alloc(struct i915_buddy_mm *mm, unsigned int order)
{
struct i915_buddy_block *block = NULL;
unsigned int i;
int err;
for (i = order; i <= mm->max_order; ++i) {
block = list_first_entry_or_null(&mm->free_list[i],
struct i915_buddy_block,
link);
if (block)
break;
}
if (!block)
return ERR_PTR(-ENOSPC);
GEM_BUG_ON(!i915_buddy_block_is_free(block));
while (i != order) {
err = split_block(mm, block);
if (unlikely(err))
goto out_free;
/* Go low */
block = block->left;
i--;
}
mark_allocated(block);
kmemleak_update_trace(block);
return block;
out_free:
if (i != order)
__i915_buddy_free(mm, block);
return ERR_PTR(err);
}
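/*
 * A minimal usage sketch for the order mapping documented above
 * (illustrative only; the function name and the caller-owned mutex are
 * hypothetical, following the locking note in i915_buddy.h). With
 * mm->chunk_size == SZ_4K, order 0 covers 4K and order 4 covers 64K.
 */
static struct i915_buddy_block *
sketch_buddy_alloc_size(struct i915_buddy_mm *mm, struct mutex *lock, u64 size)
{
	u64 sz = max_t(u64, roundup_pow_of_two(size), mm->chunk_size);
	unsigned int order = ilog2(sz) - ilog2(mm->chunk_size);
	struct i915_buddy_block *block;

	mutex_lock(lock);
	block = i915_buddy_alloc(mm, order);
	mutex_unlock(lock);

	return block;
}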
static inline bool overlaps(u64 s1, u64 e1, u64 s2, u64 e2)
{
return s1 <= e2 && e1 >= s2;
}
static inline bool contains(u64 s1, u64 e1, u64 s2, u64 e2)
{
return s1 <= s2 && e1 >= e2;
}
/*
* Allocate range. Note that it's safe to chain together multiple alloc_ranges
* with the same blocks list.
*
* Intended for pre-allocating portions of the address space, for example to
* reserve a block for the initial framebuffer or similar, hence the expectation
* here is that i915_buddy_alloc() is still the main vehicle for
* allocations, so if that's not the case then the drm_mm range allocator is
* probably a much better fit, and so you should probably go use that instead.
*/
int i915_buddy_alloc_range(struct i915_buddy_mm *mm,
struct list_head *blocks,
u64 start, u64 size)
{
struct i915_buddy_block *block;
struct i915_buddy_block *buddy;
LIST_HEAD(allocated);
LIST_HEAD(dfs);
u64 end;
int err;
int i;
if (size < mm->chunk_size)
return -EINVAL;
if (!IS_ALIGNED(size | start, mm->chunk_size))
return -EINVAL;
if (range_overflows(start, size, mm->size))
return -EINVAL;
for (i = 0; i < mm->n_roots; ++i)
list_add_tail(&mm->roots[i]->tmp_link, &dfs);
end = start + size - 1;
do {
u64 block_start;
u64 block_end;
block = list_first_entry_or_null(&dfs,
struct i915_buddy_block,
tmp_link);
if (!block)
break;
list_del(&block->tmp_link);
block_start = i915_buddy_block_offset(block);
block_end = block_start + i915_buddy_block_size(mm, block) - 1;
if (!overlaps(start, end, block_start, block_end))
continue;
if (i915_buddy_block_is_allocated(block)) {
err = -ENOSPC;
goto err_free;
}
if (contains(start, end, block_start, block_end)) {
if (!i915_buddy_block_is_free(block)) {
err = -ENOSPC;
goto err_free;
}
mark_allocated(block);
list_add_tail(&block->link, &allocated);
continue;
}
if (!i915_buddy_block_is_split(block)) {
err = split_block(mm, block);
if (unlikely(err))
goto err_undo;
}
list_add(&block->right->tmp_link, &dfs);
list_add(&block->left->tmp_link, &dfs);
} while (1);
list_splice_tail(&allocated, blocks);
return 0;
err_undo:
/*
* We really don't want to leave around a bunch of split blocks, since
* bigger is better, so make sure we merge everything back before we
* free the allocated blocks.
*/
buddy = get_buddy(block);
if (buddy &&
(i915_buddy_block_is_free(block) &&
i915_buddy_block_is_free(buddy)))
__i915_buddy_free(mm, block);
err_free:
i915_buddy_free_list(mm, &allocated);
return err;
}
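/*
 * A minimal usage sketch for the range allocator above (illustrative only;
 * function name hypothetical): pre-reserve a firmware framebuffer at a
 * fixed, chunk-aligned offset, then release it via the list-based helper.
 */
static int sketch_reserve_fb_range(struct i915_buddy_mm *mm,
				   u64 fb_start, u64 fb_size)
{
	LIST_HEAD(reserved);
	int err;

	err = i915_buddy_alloc_range(mm, &reserved, fb_start, fb_size);
	if (err)
		return err;

	/* The range is now excluded from i915_buddy_alloc()... */

	i915_buddy_free_list(mm, &reserved);
	return 0;
}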
#if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
#include "selftests/i915_buddy.c"
#endif
/* SPDX-License-Identifier: MIT */
/*
* Copyright © 2019 Intel Corporation
*/
#ifndef __I915_BUDDY_H__
#define __I915_BUDDY_H__
#include <linux/bitops.h>
#include <linux/list.h>
struct i915_buddy_block {
#define I915_BUDDY_HEADER_OFFSET GENMASK_ULL(63, 12)
#define I915_BUDDY_HEADER_STATE GENMASK_ULL(11, 10)
#define I915_BUDDY_ALLOCATED (1 << 10)
#define I915_BUDDY_FREE (2 << 10)
#define I915_BUDDY_SPLIT (3 << 10)
/* Free to be used, if needed in the future */
#define I915_BUDDY_HEADER_UNUSED GENMASK_ULL(9, 6)
#define I915_BUDDY_HEADER_ORDER GENMASK_ULL(5, 0)
u64 header;
struct i915_buddy_block *left;
struct i915_buddy_block *right;
struct i915_buddy_block *parent;
void *private; /* owned by creator */
/*
* While the block is allocated by the user through i915_buddy_alloc*,
* the user has ownership of the link, for example to maintain within
* a list, if so desired. As soon as the block is freed with
* i915_buddy_free*, ownership is given back to the mm.
*/
struct list_head link;
struct list_head tmp_link;
};
/* Order-zero must be at least PAGE_SIZE */
#define I915_BUDDY_MAX_ORDER (63 - PAGE_SHIFT)
/*
* Binary Buddy System.
*
* Locking should be handled by the user, a simple mutex around
* i915_buddy_alloc* and i915_buddy_free* should suffice.
*/
struct i915_buddy_mm {
/* Maintain a free list for each order. */
struct list_head *free_list;
/*
* Maintain explicit binary tree(s) to track the allocation of the
* address space. This gives us a simple way of finding a buddy block
* and performing the potentially recursive merge step when freeing a
* block. Nodes are either allocated or free, in which case they will
* also exist on the respective free list.
*/
struct i915_buddy_block **roots;
/*
* Anything from here is public, and remains static for the lifetime of
* the mm. Everything above is considered do-not-touch.
*/
unsigned int n_roots;
unsigned int max_order;
/* Must be at least PAGE_SIZE */
u64 chunk_size;
u64 size;
};
static inline u64
i915_buddy_block_offset(struct i915_buddy_block *block)
{
return block->header & I915_BUDDY_HEADER_OFFSET;
}
static inline unsigned int
i915_buddy_block_order(struct i915_buddy_block *block)
{
return block->header & I915_BUDDY_HEADER_ORDER;
}
static inline unsigned int
i915_buddy_block_state(struct i915_buddy_block *block)
{
return block->header & I915_BUDDY_HEADER_STATE;
}
static inline bool
i915_buddy_block_is_allocated(struct i915_buddy_block *block)
{
return i915_buddy_block_state(block) == I915_BUDDY_ALLOCATED;
}
static inline bool
i915_buddy_block_is_free(struct i915_buddy_block *block)
{
return i915_buddy_block_state(block) == I915_BUDDY_FREE;
}
static inline bool
i915_buddy_block_is_split(struct i915_buddy_block *block)
{
return i915_buddy_block_state(block) == I915_BUDDY_SPLIT;
}
static inline u64
i915_buddy_block_size(struct i915_buddy_mm *mm,
struct i915_buddy_block *block)
{
return mm->chunk_size << i915_buddy_block_order(block);
}
int i915_buddy_init(struct i915_buddy_mm *mm, u64 size, u64 chunk_size);
void i915_buddy_fini(struct i915_buddy_mm *mm);
struct i915_buddy_block *
i915_buddy_alloc(struct i915_buddy_mm *mm, unsigned int order);
int i915_buddy_alloc_range(struct i915_buddy_mm *mm,
struct list_head *blocks,
u64 start, u64 size);
void i915_buddy_free(struct i915_buddy_mm *mm, struct i915_buddy_block *block);
void i915_buddy_free_list(struct i915_buddy_mm *mm, struct list_head *objects);
#endif
......@@ -84,6 +84,7 @@
#include "intel_gvt.h"
#include "intel_memory_region.h"
#include "intel_pm.h"
#include "intel_region_ttm.h"
#include "intel_sideband.h"
#include "vlv_suspend.h"
......@@ -335,6 +336,10 @@ static int i915_driver_early_probe(struct drm_i915_private *dev_priv)
if (ret < 0)
goto err_workqueues;
ret = intel_region_ttm_device_init(dev_priv);
if (ret)
goto err_ttm;
intel_wopcm_init_early(&dev_priv->wopcm);
intel_gt_init_early(&dev_priv->gt, dev_priv);
......@@ -359,6 +364,8 @@ static int i915_driver_early_probe(struct drm_i915_private *dev_priv)
err_gem:
i915_gem_cleanup_early(dev_priv);
intel_gt_driver_late_release(&dev_priv->gt);
intel_region_ttm_device_fini(dev_priv);
err_ttm:
vlv_suspend_cleanup(dev_priv);
err_workqueues:
i915_workqueues_cleanup(dev_priv);
......@@ -376,6 +383,7 @@ static void i915_driver_late_release(struct drm_i915_private *dev_priv)
intel_power_domains_cleanup(dev_priv);
i915_gem_cleanup_early(dev_priv);
intel_gt_driver_late_release(&dev_priv->gt);
intel_region_ttm_device_fini(dev_priv);
vlv_suspend_cleanup(dev_priv);
i915_workqueues_cleanup(dev_priv);
......
......@@ -59,6 +59,7 @@
#include <drm/drm_atomic.h>
#include <drm/drm_connector.h>
#include <drm/i915_mei_hdcp_interface.h>
#include <drm/ttm/ttm_device.h>
#include "i915_params.h"
#include "i915_reg.h"
......@@ -776,6 +777,7 @@ struct intel_cdclk_config {
struct i915_selftest_stash {
atomic_t counter;
struct ida mock_region_instances;
};
struct drm_i915_private {
......@@ -1165,6 +1167,9 @@ struct drm_i915_private {
/* Mutex to protect the above hdcp component related values. */
struct mutex hdcp_comp_mutex;
/* The TTM device structure. */
struct ttm_device bdev;
I915_SELFTEST_DECLARE(struct i915_selftest_stash selftest;)
/*
......@@ -1758,7 +1763,8 @@ void i915_gem_cleanup_userptr(struct drm_i915_private *dev_priv);
void i915_gem_init_early(struct drm_i915_private *dev_priv);
void i915_gem_cleanup_early(struct drm_i915_private *dev_priv);
struct intel_memory_region *i915_gem_shmem_setup(struct drm_i915_private *i915);
struct intel_memory_region *i915_gem_shmem_setup(struct drm_i915_private *i915,
u16 type, u16 instance);
static inline void i915_gem_drain_freed_objects(struct drm_i915_private *i915)
{
......
......@@ -1108,6 +1108,7 @@ int i915_gem_init(struct drm_i915_private *dev_priv)
}
i915_gem_drain_freed_objects(dev_priv);
return ret;
}
......
......@@ -87,7 +87,6 @@ static void __i915_globals_cleanup(void)
static __initconst int (* const initfn[])(void) = {
i915_global_active_init,
i915_global_buddy_init,
i915_global_context_init,
i915_global_gem_context_init,
i915_global_objects_init,
......
......@@ -27,7 +27,6 @@ void i915_globals_exit(void);
/* constructors */
int i915_global_active_init(void);
int i915_global_buddy_init(void);
int i915_global_context_init(void);
int i915_global_gem_context_init(void);
int i915_global_objects_init(void);
......
......@@ -6,6 +6,10 @@
#include "i915_scatterlist.h"
#include <drm/drm_mm.h>
#include <linux/slab.h>
bool i915_sg_trim(struct sg_table *orig_st)
{
struct sg_table new_st;
......@@ -34,6 +38,72 @@ bool i915_sg_trim(struct sg_table *orig_st)
return true;
}
/**
* i915_sg_from_mm_node - Create an sg_table from a struct drm_mm_node
* @node: The drm_mm_node.
* @region_start: An offset to add to the dma addresses of the sg list.
*
* Create a struct sg_table, initializing it from a struct drm_mm_node,
* taking a maximum segment length into account, splitting into segments
* if necessary.
*
* Return: A pointer to a kmalloced struct sg_table on success, negative
* error code cast to an error pointer on failure.
*/
struct sg_table *i915_sg_from_mm_node(const struct drm_mm_node *node,
u64 region_start)
{
const u64 max_segment = SZ_1G; /* Do we have a limit on this? */
u64 segment_pages = max_segment >> PAGE_SHIFT;
u64 block_size, offset, prev_end;
struct sg_table *st;
struct scatterlist *sg;
st = kmalloc(sizeof(*st), GFP_KERNEL);
if (!st)
return ERR_PTR(-ENOMEM);
if (sg_alloc_table(st, DIV_ROUND_UP(node->size, segment_pages),
GFP_KERNEL)) {
kfree(st);
return ERR_PTR(-ENOMEM);
}
sg = st->sgl;
st->nents = 0;
prev_end = (resource_size_t)-1;
block_size = node->size << PAGE_SHIFT;
offset = node->start << PAGE_SHIFT;
while (block_size) {
u64 len;
if (offset != prev_end || sg->length >= max_segment) {
if (st->nents)
sg = __sg_next(sg);
sg_dma_address(sg) = region_start + offset;
sg_dma_len(sg) = 0;
sg->length = 0;
st->nents++;
}
len = min(block_size, max_segment - sg->length);
sg->length += len;
sg_dma_len(sg) += len;
offset += len;
block_size -= len;
prev_end = offset;
}
sg_mark_end(sg);
i915_sg_trim(st);
return st;
}
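/*
 * An illustrative worked example of the page-unit convention above,
 * assuming 4K pages: a node with node->start == 16 and node->size == 4
 * yields a single segment with sg_dma_address(sg) == region_start + SZ_64K
 * and sg_dma_len(sg) == SZ_16K.
 */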
#if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
#include "selftests/scatterlist.c"
#endif
......@@ -13,6 +13,8 @@
#include "i915_gem.h"
struct drm_mm_node;
/*
* Optimised SGL iterator for GEM objects
*/
......@@ -141,4 +143,6 @@ static inline unsigned int i915_sg_segment_size(void)
bool i915_sg_trim(struct sg_table *orig_st);
struct sg_table *i915_sg_from_mm_node(const struct drm_mm_node *node,
u64 region_start);
#endif
......@@ -28,6 +28,11 @@ static const struct {
},
};
struct intel_region_reserve {
struct list_head link;
void *node;
};
struct intel_memory_region *
intel_memory_region_lookup(struct drm_i915_private *i915,
u16 class, u16 instance)
......@@ -58,146 +63,61 @@ intel_memory_region_by_type(struct drm_i915_private *i915,
return NULL;
}
static u64
intel_memory_region_free_pages(struct intel_memory_region *mem,
struct list_head *blocks)
/**
* intel_memory_region_unreserve - Unreserve all previously reserved
* ranges
* @mem: The region containing the reserved ranges.
*/
void intel_memory_region_unreserve(struct intel_memory_region *mem)
{
struct i915_buddy_block *block, *on;
u64 size = 0;
struct intel_region_reserve *reserve, *next;
list_for_each_entry_safe(block, on, blocks, link) {
size += i915_buddy_block_size(&mem->mm, block);
i915_buddy_free(&mem->mm, block);
}
INIT_LIST_HEAD(blocks);
if (!mem->priv_ops || !mem->priv_ops->free)
return;
return size;
}
void
__intel_memory_region_put_pages_buddy(struct intel_memory_region *mem,
struct list_head *blocks)
{
mutex_lock(&mem->mm_lock);
mem->avail += intel_memory_region_free_pages(mem, blocks);
mutex_unlock(&mem->mm_lock);
}
void
__intel_memory_region_put_block_buddy(struct i915_buddy_block *block)
{
struct list_head blocks;
INIT_LIST_HEAD(&blocks);
list_add(&block->link, &blocks);
__intel_memory_region_put_pages_buddy(block->private, &blocks);
}
int
__intel_memory_region_get_pages_buddy(struct intel_memory_region *mem,
resource_size_t size,
unsigned int flags,
struct list_head *blocks)
{
unsigned int min_order = 0;
unsigned long n_pages;
GEM_BUG_ON(!IS_ALIGNED(size, mem->mm.chunk_size));
GEM_BUG_ON(!list_empty(blocks));
if (flags & I915_ALLOC_MIN_PAGE_SIZE) {
min_order = ilog2(mem->min_page_size) -
ilog2(mem->mm.chunk_size);
}
if (flags & I915_ALLOC_CONTIGUOUS) {
size = roundup_pow_of_two(size);
min_order = ilog2(size) - ilog2(mem->mm.chunk_size);
list_for_each_entry_safe(reserve, next, &mem->reserved, link) {
list_del(&reserve->link);
mem->priv_ops->free(mem, reserve->node);
kfree(reserve);
}
if (size > mem->mm.size)
return -E2BIG;
n_pages = size >> ilog2(mem->mm.chunk_size);
mutex_lock(&mem->mm_lock);
do {
struct i915_buddy_block *block;
unsigned int order;
order = fls(n_pages) - 1;
GEM_BUG_ON(order > mem->mm.max_order);
GEM_BUG_ON(order < min_order);
do {
block = i915_buddy_alloc(&mem->mm, order);
if (!IS_ERR(block))
break;
if (order-- == min_order)
goto err_free_blocks;
} while (1);
n_pages -= BIT(order);
block->private = mem;
list_add_tail(&block->link, blocks);
if (!n_pages)
break;
} while (1);
mem->avail -= size;
mutex_unlock(&mem->mm_lock);
return 0;
err_free_blocks:
intel_memory_region_free_pages(mem, blocks);
mutex_unlock(&mem->mm_lock);
return -ENXIO;
}
struct i915_buddy_block *
__intel_memory_region_get_block_buddy(struct intel_memory_region *mem,
resource_size_t size,
unsigned int flags)
/**
* intel_memory_region_reserve - Reserve a memory range
* @mem: The region for which we want to reserve a range.
* @offset: Start of the range to reserve.
* @size: The size of the range to reserve.
*
* Return: 0 on success, negative error code on failure.
*/
int intel_memory_region_reserve(struct intel_memory_region *mem,
resource_size_t offset,
resource_size_t size)
{
struct i915_buddy_block *block;
LIST_HEAD(blocks);
int ret;
struct intel_region_reserve *reserve;
ret = __intel_memory_region_get_pages_buddy(mem, size, flags, &blocks);
if (ret)
return ERR_PTR(ret);
if (!mem->priv_ops || !mem->priv_ops->reserve)
return -EINVAL;
block = list_first_entry(&blocks, typeof(*block), link);
list_del_init(&block->link);
return block;
}
reserve = kzalloc(sizeof(*reserve), GFP_KERNEL);
if (!reserve)
return -ENOMEM;
int intel_memory_region_init_buddy(struct intel_memory_region *mem)
{
return i915_buddy_init(&mem->mm, resource_size(&mem->region),
PAGE_SIZE);
}
void intel_memory_region_release_buddy(struct intel_memory_region *mem)
{
i915_buddy_free_list(&mem->mm, &mem->reserved);
i915_buddy_fini(&mem->mm);
}
int intel_memory_region_reserve(struct intel_memory_region *mem,
u64 offset, u64 size)
{
int ret;
reserve->node = mem->priv_ops->reserve(mem, offset, size);
if (IS_ERR(reserve->node)) {
ret = PTR_ERR(reserve->node);
kfree(reserve);
return ret;
}
mutex_lock(&mem->mm_lock);
ret = i915_buddy_alloc_range(&mem->mm, &mem->reserved, offset, size);
list_add_tail(&reserve->link, &mem->reserved);
mutex_unlock(&mem->mm_lock);
return ret;
return 0;
}
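/*
 * A minimal usage sketch (illustrative only; function name hypothetical):
 * reserving the first 1 MiB of a region, e.g. for firmware-owned contents.
 * The reservation is torn down with the region via
 * intel_memory_region_unreserve() above.
 */
static int sketch_reserve_head(struct intel_memory_region *mem)
{
	return intel_memory_region_reserve(mem, 0, SZ_1M);
}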
struct intel_memory_region *
......@@ -206,6 +126,8 @@ intel_memory_region_create(struct drm_i915_private *i915,
resource_size_t size,
resource_size_t min_page_size,
resource_size_t io_start,
u16 type,
u16 instance,
const struct intel_memory_region_ops *ops)
{
struct intel_memory_region *mem;
......@@ -222,6 +144,8 @@ intel_memory_region_create(struct drm_i915_private *i915,
mem->ops = ops;
mem->total = size;
mem->avail = mem->total;
mem->type = type;
mem->instance = instance;
mutex_init(&mem->objects.lock);
INIT_LIST_HEAD(&mem->objects.list);
......@@ -259,6 +183,7 @@ static void __intel_memory_region_destroy(struct kref *kref)
struct intel_memory_region *mem =
container_of(kref, typeof(*mem), kref);
intel_memory_region_unreserve(mem);
if (mem->ops->release)
mem->ops->release(mem);
......@@ -296,15 +221,15 @@ int intel_memory_regions_hw_probe(struct drm_i915_private *i915)
instance = intel_region_map[i].instance;
switch (type) {
case INTEL_MEMORY_SYSTEM:
mem = i915_gem_shmem_setup(i915);
mem = i915_gem_shmem_setup(i915, type, instance);
break;
case INTEL_MEMORY_STOLEN_LOCAL:
mem = i915_gem_stolen_lmem_setup(i915);
mem = i915_gem_stolen_lmem_setup(i915, type, instance);
if (!IS_ERR(mem))
i915->mm.stolen_region = mem;
break;
case INTEL_MEMORY_STOLEN_SYSTEM:
mem = i915_gem_stolen_smem_setup(i915);
mem = i915_gem_stolen_smem_setup(i915, type, instance);
if (!IS_ERR(mem))
i915->mm.stolen_region = mem;
break;
......@@ -321,9 +246,6 @@ int intel_memory_regions_hw_probe(struct drm_i915_private *i915)
}
mem->id = i;
mem->type = type;
mem->instance = instance;
i915->mm.regions[i] = mem;
}
......
......@@ -13,8 +13,6 @@
#include <drm/drm_mm.h>
#include <drm/i915_drm.h>
#include "i915_buddy.h"
struct drm_i915_private;
struct drm_i915_gem_object;
struct intel_memory_region;
......@@ -25,6 +23,7 @@ enum intel_memory_type {
INTEL_MEMORY_LOCAL = I915_MEMORY_CLASS_DEVICE,
INTEL_MEMORY_STOLEN_SYSTEM,
INTEL_MEMORY_STOLEN_LOCAL,
INTEL_MEMORY_MOCK,
};
enum intel_region_id {
......@@ -59,10 +58,19 @@ struct intel_memory_region_ops {
unsigned int flags);
};
struct intel_memory_region_private_ops {
void *(*reserve)(struct intel_memory_region *mem,
resource_size_t offset,
resource_size_t size);
void (*free)(struct intel_memory_region *mem,
void *node);
};
struct intel_memory_region {
struct drm_i915_private *i915;
const struct intel_memory_region_ops *ops;
const struct intel_memory_region_private_ops *priv_ops;
struct io_mapping iomap;
struct resource region;
......@@ -70,7 +78,6 @@ struct intel_memory_region {
/* For fake LMEM */
struct drm_mm_node fake_mappable;
struct i915_buddy_mm mm;
struct mutex mm_lock;
struct kref kref;
......@@ -95,36 +102,26 @@ struct intel_memory_region {
struct list_head list;
struct list_head purgeable;
} objects;
size_t chunk_size;
unsigned int max_order;
bool is_range_manager;
void *region_private;
};
struct intel_memory_region *
intel_memory_region_lookup(struct drm_i915_private *i915,
u16 class, u16 instance);
int intel_memory_region_init_buddy(struct intel_memory_region *mem);
void intel_memory_region_release_buddy(struct intel_memory_region *mem);
int __intel_memory_region_get_pages_buddy(struct intel_memory_region *mem,
resource_size_t size,
unsigned int flags,
struct list_head *blocks);
struct i915_buddy_block *
__intel_memory_region_get_block_buddy(struct intel_memory_region *mem,
resource_size_t size,
unsigned int flags);
void __intel_memory_region_put_pages_buddy(struct intel_memory_region *mem,
struct list_head *blocks);
void __intel_memory_region_put_block_buddy(struct i915_buddy_block *block);
int intel_memory_region_reserve(struct intel_memory_region *mem,
u64 offset, u64 size);
struct intel_memory_region *
intel_memory_region_create(struct drm_i915_private *i915,
resource_size_t start,
resource_size_t size,
resource_size_t min_page_size,
resource_size_t io_start,
u16 type,
u16 instance,
const struct intel_memory_region_ops *ops);
struct intel_memory_region *
......@@ -141,4 +138,9 @@ __printf(2, 3) void
intel_memory_region_set_name(struct intel_memory_region *mem,
const char *fmt, ...);
void intel_memory_region_unreserve(struct intel_memory_region *mem);
int intel_memory_region_reserve(struct intel_memory_region *mem,
resource_size_t offset,
resource_size_t size);
#endif
// SPDX-License-Identifier: MIT
/*
* Copyright © 2021 Intel Corporation
*/
#include <drm/ttm/ttm_bo_driver.h>
#include <drm/ttm/ttm_device.h>
#include "i915_drv.h"
#include "i915_scatterlist.h"
#include "intel_region_ttm.h"
/**
* DOC: TTM support structure
*
* The code in this file deals with setting up memory managers for TTM
* LMEM and MOCK regions and converting the output from
* the managers to struct sg_table. Basically, this provides the mapping from
* i915 GEM regions to TTM memory types and resource managers.
*/
/* A zero-initialized driver for now. We don't have a TTM backend yet. */
static struct ttm_device_funcs i915_ttm_bo_driver;
/**
* intel_region_ttm_device_init - Initialize a TTM device
* @dev_priv: Pointer to an i915 device private structure.
*
* Return: 0 on success, negative error code on failure.
*/
int intel_region_ttm_device_init(struct drm_i915_private *dev_priv)
{
struct drm_device *drm = &dev_priv->drm;
return ttm_device_init(&dev_priv->bdev, &i915_ttm_bo_driver,
drm->dev, drm->anon_inode->i_mapping,
drm->vma_offset_manager, false, false);
}
/**
* intel_region_ttm_device_fini - Finalize a TTM device
* @dev_priv: Pointer to an i915 device private structure.
*/
void intel_region_ttm_device_fini(struct drm_i915_private *dev_priv)
{
ttm_device_fini(&dev_priv->bdev);
}
/*
* Map the i915 memory regions to TTM memory types. We use the
* driver-private types for now, reserving TTM_PL_VRAM for stolen
* memory and TTM_PL_TT for GGTT use, should we decide to implement this.
*/
static int intel_region_to_ttm_type(struct intel_memory_region *mem)
{
int type;
GEM_BUG_ON(mem->type != INTEL_MEMORY_LOCAL &&
mem->type != INTEL_MEMORY_MOCK);
type = mem->instance + TTM_PL_PRIV;
GEM_BUG_ON(type >= TTM_NUM_MEM_TYPES);
return type;
}
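/*
 * For example, an LMEM region with instance 0 maps to TTM_PL_PRIV and
 * instance 1 to TTM_PL_PRIV + 1, leaving the standard SYSTEM/TT/VRAM
 * types untouched.
 */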
static void *intel_region_ttm_node_reserve(struct intel_memory_region *mem,
resource_size_t offset,
resource_size_t size)
{
struct ttm_resource_manager *man = mem->region_private;
struct ttm_place place = {};
struct ttm_resource res = {};
struct ttm_buffer_object mock_bo = {};
int ret;
/*
* Having to use a mock_bo is unfortunate but stems from some
* drivers having private managers that insist on knowing what the
* allocated memory is intended for, using it to send private
* data to the manager. Also recently the bo has been used to send
* alignment info to the manager. Assume that apart from the latter,
* none of the managers we use will ever access the buffer object
* members, hoping we can pass the alignment info in the
* struct ttm_place in the future.
*/
place.fpfn = offset >> PAGE_SHIFT;
place.lpfn = place.fpfn + (size >> PAGE_SHIFT);
res.num_pages = size >> PAGE_SHIFT;
ret = man->func->alloc(man, &mock_bo, &place, &res);
if (ret == -ENOSPC)
ret = -ENXIO;
return ret ? ERR_PTR(ret) : res.mm_node;
}
/**
* intel_region_ttm_node_free - Free a node allocated from a resource manager
* @mem: The region the node was allocated from.
* @node: The opaque node representing an allocation.
*/
void intel_region_ttm_node_free(struct intel_memory_region *mem,
void *node)
{
struct ttm_resource_manager *man = mem->region_private;
struct ttm_resource res = {};
res.mm_node = node;
man->func->free(man, &res);
}
static const struct intel_memory_region_private_ops priv_ops = {
.reserve = intel_region_ttm_node_reserve,
.free = intel_region_ttm_node_free,
};
int intel_region_ttm_init(struct intel_memory_region *mem)
{
struct ttm_device *bdev = &mem->i915->bdev;
int mem_type = intel_region_to_ttm_type(mem);
int ret;
ret = ttm_range_man_init(bdev, mem_type, false,
resource_size(&mem->region) >> PAGE_SHIFT);
if (ret)
return ret;
mem->chunk_size = PAGE_SIZE;
mem->max_order =
get_order(rounddown_pow_of_two(resource_size(&mem->region)));
mem->is_range_manager = true;
mem->priv_ops = &priv_ops;
mem->region_private = ttm_manager_type(bdev, mem_type);
return 0;
}
/**
* intel_region_ttm_fini - Finalize a TTM region.
* @mem: The memory region
*
* This function takes down the TTM resource manager associated with the
* memory region, and if it was registered with the TTM device,
* removes that registration.
*/
void intel_region_ttm_fini(struct intel_memory_region *mem)
{
int ret;
ret = ttm_range_man_fini(&mem->i915->bdev,
intel_region_to_ttm_type(mem));
GEM_WARN_ON(ret);
mem->region_private = NULL;
}
/**
* intel_region_ttm_node_to_st - Convert an opaque TTM resource manager node
* to an sg_table.
* @mem: The memory region.
* @node: The resource manager node obtained from the TTM resource manager.
*
* The gem backends typically use sg-tables for operations on the underlying
* io_memory. So provide a way for the backends to translate the
* nodes they are handed from TTM to sg-tables.
*
* Return: A malloced sg_table on success, an error pointer on failure.
*/
struct sg_table *intel_region_ttm_node_to_st(struct intel_memory_region *mem,
void *node)
{
return i915_sg_from_mm_node(node, mem->region.start);
}
/**
* intel_region_ttm_node_alloc - Allocate memory resources from a region
* @mem: The memory region,
* @size: The requested size in bytes
* @flags: Allocation flags
*
* This functionality is provided only for callers that need to allocate
* memory from standalone TTM range managers, without the TTM eviction
* functionality. Don't use this if you are not completely sure that's the
* case. The returned opaque node can be converted to an sg_table using
* intel_region_ttm_node_to_st(), and can be freed using
* intel_region_ttm_node_free().
*
* Return: A valid pointer on success, an error pointer on failure.
*/
void *intel_region_ttm_node_alloc(struct intel_memory_region *mem,
resource_size_t size,
unsigned int flags)
{
struct ttm_resource_manager *man = mem->region_private;
struct ttm_place place = {};
struct ttm_resource res = {};
struct ttm_buffer_object mock_bo = {};
int ret;
/*
* We ignore the flags for now since we're using the range
* manager, and contiguous and min page size would be fulfilled
* by default if size is min page size aligned.
*/
res.num_pages = size >> PAGE_SHIFT;
if (mem->is_range_manager) {
if (size >= SZ_1G)
mock_bo.page_alignment = SZ_1G >> PAGE_SHIFT;
else if (size >= SZ_2M)
mock_bo.page_alignment = SZ_2M >> PAGE_SHIFT;
else if (size >= SZ_64K)
mock_bo.page_alignment = SZ_64K >> PAGE_SHIFT;
}
ret = man->func->alloc(man, &mock_bo, &place, &res);
if (ret == -ENOSPC)
ret = -ENXIO;
return ret ? ERR_PTR(ret) : res.mm_node;
}
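/*
 * A minimal sketch tying the helpers above together (illustrative only;
 * function name hypothetical): allocate from the region's standalone range
 * manager and translate the node to an sg_table for the GEM backend. The
 * caller keeps @node and releases the allocation later with
 * intel_region_ttm_node_free().
 */
static struct sg_table *sketch_region_alloc_sg(struct intel_memory_region *mem,
					       resource_size_t size,
					       void **node)
{
	struct sg_table *st;

	*node = intel_region_ttm_node_alloc(mem, size,
					    I915_ALLOC_MIN_PAGE_SIZE);
	if (IS_ERR(*node))
		return ERR_CAST(*node);

	st = intel_region_ttm_node_to_st(mem, *node);
	if (IS_ERR(st))
		intel_region_ttm_node_free(mem, *node);

	return st;
}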
/* SPDX-License-Identifier: MIT */
/*
* Copyright © 2021 Intel Corporation
*/
#ifndef _INTEL_REGION_TTM_H_
#define _INTEL_REGION_TTM_H_
#include <linux/types.h>
#include "i915_selftest.h"
struct drm_i915_private;
struct intel_memory_region;
int intel_region_ttm_device_init(struct drm_i915_private *dev_priv);
void intel_region_ttm_device_fini(struct drm_i915_private *dev_priv);
int intel_region_ttm_init(struct intel_memory_region *mem);
void intel_region_ttm_fini(struct intel_memory_region *mem);
struct sg_table *intel_region_ttm_node_to_st(struct intel_memory_region *mem,
void *node);
void *intel_region_ttm_node_alloc(struct intel_memory_region *mem,
resource_size_t size,
unsigned int flags);
void intel_region_ttm_node_free(struct intel_memory_region *mem,
void *node);
#endif /* _INTEL_REGION_TTM_H_ */
......@@ -33,5 +33,4 @@ selftest(evict, i915_gem_evict_mock_selftests)
selftest(gtt, i915_gem_gtt_mock_selftests)
selftest(hugepages, i915_gem_huge_page_mock_selftests)
selftest(contexts, i915_gem_context_mock_selftests)
selftest(buddy, i915_buddy_mock_selftests)
selftest(memory_region, intel_memory_region_mock_selftests)
......@@ -57,9 +57,10 @@ static int igt_mock_fill(void *arg)
LIST_HEAD(objects);
int err = 0;
page_size = mem->mm.chunk_size;
max_pages = div64_u64(total, page_size);
page_size = mem->chunk_size;
rem = total;
retry:
max_pages = div64_u64(rem, page_size);
for_each_prime_number_from(page_num, 1, max_pages) {
resource_size_t size = page_num * page_size;
......@@ -85,6 +86,11 @@ static int igt_mock_fill(void *arg)
err = 0;
if (err == -ENXIO) {
if (page_num * page_size <= rem) {
if (mem->is_range_manager && max_pages > 1) {
max_pages >>= 1;
goto retry;
}
pr_err("%s failed, space still left in region\n",
__func__);
err = -EINVAL;
......@@ -199,12 +205,18 @@ static int igt_mock_reserve(void *arg)
do {
u32 size = i915_prandom_u32_max_state(cur_avail, &prng);
retry:
size = max_t(u32, round_up(size, PAGE_SIZE), PAGE_SIZE);
obj = igt_object_create(mem, &objects, size, 0);
if (IS_ERR(obj)) {
if (PTR_ERR(obj) == -ENXIO)
if (PTR_ERR(obj) == -ENXIO) {
if (mem->is_range_manager &&
size > mem->chunk_size) {
size >>= 1;
goto retry;
}
break;
}
err = PTR_ERR(obj);
goto out_close;
}
......@@ -220,7 +232,7 @@ static int igt_mock_reserve(void *arg)
out_close:
kfree(order);
close_objects(mem, &objects);
i915_buddy_free_list(&mem->mm, &mem->reserved);
intel_memory_region_unreserve(mem);
return err;
}
......@@ -240,7 +252,7 @@ static int igt_mock_contiguous(void *arg)
total = resource_size(&mem->region);
/* Min size */
obj = igt_object_create(mem, &objects, mem->mm.chunk_size,
obj = igt_object_create(mem, &objects, mem->chunk_size,
I915_BO_ALLOC_CONTIGUOUS);
if (IS_ERR(obj))
return PTR_ERR(obj);
......@@ -321,14 +333,16 @@ static int igt_mock_contiguous(void *arg)
min = target;
target = total >> 1;
/* Make sure we can still allocate all the fragmented space */
obj = igt_object_create(mem, &objects, target, 0);
if (IS_ERR(obj)) {
err = PTR_ERR(obj);
goto err_close_objects;
}
if (!mem->is_range_manager) {
/* Make sure we can still allocate all the fragmented space */
obj = igt_object_create(mem, &objects, target, 0);
if (IS_ERR(obj)) {
err = PTR_ERR(obj);
goto err_close_objects;
}
igt_object_release(obj);
igt_object_release(obj);
}
/*
* Even though we have enough free space, we don't have a big enough
......@@ -348,7 +362,7 @@ static int igt_mock_contiguous(void *arg)
}
target >>= 1;
} while (target >= mem->mm.chunk_size);
} while (target >= mem->chunk_size);
err_close_objects:
list_splice_tail(&holes, &objects);
......@@ -368,7 +382,7 @@ static int igt_mock_splintered_region(void *arg)
/*
* Sanity check we can still allocate everything even if the
* mm.max_order != mm.size. i.e our starting address space size is not a
* max_order != mm.size. i.e our starting address space size is not a
* power-of-two.
*/
......@@ -377,17 +391,10 @@ static int igt_mock_splintered_region(void *arg)
if (IS_ERR(mem))
return PTR_ERR(mem);
if (mem->mm.size != size) {
pr_err("%s size mismatch(%llu != %llu)\n",
__func__, mem->mm.size, size);
err = -EINVAL;
goto out_put;
}
expected_order = get_order(rounddown_pow_of_two(size));
if (mem->mm.max_order != expected_order) {
if (mem->max_order != expected_order) {
pr_err("%s order mismatch(%u != %u)\n",
__func__, mem->mm.max_order, expected_order);
__func__, mem->max_order, expected_order);
err = -EINVAL;
goto out_put;
}
......@@ -408,12 +415,15 @@ static int igt_mock_splintered_region(void *arg)
* sure that does indeed hold true.
*/
obj = igt_object_create(mem, &objects, size, I915_BO_ALLOC_CONTIGUOUS);
if (!IS_ERR(obj)) {
pr_err("%s too large contiguous allocation was not rejected\n",
__func__);
err = -EINVAL;
goto out_close;
if (!mem->is_range_manager) {
obj = igt_object_create(mem, &objects, size,
I915_BO_ALLOC_CONTIGUOUS);
if (!IS_ERR(obj)) {
pr_err("%s too large contiguous allocation was not rejected\n",
__func__);
err = -EINVAL;
goto out_close;
}
}
obj = igt_object_create(mem, &objects, rounddown_pow_of_two(size),
......@@ -432,68 +442,6 @@ static int igt_mock_splintered_region(void *arg)
return err;
}
#ifndef SZ_8G
#define SZ_8G BIT_ULL(33)
#endif
static int igt_mock_max_segment(void *arg)
{
const unsigned int max_segment = i915_sg_segment_size();
struct intel_memory_region *mem = arg;
struct drm_i915_private *i915 = mem->i915;
struct drm_i915_gem_object *obj;
struct i915_buddy_block *block;
struct scatterlist *sg;
LIST_HEAD(objects);
u64 size;
int err = 0;
/*
* While we may create very large contiguous blocks, we may need
* to break those down for consumption elsewhere. In particular,
* dma-mapping with scatterlist elements have an implicit limit of
* UINT_MAX on each element.
*/
size = SZ_8G;
mem = mock_region_create(i915, 0, size, PAGE_SIZE, 0);
if (IS_ERR(mem))
return PTR_ERR(mem);
obj = igt_object_create(mem, &objects, size, 0);
if (IS_ERR(obj)) {
err = PTR_ERR(obj);
goto out_put;
}
size = 0;
list_for_each_entry(block, &obj->mm.blocks, link) {
if (i915_buddy_block_size(&mem->mm, block) > size)
size = i915_buddy_block_size(&mem->mm, block);
}
if (size < max_segment) {
pr_err("%s: Failed to create a huge contiguous block [> %u], largest block %lld\n",
__func__, max_segment, size);
err = -EINVAL;
goto out_close;
}
for (sg = obj->mm.pages->sgl; sg; sg = sg_next(sg)) {
if (sg->length > max_segment) {
pr_err("%s: Created an oversized scatterlist entry, %u > %u\n",
__func__, sg->length, max_segment);
err = -EINVAL;
goto out_close;
}
}
out_close:
close_objects(mem, &objects);
out_put:
intel_memory_region_put(mem);
return err;
}
static int igt_gpu_write_dw(struct intel_context *ce,
struct i915_vma *vma,
u32 dword,
......@@ -1098,7 +1046,6 @@ int intel_memory_region_mock_selftests(void)
SUBTEST(igt_mock_fill),
SUBTEST(igt_mock_contiguous),
SUBTEST(igt_mock_splintered_region),
SUBTEST(igt_mock_max_segment),
};
struct intel_memory_region *mem;
struct drm_i915_private *i915;
......
......@@ -32,6 +32,7 @@
#include "gt/intel_gt_requests.h"
#include "gt/mock_engine.h"
#include "intel_memory_region.h"
#include "intel_region_ttm.h"
#include "mock_request.h"
#include "mock_gem_device.h"
......@@ -70,6 +71,7 @@ static void mock_device_release(struct drm_device *dev)
mock_fini_ggtt(&i915->ggtt);
destroy_workqueue(i915->wq);
intel_region_ttm_device_fini(i915);
intel_gt_driver_late_release(&i915->gt);
intel_memory_regions_driver_release(i915);
......@@ -116,6 +118,7 @@ struct drm_i915_private *mock_gem_device(void)
#endif
struct drm_i915_private *i915;
struct pci_dev *pdev;
int ret;
pdev = kzalloc(sizeof(*pdev), GFP_KERNEL);
if (!pdev)
......@@ -178,6 +181,10 @@ struct drm_i915_private *mock_gem_device(void)
atomic_inc(&i915->gt.wakeref.count); /* disable; no hw support */
i915->gt.awake = -ENODEV;
ret = intel_region_ttm_device_init(i915);
if (ret)
goto err_ttm;
i915->wq = alloc_ordered_workqueue("mock", 0);
if (!i915->wq)
goto err_drv;
......@@ -201,6 +208,7 @@ struct drm_i915_private *mock_gem_device(void)
intel_engines_driver_register(i915);
i915->do_release = true;
ida_init(&i915->selftest.mock_region_instances);
return i915;
......@@ -209,6 +217,8 @@ struct drm_i915_private *mock_gem_device(void)
err_unlock:
destroy_workqueue(i915->wq);
err_drv:
intel_region_ttm_device_fini(i915);
err_ttm:
intel_gt_driver_late_release(&i915->gt);
intel_memory_regions_driver_release(i915);
drm_mode_config_cleanup(&i915->drm);
......
// SPDX-License-Identifier: MIT
/*
* Copyright © 2019 Intel Corporation
* Copyright © 2019-2021 Intel Corporation
*/
#include <linux/scatterlist.h>
#include <drm/ttm/ttm_placement.h>
#include "gem/i915_gem_region.h"
#include "intel_memory_region.h"
#include "intel_region_ttm.h"
#include "mock_region.h"
static void mock_region_put_pages(struct drm_i915_gem_object *obj,
struct sg_table *pages)
{
intel_region_ttm_node_free(obj->mm.region, obj->mm.st_mm_node);
sg_free_table(pages);
kfree(pages);
}
static int mock_region_get_pages(struct drm_i915_gem_object *obj)
{
unsigned int flags;
struct sg_table *pages;
flags = I915_ALLOC_MIN_PAGE_SIZE;
if (obj->flags & I915_BO_ALLOC_CONTIGUOUS)
flags |= I915_ALLOC_CONTIGUOUS;
obj->mm.st_mm_node = intel_region_ttm_node_alloc(obj->mm.region,
obj->base.size,
flags);
if (IS_ERR(obj->mm.st_mm_node))
return PTR_ERR(obj->mm.st_mm_node);
pages = intel_region_ttm_node_to_st(obj->mm.region, obj->mm.st_mm_node);
if (IS_ERR(pages)) {
intel_region_ttm_node_free(obj->mm.region, obj->mm.st_mm_node);
return PTR_ERR(pages);
}
__i915_gem_object_set_pages(obj, pages, i915_sg_dma_sizes(pages->sgl));
return 0;
}
static const struct drm_i915_gem_object_ops mock_region_obj_ops = {
.name = "mock-region",
.get_pages = i915_gem_object_get_pages_buddy,
.put_pages = i915_gem_object_put_pages_buddy,
.get_pages = mock_region_get_pages,
.put_pages = mock_region_put_pages,
.release = i915_gem_object_release_memory_region,
};
......@@ -23,7 +62,7 @@ static int mock_object_init(struct intel_memory_region *mem,
static struct lock_class_key lock_class;
struct drm_i915_private *i915 = mem->i915;
if (size > mem->mm.size)
if (size > resource_size(&mem->region))
return -E2BIG;
drm_gem_private_object_init(&i915->drm, &obj->base, size);
......@@ -38,9 +77,18 @@ static int mock_object_init(struct intel_memory_region *mem,
return 0;
}
static void mock_region_fini(struct intel_memory_region *mem)
{
struct drm_i915_private *i915 = mem->i915;
int instance = mem->instance;
intel_region_ttm_fini(mem);
ida_free(&i915->selftest.mock_region_instances, instance);
}
static const struct intel_memory_region_ops mock_region_ops = {
.init = intel_memory_region_init_buddy,
.release = intel_memory_region_release_buddy,
.init = intel_region_ttm_init,
.release = mock_region_fini,
.init_object = mock_object_init,
};
......@@ -51,6 +99,14 @@ mock_region_create(struct drm_i915_private *i915,
resource_size_t min_page_size,
resource_size_t io_start)
{
int instance = ida_alloc_max(&i915->selftest.mock_region_instances,
TTM_NUM_MEM_TYPES - TTM_PL_PRIV - 1,
GFP_KERNEL);
if (instance < 0)
return ERR_PTR(instance);
return intel_memory_region_create(i915, start, size, min_page_size,
io_start, &mock_region_ops);
io_start, INTEL_MEMORY_MOCK, instance,
&mock_region_ops);
}