// SPDX-License-Identifier: MIT
/*
 * Copyright © 2021 Intel Corporation
 */

#include "xe_bo.h"

#include <linux/dma-buf.h>

#include <drm/drm_drv.h>
#include <drm/drm_gem_ttm_helper.h>
#include <drm/drm_managed.h>
#include <drm/ttm/ttm_device.h>
#include <drm/ttm/ttm_placement.h>
#include <drm/ttm/ttm_tt.h>
#include <drm/xe_drm.h>

#include "xe_device.h"
#include "xe_dma_buf.h"
#include "xe_drm_client.h"
#include "xe_ggtt.h"
#include "xe_gt.h"
#include "xe_map.h"
#include "xe_migrate.h"
#include "xe_preempt_fence.h"
#include "xe_res_cursor.h"
#include "xe_trace.h"
#include "xe_ttm_stolen_mgr.h"
#include "xe_vm.h"

const char *const xe_mem_type_to_name[TTM_NUM_MEM_TYPES]  = {
	[XE_PL_SYSTEM] = "system",
	[XE_PL_TT] = "gtt",
	[XE_PL_VRAM0] = "vram0",
	[XE_PL_VRAM1] = "vram1",
	[XE_PL_STOLEN] = "stolen"
};

static const struct ttm_place sys_placement_flags = {
	.fpfn = 0,
	.lpfn = 0,
	.mem_type = XE_PL_SYSTEM,
	.flags = 0,
};

static struct ttm_placement sys_placement = {
	.num_placement = 1,
	.placement = &sys_placement_flags,
};

static const struct ttm_place tt_placement_flags[] = {
	{
		.fpfn = 0,
		.lpfn = 0,
		.mem_type = XE_PL_TT,
		.flags = TTM_PL_FLAG_DESIRED,
	},
	{
		.fpfn = 0,
		.lpfn = 0,
		.mem_type = XE_PL_SYSTEM,
		.flags = TTM_PL_FLAG_FALLBACK,
	}
};

static struct ttm_placement tt_placement = {
	.num_placement = 2,
	.placement = tt_placement_flags,
};

bool mem_type_is_vram(u32 mem_type)
{
	return mem_type >= XE_PL_VRAM0 && mem_type != XE_PL_STOLEN;
}

static bool resource_is_stolen_vram(struct xe_device *xe, struct ttm_resource *res)
{
	return res->mem_type == XE_PL_STOLEN && IS_DGFX(xe);
}

static bool resource_is_vram(struct ttm_resource *res)
{
	return mem_type_is_vram(res->mem_type);
}

bool xe_bo_is_vram(struct xe_bo *bo)
{
	return resource_is_vram(bo->ttm.resource) ||
		resource_is_stolen_vram(xe_bo_device(bo), bo->ttm.resource);
}

bool xe_bo_is_stolen(struct xe_bo *bo)
{
	return bo->ttm.resource->mem_type == XE_PL_STOLEN;
}

/**
 * xe_bo_is_stolen_devmem - check if BO is of stolen type accessed via PCI BAR
 * @bo: The BO
 *
 * The stolen memory is accessed through the PCI BAR for both DGFX and some
 * integrated platforms that have a dedicated bit in the PTE for devmem (DM).
 *
 * Returns: true if it's stolen memory accessed via PCI BAR, false otherwise.
 */
bool xe_bo_is_stolen_devmem(struct xe_bo *bo)
{
	return xe_bo_is_stolen(bo) &&
		GRAPHICS_VERx100(xe_bo_device(bo)) >= 1270;
}

static bool xe_bo_is_user(struct xe_bo *bo)
{
	return bo->flags & XE_BO_FLAG_USER;
}

static struct xe_migrate *
mem_type_to_migrate(struct xe_device *xe, u32 mem_type)
{
	struct xe_tile *tile;

	xe_assert(xe, mem_type == XE_PL_STOLEN || mem_type_is_vram(mem_type));
	tile = &xe->tiles[mem_type == XE_PL_STOLEN ? 0 : (mem_type - XE_PL_VRAM0)];
	return tile->migrate;
}

static struct xe_mem_region *res_to_mem_region(struct ttm_resource *res)
{
	struct xe_device *xe = ttm_to_xe_device(res->bo->bdev);
	struct ttm_resource_manager *mgr;

	xe_assert(xe, resource_is_vram(res));
	mgr = ttm_manager_type(&xe->ttm, res->mem_type);
	return to_xe_ttm_vram_mgr(mgr)->vram;
}

static void try_add_system(struct xe_device *xe, struct xe_bo *bo,
			   u32 bo_flags, u32 *c)
{
	if (bo_flags & XE_BO_FLAG_SYSTEM) {
		xe_assert(xe, *c < ARRAY_SIZE(bo->placements));

		bo->placements[*c] = (struct ttm_place) {
			.mem_type = XE_PL_TT,
		};
		*c += 1;
	}
}

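/*
 * Add a VRAM placement for @mem_type. Pinned / GGTT BOs are forced to be
 * contiguous, and on small-BAR systems BOs that need CPU access are limited
 * to the CPU-visible portion of VRAM, while the rest are placed top-down.
 */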
static void add_vram(struct xe_device *xe, struct xe_bo *bo,
		     struct ttm_place *places, u32 bo_flags, u32 mem_type, u32 *c)
{
	struct ttm_place place = { .mem_type = mem_type };
	struct xe_mem_region *vram;
	u64 io_size;

	xe_assert(xe, *c < ARRAY_SIZE(bo->placements));

	vram = to_xe_ttm_vram_mgr(ttm_manager_type(&xe->ttm, mem_type))->vram;
	xe_assert(xe, vram && vram->usable_size);
	io_size = vram->io_size;

	/*
	 * For eviction / restore on suspend / resume objects
	 * pinned in VRAM must be contiguous
	 */
	if (bo_flags & (XE_BO_FLAG_PINNED |
			XE_BO_FLAG_GGTT))
		place.flags |= TTM_PL_FLAG_CONTIGUOUS;

	if (io_size < vram->usable_size) {
		if (bo_flags & XE_BO_FLAG_NEEDS_CPU_ACCESS) {
			place.fpfn = 0;
			place.lpfn = io_size >> PAGE_SHIFT;
		} else {
			place.flags |= TTM_PL_FLAG_TOPDOWN;
		}
	}
	places[*c] = place;
	*c += 1;
}

static void try_add_vram(struct xe_device *xe, struct xe_bo *bo,
			 u32 bo_flags, u32 *c)
{
	if (bo_flags & XE_BO_FLAG_VRAM0)
		add_vram(xe, bo, bo->placements, bo_flags, XE_PL_VRAM0, c);
	if (bo_flags & XE_BO_FLAG_VRAM1)
		add_vram(xe, bo, bo->placements, bo_flags, XE_PL_VRAM1, c);
}

static void try_add_stolen(struct xe_device *xe, struct xe_bo *bo,
			   u32 bo_flags, u32 *c)
{
	if (bo_flags & XE_BO_FLAG_STOLEN) {
		xe_assert(xe, *c < ARRAY_SIZE(bo->placements));

		bo->placements[*c] = (struct ttm_place) {
			.mem_type = XE_PL_STOLEN,
			.flags = bo_flags & (XE_BO_FLAG_PINNED |
					     XE_BO_FLAG_GGTT) ?
				TTM_PL_FLAG_CONTIGUOUS : 0,
		};
		*c += 1;
	}
}

static int __xe_bo_placement_for_flags(struct xe_device *xe, struct xe_bo *bo,
				       u32 bo_flags)
{
	u32 c = 0;

	try_add_vram(xe, bo, bo_flags, &c);
	try_add_system(xe, bo, bo_flags, &c);
	try_add_stolen(xe, bo, bo_flags, &c);

	if (!c)
		return -EINVAL;

	bo->placement = (struct ttm_placement) {
		.num_placement = c,
		.placement = bo->placements,
	};

	return 0;
}

int xe_bo_placement_for_flags(struct xe_device *xe, struct xe_bo *bo,
			      u32 bo_flags)
{
	xe_bo_assert_held(bo);
	return __xe_bo_placement_for_flags(xe, bo, bo_flags);
}

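/*
 * Pick the eviction placement for a BO: VRAM and stolen BOs are evicted to
 * TT (with a system fallback), TT and anything else goes to system memory.
 */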
static void xe_evict_flags(struct ttm_buffer_object *tbo,
			   struct ttm_placement *placement)
{
	if (!xe_bo_is_xe_bo(tbo)) {
		/* Don't handle scatter gather BOs */
		if (tbo->type == ttm_bo_type_sg) {
			placement->num_placement = 0;
			return;
		}

		*placement = sys_placement;
		return;
	}

	/*
	 * For xe, sg bos that are evicted to system just trigger a
	 * rebind of the sg list upon subsequent validation to XE_PL_TT.
	 */
	switch (tbo->resource->mem_type) {
	case XE_PL_VRAM0:
	case XE_PL_VRAM1:
	case XE_PL_STOLEN:
		*placement = tt_placement;
		break;
	case XE_PL_TT:
	default:
		*placement = sys_placement;
		break;
	}
}

struct xe_ttm_tt {
	struct ttm_tt ttm;
	struct device *dev;
	struct sg_table sgt;
	struct sg_table *sg;
};

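/* Build and DMA-map an sg-table covering the TT backing pages, if not already mapped. */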
static int xe_tt_map_sg(struct ttm_tt *tt)
{
	struct xe_ttm_tt *xe_tt = container_of(tt, struct xe_ttm_tt, ttm);
	unsigned long num_pages = tt->num_pages;
	int ret;

	XE_WARN_ON(tt->page_flags & TTM_TT_FLAG_EXTERNAL);

	if (xe_tt->sg)
		return 0;

	ret = sg_alloc_table_from_pages_segment(&xe_tt->sgt, tt->pages,
						num_pages, 0,
						(u64)num_pages << PAGE_SHIFT,
						xe_sg_segment_size(xe_tt->dev),
						GFP_KERNEL);
	if (ret)
		return ret;

	xe_tt->sg = &xe_tt->sgt;
	ret = dma_map_sgtable(xe_tt->dev, xe_tt->sg, DMA_BIDIRECTIONAL,
			      DMA_ATTR_SKIP_CPU_SYNC);
	if (ret) {
		sg_free_table(xe_tt->sg);
		xe_tt->sg = NULL;
		return ret;
	}

	return 0;
}

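/* Return the DMA-mapped sg-table of the BO's TT backing store, or NULL if not mapped. */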
struct sg_table *xe_bo_sg(struct xe_bo *bo)
{
	struct ttm_tt *tt = bo->ttm.ttm;
	struct xe_ttm_tt *xe_tt = container_of(tt, struct xe_ttm_tt, ttm);

	return xe_tt->sg;
}

static struct ttm_tt *xe_ttm_tt_create(struct ttm_buffer_object *ttm_bo,
				       u32 page_flags)
{
	struct xe_bo *bo = ttm_to_xe_bo(ttm_bo);
	struct xe_device *xe = xe_bo_device(bo);
	struct xe_ttm_tt *tt;
	unsigned long extra_pages;
	enum ttm_caching caching;
	int err;

	tt = kzalloc(sizeof(*tt), GFP_KERNEL);
	if (!tt)
		return NULL;

	tt->dev = xe->drm.dev;

	extra_pages = 0;
	if (xe_bo_needs_ccs_pages(bo))
		extra_pages = DIV_ROUND_UP(xe_device_ccs_bytes(xe, bo->size),
					   PAGE_SIZE);

	switch (bo->cpu_caching) {
	case DRM_XE_GEM_CPU_CACHING_WC:
		caching = ttm_write_combined;
		break;
	default:
		caching = ttm_cached;
		break;
	}

	WARN_ON((bo->flags & XE_BO_FLAG_USER) && !bo->cpu_caching);

	/*
	 * Display scanout is always non-coherent with the CPU cache.
	 *
	 * For Xe_LPG and beyond, PPGTT PTE lookups are also non-coherent and
	 * require a CPU:WC mapping.
	 */
	if ((!bo->cpu_caching && bo->flags & XE_BO_FLAG_SCANOUT) ||
	    (xe->info.graphics_verx100 >= 1270 && bo->flags & XE_BO_FLAG_PAGETABLE))
		caching = ttm_write_combined;

	err = ttm_tt_init(&tt->ttm, &bo->ttm, page_flags, caching, extra_pages);
	if (err) {
		kfree(tt);
		return NULL;
	}

	return &tt->ttm;
}

static int xe_ttm_tt_populate(struct ttm_device *ttm_dev, struct ttm_tt *tt,
			      struct ttm_operation_ctx *ctx)
{
	int err;

	/*
	 * dma-bufs are not populated with pages, and the dma-
	 * addresses are set up when moved to XE_PL_TT.
	 */
	if (tt->page_flags & TTM_TT_FLAG_EXTERNAL)
		return 0;

	err = ttm_pool_alloc(&ttm_dev->pool, tt, ctx);
	if (err)
		return err;

	/* A follow-up may move this to xe_bo_move() when the BO is moved to XE_PL_TT */
	err = xe_tt_map_sg(tt);
	if (err)
		ttm_pool_free(&ttm_dev->pool, tt);

	return err;
}

static void xe_ttm_tt_unpopulate(struct ttm_device *ttm_dev, struct ttm_tt *tt)
{
	struct xe_ttm_tt *xe_tt = container_of(tt, struct xe_ttm_tt, ttm);

	if (tt->page_flags & TTM_TT_FLAG_EXTERNAL)
		return;

	if (xe_tt->sg) {
		dma_unmap_sgtable(xe_tt->dev, xe_tt->sg,
				  DMA_BIDIRECTIONAL, 0);
		sg_free_table(xe_tt->sg);
		xe_tt->sg = NULL;
	}

	return ttm_pool_free(&ttm_dev->pool, tt);
}

static void xe_ttm_tt_destroy(struct ttm_device *ttm_dev, struct ttm_tt *tt)
{
	ttm_tt_fini(tt);
	kfree(tt);
}

static int xe_ttm_io_mem_reserve(struct ttm_device *bdev,
				 struct ttm_resource *mem)
{
	struct xe_device *xe = ttm_to_xe_device(bdev);

	switch (mem->mem_type) {
	case XE_PL_SYSTEM:
	case XE_PL_TT:
		return 0;
	case XE_PL_VRAM0:
	case XE_PL_VRAM1: {
		struct xe_ttm_vram_mgr_resource *vres =
			to_xe_ttm_vram_mgr_resource(mem);
		struct xe_mem_region *vram = res_to_mem_region(mem);

		if (vres->used_visible_size < mem->size)
			return -EINVAL;

		mem->bus.offset = mem->start << PAGE_SHIFT;

		if (vram->mapping &&
		    mem->placement & TTM_PL_FLAG_CONTIGUOUS)
			mem->bus.addr = (u8 __force *)vram->mapping +
				mem->bus.offset;

		mem->bus.offset += vram->io_start;
		mem->bus.is_iomem = true;

#if !defined(CONFIG_X86)
		mem->bus.caching = ttm_write_combined;
#endif
		return 0;
	} case XE_PL_STOLEN:
		return xe_ttm_stolen_io_mem_reserve(xe, mem);
	default:
		return -EINVAL;
	}
}

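/*
 * Notify all VMs with bindings to this BO of an upcoming move: non-fault-mode
 * VMs are marked evicted so the mappings are rebound on next exec, while
 * fault-mode VMs have their VMAs invalidated once the BO is idle.
 */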
static int xe_bo_trigger_rebind(struct xe_device *xe, struct xe_bo *bo,
				const struct ttm_operation_ctx *ctx)
{
	struct dma_resv_iter cursor;
	struct dma_fence *fence;
	struct drm_gem_object *obj = &bo->ttm.base;
	struct drm_gpuvm_bo *vm_bo;
	bool idle = false;
	int ret = 0;

	dma_resv_assert_held(bo->ttm.base.resv);

	if (!list_empty(&bo->ttm.base.gpuva.list)) {
		dma_resv_iter_begin(&cursor, bo->ttm.base.resv,
				    DMA_RESV_USAGE_BOOKKEEP);
		dma_resv_for_each_fence_unlocked(&cursor, fence)
			dma_fence_enable_sw_signaling(fence);
		dma_resv_iter_end(&cursor);
	}

	drm_gem_for_each_gpuvm_bo(vm_bo, obj) {
		struct xe_vm *vm = gpuvm_to_vm(vm_bo->vm);
		struct drm_gpuva *gpuva;

		if (!xe_vm_in_fault_mode(vm)) {
			drm_gpuvm_bo_evict(vm_bo, true);
			continue;
		}

		if (!idle) {
			long timeout;

			if (ctx->no_wait_gpu &&
			    !dma_resv_test_signaled(bo->ttm.base.resv,
						    DMA_RESV_USAGE_BOOKKEEP))
				return -EBUSY;

			timeout = dma_resv_wait_timeout(bo->ttm.base.resv,
							DMA_RESV_USAGE_BOOKKEEP,
							ctx->interruptible,
							MAX_SCHEDULE_TIMEOUT);
			if (!timeout)
				return -ETIME;
			if (timeout < 0)
				return timeout;

			idle = true;
		}

		drm_gpuvm_bo_for_each_va(gpuva, vm_bo) {
			struct xe_vma *vma = gpuva_to_vma(gpuva);

			trace_xe_vma_evict(vma);
			ret = xe_vm_invalidate_vma(vma);
			if (XE_WARN_ON(ret))
				return ret;
		}
	}

	return ret;
}

/*
 * The dma-buf map_attachment() / unmap_attachment() calls are hooked up here.
 * Note that unmapping the attachment is deferred to the next
 * map_attachment time, or to bo destroy (after idling), whichever comes first.
 * This is to avoid syncing before unmap_attachment(), assuming that the
 * caller relies on idling the reservation object before moving the
 * backing store out. Should that assumption not hold, then we will be able
 * to unconditionally call unmap_attachment() when moving out to system.
 */
static int xe_bo_move_dmabuf(struct ttm_buffer_object *ttm_bo,
			     struct ttm_resource *new_res)
{
	struct dma_buf_attachment *attach = ttm_bo->base.import_attach;
	struct xe_ttm_tt *xe_tt = container_of(ttm_bo->ttm, struct xe_ttm_tt,
					       ttm);
	struct xe_device *xe = ttm_to_xe_device(ttm_bo->bdev);
	struct sg_table *sg;

	xe_assert(xe, attach);
	xe_assert(xe, ttm_bo->ttm);

	if (new_res->mem_type == XE_PL_SYSTEM)
		goto out;

	if (ttm_bo->sg) {
		dma_buf_unmap_attachment(attach, ttm_bo->sg, DMA_BIDIRECTIONAL);
		ttm_bo->sg = NULL;
	}

	sg = dma_buf_map_attachment(attach, DMA_BIDIRECTIONAL);
	if (IS_ERR(sg))
		return PTR_ERR(sg);

	ttm_bo->sg = sg;
	xe_tt->sg = sg;

out:
	ttm_bo_move_null(ttm_bo, new_res);

	return 0;
}

/**
 * xe_bo_move_notify - Notify subsystems of a pending move
 * @bo: The buffer object
 * @ctx: The struct ttm_operation_ctx controlling locking and waits.
 *
 * This function notifies subsystems of an upcoming buffer move.
 * Upon receiving such a notification, subsystems should schedule
 * halting access to the underlying pages and optionally add a fence
 * to the buffer object's dma_resv object that signals when access is
 * stopped. The caller will wait on all dma_resv fences before
 * starting the move.
 *
 * A subsystem may commence access to the object after obtaining
 * bindings to the new backing memory under the object lock.
 *
 * Return: 0 on success, -EINTR or -ERESTARTSYS if interrupted in fault mode,
 * negative error code on error.
 */
static int xe_bo_move_notify(struct xe_bo *bo,
			     const struct ttm_operation_ctx *ctx)
{
	struct ttm_buffer_object *ttm_bo = &bo->ttm;
	struct xe_device *xe = ttm_to_xe_device(ttm_bo->bdev);
	struct ttm_resource *old_mem = ttm_bo->resource;
	u32 old_mem_type = old_mem ? old_mem->mem_type : XE_PL_SYSTEM;
	int ret;

	/*
	 * If this starts to call into many components, consider
	 * using a notification chain here.
	 */

	if (xe_bo_is_pinned(bo))
		return -EINVAL;

	xe_bo_vunmap(bo);
	ret = xe_bo_trigger_rebind(xe, bo, ctx);
	if (ret)
		return ret;

	/* Don't call move_notify() for imported dma-bufs. */
	if (ttm_bo->base.dma_buf && !ttm_bo->base.import_attach)
		dma_buf_move_notify(ttm_bo->base.dma_buf);

	/*
	 * TTM has already nuked the mmap for us (see ttm_bo_unmap_virtual),
	 * so if we moved from VRAM make sure to unlink this from the userfault
	 * tracking.
	 */
	if (mem_type_is_vram(old_mem_type)) {
		mutex_lock(&xe->mem_access.vram_userfault.lock);
		if (!list_empty(&bo->vram_userfault_link))
			list_del_init(&bo->vram_userfault_link);
		mutex_unlock(&xe->mem_access.vram_userfault.lock);
	}

	return 0;
}

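/*
 * TTM move callback: handles dummy moves at BO creation, dma-buf moves,
 * multi-hop through TT for system <-> VRAM transitions, GPU clears / copies
 * via the migrate engine, and CPU memcpy moves for pinned kernel BOs during
 * suspend / resume.
 */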
static int xe_bo_move(struct ttm_buffer_object *ttm_bo, bool evict,
		      struct ttm_operation_ctx *ctx,
		      struct ttm_resource *new_mem,
		      struct ttm_place *hop)
{
	struct xe_device *xe = ttm_to_xe_device(ttm_bo->bdev);
	struct xe_bo *bo = ttm_to_xe_bo(ttm_bo);
	struct ttm_resource *old_mem = ttm_bo->resource;
	u32 old_mem_type = old_mem ? old_mem->mem_type : XE_PL_SYSTEM;
	struct ttm_tt *ttm = ttm_bo->ttm;
	struct xe_migrate *migrate = NULL;
	struct dma_fence *fence;
	bool move_lacks_source;
	bool tt_has_data;
	bool needs_clear;
	bool handle_system_ccs = (!IS_DGFX(xe) && xe_bo_needs_ccs_pages(bo) &&
				  ttm && ttm_tt_is_populated(ttm)) ? true : false;
	int ret = 0;
	/* Bo creation path, moving to system or TT. */
	if ((!old_mem && ttm) && !handle_system_ccs) {
		ttm_bo_move_null(ttm_bo, new_mem);
		return 0;
	}

	if (ttm_bo->type == ttm_bo_type_sg) {
		ret = xe_bo_move_notify(bo, ctx);
		if (!ret)
			ret = xe_bo_move_dmabuf(ttm_bo, new_mem);
		goto out;
	}

	tt_has_data = ttm && (ttm_tt_is_populated(ttm) ||
			      (ttm->page_flags & TTM_TT_FLAG_SWAPPED));

	move_lacks_source = handle_system_ccs ? (!bo->ccs_cleared)  :
						(!mem_type_is_vram(old_mem_type) && !tt_has_data);

	needs_clear = (ttm && ttm->page_flags & TTM_TT_FLAG_ZERO_ALLOC) ||
		(!ttm && ttm_bo->type == ttm_bo_type_device);

	if ((move_lacks_source && !needs_clear)) {
		ttm_bo_move_null(ttm_bo, new_mem);
		goto out;
	}

	if (old_mem_type == XE_PL_SYSTEM && new_mem->mem_type == XE_PL_TT && !handle_system_ccs) {
		ttm_bo_move_null(ttm_bo, new_mem);
		goto out;
	}

	/*
	 * Failed multi-hop where the old_mem is still marked as
	 * TTM_PL_FLAG_TEMPORARY, should just be a dummy move.
	 */
	if (old_mem_type == XE_PL_TT &&
	    new_mem->mem_type == XE_PL_TT) {
		ttm_bo_move_null(ttm_bo, new_mem);
		goto out;
	}

	if (!move_lacks_source && !xe_bo_is_pinned(bo)) {
		ret = xe_bo_move_notify(bo, ctx);
		if (ret)
			goto out;
	}

	if (old_mem_type == XE_PL_TT &&
	    new_mem->mem_type == XE_PL_SYSTEM) {
		long timeout = dma_resv_wait_timeout(ttm_bo->base.resv,
						     DMA_RESV_USAGE_BOOKKEEP,
						     true,
						     MAX_SCHEDULE_TIMEOUT);
		if (timeout < 0) {
			ret = timeout;
			goto out;
		}

		if (!handle_system_ccs) {
			ttm_bo_move_null(ttm_bo, new_mem);
			goto out;
		}
	}

	if (!move_lacks_source &&
	    ((old_mem_type == XE_PL_SYSTEM && resource_is_vram(new_mem)) ||
	     (mem_type_is_vram(old_mem_type) &&
	      new_mem->mem_type == XE_PL_SYSTEM))) {
		hop->fpfn = 0;
		hop->lpfn = 0;
		hop->mem_type = XE_PL_TT;
		hop->flags = TTM_PL_FLAG_TEMPORARY;
		ret = -EMULTIHOP;
		goto out;
	}

	if (bo->tile)
		migrate = bo->tile->migrate;
	else if (resource_is_vram(new_mem))
		migrate = mem_type_to_migrate(xe, new_mem->mem_type);
	else if (mem_type_is_vram(old_mem_type))
		migrate = mem_type_to_migrate(xe, old_mem_type);
	else
		migrate = xe->tiles[0].migrate;

	xe_assert(xe, migrate);
	trace_xe_bo_move(bo, new_mem->mem_type, old_mem_type, move_lacks_source);
	xe_device_mem_access_get(xe);

	if (xe_bo_is_pinned(bo) && !xe_bo_is_user(bo)) {
		/*
		 * Kernel memory that is pinned should only be moved on suspend
		 * / resume; some of the pinned memory is required for the
		 * device to resume / use the GPU to move other evicted memory
		 * (user memory) around. This could likely be optimized a bit
		 * further by finding the minimum set of pinned memory required
		 * for resume, but for simplicity we do a memcpy for all pinned
		 * memory.
		 */
		ret = xe_bo_vmap(bo);
		if (!ret) {
			ret = ttm_bo_move_memcpy(ttm_bo, ctx, new_mem);

			/* Create a new VMAP once the kernel BO is back in VRAM */
			if (!ret && resource_is_vram(new_mem)) {
				struct xe_mem_region *vram = res_to_mem_region(new_mem);
				void __iomem *new_addr = vram->mapping +
					(new_mem->start << PAGE_SHIFT);

				if (XE_WARN_ON(new_mem->start == XE_BO_INVALID_OFFSET)) {
					ret = -EINVAL;
					xe_device_mem_access_put(xe);
					goto out;
				}

				xe_assert(xe, new_mem->start ==
					  bo->placements->fpfn);

				iosys_map_set_vaddr_iomem(&bo->vmap, new_addr);
			}
		}
	} else {
		if (move_lacks_source)
			fence = xe_migrate_clear(migrate, bo, new_mem);
		else
			fence = xe_migrate_copy(migrate, bo, bo, old_mem,
						new_mem, handle_system_ccs);
		if (IS_ERR(fence)) {
			ret = PTR_ERR(fence);
			xe_device_mem_access_put(xe);
			goto out;
		}
		if (!move_lacks_source) {
			ret = ttm_bo_move_accel_cleanup(ttm_bo, fence, evict,
							true, new_mem);
			if (ret) {
				dma_fence_wait(fence, false);
				ttm_bo_move_null(ttm_bo, new_mem);
				ret = 0;
			}
		} else {
			/*
			 * ttm_bo_move_accel_cleanup() may blow up if
			 * bo->resource == NULL, so just attach the
			 * fence and set the new resource.
			 */
			dma_resv_add_fence(ttm_bo->base.resv, fence,
					   DMA_RESV_USAGE_KERNEL);
			ttm_bo_move_null(ttm_bo, new_mem);
		}

		dma_fence_put(fence);
	}

	xe_device_mem_access_put(xe);

out:
	return ret;

}

/**
 * xe_bo_evict_pinned() - Evict a pinned VRAM object to system memory
 * @bo: The buffer object to move.
 *
 * On successful completion, the object memory will be moved to system memory.
 *
 * This is needed for special handling of pinned VRAM objects during
 * suspend-resume.
 *
 * Return: 0 on success. Negative error code on failure.
 */
int xe_bo_evict_pinned(struct xe_bo *bo)
{
	struct ttm_place place = {
		.mem_type = XE_PL_TT,
	};
	struct ttm_placement placement = {
		.placement = &place,
		.num_placement = 1,
	};
	struct ttm_operation_ctx ctx = {
		.interruptible = false,
	};
	struct ttm_resource *new_mem;
	int ret;

	xe_bo_assert_held(bo);

	if (WARN_ON(!bo->ttm.resource))
		return -EINVAL;

	if (WARN_ON(!xe_bo_is_pinned(bo)))
		return -EINVAL;

	if (WARN_ON(!xe_bo_is_vram(bo)))
		return -EINVAL;

	ret = ttm_bo_mem_space(&bo->ttm, &placement, &new_mem, &ctx);
	if (ret)
		return ret;

	if (!bo->ttm.ttm) {
		bo->ttm.ttm = xe_ttm_tt_create(&bo->ttm, 0);
		if (!bo->ttm.ttm) {
			ret = -ENOMEM;
			goto err_res_free;
		}
	}

	ret = ttm_tt_populate(bo->ttm.bdev, bo->ttm.ttm, &ctx);
	if (ret)
		goto err_res_free;

	ret = dma_resv_reserve_fences(bo->ttm.base.resv, 1);
	if (ret)
		goto err_res_free;

	ret = xe_bo_move(&bo->ttm, false, &ctx, new_mem, NULL);
	if (ret)
		goto err_res_free;

	return 0;

err_res_free:
	ttm_resource_free(&bo->ttm, &new_mem);
	return ret;
}

/**
 * xe_bo_restore_pinned() - Restore a pinned VRAM object
 * @bo: The buffer object to move.
 *
 * On successful completion, the object memory will be moved back to VRAM.
 *
 * This is needed for special handling of pinned VRAM objects during
 * suspend-resume.
 *
 * Return: 0 on success. Negative error code on failure.
 */
int xe_bo_restore_pinned(struct xe_bo *bo)
{
	struct ttm_operation_ctx ctx = {
		.interruptible = false,
	};
	struct ttm_resource *new_mem;
	int ret;

	xe_bo_assert_held(bo);

	if (WARN_ON(!bo->ttm.resource))
		return -EINVAL;

	if (WARN_ON(!xe_bo_is_pinned(bo)))
		return -EINVAL;

	if (WARN_ON(xe_bo_is_vram(bo) || !bo->ttm.ttm))
		return -EINVAL;

	ret = ttm_bo_mem_space(&bo->ttm, &bo->placement, &new_mem, &ctx);
	if (ret)
		return ret;

	ret = ttm_tt_populate(bo->ttm.bdev, bo->ttm.ttm, &ctx);
	if (ret)
		goto err_res_free;

	ret = dma_resv_reserve_fences(bo->ttm.base.resv, 1);
	if (ret)
		goto err_res_free;

	ret = xe_bo_move(&bo->ttm, false, &ctx, new_mem, NULL);
	if (ret)
		goto err_res_free;

	return 0;

err_res_free:
	ttm_resource_free(&bo->ttm, &new_mem);
	return ret;
}

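/* Translate a page offset within the BO into a PFN in the VRAM / stolen IO aperture. */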
static unsigned long xe_ttm_io_mem_pfn(struct ttm_buffer_object *ttm_bo,
				       unsigned long page_offset)
{
	struct xe_bo *bo = ttm_to_xe_bo(ttm_bo);
	struct xe_res_cursor cursor;
	struct xe_mem_region *vram;

	if (ttm_bo->resource->mem_type == XE_PL_STOLEN)
		return xe_ttm_stolen_io_offset(bo, page_offset << PAGE_SHIFT) >> PAGE_SHIFT;

	vram = res_to_mem_region(ttm_bo->resource);
	xe_res_first(ttm_bo->resource, (u64)page_offset << PAGE_SHIFT, 0, &cursor);
	return (vram->io_start + cursor.start) >> PAGE_SHIFT;
}

static void __xe_bo_vunmap(struct xe_bo *bo);

/*
 * TODO: Move this function to TTM so we don't rely on how TTM does its
 * locking, thereby abusing TTM internals.
 */
static bool xe_ttm_bo_lock_in_destructor(struct ttm_buffer_object *ttm_bo)
{
	struct xe_device *xe = ttm_to_xe_device(ttm_bo->bdev);
	bool locked;

	xe_assert(xe, !kref_read(&ttm_bo->kref));

	/*
	 * We can typically only race with TTM trylocking under the
	 * lru_lock, which will immediately be unlocked again since
	 * the ttm_bo refcount is zero at this point. So trylocking *should*
	 * always succeed here, as long as we hold the lru lock.
	 */
	spin_lock(&ttm_bo->bdev->lru_lock);
	locked = dma_resv_trylock(ttm_bo->base.resv);
	spin_unlock(&ttm_bo->bdev->lru_lock);
	xe_assert(xe, locked);

	return locked;
}

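/*
 * Called when the BO refcount drops to zero: replace any unsignaled preempt
 * fences in the reservation object with a signaled stub so destruction is
 * not held up by them.
 */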
static void xe_ttm_bo_release_notify(struct ttm_buffer_object *ttm_bo)
{
	struct dma_resv_iter cursor;
	struct dma_fence *fence;
	struct dma_fence *replacement = NULL;
	struct xe_bo *bo;

	if (!xe_bo_is_xe_bo(ttm_bo))
		return;

	bo = ttm_to_xe_bo(ttm_bo);
	xe_assert(xe_bo_device(bo), !(bo->created && kref_read(&ttm_bo->base.refcount)));

	/*
	 * Corner case where TTM fails to allocate memory and this BO's resv
	 * still points to the VM's resv.
	 */
	if (ttm_bo->base.resv != &ttm_bo->base._resv)
		return;

	if (!xe_ttm_bo_lock_in_destructor(ttm_bo))
		return;

	/*
	 * Scrub the preempt fences if any. The unbind fence is already
	 * attached to the resv.
	 * TODO: Don't do this for external bos once we scrub them after
	 * unbind.
	 */
	dma_resv_for_each_fence(&cursor, ttm_bo->base.resv,
				DMA_RESV_USAGE_BOOKKEEP, fence) {
		if (xe_fence_is_xe_preempt(fence) &&
		    !dma_fence_is_signaled(fence)) {
			if (!replacement)
				replacement = dma_fence_get_stub();

			dma_resv_replace_fences(ttm_bo->base.resv,
						fence->context,
						replacement,
						DMA_RESV_USAGE_BOOKKEEP);
		}
	}
	dma_fence_put(replacement);

	dma_resv_unlock(ttm_bo->base.resv);
}

static void xe_ttm_bo_delete_mem_notify(struct ttm_buffer_object *ttm_bo)
{
	if (!xe_bo_is_xe_bo(ttm_bo))
		return;

	/*
	 * Object is idle and about to be destroyed. Release the
	 * dma-buf attachment.
	 */
	if (ttm_bo->type == ttm_bo_type_sg && ttm_bo->sg) {
		struct xe_ttm_tt *xe_tt = container_of(ttm_bo->ttm,
						       struct xe_ttm_tt, ttm);

		dma_buf_unmap_attachment(ttm_bo->base.import_attach, ttm_bo->sg,
					 DMA_BIDIRECTIONAL);
		ttm_bo->sg = NULL;
		xe_tt->sg = NULL;
	}
}

const struct ttm_device_funcs xe_ttm_funcs = {
	.ttm_tt_create = xe_ttm_tt_create,
	.ttm_tt_populate = xe_ttm_tt_populate,
	.ttm_tt_unpopulate = xe_ttm_tt_unpopulate,
	.ttm_tt_destroy = xe_ttm_tt_destroy,
	.evict_flags = xe_evict_flags,
	.move = xe_bo_move,
	.io_mem_reserve = xe_ttm_io_mem_reserve,
	.io_mem_pfn = xe_ttm_io_mem_pfn,
	.release_notify = xe_ttm_bo_release_notify,
	.eviction_valuable = ttm_bo_eviction_valuable,
	.delete_mem_notify = xe_ttm_bo_delete_mem_notify,
};

static void xe_ttm_bo_destroy(struct ttm_buffer_object *ttm_bo)
{
	struct xe_bo *bo = ttm_to_xe_bo(ttm_bo);
	struct xe_device *xe = ttm_to_xe_device(ttm_bo->bdev);

	if (bo->ttm.base.import_attach)
		drm_prime_gem_destroy(&bo->ttm.base, NULL);
	drm_gem_object_release(&bo->ttm.base);

	xe_assert(xe, list_empty(&ttm_bo->base.gpuva.list));

	if (bo->ggtt_node.size)
		xe_ggtt_remove_bo(bo->tile->mem.ggtt, bo);

#ifdef CONFIG_PROC_FS
	if (bo->client)
		xe_drm_client_remove_bo(bo);
#endif

	if (bo->vm && xe_bo_is_user(bo))
		xe_vm_put(bo->vm);

	mutex_lock(&xe->mem_access.vram_userfault.lock);
	if (!list_empty(&bo->vram_userfault_link))
		list_del(&bo->vram_userfault_link);
	mutex_unlock(&xe->mem_access.vram_userfault.lock);

	kfree(bo);
}

static void xe_gem_object_free(struct drm_gem_object *obj)
{
	/* Our BO reference counting scheme works as follows:
	 *
	 * The gem object kref is typically used throughout the driver,
	 * and the gem object holds a ttm_buffer_object refcount, so
	 * that when the last gem object reference is put, which is when
	 * we end up in this function, we put also that ttm_buffer_object
	 * refcount. Anything using gem interfaces is then no longer
	 * allowed to access the object in a way that requires a gem
	 * refcount, including locking the object.
	 *
	 * Driver ttm callbacks are allowed to use the ttm_buffer_object
	 * refcount directly if needed.
	 */
	__xe_bo_vunmap(gem_to_xe_bo(obj));
	ttm_bo_put(container_of(obj, struct ttm_buffer_object, base));
}

static void xe_gem_object_close(struct drm_gem_object *obj,
				struct drm_file *file_priv)
{
	struct xe_bo *bo = gem_to_xe_bo(obj);

	if (bo->vm && !xe_vm_in_fault_mode(bo->vm)) {
		xe_assert(xe_bo_device(bo), xe_bo_is_user(bo));

		xe_bo_lock(bo, false);
		ttm_bo_set_bulk_move(&bo->ttm, NULL);
		xe_bo_unlock(bo);
	}
}

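/*
 * CPU fault handler for GEM mmaps: takes a mem_access reference for
 * VRAM-capable BOs and tracks VRAM faults on the device's userfault list so
 * the mappings can be invalidated later.
 */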
static vm_fault_t xe_gem_fault(struct vm_fault *vmf)
{
	struct ttm_buffer_object *tbo = vmf->vma->vm_private_data;
	struct drm_device *ddev = tbo->base.dev;
	struct xe_device *xe = to_xe_device(ddev);
	struct xe_bo *bo = ttm_to_xe_bo(tbo);
	bool needs_rpm = bo->flags & XE_BO_FLAG_VRAM_MASK;
	vm_fault_t ret;
	int idx;

	if (needs_rpm)
		xe_device_mem_access_get(xe);

	ret = ttm_bo_vm_reserve(tbo, vmf);
	if (ret)
		goto out;

	if (drm_dev_enter(ddev, &idx)) {
		trace_xe_bo_cpu_fault(bo);

		ret = ttm_bo_vm_fault_reserved(vmf, vmf->vma->vm_page_prot,
					       TTM_BO_VM_NUM_PREFAULT);
		drm_dev_exit(idx);
	} else {
		ret = ttm_bo_vm_dummy_page(vmf, vmf->vma->vm_page_prot);
	}

	if (ret == VM_FAULT_RETRY && !(vmf->flags & FAULT_FLAG_RETRY_NOWAIT))
		goto out;
	/*
	 * ttm_bo_vm_reserve() already has dma_resv_lock.
	 */
	if (ret == VM_FAULT_NOPAGE && mem_type_is_vram(tbo->resource->mem_type)) {
		mutex_lock(&xe->mem_access.vram_userfault.lock);
		if (list_empty(&bo->vram_userfault_link))
			list_add(&bo->vram_userfault_link, &xe->mem_access.vram_userfault.list);
		mutex_unlock(&xe->mem_access.vram_userfault.lock);
	}

	dma_resv_unlock(tbo->base.resv);
out:
	if (needs_rpm)
		xe_device_mem_access_put(xe);

	return ret;
}

static const struct vm_operations_struct xe_gem_vm_ops = {
	.fault = xe_gem_fault,
	.open = ttm_bo_vm_open,
	.close = ttm_bo_vm_close,
	.access = ttm_bo_vm_access
};

static const struct drm_gem_object_funcs xe_gem_object_funcs = {
	.free = xe_gem_object_free,
	.close = xe_gem_object_close,
	.mmap = drm_gem_ttm_mmap,
	.export = xe_gem_prime_export,
	.vm_ops = &xe_gem_vm_ops,
};

/**
 * xe_bo_alloc - Allocate storage for a struct xe_bo
 *
 * This function is intended to allocate storage to be used for input
 * to __xe_bo_create_locked(), in the case a pointer to the bo to be
 * created is needed before the call to __xe_bo_create_locked().
 * If __xe_bo_create_locked ends up never to be called, then the
 * storage allocated with this function needs to be freed using
 * xe_bo_free().
 *
 * Return: A pointer to an uninitialized struct xe_bo on success,
 * ERR_PTR(-ENOMEM) on error.
 */
struct xe_bo *xe_bo_alloc(void)
{
	struct xe_bo *bo = kzalloc(sizeof(*bo), GFP_KERNEL);

	if (!bo)
		return ERR_PTR(-ENOMEM);

	return bo;
}

/**
 * xe_bo_free - Free storage allocated using xe_bo_alloc()
 * @bo: The buffer object storage.
 *
 * Refer to xe_bo_alloc() documentation for valid use-cases.
 */
void xe_bo_free(struct xe_bo *bo)
{
	kfree(bo);
}

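/*
 * Lowest-level BO creation helper: applies 4K / 64K size and alignment rules,
 * sets up placements (unless a fixed placement was chosen beforehand),
 * initializes the TTM object reserved and, for kernel BOs, waits for any
 * kernel fences (async clears / evictions) before returning.
 */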
struct xe_bo *___xe_bo_create_locked(struct xe_device *xe, struct xe_bo *bo,
				     struct xe_tile *tile, struct dma_resv *resv,
				     struct ttm_lru_bulk_move *bulk, size_t size,
				     u16 cpu_caching, enum ttm_bo_type type,
				     u32 flags)
{
	struct ttm_operation_ctx ctx = {
		.interruptible = true,
		.no_wait_gpu = false,
	};
	struct ttm_placement *placement;
	uint32_t alignment;
	size_t aligned_size;
	int err;

	/* Only kernel objects should set GT */
	xe_assert(xe, !tile || type == ttm_bo_type_kernel);

	if (XE_WARN_ON(!size)) {
		xe_bo_free(bo);
		return ERR_PTR(-EINVAL);
	}

	if (flags & (XE_BO_FLAG_VRAM_MASK | XE_BO_FLAG_STOLEN) &&
	    !(flags & XE_BO_FLAG_IGNORE_MIN_PAGE_SIZE) &&
	    ((xe->info.vram_flags & XE_VRAM_FLAGS_NEED64K) ||
	     (flags & XE_BO_NEEDS_64K))) {
		aligned_size = ALIGN(size, SZ_64K);
		if (type != ttm_bo_type_device)
			size = ALIGN(size, SZ_64K);
		flags |= XE_BO_FLAG_INTERNAL_64K;
		alignment = SZ_64K >> PAGE_SHIFT;

	} else {
		aligned_size = ALIGN(size, SZ_4K);
		flags &= ~XE_BO_FLAG_INTERNAL_64K;
		alignment = SZ_4K >> PAGE_SHIFT;
	}

	if (type == ttm_bo_type_device && aligned_size != size)
		return ERR_PTR(-EINVAL);

	if (!bo) {
		bo = xe_bo_alloc();
		if (IS_ERR(bo))
			return bo;
	}

	bo->ccs_cleared = false;
	bo->tile = tile;
	bo->size = size;
	bo->flags = flags;
	bo->cpu_caching = cpu_caching;
	bo->ttm.base.funcs = &xe_gem_object_funcs;
	bo->ttm.priority = XE_BO_PRIORITY_NORMAL;
	INIT_LIST_HEAD(&bo->pinned_link);
#ifdef CONFIG_PROC_FS
	INIT_LIST_HEAD(&bo->client_link);
#endif
	INIT_LIST_HEAD(&bo->vram_userfault_link);

	drm_gem_private_object_init(&xe->drm, &bo->ttm.base, size);

	if (resv) {
		ctx.allow_res_evict = !(flags & XE_BO_FLAG_NO_RESV_EVICT);
		ctx.resv = resv;
	}

	if (!(flags & XE_BO_FLAG_FIXED_PLACEMENT)) {
		err = __xe_bo_placement_for_flags(xe, bo, bo->flags);
		if (WARN_ON(err)) {
			xe_ttm_bo_destroy(&bo->ttm);
			return ERR_PTR(err);
		}
	}

	/* Defer populating type_sg bos */
	placement = (type == ttm_bo_type_sg ||
		     bo->flags & XE_BO_FLAG_DEFER_BACKING) ? &sys_placement :
		&bo->placement;
	err = ttm_bo_init_reserved(&xe->ttm, &bo->ttm, type,
				   placement, alignment,
				   &ctx, NULL, resv, xe_ttm_bo_destroy);
	if (err)
		return ERR_PTR(err);

	/*
	 * The VRAM pages underneath are potentially still being accessed by the
	 * GPU, as per async GPU clearing and async evictions. However TTM makes
	 * sure to add any corresponding move/clear fences into the object's
	 * dma-resv using the DMA_RESV_USAGE_KERNEL slot.
	 *
	 * For KMD internal buffers we don't care about GPU clearing, however we
	 * still need to handle async evictions, where the VRAM is still being
	 * accessed by the GPU. Most internal callers are not expecting this,
	 * since they are missing the required synchronisation before accessing
	 * the memory. To keep things simple just sync wait any kernel fences
	 * here, if the buffer is designated KMD internal.
	 *
	 * For normal userspace objects we should already have the required
	 * pipelining or sync waiting elsewhere, since we already have to deal
	 * with things like async GPU clearing.
	 */
	if (type == ttm_bo_type_kernel) {
		long timeout = dma_resv_wait_timeout(bo->ttm.base.resv,
						     DMA_RESV_USAGE_KERNEL,
						     ctx.interruptible,
						     MAX_SCHEDULE_TIMEOUT);

		if (timeout < 0) {
			if (!resv)
				dma_resv_unlock(bo->ttm.base.resv);
			xe_bo_put(bo);
			return ERR_PTR(timeout);
		}
	}

	bo->created = true;
	if (bulk)
		ttm_bo_set_bulk_move(&bo->ttm, bulk);
	else
		ttm_bo_move_to_lru_tail_unlocked(&bo->ttm);

	return bo;
}

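/*
 * Set up a single contiguous placement at the fixed [start, end) range,
 * in VRAM or stolen memory only.
 */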
static int __xe_bo_fixed_placement(struct xe_device *xe,
				   struct xe_bo *bo,
				   u32 flags,
				   u64 start, u64 end, u64 size)
{
	struct ttm_place *place = bo->placements;

	if (flags & (XE_BO_FLAG_USER | XE_BO_FLAG_SYSTEM))
		return -EINVAL;

	place->flags = TTM_PL_FLAG_CONTIGUOUS;
	place->fpfn = start >> PAGE_SHIFT;
	place->lpfn = end >> PAGE_SHIFT;

	switch (flags & (XE_BO_FLAG_STOLEN | XE_BO_FLAG_VRAM_MASK)) {
	case XE_BO_FLAG_VRAM0:
		place->mem_type = XE_PL_VRAM0;
		break;
	case XE_BO_FLAG_VRAM1:
		place->mem_type = XE_PL_VRAM1;
		break;
	case XE_BO_FLAG_STOLEN:
		place->mem_type = XE_PL_STOLEN;
		break;

	default:
		/* 0 or multiple of the above set */
		return -EINVAL;
	}

	bo->placement = (struct ttm_placement) {
		.num_placement = 1,
		.placement = place,
	};

	return 0;
}

static struct xe_bo *
__xe_bo_create_locked(struct xe_device *xe,
		      struct xe_tile *tile, struct xe_vm *vm,
		      size_t size, u64 start, u64 end,
		      u16 cpu_caching, enum ttm_bo_type type, u32 flags)
{
	struct xe_bo *bo = NULL;
	int err;

	if (vm)
		xe_vm_assert_held(vm);

	if (start || end != ~0ULL) {
		bo = xe_bo_alloc();
		if (IS_ERR(bo))
			return bo;

		flags |= XE_BO_FLAG_FIXED_PLACEMENT;
		err = __xe_bo_fixed_placement(xe, bo, flags, start, end, size);
		if (err) {
			xe_bo_free(bo);
			return ERR_PTR(err);
		}
	}

	bo = ___xe_bo_create_locked(xe, bo, tile, vm ? xe_vm_resv(vm) : NULL,
				    vm && !xe_vm_in_fault_mode(vm) &&
				    flags & XE_BO_FLAG_USER ?
				    &vm->lru_bulk_move : NULL, size,
				    cpu_caching, type, flags);
	if (IS_ERR(bo))
		return bo;

	/*
	 * Note that instead of taking a reference on the drm_gpuvm_resv_bo(),
	 * to ensure the shared resv doesn't disappear under the bo, the bo
	 * will keep a reference to the vm, and avoid circular references
	 * by having all the vm's bo references released at vm close
	 * time.
	 */
	if (vm && xe_bo_is_user(bo))
		xe_vm_get(vm);
	bo->vm = vm;

	if (bo->flags & XE_BO_FLAG_GGTT) {
		if (!tile && flags & XE_BO_FLAG_STOLEN)
			tile = xe_device_get_root_tile(xe);

		xe_assert(xe, tile);

		if (flags & XE_BO_FLAG_FIXED_PLACEMENT) {
			err = xe_ggtt_insert_bo_at(tile->mem.ggtt, bo,
						   start + bo->size, U64_MAX);
		} else {
			err = xe_ggtt_insert_bo(tile->mem.ggtt, bo);
		}
		if (err)
			goto err_unlock_put_bo;
	}

	return bo;

err_unlock_put_bo:
	__xe_bo_unset_bulk_move(bo);
	xe_bo_unlock_vm_held(bo);
	xe_bo_put(bo);
	return ERR_PTR(err);
}

struct xe_bo *
xe_bo_create_locked_range(struct xe_device *xe,
			  struct xe_tile *tile, struct xe_vm *vm,
			  size_t size, u64 start, u64 end,
			  enum ttm_bo_type type, u32 flags)
{
	return __xe_bo_create_locked(xe, tile, vm, size, start, end, 0, type, flags);
}

struct xe_bo *xe_bo_create_locked(struct xe_device *xe, struct xe_tile *tile,
				  struct xe_vm *vm, size_t size,
				  enum ttm_bo_type type, u32 flags)
{
	return __xe_bo_create_locked(xe, tile, vm, size, 0, ~0ULL, 0, type, flags);
}

struct xe_bo *xe_bo_create_user(struct xe_device *xe, struct xe_tile *tile,
				struct xe_vm *vm, size_t size,
				u16 cpu_caching,
				enum ttm_bo_type type,
				u32 flags)
{
	struct xe_bo *bo = __xe_bo_create_locked(xe, tile, vm, size, 0, ~0ULL,
						 cpu_caching, type,
						 flags | XE_BO_FLAG_USER);
	if (!IS_ERR(bo))
		xe_bo_unlock_vm_held(bo);

	return bo;
}

struct xe_bo *xe_bo_create(struct xe_device *xe, struct xe_tile *tile,
			   struct xe_vm *vm, size_t size,
			   enum ttm_bo_type type, u32 flags)
{
	struct xe_bo *bo = xe_bo_create_locked(xe, tile, vm, size, type, flags);

	if (!IS_ERR(bo))
		xe_bo_unlock_vm_held(bo);

	return bo;
}

struct xe_bo *xe_bo_create_pin_map_at(struct xe_device *xe, struct xe_tile *tile,
				      struct xe_vm *vm,
				      size_t size, u64 offset,
				      enum ttm_bo_type type, u32 flags)
{
	struct xe_bo *bo;
	int err;
	u64 start = offset == ~0ull ? 0 : offset;
	u64 end = offset == ~0ull ? offset : start + size;

	if (flags & XE_BO_FLAG_STOLEN &&
	    xe_ttm_stolen_cpu_access_needs_ggtt(xe))
		flags |= XE_BO_FLAG_GGTT;

	bo = xe_bo_create_locked_range(xe, tile, vm, size, start, end, type,
				       flags | XE_BO_FLAG_NEEDS_CPU_ACCESS);
	if (IS_ERR(bo))
		return bo;

	err = xe_bo_pin(bo);
	if (err)
		goto err_put;

	err = xe_bo_vmap(bo);
	if (err)
		goto err_unpin;

	xe_bo_unlock_vm_held(bo);

	return bo;

err_unpin:
	xe_bo_unpin(bo);
err_put:
	xe_bo_unlock_vm_held(bo);
	xe_bo_put(bo);
	return ERR_PTR(err);
}

struct xe_bo *xe_bo_create_pin_map(struct xe_device *xe, struct xe_tile *tile,
				   struct xe_vm *vm, size_t size,
				   enum ttm_bo_type type, u32 flags)
{
	return xe_bo_create_pin_map_at(xe, tile, vm, size, ~0ull, type, flags);
}

struct xe_bo *xe_bo_create_from_data(struct xe_device *xe, struct xe_tile *tile,
				     const void *data, size_t size,
				     enum ttm_bo_type type, u32 flags)
{
	struct xe_bo *bo = xe_bo_create_pin_map(xe, tile, NULL,
						ALIGN(size, PAGE_SIZE),
						type, flags);
	if (IS_ERR(bo))
		return bo;

	xe_map_memcpy_to(xe, &bo->vmap, 0, data, size);

	return bo;
}

static void __xe_bo_unpin_map_no_vm(struct drm_device *drm, void *arg)
{
	xe_bo_unpin_map_no_vm(arg);
}

struct xe_bo *xe_managed_bo_create_pin_map(struct xe_device *xe, struct xe_tile *tile,
					   size_t size, u32 flags)
{
	struct xe_bo *bo;
	int ret;

	bo = xe_bo_create_pin_map(xe, tile, NULL, size, ttm_bo_type_kernel, flags);
	if (IS_ERR(bo))
		return bo;

	ret = drmm_add_action_or_reset(&xe->drm, __xe_bo_unpin_map_no_vm, bo);
	if (ret)
		return ERR_PTR(ret);

	return bo;
}

struct xe_bo *xe_managed_bo_create_from_data(struct xe_device *xe, struct xe_tile *tile,
					     const void *data, size_t size, u32 flags)
{
	struct xe_bo *bo = xe_managed_bo_create_pin_map(xe, tile, ALIGN(size, PAGE_SIZE), flags);

	if (IS_ERR(bo))
		return bo;

	xe_map_memcpy_to(xe, &bo->vmap, 0, data, size);

	return bo;
}

/**
 * xe_managed_bo_reinit_in_vram - Re-allocate a managed BO in VRAM
 * @xe: xe device
 * @tile: Tile where the new buffer will be created
 * @src: Managed buffer object allocated in system memory
 *
 * Replace a managed src buffer object allocated in system memory with a new
 * one allocated in vram, copying the data between them.
 * The buffer object in VRAM is not going to have the same GGTT address; the caller
 * is responsible for making sure that any old references to it are updated.
 *
 * Returns 0 for success, negative error code otherwise.
 */
int xe_managed_bo_reinit_in_vram(struct xe_device *xe, struct xe_tile *tile, struct xe_bo **src)
{
	struct xe_bo *bo;
	u32 dst_flags = XE_BO_FLAG_VRAM_IF_DGFX(tile) | XE_BO_FLAG_GGTT;

	dst_flags |= (*src)->flags & XE_BO_FLAG_GGTT_INVALIDATE;

	xe_assert(xe, IS_DGFX(xe));
	xe_assert(xe, !(*src)->vmap.is_iomem);

	bo = xe_managed_bo_create_from_data(xe, tile, (*src)->vmap.vaddr,
					    (*src)->size, dst_flags);
	if (IS_ERR(bo))
		return PTR_ERR(bo);

	drmm_release_action(&xe->drm, __xe_bo_unpin_map_no_vm, *src);
	*src = bo;

	return 0;
}

/*
 * XXX: This is in the VM bind data path, likely should calculate this once and
 * store, with a recalculation if the BO is moved.
 */
uint64_t vram_region_gpu_offset(struct ttm_resource *res)
{
	struct xe_device *xe = ttm_to_xe_device(res->bo->bdev);

	if (res->mem_type == XE_PL_STOLEN)
		return xe_ttm_stolen_gpu_offset(xe);

	return res_to_mem_region(res)->dpa_base;
}

/**
 * xe_bo_pin_external - pin an external BO
 * @bo: buffer object to be pinned
 *
 * Pin an external (not tied to a VM, can be exported via dma-buf / prime FD)
 * BO. Unique call compared to xe_bo_pin as this function has it own set of
 * asserts and code to ensure evict / restore on suspend / resume.
 *
 * Returns 0 for success, negative error code otherwise.
 */
int xe_bo_pin_external(struct xe_bo *bo)
{
	struct xe_device *xe = xe_bo_device(bo);
	int err;

	xe_assert(xe, !bo->vm);
	xe_assert(xe, xe_bo_is_user(bo));

	if (!xe_bo_is_pinned(bo)) {
		err = xe_bo_validate(bo, NULL, false);
		if (err)
			return err;

		if (xe_bo_is_vram(bo)) {
			spin_lock(&xe->pinned.lock);
			list_add_tail(&bo->pinned_link,
				      &xe->pinned.external_vram);
			spin_unlock(&xe->pinned.lock);
		}
	}

	ttm_bo_pin(&bo->ttm);

	/*
	 * FIXME: If we always use the reserve / unreserve functions for locking
	 * we do not need this.
	 */
	ttm_bo_move_to_lru_tail_unlocked(&bo->ttm);

	return 0;
}

int xe_bo_pin(struct xe_bo *bo)
{
	struct xe_device *xe = xe_bo_device(bo);
	int err;

	/* We currently don't expect user BO to be pinned */
	xe_assert(xe, !xe_bo_is_user(bo));

	/* Pinned object must be in GGTT or have pinned flag */
	xe_assert(xe, bo->flags & (XE_BO_FLAG_PINNED |
				   XE_BO_FLAG_GGTT));

	/*
	 * No reason we can't support pinning imported dma-bufs we just don't
	 * expect to pin an imported dma-buf.
	 */
	xe_assert(xe, !bo->ttm.base.import_attach);

	/* We only expect at most 1 pin */
	xe_assert(xe, !xe_bo_is_pinned(bo));

	err = xe_bo_validate(bo, NULL, false);
	if (err)
		return err;

	/*
	 * For pinned objects on DGFX which are also in VRAM, we expect
	 * these to be in contiguous VRAM memory. This is required for
	 * eviction / restore during suspend / resume (to force restore to
	 * the same physical address).
	 */
	if (IS_DGFX(xe) && !(IS_ENABLED(CONFIG_DRM_XE_DEBUG) &&
	    bo->flags & XE_BO_FLAG_INTERNAL_TEST)) {
		struct ttm_place *place = &(bo->placements[0]);

		if (mem_type_is_vram(place->mem_type)) {
			xe_assert(xe, place->flags & TTM_PL_FLAG_CONTIGUOUS);

			place->fpfn = (xe_bo_addr(bo, 0, PAGE_SIZE) -
				       vram_region_gpu_offset(bo->ttm.resource)) >> PAGE_SHIFT;
			place->lpfn = place->fpfn + (bo->size >> PAGE_SHIFT);

			spin_lock(&xe->pinned.lock);
			list_add_tail(&bo->pinned_link, &xe->pinned.kernel_bo_present);
			spin_unlock(&xe->pinned.lock);
		}
	}

	ttm_bo_pin(&bo->ttm);

	/*
	 * FIXME: If we always use the reserve / unreserve functions for locking
	 * we do not need this.
	 */
	ttm_bo_move_to_lru_tail_unlocked(&bo->ttm);

	return 0;
}

/**
 * xe_bo_unpin_external - unpin an external BO
 * @bo: buffer object to be unpinned
 *
 * Unpin an external (not tied to a VM, can be exported via dma-buf / prime FD)
 * BO. Unique call compared to xe_bo_unpin as this function has its own set of
 * asserts and code to ensure evict / restore on suspend / resume.
 *
 * Returns 0 for success, negative error code otherwise.
 */
void xe_bo_unpin_external(struct xe_bo *bo)
{
	struct xe_device *xe = xe_bo_device(bo);

	xe_assert(xe, !bo->vm);
	xe_assert(xe, xe_bo_is_pinned(bo));
	xe_assert(xe, xe_bo_is_user(bo));

	if (bo->ttm.pin_count == 1 && !list_empty(&bo->pinned_link)) {
		spin_lock(&xe->pinned.lock);
		list_del_init(&bo->pinned_link);
		spin_unlock(&xe->pinned.lock);
	}

	ttm_bo_unpin(&bo->ttm);

	/*
	 * FIXME: If we always use the reserve / unreserve functions for locking
	 * we do not need this.
	 */
	ttm_bo_move_to_lru_tail_unlocked(&bo->ttm);
}

void xe_bo_unpin(struct xe_bo *bo)
{
	struct xe_device *xe = xe_bo_device(bo);

	xe_assert(xe, !bo->ttm.base.import_attach);
	xe_assert(xe, xe_bo_is_pinned(bo));

	if (IS_DGFX(xe) && !(IS_ENABLED(CONFIG_DRM_XE_DEBUG) &&
	    bo->flags & XE_BO_FLAG_INTERNAL_TEST)) {
		struct ttm_place *place = &(bo->placements[0]);

		if (mem_type_is_vram(place->mem_type)) {
			xe_assert(xe, !list_empty(&bo->pinned_link));

			spin_lock(&xe->pinned.lock);
			list_del_init(&bo->pinned_link);
			spin_unlock(&xe->pinned.lock);
		}
	}

	ttm_bo_unpin(&bo->ttm);
}

/**
 * xe_bo_validate() - Make sure the bo is in an allowed placement
 * @bo: The bo,
 * @vm: Pointer to the vm the bo shares a locked dma_resv object with, or
 *      NULL. Used together with @allow_res_evict.
 * @allow_res_evict: Whether it's allowed to evict bos sharing @vm's
 *                   reservation object.
 *
 * Make sure the bo is in allowed placement, migrating it if necessary. If
 * needed, other bos will be evicted. If bos selected for eviction share
 * the @vm's reservation object, they can be evicted iff @allow_res_evict is
 * set to true, otherwise they will be bypassed.
 *
 * Return: 0 on success, negative error code on failure. May return
 * -EINTR or -ERESTARTSYS if internal waits are interrupted by a signal.
 */
int xe_bo_validate(struct xe_bo *bo, struct xe_vm *vm, bool allow_res_evict)
{
	struct ttm_operation_ctx ctx = {
		.interruptible = true,
		.no_wait_gpu = false,
	};

	if (vm) {
		lockdep_assert_held(&vm->lock);
		xe_vm_assert_held(vm);

		ctx.allow_res_evict = allow_res_evict;
		ctx.resv = xe_vm_resv(vm);
	}

	return ttm_bo_validate(&bo->ttm, &bo->placement, &ctx);
}

bool xe_bo_is_xe_bo(struct ttm_buffer_object *bo)
{
	if (bo->destroy == &xe_ttm_bo_destroy)
		return true;

	return false;
}

/*
 * Resolve a BO address. There is no assert to check if the proper lock is held
 * so it should only be used in cases where it is not fatal to get the wrong
 * address, such as printing debug information, but not in cases where memory is
 * written based on this result.
 */
dma_addr_t __xe_bo_addr(struct xe_bo *bo, u64 offset, size_t page_size)
{
	struct xe_device *xe = xe_bo_device(bo);
	struct xe_res_cursor cur;
	u64 page;

	xe_assert(xe, page_size <= PAGE_SIZE);
	page = offset >> PAGE_SHIFT;
	offset &= (PAGE_SIZE - 1);

	if (!xe_bo_is_vram(bo) && !xe_bo_is_stolen(bo)) {
		xe_assert(xe, bo->ttm.ttm);

		xe_res_first_sg(xe_bo_sg(bo), page << PAGE_SHIFT,
				page_size, &cur);
		return xe_res_dma(&cur) + offset;
	} else {
		struct xe_res_cursor cur;

		xe_res_first(bo->ttm.resource, page << PAGE_SHIFT,
			     page_size, &cur);
		return cur.start + offset + vram_region_gpu_offset(bo->ttm.resource);
	}
}

dma_addr_t xe_bo_addr(struct xe_bo *bo, u64 offset, size_t page_size)
{
	if (!READ_ONCE(bo->ttm.pin_count))
		xe_bo_assert_held(bo);
	return __xe_bo_addr(bo, offset, page_size);
}

int xe_bo_vmap(struct xe_bo *bo)
{
	void *virtual;
	bool is_iomem;
	int ret;

	xe_bo_assert_held(bo);

	if (!(bo->flags & XE_BO_FLAG_NEEDS_CPU_ACCESS))
		return -EINVAL;

	if (!iosys_map_is_null(&bo->vmap))
		return 0;

	/*
	 * We use this more or less deprecated interface for now since
	 * ttm_bo_vmap() doesn't offer the optimization of kmapping
	 * single page bos, which is done here.
	 * TODO: Fix up ttm_bo_vmap to do that, or fix up ttm_bo_kmap
	 * to use struct iosys_map.
	 */
	ret = ttm_bo_kmap(&bo->ttm, 0, bo->size >> PAGE_SHIFT, &bo->kmap);
	if (ret)
		return ret;

	virtual = ttm_kmap_obj_virtual(&bo->kmap, &is_iomem);
	if (is_iomem)
		iosys_map_set_vaddr_iomem(&bo->vmap, (void __iomem *)virtual);
	else
		iosys_map_set_vaddr(&bo->vmap, virtual);

	return 0;
}

static void __xe_bo_vunmap(struct xe_bo *bo)
{
	if (!iosys_map_is_null(&bo->vmap)) {
		iosys_map_clear(&bo->vmap);
		ttm_bo_kunmap(&bo->kmap);
	}
}

void xe_bo_vunmap(struct xe_bo *bo)
{
	xe_bo_assert_held(bo);
	__xe_bo_vunmap(bo);
}
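
/*
 * Usage sketch (illustrative only, not compiled): CPU access to a kernel BO
 * created with XE_BO_FLAG_NEEDS_CPU_ACCESS. "bo" and "xe" are assumed to be
 * in scope, and the BO lock is assumed to be held as xe_bo_vmap() requires;
 * xe_map_wr() is the helper from xe_map.h.
 *
 *    int err = xe_bo_vmap(bo);
 *
 *    if (err)
 *        return err;
 *    xe_map_wr(xe, &bo->vmap, 0, u32, 0xc0ffee);
 *    xe_bo_vunmap(bo);
 */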

int xe_gem_create_ioctl(struct drm_device *dev, void *data,
			struct drm_file *file)
{
	struct xe_device *xe = to_xe_device(dev);
	struct xe_file *xef = to_xe_file(file);
	struct drm_xe_gem_create *args = data;
	struct xe_vm *vm = NULL;
	struct xe_bo *bo;
	unsigned int bo_flags;
	u32 handle;
	int err;

	if (XE_IOCTL_DBG(xe, args->extensions) ||
	    XE_IOCTL_DBG(xe, args->pad[0] || args->pad[1] || args->pad[2]) ||
	    XE_IOCTL_DBG(xe, args->reserved[0] || args->reserved[1]))
		return -EINVAL;

	/* at least one valid memory placement must be specified */
	if (XE_IOCTL_DBG(xe, (args->placement & ~xe->info.mem_region_mask) ||
			 !args->placement))
		return -EINVAL;

	if (XE_IOCTL_DBG(xe, args->flags &
			 ~(DRM_XE_GEM_CREATE_FLAG_DEFER_BACKING |
			   DRM_XE_GEM_CREATE_FLAG_SCANOUT |
			   DRM_XE_GEM_CREATE_FLAG_NEEDS_VISIBLE_VRAM)))
		return -EINVAL;

	if (XE_IOCTL_DBG(xe, args->handle))
		return -EINVAL;

	if (XE_IOCTL_DBG(xe, !args->size))
		return -EINVAL;

	if (XE_IOCTL_DBG(xe, args->size > SIZE_MAX))
		return -EINVAL;

	if (XE_IOCTL_DBG(xe, args->size & ~PAGE_MASK))
		return -EINVAL;

	bo_flags = 0;
	if (args->flags & DRM_XE_GEM_CREATE_FLAG_DEFER_BACKING)
		bo_flags |= XE_BO_FLAG_DEFER_BACKING;

	if (args->flags & DRM_XE_GEM_CREATE_FLAG_SCANOUT)
		bo_flags |= XE_BO_FLAG_SCANOUT;

	bo_flags |= args->placement << (ffs(XE_BO_FLAG_SYSTEM) - 1);

	if (args->flags & DRM_XE_GEM_CREATE_FLAG_NEEDS_VISIBLE_VRAM) {
		if (XE_IOCTL_DBG(xe, !(bo_flags & XE_BO_FLAG_VRAM_MASK)))
			return -EINVAL;

		bo_flags |= XE_BO_FLAG_NEEDS_CPU_ACCESS;
	}

	if (XE_IOCTL_DBG(xe, !args->cpu_caching ||
			 args->cpu_caching > DRM_XE_GEM_CPU_CACHING_WC))
		return -EINVAL;

	if (XE_IOCTL_DBG(xe, bo_flags & XE_BO_FLAG_VRAM_MASK &&
			 args->cpu_caching != DRM_XE_GEM_CPU_CACHING_WC))
		return -EINVAL;

	if (XE_IOCTL_DBG(xe, bo_flags & XE_BO_FLAG_SCANOUT &&
			 args->cpu_caching == DRM_XE_GEM_CPU_CACHING_WB))
		return -EINVAL;

	if (args->vm_id) {
		vm = xe_vm_lookup(xef, args->vm_id);
		if (XE_IOCTL_DBG(xe, !vm))
			return -ENOENT;
		err = xe_vm_lock(vm, true);
		if (err)
			goto out_vm;
	}

	bo = xe_bo_create_user(xe, NULL, vm, args->size, args->cpu_caching,
			       ttm_bo_type_device, bo_flags);

	if (vm)
		xe_vm_unlock(vm);

	if (IS_ERR(bo)) {
		err = PTR_ERR(bo);
		goto out_vm;
	}

	err = drm_gem_handle_create(file, &bo->ttm.base, &handle);
	if (err)
		goto out_bulk;

	args->handle = handle;
	goto out_put;

out_bulk:
	if (vm && !xe_vm_in_fault_mode(vm)) {
		xe_vm_lock(vm, false);
		__xe_bo_unset_bulk_move(bo);
		xe_vm_unlock(vm);
	}
out_put:
	xe_bo_put(bo);
out_vm:
	if (vm)
		xe_vm_put(vm);

	return err;
}
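
/*
 * Userspace sketch (illustrative only, not compiled): creating a GEM object
 * through this ioctl. The placement bitmask would normally be derived from
 * the device's memory regions query; "fd", "vram_bit" and "bo_handle" are
 * assumed to be set up by the caller.
 *
 *    struct drm_xe_gem_create create = {
 *        .size = 1 << 20,
 *        .placement = vram_bit,
 *        .flags = DRM_XE_GEM_CREATE_FLAG_NEEDS_VISIBLE_VRAM,
 *        .cpu_caching = DRM_XE_GEM_CPU_CACHING_WC,
 *    };
 *
 *    if (ioctl(fd, DRM_IOCTL_XE_GEM_CREATE, &create))
 *        return -errno;
 *    bo_handle = create.handle;
 */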

int xe_gem_mmap_offset_ioctl(struct drm_device *dev, void *data,
			     struct drm_file *file)
{
	struct xe_device *xe = to_xe_device(dev);
	struct drm_xe_gem_mmap_offset *args = data;
	struct drm_gem_object *gem_obj;

	if (XE_IOCTL_DBG(xe, args->extensions) ||
	    XE_IOCTL_DBG(xe, args->reserved[0] || args->reserved[1]))
		return -EINVAL;

	if (XE_IOCTL_DBG(xe, args->flags))
		return -EINVAL;

	gem_obj = drm_gem_object_lookup(file, args->handle);
	if (XE_IOCTL_DBG(xe, !gem_obj))
		return -ENOENT;

	/* The mmap offset was set up at BO allocation time. */
	args->offset = drm_vma_node_offset_addr(&gem_obj->vma_node);

	xe_bo_put(gem_to_xe_bo(gem_obj));
	return 0;
}
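
/*
 * Userspace sketch (illustrative only, not compiled): mapping a BO created
 * above. "fd", "bo_handle" and "size" are assumed to come from the caller.
 *
 *    struct drm_xe_gem_mmap_offset mmo = { .handle = bo_handle };
 *    void *ptr;
 *
 *    if (ioctl(fd, DRM_IOCTL_XE_GEM_MMAP_OFFSET, &mmo))
 *        return -errno;
 *    ptr = mmap(NULL, size, PROT_READ | PROT_WRITE, MAP_SHARED,
 *               fd, mmo.offset);
 */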

/**
 * xe_bo_lock() - Lock the buffer object's dma_resv object
 * @bo: The struct xe_bo whose lock is to be taken
 * @intr: Whether to perform any wait interruptible
 *
 * Locks the buffer object's dma_resv object. If the buffer object is
 * pointing to a shared dma_resv object, that shared lock is locked.
 *
 * Return: 0 on success, -EINTR if @intr is true and the wait for a
 * contended lock was interrupted. If @intr is set to false, the
 * function always returns 0.
 */
int xe_bo_lock(struct xe_bo *bo, bool intr)
{
	if (intr)
		return dma_resv_lock_interruptible(bo->ttm.base.resv, NULL);

	dma_resv_lock(bo->ttm.base.resv, NULL);

	return 0;
}

/**
 * xe_bo_unlock() - Unlock the buffer object's dma_resv object
 * @bo: The struct xe_bo whose lock is to be released.
 *
 * Unlock a buffer object lock that was locked by xe_bo_lock().
 */
void xe_bo_unlock(struct xe_bo *bo)
{
	dma_resv_unlock(bo->ttm.base.resv);
}
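
/*
 * Usage sketch (illustrative only, not compiled): the interruptible variant
 * returns -EINTR when a signal aborts the wait, which callers are expected
 * to propagate. "bo" is assumed to be in scope.
 *
 *    int err = xe_bo_lock(bo, true);
 *
 *    if (err)
 *        return err;
 *    ... inspect or modify state protected by the dma_resv lock ...
 *    xe_bo_unlock(bo);
 */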

/**
 * xe_bo_can_migrate - Whether a buffer object likely can be migrated
 * @bo: The buffer object to migrate
 * @mem_type: The TTM memory type intended to migrate to
 *
 * Check whether the buffer object supports migration to the
 * given memory type. Note that pinning may affect the ability to migrate as
 * returned by this function.
 *
 * This function is primarily intended as a helper for checking the
 * possibility to migrate buffer objects and can be called without
 * the object lock held.
 *
 * Return: true if migration is possible, false otherwise.
 */
bool xe_bo_can_migrate(struct xe_bo *bo, u32 mem_type)
{
	unsigned int cur_place;

	if (bo->ttm.type == ttm_bo_type_kernel)
		return true;

	if (bo->ttm.type == ttm_bo_type_sg)
		return false;

	for (cur_place = 0; cur_place < bo->placement.num_placement;
	     cur_place++) {
		if (bo->placements[cur_place].mem_type == mem_type)
			return true;
	}

	return false;
}

static void xe_place_from_ttm_type(u32 mem_type, struct ttm_place *place)
{
	memset(place, 0, sizeof(*place));
	place->mem_type = mem_type;
}

/**
 * xe_bo_migrate - Migrate an object to the desired region id
 * @bo: The buffer object to migrate.
 * @mem_type: The TTM region type to migrate to.
 *
 * Attempt to migrate the buffer object to the desired memory region. The
 * buffer object may not be pinned, and must be locked.
 * On successful completion, the object memory type will be updated,
 * but an async migration task may not have completed yet, and to
 * accomplish that, the object's kernel fences must be signaled with
 * the object lock held.
 *
 * Return: 0 on success. Negative error code on failure. In particular may
 * return -EINTR or -ERESTARTSYS if signal pending.
 */
int xe_bo_migrate(struct xe_bo *bo, u32 mem_type)
{
	struct xe_device *xe = ttm_to_xe_device(bo->ttm.bdev);
	struct ttm_operation_ctx ctx = {
		.interruptible = true,
		.no_wait_gpu = false,
	};
	struct ttm_placement placement;
	struct ttm_place requested;

	xe_bo_assert_held(bo);

	if (bo->ttm.resource->mem_type == mem_type)
		return 0;

	if (xe_bo_is_pinned(bo))
		return -EBUSY;

	if (!xe_bo_can_migrate(bo, mem_type))
		return -EINVAL;

	xe_place_from_ttm_type(mem_type, &requested);
	placement.num_placement = 1;
	placement.placement = &requested;

	/*
	 * Stolen needs to be handled like below VRAM handling if we ever need
	 * to support it.
	 */
	drm_WARN_ON(&xe->drm, mem_type == XE_PL_STOLEN);

	if (mem_type_is_vram(mem_type)) {
		u32 c = 0;

		add_vram(xe, bo, &requested, bo->flags, mem_type, &c);
	}

	return ttm_bo_validate(&bo->ttm, &placement, &ctx);
}
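
/*
 * Usage sketch (illustrative only, not compiled): migrating an unpinned BO
 * to VRAM of the first tile. "bo" is assumed to be in scope and not to
 * require any additional vm-level locking.
 *
 *    int err = xe_bo_lock(bo, true);
 *
 *    if (!err) {
 *        err = xe_bo_migrate(bo, XE_PL_VRAM0);
 *        xe_bo_unlock(bo);
 *    }
 */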

/**
 * xe_bo_evict - Evict an object to evict placement
 * @bo: The buffer object to migrate.
 * @force_alloc: Set force_alloc in ttm_operation_ctx
 *
 * On successful completion, the object memory will be moved to evict
 * placement. This function blocks until the object has been fully moved.
 *
 * Return: 0 on success. Negative error code on failure.
 */
int xe_bo_evict(struct xe_bo *bo, bool force_alloc)
{
	struct ttm_operation_ctx ctx = {
		.interruptible = false,
		.no_wait_gpu = false,
		.force_alloc = force_alloc,
	};
	struct ttm_placement placement;
	int ret;

	xe_evict_flags(&bo->ttm, &placement);
	ret = ttm_bo_validate(&bo->ttm, &placement, &ctx);
	if (ret)
		return ret;

	dma_resv_wait_timeout(bo->ttm.base.resv, DMA_RESV_USAGE_KERNEL,
			      false, MAX_SCHEDULE_TIMEOUT);

	return 0;
}

/**
 * xe_bo_needs_ccs_pages - Whether a bo needs to back up CCS pages when
 * placed in system memory.
 * @bo: The xe_bo
 *
 * Return: true if extra pages need to be allocated, false otherwise.
 */
bool xe_bo_needs_ccs_pages(struct xe_bo *bo)
{
	struct xe_device *xe = xe_bo_device(bo);

	if (!xe_device_has_flat_ccs(xe) || bo->ttm.type != ttm_bo_type_device)
		return false;

	/* On discrete GPUs, if the GPU can access this buffer from
	 * system memory (i.e., it allows XE_PL_TT placement), FlatCCS
	 * can't be used since there's no CCS storage associated with
	 * non-VRAM addresses.
	 */
	if (IS_DGFX(xe) && (bo->flags & XE_BO_FLAG_SYSTEM))
		return false;

	return true;
}

/**
 * __xe_bo_release_dummy() - Dummy kref release function
 * @kref: The embedded struct kref.
 *
 * Dummy release function for xe_bo_put_deferred(). Keep off.
 */
void __xe_bo_release_dummy(struct kref *kref)
{
}

/**
 * xe_bo_put_commit() - Put bos whose put was deferred by xe_bo_put_deferred().
 * @deferred: The lockless list used for the call to xe_bo_put_deferred().
 *
 * Puts all bos whose put was deferred by xe_bo_put_deferred().
 * The @deferred list can be either an onstack local list or a global
 * shared list used by a workqueue.
 */
void xe_bo_put_commit(struct llist_head *deferred)
{
	struct llist_node *freed;
	struct xe_bo *bo, *next;

	if (!deferred)
		return;

	freed = llist_del_all(deferred);
	if (!freed)
		return;

	llist_for_each_entry_safe(bo, next, freed, freed)
		drm_gem_object_free(&bo->ttm.base.refcount);
}
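
/*
 * Usage sketch (illustrative only, not compiled): dropping the last
 * references to a set of BOs from a context where running the final-free
 * path directly is undesirable, then committing the frees later. "bo_a" and
 * "bo_b" are assumed to be caller-owned references.
 *
 *    LLIST_HEAD(deferred);
 *
 *    xe_bo_put_deferred(bo_a, &deferred);
 *    xe_bo_put_deferred(bo_b, &deferred);
 *    ... later, from a safer context ...
 *    xe_bo_put_commit(&deferred);
 */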

/**
 * xe_bo_dumb_create - Create a dumb bo as backing for a fb
 * @file_priv: ...
 * @dev: ...
 * @args: ...
 *
 * See dumb_create() hook in include/drm/drm_drv.h
 *
 * Return: ...
 */
int xe_bo_dumb_create(struct drm_file *file_priv,
		      struct drm_device *dev,
		      struct drm_mode_create_dumb *args)
{
	struct xe_device *xe = to_xe_device(dev);
	struct xe_bo *bo;
	uint32_t handle;
	int cpp = DIV_ROUND_UP(args->bpp, 8);
	int err;
	u32 page_size = max_t(u32, PAGE_SIZE,
		xe->info.vram_flags & XE_VRAM_FLAGS_NEED64K ? SZ_64K : SZ_4K);

	args->pitch = ALIGN(args->width * cpp, 64);
	args->size = ALIGN(mul_u32_u32(args->pitch, args->height),
			   page_size);

	bo = xe_bo_create_user(xe, NULL, NULL, args->size,
			       DRM_XE_GEM_CPU_CACHING_WC,
			       ttm_bo_type_device,
			       XE_BO_FLAG_VRAM_IF_DGFX(xe_device_get_root_tile(xe)) |
			       XE_BO_FLAG_SCANOUT |
			       XE_BO_FLAG_NEEDS_CPU_ACCESS);
	if (IS_ERR(bo))
		return PTR_ERR(bo);

	err = drm_gem_handle_create(file_priv, &bo->ttm.base, &handle);
	/* drop reference from allocate - handle holds it now */
	drm_gem_object_put(&bo->ttm.base);
	if (!err)
		args->handle = handle;
	return err;
}

void xe_bo_runtime_pm_release_mmap_offset(struct xe_bo *bo)
{
	struct ttm_buffer_object *tbo = &bo->ttm;
	struct ttm_device *bdev = tbo->bdev;

	drm_vma_node_unmap(&tbo->base.vma_node, bdev->dev_mapping);

	list_del_init(&bo->vram_userfault_link);
}

#if IS_ENABLED(CONFIG_DRM_XE_KUNIT_TEST)
#include "tests/xe_bo.c"
#endif