// SPDX-License-Identifier: GPL-2.0+
/* Copyright (C) 2018 Broadcom */

/**
 * DOC: Broadcom V3D scheduling
 *
 * The shared DRM GPU scheduler is used to coordinate submitting jobs
 * to the hardware.  Each DRM fd (roughly a client process) gets its
 * own scheduler entity, which will process jobs in order.  The GPU
 * scheduler will round-robin between clients to submit the next job.
 *
 * For simplicity, and in order to keep latency low for interactive
 * jobs when bulk background jobs are queued up, we submit a new job
 * to the HW only when it has completed the last one, instead of
 * filling up the CT[01]Q FIFOs with jobs.  Similarly, we use
 * drm_sched_job_add_dependency() to manage the dependency between bin and
 * render, instead of having the clients submit jobs using the HW's
 * semaphores to interlock between them.
 */

#include <linux/sched/clock.h>
#include <linux/kthread.h>

#include <drm/drm_syncobj.h>

#include "v3d_drv.h"
#include "v3d_regs.h"
#include "v3d_trace.h"

#define V3D_CSD_CFG012_WG_COUNT_SHIFT 16

static struct v3d_job *
to_v3d_job(struct drm_sched_job *sched_job)
{
	return container_of(sched_job, struct v3d_job, base);
}

static struct v3d_bin_job *
to_bin_job(struct drm_sched_job *sched_job)
{
	return container_of(sched_job, struct v3d_bin_job, base.base);
}

static struct v3d_render_job *
to_render_job(struct drm_sched_job *sched_job)
{
	return container_of(sched_job, struct v3d_render_job, base.base);
}

static struct v3d_tfu_job *
to_tfu_job(struct drm_sched_job *sched_job)
{
	return container_of(sched_job, struct v3d_tfu_job, base.base);
}

static struct v3d_csd_job *
to_csd_job(struct drm_sched_job *sched_job)
{
	return container_of(sched_job, struct v3d_csd_job, base.base);
}

static struct v3d_cpu_job *
to_cpu_job(struct drm_sched_job *sched_job)
{
	return container_of(sched_job, struct v3d_cpu_job, base.base);
}

static void
v3d_sched_job_free(struct drm_sched_job *sched_job)
{
	struct v3d_job *job = to_v3d_job(sched_job);

	v3d_job_cleanup(job);
}

void
v3d_timestamp_query_info_free(struct v3d_timestamp_query_info *query_info,
			      unsigned int count)
{
	if (query_info->queries) {
		unsigned int i;

		for (i = 0; i < count; i++)
			drm_syncobj_put(query_info->queries[i].syncobj);

		kvfree(query_info->queries);
	}
}

static void
v3d_cpu_job_free(struct drm_sched_job *sched_job)
{
	struct v3d_cpu_job *job = to_cpu_job(sched_job);
	struct v3d_performance_query_info *performance_query = &job->performance_query;

	v3d_timestamp_query_info_free(&job->timestamp_query,
				      job->timestamp_query.count);

	if (performance_query->queries) {
		for (int i = 0; i < performance_query->count; i++)
			drm_syncobj_put(performance_query->queries[i].syncobj);
		kvfree(performance_query->queries);
	}

	v3d_job_cleanup(&job->base);
}

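/* If the job uses a different perfmon than the one currently active on
 * the GPU, stop the active one, then start the job's perfmon if it is
 * not already the active one.
 */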
static void
v3d_switch_perfmon(struct v3d_dev *v3d, struct v3d_job *job)
{
	if (job->perfmon != v3d->active_perfmon)
		v3d_perfmon_stop(v3d, v3d->active_perfmon, true);

	if (job->perfmon && v3d->active_perfmon != job->perfmon)
		v3d_perfmon_start(v3d, job->perfmon);
}

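/* Record the start timestamp of a job in both the per-fd and the
 * per-device stats for the queue it runs on.
 */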
static void
v3d_job_start_stats(struct v3d_job *job, enum v3d_queue queue)
{
	struct v3d_dev *v3d = job->v3d;
	struct v3d_file_priv *file = job->file->driver_priv;
	struct v3d_stats *global_stats = &v3d->queue[queue].stats;
	struct v3d_stats *local_stats = &file->stats[queue];
	u64 now = local_clock();

	write_seqcount_begin(&local_stats->lock);
	local_stats->start_ns = now;
	write_seqcount_end(&local_stats->lock);

	write_seqcount_begin(&global_stats->lock);
	global_stats->start_ns = now;
	write_seqcount_end(&global_stats->lock);
}

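/* Accumulate the time since start_ns as busy time and count the job as
 * completed for one stats instance.
 */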
static void
v3d_stats_update(struct v3d_stats *stats, u64 now)
{
	write_seqcount_begin(&stats->lock);
	stats->enabled_ns += now - stats->start_ns;
	stats->jobs_completed++;
	stats->start_ns = 0;
	write_seqcount_end(&stats->lock);
}

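/* Update the per-fd and per-device stats for a job that has just
 * finished on the given queue.
 */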
void
v3d_job_update_stats(struct v3d_job *job, enum v3d_queue queue)
{
	struct v3d_dev *v3d = job->v3d;
	struct v3d_file_priv *file = job->file->driver_priv;
	struct v3d_stats *global_stats = &v3d->queue[queue].stats;
	struct v3d_stats *local_stats = &file->stats[queue];
	u64 now = local_clock();

	v3d_stats_update(local_stats, now);
	v3d_stats_update(global_stats, now);
}

static struct dma_fence *v3d_bin_job_run(struct drm_sched_job *sched_job)
{
	struct v3d_bin_job *job = to_bin_job(sched_job);
	struct v3d_dev *v3d = job->base.v3d;
	struct drm_device *dev = &v3d->drm;
	struct dma_fence *fence;
	unsigned long irqflags;

	if (unlikely(job->base.base.s_fence->finished.error))
		return NULL;

	/* Lock required around bin_job update vs
	 * v3d_overflow_mem_work().
	 */
	spin_lock_irqsave(&v3d->job_lock, irqflags);
	v3d->bin_job = job;
	/* Clear out the overflow allocation, so we don't
	 * reuse the overflow attached to a previous job.
	 */
	V3D_CORE_WRITE(0, V3D_PTB_BPOS, 0);
	spin_unlock_irqrestore(&v3d->job_lock, irqflags);

	v3d_invalidate_caches(v3d);

	fence = v3d_fence_create(v3d, V3D_BIN);
	if (IS_ERR(fence))
		return NULL;

	if (job->base.irq_fence)
		dma_fence_put(job->base.irq_fence);
	job->base.irq_fence = dma_fence_get(fence);

	trace_v3d_submit_cl(dev, false, to_v3d_fence(fence)->seqno,
			    job->start, job->end);

	v3d_job_start_stats(&job->base, V3D_BIN);
	v3d_switch_perfmon(v3d, &job->base);

	/* Set the current and end address of the control list.
	 * Writing the end register is what starts the job.
	 */
	if (job->qma) {
		V3D_CORE_WRITE(0, V3D_CLE_CT0QMA, job->qma);
		V3D_CORE_WRITE(0, V3D_CLE_CT0QMS, job->qms);
	}
	if (job->qts) {
		V3D_CORE_WRITE(0, V3D_CLE_CT0QTS,
			       V3D_CLE_CT0QTS_ENABLE |
			       job->qts);
	}
	V3D_CORE_WRITE(0, V3D_CLE_CT0QBA, job->start);
	V3D_CORE_WRITE(0, V3D_CLE_CT0QEA, job->end);

	return fence;
}

static struct dma_fence *v3d_render_job_run(struct drm_sched_job *sched_job)
{
	struct v3d_render_job *job = to_render_job(sched_job);
	struct v3d_dev *v3d = job->base.v3d;
	struct drm_device *dev = &v3d->drm;
	struct dma_fence *fence;

	if (unlikely(job->base.base.s_fence->finished.error))
		return NULL;

	v3d->render_job = job;

	/* Can we avoid this flush?  We need to be careful of
	 * scheduling, though -- imagine job0 rendering to texture and
	 * job1 reading, and them being executed as bin0, bin1,
	 * render0, render1, so that render1's flush at bin time
	 * wasn't enough.
	 */
	v3d_invalidate_caches(v3d);

	fence = v3d_fence_create(v3d, V3D_RENDER);
	if (IS_ERR(fence))
		return NULL;

	if (job->base.irq_fence)
		dma_fence_put(job->base.irq_fence);
	job->base.irq_fence = dma_fence_get(fence);

	trace_v3d_submit_cl(dev, true, to_v3d_fence(fence)->seqno,
			    job->start, job->end);

	v3d_job_start_stats(&job->base, V3D_RENDER);
	v3d_switch_perfmon(v3d, &job->base);

	/* XXX: Set the QCFG */

	/* Set the current and end address of the control list.
	 * Writing the end register is what starts the job.
	 */
	V3D_CORE_WRITE(0, V3D_CLE_CT1QBA, job->start);
	V3D_CORE_WRITE(0, V3D_CLE_CT1QEA, job->end);

	return fence;
}

static struct dma_fence *
v3d_tfu_job_run(struct drm_sched_job *sched_job)
{
	struct v3d_tfu_job *job = to_tfu_job(sched_job);
	struct v3d_dev *v3d = job->base.v3d;
	struct drm_device *dev = &v3d->drm;
	struct dma_fence *fence;

	fence = v3d_fence_create(v3d, V3D_TFU);
	if (IS_ERR(fence))
		return NULL;

	v3d->tfu_job = job;
	if (job->base.irq_fence)
		dma_fence_put(job->base.irq_fence);
	job->base.irq_fence = dma_fence_get(fence);

	trace_v3d_submit_tfu(dev, to_v3d_fence(fence)->seqno);

	v3d_job_start_stats(&job->base, V3D_TFU);

	V3D_WRITE(V3D_TFU_IIA(v3d->ver), job->args.iia);
	V3D_WRITE(V3D_TFU_IIS(v3d->ver), job->args.iis);
	V3D_WRITE(V3D_TFU_ICA(v3d->ver), job->args.ica);
	V3D_WRITE(V3D_TFU_IUA(v3d->ver), job->args.iua);
	V3D_WRITE(V3D_TFU_IOA(v3d->ver), job->args.ioa);
	if (v3d->ver >= 71)
		V3D_WRITE(V3D_V7_TFU_IOC, job->args.v71.ioc);
	V3D_WRITE(V3D_TFU_IOS(v3d->ver), job->args.ios);
	V3D_WRITE(V3D_TFU_COEF0(v3d->ver), job->args.coef[0]);
	if (v3d->ver >= 71 || (job->args.coef[0] & V3D_TFU_COEF0_USECOEF)) {
		V3D_WRITE(V3D_TFU_COEF1(v3d->ver), job->args.coef[1]);
		V3D_WRITE(V3D_TFU_COEF2(v3d->ver), job->args.coef[2]);
		V3D_WRITE(V3D_TFU_COEF3(v3d->ver), job->args.coef[3]);
	}
	/* ICFG kicks off the job. */
	V3D_WRITE(V3D_TFU_ICFG(v3d->ver), job->args.icfg | V3D_TFU_ICFG_IOC);

	return fence;
}

static struct dma_fence *
v3d_csd_job_run(struct drm_sched_job *sched_job)
{
	struct v3d_csd_job *job = to_csd_job(sched_job);
	struct v3d_dev *v3d = job->base.v3d;
	struct drm_device *dev = &v3d->drm;
	struct dma_fence *fence;
	int i, csd_cfg0_reg, csd_cfg_reg_count;

	v3d->csd_job = job;

	v3d_invalidate_caches(v3d);

	fence = v3d_fence_create(v3d, V3D_CSD);
	if (IS_ERR(fence))
		return NULL;

	if (job->base.irq_fence)
		dma_fence_put(job->base.irq_fence);
	job->base.irq_fence = dma_fence_get(fence);

	trace_v3d_submit_csd(dev, to_v3d_fence(fence)->seqno);

	v3d_job_start_stats(&job->base, V3D_CSD);
	v3d_switch_perfmon(v3d, &job->base);

	csd_cfg0_reg = V3D_CSD_QUEUED_CFG0(v3d->ver);
	csd_cfg_reg_count = v3d->ver < 71 ? 6 : 7;
	for (i = 1; i <= csd_cfg_reg_count; i++)
		V3D_CORE_WRITE(0, csd_cfg0_reg + 4 * i, job->args.cfg[i]);
	/* CFG0 write kicks off the job. */
	V3D_CORE_WRITE(0, csd_cfg0_reg, job->args.cfg[0]);

	return fence;
}

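/* CPU job that reads the workgroup counts of an indirect dispatch from a
 * BO at submit time and rewrites the associated CSD job's configuration
 * (and any workgroup-count uniforms) to match.  A count of zero in any
 * dimension leaves the CSD job untouched.
 */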
static void
v3d_rewrite_csd_job_wg_counts_from_indirect(struct v3d_cpu_job *job)
{
	struct v3d_indirect_csd_info *indirect_csd = &job->indirect_csd;
	struct v3d_bo *bo = to_v3d_bo(job->base.bo[0]);
	struct v3d_bo *indirect = to_v3d_bo(indirect_csd->indirect);
	struct drm_v3d_submit_csd *args = &indirect_csd->job->args;
	u32 *wg_counts;

	v3d_get_bo_vaddr(bo);
	v3d_get_bo_vaddr(indirect);

	wg_counts = (uint32_t *)(bo->vaddr + indirect_csd->offset);

	if (wg_counts[0] == 0 || wg_counts[1] == 0 || wg_counts[2] == 0)
		return;

	args->cfg[0] = wg_counts[0] << V3D_CSD_CFG012_WG_COUNT_SHIFT;
	args->cfg[1] = wg_counts[1] << V3D_CSD_CFG012_WG_COUNT_SHIFT;
	args->cfg[2] = wg_counts[2] << V3D_CSD_CFG012_WG_COUNT_SHIFT;
	args->cfg[4] = DIV_ROUND_UP(indirect_csd->wg_size, 16) *
		       (wg_counts[0] * wg_counts[1] * wg_counts[2]) - 1;

	for (int i = 0; i < 3; i++) {
		/* 0xffffffff indicates that the uniform rewrite is not needed */
		if (indirect_csd->wg_uniform_offsets[i] != 0xffffffff) {
			u32 uniform_idx = indirect_csd->wg_uniform_offsets[i];
			((uint32_t *)indirect->vaddr)[uniform_idx] = wg_counts[i];
		}
	}

	v3d_put_bo_vaddr(indirect);
	v3d_put_bo_vaddr(bo);
}

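/* CPU job that writes the current CPU time into the first query's slot
 * (zeroing the remaining slots) and attaches the job's done fence to
 * each query's syncobj.
 */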
static void
v3d_timestamp_query(struct v3d_cpu_job *job)
{
	struct v3d_timestamp_query_info *timestamp_query = &job->timestamp_query;
	struct v3d_bo *bo = to_v3d_bo(job->base.bo[0]);
	u8 *value_addr;

	v3d_get_bo_vaddr(bo);

	for (int i = 0; i < timestamp_query->count; i++) {
		value_addr = ((u8 *)bo->vaddr) + timestamp_query->queries[i].offset;
		*((u64 *)value_addr) = i == 0 ? ktime_get_ns() : 0ull;

		drm_syncobj_replace_fence(timestamp_query->queries[i].syncobj,
					  job->base.done_fence);
	}

	v3d_put_bo_vaddr(bo);
}

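/* CPU job that zeroes the timestamp slot of each query and drops any
 * fence attached to its syncobj.
 */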
static void
v3d_reset_timestamp_queries(struct v3d_cpu_job *job)
{
	struct v3d_timestamp_query_info *timestamp_query = &job->timestamp_query;
	struct v3d_timestamp_query *queries = timestamp_query->queries;
	struct v3d_bo *bo = to_v3d_bo(job->base.bo[0]);
	u8 *value_addr;

	v3d_get_bo_vaddr(bo);

	for (int i = 0; i < timestamp_query->count; i++) {
		value_addr = ((u8 *)bo->vaddr) + queries[i].offset;
		*((u64 *)value_addr) = 0;

		drm_syncobj_replace_fence(queries[i].syncobj, NULL);
	}

	v3d_put_bo_vaddr(bo);
}

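/* Store a result at the given index in either 32-bit or 64-bit format,
 * as requested by the copy operation.
 */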
static void
write_to_buffer(void *dst, u32 idx, bool do_64bit, u64 value)
{
	if (do_64bit) {
		u64 *dst64 = (u64 *)dst;

		dst64[idx] = value;
	} else {
		u32 *dst32 = (u32 *)dst;

		dst32[idx] = (u32)value;
	}
}

static void
v3d_copy_query_results(struct v3d_cpu_job *job)
{
	struct v3d_timestamp_query_info *timestamp_query = &job->timestamp_query;
	struct v3d_timestamp_query *queries = timestamp_query->queries;
	struct v3d_bo *bo = to_v3d_bo(job->base.bo[0]);
	struct v3d_bo *timestamp = to_v3d_bo(job->base.bo[1]);
	struct v3d_copy_query_results_info *copy = &job->copy;
	struct dma_fence *fence;
	u8 *query_addr;
	bool available, write_result;
	u8 *data;
	int i;

	v3d_get_bo_vaddr(bo);
	v3d_get_bo_vaddr(timestamp);

	data = ((u8 *)bo->vaddr) + copy->offset;

	for (i = 0; i < timestamp_query->count; i++) {
		fence = drm_syncobj_fence_get(queries[i].syncobj);
		available = fence ? dma_fence_is_signaled(fence) : false;

		write_result = available || copy->do_partial;
		if (write_result) {
			query_addr = ((u8 *)timestamp->vaddr) + queries[i].offset;
			write_to_buffer(data, 0, copy->do_64bit, *((u64 *)query_addr));
		}

		if (copy->availability_bit)
			write_to_buffer(data, 1, copy->do_64bit, available ? 1u : 0u);

		data += copy->stride;

		dma_fence_put(fence);
	}

	v3d_put_bo_vaddr(timestamp);
	v3d_put_bo_vaddr(bo);
}

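/* CPU job that stops the perfmons referenced by each performance query,
 * clears their counter values and drops any fence attached to the
 * query's syncobj.
 */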
static void
v3d_reset_performance_queries(struct v3d_cpu_job *job)
{
	struct v3d_performance_query_info *performance_query = &job->performance_query;
	struct v3d_file_priv *v3d_priv = job->base.file->driver_priv;
	struct v3d_dev *v3d = job->base.v3d;
	struct v3d_perfmon *perfmon;

	for (int i = 0; i < performance_query->count; i++) {
		for (int j = 0; j < performance_query->nperfmons; j++) {
			perfmon = v3d_perfmon_find(v3d_priv,
						   performance_query->queries[i].kperfmon_ids[j]);
			if (!perfmon) {
				DRM_DEBUG("Failed to find perfmon.");
				continue;
			}

			v3d_perfmon_stop(v3d, perfmon, false);

			memset(perfmon->values, 0, perfmon->ncounters * sizeof(u64));

			v3d_perfmon_put(perfmon);
		}

		drm_syncobj_replace_fence(performance_query->queries[i].syncobj, NULL);
	}
}

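/* Collect the counter values of all perfmons attached to one performance
 * query and write them to the destination buffer.
 */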
static void
v3d_write_performance_query_result(struct v3d_cpu_job *job, void *data, u32 query)
{
	struct v3d_performance_query_info *performance_query = &job->performance_query;
	struct v3d_copy_query_results_info *copy = &job->copy;
	struct v3d_file_priv *v3d_priv = job->base.file->driver_priv;
	struct v3d_dev *v3d = job->base.v3d;
	struct v3d_perfmon *perfmon;
	u64 counter_values[V3D_PERFCNT_NUM];

	for (int i = 0; i < performance_query->nperfmons; i++) {
		perfmon = v3d_perfmon_find(v3d_priv,
					   performance_query->queries[query].kperfmon_ids[i]);
		if (!perfmon) {
			DRM_DEBUG("Failed to find perfmon.");
			continue;
		}

		v3d_perfmon_stop(v3d, perfmon, true);

		memcpy(&counter_values[i * DRM_V3D_MAX_PERF_COUNTERS], perfmon->values,
		       perfmon->ncounters * sizeof(u64));

		v3d_perfmon_put(perfmon);
	}

	for (int i = 0; i < performance_query->ncounters; i++)
		write_to_buffer(data, i, copy->do_64bit, counter_values[i]);
}

static void
v3d_copy_performance_query(struct v3d_cpu_job *job)
{
	struct v3d_performance_query_info *performance_query = &job->performance_query;
	struct v3d_copy_query_results_info *copy = &job->copy;
	struct v3d_bo *bo = to_v3d_bo(job->base.bo[0]);
	struct dma_fence *fence;
	bool available, write_result;
	u8 *data;

	v3d_get_bo_vaddr(bo);

	data = ((u8 *)bo->vaddr) + copy->offset;

	for (int i = 0; i < performance_query->count; i++) {
		fence = drm_syncobj_fence_get(performance_query->queries[i].syncobj);
		available = fence ? dma_fence_is_signaled(fence) : false;

		write_result = available || copy->do_partial;
		if (write_result)
			v3d_write_performance_query_result(job, data, i);

		if (copy->availability_bit)
			write_to_buffer(data, performance_query->ncounters,
					copy->do_64bit, available ? 1u : 0u);

		data += copy->stride;

		dma_fence_put(fence);
	}

	v3d_put_bo_vaddr(bo);
}

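/* Dispatch table mapping each CPU job type to its handler. */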
static const v3d_cpu_job_fn cpu_job_function[] = {
	[V3D_CPU_JOB_TYPE_INDIRECT_CSD] = v3d_rewrite_csd_job_wg_counts_from_indirect,
	[V3D_CPU_JOB_TYPE_TIMESTAMP_QUERY] = v3d_timestamp_query,
	[V3D_CPU_JOB_TYPE_RESET_TIMESTAMP_QUERY] = v3d_reset_timestamp_queries,
	[V3D_CPU_JOB_TYPE_COPY_TIMESTAMP_QUERY] = v3d_copy_query_results,
	[V3D_CPU_JOB_TYPE_RESET_PERFORMANCE_QUERY] = v3d_reset_performance_queries,
	[V3D_CPU_JOB_TYPE_COPY_PERFORMANCE_QUERY] = v3d_copy_performance_query,
};

static struct dma_fence *
v3d_cpu_job_run(struct drm_sched_job *sched_job)
{
	struct v3d_cpu_job *job = to_cpu_job(sched_job);
	struct v3d_dev *v3d = job->base.v3d;

	v3d->cpu_job = job;

	if (job->job_type >= ARRAY_SIZE(cpu_job_function)) {
		DRM_DEBUG_DRIVER("Unknown CPU job: %d\n", job->job_type);
		return NULL;
	}

	v3d_job_start_stats(&job->base, V3D_CPU);
	trace_v3d_cpu_job_begin(&v3d->drm, job->job_type);

	cpu_job_function[job->job_type](job);

	trace_v3d_cpu_job_end(&v3d->drm, job->job_type);
	v3d_job_update_stats(&job->base, V3D_CPU);

	return NULL;
}

static struct dma_fence *
v3d_cache_clean_job_run(struct drm_sched_job *sched_job)
{
	struct v3d_job *job = to_v3d_job(sched_job);
	struct v3d_dev *v3d = job->v3d;

	v3d_job_start_stats(job, V3D_CACHE_CLEAN);

	v3d_clean_caches(v3d);

	v3d_job_update_stats(job, V3D_CACHE_CLEAN);

	return NULL;
}

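/* Common timeout handling: stop all schedulers, bump the offending job's
 * karma, reset the GPU, then resubmit the pending jobs and restart the
 * schedulers.
 */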
static enum drm_gpu_sched_stat
v3d_gpu_reset_for_timeout(struct v3d_dev *v3d, struct drm_sched_job *sched_job)
{
	enum v3d_queue q;

	mutex_lock(&v3d->reset_lock);

	/* block scheduler */
	for (q = 0; q < V3D_MAX_QUEUES; q++)
		drm_sched_stop(&v3d->queue[q].sched, sched_job);

	if (sched_job)
		drm_sched_increase_karma(sched_job);

	/* get the GPU back into the init state */
	v3d_reset(v3d);

	for (q = 0; q < V3D_MAX_QUEUES; q++)
		drm_sched_resubmit_jobs(&v3d->queue[q].sched);

	/* Unblock schedulers and restart their jobs. */
	for (q = 0; q < V3D_MAX_QUEUES; q++) {
		drm_sched_start(&v3d->queue[q].sched, true);
	}

	mutex_unlock(&v3d->reset_lock);

	return DRM_GPU_SCHED_STAT_NOMINAL;
}

/* If the current address or return address have changed, then the GPU
 * has probably made progress and we should delay the reset.  This
 * could fail if the GPU got in an infinite loop in the CL, but that
 * is pretty unlikely outside of an i-g-t testcase.
 */
static enum drm_gpu_sched_stat
v3d_cl_job_timedout(struct drm_sched_job *sched_job, enum v3d_queue q,
		    u32 *timedout_ctca, u32 *timedout_ctra)
{
	struct v3d_job *job = to_v3d_job(sched_job);
	struct v3d_dev *v3d = job->v3d;
	u32 ctca = V3D_CORE_READ(0, V3D_CLE_CTNCA(q));
	u32 ctra = V3D_CORE_READ(0, V3D_CLE_CTNRA(q));

	if (*timedout_ctca != ctca || *timedout_ctra != ctra) {
		*timedout_ctca = ctca;
		*timedout_ctra = ctra;
		return DRM_GPU_SCHED_STAT_NOMINAL;
	}

	return v3d_gpu_reset_for_timeout(v3d, sched_job);
}

static enum drm_gpu_sched_stat
v3d_bin_job_timedout(struct drm_sched_job *sched_job)
{
	struct v3d_bin_job *job = to_bin_job(sched_job);

	return v3d_cl_job_timedout(sched_job, V3D_BIN,
				   &job->timedout_ctca, &job->timedout_ctra);
}

static enum drm_gpu_sched_stat
v3d_render_job_timedout(struct drm_sched_job *sched_job)
{
	struct v3d_render_job *job = to_render_job(sched_job);

	return v3d_cl_job_timedout(sched_job, V3D_RENDER,
				   &job->timedout_ctca, &job->timedout_ctra);
}

static enum drm_gpu_sched_stat
v3d_generic_job_timedout(struct drm_sched_job *sched_job)
{
	struct v3d_job *job = to_v3d_job(sched_job);

	return v3d_gpu_reset_for_timeout(job->v3d, sched_job);
}

static enum drm_gpu_sched_stat
v3d_csd_job_timedout(struct drm_sched_job *sched_job)
{
	struct v3d_csd_job *job = to_csd_job(sched_job);
	struct v3d_dev *v3d = job->base.v3d;
	u32 batches = V3D_CORE_READ(0, V3D_CSD_CURRENT_CFG4(v3d->ver));

	/* If we've made progress, skip reset and let the timer get
	 * rearmed.
	 */
	if (job->timedout_batches != batches) {
		job->timedout_batches = batches;
		return DRM_GPU_SCHED_STAT_NOMINAL;
	}

	return v3d_gpu_reset_for_timeout(v3d, sched_job);
}

static const struct drm_sched_backend_ops v3d_bin_sched_ops = {
	.run_job = v3d_bin_job_run,
	.timedout_job = v3d_bin_job_timedout,
	.free_job = v3d_sched_job_free,
};

static const struct drm_sched_backend_ops v3d_render_sched_ops = {
	.run_job = v3d_render_job_run,
	.timedout_job = v3d_render_job_timedout,
	.free_job = v3d_sched_job_free,
};

static const struct drm_sched_backend_ops v3d_tfu_sched_ops = {
	.run_job = v3d_tfu_job_run,
	.timedout_job = v3d_generic_job_timedout,
	.free_job = v3d_sched_job_free,
};

static const struct drm_sched_backend_ops v3d_csd_sched_ops = {
	.run_job = v3d_csd_job_run,
	.timedout_job = v3d_csd_job_timedout,
	.free_job = v3d_sched_job_free
};

static const struct drm_sched_backend_ops v3d_cache_clean_sched_ops = {
	.run_job = v3d_cache_clean_job_run,
	.timedout_job = v3d_generic_job_timedout,
	.free_job = v3d_sched_job_free
};

static const struct drm_sched_backend_ops v3d_cpu_sched_ops = {
	.run_job = v3d_cpu_job_run,
	.timedout_job = v3d_generic_job_timedout,
	.free_job = v3d_cpu_job_free
};

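/* Create one scheduler per queue.  The CSD and cache-clean queues are
 * only set up on hardware that has a CSD unit.
 */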
int
v3d_sched_init(struct v3d_dev *v3d)
{
	int hw_jobs_limit = 1;
	int job_hang_limit = 0;
	int hang_limit_ms = 500;
	int ret;

	ret = drm_sched_init(&v3d->queue[V3D_BIN].sched,
			     &v3d_bin_sched_ops, NULL,
			     DRM_SCHED_PRIORITY_COUNT,
			     hw_jobs_limit, job_hang_limit,
			     msecs_to_jiffies(hang_limit_ms), NULL,
			     NULL, "v3d_bin", v3d->drm.dev);
	if (ret)
		return ret;

	ret = drm_sched_init(&v3d->queue[V3D_RENDER].sched,
			     &v3d_render_sched_ops, NULL,
			     DRM_SCHED_PRIORITY_COUNT,
			     hw_jobs_limit, job_hang_limit,
			     msecs_to_jiffies(hang_limit_ms), NULL,
			     NULL, "v3d_render", v3d->drm.dev);
	if (ret)
		goto fail;

	ret = drm_sched_init(&v3d->queue[V3D_TFU].sched,
			     &v3d_tfu_sched_ops, NULL,
			     DRM_SCHED_PRIORITY_COUNT,
			     hw_jobs_limit, job_hang_limit,
			     msecs_to_jiffies(hang_limit_ms), NULL,
			     NULL, "v3d_tfu", v3d->drm.dev);
	if (ret)
		goto fail;

	if (v3d_has_csd(v3d)) {
		ret = drm_sched_init(&v3d->queue[V3D_CSD].sched,
				     &v3d_csd_sched_ops, NULL,
				     DRM_SCHED_PRIORITY_COUNT,
				     hw_jobs_limit, job_hang_limit,
				     msecs_to_jiffies(hang_limit_ms), NULL,
				     NULL, "v3d_csd", v3d->drm.dev);
		if (ret)
			goto fail;

		ret = drm_sched_init(&v3d->queue[V3D_CACHE_CLEAN].sched,
				     &v3d_cache_clean_sched_ops, NULL,
				     DRM_SCHED_PRIORITY_COUNT,
				     hw_jobs_limit, job_hang_limit,
				     msecs_to_jiffies(hang_limit_ms), NULL,
				     NULL, "v3d_cache_clean", v3d->drm.dev);
		if (ret)
			goto fail;
	}

	ret = drm_sched_init(&v3d->queue[V3D_CPU].sched,
			     &v3d_cpu_sched_ops, NULL,
			     DRM_SCHED_PRIORITY_COUNT,
			     1, job_hang_limit,
			     msecs_to_jiffies(hang_limit_ms), NULL,
			     NULL, "v3d_cpu", v3d->drm.dev);
	if (ret)
		goto fail;

	return 0;

fail:
	v3d_sched_fini(v3d);
	return ret;
}

void
v3d_sched_fini(struct v3d_dev *v3d)
{
	enum v3d_queue q;

	for (q = 0; q < V3D_MAX_QUEUES; q++) {
		if (v3d->queue[q].sched.ready)
			drm_sched_fini(&v3d->queue[q].sched);
	}
}