1 /*
2  * Copyright © 2016 Intel Corporation
3  *
4  * Permission is hereby granted, free of charge, to any person obtaining a
5  * copy of this software and associated documentation files (the "Software"),
6  * to deal in the Software without restriction, including without limitation
7  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8  * and/or sell copies of the Software, and to permit persons to whom the
9  * Software is furnished to do so, subject to the following conditions:
10  *
11  * The above copyright notice and this permission notice (including the next
12  * paragraph) shall be included in all copies or substantial portions of the
13  * Software.
14  *
15  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
18  * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19  * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20  * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
21  * IN THE SOFTWARE.
22  *
23  */
24 
25 #include <drm/drm_print.h>
26 
27 #include "gem/i915_gem_context.h"
28 
29 #include "i915_drv.h"
30 
31 #include "intel_context.h"
32 #include "intel_engine.h"
33 #include "intel_engine_pm.h"
34 #include "intel_engine_user.h"
35 #include "intel_gt.h"
36 #include "intel_gt_requests.h"
37 #include "intel_gt_pm.h"
38 #include "intel_lrc.h"
39 #include "intel_reset.h"
40 #include "intel_ring.h"
41 
/* Haswell does have the CXT_SIZE register, but it does not appear to be
 * valid. The docs do, however, describe in dwords what is in the context
 * object. The full size is 70720 bytes; since the power context and the
 * execlist context will never be saved (the power context is stored
 * elsewhere, and execlists don't work on HSW), the final size, including
 * the extra state required for the Resource Streamer, is 66944 bytes,
 * which rounds up to 17 pages.
 */
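/* 66944 bytes / 4096 bytes per page ~= 16.34, hence 17 whole pages. */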
49 #define HSW_CXT_TOTAL_SIZE		(17 * PAGE_SIZE)
50 
51 #define DEFAULT_LR_CONTEXT_RENDER_SIZE	(22 * PAGE_SIZE)
52 #define GEN8_LR_CONTEXT_RENDER_SIZE	(20 * PAGE_SIZE)
53 #define GEN9_LR_CONTEXT_RENDER_SIZE	(22 * PAGE_SIZE)
54 #define GEN10_LR_CONTEXT_RENDER_SIZE	(18 * PAGE_SIZE)
55 #define GEN11_LR_CONTEXT_RENDER_SIZE	(14 * PAGE_SIZE)
56 
57 #define GEN8_LR_CONTEXT_OTHER_SIZE	( 2 * PAGE_SIZE)
58 
59 #define MAX_MMIO_BASES 3
60 struct engine_info {
61 	unsigned int hw_id;
62 	u8 class;
63 	u8 instance;
64 	/* mmio bases table *must* be sorted in reverse gen order */
65 	struct engine_mmio_base {
66 		u32 gen : 8;
67 		u32 base : 24;
68 	} mmio_bases[MAX_MMIO_BASES];
69 };
70 
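/*
 * Static table of every engine the driver knows about, indexed by
 * intel_engine_id. Only the engines present in the platform's engine_mask
 * are actually instantiated; see intel_engines_init_mmio().
 */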
71 static const struct engine_info intel_engines[] = {
72 	[RCS0] = {
73 		.hw_id = RCS0_HW,
74 		.class = RENDER_CLASS,
75 		.instance = 0,
76 		.mmio_bases = {
77 			{ .gen = 1, .base = RENDER_RING_BASE }
78 		},
79 	},
80 	[BCS0] = {
81 		.hw_id = BCS0_HW,
82 		.class = COPY_ENGINE_CLASS,
83 		.instance = 0,
84 		.mmio_bases = {
85 			{ .gen = 6, .base = BLT_RING_BASE }
86 		},
87 	},
88 	[VCS0] = {
89 		.hw_id = VCS0_HW,
90 		.class = VIDEO_DECODE_CLASS,
91 		.instance = 0,
92 		.mmio_bases = {
93 			{ .gen = 11, .base = GEN11_BSD_RING_BASE },
94 			{ .gen = 6, .base = GEN6_BSD_RING_BASE },
95 			{ .gen = 4, .base = BSD_RING_BASE }
96 		},
97 	},
98 	[VCS1] = {
99 		.hw_id = VCS1_HW,
100 		.class = VIDEO_DECODE_CLASS,
101 		.instance = 1,
102 		.mmio_bases = {
103 			{ .gen = 11, .base = GEN11_BSD2_RING_BASE },
104 			{ .gen = 8, .base = GEN8_BSD2_RING_BASE }
105 		},
106 	},
107 	[VCS2] = {
108 		.hw_id = VCS2_HW,
109 		.class = VIDEO_DECODE_CLASS,
110 		.instance = 2,
111 		.mmio_bases = {
112 			{ .gen = 11, .base = GEN11_BSD3_RING_BASE }
113 		},
114 	},
115 	[VCS3] = {
116 		.hw_id = VCS3_HW,
117 		.class = VIDEO_DECODE_CLASS,
118 		.instance = 3,
119 		.mmio_bases = {
120 			{ .gen = 11, .base = GEN11_BSD4_RING_BASE }
121 		},
122 	},
123 	[VECS0] = {
124 		.hw_id = VECS0_HW,
125 		.class = VIDEO_ENHANCEMENT_CLASS,
126 		.instance = 0,
127 		.mmio_bases = {
128 			{ .gen = 11, .base = GEN11_VEBOX_RING_BASE },
129 			{ .gen = 7, .base = VEBOX_RING_BASE }
130 		},
131 	},
132 	[VECS1] = {
133 		.hw_id = VECS1_HW,
134 		.class = VIDEO_ENHANCEMENT_CLASS,
135 		.instance = 1,
136 		.mmio_bases = {
137 			{ .gen = 11, .base = GEN11_VEBOX2_RING_BASE }
138 		},
139 	},
140 };
141 
142 /**
143  * intel_engine_context_size() - return the size of the context for an engine
144  * @gt: the gt
145  * @class: engine class
146  *
147  * Each engine class may require a different amount of space for a context
148  * image.
149  *
150  * Return: size (in bytes) of an engine class specific context image
151  *
152  * Note: this size includes the HWSP, which is part of the context image
153  * in LRC mode, but does not include the "shared data page" used with
154  * GuC submission. The caller should account for this if using the GuC.
155  */
156 u32 intel_engine_context_size(struct intel_gt *gt, u8 class)
157 {
158 	struct intel_uncore *uncore = gt->uncore;
159 	u32 cxt_size;
160 
161 	BUILD_BUG_ON(I915_GTT_PAGE_SIZE != PAGE_SIZE);
162 
163 	switch (class) {
164 	case RENDER_CLASS:
165 		switch (INTEL_GEN(gt->i915)) {
166 		default:
167 			MISSING_CASE(INTEL_GEN(gt->i915));
168 			return DEFAULT_LR_CONTEXT_RENDER_SIZE;
169 		case 12:
170 		case 11:
171 			return GEN11_LR_CONTEXT_RENDER_SIZE;
172 		case 10:
173 			return GEN10_LR_CONTEXT_RENDER_SIZE;
174 		case 9:
175 			return GEN9_LR_CONTEXT_RENDER_SIZE;
176 		case 8:
177 			return GEN8_LR_CONTEXT_RENDER_SIZE;
178 		case 7:
179 			if (IS_HASWELL(gt->i915))
180 				return HSW_CXT_TOTAL_SIZE;
181 
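			/*
			 * The CXT_SIZE registers report sizes in 64-byte
			 * units, hence the scaling to bytes before rounding
			 * up to whole pages.
			 */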
182 			cxt_size = intel_uncore_read(uncore, GEN7_CXT_SIZE);
183 			return round_up(GEN7_CXT_TOTAL_SIZE(cxt_size) * 64,
184 					PAGE_SIZE);
185 		case 6:
186 			cxt_size = intel_uncore_read(uncore, CXT_SIZE);
187 			return round_up(GEN6_CXT_TOTAL_SIZE(cxt_size) * 64,
188 					PAGE_SIZE);
189 		case 5:
190 		case 4:
			/*
			 * There is a discrepancy here between the size reported
			 * by the register and the size of the context layout
			 * in the docs. Both are described as authoritative!
			 *
			 * The discrepancy is on the order of a few cachelines,
			 * but the total is under one page (4k), which is our
			 * minimum allocation anyway so it should all come
			 * out in the wash.
			 */
201 			cxt_size = intel_uncore_read(uncore, CXT_SIZE) + 1;
202 			drm_dbg(&gt->i915->drm,
203 				"gen%d CXT_SIZE = %d bytes [0x%08x]\n",
204 				INTEL_GEN(gt->i915), cxt_size * 64,
205 				cxt_size - 1);
206 			return round_up(cxt_size * 64, PAGE_SIZE);
207 		case 3:
208 		case 2:
209 		/* For the special day when i810 gets merged. */
210 		case 1:
211 			return 0;
212 		}
213 		break;
214 	default:
215 		MISSING_CASE(class);
216 		/* fall through */
217 	case VIDEO_DECODE_CLASS:
218 	case VIDEO_ENHANCEMENT_CLASS:
219 	case COPY_ENGINE_CLASS:
220 		if (INTEL_GEN(gt->i915) < 8)
221 			return 0;
222 		return GEN8_LR_CONTEXT_OTHER_SIZE;
223 	}
224 }
225 
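/*
 * Pick the first entry in the engine's mmio_bases table whose gen is not
 * newer than the running platform (the table is sorted newest first). For
 * example, VCS0 on a gen9 platform has { gen 11, gen 6, gen 4 } entries:
 * gen9 fails the >= 11 test but matches >= 6, so GEN6_BSD_RING_BASE is used.
 */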
226 static u32 __engine_mmio_base(struct drm_i915_private *i915,
227 			      const struct engine_mmio_base *bases)
228 {
229 	int i;
230 
231 	for (i = 0; i < MAX_MMIO_BASES; i++)
232 		if (INTEL_GEN(i915) >= bases[i].gen)
233 			break;
234 
235 	GEM_BUG_ON(i == MAX_MMIO_BASES);
236 	GEM_BUG_ON(!bases[i].base);
237 
238 	return bases[i].base;
239 }
240 
241 static void __sprint_engine_name(struct intel_engine_cs *engine)
242 {
243 	/*
244 	 * Before we know what the uABI name for this engine will be,
245 	 * we still would like to keep track of this engine in the debug logs.
246 	 * We throw in a ' here as a reminder that this isn't its final name.
247 	 */
248 	GEM_WARN_ON(snprintf(engine->name, sizeof(engine->name), "%s'%u",
249 			     intel_engine_class_repr(engine->class),
250 			     engine->instance) >= sizeof(engine->name));
251 }
252 
253 void intel_engine_set_hwsp_writemask(struct intel_engine_cs *engine, u32 mask)
254 {
255 	/*
256 	 * Though they added more rings on g4x/ilk, they did not add
257 	 * per-engine HWSTAM until gen6.
258 	 */
259 	if (INTEL_GEN(engine->i915) < 6 && engine->class != RENDER_CLASS)
260 		return;
261 
262 	if (INTEL_GEN(engine->i915) >= 3)
263 		ENGINE_WRITE(engine, RING_HWSTAM, mask);
264 	else
265 		ENGINE_WRITE16(engine, RING_HWSTAM, mask);
266 }
267 
268 static void intel_engine_sanitize_mmio(struct intel_engine_cs *engine)
269 {
270 	/* Mask off all writes into the unknown HWSP */
271 	intel_engine_set_hwsp_writemask(engine, ~0u);
272 }
273 
274 static int intel_engine_setup(struct intel_gt *gt, enum intel_engine_id id)
275 {
276 	const struct engine_info *info = &intel_engines[id];
277 	struct drm_i915_private *i915 = gt->i915;
278 	struct intel_engine_cs *engine;
279 
280 	BUILD_BUG_ON(MAX_ENGINE_CLASS >= BIT(GEN11_ENGINE_CLASS_WIDTH));
281 	BUILD_BUG_ON(MAX_ENGINE_INSTANCE >= BIT(GEN11_ENGINE_INSTANCE_WIDTH));
282 
283 	if (GEM_DEBUG_WARN_ON(id >= ARRAY_SIZE(gt->engine)))
284 		return -EINVAL;
285 
286 	if (GEM_DEBUG_WARN_ON(info->class > MAX_ENGINE_CLASS))
287 		return -EINVAL;
288 
289 	if (GEM_DEBUG_WARN_ON(info->instance > MAX_ENGINE_INSTANCE))
290 		return -EINVAL;
291 
292 	if (GEM_DEBUG_WARN_ON(gt->engine_class[info->class][info->instance]))
293 		return -EINVAL;
294 
295 	engine = kzalloc(sizeof(*engine), GFP_KERNEL);
296 	if (!engine)
297 		return -ENOMEM;
298 
299 	BUILD_BUG_ON(BITS_PER_TYPE(engine->mask) < I915_NUM_ENGINES);
300 
301 	engine->id = id;
302 	engine->legacy_idx = INVALID_ENGINE;
303 	engine->mask = BIT(id);
304 	engine->i915 = i915;
305 	engine->gt = gt;
306 	engine->uncore = gt->uncore;
307 	engine->hw_id = engine->guc_id = info->hw_id;
308 	engine->mmio_base = __engine_mmio_base(i915, info->mmio_bases);
309 
310 	engine->class = info->class;
311 	engine->instance = info->instance;
312 	__sprint_engine_name(engine);
313 
314 	engine->props.heartbeat_interval_ms =
315 		CONFIG_DRM_I915_HEARTBEAT_INTERVAL;
316 	engine->props.max_busywait_duration_ns =
317 		CONFIG_DRM_I915_MAX_REQUEST_BUSYWAIT;
318 	engine->props.preempt_timeout_ms =
319 		CONFIG_DRM_I915_PREEMPT_TIMEOUT;
320 	engine->props.stop_timeout_ms =
321 		CONFIG_DRM_I915_STOP_TIMEOUT;
322 	engine->props.timeslice_duration_ms =
323 		CONFIG_DRM_I915_TIMESLICE_DURATION;
324 
325 	/* Override to uninterruptible for OpenCL workloads. */
326 	if (INTEL_GEN(i915) == 12 && engine->class == RENDER_CLASS)
327 		engine->props.preempt_timeout_ms = 0;
328 
329 	engine->defaults = engine->props; /* never to change again */
330 
331 	engine->context_size = intel_engine_context_size(gt, engine->class);
332 	if (WARN_ON(engine->context_size > BIT(20)))
333 		engine->context_size = 0;
334 	if (engine->context_size)
335 		DRIVER_CAPS(i915)->has_logical_contexts = true;
336 
337 	/* Nothing to do here, execute in order of dependencies */
338 	engine->schedule = NULL;
339 
340 	ewma__engine_latency_init(&engine->latency);
341 	seqlock_init(&engine->stats.lock);
342 
343 	ATOMIC_INIT_NOTIFIER_HEAD(&engine->context_status_notifier);
344 
345 	/* Scrub mmio state on takeover */
346 	intel_engine_sanitize_mmio(engine);
347 
348 	gt->engine_class[info->class][info->instance] = engine;
349 	gt->engine[id] = engine;
350 
351 	return 0;
352 }
353 
354 static void __setup_engine_capabilities(struct intel_engine_cs *engine)
355 {
356 	struct drm_i915_private *i915 = engine->i915;
357 
358 	if (engine->class == VIDEO_DECODE_CLASS) {
		/*
		 * HEVC support is present on the first engine instance
		 * before Gen11 and on all instances afterwards.
		 */
363 		if (INTEL_GEN(i915) >= 11 ||
364 		    (INTEL_GEN(i915) >= 9 && engine->instance == 0))
365 			engine->uabi_capabilities |=
366 				I915_VIDEO_CLASS_CAPABILITY_HEVC;
367 
368 		/*
369 		 * SFC block is present only on even logical engine
370 		 * instances.
371 		 */
372 		if ((INTEL_GEN(i915) >= 11 &&
373 		     RUNTIME_INFO(i915)->vdbox_sfc_access & engine->mask) ||
374 		    (INTEL_GEN(i915) >= 9 && engine->instance == 0))
375 			engine->uabi_capabilities |=
376 				I915_VIDEO_AND_ENHANCE_CLASS_CAPABILITY_SFC;
377 	} else if (engine->class == VIDEO_ENHANCEMENT_CLASS) {
378 		if (INTEL_GEN(i915) >= 9)
379 			engine->uabi_capabilities |=
380 				I915_VIDEO_AND_ENHANCE_CLASS_CAPABILITY_SFC;
381 	}
382 }
383 
384 static void intel_setup_engine_capabilities(struct intel_gt *gt)
385 {
386 	struct intel_engine_cs *engine;
387 	enum intel_engine_id id;
388 
389 	for_each_engine(engine, gt, id)
390 		__setup_engine_capabilities(engine);
391 }
392 
393 /**
394  * intel_engines_release() - free the resources allocated for Command Streamers
395  * @gt: pointer to struct intel_gt
396  */
397 void intel_engines_release(struct intel_gt *gt)
398 {
399 	struct intel_engine_cs *engine;
400 	enum intel_engine_id id;
401 
402 	/*
403 	 * Before we release the resources held by engine, we must be certain
404 	 * that the HW is no longer accessing them -- having the GPU scribble
405 	 * to or read from a page being used for something else causes no end
406 	 * of fun.
407 	 *
408 	 * The GPU should be reset by this point, but assume the worst just
409 	 * in case we aborted before completely initialising the engines.
410 	 */
411 	GEM_BUG_ON(intel_gt_pm_is_awake(gt));
412 	if (!INTEL_INFO(gt->i915)->gpu_reset_clobbers_display)
413 		__intel_gt_reset(gt, ALL_ENGINES);
414 
415 	/* Decouple the backend; but keep the layout for late GPU resets */
416 	for_each_engine(engine, gt, id) {
417 		intel_wakeref_wait_for_idle(&engine->wakeref);
418 		GEM_BUG_ON(intel_engine_pm_is_awake(engine));
419 
420 		if (!engine->release)
421 			continue;
422 
423 		engine->release(engine);
424 		engine->release = NULL;
425 
426 		memset(&engine->reset, 0, sizeof(engine->reset));
427 	}
428 }
429 
430 void intel_engine_free_request_pool(struct intel_engine_cs *engine)
431 {
432 	if (!engine->request_pool)
433 		return;
434 
435 	kmem_cache_free(i915_request_slab_cache(), engine->request_pool);
436 }
437 
438 void intel_engines_free(struct intel_gt *gt)
439 {
440 	struct intel_engine_cs *engine;
441 	enum intel_engine_id id;
442 
443 	/* Free the requests! dma-resv keeps fences around for an eternity */
444 	rcu_barrier();
445 
446 	for_each_engine(engine, gt, id) {
447 		intel_engine_free_request_pool(engine);
448 		kfree(engine);
449 		gt->engine[id] = NULL;
450 	}
451 }
452 
453 /**
454  * intel_engines_init_mmio() - allocate and prepare the Engine Command Streamers
455  * @gt: pointer to struct intel_gt
456  *
457  * Return: non-zero if the initialization failed.
458  */
459 int intel_engines_init_mmio(struct intel_gt *gt)
460 {
461 	struct drm_i915_private *i915 = gt->i915;
462 	struct intel_device_info *device_info = mkwrite_device_info(i915);
463 	const unsigned int engine_mask = INTEL_INFO(i915)->engine_mask;
464 	unsigned int mask = 0;
465 	unsigned int i;
466 	int err;
467 
468 	drm_WARN_ON(&i915->drm, engine_mask == 0);
469 	drm_WARN_ON(&i915->drm, engine_mask &
470 		    GENMASK(BITS_PER_TYPE(mask) - 1, I915_NUM_ENGINES));
471 
472 	if (i915_inject_probe_failure(i915))
473 		return -ENODEV;
474 
475 	for (i = 0; i < ARRAY_SIZE(intel_engines); i++) {
476 		if (!HAS_ENGINE(i915, i))
477 			continue;
478 
479 		err = intel_engine_setup(gt, i);
480 		if (err)
481 			goto cleanup;
482 
483 		mask |= BIT(i);
484 	}
485 
	/*
	 * Catch failures to update the intel_engines table when new engines
	 * are added to the driver: warn and disable the forgotten engines.
	 */
491 	if (drm_WARN_ON(&i915->drm, mask != engine_mask))
492 		device_info->engine_mask = mask;
493 
494 	RUNTIME_INFO(i915)->num_engines = hweight32(mask);
495 
496 	intel_gt_check_and_clear_faults(gt);
497 
498 	intel_setup_engine_capabilities(gt);
499 
500 	return 0;
501 
502 cleanup:
503 	intel_engines_free(gt);
504 	return err;
505 }
506 
507 void intel_engine_init_execlists(struct intel_engine_cs *engine)
508 {
509 	struct intel_engine_execlists * const execlists = &engine->execlists;
510 
511 	execlists->port_mask = 1;
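	/* A port_mask of 1 corresponds to the default pair of ELSP submission ports */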
512 	GEM_BUG_ON(!is_power_of_2(execlists_num_ports(execlists)));
513 	GEM_BUG_ON(execlists_num_ports(execlists) > EXECLIST_MAX_PORTS);
514 
515 	memset(execlists->pending, 0, sizeof(execlists->pending));
516 	execlists->active =
517 		memset(execlists->inflight, 0, sizeof(execlists->inflight));
518 
519 	execlists->queue_priority_hint = INT_MIN;
520 	execlists->queue = RB_ROOT_CACHED;
521 }
522 
523 static void cleanup_status_page(struct intel_engine_cs *engine)
524 {
525 	struct i915_vma *vma;
526 
527 	/* Prevent writes into HWSP after returning the page to the system */
528 	intel_engine_set_hwsp_writemask(engine, ~0u);
529 
530 	vma = fetch_and_zero(&engine->status_page.vma);
531 	if (!vma)
532 		return;
533 
534 	if (!HWS_NEEDS_PHYSICAL(engine->i915))
535 		i915_vma_unpin(vma);
536 
537 	i915_gem_object_unpin_map(vma->obj);
538 	i915_gem_object_put(vma->obj);
539 }
540 
541 static int pin_ggtt_status_page(struct intel_engine_cs *engine,
542 				struct i915_vma *vma)
543 {
544 	unsigned int flags;
545 
546 	if (!HAS_LLC(engine->i915) && i915_ggtt_has_aperture(engine->gt->ggtt))
547 		/*
548 		 * On g33, we cannot place HWS above 256MiB, so
549 		 * restrict its pinning to the low mappable arena.
550 		 * Though this restriction is not documented for
551 		 * gen4, gen5, or byt, they also behave similarly
552 		 * and hang if the HWS is placed at the top of the
553 		 * GTT. To generalise, it appears that all !llc
554 		 * platforms have issues with us placing the HWS
555 		 * above the mappable region (even though we never
556 		 * actually map it).
557 		 */
558 		flags = PIN_MAPPABLE;
559 	else
560 		flags = PIN_HIGH;
561 
562 	return i915_ggtt_pin(vma, 0, flags);
563 }
564 
565 static int init_status_page(struct intel_engine_cs *engine)
566 {
567 	struct drm_i915_gem_object *obj;
568 	struct i915_vma *vma;
569 	void *vaddr;
570 	int ret;
571 
572 	/*
573 	 * Though the HWS register does support 36bit addresses, historically
574 	 * we have had hangs and corruption reported due to wild writes if
575 	 * the HWS is placed above 4G. We only allow objects to be allocated
576 	 * in GFP_DMA32 for i965, and no earlier physical address users had
577 	 * access to more than 4G.
578 	 */
579 	obj = i915_gem_object_create_internal(engine->i915, PAGE_SIZE);
580 	if (IS_ERR(obj)) {
581 		drm_err(&engine->i915->drm,
582 			"Failed to allocate status page\n");
583 		return PTR_ERR(obj);
584 	}
585 
586 	i915_gem_object_set_cache_coherency(obj, I915_CACHE_LLC);
587 
588 	vma = i915_vma_instance(obj, &engine->gt->ggtt->vm, NULL);
589 	if (IS_ERR(vma)) {
590 		ret = PTR_ERR(vma);
591 		goto err;
592 	}
593 
594 	vaddr = i915_gem_object_pin_map(obj, I915_MAP_WB);
595 	if (IS_ERR(vaddr)) {
596 		ret = PTR_ERR(vaddr);
597 		goto err;
598 	}
599 
600 	engine->status_page.addr = memset(vaddr, 0, PAGE_SIZE);
601 	engine->status_page.vma = vma;
602 
603 	if (!HWS_NEEDS_PHYSICAL(engine->i915)) {
604 		ret = pin_ggtt_status_page(engine, vma);
605 		if (ret)
606 			goto err_unpin;
607 	}
608 
609 	return 0;
610 
611 err_unpin:
612 	i915_gem_object_unpin_map(obj);
613 err:
614 	i915_gem_object_put(obj);
615 	return ret;
616 }
617 
618 static int engine_setup_common(struct intel_engine_cs *engine)
619 {
620 	int err;
621 
622 	init_llist_head(&engine->barrier_tasks);
623 
624 	err = init_status_page(engine);
625 	if (err)
626 		return err;
627 
628 	intel_engine_init_active(engine, ENGINE_PHYSICAL);
629 	intel_engine_init_breadcrumbs(engine);
630 	intel_engine_init_execlists(engine);
631 	intel_engine_init_cmd_parser(engine);
632 	intel_engine_init__pm(engine);
633 	intel_engine_init_retire(engine);
634 
635 	/* Use the whole device by default */
636 	engine->sseu =
637 		intel_sseu_from_device_info(&RUNTIME_INFO(engine->i915)->sseu);
638 
639 	intel_engine_init_workarounds(engine);
640 	intel_engine_init_whitelist(engine);
641 	intel_engine_init_ctx_wa(engine);
642 
643 	return 0;
644 }
645 
646 struct measure_breadcrumb {
647 	struct i915_request rq;
648 	struct intel_ring ring;
649 	u32 cs[2048];
650 };
651 
652 static int measure_breadcrumb_dw(struct intel_context *ce)
653 {
654 	struct intel_engine_cs *engine = ce->engine;
655 	struct measure_breadcrumb *frame;
656 	int dw;
657 
658 	GEM_BUG_ON(!engine->gt->scratch);
659 
660 	frame = kzalloc(sizeof(*frame), GFP_KERNEL);
661 	if (!frame)
662 		return -ENOMEM;
663 
664 	frame->rq.i915 = engine->i915;
665 	frame->rq.engine = engine;
666 	frame->rq.context = ce;
667 	rcu_assign_pointer(frame->rq.timeline, ce->timeline);
668 
669 	frame->ring.vaddr = frame->cs;
670 	frame->ring.size = sizeof(frame->cs);
671 	frame->ring.wrap =
672 		BITS_PER_TYPE(frame->ring.size) - ilog2(frame->ring.size);
673 	frame->ring.effective_size = frame->ring.size;
674 	intel_ring_update_space(&frame->ring);
675 	frame->rq.ring = &frame->ring;
676 
677 	mutex_lock(&ce->timeline->mutex);
678 	spin_lock_irq(&engine->active.lock);
679 
680 	dw = engine->emit_fini_breadcrumb(&frame->rq, frame->cs) - frame->cs;
681 
682 	spin_unlock_irq(&engine->active.lock);
683 	mutex_unlock(&ce->timeline->mutex);
684 
685 	GEM_BUG_ON(dw & 1); /* RING_TAIL must be qword aligned */
686 
687 	kfree(frame);
688 	return dw;
689 }
690 
691 void
692 intel_engine_init_active(struct intel_engine_cs *engine, unsigned int subclass)
693 {
694 	INIT_LIST_HEAD(&engine->active.requests);
695 	INIT_LIST_HEAD(&engine->active.hold);
696 
697 	spin_lock_init(&engine->active.lock);
698 	lockdep_set_subclass(&engine->active.lock, subclass);
699 
700 	/*
701 	 * Due to an interesting quirk in lockdep's internal debug tracking,
702 	 * after setting a subclass we must ensure the lock is used. Otherwise,
703 	 * nr_unused_locks is incremented once too often.
704 	 */
705 #ifdef CONFIG_DEBUG_LOCK_ALLOC
706 	local_irq_disable();
707 	lock_map_acquire(&engine->active.lock.dep_map);
708 	lock_map_release(&engine->active.lock.dep_map);
709 	local_irq_enable();
710 #endif
711 }
712 
713 static struct intel_context *
714 create_kernel_context(struct intel_engine_cs *engine)
715 {
716 	static struct lock_class_key kernel;
717 	struct intel_context *ce;
718 	int err;
719 
720 	ce = intel_context_create(engine);
721 	if (IS_ERR(ce))
722 		return ce;
723 
724 	__set_bit(CONTEXT_BARRIER_BIT, &ce->flags);
725 
726 	err = intel_context_pin(ce); /* perma-pin so it is always available */
727 	if (err) {
728 		intel_context_put(ce);
729 		return ERR_PTR(err);
730 	}
731 
732 	/*
733 	 * Give our perma-pinned kernel timelines a separate lockdep class,
734 	 * so that we can use them from within the normal user timelines
735 	 * should we need to inject GPU operations during their request
736 	 * construction.
737 	 */
738 	lockdep_set_class(&ce->timeline->mutex, &kernel);
739 
740 	return ce;
741 }
742 
/**
 * engine_init_common - initialize engine state which might require hw access
 * @engine: Engine to initialize.
 *
 * Initializes @engine structure members shared between legacy and execlists
 * submission modes which do require hardware access.
 *
 * Typically done at later stages of submission mode specific engine setup.
 *
 * Returns zero on success or an error code on failure.
 */
754 static int engine_init_common(struct intel_engine_cs *engine)
755 {
756 	struct intel_context *ce;
757 	int ret;
758 
759 	engine->set_default_submission(engine);
760 
761 	/*
762 	 * We may need to do things with the shrinker which
763 	 * require us to immediately switch back to the default
764 	 * context. This can cause a problem as pinning the
765 	 * default context also requires GTT space which may not
766 	 * be available. To avoid this we always pin the default
767 	 * context.
768 	 */
769 	ce = create_kernel_context(engine);
770 	if (IS_ERR(ce))
771 		return PTR_ERR(ce);
772 
773 	ret = measure_breadcrumb_dw(ce);
774 	if (ret < 0)
775 		goto err_context;
776 
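	/*
	 * Cache the measured size so that each request can reserve enough
	 * ring space for its final breadcrumb.
	 */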
777 	engine->emit_fini_breadcrumb_dw = ret;
778 	engine->kernel_context = ce;
779 
780 	return 0;
781 
782 err_context:
783 	intel_context_put(ce);
784 	return ret;
785 }
786 
787 int intel_engines_init(struct intel_gt *gt)
788 {
789 	int (*setup)(struct intel_engine_cs *engine);
790 	struct intel_engine_cs *engine;
791 	enum intel_engine_id id;
792 	int err;
793 
794 	if (HAS_EXECLISTS(gt->i915))
795 		setup = intel_execlists_submission_setup;
796 	else
797 		setup = intel_ring_submission_setup;
798 
799 	for_each_engine(engine, gt, id) {
800 		err = engine_setup_common(engine);
801 		if (err)
802 			return err;
803 
804 		err = setup(engine);
805 		if (err)
806 			return err;
807 
808 		err = engine_init_common(engine);
809 		if (err)
810 			return err;
811 
812 		intel_engine_add_user(engine);
813 	}
814 
815 	return 0;
816 }
817 
/**
 * intel_engine_cleanup_common - cleans up the engine state created by
 *                               the common initializers.
 * @engine: Engine to cleanup.
 *
 * This cleans up everything created by the common helpers.
 */
825 void intel_engine_cleanup_common(struct intel_engine_cs *engine)
826 {
827 	GEM_BUG_ON(!list_empty(&engine->active.requests));
828 	tasklet_kill(&engine->execlists.tasklet); /* flush the callback */
829 
830 	cleanup_status_page(engine);
831 
832 	intel_engine_fini_retire(engine);
833 	intel_engine_fini_breadcrumbs(engine);
834 	intel_engine_cleanup_cmd_parser(engine);
835 
836 	if (engine->default_state)
837 		fput(engine->default_state);
838 
839 	if (engine->kernel_context) {
840 		intel_context_unpin(engine->kernel_context);
841 		intel_context_put(engine->kernel_context);
842 	}
843 	GEM_BUG_ON(!llist_empty(&engine->barrier_tasks));
844 
845 	intel_wa_list_free(&engine->ctx_wa_list);
846 	intel_wa_list_free(&engine->wa_list);
847 	intel_wa_list_free(&engine->whitelist);
848 }
849 
850 /**
851  * intel_engine_resume - re-initializes the HW state of the engine
852  * @engine: Engine to resume.
853  *
854  * Returns zero on success or an error code on failure.
855  */
856 int intel_engine_resume(struct intel_engine_cs *engine)
857 {
858 	intel_engine_apply_workarounds(engine);
859 	intel_engine_apply_whitelist(engine);
860 
861 	return engine->resume(engine);
862 }
863 
864 u64 intel_engine_get_active_head(const struct intel_engine_cs *engine)
865 {
866 	struct drm_i915_private *i915 = engine->i915;
867 
868 	u64 acthd;
869 
870 	if (INTEL_GEN(i915) >= 8)
871 		acthd = ENGINE_READ64(engine, RING_ACTHD, RING_ACTHD_UDW);
872 	else if (INTEL_GEN(i915) >= 4)
873 		acthd = ENGINE_READ(engine, RING_ACTHD);
874 	else
875 		acthd = ENGINE_READ(engine, ACTHD);
876 
877 	return acthd;
878 }
879 
880 u64 intel_engine_get_last_batch_head(const struct intel_engine_cs *engine)
881 {
882 	u64 bbaddr;
883 
884 	if (INTEL_GEN(engine->i915) >= 8)
885 		bbaddr = ENGINE_READ64(engine, RING_BBADDR, RING_BBADDR_UDW);
886 	else
887 		bbaddr = ENGINE_READ(engine, RING_BBADDR);
888 
889 	return bbaddr;
890 }
891 
892 static unsigned long stop_timeout(const struct intel_engine_cs *engine)
893 {
894 	if (in_atomic() || irqs_disabled()) /* inside atomic preempt-reset? */
895 		return 0;
896 
	/*
	 * If we are doing a normal GPU reset, we can take our time and allow
	 * the engine to quiesce. We've stopped submission to the engine, and
	 * if we wait long enough an innocent context should complete and
	 * leave the engine idle. So it should not be caught unaware by
	 * the forthcoming GPU reset (which usually follows the stop_cs)!
	 */
904 	return READ_ONCE(engine->props.stop_timeout_ms);
905 }
906 
907 int intel_engine_stop_cs(struct intel_engine_cs *engine)
908 {
909 	struct intel_uncore *uncore = engine->uncore;
910 	const u32 base = engine->mmio_base;
911 	const i915_reg_t mode = RING_MI_MODE(base);
912 	int err;
913 
914 	if (INTEL_GEN(engine->i915) < 3)
915 		return -ENODEV;
916 
917 	ENGINE_TRACE(engine, "\n");
918 
919 	intel_uncore_write_fw(uncore, mode, _MASKED_BIT_ENABLE(STOP_RING));
920 
921 	err = 0;
922 	if (__intel_wait_for_register_fw(uncore,
923 					 mode, MODE_IDLE, MODE_IDLE,
924 					 1000, stop_timeout(engine),
925 					 NULL)) {
926 		ENGINE_TRACE(engine, "timed out on STOP_RING -> IDLE\n");
927 		err = -ETIMEDOUT;
928 	}
929 
	/* A final mmio read so that pending GPU writes are hopefully flushed to memory */
931 	intel_uncore_posting_read_fw(uncore, mode);
932 
933 	return err;
934 }
935 
936 void intel_engine_cancel_stop_cs(struct intel_engine_cs *engine)
937 {
938 	ENGINE_TRACE(engine, "\n");
939 
940 	ENGINE_WRITE_FW(engine, RING_MI_MODE, _MASKED_BIT_DISABLE(STOP_RING));
941 }
942 
943 const char *i915_cache_level_str(struct drm_i915_private *i915, int type)
944 {
945 	switch (type) {
946 	case I915_CACHE_NONE: return " uncached";
947 	case I915_CACHE_LLC: return HAS_LLC(i915) ? " LLC" : " snooped";
948 	case I915_CACHE_L3_LLC: return " L3+LLC";
949 	case I915_CACHE_WT: return " WT";
950 	default: return "";
951 	}
952 }
953 
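/*
 * Read a per-slice/subslice register: steer the MCR selector at the
 * requested slice/subslice, perform the read under forcewake and the
 * uncore lock, then restore the previous steering.
 */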
954 static u32
955 read_subslice_reg(const struct intel_engine_cs *engine,
956 		  int slice, int subslice, i915_reg_t reg)
957 {
958 	struct drm_i915_private *i915 = engine->i915;
959 	struct intel_uncore *uncore = engine->uncore;
960 	u32 mcr_mask, mcr_ss, mcr, old_mcr, val;
961 	enum forcewake_domains fw_domains;
962 
963 	if (INTEL_GEN(i915) >= 11) {
964 		mcr_mask = GEN11_MCR_SLICE_MASK | GEN11_MCR_SUBSLICE_MASK;
965 		mcr_ss = GEN11_MCR_SLICE(slice) | GEN11_MCR_SUBSLICE(subslice);
966 	} else {
967 		mcr_mask = GEN8_MCR_SLICE_MASK | GEN8_MCR_SUBSLICE_MASK;
968 		mcr_ss = GEN8_MCR_SLICE(slice) | GEN8_MCR_SUBSLICE(subslice);
969 	}
970 
971 	fw_domains = intel_uncore_forcewake_for_reg(uncore, reg,
972 						    FW_REG_READ);
973 	fw_domains |= intel_uncore_forcewake_for_reg(uncore,
974 						     GEN8_MCR_SELECTOR,
975 						     FW_REG_READ | FW_REG_WRITE);
976 
977 	spin_lock_irq(&uncore->lock);
978 	intel_uncore_forcewake_get__locked(uncore, fw_domains);
979 
980 	old_mcr = mcr = intel_uncore_read_fw(uncore, GEN8_MCR_SELECTOR);
981 
982 	mcr &= ~mcr_mask;
983 	mcr |= mcr_ss;
984 	intel_uncore_write_fw(uncore, GEN8_MCR_SELECTOR, mcr);
985 
986 	val = intel_uncore_read_fw(uncore, reg);
987 
988 	mcr &= ~mcr_mask;
989 	mcr |= old_mcr & mcr_mask;
990 
991 	intel_uncore_write_fw(uncore, GEN8_MCR_SELECTOR, mcr);
992 
993 	intel_uncore_forcewake_put__locked(uncore, fw_domains);
994 	spin_unlock_irq(&uncore->lock);
995 
996 	return val;
997 }
998 
999 /* NB: please notice the memset */
1000 void intel_engine_get_instdone(const struct intel_engine_cs *engine,
1001 			       struct intel_instdone *instdone)
1002 {
1003 	struct drm_i915_private *i915 = engine->i915;
1004 	const struct sseu_dev_info *sseu = &RUNTIME_INFO(i915)->sseu;
1005 	struct intel_uncore *uncore = engine->uncore;
1006 	u32 mmio_base = engine->mmio_base;
1007 	int slice;
1008 	int subslice;
1009 
1010 	memset(instdone, 0, sizeof(*instdone));
1011 
1012 	switch (INTEL_GEN(i915)) {
1013 	default:
1014 		instdone->instdone =
1015 			intel_uncore_read(uncore, RING_INSTDONE(mmio_base));
1016 
1017 		if (engine->id != RCS0)
1018 			break;
1019 
1020 		instdone->slice_common =
1021 			intel_uncore_read(uncore, GEN7_SC_INSTDONE);
1022 		if (INTEL_GEN(i915) >= 12) {
1023 			instdone->slice_common_extra[0] =
1024 				intel_uncore_read(uncore, GEN12_SC_INSTDONE_EXTRA);
1025 			instdone->slice_common_extra[1] =
1026 				intel_uncore_read(uncore, GEN12_SC_INSTDONE_EXTRA2);
1027 		}
1028 		for_each_instdone_slice_subslice(i915, sseu, slice, subslice) {
1029 			instdone->sampler[slice][subslice] =
1030 				read_subslice_reg(engine, slice, subslice,
1031 						  GEN7_SAMPLER_INSTDONE);
1032 			instdone->row[slice][subslice] =
1033 				read_subslice_reg(engine, slice, subslice,
1034 						  GEN7_ROW_INSTDONE);
1035 		}
1036 		break;
1037 	case 7:
1038 		instdone->instdone =
1039 			intel_uncore_read(uncore, RING_INSTDONE(mmio_base));
1040 
1041 		if (engine->id != RCS0)
1042 			break;
1043 
1044 		instdone->slice_common =
1045 			intel_uncore_read(uncore, GEN7_SC_INSTDONE);
1046 		instdone->sampler[0][0] =
1047 			intel_uncore_read(uncore, GEN7_SAMPLER_INSTDONE);
1048 		instdone->row[0][0] =
1049 			intel_uncore_read(uncore, GEN7_ROW_INSTDONE);
1050 
1051 		break;
1052 	case 6:
1053 	case 5:
1054 	case 4:
1055 		instdone->instdone =
1056 			intel_uncore_read(uncore, RING_INSTDONE(mmio_base));
1057 		if (engine->id == RCS0)
1058 			/* HACK: Using the wrong struct member */
1059 			instdone->slice_common =
1060 				intel_uncore_read(uncore, GEN4_INSTDONE1);
1061 		break;
1062 	case 3:
1063 	case 2:
1064 		instdone->instdone = intel_uncore_read(uncore, GEN2_INSTDONE);
1065 		break;
1066 	}
1067 }
1068 
1069 static bool ring_is_idle(struct intel_engine_cs *engine)
1070 {
1071 	bool idle = true;
1072 
1073 	if (I915_SELFTEST_ONLY(!engine->mmio_base))
1074 		return true;
1075 
1076 	if (!intel_engine_pm_get_if_awake(engine))
1077 		return true;
1078 
1079 	/* First check that no commands are left in the ring */
1080 	if ((ENGINE_READ(engine, RING_HEAD) & HEAD_ADDR) !=
1081 	    (ENGINE_READ(engine, RING_TAIL) & TAIL_ADDR))
1082 		idle = false;
1083 
1084 	/* No bit for gen2, so assume the CS parser is idle */
1085 	if (INTEL_GEN(engine->i915) > 2 &&
1086 	    !(ENGINE_READ(engine, RING_MI_MODE) & MODE_IDLE))
1087 		idle = false;
1088 
1089 	intel_engine_pm_put(engine);
1090 
1091 	return idle;
1092 }
1093 
1094 void intel_engine_flush_submission(struct intel_engine_cs *engine)
1095 {
1096 	struct tasklet_struct *t = &engine->execlists.tasklet;
1097 
1098 	if (__tasklet_is_scheduled(t)) {
1099 		local_bh_disable();
1100 		if (tasklet_trylock(t)) {
1101 			/* Must wait for any GPU reset in progress. */
1102 			if (__tasklet_is_enabled(t))
1103 				t->func(t->data);
1104 			tasklet_unlock(t);
1105 		}
1106 		local_bh_enable();
1107 	}
1108 
1109 	/* Otherwise flush the tasklet if it was running on another cpu */
1110 	tasklet_unlock_wait(t);
1111 }
1112 
/**
 * intel_engine_is_idle() - Report if the engine has finished processing all work
 * @engine: the intel_engine_cs
 *
 * Return true if there are no requests pending, nothing left to be submitted
 * to hardware, and the engine is idle.
 */
1120 bool intel_engine_is_idle(struct intel_engine_cs *engine)
1121 {
	/* More white lies: if wedged, the HW state is inconsistent */
1123 	if (intel_gt_is_wedged(engine->gt))
1124 		return true;
1125 
1126 	if (!intel_engine_pm_is_awake(engine))
1127 		return true;
1128 
1129 	/* Waiting to drain ELSP? */
1130 	if (execlists_active(&engine->execlists)) {
1131 		synchronize_hardirq(engine->i915->drm.pdev->irq);
1132 
1133 		intel_engine_flush_submission(engine);
1134 
1135 		if (execlists_active(&engine->execlists))
1136 			return false;
1137 	}
1138 
1139 	/* ELSP is empty, but there are ready requests? E.g. after reset */
1140 	if (!RB_EMPTY_ROOT(&engine->execlists.queue.rb_root))
1141 		return false;
1142 
1143 	/* Ring stopped? */
1144 	return ring_is_idle(engine);
1145 }
1146 
1147 bool intel_engines_are_idle(struct intel_gt *gt)
1148 {
1149 	struct intel_engine_cs *engine;
1150 	enum intel_engine_id id;
1151 
1152 	/*
1153 	 * If the driver is wedged, HW state may be very inconsistent and
1154 	 * report that it is still busy, even though we have stopped using it.
1155 	 */
1156 	if (intel_gt_is_wedged(gt))
1157 		return true;
1158 
1159 	/* Already parked (and passed an idleness test); must still be idle */
1160 	if (!READ_ONCE(gt->awake))
1161 		return true;
1162 
1163 	for_each_engine(engine, gt, id) {
1164 		if (!intel_engine_is_idle(engine))
1165 			return false;
1166 	}
1167 
1168 	return true;
1169 }
1170 
1171 void intel_engines_reset_default_submission(struct intel_gt *gt)
1172 {
1173 	struct intel_engine_cs *engine;
1174 	enum intel_engine_id id;
1175 
1176 	for_each_engine(engine, gt, id)
1177 		engine->set_default_submission(engine);
1178 }
1179 
1180 bool intel_engine_can_store_dword(struct intel_engine_cs *engine)
1181 {
1182 	switch (INTEL_GEN(engine->i915)) {
1183 	case 2:
1184 		return false; /* uses physical not virtual addresses */
1185 	case 3:
1186 		/* maybe only uses physical not virtual addresses */
1187 		return !(IS_I915G(engine->i915) || IS_I915GM(engine->i915));
1188 	case 4:
1189 		return !IS_I965G(engine->i915); /* who knows! */
1190 	case 6:
1191 		return engine->class != VIDEO_DECODE_CLASS; /* b0rked */
1192 	default:
1193 		return true;
1194 	}
1195 }
1196 
1197 static int print_sched_attr(struct drm_i915_private *i915,
1198 			    const struct i915_sched_attr *attr,
1199 			    char *buf, int x, int len)
1200 {
1201 	if (attr->priority == I915_PRIORITY_INVALID)
1202 		return x;
1203 
1204 	x += snprintf(buf + x, len - x,
1205 		      " prio=%d", attr->priority);
1206 
1207 	return x;
1208 }
1209 
1210 static void print_request(struct drm_printer *m,
1211 			  struct i915_request *rq,
1212 			  const char *prefix)
1213 {
1214 	const char *name = rq->fence.ops->get_timeline_name(&rq->fence);
1215 	char buf[80] = "";
1216 	int x = 0;
1217 
1218 	x = print_sched_attr(rq->i915, &rq->sched.attr, buf, x, sizeof(buf));
1219 
1220 	drm_printf(m, "%s %llx:%llx%s%s %s @ %dms: %s\n",
1221 		   prefix,
1222 		   rq->fence.context, rq->fence.seqno,
1223 		   i915_request_completed(rq) ? "!" :
1224 		   i915_request_started(rq) ? "*" :
1225 		   "",
1226 		   test_bit(DMA_FENCE_FLAG_SIGNALED_BIT,
1227 			    &rq->fence.flags) ? "+" :
1228 		   test_bit(DMA_FENCE_FLAG_ENABLE_SIGNAL_BIT,
1229 			    &rq->fence.flags) ? "-" :
1230 		   "",
1231 		   buf,
1232 		   jiffies_to_msecs(jiffies - rq->emitted_jiffies),
1233 		   name);
1234 }
1235 
1236 static struct intel_timeline *get_timeline(struct i915_request *rq)
1237 {
1238 	struct intel_timeline *tl;
1239 
1240 	/*
1241 	 * Even though we are holding the engine->active.lock here, there
1242 	 * is no control over the submission queue per-se and we are
1243 	 * inspecting the active state at a random point in time, with an
1244 	 * unknown queue. Play safe and make sure the timeline remains valid.
1245 	 * (Only being used for pretty printing, one extra kref shouldn't
1246 	 * cause a camel stampede!)
1247 	 */
1248 	rcu_read_lock();
1249 	tl = rcu_dereference(rq->timeline);
1250 	if (!kref_get_unless_zero(&tl->kref))
1251 		tl = NULL;
1252 	rcu_read_unlock();
1253 
1254 	return tl;
1255 }
1256 
1257 static int print_ring(char *buf, int sz, struct i915_request *rq)
1258 {
1259 	int len = 0;
1260 
1261 	if (!i915_request_signaled(rq)) {
1262 		struct intel_timeline *tl = get_timeline(rq);
1263 
1264 		len = scnprintf(buf, sz,
1265 				"ring:{start:%08x, hwsp:%08x, seqno:%08x, runtime:%llums}, ",
1266 				i915_ggtt_offset(rq->ring->vma),
1267 				tl ? tl->hwsp_offset : 0,
1268 				hwsp_seqno(rq),
1269 				DIV_ROUND_CLOSEST_ULL(intel_context_get_total_runtime_ns(rq->context),
1270 						      1000 * 1000));
1271 
1272 		if (tl)
1273 			intel_timeline_put(tl);
1274 	}
1275 
1276 	return len;
1277 }
1278 
1279 static void hexdump(struct drm_printer *m, const void *buf, size_t len)
1280 {
1281 	const size_t rowsize = 8 * sizeof(u32);
1282 	const void *prev = NULL;
1283 	bool skip = false;
1284 	size_t pos;
1285 
1286 	for (pos = 0; pos < len; pos += rowsize) {
1287 		char line[128];
1288 
1289 		if (prev && !memcmp(prev, buf + pos, rowsize)) {
1290 			if (!skip) {
1291 				drm_printf(m, "*\n");
1292 				skip = true;
1293 			}
1294 			continue;
1295 		}
1296 
1297 		WARN_ON_ONCE(hex_dump_to_buffer(buf + pos, len - pos,
1298 						rowsize, sizeof(u32),
1299 						line, sizeof(line),
1300 						false) >= sizeof(line));
1301 		drm_printf(m, "[%04zx] %s\n", pos, line);
1302 
1303 		prev = buf + pos;
1304 		skip = false;
1305 	}
1306 }
1307 
1308 static const char *repr_timer(const struct timer_list *t)
1309 {
1310 	if (!READ_ONCE(t->expires))
1311 		return "inactive";
1312 
1313 	if (timer_pending(t))
1314 		return "active";
1315 
1316 	return "expired";
1317 }
1318 
1319 static void intel_engine_print_registers(struct intel_engine_cs *engine,
1320 					 struct drm_printer *m)
1321 {
1322 	struct drm_i915_private *dev_priv = engine->i915;
1323 	struct intel_engine_execlists * const execlists = &engine->execlists;
1324 	u64 addr;
1325 
1326 	if (engine->id == RENDER_CLASS && IS_GEN_RANGE(dev_priv, 4, 7))
1327 		drm_printf(m, "\tCCID: 0x%08x\n", ENGINE_READ(engine, CCID));
1328 	if (HAS_EXECLISTS(dev_priv)) {
1329 		drm_printf(m, "\tEL_STAT_HI: 0x%08x\n",
1330 			   ENGINE_READ(engine, RING_EXECLIST_STATUS_HI));
1331 		drm_printf(m, "\tEL_STAT_LO: 0x%08x\n",
1332 			   ENGINE_READ(engine, RING_EXECLIST_STATUS_LO));
1333 	}
1334 	drm_printf(m, "\tRING_START: 0x%08x\n",
1335 		   ENGINE_READ(engine, RING_START));
1336 	drm_printf(m, "\tRING_HEAD:  0x%08x\n",
1337 		   ENGINE_READ(engine, RING_HEAD) & HEAD_ADDR);
1338 	drm_printf(m, "\tRING_TAIL:  0x%08x\n",
1339 		   ENGINE_READ(engine, RING_TAIL) & TAIL_ADDR);
1340 	drm_printf(m, "\tRING_CTL:   0x%08x%s\n",
1341 		   ENGINE_READ(engine, RING_CTL),
1342 		   ENGINE_READ(engine, RING_CTL) & (RING_WAIT | RING_WAIT_SEMAPHORE) ? " [waiting]" : "");
1343 	if (INTEL_GEN(engine->i915) > 2) {
1344 		drm_printf(m, "\tRING_MODE:  0x%08x%s\n",
1345 			   ENGINE_READ(engine, RING_MI_MODE),
1346 			   ENGINE_READ(engine, RING_MI_MODE) & (MODE_IDLE) ? " [idle]" : "");
1347 	}
1348 
1349 	if (INTEL_GEN(dev_priv) >= 6) {
1350 		drm_printf(m, "\tRING_IMR:   0x%08x\n",
1351 			   ENGINE_READ(engine, RING_IMR));
1352 		drm_printf(m, "\tRING_ESR:   0x%08x\n",
1353 			   ENGINE_READ(engine, RING_ESR));
1354 		drm_printf(m, "\tRING_EMR:   0x%08x\n",
1355 			   ENGINE_READ(engine, RING_EMR));
1356 		drm_printf(m, "\tRING_EIR:   0x%08x\n",
1357 			   ENGINE_READ(engine, RING_EIR));
1358 	}
1359 
1360 	addr = intel_engine_get_active_head(engine);
1361 	drm_printf(m, "\tACTHD:  0x%08x_%08x\n",
1362 		   upper_32_bits(addr), lower_32_bits(addr));
1363 	addr = intel_engine_get_last_batch_head(engine);
1364 	drm_printf(m, "\tBBADDR: 0x%08x_%08x\n",
1365 		   upper_32_bits(addr), lower_32_bits(addr));
1366 	if (INTEL_GEN(dev_priv) >= 8)
1367 		addr = ENGINE_READ64(engine, RING_DMA_FADD, RING_DMA_FADD_UDW);
1368 	else if (INTEL_GEN(dev_priv) >= 4)
1369 		addr = ENGINE_READ(engine, RING_DMA_FADD);
1370 	else
1371 		addr = ENGINE_READ(engine, DMA_FADD_I8XX);
1372 	drm_printf(m, "\tDMA_FADDR: 0x%08x_%08x\n",
1373 		   upper_32_bits(addr), lower_32_bits(addr));
1374 	if (INTEL_GEN(dev_priv) >= 4) {
1375 		drm_printf(m, "\tIPEIR: 0x%08x\n",
1376 			   ENGINE_READ(engine, RING_IPEIR));
1377 		drm_printf(m, "\tIPEHR: 0x%08x\n",
1378 			   ENGINE_READ(engine, RING_IPEHR));
1379 	} else {
1380 		drm_printf(m, "\tIPEIR: 0x%08x\n", ENGINE_READ(engine, IPEIR));
1381 		drm_printf(m, "\tIPEHR: 0x%08x\n", ENGINE_READ(engine, IPEHR));
1382 	}
1383 
1384 	if (HAS_EXECLISTS(dev_priv)) {
1385 		struct i915_request * const *port, *rq;
1386 		const u32 *hws =
1387 			&engine->status_page.addr[I915_HWS_CSB_BUF0_INDEX];
1388 		const u8 num_entries = execlists->csb_size;
1389 		unsigned int idx;
1390 		u8 read, write;
1391 
1392 		drm_printf(m, "\tExeclist tasklet queued? %s (%s), preempt? %s, timeslice? %s\n",
1393 			   yesno(test_bit(TASKLET_STATE_SCHED,
1394 					  &engine->execlists.tasklet.state)),
1395 			   enableddisabled(!atomic_read(&engine->execlists.tasklet.count)),
1396 			   repr_timer(&engine->execlists.preempt),
1397 			   repr_timer(&engine->execlists.timer));
1398 
1399 		read = execlists->csb_head;
1400 		write = READ_ONCE(*execlists->csb_write);
1401 
1402 		drm_printf(m, "\tExeclist status: 0x%08x %08x; CSB read:%d, write:%d, entries:%d\n",
1403 			   ENGINE_READ(engine, RING_EXECLIST_STATUS_LO),
1404 			   ENGINE_READ(engine, RING_EXECLIST_STATUS_HI),
1405 			   read, write, num_entries);
1406 
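		/*
		 * Dump the CSB entries between the last-read and write
		 * pointers, accounting for wrap-around of the circular
		 * buffer.
		 */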
1407 		if (read >= num_entries)
1408 			read = 0;
1409 		if (write >= num_entries)
1410 			write = 0;
1411 		if (read > write)
1412 			write += num_entries;
1413 		while (read < write) {
1414 			idx = ++read % num_entries;
1415 			drm_printf(m, "\tExeclist CSB[%d]: 0x%08x, context: %d\n",
1416 				   idx, hws[idx * 2], hws[idx * 2 + 1]);
1417 		}
1418 
1419 		execlists_active_lock_bh(execlists);
1420 		rcu_read_lock();
1421 		for (port = execlists->active; (rq = *port); port++) {
1422 			char hdr[160];
1423 			int len;
1424 
1425 			len = scnprintf(hdr, sizeof(hdr),
1426 					"\t\tActive[%d]:  ccid:%08x, ",
1427 					(int)(port - execlists->active),
1428 					rq->context->lrc.ccid);
1429 			len += print_ring(hdr + len, sizeof(hdr) - len, rq);
1430 			scnprintf(hdr + len, sizeof(hdr) - len, "rq: ");
1431 			print_request(m, rq, hdr);
1432 		}
1433 		for (port = execlists->pending; (rq = *port); port++) {
1434 			char hdr[160];
1435 			int len;
1436 
1437 			len = scnprintf(hdr, sizeof(hdr),
1438 					"\t\tPending[%d]: ccid:%08x, ",
1439 					(int)(port - execlists->pending),
1440 					rq->context->lrc.ccid);
1441 			len += print_ring(hdr + len, sizeof(hdr) - len, rq);
1442 			scnprintf(hdr + len, sizeof(hdr) - len, "rq: ");
1443 			print_request(m, rq, hdr);
1444 		}
1445 		rcu_read_unlock();
1446 		execlists_active_unlock_bh(execlists);
1447 	} else if (INTEL_GEN(dev_priv) > 6) {
1448 		drm_printf(m, "\tPP_DIR_BASE: 0x%08x\n",
1449 			   ENGINE_READ(engine, RING_PP_DIR_BASE));
1450 		drm_printf(m, "\tPP_DIR_BASE_READ: 0x%08x\n",
1451 			   ENGINE_READ(engine, RING_PP_DIR_BASE_READ));
1452 		drm_printf(m, "\tPP_DIR_DCLV: 0x%08x\n",
1453 			   ENGINE_READ(engine, RING_PP_DIR_DCLV));
1454 	}
1455 }
1456 
1457 static void print_request_ring(struct drm_printer *m, struct i915_request *rq)
1458 {
1459 	void *ring;
1460 	int size;
1461 
1462 	drm_printf(m,
1463 		   "[head %04x, postfix %04x, tail %04x, batch 0x%08x_%08x]:\n",
1464 		   rq->head, rq->postfix, rq->tail,
1465 		   rq->batch ? upper_32_bits(rq->batch->node.start) : ~0u,
1466 		   rq->batch ? lower_32_bits(rq->batch->node.start) : ~0u);
1467 
1468 	size = rq->tail - rq->head;
1469 	if (rq->tail < rq->head)
1470 		size += rq->ring->size;
1471 
1472 	ring = kmalloc(size, GFP_ATOMIC);
1473 	if (ring) {
1474 		const void *vaddr = rq->ring->vaddr;
1475 		unsigned int head = rq->head;
1476 		unsigned int len = 0;
1477 
1478 		if (rq->tail < head) {
1479 			len = rq->ring->size - head;
1480 			memcpy(ring, vaddr + head, len);
1481 			head = 0;
1482 		}
1483 		memcpy(ring + len, vaddr + head, size - len);
1484 
1485 		hexdump(m, ring, size);
1486 		kfree(ring);
1487 	}
1488 }
1489 
1490 static unsigned long list_count(struct list_head *list)
1491 {
1492 	struct list_head *pos;
1493 	unsigned long count = 0;
1494 
1495 	list_for_each(pos, list)
1496 		count++;
1497 
1498 	return count;
1499 }
1500 
1501 void intel_engine_dump(struct intel_engine_cs *engine,
1502 		       struct drm_printer *m,
1503 		       const char *header, ...)
1504 {
1505 	struct i915_gpu_error * const error = &engine->i915->gpu_error;
1506 	struct i915_request *rq;
1507 	intel_wakeref_t wakeref;
1508 	unsigned long flags;
1509 
1510 	if (header) {
1511 		va_list ap;
1512 
1513 		va_start(ap, header);
1514 		drm_vprintf(m, header, &ap);
1515 		va_end(ap);
1516 	}
1517 
1518 	if (intel_gt_is_wedged(engine->gt))
1519 		drm_printf(m, "*** WEDGED ***\n");
1520 
1521 	drm_printf(m, "\tAwake? %d\n", atomic_read(&engine->wakeref.count));
1522 	drm_printf(m, "\tBarriers?: %s\n",
1523 		   yesno(!llist_empty(&engine->barrier_tasks)));
1524 	drm_printf(m, "\tLatency: %luus\n",
1525 		   ewma__engine_latency_read(&engine->latency));
1526 
1527 	rcu_read_lock();
1528 	rq = READ_ONCE(engine->heartbeat.systole);
1529 	if (rq)
1530 		drm_printf(m, "\tHeartbeat: %d ms ago\n",
1531 			   jiffies_to_msecs(jiffies - rq->emitted_jiffies));
1532 	rcu_read_unlock();
1533 	drm_printf(m, "\tReset count: %d (global %d)\n",
1534 		   i915_reset_engine_count(error, engine),
1535 		   i915_reset_count(error));
1536 
1537 	drm_printf(m, "\tRequests:\n");
1538 
1539 	spin_lock_irqsave(&engine->active.lock, flags);
1540 	rq = intel_engine_find_active_request(engine);
1541 	if (rq) {
1542 		struct intel_timeline *tl = get_timeline(rq);
1543 
1544 		print_request(m, rq, "\t\tactive ");
1545 
1546 		drm_printf(m, "\t\tring->start:  0x%08x\n",
1547 			   i915_ggtt_offset(rq->ring->vma));
1548 		drm_printf(m, "\t\tring->head:   0x%08x\n",
1549 			   rq->ring->head);
1550 		drm_printf(m, "\t\tring->tail:   0x%08x\n",
1551 			   rq->ring->tail);
1552 		drm_printf(m, "\t\tring->emit:   0x%08x\n",
1553 			   rq->ring->emit);
1554 		drm_printf(m, "\t\tring->space:  0x%08x\n",
1555 			   rq->ring->space);
1556 
1557 		if (tl) {
1558 			drm_printf(m, "\t\tring->hwsp:   0x%08x\n",
1559 				   tl->hwsp_offset);
1560 			intel_timeline_put(tl);
1561 		}
1562 
1563 		print_request_ring(m, rq);
1564 
1565 		if (rq->context->lrc_reg_state) {
1566 			drm_printf(m, "Logical Ring Context:\n");
1567 			hexdump(m, rq->context->lrc_reg_state, PAGE_SIZE);
1568 		}
1569 	}
1570 	drm_printf(m, "\tOn hold?: %lu\n", list_count(&engine->active.hold));
1571 	spin_unlock_irqrestore(&engine->active.lock, flags);
1572 
1573 	drm_printf(m, "\tMMIO base:  0x%08x\n", engine->mmio_base);
1574 	wakeref = intel_runtime_pm_get_if_in_use(engine->uncore->rpm);
1575 	if (wakeref) {
1576 		intel_engine_print_registers(engine, m);
1577 		intel_runtime_pm_put(engine->uncore->rpm, wakeref);
1578 	} else {
1579 		drm_printf(m, "\tDevice is asleep; skipping register dump\n");
1580 	}
1581 
1582 	intel_execlists_show_requests(engine, m, print_request, 8);
1583 
1584 	drm_printf(m, "HWSP:\n");
1585 	hexdump(m, engine->status_page.addr, PAGE_SIZE);
1586 
1587 	drm_printf(m, "Idle? %s\n", yesno(intel_engine_is_idle(engine)));
1588 
1589 	intel_engine_print_breadcrumbs(engine, m);
1590 }
1591 
1592 static ktime_t __intel_engine_get_busy_time(struct intel_engine_cs *engine)
1593 {
1594 	ktime_t total = engine->stats.total;
1595 
	/*
	 * If the engine is executing something at the moment,
	 * add it to the total.
	 */
1600 	if (atomic_read(&engine->stats.active))
1601 		total = ktime_add(total,
1602 				  ktime_sub(ktime_get(), engine->stats.start));
1603 
1604 	return total;
1605 }
1606 
1607 /**
1608  * intel_engine_get_busy_time() - Return current accumulated engine busyness
1609  * @engine: engine to report on
1610  *
1611  * Returns accumulated time @engine was busy since engine stats were enabled.
1612  */
1613 ktime_t intel_engine_get_busy_time(struct intel_engine_cs *engine)
1614 {
1615 	unsigned int seq;
1616 	ktime_t total;
1617 
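	/* Loop on the seqlock until we read a consistent busy-time snapshot */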
1618 	do {
1619 		seq = read_seqbegin(&engine->stats.lock);
1620 		total = __intel_engine_get_busy_time(engine);
1621 	} while (read_seqretry(&engine->stats.lock, seq));
1622 
1623 	return total;
1624 }
1625 
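/*
 * Check whether the request's ring is the one currently programmed into
 * RING_START, i.e. the ring the CS is executing from.
 */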
1626 static bool match_ring(struct i915_request *rq)
1627 {
1628 	u32 ring = ENGINE_READ(rq->engine, RING_START);
1629 
1630 	return ring == i915_ggtt_offset(rq->ring->vma);
1631 }
1632 
1633 struct i915_request *
1634 intel_engine_find_active_request(struct intel_engine_cs *engine)
1635 {
1636 	struct i915_request *request, *active = NULL;
1637 
1638 	/*
1639 	 * We are called by the error capture, reset and to dump engine
1640 	 * state at random points in time. In particular, note that neither is
1641 	 * crucially ordered with an interrupt. After a hang, the GPU is dead
1642 	 * and we assume that no more writes can happen (we waited long enough
1643 	 * for all writes that were in transaction to be flushed) - adding an
1644 	 * extra delay for a recent interrupt is pointless. Hence, we do
1645 	 * not need an engine->irq_seqno_barrier() before the seqno reads.
1646 	 * At all other times, we must assume the GPU is still running, but
1647 	 * we only care about the snapshot of this moment.
1648 	 */
1649 	lockdep_assert_held(&engine->active.lock);
1650 
1651 	rcu_read_lock();
1652 	request = execlists_active(&engine->execlists);
1653 	if (request) {
1654 		struct intel_timeline *tl = request->context->timeline;
1655 
1656 		list_for_each_entry_from_reverse(request, &tl->requests, link) {
1657 			if (i915_request_completed(request))
1658 				break;
1659 
1660 			active = request;
1661 		}
1662 	}
1663 	rcu_read_unlock();
1664 	if (active)
1665 		return active;
1666 
1667 	list_for_each_entry(request, &engine->active.requests, sched.link) {
1668 		if (i915_request_completed(request))
1669 			continue;
1670 
1671 		if (!i915_request_started(request))
1672 			continue;
1673 
1674 		/* More than one preemptible request may match! */
1675 		if (!match_ring(request))
1676 			continue;
1677 
1678 		active = request;
1679 		break;
1680 	}
1681 
1682 	return active;
1683 }
1684 
1685 #if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
1686 #include "mock_engine.c"
1687 #include "selftest_engine.c"
1688 #include "selftest_engine_cs.c"
1689 #endif
1690