xref: /openbmc/linux/drivers/gpu/drm/i915/gt/intel_gt.c (revision 801543b2)
1 // SPDX-License-Identifier: MIT
2 /*
3  * Copyright © 2019 Intel Corporation
4  */
5 
6 #include <drm/drm_managed.h>
7 #include <drm/intel-gtt.h>
8 
9 #include "gem/i915_gem_internal.h"
10 #include "gem/i915_gem_lmem.h"
11 #include "pxp/intel_pxp.h"
12 
13 #include "i915_drv.h"
14 #include "i915_perf_oa_regs.h"
15 #include "i915_reg.h"
16 #include "intel_context.h"
17 #include "intel_engine_pm.h"
18 #include "intel_engine_regs.h"
19 #include "intel_ggtt_gmch.h"
20 #include "intel_gt.h"
21 #include "intel_gt_buffer_pool.h"
22 #include "intel_gt_clock_utils.h"
23 #include "intel_gt_debugfs.h"
24 #include "intel_gt_mcr.h"
25 #include "intel_gt_pm.h"
26 #include "intel_gt_regs.h"
27 #include "intel_gt_requests.h"
28 #include "intel_migrate.h"
29 #include "intel_mocs.h"
30 #include "intel_pci_config.h"
31 #include "intel_pm.h"
32 #include "intel_rc6.h"
33 #include "intel_renderstate.h"
34 #include "intel_rps.h"
35 #include "intel_gt_sysfs.h"
36 #include "intel_uncore.h"
37 #include "shmem_utils.h"
38 
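/*
 * Early, MMIO-free initialisation of the per-GT software state: locks,
 * lists, the watchdog worker, buffer pool, reset/request/timeline
 * bookkeeping, the TLB invalidation lock/seqcount, and the early PM, uC
 * and RPS state. Shared by the root GT and, via intel_gt_tile_setup(),
 * any additional tiles.
 */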
39 static void __intel_gt_init_early(struct intel_gt *gt)
40 {
41 	spin_lock_init(&gt->irq_lock);
42 
43 	INIT_LIST_HEAD(&gt->closed_vma);
44 	spin_lock_init(&gt->closed_lock);
45 
46 	init_llist_head(&gt->watchdog.list);
47 	INIT_WORK(&gt->watchdog.work, intel_gt_watchdog_work);
48 
49 	intel_gt_init_buffer_pool(gt);
50 	intel_gt_init_reset(gt);
51 	intel_gt_init_requests(gt);
52 	intel_gt_init_timelines(gt);
53 	mutex_init(&gt->tlb.invalidate_lock);
54 	seqcount_mutex_init(&gt->tlb.seqno, &gt->tlb.invalidate_lock);
55 	intel_gt_pm_init_early(gt);
56 
57 	intel_uc_init_early(&gt->uc);
58 	intel_rps_init_early(&gt->rps);
59 }
60 
61 /* Preliminary initialization of Tile 0 */
62 void intel_root_gt_init_early(struct drm_i915_private *i915)
63 {
64 	struct intel_gt *gt = to_gt(i915);
65 
66 	gt->i915 = i915;
67 	gt->uncore = &i915->uncore;
68 
69 	__intel_gt_init_early(gt);
70 }
71 
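/*
 * Probe the local memory (LMEM) region for this GT. -ENODEV from
 * intel_gt_setup_lmem() means the platform simply has no device-local
 * memory and is not treated as an error; any other failure is fatal.
 * On success the region is named "local<instance>" and published in
 * i915->mm.regions[] at INTEL_REGION_LMEM_0 + gt->info.id.
 */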
72 static int intel_gt_probe_lmem(struct intel_gt *gt)
73 {
74 	struct drm_i915_private *i915 = gt->i915;
75 	unsigned int instance = gt->info.id;
76 	int id = INTEL_REGION_LMEM_0 + instance;
77 	struct intel_memory_region *mem;
78 	int err;
79 
80 	mem = intel_gt_setup_lmem(gt);
81 	if (IS_ERR(mem)) {
82 		err = PTR_ERR(mem);
83 		if (err == -ENODEV)
84 			return 0;
85 
86 		drm_err(&i915->drm,
87 			"Failed to setup region(%d) type=%d\n",
88 			err, INTEL_MEMORY_LOCAL);
89 		return err;
90 	}
91 
92 	mem->id = id;
93 	mem->instance = instance;
94 
95 	intel_memory_region_set_name(mem, "local%u", mem->instance);
96 
97 	GEM_BUG_ON(!HAS_REGION(i915, id));
98 	GEM_BUG_ON(i915->mm.regions[id]);
99 	i915->mm.regions[id] = mem;
100 
101 	return 0;
102 }
103 
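/*
 * Allocate the GGTT structure for this GT. The allocation is
 * drm-managed (drmm_kzalloc), so it is released together with the
 * drm_device rather than by an explicit free in the GT teardown paths.
 */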
104 int intel_gt_assign_ggtt(struct intel_gt *gt)
105 {
106 	gt->ggtt = drmm_kzalloc(&gt->i915->drm, sizeof(*gt->ggtt), GFP_KERNEL);
107 
108 	return gt->ggtt ? 0 : -ENOMEM;
109 }
110 
111 int intel_gt_init_mmio(struct intel_gt *gt)
112 {
113 	intel_gt_init_clock_frequency(gt);
114 
115 	intel_uc_init_mmio(&gt->uc);
116 	intel_sseu_info_init(gt);
117 	intel_gt_mcr_init(gt);
118 
119 	return intel_engines_init_mmio(gt);
120 }
121 
122 static void init_unused_ring(struct intel_gt *gt, u32 base)
123 {
124 	struct intel_uncore *uncore = gt->uncore;
125 
126 	intel_uncore_write(uncore, RING_CTL(base), 0);
127 	intel_uncore_write(uncore, RING_HEAD(base), 0);
128 	intel_uncore_write(uncore, RING_TAIL(base), 0);
129 	intel_uncore_write(uncore, RING_START(base), 0);
130 }
131 
132 static void init_unused_rings(struct intel_gt *gt)
133 {
134 	struct drm_i915_private *i915 = gt->i915;
135 
136 	if (IS_I830(i915)) {
137 		init_unused_ring(gt, PRB1_BASE);
138 		init_unused_ring(gt, SRB0_BASE);
139 		init_unused_ring(gt, SRB1_BASE);
140 		init_unused_ring(gt, SRB2_BASE);
141 		init_unused_ring(gt, SRB3_BASE);
142 	} else if (GRAPHICS_VER(i915) == 2) {
143 		init_unused_ring(gt, SRB0_BASE);
144 		init_unused_ring(gt, SRB1_BASE);
145 	} else if (GRAPHICS_VER(i915) == 3) {
146 		init_unused_ring(gt, PRB1_BASE);
147 		init_unused_ring(gt, PRB2_BASE);
148 	}
149 }
150 
151 int intel_gt_init_hw(struct intel_gt *gt)
152 {
153 	struct drm_i915_private *i915 = gt->i915;
154 	struct intel_uncore *uncore = gt->uncore;
155 	int ret;
156 
157 	gt->last_init_time = ktime_get();
158 
159 	/* Double layer security blanket, see i915_gem_init() */
160 	intel_uncore_forcewake_get(uncore, FORCEWAKE_ALL);
161 
162 	if (HAS_EDRAM(i915) && GRAPHICS_VER(i915) < 9)
163 		intel_uncore_rmw(uncore, HSW_IDICR, 0, IDIHASHMSK(0xf));
164 
165 	if (IS_HASWELL(i915))
166 		intel_uncore_write(uncore,
167 				   HSW_MI_PREDICATE_RESULT_2,
168 				   IS_HSW_GT3(i915) ?
169 				   LOWER_SLICE_ENABLED : LOWER_SLICE_DISABLED);
170 
171 	/* Apply the GT workarounds... */
172 	intel_gt_apply_workarounds(gt);
173 	/* ...and determine whether they are sticking. */
174 	intel_gt_verify_workarounds(gt, "init");
175 
176 	intel_gt_init_swizzling(gt);
177 
178 	/*
179 	 * At least 830 can leave some of the unused rings
180 	 * "active" (i.e. head != tail) after resume, which
181 	 * will prevent C3 entry. Make sure all unused rings
180 	 * "active" (i.e. head != tail) after resume, which
181 	 * will prevent C3 entry. Make sure all unused rings
182 	 * are totally idle.
183 	 */
184 	init_unused_rings(gt);
185 
186 	ret = i915_ppgtt_init_hw(gt);
187 	if (ret) {
188 		DRM_ERROR("Enabling PPGTT failed (%d)\n", ret);
189 		goto out;
190 	}
191 
192 	/* We can't enable contexts until all firmware is loaded */
193 	ret = intel_uc_init_hw(&gt->uc);
194 	if (ret) {
195 		i915_probe_error(i915, "Enabling uc failed (%d)\n", ret);
196 		goto out;
197 	}
198 
199 	intel_mocs_init(gt);
200 
201 out:
202 	intel_uncore_forcewake_put(uncore, FORCEWAKE_ALL);
203 	return ret;
204 }
205 
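/*
 * Small wrappers around intel_uncore_rmw(): rmw_set() sets bits,
 * rmw_clear() clears bits, and clear_register() issues the same
 * read-modify-write with no bits selected, used for the error
 * registers scrubbed below.
 */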
206 static void rmw_set(struct intel_uncore *uncore, i915_reg_t reg, u32 set)
207 {
208 	intel_uncore_rmw(uncore, reg, 0, set);
209 }
210 
211 static void rmw_clear(struct intel_uncore *uncore, i915_reg_t reg, u32 clr)
212 {
213 	intel_uncore_rmw(uncore, reg, clr, 0);
214 }
215 
216 static void clear_register(struct intel_uncore *uncore, i915_reg_t reg)
217 {
218 	intel_uncore_rmw(uncore, reg, 0, 0);
219 }
220 
221 static void gen6_clear_engine_error_register(struct intel_engine_cs *engine)
222 {
223 	GEN6_RING_FAULT_REG_RMW(engine, RING_FAULT_VALID, 0);
224 	GEN6_RING_FAULT_REG_POSTING_READ(engine);
225 }
226 
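/*
 * Clear any latched error state (PGTBL_ER, IPEIR, EIR) and the
 * per-generation ring fault registers. If bits in EIR refuse to clear,
 * they are masked off in EMR and the latched master error interrupt is
 * acked so a stuck error does not keep re-triggering.
 */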
227 void
228 intel_gt_clear_error_registers(struct intel_gt *gt,
229 			       intel_engine_mask_t engine_mask)
230 {
231 	struct drm_i915_private *i915 = gt->i915;
232 	struct intel_uncore *uncore = gt->uncore;
233 	u32 eir;
234 
235 	if (GRAPHICS_VER(i915) != 2)
236 		clear_register(uncore, PGTBL_ER);
237 
238 	if (GRAPHICS_VER(i915) < 4)
239 		clear_register(uncore, IPEIR(RENDER_RING_BASE));
240 	else
241 		clear_register(uncore, IPEIR_I965);
242 
243 	clear_register(uncore, EIR);
244 	eir = intel_uncore_read(uncore, EIR);
245 	if (eir) {
246 		/*
247 		 * Some errors might have become stuck;
248 		 * mask them.
249 		 */
250 		DRM_DEBUG_DRIVER("EIR stuck: 0x%08x, masking\n", eir);
251 		rmw_set(uncore, EMR, eir);
252 		intel_uncore_write(uncore, GEN2_IIR,
253 				   I915_MASTER_ERROR_INTERRUPT);
254 	}
255 
256 	if (GRAPHICS_VER(i915) >= 12) {
257 		rmw_clear(uncore, GEN12_RING_FAULT_REG, RING_FAULT_VALID);
258 		intel_uncore_posting_read(uncore, GEN12_RING_FAULT_REG);
259 	} else if (GRAPHICS_VER(i915) >= 8) {
260 		rmw_clear(uncore, GEN8_RING_FAULT_REG, RING_FAULT_VALID);
261 		intel_uncore_posting_read(uncore, GEN8_RING_FAULT_REG);
262 	} else if (GRAPHICS_VER(i915) >= 6) {
263 		struct intel_engine_cs *engine;
264 		enum intel_engine_id id;
265 
266 		for_each_engine_masked(engine, gt, engine_mask, id)
267 			gen6_clear_engine_error_register(engine);
268 	}
269 }
270 
271 static void gen6_check_faults(struct intel_gt *gt)
272 {
273 	struct intel_engine_cs *engine;
274 	enum intel_engine_id id;
275 	u32 fault;
276 
277 	for_each_engine(engine, gt, id) {
278 		fault = GEN6_RING_FAULT_REG_READ(engine);
279 		if (fault & RING_FAULT_VALID) {
280 			drm_dbg(&engine->i915->drm, "Unexpected fault\n"
281 				"\tAddr: 0x%08lx\n"
282 				"\tAddress space: %s\n"
283 				"\tSource ID: %d\n"
284 				"\tType: %d\n",
285 				fault & PAGE_MASK,
286 				fault & RING_FAULT_GTTSEL_MASK ?
287 				"GGTT" : "PPGTT",
288 				RING_FAULT_SRCID(fault),
289 				RING_FAULT_FAULT_TYPE(fault));
290 		}
291 	}
292 }
293 
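/*
 * Gen8+ (and the gen12 variant) report faults through a single
 * all-engine fault register plus two TLB data registers. The faulting
 * GPU virtual address is reassembled from FAULT_TLB_DATA0/1: DATA0
 * carries the VA bits starting at bit 12, and the low bits of DATA1
 * (FAULT_VA_HIGH_BITS) supply the bits from 44 upwards, i.e.
 * addr = ((u64)(data1 & FAULT_VA_HIGH_BITS) << 44) | ((u64)data0 << 12).
 */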
294 static void gen8_check_faults(struct intel_gt *gt)
295 {
296 	struct intel_uncore *uncore = gt->uncore;
297 	i915_reg_t fault_reg, fault_data0_reg, fault_data1_reg;
298 	u32 fault;
299 
300 	if (GRAPHICS_VER(gt->i915) >= 12) {
301 		fault_reg = GEN12_RING_FAULT_REG;
302 		fault_data0_reg = GEN12_FAULT_TLB_DATA0;
303 		fault_data1_reg = GEN12_FAULT_TLB_DATA1;
304 	} else {
305 		fault_reg = GEN8_RING_FAULT_REG;
306 		fault_data0_reg = GEN8_FAULT_TLB_DATA0;
307 		fault_data1_reg = GEN8_FAULT_TLB_DATA1;
308 	}
309 
310 	fault = intel_uncore_read(uncore, fault_reg);
311 	if (fault & RING_FAULT_VALID) {
312 		u32 fault_data0, fault_data1;
313 		u64 fault_addr;
314 
315 		fault_data0 = intel_uncore_read(uncore, fault_data0_reg);
316 		fault_data1 = intel_uncore_read(uncore, fault_data1_reg);
317 
318 		fault_addr = ((u64)(fault_data1 & FAULT_VA_HIGH_BITS) << 44) |
319 			     ((u64)fault_data0 << 12);
320 
321 		drm_dbg(&uncore->i915->drm, "Unexpected fault\n"
322 			"\tAddr: 0x%08x_%08x\n"
323 			"\tAddress space: %s\n"
324 			"\tEngine ID: %d\n"
325 			"\tSource ID: %d\n"
326 			"\tType: %d\n",
327 			upper_32_bits(fault_addr), lower_32_bits(fault_addr),
328 			fault_data1 & FAULT_GTT_SEL ? "GGTT" : "PPGTT",
329 			GEN8_RING_FAULT_ENGINE_ID(fault),
330 			RING_FAULT_SRCID(fault),
331 			RING_FAULT_FAULT_TYPE(fault));
332 	}
333 }
334 
335 void intel_gt_check_and_clear_faults(struct intel_gt *gt)
336 {
337 	struct drm_i915_private *i915 = gt->i915;
338 
339 	/* From GEN8 onwards we only have one 'All Engine Fault Register' */
340 	if (GRAPHICS_VER(i915) >= 8)
341 		gen8_check_faults(gt);
342 	else if (GRAPHICS_VER(i915) >= 6)
343 		gen6_check_faults(gt);
344 	else
345 		return;
346 
347 	intel_gt_clear_error_registers(gt, ALL_ENGINES);
348 }
349 
350 void intel_gt_flush_ggtt_writes(struct intel_gt *gt)
351 {
352 	struct intel_uncore *uncore = gt->uncore;
353 	intel_wakeref_t wakeref;
354 
355 	/*
356 	 * No actual flushing is required for the GTT write domain for reads
357 	 * from the GTT domain. Writes to it "immediately" go to main memory
358 	 * as far as we know, so there's no chipset flush. It also doesn't
359 	 * land in the GPU render cache.
360 	 *
361 	 * However, we do have to enforce the order so that all writes through
362 	 * the GTT land before any writes to the device, such as updates to
363 	 * the GATT itself.
364 	 *
365 	 * We also have to wait a bit for the writes to land from the GTT.
366 	 * An uncached read (i.e. mmio) seems to be ideal for the round-trip
367 	 * timing. This issue has only been observed when switching quickly
368 	 * between GTT writes and CPU reads from inside the kernel on recent hw,
369 	 * and it appears to only affect discrete GTT blocks (i.e. on LLC
370 	 * system agents we cannot reproduce this behaviour, at least not
371 	 * until Cannonlake).
372 	 */
373 
374 	wmb();
375 
376 	if (INTEL_INFO(gt->i915)->has_coherent_ggtt)
377 		return;
378 
379 	intel_gt_chipset_flush(gt);
380 
381 	with_intel_runtime_pm_if_in_use(uncore->rpm, wakeref) {
382 		unsigned long flags;
383 
384 		spin_lock_irqsave(&uncore->lock, flags);
385 		intel_uncore_posting_read_fw(uncore,
386 					     RING_HEAD(RENDER_RING_BASE));
387 		spin_unlock_irqrestore(&uncore->lock, flags);
388 	}
389 }
390 
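/*
 * Order prior CPU writes with a wmb() and, on old GMCH-based platforms
 * (graphics version < 6), additionally flush through the GMCH so that
 * GGTT writes reach memory.
 */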
391 void intel_gt_chipset_flush(struct intel_gt *gt)
392 {
393 	wmb();
394 	if (GRAPHICS_VER(gt->i915) < 6)
395 		intel_ggtt_gmch_flush();
396 }
397 
398 void intel_gt_driver_register(struct intel_gt *gt)
399 {
400 	intel_gsc_init(&gt->gsc, gt->i915);
401 
402 	intel_rps_driver_register(&gt->rps);
403 
404 	intel_gt_debugfs_register(gt);
405 	intel_gt_sysfs_register(gt);
406 }
407 
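/*
 * Allocate the GT-wide scratch buffer. Placement falls back in order:
 * device-local memory, then stolen, then a plain internal object. The
 * resulting VMA is pinned high in the GGTT and made unshrinkable so it
 * cannot be evicted while the GT is using it.
 */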
408 static int intel_gt_init_scratch(struct intel_gt *gt, unsigned int size)
409 {
410 	struct drm_i915_private *i915 = gt->i915;
411 	struct drm_i915_gem_object *obj;
412 	struct i915_vma *vma;
413 	int ret;
414 
415 	obj = i915_gem_object_create_lmem(i915, size,
416 					  I915_BO_ALLOC_VOLATILE |
417 					  I915_BO_ALLOC_GPU_ONLY);
418 	if (IS_ERR(obj))
419 		obj = i915_gem_object_create_stolen(i915, size);
420 	if (IS_ERR(obj))
421 		obj = i915_gem_object_create_internal(i915, size);
422 	if (IS_ERR(obj)) {
423 		drm_err(&i915->drm, "Failed to allocate scratch page\n");
424 		return PTR_ERR(obj);
425 	}
426 
427 	vma = i915_vma_instance(obj, &gt->ggtt->vm, NULL);
428 	if (IS_ERR(vma)) {
429 		ret = PTR_ERR(vma);
430 		goto err_unref;
431 	}
432 
433 	ret = i915_ggtt_pin(vma, NULL, 0, PIN_HIGH);
434 	if (ret)
435 		goto err_unref;
436 
437 	gt->scratch = i915_vma_make_unshrinkable(vma);
438 
439 	return 0;
440 
441 err_unref:
442 	i915_gem_object_put(obj);
443 	return ret;
444 }
445 
446 static void intel_gt_fini_scratch(struct intel_gt *gt)
447 {
448 	i915_vma_unpin_and_release(&gt->scratch, 0);
449 }
450 
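/*
 * Pick the address space used for the GT's internal (kernel) contexts:
 * a full PPGTT where the platform supports more than aliasing PPGTT,
 * otherwise an extra reference on the global GTT.
 */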
451 static struct i915_address_space *kernel_vm(struct intel_gt *gt)
452 {
453 	if (INTEL_PPGTT(gt->i915) > INTEL_PPGTT_ALIASING)
454 		return &i915_ppgtt_create(gt, I915_BO_ALLOC_PM_EARLY)->vm;
455 	else
456 		return i915_vm_get(&gt->ggtt->vm);
457 }
458 
459 static int __engines_record_defaults(struct intel_gt *gt)
460 {
461 	struct i915_request *requests[I915_NUM_ENGINES] = {};
462 	struct intel_engine_cs *engine;
463 	enum intel_engine_id id;
464 	int err = 0;
465 
466 	/*
467 	 * As we reset the gpu during very early sanitisation, the current
468 	 * register state on the GPU should reflect its default values.
469 	 * We load a context onto the hw (with restore-inhibit), then switch
470 	 * over to a second context to save that default register state. We
471 	 * can then prime every new context with that state so they all start
472 	 * from the same default HW values.
473 	 */
474 
475 	for_each_engine(engine, gt, id) {
476 		struct intel_renderstate so;
477 		struct intel_context *ce;
478 		struct i915_request *rq;
479 
480 		/* We must be able to switch to something! */
481 		GEM_BUG_ON(!engine->kernel_context);
482 
483 		ce = intel_context_create(engine);
484 		if (IS_ERR(ce)) {
485 			err = PTR_ERR(ce);
486 			goto out;
487 		}
488 
489 		err = intel_renderstate_init(&so, ce);
490 		if (err)
491 			goto err;
492 
493 		rq = i915_request_create(ce);
494 		if (IS_ERR(rq)) {
495 			err = PTR_ERR(rq);
496 			goto err_fini;
497 		}
498 
499 		err = intel_engine_emit_ctx_wa(rq);
500 		if (err)
501 			goto err_rq;
502 
503 		err = intel_renderstate_emit(&so, rq);
504 		if (err)
505 			goto err_rq;
506 
507 err_rq:
508 		requests[id] = i915_request_get(rq);
509 		i915_request_add(rq);
510 err_fini:
511 		intel_renderstate_fini(&so, ce);
512 err:
513 		if (err) {
514 			intel_context_put(ce);
515 			goto out;
516 		}
517 	}
518 
519 	/* Flush the default context image to memory, and enable powersaving. */
520 	if (intel_gt_wait_for_idle(gt, I915_GEM_IDLE_TIMEOUT) == -ETIME) {
521 		err = -EIO;
522 		goto out;
523 	}
524 
525 	for (id = 0; id < ARRAY_SIZE(requests); id++) {
526 		struct i915_request *rq;
527 		struct file *state;
528 
529 		rq = requests[id];
530 		if (!rq)
531 			continue;
532 
533 		if (rq->fence.error) {
534 			err = -EIO;
535 			goto out;
536 		}
537 
538 		GEM_BUG_ON(!test_bit(CONTEXT_ALLOC_BIT, &rq->context->flags));
539 		if (!rq->context->state)
540 			continue;
541 
542 		/* Keep a copy of the state's backing pages; free the obj */
543 		state = shmem_create_from_object(rq->context->state->obj);
544 		if (IS_ERR(state)) {
545 			err = PTR_ERR(state);
546 			goto out;
547 		}
548 		rq->engine->default_state = state;
549 	}
550 
551 out:
552 	/*
553 	 * If we have to abandon now, we expect the engines to be idle
554 	 * and ready to be torn down. The quickest way we can accomplish
555 	 * this is by declaring ourselves wedged.
556 	 */
557 	if (err)
558 		intel_gt_set_wedged(gt);
559 
560 	for (id = 0; id < ARRAY_SIZE(requests); id++) {
561 		struct intel_context *ce;
562 		struct i915_request *rq;
563 
564 		rq = requests[id];
565 		if (!rq)
566 			continue;
567 
568 		ce = rq->context;
569 		i915_request_put(rq);
570 		intel_context_put(ce);
571 	}
572 	return err;
573 }
574 
575 static int __engines_verify_workarounds(struct intel_gt *gt)
576 {
577 	struct intel_engine_cs *engine;
578 	enum intel_engine_id id;
579 	int err = 0;
580 
581 	if (!IS_ENABLED(CONFIG_DRM_I915_DEBUG_GEM))
582 		return 0;
583 
584 	for_each_engine(engine, gt, id) {
585 		if (intel_engine_verify_workarounds(engine, "load"))
586 			err = -EIO;
587 	}
588 
589 	/* Flush and restore the kernel context for safety */
590 	if (intel_gt_wait_for_idle(gt, I915_GEM_IDLE_TIMEOUT) == -ETIME)
591 		err = -EIO;
592 
593 	return err;
594 }
595 
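/*
 * Quiesce the GT for teardown: wedge it for fini so no new work reaches
 * the hardware, then run the suspend sequence to flush and park
 * everything. By the end the GT must not hold any PM wakeref.
 */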
596 static void __intel_gt_disable(struct intel_gt *gt)
597 {
598 	intel_gt_set_wedged_on_fini(gt);
599 
600 	intel_gt_suspend_prepare(gt);
601 	intel_gt_suspend_late(gt);
602 
603 	GEM_BUG_ON(intel_gt_pm_is_awake(gt));
604 }
605 
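/*
 * Wait for the GT to go idle: keep retiring requests until everything
 * has drained or the timeout expires, bailing out early on a pending
 * signal, then hand the remaining budget to the uC for its own idling.
 * Returns immediately if the GT is already asleep. Callers in this file
 * pass I915_GEM_IDLE_TIMEOUT.
 */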
606 int intel_gt_wait_for_idle(struct intel_gt *gt, long timeout)
607 {
608 	long remaining_timeout;
609 
610 	/* If the device is asleep, we have no requests outstanding */
611 	if (!intel_gt_pm_is_awake(gt))
612 		return 0;
613 
614 	while ((timeout = intel_gt_retire_requests_timeout(gt, timeout,
615 							   &remaining_timeout)) > 0) {
616 		cond_resched();
617 		if (signal_pending(current))
618 			return -EINTR;
619 	}
620 
621 	return timeout ? timeout : intel_uc_wait_for_idle(&gt->uc,
622 							  remaining_timeout);
623 }
624 
625 int intel_gt_init(struct intel_gt *gt)
626 {
627 	int err;
628 
629 	err = i915_inject_probe_error(gt->i915, -ENODEV);
630 	if (err)
631 		return err;
632 
633 	intel_gt_init_workarounds(gt);
634 
635 	/*
636 	 * This is just a security blanket to placate dragons.
637 	 * On some systems, we very sporadically observe that the first TLBs
638 	 * used by the CS may be stale, despite us poking the TLB reset. If
639 	 * we hold the forcewake during initialisation these problems
640 	 * just magically go away.
641 	 */
642 	intel_uncore_forcewake_get(gt->uncore, FORCEWAKE_ALL);
643 
644 	err = intel_gt_init_scratch(gt,
645 				    GRAPHICS_VER(gt->i915) == 2 ? SZ_256K : SZ_4K);
646 	if (err)
647 		goto out_fw;
648 
649 	intel_gt_pm_init(gt);
650 
651 	gt->vm = kernel_vm(gt);
652 	if (!gt->vm) {
653 		err = -ENOMEM;
654 		goto err_pm;
655 	}
656 
657 	intel_set_mocs_index(gt);
658 
659 	err = intel_engines_init(gt);
660 	if (err)
661 		goto err_engines;
662 
663 	err = intel_uc_init(&gt->uc);
664 	if (err)
665 		goto err_engines;
666 
667 	err = intel_gt_resume(gt);
668 	if (err)
669 		goto err_uc_init;
670 
671 	err = intel_gt_init_hwconfig(gt);
672 	if (err)
673 		drm_err(&gt->i915->drm, "Failed to retrieve hwconfig table: %pe\n",
674 			ERR_PTR(err));
675 
676 	err = __engines_record_defaults(gt);
677 	if (err)
678 		goto err_gt;
679 
680 	err = __engines_verify_workarounds(gt);
681 	if (err)
682 		goto err_gt;
683 
684 	intel_uc_init_late(&gt->uc);
685 
686 	err = i915_inject_probe_error(gt->i915, -EIO);
687 	if (err)
688 		goto err_gt;
689 
690 	intel_migrate_init(&gt->migrate, gt);
691 
692 	intel_pxp_init(&gt->pxp);
693 
694 	goto out_fw;
695 err_gt:
696 	__intel_gt_disable(gt);
697 	intel_uc_fini_hw(&gt->uc);
698 err_uc_init:
699 	intel_uc_fini(&gt->uc);
700 err_engines:
701 	intel_engines_release(gt);
702 	i915_vm_put(fetch_and_zero(&gt->vm));
703 err_pm:
704 	intel_gt_pm_fini(gt);
705 	intel_gt_fini_scratch(gt);
706 out_fw:
707 	if (err)
708 		intel_gt_set_wedged_on_init(gt);
709 	intel_uncore_forcewake_put(gt->uncore, FORCEWAKE_ALL);
710 	return err;
711 }
712 
713 void intel_gt_driver_remove(struct intel_gt *gt)
714 {
715 	__intel_gt_disable(gt);
716 
717 	intel_migrate_fini(&gt->migrate);
718 	intel_uc_driver_remove(&gt->uc);
719 
720 	intel_engines_release(gt);
721 
722 	intel_gt_flush_buffer_pool(gt);
723 }
724 
725 void intel_gt_driver_unregister(struct intel_gt *gt)
726 {
727 	intel_wakeref_t wakeref;
728 
729 	intel_gt_sysfs_unregister(gt);
730 	intel_rps_driver_unregister(&gt->rps);
731 	intel_gsc_fini(&gt->gsc);
732 
733 	intel_pxp_fini(&gt->pxp);
734 
735 	/*
736 	 * Upon unregistering the device to prevent any new users, cancel
737 	 * all in-flight requests so that we can quickly unbind the active
738 	 * resources.
739 	 */
740 	intel_gt_set_wedged_on_fini(gt);
741 
742 	/* Scrub all HW state upon release */
743 	with_intel_runtime_pm(gt->uncore->rpm, wakeref)
744 		__intel_gt_reset(gt, ALL_ENGINES);
745 }
746 
747 void intel_gt_driver_release(struct intel_gt *gt)
748 {
749 	struct i915_address_space *vm;
750 
751 	vm = fetch_and_zero(&gt->vm);
752 	if (vm) /* FIXME being called twice on error paths :( */
753 		i915_vm_put(vm);
754 
755 	intel_wa_list_free(&gt->wa_list);
756 	intel_gt_pm_fini(gt);
757 	intel_gt_fini_scratch(gt);
758 	intel_gt_fini_buffer_pool(gt);
759 	intel_gt_fini_hwconfig(gt);
760 }
761 
762 void intel_gt_driver_late_release_all(struct drm_i915_private *i915)
763 {
764 	struct intel_gt *gt;
765 	unsigned int id;
766 
767 	/* We need to wait for inflight RCU frees to release their grip */
768 	rcu_barrier();
769 
770 	for_each_gt(gt, i915, id) {
771 		intel_uc_driver_late_release(&gt->uc);
772 		intel_gt_fini_requests(gt);
773 		intel_gt_fini_reset(gt);
774 		intel_gt_fini_timelines(gt);
775 		mutex_destroy(&gt->tlb.invalidate_lock);
776 		intel_engines_free(gt);
777 	}
778 }
779 
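/*
 * Per-tile MMIO setup. The root GT reuses the uncore already embedded
 * in struct drm_i915_private; any further tile allocates its own
 * uncore + mmio_debug pair and runs the common early init before the
 * MMIO region at phys_addr is mapped.
 */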
780 static int intel_gt_tile_setup(struct intel_gt *gt, phys_addr_t phys_addr)
781 {
782 	int ret;
783 
784 	if (!gt_is_root(gt)) {
785 		struct intel_uncore_mmio_debug *mmio_debug;
786 		struct intel_uncore *uncore;
787 
788 		uncore = kzalloc(sizeof(*uncore), GFP_KERNEL);
789 		if (!uncore)
790 			return -ENOMEM;
791 
792 		mmio_debug = kzalloc(sizeof(*mmio_debug), GFP_KERNEL);
793 		if (!mmio_debug) {
794 			kfree(uncore);
795 			return -ENOMEM;
796 		}
797 
798 		gt->uncore = uncore;
799 		gt->uncore->debug = mmio_debug;
800 
801 		__intel_gt_init_early(gt);
802 	}
803 
804 	intel_uncore_init_early(gt->uncore, gt);
805 
806 	ret = intel_uncore_setup_mmio(gt->uncore, phys_addr);
807 	if (ret)
808 		return ret;
809 
810 	gt->phys_addr = phys_addr;
811 
812 	return 0;
813 }
814 
815 static void
816 intel_gt_tile_cleanup(struct intel_gt *gt)
817 {
818 	intel_uncore_cleanup_mmio(gt->uncore);
819 
820 	if (!gt_is_root(gt)) {
821 		kfree(gt->uncore->debug);
822 		kfree(gt->uncore);
823 		kfree(gt);
824 	}
825 }
826 
827 int intel_gt_probe_all(struct drm_i915_private *i915)
828 {
829 	struct pci_dev *pdev = to_pci_dev(i915->drm.dev);
830 	struct intel_gt *gt = &i915->gt0;
831 	phys_addr_t phys_addr;
832 	unsigned int mmio_bar;
833 	int ret;
834 
835 	mmio_bar = GRAPHICS_VER(i915) == 2 ? GEN2_GTTMMADR_BAR : GTTMMADR_BAR;
836 	phys_addr = pci_resource_start(pdev, mmio_bar);
837 
838 	/*
839 	 * We always have at least one primary GT on any device
840 	 * and it has already been initialized early during probe
841 	 * in i915_driver_probe()
842 	 */
843 	ret = intel_gt_tile_setup(gt, phys_addr);
844 	if (ret)
845 		return ret;
846 
847 	i915->gt[0] = gt;
848 
849 	/* TODO: add more tiles */
850 	return 0;
851 }
852 
853 int intel_gt_tiles_init(struct drm_i915_private *i915)
854 {
855 	struct intel_gt *gt;
856 	unsigned int id;
857 	int ret;
858 
859 	for_each_gt(gt, i915, id) {
860 		ret = intel_gt_probe_lmem(gt);
861 		if (ret)
862 			return ret;
863 	}
864 
865 	return 0;
866 }
867 
868 void intel_gt_release_all(struct drm_i915_private *i915)
869 {
870 	struct intel_gt *gt;
871 	unsigned int id;
872 
873 	for_each_gt(gt, i915, id) {
874 		intel_gt_tile_cleanup(gt);
875 		i915->gt[id] = NULL;
876 	}
877 }
878 
879 void intel_gt_info_print(const struct intel_gt_info *info,
880 			 struct drm_printer *p)
881 {
882 	drm_printf(p, "available engines: %x\n", info->engine_mask);
883 
884 	intel_sseu_dump(&info->sseu, p);
885 }
886 
887 struct reg_and_bit {
888 	i915_reg_t reg;
889 	u32 bit;
890 };
891 
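/*
 * Look up the TLB invalidation register and trigger bit for an engine.
 * Normally every engine of a class shares one register and the engine
 * instance selects the bit; the gen8 video-decode class instead has a
 * register per instance (GEN8_M1TCR, GEN8_M2TCR, ...), spaced 4 bytes
 * apart, with bit 0 used in each.
 */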
892 static struct reg_and_bit
893 get_reg_and_bit(const struct intel_engine_cs *engine, const bool gen8,
894 		const i915_reg_t *regs, const unsigned int num)
895 {
896 	const unsigned int class = engine->class;
897 	struct reg_and_bit rb = { };
898 
899 	if (drm_WARN_ON_ONCE(&engine->i915->drm,
900 			     class >= num || !regs[class].reg))
901 		return rb;
902 
903 	rb.reg = regs[class];
904 	if (gen8 && class == VIDEO_DECODE_CLASS)
905 		rb.reg.reg += 4 * engine->instance; /* GEN8_M2TCR */
906 	else
907 		rb.bit = engine->instance;
908 
909 	rb.bit = BIT(rb.bit);
910 
911 	return rb;
912 }
913 
914 static void mmio_invalidate_full(struct intel_gt *gt)
915 {
916 	static const i915_reg_t gen8_regs[] = {
917 		[RENDER_CLASS]			= GEN8_RTCR,
918 		[VIDEO_DECODE_CLASS]		= GEN8_M1TCR, /* , GEN8_M2TCR */
919 		[VIDEO_ENHANCEMENT_CLASS]	= GEN8_VTCR,
920 		[COPY_ENGINE_CLASS]		= GEN8_BTCR,
921 	};
922 	static const i915_reg_t gen12_regs[] = {
923 		[RENDER_CLASS]			= GEN12_GFX_TLB_INV_CR,
924 		[VIDEO_DECODE_CLASS]		= GEN12_VD_TLB_INV_CR,
925 		[VIDEO_ENHANCEMENT_CLASS]	= GEN12_VE_TLB_INV_CR,
926 		[COPY_ENGINE_CLASS]		= GEN12_BLT_TLB_INV_CR,
927 		[COMPUTE_CLASS]			= GEN12_COMPCTX_TLB_INV_CR,
928 	};
929 	struct drm_i915_private *i915 = gt->i915;
930 	struct intel_uncore *uncore = gt->uncore;
931 	struct intel_engine_cs *engine;
932 	intel_engine_mask_t awake, tmp;
933 	enum intel_engine_id id;
934 	const i915_reg_t *regs;
935 	unsigned int num = 0;
936 
937 	if (GRAPHICS_VER(i915) == 12) {
938 		regs = gen12_regs;
939 		num = ARRAY_SIZE(gen12_regs);
940 	} else if (GRAPHICS_VER(i915) >= 8 && GRAPHICS_VER(i915) <= 11) {
941 		regs = gen8_regs;
942 		num = ARRAY_SIZE(gen8_regs);
943 	} else if (GRAPHICS_VER(i915) < 8) {
944 		return;
945 	}
946 
947 	if (drm_WARN_ONCE(&i915->drm, !num,
948 			  "Platform does not implement TLB invalidation!"))
949 		return;
950 
951 	intel_uncore_forcewake_get(uncore, FORCEWAKE_ALL);
952 
953 	spin_lock_irq(&uncore->lock); /* serialise invalidate with GT reset */
954 
955 	awake = 0;
956 	for_each_engine(engine, gt, id) {
957 		struct reg_and_bit rb;
958 
959 		if (!intel_engine_pm_is_awake(engine))
960 			continue;
961 
962 		rb = get_reg_and_bit(engine, regs == gen8_regs, regs, num);
963 		if (!i915_mmio_reg_offset(rb.reg))
964 			continue;
965 
966 		intel_uncore_write_fw(uncore, rb.reg, rb.bit);
967 		awake |= engine->mask;
968 	}
969 
970 	GT_TRACE(gt, "invalidated engines %08x\n", awake);
971 
972 	/* Wa_2207587034:tgl,dg1,rkl,adl-s,adl-p */
973 	if (awake &&
974 	    (IS_TIGERLAKE(i915) ||
975 	     IS_DG1(i915) ||
976 	     IS_ROCKETLAKE(i915) ||
977 	     IS_ALDERLAKE_S(i915) ||
978 	     IS_ALDERLAKE_P(i915)))
979 		intel_uncore_write_fw(uncore, GEN12_OA_TLB_INV_CR, 1);
980 
981 	spin_unlock_irq(&uncore->lock);
982 
983 	for_each_engine_masked(engine, gt, awake, tmp) {
984 		struct reg_and_bit rb;
985 
986 		/*
987 		 * HW architecture suggests a typical invalidation time of 40us,
988 		 * with pessimistic cases up to 100us and a recommendation to
989 		 * cap at 1ms. We go a bit higher just in case.
990 		 */
991 		const unsigned int timeout_us = 100;
992 		const unsigned int timeout_ms = 4;
993 
994 		rb = get_reg_and_bit(engine, regs == gen8_regs, regs, num);
995 		if (__intel_wait_for_register_fw(uncore,
996 						 rb.reg, rb.bit, 0,
997 						 timeout_us, timeout_ms,
998 						 NULL))
999 			drm_err_ratelimited(&gt->i915->drm,
1000 					    "%s TLB invalidation did not complete in %ums!\n",
1001 					    engine->name, timeout_ms);
1002 	}
1003 
1004 	/*
1005 	 * Use delayed put since a) we mostly expect a flurry of TLB
1006 	 * invalidations so it is good to avoid paying the forcewake cost and
1007 	 * b) it works around a bug in Icelake which cannot cope with too rapid
1008 	 * transitions.
1009 	 */
1010 	intel_uncore_forcewake_put_delayed(uncore, FORCEWAKE_ALL);
1011 }
1012 
1013 static bool tlb_seqno_passed(const struct intel_gt *gt, u32 seqno)
1014 {
1015 	u32 cur = intel_gt_tlb_seqno(gt);
1016 
1017 	/* Only skip if a *full* TLB invalidate barrier has passed */
1018 	return (s32)(cur - ALIGN(seqno, 2)) > 0;
1019 }
1020 
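/*
 * Full GT TLB invalidation, throttled by a seqno. The caller passes the
 * seqno it sampled earlier (see intel_gt_tlb_seqno()); if a full
 * invalidation barrier has already passed that point (rechecked under
 * the lock), or the GT is parked or wedged, nothing needs to be done.
 * Otherwise one MMIO invalidation covers all awake engines and the
 * seqcount is bumped so later callers can piggy-back on it.
 */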
1021 void intel_gt_invalidate_tlb(struct intel_gt *gt, u32 seqno)
1022 {
1023 	intel_wakeref_t wakeref;
1024 
1025 	if (I915_SELFTEST_ONLY(gt->awake == -ENODEV))
1026 		return;
1027 
1028 	if (intel_gt_is_wedged(gt))
1029 		return;
1030 
1031 	if (tlb_seqno_passed(gt, seqno))
1032 		return;
1033 
1034 	with_intel_gt_pm_if_awake(gt, wakeref) {
1035 		mutex_lock(&gt->tlb.invalidate_lock);
1036 		if (tlb_seqno_passed(gt, seqno))
1037 			goto unlock;
1038 
1039 		mmio_invalidate_full(gt);
1040 
1041 		write_seqcount_invalidate(&gt->tlb.seqno);
1042 unlock:
1043 		mutex_unlock(&gt->tlb.invalidate_lock);
1044 	}
1045 }
1046