xref: /openbmc/linux/drivers/gpu/drm/i915/gt/uc/intel_uc.c (revision d9e32672)
// SPDX-License-Identifier: MIT
/*
 * Copyright © 2016-2019 Intel Corporation
 */

#include "gt/intel_gt.h"
#include "gt/intel_reset.h"
#include "intel_guc.h"
#include "intel_guc_ads.h"
#include "intel_guc_submission.h"
#include "intel_uc.h"

#include "i915_drv.h"

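/*
 * Rough call order during driver probe (a sketch of how i915 wires these
 * entry points up; exact call sites may differ between kernel versions):
 *
 *	intel_uc_init_early()      - early SW-only setup and option checks
 *	intel_uc_init_mmio()       - minimal state for MMIO-based access
 *	intel_uc_fetch_firmwares() - request GuC/HuC blobs from userspace
 *	intel_uc_init()            - allocate uC objects
 *	intel_uc_init_hw()         - upload, authenticate and start the uC
 */
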
/*
 * Reset GuC, providing us with fresh state for both GuC and HuC.
 */
static int __intel_uc_reset_hw(struct intel_uc *uc)
{
	struct intel_gt *gt = uc_to_gt(uc);
	int ret;
	u32 guc_status;

	ret = i915_inject_probe_error(gt->i915, -ENXIO);
	if (ret)
		return ret;

	ret = intel_reset_guc(gt);
	if (ret) {
		DRM_ERROR("Failed to reset GuC, ret = %d\n", ret);
		return ret;
	}

	guc_status = intel_uncore_read(gt->uncore, GUC_STATUS);
	WARN(!(guc_status & GS_MIA_IN_RESET),
	     "GuC status: 0x%x, MIA core expected to be in reset\n",
	     guc_status);

	return ret;
}

static void __confirm_options(struct intel_uc *uc)
{
	struct drm_i915_private *i915 = uc_to_gt(uc)->i915;

	DRM_DEV_DEBUG_DRIVER(i915->drm.dev,
			     "enable_guc=%d (guc:%s submission:%s huc:%s)\n",
			     i915_modparams.enable_guc,
			     yesno(intel_uc_uses_guc(uc)),
			     yesno(intel_uc_uses_guc_submission(uc)),
			     yesno(intel_uc_uses_huc(uc)));

	if (i915_modparams.enable_guc == -1)
		return;

	if (i915_modparams.enable_guc == 0) {
		GEM_BUG_ON(intel_uc_uses_guc(uc));
		GEM_BUG_ON(intel_uc_uses_guc_submission(uc));
		GEM_BUG_ON(intel_uc_uses_huc(uc));
		return;
	}

	if (!intel_uc_supports_guc(uc))
		dev_info(i915->drm.dev,
			 "Incompatible option enable_guc=%d - %s\n",
			 i915_modparams.enable_guc, "GuC is not supported!");

	if (i915_modparams.enable_guc & ENABLE_GUC_LOAD_HUC &&
	    !intel_uc_supports_huc(uc))
		dev_info(i915->drm.dev,
			 "Incompatible option enable_guc=%d - %s\n",
			 i915_modparams.enable_guc, "HuC is not supported!");

	if (i915_modparams.enable_guc & ENABLE_GUC_SUBMISSION &&
	    !intel_uc_supports_guc_submission(uc))
		dev_info(i915->drm.dev,
			 "Incompatible option enable_guc=%d - %s\n",
			 i915_modparams.enable_guc, "GuC submission is N/A");

	if (i915_modparams.enable_guc & ~(ENABLE_GUC_SUBMISSION |
					  ENABLE_GUC_LOAD_HUC))
		dev_info(i915->drm.dev,
			 "Incompatible option enable_guc=%d - %s\n",
			 i915_modparams.enable_guc, "undocumented flag");
}
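
/*
 * For reference, enable_guc is a bitmask. Assuming the usual i915 modparam
 * encoding (ENABLE_GUC_SUBMISSION as bit 0, ENABLE_GUC_LOAD_HUC as bit 1),
 * e.g. enable_guc=2 asks for HuC loading without GuC submission, while the
 * default enable_guc=-1 lets the driver choose per platform.
 */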

void intel_uc_init_early(struct intel_uc *uc)
{
	intel_guc_init_early(&uc->guc);
	intel_huc_init_early(&uc->huc);

	__confirm_options(uc);
}

void intel_uc_driver_late_release(struct intel_uc *uc)
{
}

/**
 * intel_uc_init_mmio - setup uC MMIO access
 * @uc: the intel_uc structure
 *
 * Set up the minimal state necessary for MMIO accesses later in the
 * initialization sequence.
 */
void intel_uc_init_mmio(struct intel_uc *uc)
{
	intel_guc_init_send_regs(&uc->guc);
}

static void __uc_capture_load_err_log(struct intel_uc *uc)
{
	struct intel_guc *guc = &uc->guc;

	if (guc->log.vma && !uc->load_err_log)
		uc->load_err_log = i915_gem_object_get(guc->log.vma->obj);
}

static void __uc_free_load_err_log(struct intel_uc *uc)
{
	struct drm_i915_gem_object *log = fetch_and_zero(&uc->load_err_log);

	if (log)
		i915_gem_object_put(log);
}

/*
 * Events triggered while CT buffers are disabled are logged in the SCRATCH_15
 * register using the same bits used in the CT message payload. Since our
 * communication channel with GuC is turned off at this point, we can save the
 * message and handle it after we turn it back on.
 */
static void guc_clear_mmio_msg(struct intel_guc *guc)
{
	intel_uncore_write(guc_to_gt(guc)->uncore, SOFT_SCRATCH(15), 0);
}
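
/*
 * Illustrative flow while CT is down (a sketch of the mechanism described
 * above, not additional functionality): GuC-to-host events accumulate in
 * SOFT_SCRATCH(15); once CT is re-enabled, guc_get_mmio_msg() below drains
 * them into guc->mmio_msg and guc_handle_mmio_msg() replays them through
 * the normal receive path.
 */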

static void guc_get_mmio_msg(struct intel_guc *guc)
{
	u32 val;

	spin_lock_irq(&guc->irq_lock);

	val = intel_uncore_read(guc_to_gt(guc)->uncore, SOFT_SCRATCH(15));
	guc->mmio_msg |= val & guc->msg_enabled_mask;

	/*
	 * clear all events, including the ones we're not currently servicing,
	 * to make sure we don't try to process a stale message if we enable
	 * handling of more events later.
	 */
	guc_clear_mmio_msg(guc);

	spin_unlock_irq(&guc->irq_lock);
}

static void guc_handle_mmio_msg(struct intel_guc *guc)
{
	struct drm_i915_private *i915 = guc_to_gt(guc)->i915;

	/* we need communication to be enabled to reply to GuC */
	GEM_BUG_ON(guc->handler == intel_guc_to_host_event_handler_nop);

	if (!guc->mmio_msg)
		return;

	spin_lock_irq(&i915->irq_lock);
	intel_guc_to_host_process_recv_msg(guc, &guc->mmio_msg, 1);
	spin_unlock_irq(&i915->irq_lock);

	guc->mmio_msg = 0;
}

static void guc_reset_interrupts(struct intel_guc *guc)
{
	guc->interrupts.reset(guc);
}

static void guc_enable_interrupts(struct intel_guc *guc)
{
	guc->interrupts.enable(guc);
}

static void guc_disable_interrupts(struct intel_guc *guc)
{
	guc->interrupts.disable(guc);
}

static inline bool guc_communication_enabled(struct intel_guc *guc)
{
	return guc->send != intel_guc_send_nop;
}
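
/*
 * Note that the communication state is tracked implicitly by which vfuncs
 * are installed: the nop send/handler pair while the channel is down, and
 * the CT variants once intel_guc_ct_enable() has succeeded.
 */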

static int guc_enable_communication(struct intel_guc *guc)
{
	struct drm_i915_private *i915 = guc_to_gt(guc)->i915;
	int ret;

	GEM_BUG_ON(guc_communication_enabled(guc));

	ret = i915_inject_probe_error(i915, -ENXIO);
	if (ret)
		return ret;

	ret = intel_guc_ct_enable(&guc->ct);
	if (ret)
		return ret;

	guc->send = intel_guc_send_ct;
	guc->handler = intel_guc_to_host_event_handler_ct;

	/* check for mmio messages received before/during the CT enable */
	guc_get_mmio_msg(guc);
	guc_handle_mmio_msg(guc);

	guc_enable_interrupts(guc);

	/* check for CT messages received before we enabled interrupts */
	spin_lock_irq(&i915->irq_lock);
	intel_guc_to_host_event_handler_ct(guc);
	spin_unlock_irq(&i915->irq_lock);

	DRM_INFO("GuC communication enabled\n");

	return 0;
}

static void __guc_stop_communication(struct intel_guc *guc)
{
	/*
	 * Events generated during or after CT disable are logged by guc
	 * via mmio. Make sure the register is clear before disabling CT since
	 * all events we cared about have already been processed via CT.
	 */
	guc_clear_mmio_msg(guc);

	guc_disable_interrupts(guc);

	guc->send = intel_guc_send_nop;
	guc->handler = intel_guc_to_host_event_handler_nop;
}

static void guc_stop_communication(struct intel_guc *guc)
{
	intel_guc_ct_stop(&guc->ct);

	__guc_stop_communication(guc);

	DRM_INFO("GuC communication stopped\n");
}

static void guc_disable_communication(struct intel_guc *guc)
{
	__guc_stop_communication(guc);

	intel_guc_ct_disable(&guc->ct);

	/*
	 * Check for messages received during/after the CT disable. We do not
	 * expect any messages to have arrived via CT between the interrupt
	 * disable and the CT disable because GuC should've been idle until we
	 * triggered the CT disable protocol.
	 */
	guc_get_mmio_msg(guc);

	DRM_INFO("GuC communication disabled\n");
}
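
/*
 * A note on stop vs disable: guc_stop_communication() (used from
 * intel_uc_reset_prepare(), where the GuC may no longer be able to
 * respond) aborts CT processing without the orderly teardown, while
 * guc_disable_communication() runs the full CT disable and then collects
 * any stragglers that were diverted to the MMIO scratch register.
 */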

void intel_uc_fetch_firmwares(struct intel_uc *uc)
{
	struct drm_i915_private *i915 = uc_to_gt(uc)->i915;
	int err;

	if (!intel_uc_uses_guc(uc))
		return;

	err = intel_uc_fw_fetch(&uc->guc.fw, i915);
	if (err)
		return;

	if (intel_uc_uses_huc(uc))
		intel_uc_fw_fetch(&uc->huc.fw, i915);
}

void intel_uc_cleanup_firmwares(struct intel_uc *uc)
{
	if (!intel_uc_uses_guc(uc))
		return;

	if (intel_uc_uses_huc(uc))
		intel_uc_fw_cleanup_fetch(&uc->huc.fw);

	intel_uc_fw_cleanup_fetch(&uc->guc.fw);
}

void intel_uc_init(struct intel_uc *uc)
{
	struct intel_guc *guc = &uc->guc;
	struct intel_huc *huc = &uc->huc;
	int ret;

	if (!intel_uc_uses_guc(uc))
		return;

	/* XXX: GuC submission is unavailable for now */
	GEM_BUG_ON(intel_uc_supports_guc_submission(uc));

	ret = intel_guc_init(guc);
	if (ret) {
		intel_uc_fw_cleanup_fetch(&huc->fw);
		return;
	}

	if (intel_uc_uses_huc(uc))
		intel_huc_init(huc);
}

void intel_uc_fini(struct intel_uc *uc)
{
	struct intel_guc *guc = &uc->guc;

	if (!intel_uc_uses_guc(uc))
		return;

	if (intel_uc_uses_huc(uc))
		intel_huc_fini(&uc->huc);

	intel_guc_fini(guc);

	__uc_free_load_err_log(uc);
}

static int __uc_sanitize(struct intel_uc *uc)
{
	struct intel_guc *guc = &uc->guc;
	struct intel_huc *huc = &uc->huc;

	GEM_BUG_ON(!intel_uc_supports_guc(uc));

	intel_huc_sanitize(huc);
	intel_guc_sanitize(guc);

	return __intel_uc_reset_hw(uc);
}

void intel_uc_sanitize(struct intel_uc *uc)
{
	if (!intel_uc_supports_guc(uc))
		return;

	__uc_sanitize(uc);
}

/* Initialize and verify the uC regs related to uC positioning in WOPCM */
static int uc_init_wopcm(struct intel_uc *uc)
{
	struct intel_gt *gt = uc_to_gt(uc);
	struct intel_uncore *uncore = gt->uncore;
	u32 base = intel_wopcm_guc_base(&gt->i915->wopcm);
	u32 size = intel_wopcm_guc_size(&gt->i915->wopcm);
	u32 huc_agent = intel_uc_uses_huc(uc) ? HUC_LOADING_AGENT_GUC : 0;
	u32 mask;
	int err;

	if (unlikely(!base || !size)) {
		i915_probe_error(gt->i915, "Unsuccessful WOPCM partitioning\n");
		return -E2BIG;
	}

	GEM_BUG_ON(!intel_uc_supports_guc(uc));
	GEM_BUG_ON(!(base & GUC_WOPCM_OFFSET_MASK));
	GEM_BUG_ON(base & ~GUC_WOPCM_OFFSET_MASK);
	GEM_BUG_ON(!(size & GUC_WOPCM_SIZE_MASK));
	GEM_BUG_ON(size & ~GUC_WOPCM_SIZE_MASK);

	err = i915_inject_probe_error(gt->i915, -ENXIO);
	if (err)
		return err;

	mask = GUC_WOPCM_SIZE_MASK | GUC_WOPCM_SIZE_LOCKED;
	err = intel_uncore_write_and_verify(uncore, GUC_WOPCM_SIZE, size, mask,
					    size | GUC_WOPCM_SIZE_LOCKED);
	if (err)
		goto err_out;

	mask = GUC_WOPCM_OFFSET_MASK | GUC_WOPCM_OFFSET_VALID | huc_agent;
	err = intel_uncore_write_and_verify(uncore, DMA_GUC_WOPCM_OFFSET,
					    base | huc_agent, mask,
					    base | huc_agent |
					    GUC_WOPCM_OFFSET_VALID);
	if (err)
		goto err_out;

	return 0;

err_out:
	i915_probe_error(gt->i915, "Failed to init uC WOPCM registers!\n");
	i915_probe_error(gt->i915, "%s(%#x)=%#x\n", "DMA_GUC_WOPCM_OFFSET",
			 i915_mmio_reg_offset(DMA_GUC_WOPCM_OFFSET),
			 intel_uncore_read(uncore, DMA_GUC_WOPCM_OFFSET));
	i915_probe_error(gt->i915, "%s(%#x)=%#x\n", "GUC_WOPCM_SIZE",
			 i915_mmio_reg_offset(GUC_WOPCM_SIZE),
			 intel_uncore_read(uncore, GUC_WOPCM_SIZE));

	return err;
}
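
/*
 * For reference, intel_uncore_write_and_verify() above behaves roughly as
 * the following sketch (its expected semantics, not the exact
 * implementation):
 *
 *	intel_uncore_write(uncore, reg, value);
 *	reg_val = intel_uncore_read(uncore, reg);
 *	return (reg_val & mask) != expected_val ? -EINVAL : 0;
 *
 * so a non-zero return means the register did not take the value we wrote,
 * e.g. because WOPCM was already locked with a different layout.
 */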

static bool uc_is_wopcm_locked(struct intel_uc *uc)
{
	struct intel_gt *gt = uc_to_gt(uc);
	struct intel_uncore *uncore = gt->uncore;

	return (intel_uncore_read(uncore, GUC_WOPCM_SIZE) & GUC_WOPCM_SIZE_LOCKED) ||
	       (intel_uncore_read(uncore, DMA_GUC_WOPCM_OFFSET) & GUC_WOPCM_OFFSET_VALID);
}
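
/*
 * GUC_WOPCM_SIZE_LOCKED and GUC_WOPCM_OFFSET_VALID are sticky until the
 * device is reset, so seeing either bit set here means some earlier agent
 * (e.g. a previous driver instance) already committed a WOPCM layout and
 * may have loaded a GuC.
 */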

int intel_uc_init_hw(struct intel_uc *uc)
{
	struct drm_i915_private *i915 = uc_to_gt(uc)->i915;
	struct intel_guc *guc = &uc->guc;
	struct intel_huc *huc = &uc->huc;
	int ret, attempts;

	if (!intel_uc_supports_guc(uc))
		return 0;

	/*
	 * We can silently continue without GuC only if it was never enabled
	 * before on this system after reboot, otherwise we risk GPU hangs.
	 * To check if GuC was loaded before, we look at the WOPCM registers.
	 */
	if (!intel_uc_uses_guc(uc) && !uc_is_wopcm_locked(uc))
		return 0;

	if (!intel_uc_fw_is_available(&guc->fw)) {
		ret = uc_is_wopcm_locked(uc) ||
		      intel_uc_fw_is_overridden(&guc->fw) ||
		      intel_uc_supports_guc_submission(uc) ?
		      intel_uc_fw_status_to_error(guc->fw.status) : 0;
		goto err_out;
	}

	ret = uc_init_wopcm(uc);
	if (ret)
		goto err_out;

	guc_reset_interrupts(guc);

	/* WaEnableuKernelHeaderValidFix:skl */
	/* WaEnableGuCBootHashCheckNotSet:skl,bxt,kbl */
	if (IS_GEN(i915, 9))
		attempts = 3;
	else
		attempts = 1;

	while (attempts--) {
		/*
		 * Always reset the GuC just before (re)loading, so
		 * that the state and timing are fairly predictable
		 */
		ret = __uc_sanitize(uc);
		if (ret)
			goto err_out;

		intel_huc_fw_upload(huc);
		intel_guc_ads_reset(guc);
		intel_guc_write_params(guc);
		ret = intel_guc_fw_upload(guc);
		if (ret == 0)
			break;

		DRM_DEBUG_DRIVER("GuC fw load failed: %d; will reset and "
				 "retry %d more time(s)\n", ret, attempts);
	}

	/* Did we succeed or run out of retries? */
	if (ret)
		goto err_log_capture;

	ret = guc_enable_communication(guc);
	if (ret)
		goto err_log_capture;

	intel_huc_auth(huc);

	ret = intel_guc_sample_forcewake(guc);
	if (ret)
		goto err_communication;

	if (intel_uc_supports_guc_submission(uc)) {
		ret = intel_guc_submission_enable(guc);
		if (ret)
			goto err_communication;
	}

	dev_info(i915->drm.dev, "%s firmware %s version %u.%u %s:%s\n",
		 intel_uc_fw_type_repr(INTEL_UC_FW_TYPE_GUC), guc->fw.path,
		 guc->fw.major_ver_found, guc->fw.minor_ver_found,
		 "submission",
		 enableddisabled(intel_uc_supports_guc_submission(uc)));

	if (intel_uc_uses_huc(uc)) {
		dev_info(i915->drm.dev, "%s firmware %s version %u.%u %s:%s\n",
			 intel_uc_fw_type_repr(INTEL_UC_FW_TYPE_HUC),
			 huc->fw.path,
			 huc->fw.major_ver_found, huc->fw.minor_ver_found,
			 "authenticated",
			 yesno(intel_huc_is_authenticated(huc)));
	}

	return 0;

	/*
	 * We've failed to load the firmware :(
	 */
err_communication:
	guc_disable_communication(guc);
err_log_capture:
	__uc_capture_load_err_log(uc);
err_out:
	__uc_sanitize(uc);

	if (!ret) {
		dev_notice(i915->drm.dev, "GuC is uninitialized\n");
		/* We want to run without GuC submission */
		return 0;
	}

	i915_probe_error(i915, "GuC initialization failed %d\n", ret);

	/* We want to keep KMS alive */
	return -EIO;
}

void intel_uc_fini_hw(struct intel_uc *uc)
{
	struct intel_guc *guc = &uc->guc;

	if (!intel_guc_is_running(guc))
		return;

	if (intel_uc_supports_guc_submission(uc))
		intel_guc_submission_disable(guc);

	if (guc_communication_enabled(guc))
		guc_disable_communication(guc);

	__uc_sanitize(uc);
}
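
/*
 * Note that intel_uc_fini_hw() mirrors the tail of intel_uc_init_hw() in
 * reverse: submission is disabled before communication is torn down, and
 * the final sanitize resets the GuC so the next init starts from a clean
 * slate.
 */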

/**
 * intel_uc_reset_prepare - Prepare for reset
 * @uc: the intel_uc structure
 *
 * Prepare for a full GPU reset.
 */
void intel_uc_reset_prepare(struct intel_uc *uc)
{
	struct intel_guc *guc = &uc->guc;

	if (!intel_guc_is_running(guc))
		return;

	guc_stop_communication(guc);
	__uc_sanitize(uc);
}

void intel_uc_runtime_suspend(struct intel_uc *uc)
{
	struct intel_guc *guc = &uc->guc;
	int err;

	if (!intel_guc_is_running(guc))
		return;

	err = intel_guc_suspend(guc);
	if (err)
		DRM_DEBUG_DRIVER("Failed to suspend GuC, err=%d\n", err);

	guc_disable_communication(guc);
}

void intel_uc_suspend(struct intel_uc *uc)
{
	struct intel_guc *guc = &uc->guc;
	intel_wakeref_t wakeref;

	if (!intel_guc_is_running(guc))
		return;

	with_intel_runtime_pm(uc_to_gt(uc)->uncore->rpm, wakeref)
		intel_uc_runtime_suspend(uc);
}

static int __uc_resume(struct intel_uc *uc, bool enable_communication)
{
	struct intel_guc *guc = &uc->guc;
	int err;

	if (!intel_guc_is_running(guc))
		return 0;

	/* Make sure we enable communication if and only if it's disabled */
	GEM_BUG_ON(enable_communication == guc_communication_enabled(guc));

	if (enable_communication)
		guc_enable_communication(guc);

	err = intel_guc_resume(guc);
	if (err) {
		DRM_DEBUG_DRIVER("Failed to resume GuC, err=%d\n", err);
		return err;
	}

	return 0;
}

int intel_uc_resume(struct intel_uc *uc)
{
	/*
	 * When coming out of S3/S4 we sanitize and re-init the HW, so
	 * communication is already re-enabled at this point.
	 */
	return __uc_resume(uc, false);
}

int intel_uc_runtime_resume(struct intel_uc *uc)
{
	/*
	 * During runtime resume we don't sanitize, so we need to re-init
	 * communication as well.
	 */
	return __uc_resume(uc, true);
}