xref: /openbmc/linux/drivers/gpu/drm/i915/gt/uc/intel_uc.c (revision e761cc20)
1 // SPDX-License-Identifier: MIT
2 /*
3  * Copyright © 2016-2019 Intel Corporation
4  */
5 
6 #include <linux/string_helpers.h>
7 
8 #include "gt/intel_gt.h"
9 #include "gt/intel_reset.h"
10 #include "intel_gsc_fw.h"
11 #include "intel_gsc_uc.h"
12 #include "intel_guc.h"
13 #include "intel_guc_ads.h"
14 #include "intel_guc_submission.h"
15 #include "gt/intel_rps.h"
16 #include "intel_uc.h"
17 
18 #include "i915_drv.h"
19 
20 static const struct intel_uc_ops uc_ops_off;
21 static const struct intel_uc_ops uc_ops_on;
22 
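/*
 * Pick per-platform defaults for the enable_guc modparam when it is left at
 * its "auto" value (-1): no GuC/HuC before Gen12 or on TGL/RKL, HuC loading
 * only on ADL-S (excluding RPL-S), and HuC loading plus GuC submission on
 * everything else, with HuC dropped again on XEHPSDV and PVC.
 */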
23 static void uc_expand_default_options(struct intel_uc *uc)
24 {
25 	struct drm_i915_private *i915 = uc_to_gt(uc)->i915;
26 
27 	if (i915->params.enable_guc != -1)
28 		return;
29 
30 	/* Don't enable GuC/HuC on pre-Gen12 */
31 	if (GRAPHICS_VER(i915) < 12) {
32 		i915->params.enable_guc = 0;
33 		return;
34 	}
35 
36 	/* Don't enable GuC/HuC on older Gen12 platforms */
37 	if (IS_TIGERLAKE(i915) || IS_ROCKETLAKE(i915)) {
38 		i915->params.enable_guc = 0;
39 		return;
40 	}
41 
42 	/* Intermediate platforms are HuC authentication only */
43 	if (IS_ALDERLAKE_S(i915) && !IS_ADLS_RPLS(i915)) {
44 		i915->params.enable_guc = ENABLE_GUC_LOAD_HUC;
45 		return;
46 	}
47 
48 	/* Default: enable HuC authentication and GuC submission */
49 	i915->params.enable_guc = ENABLE_GUC_LOAD_HUC | ENABLE_GUC_SUBMISSION;
50 
51 	/* XEHPSDV and PVC do not use HuC */
52 	if (IS_XEHPSDV(i915) || IS_PONTEVECCHIO(i915))
53 		i915->params.enable_guc &= ~ENABLE_GUC_LOAD_HUC;
54 }
55 
56 /* Reset GuC providing us with fresh state for both GuC and HuC. */
58 static int __intel_uc_reset_hw(struct intel_uc *uc)
59 {
60 	struct intel_gt *gt = uc_to_gt(uc);
61 	int ret;
62 	u32 guc_status;
63 
64 	ret = i915_inject_probe_error(gt->i915, -ENXIO);
65 	if (ret)
66 		return ret;
67 
68 	ret = intel_reset_guc(gt);
69 	if (ret) {
70 		DRM_ERROR("Failed to reset GuC, ret = %d\n", ret);
71 		return ret;
72 	}
73 
74 	guc_status = intel_uncore_read(gt->uncore, GUC_STATUS);
75 	WARN(!(guc_status & GS_MIA_IN_RESET),
76 	     "GuC status: 0x%x, MIA core expected to be in reset\n",
77 	     guc_status);
78 
79 	return ret;
80 }
81 
82 static void __confirm_options(struct intel_uc *uc)
83 {
84 	struct drm_i915_private *i915 = uc_to_gt(uc)->i915;
85 
86 	drm_dbg(&i915->drm,
87 		"enable_guc=%d (guc:%s submission:%s huc:%s slpc:%s)\n",
88 		i915->params.enable_guc,
89 		str_yes_no(intel_uc_wants_guc(uc)),
90 		str_yes_no(intel_uc_wants_guc_submission(uc)),
91 		str_yes_no(intel_uc_wants_huc(uc)),
92 		str_yes_no(intel_uc_wants_guc_slpc(uc)));
93 
94 	if (i915->params.enable_guc == 0) {
95 		GEM_BUG_ON(intel_uc_wants_guc(uc));
96 		GEM_BUG_ON(intel_uc_wants_guc_submission(uc));
97 		GEM_BUG_ON(intel_uc_wants_huc(uc));
98 		GEM_BUG_ON(intel_uc_wants_guc_slpc(uc));
99 		return;
100 	}
101 
102 	if (!intel_uc_supports_guc(uc))
103 		drm_info(&i915->drm,
104 			 "Incompatible option enable_guc=%d - %s\n",
105 			 i915->params.enable_guc, "GuC is not supported!");
106 
107 	if (i915->params.enable_guc & ENABLE_GUC_LOAD_HUC &&
108 	    !intel_uc_supports_huc(uc))
109 		drm_info(&i915->drm,
110 			 "Incompatible option enable_guc=%d - %s\n",
111 			 i915->params.enable_guc, "HuC is not supported!");
112 
113 	if (i915->params.enable_guc & ENABLE_GUC_SUBMISSION &&
114 	    !intel_uc_supports_guc_submission(uc))
115 		drm_info(&i915->drm,
116 			 "Incompatible option enable_guc=%d - %s\n",
117 			 i915->params.enable_guc, "GuC submission is N/A");
118 
119 	if (i915->params.enable_guc & ~ENABLE_GUC_MASK)
120 		drm_info(&i915->drm,
121 			 "Incompatible option enable_guc=%d - %s\n",
122 			 i915->params.enable_guc, "undocumented flag");
123 }
124 
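/**
 * intel_uc_init_early - early initialization of uC state
 * @uc: the intel_uc structure
 *
 * Expand the default enable_guc options, perform early initialization of the
 * GuC, HuC and GSC firmware state, and select the full or the disabled set of
 * uC ops depending on whether GuC usage is wanted.
 */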
125 void intel_uc_init_early(struct intel_uc *uc)
126 {
127 	uc_expand_default_options(uc);
128 
129 	intel_guc_init_early(&uc->guc);
130 	intel_huc_init_early(&uc->huc);
131 	intel_gsc_uc_init_early(&uc->gsc);
132 
133 	__confirm_options(uc);
134 
135 	if (intel_uc_wants_guc(uc))
136 		uc->ops = &uc_ops_on;
137 	else
138 		uc->ops = &uc_ops_off;
139 }
140 
141 void intel_uc_init_late(struct intel_uc *uc)
142 {
143 	intel_guc_init_late(&uc->guc);
144 }
145 
146 void intel_uc_driver_late_release(struct intel_uc *uc)
147 {
148 }
149 
150 /**
151  * intel_uc_init_mmio - setup uC MMIO access
152  * @uc: the intel_uc structure
153  *
154  * Setup minimal state necessary for MMIO accesses later in the
155  * initialization sequence.
156  */
157 void intel_uc_init_mmio(struct intel_uc *uc)
158 {
159 	intel_guc_init_send_regs(&uc->guc);
160 }
161 
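/*
 * Keep a reference to the GuC log buffer object when a firmware load fails so
 * the error log can still be examined later; the reference is dropped by
 * __uc_free_load_err_log() from intel_uc_driver_remove().
 */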
162 static void __uc_capture_load_err_log(struct intel_uc *uc)
163 {
164 	struct intel_guc *guc = &uc->guc;
165 
166 	if (guc->log.vma && !uc->load_err_log)
167 		uc->load_err_log = i915_gem_object_get(guc->log.vma->obj);
168 }
169 
170 static void __uc_free_load_err_log(struct intel_uc *uc)
171 {
172 	struct drm_i915_gem_object *log = fetch_and_zero(&uc->load_err_log);
173 
174 	if (log)
175 		i915_gem_object_put(log);
176 }
177 
178 void intel_uc_driver_remove(struct intel_uc *uc)
179 {
180 	intel_uc_fini_hw(uc);
181 	intel_uc_fini(uc);
182 	__uc_free_load_err_log(uc);
183 }
184 
185 /*
186  * Events triggered while CT buffers are disabled are logged in the SCRATCH_15
187  * register using the same bits used in the CT message payload. Since our
188  * communication channel with GuC is turned off at this point, we can save the
189  * message and handle it after we turn it back on.
190  */
191 static void guc_clear_mmio_msg(struct intel_guc *guc)
192 {
193 	intel_uncore_write(guc_to_gt(guc)->uncore, SOFT_SCRATCH(15), 0);
194 }
195 
196 static void guc_get_mmio_msg(struct intel_guc *guc)
197 {
198 	u32 val;
199 
200 	spin_lock_irq(&guc->irq_lock);
201 
202 	val = intel_uncore_read(guc_to_gt(guc)->uncore, SOFT_SCRATCH(15));
203 	guc->mmio_msg |= val & guc->msg_enabled_mask;
204 
205 	/*
206 	 * clear all events, including the ones we're not currently servicing,
207 	 * to make sure we don't try to process a stale message if we enable
208 	 * handling of more events later.
209 	 */
210 	guc_clear_mmio_msg(guc);
211 
212 	spin_unlock_irq(&guc->irq_lock);
213 }
214 
215 static void guc_handle_mmio_msg(struct intel_guc *guc)
216 {
217 	/* we need communication to be enabled to reply to GuC */
218 	GEM_BUG_ON(!intel_guc_ct_enabled(&guc->ct));
219 
220 	spin_lock_irq(&guc->irq_lock);
221 	if (guc->mmio_msg) {
222 		intel_guc_to_host_process_recv_msg(guc, &guc->mmio_msg, 1);
223 		guc->mmio_msg = 0;
224 	}
225 	spin_unlock_irq(&guc->irq_lock);
226 }
227 
228 static int guc_enable_communication(struct intel_guc *guc)
229 {
230 	struct intel_gt *gt = guc_to_gt(guc);
231 	struct drm_i915_private *i915 = gt->i915;
232 	int ret;
233 
234 	GEM_BUG_ON(intel_guc_ct_enabled(&guc->ct));
235 
236 	ret = i915_inject_probe_error(i915, -ENXIO);
237 	if (ret)
238 		return ret;
239 
240 	ret = intel_guc_ct_enable(&guc->ct);
241 	if (ret)
242 		return ret;
243 
244 	/* check for mmio messages received before/during the CT enable */
245 	guc_get_mmio_msg(guc);
246 	guc_handle_mmio_msg(guc);
247 
248 	intel_guc_enable_interrupts(guc);
249 
250 	/* check for CT messages received before we enabled interrupts */
251 	spin_lock_irq(gt->irq_lock);
252 	intel_guc_ct_event_handler(&guc->ct);
253 	spin_unlock_irq(gt->irq_lock);
254 
255 	drm_dbg(&i915->drm, "GuC communication enabled\n");
256 
257 	return 0;
258 }
259 
260 static void guc_disable_communication(struct intel_guc *guc)
261 {
262 	struct drm_i915_private *i915 = guc_to_gt(guc)->i915;
263 
264 	/*
265 	 * Events generated during or after CT disable are logged by GuC
266 	 * via mmio. Make sure the register is clear before disabling CT since
267 	 * all events we cared about have already been processed via CT.
268 	 */
269 	guc_clear_mmio_msg(guc);
270 
271 	intel_guc_disable_interrupts(guc);
272 
273 	intel_guc_ct_disable(&guc->ct);
274 
275 	/*
276 	 * Check for messages received during/after the CT disable. We do not
277 	 * expect any messages to have arrived via CT between the interrupt
278 	 * disable and the CT disable because GuC should've been idle until we
279 	 * triggered the CT disable protocol.
280 	 */
281 	guc_get_mmio_msg(guc);
282 
283 	drm_dbg(&i915->drm, "GuC communication disabled\n");
284 }
285 
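/*
 * Fetch the GuC firmware first; if that fails, the HuC and GSC firmwares are
 * marked as errored as well (moving them out of the transient "SELECTED"
 * state), since the driver will not load them without GuC.
 */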
286 static void __uc_fetch_firmwares(struct intel_uc *uc)
287 {
288 	int err;
289 
290 	GEM_BUG_ON(!intel_uc_wants_guc(uc));
291 
292 	err = intel_uc_fw_fetch(&uc->guc.fw);
293 	if (err) {
294 		/* Make sure we transition out of transient "SELECTED" state */
295 		if (intel_uc_wants_huc(uc)) {
296 			drm_dbg(&uc_to_gt(uc)->i915->drm,
297 				"Failed to fetch GuC: %d, disabling HuC\n", err);
298 			intel_uc_fw_change_status(&uc->huc.fw,
299 						  INTEL_UC_FIRMWARE_ERROR);
300 		}
301 
302 		if (intel_uc_wants_gsc_uc(uc)) {
303 			drm_dbg(&uc_to_gt(uc)->i915->drm,
304 				"Failed to fetch GuC: %d, disabling GSC\n", err);
305 			intel_uc_fw_change_status(&uc->gsc.fw,
306 						  INTEL_UC_FIRMWARE_ERROR);
307 		}
308 
309 		return;
310 	}
311 
312 	if (intel_uc_wants_huc(uc))
313 		intel_uc_fw_fetch(&uc->huc.fw);
314 
315 	if (intel_uc_wants_gsc_uc(uc))
316 		intel_uc_fw_fetch(&uc->gsc.fw);
317 }
318 
319 static void __uc_cleanup_firmwares(struct intel_uc *uc)
320 {
321 	intel_uc_fw_cleanup_fetch(&uc->gsc.fw);
322 	intel_uc_fw_cleanup_fetch(&uc->huc.fw);
323 	intel_uc_fw_cleanup_fetch(&uc->guc.fw);
324 }
325 
326 static int __uc_init(struct intel_uc *uc)
327 {
328 	struct intel_guc *guc = &uc->guc;
329 	struct intel_huc *huc = &uc->huc;
330 	int ret;
331 
332 	GEM_BUG_ON(!intel_uc_wants_guc(uc));
333 
334 	if (!intel_uc_uses_guc(uc))
335 		return 0;
336 
337 	if (i915_inject_probe_failure(uc_to_gt(uc)->i915))
338 		return -ENOMEM;
339 
340 	ret = intel_guc_init(guc);
341 	if (ret)
342 		return ret;
343 
344 	if (intel_uc_uses_huc(uc))
345 		intel_huc_init(huc);
346 
347 	if (intel_uc_uses_gsc_uc(uc))
348 		intel_gsc_uc_init(&uc->gsc);
349 
350 	return 0;
351 }
352 
353 static void __uc_fini(struct intel_uc *uc)
354 {
355 	intel_gsc_uc_fini(&uc->gsc);
356 	intel_huc_fini(&uc->huc);
357 	intel_guc_fini(&uc->guc);
358 }
359 
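/* Sanitize the HuC and GuC software state and reset the GuC hardware. */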
360 static int __uc_sanitize(struct intel_uc *uc)
361 {
362 	struct intel_guc *guc = &uc->guc;
363 	struct intel_huc *huc = &uc->huc;
364 
365 	GEM_BUG_ON(!intel_uc_supports_guc(uc));
366 
367 	intel_huc_sanitize(huc);
368 	intel_guc_sanitize(guc);
369 
370 	return __intel_uc_reset_hw(uc);
371 }
372 
373 /* Initialize and verify the uC regs related to uC positioning in WOPCM */
374 static int uc_init_wopcm(struct intel_uc *uc)
375 {
376 	struct intel_gt *gt = uc_to_gt(uc);
377 	struct intel_uncore *uncore = gt->uncore;
378 	u32 base = intel_wopcm_guc_base(&gt->wopcm);
379 	u32 size = intel_wopcm_guc_size(&gt->wopcm);
380 	u32 huc_agent = intel_uc_uses_huc(uc) ? HUC_LOADING_AGENT_GUC : 0;
381 	u32 mask;
382 	int err;
383 
384 	if (unlikely(!base || !size)) {
385 		i915_probe_error(gt->i915, "Unsuccessful WOPCM partitioning\n");
386 		return -E2BIG;
387 	}
388 
389 	GEM_BUG_ON(!intel_uc_supports_guc(uc));
390 	GEM_BUG_ON(!(base & GUC_WOPCM_OFFSET_MASK));
391 	GEM_BUG_ON(base & ~GUC_WOPCM_OFFSET_MASK);
392 	GEM_BUG_ON(!(size & GUC_WOPCM_SIZE_MASK));
393 	GEM_BUG_ON(size & ~GUC_WOPCM_SIZE_MASK);
394 
395 	err = i915_inject_probe_error(gt->i915, -ENXIO);
396 	if (err)
397 		return err;
398 
399 	mask = GUC_WOPCM_SIZE_MASK | GUC_WOPCM_SIZE_LOCKED;
400 	err = intel_uncore_write_and_verify(uncore, GUC_WOPCM_SIZE, size, mask,
401 					    size | GUC_WOPCM_SIZE_LOCKED);
402 	if (err)
403 		goto err_out;
404 
405 	mask = GUC_WOPCM_OFFSET_MASK | GUC_WOPCM_OFFSET_VALID | huc_agent;
406 	err = intel_uncore_write_and_verify(uncore, DMA_GUC_WOPCM_OFFSET,
407 					    base | huc_agent, mask,
408 					    base | huc_agent |
409 					    GUC_WOPCM_OFFSET_VALID);
410 	if (err)
411 		goto err_out;
412 
413 	return 0;
414 
415 err_out:
416 	i915_probe_error(gt->i915, "Failed to init uC WOPCM registers!\n");
417 	i915_probe_error(gt->i915, "%s(%#x)=%#x\n", "DMA_GUC_WOPCM_OFFSET",
418 			 i915_mmio_reg_offset(DMA_GUC_WOPCM_OFFSET),
419 			 intel_uncore_read(uncore, DMA_GUC_WOPCM_OFFSET));
420 	i915_probe_error(gt->i915, "%s(%#x)=%#x\n", "GUC_WOPCM_SIZE",
421 			 i915_mmio_reg_offset(GUC_WOPCM_SIZE),
422 			 intel_uncore_read(uncore, GUC_WOPCM_SIZE));
423 
424 	return err;
425 }
426 
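/*
 * If the WOPCM size is already locked or the offset is already marked valid,
 * the registers have been programmed before, i.e. GuC has been loaded at some
 * point since the last reboot.
 */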
427 static bool uc_is_wopcm_locked(struct intel_uc *uc)
428 {
429 	struct intel_gt *gt = uc_to_gt(uc);
430 	struct intel_uncore *uncore = gt->uncore;
431 
432 	return (intel_uncore_read(uncore, GUC_WOPCM_SIZE) & GUC_WOPCM_SIZE_LOCKED) ||
433 	       (intel_uncore_read(uncore, DMA_GUC_WOPCM_OFFSET) & GUC_WOPCM_OFFSET_VALID);
434 }
435 
436 static int __uc_check_hw(struct intel_uc *uc)
437 {
438 	if (!intel_uc_supports_guc(uc))
439 		return 0;
440 
441 	/*
442 	 * We can silently continue without GuC only if it was never enabled
443 	 * on this system since the last reboot; otherwise we risk GPU hangs.
444 	 * To check if GuC was loaded before, we look at the WOPCM registers.
445 	 */
446 	if (uc_is_wopcm_locked(uc))
447 		return -EIO;
448 
449 	return 0;
450 }
451 
452 static void print_fw_ver(struct intel_uc *uc, struct intel_uc_fw *fw)
453 {
454 	struct drm_i915_private *i915 = uc_to_gt(uc)->i915;
455 
456 	drm_info(&i915->drm, "%s firmware %s version %u.%u.%u\n",
457 		 intel_uc_fw_type_repr(fw->type), fw->file_selected.path,
458 		 fw->file_selected.ver.major,
459 		 fw->file_selected.ver.minor,
460 		 fw->file_selected.ver.patch);
461 }
462 
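/*
 * Program the WOPCM partitioning, upload the HuC and GuC firmwares (retrying
 * up to three times on Gen9), enable CT communication and interrupts,
 * authenticate the HuC and, when requested, enable GuC submission and SLPC.
 */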
463 static int __uc_init_hw(struct intel_uc *uc)
464 {
465 	struct drm_i915_private *i915 = uc_to_gt(uc)->i915;
466 	struct intel_guc *guc = &uc->guc;
467 	struct intel_huc *huc = &uc->huc;
468 	int ret, attempts;
469 
470 	GEM_BUG_ON(!intel_uc_supports_guc(uc));
471 	GEM_BUG_ON(!intel_uc_wants_guc(uc));
472 
473 	print_fw_ver(uc, &guc->fw);
474 
475 	if (intel_uc_uses_huc(uc))
476 		print_fw_ver(uc, &huc->fw);
477 
478 	if (!intel_uc_fw_is_loadable(&guc->fw)) {
479 		ret = __uc_check_hw(uc) ||
480 		      intel_uc_fw_is_overridden(&guc->fw) ||
481 		      intel_uc_wants_guc_submission(uc) ?
482 		      intel_uc_fw_status_to_error(guc->fw.status) : 0;
483 		goto err_out;
484 	}
485 
486 	ret = uc_init_wopcm(uc);
487 	if (ret)
488 		goto err_out;
489 
490 	intel_guc_reset_interrupts(guc);
491 
492 	/* WaEnableuKernelHeaderValidFix:skl */
493 	/* WaEnableGuCBootHashCheckNotSet:skl,bxt,kbl */
494 	if (GRAPHICS_VER(i915) == 9)
495 		attempts = 3;
496 	else
497 		attempts = 1;
498 
499 	intel_rps_raise_unslice(&uc_to_gt(uc)->rps);
500 
501 	while (attempts--) {
502 		/*
503 		 * Always reset the GuC just before (re)loading, so
504 		 * that the state and timing are fairly predictable
505 		 */
506 		ret = __uc_sanitize(uc);
507 		if (ret)
508 			goto err_out;
509 
510 		intel_huc_fw_upload(huc);
511 		intel_guc_ads_reset(guc);
512 		intel_guc_write_params(guc);
513 		ret = intel_guc_fw_upload(guc);
514 		if (ret == 0)
515 			break;
516 
517 		DRM_DEBUG_DRIVER("GuC fw load failed: %d; will reset and "
518 				 "retry %d more time(s)\n", ret, attempts);
519 	}
520 
521 	/* Did we succeed or run out of retries? */
522 	if (ret)
523 		goto err_log_capture;
524 
525 	ret = guc_enable_communication(guc);
526 	if (ret)
527 		goto err_log_capture;
528 
529 	/*
530 	 * GSC-loaded HuC is authenticated by the GSC, so we don't need to
531 	 * trigger the auth here. However, given that the HuC loaded this way
532 	 * survives GT reset, we still need to update our SW bookkeeping to make
533 	 * sure it reflects the correct HW status.
534 	 */
535 	if (intel_huc_is_loaded_by_gsc(huc))
536 		intel_huc_update_auth_status(huc);
537 	else
538 		intel_huc_auth(huc);
539 
540 	if (intel_uc_uses_guc_submission(uc))
541 		intel_guc_submission_enable(guc);
542 
543 	if (intel_uc_uses_guc_slpc(uc)) {
544 		ret = intel_guc_slpc_enable(&guc->slpc);
545 		if (ret)
546 			goto err_submission;
547 	} else {
548 		/* Restore GT back to RPn for non-SLPC path */
549 		intel_rps_lower_unslice(&uc_to_gt(uc)->rps);
550 	}
551 
552 	intel_gsc_uc_load_start(&uc->gsc);
553 
554 	drm_info(&i915->drm, "GuC submission %s\n",
555 		 str_enabled_disabled(intel_uc_uses_guc_submission(uc)));
556 	drm_info(&i915->drm, "GuC SLPC %s\n",
557 		 str_enabled_disabled(intel_uc_uses_guc_slpc(uc)));
558 
559 	return 0;
560 
561 	/*
562 	 * We've failed to load the firmware :(
563 	 */
564 err_submission:
565 	intel_guc_submission_disable(guc);
566 err_log_capture:
567 	__uc_capture_load_err_log(uc);
568 err_out:
569 	/* Return GT back to RPn */
570 	intel_rps_lower_unslice(&uc_to_gt(uc)->rps);
571 
572 	__uc_sanitize(uc);
573 
574 	if (!ret) {
575 		drm_notice(&i915->drm, "GuC is uninitialized\n");
576 		/* We want to run without GuC submission */
577 		return 0;
578 	}
579 
580 	i915_probe_error(i915, "GuC initialization failed %d\n", ret);
581 
582 	/* We want to keep KMS alive */
583 	return -EIO;
584 }
585 
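/*
 * If the GuC firmware is still running, disable GuC submission (when in use)
 * and sanitize the uC state so the hardware is left in a clean state.
 */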
586 static void __uc_fini_hw(struct intel_uc *uc)
587 {
588 	struct intel_guc *guc = &uc->guc;
589 
590 	if (!intel_guc_is_fw_running(guc))
591 		return;
592 
593 	if (intel_uc_uses_guc_submission(uc))
594 		intel_guc_submission_disable(guc);
595 
596 	__uc_sanitize(uc);
597 }
598 
599 /**
600  * intel_uc_reset_prepare - Prepare for reset
601  * @uc: the intel_uc structure
602  *
603  * Prepare for a full GPU reset.
604  */
605 void intel_uc_reset_prepare(struct intel_uc *uc)
606 {
607 	struct intel_guc *guc = &uc->guc;
608 
609 	uc->reset_in_progress = true;
610 
611 	/* Nothing to do if GuC isn't supported */
612 	if (!intel_uc_supports_guc(uc))
613 		return;
614 
615 	/* Firmware expected to be running when this function is called */
616 	if (!intel_guc_is_ready(guc))
617 		goto sanitize;
618 
619 	if (intel_uc_uses_guc_submission(uc))
620 		intel_guc_submission_reset_prepare(guc);
621 
622 sanitize:
623 	__uc_sanitize(uc);
624 }
625 
626 void intel_uc_reset(struct intel_uc *uc, intel_engine_mask_t stalled)
627 {
628 	struct intel_guc *guc = &uc->guc;
629 
630 	/* Firmware cannot be running when this function is called */
631 	if (intel_uc_uses_guc_submission(uc))
632 		intel_guc_submission_reset(guc, stalled);
633 }
634 
635 void intel_uc_reset_finish(struct intel_uc *uc)
636 {
637 	struct intel_guc *guc = &uc->guc;
638 
639 	uc->reset_in_progress = false;
640 
641 	/* Firmware expected to be running when this function is called */
642 	if (intel_guc_is_fw_running(guc) && intel_uc_uses_guc_submission(uc))
643 		intel_guc_submission_reset_finish(guc);
644 }
645 
646 void intel_uc_cancel_requests(struct intel_uc *uc)
647 {
648 	struct intel_guc *guc = &uc->guc;
649 
650 	/* Firmware cannot be running when this function is called */
651 	if (intel_uc_uses_guc_submission(uc))
652 		intel_guc_submission_cancel_requests(guc);
653 }
654 
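/*
 * On runtime suspend, wait briefly for any outstanding G2H CTB messages and
 * then tear down GuC communication; if the GuC is not ready, just mark its
 * interrupts as disabled.
 */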
655 void intel_uc_runtime_suspend(struct intel_uc *uc)
656 {
657 	struct intel_guc *guc = &uc->guc;
658 
659 	if (!intel_guc_is_ready(guc)) {
660 		guc->interrupts.enabled = false;
661 		return;
662 	}
663 
664 	/*
665 	 * Wait for any outstanding CTB before tearing down communication with the
666 	 * GuC.
667 	 */
668 #define OUTSTANDING_CTB_TIMEOUT_PERIOD	(HZ / 5)
669 	intel_guc_wait_for_pending_msg(guc, &guc->outstanding_submission_g2h,
670 				       false, OUTSTANDING_CTB_TIMEOUT_PERIOD);
671 	GEM_WARN_ON(atomic_read(&guc->outstanding_submission_g2h));
672 
673 	guc_disable_communication(guc);
674 }
675 
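/*
 * Flush the GSC worker and, if the GuC is ready, suspend it while holding a
 * runtime PM wakeref.
 */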
676 void intel_uc_suspend(struct intel_uc *uc)
677 {
678 	struct intel_guc *guc = &uc->guc;
679 	intel_wakeref_t wakeref;
680 	int err;
681 
682 	/* flush the GSC worker */
683 	intel_gsc_uc_suspend(&uc->gsc);
684 
685 	if (!intel_guc_is_ready(guc)) {
686 		guc->interrupts.enabled = false;
687 		return;
688 	}
689 
690 	with_intel_runtime_pm(&uc_to_gt(uc)->i915->runtime_pm, wakeref) {
691 		err = intel_guc_suspend(guc);
692 		if (err)
693 			DRM_DEBUG_DRIVER("Failed to suspend GuC, err=%d\n", err);
694 	}
695 }
696 
697 static int __uc_resume(struct intel_uc *uc, bool enable_communication)
698 {
699 	struct intel_guc *guc = &uc->guc;
700 	struct intel_gt *gt = guc_to_gt(guc);
701 	int err;
702 
703 	if (!intel_guc_is_fw_running(guc))
704 		return 0;
705 
706 	/* Make sure we enable communication if and only if it's disabled */
707 	GEM_BUG_ON(enable_communication == intel_guc_ct_enabled(&guc->ct));
708 
709 	if (enable_communication)
710 		guc_enable_communication(guc);
711 
712 	/* If we are only resuming GuC communication but not reloading
713 	 * GuC, we need to ensure the ARAT timer interrupt is enabled
714 	 * again. In case of GuC reload, it is enabled during SLPC enable.
715 	 */
716 	if (enable_communication && intel_uc_uses_guc_slpc(uc))
717 		intel_guc_pm_intrmsk_enable(gt);
718 
719 	err = intel_guc_resume(guc);
720 	if (err) {
721 		DRM_DEBUG_DRIVER("Failed to resume GuC, err=%d\n", err);
722 		return err;
723 	}
724 
725 	return 0;
726 }
727 
728 int intel_uc_resume(struct intel_uc *uc)
729 {
730 	/*
731 	 * When coming out of S3/S4 we sanitize and re-init the HW, so
732 	 * communication is already re-enabled at this point.
733 	 */
734 	return __uc_resume(uc, false);
735 }
736 
737 int intel_uc_runtime_resume(struct intel_uc *uc)
738 {
739 	/*
740 	 * During runtime resume we don't sanitize, so we need to re-init
741 	 * communication as well.
742 	 */
743 	return __uc_resume(uc, true);
744 }
745 
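/*
 * ops used when GuC is not wanted: only verify that the HW was not left with
 * GuC already loaded and clean up the early initialization.
 */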
746 static const struct intel_uc_ops uc_ops_off = {
747 	.init_hw = __uc_check_hw,
748 	.fini = __uc_fini, /* to clean up the init_early initialization */
749 };
750 
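/* Full set of ops used when GuC usage is wanted. */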
751 static const struct intel_uc_ops uc_ops_on = {
752 	.sanitize = __uc_sanitize,
753 
754 	.init_fw = __uc_fetch_firmwares,
755 	.fini_fw = __uc_cleanup_firmwares,
756 
757 	.init = __uc_init,
758 	.fini = __uc_fini,
759 
760 	.init_hw = __uc_init_hw,
761 	.fini_hw = __uc_fini_hw,
762 };
763