/*
 * Copyright © 2016 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 */

#include "gt/intel_gt.h"
#include "gt/intel_reset.h"
#include "intel_guc.h"
#include "intel_guc_ads.h"
#include "intel_guc_submission.h"
#include "intel_uc.h"

#include "i915_drv.h"

static void guc_free_load_err_log(struct intel_guc *guc);

/*
 * Reset GuC, providing us with fresh state for both GuC and HuC.
 */
static int __intel_uc_reset_hw(struct intel_uc *uc)
{
	struct intel_gt *gt = uc_to_gt(uc);
	int ret;
	u32 guc_status;

	ret = intel_reset_guc(gt);
	if (ret) {
		DRM_ERROR("Failed to reset GuC, ret = %d\n", ret);
		return ret;
	}

	guc_status = intel_uncore_read(gt->uncore, GUC_STATUS);
	WARN(!(guc_status & GS_MIA_IN_RESET),
	     "GuC status: 0x%x, MIA core expected to be in reset\n",
	     guc_status);

	return ret;
}

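/*
 * Compute the platform default for the "enable_guc" modparam as a bitmask of
 * ENABLE_GUC_* flags. By default, firmware loading is only enabled on Gen11+
 * parts that have a GT uC and for which both GuC and HuC firmware definitions
 * are available.
 */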
static int __get_platform_enable_guc(struct intel_uc *uc)
{
	struct intel_uc_fw *guc_fw = &uc->guc.fw;
	struct intel_uc_fw *huc_fw = &uc->huc.fw;
	int enable_guc = 0;

	if (!HAS_GT_UC(uc_to_gt(uc)->i915))
		return 0;

	/* We don't want to enable GuC/HuC on pre-Gen11 by default */
	if (INTEL_GEN(uc_to_gt(uc)->i915) < 11)
		return 0;

	if (intel_uc_fw_supported(guc_fw) && intel_uc_fw_supported(huc_fw))
		enable_guc |= ENABLE_GUC_LOAD_HUC;

	return enable_guc;
}

/**
 * sanitize_options_early - sanitize uC related modparam options
 * @uc: the intel_uc structure
 *
 * This function modifies the "enable_guc" modparam only if it was initially
 * set to "auto" (-1); the default it then resolves to varies between
 * platforms and is hardcoded in the driver. Any other modparam value is only
 * checked against the availability of the related hardware or firmware
 * definitions.
 */
static void sanitize_options_early(struct intel_uc *uc)
{
	struct intel_uc_fw *guc_fw = &uc->guc.fw;
	struct intel_uc_fw *huc_fw = &uc->huc.fw;

	/* A negative value means "use platform default" */
	if (i915_modparams.enable_guc < 0)
		i915_modparams.enable_guc = __get_platform_enable_guc(uc);

	DRM_DEBUG_DRIVER("enable_guc=%d (submission:%s huc:%s)\n",
			 i915_modparams.enable_guc,
			 yesno(intel_uc_is_using_guc_submission(uc)),
			 yesno(intel_uc_is_using_huc(uc)));

	/* Verify GuC firmware availability */
	if (intel_uc_is_using_guc(uc) && !intel_uc_fw_supported(guc_fw)) {
		DRM_WARN("Incompatible option detected: enable_guc=%d, "
			 "but GuC is not supported!\n",
			 i915_modparams.enable_guc);
		DRM_INFO("Disabling GuC/HuC loading!\n");
		i915_modparams.enable_guc = 0;
	}

	/* Verify HuC firmware availability */
	if (intel_uc_is_using_huc(uc) && !intel_uc_fw_supported(huc_fw)) {
		DRM_WARN("Incompatible option detected: enable_guc=%d, "
			 "but HuC is not supported!\n",
			 i915_modparams.enable_guc);
		DRM_INFO("Disabling HuC loading!\n");
		i915_modparams.enable_guc &= ~ENABLE_GUC_LOAD_HUC;
	}

	/* XXX: GuC submission is unavailable for now */
	if (intel_uc_is_using_guc_submission(uc)) {
		DRM_INFO("Incompatible option detected: enable_guc=%d, "
			 "but GuC submission is not supported!\n",
			 i915_modparams.enable_guc);
		DRM_INFO("Switching to non-GuC submission mode!\n");
		i915_modparams.enable_guc &= ~ENABLE_GUC_SUBMISSION;
	}

	/* Make sure that sanitization was done */
	GEM_BUG_ON(i915_modparams.enable_guc < 0);
}

void intel_uc_init_early(struct intel_uc *uc)
{
	intel_guc_init_early(&uc->guc);
	intel_huc_init_early(&uc->huc);

	sanitize_options_early(uc);
}

void intel_uc_cleanup_early(struct intel_uc *uc)
{
	guc_free_load_err_log(&uc->guc);
}

/**
 * intel_uc_init_mmio - setup uC MMIO access
 * @uc: the intel_uc structure
 *
 * Set up the minimal state necessary for MMIO accesses later in the
 * initialization sequence.
 */
void intel_uc_init_mmio(struct intel_uc *uc)
{
	intel_guc_init_send_regs(&uc->guc);
}

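/*
 * Take an extra reference on the GuC log object so that the load-error log
 * can still be inspected after the GuC has been torn down; the reference is
 * dropped in guc_free_load_err_log().
 */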
static void guc_capture_load_err_log(struct intel_guc *guc)
{
	if (!guc->log.vma || !intel_guc_log_get_level(&guc->log))
		return;

	if (!guc->load_err_log)
		guc->load_err_log = i915_gem_object_get(guc->log.vma->obj);
}

static void guc_free_load_err_log(struct intel_guc *guc)
{
	if (guc->load_err_log)
		i915_gem_object_put(guc->load_err_log);
}

/*
 * Events triggered while CT buffers are disabled are logged in the SCRATCH_15
 * register using the same bits used in the CT message payload. Since our
 * communication channel with the GuC is turned off at this point, we can save
 * the message and handle it after we turn it back on.
 */
static void guc_clear_mmio_msg(struct intel_guc *guc)
{
	intel_uncore_write(guc_to_gt(guc)->uncore, SOFT_SCRATCH(15), 0);
}

static void guc_get_mmio_msg(struct intel_guc *guc)
{
	u32 val;

	spin_lock_irq(&guc->irq_lock);

	val = intel_uncore_read(guc_to_gt(guc)->uncore, SOFT_SCRATCH(15));
	guc->mmio_msg |= val & guc->msg_enabled_mask;

	/*
	 * Clear all events, including the ones we're not currently servicing,
	 * to make sure we don't try to process a stale message if we enable
	 * handling of more events later.
	 */
	guc_clear_mmio_msg(guc);

	spin_unlock_irq(&guc->irq_lock);
}

static void guc_handle_mmio_msg(struct intel_guc *guc)
{
	struct drm_i915_private *i915 = guc_to_gt(guc)->i915;

	/* We need communication to be enabled to reply to GuC */
	GEM_BUG_ON(guc->handler == intel_guc_to_host_event_handler_nop);

	if (!guc->mmio_msg)
		return;

	spin_lock_irq(&i915->irq_lock);
	intel_guc_to_host_process_recv_msg(guc, &guc->mmio_msg, 1);
	spin_unlock_irq(&i915->irq_lock);

	guc->mmio_msg = 0;
}

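/* Thin wrappers around the platform-specific GuC interrupt vfuncs. */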
static void guc_reset_interrupts(struct intel_guc *guc)
{
	guc->interrupts.reset(guc);
}

static void guc_enable_interrupts(struct intel_guc *guc)
{
	guc->interrupts.enable(guc);
}

static void guc_disable_interrupts(struct intel_guc *guc)
{
	guc->interrupts.disable(guc);
}

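/*
 * Bring up the CT channel and switch the send/handler vfuncs from their nop
 * variants to the CT ones, then drain any messages that were logged via MMIO
 * or received over CT before interrupt delivery was enabled.
 */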
static int guc_enable_communication(struct intel_guc *guc)
{
	struct drm_i915_private *i915 = guc_to_gt(guc)->i915;
	int ret;

	ret = intel_guc_ct_enable(&guc->ct);
	if (ret)
		return ret;

	guc->send = intel_guc_send_ct;
	guc->handler = intel_guc_to_host_event_handler_ct;

	/* Check for MMIO messages received before/during the CT enable */
	guc_get_mmio_msg(guc);
	guc_handle_mmio_msg(guc);

	guc_enable_interrupts(guc);

	/* Check for CT messages received before we enabled interrupts */
	spin_lock_irq(&i915->irq_lock);
	intel_guc_to_host_event_handler_ct(guc);
	spin_unlock_irq(&i915->irq_lock);

	DRM_INFO("GuC communication enabled\n");

	return 0;
}

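/*
 * Stop communication without running the full disable protocol; used when
 * preparing for a full GPU reset (see intel_uc_reset_prepare()).
 */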
static void guc_stop_communication(struct intel_guc *guc)
{
	intel_guc_ct_stop(&guc->ct);

	guc->send = intel_guc_send_nop;
	guc->handler = intel_guc_to_host_event_handler_nop;

	guc_clear_mmio_msg(guc);
}

static void guc_disable_communication(struct intel_guc *guc)
{
	/*
	 * Events generated during or after the CT disable are logged by the
	 * GuC via MMIO. Make sure the register is clear before disabling CT
	 * since all events we cared about have already been processed via CT.
	 */
	guc_clear_mmio_msg(guc);

	guc_disable_interrupts(guc);

	guc->send = intel_guc_send_nop;
	guc->handler = intel_guc_to_host_event_handler_nop;

	intel_guc_ct_disable(&guc->ct);

	/*
	 * Check for messages received during/after the CT disable. We do not
	 * expect any messages to have arrived via CT between the interrupt
	 * disable and the CT disable because GuC should've been idle until we
	 * triggered the CT disable protocol.
	 */
	guc_get_mmio_msg(guc);

	DRM_INFO("GuC communication disabled\n");
}

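/* Fetch the GuC firmware image and, when HuC loading is enabled, the HuC one. */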
void intel_uc_fetch_firmwares(struct intel_uc *uc)
{
	struct drm_i915_private *i915 = uc_to_gt(uc)->i915;

	if (!intel_uc_is_using_guc(uc))
		return;

	intel_uc_fw_fetch(&uc->guc.fw, i915);

	if (intel_uc_is_using_huc(uc))
		intel_uc_fw_fetch(&uc->huc.fw, i915);
}

void intel_uc_cleanup_firmwares(struct intel_uc *uc)
{
	if (!intel_uc_is_using_guc(uc))
		return;

	if (intel_uc_is_using_huc(uc))
		intel_uc_fw_cleanup_fetch(&uc->huc.fw);

	intel_uc_fw_cleanup_fetch(&uc->guc.fw);
}

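/*
 * One-time software initialization of the uCs; a HuC init failure unwinds
 * the GuC state so that no partially initialized state is left behind.
 */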
int intel_uc_init(struct intel_uc *uc)
{
	struct intel_guc *guc = &uc->guc;
	struct intel_huc *huc = &uc->huc;
	int ret;

	if (!intel_uc_is_using_guc(uc))
		return 0;

	if (!intel_uc_fw_supported(&guc->fw))
		return -ENODEV;

	/* XXX: GuC submission is unavailable for now */
	GEM_BUG_ON(intel_uc_is_using_guc_submission(uc));

	ret = intel_guc_init(guc);
	if (ret)
		return ret;

	if (intel_uc_is_using_huc(uc)) {
		ret = intel_huc_init(huc);
		if (ret)
			goto err_guc;
	}

	return 0;

err_guc:
	intel_guc_fini(guc);
	return ret;
}

void intel_uc_fini(struct intel_uc *uc)
{
	struct intel_guc *guc = &uc->guc;

	if (!intel_uc_is_using_guc(uc))
		return;

	GEM_BUG_ON(!intel_uc_fw_supported(&guc->fw));

	if (intel_uc_is_using_huc(uc))
		intel_huc_fini(&uc->huc);

	intel_guc_fini(guc);
}

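/*
 * Sanitize the software state of both uCs and reset the GuC hardware, so
 * that the next firmware load starts from a known state.
 */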
static void __uc_sanitize(struct intel_uc *uc)
{
	struct intel_guc *guc = &uc->guc;
	struct intel_huc *huc = &uc->huc;

	GEM_BUG_ON(!intel_uc_fw_supported(&guc->fw));

	intel_huc_sanitize(huc);
	intel_guc_sanitize(guc);

	__intel_uc_reset_hw(uc);
}

void intel_uc_sanitize(struct intel_uc *uc)
{
	if (!intel_uc_is_using_guc(uc))
		return;

	__uc_sanitize(uc);
}

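/*
 * Load and authenticate the uC firmwares and bring up GuC communication.
 * On Gen9 the GuC load is retried up to three times to work around known
 * boot issues (see the Wa comments below). There is no fallback on failure:
 * the hardware is sanitized and the error is reported to the caller.
 */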
int intel_uc_init_hw(struct intel_uc *uc)
{
	struct drm_i915_private *i915 = uc_to_gt(uc)->i915;
	struct intel_guc *guc = &uc->guc;
	struct intel_huc *huc = &uc->huc;
	int ret, attempts;

	if (!intel_uc_is_using_guc(uc))
		return 0;

	GEM_BUG_ON(!intel_uc_fw_supported(&guc->fw));

	guc_reset_interrupts(guc);

	/* WaEnableuKernelHeaderValidFix:skl */
	/* WaEnableGuCBootHashCheckNotSet:skl,bxt,kbl */
	if (IS_GEN(i915, 9))
		attempts = 3;
	else
		attempts = 1;

	while (attempts--) {
		/*
		 * Always reset the GuC just before (re)loading, so
		 * that the state and timing are fairly predictable.
		 */
		ret = __intel_uc_reset_hw(uc);
		if (ret)
			goto err_out;

		if (intel_uc_is_using_huc(uc)) {
			ret = intel_huc_fw_upload(huc);
			if (ret && intel_uc_fw_is_overridden(&huc->fw))
				goto err_out;
		}

		intel_guc_ads_reset(guc);
		intel_guc_write_params(guc);
		ret = intel_guc_fw_upload(guc);
		if (ret == 0)
			break;

		DRM_DEBUG_DRIVER("GuC fw load failed: %d; will reset and "
				 "retry %d more time(s)\n", ret, attempts);
	}

	/* Did we succeed or run out of retries? */
	if (ret)
		goto err_log_capture;

	ret = guc_enable_communication(guc);
	if (ret)
		goto err_log_capture;

	if (intel_uc_fw_is_loaded(&huc->fw)) {
		ret = intel_huc_auth(huc);
		if (ret && intel_uc_fw_is_overridden(&huc->fw))
			goto err_communication;
	}

	ret = intel_guc_sample_forcewake(guc);
	if (ret)
		goto err_communication;

	if (intel_uc_is_using_guc_submission(uc)) {
		ret = intel_guc_submission_enable(guc);
		if (ret)
			goto err_communication;
	}

	dev_info(i915->drm.dev, "GuC firmware version %u.%u\n",
		 guc->fw.major_ver_found, guc->fw.minor_ver_found);
	dev_info(i915->drm.dev, "GuC submission %s\n",
		 enableddisabled(intel_uc_is_using_guc_submission(uc)));
	dev_info(i915->drm.dev, "HuC %s\n",
		 enableddisabled(intel_huc_is_authenticated(huc)));

	return 0;

	/*
	 * We've failed to load the firmware :(
	 */
err_communication:
	guc_disable_communication(guc);
err_log_capture:
	guc_capture_load_err_log(guc);
err_out:
	__uc_sanitize(uc);

	/*
	 * Note that there is no fallback as either the user explicitly asked
	 * for the GuC or the driver default option was to run with the GuC
	 * enabled.
	 */
	if (GEM_WARN_ON(ret == -EIO))
		ret = -EINVAL;

	dev_err(i915->drm.dev, "GuC initialization failed %d\n", ret);
	return ret;
}

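/*
 * Tear down what intel_uc_init_hw() set up: disable submission, shut down
 * GuC communication and sanitize the hardware.
 */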
void intel_uc_fini_hw(struct intel_uc *uc)
{
	struct intel_guc *guc = &uc->guc;

	if (!intel_guc_is_running(guc))
		return;

	GEM_BUG_ON(!intel_uc_fw_supported(&guc->fw));

	if (intel_uc_is_using_guc_submission(uc))
		intel_guc_submission_disable(guc);

	guc_disable_communication(guc);
	__uc_sanitize(uc);
}

/**
 * intel_uc_reset_prepare - Prepare for reset
 * @uc: the intel_uc structure
 *
 * Prepare for a full GPU reset.
 */
void intel_uc_reset_prepare(struct intel_uc *uc)
{
	struct intel_guc *guc = &uc->guc;

	if (!intel_guc_is_running(guc))
		return;

	guc_stop_communication(guc);
	__uc_sanitize(uc);
}

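/*
 * Suspend the GuC and shut down communication; intel_uc_suspend() wraps this
 * with a runtime PM reference for the system suspend path.
 */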
void intel_uc_runtime_suspend(struct intel_uc *uc)
{
	struct intel_guc *guc = &uc->guc;
	int err;

	if (!intel_guc_is_running(guc))
		return;

	err = intel_guc_suspend(guc);
	if (err)
		DRM_DEBUG_DRIVER("Failed to suspend GuC, err=%d\n", err);

	guc_disable_communication(guc);
}

void intel_uc_suspend(struct intel_uc *uc)
{
	struct intel_guc *guc = &uc->guc;
	intel_wakeref_t wakeref;

	if (!intel_guc_is_running(guc))
		return;

	with_intel_runtime_pm(&uc_to_gt(uc)->i915->runtime_pm, wakeref)
		intel_uc_runtime_suspend(uc);
}

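/*
 * Counterpart of intel_uc_runtime_suspend(): re-enable GuC communication
 * and ask the GuC to resume.
 */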
int intel_uc_resume(struct intel_uc *uc)
{
	struct intel_guc *guc = &uc->guc;
	int err;

	if (!intel_guc_is_running(guc))
		return 0;

	guc_enable_communication(guc);

	err = intel_guc_resume(guc);
	if (err) {
		DRM_DEBUG_DRIVER("Failed to resume GuC, err=%d\n", err);
		return err;
	}

	return 0;
}