// SPDX-License-Identifier: MIT
/*
 * Copyright © 2016-2019 Intel Corporation
 */

#include <linux/types.h>

#include "gt/intel_gt.h"
#include "gt/intel_gt_print.h"
#include "intel_guc_reg.h"
#include "intel_huc.h"
#include "i915_drv.h"

#include <linux/device/bus.h>
#include <linux/mei_aux.h>

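/*
 * Logging helpers: route HuC messages through the GT-level printers
 * (gt_err(), gt_warn(), ...) with a "HuC: " prefix prepended.
 */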
#define huc_printk(_huc, _level, _fmt, ...) \
	gt_##_level(huc_to_gt(_huc), "HuC: " _fmt, ##__VA_ARGS__)
#define huc_err(_huc, _fmt, ...)	huc_printk((_huc), err, _fmt, ##__VA_ARGS__)
#define huc_warn(_huc, _fmt, ...)	huc_printk((_huc), warn, _fmt, ##__VA_ARGS__)
#define huc_notice(_huc, _fmt, ...)	huc_printk((_huc), notice, _fmt, ##__VA_ARGS__)
#define huc_info(_huc, _fmt, ...)	huc_printk((_huc), info, _fmt, ##__VA_ARGS__)
#define huc_dbg(_huc, _fmt, ...)	huc_printk((_huc), dbg, _fmt, ##__VA_ARGS__)
#define huc_probe_error(_huc, _fmt, ...) huc_printk((_huc), probe_error, _fmt, ##__VA_ARGS__)

/**
 * DOC: HuC
 *
 * The HuC is a dedicated microcontroller for use in media HEVC (High
 * Efficiency Video Coding) operations. Userspace can directly use the firmware
 * capabilities by adding HuC-specific commands to batch buffers.
 *
 * The kernel driver is only responsible for loading the HuC firmware and
 * triggering its security authentication, which is performed by the GuC on
 * older platforms and by the GSC on newer ones. For the GuC to correctly
 * perform the authentication, the HuC binary must be loaded before the GuC one.
 * Loading the HuC is optional; however, not using the HuC might negatively
 * impact power usage and/or performance of media workloads, depending on the
 * use case.
 * The HuC must be reloaded on events that cause the WOPCM to lose its contents
 * (S3/S4, FLR); a GuC-authenticated HuC must also be reloaded on GuC/GT reset,
 * while a GSC-managed HuC will survive that.
 *
 * See https://github.com/intel/media-driver for the latest details on HuC
 * functionality.
 */

/**
 * DOC: HuC Memory Management
 *
 * Similarly to the GuC, the HuC can't do any memory allocations on its own,
 * with the difference being that the allocations for HuC usage are handled by
 * the userspace driver instead of the kernel one. The HuC accesses the memory
 * via the PPGTT belonging to the context loaded on the VCS executing the
 * HuC-specific commands.
 */

/*
 * MEI-GSC load is an async process. The probing of the exposed aux device
 * (see intel_gsc.c) usually happens a few seconds after i915 probe, depending
 * on when the kernel schedules it. Unless something goes terribly wrong, this
 * is guaranteed to happen during boot, so the big timeout is a safety net that
 * we never expect to need.
 * MEI-PXP + HuC load usually takes ~300ms, but if the GSC needs to be resumed
 * and/or reset, this can take longer. Note that the kernel might schedule
 * other work between the i915 init/resume and the MEI one, which can add to
 * the delay.
 */
#define GSC_INIT_TIMEOUT_MS 10000
#define PXP_INIT_TIMEOUT_MS 5000

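/*
 * The delayed-load fence never needs to react to notifications, so a no-op
 * callback is sufficient.
 */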
static int sw_fence_dummy_notify(struct i915_sw_fence *sf,
				 enum i915_sw_fence_notify state)
{
	return NOTIFY_DONE;
}

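/*
 * Fence completion comes in two flavours: the __ variants assume the timeout
 * timer has already been stopped, while the plain ones cancel it first.
 */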
static void __delayed_huc_load_complete(struct intel_huc *huc)
{
	if (!i915_sw_fence_done(&huc->delayed_load.fence))
		i915_sw_fence_complete(&huc->delayed_load.fence);
}

static void delayed_huc_load_complete(struct intel_huc *huc)
{
	hrtimer_cancel(&huc->delayed_load.timer);
	__delayed_huc_load_complete(huc);
}

static void __gsc_init_error(struct intel_huc *huc)
{
	huc->delayed_load.status = INTEL_HUC_DELAYED_LOAD_ERROR;
	__delayed_huc_load_complete(huc);
}

static void gsc_init_error(struct intel_huc *huc)
{
	hrtimer_cancel(&huc->delayed_load.timer);
	__gsc_init_error(huc);
}

static void gsc_init_done(struct intel_huc *huc)
{
	hrtimer_cancel(&huc->delayed_load.timer);

	/* MEI-GSC init is done, now we wait for MEI-PXP to bind */
	huc->delayed_load.status = INTEL_HUC_WAITING_ON_PXP;
	if (!i915_sw_fence_done(&huc->delayed_load.fence))
		hrtimer_start(&huc->delayed_load.timer,
			      ms_to_ktime(PXP_INIT_TIMEOUT_MS),
			      HRTIMER_MODE_REL);
}

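/* Timer expiry: flag an error unless the HuC managed to authenticate in time */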
static enum hrtimer_restart huc_delayed_load_timer_callback(struct hrtimer *hrtimer)
{
	struct intel_huc *huc = container_of(hrtimer, struct intel_huc, delayed_load.timer);

	if (!intel_huc_is_authenticated(huc)) {
		if (huc->delayed_load.status == INTEL_HUC_WAITING_ON_GSC)
			huc_notice(huc, "timed out waiting for MEI GSC\n");
		else if (huc->delayed_load.status == INTEL_HUC_WAITING_ON_PXP)
			huc_notice(huc, "timed out waiting for MEI PXP\n");
		else
			MISSING_CASE(huc->delayed_load.status);

		__gsc_init_error(huc);
	}

	return HRTIMER_NORESTART;
}

static void huc_delayed_load_start(struct intel_huc *huc)
{
	ktime_t delay;

	GEM_BUG_ON(intel_huc_is_authenticated(huc));

	/*
	 * On resume we don't have to wait for MEI-GSC to be re-probed, but we
	 * do need to wait for MEI-PXP to reset & re-bind.
	 */
	switch (huc->delayed_load.status) {
	case INTEL_HUC_WAITING_ON_GSC:
		delay = ms_to_ktime(GSC_INIT_TIMEOUT_MS);
		break;
	case INTEL_HUC_WAITING_ON_PXP:
		delay = ms_to_ktime(PXP_INIT_TIMEOUT_MS);
		break;
	default:
		gsc_init_error(huc);
		return;
	}

	/*
	 * This fence is always complete unless we're waiting for the
	 * GSC device to come up to load the HuC. We arm the fence here
	 * and complete it when we confirm that the HuC is loaded from
	 * the PXP bind callback.
	 */
	GEM_BUG_ON(!i915_sw_fence_done(&huc->delayed_load.fence));
	i915_sw_fence_fini(&huc->delayed_load.fence);
	i915_sw_fence_reinit(&huc->delayed_load.fence);
	i915_sw_fence_await(&huc->delayed_load.fence);
	i915_sw_fence_commit(&huc->delayed_load.fence);

	hrtimer_start(&huc->delayed_load.timer, delay, HRTIMER_MODE_REL);
}

static int gsc_notifier(struct notifier_block *nb, unsigned long action, void *data)
{
	struct device *dev = data;
	struct intel_huc *huc = container_of(nb, struct intel_huc, delayed_load.nb);
	struct intel_gsc_intf *intf = &huc_to_gt(huc)->gsc.intf[0];

	if (!intf->adev || &intf->adev->aux_dev.dev != dev)
		return 0;

	switch (action) {
	case BUS_NOTIFY_BOUND_DRIVER: /* mei driver bound to aux device */
		gsc_init_done(huc);
		break;

	case BUS_NOTIFY_DRIVER_NOT_BOUND: /* mei driver failed to bind */
	case BUS_NOTIFY_UNBIND_DRIVER: /* mei driver about to be unbound */
		huc_info(huc, "MEI driver not bound, disabling load\n");
		gsc_init_error(huc);
		break;
	}

	return 0;
}

void intel_huc_register_gsc_notifier(struct intel_huc *huc, const struct bus_type *bus)
{
	int ret;

	if (!intel_huc_is_loaded_by_gsc(huc))
		return;

	huc->delayed_load.nb.notifier_call = gsc_notifier;
	ret = bus_register_notifier(bus, &huc->delayed_load.nb);
	if (ret) {
		huc_err(huc, "failed to register GSC notifier %pe\n", ERR_PTR(ret));
		huc->delayed_load.nb.notifier_call = NULL;
		gsc_init_error(huc);
	}
}

void intel_huc_unregister_gsc_notifier(struct intel_huc *huc, const struct bus_type *bus)
{
	if (!huc->delayed_load.nb.notifier_call)
		return;

	delayed_huc_load_complete(huc);

	bus_unregister_notifier(bus, &huc->delayed_load.nb);
	huc->delayed_load.nb.notifier_call = NULL;
}

static void delayed_huc_load_init(struct intel_huc *huc)
{
	/*
	 * Initialize fence to be complete as this is expected to be complete
	 * unless there is a delayed HuC load in progress.
	 */
	i915_sw_fence_init(&huc->delayed_load.fence,
			   sw_fence_dummy_notify);
	i915_sw_fence_commit(&huc->delayed_load.fence);

	hrtimer_init(&huc->delayed_load.timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
	huc->delayed_load.timer.function = huc_delayed_load_timer_callback;
}

static void delayed_huc_load_fini(struct intel_huc *huc)
{
	/*
	 * the fence is initialized in init_early, so we need to clean it up
	 * even if HuC loading is off.
	 */
	delayed_huc_load_complete(huc);
	i915_sw_fence_fini(&huc->delayed_load.fence);
}

int intel_huc_sanitize(struct intel_huc *huc)
{
	delayed_huc_load_complete(huc);
	intel_uc_fw_sanitize(&huc->fw);
	return 0;
}


static bool vcs_supported(struct intel_gt *gt)
{
	intel_engine_mask_t mask;

	/*
	 * We reach here from i915_driver_early_probe for the primary GT before
	 * its engine mask is set, so we use the device info engine mask for it;
	 * this means we're not taking VCS fusing into account, but if the
	 * primary GT supports VCS engines we expect at least one of them to
	 * remain unfused so we're fine.
	 * For other GTs we expect the GT-specific mask to be set before we
	 * call this function.
	 */
	GEM_BUG_ON(!gt_is_root(gt) && !gt->info.engine_mask);

	if (gt_is_root(gt))
		mask = RUNTIME_INFO(gt->i915)->platform_engine_mask;
	else
		mask = gt->info.engine_mask;

	return __ENGINE_INSTANCES_MASK(mask, VCS0, I915_MAX_VCS);
}


void intel_huc_init_early(struct intel_huc *huc)
{
	struct drm_i915_private *i915 = huc_to_gt(huc)->i915;
	struct intel_gt *gt = huc_to_gt(huc);

	intel_uc_fw_init_early(&huc->fw, INTEL_UC_FW_TYPE_HUC);

	/*
	 * we always init the fence as already completed, even if HuC is not
	 * supported. This way we don't have to distinguish between HuC not
	 * supported/disabled or already loaded, and can focus on if the load
	 * is currently in progress (fence not complete) or not, which is what
	 * we care about for stalling userspace submissions.
	 */
	delayed_huc_load_init(huc);

	if (!vcs_supported(gt)) {
		intel_uc_fw_change_status(&huc->fw, INTEL_UC_FIRMWARE_NOT_SUPPORTED);
		return;
	}

	if (GRAPHICS_VER(i915) >= 11) {
		huc->status.reg = GEN11_HUC_KERNEL_LOAD_INFO;
		huc->status.mask = HUC_LOAD_SUCCESSFUL;
		huc->status.value = HUC_LOAD_SUCCESSFUL;
	} else {
		huc->status.reg = HUC_STATUS2;
		huc->status.mask = HUC_FW_VERIFIED;
		huc->status.value = HUC_FW_VERIFIED;
	}
}

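/*
 * Sanity-check that the firmware's expected load mode (legacy, i.e. GuC DMA,
 * vs GSC) matches what the HW reports, and that the MEI modules required for
 * a GSC-driven load are actually available.
 */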
#define HUC_LOAD_MODE_STRING(x) ((x) ? "GSC" : "legacy")
static int check_huc_loading_mode(struct intel_huc *huc)
{
	struct intel_gt *gt = huc_to_gt(huc);
	bool fw_needs_gsc = intel_huc_is_loaded_by_gsc(huc);
	bool hw_uses_gsc = false;

	/*
	 * The fuse for HuC load via GSC is only valid on platforms that have
	 * GuC deprivilege.
	 */
	if (HAS_GUC_DEPRIVILEGE(gt->i915))
		hw_uses_gsc = intel_uncore_read(gt->uncore, GUC_SHIM_CONTROL2) &
			      GSC_LOADS_HUC;

	if (fw_needs_gsc != hw_uses_gsc) {
		huc_err(huc, "mismatch between FW (%s) and HW (%s) load modes\n",
			HUC_LOAD_MODE_STRING(fw_needs_gsc), HUC_LOAD_MODE_STRING(hw_uses_gsc));
		return -ENOEXEC;
	}

	/* make sure we can access the GSC via the mei driver if we need it */
	if (!(IS_ENABLED(CONFIG_INTEL_MEI_PXP) && IS_ENABLED(CONFIG_INTEL_MEI_GSC)) &&
	    fw_needs_gsc) {
		huc_info(huc, "can't load due to missing MEI modules\n");
		return -EIO;
	}

	huc_dbg(huc, "loaded by GSC = %s\n", str_yes_no(fw_needs_gsc));

	return 0;
}

int intel_huc_init(struct intel_huc *huc)
{
	int err;

	err = check_huc_loading_mode(huc);
	if (err)
		goto out;

	err = intel_uc_fw_init(&huc->fw);
	if (err)
		goto out;

	intel_uc_fw_change_status(&huc->fw, INTEL_UC_FIRMWARE_LOADABLE);

	return 0;

out:
	intel_uc_fw_change_status(&huc->fw, INTEL_UC_FIRMWARE_INIT_FAIL);
	huc_info(huc, "initialization failed %pe\n", ERR_PTR(err));
	return err;
}

void intel_huc_fini(struct intel_huc *huc)
{
	/*
	 * the fence is initialized in init_early, so we need to clean it up
	 * even if HuC loading is off.
	 */
	delayed_huc_load_fini(huc);

	if (intel_uc_fw_is_loadable(&huc->fw))
		intel_uc_fw_fini(&huc->fw);
}

void intel_huc_suspend(struct intel_huc *huc)
{
	if (!intel_uc_fw_is_loadable(&huc->fw))
		return;

	/*
	 * in the unlikely case that we're suspending before the GSC has
	 * completed its loading sequence, just stop waiting. We'll restart
	 * on resume.
	 */
	delayed_huc_load_complete(huc);
}

int intel_huc_wait_for_auth_complete(struct intel_huc *huc)
{
	struct intel_gt *gt = huc_to_gt(huc);
	int ret;

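	/* poll the status register: 2us fast timeout, up to 50ms in total */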
	ret = __intel_wait_for_register(gt->uncore,
					huc->status.reg,
					huc->status.mask,
					huc->status.value,
					2, 50, NULL);

	/* mark the load process as complete even if the wait failed */
	delayed_huc_load_complete(huc);

	if (ret) {
		huc_err(huc, "firmware not verified %pe\n", ERR_PTR(ret));
		intel_uc_fw_change_status(&huc->fw, INTEL_UC_FIRMWARE_LOAD_FAIL);
		return ret;
	}

	intel_uc_fw_change_status(&huc->fw, INTEL_UC_FIRMWARE_RUNNING);
	huc_info(huc, "authenticated!\n");
	return 0;
}

/**
 * intel_huc_auth() - Authenticate HuC uCode
 * @huc: intel_huc structure
 *
 * Called after HuC and GuC firmware loading during intel_uc_init_hw().
 *
 * This function invokes the GuC action to authenticate the HuC firmware,
 * passing the offset of the RSA signature to intel_guc_auth_huc(). It then
 * waits for up to 50ms for the firmware verification ACK.
 */
int intel_huc_auth(struct intel_huc *huc)
{
	struct intel_gt *gt = huc_to_gt(huc);
	struct intel_guc *guc = &gt->uc.guc;
	int ret;

	if (!intel_uc_fw_is_loaded(&huc->fw))
		return -ENOEXEC;

	/* GSC will do the auth */
	if (intel_huc_is_loaded_by_gsc(huc))
		return -ENODEV;

	ret = i915_inject_probe_error(gt->i915, -ENXIO);
	if (ret)
		goto fail;

	GEM_BUG_ON(intel_uc_fw_is_running(&huc->fw));

	ret = intel_guc_auth_huc(guc, intel_guc_ggtt_offset(guc, huc->fw.rsa_data));
	if (ret) {
		huc_err(huc, "authentication by GuC failed %pe\n", ERR_PTR(ret));
		goto fail;
	}

	/* Check authentication status, it should be done by now */
	ret = intel_huc_wait_for_auth_complete(huc);
	if (ret)
		goto fail;

	return 0;

fail:
	huc_probe_error(huc, "authentication failed %pe\n", ERR_PTR(ret));
	return ret;
}

bool intel_huc_is_authenticated(struct intel_huc *huc)
{
	struct intel_gt *gt = huc_to_gt(huc);
	intel_wakeref_t wakeref;
	u32 status = 0;

	with_intel_runtime_pm(gt->uncore->rpm, wakeref)
		status = intel_uncore_read(gt->uncore, huc->status.reg);

	return (status & huc->status.mask) == huc->status.value;
}

/**
 * intel_huc_check_status() - check HuC status
 * @huc: intel_huc structure
 *
 * This function reads the status register to verify whether the HuC
 * firmware was successfully loaded.
 *
 * The return values match what is expected for the I915_PARAM_HUC_STATUS
 * getparam.
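 *
 * A minimal userspace query of this value (a sketch, assuming libdrm's
 * drmIoctl() helper and the i915 uAPI headers) looks like::
 *
 *	int value = 0;
 *	struct drm_i915_getparam gp = {
 *		.param = I915_PARAM_HUC_STATUS,
 *		.value = &value,
 *	};
 *
 *	if (drmIoctl(fd, DRM_IOCTL_I915_GETPARAM, &gp) == 0)
 *		printf("HuC status: %d\n", value);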
 */
int intel_huc_check_status(struct intel_huc *huc)
{
	switch (__intel_uc_fw_status(&huc->fw)) {
	case INTEL_UC_FIRMWARE_NOT_SUPPORTED:
		return -ENODEV;
	case INTEL_UC_FIRMWARE_DISABLED:
		return -EOPNOTSUPP;
	case INTEL_UC_FIRMWARE_MISSING:
		return -ENOPKG;
	case INTEL_UC_FIRMWARE_ERROR:
		return -ENOEXEC;
	case INTEL_UC_FIRMWARE_INIT_FAIL:
		return -ENOMEM;
	case INTEL_UC_FIRMWARE_LOAD_FAIL:
		return -EIO;
	default:
		break;
	}

	return intel_huc_is_authenticated(huc);
}

static bool huc_has_delayed_load(struct intel_huc *huc)
{
	return intel_huc_is_loaded_by_gsc(huc) &&
	       (huc->delayed_load.status != INTEL_HUC_DELAYED_LOAD_ERROR);
}

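/*
 * Reflect the current HW authentication state in the firmware status and, if
 * a GSC-driven load is still pending, (re)start the wait for it to complete.
 */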
void intel_huc_update_auth_status(struct intel_huc *huc)
{
	if (!intel_uc_fw_is_loadable(&huc->fw))
		return;

	if (intel_huc_is_authenticated(huc))
		intel_uc_fw_change_status(&huc->fw,
					  INTEL_UC_FIRMWARE_RUNNING);
	else if (huc_has_delayed_load(huc))
		huc_delayed_load_start(huc);
}

/**
 * intel_huc_load_status - dump information about HuC load status
 * @huc: the HuC
 * @p: the &drm_printer
 *
 * Pretty printer for HuC load status.
 */
void intel_huc_load_status(struct intel_huc *huc, struct drm_printer *p)
{
	struct intel_gt *gt = huc_to_gt(huc);
	intel_wakeref_t wakeref;

	if (!intel_huc_is_supported(huc)) {
		drm_printf(p, "HuC not supported\n");
		return;
	}

	if (!intel_huc_is_wanted(huc)) {
		drm_printf(p, "HuC disabled\n");
		return;
	}

	intel_uc_fw_dump(&huc->fw, p);

	with_intel_runtime_pm(gt->uncore->rpm, wakeref)
		drm_printf(p, "HuC status: 0x%08x\n",
			   intel_uncore_read(gt->uncore, huc->status.reg));
}
548