xref: /openbmc/linux/drivers/gpu/drm/i915/gt/uc/intel_huc.c (revision 8ab59da2)
1 // SPDX-License-Identifier: MIT
2 /*
3  * Copyright © 2016-2019 Intel Corporation
4  */
5 
6 #include <linux/types.h>
7 
8 #include "gt/intel_gt.h"
9 #include "intel_guc_reg.h"
10 #include "intel_huc.h"
11 #include "i915_drv.h"
12 
13 #include <linux/device/bus.h>
14 #include <linux/mei_aux.h>
15 
16 /**
17  * DOC: HuC
18  *
19  * The HuC is a dedicated microcontroller for usage in media HEVC (High
20  * Efficiency Video Coding) operations. Userspace can directly use the firmware
21  * capabilities by adding HuC specific commands to batch buffers.
22  *
23  * The kernel driver is only responsible for loading the HuC firmware and
24  * triggering its security authentication, which is performed by the GuC on
25  * older platforms and by the GSC on newer ones. For the GuC to correctly
26  * perform the authentication, the HuC binary must be loaded before the GuC one.
27  * Loading the HuC is optional; however, not using the HuC might negatively
28  * impact power usage and/or performance of media workloads, depending on the
29  * use-cases.
30  * HuC must be reloaded on events that cause the WOPCM to lose its contents
31  * (S3/S4, FLR); GuC-authenticated HuC must also be reloaded on GuC/GT reset,
32  * while GSC-managed HuC will survive that.
33  *
34  * See https://github.com/intel/media-driver for the latest details on HuC
35  * functionality.
36  */
37 
38 /**
39  * DOC: HuC Memory Management
40  *
41  * Similarly to the GuC, the HuC can't do any memory allocations on its own,
42  * with the difference being that the allocations for HuC usage are handled by
43  * the userspace driver instead of the kernel one. The HuC accesses the memory
44  * via the PPGTT belonging to the context loaded on the VCS executing the
45  * HuC-specific commands.
46  */
47 
48 /*
49  * MEI-GSC load is an async process. The probing of the exposed aux device
50  * (see intel_gsc.c) usually happens a few seconds after i915 probe, depending
51  * on when the kernel schedules it. Unless something goes terribly wrong, we're
52  * guaranteed for this to happen during boot, so the big timeout is a safety net
53  * that we never expect to need.
54  * MEI-PXP + HuC load usually takes ~300ms, but if the GSC needs to be resumed
55  * and/or reset, this can take longer. Note that the kernel might schedule
56  * other work between the i915 init/resume and the MEI one, which can add to
57  * the delay.
58  */
59 #define GSC_INIT_TIMEOUT_MS 10000
60 #define PXP_INIT_TIMEOUT_MS 5000
61 
/*
 * No-op notify callback for the delayed-load fence: the fence is used purely
 * as a wait point, so there is nothing to do on state transitions.
 */
static int sw_fence_dummy_notify(struct i915_sw_fence *sf,
				 enum i915_sw_fence_notify state)
{
	return NOTIFY_DONE;
}
67 
68 static void __delayed_huc_load_complete(struct intel_huc *huc)
69 {
70 	if (!i915_sw_fence_done(&huc->delayed_load.fence))
71 		i915_sw_fence_complete(&huc->delayed_load.fence);
72 }
73 
/*
 * Stop the delayed-load timeout timer and signal the fence (if still
 * pending), marking the delayed HuC load flow as finished.
 */
static void delayed_huc_load_complete(struct intel_huc *huc)
{
	hrtimer_cancel(&huc->delayed_load.timer);
	__delayed_huc_load_complete(huc);
}
79 
/*
 * Record that the GSC-driven load flow failed and complete the delayed-load
 * fence so nothing stays blocked waiting for the HuC. Does not touch the
 * timeout timer; use gsc_init_error() when the timer may still be armed.
 */
static void __gsc_init_error(struct intel_huc *huc)
{
	huc->delayed_load.status = INTEL_HUC_DELAYED_LOAD_ERROR;
	__delayed_huc_load_complete(huc);
}
85 
/* Timer-cancelling wrapper around __gsc_init_error(). */
static void gsc_init_error(struct intel_huc *huc)
{
	hrtimer_cancel(&huc->delayed_load.timer);
	__gsc_init_error(huc);
}
91 
92 static void gsc_init_done(struct intel_huc *huc)
93 {
94 	hrtimer_cancel(&huc->delayed_load.timer);
95 
96 	/* MEI-GSC init is done, now we wait for MEI-PXP to bind */
97 	huc->delayed_load.status = INTEL_HUC_WAITING_ON_PXP;
98 	if (!i915_sw_fence_done(&huc->delayed_load.fence))
99 		hrtimer_start(&huc->delayed_load.timer,
100 			      ms_to_ktime(PXP_INIT_TIMEOUT_MS),
101 			      HRTIMER_MODE_REL);
102 }
103 
104 static enum hrtimer_restart huc_delayed_load_timer_callback(struct hrtimer *hrtimer)
105 {
106 	struct intel_huc *huc = container_of(hrtimer, struct intel_huc, delayed_load.timer);
107 
108 	if (!intel_huc_is_authenticated(huc)) {
109 		if (huc->delayed_load.status == INTEL_HUC_WAITING_ON_GSC)
110 			drm_notice(&huc_to_gt(huc)->i915->drm,
111 				   "timed out waiting for MEI GSC init to load HuC\n");
112 		else if (huc->delayed_load.status == INTEL_HUC_WAITING_ON_PXP)
113 			drm_notice(&huc_to_gt(huc)->i915->drm,
114 				   "timed out waiting for MEI PXP init to load HuC\n");
115 		else
116 			MISSING_CASE(huc->delayed_load.status);
117 
118 		__gsc_init_error(huc);
119 	}
120 
121 	return HRTIMER_NORESTART;
122 }
123 
/*
 * Arm the delayed-load fence and the timeout timer according to the current
 * wait stage (MEI-GSC probe vs MEI-PXP bind). Must only be called while the
 * HuC is not yet authenticated.
 */
static void huc_delayed_load_start(struct intel_huc *huc)
{
	ktime_t delay;

	GEM_BUG_ON(intel_huc_is_authenticated(huc));

	/*
	 * On resume we don't have to wait for MEI-GSC to be re-probed, but we
	 * do need to wait for MEI-PXP to reset & re-bind
	 */
	switch (huc->delayed_load.status) {
	case INTEL_HUC_WAITING_ON_GSC:
		delay = ms_to_ktime(GSC_INIT_TIMEOUT_MS);
		break;
	case INTEL_HUC_WAITING_ON_PXP:
		delay = ms_to_ktime(PXP_INIT_TIMEOUT_MS);
		break;
	default:
		/* unexpected state (e.g. already errored out): give up */
		gsc_init_error(huc);
		return;
	}

	/*
	 * This fence is always complete unless we're waiting for the
	 * GSC device to come up to load the HuC. We arm the fence here
	 * and complete it when we confirm that the HuC is loaded from
	 * the PXP bind callback.
	 */
	GEM_BUG_ON(!i915_sw_fence_done(&huc->delayed_load.fence));
	/* fini+reinit re-arms the already-signaled fence so it can be awaited again */
	i915_sw_fence_fini(&huc->delayed_load.fence);
	i915_sw_fence_reinit(&huc->delayed_load.fence);
	i915_sw_fence_await(&huc->delayed_load.fence);
	i915_sw_fence_commit(&huc->delayed_load.fence);

	hrtimer_start(&huc->delayed_load.timer, delay, HRTIMER_MODE_REL);
}
160 
161 static int gsc_notifier(struct notifier_block *nb, unsigned long action, void *data)
162 {
163 	struct device *dev = data;
164 	struct intel_huc *huc = container_of(nb, struct intel_huc, delayed_load.nb);
165 	struct intel_gsc_intf *intf = &huc_to_gt(huc)->gsc.intf[0];
166 
167 	if (!intf->adev || &intf->adev->aux_dev.dev != dev)
168 		return 0;
169 
170 	switch (action) {
171 	case BUS_NOTIFY_BOUND_DRIVER: /* mei driver bound to aux device */
172 		gsc_init_done(huc);
173 		break;
174 
175 	case BUS_NOTIFY_DRIVER_NOT_BOUND: /* mei driver fails to be bound */
176 	case BUS_NOTIFY_UNBIND_DRIVER: /* mei driver about to be unbound */
177 		drm_info(&huc_to_gt(huc)->i915->drm,
178 			 "mei driver not bound, disabling HuC load\n");
179 		gsc_init_error(huc);
180 		break;
181 	}
182 
183 	return 0;
184 }
185 
186 void intel_huc_register_gsc_notifier(struct intel_huc *huc, struct bus_type *bus)
187 {
188 	int ret;
189 
190 	if (!intel_huc_is_loaded_by_gsc(huc))
191 		return;
192 
193 	huc->delayed_load.nb.notifier_call = gsc_notifier;
194 	ret = bus_register_notifier(bus, &huc->delayed_load.nb);
195 	if (ret) {
196 		drm_err(&huc_to_gt(huc)->i915->drm,
197 			"failed to register GSC notifier\n");
198 		huc->delayed_load.nb.notifier_call = NULL;
199 		gsc_init_error(huc);
200 	}
201 }
202 
/*
 * Tear down the notifier registered by intel_huc_register_gsc_notifier().
 * Any in-flight delayed load is completed first, since no further bus
 * events can arrive once we unregister.
 */
void intel_huc_unregister_gsc_notifier(struct intel_huc *huc, struct bus_type *bus)
{
	/* nothing to do if registration failed or was skipped */
	if (!huc->delayed_load.nb.notifier_call)
		return;

	delayed_huc_load_complete(huc);

	bus_unregister_notifier(bus, &huc->delayed_load.nb);
	huc->delayed_load.nb.notifier_call = NULL;
}
213 
/*
 * Early (pre-hw) HuC initialization: set up firmware tracking, select the
 * per-platform authentication status register, and prepare the delayed-load
 * fence and timeout timer.
 */
void intel_huc_init_early(struct intel_huc *huc)
{
	struct drm_i915_private *i915 = huc_to_gt(huc)->i915;

	intel_uc_fw_init_early(&huc->fw, INTEL_UC_FW_TYPE_HUC);

	/* Gen11+ reports auth via GEN11_HUC_KERNEL_LOAD_INFO, older via HUC_STATUS2 */
	if (GRAPHICS_VER(i915) >= 11) {
		huc->status.reg = GEN11_HUC_KERNEL_LOAD_INFO;
		huc->status.mask = HUC_LOAD_SUCCESSFUL;
		huc->status.value = HUC_LOAD_SUCCESSFUL;
	} else {
		huc->status.reg = HUC_STATUS2;
		huc->status.mask = HUC_FW_VERIFIED;
		huc->status.value = HUC_FW_VERIFIED;
	}

	/*
	 * Initialize fence to be complete as this is expected to be complete
	 * unless there is a delayed HuC reload in progress.
	 */
	i915_sw_fence_init(&huc->delayed_load.fence,
			   sw_fence_dummy_notify);
	i915_sw_fence_commit(&huc->delayed_load.fence);

	hrtimer_init(&huc->delayed_load.timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
	huc->delayed_load.timer.function = huc_delayed_load_timer_callback;
}
241 
242 #define HUC_LOAD_MODE_STRING(x) (x ? "GSC" : "legacy")
243 static int check_huc_loading_mode(struct intel_huc *huc)
244 {
245 	struct intel_gt *gt = huc_to_gt(huc);
246 	bool fw_needs_gsc = intel_huc_is_loaded_by_gsc(huc);
247 	bool hw_uses_gsc = false;
248 
249 	/*
250 	 * The fuse for HuC load via GSC is only valid on platforms that have
251 	 * GuC deprivilege.
252 	 */
253 	if (HAS_GUC_DEPRIVILEGE(gt->i915))
254 		hw_uses_gsc = intel_uncore_read(gt->uncore, GUC_SHIM_CONTROL2) &
255 			      GSC_LOADS_HUC;
256 
257 	if (fw_needs_gsc != hw_uses_gsc) {
258 		drm_err(&gt->i915->drm,
259 			"mismatch between HuC FW (%s) and HW (%s) load modes\n",
260 			HUC_LOAD_MODE_STRING(fw_needs_gsc),
261 			HUC_LOAD_MODE_STRING(hw_uses_gsc));
262 		return -ENOEXEC;
263 	}
264 
265 	/* make sure we can access the GSC via the mei driver if we need it */
266 	if (!(IS_ENABLED(CONFIG_INTEL_MEI_PXP) && IS_ENABLED(CONFIG_INTEL_MEI_GSC)) &&
267 	    fw_needs_gsc) {
268 		drm_info(&gt->i915->drm,
269 			 "Can't load HuC due to missing MEI modules\n");
270 		return -EIO;
271 	}
272 
273 	drm_dbg(&gt->i915->drm, "GSC loads huc=%s\n", str_yes_no(fw_needs_gsc));
274 
275 	return 0;
276 }
277 
/*
 * Full HuC init: validate the load mode against the HW configuration, then
 * create the firmware objects and mark the firmware as loadable.
 *
 * Returns 0 on success; on failure the fw status is set to INIT_FAIL and
 * the error from the failing step is returned.
 */
int intel_huc_init(struct intel_huc *huc)
{
	struct drm_i915_private *i915 = huc_to_gt(huc)->i915;
	int err;

	err = check_huc_loading_mode(huc);
	if (err)
		goto out;

	err = intel_uc_fw_init(&huc->fw);
	if (err)
		goto out;

	intel_uc_fw_change_status(&huc->fw, INTEL_UC_FIRMWARE_LOADABLE);

	return 0;

out:
	intel_uc_fw_change_status(&huc->fw, INTEL_UC_FIRMWARE_INIT_FAIL);
	drm_info(&i915->drm, "HuC init failed with %d\n", err);
	return err;
}
300 
/*
 * Release HuC resources: complete any pending delayed load before tearing
 * down the fence and the firmware objects.
 */
void intel_huc_fini(struct intel_huc *huc)
{
	/* nothing to clean up if init never got the fw to loadable state */
	if (!intel_uc_fw_is_loadable(&huc->fw))
		return;

	delayed_huc_load_complete(huc);

	i915_sw_fence_fini(&huc->delayed_load.fence);
	intel_uc_fw_fini(&huc->fw);
}
311 
/*
 * System suspend hook: stop waiting on any in-flight delayed HuC load; the
 * wait is re-armed on resume (see huc_delayed_load_start()).
 */
void intel_huc_suspend(struct intel_huc *huc)
{
	if (!intel_uc_fw_is_loadable(&huc->fw))
		return;

	/*
	 * in the unlikely case that we're suspending before the GSC has
	 * completed its loading sequence, just stop waiting. We'll restart
	 * on resume.
	 */
	delayed_huc_load_complete(huc);
}
324 
/*
 * Poll the platform-specific HuC status register until it reports the
 * firmware as verified/authenticated (timeout parameters 2/50 passed to
 * __intel_wait_for_register()), and update the fw status accordingly.
 *
 * Returns 0 on success, the __intel_wait_for_register() error otherwise.
 */
int intel_huc_wait_for_auth_complete(struct intel_huc *huc)
{
	struct intel_gt *gt = huc_to_gt(huc);
	int ret;

	ret = __intel_wait_for_register(gt->uncore,
					huc->status.reg,
					huc->status.mask,
					huc->status.value,
					2, 50, NULL);

	/* mark the load process as complete even if the wait failed */
	delayed_huc_load_complete(huc);

	if (ret) {
		drm_err(&gt->i915->drm, "HuC: Firmware not verified %d\n", ret);
		intel_uc_fw_change_status(&huc->fw, INTEL_UC_FIRMWARE_LOAD_FAIL);
		return ret;
	}

	intel_uc_fw_change_status(&huc->fw, INTEL_UC_FIRMWARE_RUNNING);
	drm_info(&gt->i915->drm, "HuC authenticated\n");
	return 0;
}
349 
350 /**
351  * intel_huc_auth() - Authenticate HuC uCode
352  * @huc: intel_huc structure
353  *
354  * Called after HuC and GuC firmware loading during intel_uc_init_hw().
355  *
356  * This function invokes the GuC action to authenticate the HuC firmware,
357  * passing the offset of the RSA signature to intel_guc_auth_huc(). It then
358  * waits for up to 50ms for firmware verification ACK.
359  */
360 int intel_huc_auth(struct intel_huc *huc)
361 {
362 	struct intel_gt *gt = huc_to_gt(huc);
363 	struct intel_guc *guc = &gt->uc.guc;
364 	int ret;
365 
366 	if (!intel_uc_fw_is_loaded(&huc->fw))
367 		return -ENOEXEC;
368 
369 	/* GSC will do the auth */
370 	if (intel_huc_is_loaded_by_gsc(huc))
371 		return -ENODEV;
372 
373 	ret = i915_inject_probe_error(gt->i915, -ENXIO);
374 	if (ret)
375 		goto fail;
376 
377 	GEM_BUG_ON(intel_uc_fw_is_running(&huc->fw));
378 
379 	ret = intel_guc_auth_huc(guc, intel_guc_ggtt_offset(guc, huc->fw.rsa_data));
380 	if (ret) {
381 		DRM_ERROR("HuC: GuC did not ack Auth request %d\n", ret);
382 		goto fail;
383 	}
384 
385 	/* Check authentication status, it should be done by now */
386 	ret = intel_huc_wait_for_auth_complete(huc);
387 	if (ret)
388 		goto fail;
389 
390 	return 0;
391 
392 fail:
393 	i915_probe_error(gt->i915, "HuC: Authentication failed %d\n", ret);
394 	return ret;
395 }
396 
/*
 * Read the platform-specific HuC status register (taking a runtime-PM
 * wakeref for the register access) and report whether the authenticated/
 * verified bits selected in intel_huc_init_early() are set.
 */
bool intel_huc_is_authenticated(struct intel_huc *huc)
{
	struct intel_gt *gt = huc_to_gt(huc);
	intel_wakeref_t wakeref;
	u32 status = 0;

	with_intel_runtime_pm(gt->uncore->rpm, wakeref)
		status = intel_uncore_read(gt->uncore, huc->status.reg);

	return (status & huc->status.mask) == huc->status.value;
}
408 
409 /**
410  * intel_huc_check_status() - check HuC status
411  * @huc: intel_huc structure
412  *
413  * This function reads status register to verify if HuC
414  * firmware was successfully loaded.
415  *
416  * The return values match what is expected for the I915_PARAM_HUC_STATUS
417  * getparam.
418  */
419 int intel_huc_check_status(struct intel_huc *huc)
420 {
421 	switch (__intel_uc_fw_status(&huc->fw)) {
422 	case INTEL_UC_FIRMWARE_NOT_SUPPORTED:
423 		return -ENODEV;
424 	case INTEL_UC_FIRMWARE_DISABLED:
425 		return -EOPNOTSUPP;
426 	case INTEL_UC_FIRMWARE_MISSING:
427 		return -ENOPKG;
428 	case INTEL_UC_FIRMWARE_ERROR:
429 		return -ENOEXEC;
430 	case INTEL_UC_FIRMWARE_INIT_FAIL:
431 		return -ENOMEM;
432 	case INTEL_UC_FIRMWARE_LOAD_FAIL:
433 		return -EIO;
434 	default:
435 		break;
436 	}
437 
438 	return intel_huc_is_authenticated(huc);
439 }
440 
441 static bool huc_has_delayed_load(struct intel_huc *huc)
442 {
443 	return intel_huc_is_loaded_by_gsc(huc) &&
444 	       (huc->delayed_load.status != INTEL_HUC_DELAYED_LOAD_ERROR);
445 }
446 
/*
 * Refresh the fw status after a (re)load: if the HW already reports the HuC
 * as authenticated, mark the firmware as running; otherwise, when the load
 * is handled asynchronously by the GSC, kick off the delayed-load wait.
 */
void intel_huc_update_auth_status(struct intel_huc *huc)
{
	if (!intel_uc_fw_is_loadable(&huc->fw))
		return;

	if (intel_huc_is_authenticated(huc))
		intel_uc_fw_change_status(&huc->fw,
					  INTEL_UC_FIRMWARE_RUNNING);
	else if (huc_has_delayed_load(huc))
		huc_delayed_load_start(huc);
}
458 
/**
 * intel_huc_load_status - dump information about HuC load status
 * @huc: the HuC
 * @p: the &drm_printer
 *
 * Pretty printer for HuC load status.
 */
void intel_huc_load_status(struct intel_huc *huc, struct drm_printer *p)
{
	struct intel_gt *gt = huc_to_gt(huc);
	intel_wakeref_t wakeref;

	if (!intel_huc_is_supported(huc)) {
		drm_printf(p, "HuC not supported\n");
		return;
	}

	if (!intel_huc_is_wanted(huc)) {
		drm_printf(p, "HuC disabled\n");
		return;
	}

	intel_uc_fw_dump(&huc->fw, p);

	/* raw status register read needs the device awake */
	with_intel_runtime_pm(gt->uncore->rpm, wakeref)
		drm_printf(p, "HuC status: 0x%08x\n",
			   intel_uncore_read(gt->uncore, huc->status.reg));
}
487