xref: /openbmc/linux/drivers/gpu/drm/i915/gt/uc/intel_uc_fw.c (revision fe17b91a7777df140d0f1433991da67ba658796c)
1 // SPDX-License-Identifier: MIT
2 /*
3  * Copyright © 2016-2019 Intel Corporation
4  */
5 
6 #include <linux/bitfield.h>
7 #include <linux/firmware.h>
8 #include <linux/highmem.h>
9 
10 #include <drm/drm_cache.h>
11 #include <drm/drm_print.h>
12 
13 #include "gem/i915_gem_lmem.h"
14 #include "intel_uc_fw.h"
15 #include "intel_uc_fw_abi.h"
16 #include "i915_drv.h"
17 #include "i915_reg.h"
18 
19 static inline struct intel_gt *
20 ____uc_fw_to_gt(struct intel_uc_fw *uc_fw, enum intel_uc_fw_type type)
21 {
22 	if (type == INTEL_UC_FW_TYPE_GUC)
23 		return container_of(uc_fw, struct intel_gt, uc.guc.fw);
24 
25 	GEM_BUG_ON(type != INTEL_UC_FW_TYPE_HUC);
26 	return container_of(uc_fw, struct intel_gt, uc.huc.fw);
27 }
28 
29 static inline struct intel_gt *__uc_fw_to_gt(struct intel_uc_fw *uc_fw)
30 {
31 	GEM_BUG_ON(uc_fw->status == INTEL_UC_FIRMWARE_UNINITIALIZED);
32 	return ____uc_fw_to_gt(uc_fw, uc_fw->type);
33 }
34 
35 #ifdef CONFIG_DRM_I915_DEBUG_GUC
36 void intel_uc_fw_change_status(struct intel_uc_fw *uc_fw,
37 			       enum intel_uc_fw_status status)
38 {
39 	uc_fw->__status = status;
40 	drm_dbg(&__uc_fw_to_gt(uc_fw)->i915->drm,
41 		"%s firmware -> %s\n",
42 		intel_uc_fw_type_repr(uc_fw->type),
43 		status == INTEL_UC_FIRMWARE_SELECTED ?
44 		uc_fw->path : intel_uc_fw_status_repr(status));
45 }
46 #endif
47 
48 /*
49  * List of required GuC and HuC binaries per-platform.
50  * Must be ordered based on platform + revid, from newer to older.
51  *
52  * Note that RKL and ADL-S have the same GuC/HuC device IDs and use the same
53  * firmware as TGL.
54  */
55 #define INTEL_GUC_FIRMWARE_DEFS(fw_def, guc_def) \
56 	fw_def(DG2,          0, guc_def(dg2,  70, 1, 2)) \
57 	fw_def(ALDERLAKE_P,  0, guc_def(adlp, 70, 1, 1)) \
58 	fw_def(ALDERLAKE_S,  0, guc_def(tgl,  70, 1, 1)) \
59 	fw_def(DG1,          0, guc_def(dg1,  70, 1, 1)) \
60 	fw_def(ROCKETLAKE,   0, guc_def(tgl,  70, 1, 1)) \
61 	fw_def(TIGERLAKE,    0, guc_def(tgl,  70, 1, 1)) \
62 	fw_def(JASPERLAKE,   0, guc_def(ehl,  70, 1, 1)) \
63 	fw_def(ELKHARTLAKE,  0, guc_def(ehl,  70, 1, 1)) \
64 	fw_def(ICELAKE,      0, guc_def(icl,  70, 1, 1)) \
65 	fw_def(COMETLAKE,    5, guc_def(cml,  70, 1, 1)) \
66 	fw_def(COMETLAKE,    0, guc_def(kbl,  70, 1, 1)) \
67 	fw_def(COFFEELAKE,   0, guc_def(kbl,  70, 1, 1)) \
68 	fw_def(GEMINILAKE,   0, guc_def(glk,  70, 1, 1)) \
69 	fw_def(KABYLAKE,     0, guc_def(kbl,  70, 1, 1)) \
70 	fw_def(BROXTON,      0, guc_def(bxt,  70, 1, 1)) \
71 	fw_def(SKYLAKE,      0, guc_def(skl,  70, 1, 1))
72 
73 #define INTEL_GUC_FIRMWARE_DEFS_FALLBACK(fw_def, guc_def) \
74 	fw_def(ALDERLAKE_P,  0, guc_def(adlp, 69, 0, 3)) \
75 	fw_def(ALDERLAKE_S,  0, guc_def(tgl,  69, 0, 3))
76 
77 #define INTEL_HUC_FIRMWARE_DEFS(fw_def, huc_def) \
78 	fw_def(ALDERLAKE_P,  0, huc_def(tgl,  7, 9, 3)) \
79 	fw_def(ALDERLAKE_S,  0, huc_def(tgl,  7, 9, 3)) \
80 	fw_def(DG1,          0, huc_def(dg1,  7, 9, 3)) \
81 	fw_def(ROCKETLAKE,   0, huc_def(tgl,  7, 9, 3)) \
82 	fw_def(TIGERLAKE,    0, huc_def(tgl,  7, 9, 3)) \
83 	fw_def(JASPERLAKE,   0, huc_def(ehl,  9, 0, 0)) \
84 	fw_def(ELKHARTLAKE,  0, huc_def(ehl,  9, 0, 0)) \
85 	fw_def(ICELAKE,      0, huc_def(icl,  9, 0, 0)) \
86 	fw_def(COMETLAKE,    5, huc_def(cml,  4, 0, 0)) \
87 	fw_def(COMETLAKE,    0, huc_def(kbl,  4, 0, 0)) \
88 	fw_def(COFFEELAKE,   0, huc_def(kbl,  4, 0, 0)) \
89 	fw_def(GEMINILAKE,   0, huc_def(glk,  4, 0, 0)) \
90 	fw_def(KABYLAKE,     0, huc_def(kbl,  4, 0, 0)) \
91 	fw_def(BROXTON,      0, huc_def(bxt,  2, 0, 0)) \
92 	fw_def(SKYLAKE,      0, huc_def(skl,  2, 0, 0))
93 
94 #define __MAKE_UC_FW_PATH(prefix_, name_, major_, minor_, patch_) \
95 	"i915/" \
96 	__stringify(prefix_) name_ \
97 	__stringify(major_) "." \
98 	__stringify(minor_) "." \
99 	__stringify(patch_) ".bin"
100 
101 #define MAKE_GUC_FW_PATH(prefix_, major_, minor_, patch_) \
102 	__MAKE_UC_FW_PATH(prefix_, "_guc_", major_, minor_, patch_)
103 
104 #define MAKE_HUC_FW_PATH(prefix_, major_, minor_, bld_num_) \
105 	__MAKE_UC_FW_PATH(prefix_, "_huc_", major_, minor_, bld_num_)
106 
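/*
 * For illustration, the path macros above expand to plain firmware file names
 * under the "i915/" directory, e.g. (based on the definitions above):
 *
 *   MAKE_GUC_FW_PATH(tgl, 70, 1, 1) -> "i915/tgl_guc_70.1.1.bin"
 *   MAKE_HUC_FW_PATH(dg1, 7, 9, 3)  -> "i915/dg1_huc_7.9.3.bin"
 *
 * These are the names later passed to firmware_request_nowarn(), which the
 * firmware loader typically resolves under /lib/firmware.
 */
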
107 /* All blobs need to be declared via MODULE_FIRMWARE() */
108 #define INTEL_UC_MODULE_FW(platform_, revid_, uc_) \
109 	MODULE_FIRMWARE(uc_);
110 
111 INTEL_GUC_FIRMWARE_DEFS(INTEL_UC_MODULE_FW, MAKE_GUC_FW_PATH)
112 INTEL_GUC_FIRMWARE_DEFS_FALLBACK(INTEL_UC_MODULE_FW, MAKE_GUC_FW_PATH)
113 INTEL_HUC_FIRMWARE_DEFS(INTEL_UC_MODULE_FW, MAKE_HUC_FW_PATH)
114 
115 /* The below structs and macros are used to iterate across the list of blobs */
116 struct __packed uc_fw_blob {
117 	u8 major;
118 	u8 minor;
119 	const char *path;
120 };
121 
122 #define UC_FW_BLOB(major_, minor_, path_) \
123 	{ .major = major_, .minor = minor_, .path = path_ }
124 
125 #define GUC_FW_BLOB(prefix_, major_, minor_, patch_) \
126 	UC_FW_BLOB(major_, minor_, \
127 		   MAKE_GUC_FW_PATH(prefix_, major_, minor_, patch_))
128 
129 #define HUC_FW_BLOB(prefix_, major_, minor_, bld_num_) \
130 	UC_FW_BLOB(major_, minor_, \
131 		   MAKE_HUC_FW_PATH(prefix_, major_, minor_, bld_num_))
132 
133 struct __packed uc_fw_platform_requirement {
134 	enum intel_platform p;
135 	u8 rev; /* first platform rev using this FW */
136 	const struct uc_fw_blob blob;
137 };
138 
139 #define MAKE_FW_LIST(platform_, revid_, uc_) \
140 { \
141 	.p = INTEL_##platform_, \
142 	.rev = revid_, \
143 	.blob = uc_, \
144 },
145 
146 struct fw_blobs_by_type {
147 	const struct uc_fw_platform_requirement *blobs;
148 	u32 count;
149 };
150 
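/*
 * As a rough sketch of how the tables below are built: expanding one fw_def()
 * entry through MAKE_FW_LIST and GUC_FW_BLOB, e.g.
 * fw_def(TIGERLAKE, 0, guc_def(tgl, 70, 1, 1)), yields an initializer
 * equivalent to:
 *
 *   {
 *     .p = INTEL_TIGERLAKE,
 *     .rev = 0,
 *     .blob = { .major = 70, .minor = 1,
 *               .path = "i915/tgl_guc_70.1.1.bin" },
 *   },
 *
 * so each blobs_*[] array is just the per-platform list above in
 * struct uc_fw_platform_requirement form.
 */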
151 static void
152 __uc_fw_auto_select(struct drm_i915_private *i915, struct intel_uc_fw *uc_fw)
153 {
154 	static const struct uc_fw_platform_requirement blobs_guc[] = {
155 		INTEL_GUC_FIRMWARE_DEFS(MAKE_FW_LIST, GUC_FW_BLOB)
156 	};
157 	static const struct uc_fw_platform_requirement blobs_guc_fallback[] = {
158 		INTEL_GUC_FIRMWARE_DEFS_FALLBACK(MAKE_FW_LIST, GUC_FW_BLOB)
159 	};
160 	static const struct uc_fw_platform_requirement blobs_huc[] = {
161 		INTEL_HUC_FIRMWARE_DEFS(MAKE_FW_LIST, HUC_FW_BLOB)
162 	};
163 	static const struct fw_blobs_by_type blobs_all[INTEL_UC_FW_NUM_TYPES] = {
164 		[INTEL_UC_FW_TYPE_GUC] = { blobs_guc, ARRAY_SIZE(blobs_guc) },
165 		[INTEL_UC_FW_TYPE_HUC] = { blobs_huc, ARRAY_SIZE(blobs_huc) },
166 	};
167 	const struct uc_fw_platform_requirement *fw_blobs;
168 	enum intel_platform p = INTEL_INFO(i915)->platform;
169 	u32 fw_count;
170 	u8 rev = INTEL_REVID(i915);
171 	int i;
172 
173 	/*
174 	 * The only difference between the ADL GuC FWs is the HWConfig support.
175 	 * ADL-N does not support HWConfig, so we should use the same binary as
176 	 * ADL-S, otherwise the GuC might attempt to fetch a config table that
177 	 * does not exist.
178 	 */
179 	if (IS_ADLP_N(i915))
180 		p = INTEL_ALDERLAKE_S;
181 
182 	GEM_BUG_ON(uc_fw->type >= ARRAY_SIZE(blobs_all));
183 	fw_blobs = blobs_all[uc_fw->type].blobs;
184 	fw_count = blobs_all[uc_fw->type].count;
185 
186 	for (i = 0; i < fw_count && p <= fw_blobs[i].p; i++) {
187 		if (p == fw_blobs[i].p && rev >= fw_blobs[i].rev) {
188 			const struct uc_fw_blob *blob = &fw_blobs[i].blob;
189 			uc_fw->path = blob->path;
190 			uc_fw->wanted_path = blob->path;
191 			uc_fw->major_ver_wanted = blob->major;
192 			uc_fw->minor_ver_wanted = blob->minor;
193 			break;
194 		}
195 	}
196 
197 	if (uc_fw->type == INTEL_UC_FW_TYPE_GUC) {
198 		const struct uc_fw_platform_requirement *blobs = blobs_guc_fallback;
199 		u32 count = ARRAY_SIZE(blobs_guc_fallback);
200 
201 		for (i = 0; i < count && p <= blobs[i].p; i++) {
202 			if (p == blobs[i].p && rev >= blobs[i].rev) {
203 				const struct uc_fw_blob *blob = &blobs[i].blob;
204 
205 				uc_fw->fallback.path = blob->path;
206 				uc_fw->fallback.major_ver = blob->major;
207 				uc_fw->fallback.minor_ver = blob->minor;
208 				break;
209 			}
210 		}
211 	}
212 
213 	/* make sure the list is ordered as expected */
214 	if (IS_ENABLED(CONFIG_DRM_I915_SELFTEST)) {
215 		for (i = 1; i < fw_count; i++) {
216 			if (fw_blobs[i].p < fw_blobs[i - 1].p)
217 				continue;
218 
219 			if (fw_blobs[i].p == fw_blobs[i - 1].p &&
220 			    fw_blobs[i].rev < fw_blobs[i - 1].rev)
221 				continue;
222 
223 			pr_err("invalid FW blob order: %s r%u comes before %s r%u\n",
224 			       intel_platform_name(fw_blobs[i - 1].p),
225 			       fw_blobs[i - 1].rev,
226 			       intel_platform_name(fw_blobs[i].p),
227 			       fw_blobs[i].rev);
228 
229 			uc_fw->path = NULL;
230 		}
231 	}
232 }
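
/*
 * The selection walk above relies on the list being ordered from newer to
 * older: the first entry whose platform matches and whose .rev is not newer
 * than the device revision (INTEL_REVID) wins. As a worked example from the
 * GuC table, COMETLAKE with revision 5 or later picks the cml blob, while
 * earlier COMETLAKE revisions fall through to the kbl entry on the next line.
 */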
233 
234 static const char *__override_guc_firmware_path(struct drm_i915_private *i915)
235 {
236 	if (i915->params.enable_guc & ENABLE_GUC_MASK)
237 		return i915->params.guc_firmware_path;
238 	return "";
239 }
240 
241 static const char *__override_huc_firmware_path(struct drm_i915_private *i915)
242 {
243 	if (i915->params.enable_guc & ENABLE_GUC_LOAD_HUC)
244 		return i915->params.huc_firmware_path;
245 	return "";
246 }
247 
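/*
 * Note the NULL vs "" distinction in the helpers above: the guc_firmware_path
 * and huc_firmware_path module parameters are NULL unless set, which leaves
 * the auto-selected blob in place, while an empty string (returned when the
 * relevant enable_guc bit is clear, or passed explicitly by the user) ends up
 * marking the firmware as DISABLED in intel_uc_fw_init_early(). For example,
 * passing huc_firmware_path=i915/my_huc.bin (a hypothetical blob name)
 * together with an enable_guc value that has ENABLE_GUC_LOAD_HUC set makes
 * the driver fetch that blob instead of the auto-selected one.
 */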
248 static void __uc_fw_user_override(struct drm_i915_private *i915, struct intel_uc_fw *uc_fw)
249 {
250 	const char *path = NULL;
251 
252 	switch (uc_fw->type) {
253 	case INTEL_UC_FW_TYPE_GUC:
254 		path = __override_guc_firmware_path(i915);
255 		break;
256 	case INTEL_UC_FW_TYPE_HUC:
257 		path = __override_huc_firmware_path(i915);
258 		break;
259 	}
260 
261 	if (unlikely(path)) {
262 		uc_fw->path = path;
263 		uc_fw->user_overridden = true;
264 	}
265 }
266 
267 /**
268  * intel_uc_fw_init_early - initialize the uC object and select the firmware
269  * @uc_fw: uC firmware
270  * @type: type of uC
271  *
272  * Initialize the state of our uC object and relevant tracking and select the
273  * firmware to fetch and load.
274  */
275 void intel_uc_fw_init_early(struct intel_uc_fw *uc_fw,
276 			    enum intel_uc_fw_type type)
277 {
278 	struct drm_i915_private *i915 = ____uc_fw_to_gt(uc_fw, type)->i915;
279 
280 	/*
281 	 * we use FIRMWARE_UNINITIALIZED to detect checks against uc_fw->status
282 	 * before we've looked at the HW caps to see if we have uC support
283 	 */
284 	BUILD_BUG_ON(INTEL_UC_FIRMWARE_UNINITIALIZED);
285 	GEM_BUG_ON(uc_fw->status);
286 	GEM_BUG_ON(uc_fw->path);
287 
288 	uc_fw->type = type;
289 
290 	if (HAS_GT_UC(i915)) {
291 		__uc_fw_auto_select(i915, uc_fw);
292 		__uc_fw_user_override(i915, uc_fw);
293 	}
294 
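	/*
	 * The nested ternary below maps the selected path onto an initial
	 * status: NULL path -> NOT_SUPPORTED (no uC support or no blob
	 * defined), empty path -> DISABLED (uC support present but turned
	 * off), non-empty path -> SELECTED.
	 */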
295 	intel_uc_fw_change_status(uc_fw, uc_fw->path ? *uc_fw->path ?
296 				  INTEL_UC_FIRMWARE_SELECTED :
297 				  INTEL_UC_FIRMWARE_DISABLED :
298 				  INTEL_UC_FIRMWARE_NOT_SUPPORTED);
299 }
300 
301 static void __force_fw_fetch_failures(struct intel_uc_fw *uc_fw, int e)
302 {
303 	struct drm_i915_private *i915 = __uc_fw_to_gt(uc_fw)->i915;
304 	bool user = e == -EINVAL;
305 
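	/*
	 * The seemingly identical conditions below are intentional: each
	 * i915_inject_probe_error() call site is a separate fault-injection
	 * point, so a given injection count trips at most one branch and
	 * forces exactly one flavour of fetch failure.
	 */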
306 	if (i915_inject_probe_error(i915, e)) {
307 		/* non-existing blob */
308 		uc_fw->path = "<invalid>";
309 		uc_fw->user_overridden = user;
310 	} else if (i915_inject_probe_error(i915, e)) {
311 		/* require next major version */
312 		uc_fw->major_ver_wanted += 1;
313 		uc_fw->minor_ver_wanted = 0;
314 		uc_fw->user_overridden = user;
315 	} else if (i915_inject_probe_error(i915, e)) {
316 		/* require next minor version */
317 		uc_fw->minor_ver_wanted += 1;
318 		uc_fw->user_overridden = user;
319 	} else if (uc_fw->major_ver_wanted &&
320 		   i915_inject_probe_error(i915, e)) {
321 		/* require prev major version */
322 		uc_fw->major_ver_wanted -= 1;
323 		uc_fw->minor_ver_wanted = 0;
324 		uc_fw->user_overridden = user;
325 	} else if (uc_fw->minor_ver_wanted &&
326 		   i915_inject_probe_error(i915, e)) {
327 		/* require prev minor version - hey, this should work! */
328 		uc_fw->minor_ver_wanted -= 1;
329 		uc_fw->user_overridden = user;
330 	} else if (user && i915_inject_probe_error(i915, e)) {
331 		/* officially unsupported platform */
332 		uc_fw->major_ver_wanted = 0;
333 		uc_fw->minor_ver_wanted = 0;
334 		uc_fw->user_overridden = true;
335 	}
336 }
337 
338 /**
339  * intel_uc_fw_fetch - fetch uC firmware
340  * @uc_fw: uC firmware
341  *
342  * Fetch uC firmware into GEM obj.
343  *
344  * Return: 0 on success, a negative errno code on failure.
345  */
346 int intel_uc_fw_fetch(struct intel_uc_fw *uc_fw)
347 {
348 	struct drm_i915_private *i915 = __uc_fw_to_gt(uc_fw)->i915;
349 	struct device *dev = i915->drm.dev;
350 	struct drm_i915_gem_object *obj;
351 	const struct firmware *fw = NULL;
352 	struct uc_css_header *css;
353 	size_t size;
354 	int err;
355 
356 	GEM_BUG_ON(!i915->wopcm.size);
357 	GEM_BUG_ON(!intel_uc_fw_is_enabled(uc_fw));
358 
359 	err = i915_inject_probe_error(i915, -ENXIO);
360 	if (err)
361 		goto fail;
362 
363 	__force_fw_fetch_failures(uc_fw, -EINVAL);
364 	__force_fw_fetch_failures(uc_fw, -ESTALE);
365 
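	/*
	 * Try the preferred blob first. If that fails and the path was not
	 * overridden by the user, fall back to the older blob selected in
	 * __uc_fw_auto_select() (currently only defined for GuC on ADL) and
	 * adjust the wanted version numbers to match the fallback.
	 */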
366 	err = firmware_request_nowarn(&fw, uc_fw->path, dev);
367 	if (err && !intel_uc_fw_is_overridden(uc_fw) && uc_fw->fallback.path) {
368 		err = firmware_request_nowarn(&fw, uc_fw->fallback.path, dev);
369 		if (!err) {
370 			drm_notice(&i915->drm,
371 				   "%s firmware %s is recommended, but only %s was found\n",
372 				   intel_uc_fw_type_repr(uc_fw->type),
373 				   uc_fw->wanted_path,
374 				   uc_fw->fallback.path);
375 			drm_info(&i915->drm,
376 				 "Consider updating your linux-firmware pkg or downloading from %s\n",
377 				 INTEL_UC_FIRMWARE_URL);
378 
379 			uc_fw->path = uc_fw->fallback.path;
380 			uc_fw->major_ver_wanted = uc_fw->fallback.major_ver;
381 			uc_fw->minor_ver_wanted = uc_fw->fallback.minor_ver;
382 		}
383 	}
384 	if (err)
385 		goto fail;
386 
387 	/* Check the size of the blob before examining buffer contents */
388 	if (unlikely(fw->size < sizeof(struct uc_css_header))) {
389 		drm_warn(&i915->drm, "%s firmware %s: invalid size: %zu < %zu\n",
390 			 intel_uc_fw_type_repr(uc_fw->type), uc_fw->path,
391 			 fw->size, sizeof(struct uc_css_header));
392 		err = -ENODATA;
393 		goto fail;
394 	}
395 
396 	css = (struct uc_css_header *)fw->data;
397 
398 	/* Check integrity of size values inside CSS header */
399 	size = (css->header_size_dw - css->key_size_dw - css->modulus_size_dw -
400 		css->exponent_size_dw) * sizeof(u32);
401 	if (unlikely(size != sizeof(struct uc_css_header))) {
402 		drm_warn(&i915->drm,
403 			 "%s firmware %s: unexpected header size: %zu != %zu\n",
404 			 intel_uc_fw_type_repr(uc_fw->type), uc_fw->path,
405 			 size, sizeof(struct uc_css_header));
406 		err = -EPROTO;
407 		goto fail;
408 	}
409 
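	/*
	 * The blob layout assumed by the size checks below is, in order:
	 * CSS header | uCode image | RSA signature. The header and RSA key
	 * sizes are stored directly in the CSS header, while the uCode size
	 * is derived from the total image size minus the header size.
	 */
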
410 	/* uCode size must be calculated from other sizes */
411 	uc_fw->ucode_size = (css->size_dw - css->header_size_dw) * sizeof(u32);
412 
413 	/* now RSA */
414 	uc_fw->rsa_size = css->key_size_dw * sizeof(u32);
415 
416 	/* The blob must contain at least the header, uCode and RSA; check the size of all three */
417 	size = sizeof(struct uc_css_header) + uc_fw->ucode_size + uc_fw->rsa_size;
418 	if (unlikely(fw->size < size)) {
419 		drm_warn(&i915->drm, "%s firmware %s: invalid size: %zu < %zu\n",
420 			 intel_uc_fw_type_repr(uc_fw->type), uc_fw->path,
421 			 fw->size, size);
422 		err = -ENOEXEC;
423 		goto fail;
424 	}
425 
426 	/* Sanity check that this fw is not larger than the whole WOPCM memory */
427 	size = __intel_uc_fw_get_upload_size(uc_fw);
428 	if (unlikely(size >= i915->wopcm.size)) {
429 		drm_warn(&i915->drm, "%s firmware %s: invalid size: %zu > %zu\n",
430 			 intel_uc_fw_type_repr(uc_fw->type), uc_fw->path,
431 			 size, (size_t)i915->wopcm.size);
432 		err = -E2BIG;
433 		goto fail;
434 	}
435 
436 	/* Get version numbers from the CSS header */
437 	uc_fw->major_ver_found = FIELD_GET(CSS_SW_VERSION_UC_MAJOR,
438 					   css->sw_version);
439 	uc_fw->minor_ver_found = FIELD_GET(CSS_SW_VERSION_UC_MINOR,
440 					   css->sw_version);
441 
442 	if (uc_fw->major_ver_found != uc_fw->major_ver_wanted ||
443 	    uc_fw->minor_ver_found < uc_fw->minor_ver_wanted) {
444 		drm_notice(&i915->drm, "%s firmware %s: unexpected version: %u.%u != %u.%u\n",
445 			   intel_uc_fw_type_repr(uc_fw->type), uc_fw->path,
446 			   uc_fw->major_ver_found, uc_fw->minor_ver_found,
447 			   uc_fw->major_ver_wanted, uc_fw->minor_ver_wanted);
448 		if (!intel_uc_fw_is_overridden(uc_fw)) {
449 			err = -ENOEXEC;
450 			goto fail;
451 		}
452 	}
453 
454 	if (uc_fw->type == INTEL_UC_FW_TYPE_GUC)
455 		uc_fw->private_data_size = css->private_data_size;
456 
457 	if (HAS_LMEM(i915)) {
458 		obj = i915_gem_object_create_lmem_from_data(i915, fw->data, fw->size);
459 		if (!IS_ERR(obj))
460 			obj->flags |= I915_BO_ALLOC_PM_EARLY;
461 	} else {
462 		obj = i915_gem_object_create_shmem_from_data(i915, fw->data, fw->size);
463 	}
464 
465 	if (IS_ERR(obj)) {
466 		err = PTR_ERR(obj);
467 		goto fail;
468 	}
469 
470 	uc_fw->obj = obj;
471 	uc_fw->size = fw->size;
472 	intel_uc_fw_change_status(uc_fw, INTEL_UC_FIRMWARE_AVAILABLE);
473 
474 	release_firmware(fw);
475 	return 0;
476 
477 fail:
478 	intel_uc_fw_change_status(uc_fw, err == -ENOENT ?
479 				  INTEL_UC_FIRMWARE_MISSING :
480 				  INTEL_UC_FIRMWARE_ERROR);
481 
482 	i915_probe_error(i915, "%s firmware %s: fetch failed with error %d\n",
483 			 intel_uc_fw_type_repr(uc_fw->type), uc_fw->path, err);
484 	drm_info(&i915->drm, "%s firmware(s) can be downloaded from %s\n",
485 		 intel_uc_fw_type_repr(uc_fw->type), INTEL_UC_FIRMWARE_URL);
486 
487 	release_firmware(fw);		/* OK even if fw is NULL */
488 	return err;
489 }
490 
491 static u32 uc_fw_ggtt_offset(struct intel_uc_fw *uc_fw)
492 {
493 	struct i915_ggtt *ggtt = __uc_fw_to_gt(uc_fw)->ggtt;
494 	struct drm_mm_node *node = &ggtt->uc_fw;
495 
496 	GEM_BUG_ON(!drm_mm_node_allocated(node));
497 	GEM_BUG_ON(upper_32_bits(node->start));
498 	GEM_BUG_ON(upper_32_bits(node->start + node->size - 1));
499 
500 	return lower_32_bits(node->start);
501 }
502 
503 static void uc_fw_bind_ggtt(struct intel_uc_fw *uc_fw)
504 {
505 	struct drm_i915_gem_object *obj = uc_fw->obj;
506 	struct i915_ggtt *ggtt = __uc_fw_to_gt(uc_fw)->ggtt;
507 	struct i915_vma_resource *dummy = &uc_fw->dummy;
508 	u32 pte_flags = 0;
509 
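	/*
	 * Rather than creating a real i915_vma for the firmware object, the
	 * embedded "dummy" vma_resource is pointed at the object's pages and
	 * inserted directly into the drm_mm node reserved for uC firmware
	 * uploads (ggtt->uc_fw). The mapping only needs to live for the
	 * duration of the DMA transfer and is torn down in uc_fw_unbind_ggtt().
	 */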
510 	dummy->start = uc_fw_ggtt_offset(uc_fw);
511 	dummy->node_size = obj->base.size;
512 	dummy->bi.pages = obj->mm.pages;
513 
514 	GEM_BUG_ON(!i915_gem_object_has_pinned_pages(obj));
515 	GEM_BUG_ON(dummy->node_size > ggtt->uc_fw.size);
516 
517 	/* uc_fw->obj cache domains were not controlled across suspend */
518 	if (i915_gem_object_has_struct_page(obj))
519 		drm_clflush_sg(dummy->bi.pages);
520 
521 	if (i915_gem_object_is_lmem(obj))
522 		pte_flags |= PTE_LM;
523 
524 	ggtt->vm.insert_entries(&ggtt->vm, dummy, I915_CACHE_NONE, pte_flags);
525 }
526 
527 static void uc_fw_unbind_ggtt(struct intel_uc_fw *uc_fw)
528 {
529 	struct drm_i915_gem_object *obj = uc_fw->obj;
530 	struct i915_ggtt *ggtt = __uc_fw_to_gt(uc_fw)->ggtt;
531 	u64 start = uc_fw_ggtt_offset(uc_fw);
532 
533 	ggtt->vm.clear_range(&ggtt->vm, start, obj->base.size);
534 }
535 
536 static int uc_fw_xfer(struct intel_uc_fw *uc_fw, u32 dst_offset, u32 dma_flags)
537 {
538 	struct intel_gt *gt = __uc_fw_to_gt(uc_fw);
539 	struct intel_uncore *uncore = gt->uncore;
540 	u64 offset;
541 	int ret;
542 
543 	ret = i915_inject_probe_error(gt->i915, -ETIMEDOUT);
544 	if (ret)
545 		return ret;
546 
547 	intel_uncore_forcewake_get(uncore, FORCEWAKE_ALL);
548 
549 	/* Set the source address for the uCode */
550 	offset = uc_fw_ggtt_offset(uc_fw);
551 	GEM_BUG_ON(upper_32_bits(offset) & 0xFFFF0000);
552 	intel_uncore_write_fw(uncore, DMA_ADDR_0_LOW, lower_32_bits(offset));
553 	intel_uncore_write_fw(uncore, DMA_ADDR_0_HIGH, upper_32_bits(offset));
554 
555 	/* Set the DMA destination */
556 	intel_uncore_write_fw(uncore, DMA_ADDR_1_LOW, dst_offset);
557 	intel_uncore_write_fw(uncore, DMA_ADDR_1_HIGH, DMA_ADDRESS_SPACE_WOPCM);
558 
559 	/*
560 	 * Set the transfer size. The header plus uCode will be copied to WOPCM
561 	 * via DMA, excluding any other components
562 	 */
563 	intel_uncore_write_fw(uncore, DMA_COPY_SIZE,
564 			      sizeof(struct uc_css_header) + uc_fw->ucode_size);
565 
566 	/* Start the DMA */
567 	intel_uncore_write_fw(uncore, DMA_CTRL,
568 			      _MASKED_BIT_ENABLE(dma_flags | START_DMA));
569 
570 	/* Wait for DMA to finish */
571 	ret = intel_wait_for_register_fw(uncore, DMA_CTRL, START_DMA, 0, 100);
572 	if (ret)
573 		drm_err(&gt->i915->drm, "DMA for %s fw failed, DMA_CTRL=%u\n",
574 			intel_uc_fw_type_repr(uc_fw->type),
575 			intel_uncore_read_fw(uncore, DMA_CTRL));
576 
577 	/* Disable the bits once DMA is over */
578 	intel_uncore_write_fw(uncore, DMA_CTRL, _MASKED_BIT_DISABLE(dma_flags));
579 
580 	intel_uncore_forcewake_put(uncore, FORCEWAKE_ALL);
581 
582 	return ret;
583 }
584 
585 /**
586  * intel_uc_fw_upload - load uC firmware using custom loader
587  * @uc_fw: uC firmware
588  * @dst_offset: destination offset
589  * @dma_flags: flags for the DMA_CTRL register
590  *
591  * Loads uC firmware and updates internal flags.
592  *
593  * Return: 0 on success, non-zero on failure.
594  */
595 int intel_uc_fw_upload(struct intel_uc_fw *uc_fw, u32 dst_offset, u32 dma_flags)
596 {
597 	struct intel_gt *gt = __uc_fw_to_gt(uc_fw);
598 	int err;
599 
600 	/* make sure the status was cleared the last time we reset the uc */
601 	GEM_BUG_ON(intel_uc_fw_is_loaded(uc_fw));
602 
603 	err = i915_inject_probe_error(gt->i915, -ENOEXEC);
604 	if (err)
605 		return err;
606 
607 	if (!intel_uc_fw_is_loadable(uc_fw))
608 		return -ENOEXEC;
609 
610 	/* Call custom loader */
611 	uc_fw_bind_ggtt(uc_fw);
612 	err = uc_fw_xfer(uc_fw, dst_offset, dma_flags);
613 	uc_fw_unbind_ggtt(uc_fw);
614 	if (err)
615 		goto fail;
616 
617 	intel_uc_fw_change_status(uc_fw, INTEL_UC_FIRMWARE_TRANSFERRED);
618 	return 0;
619 
620 fail:
621 	i915_probe_error(gt->i915, "Failed to load %s firmware %s (%d)\n",
622 			 intel_uc_fw_type_repr(uc_fw->type), uc_fw->path,
623 			 err);
624 	intel_uc_fw_change_status(uc_fw, INTEL_UC_FIRMWARE_LOAD_FAIL);
625 	return err;
626 }
627 
628 static inline bool uc_fw_need_rsa_in_memory(struct intel_uc_fw *uc_fw)
629 {
630 	/*
631 	 * The HW reads the GuC RSA from memory if the key size is > 256 bytes,
632 	 * while it reads it from the 64 RSA registers if it is smaller.
633 	 * The HuC RSA is always read from memory.
634 	 */
635 	return uc_fw->type == INTEL_UC_FW_TYPE_HUC || uc_fw->rsa_size > 256;
636 }
637 
638 static int uc_fw_rsa_data_create(struct intel_uc_fw *uc_fw)
639 {
640 	struct intel_gt *gt = __uc_fw_to_gt(uc_fw);
641 	struct i915_vma *vma;
642 	size_t copied;
643 	void *vaddr;
644 	int err;
645 
646 	err = i915_inject_probe_error(gt->i915, -ENXIO);
647 	if (err)
648 		return err;
649 
650 	if (!uc_fw_need_rsa_in_memory(uc_fw))
651 		return 0;
652 
653 	/*
654 	 * uC firmwares will sit above GUC_GGTT_TOP and will not map through
655 	 * GGTT. Unfortunately, this means that the GuC HW cannot perform the uC
656 	 * authentication from memory, as the RSA offset now falls within the
657 	 * GuC inaccessible range. We resort to perma-pinning an additional vma
658 	 * within the accessible range that only contains the RSA signature.
659 	 * The GuC HW can use this extra pinning to perform the authentication
660 	 * since its GGTT offset will be GuC accessible.
661 	 */
662 	GEM_BUG_ON(uc_fw->rsa_size > PAGE_SIZE);
663 	vma = intel_guc_allocate_vma(&gt->uc.guc, PAGE_SIZE);
664 	if (IS_ERR(vma))
665 		return PTR_ERR(vma);
666 
667 	vaddr = i915_gem_object_pin_map_unlocked(vma->obj,
668 						 i915_coherent_map_type(gt->i915, vma->obj, true));
669 	if (IS_ERR(vaddr)) {
670 		i915_vma_unpin_and_release(&vma, 0);
671 		err = PTR_ERR(vaddr);
672 		goto unpin_out;
673 	}
674 
675 	copied = intel_uc_fw_copy_rsa(uc_fw, vaddr, vma->size);
676 	i915_gem_object_unpin_map(vma->obj);
677 
678 	if (copied < uc_fw->rsa_size) {
679 		err = -ENOMEM;
680 		goto unpin_out;
681 	}
682 
683 	uc_fw->rsa_data = vma;
684 
685 	return 0;
686 
687 unpin_out:
688 	i915_vma_unpin_and_release(&vma, 0);
689 	return err;
690 }
691 
692 static void uc_fw_rsa_data_destroy(struct intel_uc_fw *uc_fw)
693 {
694 	i915_vma_unpin_and_release(&uc_fw->rsa_data, 0);
695 }
696 
697 int intel_uc_fw_init(struct intel_uc_fw *uc_fw)
698 {
699 	int err;
700 
701 	/* this should happen before the load! */
702 	GEM_BUG_ON(intel_uc_fw_is_loaded(uc_fw));
703 
704 	if (!intel_uc_fw_is_available(uc_fw))
705 		return -ENOEXEC;
706 
707 	err = i915_gem_object_pin_pages_unlocked(uc_fw->obj);
708 	if (err) {
709 		DRM_DEBUG_DRIVER("%s fw pin-pages err=%d\n",
710 				 intel_uc_fw_type_repr(uc_fw->type), err);
711 		goto out;
712 	}
713 
714 	err = uc_fw_rsa_data_create(uc_fw);
715 	if (err) {
716 		DRM_DEBUG_DRIVER("%s fw rsa data creation failed, err=%d\n",
717 				 intel_uc_fw_type_repr(uc_fw->type), err);
718 		goto out_unpin;
719 	}
720 
721 	return 0;
722 
723 out_unpin:
724 	i915_gem_object_unpin_pages(uc_fw->obj);
725 out:
726 	intel_uc_fw_change_status(uc_fw, INTEL_UC_FIRMWARE_INIT_FAIL);
727 	return err;
728 }
729 
730 void intel_uc_fw_fini(struct intel_uc_fw *uc_fw)
731 {
732 	uc_fw_rsa_data_destroy(uc_fw);
733 
734 	if (i915_gem_object_has_pinned_pages(uc_fw->obj))
735 		i915_gem_object_unpin_pages(uc_fw->obj);
736 
737 	intel_uc_fw_change_status(uc_fw, INTEL_UC_FIRMWARE_AVAILABLE);
738 }
739 
740 /**
741  * intel_uc_fw_cleanup_fetch - cleanup uC firmware
742  * @uc_fw: uC firmware
743  *
744  * Cleans up uC firmware by releasing the firmware GEM obj.
745  */
746 void intel_uc_fw_cleanup_fetch(struct intel_uc_fw *uc_fw)
747 {
748 	if (!intel_uc_fw_is_available(uc_fw))
749 		return;
750 
751 	i915_gem_object_put(fetch_and_zero(&uc_fw->obj));
752 
753 	intel_uc_fw_change_status(uc_fw, INTEL_UC_FIRMWARE_SELECTED);
754 }
755 
756 /**
757  * intel_uc_fw_copy_rsa - copy fw RSA to buffer
758  *
759  * @uc_fw: uC firmware
760  * @dst: dst buffer
761  * @max_len: max number of bytes to copy
762  *
763  * Return: number of copied bytes.
764  */
765 size_t intel_uc_fw_copy_rsa(struct intel_uc_fw *uc_fw, void *dst, u32 max_len)
766 {
767 	struct intel_memory_region *mr = uc_fw->obj->mm.region;
768 	u32 size = min_t(u32, uc_fw->rsa_size, max_len);
769 	u32 offset = sizeof(struct uc_css_header) + uc_fw->ucode_size;
770 	struct sgt_iter iter;
771 	size_t count = 0;
772 	int idx;
773 
774 	/* Called during reset handling, must be atomic [no fs_reclaim] */
775 	GEM_BUG_ON(!intel_uc_fw_is_available(uc_fw));
776 
777 	idx = offset >> PAGE_SHIFT;
778 	offset = offset_in_page(offset);
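
	/*
	 * Walk the backing pages, skipping 'idx' whole pages to reach the
	 * start of the RSA blob, then copy up to PAGE_SIZE - offset bytes per
	 * page until 'size' bytes have been gathered. The same pattern is
	 * used for both the struct-page and the iomem (LMEM) cases below.
	 */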
779 	if (i915_gem_object_has_struct_page(uc_fw->obj)) {
780 		struct page *page;
781 
782 		for_each_sgt_page(page, iter, uc_fw->obj->mm.pages) {
783 			u32 len = min_t(u32, size, PAGE_SIZE - offset);
784 			void *vaddr;
785 
786 			if (idx > 0) {
787 				idx--;
788 				continue;
789 			}
790 
791 			vaddr = kmap_atomic(page);
792 			memcpy(dst, vaddr + offset, len);
793 			kunmap_atomic(vaddr);
794 
795 			offset = 0;
796 			dst += len;
797 			size -= len;
798 			count += len;
799 			if (!size)
800 				break;
801 		}
802 	} else {
803 		dma_addr_t addr;
804 
805 		for_each_sgt_daddr(addr, iter, uc_fw->obj->mm.pages) {
806 			u32 len = min_t(u32, size, PAGE_SIZE - offset);
807 			void __iomem *vaddr;
808 
809 			if (idx > 0) {
810 				idx--;
811 				continue;
812 			}
813 
814 			vaddr = io_mapping_map_atomic_wc(&mr->iomap,
815 							 addr - mr->region.start);
816 			memcpy_fromio(dst, vaddr + offset, len);
817 			io_mapping_unmap_atomic(vaddr);
818 
819 			offset = 0;
820 			dst += len;
821 			size -= len;
822 			count += len;
823 			if (!size)
824 				break;
825 		}
826 	}
827 
828 	return count;
829 }
830 
831 /**
832  * intel_uc_fw_dump - dump information about uC firmware
833  * @uc_fw: uC firmware
834  * @p: the &drm_printer
835  *
836  * Pretty printer for uC firmware.
837  */
838 void intel_uc_fw_dump(const struct intel_uc_fw *uc_fw, struct drm_printer *p)
839 {
840 	drm_printf(p, "%s firmware: %s\n",
841 		   intel_uc_fw_type_repr(uc_fw->type), uc_fw->wanted_path);
842 	if (uc_fw->fallback.path) {
843 		drm_printf(p, "%s firmware fallback: %s\n",
844 			   intel_uc_fw_type_repr(uc_fw->type), uc_fw->fallback.path);
845 		drm_printf(p, "fallback selected: %s\n",
846 			   str_yes_no(uc_fw->path == uc_fw->fallback.path));
847 	}
848 	drm_printf(p, "\tstatus: %s\n",
849 		   intel_uc_fw_status_repr(uc_fw->status));
850 	drm_printf(p, "\tversion: wanted %u.%u, found %u.%u\n",
851 		   uc_fw->major_ver_wanted, uc_fw->minor_ver_wanted,
852 		   uc_fw->major_ver_found, uc_fw->minor_ver_found);
853 	drm_printf(p, "\tuCode: %u bytes\n", uc_fw->ucode_size);
854 	drm_printf(p, "\tRSA: %u bytes\n", uc_fw->rsa_size);
855 }
856