// SPDX-License-Identifier: MIT
/*
 * Copyright © 2016-2019 Intel Corporation
 */

#include <linux/bitfield.h>
#include <linux/firmware.h>
#include <linux/highmem.h>

#include <drm/drm_cache.h>
#include <drm/drm_print.h>

#include "gem/i915_gem_lmem.h"
#include "intel_uc_fw.h"
#include "intel_uc_fw_abi.h"
#include "i915_drv.h"
#include "i915_reg.h"

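/*
 * The intel_uc_fw structs are embedded inside struct intel_gt (as part of
 * gt->uc.guc / gt->uc.huc), so container_of() is enough to get back to the
 * GT that owns a given firmware object.
 */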
static inline struct intel_gt *
____uc_fw_to_gt(struct intel_uc_fw *uc_fw, enum intel_uc_fw_type type)
{
	if (type == INTEL_UC_FW_TYPE_GUC)
		return container_of(uc_fw, struct intel_gt, uc.guc.fw);

	GEM_BUG_ON(type != INTEL_UC_FW_TYPE_HUC);
	return container_of(uc_fw, struct intel_gt, uc.huc.fw);
}

static inline struct intel_gt *__uc_fw_to_gt(struct intel_uc_fw *uc_fw)
{
	GEM_BUG_ON(uc_fw->status == INTEL_UC_FIRMWARE_UNINITIALIZED);
	return ____uc_fw_to_gt(uc_fw, uc_fw->type);
}

#ifdef CONFIG_DRM_I915_DEBUG_GUC
void intel_uc_fw_change_status(struct intel_uc_fw *uc_fw,
			       enum intel_uc_fw_status status)
{
	uc_fw->__status = status;
	drm_dbg(&__uc_fw_to_gt(uc_fw)->i915->drm,
		"%s firmware -> %s\n",
		intel_uc_fw_type_repr(uc_fw->type),
		status == INTEL_UC_FIRMWARE_SELECTED ?
		uc_fw->path : intel_uc_fw_status_repr(status));
}
#endif

/*
 * List of required GuC and HuC binaries per-platform.
 * Must be ordered based on platform + revid, from newer to older.
 *
 * Note that RKL and ADL-S have the same GuC/HuC device IDs and use the same
 * firmware as TGL.
 */
#define INTEL_GUC_FIRMWARE_DEFS(fw_def, guc_def) \
	fw_def(DG2,          0, guc_def(dg2,  70, 4, 1)) \
	fw_def(ALDERLAKE_P,  0, guc_def(adlp, 70, 1, 1)) \
	fw_def(ALDERLAKE_S,  0, guc_def(tgl,  70, 1, 1)) \
	fw_def(DG1,          0, guc_def(dg1,  70, 1, 1)) \
	fw_def(ROCKETLAKE,   0, guc_def(tgl,  70, 1, 1)) \
	fw_def(TIGERLAKE,    0, guc_def(tgl,  70, 1, 1)) \
	fw_def(JASPERLAKE,   0, guc_def(ehl,  70, 1, 1)) \
	fw_def(ELKHARTLAKE,  0, guc_def(ehl,  70, 1, 1)) \
	fw_def(ICELAKE,      0, guc_def(icl,  70, 1, 1)) \
	fw_def(COMETLAKE,    5, guc_def(cml,  70, 1, 1)) \
	fw_def(COMETLAKE,    0, guc_def(kbl,  70, 1, 1)) \
	fw_def(COFFEELAKE,   0, guc_def(kbl,  70, 1, 1)) \
	fw_def(GEMINILAKE,   0, guc_def(glk,  70, 1, 1)) \
	fw_def(KABYLAKE,     0, guc_def(kbl,  70, 1, 1)) \
	fw_def(BROXTON,      0, guc_def(bxt,  70, 1, 1)) \
	fw_def(SKYLAKE,      0, guc_def(skl,  70, 1, 1))

#define INTEL_GUC_FIRMWARE_DEFS_FALLBACK(fw_def, guc_def) \
	fw_def(ALDERLAKE_P,  0, guc_def(adlp, 69, 0, 3)) \
	fw_def(ALDERLAKE_S,  0, guc_def(tgl,  69, 0, 3))

#define INTEL_HUC_FIRMWARE_DEFS(fw_def, huc_def) \
	fw_def(ALDERLAKE_P,  0, huc_def(tgl,  7, 9, 3)) \
	fw_def(ALDERLAKE_S,  0, huc_def(tgl,  7, 9, 3)) \
	fw_def(DG1,          0, huc_def(dg1,  7, 9, 3)) \
	fw_def(ROCKETLAKE,   0, huc_def(tgl,  7, 9, 3)) \
	fw_def(TIGERLAKE,    0, huc_def(tgl,  7, 9, 3)) \
	fw_def(JASPERLAKE,   0, huc_def(ehl,  9, 0, 0)) \
	fw_def(ELKHARTLAKE,  0, huc_def(ehl,  9, 0, 0)) \
	fw_def(ICELAKE,      0, huc_def(icl,  9, 0, 0)) \
	fw_def(COMETLAKE,    5, huc_def(cml,  4, 0, 0)) \
	fw_def(COMETLAKE,    0, huc_def(kbl,  4, 0, 0)) \
	fw_def(COFFEELAKE,   0, huc_def(kbl,  4, 0, 0)) \
	fw_def(GEMINILAKE,   0, huc_def(glk,  4, 0, 0)) \
	fw_def(KABYLAKE,     0, huc_def(kbl,  4, 0, 0)) \
	fw_def(BROXTON,      0, huc_def(bxt,  2, 0, 0)) \
	fw_def(SKYLAKE,      0, huc_def(skl,  2, 0, 0))

#define __MAKE_UC_FW_PATH(prefix_, name_, major_, minor_, patch_) \
	"i915/" \
	__stringify(prefix_) name_ \
	__stringify(major_) "." \
	__stringify(minor_) "." \
	__stringify(patch_) ".bin"

#define MAKE_GUC_FW_PATH(prefix_, major_, minor_, patch_) \
	__MAKE_UC_FW_PATH(prefix_, "_guc_", major_, minor_, patch_)

#define MAKE_HUC_FW_PATH(prefix_, major_, minor_, bld_num_) \
	__MAKE_UC_FW_PATH(prefix_, "_huc_", major_, minor_, bld_num_)
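
/*
 * For illustration, these macros expand to the firmware file names that will
 * be requested from userspace, e.g.:
 *   MAKE_GUC_FW_PATH(tgl, 70, 1, 1) -> "i915/tgl_guc_70.1.1.bin"
 *   MAKE_HUC_FW_PATH(tgl, 7, 9, 3)  -> "i915/tgl_huc_7.9.3.bin"
 */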

/* All blobs need to be declared via MODULE_FIRMWARE() */
#define INTEL_UC_MODULE_FW(platform_, revid_, uc_) \
	MODULE_FIRMWARE(uc_);

INTEL_GUC_FIRMWARE_DEFS(INTEL_UC_MODULE_FW, MAKE_GUC_FW_PATH)
INTEL_GUC_FIRMWARE_DEFS_FALLBACK(INTEL_UC_MODULE_FW, MAKE_GUC_FW_PATH)
INTEL_HUC_FIRMWARE_DEFS(INTEL_UC_MODULE_FW, MAKE_HUC_FW_PATH)

/* The below structs and macros are used to iterate across the list of blobs */
struct __packed uc_fw_blob {
	u8 major;
	u8 minor;
	const char *path;
};

#define UC_FW_BLOB(major_, minor_, path_) \
	{ .major = major_, .minor = minor_, .path = path_ }

#define GUC_FW_BLOB(prefix_, major_, minor_, patch_) \
	UC_FW_BLOB(major_, minor_, \
		   MAKE_GUC_FW_PATH(prefix_, major_, minor_, patch_))

#define HUC_FW_BLOB(prefix_, major_, minor_, bld_num_) \
	UC_FW_BLOB(major_, minor_, \
		   MAKE_HUC_FW_PATH(prefix_, major_, minor_, bld_num_))

struct __packed uc_fw_platform_requirement {
	enum intel_platform p;
	u8 rev; /* first platform rev using this FW */
	const struct uc_fw_blob blob;
};

#define MAKE_FW_LIST(platform_, revid_, uc_) \
{ \
	.p = INTEL_##platform_, \
	.rev = revid_, \
	.blob = uc_, \
},

struct fw_blobs_by_type {
	const struct uc_fw_platform_requirement *blobs;
	u32 count;
};

static void
__uc_fw_auto_select(struct drm_i915_private *i915, struct intel_uc_fw *uc_fw)
{
	static const struct uc_fw_platform_requirement blobs_guc[] = {
		INTEL_GUC_FIRMWARE_DEFS(MAKE_FW_LIST, GUC_FW_BLOB)
	};
	static const struct uc_fw_platform_requirement blobs_guc_fallback[] = {
		INTEL_GUC_FIRMWARE_DEFS_FALLBACK(MAKE_FW_LIST, GUC_FW_BLOB)
	};
	static const struct uc_fw_platform_requirement blobs_huc[] = {
		INTEL_HUC_FIRMWARE_DEFS(MAKE_FW_LIST, HUC_FW_BLOB)
	};
	static const struct fw_blobs_by_type blobs_all[INTEL_UC_FW_NUM_TYPES] = {
		[INTEL_UC_FW_TYPE_GUC] = { blobs_guc, ARRAY_SIZE(blobs_guc) },
		[INTEL_UC_FW_TYPE_HUC] = { blobs_huc, ARRAY_SIZE(blobs_huc) },
	};
	const struct uc_fw_platform_requirement *fw_blobs;
	enum intel_platform p = INTEL_INFO(i915)->platform;
	u32 fw_count;
	u8 rev = INTEL_REVID(i915);
	int i;

	/*
	 * The only difference between the ADL GuC FWs is the HWConfig support.
	 * ADL-N does not support HWConfig, so we should use the same binary as
	 * ADL-S, otherwise the GuC might attempt to fetch a config table that
	 * does not exist.
	 */
	if (IS_ADLP_N(i915))
		p = INTEL_ALDERLAKE_S;

	GEM_BUG_ON(uc_fw->type >= ARRAY_SIZE(blobs_all));
	fw_blobs = blobs_all[uc_fw->type].blobs;
	fw_count = blobs_all[uc_fw->type].count;

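	/*
	 * The list is ordered from newest to oldest platform (and, within a
	 * platform, from highest to lowest starting revid), so the first
	 * entry that matches this device is the one we want.
	 */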
	for (i = 0; i < fw_count && p <= fw_blobs[i].p; i++) {
		if (p == fw_blobs[i].p && rev >= fw_blobs[i].rev) {
			const struct uc_fw_blob *blob = &fw_blobs[i].blob;

			uc_fw->path = blob->path;
			uc_fw->wanted_path = blob->path;
			uc_fw->major_ver_wanted = blob->major;
			uc_fw->minor_ver_wanted = blob->minor;
			break;
		}
	}

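	/*
	 * For GuC we additionally record a fallback blob (an older major
	 * version) that the fetch code can fall back to if the preferred blob
	 * is not available on the filesystem.
	 */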
	if (uc_fw->type == INTEL_UC_FW_TYPE_GUC) {
		const struct uc_fw_platform_requirement *blobs = blobs_guc_fallback;
		u32 count = ARRAY_SIZE(blobs_guc_fallback);

		for (i = 0; i < count && p <= blobs[i].p; i++) {
			if (p == blobs[i].p && rev >= blobs[i].rev) {
				const struct uc_fw_blob *blob = &blobs[i].blob;

				uc_fw->fallback.path = blob->path;
				uc_fw->fallback.major_ver = blob->major;
				uc_fw->fallback.minor_ver = blob->minor;
				break;
			}
		}
	}

	/* make sure the list is ordered as expected */
	if (IS_ENABLED(CONFIG_DRM_I915_SELFTEST)) {
		for (i = 1; i < fw_count; i++) {
			if (fw_blobs[i].p < fw_blobs[i - 1].p)
				continue;

			if (fw_blobs[i].p == fw_blobs[i - 1].p &&
			    fw_blobs[i].rev < fw_blobs[i - 1].rev)
				continue;

			drm_err(&i915->drm, "Invalid FW blob order: %s r%u comes before %s r%u\n",
				intel_platform_name(fw_blobs[i - 1].p),
				fw_blobs[i - 1].rev,
				intel_platform_name(fw_blobs[i].p),
				fw_blobs[i].rev);

			uc_fw->path = NULL;
		}
	}
}

static const char *__override_guc_firmware_path(struct drm_i915_private *i915)
{
	if (i915->params.enable_guc & ENABLE_GUC_MASK)
		return i915->params.guc_firmware_path;
	return "";
}

static const char *__override_huc_firmware_path(struct drm_i915_private *i915)
{
	if (i915->params.enable_guc & ENABLE_GUC_LOAD_HUC)
		return i915->params.huc_firmware_path;
	return "";
}

static void __uc_fw_user_override(struct drm_i915_private *i915, struct intel_uc_fw *uc_fw)
{
	const char *path = NULL;

	switch (uc_fw->type) {
	case INTEL_UC_FW_TYPE_GUC:
		path = __override_guc_firmware_path(i915);
		break;
	case INTEL_UC_FW_TYPE_HUC:
		path = __override_huc_firmware_path(i915);
		break;
	}

	if (unlikely(path)) {
		uc_fw->path = path;
		uc_fw->user_overridden = true;
	}
}

/**
 * intel_uc_fw_init_early - initialize the uC object and select the firmware
 * @uc_fw: uC firmware
 * @type: type of uC
 *
 * Initialize the state of our uC object and relevant tracking and select the
 * firmware to fetch and load.
 */
void intel_uc_fw_init_early(struct intel_uc_fw *uc_fw,
			    enum intel_uc_fw_type type)
{
	struct drm_i915_private *i915 = ____uc_fw_to_gt(uc_fw, type)->i915;

	/*
	 * We use FIRMWARE_UNINITIALIZED to detect checks against uc_fw->status
	 * before we've looked at the HW caps to see if we have uC support.
	 */
	BUILD_BUG_ON(INTEL_UC_FIRMWARE_UNINITIALIZED);
	GEM_BUG_ON(uc_fw->status);
	GEM_BUG_ON(uc_fw->path);

	uc_fw->type = type;

	if (HAS_GT_UC(i915)) {
		__uc_fw_auto_select(i915, uc_fw);
		__uc_fw_user_override(i915, uc_fw);
	}

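	/*
	 * Path semantics: a NULL path means the uC is not supported on this
	 * platform, an empty path means it is supported but has been disabled
	 * (e.g. via the enable_guc modparam), and a non-empty path means a
	 * blob has been selected.
	 */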
	intel_uc_fw_change_status(uc_fw, uc_fw->path ? *uc_fw->path ?
				  INTEL_UC_FIRMWARE_SELECTED :
				  INTEL_UC_FIRMWARE_DISABLED :
				  INTEL_UC_FIRMWARE_NOT_SUPPORTED);
}

static void __force_fw_fetch_failures(struct intel_uc_fw *uc_fw, int e)
{
	struct drm_i915_private *i915 = __uc_fw_to_gt(uc_fw)->i915;
	bool user = e == -EINVAL;

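	/*
	 * Each i915_inject_probe_error() call below consumes its own fault
	 * injection point, so repeated probe attempts with increasing
	 * injection counts are expected to exercise a different failure mode
	 * each time.
	 */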
	if (i915_inject_probe_error(i915, e)) {
		/* non-existing blob */
		uc_fw->path = "<invalid>";
		uc_fw->user_overridden = user;
	} else if (i915_inject_probe_error(i915, e)) {
		/* require next major version */
		uc_fw->major_ver_wanted += 1;
		uc_fw->minor_ver_wanted = 0;
		uc_fw->user_overridden = user;
	} else if (i915_inject_probe_error(i915, e)) {
		/* require next minor version */
		uc_fw->minor_ver_wanted += 1;
		uc_fw->user_overridden = user;
	} else if (uc_fw->major_ver_wanted &&
		   i915_inject_probe_error(i915, e)) {
		/* require prev major version */
		uc_fw->major_ver_wanted -= 1;
		uc_fw->minor_ver_wanted = 0;
		uc_fw->user_overridden = user;
	} else if (uc_fw->minor_ver_wanted &&
		   i915_inject_probe_error(i915, e)) {
		/* require prev minor version - hey, this should work! */
		uc_fw->minor_ver_wanted -= 1;
		uc_fw->user_overridden = user;
	} else if (user && i915_inject_probe_error(i915, e)) {
		/* officially unsupported platform */
		uc_fw->major_ver_wanted = 0;
		uc_fw->minor_ver_wanted = 0;
		uc_fw->user_overridden = true;
	}
}

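/*
 * HuC blobs that are loaded through the GSC carry their version in a GSC
 * manifest rather than in a CSS header, so only the version dword is
 * extracted here.
 */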
static int check_gsc_manifest(const struct firmware *fw,
			      struct intel_uc_fw *uc_fw)
{
	u32 *dw = (u32 *)fw->data;
	u32 version = dw[HUC_GSC_VERSION_DW];

	uc_fw->major_ver_found = FIELD_GET(HUC_GSC_MAJOR_VER_MASK, version);
	uc_fw->minor_ver_found = FIELD_GET(HUC_GSC_MINOR_VER_MASK, version);

	return 0;
}

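/*
 * CSS-based blobs (GuC, and HuC when not loaded via the GSC) are laid out as
 * a CSS header, followed by the uCode image, followed by the RSA data used
 * for authentication. The sizes used below are derived from the CSS header
 * fields accordingly.
 */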
static int check_ccs_header(struct drm_i915_private *i915,
			    const struct firmware *fw,
			    struct intel_uc_fw *uc_fw)
{
	struct uc_css_header *css;
	size_t size;

	/* Check the size of the blob before examining buffer contents */
	if (unlikely(fw->size < sizeof(struct uc_css_header))) {
		drm_warn(&i915->drm, "%s firmware %s: invalid size: %zu < %zu\n",
			 intel_uc_fw_type_repr(uc_fw->type), uc_fw->path,
			 fw->size, sizeof(struct uc_css_header));
		return -ENODATA;
	}

	css = (struct uc_css_header *)fw->data;

	/* Check integrity of size values inside CSS header */
	size = (css->header_size_dw - css->key_size_dw - css->modulus_size_dw -
		css->exponent_size_dw) * sizeof(u32);
	if (unlikely(size != sizeof(struct uc_css_header))) {
		drm_warn(&i915->drm,
			 "%s firmware %s: unexpected header size: %zu != %zu\n",
			 intel_uc_fw_type_repr(uc_fw->type), uc_fw->path,
			 size, sizeof(struct uc_css_header));
		return -EPROTO;
	}

	/* uCode size must be calculated from other sizes */
	uc_fw->ucode_size = (css->size_dw - css->header_size_dw) * sizeof(u32);

	/* now RSA */
	uc_fw->rsa_size = css->key_size_dw * sizeof(u32);

	/* At least, it should have header, uCode and RSA. Size of all three. */
	size = sizeof(struct uc_css_header) + uc_fw->ucode_size + uc_fw->rsa_size;
	if (unlikely(fw->size < size)) {
		drm_warn(&i915->drm, "%s firmware %s: invalid size: %zu < %zu\n",
			 intel_uc_fw_type_repr(uc_fw->type), uc_fw->path,
			 fw->size, size);
		return -ENOEXEC;
	}

	/* Sanity check that the fw is not larger than the whole WOPCM memory */
	size = __intel_uc_fw_get_upload_size(uc_fw);
	if (unlikely(size >= i915->wopcm.size)) {
		drm_warn(&i915->drm, "%s firmware %s: invalid size: %zu > %zu\n",
			 intel_uc_fw_type_repr(uc_fw->type), uc_fw->path,
			 size, (size_t)i915->wopcm.size);
		return -E2BIG;
	}

	/* Get version numbers from the CSS header */
	uc_fw->major_ver_found = FIELD_GET(CSS_SW_VERSION_UC_MAJOR,
					   css->sw_version);
	uc_fw->minor_ver_found = FIELD_GET(CSS_SW_VERSION_UC_MINOR,
					   css->sw_version);

	if (uc_fw->type == INTEL_UC_FW_TYPE_GUC)
		uc_fw->private_data_size = css->private_data_size;

	return 0;
}

/**
 * intel_uc_fw_fetch - fetch uC firmware
 * @uc_fw: uC firmware
 *
 * Fetch uC firmware into GEM obj.
 *
 * Return: 0 on success, a negative errno code on failure.
 */
int intel_uc_fw_fetch(struct intel_uc_fw *uc_fw)
{
	struct drm_i915_private *i915 = __uc_fw_to_gt(uc_fw)->i915;
	struct device *dev = i915->drm.dev;
	struct drm_i915_gem_object *obj;
	const struct firmware *fw = NULL;
	int err;

	GEM_BUG_ON(!i915->wopcm.size);
	GEM_BUG_ON(!intel_uc_fw_is_enabled(uc_fw));

	err = i915_inject_probe_error(i915, -ENXIO);
	if (err)
		goto fail;

	__force_fw_fetch_failures(uc_fw, -EINVAL);
	__force_fw_fetch_failures(uc_fw, -ESTALE);

	err = firmware_request_nowarn(&fw, uc_fw->path, dev);
	if (err && !intel_uc_fw_is_overridden(uc_fw) && uc_fw->fallback.path) {
		err = firmware_request_nowarn(&fw, uc_fw->fallback.path, dev);
		if (!err) {
			drm_notice(&i915->drm,
				   "%s firmware %s is recommended, but only %s was found\n",
				   intel_uc_fw_type_repr(uc_fw->type),
				   uc_fw->wanted_path,
				   uc_fw->fallback.path);
			drm_info(&i915->drm,
				 "Consider updating your linux-firmware pkg or downloading from %s\n",
				 INTEL_UC_FIRMWARE_URL);

			uc_fw->path = uc_fw->fallback.path;
			uc_fw->major_ver_wanted = uc_fw->fallback.major_ver;
			uc_fw->minor_ver_wanted = uc_fw->fallback.minor_ver;
		}
	}
	if (err)
		goto fail;

	if (uc_fw->loaded_via_gsc)
		err = check_gsc_manifest(fw, uc_fw);
	else
		err = check_ccs_header(i915, fw, uc_fw);
	if (err)
		goto fail;

	if (uc_fw->major_ver_found != uc_fw->major_ver_wanted ||
	    uc_fw->minor_ver_found < uc_fw->minor_ver_wanted) {
		drm_notice(&i915->drm, "%s firmware %s: unexpected version: %u.%u != %u.%u\n",
			   intel_uc_fw_type_repr(uc_fw->type), uc_fw->path,
			   uc_fw->major_ver_found, uc_fw->minor_ver_found,
			   uc_fw->major_ver_wanted, uc_fw->minor_ver_wanted);
		if (!intel_uc_fw_is_overridden(uc_fw)) {
			err = -ENOEXEC;
			goto fail;
		}
	}

	if (HAS_LMEM(i915)) {
		obj = i915_gem_object_create_lmem_from_data(i915, fw->data, fw->size);
		if (!IS_ERR(obj))
			obj->flags |= I915_BO_ALLOC_PM_EARLY;
	} else {
		obj = i915_gem_object_create_shmem_from_data(i915, fw->data, fw->size);
	}

	if (IS_ERR(obj)) {
		err = PTR_ERR(obj);
		goto fail;
	}

	uc_fw->obj = obj;
	uc_fw->size = fw->size;
	intel_uc_fw_change_status(uc_fw, INTEL_UC_FIRMWARE_AVAILABLE);

	release_firmware(fw);
	return 0;

fail:
	intel_uc_fw_change_status(uc_fw, err == -ENOENT ?
				  INTEL_UC_FIRMWARE_MISSING :
				  INTEL_UC_FIRMWARE_ERROR);

	i915_probe_error(i915, "%s firmware %s: fetch failed with error %d\n",
			 intel_uc_fw_type_repr(uc_fw->type), uc_fw->path, err);
	drm_info(&i915->drm, "%s firmware(s) can be downloaded from %s\n",
		 intel_uc_fw_type_repr(uc_fw->type), INTEL_UC_FIRMWARE_URL);

	release_firmware(fw);		/* OK even if fw is NULL */
	return err;
}

static u32 uc_fw_ggtt_offset(struct intel_uc_fw *uc_fw)
{
	struct i915_ggtt *ggtt = __uc_fw_to_gt(uc_fw)->ggtt;
	struct drm_mm_node *node = &ggtt->uc_fw;

	GEM_BUG_ON(!drm_mm_node_allocated(node));
	GEM_BUG_ON(upper_32_bits(node->start));
	GEM_BUG_ON(upper_32_bits(node->start + node->size - 1));

	return lower_32_bits(node->start);
}

static void uc_fw_bind_ggtt(struct intel_uc_fw *uc_fw)
{
	struct drm_i915_gem_object *obj = uc_fw->obj;
	struct i915_ggtt *ggtt = __uc_fw_to_gt(uc_fw)->ggtt;
	struct i915_vma_resource *dummy = &uc_fw->dummy;
	u32 pte_flags = 0;

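	/*
	 * Build a stand-in vma_resource for the firmware object: it points the
	 * already-pinned backing pages at the GGTT node reserved for uC
	 * firmware, so the PTEs can be inserted directly without creating a
	 * full vma.
	 */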
	dummy->start = uc_fw_ggtt_offset(uc_fw);
	dummy->node_size = obj->base.size;
	dummy->bi.pages = obj->mm.pages;

	GEM_BUG_ON(!i915_gem_object_has_pinned_pages(obj));
	GEM_BUG_ON(dummy->node_size > ggtt->uc_fw.size);

	/* uc_fw->obj cache domains were not controlled across suspend */
	if (i915_gem_object_has_struct_page(obj))
		drm_clflush_sg(dummy->bi.pages);

	if (i915_gem_object_is_lmem(obj))
		pte_flags |= PTE_LM;

	if (ggtt->vm.raw_insert_entries)
		ggtt->vm.raw_insert_entries(&ggtt->vm, dummy, I915_CACHE_NONE, pte_flags);
	else
		ggtt->vm.insert_entries(&ggtt->vm, dummy, I915_CACHE_NONE, pte_flags);
}

static void uc_fw_unbind_ggtt(struct intel_uc_fw *uc_fw)
{
	struct drm_i915_gem_object *obj = uc_fw->obj;
	struct i915_ggtt *ggtt = __uc_fw_to_gt(uc_fw)->ggtt;
	u64 start = uc_fw_ggtt_offset(uc_fw);

	ggtt->vm.clear_range(&ggtt->vm, start, obj->base.size);
}

static int uc_fw_xfer(struct intel_uc_fw *uc_fw, u32 dst_offset, u32 dma_flags)
{
	struct intel_gt *gt = __uc_fw_to_gt(uc_fw);
	struct intel_uncore *uncore = gt->uncore;
	u64 offset;
	int ret;

	ret = i915_inject_probe_error(gt->i915, -ETIMEDOUT);
	if (ret)
		return ret;

	intel_uncore_forcewake_get(uncore, FORCEWAKE_ALL);

	/* Set the source address for the uCode */
	offset = uc_fw_ggtt_offset(uc_fw);
	GEM_BUG_ON(upper_32_bits(offset) & 0xFFFF0000);
	intel_uncore_write_fw(uncore, DMA_ADDR_0_LOW, lower_32_bits(offset));
	intel_uncore_write_fw(uncore, DMA_ADDR_0_HIGH, upper_32_bits(offset));

	/* Set the DMA destination */
	intel_uncore_write_fw(uncore, DMA_ADDR_1_LOW, dst_offset);
	intel_uncore_write_fw(uncore, DMA_ADDR_1_HIGH, DMA_ADDRESS_SPACE_WOPCM);

	/*
	 * Set the transfer size. The header plus uCode will be copied to WOPCM
	 * via DMA, excluding any other components
	 */
	intel_uncore_write_fw(uncore, DMA_COPY_SIZE,
			      sizeof(struct uc_css_header) + uc_fw->ucode_size);

	/* Start the DMA */
	intel_uncore_write_fw(uncore, DMA_CTRL,
			      _MASKED_BIT_ENABLE(dma_flags | START_DMA));

	/* Wait for DMA to finish */
	ret = intel_wait_for_register_fw(uncore, DMA_CTRL, START_DMA, 0, 100);
	if (ret)
		drm_err(&gt->i915->drm, "DMA for %s fw failed, DMA_CTRL=%u\n",
			intel_uc_fw_type_repr(uc_fw->type),
			intel_uncore_read_fw(uncore, DMA_CTRL));

	/* Disable the bits once DMA is over */
	intel_uncore_write_fw(uncore, DMA_CTRL, _MASKED_BIT_DISABLE(dma_flags));

	intel_uncore_forcewake_put(uncore, FORCEWAKE_ALL);

	return ret;
}

/**
 * intel_uc_fw_upload - load uC firmware using custom loader
 * @uc_fw: uC firmware
 * @dst_offset: destination offset
 * @dma_flags: flags for DMA ctrl
 *
 * Loads uC firmware and updates internal flags.
 *
 * Return: 0 on success, non-zero on failure.
 */
int intel_uc_fw_upload(struct intel_uc_fw *uc_fw, u32 dst_offset, u32 dma_flags)
{
	struct intel_gt *gt = __uc_fw_to_gt(uc_fw);
	int err;

	/* make sure the status was cleared the last time we reset the uc */
	GEM_BUG_ON(intel_uc_fw_is_loaded(uc_fw));

	err = i915_inject_probe_error(gt->i915, -ENOEXEC);
	if (err)
		return err;

	if (!intel_uc_fw_is_loadable(uc_fw))
		return -ENOEXEC;

	/* Call custom loader */
	uc_fw_bind_ggtt(uc_fw);
	err = uc_fw_xfer(uc_fw, dst_offset, dma_flags);
	uc_fw_unbind_ggtt(uc_fw);
	if (err)
		goto fail;

	intel_uc_fw_change_status(uc_fw, INTEL_UC_FIRMWARE_TRANSFERRED);
	return 0;

fail:
	i915_probe_error(gt->i915, "Failed to load %s firmware %s (%d)\n",
			 intel_uc_fw_type_repr(uc_fw->type), uc_fw->path,
			 err);
	intel_uc_fw_change_status(uc_fw, INTEL_UC_FIRMWARE_LOAD_FAIL);
	return err;
}

static inline bool uc_fw_need_rsa_in_memory(struct intel_uc_fw *uc_fw)
{
	/*
	 * The HW reads the GuC RSA from memory if the key size is > 256 bytes,
	 * while it reads it from the 64 RSA registers if it is smaller.
	 * The HuC RSA is always read from memory.
	 */
	return uc_fw->type == INTEL_UC_FW_TYPE_HUC || uc_fw->rsa_size > 256;
}

static int uc_fw_rsa_data_create(struct intel_uc_fw *uc_fw)
{
	struct intel_gt *gt = __uc_fw_to_gt(uc_fw);
	struct i915_vma *vma;
	size_t copied;
	void *vaddr;
	int err;

	err = i915_inject_probe_error(gt->i915, -ENXIO);
	if (err)
		return err;

	if (!uc_fw_need_rsa_in_memory(uc_fw))
		return 0;

	/*
	 * uC firmwares will sit above GUC_GGTT_TOP and will not map through
	 * GGTT. Unfortunately, this means that the GuC HW cannot perform the uC
	 * authentication from memory, as the RSA offset now falls within the
	 * GuC inaccessible range. We resort to perma-pinning an additional vma
	 * within the accessible range that only contains the RSA signature.
	 * The GuC HW can use this extra pinning to perform the authentication
	 * since its GGTT offset will be GuC accessible.
	 */
	GEM_BUG_ON(uc_fw->rsa_size > PAGE_SIZE);
	vma = intel_guc_allocate_vma(&gt->uc.guc, PAGE_SIZE);
	if (IS_ERR(vma))
		return PTR_ERR(vma);

	vaddr = i915_gem_object_pin_map_unlocked(vma->obj,
						 i915_coherent_map_type(gt->i915, vma->obj, true));
	if (IS_ERR(vaddr)) {
		err = PTR_ERR(vaddr);
		goto unpin_out;
	}

	copied = intel_uc_fw_copy_rsa(uc_fw, vaddr, vma->size);
	i915_gem_object_unpin_map(vma->obj);

	if (copied < uc_fw->rsa_size) {
		err = -ENOMEM;
		goto unpin_out;
	}

	uc_fw->rsa_data = vma;

	return 0;

unpin_out:
	i915_vma_unpin_and_release(&vma, 0);
	return err;
}

static void uc_fw_rsa_data_destroy(struct intel_uc_fw *uc_fw)
{
	i915_vma_unpin_and_release(&uc_fw->rsa_data, 0);
}

int intel_uc_fw_init(struct intel_uc_fw *uc_fw)
{
	int err;

	/* this should happen before the load! */
	GEM_BUG_ON(intel_uc_fw_is_loaded(uc_fw));

	if (!intel_uc_fw_is_available(uc_fw))
		return -ENOEXEC;

	err = i915_gem_object_pin_pages_unlocked(uc_fw->obj);
	if (err) {
		DRM_DEBUG_DRIVER("%s fw pin-pages err=%d\n",
				 intel_uc_fw_type_repr(uc_fw->type), err);
		goto out;
	}

	err = uc_fw_rsa_data_create(uc_fw);
	if (err) {
		DRM_DEBUG_DRIVER("%s fw rsa data creation failed, err=%d\n",
				 intel_uc_fw_type_repr(uc_fw->type), err);
		goto out_unpin;
	}

	return 0;

out_unpin:
	i915_gem_object_unpin_pages(uc_fw->obj);
out:
	intel_uc_fw_change_status(uc_fw, INTEL_UC_FIRMWARE_INIT_FAIL);
	return err;
}

void intel_uc_fw_fini(struct intel_uc_fw *uc_fw)
{
	uc_fw_rsa_data_destroy(uc_fw);

	if (i915_gem_object_has_pinned_pages(uc_fw->obj))
		i915_gem_object_unpin_pages(uc_fw->obj);

	intel_uc_fw_change_status(uc_fw, INTEL_UC_FIRMWARE_AVAILABLE);
}

/**
 * intel_uc_fw_cleanup_fetch - cleanup uC firmware
 * @uc_fw: uC firmware
 *
 * Cleans up uC firmware by releasing the firmware GEM obj.
 */
void intel_uc_fw_cleanup_fetch(struct intel_uc_fw *uc_fw)
{
	if (!intel_uc_fw_is_available(uc_fw))
		return;

	i915_gem_object_put(fetch_and_zero(&uc_fw->obj));

	intel_uc_fw_change_status(uc_fw, INTEL_UC_FIRMWARE_SELECTED);
}

/**
 * intel_uc_fw_copy_rsa - copy fw RSA to buffer
 *
 * @uc_fw: uC firmware
 * @dst: dst buffer
 * @max_len: max number of bytes to copy
 *
 * Return: number of copied bytes.
 */
size_t intel_uc_fw_copy_rsa(struct intel_uc_fw *uc_fw, void *dst, u32 max_len)
{
	struct intel_memory_region *mr = uc_fw->obj->mm.region;
	u32 size = min_t(u32, uc_fw->rsa_size, max_len);
	u32 offset = sizeof(struct uc_css_header) + uc_fw->ucode_size;
	struct sgt_iter iter;
	size_t count = 0;
	int idx;

	/* Called during reset handling, must be atomic [no fs_reclaim] */
	GEM_BUG_ON(!intel_uc_fw_is_available(uc_fw));

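	/*
	 * The RSA data starts right after the CSS header and uCode image in
	 * the blob. Walk the backing pages, skipping 'idx' whole pages first
	 * and then copying from 'offset' within the first page of interest.
	 */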
	idx = offset >> PAGE_SHIFT;
	offset = offset_in_page(offset);
	if (i915_gem_object_has_struct_page(uc_fw->obj)) {
		struct page *page;

		for_each_sgt_page(page, iter, uc_fw->obj->mm.pages) {
			u32 len = min_t(u32, size, PAGE_SIZE - offset);
			void *vaddr;

			if (idx > 0) {
				idx--;
				continue;
			}

			vaddr = kmap_atomic(page);
			memcpy(dst, vaddr + offset, len);
			kunmap_atomic(vaddr);

			offset = 0;
			dst += len;
			size -= len;
			count += len;
			if (!size)
				break;
		}
	} else {
		dma_addr_t addr;

		for_each_sgt_daddr(addr, iter, uc_fw->obj->mm.pages) {
			u32 len = min_t(u32, size, PAGE_SIZE - offset);
			void __iomem *vaddr;

			if (idx > 0) {
				idx--;
				continue;
			}

			vaddr = io_mapping_map_atomic_wc(&mr->iomap,
							 addr - mr->region.start);
			memcpy_fromio(dst, vaddr + offset, len);
			io_mapping_unmap_atomic(vaddr);

			offset = 0;
			dst += len;
			size -= len;
			count += len;
			if (!size)
				break;
		}
	}

	return count;
}

/**
 * intel_uc_fw_dump - dump information about uC firmware
 * @uc_fw: uC firmware
 * @p: the &drm_printer
 *
 * Pretty printer for uC firmware.
 */
void intel_uc_fw_dump(const struct intel_uc_fw *uc_fw, struct drm_printer *p)
{
	drm_printf(p, "%s firmware: %s\n",
		   intel_uc_fw_type_repr(uc_fw->type), uc_fw->wanted_path);
	if (uc_fw->fallback.path) {
		drm_printf(p, "%s firmware fallback: %s\n",
			   intel_uc_fw_type_repr(uc_fw->type), uc_fw->fallback.path);
		drm_printf(p, "fallback selected: %s\n",
			   str_yes_no(uc_fw->path == uc_fw->fallback.path));
	}
	drm_printf(p, "\tstatus: %s\n",
		   intel_uc_fw_status_repr(uc_fw->status));
	drm_printf(p, "\tversion: wanted %u.%u, found %u.%u\n",
		   uc_fw->major_ver_wanted, uc_fw->minor_ver_wanted,
		   uc_fw->major_ver_found, uc_fw->minor_ver_found);
	drm_printf(p, "\tuCode: %u bytes\n", uc_fw->ucode_size);
	drm_printf(p, "\tRSA: %u bytes\n", uc_fw->rsa_size);
}