// SPDX-License-Identifier: MIT
/*
 * Copyright © 2016-2019 Intel Corporation
 */

#include <linux/bitfield.h>
#include <linux/firmware.h>
#include <linux/highmem.h>

#include <drm/drm_cache.h>
#include <drm/drm_print.h>

#include "gem/i915_gem_lmem.h"
#include "intel_uc_fw.h"
#include "intel_uc_fw_abi.h"
#include "i915_drv.h"
#include "i915_reg.h"

static inline struct intel_gt *
____uc_fw_to_gt(struct intel_uc_fw *uc_fw, enum intel_uc_fw_type type)
{
	if (type == INTEL_UC_FW_TYPE_GUC)
		return container_of(uc_fw, struct intel_gt, uc.guc.fw);

	GEM_BUG_ON(type != INTEL_UC_FW_TYPE_HUC);
	return container_of(uc_fw, struct intel_gt, uc.huc.fw);
}

static inline struct intel_gt *__uc_fw_to_gt(struct intel_uc_fw *uc_fw)
{
	GEM_BUG_ON(uc_fw->status == INTEL_UC_FIRMWARE_UNINITIALIZED);
	return ____uc_fw_to_gt(uc_fw, uc_fw->type);
}

#ifdef CONFIG_DRM_I915_DEBUG_GUC
void intel_uc_fw_change_status(struct intel_uc_fw *uc_fw,
			       enum intel_uc_fw_status status)
{
	uc_fw->__status = status;
	drm_dbg(&__uc_fw_to_gt(uc_fw)->i915->drm,
		"%s firmware -> %s\n",
		intel_uc_fw_type_repr(uc_fw->type),
		status == INTEL_UC_FIRMWARE_SELECTED ?
		uc_fw->path : intel_uc_fw_status_repr(status));
}
#endif
/*
 * List of required GuC and HuC binaries per-platform.
 * Must be ordered based on platform + revid, from newer to older.
 *
 * Note that RKL and ADL-S have the same GuC/HuC device IDs and use the same
 * firmware as TGL.
 */
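
/*
 * Each list below is expanded twice: once to emit the MODULE_FIRMWARE()
 * declarations and once to build the per-type blob tables consumed by
 * __uc_fw_auto_select().
 */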
#define INTEL_GUC_FIRMWARE_DEFS(fw_def, guc_def) \
	fw_def(DG2,          0, guc_def(dg2,  70, 1, 2)) \
	fw_def(ALDERLAKE_P,  0, guc_def(adlp, 70, 1, 1)) \
	fw_def(ALDERLAKE_S,  0, guc_def(tgl,  70, 1, 1)) \
	fw_def(DG1,          0, guc_def(dg1,  70, 1, 1)) \
	fw_def(ROCKETLAKE,   0, guc_def(tgl,  70, 1, 1)) \
	fw_def(TIGERLAKE,    0, guc_def(tgl,  70, 1, 1)) \
	fw_def(JASPERLAKE,   0, guc_def(ehl,  70, 1, 1)) \
	fw_def(ELKHARTLAKE,  0, guc_def(ehl,  70, 1, 1)) \
	fw_def(ICELAKE,      0, guc_def(icl,  70, 1, 1)) \
	fw_def(COMETLAKE,    5, guc_def(cml,  70, 1, 1)) \
	fw_def(COMETLAKE,    0, guc_def(kbl,  70, 1, 1)) \
	fw_def(COFFEELAKE,   0, guc_def(kbl,  70, 1, 1)) \
	fw_def(GEMINILAKE,   0, guc_def(glk,  70, 1, 1)) \
	fw_def(KABYLAKE,     0, guc_def(kbl,  70, 1, 1)) \
	fw_def(BROXTON,      0, guc_def(bxt,  70, 1, 1)) \
	fw_def(SKYLAKE,      0, guc_def(skl,  70, 1, 1))

#define INTEL_HUC_FIRMWARE_DEFS(fw_def, huc_def) \
	fw_def(ALDERLAKE_P,  0, huc_def(tgl,  7, 9, 3)) \
	fw_def(ALDERLAKE_S,  0, huc_def(tgl,  7, 9, 3)) \
	fw_def(DG1,          0, huc_def(dg1,  7, 9, 3)) \
	fw_def(ROCKETLAKE,   0, huc_def(tgl,  7, 9, 3)) \
	fw_def(TIGERLAKE,    0, huc_def(tgl,  7, 9, 3)) \
	fw_def(JASPERLAKE,   0, huc_def(ehl,  9, 0, 0)) \
	fw_def(ELKHARTLAKE,  0, huc_def(ehl,  9, 0, 0)) \
	fw_def(ICELAKE,      0, huc_def(icl,  9, 0, 0)) \
	fw_def(COMETLAKE,    5, huc_def(cml,  4, 0, 0)) \
	fw_def(COMETLAKE,    0, huc_def(kbl,  4, 0, 0)) \
	fw_def(COFFEELAKE,   0, huc_def(kbl,  4, 0, 0)) \
	fw_def(GEMINILAKE,   0, huc_def(glk,  4, 0, 0)) \
	fw_def(KABYLAKE,     0, huc_def(kbl,  4, 0, 0)) \
	fw_def(BROXTON,      0, huc_def(bxt,  2, 0, 0)) \
	fw_def(SKYLAKE,      0, huc_def(skl,  2, 0, 0))

#define __MAKE_UC_FW_PATH(prefix_, name_, major_, minor_, patch_) \
	"i915/" \
	__stringify(prefix_) name_ \
	__stringify(major_) "." \
	__stringify(minor_) "." \
	__stringify(patch_) ".bin"

#define MAKE_GUC_FW_PATH(prefix_, major_, minor_, patch_) \
	__MAKE_UC_FW_PATH(prefix_, "_guc_", major_, minor_, patch_)

#define MAKE_HUC_FW_PATH(prefix_, major_, minor_, bld_num_) \
	__MAKE_UC_FW_PATH(prefix_, "_huc_", major_, minor_, bld_num_)
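
/* e.g. MAKE_GUC_FW_PATH(tgl, 70, 1, 1) expands to "i915/tgl_guc_70.1.1.bin" */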

/* All blobs need to be declared via MODULE_FIRMWARE() */
#define INTEL_UC_MODULE_FW(platform_, revid_, uc_) \
	MODULE_FIRMWARE(uc_);

INTEL_GUC_FIRMWARE_DEFS(INTEL_UC_MODULE_FW, MAKE_GUC_FW_PATH)
INTEL_HUC_FIRMWARE_DEFS(INTEL_UC_MODULE_FW, MAKE_HUC_FW_PATH)

/* The below structs and macros are used to iterate across the list of blobs */
struct __packed uc_fw_blob {
	u8 major;
	u8 minor;
	const char *path;
};

#define UC_FW_BLOB(major_, minor_, path_) \
	{ .major = major_, .minor = minor_, .path = path_ }

#define GUC_FW_BLOB(prefix_, major_, minor_, patch_) \
	UC_FW_BLOB(major_, minor_, \
		   MAKE_GUC_FW_PATH(prefix_, major_, minor_, patch_))

#define HUC_FW_BLOB(prefix_, major_, minor_, bld_num_) \
	UC_FW_BLOB(major_, minor_, \
		   MAKE_HUC_FW_PATH(prefix_, major_, minor_, bld_num_))
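
/*
 * e.g. GUC_FW_BLOB(tgl, 70, 1, 1) expands to
 * { .major = 70, .minor = 1, .path = "i915/tgl_guc_70.1.1.bin" }
 */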

struct __packed uc_fw_platform_requirement {
	enum intel_platform p;
	u8 rev; /* first platform rev using this FW */
	const struct uc_fw_blob blob;
};

#define MAKE_FW_LIST(platform_, revid_, uc_) \
{ \
	.p = INTEL_##platform_, \
	.rev = revid_, \
	.blob = uc_, \
},

struct fw_blobs_by_type {
	const struct uc_fw_platform_requirement *blobs;
	u32 count;
};

static void
__uc_fw_auto_select(struct drm_i915_private *i915, struct intel_uc_fw *uc_fw)
{
	static const struct uc_fw_platform_requirement blobs_guc[] = {
		INTEL_GUC_FIRMWARE_DEFS(MAKE_FW_LIST, GUC_FW_BLOB)
	};
	static const struct uc_fw_platform_requirement blobs_huc[] = {
		INTEL_HUC_FIRMWARE_DEFS(MAKE_FW_LIST, HUC_FW_BLOB)
	};
	static const struct fw_blobs_by_type blobs_all[INTEL_UC_FW_NUM_TYPES] = {
		[INTEL_UC_FW_TYPE_GUC] = { blobs_guc, ARRAY_SIZE(blobs_guc) },
		[INTEL_UC_FW_TYPE_HUC] = { blobs_huc, ARRAY_SIZE(blobs_huc) },
	};
	const struct uc_fw_platform_requirement *fw_blobs;
	enum intel_platform p = INTEL_INFO(i915)->platform;
	u32 fw_count;
	u8 rev = INTEL_REVID(i915);
	int i;

	GEM_BUG_ON(uc_fw->type >= ARRAY_SIZE(blobs_all));
	fw_blobs = blobs_all[uc_fw->type].blobs;
	fw_count = blobs_all[uc_fw->type].count;

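	/*
	 * The blobs are listed from newest platform/revision to oldest, so
	 * the first entry matching our platform and minimum revision wins.
	 */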
	for (i = 0; i < fw_count && p <= fw_blobs[i].p; i++) {
		if (p == fw_blobs[i].p && rev >= fw_blobs[i].rev) {
			const struct uc_fw_blob *blob = &fw_blobs[i].blob;
			uc_fw->path = blob->path;
			uc_fw->major_ver_wanted = blob->major;
			uc_fw->minor_ver_wanted = blob->minor;
			break;
		}
	}

	/* make sure the list is ordered as expected */
	if (IS_ENABLED(CONFIG_DRM_I915_SELFTEST)) {
		for (i = 1; i < fw_count; i++) {
			if (fw_blobs[i].p < fw_blobs[i - 1].p)
				continue;

			if (fw_blobs[i].p == fw_blobs[i - 1].p &&
			    fw_blobs[i].rev < fw_blobs[i - 1].rev)
				continue;

			pr_err("invalid FW blob order: %s r%u comes before %s r%u\n",
			       intel_platform_name(fw_blobs[i - 1].p),
			       fw_blobs[i - 1].rev,
			       intel_platform_name(fw_blobs[i].p),
			       fw_blobs[i].rev);

			uc_fw->path = NULL;
		}
	}
}

static const char *__override_guc_firmware_path(struct drm_i915_private *i915)
{
	if (i915->params.enable_guc & ENABLE_GUC_MASK)
		return i915->params.guc_firmware_path;
	return "";
}

static const char *__override_huc_firmware_path(struct drm_i915_private *i915)
{
	if (i915->params.enable_guc & ENABLE_GUC_LOAD_HUC)
		return i915->params.huc_firmware_path;
	return "";
}

static void __uc_fw_user_override(struct drm_i915_private *i915, struct intel_uc_fw *uc_fw)
{
	const char *path = NULL;

	switch (uc_fw->type) {
	case INTEL_UC_FW_TYPE_GUC:
		path = __override_guc_firmware_path(i915);
		break;
	case INTEL_UC_FW_TYPE_HUC:
		path = __override_huc_firmware_path(i915);
		break;
	}

	if (unlikely(path)) {
		uc_fw->path = path;
		uc_fw->user_overridden = true;
	}
}

/**
 * intel_uc_fw_init_early - initialize the uC object and select the firmware
 * @uc_fw: uC firmware
 * @type: type of uC
 *
 * Initialize the state of our uC object and relevant tracking and select the
 * firmware to fetch and load.
 */
void intel_uc_fw_init_early(struct intel_uc_fw *uc_fw,
			    enum intel_uc_fw_type type)
{
	struct drm_i915_private *i915 = ____uc_fw_to_gt(uc_fw, type)->i915;

	/*
	 * we use FIRMWARE_UNINITIALIZED to detect checks against uc_fw->status
	 * before we've looked at the HW caps to see if we have uc support
	 */
	BUILD_BUG_ON(INTEL_UC_FIRMWARE_UNINITIALIZED);
	GEM_BUG_ON(uc_fw->status);
	GEM_BUG_ON(uc_fw->path);

	uc_fw->type = type;

	if (HAS_GT_UC(i915)) {
		__uc_fw_auto_select(i915, uc_fw);
		__uc_fw_user_override(i915, uc_fw);
	}

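	/*
	 * Status mapping: a NULL path means no blob is defined for this
	 * platform (NOT_SUPPORTED), an empty path means uC usage is disabled
	 * (see __uc_fw_user_override()), otherwise a blob has been SELECTED.
	 */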
	intel_uc_fw_change_status(uc_fw, uc_fw->path ? *uc_fw->path ?
				  INTEL_UC_FIRMWARE_SELECTED :
				  INTEL_UC_FIRMWARE_DISABLED :
				  INTEL_UC_FIRMWARE_NOT_SUPPORTED);
}

static void __force_fw_fetch_failures(struct intel_uc_fw *uc_fw, int e)
{
	struct drm_i915_private *i915 = __uc_fw_to_gt(uc_fw)->i915;
	bool user = e == -EINVAL;

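	/*
	 * Each i915_inject_probe_error() call counts as a separate injection
	 * point, so at most one of the branches below should fire for a given
	 * injection setting (see i915_inject_probe_error() for details).
	 */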
	if (i915_inject_probe_error(i915, e)) {
		/* non-existing blob */
		uc_fw->path = "<invalid>";
		uc_fw->user_overridden = user;
	} else if (i915_inject_probe_error(i915, e)) {
		/* require next major version */
		uc_fw->major_ver_wanted += 1;
		uc_fw->minor_ver_wanted = 0;
		uc_fw->user_overridden = user;
	} else if (i915_inject_probe_error(i915, e)) {
		/* require next minor version */
		uc_fw->minor_ver_wanted += 1;
		uc_fw->user_overridden = user;
	} else if (uc_fw->major_ver_wanted &&
		   i915_inject_probe_error(i915, e)) {
		/* require prev major version */
		uc_fw->major_ver_wanted -= 1;
		uc_fw->minor_ver_wanted = 0;
		uc_fw->user_overridden = user;
	} else if (uc_fw->minor_ver_wanted &&
		   i915_inject_probe_error(i915, e)) {
		/* require prev minor version - hey, this should work! */
		uc_fw->minor_ver_wanted -= 1;
		uc_fw->user_overridden = user;
	} else if (user && i915_inject_probe_error(i915, e)) {
		/* officially unsupported platform */
		uc_fw->major_ver_wanted = 0;
		uc_fw->minor_ver_wanted = 0;
		uc_fw->user_overridden = true;
	}
}

/**
 * intel_uc_fw_fetch - fetch uC firmware
 * @uc_fw: uC firmware
 *
 * Fetch uC firmware into GEM obj.
 *
 * Return: 0 on success, a negative errno code on failure.
 */
int intel_uc_fw_fetch(struct intel_uc_fw *uc_fw)
{
	struct drm_i915_private *i915 = __uc_fw_to_gt(uc_fw)->i915;
	struct device *dev = i915->drm.dev;
	struct drm_i915_gem_object *obj;
	const struct firmware *fw = NULL;
	struct uc_css_header *css;
	size_t size;
	int err;

	GEM_BUG_ON(!i915->wopcm.size);
	GEM_BUG_ON(!intel_uc_fw_is_enabled(uc_fw));

	err = i915_inject_probe_error(i915, -ENXIO);
	if (err)
		goto fail;

	__force_fw_fetch_failures(uc_fw, -EINVAL);
	__force_fw_fetch_failures(uc_fw, -ESTALE);

	err = request_firmware(&fw, uc_fw->path, dev);
	if (err)
		goto fail;

	/* Check the size of the blob before examining buffer contents */
	if (unlikely(fw->size < sizeof(struct uc_css_header))) {
		drm_warn(&i915->drm, "%s firmware %s: invalid size: %zu < %zu\n",
			 intel_uc_fw_type_repr(uc_fw->type), uc_fw->path,
			 fw->size, sizeof(struct uc_css_header));
		err = -ENODATA;
		goto fail;
	}

	css = (struct uc_css_header *)fw->data;

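	/*
	 * Expected blob layout: CSS header, followed by the uCode image,
	 * followed by the RSA key/signature (see intel_uc_fw_copy_rsa()).
	 */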
	/* Check integrity of size values inside CSS header */
	size = (css->header_size_dw - css->key_size_dw - css->modulus_size_dw -
		css->exponent_size_dw) * sizeof(u32);
	if (unlikely(size != sizeof(struct uc_css_header))) {
		drm_warn(&i915->drm,
			 "%s firmware %s: unexpected header size: %zu != %zu\n",
			 intel_uc_fw_type_repr(uc_fw->type), uc_fw->path,
			 size, sizeof(struct uc_css_header));
		err = -EPROTO;
		goto fail;
	}

	/* uCode size must be calculated from other sizes */
	uc_fw->ucode_size = (css->size_dw - css->header_size_dw) * sizeof(u32);

	/* now RSA */
	uc_fw->rsa_size = css->key_size_dw * sizeof(u32);

	/* At a minimum, the blob must contain the header, uCode and RSA key */
	size = sizeof(struct uc_css_header) + uc_fw->ucode_size + uc_fw->rsa_size;
	if (unlikely(fw->size < size)) {
		drm_warn(&i915->drm, "%s firmware %s: invalid size: %zu < %zu\n",
			 intel_uc_fw_type_repr(uc_fw->type), uc_fw->path,
			 fw->size, size);
		err = -ENOEXEC;
		goto fail;
	}

	/* Sanity check that the firmware doesn't exceed the WOPCM memory */
	size = __intel_uc_fw_get_upload_size(uc_fw);
	if (unlikely(size >= i915->wopcm.size)) {
		drm_warn(&i915->drm, "%s firmware %s: invalid size: %zu > %zu\n",
			 intel_uc_fw_type_repr(uc_fw->type), uc_fw->path,
			 size, (size_t)i915->wopcm.size);
		err = -E2BIG;
		goto fail;
	}

	/* Get version numbers from the CSS header */
	uc_fw->major_ver_found = FIELD_GET(CSS_SW_VERSION_UC_MAJOR,
					   css->sw_version);
	uc_fw->minor_ver_found = FIELD_GET(CSS_SW_VERSION_UC_MINOR,
					   css->sw_version);

	if (uc_fw->major_ver_found != uc_fw->major_ver_wanted ||
	    uc_fw->minor_ver_found < uc_fw->minor_ver_wanted) {
		drm_notice(&i915->drm, "%s firmware %s: unexpected version: %u.%u != %u.%u\n",
			   intel_uc_fw_type_repr(uc_fw->type), uc_fw->path,
			   uc_fw->major_ver_found, uc_fw->minor_ver_found,
			   uc_fw->major_ver_wanted, uc_fw->minor_ver_wanted);
		if (!intel_uc_fw_is_overridden(uc_fw)) {
			err = -ENOEXEC;
			goto fail;
		}
	}

	if (uc_fw->type == INTEL_UC_FW_TYPE_GUC)
		uc_fw->private_data_size = css->private_data_size;

	if (HAS_LMEM(i915)) {
		obj = i915_gem_object_create_lmem_from_data(i915, fw->data, fw->size);
		if (!IS_ERR(obj))
			obj->flags |= I915_BO_ALLOC_PM_EARLY;
	} else {
		obj = i915_gem_object_create_shmem_from_data(i915, fw->data, fw->size);
	}

	if (IS_ERR(obj)) {
		err = PTR_ERR(obj);
		goto fail;
	}

	uc_fw->obj = obj;
	uc_fw->size = fw->size;
	intel_uc_fw_change_status(uc_fw, INTEL_UC_FIRMWARE_AVAILABLE);

	release_firmware(fw);
	return 0;

fail:
	intel_uc_fw_change_status(uc_fw, err == -ENOENT ?
				  INTEL_UC_FIRMWARE_MISSING :
				  INTEL_UC_FIRMWARE_ERROR);

	drm_notice(&i915->drm, "%s firmware %s: fetch failed with error %d\n",
		   intel_uc_fw_type_repr(uc_fw->type), uc_fw->path, err);
	drm_info(&i915->drm, "%s firmware(s) can be downloaded from %s\n",
		 intel_uc_fw_type_repr(uc_fw->type), INTEL_UC_FIRMWARE_URL);

	release_firmware(fw);		/* OK even if fw is NULL */
	return err;
}

static u32 uc_fw_ggtt_offset(struct intel_uc_fw *uc_fw)
{
	struct i915_ggtt *ggtt = __uc_fw_to_gt(uc_fw)->ggtt;
	struct drm_mm_node *node = &ggtt->uc_fw;

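	/*
	 * uC firmware is bound into the ggtt->uc_fw range reserved at GGTT
	 * init (above GUC_GGTT_TOP, see uc_fw_rsa_data_create()); the DMA
	 * programming assumes the offset fits in 32 bits, which the asserts
	 * below enforce.
	 */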
	GEM_BUG_ON(!drm_mm_node_allocated(node));
	GEM_BUG_ON(upper_32_bits(node->start));
	GEM_BUG_ON(upper_32_bits(node->start + node->size - 1));

	return lower_32_bits(node->start);
}

static void uc_fw_bind_ggtt(struct intel_uc_fw *uc_fw)
{
	struct drm_i915_gem_object *obj = uc_fw->obj;
	struct i915_ggtt *ggtt = __uc_fw_to_gt(uc_fw)->ggtt;
	struct i915_vma_resource *dummy = &uc_fw->dummy;
	u32 pte_flags = 0;

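	/*
	 * Instead of creating a real vma, describe the firmware pages with a
	 * pre-filled vma_resource placed at the reserved GGTT offset and
	 * insert the PTEs directly.
	 */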
	dummy->start = uc_fw_ggtt_offset(uc_fw);
	dummy->node_size = obj->base.size;
	dummy->bi.pages = obj->mm.pages;

	GEM_BUG_ON(!i915_gem_object_has_pinned_pages(obj));
	GEM_BUG_ON(dummy->node_size > ggtt->uc_fw.size);

	/* uc_fw->obj cache domains were not controlled across suspend */
	if (i915_gem_object_has_struct_page(obj))
		drm_clflush_sg(dummy->bi.pages);

	if (i915_gem_object_is_lmem(obj))
		pte_flags |= PTE_LM;

	ggtt->vm.insert_entries(&ggtt->vm, dummy, I915_CACHE_NONE, pte_flags);
}

static void uc_fw_unbind_ggtt(struct intel_uc_fw *uc_fw)
{
	struct drm_i915_gem_object *obj = uc_fw->obj;
	struct i915_ggtt *ggtt = __uc_fw_to_gt(uc_fw)->ggtt;
	u64 start = uc_fw_ggtt_offset(uc_fw);

	ggtt->vm.clear_range(&ggtt->vm, start, obj->base.size);
}

static int uc_fw_xfer(struct intel_uc_fw *uc_fw, u32 dst_offset, u32 dma_flags)
{
	struct intel_gt *gt = __uc_fw_to_gt(uc_fw);
	struct intel_uncore *uncore = gt->uncore;
	u64 offset;
	int ret;

	ret = i915_inject_probe_error(gt->i915, -ETIMEDOUT);
	if (ret)
		return ret;

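	/*
	 * Hold forcewake for the duration of the transfer; the register
	 * accesses below all use the _fw variants for this reason.
	 */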
	intel_uncore_forcewake_get(uncore, FORCEWAKE_ALL);

	/* Set the source address for the uCode */
	offset = uc_fw_ggtt_offset(uc_fw);
	GEM_BUG_ON(upper_32_bits(offset) & 0xFFFF0000);
	intel_uncore_write_fw(uncore, DMA_ADDR_0_LOW, lower_32_bits(offset));
	intel_uncore_write_fw(uncore, DMA_ADDR_0_HIGH, upper_32_bits(offset));

	/* Set the DMA destination */
	intel_uncore_write_fw(uncore, DMA_ADDR_1_LOW, dst_offset);
	intel_uncore_write_fw(uncore, DMA_ADDR_1_HIGH, DMA_ADDRESS_SPACE_WOPCM);

	/*
	 * Set the transfer size. The header plus uCode will be copied to WOPCM
	 * via DMA, excluding any other components
	 */
	intel_uncore_write_fw(uncore, DMA_COPY_SIZE,
			      sizeof(struct uc_css_header) + uc_fw->ucode_size);

	/* Start the DMA */
	intel_uncore_write_fw(uncore, DMA_CTRL,
			      _MASKED_BIT_ENABLE(dma_flags | START_DMA));

	/* Wait for DMA to finish */
	ret = intel_wait_for_register_fw(uncore, DMA_CTRL, START_DMA, 0, 100);
	if (ret)
		drm_err(&gt->i915->drm, "DMA for %s fw failed, DMA_CTRL=%u\n",
			intel_uc_fw_type_repr(uc_fw->type),
			intel_uncore_read_fw(uncore, DMA_CTRL));

	/* Disable the bits once DMA is over */
	intel_uncore_write_fw(uncore, DMA_CTRL, _MASKED_BIT_DISABLE(dma_flags));

	intel_uncore_forcewake_put(uncore, FORCEWAKE_ALL);

	return ret;
}

/**
 * intel_uc_fw_upload - load uC firmware using custom loader
 * @uc_fw: uC firmware
 * @dst_offset: destination offset
 * @dma_flags: flags for DMA ctrl
 *
 * Loads uC firmware and updates internal flags.
 *
 * Return: 0 on success, non-zero on failure.
 */
int intel_uc_fw_upload(struct intel_uc_fw *uc_fw, u32 dst_offset, u32 dma_flags)
{
	struct intel_gt *gt = __uc_fw_to_gt(uc_fw);
	int err;

	/* make sure the status was cleared the last time we reset the uc */
	GEM_BUG_ON(intel_uc_fw_is_loaded(uc_fw));

	err = i915_inject_probe_error(gt->i915, -ENOEXEC);
	if (err)
		return err;

	if (!intel_uc_fw_is_loadable(uc_fw))
		return -ENOEXEC;

	/* Call custom loader */
	uc_fw_bind_ggtt(uc_fw);
	err = uc_fw_xfer(uc_fw, dst_offset, dma_flags);
	uc_fw_unbind_ggtt(uc_fw);
	if (err)
		goto fail;

	intel_uc_fw_change_status(uc_fw, INTEL_UC_FIRMWARE_TRANSFERRED);
	return 0;

fail:
	i915_probe_error(gt->i915, "Failed to load %s firmware %s (%d)\n",
			 intel_uc_fw_type_repr(uc_fw->type), uc_fw->path,
			 err);
	intel_uc_fw_change_status(uc_fw, INTEL_UC_FIRMWARE_LOAD_FAIL);
	return err;
}

static inline bool uc_fw_need_rsa_in_memory(struct intel_uc_fw *uc_fw)
{
	/*
	 * The HW reads the GuC RSA from memory if the key size is > 256 bytes,
	 * while it reads it from the 64 RSA registers if it is smaller.
	 * The HuC RSA is always read from memory.
	 */
	return uc_fw->type == INTEL_UC_FW_TYPE_HUC || uc_fw->rsa_size > 256;
}

static int uc_fw_rsa_data_create(struct intel_uc_fw *uc_fw)
{
	struct intel_gt *gt = __uc_fw_to_gt(uc_fw);
	struct i915_vma *vma;
	size_t copied;
	void *vaddr;
	int err;

	err = i915_inject_probe_error(gt->i915, -ENXIO);
	if (err)
		return err;

	if (!uc_fw_need_rsa_in_memory(uc_fw))
		return 0;

	/*
	 * uC firmwares will sit above GUC_GGTT_TOP and will not map through
	 * GGTT. Unfortunately, this means that the GuC HW cannot perform the uC
	 * authentication from memory, as the RSA offset now falls within the
	 * GuC inaccessible range. We resort to perma-pinning an additional vma
	 * within the accessible range that only contains the RSA signature.
	 * The GuC HW can use this extra pinning to perform the authentication
	 * since its GGTT offset will be GuC accessible.
	 */
	GEM_BUG_ON(uc_fw->rsa_size > PAGE_SIZE);
	vma = intel_guc_allocate_vma(&gt->uc.guc, PAGE_SIZE);
	if (IS_ERR(vma))
		return PTR_ERR(vma);

	vaddr = i915_gem_object_pin_map_unlocked(vma->obj,
						 i915_coherent_map_type(gt->i915, vma->obj, true));
	if (IS_ERR(vaddr)) {
		err = PTR_ERR(vaddr);
		goto unpin_out;
	}

	copied = intel_uc_fw_copy_rsa(uc_fw, vaddr, vma->size);
	i915_gem_object_unpin_map(vma->obj);

	if (copied < uc_fw->rsa_size) {
		err = -ENOMEM;
		goto unpin_out;
	}

	uc_fw->rsa_data = vma;

	return 0;

unpin_out:
	i915_vma_unpin_and_release(&vma, 0);
	return err;
}

static void uc_fw_rsa_data_destroy(struct intel_uc_fw *uc_fw)
{
	i915_vma_unpin_and_release(&uc_fw->rsa_data, 0);
}

int intel_uc_fw_init(struct intel_uc_fw *uc_fw)
{
	int err;

	/* this should happen before the load! */
	GEM_BUG_ON(intel_uc_fw_is_loaded(uc_fw));

	if (!intel_uc_fw_is_available(uc_fw))
		return -ENOEXEC;

	err = i915_gem_object_pin_pages_unlocked(uc_fw->obj);
	if (err) {
		DRM_DEBUG_DRIVER("%s fw pin-pages err=%d\n",
				 intel_uc_fw_type_repr(uc_fw->type), err);
		goto out;
	}

	err = uc_fw_rsa_data_create(uc_fw);
	if (err) {
		DRM_DEBUG_DRIVER("%s fw rsa data creation failed, err=%d\n",
				 intel_uc_fw_type_repr(uc_fw->type), err);
		goto out_unpin;
	}

	return 0;

out_unpin:
	i915_gem_object_unpin_pages(uc_fw->obj);
out:
	intel_uc_fw_change_status(uc_fw, INTEL_UC_FIRMWARE_INIT_FAIL);
	return err;
}

void intel_uc_fw_fini(struct intel_uc_fw *uc_fw)
{
	uc_fw_rsa_data_destroy(uc_fw);

	if (i915_gem_object_has_pinned_pages(uc_fw->obj))
		i915_gem_object_unpin_pages(uc_fw->obj);

	intel_uc_fw_change_status(uc_fw, INTEL_UC_FIRMWARE_AVAILABLE);
}

/**
 * intel_uc_fw_cleanup_fetch - cleanup uC firmware
 * @uc_fw: uC firmware
 *
 * Cleans up uC firmware by releasing the firmware GEM obj.
 */
void intel_uc_fw_cleanup_fetch(struct intel_uc_fw *uc_fw)
{
	if (!intel_uc_fw_is_available(uc_fw))
		return;

	i915_gem_object_put(fetch_and_zero(&uc_fw->obj));

	intel_uc_fw_change_status(uc_fw, INTEL_UC_FIRMWARE_SELECTED);
}

/**
 * intel_uc_fw_copy_rsa - copy fw RSA to buffer
 *
 * @uc_fw: uC firmware
 * @dst: dst buffer
 * @max_len: max number of bytes to copy
 *
 * Return: number of copied bytes.
 */
size_t intel_uc_fw_copy_rsa(struct intel_uc_fw *uc_fw, void *dst, u32 max_len)
{
	struct intel_memory_region *mr = uc_fw->obj->mm.region;
	u32 size = min_t(u32, uc_fw->rsa_size, max_len);
	u32 offset = sizeof(struct uc_css_header) + uc_fw->ucode_size;
	struct sgt_iter iter;
	size_t count = 0;
	int idx;

	/* Called during reset handling, must be atomic [no fs_reclaim] */
	GEM_BUG_ON(!intel_uc_fw_is_available(uc_fw));

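	/*
	 * Walk the backing pages, skipping whole pages until we reach the
	 * start of the RSA data, then copy up to min(rsa_size, max_len).
	 */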
	idx = offset >> PAGE_SHIFT;
	offset = offset_in_page(offset);
	if (i915_gem_object_has_struct_page(uc_fw->obj)) {
		struct page *page;

		for_each_sgt_page(page, iter, uc_fw->obj->mm.pages) {
			u32 len = min_t(u32, size, PAGE_SIZE - offset);
			void *vaddr;

			if (idx > 0) {
				idx--;
				continue;
			}

			vaddr = kmap_atomic(page);
			memcpy(dst, vaddr + offset, len);
			kunmap_atomic(vaddr);

			offset = 0;
			dst += len;
			size -= len;
			count += len;
			if (!size)
				break;
		}
	} else {
		dma_addr_t addr;

		for_each_sgt_daddr(addr, iter, uc_fw->obj->mm.pages) {
			u32 len = min_t(u32, size, PAGE_SIZE - offset);
			void __iomem *vaddr;

			if (idx > 0) {
				idx--;
				continue;
			}

			vaddr = io_mapping_map_atomic_wc(&mr->iomap,
							 addr - mr->region.start);
			memcpy_fromio(dst, vaddr + offset, len);
			io_mapping_unmap_atomic(vaddr);

			offset = 0;
			dst += len;
			size -= len;
			count += len;
			if (!size)
				break;
		}
	}

	return count;
}

/**
 * intel_uc_fw_dump - dump information about uC firmware
 * @uc_fw: uC firmware
 * @p: the &drm_printer
 *
 * Pretty printer for uC firmware.
 */
void intel_uc_fw_dump(const struct intel_uc_fw *uc_fw, struct drm_printer *p)
{
	drm_printf(p, "%s firmware: %s\n",
		   intel_uc_fw_type_repr(uc_fw->type), uc_fw->path);
	drm_printf(p, "\tstatus: %s\n",
		   intel_uc_fw_status_repr(uc_fw->status));
	drm_printf(p, "\tversion: wanted %u.%u, found %u.%u\n",
		   uc_fw->major_ver_wanted, uc_fw->minor_ver_wanted,
		   uc_fw->major_ver_found, uc_fw->minor_ver_found);
	drm_printf(p, "\tuCode: %u bytes\n", uc_fw->ucode_size);
	drm_printf(p, "\tRSA: %u bytes\n", uc_fw->rsa_size);
}