1 // SPDX-License-Identifier: MIT
2 /*
3  * Copyright © 2016-2019 Intel Corporation
4  */
5 
6 #include <linux/bitfield.h>
7 #include <linux/firmware.h>
8 #include <drm/drm_print.h>
9 
10 #include "gem/i915_gem_lmem.h"
11 #include "intel_uc_fw.h"
12 #include "intel_uc_fw_abi.h"
13 #include "i915_drv.h"
14 
15 static inline struct intel_gt *
16 ____uc_fw_to_gt(struct intel_uc_fw *uc_fw, enum intel_uc_fw_type type)
17 {
18 	if (type == INTEL_UC_FW_TYPE_GUC)
19 		return container_of(uc_fw, struct intel_gt, uc.guc.fw);
20 
21 	GEM_BUG_ON(type != INTEL_UC_FW_TYPE_HUC);
22 	return container_of(uc_fw, struct intel_gt, uc.huc.fw);
23 }
24 
/*
 * Like ____uc_fw_to_gt(), but reads the type from the firmware object
 * itself; only valid once intel_uc_fw_init_early() has run (i.e. status
 * is no longer INTEL_UC_FIRMWARE_UNINITIALIZED).
 */
static inline struct intel_gt *__uc_fw_to_gt(struct intel_uc_fw *uc_fw)
{
	GEM_BUG_ON(uc_fw->status == INTEL_UC_FIRMWARE_UNINITIALIZED);
	return ____uc_fw_to_gt(uc_fw, uc_fw->type);
}
30 
31 #ifdef CONFIG_DRM_I915_DEBUG_GUC
32 void intel_uc_fw_change_status(struct intel_uc_fw *uc_fw,
33 			       enum intel_uc_fw_status status)
34 {
35 	uc_fw->__status =  status;
36 	drm_dbg(&__uc_fw_to_gt(uc_fw)->i915->drm,
37 		"%s firmware -> %s\n",
38 		intel_uc_fw_type_repr(uc_fw->type),
39 		status == INTEL_UC_FIRMWARE_SELECTED ?
40 		uc_fw->path : intel_uc_fw_status_repr(status));
41 }
42 #endif
43 
44 /*
45  * List of required GuC and HuC binaries per-platform.
46  * Must be ordered based on platform + revid, from newer to older.
47  *
48  * Note that RKL and ADL-S have the same GuC/HuC device ID's and use the same
49  * firmware as TGL.
50  */
51 #define INTEL_GUC_FIRMWARE_DEFS(fw_def, guc_def) \
52 	fw_def(ALDERLAKE_P,  0, guc_def(adlp, 62, 0, 3)) \
53 	fw_def(ALDERLAKE_S,  0, guc_def(tgl,  62, 0, 0)) \
54 	fw_def(DG1,          0, guc_def(dg1,  62, 0, 0)) \
55 	fw_def(ROCKETLAKE,   0, guc_def(tgl,  62, 0, 0)) \
56 	fw_def(TIGERLAKE,    0, guc_def(tgl,  62, 0, 0)) \
57 	fw_def(JASPERLAKE,   0, guc_def(ehl,  62, 0, 0)) \
58 	fw_def(ELKHARTLAKE,  0, guc_def(ehl,  62, 0, 0)) \
59 	fw_def(ICELAKE,      0, guc_def(icl,  62, 0, 0)) \
60 	fw_def(COMETLAKE,    5, guc_def(cml,  62, 0, 0)) \
61 	fw_def(COMETLAKE,    0, guc_def(kbl,  62, 0, 0)) \
62 	fw_def(COFFEELAKE,   0, guc_def(kbl,  62, 0, 0)) \
63 	fw_def(GEMINILAKE,   0, guc_def(glk,  62, 0, 0)) \
64 	fw_def(KABYLAKE,     0, guc_def(kbl,  62, 0, 0)) \
65 	fw_def(BROXTON,      0, guc_def(bxt,  62, 0, 0)) \
66 	fw_def(SKYLAKE,      0, guc_def(skl,  62, 0, 0))
67 
68 #define INTEL_HUC_FIRMWARE_DEFS(fw_def, huc_def) \
69 	fw_def(ALDERLAKE_P,  0, huc_def(tgl,  7, 9, 3)) \
70 	fw_def(ALDERLAKE_S,  0, huc_def(tgl,  7, 9, 3)) \
71 	fw_def(DG1,          0, huc_def(dg1,  7, 9, 3)) \
72 	fw_def(ROCKETLAKE,   0, huc_def(tgl,  7, 9, 3)) \
73 	fw_def(TIGERLAKE,    0, huc_def(tgl,  7, 9, 3)) \
74 	fw_def(JASPERLAKE,   0, huc_def(ehl,  9, 0, 0)) \
75 	fw_def(ELKHARTLAKE,  0, huc_def(ehl,  9, 0, 0)) \
76 	fw_def(ICELAKE,      0, huc_def(icl,  9, 0, 0)) \
77 	fw_def(COMETLAKE,    5, huc_def(cml,  4, 0, 0)) \
78 	fw_def(COMETLAKE,    0, huc_def(kbl,  4, 0, 0)) \
79 	fw_def(COFFEELAKE,   0, huc_def(kbl,  4, 0, 0)) \
80 	fw_def(GEMINILAKE,   0, huc_def(glk,  4, 0, 0)) \
81 	fw_def(KABYLAKE,     0, huc_def(kbl,  4, 0, 0)) \
82 	fw_def(BROXTON,      0, huc_def(bxt,  2, 0, 0)) \
83 	fw_def(SKYLAKE,      0, huc_def(skl,  2, 0, 0))
84 
85 #define __MAKE_UC_FW_PATH(prefix_, name_, major_, minor_, patch_) \
86 	"i915/" \
87 	__stringify(prefix_) name_ \
88 	__stringify(major_) "." \
89 	__stringify(minor_) "." \
90 	__stringify(patch_) ".bin"
91 
92 #define MAKE_GUC_FW_PATH(prefix_, major_, minor_, patch_) \
93 	__MAKE_UC_FW_PATH(prefix_, "_guc_", major_, minor_, patch_)
94 
95 #define MAKE_HUC_FW_PATH(prefix_, major_, minor_, bld_num_) \
96 	__MAKE_UC_FW_PATH(prefix_, "_huc_", major_, minor_, bld_num_)
97 
98 /* All blobs need to be declared via MODULE_FIRMWARE() */
99 #define INTEL_UC_MODULE_FW(platform_, revid_, uc_) \
100 	MODULE_FIRMWARE(uc_);
101 
102 INTEL_GUC_FIRMWARE_DEFS(INTEL_UC_MODULE_FW, MAKE_GUC_FW_PATH)
103 INTEL_HUC_FIRMWARE_DEFS(INTEL_UC_MODULE_FW, MAKE_HUC_FW_PATH)
104 
105 /* The below structs and macros are used to iterate across the list of blobs */
/* Expected (major, minor) firmware version plus the blob's request path */
struct __packed uc_fw_blob {
	u8 major;
	u8 minor;
	const char *path;
};

#define UC_FW_BLOB(major_, minor_, path_) \
	{ .major = major_, .minor = minor_, .path = path_ }

/* Expand a guc_def() table entry into a uc_fw_blob initializer */
#define GUC_FW_BLOB(prefix_, major_, minor_, patch_) \
	UC_FW_BLOB(major_, minor_, \
		   MAKE_GUC_FW_PATH(prefix_, major_, minor_, patch_))

/* Expand a huc_def() table entry into a uc_fw_blob initializer */
#define HUC_FW_BLOB(prefix_, major_, minor_, bld_num_) \
	UC_FW_BLOB(major_, minor_, \
		   MAKE_HUC_FW_PATH(prefix_, major_, minor_, bld_num_))

/* One row of the per-platform firmware requirement tables */
struct __packed uc_fw_platform_requirement {
	enum intel_platform p;
	u8 rev; /* first platform rev using this FW */
	const struct uc_fw_blob blob;
};

#define MAKE_FW_LIST(platform_, revid_, uc_) \
{ \
	.p = INTEL_##platform_, \
	.rev = revid_, \
	.blob = uc_, \
},

/* Blob table (and its length) for one uC type (GuC or HuC) */
struct fw_blobs_by_type {
	const struct uc_fw_platform_requirement *blobs;
	u32 count;
};
140 
141 static void
142 __uc_fw_auto_select(struct drm_i915_private *i915, struct intel_uc_fw *uc_fw)
143 {
144 	static const struct uc_fw_platform_requirement blobs_guc[] = {
145 		INTEL_GUC_FIRMWARE_DEFS(MAKE_FW_LIST, GUC_FW_BLOB)
146 	};
147 	static const struct uc_fw_platform_requirement blobs_huc[] = {
148 		INTEL_HUC_FIRMWARE_DEFS(MAKE_FW_LIST, HUC_FW_BLOB)
149 	};
150 	static const struct fw_blobs_by_type blobs_all[INTEL_UC_FW_NUM_TYPES] = {
151 		[INTEL_UC_FW_TYPE_GUC] = { blobs_guc, ARRAY_SIZE(blobs_guc) },
152 		[INTEL_UC_FW_TYPE_HUC] = { blobs_huc, ARRAY_SIZE(blobs_huc) },
153 	};
154 	static const struct uc_fw_platform_requirement *fw_blobs;
155 	enum intel_platform p = INTEL_INFO(i915)->platform;
156 	u32 fw_count;
157 	u8 rev = INTEL_REVID(i915);
158 	int i;
159 
160 	GEM_BUG_ON(uc_fw->type >= ARRAY_SIZE(blobs_all));
161 	fw_blobs = blobs_all[uc_fw->type].blobs;
162 	fw_count = blobs_all[uc_fw->type].count;
163 
164 	for (i = 0; i < fw_count && p <= fw_blobs[i].p; i++) {
165 		if (p == fw_blobs[i].p && rev >= fw_blobs[i].rev) {
166 			const struct uc_fw_blob *blob = &fw_blobs[i].blob;
167 			uc_fw->path = blob->path;
168 			uc_fw->major_ver_wanted = blob->major;
169 			uc_fw->minor_ver_wanted = blob->minor;
170 			break;
171 		}
172 	}
173 
174 	/* make sure the list is ordered as expected */
175 	if (IS_ENABLED(CONFIG_DRM_I915_SELFTEST)) {
176 		for (i = 1; i < fw_count; i++) {
177 			if (fw_blobs[i].p < fw_blobs[i - 1].p)
178 				continue;
179 
180 			if (fw_blobs[i].p == fw_blobs[i - 1].p &&
181 			    fw_blobs[i].rev < fw_blobs[i - 1].rev)
182 				continue;
183 
184 			pr_err("invalid FW blob order: %s r%u comes before %s r%u\n",
185 			       intel_platform_name(fw_blobs[i - 1].p),
186 			       fw_blobs[i - 1].rev,
187 			       intel_platform_name(fw_blobs[i].p),
188 			       fw_blobs[i].rev);
189 
190 			uc_fw->path = NULL;
191 		}
192 	}
193 }
194 
195 static const char *__override_guc_firmware_path(struct drm_i915_private *i915)
196 {
197 	if (i915->params.enable_guc & ENABLE_GUC_MASK)
198 		return i915->params.guc_firmware_path;
199 	return "";
200 }
201 
202 static const char *__override_huc_firmware_path(struct drm_i915_private *i915)
203 {
204 	if (i915->params.enable_guc & ENABLE_GUC_LOAD_HUC)
205 		return i915->params.huc_firmware_path;
206 	return "";
207 }
208 
209 static void __uc_fw_user_override(struct drm_i915_private *i915, struct intel_uc_fw *uc_fw)
210 {
211 	const char *path = NULL;
212 
213 	switch (uc_fw->type) {
214 	case INTEL_UC_FW_TYPE_GUC:
215 		path = __override_guc_firmware_path(i915);
216 		break;
217 	case INTEL_UC_FW_TYPE_HUC:
218 		path = __override_huc_firmware_path(i915);
219 		break;
220 	}
221 
222 	if (unlikely(path)) {
223 		uc_fw->path = path;
224 		uc_fw->user_overridden = true;
225 	}
226 }
227 
228 /**
229  * intel_uc_fw_init_early - initialize the uC object and select the firmware
230  * @uc_fw: uC firmware
231  * @type: type of uC
232  *
233  * Initialize the state of our uC object and relevant tracking and select the
234  * firmware to fetch and load.
235  */
236 void intel_uc_fw_init_early(struct intel_uc_fw *uc_fw,
237 			    enum intel_uc_fw_type type)
238 {
239 	struct drm_i915_private *i915 = ____uc_fw_to_gt(uc_fw, type)->i915;
240 
241 	/*
242 	 * we use FIRMWARE_UNINITIALIZED to detect checks against uc_fw->status
243 	 * before we're looked at the HW caps to see if we have uc support
244 	 */
245 	BUILD_BUG_ON(INTEL_UC_FIRMWARE_UNINITIALIZED);
246 	GEM_BUG_ON(uc_fw->status);
247 	GEM_BUG_ON(uc_fw->path);
248 
249 	uc_fw->type = type;
250 
251 	if (HAS_GT_UC(i915)) {
252 		__uc_fw_auto_select(i915, uc_fw);
253 		__uc_fw_user_override(i915, uc_fw);
254 	}
255 
256 	intel_uc_fw_change_status(uc_fw, uc_fw->path ? *uc_fw->path ?
257 				  INTEL_UC_FIRMWARE_SELECTED :
258 				  INTEL_UC_FIRMWARE_DISABLED :
259 				  INTEL_UC_FIRMWARE_NOT_SUPPORTED);
260 }
261 
/*
 * Fault-injection helper: corrupt the firmware selection in one of several
 * ways so the fetch/verify error paths can be exercised.
 *
 * NOTE(review): the repeated, apparently identical
 * i915_inject_probe_error() conditions are presumed intentional -- the
 * injection helper appears to fire only on a configured call count, so
 * each call site selects a different failure mode. Confirm against
 * i915_inject_probe_error() before attempting to "simplify" this chain.
 */
static void __force_fw_fetch_failures(struct intel_uc_fw *uc_fw, int e)
{
	struct drm_i915_private *i915 = __uc_fw_to_gt(uc_fw)->i915;
	bool user = e == -EINVAL; /* -EINVAL simulates a user override */

	if (i915_inject_probe_error(i915, e)) {
		/* non-existing blob */
		uc_fw->path = "<invalid>";
		uc_fw->user_overridden = user;
	} else if (i915_inject_probe_error(i915, e)) {
		/* require next major version */
		uc_fw->major_ver_wanted += 1;
		uc_fw->minor_ver_wanted = 0;
		uc_fw->user_overridden = user;
	} else if (i915_inject_probe_error(i915, e)) {
		/* require next minor version */
		uc_fw->minor_ver_wanted += 1;
		uc_fw->user_overridden = user;
	} else if (uc_fw->major_ver_wanted &&
		   i915_inject_probe_error(i915, e)) {
		/* require prev major version */
		uc_fw->major_ver_wanted -= 1;
		uc_fw->minor_ver_wanted = 0;
		uc_fw->user_overridden = user;
	} else if (uc_fw->minor_ver_wanted &&
		   i915_inject_probe_error(i915, e)) {
		/* require prev minor version - hey, this should work! */
		uc_fw->minor_ver_wanted -= 1;
		uc_fw->user_overridden = user;
	} else if (user && i915_inject_probe_error(i915, e)) {
		/* officially unsupported platform */
		uc_fw->major_ver_wanted = 0;
		uc_fw->minor_ver_wanted = 0;
		uc_fw->user_overridden = true;
	}
}
298 
299 /**
300  * intel_uc_fw_fetch - fetch uC firmware
301  * @uc_fw: uC firmware
302  *
303  * Fetch uC firmware into GEM obj.
304  *
305  * Return: 0 on success, a negative errno code on failure.
306  */
307 int intel_uc_fw_fetch(struct intel_uc_fw *uc_fw)
308 {
309 	struct drm_i915_private *i915 = __uc_fw_to_gt(uc_fw)->i915;
310 	struct device *dev = i915->drm.dev;
311 	struct drm_i915_gem_object *obj;
312 	const struct firmware *fw = NULL;
313 	struct uc_css_header *css;
314 	size_t size;
315 	int err;
316 
317 	GEM_BUG_ON(!i915->wopcm.size);
318 	GEM_BUG_ON(!intel_uc_fw_is_enabled(uc_fw));
319 
320 	err = i915_inject_probe_error(i915, -ENXIO);
321 	if (err)
322 		goto fail;
323 
324 	__force_fw_fetch_failures(uc_fw, -EINVAL);
325 	__force_fw_fetch_failures(uc_fw, -ESTALE);
326 
327 	err = request_firmware(&fw, uc_fw->path, dev);
328 	if (err)
329 		goto fail;
330 
331 	/* Check the size of the blob before examining buffer contents */
332 	if (unlikely(fw->size < sizeof(struct uc_css_header))) {
333 		drm_warn(&i915->drm, "%s firmware %s: invalid size: %zu < %zu\n",
334 			 intel_uc_fw_type_repr(uc_fw->type), uc_fw->path,
335 			 fw->size, sizeof(struct uc_css_header));
336 		err = -ENODATA;
337 		goto fail;
338 	}
339 
340 	css = (struct uc_css_header *)fw->data;
341 
342 	/* Check integrity of size values inside CSS header */
343 	size = (css->header_size_dw - css->key_size_dw - css->modulus_size_dw -
344 		css->exponent_size_dw) * sizeof(u32);
345 	if (unlikely(size != sizeof(struct uc_css_header))) {
346 		drm_warn(&i915->drm,
347 			 "%s firmware %s: unexpected header size: %zu != %zu\n",
348 			 intel_uc_fw_type_repr(uc_fw->type), uc_fw->path,
349 			 fw->size, sizeof(struct uc_css_header));
350 		err = -EPROTO;
351 		goto fail;
352 	}
353 
354 	/* uCode size must calculated from other sizes */
355 	uc_fw->ucode_size = (css->size_dw - css->header_size_dw) * sizeof(u32);
356 
357 	/* now RSA */
358 	uc_fw->rsa_size = css->key_size_dw * sizeof(u32);
359 
360 	/* At least, it should have header, uCode and RSA. Size of all three. */
361 	size = sizeof(struct uc_css_header) + uc_fw->ucode_size + uc_fw->rsa_size;
362 	if (unlikely(fw->size < size)) {
363 		drm_warn(&i915->drm, "%s firmware %s: invalid size: %zu < %zu\n",
364 			 intel_uc_fw_type_repr(uc_fw->type), uc_fw->path,
365 			 fw->size, size);
366 		err = -ENOEXEC;
367 		goto fail;
368 	}
369 
370 	/* Sanity check whether this fw is not larger than whole WOPCM memory */
371 	size = __intel_uc_fw_get_upload_size(uc_fw);
372 	if (unlikely(size >= i915->wopcm.size)) {
373 		drm_warn(&i915->drm, "%s firmware %s: invalid size: %zu > %zu\n",
374 			 intel_uc_fw_type_repr(uc_fw->type), uc_fw->path,
375 			 size, (size_t)i915->wopcm.size);
376 		err = -E2BIG;
377 		goto fail;
378 	}
379 
380 	/* Get version numbers from the CSS header */
381 	uc_fw->major_ver_found = FIELD_GET(CSS_SW_VERSION_UC_MAJOR,
382 					   css->sw_version);
383 	uc_fw->minor_ver_found = FIELD_GET(CSS_SW_VERSION_UC_MINOR,
384 					   css->sw_version);
385 
386 	if (uc_fw->major_ver_found != uc_fw->major_ver_wanted ||
387 	    uc_fw->minor_ver_found < uc_fw->minor_ver_wanted) {
388 		drm_notice(&i915->drm, "%s firmware %s: unexpected version: %u.%u != %u.%u\n",
389 			   intel_uc_fw_type_repr(uc_fw->type), uc_fw->path,
390 			   uc_fw->major_ver_found, uc_fw->minor_ver_found,
391 			   uc_fw->major_ver_wanted, uc_fw->minor_ver_wanted);
392 		if (!intel_uc_fw_is_overridden(uc_fw)) {
393 			err = -ENOEXEC;
394 			goto fail;
395 		}
396 	}
397 
398 	if (uc_fw->type == INTEL_UC_FW_TYPE_GUC)
399 		uc_fw->private_data_size = css->private_data_size;
400 
401 	if (HAS_LMEM(i915)) {
402 		obj = i915_gem_object_create_lmem_from_data(i915, fw->data, fw->size);
403 		if (!IS_ERR(obj))
404 			obj->flags |= I915_BO_ALLOC_PM_EARLY;
405 	} else {
406 		obj = i915_gem_object_create_shmem_from_data(i915, fw->data, fw->size);
407 	}
408 
409 	if (IS_ERR(obj)) {
410 		err = PTR_ERR(obj);
411 		goto fail;
412 	}
413 
414 	uc_fw->obj = obj;
415 	uc_fw->size = fw->size;
416 	intel_uc_fw_change_status(uc_fw, INTEL_UC_FIRMWARE_AVAILABLE);
417 
418 	release_firmware(fw);
419 	return 0;
420 
421 fail:
422 	intel_uc_fw_change_status(uc_fw, err == -ENOENT ?
423 				  INTEL_UC_FIRMWARE_MISSING :
424 				  INTEL_UC_FIRMWARE_ERROR);
425 
426 	drm_notice(&i915->drm, "%s firmware %s: fetch failed with error %d\n",
427 		   intel_uc_fw_type_repr(uc_fw->type), uc_fw->path, err);
428 	drm_info(&i915->drm, "%s firmware(s) can be downloaded from %s\n",
429 		 intel_uc_fw_type_repr(uc_fw->type), INTEL_UC_FIRMWARE_URL);
430 
431 	release_firmware(fw);		/* OK even if fw is NULL */
432 	return err;
433 }
434 
435 static u32 uc_fw_ggtt_offset(struct intel_uc_fw *uc_fw)
436 {
437 	struct i915_ggtt *ggtt = __uc_fw_to_gt(uc_fw)->ggtt;
438 	struct drm_mm_node *node = &ggtt->uc_fw;
439 
440 	GEM_BUG_ON(!drm_mm_node_allocated(node));
441 	GEM_BUG_ON(upper_32_bits(node->start));
442 	GEM_BUG_ON(upper_32_bits(node->start + node->size - 1));
443 
444 	return lower_32_bits(node->start);
445 }
446 
/*
 * Map the firmware object into the GGTT at the reserved uc_fw node by
 * hand-building a throwaway vma (uc_fw->dummy) with just enough fields
 * populated for insert_entries(), bypassing the regular vma machinery.
 */
static void uc_fw_bind_ggtt(struct intel_uc_fw *uc_fw)
{
	struct drm_i915_gem_object *obj = uc_fw->obj;
	struct i915_ggtt *ggtt = __uc_fw_to_gt(uc_fw)->ggtt;
	struct i915_vma *dummy = &uc_fw->dummy;
	u32 pte_flags = 0;

	dummy->node.start = uc_fw_ggtt_offset(uc_fw);
	dummy->node.size = obj->base.size;
	dummy->pages = obj->mm.pages;
	dummy->vm = &ggtt->vm;

	/* Pages must already be pinned and the object must fit the node */
	GEM_BUG_ON(!i915_gem_object_has_pinned_pages(obj));
	GEM_BUG_ON(dummy->node.size > ggtt->uc_fw.size);

	/* uc_fw->obj cache domains were not controlled across suspend */
	if (i915_gem_object_has_struct_page(obj))
		drm_clflush_sg(dummy->pages);

	/* Local-memory objects need the LM bit set in their PTEs */
	if (i915_gem_object_is_lmem(obj))
		pte_flags |= PTE_LM;

	ggtt->vm.insert_entries(&ggtt->vm, dummy, I915_CACHE_NONE, pte_flags);
}
471 
472 static void uc_fw_unbind_ggtt(struct intel_uc_fw *uc_fw)
473 {
474 	struct drm_i915_gem_object *obj = uc_fw->obj;
475 	struct i915_ggtt *ggtt = __uc_fw_to_gt(uc_fw)->ggtt;
476 	u64 start = uc_fw_ggtt_offset(uc_fw);
477 
478 	ggtt->vm.clear_range(&ggtt->vm, start, obj->base.size);
479 }
480 
/*
 * DMA the firmware (CSS header + uCode) from its GGTT location to
 * @dst_offset in the WOPCM address space. The whole sequence runs under
 * forcewake and uses the raw (_fw) register accessors.
 */
static int uc_fw_xfer(struct intel_uc_fw *uc_fw, u32 dst_offset, u32 dma_flags)
{
	struct intel_gt *gt = __uc_fw_to_gt(uc_fw);
	struct intel_uncore *uncore = gt->uncore;
	u64 offset;
	int ret;

	ret = i915_inject_probe_error(gt->i915, -ETIMEDOUT);
	if (ret)
		return ret;

	intel_uncore_forcewake_get(uncore, FORCEWAKE_ALL);

	/* Set the source address for the uCode */
	offset = uc_fw_ggtt_offset(uc_fw);
	/* Only the low 16 bits of the high dword may be used by the HW */
	GEM_BUG_ON(upper_32_bits(offset) & 0xFFFF0000);
	intel_uncore_write_fw(uncore, DMA_ADDR_0_LOW, lower_32_bits(offset));
	intel_uncore_write_fw(uncore, DMA_ADDR_0_HIGH, upper_32_bits(offset));

	/* Set the DMA destination */
	intel_uncore_write_fw(uncore, DMA_ADDR_1_LOW, dst_offset);
	intel_uncore_write_fw(uncore, DMA_ADDR_1_HIGH, DMA_ADDRESS_SPACE_WOPCM);

	/*
	 * Set the transfer size. The header plus uCode will be copied to WOPCM
	 * via DMA, excluding any other components
	 */
	intel_uncore_write_fw(uncore, DMA_COPY_SIZE,
			      sizeof(struct uc_css_header) + uc_fw->ucode_size);

	/* Start the DMA */
	intel_uncore_write_fw(uncore, DMA_CTRL,
			      _MASKED_BIT_ENABLE(dma_flags | START_DMA));

	/* Wait for DMA to finish (the START_DMA bit clearing) */
	ret = intel_wait_for_register_fw(uncore, DMA_CTRL, START_DMA, 0, 100);
	if (ret)
		drm_err(&gt->i915->drm, "DMA for %s fw failed, DMA_CTRL=%u\n",
			intel_uc_fw_type_repr(uc_fw->type),
			intel_uncore_read_fw(uncore, DMA_CTRL));

	/* Disable the bits once DMA is over */
	intel_uncore_write_fw(uncore, DMA_CTRL, _MASKED_BIT_DISABLE(dma_flags));

	intel_uncore_forcewake_put(uncore, FORCEWAKE_ALL);

	return ret;
}
529 
530 /**
531  * intel_uc_fw_upload - load uC firmware using custom loader
532  * @uc_fw: uC firmware
533  * @dst_offset: destination offset
534  * @dma_flags: flags for flags for dma ctrl
535  *
536  * Loads uC firmware and updates internal flags.
537  *
538  * Return: 0 on success, non-zero on failure.
539  */
540 int intel_uc_fw_upload(struct intel_uc_fw *uc_fw, u32 dst_offset, u32 dma_flags)
541 {
542 	struct intel_gt *gt = __uc_fw_to_gt(uc_fw);
543 	int err;
544 
545 	/* make sure the status was cleared the last time we reset the uc */
546 	GEM_BUG_ON(intel_uc_fw_is_loaded(uc_fw));
547 
548 	err = i915_inject_probe_error(gt->i915, -ENOEXEC);
549 	if (err)
550 		return err;
551 
552 	if (!intel_uc_fw_is_loadable(uc_fw))
553 		return -ENOEXEC;
554 
555 	/* Call custom loader */
556 	uc_fw_bind_ggtt(uc_fw);
557 	err = uc_fw_xfer(uc_fw, dst_offset, dma_flags);
558 	uc_fw_unbind_ggtt(uc_fw);
559 	if (err)
560 		goto fail;
561 
562 	intel_uc_fw_change_status(uc_fw, INTEL_UC_FIRMWARE_TRANSFERRED);
563 	return 0;
564 
565 fail:
566 	i915_probe_error(gt->i915, "Failed to load %s firmware %s (%d)\n",
567 			 intel_uc_fw_type_repr(uc_fw->type), uc_fw->path,
568 			 err);
569 	intel_uc_fw_change_status(uc_fw, INTEL_UC_FIRMWARE_LOAD_FAIL);
570 	return err;
571 }
572 
573 static inline bool uc_fw_need_rsa_in_memory(struct intel_uc_fw *uc_fw)
574 {
575 	/*
576 	 * The HW reads the GuC RSA from memory if the key size is > 256 bytes,
577 	 * while it reads it from the 64 RSA registers if it is smaller.
578 	 * The HuC RSA is always read from memory.
579 	 */
580 	return uc_fw->type == INTEL_UC_FW_TYPE_HUC || uc_fw->rsa_size > 256;
581 }
582 
583 static int uc_fw_rsa_data_create(struct intel_uc_fw *uc_fw)
584 {
585 	struct intel_gt *gt = __uc_fw_to_gt(uc_fw);
586 	struct i915_vma *vma;
587 	size_t copied;
588 	void *vaddr;
589 	int err;
590 
591 	err = i915_inject_probe_error(gt->i915, -ENXIO);
592 	if (err)
593 		return err;
594 
595 	if (!uc_fw_need_rsa_in_memory(uc_fw))
596 		return 0;
597 
598 	/*
599 	 * uC firmwares will sit above GUC_GGTT_TOP and will not map through
600 	 * GGTT. Unfortunately, this means that the GuC HW cannot perform the uC
601 	 * authentication from memory, as the RSA offset now falls within the
602 	 * GuC inaccessible range. We resort to perma-pinning an additional vma
603 	 * within the accessible range that only contains the RSA signature.
604 	 * The GuC HW can use this extra pinning to perform the authentication
605 	 * since its GGTT offset will be GuC accessible.
606 	 */
607 	GEM_BUG_ON(uc_fw->rsa_size > PAGE_SIZE);
608 	vma = intel_guc_allocate_vma(&gt->uc.guc, PAGE_SIZE);
609 	if (IS_ERR(vma))
610 		return PTR_ERR(vma);
611 
612 	vaddr = i915_gem_object_pin_map_unlocked(vma->obj,
613 						 i915_coherent_map_type(gt->i915, vma->obj, true));
614 	if (IS_ERR(vaddr)) {
615 		i915_vma_unpin_and_release(&vma, 0);
616 		err = PTR_ERR(vaddr);
617 		goto unpin_out;
618 	}
619 
620 	copied = intel_uc_fw_copy_rsa(uc_fw, vaddr, vma->size);
621 	i915_gem_object_unpin_map(vma->obj);
622 
623 	if (copied < uc_fw->rsa_size) {
624 		err = -ENOMEM;
625 		goto unpin_out;
626 	}
627 
628 	uc_fw->rsa_data = vma;
629 
630 	return 0;
631 
632 unpin_out:
633 	i915_vma_unpin_and_release(&vma, 0);
634 	return err;
635 }
636 
/* Release the perma-pinned RSA vma created by uc_fw_rsa_data_create(). */
static void uc_fw_rsa_data_destroy(struct intel_uc_fw *uc_fw)
{
	i915_vma_unpin_and_release(&uc_fw->rsa_data, 0);
}
641 
642 int intel_uc_fw_init(struct intel_uc_fw *uc_fw)
643 {
644 	int err;
645 
646 	/* this should happen before the load! */
647 	GEM_BUG_ON(intel_uc_fw_is_loaded(uc_fw));
648 
649 	if (!intel_uc_fw_is_available(uc_fw))
650 		return -ENOEXEC;
651 
652 	err = i915_gem_object_pin_pages_unlocked(uc_fw->obj);
653 	if (err) {
654 		DRM_DEBUG_DRIVER("%s fw pin-pages err=%d\n",
655 				 intel_uc_fw_type_repr(uc_fw->type), err);
656 		goto out;
657 	}
658 
659 	err = uc_fw_rsa_data_create(uc_fw);
660 	if (err) {
661 		DRM_DEBUG_DRIVER("%s fw rsa data creation failed, err=%d\n",
662 				 intel_uc_fw_type_repr(uc_fw->type), err);
663 		goto out_unpin;
664 	}
665 
666 	return 0;
667 
668 out_unpin:
669 	i915_gem_object_unpin_pages(uc_fw->obj);
670 out:
671 	intel_uc_fw_change_status(uc_fw, INTEL_UC_FIRMWARE_INIT_FAIL);
672 	return err;
673 }
674 
/*
 * Undo intel_uc_fw_init(): drop the RSA vma and the page pin, then return
 * the firmware to the AVAILABLE state so it can be initialized again.
 */
void intel_uc_fw_fini(struct intel_uc_fw *uc_fw)
{
	uc_fw_rsa_data_destroy(uc_fw);

	/* Pages may not be pinned if init failed part-way; check first */
	if (i915_gem_object_has_pinned_pages(uc_fw->obj))
		i915_gem_object_unpin_pages(uc_fw->obj);

	intel_uc_fw_change_status(uc_fw, INTEL_UC_FIRMWARE_AVAILABLE);
}
684 
685 /**
686  * intel_uc_fw_cleanup_fetch - cleanup uC firmware
687  * @uc_fw: uC firmware
688  *
689  * Cleans up uC firmware by releasing the firmware GEM obj.
690  */
691 void intel_uc_fw_cleanup_fetch(struct intel_uc_fw *uc_fw)
692 {
693 	if (!intel_uc_fw_is_available(uc_fw))
694 		return;
695 
696 	i915_gem_object_put(fetch_and_zero(&uc_fw->obj));
697 
698 	intel_uc_fw_change_status(uc_fw, INTEL_UC_FIRMWARE_SELECTED);
699 }
700 
701 /**
702  * intel_uc_fw_copy_rsa - copy fw RSA to buffer
703  *
704  * @uc_fw: uC firmware
705  * @dst: dst buffer
706  * @max_len: max number of bytes to copy
707  *
708  * Return: number of copied bytes.
709  */
710 size_t intel_uc_fw_copy_rsa(struct intel_uc_fw *uc_fw, void *dst, u32 max_len)
711 {
712 	struct intel_memory_region *mr = uc_fw->obj->mm.region;
713 	u32 size = min_t(u32, uc_fw->rsa_size, max_len);
714 	u32 offset = sizeof(struct uc_css_header) + uc_fw->ucode_size;
715 	struct sgt_iter iter;
716 	size_t count = 0;
717 	int idx;
718 
719 	/* Called during reset handling, must be atomic [no fs_reclaim] */
720 	GEM_BUG_ON(!intel_uc_fw_is_available(uc_fw));
721 
722 	idx = offset >> PAGE_SHIFT;
723 	offset = offset_in_page(offset);
724 	if (i915_gem_object_has_struct_page(uc_fw->obj)) {
725 		struct page *page;
726 
727 		for_each_sgt_page(page, iter, uc_fw->obj->mm.pages) {
728 			u32 len = min_t(u32, size, PAGE_SIZE - offset);
729 			void *vaddr;
730 
731 			if (idx > 0) {
732 				idx--;
733 				continue;
734 			}
735 
736 			vaddr = kmap_atomic(page);
737 			memcpy(dst, vaddr + offset, len);
738 			kunmap_atomic(vaddr);
739 
740 			offset = 0;
741 			dst += len;
742 			size -= len;
743 			count += len;
744 			if (!size)
745 				break;
746 		}
747 	} else {
748 		dma_addr_t addr;
749 
750 		for_each_sgt_daddr(addr, iter, uc_fw->obj->mm.pages) {
751 			u32 len = min_t(u32, size, PAGE_SIZE - offset);
752 			void __iomem *vaddr;
753 
754 			if (idx > 0) {
755 				idx--;
756 				continue;
757 			}
758 
759 			vaddr = io_mapping_map_atomic_wc(&mr->iomap,
760 							 addr - mr->region.start);
761 			memcpy_fromio(dst, vaddr + offset, len);
762 			io_mapping_unmap_atomic(vaddr);
763 
764 			offset = 0;
765 			dst += len;
766 			size -= len;
767 			count += len;
768 			if (!size)
769 				break;
770 		}
771 	}
772 
773 	return count;
774 }
775 
776 /**
777  * intel_uc_fw_dump - dump information about uC firmware
778  * @uc_fw: uC firmware
779  * @p: the &drm_printer
780  *
781  * Pretty printer for uC firmware.
782  */
783 void intel_uc_fw_dump(const struct intel_uc_fw *uc_fw, struct drm_printer *p)
784 {
785 	drm_printf(p, "%s firmware: %s\n",
786 		   intel_uc_fw_type_repr(uc_fw->type), uc_fw->path);
787 	drm_printf(p, "\tstatus: %s\n",
788 		   intel_uc_fw_status_repr(uc_fw->status));
789 	drm_printf(p, "\tversion: wanted %u.%u, found %u.%u\n",
790 		   uc_fw->major_ver_wanted, uc_fw->minor_ver_wanted,
791 		   uc_fw->major_ver_found, uc_fw->minor_ver_found);
792 	drm_printf(p, "\tuCode: %u bytes\n", uc_fw->ucode_size);
793 	drm_printf(p, "\tRSA: %u bytes\n", uc_fw->rsa_size);
794 }
795