// SPDX-License-Identifier: MIT
/*
 * Copyright © 2016-2019 Intel Corporation
 */

#include <linux/bitfield.h>
#include <linux/firmware.h>
#include <linux/highmem.h>

#include <drm/drm_cache.h>
#include <drm/drm_print.h>

#include "gem/i915_gem_lmem.h"
#include "intel_uc_fw.h"
#include "intel_uc_fw_abi.h"
#include "i915_drv.h"
#include "i915_reg.h"

static inline struct intel_gt *
____uc_fw_to_gt(struct intel_uc_fw *uc_fw, enum intel_uc_fw_type type)
{
	if (type == INTEL_UC_FW_TYPE_GUC)
		return container_of(uc_fw, struct intel_gt, uc.guc.fw);

	GEM_BUG_ON(type != INTEL_UC_FW_TYPE_HUC);
	return container_of(uc_fw, struct intel_gt, uc.huc.fw);
}

static inline struct intel_gt *__uc_fw_to_gt(struct intel_uc_fw *uc_fw)
{
	GEM_BUG_ON(uc_fw->status == INTEL_UC_FIRMWARE_UNINITIALIZED);
	return ____uc_fw_to_gt(uc_fw, uc_fw->type);
}

#ifdef CONFIG_DRM_I915_DEBUG_GUC
void intel_uc_fw_change_status(struct intel_uc_fw *uc_fw,
			       enum intel_uc_fw_status status)
{
	uc_fw->__status = status;
	drm_dbg(&__uc_fw_to_gt(uc_fw)->i915->drm,
		"%s firmware -> %s\n",
		intel_uc_fw_type_repr(uc_fw->type),
		status == INTEL_UC_FIRMWARE_SELECTED ?
		uc_fw->path : intel_uc_fw_status_repr(status));
}
#endif

/*
 * List of required GuC and HuC binaries per-platform.
 * Must be ordered based on platform + revid, from newer to older.
 *
 * Note that RKL and ADL-S have the same GuC/HuC device IDs and use the same
 * firmware as TGL.
 */
#define INTEL_GUC_FIRMWARE_DEFS(fw_def, guc_def) \
	fw_def(ALDERLAKE_P,  0, guc_def(adlp, 69, 0, 3)) \
	fw_def(ALDERLAKE_S,  0, guc_def(tgl,  69, 0, 3)) \
	fw_def(DG1,          0, guc_def(dg1,  69, 0, 3)) \
	fw_def(ROCKETLAKE,   0, guc_def(tgl,  69, 0, 3)) \
	fw_def(TIGERLAKE,    0, guc_def(tgl,  69, 0, 3)) \
	fw_def(JASPERLAKE,   0, guc_def(ehl,  69, 0, 3)) \
	fw_def(ELKHARTLAKE,  0, guc_def(ehl,  69, 0, 3)) \
	fw_def(ICELAKE,      0, guc_def(icl,  69, 0, 3)) \
	fw_def(COMETLAKE,    5, guc_def(cml,  69, 0, 3)) \
	fw_def(COMETLAKE,    0, guc_def(kbl,  69, 0, 3)) \
	fw_def(COFFEELAKE,   0, guc_def(kbl,  69, 0, 3)) \
	fw_def(GEMINILAKE,   0, guc_def(glk,  69, 0, 3)) \
	fw_def(KABYLAKE,     0, guc_def(kbl,  69, 0, 3)) \
	fw_def(BROXTON,      0, guc_def(bxt,  69, 0, 3)) \
	fw_def(SKYLAKE,      0, guc_def(skl,  69, 0, 3))

#define INTEL_HUC_FIRMWARE_DEFS(fw_def, huc_def) \
	fw_def(ALDERLAKE_P,  0, huc_def(tgl,  7, 9, 3)) \
	fw_def(ALDERLAKE_S,  0, huc_def(tgl,  7, 9, 3)) \
	fw_def(DG1,          0, huc_def(dg1,  7, 9, 3)) \
	fw_def(ROCKETLAKE,   0, huc_def(tgl,  7, 9, 3)) \
	fw_def(TIGERLAKE,    0, huc_def(tgl,  7, 9, 3)) \
	fw_def(JASPERLAKE,   0, huc_def(ehl,  9, 0, 0)) \
	fw_def(ELKHARTLAKE,  0, huc_def(ehl,  9, 0, 0)) \
	fw_def(ICELAKE,      0, huc_def(icl,  9, 0, 0)) \
	fw_def(COMETLAKE,    5, huc_def(cml,  4, 0, 0)) \
	fw_def(COMETLAKE,    0, huc_def(kbl,  4, 0, 0)) \
	fw_def(COFFEELAKE,   0, huc_def(kbl,  4, 0, 0)) \
	fw_def(GEMINILAKE,   0, huc_def(glk,  4, 0, 0)) \
	fw_def(KABYLAKE,     0, huc_def(kbl,  4, 0, 0)) \
	fw_def(BROXTON,      0, huc_def(bxt,  2, 0, 0)) \
	fw_def(SKYLAKE,      0, huc_def(skl,  2, 0, 0))

#define __MAKE_UC_FW_PATH(prefix_, name_, major_, minor_, patch_) \
	"i915/" \
	__stringify(prefix_) name_ \
	__stringify(major_) "." \
	__stringify(minor_) "." \
	__stringify(patch_) ".bin"

#define MAKE_GUC_FW_PATH(prefix_, major_, minor_, patch_) \
	__MAKE_UC_FW_PATH(prefix_, "_guc_", major_, minor_, patch_)

#define MAKE_HUC_FW_PATH(prefix_, major_, minor_, bld_num_) \
	__MAKE_UC_FW_PATH(prefix_, "_huc_", major_, minor_, bld_num_)
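
/*
 * For example, MAKE_GUC_FW_PATH(tgl, 69, 0, 3) expands to
 * "i915/tgl_guc_69.0.3.bin" and MAKE_HUC_FW_PATH(tgl, 7, 9, 3) to
 * "i915/tgl_huc_7.9.3.bin".
 */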

/* All blobs need to be declared via MODULE_FIRMWARE() */
#define INTEL_UC_MODULE_FW(platform_, revid_, uc_) \
	MODULE_FIRMWARE(uc_);

INTEL_GUC_FIRMWARE_DEFS(INTEL_UC_MODULE_FW, MAKE_GUC_FW_PATH)
INTEL_HUC_FIRMWARE_DEFS(INTEL_UC_MODULE_FW, MAKE_HUC_FW_PATH)

/* The below structs and macros are used to iterate across the list of blobs */
struct __packed uc_fw_blob {
	u8 major;
	u8 minor;
	const char *path;
};

#define UC_FW_BLOB(major_, minor_, path_) \
	{ .major = major_, .minor = minor_, .path = path_ }

#define GUC_FW_BLOB(prefix_, major_, minor_, patch_) \
	UC_FW_BLOB(major_, minor_, \
		   MAKE_GUC_FW_PATH(prefix_, major_, minor_, patch_))

#define HUC_FW_BLOB(prefix_, major_, minor_, bld_num_) \
	UC_FW_BLOB(major_, minor_, \
		   MAKE_HUC_FW_PATH(prefix_, major_, minor_, bld_num_))

struct __packed uc_fw_platform_requirement {
	enum intel_platform p;
	u8 rev; /* first platform rev using this FW */
	const struct uc_fw_blob blob;
};

#define MAKE_FW_LIST(platform_, revid_, uc_) \
{ \
	.p = INTEL_##platform_, \
	.rev = revid_, \
	.blob = uc_, \
},

struct fw_blobs_by_type {
	const struct uc_fw_platform_requirement *blobs;
	u32 count;
};

static void
__uc_fw_auto_select(struct drm_i915_private *i915, struct intel_uc_fw *uc_fw)
{
	static const struct uc_fw_platform_requirement blobs_guc[] = {
		INTEL_GUC_FIRMWARE_DEFS(MAKE_FW_LIST, GUC_FW_BLOB)
	};
	static const struct uc_fw_platform_requirement blobs_huc[] = {
		INTEL_HUC_FIRMWARE_DEFS(MAKE_FW_LIST, HUC_FW_BLOB)
	};
	static const struct fw_blobs_by_type blobs_all[INTEL_UC_FW_NUM_TYPES] = {
		[INTEL_UC_FW_TYPE_GUC] = { blobs_guc, ARRAY_SIZE(blobs_guc) },
		[INTEL_UC_FW_TYPE_HUC] = { blobs_huc, ARRAY_SIZE(blobs_huc) },
	};
	const struct uc_fw_platform_requirement *fw_blobs;
	enum intel_platform p = INTEL_INFO(i915)->platform;
	u32 fw_count;
	u8 rev = INTEL_REVID(i915);
	int i;

	GEM_BUG_ON(uc_fw->type >= ARRAY_SIZE(blobs_all));
	fw_blobs = blobs_all[uc_fw->type].blobs;
	fw_count = blobs_all[uc_fw->type].count;

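	/*
	 * The list is ordered from newer to older platforms (and, within a
	 * platform, from higher to lower revid), so the first match is the
	 * most specific blob for this device.
	 */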
	for (i = 0; i < fw_count && p <= fw_blobs[i].p; i++) {
		if (p == fw_blobs[i].p && rev >= fw_blobs[i].rev) {
			const struct uc_fw_blob *blob = &fw_blobs[i].blob;

			uc_fw->path = blob->path;
			uc_fw->major_ver_wanted = blob->major;
			uc_fw->minor_ver_wanted = blob->minor;
			break;
		}
	}

	/* make sure the list is ordered as expected */
	if (IS_ENABLED(CONFIG_DRM_I915_SELFTEST)) {
		for (i = 1; i < fw_count; i++) {
			if (fw_blobs[i].p < fw_blobs[i - 1].p)
				continue;

			if (fw_blobs[i].p == fw_blobs[i - 1].p &&
			    fw_blobs[i].rev < fw_blobs[i - 1].rev)
				continue;

			pr_err("invalid FW blob order: %s r%u comes before %s r%u\n",
			       intel_platform_name(fw_blobs[i - 1].p),
			       fw_blobs[i - 1].rev,
			       intel_platform_name(fw_blobs[i].p),
			       fw_blobs[i].rev);

			uc_fw->path = NULL;
		}
	}
}

static const char *__override_guc_firmware_path(struct drm_i915_private *i915)
{
	if (i915->params.enable_guc & ENABLE_GUC_MASK)
		return i915->params.guc_firmware_path;
	return "";
}

static const char *__override_huc_firmware_path(struct drm_i915_private *i915)
{
	if (i915->params.enable_guc & ENABLE_GUC_LOAD_HUC)
		return i915->params.huc_firmware_path;
	return "";
}

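/*
 * A NULL path from the helpers above means there is no user override and the
 * auto-selected blob is kept; an empty string disables the uC (e.g. when
 * enable_guc does not request it); anything else is taken as a user-provided
 * firmware path.
 */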
static void __uc_fw_user_override(struct drm_i915_private *i915, struct intel_uc_fw *uc_fw)
{
	const char *path = NULL;

	switch (uc_fw->type) {
	case INTEL_UC_FW_TYPE_GUC:
		path = __override_guc_firmware_path(i915);
		break;
	case INTEL_UC_FW_TYPE_HUC:
		path = __override_huc_firmware_path(i915);
		break;
	}

	if (unlikely(path)) {
		uc_fw->path = path;
		uc_fw->user_overridden = true;
	}
}

/**
 * intel_uc_fw_init_early - initialize the uC object and select the firmware
 * @uc_fw: uC firmware
 * @type: type of uC
 *
 * Initialize the state of our uC object and relevant tracking and select the
 * firmware to fetch and load.
 */
void intel_uc_fw_init_early(struct intel_uc_fw *uc_fw,
			    enum intel_uc_fw_type type)
{
	struct drm_i915_private *i915 = ____uc_fw_to_gt(uc_fw, type)->i915;

	/*
	 * we use FIRMWARE_UNINITIALIZED to detect checks against uc_fw->status
	 * before we've looked at the HW caps to see if we have uC support
	 */
	BUILD_BUG_ON(INTEL_UC_FIRMWARE_UNINITIALIZED);
	GEM_BUG_ON(uc_fw->status);
	GEM_BUG_ON(uc_fw->path);

	uc_fw->type = type;

	if (HAS_GT_UC(i915)) {
		__uc_fw_auto_select(i915, uc_fw);
		__uc_fw_user_override(i915, uc_fw);
	}

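	/*
	 * Map the selected path to an initial status: no path means the uC is
	 * not supported on this platform, an empty path means the uC was
	 * explicitly disabled and a non-empty path means a blob was selected.
	 */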
	intel_uc_fw_change_status(uc_fw, uc_fw->path ? *uc_fw->path ?
				  INTEL_UC_FIRMWARE_SELECTED :
				  INTEL_UC_FIRMWARE_DISABLED :
				  INTEL_UC_FIRMWARE_NOT_SUPPORTED);
}

static void __force_fw_fetch_failures(struct intel_uc_fw *uc_fw, int e)
{
	struct drm_i915_private *i915 = __uc_fw_to_gt(uc_fw)->i915;
	bool user = e == -EINVAL;

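	/*
	 * Each i915_inject_probe_error() evaluation below consumes one
	 * fault-injection checkpoint, so a given injection count fires at most
	 * one of these branches. e == -EINVAL mimics a failure of a
	 * user-overridden blob, e == -ESTALE a failure of the auto-selected
	 * one.
	 */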
	if (i915_inject_probe_error(i915, e)) {
		/* non-existing blob */
		uc_fw->path = "<invalid>";
		uc_fw->user_overridden = user;
	} else if (i915_inject_probe_error(i915, e)) {
		/* require next major version */
		uc_fw->major_ver_wanted += 1;
		uc_fw->minor_ver_wanted = 0;
		uc_fw->user_overridden = user;
	} else if (i915_inject_probe_error(i915, e)) {
		/* require next minor version */
		uc_fw->minor_ver_wanted += 1;
		uc_fw->user_overridden = user;
	} else if (uc_fw->major_ver_wanted &&
		   i915_inject_probe_error(i915, e)) {
		/* require prev major version */
		uc_fw->major_ver_wanted -= 1;
		uc_fw->minor_ver_wanted = 0;
		uc_fw->user_overridden = user;
	} else if (uc_fw->minor_ver_wanted &&
		   i915_inject_probe_error(i915, e)) {
		/* require prev minor version - hey, this should work! */
		uc_fw->minor_ver_wanted -= 1;
		uc_fw->user_overridden = user;
	} else if (user && i915_inject_probe_error(i915, e)) {
		/* officially unsupported platform */
		uc_fw->major_ver_wanted = 0;
		uc_fw->minor_ver_wanted = 0;
		uc_fw->user_overridden = true;
	}
}

/**
 * intel_uc_fw_fetch - fetch uC firmware
 * @uc_fw: uC firmware
 *
 * Fetch uC firmware into GEM obj.
 *
 * Return: 0 on success, a negative errno code on failure.
 */
int intel_uc_fw_fetch(struct intel_uc_fw *uc_fw)
{
	struct drm_i915_private *i915 = __uc_fw_to_gt(uc_fw)->i915;
	struct device *dev = i915->drm.dev;
	struct drm_i915_gem_object *obj;
	const struct firmware *fw = NULL;
	struct uc_css_header *css;
	size_t size;
	int err;

	GEM_BUG_ON(!i915->wopcm.size);
	GEM_BUG_ON(!intel_uc_fw_is_enabled(uc_fw));

	err = i915_inject_probe_error(i915, -ENXIO);
	if (err)
		goto fail;

	__force_fw_fetch_failures(uc_fw, -EINVAL);
	__force_fw_fetch_failures(uc_fw, -ESTALE);

	err = request_firmware(&fw, uc_fw->path, dev);
	if (err)
		goto fail;

	/* Check the size of the blob before examining buffer contents */
	if (unlikely(fw->size < sizeof(struct uc_css_header))) {
		drm_warn(&i915->drm, "%s firmware %s: invalid size: %zu < %zu\n",
			 intel_uc_fw_type_repr(uc_fw->type), uc_fw->path,
			 fw->size, sizeof(struct uc_css_header));
		err = -ENODATA;
		goto fail;
	}

	css = (struct uc_css_header *)fw->data;

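	/*
	 * The blob layout is: CSS header, then the uCode image, then the RSA
	 * data used for authentication. All the sizes below are derived from
	 * dword counts in the CSS header.
	 */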
	/* Check integrity of size values inside CSS header */
	size = (css->header_size_dw - css->key_size_dw - css->modulus_size_dw -
		css->exponent_size_dw) * sizeof(u32);
	if (unlikely(size != sizeof(struct uc_css_header))) {
		drm_warn(&i915->drm,
			 "%s firmware %s: unexpected header size: %zu != %zu\n",
			 intel_uc_fw_type_repr(uc_fw->type), uc_fw->path,
			 size, sizeof(struct uc_css_header));
		err = -EPROTO;
		goto fail;
	}

	/* uCode size must be calculated from the other sizes */
	uc_fw->ucode_size = (css->size_dw - css->header_size_dw) * sizeof(u32);

	/* now RSA */
	uc_fw->rsa_size = css->key_size_dw * sizeof(u32);

	/*
	 * The blob must contain at least the header, uCode and RSA, so check
	 * it against the sum of all three sizes.
	 */
	size = sizeof(struct uc_css_header) + uc_fw->ucode_size + uc_fw->rsa_size;
	if (unlikely(fw->size < size)) {
		drm_warn(&i915->drm, "%s firmware %s: invalid size: %zu < %zu\n",
			 intel_uc_fw_type_repr(uc_fw->type), uc_fw->path,
			 fw->size, size);
		err = -ENOEXEC;
		goto fail;
	}

	/* Sanity check that the fw is not larger than the whole of WOPCM memory */
	size = __intel_uc_fw_get_upload_size(uc_fw);
	if (unlikely(size >= i915->wopcm.size)) {
		drm_warn(&i915->drm, "%s firmware %s: invalid size: %zu > %zu\n",
			 intel_uc_fw_type_repr(uc_fw->type), uc_fw->path,
			 size, (size_t)i915->wopcm.size);
		err = -E2BIG;
		goto fail;
	}

	/* Get version numbers from the CSS header */
	uc_fw->major_ver_found = FIELD_GET(CSS_SW_VERSION_UC_MAJOR,
					   css->sw_version);
	uc_fw->minor_ver_found = FIELD_GET(CSS_SW_VERSION_UC_MINOR,
					   css->sw_version);

	if (uc_fw->major_ver_found != uc_fw->major_ver_wanted ||
	    uc_fw->minor_ver_found < uc_fw->minor_ver_wanted) {
		drm_notice(&i915->drm, "%s firmware %s: unexpected version: %u.%u != %u.%u\n",
			   intel_uc_fw_type_repr(uc_fw->type), uc_fw->path,
			   uc_fw->major_ver_found, uc_fw->minor_ver_found,
			   uc_fw->major_ver_wanted, uc_fw->minor_ver_wanted);
		if (!intel_uc_fw_is_overridden(uc_fw)) {
			err = -ENOEXEC;
			goto fail;
		}
	}

	if (uc_fw->type == INTEL_UC_FW_TYPE_GUC)
		uc_fw->private_data_size = css->private_data_size;

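	/*
	 * Firmware images placed in local memory must be evicted and restored
	 * early in the suspend/resume flow, before the GT is fully back up,
	 * hence the I915_BO_ALLOC_PM_EARLY flag below.
	 */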
	if (HAS_LMEM(i915)) {
		obj = i915_gem_object_create_lmem_from_data(i915, fw->data, fw->size);
		if (!IS_ERR(obj))
			obj->flags |= I915_BO_ALLOC_PM_EARLY;
	} else {
		obj = i915_gem_object_create_shmem_from_data(i915, fw->data, fw->size);
	}

	if (IS_ERR(obj)) {
		err = PTR_ERR(obj);
		goto fail;
	}

	uc_fw->obj = obj;
	uc_fw->size = fw->size;
	intel_uc_fw_change_status(uc_fw, INTEL_UC_FIRMWARE_AVAILABLE);

	release_firmware(fw);
	return 0;

fail:
	intel_uc_fw_change_status(uc_fw, err == -ENOENT ?
				  INTEL_UC_FIRMWARE_MISSING :
				  INTEL_UC_FIRMWARE_ERROR);

	drm_notice(&i915->drm, "%s firmware %s: fetch failed with error %d\n",
		   intel_uc_fw_type_repr(uc_fw->type), uc_fw->path, err);
	drm_info(&i915->drm, "%s firmware(s) can be downloaded from %s\n",
		 intel_uc_fw_type_repr(uc_fw->type), INTEL_UC_FIRMWARE_URL);

	release_firmware(fw);		/* OK even if fw is NULL */
	return err;
}

static u32 uc_fw_ggtt_offset(struct intel_uc_fw *uc_fw)
{
	struct i915_ggtt *ggtt = __uc_fw_to_gt(uc_fw)->ggtt;
	struct drm_mm_node *node = &ggtt->uc_fw;

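	/*
	 * ggtt->uc_fw is a drm_mm node reserved at GGTT init, above
	 * GUC_GGTT_TOP, specifically for staging uC firmware transfers.
	 */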
	GEM_BUG_ON(!drm_mm_node_allocated(node));
	GEM_BUG_ON(upper_32_bits(node->start));
	GEM_BUG_ON(upper_32_bits(node->start + node->size - 1));

	return lower_32_bits(node->start);
}

static void uc_fw_bind_ggtt(struct intel_uc_fw *uc_fw)
{
	struct drm_i915_gem_object *obj = uc_fw->obj;
	struct i915_ggtt *ggtt = __uc_fw_to_gt(uc_fw)->ggtt;
	struct i915_vma_resource *dummy = &uc_fw->dummy;
	u32 pte_flags = 0;

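	/*
	 * Describe the reserved uc_fw node with a throwaway vma resource and
	 * insert the firmware pages into the GGTT directly, so no real vma
	 * has to be created or tracked just for the DMA transfer.
	 */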
	dummy->start = uc_fw_ggtt_offset(uc_fw);
	dummy->node_size = obj->base.size;
	dummy->bi.pages = obj->mm.pages;

	GEM_BUG_ON(!i915_gem_object_has_pinned_pages(obj));
	GEM_BUG_ON(dummy->node_size > ggtt->uc_fw.size);

	/* uc_fw->obj cache domains were not controlled across suspend */
	if (i915_gem_object_has_struct_page(obj))
		drm_clflush_sg(dummy->bi.pages);

	if (i915_gem_object_is_lmem(obj))
		pte_flags |= PTE_LM;

	ggtt->vm.insert_entries(&ggtt->vm, dummy, I915_CACHE_NONE, pte_flags);
}

static void uc_fw_unbind_ggtt(struct intel_uc_fw *uc_fw)
{
	struct drm_i915_gem_object *obj = uc_fw->obj;
	struct i915_ggtt *ggtt = __uc_fw_to_gt(uc_fw)->ggtt;
	u64 start = uc_fw_ggtt_offset(uc_fw);

	ggtt->vm.clear_range(&ggtt->vm, start, obj->base.size);
}

static int uc_fw_xfer(struct intel_uc_fw *uc_fw, u32 dst_offset, u32 dma_flags)
{
	struct intel_gt *gt = __uc_fw_to_gt(uc_fw);
	struct intel_uncore *uncore = gt->uncore;
	u64 offset;
	int ret;

	ret = i915_inject_probe_error(gt->i915, -ETIMEDOUT);
	if (ret)
		return ret;

	intel_uncore_forcewake_get(uncore, FORCEWAKE_ALL);

	/* Set the source address for the uCode */
	offset = uc_fw_ggtt_offset(uc_fw);
	GEM_BUG_ON(upper_32_bits(offset) & 0xFFFF0000);
	intel_uncore_write_fw(uncore, DMA_ADDR_0_LOW, lower_32_bits(offset));
	intel_uncore_write_fw(uncore, DMA_ADDR_0_HIGH, upper_32_bits(offset));

	/* Set the DMA destination */
	intel_uncore_write_fw(uncore, DMA_ADDR_1_LOW, dst_offset);
	intel_uncore_write_fw(uncore, DMA_ADDR_1_HIGH, DMA_ADDRESS_SPACE_WOPCM);

	/*
	 * Set the transfer size. The header plus uCode will be copied to WOPCM
	 * via DMA, excluding any other components
	 */
	intel_uncore_write_fw(uncore, DMA_COPY_SIZE,
			      sizeof(struct uc_css_header) + uc_fw->ucode_size);

	/* Start the DMA */
	intel_uncore_write_fw(uncore, DMA_CTRL,
			      _MASKED_BIT_ENABLE(dma_flags | START_DMA));

	/* Wait for DMA to finish */
	ret = intel_wait_for_register_fw(uncore, DMA_CTRL, START_DMA, 0, 100);
	if (ret)
		drm_err(&gt->i915->drm, "DMA for %s fw failed, DMA_CTRL=%u\n",
			intel_uc_fw_type_repr(uc_fw->type),
			intel_uncore_read_fw(uncore, DMA_CTRL));

	/* Disable the bits once DMA is over */
	intel_uncore_write_fw(uncore, DMA_CTRL, _MASKED_BIT_DISABLE(dma_flags));

	intel_uncore_forcewake_put(uncore, FORCEWAKE_ALL);

	return ret;
}

/**
 * intel_uc_fw_upload - load uC firmware using custom loader
 * @uc_fw: uC firmware
 * @dst_offset: destination offset
 * @dma_flags: flags for dma ctrl
 *
 * Loads uC firmware and updates internal flags.
 *
 * Return: 0 on success, non-zero on failure.
 */
int intel_uc_fw_upload(struct intel_uc_fw *uc_fw, u32 dst_offset, u32 dma_flags)
{
	struct intel_gt *gt = __uc_fw_to_gt(uc_fw);
	int err;

	/* make sure the status was cleared the last time we reset the uc */
	GEM_BUG_ON(intel_uc_fw_is_loaded(uc_fw));

	err = i915_inject_probe_error(gt->i915, -ENOEXEC);
	if (err)
		return err;

	if (!intel_uc_fw_is_loadable(uc_fw))
		return -ENOEXEC;

	/* Call custom loader */
	uc_fw_bind_ggtt(uc_fw);
	err = uc_fw_xfer(uc_fw, dst_offset, dma_flags);
	uc_fw_unbind_ggtt(uc_fw);
	if (err)
		goto fail;

	intel_uc_fw_change_status(uc_fw, INTEL_UC_FIRMWARE_TRANSFERRED);
	return 0;

fail:
	i915_probe_error(gt->i915, "Failed to load %s firmware %s (%d)\n",
			 intel_uc_fw_type_repr(uc_fw->type), uc_fw->path,
			 err);
	intel_uc_fw_change_status(uc_fw, INTEL_UC_FIRMWARE_LOAD_FAIL);
	return err;
}

static inline bool uc_fw_need_rsa_in_memory(struct intel_uc_fw *uc_fw)
{
	/*
	 * The HW reads the GuC RSA from memory if the key size is > 256 bytes,
	 * while it reads it from the 64 RSA registers if it is smaller.
	 * The HuC RSA is always read from memory.
	 */
	return uc_fw->type == INTEL_UC_FW_TYPE_HUC || uc_fw->rsa_size > 256;
}

static int uc_fw_rsa_data_create(struct intel_uc_fw *uc_fw)
{
	struct intel_gt *gt = __uc_fw_to_gt(uc_fw);
	struct i915_vma *vma;
	size_t copied;
	void *vaddr;
	int err;

	err = i915_inject_probe_error(gt->i915, -ENXIO);
	if (err)
		return err;

	if (!uc_fw_need_rsa_in_memory(uc_fw))
		return 0;

	/*
	 * uC firmwares will sit above GUC_GGTT_TOP and will not map through
	 * GGTT. Unfortunately, this means that the GuC HW cannot perform the uC
	 * authentication from memory, as the RSA offset now falls within the
	 * GuC inaccessible range. We resort to perma-pinning an additional vma
	 * within the accessible range that only contains the RSA signature.
	 * The GuC HW can use this extra pinning to perform the authentication
	 * since its GGTT offset will be GuC accessible.
	 */
	GEM_BUG_ON(uc_fw->rsa_size > PAGE_SIZE);
	vma = intel_guc_allocate_vma(&gt->uc.guc, PAGE_SIZE);
	if (IS_ERR(vma))
		return PTR_ERR(vma);

	vaddr = i915_gem_object_pin_map_unlocked(vma->obj,
						 i915_coherent_map_type(gt->i915, vma->obj, true));
	if (IS_ERR(vaddr)) {
		i915_vma_unpin_and_release(&vma, 0);
		err = PTR_ERR(vaddr);
		goto unpin_out;
	}

	copied = intel_uc_fw_copy_rsa(uc_fw, vaddr, vma->size);
	i915_gem_object_unpin_map(vma->obj);

	if (copied < uc_fw->rsa_size) {
		err = -ENOMEM;
		goto unpin_out;
	}

	uc_fw->rsa_data = vma;

	return 0;

unpin_out:
	i915_vma_unpin_and_release(&vma, 0);
	return err;
}

static void uc_fw_rsa_data_destroy(struct intel_uc_fw *uc_fw)
{
	i915_vma_unpin_and_release(&uc_fw->rsa_data, 0);
}

int intel_uc_fw_init(struct intel_uc_fw *uc_fw)
{
	int err;

	/* this should happen before the load! */
	GEM_BUG_ON(intel_uc_fw_is_loaded(uc_fw));

	if (!intel_uc_fw_is_available(uc_fw))
		return -ENOEXEC;

	err = i915_gem_object_pin_pages_unlocked(uc_fw->obj);
	if (err) {
		DRM_DEBUG_DRIVER("%s fw pin-pages err=%d\n",
				 intel_uc_fw_type_repr(uc_fw->type), err);
		goto out;
	}

	err = uc_fw_rsa_data_create(uc_fw);
	if (err) {
		DRM_DEBUG_DRIVER("%s fw rsa data creation failed, err=%d\n",
				 intel_uc_fw_type_repr(uc_fw->type), err);
		goto out_unpin;
	}

	return 0;

out_unpin:
	i915_gem_object_unpin_pages(uc_fw->obj);
out:
	intel_uc_fw_change_status(uc_fw, INTEL_UC_FIRMWARE_INIT_FAIL);
	return err;
}

void intel_uc_fw_fini(struct intel_uc_fw *uc_fw)
{
	uc_fw_rsa_data_destroy(uc_fw);

	if (i915_gem_object_has_pinned_pages(uc_fw->obj))
		i915_gem_object_unpin_pages(uc_fw->obj);

	intel_uc_fw_change_status(uc_fw, INTEL_UC_FIRMWARE_AVAILABLE);
}

/**
 * intel_uc_fw_cleanup_fetch - cleanup uC firmware
 * @uc_fw: uC firmware
 *
 * Cleans up uC firmware by releasing the firmware GEM obj.
 */
void intel_uc_fw_cleanup_fetch(struct intel_uc_fw *uc_fw)
{
	if (!intel_uc_fw_is_available(uc_fw))
		return;

	i915_gem_object_put(fetch_and_zero(&uc_fw->obj));

	intel_uc_fw_change_status(uc_fw, INTEL_UC_FIRMWARE_SELECTED);
}

/**
 * intel_uc_fw_copy_rsa - copy fw RSA to buffer
 *
 * @uc_fw: uC firmware
 * @dst: dst buffer
 * @max_len: max number of bytes to copy
 *
 * Return: number of copied bytes.
 */
size_t intel_uc_fw_copy_rsa(struct intel_uc_fw *uc_fw, void *dst, u32 max_len)
{
	struct intel_memory_region *mr = uc_fw->obj->mm.region;
	u32 size = min_t(u32, uc_fw->rsa_size, max_len);
	u32 offset = sizeof(struct uc_css_header) + uc_fw->ucode_size;
	struct sgt_iter iter;
	size_t count = 0;
	int idx;

	/* Called during reset handling, must be atomic [no fs_reclaim] */
	GEM_BUG_ON(!intel_uc_fw_is_available(uc_fw));

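	/*
	 * The RSA data sits right after the CSS header and the uCode image in
	 * the blob, so skip 'idx' whole backing pages and then 'offset' bytes
	 * into the first page that contains RSA data.
	 */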
	idx = offset >> PAGE_SHIFT;
	offset = offset_in_page(offset);
	if (i915_gem_object_has_struct_page(uc_fw->obj)) {
		struct page *page;

		for_each_sgt_page(page, iter, uc_fw->obj->mm.pages) {
			u32 len = min_t(u32, size, PAGE_SIZE - offset);
			void *vaddr;

			if (idx > 0) {
				idx--;
				continue;
			}

			vaddr = kmap_atomic(page);
			memcpy(dst, vaddr + offset, len);
			kunmap_atomic(vaddr);

			offset = 0;
			dst += len;
			size -= len;
			count += len;
			if (!size)
				break;
		}
	} else {
		dma_addr_t addr;

		for_each_sgt_daddr(addr, iter, uc_fw->obj->mm.pages) {
			u32 len = min_t(u32, size, PAGE_SIZE - offset);
			void __iomem *vaddr;

			if (idx > 0) {
				idx--;
				continue;
			}

			vaddr = io_mapping_map_atomic_wc(&mr->iomap,
							 addr - mr->region.start);
			memcpy_fromio(dst, vaddr + offset, len);
			io_mapping_unmap_atomic(vaddr);

			offset = 0;
			dst += len;
			size -= len;
			count += len;
			if (!size)
				break;
		}
	}

	return count;
}

/**
 * intel_uc_fw_dump - dump information about uC firmware
 * @uc_fw: uC firmware
 * @p: the &drm_printer
 *
 * Pretty printer for uC firmware.
 */
void intel_uc_fw_dump(const struct intel_uc_fw *uc_fw, struct drm_printer *p)
{
	drm_printf(p, "%s firmware: %s\n",
		   intel_uc_fw_type_repr(uc_fw->type), uc_fw->path);
	drm_printf(p, "\tstatus: %s\n",
		   intel_uc_fw_status_repr(uc_fw->status));
	drm_printf(p, "\tversion: wanted %u.%u, found %u.%u\n",
		   uc_fw->major_ver_wanted, uc_fw->minor_ver_wanted,
		   uc_fw->major_ver_found, uc_fw->minor_ver_found);
	drm_printf(p, "\tuCode: %u bytes\n", uc_fw->ucode_size);
	drm_printf(p, "\tRSA: %u bytes\n", uc_fw->rsa_size);
}