1 // SPDX-License-Identifier: MIT
2 /*
3  * Copyright © 2016-2019 Intel Corporation
4  */
5 
6 #include <linux/bitfield.h>
7 #include <linux/firmware.h>
8 #include <drm/drm_print.h>
9 
10 #include "gem/i915_gem_lmem.h"
11 #include "intel_uc_fw.h"
12 #include "intel_uc_fw_abi.h"
13 #include "i915_drv.h"
14 #include "i915_reg.h"
15 
16 static inline struct intel_gt *
17 ____uc_fw_to_gt(struct intel_uc_fw *uc_fw, enum intel_uc_fw_type type)
18 {
19 	if (type == INTEL_UC_FW_TYPE_GUC)
20 		return container_of(uc_fw, struct intel_gt, uc.guc.fw);
21 
22 	GEM_BUG_ON(type != INTEL_UC_FW_TYPE_HUC);
23 	return container_of(uc_fw, struct intel_gt, uc.huc.fw);
24 }
25 
/*
 * As ____uc_fw_to_gt(), but uses the type stored in @uc_fw; only valid once
 * intel_uc_fw_init_early() has run (asserted via the UNINITIALIZED status).
 */
static inline struct intel_gt *__uc_fw_to_gt(struct intel_uc_fw *uc_fw)
{
	GEM_BUG_ON(uc_fw->status == INTEL_UC_FIRMWARE_UNINITIALIZED);
	return ____uc_fw_to_gt(uc_fw, uc_fw->type);
}
31 
32 #ifdef CONFIG_DRM_I915_DEBUG_GUC
33 void intel_uc_fw_change_status(struct intel_uc_fw *uc_fw,
34 			       enum intel_uc_fw_status status)
35 {
36 	uc_fw->__status =  status;
37 	drm_dbg(&__uc_fw_to_gt(uc_fw)->i915->drm,
38 		"%s firmware -> %s\n",
39 		intel_uc_fw_type_repr(uc_fw->type),
40 		status == INTEL_UC_FIRMWARE_SELECTED ?
41 		uc_fw->path : intel_uc_fw_status_repr(status));
42 }
43 #endif
44 
45 /*
46  * List of required GuC and HuC binaries per-platform.
47  * Must be ordered based on platform + revid, from newer to older.
48  *
49  * Note that RKL and ADL-S have the same GuC/HuC device ID's and use the same
50  * firmware as TGL.
51  */
/* GuC blobs: every platform currently ships a GuC with the 62.0.x interface. */
#define INTEL_GUC_FIRMWARE_DEFS(fw_def, guc_def) \
	fw_def(ALDERLAKE_P,  0, guc_def(adlp, 62, 0, 3)) \
	fw_def(ALDERLAKE_S,  0, guc_def(tgl,  62, 0, 0)) \
	fw_def(DG1,          0, guc_def(dg1,  62, 0, 0)) \
	fw_def(ROCKETLAKE,   0, guc_def(tgl,  62, 0, 0)) \
	fw_def(TIGERLAKE,    0, guc_def(tgl,  62, 0, 0)) \
	fw_def(JASPERLAKE,   0, guc_def(ehl,  62, 0, 0)) \
	fw_def(ELKHARTLAKE,  0, guc_def(ehl,  62, 0, 0)) \
	fw_def(ICELAKE,      0, guc_def(icl,  62, 0, 0)) \
	fw_def(COMETLAKE,    5, guc_def(cml,  62, 0, 0)) \
	fw_def(COMETLAKE,    0, guc_def(kbl,  62, 0, 0)) \
	fw_def(COFFEELAKE,   0, guc_def(kbl,  62, 0, 0)) \
	fw_def(GEMINILAKE,   0, guc_def(glk,  62, 0, 0)) \
	fw_def(KABYLAKE,     0, guc_def(kbl,  62, 0, 0)) \
	fw_def(BROXTON,      0, guc_def(bxt,  62, 0, 0)) \
	fw_def(SKYLAKE,      0, guc_def(skl,  62, 0, 0))

/* HuC blobs: versions vary per platform generation. */
#define INTEL_HUC_FIRMWARE_DEFS(fw_def, huc_def) \
	fw_def(ALDERLAKE_P,  0, huc_def(tgl,  7, 9, 3)) \
	fw_def(ALDERLAKE_S,  0, huc_def(tgl,  7, 9, 3)) \
	fw_def(DG1,          0, huc_def(dg1,  7, 9, 3)) \
	fw_def(ROCKETLAKE,   0, huc_def(tgl,  7, 9, 3)) \
	fw_def(TIGERLAKE,    0, huc_def(tgl,  7, 9, 3)) \
	fw_def(JASPERLAKE,   0, huc_def(ehl,  9, 0, 0)) \
	fw_def(ELKHARTLAKE,  0, huc_def(ehl,  9, 0, 0)) \
	fw_def(ICELAKE,      0, huc_def(icl,  9, 0, 0)) \
	fw_def(COMETLAKE,    5, huc_def(cml,  4, 0, 0)) \
	fw_def(COMETLAKE,    0, huc_def(kbl,  4, 0, 0)) \
	fw_def(COFFEELAKE,   0, huc_def(kbl,  4, 0, 0)) \
	fw_def(GEMINILAKE,   0, huc_def(glk,  4, 0, 0)) \
	fw_def(KABYLAKE,     0, huc_def(kbl,  4, 0, 0)) \
	fw_def(BROXTON,      0, huc_def(bxt,  2, 0, 0)) \
	fw_def(SKYLAKE,      0, huc_def(skl,  2, 0, 0))
85 
/*
 * Build a firmware path of the form
 * "i915/<platform>_<guc|huc>_<major>.<minor>.<patch>.bin".
 */
#define __MAKE_UC_FW_PATH(prefix_, name_, major_, minor_, patch_) \
	"i915/" \
	__stringify(prefix_) name_ \
	__stringify(major_) "." \
	__stringify(minor_) "." \
	__stringify(patch_) ".bin"

#define MAKE_GUC_FW_PATH(prefix_, major_, minor_, patch_) \
	__MAKE_UC_FW_PATH(prefix_, "_guc_", major_, minor_, patch_)

/* HuC uses a build number where GuC uses a patch level. */
#define MAKE_HUC_FW_PATH(prefix_, major_, minor_, bld_num_) \
	__MAKE_UC_FW_PATH(prefix_, "_huc_", major_, minor_, bld_num_)
98 
99 /* All blobs need to be declared via MODULE_FIRMWARE() */
100 #define INTEL_UC_MODULE_FW(platform_, revid_, uc_) \
101 	MODULE_FIRMWARE(uc_);
102 
103 INTEL_GUC_FIRMWARE_DEFS(INTEL_UC_MODULE_FW, MAKE_GUC_FW_PATH)
104 INTEL_HUC_FIRMWARE_DEFS(INTEL_UC_MODULE_FW, MAKE_HUC_FW_PATH)
105 
106 /* The below structs and macros are used to iterate across the list of blobs */
/*
 * One firmware blob entry: the version the driver wants and the path used
 * to request the blob from userspace.
 */
struct __packed uc_fw_blob {
	u8 major;		/* wanted major version */
	u8 minor;		/* wanted (minimum) minor version */
	const char *path;	/* request_firmware() path */
};
112 
/* Construct a uc_fw_blob initializer from a version + path. */
#define UC_FW_BLOB(major_, minor_, path_) \
	{ .major = major_, .minor = minor_, .path = path_ }

#define GUC_FW_BLOB(prefix_, major_, minor_, patch_) \
	UC_FW_BLOB(major_, minor_, \
		   MAKE_GUC_FW_PATH(prefix_, major_, minor_, patch_))

#define HUC_FW_BLOB(prefix_, major_, minor_, bld_num_) \
	UC_FW_BLOB(major_, minor_, \
		   MAKE_HUC_FW_PATH(prefix_, major_, minor_, bld_num_))
123 
/* Binds a firmware blob to the (platform, first-revid) pair that uses it. */
struct __packed uc_fw_platform_requirement {
	enum intel_platform p;
	u8 rev; /* first platform rev using this FW */
	const struct uc_fw_blob blob;
};
129 
/* Expand one *_FIRMWARE_DEFS row into a uc_fw_platform_requirement entry. */
#define MAKE_FW_LIST(platform_, revid_, uc_) \
{ \
	.p = INTEL_##platform_, \
	.rev = revid_, \
	.blob = uc_, \
},
136 
/* Per-uC-type view of the blob table: base pointer plus entry count. */
struct fw_blobs_by_type {
	const struct uc_fw_platform_requirement *blobs;
	u32 count;
};
141 
142 static void
143 __uc_fw_auto_select(struct drm_i915_private *i915, struct intel_uc_fw *uc_fw)
144 {
145 	static const struct uc_fw_platform_requirement blobs_guc[] = {
146 		INTEL_GUC_FIRMWARE_DEFS(MAKE_FW_LIST, GUC_FW_BLOB)
147 	};
148 	static const struct uc_fw_platform_requirement blobs_huc[] = {
149 		INTEL_HUC_FIRMWARE_DEFS(MAKE_FW_LIST, HUC_FW_BLOB)
150 	};
151 	static const struct fw_blobs_by_type blobs_all[INTEL_UC_FW_NUM_TYPES] = {
152 		[INTEL_UC_FW_TYPE_GUC] = { blobs_guc, ARRAY_SIZE(blobs_guc) },
153 		[INTEL_UC_FW_TYPE_HUC] = { blobs_huc, ARRAY_SIZE(blobs_huc) },
154 	};
155 	static const struct uc_fw_platform_requirement *fw_blobs;
156 	enum intel_platform p = INTEL_INFO(i915)->platform;
157 	u32 fw_count;
158 	u8 rev = INTEL_REVID(i915);
159 	int i;
160 
161 	GEM_BUG_ON(uc_fw->type >= ARRAY_SIZE(blobs_all));
162 	fw_blobs = blobs_all[uc_fw->type].blobs;
163 	fw_count = blobs_all[uc_fw->type].count;
164 
165 	for (i = 0; i < fw_count && p <= fw_blobs[i].p; i++) {
166 		if (p == fw_blobs[i].p && rev >= fw_blobs[i].rev) {
167 			const struct uc_fw_blob *blob = &fw_blobs[i].blob;
168 			uc_fw->path = blob->path;
169 			uc_fw->major_ver_wanted = blob->major;
170 			uc_fw->minor_ver_wanted = blob->minor;
171 			break;
172 		}
173 	}
174 
175 	/* make sure the list is ordered as expected */
176 	if (IS_ENABLED(CONFIG_DRM_I915_SELFTEST)) {
177 		for (i = 1; i < fw_count; i++) {
178 			if (fw_blobs[i].p < fw_blobs[i - 1].p)
179 				continue;
180 
181 			if (fw_blobs[i].p == fw_blobs[i - 1].p &&
182 			    fw_blobs[i].rev < fw_blobs[i - 1].rev)
183 				continue;
184 
185 			pr_err("invalid FW blob order: %s r%u comes before %s r%u\n",
186 			       intel_platform_name(fw_blobs[i - 1].p),
187 			       fw_blobs[i - 1].rev,
188 			       intel_platform_name(fw_blobs[i].p),
189 			       fw_blobs[i].rev);
190 
191 			uc_fw->path = NULL;
192 		}
193 	}
194 }
195 
196 static const char *__override_guc_firmware_path(struct drm_i915_private *i915)
197 {
198 	if (i915->params.enable_guc & ENABLE_GUC_MASK)
199 		return i915->params.guc_firmware_path;
200 	return "";
201 }
202 
203 static const char *__override_huc_firmware_path(struct drm_i915_private *i915)
204 {
205 	if (i915->params.enable_guc & ENABLE_GUC_LOAD_HUC)
206 		return i915->params.huc_firmware_path;
207 	return "";
208 }
209 
210 static void __uc_fw_user_override(struct drm_i915_private *i915, struct intel_uc_fw *uc_fw)
211 {
212 	const char *path = NULL;
213 
214 	switch (uc_fw->type) {
215 	case INTEL_UC_FW_TYPE_GUC:
216 		path = __override_guc_firmware_path(i915);
217 		break;
218 	case INTEL_UC_FW_TYPE_HUC:
219 		path = __override_huc_firmware_path(i915);
220 		break;
221 	}
222 
223 	if (unlikely(path)) {
224 		uc_fw->path = path;
225 		uc_fw->user_overridden = true;
226 	}
227 }
228 
229 /**
230  * intel_uc_fw_init_early - initialize the uC object and select the firmware
231  * @uc_fw: uC firmware
232  * @type: type of uC
233  *
234  * Initialize the state of our uC object and relevant tracking and select the
235  * firmware to fetch and load.
236  */
237 void intel_uc_fw_init_early(struct intel_uc_fw *uc_fw,
238 			    enum intel_uc_fw_type type)
239 {
240 	struct drm_i915_private *i915 = ____uc_fw_to_gt(uc_fw, type)->i915;
241 
242 	/*
243 	 * we use FIRMWARE_UNINITIALIZED to detect checks against uc_fw->status
244 	 * before we're looked at the HW caps to see if we have uc support
245 	 */
246 	BUILD_BUG_ON(INTEL_UC_FIRMWARE_UNINITIALIZED);
247 	GEM_BUG_ON(uc_fw->status);
248 	GEM_BUG_ON(uc_fw->path);
249 
250 	uc_fw->type = type;
251 
252 	if (HAS_GT_UC(i915)) {
253 		__uc_fw_auto_select(i915, uc_fw);
254 		__uc_fw_user_override(i915, uc_fw);
255 	}
256 
257 	intel_uc_fw_change_status(uc_fw, uc_fw->path ? *uc_fw->path ?
258 				  INTEL_UC_FIRMWARE_SELECTED :
259 				  INTEL_UC_FIRMWARE_DISABLED :
260 				  INTEL_UC_FIRMWARE_NOT_SUPPORTED);
261 }
262 
/*
 * Fault-injection helper for intel_uc_fw_fetch(): deliberately corrupt one
 * aspect of the fetch parameters (bogus path or a skewed wanted version) so
 * the corresponding failure path in the fetch code gets exercised.
 *
 * NOTE(review): the if/else-if chain relies on i915_inject_probe_error()
 * returning non-zero for at most one of the repeated calls per injected
 * fault — confirm against its implementation.
 *
 * e == -EINVAL marks the forced failure as user-induced (user_overridden).
 */
static void __force_fw_fetch_failures(struct intel_uc_fw *uc_fw, int e)
{
	struct drm_i915_private *i915 = __uc_fw_to_gt(uc_fw)->i915;
	bool user = e == -EINVAL;

	if (i915_inject_probe_error(i915, e)) {
		/* non-existing blob */
		uc_fw->path = "<invalid>";
		uc_fw->user_overridden = user;
	} else if (i915_inject_probe_error(i915, e)) {
		/* require next major version */
		uc_fw->major_ver_wanted += 1;
		uc_fw->minor_ver_wanted = 0;
		uc_fw->user_overridden = user;
	} else if (i915_inject_probe_error(i915, e)) {
		/* require next minor version */
		uc_fw->minor_ver_wanted += 1;
		uc_fw->user_overridden = user;
	} else if (uc_fw->major_ver_wanted &&
		   i915_inject_probe_error(i915, e)) {
		/* require prev major version */
		uc_fw->major_ver_wanted -= 1;
		uc_fw->minor_ver_wanted = 0;
		uc_fw->user_overridden = user;
	} else if (uc_fw->minor_ver_wanted &&
		   i915_inject_probe_error(i915, e)) {
		/* require prev minor version - hey, this should work! */
		uc_fw->minor_ver_wanted -= 1;
		uc_fw->user_overridden = user;
	} else if (user && i915_inject_probe_error(i915, e)) {
		/* officially unsupported platform */
		uc_fw->major_ver_wanted = 0;
		uc_fw->minor_ver_wanted = 0;
		uc_fw->user_overridden = true;
	}
}
299 
300 /**
301  * intel_uc_fw_fetch - fetch uC firmware
302  * @uc_fw: uC firmware
303  *
304  * Fetch uC firmware into GEM obj.
305  *
306  * Return: 0 on success, a negative errno code on failure.
307  */
308 int intel_uc_fw_fetch(struct intel_uc_fw *uc_fw)
309 {
310 	struct drm_i915_private *i915 = __uc_fw_to_gt(uc_fw)->i915;
311 	struct device *dev = i915->drm.dev;
312 	struct drm_i915_gem_object *obj;
313 	const struct firmware *fw = NULL;
314 	struct uc_css_header *css;
315 	size_t size;
316 	int err;
317 
318 	GEM_BUG_ON(!i915->wopcm.size);
319 	GEM_BUG_ON(!intel_uc_fw_is_enabled(uc_fw));
320 
321 	err = i915_inject_probe_error(i915, -ENXIO);
322 	if (err)
323 		goto fail;
324 
325 	__force_fw_fetch_failures(uc_fw, -EINVAL);
326 	__force_fw_fetch_failures(uc_fw, -ESTALE);
327 
328 	err = request_firmware(&fw, uc_fw->path, dev);
329 	if (err)
330 		goto fail;
331 
332 	/* Check the size of the blob before examining buffer contents */
333 	if (unlikely(fw->size < sizeof(struct uc_css_header))) {
334 		drm_warn(&i915->drm, "%s firmware %s: invalid size: %zu < %zu\n",
335 			 intel_uc_fw_type_repr(uc_fw->type), uc_fw->path,
336 			 fw->size, sizeof(struct uc_css_header));
337 		err = -ENODATA;
338 		goto fail;
339 	}
340 
341 	css = (struct uc_css_header *)fw->data;
342 
343 	/* Check integrity of size values inside CSS header */
344 	size = (css->header_size_dw - css->key_size_dw - css->modulus_size_dw -
345 		css->exponent_size_dw) * sizeof(u32);
346 	if (unlikely(size != sizeof(struct uc_css_header))) {
347 		drm_warn(&i915->drm,
348 			 "%s firmware %s: unexpected header size: %zu != %zu\n",
349 			 intel_uc_fw_type_repr(uc_fw->type), uc_fw->path,
350 			 fw->size, sizeof(struct uc_css_header));
351 		err = -EPROTO;
352 		goto fail;
353 	}
354 
355 	/* uCode size must calculated from other sizes */
356 	uc_fw->ucode_size = (css->size_dw - css->header_size_dw) * sizeof(u32);
357 
358 	/* now RSA */
359 	uc_fw->rsa_size = css->key_size_dw * sizeof(u32);
360 
361 	/* At least, it should have header, uCode and RSA. Size of all three. */
362 	size = sizeof(struct uc_css_header) + uc_fw->ucode_size + uc_fw->rsa_size;
363 	if (unlikely(fw->size < size)) {
364 		drm_warn(&i915->drm, "%s firmware %s: invalid size: %zu < %zu\n",
365 			 intel_uc_fw_type_repr(uc_fw->type), uc_fw->path,
366 			 fw->size, size);
367 		err = -ENOEXEC;
368 		goto fail;
369 	}
370 
371 	/* Sanity check whether this fw is not larger than whole WOPCM memory */
372 	size = __intel_uc_fw_get_upload_size(uc_fw);
373 	if (unlikely(size >= i915->wopcm.size)) {
374 		drm_warn(&i915->drm, "%s firmware %s: invalid size: %zu > %zu\n",
375 			 intel_uc_fw_type_repr(uc_fw->type), uc_fw->path,
376 			 size, (size_t)i915->wopcm.size);
377 		err = -E2BIG;
378 		goto fail;
379 	}
380 
381 	/* Get version numbers from the CSS header */
382 	uc_fw->major_ver_found = FIELD_GET(CSS_SW_VERSION_UC_MAJOR,
383 					   css->sw_version);
384 	uc_fw->minor_ver_found = FIELD_GET(CSS_SW_VERSION_UC_MINOR,
385 					   css->sw_version);
386 
387 	if (uc_fw->major_ver_found != uc_fw->major_ver_wanted ||
388 	    uc_fw->minor_ver_found < uc_fw->minor_ver_wanted) {
389 		drm_notice(&i915->drm, "%s firmware %s: unexpected version: %u.%u != %u.%u\n",
390 			   intel_uc_fw_type_repr(uc_fw->type), uc_fw->path,
391 			   uc_fw->major_ver_found, uc_fw->minor_ver_found,
392 			   uc_fw->major_ver_wanted, uc_fw->minor_ver_wanted);
393 		if (!intel_uc_fw_is_overridden(uc_fw)) {
394 			err = -ENOEXEC;
395 			goto fail;
396 		}
397 	}
398 
399 	if (uc_fw->type == INTEL_UC_FW_TYPE_GUC)
400 		uc_fw->private_data_size = css->private_data_size;
401 
402 	if (HAS_LMEM(i915)) {
403 		obj = i915_gem_object_create_lmem_from_data(i915, fw->data, fw->size);
404 		if (!IS_ERR(obj))
405 			obj->flags |= I915_BO_ALLOC_PM_EARLY;
406 	} else {
407 		obj = i915_gem_object_create_shmem_from_data(i915, fw->data, fw->size);
408 	}
409 
410 	if (IS_ERR(obj)) {
411 		err = PTR_ERR(obj);
412 		goto fail;
413 	}
414 
415 	uc_fw->obj = obj;
416 	uc_fw->size = fw->size;
417 	intel_uc_fw_change_status(uc_fw, INTEL_UC_FIRMWARE_AVAILABLE);
418 
419 	release_firmware(fw);
420 	return 0;
421 
422 fail:
423 	intel_uc_fw_change_status(uc_fw, err == -ENOENT ?
424 				  INTEL_UC_FIRMWARE_MISSING :
425 				  INTEL_UC_FIRMWARE_ERROR);
426 
427 	drm_notice(&i915->drm, "%s firmware %s: fetch failed with error %d\n",
428 		   intel_uc_fw_type_repr(uc_fw->type), uc_fw->path, err);
429 	drm_info(&i915->drm, "%s firmware(s) can be downloaded from %s\n",
430 		 intel_uc_fw_type_repr(uc_fw->type), INTEL_UC_FIRMWARE_URL);
431 
432 	release_firmware(fw);		/* OK even if fw is NULL */
433 	return err;
434 }
435 
436 static u32 uc_fw_ggtt_offset(struct intel_uc_fw *uc_fw)
437 {
438 	struct i915_ggtt *ggtt = __uc_fw_to_gt(uc_fw)->ggtt;
439 	struct drm_mm_node *node = &ggtt->uc_fw;
440 
441 	GEM_BUG_ON(!drm_mm_node_allocated(node));
442 	GEM_BUG_ON(upper_32_bits(node->start));
443 	GEM_BUG_ON(upper_32_bits(node->start + node->size - 1));
444 
445 	return lower_32_bits(node->start);
446 }
447 
448 static void uc_fw_bind_ggtt(struct intel_uc_fw *uc_fw)
449 {
450 	struct drm_i915_gem_object *obj = uc_fw->obj;
451 	struct i915_ggtt *ggtt = __uc_fw_to_gt(uc_fw)->ggtt;
452 	struct i915_vma *dummy = &uc_fw->dummy;
453 	u32 pte_flags = 0;
454 
455 	dummy->node.start = uc_fw_ggtt_offset(uc_fw);
456 	dummy->node.size = obj->base.size;
457 	dummy->pages = obj->mm.pages;
458 	dummy->vm = &ggtt->vm;
459 
460 	GEM_BUG_ON(!i915_gem_object_has_pinned_pages(obj));
461 	GEM_BUG_ON(dummy->node.size > ggtt->uc_fw.size);
462 
463 	/* uc_fw->obj cache domains were not controlled across suspend */
464 	if (i915_gem_object_has_struct_page(obj))
465 		drm_clflush_sg(dummy->pages);
466 
467 	if (i915_gem_object_is_lmem(obj))
468 		pte_flags |= PTE_LM;
469 
470 	ggtt->vm.insert_entries(&ggtt->vm, dummy, I915_CACHE_NONE, pte_flags);
471 }
472 
473 static void uc_fw_unbind_ggtt(struct intel_uc_fw *uc_fw)
474 {
475 	struct drm_i915_gem_object *obj = uc_fw->obj;
476 	struct i915_ggtt *ggtt = __uc_fw_to_gt(uc_fw)->ggtt;
477 	u64 start = uc_fw_ggtt_offset(uc_fw);
478 
479 	ggtt->vm.clear_range(&ggtt->vm, start, obj->base.size);
480 }
481 
/*
 * DMA the firmware image (CSS header + uCode) from its GGTT mapping into
 * WOPCM at @dst_offset and wait for the transfer to complete.
 *
 * Runs with forcewake held and uses the _fw register accessors throughout.
 * Returns 0 on success, -ETIMEDOUT if the DMA does not finish in time (or
 * when that error is injected).
 */
static int uc_fw_xfer(struct intel_uc_fw *uc_fw, u32 dst_offset, u32 dma_flags)
{
	struct intel_gt *gt = __uc_fw_to_gt(uc_fw);
	struct intel_uncore *uncore = gt->uncore;
	u64 offset;
	int ret;

	ret = i915_inject_probe_error(gt->i915, -ETIMEDOUT);
	if (ret)
		return ret;

	intel_uncore_forcewake_get(uncore, FORCEWAKE_ALL);

	/* Set the source address for the uCode */
	offset = uc_fw_ggtt_offset(uc_fw);
	GEM_BUG_ON(upper_32_bits(offset) & 0xFFFF0000);
	intel_uncore_write_fw(uncore, DMA_ADDR_0_LOW, lower_32_bits(offset));
	intel_uncore_write_fw(uncore, DMA_ADDR_0_HIGH, upper_32_bits(offset));

	/* Set the DMA destination */
	intel_uncore_write_fw(uncore, DMA_ADDR_1_LOW, dst_offset);
	intel_uncore_write_fw(uncore, DMA_ADDR_1_HIGH, DMA_ADDRESS_SPACE_WOPCM);

	/*
	 * Set the transfer size. The header plus uCode will be copied to WOPCM
	 * via DMA, excluding any other components
	 */
	intel_uncore_write_fw(uncore, DMA_COPY_SIZE,
			      sizeof(struct uc_css_header) + uc_fw->ucode_size);

	/* Start the DMA */
	intel_uncore_write_fw(uncore, DMA_CTRL,
			      _MASKED_BIT_ENABLE(dma_flags | START_DMA));

	/* Wait for DMA to finish */
	ret = intel_wait_for_register_fw(uncore, DMA_CTRL, START_DMA, 0, 100);
	if (ret)
		drm_err(&gt->i915->drm, "DMA for %s fw failed, DMA_CTRL=%u\n",
			intel_uc_fw_type_repr(uc_fw->type),
			intel_uncore_read_fw(uncore, DMA_CTRL));

	/* Disable the bits once DMA is over */
	intel_uncore_write_fw(uncore, DMA_CTRL, _MASKED_BIT_DISABLE(dma_flags));

	intel_uncore_forcewake_put(uncore, FORCEWAKE_ALL);

	return ret;
}
530 
531 /**
532  * intel_uc_fw_upload - load uC firmware using custom loader
533  * @uc_fw: uC firmware
534  * @dst_offset: destination offset
535  * @dma_flags: flags for flags for dma ctrl
536  *
537  * Loads uC firmware and updates internal flags.
538  *
539  * Return: 0 on success, non-zero on failure.
540  */
541 int intel_uc_fw_upload(struct intel_uc_fw *uc_fw, u32 dst_offset, u32 dma_flags)
542 {
543 	struct intel_gt *gt = __uc_fw_to_gt(uc_fw);
544 	int err;
545 
546 	/* make sure the status was cleared the last time we reset the uc */
547 	GEM_BUG_ON(intel_uc_fw_is_loaded(uc_fw));
548 
549 	err = i915_inject_probe_error(gt->i915, -ENOEXEC);
550 	if (err)
551 		return err;
552 
553 	if (!intel_uc_fw_is_loadable(uc_fw))
554 		return -ENOEXEC;
555 
556 	/* Call custom loader */
557 	uc_fw_bind_ggtt(uc_fw);
558 	err = uc_fw_xfer(uc_fw, dst_offset, dma_flags);
559 	uc_fw_unbind_ggtt(uc_fw);
560 	if (err)
561 		goto fail;
562 
563 	intel_uc_fw_change_status(uc_fw, INTEL_UC_FIRMWARE_TRANSFERRED);
564 	return 0;
565 
566 fail:
567 	i915_probe_error(gt->i915, "Failed to load %s firmware %s (%d)\n",
568 			 intel_uc_fw_type_repr(uc_fw->type), uc_fw->path,
569 			 err);
570 	intel_uc_fw_change_status(uc_fw, INTEL_UC_FIRMWARE_LOAD_FAIL);
571 	return err;
572 }
573 
574 static inline bool uc_fw_need_rsa_in_memory(struct intel_uc_fw *uc_fw)
575 {
576 	/*
577 	 * The HW reads the GuC RSA from memory if the key size is > 256 bytes,
578 	 * while it reads it from the 64 RSA registers if it is smaller.
579 	 * The HuC RSA is always read from memory.
580 	 */
581 	return uc_fw->type == INTEL_UC_FW_TYPE_HUC || uc_fw->rsa_size > 256;
582 }
583 
584 static int uc_fw_rsa_data_create(struct intel_uc_fw *uc_fw)
585 {
586 	struct intel_gt *gt = __uc_fw_to_gt(uc_fw);
587 	struct i915_vma *vma;
588 	size_t copied;
589 	void *vaddr;
590 	int err;
591 
592 	err = i915_inject_probe_error(gt->i915, -ENXIO);
593 	if (err)
594 		return err;
595 
596 	if (!uc_fw_need_rsa_in_memory(uc_fw))
597 		return 0;
598 
599 	/*
600 	 * uC firmwares will sit above GUC_GGTT_TOP and will not map through
601 	 * GGTT. Unfortunately, this means that the GuC HW cannot perform the uC
602 	 * authentication from memory, as the RSA offset now falls within the
603 	 * GuC inaccessible range. We resort to perma-pinning an additional vma
604 	 * within the accessible range that only contains the RSA signature.
605 	 * The GuC HW can use this extra pinning to perform the authentication
606 	 * since its GGTT offset will be GuC accessible.
607 	 */
608 	GEM_BUG_ON(uc_fw->rsa_size > PAGE_SIZE);
609 	vma = intel_guc_allocate_vma(&gt->uc.guc, PAGE_SIZE);
610 	if (IS_ERR(vma))
611 		return PTR_ERR(vma);
612 
613 	vaddr = i915_gem_object_pin_map_unlocked(vma->obj,
614 						 i915_coherent_map_type(gt->i915, vma->obj, true));
615 	if (IS_ERR(vaddr)) {
616 		i915_vma_unpin_and_release(&vma, 0);
617 		err = PTR_ERR(vaddr);
618 		goto unpin_out;
619 	}
620 
621 	copied = intel_uc_fw_copy_rsa(uc_fw, vaddr, vma->size);
622 	i915_gem_object_unpin_map(vma->obj);
623 
624 	if (copied < uc_fw->rsa_size) {
625 		err = -ENOMEM;
626 		goto unpin_out;
627 	}
628 
629 	uc_fw->rsa_data = vma;
630 
631 	return 0;
632 
633 unpin_out:
634 	i915_vma_unpin_and_release(&vma, 0);
635 	return err;
636 }
637 
/* Release the perma-pinned RSA vma created by uc_fw_rsa_data_create(). */
static void uc_fw_rsa_data_destroy(struct intel_uc_fw *uc_fw)
{
	i915_vma_unpin_and_release(&uc_fw->rsa_data, 0);
}
642 
643 int intel_uc_fw_init(struct intel_uc_fw *uc_fw)
644 {
645 	int err;
646 
647 	/* this should happen before the load! */
648 	GEM_BUG_ON(intel_uc_fw_is_loaded(uc_fw));
649 
650 	if (!intel_uc_fw_is_available(uc_fw))
651 		return -ENOEXEC;
652 
653 	err = i915_gem_object_pin_pages_unlocked(uc_fw->obj);
654 	if (err) {
655 		DRM_DEBUG_DRIVER("%s fw pin-pages err=%d\n",
656 				 intel_uc_fw_type_repr(uc_fw->type), err);
657 		goto out;
658 	}
659 
660 	err = uc_fw_rsa_data_create(uc_fw);
661 	if (err) {
662 		DRM_DEBUG_DRIVER("%s fw rsa data creation failed, err=%d\n",
663 				 intel_uc_fw_type_repr(uc_fw->type), err);
664 		goto out_unpin;
665 	}
666 
667 	return 0;
668 
669 out_unpin:
670 	i915_gem_object_unpin_pages(uc_fw->obj);
671 out:
672 	intel_uc_fw_change_status(uc_fw, INTEL_UC_FIRMWARE_INIT_FAIL);
673 	return err;
674 }
675 
/*
 * Undo intel_uc_fw_init(): release the RSA data vma, unpin the firmware
 * object's pages and return the firmware to the AVAILABLE state.
 */
void intel_uc_fw_fini(struct intel_uc_fw *uc_fw)
{
	uc_fw_rsa_data_destroy(uc_fw);

	if (i915_gem_object_has_pinned_pages(uc_fw->obj))
		i915_gem_object_unpin_pages(uc_fw->obj);

	intel_uc_fw_change_status(uc_fw, INTEL_UC_FIRMWARE_AVAILABLE);
}
685 
686 /**
687  * intel_uc_fw_cleanup_fetch - cleanup uC firmware
688  * @uc_fw: uC firmware
689  *
690  * Cleans up uC firmware by releasing the firmware GEM obj.
691  */
692 void intel_uc_fw_cleanup_fetch(struct intel_uc_fw *uc_fw)
693 {
694 	if (!intel_uc_fw_is_available(uc_fw))
695 		return;
696 
697 	i915_gem_object_put(fetch_and_zero(&uc_fw->obj));
698 
699 	intel_uc_fw_change_status(uc_fw, INTEL_UC_FIRMWARE_SELECTED);
700 }
701 
702 /**
703  * intel_uc_fw_copy_rsa - copy fw RSA to buffer
704  *
705  * @uc_fw: uC firmware
706  * @dst: dst buffer
707  * @max_len: max number of bytes to copy
708  *
709  * Return: number of copied bytes.
710  */
711 size_t intel_uc_fw_copy_rsa(struct intel_uc_fw *uc_fw, void *dst, u32 max_len)
712 {
713 	struct intel_memory_region *mr = uc_fw->obj->mm.region;
714 	u32 size = min_t(u32, uc_fw->rsa_size, max_len);
715 	u32 offset = sizeof(struct uc_css_header) + uc_fw->ucode_size;
716 	struct sgt_iter iter;
717 	size_t count = 0;
718 	int idx;
719 
720 	/* Called during reset handling, must be atomic [no fs_reclaim] */
721 	GEM_BUG_ON(!intel_uc_fw_is_available(uc_fw));
722 
723 	idx = offset >> PAGE_SHIFT;
724 	offset = offset_in_page(offset);
725 	if (i915_gem_object_has_struct_page(uc_fw->obj)) {
726 		struct page *page;
727 
728 		for_each_sgt_page(page, iter, uc_fw->obj->mm.pages) {
729 			u32 len = min_t(u32, size, PAGE_SIZE - offset);
730 			void *vaddr;
731 
732 			if (idx > 0) {
733 				idx--;
734 				continue;
735 			}
736 
737 			vaddr = kmap_atomic(page);
738 			memcpy(dst, vaddr + offset, len);
739 			kunmap_atomic(vaddr);
740 
741 			offset = 0;
742 			dst += len;
743 			size -= len;
744 			count += len;
745 			if (!size)
746 				break;
747 		}
748 	} else {
749 		dma_addr_t addr;
750 
751 		for_each_sgt_daddr(addr, iter, uc_fw->obj->mm.pages) {
752 			u32 len = min_t(u32, size, PAGE_SIZE - offset);
753 			void __iomem *vaddr;
754 
755 			if (idx > 0) {
756 				idx--;
757 				continue;
758 			}
759 
760 			vaddr = io_mapping_map_atomic_wc(&mr->iomap,
761 							 addr - mr->region.start);
762 			memcpy_fromio(dst, vaddr + offset, len);
763 			io_mapping_unmap_atomic(vaddr);
764 
765 			offset = 0;
766 			dst += len;
767 			size -= len;
768 			count += len;
769 			if (!size)
770 				break;
771 		}
772 	}
773 
774 	return count;
775 }
776 
777 /**
778  * intel_uc_fw_dump - dump information about uC firmware
779  * @uc_fw: uC firmware
780  * @p: the &drm_printer
781  *
782  * Pretty printer for uC firmware.
783  */
784 void intel_uc_fw_dump(const struct intel_uc_fw *uc_fw, struct drm_printer *p)
785 {
786 	drm_printf(p, "%s firmware: %s\n",
787 		   intel_uc_fw_type_repr(uc_fw->type), uc_fw->path);
788 	drm_printf(p, "\tstatus: %s\n",
789 		   intel_uc_fw_status_repr(uc_fw->status));
790 	drm_printf(p, "\tversion: wanted %u.%u, found %u.%u\n",
791 		   uc_fw->major_ver_wanted, uc_fw->minor_ver_wanted,
792 		   uc_fw->major_ver_found, uc_fw->minor_ver_found);
793 	drm_printf(p, "\tuCode: %u bytes\n", uc_fw->ucode_size);
794 	drm_printf(p, "\tRSA: %u bytes\n", uc_fw->rsa_size);
795 }
796