1 // SPDX-License-Identifier: MIT
2 /*
3  * Copyright © 2016-2019 Intel Corporation
4  */
5 
6 #include <linux/bitfield.h>
7 #include <linux/firmware.h>
8 #include <linux/highmem.h>
9 
10 #include <drm/drm_cache.h>
11 #include <drm/drm_print.h>
12 
13 #include "gem/i915_gem_lmem.h"
14 #include "intel_uc_fw.h"
15 #include "intel_uc_fw_abi.h"
16 #include "i915_drv.h"
17 #include "i915_reg.h"
18 
19 static inline struct intel_gt *
20 ____uc_fw_to_gt(struct intel_uc_fw *uc_fw, enum intel_uc_fw_type type)
21 {
22 	if (type == INTEL_UC_FW_TYPE_GUC)
23 		return container_of(uc_fw, struct intel_gt, uc.guc.fw);
24 
25 	GEM_BUG_ON(type != INTEL_UC_FW_TYPE_HUC);
26 	return container_of(uc_fw, struct intel_gt, uc.huc.fw);
27 }
28 
/* Resolve the GT embedding @uc_fw using the type recorded at init time. */
static inline struct intel_gt *__uc_fw_to_gt(struct intel_uc_fw *uc_fw)
{
	/* uc_fw->type is only valid once intel_uc_fw_init_early() has run */
	GEM_BUG_ON(uc_fw->status == INTEL_UC_FIRMWARE_UNINITIALIZED);
	return ____uc_fw_to_gt(uc_fw, uc_fw->type);
}
34 
35 #ifdef CONFIG_DRM_I915_DEBUG_GUC
36 void intel_uc_fw_change_status(struct intel_uc_fw *uc_fw,
37 			       enum intel_uc_fw_status status)
38 {
39 	uc_fw->__status =  status;
40 	drm_dbg(&__uc_fw_to_gt(uc_fw)->i915->drm,
41 		"%s firmware -> %s\n",
42 		intel_uc_fw_type_repr(uc_fw->type),
43 		status == INTEL_UC_FIRMWARE_SELECTED ?
44 		uc_fw->path : intel_uc_fw_status_repr(status));
45 }
46 #endif
47 
/*
 * List of required GuC and HuC binaries per-platform.
 * Must be ordered based on platform + revid, from newer to older.
 *
 * Note that RKL and ADL-S have the same GuC/HuC device ID's and use the same
 * firmware as TGL.
 *
 * Each fw_def() row is (platform, first-revid, blob descriptor); the tables
 * are expanded by __uc_fw_auto_select(), which picks the first row matching
 * the running device.
 */
#define INTEL_GUC_FIRMWARE_DEFS(fw_def, guc_def) \
	fw_def(DG2,          0, guc_def(dg2,  70, 1, 2)) \
	fw_def(ALDERLAKE_P,  0, guc_def(adlp, 70, 1, 1)) \
	fw_def(ALDERLAKE_S,  0, guc_def(tgl,  70, 1, 1)) \
	fw_def(DG1,          0, guc_def(dg1,  70, 1, 1)) \
	fw_def(ROCKETLAKE,   0, guc_def(tgl,  70, 1, 1)) \
	fw_def(TIGERLAKE,    0, guc_def(tgl,  70, 1, 1)) \
	fw_def(JASPERLAKE,   0, guc_def(ehl,  70, 1, 1)) \
	fw_def(ELKHARTLAKE,  0, guc_def(ehl,  70, 1, 1)) \
	fw_def(ICELAKE,      0, guc_def(icl,  70, 1, 1)) \
	fw_def(COMETLAKE,    5, guc_def(cml,  70, 1, 1)) \
	fw_def(COMETLAKE,    0, guc_def(kbl,  70, 1, 1)) \
	fw_def(COFFEELAKE,   0, guc_def(kbl,  70, 1, 1)) \
	fw_def(GEMINILAKE,   0, guc_def(glk,  70, 1, 1)) \
	fw_def(KABYLAKE,     0, guc_def(kbl,  70, 1, 1)) \
	fw_def(BROXTON,      0, guc_def(bxt,  70, 1, 1)) \
	fw_def(SKYLAKE,      0, guc_def(skl,  70, 1, 1))

/* HuC rows: the third version number is a build number, not a patch level */
#define INTEL_HUC_FIRMWARE_DEFS(fw_def, huc_def) \
	fw_def(ALDERLAKE_P,  0, huc_def(tgl,  7, 9, 3)) \
	fw_def(ALDERLAKE_S,  0, huc_def(tgl,  7, 9, 3)) \
	fw_def(DG1,          0, huc_def(dg1,  7, 9, 3)) \
	fw_def(ROCKETLAKE,   0, huc_def(tgl,  7, 9, 3)) \
	fw_def(TIGERLAKE,    0, huc_def(tgl,  7, 9, 3)) \
	fw_def(JASPERLAKE,   0, huc_def(ehl,  9, 0, 0)) \
	fw_def(ELKHARTLAKE,  0, huc_def(ehl,  9, 0, 0)) \
	fw_def(ICELAKE,      0, huc_def(icl,  9, 0, 0)) \
	fw_def(COMETLAKE,    5, huc_def(cml,  4, 0, 0)) \
	fw_def(COMETLAKE,    0, huc_def(kbl,  4, 0, 0)) \
	fw_def(COFFEELAKE,   0, huc_def(kbl,  4, 0, 0)) \
	fw_def(GEMINILAKE,   0, huc_def(glk,  4, 0, 0)) \
	fw_def(KABYLAKE,     0, huc_def(kbl,  4, 0, 0)) \
	fw_def(BROXTON,      0, huc_def(bxt,  2, 0, 0)) \
	fw_def(SKYLAKE,      0, huc_def(skl,  2, 0, 0))
89 
/*
 * Build an "i915/<prefix>_<uc>_<major>.<minor>.<patch>.bin" firmware path.
 * The uC infix is passed as an already-quoted string; the version numbers
 * are stringified at expansion time.
 */
#define __MAKE_UC_FW_PATH(prefix_, name_, major_, minor_, patch_) \
	"i915/" \
	__stringify(prefix_) name_ \
	__stringify(major_) "." \
	__stringify(minor_) "." \
	__stringify(patch_) ".bin"

#define MAKE_GUC_FW_PATH(prefix_, major_, minor_, patch_) \
	__MAKE_UC_FW_PATH(prefix_, "_guc_", major_, minor_, patch_)

/* HuC paths end in a build number rather than a patch level */
#define MAKE_HUC_FW_PATH(prefix_, major_, minor_, bld_num_) \
	__MAKE_UC_FW_PATH(prefix_, "_huc_", major_, minor_, bld_num_)

/* All blobs need to be declared via MODULE_FIRMWARE() */
#define INTEL_UC_MODULE_FW(platform_, revid_, uc_) \
	MODULE_FIRMWARE(uc_);
106 
107 INTEL_GUC_FIRMWARE_DEFS(INTEL_UC_MODULE_FW, MAKE_GUC_FW_PATH)
108 INTEL_HUC_FIRMWARE_DEFS(INTEL_UC_MODULE_FW, MAKE_HUC_FW_PATH)
109 
/* The below structs and macros are used to iterate across the list of blobs */
struct __packed uc_fw_blob {
	u8 major;		/* major version the driver will require */
	u8 minor;		/* minimum minor version accepted */
	const char *path;	/* path passed to request_firmware() */
};
116 
/* Initializer for a struct uc_fw_blob entry */
#define UC_FW_BLOB(major_, minor_, path_) \
	{ .major = major_, .minor = minor_, .path = path_ }

/* GuC blob descriptor: wanted version plus the matching firmware path */
#define GUC_FW_BLOB(prefix_, major_, minor_, patch_) \
	UC_FW_BLOB(major_, minor_, \
		   MAKE_GUC_FW_PATH(prefix_, major_, minor_, patch_))

/* HuC blob descriptor: as above, with a build number as the third field */
#define HUC_FW_BLOB(prefix_, major_, minor_, bld_num_) \
	UC_FW_BLOB(major_, minor_, \
		   MAKE_HUC_FW_PATH(prefix_, major_, minor_, bld_num_))
127 
/* One row of the per-platform firmware tables */
struct __packed uc_fw_platform_requirement {
	enum intel_platform p;
	u8 rev; /* first platform rev using this FW */
	const struct uc_fw_blob blob; /* blob selected for this platform/rev */
};

/* Expand a fw_def() table row into a uc_fw_platform_requirement entry */
#define MAKE_FW_LIST(platform_, revid_, uc_) \
{ \
	.p = INTEL_##platform_, \
	.rev = revid_, \
	.blob = uc_, \
},

/* Per-uC-type view of the blob tables used by __uc_fw_auto_select() */
struct fw_blobs_by_type {
	const struct uc_fw_platform_requirement *blobs;
	u32 count;
};
145 
/*
 * Select the firmware blob for @uc_fw based on the device's platform and
 * revision. On a match the blob path and the wanted major/minor versions
 * are recorded in @uc_fw; otherwise uc_fw->path stays NULL, which the
 * caller interprets as "firmware not supported on this platform".
 */
static void
__uc_fw_auto_select(struct drm_i915_private *i915, struct intel_uc_fw *uc_fw)
{
	static const struct uc_fw_platform_requirement blobs_guc[] = {
		INTEL_GUC_FIRMWARE_DEFS(MAKE_FW_LIST, GUC_FW_BLOB)
	};
	static const struct uc_fw_platform_requirement blobs_huc[] = {
		INTEL_HUC_FIRMWARE_DEFS(MAKE_FW_LIST, HUC_FW_BLOB)
	};
	static const struct fw_blobs_by_type blobs_all[INTEL_UC_FW_NUM_TYPES] = {
		[INTEL_UC_FW_TYPE_GUC] = { blobs_guc, ARRAY_SIZE(blobs_guc) },
		[INTEL_UC_FW_TYPE_HUC] = { blobs_huc, ARRAY_SIZE(blobs_huc) },
	};
	const struct uc_fw_platform_requirement *fw_blobs;
	enum intel_platform p = INTEL_INFO(i915)->platform;
	u32 fw_count;
	u8 rev = INTEL_REVID(i915);
	int i;

	GEM_BUG_ON(uc_fw->type >= ARRAY_SIZE(blobs_all));
	fw_blobs = blobs_all[uc_fw->type].blobs;
	fw_count = blobs_all[uc_fw->type].count;

	/*
	 * The table is ordered newest platform/rev first, so the scan can
	 * stop as soon as the remaining entries are older than our platform.
	 */
	for (i = 0; i < fw_count && p <= fw_blobs[i].p; i++) {
		if (p == fw_blobs[i].p && rev >= fw_blobs[i].rev) {
			const struct uc_fw_blob *blob = &fw_blobs[i].blob;
			uc_fw->path = blob->path;
			uc_fw->major_ver_wanted = blob->major;
			uc_fw->minor_ver_wanted = blob->minor;
			break;
		}
	}

	/* make sure the list is ordered as expected */
	if (IS_ENABLED(CONFIG_DRM_I915_SELFTEST)) {
		for (i = 1; i < fw_count; i++) {
			/* strictly descending platform order is valid */
			if (fw_blobs[i].p < fw_blobs[i - 1].p)
				continue;

			/* same platform: revs must also be descending */
			if (fw_blobs[i].p == fw_blobs[i - 1].p &&
			    fw_blobs[i].rev < fw_blobs[i - 1].rev)
				continue;

			pr_err("invalid FW blob order: %s r%u comes before %s r%u\n",
			       intel_platform_name(fw_blobs[i - 1].p),
			       fw_blobs[i - 1].rev,
			       intel_platform_name(fw_blobs[i].p),
			       fw_blobs[i].rev);

			/* invalidate the selection so loading is aborted */
			uc_fw->path = NULL;
		}
	}
}
199 
200 static const char *__override_guc_firmware_path(struct drm_i915_private *i915)
201 {
202 	if (i915->params.enable_guc & ENABLE_GUC_MASK)
203 		return i915->params.guc_firmware_path;
204 	return "";
205 }
206 
207 static const char *__override_huc_firmware_path(struct drm_i915_private *i915)
208 {
209 	if (i915->params.enable_guc & ENABLE_GUC_LOAD_HUC)
210 		return i915->params.huc_firmware_path;
211 	return "";
212 }
213 
214 static void __uc_fw_user_override(struct drm_i915_private *i915, struct intel_uc_fw *uc_fw)
215 {
216 	const char *path = NULL;
217 
218 	switch (uc_fw->type) {
219 	case INTEL_UC_FW_TYPE_GUC:
220 		path = __override_guc_firmware_path(i915);
221 		break;
222 	case INTEL_UC_FW_TYPE_HUC:
223 		path = __override_huc_firmware_path(i915);
224 		break;
225 	}
226 
227 	if (unlikely(path)) {
228 		uc_fw->path = path;
229 		uc_fw->user_overridden = true;
230 	}
231 }
232 
233 /**
234  * intel_uc_fw_init_early - initialize the uC object and select the firmware
235  * @uc_fw: uC firmware
236  * @type: type of uC
237  *
238  * Initialize the state of our uC object and relevant tracking and select the
239  * firmware to fetch and load.
240  */
241 void intel_uc_fw_init_early(struct intel_uc_fw *uc_fw,
242 			    enum intel_uc_fw_type type)
243 {
244 	struct drm_i915_private *i915 = ____uc_fw_to_gt(uc_fw, type)->i915;
245 
246 	/*
247 	 * we use FIRMWARE_UNINITIALIZED to detect checks against uc_fw->status
248 	 * before we're looked at the HW caps to see if we have uc support
249 	 */
250 	BUILD_BUG_ON(INTEL_UC_FIRMWARE_UNINITIALIZED);
251 	GEM_BUG_ON(uc_fw->status);
252 	GEM_BUG_ON(uc_fw->path);
253 
254 	uc_fw->type = type;
255 
256 	if (HAS_GT_UC(i915)) {
257 		__uc_fw_auto_select(i915, uc_fw);
258 		__uc_fw_user_override(i915, uc_fw);
259 	}
260 
261 	intel_uc_fw_change_status(uc_fw, uc_fw->path ? *uc_fw->path ?
262 				  INTEL_UC_FIRMWARE_SELECTED :
263 				  INTEL_UC_FIRMWARE_DISABLED :
264 				  INTEL_UC_FIRMWARE_NOT_SUPPORTED);
265 }
266 
/*
 * Fault injection for the firmware fetch path. With fault injection
 * compiled out, i915_inject_probe_error() is constant 0 and this whole
 * function is a no-op.
 *
 * Each i915_inject_probe_error() call consumes one injection slot, so on
 * any given probe attempt at most one branch below fires, exercising a
 * different fetch-failure mode each time. @e == -EINVAL marks the forced
 * failure as a user override, so the fetch code also exercises the
 * "don't fail the probe for overridden blobs" paths.
 */
static void __force_fw_fetch_failures(struct intel_uc_fw *uc_fw, int e)
{
	struct drm_i915_private *i915 = __uc_fw_to_gt(uc_fw)->i915;
	bool user = e == -EINVAL;

	if (i915_inject_probe_error(i915, e)) {
		/* non-existing blob */
		uc_fw->path = "<invalid>";
		uc_fw->user_overridden = user;
	} else if (i915_inject_probe_error(i915, e)) {
		/* require next major version */
		uc_fw->major_ver_wanted += 1;
		uc_fw->minor_ver_wanted = 0;
		uc_fw->user_overridden = user;
	} else if (i915_inject_probe_error(i915, e)) {
		/* require next minor version */
		uc_fw->minor_ver_wanted += 1;
		uc_fw->user_overridden = user;
	} else if (uc_fw->major_ver_wanted &&
		   i915_inject_probe_error(i915, e)) {
		/* require prev major version */
		uc_fw->major_ver_wanted -= 1;
		uc_fw->minor_ver_wanted = 0;
		uc_fw->user_overridden = user;
	} else if (uc_fw->minor_ver_wanted &&
		   i915_inject_probe_error(i915, e)) {
		/* require prev minor version - hey, this should work! */
		uc_fw->minor_ver_wanted -= 1;
		uc_fw->user_overridden = user;
	} else if (user && i915_inject_probe_error(i915, e)) {
		/* officially unsupported platform */
		uc_fw->major_ver_wanted = 0;
		uc_fw->minor_ver_wanted = 0;
		uc_fw->user_overridden = true;
	}
}
303 
304 static int check_gsc_manifest(const struct firmware *fw,
305 			      struct intel_uc_fw *uc_fw)
306 {
307 	u32 *dw = (u32 *)fw->data;
308 	u32 version = dw[HUC_GSC_VERSION_DW];
309 
310 	uc_fw->major_ver_found = FIELD_GET(HUC_GSC_MAJOR_VER_MASK, version);
311 	uc_fw->minor_ver_found = FIELD_GET(HUC_GSC_MINOR_VER_MASK, version);
312 
313 	return 0;
314 }
315 
316 static int check_ccs_header(struct drm_i915_private *i915,
317 			    const struct firmware *fw,
318 			    struct intel_uc_fw *uc_fw)
319 {
320 	struct uc_css_header *css;
321 	size_t size;
322 
323 	/* Check the size of the blob before examining buffer contents */
324 	if (unlikely(fw->size < sizeof(struct uc_css_header))) {
325 		drm_warn(&i915->drm, "%s firmware %s: invalid size: %zu < %zu\n",
326 			 intel_uc_fw_type_repr(uc_fw->type), uc_fw->path,
327 			 fw->size, sizeof(struct uc_css_header));
328 		return -ENODATA;
329 	}
330 
331 	css = (struct uc_css_header *)fw->data;
332 
333 	/* Check integrity of size values inside CSS header */
334 	size = (css->header_size_dw - css->key_size_dw - css->modulus_size_dw -
335 		css->exponent_size_dw) * sizeof(u32);
336 	if (unlikely(size != sizeof(struct uc_css_header))) {
337 		drm_warn(&i915->drm,
338 			 "%s firmware %s: unexpected header size: %zu != %zu\n",
339 			 intel_uc_fw_type_repr(uc_fw->type), uc_fw->path,
340 			 fw->size, sizeof(struct uc_css_header));
341 		return -EPROTO;
342 	}
343 
344 	/* uCode size must calculated from other sizes */
345 	uc_fw->ucode_size = (css->size_dw - css->header_size_dw) * sizeof(u32);
346 
347 	/* now RSA */
348 	uc_fw->rsa_size = css->key_size_dw * sizeof(u32);
349 
350 	/* At least, it should have header, uCode and RSA. Size of all three. */
351 	size = sizeof(struct uc_css_header) + uc_fw->ucode_size + uc_fw->rsa_size;
352 	if (unlikely(fw->size < size)) {
353 		drm_warn(&i915->drm, "%s firmware %s: invalid size: %zu < %zu\n",
354 			 intel_uc_fw_type_repr(uc_fw->type), uc_fw->path,
355 			 fw->size, size);
356 		return -ENOEXEC;
357 	}
358 
359 	/* Sanity check whether this fw is not larger than whole WOPCM memory */
360 	size = __intel_uc_fw_get_upload_size(uc_fw);
361 	if (unlikely(size >= i915->wopcm.size)) {
362 		drm_warn(&i915->drm, "%s firmware %s: invalid size: %zu > %zu\n",
363 			 intel_uc_fw_type_repr(uc_fw->type), uc_fw->path,
364 			 size, (size_t)i915->wopcm.size);
365 		return -E2BIG;
366 	}
367 
368 	/* Get version numbers from the CSS header */
369 	uc_fw->major_ver_found = FIELD_GET(CSS_SW_VERSION_UC_MAJOR,
370 					   css->sw_version);
371 	uc_fw->minor_ver_found = FIELD_GET(CSS_SW_VERSION_UC_MINOR,
372 					   css->sw_version);
373 
374 	if (uc_fw->type == INTEL_UC_FW_TYPE_GUC)
375 		uc_fw->private_data_size = css->private_data_size;
376 
377 	return 0;
378 }
379 
/**
 * intel_uc_fw_fetch - fetch uC firmware
 * @uc_fw: uC firmware
 *
 * Fetch uC firmware into GEM obj.
 *
 * On any failure the firmware status is set to MISSING (blob not found) or
 * ERROR (blob found but invalid), so callers can distinguish the two.
 *
 * Return: 0 on success, a negative errno code on failure.
 */
int intel_uc_fw_fetch(struct intel_uc_fw *uc_fw)
{
	struct drm_i915_private *i915 = __uc_fw_to_gt(uc_fw)->i915;
	struct device *dev = i915->drm.dev;
	struct drm_i915_gem_object *obj;
	const struct firmware *fw = NULL;
	int err;

	GEM_BUG_ON(!i915->wopcm.size);
	GEM_BUG_ON(!intel_uc_fw_is_enabled(uc_fw));

	err = i915_inject_probe_error(i915, -ENXIO);
	if (err)
		goto fail;

	/* selftest-only hooks that force the failure paths below */
	__force_fw_fetch_failures(uc_fw, -EINVAL);
	__force_fw_fetch_failures(uc_fw, -ESTALE);

	err = request_firmware(&fw, uc_fw->path, dev);
	if (err)
		goto fail;

	/* GSC-loaded blobs carry a manifest instead of a CSS header */
	if (uc_fw->loaded_via_gsc)
		err = check_gsc_manifest(fw, uc_fw);
	else
		err = check_ccs_header(i915, fw, uc_fw);
	if (err)
		goto fail;

	/*
	 * Major version must match exactly; a newer minor is accepted.
	 * A user-overridden blob only gets the notice and is allowed through.
	 */
	if (uc_fw->major_ver_found != uc_fw->major_ver_wanted ||
	    uc_fw->minor_ver_found < uc_fw->minor_ver_wanted) {
		drm_notice(&i915->drm, "%s firmware %s: unexpected version: %u.%u != %u.%u\n",
			   intel_uc_fw_type_repr(uc_fw->type), uc_fw->path,
			   uc_fw->major_ver_found, uc_fw->minor_ver_found,
			   uc_fw->major_ver_wanted, uc_fw->minor_ver_wanted);
		if (!intel_uc_fw_is_overridden(uc_fw)) {
			err = -ENOEXEC;
			goto fail;
		}
	}

	/* on dGPU keep the image in LMEM, flagged for early PM restore */
	if (HAS_LMEM(i915)) {
		obj = i915_gem_object_create_lmem_from_data(i915, fw->data, fw->size);
		if (!IS_ERR(obj))
			obj->flags |= I915_BO_ALLOC_PM_EARLY;
	} else {
		obj = i915_gem_object_create_shmem_from_data(i915, fw->data, fw->size);
	}

	if (IS_ERR(obj)) {
		err = PTR_ERR(obj);
		goto fail;
	}

	uc_fw->obj = obj;
	uc_fw->size = fw->size;
	intel_uc_fw_change_status(uc_fw, INTEL_UC_FIRMWARE_AVAILABLE);

	release_firmware(fw);
	return 0;

fail:
	intel_uc_fw_change_status(uc_fw, err == -ENOENT ?
				  INTEL_UC_FIRMWARE_MISSING :
				  INTEL_UC_FIRMWARE_ERROR);

	drm_notice(&i915->drm, "%s firmware %s: fetch failed with error %d\n",
		   intel_uc_fw_type_repr(uc_fw->type), uc_fw->path, err);
	drm_info(&i915->drm, "%s firmware(s) can be downloaded from %s\n",
		 intel_uc_fw_type_repr(uc_fw->type), INTEL_UC_FIRMWARE_URL);

	release_firmware(fw);		/* OK even if fw is NULL */
	return err;
}
462 
463 static u32 uc_fw_ggtt_offset(struct intel_uc_fw *uc_fw)
464 {
465 	struct i915_ggtt *ggtt = __uc_fw_to_gt(uc_fw)->ggtt;
466 	struct drm_mm_node *node = &ggtt->uc_fw;
467 
468 	GEM_BUG_ON(!drm_mm_node_allocated(node));
469 	GEM_BUG_ON(upper_32_bits(node->start));
470 	GEM_BUG_ON(upper_32_bits(node->start + node->size - 1));
471 
472 	return lower_32_bits(node->start);
473 }
474 
/*
 * Temporarily map the firmware object into the GGTT at the reserved uc_fw
 * offset so the DMA engine can read it. A dummy vma resource on the uc_fw
 * struct is filled in by hand — no real vma is created or pinned. The
 * mapping is torn down again by uc_fw_unbind_ggtt() after the transfer.
 */
static void uc_fw_bind_ggtt(struct intel_uc_fw *uc_fw)
{
	struct drm_i915_gem_object *obj = uc_fw->obj;
	struct i915_ggtt *ggtt = __uc_fw_to_gt(uc_fw)->ggtt;
	struct i915_vma_resource *dummy = &uc_fw->dummy;
	u32 pte_flags = 0;

	dummy->start = uc_fw_ggtt_offset(uc_fw);
	dummy->node_size = obj->base.size;
	dummy->bi.pages = obj->mm.pages;

	GEM_BUG_ON(!i915_gem_object_has_pinned_pages(obj));
	GEM_BUG_ON(dummy->node_size > ggtt->uc_fw.size);

	/* uc_fw->obj cache domains were not controlled across suspend */
	if (i915_gem_object_has_struct_page(obj))
		drm_clflush_sg(dummy->bi.pages);

	/* LMEM-backed pages need the local-memory PTE bit */
	if (i915_gem_object_is_lmem(obj))
		pte_flags |= PTE_LM;

	/* use the raw (uninstrumented) path when the vm provides one */
	if (ggtt->vm.raw_insert_entries)
		ggtt->vm.raw_insert_entries(&ggtt->vm, dummy, I915_CACHE_NONE, pte_flags);
	else
		ggtt->vm.insert_entries(&ggtt->vm, dummy, I915_CACHE_NONE, pte_flags);
}
501 
502 static void uc_fw_unbind_ggtt(struct intel_uc_fw *uc_fw)
503 {
504 	struct drm_i915_gem_object *obj = uc_fw->obj;
505 	struct i915_ggtt *ggtt = __uc_fw_to_gt(uc_fw)->ggtt;
506 	u64 start = uc_fw_ggtt_offset(uc_fw);
507 
508 	ggtt->vm.clear_range(&ggtt->vm, start, obj->base.size);
509 }
510 
/*
 * DMA the firmware image (CSS header + uCode, excluding the RSA and any
 * other trailing components) from its GGTT mapping into WOPCM at
 * @dst_offset. The whole sequence runs with forcewake held and uses the
 * _fw register accessors. Returns 0 on success, or the error from waiting
 * for the DMA engine to clear START_DMA.
 */
static int uc_fw_xfer(struct intel_uc_fw *uc_fw, u32 dst_offset, u32 dma_flags)
{
	struct intel_gt *gt = __uc_fw_to_gt(uc_fw);
	struct intel_uncore *uncore = gt->uncore;
	u64 offset;
	int ret;

	ret = i915_inject_probe_error(gt->i915, -ETIMEDOUT);
	if (ret)
		return ret;

	intel_uncore_forcewake_get(uncore, FORCEWAKE_ALL);

	/* Set the source address for the uCode */
	offset = uc_fw_ggtt_offset(uc_fw);
	/* HW only takes a 48-bit address: upper 16 bits must be clear */
	GEM_BUG_ON(upper_32_bits(offset) & 0xFFFF0000);
	intel_uncore_write_fw(uncore, DMA_ADDR_0_LOW, lower_32_bits(offset));
	intel_uncore_write_fw(uncore, DMA_ADDR_0_HIGH, upper_32_bits(offset));

	/* Set the DMA destination */
	intel_uncore_write_fw(uncore, DMA_ADDR_1_LOW, dst_offset);
	intel_uncore_write_fw(uncore, DMA_ADDR_1_HIGH, DMA_ADDRESS_SPACE_WOPCM);

	/*
	 * Set the transfer size. The header plus uCode will be copied to WOPCM
	 * via DMA, excluding any other components
	 */
	intel_uncore_write_fw(uncore, DMA_COPY_SIZE,
			      sizeof(struct uc_css_header) + uc_fw->ucode_size);

	/* Start the DMA */
	intel_uncore_write_fw(uncore, DMA_CTRL,
			      _MASKED_BIT_ENABLE(dma_flags | START_DMA));

	/* Wait for DMA to finish */
	ret = intel_wait_for_register_fw(uncore, DMA_CTRL, START_DMA, 0, 100);
	if (ret)
		drm_err(&gt->i915->drm, "DMA for %s fw failed, DMA_CTRL=%u\n",
			intel_uc_fw_type_repr(uc_fw->type),
			intel_uncore_read_fw(uncore, DMA_CTRL));

	/* Disable the bits once DMA is over */
	intel_uncore_write_fw(uncore, DMA_CTRL, _MASKED_BIT_DISABLE(dma_flags));

	intel_uncore_forcewake_put(uncore, FORCEWAKE_ALL);

	return ret;
}
559 
560 /**
561  * intel_uc_fw_upload - load uC firmware using custom loader
562  * @uc_fw: uC firmware
563  * @dst_offset: destination offset
564  * @dma_flags: flags for flags for dma ctrl
565  *
566  * Loads uC firmware and updates internal flags.
567  *
568  * Return: 0 on success, non-zero on failure.
569  */
570 int intel_uc_fw_upload(struct intel_uc_fw *uc_fw, u32 dst_offset, u32 dma_flags)
571 {
572 	struct intel_gt *gt = __uc_fw_to_gt(uc_fw);
573 	int err;
574 
575 	/* make sure the status was cleared the last time we reset the uc */
576 	GEM_BUG_ON(intel_uc_fw_is_loaded(uc_fw));
577 
578 	err = i915_inject_probe_error(gt->i915, -ENOEXEC);
579 	if (err)
580 		return err;
581 
582 	if (!intel_uc_fw_is_loadable(uc_fw))
583 		return -ENOEXEC;
584 
585 	/* Call custom loader */
586 	uc_fw_bind_ggtt(uc_fw);
587 	err = uc_fw_xfer(uc_fw, dst_offset, dma_flags);
588 	uc_fw_unbind_ggtt(uc_fw);
589 	if (err)
590 		goto fail;
591 
592 	intel_uc_fw_change_status(uc_fw, INTEL_UC_FIRMWARE_TRANSFERRED);
593 	return 0;
594 
595 fail:
596 	i915_probe_error(gt->i915, "Failed to load %s firmware %s (%d)\n",
597 			 intel_uc_fw_type_repr(uc_fw->type), uc_fw->path,
598 			 err);
599 	intel_uc_fw_change_status(uc_fw, INTEL_UC_FIRMWARE_LOAD_FAIL);
600 	return err;
601 }
602 
603 static inline bool uc_fw_need_rsa_in_memory(struct intel_uc_fw *uc_fw)
604 {
605 	/*
606 	 * The HW reads the GuC RSA from memory if the key size is > 256 bytes,
607 	 * while it reads it from the 64 RSA registers if it is smaller.
608 	 * The HuC RSA is always read from memory.
609 	 */
610 	return uc_fw->type == INTEL_UC_FW_TYPE_HUC || uc_fw->rsa_size > 256;
611 }
612 
613 static int uc_fw_rsa_data_create(struct intel_uc_fw *uc_fw)
614 {
615 	struct intel_gt *gt = __uc_fw_to_gt(uc_fw);
616 	struct i915_vma *vma;
617 	size_t copied;
618 	void *vaddr;
619 	int err;
620 
621 	err = i915_inject_probe_error(gt->i915, -ENXIO);
622 	if (err)
623 		return err;
624 
625 	if (!uc_fw_need_rsa_in_memory(uc_fw))
626 		return 0;
627 
628 	/*
629 	 * uC firmwares will sit above GUC_GGTT_TOP and will not map through
630 	 * GGTT. Unfortunately, this means that the GuC HW cannot perform the uC
631 	 * authentication from memory, as the RSA offset now falls within the
632 	 * GuC inaccessible range. We resort to perma-pinning an additional vma
633 	 * within the accessible range that only contains the RSA signature.
634 	 * The GuC HW can use this extra pinning to perform the authentication
635 	 * since its GGTT offset will be GuC accessible.
636 	 */
637 	GEM_BUG_ON(uc_fw->rsa_size > PAGE_SIZE);
638 	vma = intel_guc_allocate_vma(&gt->uc.guc, PAGE_SIZE);
639 	if (IS_ERR(vma))
640 		return PTR_ERR(vma);
641 
642 	vaddr = i915_gem_object_pin_map_unlocked(vma->obj,
643 						 i915_coherent_map_type(gt->i915, vma->obj, true));
644 	if (IS_ERR(vaddr)) {
645 		i915_vma_unpin_and_release(&vma, 0);
646 		err = PTR_ERR(vaddr);
647 		goto unpin_out;
648 	}
649 
650 	copied = intel_uc_fw_copy_rsa(uc_fw, vaddr, vma->size);
651 	i915_gem_object_unpin_map(vma->obj);
652 
653 	if (copied < uc_fw->rsa_size) {
654 		err = -ENOMEM;
655 		goto unpin_out;
656 	}
657 
658 	uc_fw->rsa_data = vma;
659 
660 	return 0;
661 
662 unpin_out:
663 	i915_vma_unpin_and_release(&vma, 0);
664 	return err;
665 }
666 
/* Release the perma-pinned RSA vma created by uc_fw_rsa_data_create(). */
static void uc_fw_rsa_data_destroy(struct intel_uc_fw *uc_fw)
{
	i915_vma_unpin_and_release(&uc_fw->rsa_data, 0);
}
671 
672 int intel_uc_fw_init(struct intel_uc_fw *uc_fw)
673 {
674 	int err;
675 
676 	/* this should happen before the load! */
677 	GEM_BUG_ON(intel_uc_fw_is_loaded(uc_fw));
678 
679 	if (!intel_uc_fw_is_available(uc_fw))
680 		return -ENOEXEC;
681 
682 	err = i915_gem_object_pin_pages_unlocked(uc_fw->obj);
683 	if (err) {
684 		DRM_DEBUG_DRIVER("%s fw pin-pages err=%d\n",
685 				 intel_uc_fw_type_repr(uc_fw->type), err);
686 		goto out;
687 	}
688 
689 	err = uc_fw_rsa_data_create(uc_fw);
690 	if (err) {
691 		DRM_DEBUG_DRIVER("%s fw rsa data creation failed, err=%d\n",
692 				 intel_uc_fw_type_repr(uc_fw->type), err);
693 		goto out_unpin;
694 	}
695 
696 	return 0;
697 
698 out_unpin:
699 	i915_gem_object_unpin_pages(uc_fw->obj);
700 out:
701 	intel_uc_fw_change_status(uc_fw, INTEL_UC_FIRMWARE_INIT_FAIL);
702 	return err;
703 }
704 
/*
 * Undo intel_uc_fw_init(): release the RSA scratch vma and unpin the
 * firmware pages, returning the firmware to the AVAILABLE state so it can
 * be initialized again (e.g. after a GT reset).
 */
void intel_uc_fw_fini(struct intel_uc_fw *uc_fw)
{
	uc_fw_rsa_data_destroy(uc_fw);

	if (i915_gem_object_has_pinned_pages(uc_fw->obj))
		i915_gem_object_unpin_pages(uc_fw->obj);

	intel_uc_fw_change_status(uc_fw, INTEL_UC_FIRMWARE_AVAILABLE);
}
714 
715 /**
716  * intel_uc_fw_cleanup_fetch - cleanup uC firmware
717  * @uc_fw: uC firmware
718  *
719  * Cleans up uC firmware by releasing the firmware GEM obj.
720  */
721 void intel_uc_fw_cleanup_fetch(struct intel_uc_fw *uc_fw)
722 {
723 	if (!intel_uc_fw_is_available(uc_fw))
724 		return;
725 
726 	i915_gem_object_put(fetch_and_zero(&uc_fw->obj));
727 
728 	intel_uc_fw_change_status(uc_fw, INTEL_UC_FIRMWARE_SELECTED);
729 }
730 
/**
 * intel_uc_fw_copy_rsa - copy fw RSA to buffer
 *
 * @uc_fw: uC firmware
 * @dst: dst buffer
 * @max_len: max number of bytes to copy
 *
 * Copies the RSA signature, which sits in the blob after the CSS header
 * and the uCode, out of the firmware object into @dst.
 *
 * Return: number of copied bytes.
 */
size_t intel_uc_fw_copy_rsa(struct intel_uc_fw *uc_fw, void *dst, u32 max_len)
{
	struct intel_memory_region *mr = uc_fw->obj->mm.region;
	u32 size = min_t(u32, uc_fw->rsa_size, max_len);
	u32 offset = sizeof(struct uc_css_header) + uc_fw->ucode_size;
	struct sgt_iter iter;
	size_t count = 0;
	int idx;

	/* Called during reset handling, must be atomic [no fs_reclaim] */
	GEM_BUG_ON(!intel_uc_fw_is_available(uc_fw));

	/* split the RSA offset into a page index plus an in-page offset */
	idx = offset >> PAGE_SHIFT;
	offset = offset_in_page(offset);
	if (i915_gem_object_has_struct_page(uc_fw->obj)) {
		struct page *page;

		/* page-backed object: atomic kmap each page and memcpy */
		for_each_sgt_page(page, iter, uc_fw->obj->mm.pages) {
			u32 len = min_t(u32, size, PAGE_SIZE - offset);
			void *vaddr;

			/* skip whole pages preceding the RSA offset */
			if (idx > 0) {
				idx--;
				continue;
			}

			vaddr = kmap_atomic(page);
			memcpy(dst, vaddr + offset, len);
			kunmap_atomic(vaddr);

			/* only the first copied page uses a non-zero offset */
			offset = 0;
			dst += len;
			size -= len;
			count += len;
			if (!size)
				break;
		}
	} else {
		dma_addr_t addr;

		/* non-page-backed object: access through the region's iomap */
		for_each_sgt_daddr(addr, iter, uc_fw->obj->mm.pages) {
			u32 len = min_t(u32, size, PAGE_SIZE - offset);
			void __iomem *vaddr;

			if (idx > 0) {
				idx--;
				continue;
			}

			vaddr = io_mapping_map_atomic_wc(&mr->iomap,
							 addr - mr->region.start);
			memcpy_fromio(dst, vaddr + offset, len);
			io_mapping_unmap_atomic(vaddr);

			offset = 0;
			dst += len;
			size -= len;
			count += len;
			if (!size)
				break;
		}
	}

	return count;
}
805 
/**
 * intel_uc_fw_dump - dump information about uC firmware
 * @uc_fw: uC firmware
 * @p: the &drm_printer
 *
 * Pretty printer for uC firmware: prints the selected blob path, the
 * current load status, the wanted vs found versions, and the uCode and
 * RSA sizes parsed from the blob header.
 */
void intel_uc_fw_dump(const struct intel_uc_fw *uc_fw, struct drm_printer *p)
{
	drm_printf(p, "%s firmware: %s\n",
		   intel_uc_fw_type_repr(uc_fw->type), uc_fw->path);
	drm_printf(p, "\tstatus: %s\n",
		   intel_uc_fw_status_repr(uc_fw->status));
	drm_printf(p, "\tversion: wanted %u.%u, found %u.%u\n",
		   uc_fw->major_ver_wanted, uc_fw->minor_ver_wanted,
		   uc_fw->major_ver_found, uc_fw->minor_ver_found);
	drm_printf(p, "\tuCode: %u bytes\n", uc_fw->ucode_size);
	drm_printf(p, "\tRSA: %u bytes\n", uc_fw->rsa_size);
}
825