1 // SPDX-License-Identifier: MIT
2 /*
3  * Copyright © 2022 Intel Corporation
4  */
5 
6 #include "gem/i915_gem_lmem.h"
7 #include "gt/intel_engine_pm.h"
8 #include "gt/intel_gpu_commands.h"
9 #include "gt/intel_gt.h"
10 #include "gt/intel_gt_print.h"
11 #include "gt/intel_ring.h"
12 #include "intel_gsc_binary_headers.h"
13 #include "intel_gsc_fw.h"
14 #include "intel_gsc_uc_heci_cmd_submit.h"
15 #include "i915_reg.h"
16 
17 static bool gsc_is_in_reset(struct intel_uncore *uncore)
18 {
19 	u32 fw_status = intel_uncore_read(uncore, HECI_FWSTS(MTL_GSC_HECI1_BASE, 1));
20 
21 	return REG_FIELD_GET(HECI1_FWSTS1_CURRENT_STATE, fw_status) ==
22 			HECI1_FWSTS1_CURRENT_STATE_RESET;
23 }
24 
25 static u32 gsc_uc_get_fw_status(struct intel_uncore *uncore, bool needs_wakeref)
26 {
27 	intel_wakeref_t wakeref;
28 	u32 fw_status = 0;
29 
30 	if (needs_wakeref)
31 		wakeref = intel_runtime_pm_get(uncore->rpm);
32 
33 	fw_status = intel_uncore_read(uncore, HECI_FWSTS(MTL_GSC_HECI1_BASE, 1));
34 
35 	if (needs_wakeref)
36 		intel_runtime_pm_put(uncore->rpm, wakeref);
37 	return fw_status;
38 }
39 
40 bool intel_gsc_uc_fw_proxy_init_done(struct intel_gsc_uc *gsc, bool needs_wakeref)
41 {
42 	return REG_FIELD_GET(HECI1_FWSTS1_CURRENT_STATE,
43 			     gsc_uc_get_fw_status(gsc_uc_to_gt(gsc)->uncore,
44 						  needs_wakeref)) ==
45 	       HECI1_FWSTS1_PROXY_STATE_NORMAL;
46 }
47 
48 bool intel_gsc_uc_fw_init_done(struct intel_gsc_uc *gsc)
49 {
50 	return gsc_uc_get_fw_status(gsc_uc_to_gt(gsc)->uncore, false) &
51 	       HECI1_FWSTS1_INIT_COMPLETE;
52 }
53 
54 static inline u32 cpd_entry_offset(const struct intel_gsc_cpd_entry *entry)
55 {
56 	return entry->offset & INTEL_GSC_CPD_ENTRY_OFFSET_MASK;
57 }
58 
/*
 * intel_gsc_fw_get_binary_info - parse a GSC FW binary to extract versions
 * @gsc_fw: the GSC uC FW descriptor (embedded in struct intel_gsc_uc)
 * @data: pointer to the start of the FW blob
 * @size: size in bytes of the FW blob
 *
 * Walks the layout pointers -> BPDT -> CPD -> manifest chain (see the
 * diagram below) to find the release version and security version, which
 * are stored in gsc->release and gsc->security_version.
 *
 * Returns 0 on success, -ENODATA if the blob is too small for any of the
 * structures it claims to contain, -EINVAL on a bad signature/marker.
 */
int intel_gsc_fw_get_binary_info(struct intel_uc_fw *gsc_fw, const void *data, size_t size)
{
	struct intel_gsc_uc *gsc = container_of(gsc_fw, struct intel_gsc_uc, fw);
	struct intel_gt *gt = gsc_uc_to_gt(gsc);
	const struct intel_gsc_layout_pointers *layout = data;
	const struct intel_gsc_bpdt_header *bpdt_header = NULL;
	const struct intel_gsc_bpdt_entry *bpdt_entry = NULL;
	const struct intel_gsc_cpd_header_v2 *cpd_header = NULL;
	const struct intel_gsc_cpd_entry *cpd_entry = NULL;
	const struct intel_gsc_manifest_header *manifest;
	size_t min_size = sizeof(*layout);
	int i;

	/* The blob must at least contain the layout pointers structure */
	if (size < min_size) {
		gt_err(gt, "GSC FW too small! %zu < %zu\n", size, min_size);
		return -ENODATA;
	}

	/*
	 * The GSC binary starts with the pointer layout, which contains the
	 * locations of the various partitions of the binary. The one we're
	 * interested in to get the version is the boot1 partition, where we can
	 * find a BPDT header followed by entries, one of which points to the
	 * RBE sub-section of the partition. From here, we can parse the CPD
	 * header and the following entries to find the manifest location
	 * (entry identified by the "RBEP.man" name), from which we can finally
	 * extract the version.
	 *
	 * --------------------------------------------------
	 * [  intel_gsc_layout_pointers                     ]
	 * [      ...                                       ]
	 * [      boot1.offset  >---------------------------]------o
	 * [      ...                                       ]      |
	 * --------------------------------------------------      |
	 *                                                         |
	 * --------------------------------------------------      |
	 * [  intel_gsc_bpdt_header                         ]<-----o
	 * --------------------------------------------------
	 * [  intel_gsc_bpdt_entry[]                        ]
	 * [      entry1                                    ]
	 * [      ...                                       ]
	 * [      entryX                                    ]
	 * [          type == GSC_RBE                       ]
	 * [          offset  >-----------------------------]------o
	 * [      ...                                       ]      |
	 * --------------------------------------------------      |
	 *                                                         |
	 * --------------------------------------------------      |
	 * [  intel_gsc_cpd_header_v2                       ]<-----o
	 * --------------------------------------------------
	 * [  intel_gsc_cpd_entry[]                         ]
	 * [      entry1                                    ]
	 * [      ...                                       ]
	 * [      entryX                                    ]
	 * [          "RBEP.man"                            ]
	 * [           ...                                  ]
	 * [           offset  >----------------------------]------o
	 * [      ...                                       ]      |
	 * --------------------------------------------------      |
	 *                                                         |
	 * --------------------------------------------------      |
	 * [ intel_gsc_manifest_header                      ]<-----o
	 * [  ...                                           ]
	 * [  intel_gsc_version     fw_version              ]
	 * [  ...                                           ]
	 * --------------------------------------------------
	 */

	/* The whole boot1 partition must fit inside the blob */
	min_size = layout->boot1.offset + layout->boot1.size;
	if (size < min_size) {
		gt_err(gt, "GSC FW too small for boot section! %zu < %zu\n",
		       size, min_size);
		return -ENODATA;
	}

	/*
	 * From here on, min_size tracks the cumulative space required inside
	 * the boot1 partition (not the whole blob).
	 */
	min_size = sizeof(*bpdt_header);
	if (layout->boot1.size < min_size) {
		gt_err(gt, "GSC FW boot section too small for BPDT header: %u < %zu\n",
		       layout->boot1.size, min_size);
		return -ENODATA;
	}

	bpdt_header = data + layout->boot1.offset;
	if (bpdt_header->signature != INTEL_GSC_BPDT_HEADER_SIGNATURE) {
		gt_err(gt, "invalid signature for BPDT header: 0x%08x!\n",
		       bpdt_header->signature);
		return -EINVAL;
	}

	/* The BPDT entry array immediately follows the header */
	min_size += sizeof(*bpdt_entry) * bpdt_header->descriptor_count;
	if (layout->boot1.size < min_size) {
		gt_err(gt, "GSC FW boot section too small for BPDT entries: %u < %zu\n",
		       layout->boot1.size, min_size);
		return -ENODATA;
	}

	/* Scan the BPDT entries for the RBE sub-partition, which holds the CPD */
	bpdt_entry = (void *)bpdt_header + sizeof(*bpdt_header);
	for (i = 0; i < bpdt_header->descriptor_count; i++, bpdt_entry++) {
		if ((bpdt_entry->type & INTEL_GSC_BPDT_ENTRY_TYPE_MASK) !=
		    INTEL_GSC_BPDT_ENTRY_TYPE_GSC_RBE)
			continue;

		cpd_header = (void *)bpdt_header + bpdt_entry->sub_partition_offset;
		/* min_size is reset: offset of the CPD header + its size */
		min_size = bpdt_entry->sub_partition_offset + sizeof(*cpd_header);
		break;
	}

	if (!cpd_header) {
		gt_err(gt, "couldn't find CPD header in GSC binary!\n");
		return -ENODATA;
	}

	/* Note: cpd_header is dereferenced only after this bounds check passes */
	if (layout->boot1.size < min_size) {
		gt_err(gt, "GSC FW boot section too small for CPD header: %u < %zu\n",
		       layout->boot1.size, min_size);
		return -ENODATA;
	}

	if (cpd_header->header_marker != INTEL_GSC_CPD_HEADER_MARKER) {
		gt_err(gt, "invalid marker for CPD header in GSC bin: 0x%08x!\n",
		       cpd_header->header_marker);
		return -EINVAL;
	}

	/* The CPD entry array starts at header_length bytes past the header */
	min_size += sizeof(*cpd_entry) * cpd_header->num_of_entries;
	if (layout->boot1.size < min_size) {
		gt_err(gt, "GSC FW boot section too small for CPD entries: %u < %zu\n",
		       layout->boot1.size, min_size);
		return -ENODATA;
	}

	/*
	 * NOTE(review): cpd_entry_offset() below is not validated against
	 * boot1.size before the manifest is dereferenced, so a malformed
	 * blob could point past the partition — confirm whether the blob is
	 * considered trusted at this point or a bounds check should be added.
	 * Also, if no "RBEP.man" entry exists we fall through and return 0
	 * with the version left unset — TODO confirm this is intended.
	 */
	cpd_entry = (void *)cpd_header + cpd_header->header_length;
	for (i = 0; i < cpd_header->num_of_entries; i++, cpd_entry++) {
		if (strcmp(cpd_entry->name, "RBEP.man") == 0) {
			manifest = (void *)cpd_header + cpd_entry_offset(cpd_entry);
			intel_uc_fw_version_from_gsc_manifest(&gsc->release,
							      manifest);
			gsc->security_version = manifest->security_version;
			break;
		}
	}

	return 0;
}
203 
204 static int emit_gsc_fw_load(struct i915_request *rq, struct intel_gsc_uc *gsc)
205 {
206 	u32 offset = i915_ggtt_offset(gsc->local);
207 	u32 *cs;
208 
209 	cs = intel_ring_begin(rq, 4);
210 	if (IS_ERR(cs))
211 		return PTR_ERR(cs);
212 
213 	*cs++ = GSC_FW_LOAD;
214 	*cs++ = lower_32_bits(offset);
215 	*cs++ = upper_32_bits(offset);
216 	*cs++ = (gsc->local->size / SZ_4K) | HECI1_FW_LIMIT_VALID;
217 
218 	intel_ring_advance(rq, cs);
219 
220 	return 0;
221 }
222 
/*
 * Submit a request on the GSC context that emits the FW load command and
 * wait up to 500ms for it to complete.
 *
 * Returns 0 on success, -ENODEV if no GSC context exists, -ETIME if the
 * request doesn't complete in time, or a negative error from request
 * construction/emission.
 */
static int gsc_fw_load(struct intel_gsc_uc *gsc)
{
	struct intel_context *ce = gsc->ce;
	struct i915_request *rq;
	int err;

	if (!ce)
		return -ENODEV;

	rq = i915_request_create(ce);
	if (IS_ERR(rq))
		return PTR_ERR(rq);

	if (ce->engine->emit_init_breadcrumb) {
		err = ce->engine->emit_init_breadcrumb(rq);
		if (err)
			goto out_rq;
	}

	err = emit_gsc_fw_load(rq, gsc);
	if (err)
		goto out_rq;

	err = ce->engine->emit_flush(rq, 0);

out_rq:
	/*
	 * Even on emission failure the request must be submitted to keep the
	 * ring state consistent; take a reference so we can wait on it after
	 * i915_request_add() transfers ownership.
	 */
	i915_request_get(rq);

	/* Propagate the emission error into the request itself */
	if (unlikely(err))
		i915_request_set_error_once(rq, err);

	i915_request_add(rq);

	if (!err && i915_request_wait(rq, 0, msecs_to_jiffies(500)) < 0)
		err = -ETIME;

	i915_request_put(rq);

	if (err)
		gt_err(gsc_uc_to_gt(gsc), "Request submission for GSC load failed %pe\n",
		       ERR_PTR(err));

	return err;
}
267 
268 static int gsc_fw_load_prepare(struct intel_gsc_uc *gsc)
269 {
270 	struct intel_gt *gt = gsc_uc_to_gt(gsc);
271 	struct drm_i915_private *i915 = gt->i915;
272 	void *src;
273 
274 	if (!gsc->local)
275 		return -ENODEV;
276 
277 	if (gsc->local->size < gsc->fw.size)
278 		return -ENOSPC;
279 
280 	src = i915_gem_object_pin_map_unlocked(gsc->fw.obj,
281 					       i915_coherent_map_type(i915, gsc->fw.obj, true));
282 	if (IS_ERR(src))
283 		return PTR_ERR(src);
284 
285 	memcpy_toio(gsc->local_vaddr, src, gsc->fw.size);
286 	memset_io(gsc->local_vaddr + gsc->fw.size, 0, gsc->local->size - gsc->fw.size);
287 
288 	/*
289 	 * Wa_22016122933: Making sure the data in dst is
290 	 * visible to GSC right away
291 	 */
292 	intel_guc_write_barrier(&gt->uc.guc);
293 
294 	i915_gem_object_unpin_map(gsc->fw.obj);
295 
296 	return 0;
297 }
298 
299 static int gsc_fw_wait(struct intel_gt *gt)
300 {
301 	return intel_wait_for_register(gt->uncore,
302 				       HECI_FWSTS(MTL_GSC_HECI1_BASE, 1),
303 				       HECI1_FWSTS1_INIT_COMPLETE,
304 				       HECI1_FWSTS1_INIT_COMPLETE,
305 				       500);
306 }
307 
/*
 * MKHI sub-header carried after the GSC MTL header in HECI messages.
 * The result field is filled in by the GSC FW in the reply.
 */
struct intel_gsc_mkhi_header {
	u8  group_id;
#define MKHI_GROUP_ID_GFX_SRV 0x30

	u8  command;
#define MKHI_GFX_SRV_GET_HOST_COMPATIBILITY_VERSION (0x42)

	u8  reserved;
	u8  result;
} __packed;

/* Request message for the host compatibility version query */
struct mtl_gsc_ver_msg_in {
	struct intel_gsc_mtl_header header;
	struct intel_gsc_mkhi_header mkhi;
} __packed;

/* Reply message: the compat_major/compat_minor pair is what we consume */
struct mtl_gsc_ver_msg_out {
	struct intel_gsc_mtl_header header;
	struct intel_gsc_mkhi_header mkhi;
	u16 proj_major;
	u16 compat_major;
	u16 compat_minor;
	u16 reserved[5];
} __packed;

/* Size reserved for each of the input and output HECI packets */
#define GSC_VER_PKT_SZ SZ_4K
334 
/*
 * Ask the GSC FW for its host compatibility version over HECI and store it
 * in gsc->fw.file_selected.ver (major/minor).
 *
 * A single GGTT buffer of 2 * GSC_VER_PKT_SZ is used: the request packet at
 * offset 0 and the reply at offset GSC_VER_PKT_SZ.
 *
 * Returns 0 on success, -EPROTO on a malformed reply, or a negative error
 * from allocation/submission.
 */
static int gsc_fw_query_compatibility_version(struct intel_gsc_uc *gsc)
{
	struct intel_gt *gt = gsc_uc_to_gt(gsc);
	struct mtl_gsc_ver_msg_in *msg_in;
	struct mtl_gsc_ver_msg_out *msg_out;
	struct i915_vma *vma;
	u64 offset;
	void *vaddr;
	int err;

	err = intel_guc_allocate_and_map_vma(&gt->uc.guc, GSC_VER_PKT_SZ * 2,
					     &vma, &vaddr);
	if (err) {
		gt_err(gt, "failed to allocate vma for GSC version query\n");
		return err;
	}

	offset = i915_ggtt_offset(vma);
	msg_in = vaddr;
	msg_out = vaddr + GSC_VER_PKT_SZ;

	/* Build the request: MTL header + MKHI get-compat-version command */
	intel_gsc_uc_heci_cmd_emit_mtl_header(&msg_in->header,
					      HECI_MEADDRESS_MKHI,
					      sizeof(*msg_in), 0);
	msg_in->mkhi.group_id = MKHI_GROUP_ID_GFX_SRV;
	msg_in->mkhi.command = MKHI_GFX_SRV_GET_HOST_COMPATIBILITY_VERSION;

	err = intel_gsc_uc_heci_cmd_submit_packet(&gt->uc.gsc,
						  offset,
						  sizeof(*msg_in),
						  offset + GSC_VER_PKT_SZ,
						  GSC_VER_PKT_SZ);
	if (err) {
		gt_err(gt,
		       "failed to submit GSC request for compatibility version: %d\n",
		       err);
		goto out_vma;
	}

	/* A reply of the wrong size means we can't trust its contents */
	if (msg_out->header.message_size != sizeof(*msg_out)) {
		gt_err(gt, "invalid GSC reply length %u [expected %zu], s=0x%x, f=0x%x, r=0x%x\n",
		       msg_out->header.message_size, sizeof(*msg_out),
		       msg_out->header.status, msg_out->header.flags, msg_out->mkhi.result);
		err = -EPROTO;
		goto out_vma;
	}

	gsc->fw.file_selected.ver.major = msg_out->compat_major;
	gsc->fw.file_selected.ver.minor = msg_out->compat_minor;

out_vma:
	i915_vma_unpin_and_release(&vma, I915_VMA_RELEASE_MAP);
	return err;
}
389 
/*
 * intel_gsc_uc_fw_upload - load the GSC FW onto the hardware
 * @gsc: the GSC uC
 *
 * Full load flow: verify the FW isn't already running, stage the image in
 * local memory, submit the load command, wait for init-complete, then query
 * and validate the compatibility version.
 *
 * Returns 0 on success, -EEXIST if the FW is already loaded, -ENOEXEC if
 * the blob isn't loadable, -EIO if the GSC isn't in the expected reset
 * state, or a negative error from any load step (status marked failed).
 */
int intel_gsc_uc_fw_upload(struct intel_gsc_uc *gsc)
{
	struct intel_gt *gt = gsc_uc_to_gt(gsc);
	struct intel_uc_fw *gsc_fw = &gsc->fw;
	int err;

	/* check current fw status */
	if (intel_gsc_uc_fw_init_done(gsc)) {
		/* HW says loaded but our bookkeeping disagrees: resync it */
		if (GEM_WARN_ON(!intel_uc_fw_is_loaded(gsc_fw)))
			intel_uc_fw_change_status(gsc_fw, INTEL_UC_FIRMWARE_TRANSFERRED);
		return -EEXIST;
	}

	if (!intel_uc_fw_is_loadable(gsc_fw))
		return -ENOEXEC;

	/* FW blob is ok, so clean the status */
	intel_uc_fw_sanitize(&gsc->fw);

	/* The GSC only accepts a load while it is in the reset state */
	if (!gsc_is_in_reset(gt->uncore))
		return -EIO;

	err = gsc_fw_load_prepare(gsc);
	if (err)
		goto fail;

	/*
	 * GSC is only killed by an FLR, so we need to trigger one on unload to
	 * make sure we stop it. This is because we assign a chunk of memory to
	 * the GSC as part of the FW load, so we need to make sure it stops
	 * using it when we release it to the system on driver unload. Note that
	 * this is not a problem of the unload per-se, because the GSC will not
	 * touch that memory unless there are requests for it coming from the
	 * driver; therefore, no accesses will happen while i915 is not loaded,
	 * but if we re-load the driver then the GSC might wake up and try to
	 * access that old memory location again.
	 * Given that an FLR is a very disruptive action (see the FLR function
	 * for details), we want to do it as the last action before releasing
	 * the access to the MMIO bar, which means we need to do it as part of
	 * the primary uncore cleanup.
	 * An alternative approach to the FLR would be to use a memory location
	 * that survives driver unload, like e.g. stolen memory, and keep the
	 * GSC loaded across reloads. However, this requires us to make sure we
	 * preserve that memory location on unload and then determine and
	 * reserve its offset on each subsequent load, which is not trivial, so
	 * it is easier to just kill everything and start fresh.
	 */
	intel_uncore_set_flr_on_fini(&gt->i915->uncore);

	err = gsc_fw_load(gsc);
	if (err)
		goto fail;

	err = gsc_fw_wait(gt);
	if (err)
		goto fail;

	err = gsc_fw_query_compatibility_version(gsc);
	if (err)
		goto fail;

	/* we only support compatibility version 1.0 at the moment */
	err = intel_uc_check_file_version(gsc_fw, NULL);
	if (err)
		goto fail;

	/* FW is not fully operational until we enable SW proxy */
	intel_uc_fw_change_status(gsc_fw, INTEL_UC_FIRMWARE_TRANSFERRED);

	gt_info(gt, "Loaded GSC firmware %s (cv%u.%u, r%u.%u.%u.%u, svn %u)\n",
		gsc_fw->file_selected.path,
		gsc_fw->file_selected.ver.major, gsc_fw->file_selected.ver.minor,
		gsc->release.major, gsc->release.minor,
		gsc->release.patch, gsc->release.build,
		gsc->security_version);

	return 0;

fail:
	return intel_uc_fw_mark_load_failed(gsc_fw, err);
}
471