// SPDX-License-Identifier: MIT
/*
 * Copyright © 2022 Intel Corporation
 */

#include <linux/types.h>

#include "gt/intel_gt.h"
#include "gt/intel_gt_print.h"
#include "intel_gsc_fw.h"
#include "intel_gsc_proxy.h"
#include "intel_gsc_uc.h"
#include "i915_drv.h"
#include "i915_reg.h"

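/*
 * Worker servicing the deferred GSC actions (firmware load and proxy
 * request handling). Pending actions are accumulated in
 * gsc->gsc_work_actions under gt->irq_lock and consumed here while
 * holding a runtime PM reference.
 */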
static void gsc_work(struct work_struct *work)
{
	struct intel_gsc_uc *gsc = container_of(work, typeof(*gsc), work);
	struct intel_gt *gt = gsc_uc_to_gt(gsc);
	intel_wakeref_t wakeref;
	u32 actions;
	int ret;

	wakeref = intel_runtime_pm_get(gt->uncore->rpm);

	spin_lock_irq(gt->irq_lock);
	actions = gsc->gsc_work_actions;
	gsc->gsc_work_actions = 0;
	spin_unlock_irq(gt->irq_lock);

	if (actions & GSC_ACTION_FW_LOAD) {
		ret = intel_gsc_uc_fw_upload(gsc);
		if (!ret)
			/* setup proxy on a new load */
			actions |= GSC_ACTION_SW_PROXY;
		else if (ret != -EEXIST)
			goto out_put;

		/*
		 * The HuC auth can be done either before or after the proxy init;
		 * if done after, a proxy request will be issued and must be
		 * serviced before the authentication can complete.
		 * Since this worker also handles proxy requests, we can't
		 * perform an action that requires the proxy from within it and
		 * then stall waiting for it, because we'd be blocking the
		 * service path. Therefore, it is easier for us to load HuC
		 * first and do proxy later. The GSC will ack the HuC auth and
		 * then send the HuC proxy request as part of the proxy init
		 * flow.
		 * Note that we can only do the GSC auth if the GuC auth was
		 * successful.
		 */
		if (intel_uc_uses_huc(&gt->uc) &&
		    intel_huc_is_authenticated(&gt->uc.huc, INTEL_HUC_AUTH_BY_GUC))
			intel_huc_auth(&gt->uc.huc, INTEL_HUC_AUTH_BY_GSC);
	}

	if (actions & GSC_ACTION_SW_PROXY) {
		if (!intel_gsc_uc_fw_init_done(gsc)) {
			gt_err(gt, "Proxy request received with GSC not loaded!\n");
			goto out_put;
		}

		ret = intel_gsc_proxy_request_handler(gsc);
		if (ret) {
			if (actions & GSC_ACTION_FW_LOAD) {
				/*
				 * A proxy failure right after firmware load means the proxy-init
				 * step has failed, so mark the GSC as not usable after this.
				 */
				drm_err(&gt->i915->drm,
					"GSC proxy handler failed to init\n");
				intel_uc_fw_change_status(&gsc->fw, INTEL_UC_FIRMWARE_LOAD_FAIL);
			}
			goto out_put;
		}

		/* mark the GSC FW init as done the first time we run this */
		if (actions & GSC_ACTION_FW_LOAD) {
			/*
			 * If there is a proxy establishment error, the GSC might still
			 * complete the request handling cleanly, so we need to check the
			 * status register to see if the proxy init was actually successful.
			 */
			if (intel_gsc_uc_fw_proxy_init_done(gsc, false)) {
				drm_dbg(&gt->i915->drm, "GSC Proxy initialized\n");
				intel_uc_fw_change_status(&gsc->fw, INTEL_UC_FIRMWARE_RUNNING);
			} else {
				drm_err(&gt->i915->drm,
					"GSC status reports proxy init not complete\n");
				intel_uc_fw_change_status(&gsc->fw, INTEL_UC_FIRMWARE_LOAD_FAIL);
			}
		}
	}

out_put:
	intel_runtime_pm_put(gt->uncore->rpm, wakeref);
}

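/* Check whether the given GT exposes the GSC0 engine. */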
static bool gsc_engine_supported(struct intel_gt *gt)
{
	intel_engine_mask_t mask;

	/*
	 * We reach here from i915_driver_early_probe for the primary GT before
	 * its engine mask is set, so we use the device info engine mask for it.
	 * For other GTs we expect the GT-specific mask to be set before we
	 * call this function.
	 */
	GEM_BUG_ON(!gt_is_root(gt) && !gt->info.engine_mask);

	if (gt_is_root(gt))
		mask = INTEL_INFO(gt->i915)->platform_engine_mask;
	else
		mask = gt->info.engine_mask;

	return __HAS_ENGINE(mask, GSC0);
}

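/*
 * Early SW-only setup: initialize the common uC FW state, the worker and
 * its ordered workqueue. The FW is marked as not supported if the GSC0
 * engine is absent or the workqueue allocation fails.
 */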
void intel_gsc_uc_init_early(struct intel_gsc_uc *gsc)
{
	struct intel_gt *gt = gsc_uc_to_gt(gsc);

	/*
	 * The GSC FW needs to be copied to a dedicated memory allocation for
	 * loading (see gsc->local), so we don't need to map the FW image
	 * itself into the GGTT.
	 */
	intel_uc_fw_init_early(&gsc->fw, INTEL_UC_FW_TYPE_GSC, false);
	INIT_WORK(&gsc->work, gsc_work);

	/*
	 * We can arrive here from i915_driver_early_probe for the primary
	 * GT with it being not fully set up, hence check the device info's
	 * engine mask.
	 */
	if (!gsc_engine_supported(gt)) {
		intel_uc_fw_change_status(&gsc->fw, INTEL_UC_FIRMWARE_NOT_SUPPORTED);
		return;
	}

	gsc->wq = alloc_ordered_workqueue("i915_gsc", 0);
	if (!gsc->wq) {
		gt_err(gt, "failed to allocate WQ for GSC, disabling FW\n");
		intel_uc_fw_change_status(&gsc->fw, INTEL_UC_FIRMWARE_NOT_SUPPORTED);
	}
}

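/*
 * Allocate the object backing gsc->local and pin it into the GGTT. See the
 * comment in the function body for why the backing storage must come from
 * stolen memory.
 */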
static int gsc_allocate_and_map_vma(struct intel_gsc_uc *gsc, u32 size)
{
	struct intel_gt *gt = gsc_uc_to_gt(gsc);
	struct drm_i915_gem_object *obj;
	struct i915_vma *vma;
	void __iomem *vaddr;
	int ret = 0;

	/*
	 * The GSC FW doesn't immediately suspend after becoming idle, so there
	 * is a chance that it could still be awake after we successfully
	 * return from the pci suspend function, even if there are no pending
	 * operations.
	 * The FW might therefore try to access memory for its suspend operation
	 * after the kernel has completed the HW suspend flow; this can cause
	 * issues if the FW is mapped in normal RAM, as some of the involved
	 * HW units might've already lost power.
	 * The driver must therefore avoid this situation and the recommended
	 * way to do so is to use stolen memory for the GSC memory allocation,
	 * because stolen memory takes a different path in HW and it is
	 * guaranteed to always work as long as the GPU itself is awake (which
	 * it must be if the GSC is awake).
	 */
	obj = i915_gem_object_create_stolen(gt->i915, size);
	if (IS_ERR(obj))
		return PTR_ERR(obj);

	vma = i915_gem_object_ggtt_pin(obj, NULL, 0, 0, 0);
	if (IS_ERR(vma)) {
		ret = PTR_ERR(vma);
		goto err;
	}

	vaddr = i915_vma_pin_iomap(vma);
	i915_vma_unpin(vma);
	if (IS_ERR(vaddr)) {
		ret = PTR_ERR(vaddr);
		goto err;
	}

	i915_vma_make_unshrinkable(vma);

	gsc->local = vma;
	gsc->local_vaddr = vaddr;

	return 0;

err:
	i915_gem_object_put(obj);
	return ret;
}

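/* Undo gsc_allocate_and_map_vma(): unmap gsc->local and release its object. */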
static void gsc_unmap_and_free_vma(struct intel_gsc_uc *gsc)
{
	struct i915_vma *vma = fetch_and_zero(&gsc->local);

	if (!vma)
		return;

	gsc->local_vaddr = NULL;
	i915_vma_unpin_iomap(vma);
	i915_gem_object_put(vma->obj);
}

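/*
 * Allocate the resources required to run the GSC FW: the FW object itself,
 * the stolen memory region the FW uses for its operations and a pinned
 * context on the GSC engine for communicating with the FW.
 */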
int intel_gsc_uc_init(struct intel_gsc_uc *gsc)
{
	static struct lock_class_key gsc_lock;
	struct intel_gt *gt = gsc_uc_to_gt(gsc);
	struct intel_engine_cs *engine = gt->engine[GSC0];
	struct intel_context *ce;
	int err;

	err = intel_uc_fw_init(&gsc->fw);
	if (err)
		goto out;

	err = gsc_allocate_and_map_vma(gsc, SZ_4M);
	if (err)
		goto out_fw;

	ce = intel_engine_create_pinned_context(engine, engine->gt->vm, SZ_4K,
						I915_GEM_HWS_GSC_ADDR,
						&gsc_lock, "gsc_context");
	if (IS_ERR(ce)) {
		gt_err(gt, "failed to create GSC CS ctx for FW communication\n");
		err = PTR_ERR(ce);
		goto out_vma;
	}

	gsc->ce = ce;

	/* if we fail to init proxy we still want to load GSC for PM */
	intel_gsc_proxy_init(gsc);

	intel_uc_fw_change_status(&gsc->fw, INTEL_UC_FIRMWARE_LOADABLE);

	return 0;

out_vma:
	gsc_unmap_and_free_vma(gsc);
out_fw:
	intel_uc_fw_fini(&gsc->fw);
out:
	gt_probe_error(gt, "GSC init failed %pe\n", ERR_PTR(err));
	return err;
}

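/*
 * Tear down the GSC uC: flush the worker and destroy its workqueue, then
 * release the proxy, context, memory and FW resources acquired during init.
 */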
void intel_gsc_uc_fini(struct intel_gsc_uc *gsc)
{
	if (!intel_uc_fw_is_loadable(&gsc->fw))
		return;

	flush_work(&gsc->work);
	if (gsc->wq) {
		destroy_workqueue(gsc->wq);
		gsc->wq = NULL;
	}

	intel_gsc_proxy_fini(gsc);

	if (gsc->ce)
		intel_engine_destroy_pinned_context(fetch_and_zero(&gsc->ce));

	gsc_unmap_and_free_vma(gsc);

	intel_uc_fw_fini(&gsc->fw);
}

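/* Wait for any queued GSC work to complete. */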
void intel_gsc_uc_flush_work(struct intel_gsc_uc *gsc)
{
	if (!intel_uc_fw_is_loadable(&gsc->fw))
		return;

	flush_work(&gsc->work);
}

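/*
 * Restart the GSC FW load on resume. This is a nop during driver load,
 * when the engine's default state has not been populated yet (see below).
 */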
void intel_gsc_uc_resume(struct intel_gsc_uc *gsc)
{
	if (!intel_uc_fw_is_loadable(&gsc->fw))
		return;

	/*
	 * We only want to start the GSC worker from here in the actual resume
	 * flow and not during driver load. This is because GSC load is slow and
	 * therefore we want to make sure that the default state init completes
	 * first to not slow down the init thread. A separate call to
	 * intel_gsc_uc_load_start will ensure that the GSC is loaded during
	 * driver load.
	 */
	if (!gsc_uc_to_gt(gsc)->engine[GSC0]->default_state)
		return;

	intel_gsc_uc_load_start(gsc);
}

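/* Queue the FW load step on the GSC worker, unless it has already run. */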
void intel_gsc_uc_load_start(struct intel_gsc_uc *gsc)
{
	struct intel_gt *gt = gsc_uc_to_gt(gsc);

	if (!intel_uc_fw_is_loadable(&gsc->fw) || intel_uc_fw_is_in_error(&gsc->fw))
		return;

	if (intel_gsc_uc_fw_init_done(gsc))
		return;

	spin_lock_irq(gt->irq_lock);
	gsc->gsc_work_actions |= GSC_ACTION_FW_LOAD;
	spin_unlock_irq(gt->irq_lock);

	queue_work(gsc->wq, &gsc->work);
}

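/*
 * Print the GSC FW paths, versions and status, plus the HECI1 FW status
 * registers, to the given drm_printer.
 */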
void intel_gsc_uc_load_status(struct intel_gsc_uc *gsc, struct drm_printer *p)
{
	struct intel_gt *gt = gsc_uc_to_gt(gsc);
	struct intel_uncore *uncore = gt->uncore;
	intel_wakeref_t wakeref;

	if (!intel_gsc_uc_is_supported(gsc)) {
		drm_printf(p, "GSC not supported\n");
		return;
	}

	if (!intel_gsc_uc_is_wanted(gsc)) {
		drm_printf(p, "GSC disabled\n");
		return;
	}

	drm_printf(p, "GSC firmware: %s\n", gsc->fw.file_selected.path);
	if (gsc->fw.file_selected.path != gsc->fw.file_wanted.path)
		drm_printf(p, "GSC firmware wanted: %s\n", gsc->fw.file_wanted.path);
	drm_printf(p, "\tstatus: %s\n", intel_uc_fw_status_repr(gsc->fw.status));

	drm_printf(p, "Release: %u.%u.%u.%u\n",
		   gsc->release.major, gsc->release.minor,
		   gsc->release.patch, gsc->release.build);

	drm_printf(p, "Compatibility Version: %u.%u [min expected %u.%u]\n",
		   gsc->fw.file_selected.ver.major, gsc->fw.file_selected.ver.minor,
		   gsc->fw.file_wanted.ver.major, gsc->fw.file_wanted.ver.minor);

	drm_printf(p, "SVN: %u\n", gsc->security_version);

	with_intel_runtime_pm(uncore->rpm, wakeref) {
		u32 i;

		for (i = 1; i <= 6; i++) {
			u32 status = intel_uncore_read(uncore,
						       HECI_FWSTS(MTL_GSC_HECI1_BASE, i));
			drm_printf(p, "HECI1 FWSTS%u = 0x%08x\n", i, status);
		}
	}
}