// SPDX-License-Identifier: MIT
/*
 * Copyright(c) 2020 Intel Corporation.
 */
#include <linux/workqueue.h>

#include "gem/i915_gem_context.h"

#include "gt/intel_context.h"
#include "gt/intel_gt.h"

#include "i915_drv.h"

#include "intel_pxp.h"
#include "intel_pxp_irq.h"
#include "intel_pxp_session.h"
#include "intel_pxp_tee.h"
#include "intel_pxp_types.h"

/**
 * DOC: PXP
 *
 * PXP (Protected Xe Path) is a feature available in Gen12 and newer platforms.
 * It allows execution and flip to display of protected (i.e. encrypted)
 * objects. The SW support is enabled via the CONFIG_DRM_I915_PXP kconfig.
 *
 * Objects can opt in to PXP encryption at creation time via the
 * I915_GEM_CREATE_EXT_PROTECTED_CONTENT create_ext flag. For objects to be
 * correctly protected they must be used in conjunction with a context created
 * with the I915_CONTEXT_PARAM_PROTECTED_CONTENT flag. See the documentation
 * of those two uapi flags for details and restrictions.
 *
 * Protected objects are tied to a pxp session; currently we only support one
 * session, which i915 manages and whose index is available in the uapi
 * (I915_PROTECTED_CONTENT_DEFAULT_SESSION) for use in instructions targeting
 * protected objects.
 * The session is invalidated by the HW when certain events occur (e.g.
 * suspend/resume). When this happens, all the objects that were used with the
 * session are marked as invalid and all contexts marked as using protected
 * content are banned. Any further attempt at using them in an execbuf call is
 * rejected, while flips are converted to black frames.
 *
 * Some of the PXP setup operations are performed by the Management Engine,
 * which is handled by the mei driver; communication between i915 and mei is
 * performed via the mei_pxp component module.
 */
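/*
 * For illustration only, a minimal (untested) userspace sketch of the opt-in
 * described above, assuming the uapi definitions from
 * include/uapi/drm/i915_drm.h; see the uapi documentation of the two flags
 * for the exact requirements (e.g. a protected context must also be marked
 * as non-recoverable):
 *
 *	struct drm_i915_gem_create_ext_protected_content prot_ext = {
 *		.base = { .name = I915_GEM_CREATE_EXT_PROTECTED_CONTENT },
 *	};
 *	struct drm_i915_gem_create_ext create = {
 *		.size = 4096,
 *		.extensions = (uintptr_t)&prot_ext,
 *	};
 *
 *	ioctl(drm_fd, DRM_IOCTL_I915_GEM_CREATE_EXT, &create);
 *
 *	struct drm_i915_gem_context_create_ext_setparam p_protected = {
 *		.base = { .name = I915_CONTEXT_CREATE_EXT_SETPARAM },
 *		.param = {
 *			.param = I915_CONTEXT_PARAM_PROTECTED_CONTENT,
 *			.value = 1,
 *		},
 *	};
 *	struct drm_i915_gem_context_create_ext_setparam p_norecover = {
 *		.base = {
 *			.name = I915_CONTEXT_CREATE_EXT_SETPARAM,
 *			.next_extension = (uintptr_t)&p_protected,
 *		},
 *		.param = {
 *			.param = I915_CONTEXT_PARAM_RECOVERABLE,
 *			.value = 0,
 *		},
 *	};
 *	struct drm_i915_gem_context_create_ext ctx_create = {
 *		.flags = I915_CONTEXT_CREATE_FLAGS_USE_EXTENSIONS,
 *		.extensions = (uintptr_t)&p_norecover,
 *	};
 *
 *	ioctl(drm_fd, DRM_IOCTL_I915_GEM_CONTEXT_CREATE_EXT, &ctx_create);
 */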

bool intel_pxp_is_supported(const struct intel_pxp *pxp)
{
	return IS_ENABLED(CONFIG_DRM_I915_PXP) && pxp;
}

bool intel_pxp_is_enabled(const struct intel_pxp *pxp)
{
	return IS_ENABLED(CONFIG_DRM_I915_PXP) && pxp && pxp->ce;
}

bool intel_pxp_is_active(const struct intel_pxp *pxp)
{
	return IS_ENABLED(CONFIG_DRM_I915_PXP) && pxp && pxp->arb_is_valid;
}

/* KCR register definitions */
#define KCR_INIT _MMIO(0x320f0)
/* Setting KCR Init bit is required after system boot */
#define KCR_INIT_ALLOW_DISPLAY_ME_WRITES REG_BIT(14)

static void kcr_pxp_enable(struct intel_gt *gt)
{
	intel_uncore_write(gt->uncore, KCR_INIT,
			   _MASKED_BIT_ENABLE(KCR_INIT_ALLOW_DISPLAY_ME_WRITES));
}

static void kcr_pxp_disable(struct intel_gt *gt)
{
	intel_uncore_write(gt->uncore, KCR_INIT,
			   _MASKED_BIT_DISABLE(KCR_INIT_ALLOW_DISPLAY_ME_WRITES));
}

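/*
 * Create a pinned context on the first available VCS engine; this is the
 * context (pxp->ce) that the PXP code later uses to submit session management
 * commands (e.g. terminations) to the HW.
 */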
static int create_vcs_context(struct intel_pxp *pxp)
{
	static struct lock_class_key pxp_lock;
	struct intel_gt *gt = pxp->ctrl_gt;
	struct intel_engine_cs *engine;
	struct intel_context *ce;
	int i;

	/*
	 * Find the first VCS engine present. We're guaranteed there is one
	 * if we're in this function due to the check in has_pxp
	 */
	for (i = 0, engine = NULL; !engine; i++)
		engine = gt->engine_class[VIDEO_DECODE_CLASS][i];

	GEM_BUG_ON(!engine || engine->class != VIDEO_DECODE_CLASS);

	ce = intel_engine_create_pinned_context(engine, engine->gt->vm, SZ_4K,
						I915_GEM_HWS_PXP_ADDR,
						&pxp_lock, "pxp_context");
	if (IS_ERR(ce)) {
		drm_err(&gt->i915->drm, "failed to create VCS ctx for PXP\n");
		return PTR_ERR(ce);
	}

	pxp->ce = ce;

	return 0;
}

static void destroy_vcs_context(struct intel_pxp *pxp)
{
	if (pxp->ce)
		intel_engine_destroy_pinned_context(fetch_and_zero(&pxp->ce));
}

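/*
 * Full PXP init: prime the termination completion, set up the session
 * management state, create the pinned VCS context and bind the tee backend.
 * On any failure PXP is simply left disabled (intel_pxp_is_enabled() will
 * keep returning false).
 */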
static void pxp_init_full(struct intel_pxp *pxp)
{
	struct intel_gt *gt = pxp->ctrl_gt;
	int ret;

	/*
	 * we'll use the completion to check if there is a termination pending,
	 * so we start it as completed and we reinit it when a termination
	 * is triggered.
	 */
	init_completion(&pxp->termination);
	complete_all(&pxp->termination);

	intel_pxp_session_management_init(pxp);

	ret = create_vcs_context(pxp);
	if (ret)
		return;

	ret = intel_pxp_tee_component_init(pxp);
	if (ret)
		goto out_context;

	drm_info(&gt->i915->drm, "Protected Xe Path (PXP) protected content support initialized\n");

	return;

out_context:
	destroy_vcs_context(pxp);
}

static struct intel_gt *find_gt_for_required_teelink(struct drm_i915_private *i915)
{
	/*
	 * NOTE: Only certain platforms require PXP-tee-backend dependencies
	 * for HuC authentication. For now, it's limited to DG2.
	 */
	if (IS_ENABLED(CONFIG_INTEL_MEI_PXP) && IS_ENABLED(CONFIG_INTEL_MEI_GSC) &&
	    intel_huc_is_loaded_by_gsc(&i915->gt0.uc.huc) && intel_uc_uses_huc(&i915->gt0.uc))
		return &i915->gt0;

	return NULL;
}

static struct intel_gt *find_gt_for_required_protected_content(struct drm_i915_private *i915)
{
	if (!IS_ENABLED(CONFIG_DRM_I915_PXP) || !INTEL_INFO(i915)->has_pxp)
		return NULL;

	/*
	 * For MTL onwards, the PXP-controller-GT needs to have a valid GSC
	 * engine on the media GT. NOTE: if we have a media tile with a GSC
	 * engine, the VDBOX is guaranteed to be present as well, so we can
	 * skip that check.
	 */
	if (i915->media_gt && HAS_ENGINE(i915->media_gt, GSC0))
		return i915->media_gt;

	/*
	 * Otherwise we rely on the mei-pxp module, but only on legacy
	 * platforms that predate a separate media GT and that have a valid
	 * VDBOX.
	 */
	if (IS_ENABLED(CONFIG_INTEL_MEI_PXP) && !i915->media_gt && VDBOX_MASK(&i915->gt0))
		return &i915->gt0;

	return NULL;
}

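/*
 * Probe for a GT that can back PXP: either one with full protected-content
 * support or one that only needs the tee link for HuC authentication. On
 * success, i915->pxp is allocated and bound to that control GT; -ENODEV is
 * returned if no suitable GT exists.
 */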
int intel_pxp_init(struct drm_i915_private *i915)
{
	struct intel_gt *gt;
	bool is_full_feature = false;

	/*
	 * NOTE: Get the ctrl_gt before checking intel_pxp_is_supported since
	 * we still need it even if only PXP's backend tee transport is needed.
	 */
	gt = find_gt_for_required_protected_content(i915);
	if (gt)
		is_full_feature = true;
	else
		gt = find_gt_for_required_teelink(i915);

	if (!gt)
		return -ENODEV;

	/*
	 * At this point, we will either enable full-featured PXP capabilities
	 * including session and object management, or we will only init the
	 * backend tee channel for internal users such as HuC loading by GSC.
	 */
	i915->pxp = kzalloc(sizeof(*i915->pxp), GFP_KERNEL);
	if (!i915->pxp)
		return -ENOMEM;

	i915->pxp->ctrl_gt = gt;

	/*
	 * If the full PXP feature is not available, but the HuC is loaded by
	 * the GSC on a pre-MTL platform such as DG2, we skip the init of the
	 * full PXP session/object management and only init the tee channel.
	 */
	if (is_full_feature)
		pxp_init_full(i915->pxp);
	else
		intel_pxp_tee_component_init(i915->pxp);

	return 0;
}

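/*
 * Undo intel_pxp_init(): invalidate the arb session state, unbind the tee
 * component, destroy the VCS context and free the pxp structure.
 */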
void intel_pxp_fini(struct drm_i915_private *i915)
{
	if (!i915->pxp)
		return;

	i915->pxp->arb_is_valid = false;

	intel_pxp_tee_component_fini(i915->pxp);

	destroy_vcs_context(i915->pxp);

	kfree(i915->pxp);
	i915->pxp = NULL;
}

void intel_pxp_mark_termination_in_progress(struct intel_pxp *pxp)
{
	pxp->arb_is_valid = false;
	reinit_completion(&pxp->termination);
}

static void pxp_queue_termination(struct intel_pxp *pxp)
{
	struct intel_gt *gt = pxp->ctrl_gt;

	/*
	 * We want to get the same effect as if we received a termination
	 * interrupt, so just pretend that we did.
	 */
	spin_lock_irq(gt->irq_lock);
	intel_pxp_mark_termination_in_progress(pxp);
	pxp->session_events |= PXP_TERMINATION_REQUEST;
	queue_work(system_unbound_wq, &pxp->session_work);
	spin_unlock_irq(gt->irq_lock);
}

static bool pxp_component_bound(struct intel_pxp *pxp)
{
	bool bound = false;

	mutex_lock(&pxp->tee_mutex);
	if (pxp->pxp_component)
		bound = true;
	mutex_unlock(&pxp->tee_mutex);

	return bound;
}

static int __pxp_global_teardown_final(struct intel_pxp *pxp)
{
	if (!pxp->arb_is_valid)
		return 0;
	/*
	 * To ensure synchronous and coherent session teardown completion
	 * in response to suspend or shutdown triggers, don't use a worker.
	 */
	intel_pxp_mark_termination_in_progress(pxp);
	intel_pxp_terminate(pxp, false);

	if (!wait_for_completion_timeout(&pxp->termination, msecs_to_jiffies(250)))
		return -ETIMEDOUT;

	return 0;
}

static int __pxp_global_teardown_restart(struct intel_pxp *pxp)
{
	if (pxp->arb_is_valid)
		return 0;
	/*
	 * The arb-session is currently inactive and we are doing a reset and restart
	 * due to a runtime event. Use the worker that was designed for this.
	 */
	pxp_queue_termination(pxp);

	if (!wait_for_completion_timeout(&pxp->termination, msecs_to_jiffies(250)))
		return -ETIMEDOUT;

	return 0;
}

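/*
 * Synchronously tear down the arb session on the suspend/shutdown path and
 * disable the PXP HW interface, holding a runtime PM wakeref for the
 * duration of the HW access.
 */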
void intel_pxp_end(struct intel_pxp *pxp)
{
	struct drm_i915_private *i915 = pxp->ctrl_gt->i915;
	intel_wakeref_t wakeref;

	if (!intel_pxp_is_enabled(pxp))
		return;

	wakeref = intel_runtime_pm_get(&i915->runtime_pm);

	mutex_lock(&pxp->arb_mutex);

	if (__pxp_global_teardown_final(pxp))
		drm_dbg(&i915->drm, "PXP end timed out\n");

	mutex_unlock(&pxp->arb_mutex);

	intel_pxp_fini_hw(pxp);
	intel_runtime_pm_put(&i915->runtime_pm, wakeref);
}

/*
 * the arb session is restarted from the irq work when we receive the
 * termination completion interrupt
 */
int intel_pxp_start(struct intel_pxp *pxp)
{
	int ret = 0;

	if (!intel_pxp_is_enabled(pxp))
		return -ENODEV;

	if (wait_for(pxp_component_bound(pxp), 250))
		return -ENXIO;

	mutex_lock(&pxp->arb_mutex);

	ret = __pxp_global_teardown_restart(pxp);
	if (ret)
		goto unlock;

	/* make sure the compiler doesn't optimize the double access */
	barrier();

	if (!pxp->arb_is_valid)
		ret = -EIO;

unlock:
	mutex_unlock(&pxp->arb_mutex);
	return ret;
}

void intel_pxp_init_hw(struct intel_pxp *pxp)
{
	kcr_pxp_enable(pxp->ctrl_gt);
	intel_pxp_irq_enable(pxp);
}

void intel_pxp_fini_hw(struct intel_pxp *pxp)
{
	kcr_pxp_disable(pxp->ctrl_gt);

	intel_pxp_irq_disable(pxp);
}

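/*
 * Check whether a protected object can be used with the current PXP key.
 * The first time an object is used (with assign == true) it inherits the
 * current key instance; afterwards, a mismatch between the object's key and
 * the session's key means the object content was encrypted with a key that
 * is no longer valid, so -ENOEXEC is returned.
 */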
int intel_pxp_key_check(struct intel_pxp *pxp,
			struct drm_i915_gem_object *obj,
			bool assign)
{
	if (!intel_pxp_is_active(pxp))
		return -ENODEV;

	if (!i915_gem_object_is_protected(obj))
		return -EINVAL;

	GEM_BUG_ON(!pxp->key_instance);

	/*
	 * If this is the first time we're using this object, it's not
	 * encrypted yet; it will be encrypted with the current key, so mark it
	 * as such. If the object is already encrypted, check instead if the
	 * used key is still valid.
	 */
	if (!obj->pxp_key_instance && assign)
		obj->pxp_key_instance = pxp->key_instance;

	if (obj->pxp_key_instance != pxp->key_instance)
		return -ENOEXEC;

	return 0;
}

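/*
 * Called when the PXP session is invalidated (e.g. termination or suspend):
 * every context marked as using protected content is banned, so that objects
 * encrypted with the now-stale key can no longer be submitted.
 */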
void intel_pxp_invalidate(struct intel_pxp *pxp)
{
	struct drm_i915_private *i915 = pxp->ctrl_gt->i915;
	struct i915_gem_context *ctx, *cn;

	/* ban all contexts marked as protected */
	spin_lock_irq(&i915->gem.contexts.lock);
	list_for_each_entry_safe(ctx, cn, &i915->gem.contexts.list, link) {
		struct i915_gem_engines_iter it;
		struct intel_context *ce;

		if (!kref_get_unless_zero(&ctx->ref))
			continue;

		if (likely(!i915_gem_context_uses_protected_content(ctx))) {
			i915_gem_context_put(ctx);
			continue;
		}

		spin_unlock_irq(&i915->gem.contexts.lock);

		/*
		 * By the time we get here we are either going to suspend with
		 * quiesced execution or the HW keys are already long gone and
		 * in this case it is worthless to attempt to close the context
		 * and wait for its execution. It will hang the GPU if it has
		 * not already. So, as a fast mitigation, we can ban the
		 * context as quickly as we can. That might race with the
		 * execbuffer, but currently this is the best that can be done.
		 */
		for_each_gem_engine(ce, i915_gem_context_lock_engines(ctx), it)
			intel_context_ban(ce, NULL);
		i915_gem_context_unlock_engines(ctx);

		/*
		 * The context has been banned, no need to keep the wakeref.
		 * This is safe from races because the only other place this
		 * is touched is context_release and we're holding a ctx ref.
		 */
		if (ctx->pxp_wakeref) {
			intel_runtime_pm_put(&i915->runtime_pm,
					     ctx->pxp_wakeref);
			ctx->pxp_wakeref = 0;
		}

		spin_lock_irq(&i915->gem.contexts.lock);
		list_safe_reset_next(ctx, cn, link);
		i915_gem_context_put(ctx);
	}
	spin_unlock_irq(&i915->gem.contexts.lock);
}
449