// SPDX-License-Identifier: MIT
/*
 * Copyright(c) 2020, Intel Corporation. All rights reserved.
 */

#include "intel_pxp.h"
#include "intel_pxp_cmd.h"
#include "intel_pxp_session.h"
#include "gt/intel_context.h"
#include "gt/intel_engine_pm.h"
#include "gt/intel_gpu_commands.h"
#include "gt/intel_ring.h"

#include "i915_trace.h"

/* stall until prior PXP and MFX/HCP/HUC objects are completed */
#define MFX_WAIT_PXP (MFX_WAIT | \
		      MFX_WAIT_DW0_PXP_SYNC_CONTROL_FLAG | \
		      MFX_WAIT_DW0_MFX_SYNC_CONTROL_FLAG)

static u32 *pxp_emit_session_selection(u32 *cs, u32 idx)
{
	*cs++ = MFX_WAIT_PXP;

	/* pxp off */
	*cs++ = MI_FLUSH_DW;
	*cs++ = 0;
	*cs++ = 0;

	/* select session */
	*cs++ = MI_SET_APPID | MI_SET_APPID_SESSION_ID(idx);

	*cs++ = MFX_WAIT_PXP;

	/* pxp on */
	*cs++ = MI_FLUSH_DW | MI_FLUSH_DW_PROTECTED_MEM_EN |
		MI_FLUSH_DW_OP_STOREDW | MI_FLUSH_DW_STORE_INDEX;
	*cs++ = I915_GEM_HWS_PXP_ADDR | MI_FLUSH_DW_USE_GTT;
	*cs++ = 0;

	*cs++ = MFX_WAIT_PXP;

	return cs;
}

static u32 *pxp_emit_inline_termination(u32 *cs)
{
	/* session inline termination */
	*cs++ = CRYPTO_KEY_EXCHANGE;
	*cs++ = 0;

	return cs;
}

static u32 *pxp_emit_session_termination(u32 *cs, u32 idx)
{
	cs = pxp_emit_session_selection(cs, idx);
	cs = pxp_emit_inline_termination(cs);

	return cs;
}

static u32 *pxp_emit_wait(u32 *cs)
{
	/* wait for cmds to go through */
	*cs++ = MFX_WAIT_PXP;
	*cs++ = 0;

	return cs;
}

/*
 * if we ever need to terminate more than one session, we can submit multiple
 * selections and terminations back-to-back with a single wait at the end
 */
#define SELECTION_LEN 10
#define TERMINATION_LEN 2
#define SESSION_TERMINATION_LEN(x) ((SELECTION_LEN + TERMINATION_LEN) * (x))
#define WAIT_LEN 2
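
/*
 * Illustrative sketch only, not used by i915: if several sessions ever need
 * to be torn down in one submission, the per-session selection + termination
 * can be emitted back-to-back with a single trailing wait, as the comment
 * above describes. The helper name and the @ids/@count parameters are
 * assumptions for this example; a caller would have to reserve
 * SESSION_TERMINATION_LEN(count) + WAIT_LEN dwords.
 */
static inline u32 *pxp_emit_multi_session_termination(u32 *cs,
						      const u32 *ids,
						      u32 count)
{
	u32 i;

	/* one selection + inline termination per session */
	for (i = 0; i < count; i++)
		cs = pxp_emit_session_termination(cs, ids[i]);

	/* a single wait covers all of the terminations above */
	return pxp_emit_wait(cs);
}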

/*
 * Equivalent of i915_request_add(), but the request is queued at maximum
 * priority so session termination is not stalled behind other submissions.
 */
static void pxp_request_commit(struct i915_request *rq)
{
	struct i915_sched_attr attr = { .priority = I915_PRIORITY_MAX };
	struct intel_timeline * const tl = i915_request_timeline(rq);

	lockdep_unpin_lock(&tl->mutex, rq->cookie);

	trace_i915_request_add(rq);
	__i915_request_commit(rq);
	__i915_request_queue(rq, &attr);

	mutex_unlock(&tl->mutex);
}

/*
 * Emit an inline termination of the session identified by @id on the PXP
 * context and wait briefly for the HW to execute it. Returns 0 immediately
 * if PXP is not enabled.
 */
int intel_pxp_terminate_session(struct intel_pxp *pxp, u32 id)
{
	struct i915_request *rq;
	struct intel_context *ce = pxp->ce;
	u32 *cs;
	int err = 0;

	if (!intel_pxp_is_enabled(pxp))
		return 0;

	rq = i915_request_create(ce);
	if (IS_ERR(rq))
		return PTR_ERR(rq);

	if (ce->engine->emit_init_breadcrumb) {
		err = ce->engine->emit_init_breadcrumb(rq);
		if (err)
			goto out_rq;
	}

	cs = intel_ring_begin(rq, SESSION_TERMINATION_LEN(1) + WAIT_LEN);
	if (IS_ERR(cs)) {
		err = PTR_ERR(cs);
		goto out_rq;
	}

	cs = pxp_emit_session_termination(cs, id);
	cs = pxp_emit_wait(cs);

	intel_ring_advance(rq, cs);

out_rq:
	i915_request_get(rq);

	/* a created request must still be committed, so just flag the error */
	if (unlikely(err))
		i915_request_set_error_once(rq, err);

	pxp_request_commit(rq);

	/* give the termination up to ~200ms to complete on the HW */
	if (!err && i915_request_wait(rq, 0, HZ / 5) < 0)
		err = -ETIME;

	i915_request_put(rq);

	return err;
}
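
/*
 * Hypothetical usage sketch (not part of this file): a caller tears down a
 * single session and propagates the error, where "id" is whatever session
 * slot the caller manages:
 *
 *	err = intel_pxp_terminate_session(pxp, id);
 *	if (err)
 *		return err;
 *
 * err ends up as -ETIME when the GPU does not complete the termination
 * within the HZ / 5 (~200ms) wait above.
 */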