1 // SPDX-License-Identifier: MIT
2 /*
3  * Copyright © 2022 Intel Corporation
4  */
5 
6 #include "gt/intel_engine_pm.h"
7 #include "gt/intel_gpu_commands.h"
8 #include "gt/intel_gt.h"
9 #include "gt/intel_gt_print.h"
10 #include "gt/intel_ring.h"
11 #include "intel_gsc_fw.h"
12 
13 #define GSC_FW_STATUS_REG			_MMIO(0x116C40)
14 #define GSC_FW_CURRENT_STATE			REG_GENMASK(3, 0)
15 #define   GSC_FW_CURRENT_STATE_RESET		0
16 #define   GSC_FW_PROXY_STATE_NORMAL		5
17 #define GSC_FW_INIT_COMPLETE_BIT		REG_BIT(9)
18 
19 static bool gsc_is_in_reset(struct intel_uncore *uncore)
20 {
21 	u32 fw_status = intel_uncore_read(uncore, GSC_FW_STATUS_REG);
22 
23 	return REG_FIELD_GET(GSC_FW_CURRENT_STATE, fw_status) ==
24 	       GSC_FW_CURRENT_STATE_RESET;
25 }
26 
/*
 * Read the GSC FW status register while holding a runtime PM wakeref,
 * so the read is safe even if the device is currently suspended.
 * Returns the raw register value, or 0 if the read did not run.
 */
static u32 gsc_uc_get_fw_status(struct intel_uncore *uncore)
{
	intel_wakeref_t wakeref;
	u32 fw_status = 0;

	with_intel_runtime_pm(uncore->rpm, wakeref)
		fw_status = intel_uncore_read(uncore, GSC_FW_STATUS_REG);

	return fw_status;
}
37 
38 bool intel_gsc_uc_fw_proxy_init_done(struct intel_gsc_uc *gsc)
39 {
40 	return REG_FIELD_GET(GSC_FW_CURRENT_STATE,
41 			     gsc_uc_get_fw_status(gsc_uc_to_gt(gsc)->uncore)) ==
42 	       GSC_FW_PROXY_STATE_NORMAL;
43 }
44 
45 bool intel_gsc_uc_fw_init_done(struct intel_gsc_uc *gsc)
46 {
47 	return gsc_uc_get_fw_status(gsc_uc_to_gt(gsc)->uncore) & GSC_FW_INIT_COMPLETE_BIT;
48 }
49 
/*
 * Emit the GSC_FW_LOAD command into the request's ring. The command
 * carries the GGTT offset of the memory chunk assigned to the GSC and
 * its size expressed in 4K pages, with the FW-limit-valid flag set.
 * Returns 0 on success or a negative errno if ring space is unavailable.
 */
static int emit_gsc_fw_load(struct i915_request *rq, struct intel_gsc_uc *gsc)
{
	u32 offset = i915_ggtt_offset(gsc->local);
	u32 *cs;

	/* 4 dwords: opcode, 64-bit GGTT offset (lo/hi), size + flags */
	cs = intel_ring_begin(rq, 4);
	if (IS_ERR(cs))
		return PTR_ERR(cs);

	*cs++ = GSC_FW_LOAD;
	*cs++ = lower_32_bits(offset);
	*cs++ = upper_32_bits(offset);
	/* size is given to the HW in units of 4K pages */
	*cs++ = (gsc->local->size / SZ_4K) | HECI1_FW_LIMIT_VALID;

	intel_ring_advance(rq, cs);

	return 0;
}
68 
/*
 * Submit a request on the GSC context that instructs the HW to load the
 * firmware from the memory chunk prepared by gsc_fw_load_prepare(), then
 * wait up to 500ms for the request to complete.
 * Returns 0 on success, -ENODEV if no GSC context exists, -ETIME if the
 * request did not complete in time, or another negative errno.
 */
static int gsc_fw_load(struct intel_gsc_uc *gsc)
{
	struct intel_context *ce = gsc->ce;
	struct i915_request *rq;
	int err;

	if (!ce)
		return -ENODEV;

	rq = i915_request_create(ce);
	if (IS_ERR(rq))
		return PTR_ERR(rq);

	if (ce->engine->emit_init_breadcrumb) {
		err = ce->engine->emit_init_breadcrumb(rq);
		if (err)
			goto out_rq;
	}

	err = emit_gsc_fw_load(rq, gsc);
	if (err)
		goto out_rq;

	err = ce->engine->emit_flush(rq, 0);

out_rq:
	/* take a reference so the request survives i915_request_add() for the wait below */
	i915_request_get(rq);

	if (unlikely(err))
		i915_request_set_error_once(rq, err);

	/* the request must be added even on error so its resources are released */
	i915_request_add(rq);

	if (!err && i915_request_wait(rq, 0, msecs_to_jiffies(500)) < 0)
		err = -ETIME;

	i915_request_put(rq);

	if (err)
		gt_err(gsc_uc_to_gt(gsc), "Request submission for GSC load failed %pe\n",
		       ERR_PTR(err));

	return err;
}
113 
/*
 * Copy the GSC firmware blob into the memory chunk assigned to the GSC
 * (gsc->local), zero-padding the remainder of the destination object.
 * Returns 0 on success, -ENODEV if no chunk was allocated, -ENOSPC if
 * the chunk is smaller than the blob, or a pin/map error.
 */
static int gsc_fw_load_prepare(struct intel_gsc_uc *gsc)
{
	struct intel_gt *gt = gsc_uc_to_gt(gsc);
	struct drm_i915_private *i915 = gt->i915;
	struct drm_i915_gem_object *obj;
	void *src, *dst;

	if (!gsc->local)
		return -ENODEV;

	obj = gsc->local->obj;

	if (obj->base.size < gsc->fw.size)
		return -ENOSPC;

	/*
	 * Wa_22016122933: For MTL the shared memory needs to be mapped
	 * as WC on CPU side and UC (PAT index 2) on GPU side
	 */
	if (IS_METEORLAKE(i915))
		i915_gem_object_set_cache_coherency(obj, I915_CACHE_NONE);

	dst = i915_gem_object_pin_map_unlocked(obj,
					       i915_coherent_map_type(i915, obj, true));
	if (IS_ERR(dst))
		return PTR_ERR(dst);

	src = i915_gem_object_pin_map_unlocked(gsc->fw.obj,
					       i915_coherent_map_type(i915, gsc->fw.obj, true));
	if (IS_ERR(src)) {
		/* unwind the dst mapping pinned above */
		i915_gem_object_unpin_map(obj);
		return PTR_ERR(src);
	}

	/* clear the whole chunk first so the area past the blob is zeroed */
	memset(dst, 0, obj->base.size);
	memcpy(dst, src, gsc->fw.size);

	/*
	 * Wa_22016122933: Making sure the data in dst is
	 * visible to GSC right away
	 */
	intel_guc_write_barrier(&gt->uc.guc);

	i915_gem_object_unpin_map(gsc->fw.obj);
	i915_gem_object_unpin_map(obj);

	return 0;
}
162 
163 static int gsc_fw_wait(struct intel_gt *gt)
164 {
165 	return intel_wait_for_register(gt->uncore,
166 				       GSC_FW_STATUS_REG,
167 				       GSC_FW_INIT_COMPLETE_BIT,
168 				       GSC_FW_INIT_COMPLETE_BIT,
169 				       500);
170 }
171 
/*
 * Load the GSC firmware: copy the blob into the GSC-assigned memory
 * chunk, submit the load command and wait for the FW to report init
 * complete. Arms an FLR on driver unload (see the comment below).
 * Returns 0 on success, -EEXIST if the FW is already loaded, -ENOEXEC
 * if the blob is not loadable, -EIO if the GSC is not in reset when we
 * expect it to be, or another negative errno on failure.
 */
int intel_gsc_uc_fw_upload(struct intel_gsc_uc *gsc)
{
	struct intel_gt *gt = gsc_uc_to_gt(gsc);
	struct intel_uc_fw *gsc_fw = &gsc->fw;
	int err;

	/* check current fw status */
	if (intel_gsc_uc_fw_init_done(gsc)) {
		/* HW says loaded but our tracking disagrees: resync the SW state */
		if (GEM_WARN_ON(!intel_uc_fw_is_loaded(gsc_fw)))
			intel_uc_fw_change_status(gsc_fw, INTEL_UC_FIRMWARE_TRANSFERRED);
		return -EEXIST;
	}

	if (!intel_uc_fw_is_loadable(gsc_fw))
		return -ENOEXEC;

	/* FW blob is ok, so clean the status */
	intel_uc_fw_sanitize(&gsc->fw);

	/* the GSC is expected to be in reset before a (re)load */
	if (!gsc_is_in_reset(gt->uncore))
		return -EIO;

	err = gsc_fw_load_prepare(gsc);
	if (err)
		goto fail;

	/*
	 * GSC is only killed by an FLR, so we need to trigger one on unload to
	 * make sure we stop it. This is because we assign a chunk of memory to
	 * the GSC as part of the FW load, so we need to make sure it stops
	 * using it when we release it to the system on driver unload. Note that
	 * this is not a problem of the unload per-se, because the GSC will not
	 * touch that memory unless there are requests for it coming from the
	 * driver; therefore, no accesses will happen while i915 is not loaded,
	 * but if we re-load the driver then the GSC might wake up and try to
	 * access that old memory location again.
	 * Given that an FLR is a very disruptive action (see the FLR function
	 * for details), we want to do it as the last action before releasing
	 * the access to the MMIO bar, which means we need to do it as part of
	 * the primary uncore cleanup.
	 * An alternative approach to the FLR would be to use a memory location
	 * that survives driver unload, like e.g. stolen memory, and keep the
	 * GSC loaded across reloads. However, this requires us to make sure we
	 * preserve that memory location on unload and then determine and
	 * reserve its offset on each subsequent load, which is not trivial, so
	 * it is easier to just kill everything and start fresh.
	 */
	intel_uncore_set_flr_on_fini(&gt->i915->uncore);

	err = gsc_fw_load(gsc);
	if (err)
		goto fail;

	err = gsc_fw_wait(gt);
	if (err)
		goto fail;

	/* FW is not fully operational until we enable SW proxy */
	intel_uc_fw_change_status(gsc_fw, INTEL_UC_FIRMWARE_TRANSFERRED);

	gt_info(gt, "Loaded GSC firmware %s\n", gsc_fw->file_selected.path);

	return 0;

fail:
	return intel_uc_fw_mark_load_failed(gsc_fw, err);
}
239