xref: /openbmc/linux/drivers/gpu/drm/i915/gt/uc/intel_gsc_fw.c (revision 83775e158a3d2dc437132ab357ed6c9214ef0ae9)
1 // SPDX-License-Identifier: MIT
2 /*
3  * Copyright © 2022 Intel Corporation
4  */
5 
6 #include "gt/intel_engine_pm.h"
7 #include "gt/intel_gpu_commands.h"
8 #include "gt/intel_gt.h"
9 #include "gt/intel_gt_print.h"
10 #include "gt/intel_ring.h"
11 #include "intel_gsc_fw.h"
12 
13 #define GSC_FW_STATUS_REG			_MMIO(0x116C40)
14 #define GSC_FW_CURRENT_STATE			REG_GENMASK(3, 0)
15 #define   GSC_FW_CURRENT_STATE_RESET		0
16 #define   GSC_FW_PROXY_STATE_NORMAL		5
17 #define GSC_FW_INIT_COMPLETE_BIT		REG_BIT(9)
18 
19 static bool gsc_is_in_reset(struct intel_uncore *uncore)
20 {
21 	u32 fw_status = intel_uncore_read(uncore, GSC_FW_STATUS_REG);
22 
23 	return REG_FIELD_GET(GSC_FW_CURRENT_STATE, fw_status) ==
24 	       GSC_FW_CURRENT_STATE_RESET;
25 }
26 
27 bool intel_gsc_uc_fw_proxy_init_done(struct intel_gsc_uc *gsc)
28 {
29 	struct intel_uncore *uncore = gsc_uc_to_gt(gsc)->uncore;
30 	u32 fw_status = intel_uncore_read(uncore, GSC_FW_STATUS_REG);
31 
32 	return REG_FIELD_GET(GSC_FW_CURRENT_STATE, fw_status) ==
33 	       GSC_FW_PROXY_STATE_NORMAL;
34 }
35 
36 bool intel_gsc_uc_fw_init_done(struct intel_gsc_uc *gsc)
37 {
38 	struct intel_uncore *uncore = gsc_uc_to_gt(gsc)->uncore;
39 	u32 fw_status = intel_uncore_read(uncore, GSC_FW_STATUS_REG);
40 
41 	return fw_status & GSC_FW_INIT_COMPLETE_BIT;
42 }
43 
44 static int emit_gsc_fw_load(struct i915_request *rq, struct intel_gsc_uc *gsc)
45 {
46 	u32 offset = i915_ggtt_offset(gsc->local);
47 	u32 *cs;
48 
49 	cs = intel_ring_begin(rq, 4);
50 	if (IS_ERR(cs))
51 		return PTR_ERR(cs);
52 
53 	*cs++ = GSC_FW_LOAD;
54 	*cs++ = lower_32_bits(offset);
55 	*cs++ = upper_32_bits(offset);
56 	*cs++ = (gsc->local->size / SZ_4K) | HECI1_FW_LIMIT_VALID;
57 
58 	intel_ring_advance(rq, cs);
59 
60 	return 0;
61 }
62 
/*
 * Submit a request on the GSC context that emits the FW load command and
 * wait (up to 500ms) for it to complete on the HW.
 *
 * Returns 0 on success, -ENODEV if the GSC context was never created,
 * -ETIME if the request did not complete in time, or a negative errno
 * from request creation/emission.
 */
static int gsc_fw_load(struct intel_gsc_uc *gsc)
{
	struct intel_context *ce = gsc->ce;
	struct i915_request *rq;
	int err;

	if (!ce)
		return -ENODEV;

	rq = i915_request_create(ce);
	if (IS_ERR(rq))
		return PTR_ERR(rq);

	if (ce->engine->emit_init_breadcrumb) {
		err = ce->engine->emit_init_breadcrumb(rq);
		if (err)
			goto out_rq;
	}

	err = emit_gsc_fw_load(rq, gsc);
	if (err)
		goto out_rq;

	err = ce->engine->emit_flush(rq, 0);

out_rq:
	/*
	 * Take a reference before i915_request_add() so the request can
	 * still be waited on and put below even after it has been added
	 * (and potentially retired).
	 */
	i915_request_get(rq);

	/*
	 * A request must always be added once created; on an emission
	 * error we mark it as failed instead of skipping the add.
	 */
	if (unlikely(err))
		i915_request_set_error_once(rq, err);

	i915_request_add(rq);

	if (!err && i915_request_wait(rq, 0, msecs_to_jiffies(500)) < 0)
		err = -ETIME;

	i915_request_put(rq);

	if (err)
		gt_err(gsc_uc_to_gt(gsc), "Request submission for GSC load failed %pe\n",
		       ERR_PTR(err));

	return err;
}
107 
/*
 * Copy the GSC FW image from the fetched FW object into the GSC-local
 * memory allocation that the HW will read it from.
 *
 * Returns 0 on success, -ENODEV if the local allocation doesn't exist,
 * -ENOSPC if the image doesn't fit, or a pin/map error.
 */
static int gsc_fw_load_prepare(struct intel_gsc_uc *gsc)
{
	struct intel_gt *gt = gsc_uc_to_gt(gsc);
	struct drm_i915_private *i915 = gt->i915;
	struct drm_i915_gem_object *obj;
	void *src, *dst;

	if (!gsc->local)
		return -ENODEV;

	obj = gsc->local->obj;

	if (obj->base.size < gsc->fw.size)
		return -ENOSPC;

	/*
	 * Wa_22016122933: For MTL the shared memory needs to be mapped
	 * as WC on CPU side and UC (PAT index 2) on GPU side
	 */
	if (IS_METEORLAKE(i915))
		i915_gem_object_set_cache_coherency(obj, I915_CACHE_NONE);

	dst = i915_gem_object_pin_map_unlocked(obj,
					       i915_coherent_map_type(i915, obj, true));
	if (IS_ERR(dst))
		return PTR_ERR(dst);

	src = i915_gem_object_pin_map_unlocked(gsc->fw.obj,
					       i915_coherent_map_type(i915, gsc->fw.obj, true));
	if (IS_ERR(src)) {
		/* dst was already pinned above, so unwind it on this path */
		i915_gem_object_unpin_map(obj);
		return PTR_ERR(src);
	}

	/* Zero the whole destination so any tail beyond fw.size is clean */
	memset(dst, 0, obj->base.size);
	memcpy(dst, src, gsc->fw.size);

	/*
	 * Wa_22016122933: Making sure the data in dst is
	 * visible to GSC right away
	 */
	intel_guc_write_barrier(&gt->uc.guc);

	i915_gem_object_unpin_map(gsc->fw.obj);
	i915_gem_object_unpin_map(obj);

	return 0;
}
156 
157 static int gsc_fw_wait(struct intel_gt *gt)
158 {
159 	return intel_wait_for_register(gt->uncore,
160 				       GSC_FW_STATUS_REG,
161 				       GSC_FW_INIT_COMPLETE_BIT,
162 				       GSC_FW_INIT_COMPLETE_BIT,
163 				       500);
164 }
165 
/*
 * Load the GSC firmware: verify the HW is in the expected state, stage the
 * image in GSC-local memory, submit the load command and wait for the FW
 * to report init-complete.
 *
 * Returns 0 on success, -EEXIST if the FW is already running, -ENOEXEC if
 * the blob isn't loadable, -EIO if the GSC isn't in reset, or the error
 * from the prepare/load/wait steps (also recorded in the uc_fw status via
 * intel_uc_fw_mark_load_failed).
 */
int intel_gsc_uc_fw_upload(struct intel_gsc_uc *gsc)
{
	struct intel_gt *gt = gsc_uc_to_gt(gsc);
	struct intel_uc_fw *gsc_fw = &gsc->fw;
	int err;

	/* check current fw status */
	if (intel_gsc_uc_fw_init_done(gsc)) {
		/* HW says loaded but SW state disagrees - fix up the latter */
		if (GEM_WARN_ON(!intel_uc_fw_is_loaded(gsc_fw)))
			intel_uc_fw_change_status(gsc_fw, INTEL_UC_FIRMWARE_TRANSFERRED);
		return -EEXIST;
	}

	if (!intel_uc_fw_is_loadable(gsc_fw))
		return -ENOEXEC;

	/* FW blob is ok, so clean the status */
	intel_uc_fw_sanitize(&gsc->fw);

	/* the load can only be started from the reset state */
	if (!gsc_is_in_reset(gt->uncore))
		return -EIO;

	err = gsc_fw_load_prepare(gsc);
	if (err)
		goto fail;

	/*
	 * GSC is only killed by an FLR, so we need to trigger one on unload to
	 * make sure we stop it. This is because we assign a chunk of memory to
	 * the GSC as part of the FW load , so we need to make sure it stops
	 * using it when we release it to the system on driver unload. Note that
	 * this is not a problem of the unload per-se, because the GSC will not
	 * touch that memory unless there are requests for it coming from the
	 * driver; therefore, no accesses will happen while i915 is not loaded,
	 * but if we re-load the driver then the GSC might wake up and try to
	 * access that old memory location again.
	 * Given that an FLR is a very disruptive action (see the FLR function
	 * for details), we want to do it as the last action before releasing
	 * the access to the MMIO bar, which means we need to do it as part of
	 * the primary uncore cleanup.
	 * An alternative approach to the FLR would be to use a memory location
	 * that survives driver unload, like e.g. stolen memory, and keep the
	 * GSC loaded across reloads. However, this requires us to make sure we
	 * preserve that memory location on unload and then determine and
	 * reserve its offset on each subsequent load, which is not trivial, so
	 * it is easier to just kill everything and start fresh.
	 */
	intel_uncore_set_flr_on_fini(&gt->i915->uncore);

	err = gsc_fw_load(gsc);
	if (err)
		goto fail;

	err = gsc_fw_wait(gt);
	if (err)
		goto fail;

	/* FW is not fully operational until we enable SW proxy */
	intel_uc_fw_change_status(gsc_fw, INTEL_UC_FIRMWARE_TRANSFERRED);

	gt_info(gt, "Loaded GSC firmware %s\n", gsc_fw->file_selected.path);

	return 0;

fail:
	return intel_uc_fw_mark_load_failed(gsc_fw, err);
}
233