/*
 * Copyright (c) 2017, NVIDIA CORPORATION. All rights reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
 * DEALINGS IN THE SOFTWARE.
 */
#include "priv.h"

#include <core/memory.h>
#include <subdev/acr.h>
#include <subdev/timer.h>

#include <nvfw/flcn.h>
#include <nvfw/sec2.h>

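/* Handle the SEC2 RTOS's reply to an ACR_BOOTSTRAP_FALCON command.  A
 * non-zero error_code means the RTOS failed to boot the requested LS
 * falcon.
 */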
static int
gp102_sec2_acr_bootstrap_falcon_callback(void *priv, struct nv_falcon_msg *hdr)
{
	struct nv_sec2_acr_bootstrap_falcon_msg *msg =
		container_of(hdr, typeof(*msg), msg.hdr);
	struct nvkm_subdev *subdev = priv;
	const char *name = nvkm_acr_lsf_id(msg->falcon_id);

	if (msg->error_code) {
		nvkm_error(subdev, "ACR_BOOTSTRAP_FALCON failed for "
				   "falcon %d [%s]: %08x\n",
			   msg->falcon_id, name, msg->error_code);
		return -EINVAL;
	}

	nvkm_debug(subdev, "%s booted\n", name);
	return 0;
}

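/* Ask the SEC2 RTOS to reset and bootstrap the LS falcon identified by
 * 'id', waiting up to a second for the reply, which is decoded by the
 * callback above.
 */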
static int
gp102_sec2_acr_bootstrap_falcon(struct nvkm_falcon *falcon,
			        enum nvkm_acr_lsf_id id)
{
	struct nvkm_sec2 *sec2 = container_of(falcon, typeof(*sec2), falcon);
	struct nv_sec2_acr_bootstrap_falcon_cmd cmd = {
		.cmd.hdr.unit_id = sec2->func->unit_acr,
		.cmd.hdr.size = sizeof(cmd),
		.cmd.cmd_type = NV_SEC2_ACR_CMD_BOOTSTRAP_FALCON,
		.flags = NV_SEC2_ACR_BOOTSTRAP_FALCON_FLAGS_RESET_YES,
		.falcon_id = id,
	};

	return nvkm_falcon_cmdq_send(sec2->cmdq, &cmd.cmd.hdr,
				     gp102_sec2_acr_bootstrap_falcon_callback,
				     &sec2->engine.subdev,
				     msecs_to_jiffies(1000));
}

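/* Start the SEC2 RTOS.  The (currently empty) argument structure is
 * copied to the DMEM offset at which the EMEM aperture begins, which is
 * where the bootloader descriptors below tell the RTOS to look for its
 * boot arguments.
 */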
static int
gp102_sec2_acr_boot(struct nvkm_falcon *falcon)
{
	struct nv_sec2_args args = {};

	nvkm_falcon_load_dmem(falcon, &args,
			      falcon->func->emem_addr, sizeof(args), 0);
	nvkm_falcon_start(falcon);
	return 0;
}

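/* Patch a loader_config_v1 descriptor previously written to the WPR
 * image, rebasing its DMA addresses after the image has moved by
 * 'adjust' bytes.
 */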
static void
gp102_sec2_acr_bld_patch(struct nvkm_acr *acr, u32 bld, s64 adjust)
{
	struct loader_config_v1 hdr;

	nvkm_robj(acr->wpr, bld, &hdr, sizeof(hdr));
	hdr.code_dma_base += adjust;
	hdr.data_dma_base += adjust;
	hdr.overlay_dma_base += adjust;
	nvkm_wobj(acr->wpr, bld, &hdr, sizeof(hdr));
	loader_config_v1_dump(&acr->subdev, &hdr);
}

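/* Write the version-0 bootloader descriptor for SEC2's LS firmware.
 * argc/argv point the bootloader at the EMEM offset that
 * gp102_sec2_acr_boot() above fills with the RTOS arguments.
 */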
static void
gp102_sec2_acr_bld_write(struct nvkm_acr *acr, u32 bld,
			 struct nvkm_acr_lsfw *lsfw)
{
	const struct loader_config_v1 hdr = {
		.dma_idx = FALCON_SEC2_DMAIDX_UCODE,
		.code_dma_base = lsfw->offset.img + lsfw->app_start_offset,
		.code_size_total = lsfw->app_size,
		.code_size_to_load = lsfw->app_resident_code_size,
		.code_entry_point = lsfw->app_imem_entry,
		.data_dma_base = lsfw->offset.img + lsfw->app_start_offset +
				 lsfw->app_resident_data_offset,
		.data_size = lsfw->app_resident_data_size,
		.overlay_dma_base = lsfw->offset.img + lsfw->app_start_offset,
		.argc = 1,
		.argv = lsfw->falcon->func->emem_addr,
	};

	nvkm_wobj(acr->wpr, bld, &hdr, sizeof(hdr));
}

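/* LS falcon functions for version 0 of the SEC2 firmware, which uses
 * the loader_config_v1 bootloader descriptor format.
 */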
static const struct nvkm_acr_lsf_func
gp102_sec2_acr_0 = {
	.bld_size = sizeof(struct loader_config_v1),
	.bld_write = gp102_sec2_acr_bld_write,
	.bld_patch = gp102_sec2_acr_bld_patch,
	.boot = gp102_sec2_acr_boot,
	.bootstrap_falcon = gp102_sec2_acr_bootstrap_falcon,
};

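/* Read the INIT message the SEC2 RTOS sends once it's running, and use
 * the queue offsets/sizes it reports to initialize our view of the
 * shared command and message queues.
 */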
int
gp102_sec2_initmsg(struct nvkm_sec2 *sec2)
{
	struct nv_sec2_init_msg msg;
	int ret, i;

	ret = nvkm_falcon_msgq_recv_initmsg(sec2->msgq, &msg, sizeof(msg));
	if (ret)
		return ret;

	if (msg.hdr.unit_id != NV_SEC2_UNIT_INIT ||
	    msg.msg_type != NV_SEC2_INIT_MSG_INIT)
		return -EINVAL;

	for (i = 0; i < ARRAY_SIZE(msg.queue_info); i++) {
		if (msg.queue_info[i].id == NV_SEC2_INIT_MSG_QUEUE_ID_MSGQ) {
			nvkm_falcon_msgq_init(sec2->msgq,
					      msg.queue_info[i].index,
					      msg.queue_info[i].offset,
					      msg.queue_info[i].size);
		} else {
			nvkm_falcon_cmdq_init(sec2->cmdq,
					      msg.queue_info[i].index,
					      msg.queue_info[i].offset,
					      msg.queue_info[i].size);
		}
	}

	return 0;
}

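/* SEC2 interrupt handler.  Bit 6 indicates message-queue traffic from
 * the RTOS and is deferred to process context via sec2->work; any other
 * bits are unexpected, so log and ack them.
 */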
void
gp102_sec2_intr(struct nvkm_sec2 *sec2)
{
	struct nvkm_subdev *subdev = &sec2->engine.subdev;
	struct nvkm_falcon *falcon = &sec2->falcon;
	u32 disp = nvkm_falcon_rd32(falcon, 0x01c);
	u32 intr = nvkm_falcon_rd32(falcon, 0x008) & disp & ~(disp >> 16);

	if (intr & 0x00000040) {
		schedule_work(&sec2->work);
		nvkm_falcon_wr32(falcon, 0x004, 0x00000040);
		intr &= ~0x00000040;
	}

	if (intr) {
		nvkm_error(subdev, "unhandled intr %08x\n", intr);
		nvkm_falcon_wr32(falcon, 0x004, intr);
	}
}

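/* Enable the falcon, first pulsing bit 0 of the 0x3c0 register (likely
 * a SEC2 engine reset) to get the core into a known state before the
 * generic v1 enable sequence runs.
 */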
int
gp102_sec2_flcn_enable(struct nvkm_falcon *falcon)
{
	nvkm_falcon_mask(falcon, 0x3c0, 0x00000001, 0x00000001);
	udelay(10);
	nvkm_falcon_mask(falcon, 0x3c0, 0x00000001, 0x00000000);
	return nvkm_falcon_v1_enable(falcon);
}

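/* Bind 'ctx' as the falcon's context and, if one was actually bound,
 * wait for the resulting context switch to complete; see the comment
 * below for the reasoning behind the extra handshake.
 */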
void
gp102_sec2_flcn_bind_context(struct nvkm_falcon *falcon,
			     struct nvkm_memory *ctx)
{
	struct nvkm_device *device = falcon->owner->device;

	nvkm_falcon_v1_bind_context(falcon, ctx);
	if (!ctx)
		return;

	/* Not sure if this is a WAR for a HW issue, or some additional
	 * programming sequence that's needed to properly complete the
	 * context switch we trigger above.
	 *
	 * Fixes unreliability of booting the SEC2 RTOS on Quadro P620,
	 * particularly when resuming from suspend.
	 *
	 * Also removes the need for an odd workaround where we needed
	 * to program SEC2's FALCON_CPUCTL_ALIAS_STARTCPU twice before
	 * the SEC2 RTOS would begin executing.
	 */
	nvkm_msec(device, 10,
		u32 irqstat = nvkm_falcon_rd32(falcon, 0x008);
		u32 flcn0dc = nvkm_falcon_rd32(falcon, 0x0dc);
		if ((irqstat & 0x00000008) &&
		    (flcn0dc & 0x00007000) == 0x00005000)
			break;
	);

	nvkm_falcon_mask(falcon, 0x004, 0x00000008, 0x00000008);
	nvkm_falcon_mask(falcon, 0x058, 0x00000002, 0x00000002);

	nvkm_msec(device, 10,
		u32 flcn0dc = nvkm_falcon_rd32(falcon, 0x0dc);
		if ((flcn0dc & 0x00007000) == 0x00000000)
			break;
	);
}

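/* Register layout and operations for GP102's SEC2 falcon.  emem_addr is
 * the DMEM offset at which the EMEM aperture appears; cmdq/msgq give
 * the head/tail register offsets of the shared queues (the final value
 * appears to be the stride between queue register pairs).
 */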
static const struct nvkm_falcon_func
gp102_sec2_flcn = {
	.debug = 0x408,
	.fbif = 0x600,
	.load_imem = nvkm_falcon_v1_load_imem,
	.load_dmem = nvkm_falcon_v1_load_dmem,
	.read_dmem = nvkm_falcon_v1_read_dmem,
	.emem_addr = 0x01000000,
	.bind_context = gp102_sec2_flcn_bind_context,
	.wait_for_halt = nvkm_falcon_v1_wait_for_halt,
	.clear_interrupt = nvkm_falcon_v1_clear_interrupt,
	.set_start_addr = nvkm_falcon_v1_set_start_addr,
	.start = nvkm_falcon_v1_start,
	.enable = gp102_sec2_flcn_enable,
	.disable = nvkm_falcon_v1_disable,
	.cmdq = { 0xa00, 0xa04, 8 },
	.msgq = { 0xa30, 0xa34, 8 },
};

const struct nvkm_sec2_func
gp102_sec2 = {
	.flcn = &gp102_sec2_flcn,
	.unit_acr = NV_SEC2_UNIT_ACR,
	.intr = gp102_sec2_intr,
	.initmsg = gp102_sec2_initmsg,
};

MODULE_FIRMWARE("nvidia/gp102/sec2/desc.bin");
MODULE_FIRMWARE("nvidia/gp102/sec2/image.bin");
MODULE_FIRMWARE("nvidia/gp102/sec2/sig.bin");
MODULE_FIRMWARE("nvidia/gp104/sec2/desc.bin");
MODULE_FIRMWARE("nvidia/gp104/sec2/image.bin");
MODULE_FIRMWARE("nvidia/gp104/sec2/sig.bin");
MODULE_FIRMWARE("nvidia/gp106/sec2/desc.bin");
MODULE_FIRMWARE("nvidia/gp106/sec2/image.bin");
MODULE_FIRMWARE("nvidia/gp106/sec2/sig.bin");
MODULE_FIRMWARE("nvidia/gp107/sec2/desc.bin");
MODULE_FIRMWARE("nvidia/gp107/sec2/image.bin");
MODULE_FIRMWARE("nvidia/gp107/sec2/sig.bin");

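/* The "-1" revision of the SEC2 firmware switched to the
 * flcn_bl_dmem_desc_v2 bootloader descriptor; the helpers below mirror
 * their version-0 counterparts for the new layout.
 */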
static void
gp102_sec2_acr_bld_patch_1(struct nvkm_acr *acr, u32 bld, s64 adjust)
{
	struct flcn_bl_dmem_desc_v2 hdr;

	nvkm_robj(acr->wpr, bld, &hdr, sizeof(hdr));
	hdr.code_dma_base += adjust;
	hdr.data_dma_base += adjust;
	nvkm_wobj(acr->wpr, bld, &hdr, sizeof(hdr));
	flcn_bl_dmem_desc_v2_dump(&acr->subdev, &hdr);
}

static void
gp102_sec2_acr_bld_write_1(struct nvkm_acr *acr, u32 bld,
			   struct nvkm_acr_lsfw *lsfw)
{
	const struct flcn_bl_dmem_desc_v2 hdr = {
		.ctx_dma = FALCON_SEC2_DMAIDX_UCODE,
		.code_dma_base = lsfw->offset.img + lsfw->app_start_offset,
		.non_sec_code_off = lsfw->app_resident_code_offset,
		.non_sec_code_size = lsfw->app_resident_code_size,
		.code_entry_point = lsfw->app_imem_entry,
		.data_dma_base = lsfw->offset.img + lsfw->app_start_offset +
				 lsfw->app_resident_data_offset,
		.data_size = lsfw->app_resident_data_size,
		.argc = 1,
		.argv = lsfw->falcon->func->emem_addr,
	};

	nvkm_wobj(acr->wpr, bld, &hdr, sizeof(hdr));
}

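/* LS falcon functions for version 1 of the SEC2 firmware. */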
const struct nvkm_acr_lsf_func
gp102_sec2_acr_1 = {
	.bld_size = sizeof(struct flcn_bl_dmem_desc_v2),
	.bld_write = gp102_sec2_acr_bld_write_1,
	.bld_patch = gp102_sec2_acr_bld_patch_1,
	.boot = gp102_sec2_acr_boot,
	.bootstrap_falcon = gp102_sec2_acr_bootstrap_falcon,
};

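/* Fetch the signature, image and descriptor files making up the
 * requested version of SEC2's LS firmware.
 */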
int
gp102_sec2_load(struct nvkm_sec2 *sec2, int ver,
		const struct nvkm_sec2_fwif *fwif)
{
	return nvkm_acr_lsfw_load_sig_image_desc_v1(&sec2->engine.subdev,
						    &sec2->falcon,
						    NVKM_ACR_LSF_SEC2, "sec2/",
						    ver, fwif->acr);
}

MODULE_FIRMWARE("nvidia/gp102/sec2/desc-1.bin");
MODULE_FIRMWARE("nvidia/gp102/sec2/image-1.bin");
MODULE_FIRMWARE("nvidia/gp102/sec2/sig-1.bin");
MODULE_FIRMWARE("nvidia/gp104/sec2/desc-1.bin");
MODULE_FIRMWARE("nvidia/gp104/sec2/image-1.bin");
MODULE_FIRMWARE("nvidia/gp104/sec2/sig-1.bin");
MODULE_FIRMWARE("nvidia/gp106/sec2/desc-1.bin");
MODULE_FIRMWARE("nvidia/gp106/sec2/image-1.bin");
MODULE_FIRMWARE("nvidia/gp106/sec2/sig-1.bin");
MODULE_FIRMWARE("nvidia/gp107/sec2/desc-1.bin");
MODULE_FIRMWARE("nvidia/gp107/sec2/image-1.bin");
MODULE_FIRMWARE("nvidia/gp107/sec2/sig-1.bin");

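/* Firmware match list, tried in order: the "-1" file set first, falling
 * back to the original files.
 */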
static const struct nvkm_sec2_fwif
gp102_sec2_fwif[] = {
	{ 1, gp102_sec2_load, &gp102_sec2, &gp102_sec2_acr_1 },
	{ 0, gp102_sec2_load, &gp102_sec2, &gp102_sec2_acr_0 },
	{}
};

int
gp102_sec2_new(struct nvkm_device *device, int index, struct nvkm_sec2 **psec2)
{
	return nvkm_sec2_new_(gp102_sec2_fwif, device, index, 0, psec2);
}