1 /*
2  * Copyright 2022 Red Hat Inc.
3  *
4  * Permission is hereby granted, free of charge, to any person obtaining a
5  * copy of this software and associated documentation files (the "Software"),
6  * to deal in the Software without restriction, including without limitation
7  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8  * and/or sell copies of the Software, and to permit persons to whom the
9  * Software is furnished to do so, subject to the following conditions:
10  *
11  * The above copyright notice and this permission notice shall be included in
12  * all copies or substantial portions of the Software.
13  *
14  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
17  * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18  * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19  * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20  * OTHER DEALINGS IN THE SOFTWARE.
21  */
22 #include "priv.h"
23 
24 #include <core/memory.h>
25 #include <subdev/mc.h>
26 #include <subdev/timer.h>
27 
28 void
29 gm200_flcn_tracepc(struct nvkm_falcon *falcon)
30 {
31 	u32 sctl = nvkm_falcon_rd32(falcon, 0x240);
32 	u32 tidx = nvkm_falcon_rd32(falcon, 0x148);
33 	int nr = (tidx & 0x00ff0000) >> 16, sp, ip;
34 
35 	FLCN_ERR(falcon, "TRACEPC SCTL %08x TIDX %08x", sctl, tidx);
36 	for (sp = 0; sp < nr; sp++) {
37 		nvkm_falcon_wr32(falcon, 0x148, sp);
38 		ip = nvkm_falcon_rd32(falcon, 0x14c);
39 		FLCN_ERR(falcon, "TRACEPC: %08x", ip);
40 	}
41 }
42 
/* Drain one DMEM data port into the caller's buffer, 32 bits at a time.
 *
 * The data register at 0x1c4 + port*8 is read repeatedly; the port must
 * first be primed by gm200_flcn_pio_dmem_rd_init(), which presumably
 * enables address auto-increment (BIT(25)) — confirm against hw docs.
 * Any trailing len % 4 bytes are silently ignored; the PIO descriptor's
 * .min = 4 is what keeps callers from requesting smaller transfers.
 *
 * NOTE(review): img is const only to match the shared nvkm_falcon_func_pio
 * callback signature — this function writes through it via the cast below,
 * and assumes img is 4-byte aligned; verify at call sites.
 */
static void
gm200_flcn_pio_dmem_rd(struct nvkm_falcon *falcon, u8 port, const u8 *img, int len)
{
	while (len >= 4) {
		*(u32 *)img = nvkm_falcon_rd32(falcon, 0x1c4 + (port * 8));
		img += 4;
		len -= 4;
	}
}
52 
53 static void
54 gm200_flcn_pio_dmem_rd_init(struct nvkm_falcon *falcon, u8 port, u32 dmem_base)
55 {
56 	nvkm_falcon_wr32(falcon, 0x1c0 + (port * 8), BIT(25) | dmem_base);
57 }
58 
59 static void
60 gm200_flcn_pio_dmem_wr(struct nvkm_falcon *falcon, u8 port, const u8 *img, int len, u16 tag)
61 {
62 	while (len >= 4) {
63 		nvkm_falcon_wr32(falcon, 0x1c4 + (port * 8), *(u32 *)img);
64 		img += 4;
65 		len -= 4;
66 	}
67 }
68 
69 static void
70 gm200_flcn_pio_dmem_wr_init(struct nvkm_falcon *falcon, u8 port, bool sec, u32 dmem_base)
71 {
72 	nvkm_falcon_wr32(falcon, 0x1c0 + (port * 8), BIT(24) | dmem_base);
73 }
74 
/* PIO accessors for falcon DMEM: transfers are 4-byte granular with a
 * 0x100-byte per-call maximum, and reads/writes use separately primed
 * ports (see the rd_init/wr_init hooks above).
 */
const struct nvkm_falcon_func_pio
gm200_flcn_dmem_pio = {
	.min = 4,
	.max = 0x100,
	.wr_init = gm200_flcn_pio_dmem_wr_init,
	.wr = gm200_flcn_pio_dmem_wr,
	.rd_init = gm200_flcn_pio_dmem_rd_init,
	.rd = gm200_flcn_pio_dmem_rd,
};
84 
85 static void
86 gm200_flcn_pio_imem_wr_init(struct nvkm_falcon *falcon, u8 port, bool sec, u32 imem_base)
87 {
88 	nvkm_falcon_wr32(falcon, 0x180 + (port * 0x10), (sec ? BIT(28) : 0) | BIT(24) | imem_base);
89 }
90 
91 static void
92 gm200_flcn_pio_imem_wr(struct nvkm_falcon *falcon, u8 port, const u8 *img, int len, u16 tag)
93 {
94 	nvkm_falcon_wr32(falcon, 0x188 + (port * 0x10), tag++);
95 	while (len >= 4) {
96 		nvkm_falcon_wr32(falcon, 0x184 + (port * 0x10), *(u32 *)img);
97 		img += 4;
98 		len -= 4;
99 	}
100 }
101 
/* PIO accessors for falcon IMEM: min == max == 0x100, i.e. uploads are
 * performed in exactly one 256-byte page per call; IMEM is write-only
 * through this interface (no rd/rd_init hooks).
 */
const struct nvkm_falcon_func_pio
gm200_flcn_imem_pio = {
	.min = 0x100,
	.max = 0x100,
	.wr_init = gm200_flcn_pio_imem_wr_init,
	.wr = gm200_flcn_pio_imem_wr,
};
109 
110 int
111 gm200_flcn_bind_stat(struct nvkm_falcon *falcon, bool intr)
112 {
113 	if (intr && !(nvkm_falcon_rd32(falcon, 0x008) & 0x00000008))
114 		return -1;
115 
116 	return (nvkm_falcon_rd32(falcon, 0x0dc) & 0x00007000) >> 12;
117 }
118 
/* Bind an instance block to the falcon so it can address memory.
 *
 * Writes, in order:
 *   0x604: clear the DMA index field to 0 (DMAIDX_VIRT, per the
 *          original comment);
 *   0x054: instance pointer — bit 30 set, target aperture in bits
 *          29:28, and the 4KiB-aligned address in the low bits;
 *   0x090 / 0x0a4: set one bit each — NOTE(review): presumably
 *          enabling context handling / the bind-done interrupt used by
 *          gm200_flcn_bind_stat(); confirm against hw documentation.
 * The write order is kept as-is; it may be significant on hardware.
 */
void
gm200_flcn_bind_inst(struct nvkm_falcon *falcon, int target, u64 addr)
{
	nvkm_falcon_mask(falcon, 0x604, 0x00000007, 0x00000000); /* DMAIDX_VIRT */
	nvkm_falcon_wr32(falcon, 0x054, (1 << 30) | (target << 28) | (addr >> 12));
	nvkm_falcon_mask(falcon, 0x090, 0x00010000, 0x00010000);
	nvkm_falcon_mask(falcon, 0x0a4, 0x00000008, 0x00000008);
}
127 
/* Wait for the falcon's post-reset IMEM/DMEM scrubbing to complete.
 *
 * Polls 0x10c until the scrub-busy bits (2:1) clear, for up to 10ms
 * (nvkm_msec is a statement-macro poll loop; a negative result means
 * the timeout expired).
 *
 * NOTE(review): the leading nvkm_falcon_mask() with mask == 0 and
 * value == 0 modifies no bits — presumably it acts as a read/write-back
 * flush of 0x040 before polling; confirm intent.
 *
 * Returns 0 on success, -ETIMEDOUT if scrubbing never finished.
 */
int
gm200_flcn_reset_wait_mem_scrubbing(struct nvkm_falcon *falcon)
{
	nvkm_falcon_mask(falcon, 0x040, 0x00000000, 0x00000000);

	if (nvkm_msec(falcon->owner->device, 10,
		if (!(nvkm_falcon_rd32(falcon, 0x10c) & 0x00000006))
			break;
	) < 0)
		return -ETIMEDOUT;

	return 0;
}
141 
/* Bring the falcon out of reset.
 *
 * Sequence: optional engine-specific reset hook, optional core-select
 * hook, then the PMC enable (when this falcon is reset via PMC), then
 * wait for memory scrubbing to finish.  Finally the value of register
 * 0x000000 (NOTE(review): presumably PMC_BOOT_0, the chip ID — confirm)
 * is stashed in falcon scratch 0x084.
 *
 * Returns 0 on success, or the first hook/wait error.
 */
int
gm200_flcn_enable(struct nvkm_falcon *falcon)
{
	struct nvkm_device *device = falcon->owner->device;
	int ret;

	/* Engine-specific reset, if the implementation provides one. */
	if (falcon->func->reset_eng) {
		ret = falcon->func->reset_eng(falcon);
		if (ret)
			return ret;
	}

	/* Select the falcon core (e.g. on multi-core engines), if needed. */
	if (falcon->func->select) {
		ret = falcon->func->select(falcon);
		if (ret)
			return ret;
	}

	if (falcon->func->reset_pmc)
		nvkm_mc_enable(device, falcon->owner->type, falcon->owner->inst);

	/* IMEM/DMEM are scrubbed by hw after reset; wait for that. */
	ret = falcon->func->reset_wait_mem_scrubbing(falcon);
	if (ret)
		return ret;

	nvkm_falcon_wr32(falcon, 0x084, nvkm_rd32(device, 0x000000));
	return 0;
}
170 
/* Put the falcon into reset, mirroring gm200_flcn_enable().
 *
 * Sequence: optional core-select hook, quiesce (clear 0x048 bits 1:0,
 * write all-ones to 0x014 — NOTE(review): presumably masking/clearing
 * interrupts, confirm), then PMC disable (with an optional reset_prep
 * hook first), then the optional engine-specific reset hook.
 *
 * Returns 0 on success, or the first hook error.
 */
int
gm200_flcn_disable(struct nvkm_falcon *falcon)
{
	struct nvkm_device *device = falcon->owner->device;
	int ret;

	if (falcon->func->select) {
		ret = falcon->func->select(falcon);
		if (ret)
			return ret;
	}

	nvkm_falcon_mask(falcon, 0x048, 0x00000003, 0x00000000);
	nvkm_falcon_wr32(falcon, 0x014, 0xffffffff);

	if (falcon->func->reset_pmc) {
		/* Give the implementation a chance to quiesce before the
		 * PMC reset actually hits. */
		if (falcon->func->reset_prep) {
			ret = falcon->func->reset_prep(falcon);
			if (ret)
				return ret;
		}

		nvkm_mc_disable(device, falcon->owner->type, falcon->owner->inst);
	}

	if (falcon->func->reset_eng) {
		ret = falcon->func->reset_eng(falcon);
		if (ret)
			return ret;
	}

	return 0;
}
204 
/* Start loaded firmware and wait for it to report a result via mailbox0.
 *
 * Mailbox0 (0x040) is seeded from *pmbox0, or poisoned with 0xcafebeef
 * when pmbox0 is NULL, and mailbox1 (0x044) from *pmbox1 if given — so
 * stale values can't be mistaken for a result.  The boot vector goes to
 * 0x104 and CPUCTL (0x100) is kicked with bit 1; we then poll up to 2s
 * for bit 4 of CPUCTL (NOTE(review): presumably HALTED — confirm).
 *
 * On timeout, or when mailbox0 doesn't read back mbox0_ok, the mailbox
 * pair is logged via FLCN_ERRON and an error returned; "ret ?: -EIO"
 * deliberately lets -ETIMEDOUT take precedence over -EIO.  irqsclr,
 * when non-zero, is written to 0x004 to clear the requested IRQ bits.
 *
 * Returns 0 on successful boot, -ETIMEDOUT or -EIO otherwise.
 */
int
gm200_flcn_fw_boot(struct nvkm_falcon_fw *fw, u32 *pmbox0, u32 *pmbox1, u32 mbox0_ok, u32 irqsclr)
{
	struct nvkm_falcon *falcon = fw->falcon;
	u32 mbox0, mbox1;
	int ret = 0;

	nvkm_falcon_wr32(falcon, 0x040, pmbox0 ? *pmbox0 : 0xcafebeef);
	if (pmbox1)
		nvkm_falcon_wr32(falcon, 0x044, *pmbox1);

	nvkm_falcon_wr32(falcon, 0x104, fw->boot_addr);
	nvkm_falcon_wr32(falcon, 0x100, 0x00000002);

	if (nvkm_msec(falcon->owner->device, 2000,
		if (nvkm_falcon_rd32(falcon, 0x100) & 0x00000010)
			break;
	) < 0)
		ret = -ETIMEDOUT;

	mbox0 = nvkm_falcon_rd32(falcon, 0x040);
	mbox1 = nvkm_falcon_rd32(falcon, 0x044);
	if (FLCN_ERRON(falcon, ret || mbox0 != mbox0_ok, "mbox %08x %08x", mbox0, mbox1))
		ret = ret ?: -EIO;

	if (irqsclr)
		nvkm_falcon_mask(falcon, 0x004, 0xffffffff, irqsclr);

	return ret;
}
235 
/* Upload a firmware image into the falcon's IMEM/DMEM.
 *
 * Two phases:
 *  1. DMA context setup.  With an instance block (fw->inst), bind it
 *     via the bind_inst hook and poll bind_stat until it reports 5,
 *     then flip 0x004/0x058 bits and poll again for 0 — NOTE(review):
 *     presumably ack'ing the bind interrupt and completing the
 *     handshake; confirm the status codes against hw docs.  Without an
 *     instance block, fall back to setting 0x624 bit 7 and zeroing
 *     0x10c.
 *  2. Image upload via PIO.  With a separate bootloader (fw->boot),
 *     write it at the top of IMEM (code.limit - boot_size), tagged by
 *     boot_addr, then hand off to the implementation's load_bld hook.
 *     Otherwise write the non-secure IMEM section, the (secure-tagged)
 *     IMEM section, and the DMEM section in that order.
 *
 * Note: both switch statements map memory targets to different aperture
 * codes (0/2/3 for the instance bind vs 4/5/6 for the boot path) —
 * this asymmetry is intentional per the register interfaces involved.
 *
 * Returns 0 on success, -ETIMEDOUT on bind handshake timeout, -EINVAL
 * for an unsupported memory target, or a PIO/load_bld error.
 */
int
gm200_flcn_fw_load(struct nvkm_falcon_fw *fw)
{
	struct nvkm_falcon *falcon = fw->falcon;
	int target, ret;

	if (fw->inst) {
		nvkm_falcon_mask(falcon, 0x048, 0x00000001, 0x00000001);

		switch (nvkm_memory_target(fw->inst)) {
		case NVKM_MEM_TARGET_VRAM: target = 0; break;
		case NVKM_MEM_TARGET_HOST: target = 2; break;
		case NVKM_MEM_TARGET_NCOH: target = 3; break;
		default:
			WARN_ON(1);
			return -EINVAL;
		}

		falcon->func->bind_inst(falcon, target, nvkm_memory_addr(fw->inst));

		if (nvkm_msec(falcon->owner->device, 10,
			if (falcon->func->bind_stat(falcon, falcon->func->bind_intr) == 5)
				break;
		) < 0)
			return -ETIMEDOUT;

		nvkm_falcon_mask(falcon, 0x004, 0x00000008, 0x00000008);
		nvkm_falcon_mask(falcon, 0x058, 0x00000002, 0x00000002);

		if (nvkm_msec(falcon->owner->device, 10,
			if (falcon->func->bind_stat(falcon, false) == 0)
				break;
		) < 0)
			return -ETIMEDOUT;
	} else {
		nvkm_falcon_mask(falcon, 0x624, 0x00000080, 0x00000080);
		nvkm_falcon_wr32(falcon, 0x10c, 0x00000000);
	}

	if (fw->boot) {
		switch (nvkm_memory_target(&fw->fw.mem.memory)) {
		case NVKM_MEM_TARGET_VRAM: target = 4; break;
		case NVKM_MEM_TARGET_HOST: target = 5; break;
		case NVKM_MEM_TARGET_NCOH: target = 6; break;
		default:
			WARN_ON(1);
			return -EINVAL;
		}

		/* Bootloader lives at the very top of IMEM. */
		ret = nvkm_falcon_pio_wr(falcon, fw->boot, 0, 0,
					 IMEM, falcon->code.limit - fw->boot_size, fw->boot_size,
					 fw->boot_addr >> 8, false);
		if (ret)
			return ret;

		return fw->func->load_bld(fw);
	}

	/* No separate bootloader: upload sections directly. */
	ret = nvkm_falcon_pio_wr(falcon, fw->fw.img + fw->nmem_base_img, fw->nmem_base_img, 0,
				 IMEM, fw->nmem_base, fw->nmem_size, fw->nmem_base >> 8, false);
	if (ret)
		return ret;

	/* Secure IMEM section (final "true" argument). */
	ret = nvkm_falcon_pio_wr(falcon, fw->fw.img + fw->imem_base_img, fw->imem_base_img, 0,
				 IMEM, fw->imem_base, fw->imem_size, fw->imem_base >> 8, true);
	if (ret)
		return ret;

	ret = nvkm_falcon_pio_wr(falcon, fw->fw.img + fw->dmem_base_img, fw->dmem_base_img, 0,
				 DMEM, fw->dmem_base, fw->dmem_size, 0, false);
	if (ret)
		return ret;

	return 0;
}
311 
312 int
313 gm200_flcn_fw_reset(struct nvkm_falcon_fw *fw)
314 {
315 	return nvkm_falcon_reset(fw->falcon);
316 }
317 
318 int
319 gm200_flcn_fw_signature(struct nvkm_falcon_fw *fw, u32 *sig_base_src)
320 {
321 	struct nvkm_falcon *falcon = fw->falcon;
322 	u32 addr = falcon->func->debug;
323 	int ret = 0;
324 
325 	if (addr) {
326 		ret = nvkm_falcon_enable(falcon);
327 		if (ret)
328 			return ret;
329 
330 		if (nvkm_falcon_rd32(falcon, addr) & 0x00100000) {
331 			*sig_base_src = fw->sig_base_dbg;
332 			return 1;
333 		}
334 	}
335 
336 	return ret;
337 }
338 
/* Default gm200 firmware ops: debug/production signature selection,
 * full falcon reset, PIO image upload, and mailbox-based boot.
 */
const struct nvkm_falcon_fw_func
gm200_flcn_fw = {
	.signature = gm200_flcn_fw_signature,
	.reset = gm200_flcn_fw_reset,
	.load = gm200_flcn_fw_load,
	.boot = gm200_flcn_fw_boot,
};
346