xref: /openbmc/linux/drivers/gpu/drm/nouveau/nvkm/falcon/gm200.c (revision 2541626cfb794e57ba0575a6920826f591f7ced0)
1 /*
2  * Copyright 2022 Red Hat Inc.
3  *
4  * Permission is hereby granted, free of charge, to any person obtaining a
5  * copy of this software and associated documentation files (the "Software"),
6  * to deal in the Software without restriction, including without limitation
7  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8  * and/or sell copies of the Software, and to permit persons to whom the
9  * Software is furnished to do so, subject to the following conditions:
10  *
11  * The above copyright notice and this permission notice shall be included in
12  * all copies or substantial portions of the Software.
13  *
14  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
17  * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18  * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19  * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20  * OTHER DEALINGS IN THE SOFTWARE.
21  */
22 #include "priv.h"
23 
24 #include <core/memory.h>
25 #include <subdev/mc.h>
26 #include <subdev/timer.h>
27 
28 static void
29 gm200_flcn_pio_dmem_rd(struct nvkm_falcon *falcon, u8 port, const u8 *img, int len)
30 {
31 	while (len >= 4) {
32 		*(u32 *)img = nvkm_falcon_rd32(falcon, 0x1c4 + (port * 8));
33 		img += 4;
34 		len -= 4;
35 	}
36 }
37 
38 static void
39 gm200_flcn_pio_dmem_rd_init(struct nvkm_falcon *falcon, u8 port, u32 dmem_base)
40 {
41 	nvkm_falcon_wr32(falcon, 0x1c0 + (port * 8), BIT(25) | dmem_base);
42 }
43 
44 static void
45 gm200_flcn_pio_dmem_wr(struct nvkm_falcon *falcon, u8 port, const u8 *img, int len, u16 tag)
46 {
47 	while (len >= 4) {
48 		nvkm_falcon_wr32(falcon, 0x1c4 + (port * 8), *(u32 *)img);
49 		img += 4;
50 		len -= 4;
51 	}
52 }
53 
54 static void
55 gm200_flcn_pio_dmem_wr_init(struct nvkm_falcon *falcon, u8 port, bool sec, u32 dmem_base)
56 {
57 	nvkm_falcon_wr32(falcon, 0x1c0 + (port * 8), BIT(24) | dmem_base);
58 }
59 
/* PIO transfer ops for GM200-style falcon DMEM. */
const struct nvkm_falcon_func_pio
gm200_flcn_dmem_pio = {
	.min = 4,	/* minimum transfer unit, in bytes */
	.max = 0x100,	/* maximum bytes per rd/wr call */
	.wr_init = gm200_flcn_pio_dmem_wr_init,
	.wr = gm200_flcn_pio_dmem_wr,
	.rd_init = gm200_flcn_pio_dmem_rd_init,
	.rd = gm200_flcn_pio_dmem_rd,
};
69 
70 static void
71 gm200_flcn_pio_imem_wr_init(struct nvkm_falcon *falcon, u8 port, bool sec, u32 imem_base)
72 {
73 	nvkm_falcon_wr32(falcon, 0x180 + (port * 0x10), (sec ? BIT(28) : 0) | BIT(24) | imem_base);
74 }
75 
76 static void
77 gm200_flcn_pio_imem_wr(struct nvkm_falcon *falcon, u8 port, const u8 *img, int len, u16 tag)
78 {
79 	nvkm_falcon_wr32(falcon, 0x188 + (port * 0x10), tag++);
80 	while (len >= 4) {
81 		nvkm_falcon_wr32(falcon, 0x184 + (port * 0x10), *(u32 *)img);
82 		img += 4;
83 		len -= 4;
84 	}
85 }
86 
/* PIO transfer ops for GM200-style falcon IMEM (write-only, whole
 * 256-byte pages so each call maps to exactly one tag).
 */
const struct nvkm_falcon_func_pio
gm200_flcn_imem_pio = {
	.min = 0x100,	/* minimum transfer unit: one IMEM page */
	.max = 0x100,	/* maximum bytes per wr call: one IMEM page */
	.wr_init = gm200_flcn_pio_imem_wr_init,
	.wr = gm200_flcn_pio_imem_wr,
};
94 
95 int
96 gm200_flcn_bind_stat(struct nvkm_falcon *falcon, bool intr)
97 {
98 	if (intr && !(nvkm_falcon_rd32(falcon, 0x008) & 0x00000008))
99 		return -1;
100 
101 	return (nvkm_falcon_rd32(falcon, 0x0dc) & 0x00007000) >> 12;
102 }
103 
/* Bind an instance block to the falcon so it can DMA its own context.
 * The write order below mirrors the hardware's expected programming
 * sequence - do not reorder.
 */
void
gm200_flcn_bind_inst(struct nvkm_falcon *falcon, int target, u64 addr)
{
	nvkm_falcon_mask(falcon, 0x604, 0x00000007, 0x00000000); /* DMAIDX_VIRT */
	/* Instance block: bit 30 = valid, bits 29:28 = memory target
	 * aperture, low bits = 4KiB-aligned address (addr >> 12).
	 */
	nvkm_falcon_wr32(falcon, 0x054, (1 << 30) | (target << 28) | (addr >> 12));
	/* NOTE(review): exact semantics of the two maintenance bits below
	 * are not visible here - presumably trigger/enable the bind and
	 * its completion interrupt; confirm against hw docs.
	 */
	nvkm_falcon_mask(falcon, 0x090, 0x00010000, 0x00010000);
	nvkm_falcon_mask(falcon, 0x0a4, 0x00000008, 0x00000008);
}
112 
/* Wait (up to 10ms) for the falcon's post-reset IMEM/DMEM scrubbing to
 * complete.  Returns 0 on success, -ETIMEDOUT otherwise.
 */
int
gm200_flcn_reset_wait_mem_scrubbing(struct nvkm_falcon *falcon)
{
	/* Read-modify-write with an empty mask: effectively just a
	 * register access - presumably posts/flushes prior writes
	 * before polling; confirm intent against hw docs.
	 */
	nvkm_falcon_mask(falcon, 0x040, 0x00000000, 0x00000000);

	/* Poll until the two scrubbing-busy bits in 0x10c clear. */
	if (nvkm_msec(falcon->owner->device, 10,
		if (!(nvkm_falcon_rd32(falcon, 0x10c) & 0x00000006))
			break;
	) < 0)
		return -ETIMEDOUT;

	return 0;
}
126 
/* Bring the falcon out of reset.
 *
 * Sequence (order matters): engine-specific reset hook (if any), PMC
 * enable (if this falcon is reset via PMC), wait for memory scrubbing,
 * then seed register 0x084 with the chip's boot-0 value (GPU register
 * 0x000000 - presumably PMC_BOOT_0; confirm against hw docs).
 *
 * Returns 0 on success, or a negative error code from the reset hook
 * or the scrubbing wait.
 */
int
gm200_flcn_enable(struct nvkm_falcon *falcon)
{
	struct nvkm_device *device = falcon->owner->device;
	int ret;

	if (falcon->func->reset_eng) {
		ret = falcon->func->reset_eng(falcon);
		if (ret)
			return ret;
	}

	if (falcon->func->reset_pmc)
		nvkm_mc_enable(device, falcon->owner->type, falcon->owner->inst);

	ret = falcon->func->reset_wait_mem_scrubbing(falcon);
	if (ret)
		return ret;

	nvkm_falcon_wr32(falcon, 0x084, nvkm_rd32(device, 0x000000));
	return 0;
}
149 
/* Put the falcon into reset.
 *
 * Mirror image of gm200_flcn_enable(): quiesce the core first (clear
 * the low two bits of 0x048, write 0xffffffff to 0x014 - presumably
 * masking/clearing all falcon interrupts; confirm against hw docs),
 * then PMC disable (if applicable), then the engine-specific reset
 * hook.  Returns 0 on success or the reset hook's error code.
 */
int
gm200_flcn_disable(struct nvkm_falcon *falcon)
{
	struct nvkm_device *device = falcon->owner->device;
	int ret;

	nvkm_falcon_mask(falcon, 0x048, 0x00000003, 0x00000000);
	nvkm_falcon_wr32(falcon, 0x014, 0xffffffff);

	if (falcon->func->reset_pmc)
		nvkm_mc_disable(device, falcon->owner->type, falcon->owner->inst);

	if (falcon->func->reset_eng) {
		ret = falcon->func->reset_eng(falcon);
		if (ret)
			return ret;
	}

	return 0;
}
170 
/* Start a loaded firmware and wait for it to halt.
 *
 * pmbox0/pmbox1: optional values seeded into mailbox 0/1 before boot
 *                (mbox0 defaults to the 0xcafebeef poison pattern so a
 *                stale value can't be mistaken for success).
 * mbox0_ok:      the mailbox-0 value the firmware is expected to leave
 *                behind on successful completion.
 * irqsclr:       interrupt bits to clear after the run, if non-zero.
 *
 * Returns 0 on success, -ETIMEDOUT if the falcon doesn't halt within
 * 2000ms, or -EIO if it halts with an unexpected mailbox-0 value.
 */
int
gm200_flcn_fw_boot(struct nvkm_falcon_fw *fw, u32 *pmbox0, u32 *pmbox1, u32 mbox0_ok, u32 irqsclr)
{
	struct nvkm_falcon *falcon = fw->falcon;
	u32 mbox0, mbox1;
	int ret = 0;

	nvkm_falcon_wr32(falcon, 0x040, pmbox0 ? *pmbox0 : 0xcafebeef);
	if (pmbox1)
		nvkm_falcon_wr32(falcon, 0x044, *pmbox1);

	/* Set the boot vector and start the CPU (0x100 bit 1). */
	nvkm_falcon_wr32(falcon, 0x104, fw->boot_addr);
	nvkm_falcon_wr32(falcon, 0x100, 0x00000002);

	/* Wait for the halted bit (0x100 bit 4). */
	if (nvkm_msec(falcon->owner->device, 2000,
		if (nvkm_falcon_rd32(falcon, 0x100) & 0x00000010)
			break;
	) < 0)
		ret = -ETIMEDOUT;

	/* Read back both mailboxes for diagnostics even on timeout. */
	mbox0 = nvkm_falcon_rd32(falcon, 0x040);
	mbox1 = nvkm_falcon_rd32(falcon, 0x044);
	if (FLCN_ERRON(falcon, ret || mbox0 != mbox0_ok, "mbox %08x %08x", mbox0, mbox1))
		ret = ret ?: -EIO;

	if (irqsclr)
		nvkm_falcon_mask(falcon, 0x004, 0xffffffff, irqsclr);

	return ret;
}
201 
/* Load a firmware image into the falcon.
 *
 * Two paths exist:
 *  - fw->boot set: only the bootloader is PIO'd into the top of IMEM;
 *    the main image is fetched by the falcon itself via DMA, described
 *    by the boot-loader descriptor written in fw->func->load_bld().
 *  - otherwise: the non-secure code, secure code, and data sections
 *    are PIO'd directly into IMEM/DMEM.
 *
 * Returns 0 on success, -EINVAL for an unknown memory target,
 * -ETIMEDOUT if instance binding doesn't complete, or a PIO error.
 */
int
gm200_flcn_fw_load(struct nvkm_falcon_fw *fw)
{
	struct nvkm_falcon *falcon = fw->falcon;
	int target, ret;

	if (fw->inst) {
		/* DMA path: bind the instance block so the falcon can
		 * fetch the image itself.
		 */
		nvkm_falcon_mask(falcon, 0x048, 0x00000001, 0x00000001);

		/* Translate the instance block's memory target into the
		 * aperture encoding expected by bind_inst().
		 */
		switch (nvkm_memory_target(fw->inst)) {
		case NVKM_MEM_TARGET_VRAM: target = 0; break;
		case NVKM_MEM_TARGET_HOST: target = 2; break;
		case NVKM_MEM_TARGET_NCOH: target = 3; break;
		default:
			WARN_ON(1);
			return -EINVAL;
		}

		falcon->func->bind_inst(falcon, target, nvkm_memory_addr(fw->inst));

		/* Wait for bind status 5 - presumably "bind complete";
		 * confirm against hw docs.
		 */
		if (nvkm_msec(falcon->owner->device, 10,
			if (falcon->func->bind_stat(falcon, falcon->func->bind_intr) == 5)
				break;
		) < 0)
			return -ETIMEDOUT;

		/* Acknowledge the bind (interrupt + control bits). */
		nvkm_falcon_mask(falcon, 0x004, 0x00000008, 0x00000008);
		nvkm_falcon_mask(falcon, 0x058, 0x00000002, 0x00000002);

		/* Wait for the bind status to return to idle (0). */
		if (nvkm_msec(falcon->owner->device, 10,
			if (falcon->func->bind_stat(falcon, false) == 0)
				break;
		) < 0)
			return -ETIMEDOUT;
	} else {
		/* No instance block: NOTE(review) - presumably switches
		 * the falcon to physical addressing; confirm.
		 */
		nvkm_falcon_mask(falcon, 0x624, 0x00000080, 0x00000080);
		nvkm_falcon_wr32(falcon, 0x10c, 0x00000000);
	}

	if (fw->boot) {
		/* Aperture encoding for the falcon's own DMA engine
		 * differs from the bind_inst() encoding above.
		 */
		switch (nvkm_memory_target(&fw->fw.mem.memory)) {
		case NVKM_MEM_TARGET_VRAM: target = 4; break;
		case NVKM_MEM_TARGET_HOST: target = 5; break;
		case NVKM_MEM_TARGET_NCOH: target = 6; break;
		default:
			WARN_ON(1);
			return -EINVAL;
		}

		/* Place the bootloader at the very top of IMEM. */
		ret = nvkm_falcon_pio_wr(falcon, fw->boot, 0, 0,
					 IMEM, falcon->code.limit - fw->boot_size, fw->boot_size,
					 fw->boot_addr >> 8, false);
		if (ret)
			return ret;

		/* Hand off to the fw-specific boot-loader descriptor. */
		return fw->func->load_bld(fw);
	}

	/* PIO path: non-secure code (untagged secure bit)... */
	ret = nvkm_falcon_pio_wr(falcon, fw->fw.img + fw->nmem_base_img, fw->nmem_base_img, 0,
				 IMEM, fw->nmem_base, fw->nmem_size, fw->nmem_base >> 8, false);
	if (ret)
		return ret;

	/* ...secure code (final 'true' marks the upload secure)... */
	ret = nvkm_falcon_pio_wr(falcon, fw->fw.img + fw->imem_base_img, fw->imem_base_img, 0,
				 IMEM, fw->imem_base, fw->imem_size, fw->imem_base >> 8, true);
	if (ret)
		return ret;

	/* ...and the data section into DMEM (tags don't apply). */
	ret = nvkm_falcon_pio_wr(falcon, fw->fw.img + fw->dmem_base_img, fw->dmem_base_img, 0,
				 DMEM, fw->dmem_base, fw->dmem_size, 0, false);
	if (ret)
		return ret;

	return 0;
}
277 
278 int
279 gm200_flcn_fw_reset(struct nvkm_falcon_fw *fw)
280 {
281 	return nvkm_falcon_reset(fw->falcon);
282 }
283 
284 int
285 gm200_flcn_fw_signature(struct nvkm_falcon_fw *fw, u32 *sig_base_src)
286 {
287 	struct nvkm_falcon *falcon = fw->falcon;
288 	u32 addr = falcon->func->debug;
289 	int ret = 0;
290 
291 	if (addr) {
292 		ret = nvkm_falcon_enable(falcon);
293 		if (ret)
294 			return ret;
295 
296 		if (nvkm_falcon_rd32(falcon, addr) & 0x00100000) {
297 			*sig_base_src = fw->sig_base_dbg;
298 			return 1;
299 		}
300 	}
301 
302 	return ret;
303 }
304 
/* Default firmware handling ops for GM200-style falcons. */
const struct nvkm_falcon_fw_func
gm200_flcn_fw = {
	.signature = gm200_flcn_fw_signature,
	.reset = gm200_flcn_fw_reset,
	.load = gm200_flcn_fw_load,
	.boot = gm200_flcn_fw_boot,
};
312