/*
 * Copyright (c) 2016, NVIDIA CORPORATION. All rights reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
 * DEALINGS IN THE SOFTWARE.
 */
#include "priv.h"

#include <core/gpuobj.h>
#include <core/memory.h>
#include <subdev/timer.h>

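/*
 * Upload code into the falcon's instruction memory (IMEM) through the
 * indirect access port for @port: 0x180 is the control register (IMEMC in
 * NVIDIA's register naming), 0x184 the data port (IMEMD) and 0x188 the tag
 * port (IMEMT). Bit 24 of the control word enables auto-increment on
 * writes, and bit 28 marks the uploaded block as secure. Each 256-byte
 * (0x40-word) block must be given its own tag.
 */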
static void
nvkm_falcon_v1_load_imem(struct nvkm_falcon *falcon, void *data, u32 start,
			 u32 size, u16 tag, u8 port, bool secure)
{
	u8 rem = size % 4;
	u32 reg;
	int i;

	size -= rem;

	reg = start | BIT(24) | (secure ? BIT(28) : 0);
	nvkm_falcon_wr32(falcon, 0x180 + (port * 16), reg);
	for (i = 0; i < size / 4; i++) {
		/* write new tag every 256B */
		if ((i & 0x3f) == 0)
			nvkm_falcon_wr32(falcon, 0x188, tag++);
		nvkm_falcon_wr32(falcon, 0x184, ((u32 *)data)[i]);
	}

	/*
	 * If size is not a multiple of 4, mask the last word to ensure garbage
	 * does not get written
	 */
	if (rem) {
		u32 extra = ((u32 *)data)[i];

		/* write new tag every 256B */
		if ((i & 0x3f) == 0)
			nvkm_falcon_wr32(falcon, 0x188, tag++);
		nvkm_falcon_wr32(falcon, 0x184, extra & (BIT(rem * 8) - 1));
		++i;
	}

	/* code must be padded to 0x40 words */
	for (; i & 0x3f; i++)
		nvkm_falcon_wr32(falcon, 0x184, 0);
}

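/*
 * Copy a buffer into the falcon's data memory (DMEM) via the equivalent
 * indirect ports at 0x1c0 (control) and 0x1c4 (data); bit 24 of the
 * control word again selects auto-increment on writes.
 */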
static void
nvkm_falcon_v1_load_dmem(struct nvkm_falcon *falcon, void *data, u32 start,
			 u32 size, u8 port)
{
	u8 rem = size % 4;
	int i;

	size -= rem;

	nvkm_falcon_wr32(falcon, 0x1c0 + (port * 16), start | (0x1 << 24));
	for (i = 0; i < size / 4; i++)
		nvkm_falcon_wr32(falcon, 0x1c4, ((u32 *)data)[i]);

	/*
	 * If size is not a multiple of 4, mask the last word to ensure garbage
	 * does not get written
	 */
	if (rem) {
		u32 extra = ((u32 *)data)[i];

		nvkm_falcon_wr32(falcon, 0x1c4, extra & (BIT(rem * 8) - 1));
	}
}

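/*
 * Read DMEM back into a buffer; bit 25 of the control word enables
 * auto-increment on reads instead of writes.
 */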
static void
nvkm_falcon_v1_read_dmem(struct nvkm_falcon *falcon, u32 start, u32 size,
			 u8 port, void *data)
{
	u8 rem = size % 4;
	int i;

	size -= rem;

	nvkm_falcon_wr32(falcon, 0x1c0 + (port * 16), start | (0x1 << 25));
	for (i = 0; i < size / 4; i++)
		((u32 *)data)[i] = nvkm_falcon_rd32(falcon, 0x1c4);

	/*
	 * If size is not a multiple of 4, read the last word and copy only the
	 * remaining bytes so garbage is not written past the end of the buffer
	 */
	if (rem) {
		u32 extra = nvkm_falcon_rd32(falcon, 0x1c4);

		for (i = size; i < size + rem; i++) {
			((u8 *)data)[i] = (u8)(extra & 0xff);
			extra >>= 8;
		}
	}
}

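/*
 * Bind (or, for a NULL ctx, unbind) the instance block the falcon DMAs
 * through, and program the aperture registers at 0xe00 so that each DMA
 * index resolves to the right target: virtual, or physical VRAM,
 * coherent or non-coherent system memory.
 */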
static void
nvkm_falcon_v1_bind_context(struct nvkm_falcon *falcon, struct nvkm_gpuobj *ctx)
{
	u32 inst_loc;

	/* disable instance block binding */
	if (ctx == NULL) {
		nvkm_falcon_wr32(falcon, 0x10c, 0x0);
		return;
	}

	nvkm_falcon_wr32(falcon, 0x10c, 0x1);

	/* setup apertures - virtual */
	nvkm_falcon_wr32(falcon, 0xe00 + 4 * FALCON_DMAIDX_UCODE, 0x4);
	nvkm_falcon_wr32(falcon, 0xe00 + 4 * FALCON_DMAIDX_VIRT, 0x0);
	/* setup apertures - physical */
	nvkm_falcon_wr32(falcon, 0xe00 + 4 * FALCON_DMAIDX_PHYS_VID, 0x4);
	nvkm_falcon_wr32(falcon, 0xe00 + 4 * FALCON_DMAIDX_PHYS_SYS_COH, 0x5);
	nvkm_falcon_wr32(falcon, 0xe00 + 4 * FALCON_DMAIDX_PHYS_SYS_NCOH, 0x6);

	/* set instance block location */
	switch (nvkm_memory_target(ctx->memory)) {
	case NVKM_MEM_TARGET_VRAM: inst_loc = 0; break;
	case NVKM_MEM_TARGET_NCOH: inst_loc = 3; break;
	default:
		WARN_ON(1);
		return;
	}

	/* enable context and point the falcon at the instance block */
	nvkm_falcon_mask(falcon, 0x048, 0x1, 0x1);
	nvkm_falcon_wr32(falcon, 0x480,
			 ((ctx->addr >> 12) & 0xfffffff) |
			 (inst_loc << 28) | (1 << 30));
}

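/* 0x104 is the boot vector: the IMEM address execution will start from */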
static void
nvkm_falcon_v1_set_start_addr(struct nvkm_falcon *falcon, u32 start_addr)
{
	nvkm_falcon_wr32(falcon, 0x104, start_addr);
}

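/*
 * Kick off execution. When bit 6 of 0x100 (CPUCTL) is set, the start
 * request must go through the alias register at 0x130 rather than
 * CPUCTL itself.
 */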
static void
nvkm_falcon_v1_start(struct nvkm_falcon *falcon)
{
	u32 reg = nvkm_falcon_rd32(falcon, 0x100);

	if (reg & BIT(6))
		nvkm_falcon_wr32(falcon, 0x130, 0x2);
	else
		nvkm_falcon_wr32(falcon, 0x100, 0x2);
}

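/* wait up to @ms milliseconds for the halted bit (4) of CPUCTL to rise */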
static int
nvkm_falcon_v1_wait_for_halt(struct nvkm_falcon *falcon, u32 ms)
{
	struct nvkm_device *device = falcon->owner->device;
	int ret;

	ret = nvkm_wait_msec(device, ms, falcon->addr + 0x100, 0x10, 0x10);
	if (ret < 0)
		return ret;

	return 0;
}

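/*
 * Acknowledge the interrupts in @mask by setting them in the clear
 * register (0x004), then poll the status register (0x008) until they
 * are gone.
 */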
static int
nvkm_falcon_v1_clear_interrupt(struct nvkm_falcon *falcon, u32 mask)
{
	struct nvkm_device *device = falcon->owner->device;
	int ret;

	/* clear interrupt(s) */
	nvkm_falcon_mask(falcon, 0x004, mask, mask);
	/* wait until interrupts are cleared */
	ret = nvkm_wait_msec(device, 10, falcon->addr + 0x008, mask, 0x0);
	if (ret < 0)
		return ret;

	return 0;
}

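/* wait for all the units reported in the low 16 bits of 0x04c to go idle */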
static int
falcon_v1_wait_idle(struct nvkm_falcon *falcon)
{
	struct nvkm_device *device = falcon->owner->device;
	int ret;

	ret = nvkm_wait_msec(device, 10, falcon->addr + 0x04c, 0xffff, 0x0);
	if (ret < 0)
		return ret;

	return 0;
}

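/*
 * Power-up sequence: wait for the IMEM/DMEM scrubbing bits (1 and 2) of
 * 0x10c to clear, wait for the falcon to go idle, then unmask all of its
 * interrupt sources.
 */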
static int
nvkm_falcon_v1_enable(struct nvkm_falcon *falcon)
{
	struct nvkm_device *device = falcon->owner->device;
	int ret;

	ret = nvkm_wait_msec(device, 10, falcon->addr + 0x10c, 0x6, 0x0);
	if (ret < 0) {
		nvkm_error(falcon->user, "Falcon mem scrubbing timeout\n");
		return ret;
	}

	ret = falcon_v1_wait_idle(falcon);
	if (ret)
		return ret;

	/* enable IRQs */
	nvkm_falcon_wr32(falcon, 0x010, 0xff);

	return 0;
}

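/* mask every interrupt source and wait for in-flight work to finish */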
static void
nvkm_falcon_v1_disable(struct nvkm_falcon *falcon)
{
	/* disable IRQs and wait for any previous code to complete */
	nvkm_falcon_wr32(falcon, 0x014, 0xff);
	falcon_v1_wait_idle(falcon);
}

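/* plug the v1 implementations into the generic falcon interface */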
static const struct nvkm_falcon_func
nvkm_falcon_v1 = {
	.load_imem = nvkm_falcon_v1_load_imem,
	.load_dmem = nvkm_falcon_v1_load_dmem,
	.read_dmem = nvkm_falcon_v1_read_dmem,
	.bind_context = nvkm_falcon_v1_bind_context,
	.start = nvkm_falcon_v1_start,
	.wait_for_halt = nvkm_falcon_v1_wait_for_halt,
	.clear_interrupt = nvkm_falcon_v1_clear_interrupt,
	.enable = nvkm_falcon_v1_enable,
	.disable = nvkm_falcon_v1_disable,
	.set_start_addr = nvkm_falcon_v1_set_start_addr,
};

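/*
 * nvkm_falcon_v1_new() - allocate a falcon and hook up the v1 functions
 *
 * A minimal usage sketch; the subdev, name and base address below are
 * illustrative only, not taken from this file:
 *
 *	struct nvkm_falcon *falcon;
 *	int ret = nvkm_falcon_v1_new(subdev, "pmu", 0x10a000, &falcon);
 *	if (ret)
 *		return ret;
 */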
int
nvkm_falcon_v1_new(struct nvkm_subdev *owner, const char *name, u32 addr,
		   struct nvkm_falcon **pfalcon)
{
	struct nvkm_falcon *falcon;

	if (!(falcon = *pfalcon = kzalloc(sizeof(*falcon), GFP_KERNEL)))
		return -ENOMEM;
	nvkm_falcon_ctor(&nvkm_falcon_v1, owner, name, addr, falcon);
	return 0;
}