xref: /openbmc/linux/drivers/gpu/drm/nouveau/nvkm/engine/fifo/nv50.c (revision d3e7a4392c82ec2d3c573cdc0fbcc843f3d76b12)
1 /*
2  * Copyright 2012 Red Hat Inc.
3  *
4  * Permission is hereby granted, free of charge, to any person obtaining a
5  * copy of this software and associated documentation files (the "Software"),
6  * to deal in the Software without restriction, including without limitation
7  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8  * and/or sell copies of the Software, and to permit persons to whom the
9  * Software is furnished to do so, subject to the following conditions:
10  *
11  * The above copyright notice and this permission notice shall be included in
12  * all copies or substantial portions of the Software.
13  *
14  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
17  * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18  * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19  * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20  * OTHER DEALINGS IN THE SOFTWARE.
21  *
22  * Authors: Ben Skeggs
23  */
24 #include "cgrp.h"
25 #include "chan.h"
26 #include "chid.h"
27 #include "runl.h"
28 
29 #include <core/gpuobj.h>
30 #include <subdev/timer.h>
31 
32 #include "nv50.h"
33 #include "channv50.h"
34 
35 #include <nvif/class.h>
36 
37 void
38 nv50_chan_stop(struct nvkm_chan *chan)
39 {
40 	struct nvkm_device *device = chan->cgrp->runl->fifo->engine.subdev.device;
41 
42 	nvkm_mask(device, 0x002600 + (chan->id * 4), 0x80000000, 0x00000000);
43 }
44 
45 void
46 nv50_chan_start(struct nvkm_chan *chan)
47 {
48 	struct nvkm_device *device = chan->cgrp->runl->fifo->engine.subdev.device;
49 
50 	nvkm_mask(device, 0x002600 + (chan->id * 4), 0x80000000, 0x80000000);
51 }
52 
53 void
54 nv50_chan_unbind(struct nvkm_chan *chan)
55 {
56 	struct nvkm_device *device = chan->cgrp->runl->fifo->engine.subdev.device;
57 
58 	nvkm_wr32(device, 0x002600 + (chan->id * 4), 0x00000000);
59 }
60 
61 static void
62 nv50_chan_bind(struct nvkm_chan *chan)
63 {
64 	struct nvkm_device *device = chan->cgrp->runl->fifo->engine.subdev.device;
65 
66 	nvkm_wr32(device, 0x002600 + (chan->id * 4), nv50_fifo_chan(chan)->ramfc->addr >> 12);
67 }
68 
/* Channel instance block parameters: 64KiB instmem, with a per-channel VMM. */
const struct nvkm_chan_func_inst
nv50_chan_inst = {
	.size = 0x10000,
	.vmm = true,
};
74 
/* NV50 channel hooks: bind/unbind program the 0x002600 context slot,
 * start/stop toggle its enable bit.
 */
static const struct nvkm_chan_func
nv50_chan = {
	.inst = &nv50_chan_inst,
	.bind = nv50_chan_bind,
	.unbind = nv50_chan_unbind,
	.start = nv50_chan_start,
	.stop = nv50_chan_stop,
};
83 
/* NV50 engines need no per-engine hooks; an empty func table suffices. */
static const struct nvkm_engn_func
nv50_engn = {
};
87 
/* Software "engine" likewise needs no hooks; shared by later chipsets. */
const struct nvkm_engn_func
nv50_engn_sw = {
};
91 
92 static bool
93 nv50_runl_pending(struct nvkm_runl *runl)
94 {
95 	return nvkm_rd32(runl->fifo->engine.subdev.device, 0x0032ec) & 0x00000100;
96 }
97 
/* Wait for a previously-committed runlist update to be accepted by HW.
 *
 * nvkm_msec() expands to a poll loop bounded by fifo->timeout.chan_msec;
 * the "return 0" inside its body exits this function directly on success.
 *
 * Returns 0 once the update is no longer pending, -ETIMEDOUT otherwise.
 */
int
nv50_runl_wait(struct nvkm_runl *runl)
{
	struct nvkm_fifo *fifo = runl->fifo;

	nvkm_msec(fifo->engine.subdev.device, fifo->timeout.chan_msec,
		if (!nvkm_runl_update_pending(runl))
			return 0;
		usleep_range(1, 2);
	);

	return -ETIMEDOUT;
}
111 
112 static void
113 nv50_runl_commit(struct nvkm_runl *runl, struct nvkm_memory *memory, u32 start, int count)
114 {
115 	struct nvkm_device *device = runl->fifo->engine.subdev.device;
116 	u64 addr = nvkm_memory_addr(memory) + start;
117 
118 	nvkm_wr32(device, 0x0032f4, addr >> 12);
119 	nvkm_wr32(device, 0x0032ec, count);
120 }
121 
static void
nv50_runl_insert_chan(struct nvkm_chan *chan, struct nvkm_memory *memory, u64 offset)
{
	/* An NV50 runlist entry is simply the 32-bit channel ID. */
	nvkm_wo32(memory, offset, chan->id);
}
127 
/* Reserve space in the runlist buffer for the next update.
 *
 * The buffer is lazily allocated at twice the worst-case runlist size so a
 * new segment can be written while the previous one is still in flight.
 * When appending the next segment would run off the end, wait for any
 * pending submit to land before rewinding to offset 0.
 *
 * On success, *offset receives the start of the reserved segment and the
 * backing memory object is returned; on failure an ERR_PTR() is returned.
 */
static struct nvkm_memory *
nv50_runl_alloc(struct nvkm_runl *runl, u32 *offset)
{
	/* Bytes needed for the current cgrp+chan population, page-aligned. */
	const u32 segment = ALIGN((runl->cgrp_nr + runl->chan_nr) * runl->func->size, 0x1000);
	/* Worst-case entry count (cgid may be absent on this generation). */
	const u32 maxsize = (runl->cgid ? runl->cgid->nr : 0) + runl->chid->nr;
	int ret;

	if (unlikely(!runl->mem)) {
		ret = nvkm_memory_new(runl->fifo->engine.subdev.device, NVKM_MEM_TARGET_INST,
				      maxsize * 2 * runl->func->size, 0, false, &runl->mem);
		if (ret) {
			RUNL_ERROR(runl, "alloc %d\n", ret);
			return ERR_PTR(ret);
		}
	} else {
		if (runl->offset + segment >= nvkm_memory_size(runl->mem)) {
			/* Buffer exhausted: drain HW before reusing the front. */
			ret = runl->func->wait(runl);
			if (ret) {
				RUNL_DEBUG(runl, "rewind timeout");
				return ERR_PTR(ret);
			}

			runl->offset = 0;
		}
	}

	*offset = runl->offset;
	runl->offset += segment;
	return runl->mem;
}
158 
/* Rebuild the runlist from the current cgrp/channel lists and submit it.
 *
 * Writes one entry per hardware channel group (when ->insert_cgrp is
 * implemented) followed by one entry per channel, then commits the new
 * segment to hardware. The cgrp list is rotated each update as a crude
 * fairness measure so every group periodically heads the runlist.
 *
 * Returns 0 on success, or a negative errno from segment allocation.
 */
int
nv50_runl_update(struct nvkm_runl *runl)
{
	struct nvkm_memory *memory;
	struct nvkm_cgrp *cgrp;
	struct nvkm_chan *chan;
	u32 start, offset, count;

	/*TODO: prio, interleaving. */

	RUNL_TRACE(runl, "RAMRL: update cgrps:%d chans:%d", runl->cgrp_nr, runl->chan_nr);
	memory = nv50_runl_alloc(runl, &start);
	if (IS_ERR(memory))
		return PTR_ERR(memory);

	RUNL_TRACE(runl, "RAMRL: update start:%08x", start);
	offset = start;

	nvkm_kmap(memory);
	nvkm_runl_foreach_cgrp(cgrp, runl) {
		/* Only HW-backed groups get their own runlist entry. */
		if (cgrp->hw) {
			CGRP_TRACE(cgrp, "     RAMRL+%08x: chans:%d", offset, cgrp->chan_nr);
			runl->func->insert_cgrp(cgrp, memory, offset);
			offset += runl->func->size;
		}

		nvkm_cgrp_foreach_chan(chan, cgrp) {
			CHAN_TRACE(chan, "RAMRL+%08x: [%s]", offset, chan->name);
			runl->func->insert_chan(chan, memory, offset);
			offset += runl->func->size;
		}
	}
	nvkm_done(memory);

	/*TODO: look into using features on newer HW to guarantee forward progress. */
	list_rotate_left(&runl->cgrps);

	count = (offset - start) / runl->func->size;
	RUNL_TRACE(runl, "RAMRL: commit start:%08x count:%d", start, count);

	runl->func->commit(runl, memory, start, count);
	return 0;
}
202 
/* NV50 runlist: 4-byte entries (channel ID only); shared by later chipsets. */
const struct nvkm_runl_func
nv50_runl = {
	.size = 4,
	.update = nv50_runl_update,
	.insert_chan = nv50_runl_insert_chan,
	.commit = nv50_runl_commit,
	.wait = nv50_runl_wait,
	.pending = nv50_runl_pending,
};
212 
/* Bring up NV50 PFIFO: reset the unit, clear channel state, submit an
 * initial runlist and enable processing. Register write order matters.
 */
void
nv50_fifo_init(struct nvkm_fifo *fifo)
{
	struct nvkm_runl *runl = nvkm_runl_first(fifo);
	struct nvkm_device *device = fifo->engine.subdev.device;
	int i;

	/* Pulse the PFIFO enable bit in PMC to reset the unit. */
	nvkm_mask(device, 0x000200, 0x00000100, 0x00000000);
	nvkm_mask(device, 0x000200, 0x00000100, 0x00000100);
	/* Magic setup values inherited from the original driver — semantics
	 * of 0x00250c/0x002044 are undocumented; do not change. */
	nvkm_wr32(device, 0x00250c, 0x6f3cfc34);
	nvkm_wr32(device, 0x002044, 0x01003fff);

	/* Ack all pending interrupts, then unmask (almost) all sources. */
	nvkm_wr32(device, 0x002100, 0xffffffff);
	nvkm_wr32(device, 0x002140, 0xbfffffff);

	/* Clear every channel context slot (128 channels on NV50). */
	for (i = 0; i < 128; i++)
		nvkm_wr32(device, 0x002600 + (i * 4), 0x00000000);

	/* Force an initial (empty) runlist submission. */
	atomic_set(&runl->changed, 1);
	runl->func->update(runl);

	/* Enable puller/pusher and the PFIFO engine itself. */
	nvkm_wr32(device, 0x003200, 0x00000001);
	nvkm_wr32(device, 0x003250, 0x00000001);
	nvkm_wr32(device, 0x002500, 0x00000001);
}
238 
/* Allocate the channel-ID space: IDs [1, nr-2] are usable.
 * CHID 0 is unusable (some kind of PIO channel?), 127 is "channel invalid".
 */
int
nv50_fifo_chid_ctor(struct nvkm_fifo *fifo, int nr)
{
	return nvkm_chid_new(&nvkm_chan_event, &fifo->engine.subdev, nr, 1, nr - 2, &fifo->chid);
}
245 
/* NV50-class PFIFO exposes a fixed 128 channel IDs. */
int
nv50_fifo_chid_nr(struct nvkm_fifo *fifo)
{
	return 128;
}
251 
/* No per-instance state beyond the base object; return it for the core to free. */
void *
nv50_fifo_dtor(struct nvkm_fifo *base)
{
	return nv50_fifo(base);
}
258 
259 int
260 nv50_fifo_new_(const struct nvkm_fifo_func *func, struct nvkm_device *device,
261 	       enum nvkm_subdev_type type, int inst, struct nvkm_fifo **pfifo)
262 {
263 	struct nv50_fifo *fifo;
264 	int ret;
265 
266 	if (!(fifo = kzalloc(sizeof(*fifo), GFP_KERNEL)))
267 		return -ENOMEM;
268 	*pfifo = &fifo->base;
269 
270 	ret = nvkm_fifo_ctor(func, device, type, inst, &fifo->base);
271 	if (ret)
272 		return ret;
273 
274 	return 0;
275 }
276 
/* NV50 FIFO implementation; interrupt/engine handling is shared with NV04. */
static const struct nvkm_fifo_func
nv50_fifo = {
	.dtor = nv50_fifo_dtor,
	.chid_nr = nv50_fifo_chid_nr,
	.chid_ctor = nv50_fifo_chid_ctor,
	.runl_ctor = nv04_fifo_runl_ctor,
	.init = nv50_fifo_init,
	.intr = nv04_fifo_intr,
	.engine_id = nv04_fifo_engine_id,
	.pause = nv04_fifo_pause,
	.start = nv04_fifo_start,
	.runl = &nv50_runl,
	.engn = &nv50_engn,
	.engn_sw = &nv50_engn_sw,
	.cgrp = {{                           }, &nv04_cgrp },
	.chan = {{ 0, 0, NV50_CHANNEL_GPFIFO }, &nv50_chan, .oclass = &nv50_fifo_gpfifo_oclass },
};
294 
/* Public constructor: instantiate the FIFO engine with the NV50 func table. */
int
nv50_fifo_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst,
	      struct nvkm_fifo **pfifo)
{
	return nv50_fifo_new_(&nv50_fifo, device, type, inst, pfifo);
}
301