/*
 * Copyright 2012 Red Hat Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Ben Skeggs
 */
#include "nv50.h"
#include "nv04.h"

#include <core/client.h>
#include <core/engctx.h>
#include <core/ramht.h>
#include <subdev/bar.h>
#include <subdev/mmu.h>
#include <subdev/timer.h>

#include <nvif/class.h>
#include <nvif/unpack.h>

/*******************************************************************************
 * FIFO channel objects
 ******************************************************************************/

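/* The playlist is double-buffered: each update rebuilds the list in the
 * currently-inactive buffer by scanning the channel table (0x002600 + chid*4,
 * where bit 31 appears to mark a runnable channel), then points PFIFO at the
 * new list.  The register roles assumed below (0x0032f4 = list address >> 12,
 * 0x0032ec = entry count, 0x002500 = reload trigger) are inferred from how
 * this driver uses them rather than from public documentation.
 */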
static void
nv50_fifo_playlist_update_locked(struct nv50_fifo *fifo)
{
	struct nvkm_device *device = fifo->base.engine.subdev.device;
	struct nvkm_bar *bar = device->bar;
	struct nvkm_gpuobj *cur;
	int i, p;

	cur = fifo->playlist[fifo->cur_playlist];
	fifo->cur_playlist = !fifo->cur_playlist;

	for (i = fifo->base.min, p = 0; i < fifo->base.max; i++) {
		if (nvkm_rd32(device, 0x002600 + (i * 4)) & 0x80000000)
			nv_wo32(cur, p++ * 4, i);
	}

	bar->flush(bar);

	nvkm_wr32(device, 0x0032f4, cur->addr >> 12);
	nvkm_wr32(device, 0x0032ec, p);
	nvkm_wr32(device, 0x002500, 0x00000101);
}

void
nv50_fifo_playlist_update(struct nv50_fifo *fifo)
{
	mutex_lock(&nv_subdev(fifo)->mutex);
	nv50_fifo_playlist_update_locked(fifo);
	mutex_unlock(&nv_subdev(fifo)->mutex);
}

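/* Each engine owns a fixed slot in the channel's engine context table
 * (base->eng): GR at 0x0000, MPEG at 0x0060.  The six words written below
 * resemble an nv50 DMA object descriptor (flags, limit, start, upper address
 * bits) covering the engine's context image; that reading is inferred from
 * the layout rather than documented.
 */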
static int
nv50_fifo_context_attach(struct nvkm_object *parent, struct nvkm_object *object)
{
	struct nvkm_bar *bar = nvkm_bar(parent);
	struct nv50_fifo_base *base = (void *)parent->parent;
	struct nvkm_gpuobj *ectx = (void *)object;
	u64 limit = ectx->addr + ectx->size - 1;
	u64 start = ectx->addr;
	u32 addr;

	switch (nv_engidx(object->engine)) {
	case NVDEV_ENGINE_SW   : return 0;
	case NVDEV_ENGINE_GR   : addr = 0x0000; break;
	case NVDEV_ENGINE_MPEG : addr = 0x0060; break;
	default:
		return -EINVAL;
	}

	nv_engctx(ectx)->addr = nv_gpuobj(base)->addr >> 12;
	nv_wo32(base->eng, addr + 0x00, 0x00190000);
	nv_wo32(base->eng, addr + 0x04, lower_32_bits(limit));
	nv_wo32(base->eng, addr + 0x08, lower_32_bits(start));
	nv_wo32(base->eng, addr + 0x0c, upper_32_bits(limit) << 24 |
					upper_32_bits(start));
	nv_wo32(base->eng, addr + 0x10, 0x00000000);
	nv_wo32(base->eng, addr + 0x14, 0x00000000);
	bar->flush(bar);
	return 0;
}

static int
nv50_fifo_context_detach(struct nvkm_object *parent, bool suspend,
			 struct nvkm_object *object)
{
	struct nv50_fifo *fifo = (void *)parent->engine;
	struct nv50_fifo_base *base = (void *)parent->parent;
	struct nv50_fifo_chan *chan = (void *)parent;
	struct nvkm_subdev *subdev = &fifo->base.engine.subdev;
	struct nvkm_device *device = subdev->device;
	struct nvkm_bar *bar = device->bar;
	u32 addr, me;
	int ret = 0;

	switch (nv_engidx(object->engine)) {
	case NVDEV_ENGINE_SW   : return 0;
	case NVDEV_ENGINE_GR   : addr = 0x0000; break;
	case NVDEV_ENGINE_MPEG : addr = 0x0060; break;
	default:
		return -EINVAL;
	}

	/* HW bug workaround:
	 *
	 * PFIFO will hang forever if the connected engines don't report
	 * that they've processed the context switch request.
	 *
	 * In order for the kickoff to work, we need to ensure all the
	 * connected engines are in a state where they can answer.
	 *
	 * Newer chipsets don't seem to suffer from this issue, and there's
	 * also an "ignore these engines" bitmask reg we can use if we hit
	 * the issue there.
	 */
	me = nvkm_mask(device, 0x00b860, 0x00000001, 0x00000001);

	/* do the kickoff: writing the channel's instance address requests a
	 * context unload; the register appears to read back 0xffffffff until
	 * the engines have answered (inferred from this polling loop, not
	 * from documentation)
	 */
	nvkm_wr32(device, 0x0032fc, nv_gpuobj(base)->addr >> 12);
	if (nvkm_msec(device, 2000,
		if (nvkm_rd32(device, 0x0032fc) != 0xffffffff)
			break;
	) < 0) {
		nvkm_error(subdev, "channel %d [%s] unload timeout\n",
			   chan->base.chid, nvkm_client_name(chan));
		if (suspend)
			ret = -EBUSY;
	}
	nvkm_wr32(device, 0x00b860, me);

	if (ret == 0) {
		nv_wo32(base->eng, addr + 0x00, 0x00000000);
		nv_wo32(base->eng, addr + 0x04, 0x00000000);
		nv_wo32(base->eng, addr + 0x08, 0x00000000);
		nv_wo32(base->eng, addr + 0x0c, 0x00000000);
		nv_wo32(base->eng, addr + 0x10, 0x00000000);
		nv_wo32(base->eng, addr + 0x14, 0x00000000);
		bar->flush(bar);
	}

	return ret;
}

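/* RAMHT entries map an object handle to a context value: the low bits carry
 * the object's instance offset in 16-byte units (or a token non-zero value
 * for non-gpuobj classes), and bits 20+ select the engine that methods for
 * the object should be routed to.
 */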
static int
nv50_fifo_object_attach(struct nvkm_object *parent,
			struct nvkm_object *object, u32 handle)
{
	struct nv50_fifo_chan *chan = (void *)parent;
	u32 context;

	if (nv_iclass(object, NV_GPUOBJ_CLASS))
		context = nv_gpuobj(object)->node->offset >> 4;
	else
		context = 0x00000004; /* just non-zero */

	switch (nv_engidx(object->engine)) {
	case NVDEV_ENGINE_DMAOBJ:
	case NVDEV_ENGINE_SW    : context |= 0x00000000; break;
	case NVDEV_ENGINE_GR    : context |= 0x00100000; break;
	case NVDEV_ENGINE_MPEG  : context |= 0x00200000; break;
	default:
		return -EINVAL;
	}

	return nvkm_ramht_insert(chan->ramht, 0, handle, context);
}

void
nv50_fifo_object_detach(struct nvkm_object *parent, int cookie)
{
	struct nv50_fifo_chan *chan = (void *)parent;
	nvkm_ramht_remove(chan->ramht, cookie);
}

static int
nv50_fifo_chan_ctor_dma(struct nvkm_object *parent, struct nvkm_object *engine,
			struct nvkm_oclass *oclass, void *data, u32 size,
			struct nvkm_object **pobject)
{
	union {
		struct nv03_channel_dma_v0 v0;
	} *args = data;
	struct nvkm_bar *bar = nvkm_bar(parent);
	struct nv50_fifo_base *base = (void *)parent;
	struct nv50_fifo_chan *chan;
	int ret;

	nvif_ioctl(parent, "create channel dma size %d\n", size);
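	/* note: nvif_unpack() assigns its status to the local 'ret' as a
	 * side effect, which is what the else path below returns (-ENOSYS
	 * for an unrecognised request)
	 */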
	if (nvif_unpack(args->v0, 0, 0, false)) {
		nvif_ioctl(parent, "create channel dma vers %d pushbuf %08x "
				   "offset %016llx\n", args->v0.version,
			   args->v0.pushbuf, args->v0.offset);
	} else {
		return ret;
	}

	ret = nvkm_fifo_channel_create(parent, engine, oclass, 0, 0xc00000,
				       0x2000, args->v0.pushbuf,
				       (1ULL << NVDEV_ENGINE_DMAOBJ) |
				       (1ULL << NVDEV_ENGINE_SW) |
				       (1ULL << NVDEV_ENGINE_GR) |
				       (1ULL << NVDEV_ENGINE_MPEG), &chan);
	*pobject = nv_object(chan);
	if (ret)
		return ret;

	args->v0.chid = chan->base.chid;

	nv_parent(chan)->context_attach = nv50_fifo_context_attach;
	nv_parent(chan)->context_detach = nv50_fifo_context_detach;
	nv_parent(chan)->object_attach = nv50_fifo_object_attach;
	nv_parent(chan)->object_detach = nv50_fifo_object_detach;

	ret = nvkm_ramht_new(nv_object(chan), nv_object(chan), 0x8000, 16,
			     &chan->ramht);
	if (ret)
		return ret;

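	/* initial RAMFC state: DMA PUT (0x08/0x0c) and GET (0x10/0x14) both
	 * start at the requested offset, 0x48 points at the pushbuf DMA
	 * object, and 0x80 encodes the RAMHT size, search mode and address;
	 * the remaining values are magic carried over unchanged from the
	 * original driver
	 */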
	nv_wo32(base->ramfc, 0x08, lower_32_bits(args->v0.offset));
	nv_wo32(base->ramfc, 0x0c, upper_32_bits(args->v0.offset));
	nv_wo32(base->ramfc, 0x10, lower_32_bits(args->v0.offset));
	nv_wo32(base->ramfc, 0x14, upper_32_bits(args->v0.offset));
	nv_wo32(base->ramfc, 0x3c, 0x003f6078);
	nv_wo32(base->ramfc, 0x44, 0x01003fff);
	nv_wo32(base->ramfc, 0x48, chan->base.pushgpu->node->offset >> 4);
	nv_wo32(base->ramfc, 0x4c, 0xffffffff);
	nv_wo32(base->ramfc, 0x60, 0x7fffffff);
	nv_wo32(base->ramfc, 0x78, 0x00000000);
	nv_wo32(base->ramfc, 0x7c, 0x30000001);
	nv_wo32(base->ramfc, 0x80, ((chan->ramht->bits - 9) << 27) |
				   (4 << 24) /* SEARCH_FULL */ |
				   (chan->ramht->gpuobj.node->offset >> 4));
	bar->flush(bar);
	return 0;
}

static int
nv50_fifo_chan_ctor_ind(struct nvkm_object *parent, struct nvkm_object *engine,
			struct nvkm_oclass *oclass, void *data, u32 size,
			struct nvkm_object **pobject)
{
	union {
		struct nv50_channel_gpfifo_v0 v0;
	} *args = data;
	struct nvkm_bar *bar = nvkm_bar(parent);
	struct nv50_fifo_base *base = (void *)parent;
	struct nv50_fifo_chan *chan;
	u64 ioffset, ilength;
	int ret;

	nvif_ioctl(parent, "create channel gpfifo size %d\n", size);
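	/* as in the dma ctor above, nvif_unpack() sets 'ret' as a side
	 * effect
	 */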
	if (nvif_unpack(args->v0, 0, 0, false)) {
		nvif_ioctl(parent, "create channel gpfifo vers %d pushbuf %08x "
				   "ioffset %016llx ilength %08x\n",
			   args->v0.version, args->v0.pushbuf, args->v0.ioffset,
			   args->v0.ilength);
	} else {
		return ret;
	}

	ret = nvkm_fifo_channel_create(parent, engine, oclass, 0, 0xc00000,
				       0x2000, args->v0.pushbuf,
				       (1ULL << NVDEV_ENGINE_DMAOBJ) |
				       (1ULL << NVDEV_ENGINE_SW) |
				       (1ULL << NVDEV_ENGINE_GR) |
				       (1ULL << NVDEV_ENGINE_MPEG), &chan);
	*pobject = nv_object(chan);
	if (ret)
		return ret;

	args->v0.chid = chan->base.chid;

	nv_parent(chan)->context_attach = nv50_fifo_context_attach;
	nv_parent(chan)->context_detach = nv50_fifo_context_detach;
	nv_parent(chan)->object_attach = nv50_fifo_object_attach;
	nv_parent(chan)->object_detach = nv50_fifo_object_detach;

	ret = nvkm_ramht_new(nv_object(chan), nv_object(chan), 0x8000, 16,
			     &chan->ramht);
	if (ret)
		return ret;

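	/* GPFIFO entries are 8 bytes each, so the ring size is stored as
	 * log2(number of entries), packed into bits 16+ of RAMFC word 0x54
	 * alongside the upper bits of the ring's address
	 */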
	ioffset = args->v0.ioffset;
	ilength = order_base_2(args->v0.ilength / 8);

	nv_wo32(base->ramfc, 0x3c, 0x403f6078);
	nv_wo32(base->ramfc, 0x44, 0x01003fff);
	nv_wo32(base->ramfc, 0x48, chan->base.pushgpu->node->offset >> 4);
	nv_wo32(base->ramfc, 0x50, lower_32_bits(ioffset));
	nv_wo32(base->ramfc, 0x54, upper_32_bits(ioffset) | (ilength << 16));
	nv_wo32(base->ramfc, 0x60, 0x7fffffff);
	nv_wo32(base->ramfc, 0x78, 0x00000000);
	nv_wo32(base->ramfc, 0x7c, 0x30000001);
	nv_wo32(base->ramfc, 0x80, ((chan->ramht->bits - 9) << 27) |
				   (4 << 24) /* SEARCH_FULL */ |
				   (chan->ramht->gpuobj.node->offset >> 4));
	bar->flush(bar);
	return 0;
}

void
nv50_fifo_chan_dtor(struct nvkm_object *object)
{
	struct nv50_fifo_chan *chan = (void *)object;
	nvkm_ramht_ref(NULL, &chan->ramht);
	nvkm_fifo_channel_destroy(&chan->base);
}

static int
nv50_fifo_chan_init(struct nvkm_object *object)
{
	struct nv50_fifo *fifo = (void *)object->engine;
	struct nv50_fifo_base *base = (void *)object->parent;
	struct nv50_fifo_chan *chan = (void *)object;
	struct nvkm_gpuobj *ramfc = base->ramfc;
	struct nvkm_device *device = fifo->base.engine.subdev.device;
	u32 chid = chan->base.chid;
	int ret;

	ret = nvkm_fifo_channel_init(&chan->base);
	if (ret)
		return ret;

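	/* writing the RAMFC address (in 4KiB units) with bit 31 set marks
	 * the channel runnable in the channel table; the playlist update
	 * below then picks it up
	 */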
	nvkm_wr32(device, 0x002600 + (chid * 4), 0x80000000 | ramfc->addr >> 12);
	nv50_fifo_playlist_update(fifo);
	return 0;
}

int
nv50_fifo_chan_fini(struct nvkm_object *object, bool suspend)
{
	struct nv50_fifo *fifo = (void *)object->engine;
	struct nv50_fifo_chan *chan = (void *)object;
	struct nvkm_device *device = fifo->base.engine.subdev.device;
	u32 chid = chan->base.chid;

	/* remove channel from playlist, fifo will unload context */
	nvkm_mask(device, 0x002600 + (chid * 4), 0x80000000, 0x00000000);
	nv50_fifo_playlist_update(fifo);
	nvkm_wr32(device, 0x002600 + (chid * 4), 0x00000000);

	return nvkm_fifo_channel_fini(&chan->base, suspend);
}

static struct nvkm_ofuncs
nv50_fifo_ofuncs_dma = {
	.ctor = nv50_fifo_chan_ctor_dma,
	.dtor = nv50_fifo_chan_dtor,
	.init = nv50_fifo_chan_init,
	.fini = nv50_fifo_chan_fini,
	.map  = _nvkm_fifo_channel_map,
	.rd32 = _nvkm_fifo_channel_rd32,
	.wr32 = _nvkm_fifo_channel_wr32,
	.ntfy = _nvkm_fifo_channel_ntfy
};

static struct nvkm_ofuncs
nv50_fifo_ofuncs_ind = {
	.ctor = nv50_fifo_chan_ctor_ind,
	.dtor = nv50_fifo_chan_dtor,
	.init = nv50_fifo_chan_init,
	.fini = nv50_fifo_chan_fini,
	.map  = _nvkm_fifo_channel_map,
	.rd32 = _nvkm_fifo_channel_rd32,
	.wr32 = _nvkm_fifo_channel_wr32,
	.ntfy = _nvkm_fifo_channel_ntfy
};

static struct nvkm_oclass
nv50_fifo_sclass[] = {
	{ NV50_CHANNEL_DMA, &nv50_fifo_ofuncs_dma },
	{ NV50_CHANNEL_GPFIFO, &nv50_fifo_ofuncs_ind },
	{}
};

/*******************************************************************************
 * FIFO context - basically just the instmem reserved for the channel
 ******************************************************************************/

static int
nv50_fifo_context_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
		       struct nvkm_oclass *oclass, void *data, u32 size,
		       struct nvkm_object **pobject)
{
	struct nv50_fifo_base *base;
	int ret;

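	/* the channel's 0x10000-byte instance block holds everything PFIFO
	 * needs for the channel: a 0x200-byte RAMFC, a 0x1200-byte engine
	 * context table and a 0x4000-byte page directory backing the
	 * channel's VM
	 */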
	ret = nvkm_fifo_context_create(parent, engine, oclass, NULL, 0x10000,
				       0x1000, NVOBJ_FLAG_HEAP, &base);
	*pobject = nv_object(base);
	if (ret)
		return ret;

	ret = nvkm_gpuobj_new(nv_object(base), nv_object(base), 0x0200,
			      0x1000, NVOBJ_FLAG_ZERO_ALLOC, &base->ramfc);
	if (ret)
		return ret;

	ret = nvkm_gpuobj_new(nv_object(base), nv_object(base), 0x1200, 0,
			      NVOBJ_FLAG_ZERO_ALLOC, &base->eng);
	if (ret)
		return ret;

	ret = nvkm_gpuobj_new(nv_object(base), nv_object(base), 0x4000, 0, 0,
			      &base->pgd);
	if (ret)
		return ret;

	ret = nvkm_vm_ref(nvkm_client(parent)->vm, &base->vm, base->pgd);
	if (ret)
		return ret;

	return 0;
}

void
nv50_fifo_context_dtor(struct nvkm_object *object)
{
	struct nv50_fifo_base *base = (void *)object;
	nvkm_vm_ref(NULL, &base->vm, base->pgd);
	nvkm_gpuobj_ref(NULL, &base->pgd);
	nvkm_gpuobj_ref(NULL, &base->eng);
	nvkm_gpuobj_ref(NULL, &base->ramfc);
	nvkm_gpuobj_ref(NULL, &base->cache);
	nvkm_fifo_context_destroy(&base->base);
}

static struct nvkm_oclass
nv50_fifo_cclass = {
	.handle = NV_ENGCTX(FIFO, 0x50),
	.ofuncs = &(struct nvkm_ofuncs) {
		.ctor = nv50_fifo_context_ctor,
		.dtor = nv50_fifo_context_dtor,
		.init = _nvkm_fifo_context_init,
		.fini = _nvkm_fifo_context_fini,
		.rd32 = _nvkm_fifo_context_rd32,
		.wr32 = _nvkm_fifo_context_wr32,
	},
};

/*******************************************************************************
 * PFIFO engine
 ******************************************************************************/

static int
nv50_fifo_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
	       struct nvkm_oclass *oclass, void *data, u32 size,
	       struct nvkm_object **pobject)
{
	struct nv50_fifo *fifo;
	int ret;

	ret = nvkm_fifo_create(parent, engine, oclass, 1, 127, &fifo);
	*pobject = nv_object(fifo);
	if (ret)
		return ret;

	ret = nvkm_gpuobj_new(nv_object(fifo), NULL, 128 * 4, 0x1000, 0,
			      &fifo->playlist[0]);
	if (ret)
		return ret;

	ret = nvkm_gpuobj_new(nv_object(fifo), NULL, 128 * 4, 0x1000, 0,
			      &fifo->playlist[1]);
	if (ret)
		return ret;

	nv_subdev(fifo)->unit = 0x00000100;
	nv_subdev(fifo)->intr = nv04_fifo_intr;
	nv_engine(fifo)->cclass = &nv50_fifo_cclass;
	nv_engine(fifo)->sclass = nv50_fifo_sclass;
	fifo->base.pause = nv04_fifo_pause;
	fifo->base.start = nv04_fifo_start;
	return 0;
}

void
nv50_fifo_dtor(struct nvkm_object *object)
{
	struct nv50_fifo *fifo = (void *)object;

	nvkm_gpuobj_ref(NULL, &fifo->playlist[1]);
	nvkm_gpuobj_ref(NULL, &fifo->playlist[0]);

	nvkm_fifo_destroy(&fifo->base);
}

int
nv50_fifo_init(struct nvkm_object *object)
{
	struct nv50_fifo *fifo = (void *)object;
	struct nvkm_device *device = fifo->base.engine.subdev.device;
	int ret, i;

	ret = nvkm_fifo_init(&fifo->base);
	if (ret)
		return ret;

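	/* reset PFIFO by toggling its bit in the PMC engine enable mask;
	 * 0x00250c and 0x002044 are written with magic values carried over
	 * from the original driver
	 */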
	nvkm_mask(device, 0x000200, 0x00000100, 0x00000000);
	nvkm_mask(device, 0x000200, 0x00000100, 0x00000100);
	nvkm_wr32(device, 0x00250c, 0x6f3cfc34);
	nvkm_wr32(device, 0x002044, 0x01003fff);

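	/* acknowledge any stale interrupts, then unmask all but bit 30 */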
	nvkm_wr32(device, 0x002100, 0xffffffff);
	nvkm_wr32(device, 0x002140, 0xbfffffff);

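	/* mark every channel inactive and upload a matching empty playlist */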
	for (i = 0; i < 128; i++)
		nvkm_wr32(device, 0x002600 + (i * 4), 0x00000000);
	nv50_fifo_playlist_update_locked(fifo);

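	/* enable the CACHE1 pusher/puller and PFIFO caches (PUSH0/PULL0/
	 * CACHES by nv04-era register naming)
	 */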
	nvkm_wr32(device, 0x003200, 0x00000001);
	nvkm_wr32(device, 0x003250, 0x00000001);
	nvkm_wr32(device, 0x002500, 0x00000001);
	return 0;
}

struct nvkm_oclass *
nv50_fifo_oclass = &(struct nvkm_oclass) {
	.handle = NV_ENGINE(FIFO, 0x50),
	.ofuncs = &(struct nvkm_ofuncs) {
		.ctor = nv50_fifo_ctor,
		.dtor = nv50_fifo_dtor,
		.init = nv50_fifo_init,
		.fini = _nvkm_fifo_fini,
	},
};