/*
 * Copyright 2012 Red Hat Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Ben Skeggs
 */
#include "chan.h"
#include "chid.h"
#include "cgrp.h"
#include "runl.h"
#include "priv.h"

#include <core/client.h>
#include <core/gpuobj.h>
#include <core/oproxy.h>
#include <subdev/mmu.h>
#include <engine/dma.h>

#include <nvif/if0020.h>

const struct nvkm_event_func
nvkm_chan_event = {
};

struct nvkm_fifo_chan_object {
	struct nvkm_oproxy oproxy;
	struct nvkm_fifo_chan *chan;
	int hash;
};

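/* Resolve the channel's per-engine context slot for @engine, or NULL if
 * this FIFO doesn't expose the engine.
 */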
static struct nvkm_fifo_engn *
nvkm_fifo_chan_engn(struct nvkm_fifo_chan *chan, struct nvkm_engine *engine)
{
	int engi = chan->fifo->func->engine_id(chan->fifo, engine);
	if (engi >= 0)
		return &chan->engn[engi];
	return NULL;
}

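/* Detach a child object's engine context from the channel.  Errors are
 * only fatal on the suspend path, where they abort the suspend.
 */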
static int
nvkm_fifo_chan_child_fini(struct nvkm_oproxy *base, bool suspend)
{
	struct nvkm_fifo_chan_object *object =
		container_of(base, typeof(*object), oproxy);
	struct nvkm_engine *engine  = object->oproxy.object->engine;
	struct nvkm_fifo_chan *chan = object->chan;
	struct nvkm_fifo_engn *engn = nvkm_fifo_chan_engn(chan, engine);
	const char *name = engine->subdev.name;
	int ret = 0;

	if (chan->func->engine_fini) {
		ret = chan->func->engine_fini(chan, engine, suspend);
		if (ret) {
			nvif_error(&chan->object,
				   "detach %s failed, %d\n", name, ret);
			return ret;
		}
	}

	if (engn->object) {
		ret = nvkm_object_fini(engn->object, suspend);
		if (ret && suspend)
			return ret;
	}

	nvif_trace(&chan->object, "detached %s\n", name);
	return ret;
}

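/* Reattach a child object's engine context to the channel, reversing
 * nvkm_fifo_chan_child_fini().
 */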
static int
nvkm_fifo_chan_child_init(struct nvkm_oproxy *base)
{
	struct nvkm_fifo_chan_object *object =
		container_of(base, typeof(*object), oproxy);
	struct nvkm_engine *engine  = object->oproxy.object->engine;
	struct nvkm_fifo_chan *chan = object->chan;
	struct nvkm_fifo_engn *engn = nvkm_fifo_chan_engn(chan, engine);
	const char *name = engine->subdev.name;
	int ret;

	if (engn->object) {
		ret = nvkm_object_init(engn->object);
		if (ret)
			return ret;
	}

	if (chan->func->engine_init) {
		ret = chan->func->engine_init(chan, engine);
		if (ret) {
			nvif_error(&chan->object,
				   "attach %s failed, %d\n", name, ret);
			return ret;
		}
	}

	nvif_trace(&chan->object, "attached %s\n", name);
	return 0;
}

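/* Destroy a child object, dropping its reference on the per-engine
 * context.  The final reference tears down the engine context and
 * releases the VMM engine reference taken at creation.
 */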
static void
nvkm_fifo_chan_child_del(struct nvkm_oproxy *base)
{
	struct nvkm_fifo_chan_object *object =
		container_of(base, typeof(*object), oproxy);
	struct nvkm_engine *engine  = object->oproxy.base.engine;
	struct nvkm_fifo_chan *chan = object->chan;
	struct nvkm_fifo_engn *engn = nvkm_fifo_chan_engn(chan, engine);

	if (chan->func->object_dtor)
		chan->func->object_dtor(chan, object->hash);

	if (!--engn->refcount) {
		if (chan->func->engine_dtor)
			chan->func->engine_dtor(chan, engine);
		nvkm_object_del(&engn->object);
		if (chan->vmm)
			atomic_dec(&chan->vmm->engref[engine->subdev.type]);
	}
}

static const struct nvkm_oproxy_func
nvkm_fifo_chan_child_func = {
	.dtor[0] = nvkm_fifo_chan_child_del,
};

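/* Create an object as a child of the channel.  The first object created
 * on a given engine also instantiates that engine's channel context
 * (cclass), which subsequent objects share via refcounting.
 */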
int
nvkm_fifo_chan_child_new(const struct nvkm_oclass *oclass, void *data, u32 size,
			 struct nvkm_object **pobject)
{
	struct nvkm_engine *engine = oclass->engine;
	struct nvkm_fifo_chan *chan = nvkm_fifo_chan(oclass->parent);
	struct nvkm_fifo_engn *engn = nvkm_fifo_chan_engn(chan, engine);
	struct nvkm_fifo_chan_object *object;
	int ret = 0;

	if (!(object = kzalloc(sizeof(*object), GFP_KERNEL)))
		return -ENOMEM;
	nvkm_oproxy_ctor(&nvkm_fifo_chan_child_func, oclass, &object->oproxy);
	object->chan = chan;
	*pobject = &object->oproxy.base;

	if (!engn->refcount++) {
		struct nvkm_oclass cclass = {
			.client = oclass->client,
			.engine = oclass->engine,
		};

		if (chan->vmm)
			atomic_inc(&chan->vmm->engref[engine->subdev.type]);

		if (engine->func->fifo.cclass) {
			ret = engine->func->fifo.cclass(chan, &cclass,
							&engn->object);
		} else
		if (engine->func->cclass) {
			ret = nvkm_object_new_(engine->func->cclass, &cclass,
					       NULL, 0, &engn->object);
		}
		if (ret)
			return ret;

		if (chan->func->engine_ctor) {
			ret = chan->func->engine_ctor(chan, oclass->engine,
						      engn->object);
			if (ret)
				return ret;
		}
	}

	ret = oclass->base.ctor(&(const struct nvkm_oclass) {
					.base = oclass->base,
					.engn = oclass->engn,
					.handle = oclass->handle,
					.object = oclass->object,
					.client = oclass->client,
					.parent = engn->object ?
						  engn->object :
						  oclass->parent,
					.engine = engine,
				}, data, size, &object->oproxy.object);
	if (ret)
		return ret;

	if (chan->func->object_ctor) {
		object->hash =
			chan->func->object_ctor(chan, object->oproxy.object);
		if (object->hash < 0)
			return object->hash;
	}

	return 0;
}

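/* Attach (cctx != NULL) or detach (cctx == NULL) a channel context from
 * the object's engine, by way of the oproxy init/fini paths above.
 */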
void
nvkm_chan_cctx_bind(struct nvkm_chan *chan, struct nvkm_oproxy *oproxy, struct nvkm_cctx *cctx)
{
	/* Update context pointer. */
	if (cctx)
		nvkm_fifo_chan_child_init(nvkm_oproxy(oproxy->object));
	else
		nvkm_fifo_chan_child_fini(nvkm_oproxy(oproxy->object), false);
}

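/* Drop a channel context reference.  The final reference releases the
 * underlying sub-context (vctx) and frees the cctx under the channel
 * group mutex.
 */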
void
nvkm_chan_cctx_put(struct nvkm_chan *chan, struct nvkm_cctx **pcctx)
{
	struct nvkm_cctx *cctx = *pcctx;

	if (cctx) {
		struct nvkm_engn *engn = cctx->vctx->ectx->engn;

		if (refcount_dec_and_mutex_lock(&cctx->refs, &chan->cgrp->mutex)) {
			CHAN_TRACE(chan, "dtor cctx %d[%s]", engn->id, engn->engine->subdev.name);
			nvkm_cgrp_vctx_put(chan->cgrp, &cctx->vctx);
			list_del(&cctx->head);
			kfree(cctx);
			mutex_unlock(&chan->cgrp->mutex);
		}

		*pcctx = NULL;
	}
}

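/* Find or create a channel context for @engn.  Usage sketch
 * (illustrative only, not a path taken in this file):
 *
 *	struct nvkm_cctx *cctx = NULL;
 *
 *	ret = nvkm_chan_cctx_get(chan, engn, &cctx, client);
 *	if (ret == 0) {
 *		nvkm_chan_cctx_bind(chan, oproxy, cctx);
 *		...
 *		nvkm_chan_cctx_bind(chan, oproxy, NULL);
 *		nvkm_chan_cctx_put(chan, &cctx);
 *	}
 */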
int
nvkm_chan_cctx_get(struct nvkm_chan *chan, struct nvkm_engn *engn, struct nvkm_cctx **pcctx,
		   struct nvkm_client *client)
{
	struct nvkm_cgrp *cgrp = chan->cgrp;
	struct nvkm_vctx *vctx;
	struct nvkm_cctx *cctx;
	int ret;

	/* Look for an existing channel context for this engine+VEID. */
	mutex_lock(&cgrp->mutex);
	cctx = nvkm_list_find(cctx, &chan->cctxs, head,
			      cctx->vctx->ectx->engn == engn && cctx->vctx->vmm == chan->vmm);
	if (cctx) {
		refcount_inc(&cctx->refs);
		*pcctx = cctx;
		mutex_unlock(&chan->cgrp->mutex);
		return 0;
	}

	/* Nope - create a fresh one.  But, sub-context first. */
	ret = nvkm_cgrp_vctx_get(cgrp, engn, chan, &vctx, client);
	if (ret) {
		CHAN_ERROR(chan, "vctx %d[%s]: %d", engn->id, engn->engine->subdev.name, ret);
		goto done;
	}

	/* Now, create the channel context - to track engine binding. */
	CHAN_TRACE(chan, "ctor cctx %d[%s]", engn->id, engn->engine->subdev.name);
	if (!(cctx = *pcctx = kzalloc(sizeof(*cctx), GFP_KERNEL))) {
		nvkm_cgrp_vctx_put(cgrp, &vctx);
		ret = -ENOMEM;
		goto done;
	}

	cctx->vctx = vctx;
	refcount_set(&cctx->refs, 1);
	refcount_set(&cctx->uses, 0);
	list_add_tail(&cctx->head, &chan->cctxs);
done:
	mutex_unlock(&cgrp->mutex);
	return ret;
}

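/* Subscribe a client to channel events; only the KILLED notification is
 * currently supported.
 */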
static int
nvkm_fifo_chan_uevent(struct nvkm_object *object, void *argv, u32 argc, struct nvkm_uevent *uevent)
{
	struct nvkm_fifo_chan *chan = nvkm_fifo_chan(object);
	union nvif_chan_event_args *args = argv;

	switch (args->v0.type) {
	case NVIF_CHAN_EVENT_V0_KILLED:
		return nvkm_uevent_add(uevent, &chan->fifo->kevent, chan->chid,
				       NVKM_FIFO_EVENT_KILLED, NULL);
	default:
		break;
	}

	return -ENOSYS;
}

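/* Describe the mmap()able region for the channel's user registers, as
 * calculated at channel creation time.
 */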
static int
nvkm_fifo_chan_map(struct nvkm_object *object, void *argv, u32 argc,
		   enum nvkm_object_map *type, u64 *addr, u64 *size)
{
	struct nvkm_fifo_chan *chan = nvkm_fifo_chan(object);
	*type = NVKM_OBJECT_MAP_IO;
	*addr = chan->addr;
	*size = chan->size;
	return 0;
}

static int
nvkm_fifo_chan_fini(struct nvkm_object *object, bool suspend)
{
	struct nvkm_fifo_chan *chan = nvkm_fifo_chan(object);
	chan->func->fini(chan);
	return 0;
}

static int
nvkm_fifo_chan_init(struct nvkm_object *object)
{
	struct nvkm_fifo_chan *chan = nvkm_fifo_chan(object);
	chan->func->init(chan);
	return 0;
}

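/* Destroy a channel, releasing its channel ID and channel group
 * reference before tearing down the object itself.
 */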
void
nvkm_chan_del(struct nvkm_chan **pchan)
{
	struct nvkm_chan *chan = *pchan;

	if (!chan)
		return;

	if (chan->cgrp) {
		nvkm_chid_put(chan->cgrp->runl->chid, chan->id, &chan->cgrp->lock);
		nvkm_cgrp_unref(&chan->cgrp);
	}

	chan = nvkm_object_dtor(&chan->object);
	kfree(chan);
}

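/* Object destructor: unlink the channel from the FIFO's channel list,
 * separate it from its address space, and release the push buffer and
 * instance objects.
 */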
static void *
nvkm_fifo_chan_dtor(struct nvkm_object *object)
{
	struct nvkm_fifo_chan *chan = nvkm_fifo_chan(object);
	struct nvkm_fifo *fifo = chan->fifo;
	void *data = chan->func->dtor(chan);
	unsigned long flags;

	spin_lock_irqsave(&fifo->lock, flags);
	if (!list_empty(&chan->head))
		list_del(&chan->head);
	spin_unlock_irqrestore(&fifo->lock, flags);

	if (chan->vmm) {
		nvkm_vmm_part(chan->vmm, chan->inst->memory);
		nvkm_vmm_unref(&chan->vmm);
	}

	nvkm_gpuobj_del(&chan->push);
	nvkm_gpuobj_del(&chan->inst);
	kfree(chan->func);
	return data;
}

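/* The nvkm_chan_get_*() lookups below return the channel with its
 * channel group lock held and local interrupts disabled;
 * nvkm_chan_put() releases it.  A caller sketch (illustrative only):
 *
 *	unsigned long irqflags;
 *	struct nvkm_chan *chan;
 *
 *	chan = nvkm_chan_get_chid(engine, id, &irqflags);
 *	if (chan) {
 *		...
 *		nvkm_chan_put(&chan, irqflags);
 *	}
 */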
void
nvkm_chan_put(struct nvkm_chan **pchan, unsigned long irqflags)
{
	struct nvkm_chan *chan = *pchan;

	if (!chan)
		return;

	*pchan = NULL;
	spin_unlock_irqrestore(&chan->cgrp->lock, irqflags);
}

struct nvkm_chan *
nvkm_chan_get_inst(struct nvkm_engine *engine, u64 inst, unsigned long *pirqflags)
{
	struct nvkm_fifo *fifo = engine->subdev.device->fifo;
	struct nvkm_runl *runl;
	struct nvkm_engn *engn;
	struct nvkm_chan *chan;

	nvkm_runl_foreach(runl, fifo) {
		nvkm_runl_foreach_engn(engn, runl) {
			if (engine == &fifo->engine || engn->engine == engine) {
				chan = nvkm_runl_chan_get_inst(runl, inst, pirqflags);
				if (chan || engn->engine == engine)
					return chan;
			}
		}
	}

	return NULL;
}

struct nvkm_chan *
nvkm_chan_get_chid(struct nvkm_engine *engine, int id, unsigned long *pirqflags)
{
	struct nvkm_fifo *fifo = engine->subdev.device->fifo;
	struct nvkm_runl *runl;
	struct nvkm_engn *engn;

	nvkm_runl_foreach(runl, fifo) {
		nvkm_runl_foreach_engn(engn, runl) {
			if (fifo->chid || engn->engine == engine)
				return nvkm_runl_chan_get_chid(runl, id, pirqflags);
		}
	}

	return NULL;
}

static const struct nvkm_object_func
nvkm_fifo_chan_func = {
	.dtor = nvkm_fifo_chan_dtor,
	.init = nvkm_fifo_chan_init,
	.fini = nvkm_fifo_chan_fini,
	.map = nvkm_fifo_chan_map,
	.uevent = nvkm_fifo_chan_uevent,
};

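/* Common channel constructor: selects a runlist engine from @engm,
 * joins (or creates) a channel group, allocates instance memory, binds
 * the push buffer ctxdma, joins the address space, and claims a
 * channel ID.
 */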
int
nvkm_fifo_chan_ctor(const struct nvkm_fifo_chan_func *fn,
		    struct nvkm_fifo *fifo, u32 size, u32 align, bool zero,
		    u64 hvmm, u64 push, u32 engm, int bar, u32 base,
		    u32 user, const struct nvkm_oclass *oclass,
		    struct nvkm_fifo_chan *chan)
{
	struct nvkm_chan_func *func;
	struct nvkm_client *client = oclass->client;
	struct nvkm_device *device = fifo->engine.subdev.device;
	struct nvkm_dmaobj *dmaobj;
	struct nvkm_cgrp *cgrp = NULL;
	struct nvkm_runl *runl;
	struct nvkm_engn *engn = NULL;
	struct nvkm_vmm *vmm = NULL;
	unsigned long flags;
	int ret;

	nvkm_runl_foreach(runl, fifo) {
		engn = nvkm_runl_find_engn(engn, runl, engm & BIT(engn->id));
		if (engn)
			break;
	}

	if (!engn)
		return -EINVAL;

	/*FIXME: temp kludge to ease transition, remove later */
	if (!(func = kmalloc(sizeof(*func), GFP_KERNEL)))
		return -ENOMEM;

	*func = *fifo->func->chan.func;
	func->dtor = fn->dtor;
	func->init = fn->init;
	func->fini = fn->fini;
	func->engine_ctor = fn->engine_ctor;
	func->engine_dtor = fn->engine_dtor;
	func->engine_init = fn->engine_init;
	func->engine_fini = fn->engine_fini;
	func->object_ctor = fn->object_ctor;
	func->object_dtor = fn->object_dtor;
	func->submit_token = fn->submit_token;

	chan->func = func;
	chan->id = -1;

	nvkm_object_ctor(&nvkm_fifo_chan_func, oclass, &chan->object);
	chan->fifo = fifo;
	INIT_LIST_HEAD(&chan->cctxs);
	INIT_LIST_HEAD(&chan->head);

	/* Join channel group.
	 *
	 * GK110 and newer support channel groups (aka TSGs), where individual channels
	 * share a timeslice and engine context(s).
	 *
	 * As such, engine contexts are tracked in nvkm_cgrp and we need them even when
	 * channels aren't in an API channel group, and on HW that doesn't support TSGs.
	 */
	if (!cgrp) {
		ret = nvkm_cgrp_new(runl, chan->name, vmm, fifo->func->cgrp.force, &chan->cgrp);
		if (ret) {
			RUNL_DEBUG(runl, "cgrp %d", ret);
			return ret;
		}

		cgrp = chan->cgrp;
	} else {
		if (cgrp->runl != runl || cgrp->vmm != vmm) {
			RUNL_DEBUG(runl, "cgrp %d %d", cgrp->runl != runl, cgrp->vmm != vmm);
			return -EINVAL;
		}

		chan->cgrp = nvkm_cgrp_ref(cgrp);
	}

	/* instance memory */
	ret = nvkm_gpuobj_new(device, size, align, zero, NULL, &chan->inst);
	if (ret)
		return ret;

	/* allocate push buffer ctxdma instance */
	if (push) {
		dmaobj = nvkm_dmaobj_search(client, push);
		if (IS_ERR(dmaobj))
			return PTR_ERR(dmaobj);

		ret = nvkm_object_bind(&dmaobj->object, chan->inst, -16,
				       &chan->push);
		if (ret)
			return ret;
	}

	/* channel address space */
	if (hvmm) {
		struct nvkm_vmm *vmm = nvkm_uvmm_search(client, hvmm);
		if (IS_ERR(vmm))
			return PTR_ERR(vmm);

		if (vmm->mmu != device->mmu)
			return -EINVAL;

		ret = nvkm_vmm_join(vmm, chan->inst->memory);
		if (ret)
			return ret;

		chan->vmm = nvkm_vmm_ref(vmm);
	}

	/* Allocate channel ID. */
	if (runl->cgid) {
		chan->id = chan->cgrp->id;
		runl->chid->data[chan->id] = chan;
		set_bit(chan->id, runl->chid->used);
		goto temp_hack_until_no_chid_eq_cgid_req;
	}

	chan->id = nvkm_chid_get(runl->chid, chan);
	if (chan->id < 0) {
		RUNL_ERROR(runl, "!chids");
		return -ENOSPC;
	}

temp_hack_until_no_chid_eq_cgid_req:
	spin_lock_irqsave(&fifo->lock, flags);
	list_add(&chan->head, &fifo->chan);
	spin_unlock_irqrestore(&fifo->lock, flags);

	/* determine address of this channel's user registers */
	chan->addr = device->func->resource_addr(device, bar) +
		     base + user * chan->chid;
	chan->size = user;
	return 0;
}
564