xref: /openbmc/linux/drivers/gpu/drm/nouveau/nvkm/engine/fifo/uchan.c (revision f48dd2936138882d7755cbbc5d9984015c75980c)
/*
 * Copyright 2021 Red Hat Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */
#define nvkm_uchan(p) container_of((p), struct nvkm_uchan, object)
#include "cgrp.h"
#include "chan.h"
#include "runl.h"

#include <core/oproxy.h>

#include <nvif/if0020.h>

#include "gk104.h"

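/* Userspace-visible channel object, wrapping the core nvkm_chan it was created for. */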
struct nvkm_uchan {
	struct nvkm_object object;
	struct nvkm_chan *chan;
};

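/*
 * Handle NVIF channel event requests from userspace: non-stall interrupt
 * events come from the runlist's FIFO, while KILLED notifications are
 * forwarded to the underlying hardware channel object.
 */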
static int
nvkm_uchan_uevent(struct nvkm_object *object, void *argv, u32 argc, struct nvkm_uevent *uevent)
{
	struct nvkm_chan *chan = nvkm_uchan(object)->chan;
	struct nvkm_runl *runl = chan->cgrp->runl;
	union nvif_chan_event_args *args = argv;

	if (!uevent)
		return 0;
	if (argc != sizeof(args->v0) || args->v0.version != 0)
		return -ENOSYS;

	switch (args->v0.type) {
	case NVIF_CHAN_EVENT_V0_NON_STALL_INTR:
		return nvkm_uevent_add(uevent, &runl->fifo->nonstall.event, 0,
				       NVKM_FIFO_NONSTALL_EVENT, NULL);
	case NVIF_CHAN_EVENT_V0_KILLED:
		return chan->object.func->uevent(&chan->object, argv, argc, uevent);
	default:
		break;
	}

	return -ENOSYS;
}

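/* Proxy for an object allocated on a channel, tracking the engine context it uses. */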
struct nvkm_uobj {
	struct nvkm_oproxy oproxy;
	struct nvkm_chan *chan;
	struct nvkm_cctx *cctx;
};

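/*
 * Drop this object's use of its engine context, unbinding the context from
 * the channel once the last user is gone.
 */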
static int
nvkm_uchan_object_fini_1(struct nvkm_oproxy *oproxy, bool suspend)
{
	struct nvkm_uobj *uobj = container_of(oproxy, typeof(*uobj), oproxy);
	struct nvkm_chan *chan = uobj->chan;
	struct nvkm_cctx *cctx = uobj->cctx;

	/* Unbind engine context from channel, if no longer required. */
	if (refcount_dec_and_mutex_lock(&cctx->uses, &chan->cgrp->mutex)) {
		nvkm_chan_cctx_bind(chan, oproxy, NULL);
		mutex_unlock(&chan->cgrp->mutex);
	}

	return 0;
}

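/*
 * Take a reference on this object's engine context, binding the context to
 * the channel on first use.
 */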
static int
nvkm_uchan_object_init_0(struct nvkm_oproxy *oproxy)
{
	struct nvkm_uobj *uobj = container_of(oproxy, typeof(*uobj), oproxy);
	struct nvkm_chan *chan = uobj->chan;
	struct nvkm_cctx *cctx = uobj->cctx;
	int ret = 0;

	/* Bind engine context to channel, if it hasn't been already. */
	if (!refcount_inc_not_zero(&cctx->uses)) {
		mutex_lock(&chan->cgrp->mutex);
		if (!refcount_inc_not_zero(&cctx->uses)) {
			if (ret == 0) {
				nvkm_chan_cctx_bind(chan, oproxy, cctx);
				refcount_set(&cctx->uses, 1);
			}
		}
		mutex_unlock(&chan->cgrp->mutex);
	}

	return ret;
}

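/* Release the engine context reference taken when the object was created. */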
static void
nvkm_uchan_object_dtor(struct nvkm_oproxy *oproxy)
{
	struct nvkm_uobj *uobj = container_of(oproxy, typeof(*uobj), oproxy);

	nvkm_chan_cctx_put(uobj->chan, &uobj->cctx);
}

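/*
 * The [0]/[1] indices select whether each hook runs before or after the
 * proxied object's own method (see <core/oproxy.h>).
 */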
static const struct nvkm_oproxy_func
nvkm_uchan_object = {
	.dtor[1] = nvkm_uchan_object_dtor,
	.init[0] = nvkm_uchan_object_init_0,
	.fini[1] = nvkm_uchan_object_fini_1,
};

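/*
 * Allocate an object on the channel: find the host engine state for the
 * requested class, take a channel context reference for that engine, then
 * create the HW object with the channel as its parent.
 */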
static int
nvkm_uchan_object_new(const struct nvkm_oclass *oclass, void *argv, u32 argc,
		      struct nvkm_object **pobject)
{
	struct nvkm_chan *chan = nvkm_uchan(oclass->parent)->chan;
	struct nvkm_cgrp *cgrp = chan->cgrp;
	struct nvkm_engn *engn;
	struct nvkm_uobj *uobj;
	struct nvkm_oclass _oclass;
	int ret;

	/* Lookup host engine state for target engine. */
	engn = nvkm_runl_find_engn(engn, cgrp->runl, engn->engine == oclass->engine);
	if (WARN_ON(!engn))
		return -EINVAL;

	/* Allocate SW object. */
	if (!(uobj = kzalloc(sizeof(*uobj), GFP_KERNEL)))
		return -ENOMEM;

	nvkm_oproxy_ctor(&nvkm_uchan_object, oclass, &uobj->oproxy);
	uobj->chan = chan;
	*pobject = &uobj->oproxy.base;

	/* Ref. channel context for target engine. */
	ret = nvkm_chan_cctx_get(chan, engn, &uobj->cctx, oclass->client);
	if (ret)
		return ret;

	/* Allocate HW object. */
	_oclass = *oclass;
	_oclass.parent = &chan->object;
	return nvkm_fifo_chan_child_new(&_oclass, argv, argc, &uobj->oproxy.object);
}

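/*
 * Enumerate the object classes available on this channel by walking each
 * engine on its runlist, preferring an engine's FIFO-specific class list
 * where one is provided.
 */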
static int
nvkm_uchan_sclass(struct nvkm_object *object, int index, struct nvkm_oclass *oclass)
{
	struct nvkm_chan *chan = nvkm_uchan(object)->chan;
	struct nvkm_engn *engn;
	int ret;

	nvkm_runl_foreach_engn(engn, chan->cgrp->runl) {
		struct nvkm_engine *engine = engn->engine;
		int c = 0;

		oclass->engine = engine;
		oclass->base.oclass = 0;

		if (engine->func->fifo.sclass) {
			ret = engine->func->fifo.sclass(oclass, index);
			if (oclass->base.oclass) {
				if (!oclass->base.ctor)
					oclass->base.ctor = nvkm_object_new;
				oclass->ctor = nvkm_uchan_object_new;
				return 0;
			}

			index -= ret;
			continue;
		}

		while (engine->func->sclass[c].oclass) {
			if (c++ == index) {
				oclass->base = engine->func->sclass[index];
				if (!oclass->base.ctor)
					oclass->base.ctor = nvkm_object_new;
				oclass->ctor = nvkm_uchan_object_new;
				return 0;
			}
		}

		index -= c;
	}

	return -EINVAL;
}

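/* Forward mapping requests to the underlying hardware channel object. */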
static int
nvkm_uchan_map(struct nvkm_object *object, void *argv, u32 argc,
	       enum nvkm_object_map *type, u64 *addr, u64 *size)
{
	struct nvkm_chan *chan = nvkm_uchan(object)->chan;

	return chan->object.func->map(&chan->object, argv, argc, type, addr, size);
}

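/* Forward fini to the HW channel; failures are only propagated when suspending. */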
static int
nvkm_uchan_fini(struct nvkm_object *object, bool suspend)
{
	struct nvkm_chan *chan = nvkm_uchan(object)->chan;
	int ret;

	ret = chan->object.func->fini(&chan->object, suspend);
	if (ret && suspend)
		return ret;

	return 0;
}

static int
nvkm_uchan_init(struct nvkm_object *object)
{
	struct nvkm_chan *chan = nvkm_uchan(object)->chan;

	return chan->object.func->init(&chan->object);
}

static void *
nvkm_uchan_dtor(struct nvkm_object *object)
{
	struct nvkm_uchan *uchan = nvkm_uchan(object);

	nvkm_chan_del(&uchan->chan);
	return uchan;
}

static const struct nvkm_object_func
nvkm_uchan = {
	.dtor = nvkm_uchan_dtor,
	.init = nvkm_uchan_init,
	.fini = nvkm_uchan_fini,
	.map = nvkm_uchan_map,
	.sclass = nvkm_uchan_sclass,
	.uevent = nvkm_uchan_uevent,
};

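/*
 * Create the userspace channel object and construct the hardware channel
 * beneath it.  The HW channel is hooked up whenever an object was allocated,
 * even if its constructor failed, so nvkm_uchan_dtor() can still release it.
 */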
int
nvkm_uchan_new(struct nvkm_fifo *fifo, struct nvkm_cgrp *cgrp, const struct nvkm_oclass *oclass,
	       void *argv, u32 argc, struct nvkm_object **pobject)
{
	struct nvkm_object *object = NULL;
	struct nvkm_uchan *uchan;
	int ret;

	if (!(uchan = kzalloc(sizeof(*uchan), GFP_KERNEL)))
		return -ENOMEM;

	nvkm_object_ctor(&nvkm_uchan, oclass, &uchan->object);
	*pobject = &uchan->object;

	if (fifo->func->chan.ctor)
		ret = fifo->func->chan.ctor(gk104_fifo(fifo), oclass, argv, argc, &object);
	else
		ret = fifo->func->chan.oclass->ctor(fifo, oclass, argv, argc, &object);
	if (!object)
		return ret;

	uchan->chan = container_of(object, typeof(*uchan->chan), object);
	return ret;
}