/*
 * Copyright 2012 Red Hat Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Ben Skeggs
 */
#include "priv.h"
#include "chan.h"

#include <core/client.h>
#include <core/gpuobj.h>
#include <core/notify.h>
#include <subdev/mc.h>

#include <nvif/event.h>
#include <nvif/unpack.h>

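/* Request recovery of a single channel by id.  Delegates to the
 * implementation's recover_chan() hook (if any) while holding
 * fifo->lock to serialise against other channel operations.
 */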
void
nvkm_fifo_recover_chan(struct nvkm_fifo *fifo, int chid)
{
	unsigned long flags;
	if (WARN_ON(!fifo->func->recover_chan))
		return;
	spin_lock_irqsave(&fifo->lock, flags);
	fifo->func->recover_chan(fifo, chid);
	spin_unlock_irqrestore(&fifo->lock, flags);
}

void
nvkm_fifo_pause(struct nvkm_fifo *fifo, unsigned long *flags)
{
	return fifo->func->pause(fifo, flags);
}

void
nvkm_fifo_start(struct nvkm_fifo *fifo, unsigned long *flags)
{
	return fifo->func->start(fifo, flags);
}

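/* Release a channel returned by nvkm_fifo_chan_inst()/_chid(): clear the
 * caller's pointer and drop fifo->lock with the IRQ flags that were
 * saved at lookup time.
 */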
void
nvkm_fifo_chan_put(struct nvkm_fifo *fifo, unsigned long flags,
		   struct nvkm_fifo_chan **pchan)
{
	struct nvkm_fifo_chan *chan = *pchan;
	if (likely(chan)) {
		*pchan = NULL;
		spin_unlock_irqrestore(&fifo->lock, flags);
	}
}

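/* Find a channel by instance address; the caller must already hold
 * fifo->lock.  A match is moved to the front of the channel list so
 * that repeated lookups of busy channels stay cheap.
 */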
struct nvkm_fifo_chan *
nvkm_fifo_chan_inst_locked(struct nvkm_fifo *fifo, u64 inst)
{
	struct nvkm_fifo_chan *chan;
	list_for_each_entry(chan, &fifo->chan, head) {
		if (chan->inst->addr == inst) {
			list_del(&chan->head);
			list_add(&chan->head, &fifo->chan);
			return chan;
		}
	}
	return NULL;
}

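/* Find a channel by instance address.  On success the channel is
 * returned with fifo->lock held and the saved IRQ flags written to
 * *rflags; release with nvkm_fifo_chan_put().
 */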
struct nvkm_fifo_chan *
nvkm_fifo_chan_inst(struct nvkm_fifo *fifo, u64 inst, unsigned long *rflags)
{
	struct nvkm_fifo_chan *chan;
	unsigned long flags;
	spin_lock_irqsave(&fifo->lock, flags);
	if ((chan = nvkm_fifo_chan_inst_locked(fifo, inst))) {
		*rflags = flags;
		return chan;
	}
	spin_unlock_irqrestore(&fifo->lock, flags);
	return NULL;
}

struct nvkm_fifo_chan *
nvkm_fifo_chan_chid(struct nvkm_fifo *fifo, int chid, unsigned long *rflags)
{
	struct nvkm_fifo_chan *chan;
	unsigned long flags;
	spin_lock_irqsave(&fifo->lock, flags);
	list_for_each_entry(chan, &fifo->chan, head) {
		if (chan->chid == chid) {
			list_del(&chan->head);
			list_add(&chan->head, &fifo->chan);
			*rflags = flags;
			return chan;
		}
	}
	spin_unlock_irqrestore(&fifo->lock, flags);
	return NULL;
}

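/* Send the per-channel (kevent) notification for the given channel id,
 * e.g. when a channel has been killed during recovery.
 */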
void
nvkm_fifo_kevent(struct nvkm_fifo *fifo, int chid)
{
	nvkm_event_send(&fifo->kevent, 1, chid, NULL, 0);
}

static int
nvkm_fifo_kevent_ctor(struct nvkm_object *object, void *data, u32 size,
		      struct nvkm_notify *notify)
{
	struct nvkm_fifo_chan *chan = nvkm_fifo_chan(object);
	if (size == 0) {
		notify->size  = 0;
		notify->types = 1;
		notify->index = chan->chid;
		return 0;
	}
	return -ENOSYS;
}

static const struct nvkm_event_func
nvkm_fifo_kevent_func = {
	.ctor = nvkm_fifo_kevent_ctor,
};

static int
nvkm_fifo_cevent_ctor(struct nvkm_object *object, void *data, u32 size,
		      struct nvkm_notify *notify)
{
	if (size == 0) {
		notify->size  = 0;
		notify->types = 1;
		notify->index = 0;
		return 0;
	}
	return -ENOSYS;
}

static const struct nvkm_event_func
nvkm_fifo_cevent_func = {
	.ctor = nvkm_fifo_cevent_ctor,
};

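/* Broadcast the fifo's cevent notification to any registered notifiers;
 * the event carries no payload.
 */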
void
nvkm_fifo_cevent(struct nvkm_fifo *fifo)
{
	nvkm_event_send(&fifo->cevent, 1, 0, NULL, 0);
}

static void
nvkm_fifo_uevent_fini(struct nvkm_event *event, int type, int index)
{
	struct nvkm_fifo *fifo = container_of(event, typeof(*fifo), uevent);
	fifo->func->uevent_fini(fifo);
}

static void
nvkm_fifo_uevent_init(struct nvkm_event *event, int type, int index)
{
	struct nvkm_fifo *fifo = container_of(event, typeof(*fifo), uevent);
	fifo->func->uevent_init(fifo);
}

static int
nvkm_fifo_uevent_ctor(struct nvkm_object *object, void *data, u32 size,
		      struct nvkm_notify *notify)
{
	union {
		struct nvif_notify_uevent_req none;
	} *req = data;
	int ret = -ENOSYS;

	if (!(ret = nvif_unvers(ret, &data, &size, req->none))) {
		notify->size  = sizeof(struct nvif_notify_uevent_rep);
		notify->types = 1;
		notify->index = 0;
	}

	return ret;
}

static const struct nvkm_event_func
nvkm_fifo_uevent_func = {
	.ctor = nvkm_fifo_uevent_ctor,
	.init = nvkm_fifo_uevent_init,
	.fini = nvkm_fifo_uevent_fini,
};

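/* Broadcast the user event (uevent) notification with an empty reply
 * payload.  The event's init()/fini() hooks forward to the
 * implementation's uevent_init()/uevent_fini() to enable and disable
 * delivery as notifiers come and go.
 */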
void
nvkm_fifo_uevent(struct nvkm_fifo *fifo)
{
	struct nvif_notify_uevent_rep rep = {
	};
	nvkm_event_send(&fifo->uevent, 1, 0, &rep, sizeof(rep));
}

static int
nvkm_fifo_class_new(struct nvkm_device *device,
		    const struct nvkm_oclass *oclass, void *data, u32 size,
		    struct nvkm_object **pobject)
{
	const struct nvkm_fifo_chan_oclass *sclass = oclass->engn;
	struct nvkm_fifo *fifo = nvkm_fifo(oclass->engine);
	return sclass->ctor(fifo, oclass, data, size, pobject);
}

static const struct nvkm_device_oclass
nvkm_fifo_class = {
	.ctor = nvkm_fifo_class_new,
};

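/* Enumerate the channel object classes exposed by this fifo.  Prefer
 * the implementation's class_get() hook when present, otherwise walk
 * the static func->chan[] table.  Returns 0 and sets *class on a
 * match, or a non-zero count/error when index is out of range.
 */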
static int
nvkm_fifo_class_get(struct nvkm_oclass *oclass, int index,
		    const struct nvkm_device_oclass **class)
{
	struct nvkm_fifo *fifo = nvkm_fifo(oclass->engine);
	const struct nvkm_fifo_chan_oclass *sclass;
	int c = 0;

	if (fifo->func->class_get) {
		int ret = fifo->func->class_get(fifo, index, &sclass);
		if (ret == 0) {
			oclass->base = sclass->base;
			oclass->engn = sclass;
			*class = &nvkm_fifo_class;
			return 0;
		}
		return ret;
	}

	while ((sclass = fifo->func->chan[c])) {
		if (c++ == index) {
			oclass->base = sclass->base;
			oclass->engn = sclass;
			*class = &nvkm_fifo_class;
			return 0;
		}
	}

	return c;
}

static void
nvkm_fifo_intr(struct nvkm_engine *engine)
{
	struct nvkm_fifo *fifo = nvkm_fifo(engine);
	fifo->func->intr(fifo);
}

static int
nvkm_fifo_fini(struct nvkm_engine *engine, bool suspend)
{
	struct nvkm_fifo *fifo = nvkm_fifo(engine);
	if (fifo->func->fini)
		fifo->func->fini(fifo);
	return 0;
}

static int
nvkm_fifo_oneinit(struct nvkm_engine *engine)
{
	struct nvkm_fifo *fifo = nvkm_fifo(engine);
	if (fifo->func->oneinit)
		return fifo->func->oneinit(fifo);
	return 0;
}

static void
nvkm_fifo_preinit(struct nvkm_engine *engine)
{
	nvkm_mc_reset(engine->subdev.device, NVKM_ENGINE_FIFO);
}

static int
nvkm_fifo_init(struct nvkm_engine *engine)
{
	struct nvkm_fifo *fifo = nvkm_fifo(engine);
	fifo->func->init(fifo);
	return 0;
}

static void *
nvkm_fifo_dtor(struct nvkm_engine *engine)
{
	struct nvkm_fifo *fifo = nvkm_fifo(engine);
	void *data = fifo;
	if (fifo->func->dtor)
		data = fifo->func->dtor(fifo);
	nvkm_event_fini(&fifo->kevent);
	nvkm_event_fini(&fifo->cevent);
	nvkm_event_fini(&fifo->uevent);
	return data;
}

static const struct nvkm_engine_func
nvkm_fifo = {
	.dtor = nvkm_fifo_dtor,
	.preinit = nvkm_fifo_preinit,
	.oneinit = nvkm_fifo_oneinit,
	.init = nvkm_fifo_init,
	.fini = nvkm_fifo_fini,
	.intr = nvkm_fifo_intr,
	.base.sclass = nvkm_fifo_class_get,
};

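/* Common constructor used by the per-generation fifo implementations:
 * install the function table, clamp the channel count to
 * NVKM_FIFO_CHID_NR, and set up the channel list, lock and the
 * uevent/cevent/kevent notification sources.
 */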
int
nvkm_fifo_ctor(const struct nvkm_fifo_func *func, struct nvkm_device *device,
	       int index, int nr, struct nvkm_fifo *fifo)
{
	int ret;

	fifo->func = func;
	INIT_LIST_HEAD(&fifo->chan);
	spin_lock_init(&fifo->lock);

	if (WARN_ON(nr > NVKM_FIFO_CHID_NR))
		fifo->nr = NVKM_FIFO_CHID_NR;
	else
		fifo->nr = nr;
	bitmap_clear(fifo->mask, 0, fifo->nr);

	ret = nvkm_engine_ctor(&nvkm_fifo, device, index, true, &fifo->engine);
	if (ret)
		return ret;

	if (func->uevent_init) {
		ret = nvkm_event_init(&nvkm_fifo_uevent_func, 1, 1,
				      &fifo->uevent);
		if (ret)
			return ret;
	}

	ret = nvkm_event_init(&nvkm_fifo_cevent_func, 1, 1, &fifo->cevent);
	if (ret)
		return ret;

	return nvkm_event_init(&nvkm_fifo_kevent_func, 1, nr, &fifo->kevent);
}