/*
 * Copyright 2012 Red Hat Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Ben Skeggs
 */
#include "priv.h"
#include "chan.h"

#include <core/client.h>
#include <core/gpuobj.h>
#include <core/notify.h>
#include <subdev/mc.h>

#include <nvif/event.h>
#include <nvif/cl0080.h>
#include <nvif/unpack.h>

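/* Request recovery (reset) of the channel with the given id, serialised
 * against channel lookups by fifo->lock.
 */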
void
nvkm_fifo_recover_chan(struct nvkm_fifo *fifo, int chid)
{
	unsigned long flags;
	if (WARN_ON(!fifo->func->recover_chan))
		return;
	spin_lock_irqsave(&fifo->lock, flags);
	fifo->func->recover_chan(fifo, chid);
	spin_unlock_irqrestore(&fifo->lock, flags);
}

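/* Thin wrappers around the backend hooks: nvkm_fifo_pause() and
 * nvkm_fifo_start() form a pair, with *flags carrying state saved by the
 * backend between them, and nvkm_fifo_fault() forwards fault information
 * to the backend for handling.
 */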
void
nvkm_fifo_pause(struct nvkm_fifo *fifo, unsigned long *flags)
{
	return fifo->func->pause(fifo, flags);
}

void
nvkm_fifo_start(struct nvkm_fifo *fifo, unsigned long *flags)
{
	return fifo->func->start(fifo, flags);
}

void
nvkm_fifo_fault(struct nvkm_fifo *fifo, struct nvkm_fault_data *info)
{
	return fifo->func->fault(fifo, info);
}

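/* Release a channel returned by nvkm_fifo_chan_inst()/nvkm_fifo_chan_chid(),
 * dropping fifo->lock with the IRQ state that was saved by the lookup.
 */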
void
nvkm_fifo_chan_put(struct nvkm_fifo *fifo, unsigned long flags,
		   struct nvkm_fifo_chan **pchan)
{
	struct nvkm_fifo_chan *chan = *pchan;
	if (likely(chan)) {
		*pchan = NULL;
		spin_unlock_irqrestore(&fifo->lock, flags);
	}
}

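/* Look up a channel by instance address.  Caller must hold fifo->lock.
 * A hit is moved to the head of the list so recently-used channels are
 * found quickly on subsequent lookups.
 */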
struct nvkm_fifo_chan *
nvkm_fifo_chan_inst_locked(struct nvkm_fifo *fifo, u64 inst)
{
	struct nvkm_fifo_chan *chan;
	list_for_each_entry(chan, &fifo->chan, head) {
		if (chan->inst->addr == inst) {
			list_move(&chan->head, &fifo->chan);
			return chan;
		}
	}
	return NULL;
}

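/* Look up a channel by instance address.  On success, returns with
 * fifo->lock held and the saved IRQ state in *rflags; release with
 * nvkm_fifo_chan_put().
 */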
struct nvkm_fifo_chan *
nvkm_fifo_chan_inst(struct nvkm_fifo *fifo, u64 inst, unsigned long *rflags)
{
	struct nvkm_fifo_chan *chan;
	unsigned long flags;
	spin_lock_irqsave(&fifo->lock, flags);
	if ((chan = nvkm_fifo_chan_inst_locked(fifo, inst))) {
		*rflags = flags;
		return chan;
	}
	spin_unlock_irqrestore(&fifo->lock, flags);
	return NULL;
}

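/* As nvkm_fifo_chan_inst(), but looks the channel up by channel id. */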
struct nvkm_fifo_chan *
nvkm_fifo_chan_chid(struct nvkm_fifo *fifo, int chid, unsigned long *rflags)
{
	struct nvkm_fifo_chan *chan;
	unsigned long flags;
	spin_lock_irqsave(&fifo->lock, flags);
	list_for_each_entry(chan, &fifo->chan, head) {
		if (chan->chid == chid) {
			list_move(&chan->head, &fifo->chan);
			*rflags = flags;
			return chan;
		}
	}
	spin_unlock_irqrestore(&fifo->lock, flags);
	return NULL;
}

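/* Per-channel kernel event: notify in-kernel listeners registered against
 * a channel id (see nvkm_fifo_kevent_ctor() below).
 */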
void
nvkm_fifo_kevent(struct nvkm_fifo *fifo, int chid)
{
	nvkm_event_send(&fifo->kevent, 1, chid, NULL, 0);
}

static int
nvkm_fifo_kevent_ctor(struct nvkm_object *object, void *data, u32 size,
		      struct nvkm_notify *notify)
{
	struct nvkm_fifo_chan *chan = nvkm_fifo_chan(object);
	if (size == 0) {
		notify->size  = 0;
		notify->types = 1;
		notify->index = chan->chid;
		return 0;
	}
	return -ENOSYS;
}

static const struct nvkm_event_func
nvkm_fifo_kevent_func = {
	.ctor = nvkm_fifo_kevent_ctor,
};

static void
nvkm_fifo_uevent_fini(struct nvkm_event *event, int type, int index)
{
	struct nvkm_fifo *fifo = container_of(event, typeof(*fifo), uevent);
	fifo->func->uevent_fini(fifo);
}

static void
nvkm_fifo_uevent_init(struct nvkm_event *event, int type, int index)
{
	struct nvkm_fifo *fifo = container_of(event, typeof(*fifo), uevent);
	fifo->func->uevent_init(fifo);
}

static int
nvkm_fifo_uevent_ctor(struct nvkm_object *object, void *data, u32 size,
		      struct nvkm_notify *notify)
{
	union {
		struct nvif_notify_uevent_req none;
	} *req = data;
	int ret = -ENOSYS;

	if (!(ret = nvif_unvers(ret, &data, &size, req->none))) {
		notify->size  = sizeof(struct nvif_notify_uevent_rep);
		notify->types = 1;
		notify->index = 0;
	}

	return ret;
}

static const struct nvkm_event_func
nvkm_fifo_uevent_func = {
	.ctor = nvkm_fifo_uevent_ctor,
	.init = nvkm_fifo_uevent_init,
	.fini = nvkm_fifo_uevent_fini,
};

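/* Broadcast the FIFO user event to all registered listeners. */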
void
nvkm_fifo_uevent(struct nvkm_fifo *fifo)
{
	struct nvif_notify_uevent_rep rep = {
	};
	nvkm_event_send(&fifo->uevent, 1, 0, &rep, sizeof(rep));
}

static int
nvkm_fifo_class_new_(struct nvkm_device *device,
		     const struct nvkm_oclass *oclass, void *data, u32 size,
		     struct nvkm_object **pobject)
{
	struct nvkm_fifo *fifo = nvkm_fifo(oclass->engine);
	return fifo->func->class_new(fifo, oclass, data, size, pobject);
}

static const struct nvkm_device_oclass
nvkm_fifo_class_ = {
	.ctor = nvkm_fifo_class_new_,
};

static int
nvkm_fifo_class_new(struct nvkm_device *device,
		    const struct nvkm_oclass *oclass, void *data, u32 size,
		    struct nvkm_object **pobject)
{
	const struct nvkm_fifo_chan_oclass *sclass = oclass->engn;
	struct nvkm_fifo *fifo = nvkm_fifo(oclass->engine);
	return sclass->ctor(fifo, oclass, data, size, pobject);
}

static const struct nvkm_device_oclass
nvkm_fifo_class = {
	.ctor = nvkm_fifo_class_new,
};

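/* Enumerate the channel classes exposed by this FIFO: either via the
 * backend's class_get() hook, or by walking the static chan[] table.
 * Returns the class count when index is beyond the last entry.
 */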
static int
nvkm_fifo_class_get(struct nvkm_oclass *oclass, int index,
		    const struct nvkm_device_oclass **class)
{
	struct nvkm_fifo *fifo = nvkm_fifo(oclass->engine);
	const struct nvkm_fifo_chan_oclass *sclass;
	int c = 0;

	if (fifo->func->class_get) {
		int ret = fifo->func->class_get(fifo, index, oclass);
		if (ret == 0)
			*class = &nvkm_fifo_class_;
		return ret;
	}

	while ((sclass = fifo->func->chan[c])) {
		if (c++ == index) {
			oclass->base = sclass->base;
			oclass->engn = sclass;
			*class = &nvkm_fifo_class;
			return 0;
		}
	}

	return c;
}

static void
nvkm_fifo_intr(struct nvkm_engine *engine)
{
	struct nvkm_fifo *fifo = nvkm_fifo(engine);
	fifo->func->intr(fifo);
}

static int
nvkm_fifo_fini(struct nvkm_engine *engine, bool suspend)
{
	struct nvkm_fifo *fifo = nvkm_fifo(engine);
	if (fifo->func->fini)
		fifo->func->fini(fifo);
	return 0;
}

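/* Handle NV_DEVICE_* info queries; anything not known at this level is
 * forwarded to the backend's info() hook.
 */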
static int
nvkm_fifo_info(struct nvkm_engine *engine, u64 mthd, u64 *data)
{
	struct nvkm_fifo *fifo = nvkm_fifo(engine);
	switch (mthd) {
	case NV_DEVICE_HOST_CHANNELS: *data = fifo->nr; return 0;
	default:
		if (fifo->func->info)
			return fifo->func->info(fifo, mthd, data);
		break;
	}
	return -ENOSYS;
}

static int
nvkm_fifo_oneinit(struct nvkm_engine *engine)
{
	struct nvkm_fifo *fifo = nvkm_fifo(engine);
	if (fifo->func->oneinit)
		return fifo->func->oneinit(fifo);
	return 0;
}

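/* Reset the FIFO engine via the MC subdev before first initialisation. */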
static void
nvkm_fifo_preinit(struct nvkm_engine *engine)
{
	nvkm_mc_reset(engine->subdev.device, NVKM_ENGINE_FIFO, 0);
}

static int
nvkm_fifo_init(struct nvkm_engine *engine)
{
	struct nvkm_fifo *fifo = nvkm_fifo(engine);
	fifo->func->init(fifo);
	return 0;
}

static void *
nvkm_fifo_dtor(struct nvkm_engine *engine)
{
	struct nvkm_fifo *fifo = nvkm_fifo(engine);
	void *data = fifo;
	if (fifo->func->dtor)
		data = fifo->func->dtor(fifo);
	nvkm_event_fini(&fifo->kevent);
	nvkm_event_fini(&fifo->uevent);
	mutex_destroy(&fifo->mutex);
	return data;
}

static const struct nvkm_engine_func
nvkm_fifo = {
	.dtor = nvkm_fifo_dtor,
	.preinit = nvkm_fifo_preinit,
	.oneinit = nvkm_fifo_oneinit,
	.info = nvkm_fifo_info,
	.init = nvkm_fifo_init,
	.fini = nvkm_fifo_fini,
	.intr = nvkm_fifo_intr,
	.base.sclass = nvkm_fifo_class_get,
};

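/* Common constructor for FIFO engine implementations: sets up channel
 * tracking and locking, clamps the channel count to NVKM_FIFO_CHID_NR,
 * and initialises the user/kernel event sources.
 */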
int
nvkm_fifo_ctor(const struct nvkm_fifo_func *func, struct nvkm_device *device,
	       enum nvkm_subdev_type type, int inst, int nr, struct nvkm_fifo *fifo)
{
	int ret;

	fifo->func = func;
	INIT_LIST_HEAD(&fifo->chan);
	spin_lock_init(&fifo->lock);
	mutex_init(&fifo->mutex);

	/* Clamp the requested channel count to what the chid bitmap can track;
	 * the check must be on the incoming 'nr', as fifo->nr isn't set yet.
	 */
	if (WARN_ON(nr > NVKM_FIFO_CHID_NR))
		fifo->nr = NVKM_FIFO_CHID_NR;
	else
		fifo->nr = nr;
	bitmap_clear(fifo->mask, 0, fifo->nr);

	ret = nvkm_engine_ctor(&nvkm_fifo, device, type, inst, true, &fifo->engine);
	if (ret)
		return ret;

	if (func->uevent_init) {
		ret = nvkm_event_init(&nvkm_fifo_uevent_func, 1, 1,
				      &fifo->uevent);
		if (ret)
			return ret;
	}

	/* Size the kernel event source to the (possibly clamped) channel count. */
	return nvkm_event_init(&nvkm_fifo_kevent_func, 1, fifo->nr, &fifo->kevent);
}