/*
 * Copyright 2012 Red Hat Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Ben Skeggs
 */
#include "priv.h"
#include "chan.h"
#include "chid.h"
#include "runq.h"

#include <core/gpuobj.h>
#include <subdev/mc.h>

#include <nvif/cl0080.h>
#include <nvif/unpack.h>

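/* Ask the backend to recover the channel with the given ID, serialised
 * against channel lookups by fifo->lock.
 */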
void
nvkm_fifo_recover_chan(struct nvkm_fifo *fifo, int chid)
{
	unsigned long flags;
	if (WARN_ON(!fifo->func->recover_chan))
		return;
	spin_lock_irqsave(&fifo->lock, flags);
	fifo->func->recover_chan(fifo, chid);
	spin_unlock_irqrestore(&fifo->lock, flags);
}

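/* Pause channel scheduling.  The contents of *flags are backend-specific,
 * and must be passed back unchanged to the matching nvkm_fifo_start() call.
 */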
void
nvkm_fifo_pause(struct nvkm_fifo *fifo, unsigned long *flags)
{
	return fifo->func->pause(fifo, flags);
}

void
nvkm_fifo_start(struct nvkm_fifo *fifo, unsigned long *flags)
{
	return fifo->func->start(fifo, flags);
}

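/* Hand MMU fault information to the backend's fault recovery handler. */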
void
nvkm_fifo_fault(struct nvkm_fifo *fifo, struct nvkm_fault_data *info)
{
	return fifo->func->mmu_fault->recover(fifo, info);
}

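/* Release a channel obtained from nvkm_fifo_chan_inst() or
 * nvkm_fifo_chan_chid(), dropping fifo->lock with the flags saved at
 * lookup time.
 */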
void
nvkm_fifo_chan_put(struct nvkm_fifo *fifo, unsigned long flags,
		   struct nvkm_fifo_chan **pchan)
{
	struct nvkm_fifo_chan *chan = *pchan;
	if (likely(chan)) {
		*pchan = NULL;
		spin_unlock_irqrestore(&fifo->lock, flags);
	}
}

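/* Find a channel by instance address.  The caller must hold fifo->lock.
 * A match is moved to the head of the channel list so repeated lookups
 * of recently-active channels stay cheap.
 */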
struct nvkm_fifo_chan *
nvkm_fifo_chan_inst_locked(struct nvkm_fifo *fifo, u64 inst)
{
	struct nvkm_fifo_chan *chan;
	list_for_each_entry(chan, &fifo->chan, head) {
		if (chan->inst->addr == inst) {
			list_move(&chan->head, &fifo->chan);
			return chan;
		}
	}
	return NULL;
}

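/* Look up a channel by instance address.  On success fifo->lock remains
 * held and the saved flags are returned via *rflags; release the channel
 * with nvkm_fifo_chan_put().
 */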
struct nvkm_fifo_chan *
nvkm_fifo_chan_inst(struct nvkm_fifo *fifo, u64 inst, unsigned long *rflags)
{
	struct nvkm_fifo_chan *chan;
	unsigned long flags;
	spin_lock_irqsave(&fifo->lock, flags);
	if ((chan = nvkm_fifo_chan_inst_locked(fifo, inst))) {
		*rflags = flags;
		return chan;
	}
	spin_unlock_irqrestore(&fifo->lock, flags);
	return NULL;
}

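/* As nvkm_fifo_chan_inst(), but looks the channel up by channel ID. */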
struct nvkm_fifo_chan *
nvkm_fifo_chan_chid(struct nvkm_fifo *fifo, int chid, unsigned long *rflags)
{
	struct nvkm_fifo_chan *chan;
	unsigned long flags;
	spin_lock_irqsave(&fifo->lock, flags);
	list_for_each_entry(chan, &fifo->chan, head) {
		if (chan->chid == chid) {
			list_move(&chan->head, &fifo->chan);
			*rflags = flags;
			return chan;
		}
	}
	spin_unlock_irqrestore(&fifo->lock, flags);
	return NULL;
}

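/* Notify listeners that the channel with the given ID has been killed. */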
void
nvkm_fifo_kevent(struct nvkm_fifo *fifo, int chid)
{
	nvkm_event_ntfy(&fifo->kevent, chid, NVKM_FIFO_EVENT_KILLED);
}

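/* Kill notifications are raised purely from software, so the event needs
 * no init/fini hooks.
 */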
static const struct nvkm_event_func
nvkm_fifo_kevent_func = {
};

static void
nvkm_fifo_uevent_fini(struct nvkm_event *event, int type, int index)
{
	struct nvkm_fifo *fifo = container_of(event, typeof(*fifo), uevent);
	fifo->func->uevent_fini(fifo);
}

static void
nvkm_fifo_uevent_init(struct nvkm_event *event, int type, int index)
{
	struct nvkm_fifo *fifo = container_of(event, typeof(*fifo), uevent);
	fifo->func->uevent_init(fifo);
}

static const struct nvkm_event_func
nvkm_fifo_uevent_func = {
	.init = nvkm_fifo_uevent_init,
	.fini = nvkm_fifo_uevent_fini,
};

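/* Signal the non-stall interrupt event to any waiters. */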
void
nvkm_fifo_uevent(struct nvkm_fifo *fifo)
{
	nvkm_event_ntfy(&fifo->uevent, 0, NVKM_FIFO_EVENT_NON_STALL_INTR);
}

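/* Construct the user-visible channel object for the class exposed via
 * nvkm_fifo_class_get().
 */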
static int
nvkm_fifo_class_new(struct nvkm_device *device, const struct nvkm_oclass *oclass,
		    void *argv, u32 argc, struct nvkm_object **pobject)
{
	struct nvkm_fifo *fifo = nvkm_fifo(oclass->engine);

	if (oclass->engn == &fifo->func->chan.user)
		return nvkm_uchan_new(fifo, NULL, oclass, argv, argc, pobject);

	WARN_ON(1);
	return -ENOSYS;
}

static const struct nvkm_device_oclass
nvkm_fifo_class = {
	.ctor = nvkm_fifo_class_new,
};

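/* Enumerate the object classes exposed by the FIFO.  Fills *class and
 * returns 0 when index matches a class, otherwise returns the number of
 * classes available.
 */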
static int
nvkm_fifo_class_get(struct nvkm_oclass *oclass, int index, const struct nvkm_device_oclass **class)
{
	struct nvkm_fifo *fifo = nvkm_fifo(oclass->engine);
	const struct nvkm_fifo_func_chan *chan = &fifo->func->chan;
	int c = 0;

	/* *_CHANNEL_DMA, *_CHANNEL_GPFIFO_* */
	if (chan->user.oclass) {
		if (c++ == index) {
			oclass->base = chan->user;
			oclass->engn = &fifo->func->chan.user;
			*class = &nvkm_fifo_class;
			return 0;
		}
	}

	return c;
}

static void
nvkm_fifo_intr(struct nvkm_engine *engine)
{
	struct nvkm_fifo *fifo = nvkm_fifo(engine);
	fifo->func->intr(fifo);
}

static int
nvkm_fifo_fini(struct nvkm_engine *engine, bool suspend)
{
	struct nvkm_fifo *fifo = nvkm_fifo(engine);
	if (fifo->func->fini)
		fifo->func->fini(fifo);
	return 0;
}

static int
nvkm_fifo_init(struct nvkm_engine *engine)
{
	struct nvkm_fifo *fifo = nvkm_fifo(engine);
	fifo->func->init(fifo);
	return 0;
}

static int
nvkm_fifo_info(struct nvkm_engine *engine, u64 mthd, u64 *data)
{
	struct nvkm_fifo *fifo = nvkm_fifo(engine);

	switch (mthd) {
	case NV_DEVICE_HOST_CHANNELS: *data = fifo->chid ? fifo->chid->nr : 0; return 0;
	default:
		if (fifo->func->info)
			return fifo->func->info(fifo, mthd, data);
		break;
	}

	return -ENOSYS;
}

static int
nvkm_fifo_oneinit(struct nvkm_engine *engine)
{
	struct nvkm_fifo *fifo = nvkm_fifo(engine);
	int ret, nr, i;

	/* Initialise CHID/CGID allocator(s) on GPUs where they aren't per-runlist. */
	if (fifo->func->chid_nr) {
		ret = fifo->func->chid_ctor(fifo, fifo->func->chid_nr(fifo));
		if (ret)
			return ret;
	}

	/* Create runqueues for each PBDMA. */
	if (fifo->func->runq_nr) {
		for (nr = fifo->func->runq_nr(fifo), i = 0; i < nr; i++) {
			if (!nvkm_runq_new(fifo, i))
				return -ENOMEM;
		}
	}

	if (fifo->func->oneinit)
		return fifo->func->oneinit(fifo);

	return 0;
}

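/* Reset the engine via the MC subdev before any other initialisation. */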
static void
nvkm_fifo_preinit(struct nvkm_engine *engine)
{
	nvkm_mc_reset(engine->subdev.device, NVKM_ENGINE_FIFO, 0);
}

static void *
nvkm_fifo_dtor(struct nvkm_engine *engine)
{
	struct nvkm_fifo *fifo = nvkm_fifo(engine);
	struct nvkm_runq *runq, *rtmp;
	void *data = fifo;

	list_for_each_entry_safe(runq, rtmp, &fifo->runqs, head)
		nvkm_runq_del(runq);

	nvkm_chid_unref(&fifo->cgid);
	nvkm_chid_unref(&fifo->chid);

	if (fifo->func->dtor)
		data = fifo->func->dtor(fifo);
	nvkm_event_fini(&fifo->kevent);
	nvkm_event_fini(&fifo->uevent);
	mutex_destroy(&fifo->mutex);
	return data;
}

static const struct nvkm_engine_func
nvkm_fifo = {
	.dtor = nvkm_fifo_dtor,
	.preinit = nvkm_fifo_preinit,
	.oneinit = nvkm_fifo_oneinit,
	.info = nvkm_fifo_info,
	.init = nvkm_fifo_init,
	.fini = nvkm_fifo_fini,
	.intr = nvkm_fifo_intr,
	.base.sclass = nvkm_fifo_class_get,
};

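/* Common constructor used by the chipset-specific FIFO implementations. */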
int
nvkm_fifo_ctor(const struct nvkm_fifo_func *func, struct nvkm_device *device,
	       enum nvkm_subdev_type type, int inst, struct nvkm_fifo *fifo)
{
	int ret, nr;

	fifo->func = func;
	INIT_LIST_HEAD(&fifo->runqs);
	spin_lock_init(&fifo->lock);
	mutex_init(&fifo->mutex);

	ret = nvkm_engine_ctor(&nvkm_fifo, device, type, inst, true, &fifo->engine);
	if (ret)
		return ret;

	INIT_LIST_HEAD(&fifo->chan);

	/* Clamp to the number of channels the core can track. */
	nr = func->chid_nr(fifo);
	if (WARN_ON(nr > NVKM_FIFO_CHID_NR))
		fifo->nr = NVKM_FIFO_CHID_NR;
	else
		fifo->nr = nr;

	if (func->uevent_init) {
		ret = nvkm_event_init(&nvkm_fifo_uevent_func, &fifo->engine.subdev, 1, 1,
				      &fifo->uevent);
		if (ret)
			return ret;
	}

	return nvkm_event_init(&nvkm_fifo_kevent_func, &fifo->engine.subdev, 1, nr, &fifo->kevent);
}