1 /*
2  * Copyright 2012 Red Hat Inc.
3  *
4  * Permission is hereby granted, free of charge, to any person obtaining a
5  * copy of this software and associated documentation files (the "Software"),
6  * to deal in the Software without restriction, including without limitation
7  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8  * and/or sell copies of the Software, and to permit persons to whom the
9  * Software is furnished to do so, subject to the following conditions:
10  *
11  * The above copyright notice and this permission notice shall be included in
12  * all copies or substantial portions of the Software.
13  *
14  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
17  * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18  * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19  * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20  * OTHER DEALINGS IN THE SOFTWARE.
21  *
22  * Authors: Ben Skeggs
23  */
24 #include "nv50.h"
25 #include "nv04.h"
26 
27 #include <core/client.h>
28 #include <core/engctx.h>
29 #include <core/ramht.h>
30 #include <subdev/bar.h>
31 #include <subdev/mmu.h>
32 #include <subdev/timer.h>
33 
34 #include <nvif/class.h>
35 #include <nvif/unpack.h>
36 
37 /*******************************************************************************
38  * FIFO channel objects
39  ******************************************************************************/
40 
static void
nv50_fifo_playlist_update_locked(struct nv50_fifo *fifo)
{
	struct nvkm_device *device = fifo->base.engine.subdev.device;
	struct nvkm_bar *bar = device->bar;
	struct nvkm_gpuobj *cur;
	int i, p;

	/* Double-buffered playlist: fill the currently-inactive buffer,
	 * then flip so the next update targets the other one.
	 */
	cur = fifo->playlist[fifo->cur_playlist];
	fifo->cur_playlist = !fifo->cur_playlist;

	/* Collect active channels: bit 31 of 0x002600 + chid*4 is set by
	 * nv50_fifo_chan_init() and cleared by nv50_fifo_chan_fini().
	 */
	nvkm_kmap(cur);
	for (i = fifo->base.min, p = 0; i < fifo->base.max; i++) {
		if (nvkm_rd32(device, 0x002600 + (i * 4)) & 0x80000000)
			nvkm_wo32(cur, p++ * 4, i);
	}
	bar->flush(bar);
	nvkm_done(cur);

	/* Point PFIFO at the new playlist (base address >> 12, entry
	 * count) and trigger a reschedule.
	 */
	nvkm_wr32(device, 0x0032f4, cur->addr >> 12);
	nvkm_wr32(device, 0x0032ec, p);
	nvkm_wr32(device, 0x002500, 0x00000101);
}
64 
65 void
66 nv50_fifo_playlist_update(struct nv50_fifo *fifo)
67 {
68 	mutex_lock(&nv_subdev(fifo)->mutex);
69 	nv50_fifo_playlist_update_locked(fifo);
70 	mutex_unlock(&nv_subdev(fifo)->mutex);
71 }
72 
/* Bind an engine context object to the channel: writes the context's
 * address/limit into the channel's engine-context table (base->eng) at an
 * engine-specific offset.  Returns 0 on success, -EINVAL for engines
 * without a slot in the table.
 */
static int
nv50_fifo_context_attach(struct nvkm_object *parent, struct nvkm_object *object)
{
	struct nvkm_bar *bar = nvkm_bar(parent);
	struct nv50_fifo_base *base = (void *)parent->parent;
	struct nvkm_gpuobj *ectx = (void *)object;
	u64 limit = ectx->addr + ectx->size - 1;
	u64 start = ectx->addr;
	u32 addr;

	switch (nv_engidx(object->engine)) {
	case NVDEV_ENGINE_SW   : return 0;	/* SW "engine" needs no HW context */
	case NVDEV_ENGINE_GR   : addr = 0x0000; break;
	case NVDEV_ENGINE_MPEG : addr = 0x0060; break;
	default:
		return -EINVAL;
	}

	/* record the channel's instance address for context switching */
	nv_engctx(ectx)->addr = nv_gpuobj(base)->addr >> 12;

	/* NOTE(review): 0x00190000 presumed to be the "context valid" flags
	 * word; limit/start upper bits are packed into word 0x0c.
	 */
	nvkm_kmap(base->eng);
	nvkm_wo32(base->eng, addr + 0x00, 0x00190000);
	nvkm_wo32(base->eng, addr + 0x04, lower_32_bits(limit));
	nvkm_wo32(base->eng, addr + 0x08, lower_32_bits(start));
	nvkm_wo32(base->eng, addr + 0x0c, upper_32_bits(limit) << 24 |
					  upper_32_bits(start));
	nvkm_wo32(base->eng, addr + 0x10, 0x00000000);
	nvkm_wo32(base->eng, addr + 0x14, 0x00000000);
	bar->flush(bar);
	nvkm_done(base->eng);
	return 0;
}
105 
/* Unbind an engine context from the channel.  Kicks the channel's context
 * off the hardware first; on kickoff timeout returns -EBUSY only when
 * suspending (otherwise the error is logged and teardown continues).  The
 * engine-context table entry is zeroed only after a successful unload.
 */
static int
nv50_fifo_context_detach(struct nvkm_object *parent, bool suspend,
			 struct nvkm_object *object)
{
	struct nv50_fifo *fifo = (void *)parent->engine;
	struct nv50_fifo_base *base = (void *)parent->parent;
	struct nv50_fifo_chan *chan = (void *)parent;
	struct nvkm_subdev *subdev = &fifo->base.engine.subdev;
	struct nvkm_device *device = subdev->device;
	struct nvkm_bar *bar = device->bar;
	u32 addr, me;
	int ret = 0;

	/* same engine -> table-offset mapping as nv50_fifo_context_attach() */
	switch (nv_engidx(object->engine)) {
	case NVDEV_ENGINE_SW   : return 0;
	case NVDEV_ENGINE_GR   : addr = 0x0000; break;
	case NVDEV_ENGINE_MPEG : addr = 0x0060; break;
	default:
		return -EINVAL;
	}

	/* HW bug workaround:
	 *
	 * PFIFO will hang forever if the connected engines don't report
	 * that they've processed the context switch request.
	 *
	 * In order for the kickoff to work, we need to ensure all the
	 * connected engines are in a state where they can answer.
	 *
	 * Newer chipsets don't seem to suffer from this issue, and well,
	 * there's also a "ignore these engines" bitmask reg we can use
	 * if we hit the issue there..
	 */
	me = nvkm_mask(device, 0x00b860, 0x00000001, 0x00000001);

	/* do the kickoff... */
	nvkm_wr32(device, 0x0032fc, nv_gpuobj(base)->addr >> 12);
	if (nvkm_msec(device, 2000,
		if (nvkm_rd32(device, 0x0032fc) != 0xffffffff)
			break;
	) < 0) {
		nvkm_error(subdev, "channel %d [%s] unload timeout\n",
			   chan->base.chid, nvkm_client_name(chan));
		if (suspend)
			ret = -EBUSY;
	}
	/* restore the engine mask saved above */
	nvkm_wr32(device, 0x00b860, me);

	if (ret == 0) {
		/* clear the engine's slot in the context table */
		nvkm_kmap(base->eng);
		nvkm_wo32(base->eng, addr + 0x00, 0x00000000);
		nvkm_wo32(base->eng, addr + 0x04, 0x00000000);
		nvkm_wo32(base->eng, addr + 0x08, 0x00000000);
		nvkm_wo32(base->eng, addr + 0x0c, 0x00000000);
		nvkm_wo32(base->eng, addr + 0x10, 0x00000000);
		nvkm_wo32(base->eng, addr + 0x14, 0x00000000);
		bar->flush(bar);
		nvkm_done(base->eng);
	}

	return ret;
}
168 
169 static int
170 nv50_fifo_object_attach(struct nvkm_object *parent,
171 			struct nvkm_object *object, u32 handle)
172 {
173 	struct nv50_fifo_chan *chan = (void *)parent;
174 	u32 context;
175 
176 	if (nv_iclass(object, NV_GPUOBJ_CLASS))
177 		context = nv_gpuobj(object)->node->offset >> 4;
178 	else
179 		context = 0x00000004; /* just non-zero */
180 
181 	switch (nv_engidx(object->engine)) {
182 	case NVDEV_ENGINE_DMAOBJ:
183 	case NVDEV_ENGINE_SW    : context |= 0x00000000; break;
184 	case NVDEV_ENGINE_GR    : context |= 0x00100000; break;
185 	case NVDEV_ENGINE_MPEG  : context |= 0x00200000; break;
186 	default:
187 		return -EINVAL;
188 	}
189 
190 	return nvkm_ramht_insert(chan->ramht, 0, handle, context);
191 }
192 
193 void
194 nv50_fifo_object_detach(struct nvkm_object *parent, int cookie)
195 {
196 	struct nv50_fifo_chan *chan = (void *)parent;
197 	nvkm_ramht_remove(chan->ramht, cookie);
198 }
199 
/* Constructor for NV50_CHANNEL_DMA objects: unpacks the user arguments,
 * creates the channel, its RAMHT, and initialises the channel's RAMFC for
 * DMA-mode command submission starting at args->v0.offset.
 */
static int
nv50_fifo_chan_ctor_dma(struct nvkm_object *parent, struct nvkm_object *engine,
			struct nvkm_oclass *oclass, void *data, u32 size,
			struct nvkm_object **pobject)
{
	union {
		struct nv50_channel_dma_v0 v0;
	} *args = data;
	struct nvkm_bar *bar = nvkm_bar(parent);
	struct nv50_fifo_base *base = (void *)parent;
	struct nv50_fifo_chan *chan;
	int ret;

	nvif_ioctl(parent, "create channel dma size %d\n", size);
	if (nvif_unpack(args->v0, 0, 0, false)) {
		nvif_ioctl(parent, "create channel dma vers %d pushbuf %llx "
				   "offset %016llx\n", args->v0.version,
			   args->v0.pushbuf, args->v0.offset);
		/* per-channel VMs are not supported on nv50 */
		if (args->v0.vm)
			return -ENOENT;
	} else
		/* NOTE(review): ret is assigned inside the nvif_unpack()
		 * macro on the failure path — confirm against nvif/unpack.h.
		 */
		return ret;

	ret = nvkm_fifo_channel_create(parent, engine, oclass, 0, 0xc00000,
				       0x2000, args->v0.pushbuf,
				       (1ULL << NVDEV_ENGINE_DMAOBJ) |
				       (1ULL << NVDEV_ENGINE_SW) |
				       (1ULL << NVDEV_ENGINE_GR) |
				       (1ULL << NVDEV_ENGINE_MPEG), &chan);
	*pobject = nv_object(chan);
	if (ret)
		return ret;

	/* report the allocated channel id back to userspace */
	args->v0.chid = chan->base.chid;

	nv_parent(chan)->context_attach = nv50_fifo_context_attach;
	nv_parent(chan)->context_detach = nv50_fifo_context_detach;
	nv_parent(chan)->object_attach = nv50_fifo_object_attach;
	nv_parent(chan)->object_detach = nv50_fifo_object_detach;

	ret = nvkm_ramht_new(nv_object(chan), nv_object(chan), 0x8000, 16,
			     &chan->ramht);
	if (ret)
		return ret;

	/* Initialise RAMFC.  NOTE(review): offsets 0x08-0x14 presumed to be
	 * the DMA GET/PUT pairs (both start at the requested offset); verify
	 * against RAMFC layout documentation.
	 */
	nvkm_kmap(base->ramfc);
	nvkm_wo32(base->ramfc, 0x08, lower_32_bits(args->v0.offset));
	nvkm_wo32(base->ramfc, 0x0c, upper_32_bits(args->v0.offset));
	nvkm_wo32(base->ramfc, 0x10, lower_32_bits(args->v0.offset));
	nvkm_wo32(base->ramfc, 0x14, upper_32_bits(args->v0.offset));
	nvkm_wo32(base->ramfc, 0x3c, 0x003f6078);
	nvkm_wo32(base->ramfc, 0x44, 0x01003fff);
	nvkm_wo32(base->ramfc, 0x48, chan->base.pushgpu->node->offset >> 4);
	nvkm_wo32(base->ramfc, 0x4c, 0xffffffff);
	nvkm_wo32(base->ramfc, 0x60, 0x7fffffff);
	nvkm_wo32(base->ramfc, 0x78, 0x00000000);
	nvkm_wo32(base->ramfc, 0x7c, 0x30000001);
	nvkm_wo32(base->ramfc, 0x80, ((chan->ramht->bits - 9) << 27) |
				     (4 << 24) /* SEARCH_FULL */ |
				     (chan->ramht->gpuobj.node->offset >> 4));
	bar->flush(bar);
	nvkm_done(base->ramfc);
	return 0;
}
264 
/* Constructor for NV50_CHANNEL_GPFIFO objects: like the DMA constructor,
 * but configures RAMFC for indirect (GPFIFO) submission using the
 * user-supplied ring offset and length.
 */
static int
nv50_fifo_chan_ctor_ind(struct nvkm_object *parent, struct nvkm_object *engine,
			struct nvkm_oclass *oclass, void *data, u32 size,
			struct nvkm_object **pobject)
{
	union {
		struct nv50_channel_gpfifo_v0 v0;
	} *args = data;
	struct nvkm_bar *bar = nvkm_bar(parent);
	struct nv50_fifo_base *base = (void *)parent;
	struct nv50_fifo_chan *chan;
	u64 ioffset, ilength;
	int ret;

	nvif_ioctl(parent, "create channel gpfifo size %d\n", size);
	if (nvif_unpack(args->v0, 0, 0, false)) {
		nvif_ioctl(parent, "create channel gpfifo vers %d pushbuf %llx "
				   "ioffset %016llx ilength %08x\n",
			   args->v0.version, args->v0.pushbuf, args->v0.ioffset,
			   args->v0.ilength);
		/* per-channel VMs are not supported on nv50 */
		if (args->v0.vm)
			return -ENOENT;
	} else
		/* NOTE(review): ret is assigned inside the nvif_unpack()
		 * macro on the failure path — confirm against nvif/unpack.h.
		 */
		return ret;

	ret = nvkm_fifo_channel_create(parent, engine, oclass, 0, 0xc00000,
				       0x2000, args->v0.pushbuf,
				       (1ULL << NVDEV_ENGINE_DMAOBJ) |
				       (1ULL << NVDEV_ENGINE_SW) |
				       (1ULL << NVDEV_ENGINE_GR) |
				       (1ULL << NVDEV_ENGINE_MPEG), &chan);
	*pobject = nv_object(chan);
	if (ret)
		return ret;

	/* report the allocated channel id back to userspace */
	args->v0.chid = chan->base.chid;

	nv_parent(chan)->context_attach = nv50_fifo_context_attach;
	nv_parent(chan)->context_detach = nv50_fifo_context_detach;
	nv_parent(chan)->object_attach = nv50_fifo_object_attach;
	nv_parent(chan)->object_detach = nv50_fifo_object_detach;

	ret = nvkm_ramht_new(nv_object(chan), nv_object(chan), 0x8000, 16,
			     &chan->ramht);
	if (ret)
		return ret;

	/* ring length is stored as log2 of the number of 8-byte entries */
	ioffset = args->v0.ioffset;
	ilength = order_base_2(args->v0.ilength / 8);

	nvkm_kmap(base->ramfc);
	nvkm_wo32(base->ramfc, 0x3c, 0x403f6078);
	nvkm_wo32(base->ramfc, 0x44, 0x01003fff);
	nvkm_wo32(base->ramfc, 0x48, chan->base.pushgpu->node->offset >> 4);
	nvkm_wo32(base->ramfc, 0x50, lower_32_bits(ioffset));
	nvkm_wo32(base->ramfc, 0x54, upper_32_bits(ioffset) | (ilength << 16));
	nvkm_wo32(base->ramfc, 0x60, 0x7fffffff);
	nvkm_wo32(base->ramfc, 0x78, 0x00000000);
	nvkm_wo32(base->ramfc, 0x7c, 0x30000001);
	nvkm_wo32(base->ramfc, 0x80, ((chan->ramht->bits - 9) << 27) |
				     (4 << 24) /* SEARCH_FULL */ |
				     (chan->ramht->gpuobj.node->offset >> 4));
	bar->flush(bar);
	nvkm_done(base->ramfc);
	return 0;
}
331 
332 void
333 nv50_fifo_chan_dtor(struct nvkm_object *object)
334 {
335 	struct nv50_fifo_chan *chan = (void *)object;
336 	nvkm_ramht_ref(NULL, &chan->ramht);
337 	nvkm_fifo_channel_destroy(&chan->base);
338 }
339 
/* Enable the channel: publish its RAMFC address (with the enable bit set)
 * in the per-channel context register, then rebuild the playlist so PFIFO
 * starts scheduling it.
 */
static int
nv50_fifo_chan_init(struct nvkm_object *object)
{
	struct nv50_fifo *fifo = (void *)object->engine;
	struct nv50_fifo_base *base = (void *)object->parent;
	struct nv50_fifo_chan *chan = (void *)object;
	struct nvkm_gpuobj *ramfc = base->ramfc;
	struct nvkm_device *device = fifo->base.engine.subdev.device;
	u32 chid = chan->base.chid;
	int ret;

	ret = nvkm_fifo_channel_init(&chan->base);
	if (ret)
		return ret;

	/* bit 31 marks the channel active; playlist_update scans for it */
	nvkm_wr32(device, 0x002600 + (chid * 4), 0x80000000 | ramfc->addr >> 12);
	nv50_fifo_playlist_update(fifo);
	return 0;
}
359 
/* Disable the channel: clear the active bit, let PFIFO unload its context
 * via a playlist update, then fully clear the context register.
 */
int
nv50_fifo_chan_fini(struct nvkm_object *object, bool suspend)
{
	struct nv50_fifo *fifo = (void *)object->engine;
	struct nv50_fifo_chan *chan = (void *)object;
	struct nvkm_device *device = fifo->base.engine.subdev.device;
	u32 chid = chan->base.chid;

	/* remove channel from playlist, fifo will unload context */
	nvkm_mask(device, 0x002600 + (chid * 4), 0x80000000, 0x00000000);
	nv50_fifo_playlist_update(fifo);
	nvkm_wr32(device, 0x002600 + (chid * 4), 0x00000000);

	return nvkm_fifo_channel_fini(&chan->base, suspend);
}
375 
/* Object functions for DMA-mode channels; only the constructor differs
 * from the GPFIFO variant below.
 */
static struct nvkm_ofuncs
nv50_fifo_ofuncs_dma = {
	.ctor = nv50_fifo_chan_ctor_dma,
	.dtor = nv50_fifo_chan_dtor,
	.init = nv50_fifo_chan_init,
	.fini = nv50_fifo_chan_fini,
	.map  = _nvkm_fifo_channel_map,
	.rd32 = _nvkm_fifo_channel_rd32,
	.wr32 = _nvkm_fifo_channel_wr32,
	.ntfy = _nvkm_fifo_channel_ntfy
};
387 
/* Object functions for GPFIFO (indirect) channels. */
static struct nvkm_ofuncs
nv50_fifo_ofuncs_ind = {
	.ctor = nv50_fifo_chan_ctor_ind,
	.dtor = nv50_fifo_chan_dtor,
	.init = nv50_fifo_chan_init,
	.fini = nv50_fifo_chan_fini,
	.map  = _nvkm_fifo_channel_map,
	.rd32 = _nvkm_fifo_channel_rd32,
	.wr32 = _nvkm_fifo_channel_wr32,
	.ntfy = _nvkm_fifo_channel_ntfy
};
399 
/* Channel classes exposed to userspace by this FIFO engine. */
static struct nvkm_oclass
nv50_fifo_sclass[] = {
	{ NV50_CHANNEL_DMA, &nv50_fifo_ofuncs_dma },
	{ NV50_CHANNEL_GPFIFO, &nv50_fifo_ofuncs_ind },
	{}
};
406 
407 /*******************************************************************************
408  * FIFO context - basically just the instmem reserved for the channel
409  ******************************************************************************/
410 
/* Allocate the per-channel instance memory: RAMFC, the engine-context
 * table, the page directory, and a reference on the client's VM.  Partial
 * failures rely on the object destructor for cleanup.
 */
static int
nv50_fifo_context_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
		       struct nvkm_oclass *oclass, void *data, u32 size,
		       struct nvkm_object **pobject)
{
	struct nv50_fifo_base *base;
	int ret;

	ret = nvkm_fifo_context_create(parent, engine, oclass, NULL, 0x10000,
				       0x1000, NVOBJ_FLAG_HEAP, &base);
	*pobject = nv_object(base);
	if (ret)
		return ret;

	/* RAMFC: 0x200 bytes, zeroed */
	ret = nvkm_gpuobj_new(nv_object(base), nv_object(base), 0x0200,
			      0x1000, NVOBJ_FLAG_ZERO_ALLOC, &base->ramfc);
	if (ret)
		return ret;

	/* engine context table, written by context_attach/detach */
	ret = nvkm_gpuobj_new(nv_object(base), nv_object(base), 0x1200, 0,
			      NVOBJ_FLAG_ZERO_ALLOC, &base->eng);
	if (ret)
		return ret;

	/* page directory for the channel's address space */
	ret = nvkm_gpuobj_new(nv_object(base), nv_object(base), 0x4000, 0, 0,
			      &base->pgd);
	if (ret)
		return ret;

	ret = nvkm_vm_ref(nvkm_client(parent)->vm, &base->vm, base->pgd);
	if (ret)
		return ret;

	return 0;
}
446 
/* Tear down per-channel instance memory.  Order matters: the VM reference
 * is dropped with the page directory still alive, before the gpuobjs are
 * released.
 */
void
nv50_fifo_context_dtor(struct nvkm_object *object)
{
	struct nv50_fifo_base *base = (void *)object;
	nvkm_vm_ref(NULL, &base->vm, base->pgd);
	nvkm_gpuobj_ref(NULL, &base->pgd);
	nvkm_gpuobj_ref(NULL, &base->eng);
	nvkm_gpuobj_ref(NULL, &base->ramfc);
	nvkm_gpuobj_ref(NULL, &base->cache);
	nvkm_fifo_context_destroy(&base->base);
}
458 
/* Context class: describes the per-channel instmem object. */
static struct nvkm_oclass
nv50_fifo_cclass = {
	.handle = NV_ENGCTX(FIFO, 0x50),
	.ofuncs = &(struct nvkm_ofuncs) {
		.ctor = nv50_fifo_context_ctor,
		.dtor = nv50_fifo_context_dtor,
		.init = _nvkm_fifo_context_init,
		.fini = _nvkm_fifo_context_fini,
		.rd32 = _nvkm_fifo_context_rd32,
		.wr32 = _nvkm_fifo_context_wr32,
	},
};
471 
472 /*******************************************************************************
473  * PFIFO engine
474  ******************************************************************************/
475 
/* Engine constructor: creates the FIFO (channel ids 1..127) and the two
 * playlist buffers used double-buffered by playlist_update_locked().
 */
static int
nv50_fifo_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
	       struct nvkm_oclass *oclass, void *data, u32 size,
	       struct nvkm_object **pobject)
{
	struct nv50_fifo *fifo;
	int ret;

	ret = nvkm_fifo_create(parent, engine, oclass, 1, 127, &fifo);
	*pobject = nv_object(fifo);
	if (ret)
		return ret;

	/* one 32-bit entry per possible channel, 4KiB aligned */
	ret = nvkm_gpuobj_new(nv_object(fifo), NULL, 128 * 4, 0x1000, 0,
			      &fifo->playlist[0]);
	if (ret)
		return ret;

	ret = nvkm_gpuobj_new(nv_object(fifo), NULL, 128 * 4, 0x1000, 0,
			      &fifo->playlist[1]);
	if (ret)
		return ret;

	nv_subdev(fifo)->unit = 0x00000100;
	nv_subdev(fifo)->intr = nv04_fifo_intr;
	nv_engine(fifo)->cclass = &nv50_fifo_cclass;
	nv_engine(fifo)->sclass = nv50_fifo_sclass;
	/* pause/start behaviour is shared with nv04 */
	fifo->base.pause = nv04_fifo_pause;
	fifo->base.start = nv04_fifo_start;
	return 0;
}
507 
508 void
509 nv50_fifo_dtor(struct nvkm_object *object)
510 {
511 	struct nv50_fifo *fifo = (void *)object;
512 
513 	nvkm_gpuobj_ref(NULL, &fifo->playlist[1]);
514 	nvkm_gpuobj_ref(NULL, &fifo->playlist[0]);
515 
516 	nvkm_fifo_destroy(&fifo->base);
517 }
518 
/* Engine init: reset PFIFO, clear all per-channel context registers,
 * submit an (empty) playlist, and enable the pullers.  Runs before any
 * channel exists, so the locked playlist update needs no mutex here.
 */
int
nv50_fifo_init(struct nvkm_object *object)
{
	struct nv50_fifo *fifo = (void *)object;
	struct nvkm_device *device = fifo->base.engine.subdev.device;
	int ret, i;

	ret = nvkm_fifo_init(&fifo->base);
	if (ret)
		return ret;

	/* pulse the PFIFO enable bit in the master enable register */
	nvkm_mask(device, 0x000200, 0x00000100, 0x00000000);
	nvkm_mask(device, 0x000200, 0x00000100, 0x00000100);
	nvkm_wr32(device, 0x00250c, 0x6f3cfc34);
	nvkm_wr32(device, 0x002044, 0x01003fff);

	/* ack any pending interrupts, then unmask all but bit 30 */
	nvkm_wr32(device, 0x002100, 0xffffffff);
	nvkm_wr32(device, 0x002140, 0xbfffffff);

	for (i = 0; i < 128; i++)
		nvkm_wr32(device, 0x002600 + (i * 4), 0x00000000);
	nv50_fifo_playlist_update_locked(fifo);

	nvkm_wr32(device, 0x003200, 0x00000001);
	nvkm_wr32(device, 0x003250, 0x00000001);
	nvkm_wr32(device, 0x002500, 0x00000001);
	return 0;
}
547 
/* Public engine class for NV50 PFIFO, referenced by the device tables. */
struct nvkm_oclass *
nv50_fifo_oclass = &(struct nvkm_oclass) {
	.handle = NV_ENGINE(FIFO, 0x50),
	.ofuncs = &(struct nvkm_ofuncs) {
		.ctor = nv50_fifo_ctor,
		.dtor = nv50_fifo_dtor,
		.init = nv50_fifo_init,
		.fini = _nvkm_fifo_fini,
	},
};
558