/*
 * Copyright 2012 Red Hat Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Ben Skeggs
 */
#include "cgrp.h"
#include "chan.h"
#include "chid.h"
#include "runl.h"

#include "nv04.h"
#include "channv04.h"
#include "regsnv04.h"

#include <core/client.h>
#include <core/ramht.h>
#include <subdev/instmem.h>
#include <subdev/timer.h>
#include <engine/sw.h>

#include <nvif/class.h>

static const struct nv04_fifo_ramfc
nv04_fifo_ramfc[] = {
	{ 32,  0, 0x00,  0, NV04_PFIFO_CACHE1_DMA_PUT },
	{ 32,  0, 0x04,  0, NV04_PFIFO_CACHE1_DMA_GET },
	{ 16,  0, 0x08,  0, NV04_PFIFO_CACHE1_DMA_INSTANCE },
	{ 16, 16, 0x08,  0, NV04_PFIFO_CACHE1_DMA_DCOUNT },
	{ 32,  0, 0x0c,  0, NV04_PFIFO_CACHE1_DMA_STATE },
	{ 32,  0, 0x10,  0, NV04_PFIFO_CACHE1_DMA_FETCH },
	{ 32,  0, 0x14,  0, NV04_PFIFO_CACHE1_ENGINE },
	{ 32,  0, 0x18,  0, NV04_PFIFO_CACHE1_PULL1 },
	{}
};

void
nv04_fifo_dma_fini(struct nvkm_fifo_chan *base)
{
	struct nv04_fifo_chan *chan = nv04_fifo_chan(base);
	struct nv04_fifo *fifo = chan->fifo;
	struct nvkm_device *device = fifo->base.engine.subdev.device;
	struct nvkm_memory *fctx = device->imem->ramfc;
	const struct nv04_fifo_ramfc *c;
	unsigned long flags;
	u32 data = chan->ramfc;
	u32 chid;

	/* prevent fifo context switches */
	spin_lock_irqsave(&fifo->base.lock, flags);
	nvkm_wr32(device, NV03_PFIFO_CACHES, 0);

	/* if this channel is active, replace it with a null context */
	chid = nvkm_rd32(device, NV03_PFIFO_CACHE1_PUSH1) & fifo->base.chid->mask;
	if (chid == chan->base.chid) {
		nvkm_mask(device, NV04_PFIFO_CACHE1_DMA_PUSH, 0x00000001, 0);
		nvkm_wr32(device, NV03_PFIFO_CACHE1_PUSH0, 0);
		nvkm_mask(device, NV04_PFIFO_CACHE1_PULL0, 0x00000001, 0);

		/* save each PFIFO register bitfield described by the ramfc
		 * table into the channel's RAMFC slot
		 */
		c = fifo->ramfc;
		nvkm_kmap(fctx);
		do {
			u32 rm = ((1ULL << c->bits) - 1) << c->regs;
			u32 cm = ((1ULL << c->bits) - 1) << c->ctxs;
			u32 rv = (nvkm_rd32(device, c->regp) & rm) >> c->regs;
			u32 cv = (nvkm_ro32(fctx, c->ctxp + data) & ~cm);
			nvkm_wo32(fctx, c->ctxp + data, cv | (rv << c->ctxs));
		} while ((++c)->bits);
		nvkm_done(fctx);

		c = fifo->ramfc;
		do {
			nvkm_wr32(device, c->regp, 0x00000000);
		} while ((++c)->bits);

		nvkm_wr32(device, NV03_PFIFO_CACHE1_GET, 0);
		nvkm_wr32(device, NV03_PFIFO_CACHE1_PUT, 0);
		nvkm_wr32(device, NV03_PFIFO_CACHE1_PUSH1, fifo->base.chid->mask);
		nvkm_wr32(device, NV03_PFIFO_CACHE1_PUSH0, 1);
		nvkm_wr32(device, NV04_PFIFO_CACHE1_PULL0, 1);
	}

	/* restore normal operation, after disabling dma mode */
	nvkm_mask(device, NV04_PFIFO_MODE, 1 << chan->base.chid, 0);
	nvkm_wr32(device, NV03_PFIFO_CACHES, 1);
	spin_unlock_irqrestore(&fifo->base.lock, flags);
}
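
/* A channel becomes runnable simply by having its bit set in
 * NV04_PFIFO_MODE (DMA mode); nv04_fifo_dma_fini() above clears the
 * same bit again on teardown.
 */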

void
nv04_fifo_dma_init(struct nvkm_fifo_chan *base)
{
	struct nv04_fifo_chan *chan = nv04_fifo_chan(base);
	struct nv04_fifo *fifo = chan->fifo;
	struct nvkm_device *device = fifo->base.engine.subdev.device;
	u32 mask = 1 << chan->base.chid;
	unsigned long flags;

	spin_lock_irqsave(&fifo->base.lock, flags);
	nvkm_mask(device, NV04_PFIFO_MODE, mask, mask);
	spin_unlock_irqrestore(&fifo->base.lock, flags);
}

static const struct nvkm_chan_func
nv04_chan = {
};

const struct nvkm_cgrp_func
nv04_cgrp = {
};

const struct nvkm_engn_func
nv04_engn = {
};

void
nv04_fifo_pause(struct nvkm_fifo *base, unsigned long *pflags)
__acquires(fifo->base.lock)
{
	struct nv04_fifo *fifo = nv04_fifo(base);
	struct nvkm_device *device = fifo->base.engine.subdev.device;
	unsigned long flags;

	spin_lock_irqsave(&fifo->base.lock, flags);
	*pflags = flags;

	nvkm_wr32(device, NV03_PFIFO_CACHES, 0x00000000);
	nvkm_mask(device, NV04_PFIFO_CACHE1_PULL0, 0x00000001, 0x00000000);

	/* In some cases the puller may be left in an inconsistent state
	 * if you try to stop it while it's busy translating handles.
	 * Sometimes you get a CACHE_ERROR, sometimes it just fails
	 * silently, sending incorrect instance offsets to PGRAPH after
	 * it's started up again.
	 *
	 * To avoid this, we invalidate the most recently calculated
	 * instance.
	 */
	nvkm_msec(device, 2000,
		u32 tmp = nvkm_rd32(device, NV04_PFIFO_CACHE1_PULL0);
		if (!(tmp & NV04_PFIFO_CACHE1_PULL0_HASH_BUSY))
			break;
	);

	if (nvkm_rd32(device, NV04_PFIFO_CACHE1_PULL0) &
		      NV04_PFIFO_CACHE1_PULL0_HASH_FAILED)
		nvkm_wr32(device, NV03_PFIFO_INTR_0, NV_PFIFO_INTR_CACHE_ERROR);

	nvkm_wr32(device, NV04_PFIFO_CACHE1_HASH, 0x00000000);
}

void
nv04_fifo_start(struct nvkm_fifo *base, unsigned long *pflags)
__releases(fifo->base.lock)
{
	struct nv04_fifo *fifo = nv04_fifo(base);
	struct nvkm_device *device = fifo->base.engine.subdev.device;
	unsigned long flags = *pflags;

	nvkm_mask(device, NV04_PFIFO_CACHE1_PULL0, 0x00000001, 0x00000001);
	nvkm_wr32(device, NV03_PFIFO_CACHES, 0x00000001);

	spin_unlock_irqrestore(&fifo->base.lock, flags);
}

const struct nvkm_runl_func
nv04_runl = {
};

struct nvkm_engine *
nv04_fifo_id_engine(struct nvkm_fifo *fifo, int engi)
{
	enum nvkm_subdev_type type;

	switch (engi) {
	case NV04_FIFO_ENGN_SW  : type = NVKM_ENGINE_SW; break;
	case NV04_FIFO_ENGN_GR  : type = NVKM_ENGINE_GR; break;
	case NV04_FIFO_ENGN_MPEG: type = NVKM_ENGINE_MPEG; break;
	case NV04_FIFO_ENGN_DMA : type = NVKM_ENGINE_DMAOBJ; break;
	default:
		WARN_ON(1);
		return NULL;
	}

	return nvkm_device_engine(fifo->engine.subdev.device, type, 0);
}

int
nv04_fifo_engine_id(struct nvkm_fifo *base, struct nvkm_engine *engine)
{
	switch (engine->subdev.type) {
	case NVKM_ENGINE_SW    : return NV04_FIFO_ENGN_SW;
	case NVKM_ENGINE_GR    : return NV04_FIFO_ENGN_GR;
	case NVKM_ENGINE_MPEG  : return NV04_FIFO_ENGN_MPEG;
	case NVKM_ENGINE_DMAOBJ: return NV04_FIFO_ENGN_DMA;
	default:
		WARN_ON(1);
		return 0;
	}
}
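
/* Bits 31:29 of NV04_PFIFO_CACHE1_DMA_STATE encode the pusher error
 * code; nv_dma_state_err() below turns them into the strings printed
 * by the DMA_PUSHER interrupt handler.
 */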

static const char *
nv_dma_state_err(u32 state)
{
	static const char * const desc[] = {
		"NONE", "CALL_SUBR_ACTIVE", "INVALID_MTHD", "RET_SUBR_INACTIVE",
		"INVALID_CMD", "IB_EMPTY"/* NV50+ */, "MEM_FAULT", "UNK"
	};
	return desc[(state >> 29) & 0x7];
}

static bool
nv04_fifo_swmthd(struct nvkm_device *device, u32 chid, u32 addr, u32 data)
{
	struct nvkm_sw *sw = device->sw;
	const int subc = (addr & 0x0000e000) >> 13;
	const int mthd = (addr & 0x00001ffc);
	const u32 mask = 0x0000000f << (subc * 4);
	u32 engine = nvkm_rd32(device, 0x003280);
	bool handled = false;

	switch (mthd) {
	case 0x0000 ... 0x0000: /* subchannel's engine -> software */
		nvkm_wr32(device, 0x003280, (engine &= ~mask));
		fallthrough;
	case 0x0180 ... 0x01fc: /* handle -> instance */
		data = nvkm_rd32(device, 0x003258) & 0x0000ffff;
		fallthrough;
	case 0x0100 ... 0x017c:
	case 0x0200 ... 0x1ffc: /* pass method down to sw */
		if (!(engine & mask) && sw)
			handled = nvkm_sw_mthd(sw, chid, subc, mthd, data);
		break;
	default:
		break;
	}

	return handled;
}

static void
nv04_fifo_cache_error(struct nv04_fifo *fifo, u32 chid, u32 get)
{
	struct nvkm_subdev *subdev = &fifo->base.engine.subdev;
	struct nvkm_device *device = subdev->device;
	struct nvkm_fifo_chan *chan;
	unsigned long flags;
	u32 pull0 = nvkm_rd32(device, 0x003250);
	u32 mthd, data;
	int ptr;

	/* NV_PFIFO_CACHE1_GET actually goes to 0xffc before wrapping on my
	 * G80 chips, but CACHE1 isn't big enough for this much data.  Tests
	 * show that it wraps around to the start at GET=0x800.  No clue as
	 * to why.
	 */
	ptr = (get & 0x7ff) >> 2;

	if (device->card_type < NV_40) {
		mthd = nvkm_rd32(device, NV04_PFIFO_CACHE1_METHOD(ptr));
		data = nvkm_rd32(device, NV04_PFIFO_CACHE1_DATA(ptr));
	} else {
		mthd = nvkm_rd32(device, NV40_PFIFO_CACHE1_METHOD(ptr));
		data = nvkm_rd32(device, NV40_PFIFO_CACHE1_DATA(ptr));
	}

	if (!(pull0 & 0x00000100) ||
	    !nv04_fifo_swmthd(device, chid, mthd, data)) {
		chan = nvkm_fifo_chan_chid(&fifo->base, chid, &flags);
		nvkm_error(subdev, "CACHE_ERROR - "
			   "ch %d [%s] subc %d mthd %04x data %08x\n",
			   chid, chan ? chan->object.client->name : "unknown",
			   (mthd >> 13) & 7, mthd & 0x1ffc, data);
		nvkm_fifo_chan_put(&fifo->base, flags, &chan);
	}

	nvkm_wr32(device, NV04_PFIFO_CACHE1_DMA_PUSH, 0);
	nvkm_wr32(device, NV03_PFIFO_INTR_0, NV_PFIFO_INTR_CACHE_ERROR);

	nvkm_wr32(device, NV03_PFIFO_CACHE1_PUSH0,
		  nvkm_rd32(device, NV03_PFIFO_CACHE1_PUSH0) & ~1);
	nvkm_wr32(device, NV03_PFIFO_CACHE1_GET, get + 4);
	nvkm_wr32(device, NV03_PFIFO_CACHE1_PUSH0,
		  nvkm_rd32(device, NV03_PFIFO_CACHE1_PUSH0) | 1);
	nvkm_wr32(device, NV04_PFIFO_CACHE1_HASH, 0);

	nvkm_wr32(device, NV04_PFIFO_CACHE1_DMA_PUSH,
		  nvkm_rd32(device, NV04_PFIFO_CACHE1_DMA_PUSH) | 1);
	nvkm_wr32(device, NV04_PFIFO_CACHE1_PULL0, 1);
}
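
/* DMA_PUSHER recovery differs from the CACHE_ERROR recovery above:
 * rather than skipping a single cache entry (GET + 4), it jumps
 * DMA_GET forward to DMA_PUT, discarding whatever was left in the
 * pushbuffer when the error was raised.
 */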

static void
nv04_fifo_dma_pusher(struct nv04_fifo *fifo, u32 chid)
{
	struct nvkm_subdev *subdev = &fifo->base.engine.subdev;
	struct nvkm_device *device = subdev->device;
	u32 dma_get = nvkm_rd32(device, 0x003244);
	u32 dma_put = nvkm_rd32(device, 0x003240);
	u32 push = nvkm_rd32(device, 0x003220);
	u32 state = nvkm_rd32(device, 0x003228);
	struct nvkm_fifo_chan *chan;
	unsigned long flags;
	const char *name;

	chan = nvkm_fifo_chan_chid(&fifo->base, chid, &flags);
	name = chan ? chan->object.client->name : "unknown";
	if (device->card_type == NV_50) {
		u32 ho_get = nvkm_rd32(device, 0x003328);
		u32 ho_put = nvkm_rd32(device, 0x003320);
		u32 ib_get = nvkm_rd32(device, 0x003334);
		u32 ib_put = nvkm_rd32(device, 0x003330);

		nvkm_error(subdev, "DMA_PUSHER - "
			   "ch %d [%s] get %02x%08x put %02x%08x ib_get %08x "
			   "ib_put %08x state %08x (err: %s) push %08x\n",
			   chid, name, ho_get, dma_get, ho_put, dma_put,
			   ib_get, ib_put, state, nv_dma_state_err(state),
			   push);

		/* METHOD_COUNT, in DMA_STATE on earlier chipsets */
		nvkm_wr32(device, 0x003364, 0x00000000);
		if (dma_get != dma_put || ho_get != ho_put) {
			nvkm_wr32(device, 0x003244, dma_put);
			nvkm_wr32(device, 0x003328, ho_put);
		} else
		if (ib_get != ib_put)
			nvkm_wr32(device, 0x003334, ib_put);
	} else {
		nvkm_error(subdev, "DMA_PUSHER - ch %d [%s] get %08x put %08x "
				   "state %08x (err: %s) push %08x\n",
			   chid, name, dma_get, dma_put, state,
			   nv_dma_state_err(state), push);

		if (dma_get != dma_put)
			nvkm_wr32(device, 0x003244, dma_put);
	}
	nvkm_fifo_chan_put(&fifo->base, flags, &chan);

	nvkm_wr32(device, 0x003228, 0x00000000);
	nvkm_wr32(device, 0x003220, 0x00000001);
	nvkm_wr32(device, 0x002100, NV_PFIFO_INTR_DMA_PUSHER);
}

void
nv04_fifo_intr(struct nvkm_fifo *base)
{
	struct nv04_fifo *fifo = nv04_fifo(base);
	struct nvkm_subdev *subdev = &fifo->base.engine.subdev;
	struct nvkm_device *device = subdev->device;
	u32 mask = nvkm_rd32(device, NV03_PFIFO_INTR_EN_0);
	u32 stat = nvkm_rd32(device, NV03_PFIFO_INTR_0) & mask;
	u32 reassign, chid, get, sem;

	reassign = nvkm_rd32(device, NV03_PFIFO_CACHES) & 1;
	nvkm_wr32(device, NV03_PFIFO_CACHES, 0);

	chid = nvkm_rd32(device, NV03_PFIFO_CACHE1_PUSH1) & fifo->base.chid->mask;
	get = nvkm_rd32(device, NV03_PFIFO_CACHE1_GET);

	if (stat & NV_PFIFO_INTR_CACHE_ERROR) {
		nv04_fifo_cache_error(fifo, chid, get);
		stat &= ~NV_PFIFO_INTR_CACHE_ERROR;
	}

	if (stat & NV_PFIFO_INTR_DMA_PUSHER) {
		nv04_fifo_dma_pusher(fifo, chid);
		stat &= ~NV_PFIFO_INTR_DMA_PUSHER;
	}

	if (stat & NV_PFIFO_INTR_SEMAPHORE) {
		stat &= ~NV_PFIFO_INTR_SEMAPHORE;
		nvkm_wr32(device, NV03_PFIFO_INTR_0, NV_PFIFO_INTR_SEMAPHORE);

		sem = nvkm_rd32(device, NV10_PFIFO_CACHE1_SEMAPHORE);
		nvkm_wr32(device, NV10_PFIFO_CACHE1_SEMAPHORE, sem | 0x1);

		nvkm_wr32(device, NV03_PFIFO_CACHE1_GET, get + 4);
		nvkm_wr32(device, NV04_PFIFO_CACHE1_PULL0, 1);
	}

	if (device->card_type == NV_50) {
		if (stat & 0x00000010) {
			stat &= ~0x00000010;
			nvkm_wr32(device, 0x002100, 0x00000010);
		}

		if (stat & 0x40000000) {
			nvkm_wr32(device, 0x002100, 0x40000000);
			nvkm_fifo_uevent(&fifo->base);
			stat &= ~0x40000000;
		}
	}

	if (stat) {
		nvkm_warn(subdev, "intr %08x\n", stat);
		nvkm_mask(device, NV03_PFIFO_INTR_EN_0, stat, 0x00000000);
		nvkm_wr32(device, NV03_PFIFO_INTR_0, stat);
	}

	nvkm_wr32(device, NV03_PFIFO_CACHES, reassign);
}
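
/* One-time PFIFO bring-up: point the hardware at the RAMHT/RAMRO/RAMFC
 * areas carved out by instmem, ack and unmask all interrupts, and
 * enable the cache/puller with the reserved "invalid" channel id
 * (chid->mask) loaded.
 */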

void
nv04_fifo_init(struct nvkm_fifo *fifo)
{
	struct nvkm_device *device = fifo->engine.subdev.device;
	struct nvkm_instmem *imem = device->imem;
	struct nvkm_ramht *ramht = imem->ramht;
	struct nvkm_memory *ramro = imem->ramro;
	struct nvkm_memory *ramfc = imem->ramfc;

	nvkm_wr32(device, NV04_PFIFO_DELAY_0, 0x000000ff);
	nvkm_wr32(device, NV04_PFIFO_DMA_TIMESLICE, 0x0101ffff);

	nvkm_wr32(device, NV03_PFIFO_RAMHT, (0x03 << 24) /* search 128 */ |
					    ((ramht->bits - 9) << 16) |
					    (ramht->gpuobj->addr >> 8));
	nvkm_wr32(device, NV03_PFIFO_RAMRO, nvkm_memory_addr(ramro) >> 8);
	nvkm_wr32(device, NV03_PFIFO_RAMFC, nvkm_memory_addr(ramfc) >> 8);

	nvkm_wr32(device, NV03_PFIFO_CACHE1_PUSH1, fifo->chid->mask);

	nvkm_wr32(device, NV03_PFIFO_INTR_0, 0xffffffff);
	nvkm_wr32(device, NV03_PFIFO_INTR_EN_0, 0xffffffff);

	nvkm_wr32(device, NV03_PFIFO_CACHE1_PUSH0, 1);
	nvkm_wr32(device, NV04_PFIFO_CACHE1_PULL0, 1);
	nvkm_wr32(device, NV03_PFIFO_CACHES, 1);
}

int
nv04_fifo_runl_ctor(struct nvkm_fifo *fifo)
{
	struct nvkm_runl *runl;

	runl = nvkm_runl_new(fifo, 0, 0, 0);
	if (IS_ERR(runl))
		return PTR_ERR(runl);

	nvkm_runl_add(runl, 0, fifo->func->engn_sw, NVKM_ENGINE_SW, 0);
	nvkm_runl_add(runl, 0, fifo->func->engn_sw, NVKM_ENGINE_DMAOBJ, 0);
	nvkm_runl_add(runl, 1, fifo->func->engn   , NVKM_ENGINE_GR, 0);
	nvkm_runl_add(runl, 2, fifo->func->engn   , NVKM_ENGINE_MPEG, 0); /* NV31- */
	return 0;
}

int
nv04_fifo_chid_ctor(struct nvkm_fifo *fifo, int nr)
{
	/* The last CHID is reserved by HW as a "channel invalid" marker. */
	return nvkm_chid_new(&nvkm_chan_event, &fifo->engine.subdev, nr, 0, nr - 1, &fifo->chid);
}
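
/* NV04-class parts provide a fixed pool of 16 channels; the same count
 * is passed from nv04_fifo_new() below, with the last ID reserved as
 * the "channel invalid" marker by nv04_fifo_chid_ctor() above.
 */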

static int
nv04_fifo_chid_nr(struct nvkm_fifo *fifo)
{
	return 16;
}

int
nv04_fifo_new_(const struct nvkm_fifo_func *func, struct nvkm_device *device,
	       enum nvkm_subdev_type type, int inst, int nr, const struct nv04_fifo_ramfc *ramfc,
	       struct nvkm_fifo **pfifo)
{
	struct nv04_fifo *fifo;
	int ret;

	if (!(fifo = kzalloc(sizeof(*fifo), GFP_KERNEL)))
		return -ENOMEM;
	fifo->ramfc = ramfc;
	*pfifo = &fifo->base;

	ret = nvkm_fifo_ctor(func, device, type, inst, &fifo->base);
	if (ret)
		return ret;

	set_bit(nr - 1, fifo->base.mask); /* inactive channel */
	return 0;
}

static const struct nvkm_fifo_func
nv04_fifo = {
	.chid_nr = nv04_fifo_chid_nr,
	.chid_ctor = nv04_fifo_chid_ctor,
	.runl_ctor = nv04_fifo_runl_ctor,
	.init = nv04_fifo_init,
	.intr = nv04_fifo_intr,
	.engine_id = nv04_fifo_engine_id,
	.id_engine = nv04_fifo_id_engine,
	.pause = nv04_fifo_pause,
	.start = nv04_fifo_start,
	.runl = &nv04_runl,
	.engn = &nv04_engn,
	.engn_sw = &nv04_engn,
	.cgrp = {{ }, &nv04_cgrp },
	.chan = {{ 0, 0, NV03_CHANNEL_DMA }, &nv04_chan, .oclass = &nv04_fifo_dma_oclass },
};

int
nv04_fifo_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst,
	      struct nvkm_fifo **pfifo)
{
	return nv04_fifo_new_(&nv04_fifo, device, type, inst, 16, nv04_fifo_ramfc, pfifo);
}