1 /* 2 * Copyright 2012 Red Hat Inc. 3 * 4 * Permission is hereby granted, free of charge, to any person obtaining a 5 * copy of this software and associated documentation files (the "Software"), 6 * to deal in the Software without restriction, including without limitation 7 * the rights to use, copy, modify, merge, publish, distribute, sublicense, 8 * and/or sell copies of the Software, and to permit persons to whom the 9 * Software is furnished to do so, subject to the following conditions: 10 * 11 * The above copyright notice and this permission notice shall be included in 12 * all copies or substantial portions of the Software. 13 * 14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL 17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR 18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, 19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR 20 * OTHER DEALINGS IN THE SOFTWARE. 21 * 22 * Authors: Ben Skeggs 23 */ 24 #include "cgrp.h" 25 #include "chan.h" 26 #include "chid.h" 27 #include "runl.h" 28 29 #include "nv04.h" 30 #include "channv04.h" 31 #include "regsnv04.h" 32 33 #include <core/ramht.h> 34 #include <subdev/instmem.h> 35 #include <subdev/mc.h> 36 #include <subdev/timer.h> 37 #include <engine/sw.h> 38 39 #include <nvif/class.h> 40 41 void 42 nv04_chan_stop(struct nvkm_chan *chan) 43 { 44 struct nvkm_fifo *fifo = chan->cgrp->runl->fifo; 45 struct nvkm_device *device = fifo->engine.subdev.device; 46 struct nvkm_memory *fctx = device->imem->ramfc; 47 const struct nvkm_ramfc_layout *c; 48 unsigned long flags; 49 u32 data = chan->ramfc_offset; 50 u32 chid; 51 52 /* prevent fifo context switches */ 53 spin_lock_irqsave(&fifo->lock, flags); 54 nvkm_wr32(device, NV03_PFIFO_CACHES, 0); 55 56 /* if this channel is active, replace it with a null context */ 57 chid = nvkm_rd32(device, NV03_PFIFO_CACHE1_PUSH1) & fifo->chid->mask; 58 if (chid == chan->id) { 59 nvkm_mask(device, NV04_PFIFO_CACHE1_DMA_PUSH, 0x00000001, 0); 60 nvkm_wr32(device, NV03_PFIFO_CACHE1_PUSH0, 0); 61 nvkm_mask(device, NV04_PFIFO_CACHE1_PULL0, 0x00000001, 0); 62 63 c = chan->func->ramfc->layout; 64 nvkm_kmap(fctx); 65 do { 66 u32 rm = ((1ULL << c->bits) - 1) << c->regs; 67 u32 cm = ((1ULL << c->bits) - 1) << c->ctxs; 68 u32 rv = (nvkm_rd32(device, c->regp) & rm) >> c->regs; 69 u32 cv = (nvkm_ro32(fctx, c->ctxp + data) & ~cm); 70 nvkm_wo32(fctx, c->ctxp + data, cv | (rv << c->ctxs)); 71 } while ((++c)->bits); 72 nvkm_done(fctx); 73 74 c = chan->func->ramfc->layout; 75 do { 76 nvkm_wr32(device, c->regp, 0x00000000); 77 } while ((++c)->bits); 78 79 nvkm_wr32(device, NV03_PFIFO_CACHE1_GET, 0); 80 nvkm_wr32(device, NV03_PFIFO_CACHE1_PUT, 0); 81 nvkm_wr32(device, NV03_PFIFO_CACHE1_PUSH1, fifo->chid->mask); 82 nvkm_wr32(device, NV03_PFIFO_CACHE1_PUSH0, 1); 83 nvkm_wr32(device, NV04_PFIFO_CACHE1_PULL0, 1); 84 } 85 86 /* restore normal operation, after disabling dma mode */ 87 nvkm_mask(device, NV04_PFIFO_MODE, BIT(chan->id), 0); 88 nvkm_wr32(device, NV03_PFIFO_CACHES, 1); 89 spin_unlock_irqrestore(&fifo->lock, flags); 90 } 91 92 void 93 nv04_chan_start(struct nvkm_chan *chan) 94 { 95 struct nvkm_fifo *fifo = chan->cgrp->runl->fifo; 96 unsigned long flags; 97 98 spin_lock_irqsave(&fifo->lock, flags); 99 nvkm_mask(fifo->engine.subdev.device, NV04_PFIFO_MODE, BIT(chan->id), BIT(chan->id)); 100 spin_unlock_irqrestore(&fifo->lock, flags); 101 } 102 103 void 104 nv04_chan_ramfc_clear(struct nvkm_chan *chan) 105 { 106 struct nvkm_memory *ramfc = chan->cgrp->runl->fifo->engine.subdev.device->imem->ramfc; 107 const struct nvkm_ramfc_layout *c = chan->func->ramfc->layout; 108 109 nvkm_kmap(ramfc); 110 do { 111 nvkm_wo32(ramfc, chan->ramfc_offset + c->ctxp, 0x00000000); 112 } while ((++c)->bits); 113 nvkm_done(ramfc); 114 } 115 116 static int 117 nv04_chan_ramfc_write(struct nvkm_chan *chan, u64 offset, u64 length, u32 devm, bool priv) 118 { 119 struct nvkm_memory *ramfc = chan->cgrp->runl->fifo->engine.subdev.device->imem->ramfc; 120 const u32 base = chan->id * 32; 121 122 chan->ramfc_offset = base; 123 124 nvkm_kmap(ramfc); 125 nvkm_wo32(ramfc, base + 0x00, offset); 126 nvkm_wo32(ramfc, base + 0x04, offset); 127 nvkm_wo32(ramfc, base + 0x08, chan->push->addr >> 4); 128 nvkm_wo32(ramfc, base + 0x10, NV_PFIFO_CACHE1_DMA_FETCH_TRIG_128_BYTES | 129 NV_PFIFO_CACHE1_DMA_FETCH_SIZE_128_BYTES | 130 #ifdef __BIG_ENDIAN 131 NV_PFIFO_CACHE1_BIG_ENDIAN | 132 #endif 133 NV_PFIFO_CACHE1_DMA_FETCH_MAX_REQS_8); 134 nvkm_done(ramfc); 135 return 0; 136 } 137 138 static const struct nvkm_chan_func_ramfc 139 nv04_chan_ramfc = { 140 .layout = (const struct nvkm_ramfc_layout[]) { 141 { 32, 0, 0x00, 0, NV04_PFIFO_CACHE1_DMA_PUT }, 142 { 32, 0, 0x04, 0, NV04_PFIFO_CACHE1_DMA_GET }, 143 { 16, 0, 0x08, 0, NV04_PFIFO_CACHE1_DMA_INSTANCE }, 144 { 16, 16, 0x08, 0, NV04_PFIFO_CACHE1_DMA_DCOUNT }, 145 { 32, 0, 0x0c, 0, NV04_PFIFO_CACHE1_DMA_STATE }, 146 { 32, 0, 0x10, 0, NV04_PFIFO_CACHE1_DMA_FETCH }, 147 { 32, 0, 0x14, 0, NV04_PFIFO_CACHE1_ENGINE }, 148 { 32, 0, 0x18, 0, NV04_PFIFO_CACHE1_PULL1 }, 149 {} 150 }, 151 .write = nv04_chan_ramfc_write, 152 .clear = nv04_chan_ramfc_clear, 153 .ctxdma = true, 154 }; 155 156 const struct nvkm_chan_func_userd 157 nv04_chan_userd = { 158 .bar = 0, 159 .base = 0x800000, 160 .size = 0x010000, 161 }; 162 163 const struct nvkm_chan_func_inst 164 nv04_chan_inst = { 165 .size = 0x1000, 166 }; 167 168 static const struct nvkm_chan_func 169 nv04_chan = { 170 .inst = &nv04_chan_inst, 171 .userd = &nv04_chan_userd, 172 .ramfc = &nv04_chan_ramfc, 173 .start = nv04_chan_start, 174 .stop = nv04_chan_stop, 175 }; 176 177 const struct nvkm_cgrp_func 178 nv04_cgrp = { 179 }; 180 181 const struct nvkm_engn_func 182 nv04_engn = { 183 }; 184 185 void 186 nv04_fifo_pause(struct nvkm_fifo *fifo, unsigned long *pflags) 187 __acquires(fifo->lock) 188 { 189 struct nvkm_device *device = fifo->engine.subdev.device; 190 unsigned long flags; 191 192 spin_lock_irqsave(&fifo->lock, flags); 193 *pflags = flags; 194 195 nvkm_wr32(device, NV03_PFIFO_CACHES, 0x00000000); 196 nvkm_mask(device, NV04_PFIFO_CACHE1_PULL0, 0x00000001, 0x00000000); 197 198 /* in some cases the puller may be left in an inconsistent state 199 * if you try to stop it while it's busy translating handles. 200 * sometimes you get a CACHE_ERROR, sometimes it just fails 201 * silently; sending incorrect instance offsets to PGRAPH after 202 * it's started up again. 203 * 204 * to avoid this, we invalidate the most recently calculated 205 * instance. 206 */ 207 nvkm_msec(device, 2000, 208 u32 tmp = nvkm_rd32(device, NV04_PFIFO_CACHE1_PULL0); 209 if (!(tmp & NV04_PFIFO_CACHE1_PULL0_HASH_BUSY)) 210 break; 211 ); 212 213 if (nvkm_rd32(device, NV04_PFIFO_CACHE1_PULL0) & 214 NV04_PFIFO_CACHE1_PULL0_HASH_FAILED) 215 nvkm_wr32(device, NV03_PFIFO_INTR_0, NV_PFIFO_INTR_CACHE_ERROR); 216 217 nvkm_wr32(device, NV04_PFIFO_CACHE1_HASH, 0x00000000); 218 } 219 220 void 221 nv04_fifo_start(struct nvkm_fifo *fifo, unsigned long *pflags) 222 __releases(fifo->lock) 223 { 224 struct nvkm_device *device = fifo->engine.subdev.device; 225 unsigned long flags = *pflags; 226 227 nvkm_mask(device, NV04_PFIFO_CACHE1_PULL0, 0x00000001, 0x00000001); 228 nvkm_wr32(device, NV03_PFIFO_CACHES, 0x00000001); 229 230 spin_unlock_irqrestore(&fifo->lock, flags); 231 } 232 233 const struct nvkm_runl_func 234 nv04_runl = { 235 }; 236 237 static const char * 238 nv_dma_state_err(u32 state) 239 { 240 static const char * const desc[] = { 241 "NONE", "CALL_SUBR_ACTIVE", "INVALID_MTHD", "RET_SUBR_INACTIVE", 242 "INVALID_CMD", "IB_EMPTY"/* NV50+ */, "MEM_FAULT", "UNK" 243 }; 244 return desc[(state >> 29) & 0x7]; 245 } 246 247 static bool 248 nv04_fifo_swmthd(struct nvkm_device *device, u32 chid, u32 addr, u32 data) 249 { 250 struct nvkm_sw *sw = device->sw; 251 const int subc = (addr & 0x0000e000) >> 13; 252 const int mthd = (addr & 0x00001ffc); 253 const u32 mask = 0x0000000f << (subc * 4); 254 u32 engine = nvkm_rd32(device, 0x003280); 255 bool handled = false; 256 257 switch (mthd) { 258 case 0x0000 ... 0x0000: /* subchannel's engine -> software */ 259 nvkm_wr32(device, 0x003280, (engine &= ~mask)); 260 fallthrough; 261 case 0x0180 ... 0x01fc: /* handle -> instance */ 262 data = nvkm_rd32(device, 0x003258) & 0x0000ffff; 263 fallthrough; 264 case 0x0100 ... 0x017c: 265 case 0x0200 ... 0x1ffc: /* pass method down to sw */ 266 if (!(engine & mask) && sw) 267 handled = nvkm_sw_mthd(sw, chid, subc, mthd, data); 268 break; 269 default: 270 break; 271 } 272 273 return handled; 274 } 275 276 static void 277 nv04_fifo_intr_cache_error(struct nvkm_fifo *fifo, u32 chid, u32 get) 278 { 279 struct nvkm_subdev *subdev = &fifo->engine.subdev; 280 struct nvkm_device *device = subdev->device; 281 struct nvkm_chan *chan; 282 unsigned long flags; 283 u32 pull0 = nvkm_rd32(device, 0x003250); 284 u32 mthd, data; 285 int ptr; 286 287 /* NV_PFIFO_CACHE1_GET actually goes to 0xffc before wrapping on my 288 * G80 chips, but CACHE1 isn't big enough for this much data.. Tests 289 * show that it wraps around to the start at GET=0x800.. No clue as to 290 * why.. 291 */ 292 ptr = (get & 0x7ff) >> 2; 293 294 if (device->card_type < NV_40) { 295 mthd = nvkm_rd32(device, NV04_PFIFO_CACHE1_METHOD(ptr)); 296 data = nvkm_rd32(device, NV04_PFIFO_CACHE1_DATA(ptr)); 297 } else { 298 mthd = nvkm_rd32(device, NV40_PFIFO_CACHE1_METHOD(ptr)); 299 data = nvkm_rd32(device, NV40_PFIFO_CACHE1_DATA(ptr)); 300 } 301 302 if (!(pull0 & 0x00000100) || 303 !nv04_fifo_swmthd(device, chid, mthd, data)) { 304 chan = nvkm_chan_get_chid(&fifo->engine, chid, &flags); 305 nvkm_error(subdev, "CACHE_ERROR - " 306 "ch %d [%s] subc %d mthd %04x data %08x\n", 307 chid, chan ? chan->name : "unknown", 308 (mthd >> 13) & 7, mthd & 0x1ffc, data); 309 nvkm_chan_put(&chan, flags); 310 } 311 312 nvkm_wr32(device, NV04_PFIFO_CACHE1_DMA_PUSH, 0); 313 nvkm_wr32(device, NV03_PFIFO_INTR_0, NV_PFIFO_INTR_CACHE_ERROR); 314 315 nvkm_wr32(device, NV03_PFIFO_CACHE1_PUSH0, 316 nvkm_rd32(device, NV03_PFIFO_CACHE1_PUSH0) & ~1); 317 nvkm_wr32(device, NV03_PFIFO_CACHE1_GET, get + 4); 318 nvkm_wr32(device, NV03_PFIFO_CACHE1_PUSH0, 319 nvkm_rd32(device, NV03_PFIFO_CACHE1_PUSH0) | 1); 320 nvkm_wr32(device, NV04_PFIFO_CACHE1_HASH, 0); 321 322 nvkm_wr32(device, NV04_PFIFO_CACHE1_DMA_PUSH, 323 nvkm_rd32(device, NV04_PFIFO_CACHE1_DMA_PUSH) | 1); 324 nvkm_wr32(device, NV04_PFIFO_CACHE1_PULL0, 1); 325 } 326 327 static void 328 nv04_fifo_intr_dma_pusher(struct nvkm_fifo *fifo, u32 chid) 329 { 330 struct nvkm_subdev *subdev = &fifo->engine.subdev; 331 struct nvkm_device *device = subdev->device; 332 u32 dma_get = nvkm_rd32(device, 0x003244); 333 u32 dma_put = nvkm_rd32(device, 0x003240); 334 u32 push = nvkm_rd32(device, 0x003220); 335 u32 state = nvkm_rd32(device, 0x003228); 336 struct nvkm_chan *chan; 337 unsigned long flags; 338 const char *name; 339 340 chan = nvkm_chan_get_chid(&fifo->engine, chid, &flags); 341 name = chan ? chan->name : "unknown"; 342 if (device->card_type == NV_50) { 343 u32 ho_get = nvkm_rd32(device, 0x003328); 344 u32 ho_put = nvkm_rd32(device, 0x003320); 345 u32 ib_get = nvkm_rd32(device, 0x003334); 346 u32 ib_put = nvkm_rd32(device, 0x003330); 347 348 nvkm_error(subdev, "DMA_PUSHER - " 349 "ch %d [%s] get %02x%08x put %02x%08x ib_get %08x " 350 "ib_put %08x state %08x (err: %s) push %08x\n", 351 chid, name, ho_get, dma_get, ho_put, dma_put, 352 ib_get, ib_put, state, nv_dma_state_err(state), 353 push); 354 355 /* METHOD_COUNT, in DMA_STATE on earlier chipsets */ 356 nvkm_wr32(device, 0x003364, 0x00000000); 357 if (dma_get != dma_put || ho_get != ho_put) { 358 nvkm_wr32(device, 0x003244, dma_put); 359 nvkm_wr32(device, 0x003328, ho_put); 360 } else 361 if (ib_get != ib_put) 362 nvkm_wr32(device, 0x003334, ib_put); 363 } else { 364 nvkm_error(subdev, "DMA_PUSHER - ch %d [%s] get %08x put %08x " 365 "state %08x (err: %s) push %08x\n", 366 chid, name, dma_get, dma_put, state, 367 nv_dma_state_err(state), push); 368 369 if (dma_get != dma_put) 370 nvkm_wr32(device, 0x003244, dma_put); 371 } 372 nvkm_chan_put(&chan, flags); 373 374 nvkm_wr32(device, 0x003228, 0x00000000); 375 nvkm_wr32(device, 0x003220, 0x00000001); 376 nvkm_wr32(device, 0x002100, NV_PFIFO_INTR_DMA_PUSHER); 377 } 378 379 irqreturn_t 380 nv04_fifo_intr(struct nvkm_inth *inth) 381 { 382 struct nvkm_fifo *fifo = container_of(inth, typeof(*fifo), engine.subdev.inth); 383 struct nvkm_subdev *subdev = &fifo->engine.subdev; 384 struct nvkm_device *device = subdev->device; 385 u32 mask = nvkm_rd32(device, NV03_PFIFO_INTR_EN_0); 386 u32 stat = nvkm_rd32(device, NV03_PFIFO_INTR_0) & mask; 387 u32 reassign, chid, get, sem; 388 389 reassign = nvkm_rd32(device, NV03_PFIFO_CACHES) & 1; 390 nvkm_wr32(device, NV03_PFIFO_CACHES, 0); 391 392 chid = nvkm_rd32(device, NV03_PFIFO_CACHE1_PUSH1) & fifo->chid->mask; 393 get = nvkm_rd32(device, NV03_PFIFO_CACHE1_GET); 394 395 if (stat & NV_PFIFO_INTR_CACHE_ERROR) { 396 nv04_fifo_intr_cache_error(fifo, chid, get); 397 stat &= ~NV_PFIFO_INTR_CACHE_ERROR; 398 } 399 400 if (stat & NV_PFIFO_INTR_DMA_PUSHER) { 401 nv04_fifo_intr_dma_pusher(fifo, chid); 402 stat &= ~NV_PFIFO_INTR_DMA_PUSHER; 403 } 404 405 if (stat & NV_PFIFO_INTR_SEMAPHORE) { 406 stat &= ~NV_PFIFO_INTR_SEMAPHORE; 407 nvkm_wr32(device, NV03_PFIFO_INTR_0, NV_PFIFO_INTR_SEMAPHORE); 408 409 sem = nvkm_rd32(device, NV10_PFIFO_CACHE1_SEMAPHORE); 410 nvkm_wr32(device, NV10_PFIFO_CACHE1_SEMAPHORE, sem | 0x1); 411 412 nvkm_wr32(device, NV03_PFIFO_CACHE1_GET, get + 4); 413 nvkm_wr32(device, NV04_PFIFO_CACHE1_PULL0, 1); 414 } 415 416 if (device->card_type == NV_50) { 417 if (stat & 0x00000010) { 418 stat &= ~0x00000010; 419 nvkm_wr32(device, 0x002100, 0x00000010); 420 } 421 422 if (stat & 0x40000000) { 423 nvkm_wr32(device, 0x002100, 0x40000000); 424 nvkm_event_ntfy(&fifo->nonstall.event, 0, NVKM_FIFO_NONSTALL_EVENT); 425 stat &= ~0x40000000; 426 } 427 } 428 429 if (stat) { 430 nvkm_warn(subdev, "intr %08x\n", stat); 431 nvkm_mask(device, NV03_PFIFO_INTR_EN_0, stat, 0x00000000); 432 nvkm_wr32(device, NV03_PFIFO_INTR_0, stat); 433 } 434 435 nvkm_wr32(device, NV03_PFIFO_CACHES, reassign); 436 return IRQ_HANDLED; 437 } 438 439 void 440 nv04_fifo_init(struct nvkm_fifo *fifo) 441 { 442 struct nvkm_device *device = fifo->engine.subdev.device; 443 struct nvkm_instmem *imem = device->imem; 444 struct nvkm_ramht *ramht = imem->ramht; 445 struct nvkm_memory *ramro = imem->ramro; 446 struct nvkm_memory *ramfc = imem->ramfc; 447 448 nvkm_wr32(device, NV04_PFIFO_DELAY_0, 0x000000ff); 449 nvkm_wr32(device, NV04_PFIFO_DMA_TIMESLICE, 0x0101ffff); 450 451 nvkm_wr32(device, NV03_PFIFO_RAMHT, (0x03 << 24) /* search 128 */ | 452 ((ramht->bits - 9) << 16) | 453 (ramht->gpuobj->addr >> 8)); 454 nvkm_wr32(device, NV03_PFIFO_RAMRO, nvkm_memory_addr(ramro) >> 8); 455 nvkm_wr32(device, NV03_PFIFO_RAMFC, nvkm_memory_addr(ramfc) >> 8); 456 457 nvkm_wr32(device, NV03_PFIFO_CACHE1_PUSH1, fifo->chid->mask); 458 459 nvkm_wr32(device, NV03_PFIFO_INTR_0, 0xffffffff); 460 nvkm_wr32(device, NV03_PFIFO_INTR_EN_0, 0xffffffff); 461 462 nvkm_wr32(device, NV03_PFIFO_CACHE1_PUSH0, 1); 463 nvkm_wr32(device, NV04_PFIFO_CACHE1_PULL0, 1); 464 nvkm_wr32(device, NV03_PFIFO_CACHES, 1); 465 } 466 467 int 468 nv04_fifo_runl_ctor(struct nvkm_fifo *fifo) 469 { 470 struct nvkm_runl *runl; 471 472 runl = nvkm_runl_new(fifo, 0, 0, 0); 473 if (IS_ERR(runl)) 474 return PTR_ERR(runl); 475 476 nvkm_runl_add(runl, 0, fifo->func->engn_sw, NVKM_ENGINE_SW, 0); 477 nvkm_runl_add(runl, 0, fifo->func->engn_sw, NVKM_ENGINE_DMAOBJ, 0); 478 nvkm_runl_add(runl, 1, fifo->func->engn , NVKM_ENGINE_GR, 0); 479 nvkm_runl_add(runl, 2, fifo->func->engn , NVKM_ENGINE_MPEG, 0); /* NV31- */ 480 return 0; 481 } 482 483 int 484 nv04_fifo_chid_ctor(struct nvkm_fifo *fifo, int nr) 485 { 486 /* The last CHID is reserved by HW as a "channel invalid" marker. */ 487 return nvkm_chid_new(&nvkm_chan_event, &fifo->engine.subdev, nr, 0, nr - 1, &fifo->chid); 488 } 489 490 static int 491 nv04_fifo_chid_nr(struct nvkm_fifo *fifo) 492 { 493 return 16; 494 } 495 496 int 497 nv04_fifo_new_(const struct nvkm_fifo_func *func, struct nvkm_device *device, 498 enum nvkm_subdev_type type, int inst, int nr, const struct nv04_fifo_ramfc *ramfc, 499 struct nvkm_fifo **pfifo) 500 { 501 struct nv04_fifo *fifo; 502 int ret; 503 504 if (!(fifo = kzalloc(sizeof(*fifo), GFP_KERNEL))) 505 return -ENOMEM; 506 *pfifo = &fifo->base; 507 508 ret = nvkm_fifo_ctor(func, device, type, inst, &fifo->base); 509 if (ret) 510 return ret; 511 512 return 0; 513 } 514 515 static const struct nvkm_fifo_func 516 nv04_fifo = { 517 .chid_nr = nv04_fifo_chid_nr, 518 .chid_ctor = nv04_fifo_chid_ctor, 519 .runl_ctor = nv04_fifo_runl_ctor, 520 .init = nv04_fifo_init, 521 .intr = nv04_fifo_intr, 522 .pause = nv04_fifo_pause, 523 .start = nv04_fifo_start, 524 .runl = &nv04_runl, 525 .engn = &nv04_engn, 526 .engn_sw = &nv04_engn, 527 .cgrp = {{ }, &nv04_cgrp }, 528 .chan = {{ 0, 0, NV03_CHANNEL_DMA }, &nv04_chan, .oclass = &nv04_fifo_dma_oclass }, 529 }; 530 531 int 532 nv04_fifo_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst, 533 struct nvkm_fifo **pfifo) 534 { 535 return nv04_fifo_new_(&nv04_fifo, device, type, inst, 0, NULL, pfifo); 536 } 537