1 /* 2 * Copyright 2012 Red Hat Inc. 3 * 4 * Permission is hereby granted, free of charge, to any person obtaining a 5 * copy of this software and associated documentation files (the "Software"), 6 * to deal in the Software without restriction, including without limitation 7 * the rights to use, copy, modify, merge, publish, distribute, sublicense, 8 * and/or sell copies of the Software, and to permit persons to whom the 9 * Software is furnished to do so, subject to the following conditions: 10 * 11 * The above copyright notice and this permission notice shall be included in 12 * all copies or substantial portions of the Software. 13 * 14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL 17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR 18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, 19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR 20 * OTHER DEALINGS IN THE SOFTWARE. 21 * 22 * Authors: Ben Skeggs 23 */ 24 #include "priv.h" 25 #include "cgrp.h" 26 #include "chan.h" 27 #include "chid.h" 28 #include "runl.h" 29 30 #include "regsnv04.h" 31 32 #include <core/ramht.h> 33 #include <subdev/instmem.h> 34 #include <subdev/mc.h> 35 #include <subdev/timer.h> 36 #include <engine/sw.h> 37 38 #include <nvif/class.h> 39 40 void 41 nv04_chan_stop(struct nvkm_chan *chan) 42 { 43 struct nvkm_fifo *fifo = chan->cgrp->runl->fifo; 44 struct nvkm_device *device = fifo->engine.subdev.device; 45 struct nvkm_memory *fctx = device->imem->ramfc; 46 const struct nvkm_ramfc_layout *c; 47 unsigned long flags; 48 u32 data = chan->ramfc_offset; 49 u32 chid; 50 51 /* prevent fifo context switches */ 52 spin_lock_irqsave(&fifo->lock, flags); 53 nvkm_wr32(device, NV03_PFIFO_CACHES, 0); 54 55 /* if this channel is active, replace it with a null context */ 56 chid = nvkm_rd32(device, NV03_PFIFO_CACHE1_PUSH1) & fifo->chid->mask; 57 if (chid == chan->id) { 58 nvkm_mask(device, NV04_PFIFO_CACHE1_DMA_PUSH, 0x00000001, 0); 59 nvkm_wr32(device, NV03_PFIFO_CACHE1_PUSH0, 0); 60 nvkm_mask(device, NV04_PFIFO_CACHE1_PULL0, 0x00000001, 0); 61 62 c = chan->func->ramfc->layout; 63 nvkm_kmap(fctx); 64 do { 65 u32 rm = ((1ULL << c->bits) - 1) << c->regs; 66 u32 cm = ((1ULL << c->bits) - 1) << c->ctxs; 67 u32 rv = (nvkm_rd32(device, c->regp) & rm) >> c->regs; 68 u32 cv = (nvkm_ro32(fctx, c->ctxp + data) & ~cm); 69 nvkm_wo32(fctx, c->ctxp + data, cv | (rv << c->ctxs)); 70 } while ((++c)->bits); 71 nvkm_done(fctx); 72 73 c = chan->func->ramfc->layout; 74 do { 75 nvkm_wr32(device, c->regp, 0x00000000); 76 } while ((++c)->bits); 77 78 nvkm_wr32(device, NV03_PFIFO_CACHE1_GET, 0); 79 nvkm_wr32(device, NV03_PFIFO_CACHE1_PUT, 0); 80 nvkm_wr32(device, NV03_PFIFO_CACHE1_PUSH1, fifo->chid->mask); 81 nvkm_wr32(device, NV03_PFIFO_CACHE1_PUSH0, 1); 82 nvkm_wr32(device, NV04_PFIFO_CACHE1_PULL0, 1); 83 } 84 85 /* restore normal operation, after disabling dma mode */ 86 nvkm_mask(device, NV04_PFIFO_MODE, BIT(chan->id), 0); 87 nvkm_wr32(device, NV03_PFIFO_CACHES, 1); 88 spin_unlock_irqrestore(&fifo->lock, flags); 89 } 90 91 void 92 nv04_chan_start(struct nvkm_chan *chan) 93 { 94 struct nvkm_fifo *fifo = chan->cgrp->runl->fifo; 95 unsigned long flags; 96 97 spin_lock_irqsave(&fifo->lock, flags); 98 nvkm_mask(fifo->engine.subdev.device, NV04_PFIFO_MODE, BIT(chan->id), BIT(chan->id)); 99 spin_unlock_irqrestore(&fifo->lock, flags); 100 } 101 102 void 103 nv04_chan_ramfc_clear(struct nvkm_chan *chan) 104 { 105 struct nvkm_memory *ramfc = chan->cgrp->runl->fifo->engine.subdev.device->imem->ramfc; 106 const struct nvkm_ramfc_layout *c = chan->func->ramfc->layout; 107 108 nvkm_kmap(ramfc); 109 do { 110 nvkm_wo32(ramfc, chan->ramfc_offset + c->ctxp, 0x00000000); 111 } while ((++c)->bits); 112 nvkm_done(ramfc); 113 } 114 115 static int 116 nv04_chan_ramfc_write(struct nvkm_chan *chan, u64 offset, u64 length, u32 devm, bool priv) 117 { 118 struct nvkm_memory *ramfc = chan->cgrp->runl->fifo->engine.subdev.device->imem->ramfc; 119 const u32 base = chan->id * 32; 120 121 chan->ramfc_offset = base; 122 123 nvkm_kmap(ramfc); 124 nvkm_wo32(ramfc, base + 0x00, offset); 125 nvkm_wo32(ramfc, base + 0x04, offset); 126 nvkm_wo32(ramfc, base + 0x08, chan->push->addr >> 4); 127 nvkm_wo32(ramfc, base + 0x10, NV_PFIFO_CACHE1_DMA_FETCH_TRIG_128_BYTES | 128 NV_PFIFO_CACHE1_DMA_FETCH_SIZE_128_BYTES | 129 #ifdef __BIG_ENDIAN 130 NV_PFIFO_CACHE1_BIG_ENDIAN | 131 #endif 132 NV_PFIFO_CACHE1_DMA_FETCH_MAX_REQS_8); 133 nvkm_done(ramfc); 134 return 0; 135 } 136 137 static const struct nvkm_chan_func_ramfc 138 nv04_chan_ramfc = { 139 .layout = (const struct nvkm_ramfc_layout[]) { 140 { 32, 0, 0x00, 0, NV04_PFIFO_CACHE1_DMA_PUT }, 141 { 32, 0, 0x04, 0, NV04_PFIFO_CACHE1_DMA_GET }, 142 { 16, 0, 0x08, 0, NV04_PFIFO_CACHE1_DMA_INSTANCE }, 143 { 16, 16, 0x08, 0, NV04_PFIFO_CACHE1_DMA_DCOUNT }, 144 { 32, 0, 0x0c, 0, NV04_PFIFO_CACHE1_DMA_STATE }, 145 { 32, 0, 0x10, 0, NV04_PFIFO_CACHE1_DMA_FETCH }, 146 { 32, 0, 0x14, 0, NV04_PFIFO_CACHE1_ENGINE }, 147 { 32, 0, 0x18, 0, NV04_PFIFO_CACHE1_PULL1 }, 148 {} 149 }, 150 .write = nv04_chan_ramfc_write, 151 .clear = nv04_chan_ramfc_clear, 152 .ctxdma = true, 153 }; 154 155 const struct nvkm_chan_func_userd 156 nv04_chan_userd = { 157 .bar = 0, 158 .base = 0x800000, 159 .size = 0x010000, 160 }; 161 162 const struct nvkm_chan_func_inst 163 nv04_chan_inst = { 164 .size = 0x1000, 165 }; 166 167 static const struct nvkm_chan_func 168 nv04_chan = { 169 .inst = &nv04_chan_inst, 170 .userd = &nv04_chan_userd, 171 .ramfc = &nv04_chan_ramfc, 172 .start = nv04_chan_start, 173 .stop = nv04_chan_stop, 174 }; 175 176 const struct nvkm_cgrp_func 177 nv04_cgrp = { 178 }; 179 180 void 181 nv04_eobj_ramht_del(struct nvkm_chan *chan, int hash) 182 { 183 struct nvkm_fifo *fifo = chan->cgrp->runl->fifo; 184 struct nvkm_instmem *imem = fifo->engine.subdev.device->imem; 185 186 mutex_lock(&fifo->mutex); 187 nvkm_ramht_remove(imem->ramht, hash); 188 mutex_unlock(&fifo->mutex); 189 } 190 191 static int 192 nv04_eobj_ramht_add(struct nvkm_engn *engn, struct nvkm_object *eobj, struct nvkm_chan *chan) 193 { 194 struct nvkm_fifo *fifo = chan->cgrp->runl->fifo; 195 struct nvkm_instmem *imem = fifo->engine.subdev.device->imem; 196 u32 context = 0x80000000 | chan->id << 24 | engn->id << 16; 197 int hash; 198 199 mutex_lock(&fifo->mutex); 200 hash = nvkm_ramht_insert(imem->ramht, eobj, chan->id, 4, eobj->handle, context); 201 mutex_unlock(&fifo->mutex); 202 return hash; 203 } 204 205 const struct nvkm_engn_func 206 nv04_engn = { 207 .ramht_add = nv04_eobj_ramht_add, 208 .ramht_del = nv04_eobj_ramht_del, 209 }; 210 211 void 212 nv04_fifo_pause(struct nvkm_fifo *fifo, unsigned long *pflags) 213 __acquires(fifo->lock) 214 { 215 struct nvkm_device *device = fifo->engine.subdev.device; 216 unsigned long flags; 217 218 spin_lock_irqsave(&fifo->lock, flags); 219 *pflags = flags; 220 221 nvkm_wr32(device, NV03_PFIFO_CACHES, 0x00000000); 222 nvkm_mask(device, NV04_PFIFO_CACHE1_PULL0, 0x00000001, 0x00000000); 223 224 /* in some cases the puller may be left in an inconsistent state 225 * if you try to stop it while it's busy translating handles. 226 * sometimes you get a CACHE_ERROR, sometimes it just fails 227 * silently; sending incorrect instance offsets to PGRAPH after 228 * it's started up again. 229 * 230 * to avoid this, we invalidate the most recently calculated 231 * instance. 232 */ 233 nvkm_msec(device, 2000, 234 u32 tmp = nvkm_rd32(device, NV04_PFIFO_CACHE1_PULL0); 235 if (!(tmp & NV04_PFIFO_CACHE1_PULL0_HASH_BUSY)) 236 break; 237 ); 238 239 if (nvkm_rd32(device, NV04_PFIFO_CACHE1_PULL0) & 240 NV04_PFIFO_CACHE1_PULL0_HASH_FAILED) 241 nvkm_wr32(device, NV03_PFIFO_INTR_0, NV_PFIFO_INTR_CACHE_ERROR); 242 243 nvkm_wr32(device, NV04_PFIFO_CACHE1_HASH, 0x00000000); 244 } 245 246 void 247 nv04_fifo_start(struct nvkm_fifo *fifo, unsigned long *pflags) 248 __releases(fifo->lock) 249 { 250 struct nvkm_device *device = fifo->engine.subdev.device; 251 unsigned long flags = *pflags; 252 253 nvkm_mask(device, NV04_PFIFO_CACHE1_PULL0, 0x00000001, 0x00000001); 254 nvkm_wr32(device, NV03_PFIFO_CACHES, 0x00000001); 255 256 spin_unlock_irqrestore(&fifo->lock, flags); 257 } 258 259 const struct nvkm_runl_func 260 nv04_runl = { 261 }; 262 263 static const char * 264 nv_dma_state_err(u32 state) 265 { 266 static const char * const desc[] = { 267 "NONE", "CALL_SUBR_ACTIVE", "INVALID_MTHD", "RET_SUBR_INACTIVE", 268 "INVALID_CMD", "IB_EMPTY"/* NV50+ */, "MEM_FAULT", "UNK" 269 }; 270 return desc[(state >> 29) & 0x7]; 271 } 272 273 static bool 274 nv04_fifo_swmthd(struct nvkm_device *device, u32 chid, u32 addr, u32 data) 275 { 276 struct nvkm_sw *sw = device->sw; 277 const int subc = (addr & 0x0000e000) >> 13; 278 const int mthd = (addr & 0x00001ffc); 279 const u32 mask = 0x0000000f << (subc * 4); 280 u32 engine = nvkm_rd32(device, 0x003280); 281 bool handled = false; 282 283 switch (mthd) { 284 case 0x0000 ... 0x0000: /* subchannel's engine -> software */ 285 nvkm_wr32(device, 0x003280, (engine &= ~mask)); 286 fallthrough; 287 case 0x0180 ... 0x01fc: /* handle -> instance */ 288 data = nvkm_rd32(device, 0x003258) & 0x0000ffff; 289 fallthrough; 290 case 0x0100 ... 0x017c: 291 case 0x0200 ... 0x1ffc: /* pass method down to sw */ 292 if (!(engine & mask) && sw) 293 handled = nvkm_sw_mthd(sw, chid, subc, mthd, data); 294 break; 295 default: 296 break; 297 } 298 299 return handled; 300 } 301 302 static void 303 nv04_fifo_intr_cache_error(struct nvkm_fifo *fifo, u32 chid, u32 get) 304 { 305 struct nvkm_subdev *subdev = &fifo->engine.subdev; 306 struct nvkm_device *device = subdev->device; 307 struct nvkm_chan *chan; 308 unsigned long flags; 309 u32 pull0 = nvkm_rd32(device, 0x003250); 310 u32 mthd, data; 311 int ptr; 312 313 /* NV_PFIFO_CACHE1_GET actually goes to 0xffc before wrapping on my 314 * G80 chips, but CACHE1 isn't big enough for this much data.. Tests 315 * show that it wraps around to the start at GET=0x800.. No clue as to 316 * why.. 317 */ 318 ptr = (get & 0x7ff) >> 2; 319 320 if (device->card_type < NV_40) { 321 mthd = nvkm_rd32(device, NV04_PFIFO_CACHE1_METHOD(ptr)); 322 data = nvkm_rd32(device, NV04_PFIFO_CACHE1_DATA(ptr)); 323 } else { 324 mthd = nvkm_rd32(device, NV40_PFIFO_CACHE1_METHOD(ptr)); 325 data = nvkm_rd32(device, NV40_PFIFO_CACHE1_DATA(ptr)); 326 } 327 328 if (!(pull0 & 0x00000100) || 329 !nv04_fifo_swmthd(device, chid, mthd, data)) { 330 chan = nvkm_chan_get_chid(&fifo->engine, chid, &flags); 331 nvkm_error(subdev, "CACHE_ERROR - " 332 "ch %d [%s] subc %d mthd %04x data %08x\n", 333 chid, chan ? chan->name : "unknown", 334 (mthd >> 13) & 7, mthd & 0x1ffc, data); 335 nvkm_chan_put(&chan, flags); 336 } 337 338 nvkm_wr32(device, NV04_PFIFO_CACHE1_DMA_PUSH, 0); 339 nvkm_wr32(device, NV03_PFIFO_INTR_0, NV_PFIFO_INTR_CACHE_ERROR); 340 341 nvkm_wr32(device, NV03_PFIFO_CACHE1_PUSH0, 342 nvkm_rd32(device, NV03_PFIFO_CACHE1_PUSH0) & ~1); 343 nvkm_wr32(device, NV03_PFIFO_CACHE1_GET, get + 4); 344 nvkm_wr32(device, NV03_PFIFO_CACHE1_PUSH0, 345 nvkm_rd32(device, NV03_PFIFO_CACHE1_PUSH0) | 1); 346 nvkm_wr32(device, NV04_PFIFO_CACHE1_HASH, 0); 347 348 nvkm_wr32(device, NV04_PFIFO_CACHE1_DMA_PUSH, 349 nvkm_rd32(device, NV04_PFIFO_CACHE1_DMA_PUSH) | 1); 350 nvkm_wr32(device, NV04_PFIFO_CACHE1_PULL0, 1); 351 } 352 353 static void 354 nv04_fifo_intr_dma_pusher(struct nvkm_fifo *fifo, u32 chid) 355 { 356 struct nvkm_subdev *subdev = &fifo->engine.subdev; 357 struct nvkm_device *device = subdev->device; 358 u32 dma_get = nvkm_rd32(device, 0x003244); 359 u32 dma_put = nvkm_rd32(device, 0x003240); 360 u32 push = nvkm_rd32(device, 0x003220); 361 u32 state = nvkm_rd32(device, 0x003228); 362 struct nvkm_chan *chan; 363 unsigned long flags; 364 const char *name; 365 366 chan = nvkm_chan_get_chid(&fifo->engine, chid, &flags); 367 name = chan ? chan->name : "unknown"; 368 if (device->card_type == NV_50) { 369 u32 ho_get = nvkm_rd32(device, 0x003328); 370 u32 ho_put = nvkm_rd32(device, 0x003320); 371 u32 ib_get = nvkm_rd32(device, 0x003334); 372 u32 ib_put = nvkm_rd32(device, 0x003330); 373 374 nvkm_error(subdev, "DMA_PUSHER - " 375 "ch %d [%s] get %02x%08x put %02x%08x ib_get %08x " 376 "ib_put %08x state %08x (err: %s) push %08x\n", 377 chid, name, ho_get, dma_get, ho_put, dma_put, 378 ib_get, ib_put, state, nv_dma_state_err(state), 379 push); 380 381 /* METHOD_COUNT, in DMA_STATE on earlier chipsets */ 382 nvkm_wr32(device, 0x003364, 0x00000000); 383 if (dma_get != dma_put || ho_get != ho_put) { 384 nvkm_wr32(device, 0x003244, dma_put); 385 nvkm_wr32(device, 0x003328, ho_put); 386 } else 387 if (ib_get != ib_put) 388 nvkm_wr32(device, 0x003334, ib_put); 389 } else { 390 nvkm_error(subdev, "DMA_PUSHER - ch %d [%s] get %08x put %08x " 391 "state %08x (err: %s) push %08x\n", 392 chid, name, dma_get, dma_put, state, 393 nv_dma_state_err(state), push); 394 395 if (dma_get != dma_put) 396 nvkm_wr32(device, 0x003244, dma_put); 397 } 398 nvkm_chan_put(&chan, flags); 399 400 nvkm_wr32(device, 0x003228, 0x00000000); 401 nvkm_wr32(device, 0x003220, 0x00000001); 402 nvkm_wr32(device, 0x002100, NV_PFIFO_INTR_DMA_PUSHER); 403 } 404 405 irqreturn_t 406 nv04_fifo_intr(struct nvkm_inth *inth) 407 { 408 struct nvkm_fifo *fifo = container_of(inth, typeof(*fifo), engine.subdev.inth); 409 struct nvkm_subdev *subdev = &fifo->engine.subdev; 410 struct nvkm_device *device = subdev->device; 411 u32 mask = nvkm_rd32(device, NV03_PFIFO_INTR_EN_0); 412 u32 stat = nvkm_rd32(device, NV03_PFIFO_INTR_0) & mask; 413 u32 reassign, chid, get, sem; 414 415 reassign = nvkm_rd32(device, NV03_PFIFO_CACHES) & 1; 416 nvkm_wr32(device, NV03_PFIFO_CACHES, 0); 417 418 chid = nvkm_rd32(device, NV03_PFIFO_CACHE1_PUSH1) & fifo->chid->mask; 419 get = nvkm_rd32(device, NV03_PFIFO_CACHE1_GET); 420 421 if (stat & NV_PFIFO_INTR_CACHE_ERROR) { 422 nv04_fifo_intr_cache_error(fifo, chid, get); 423 stat &= ~NV_PFIFO_INTR_CACHE_ERROR; 424 } 425 426 if (stat & NV_PFIFO_INTR_DMA_PUSHER) { 427 nv04_fifo_intr_dma_pusher(fifo, chid); 428 stat &= ~NV_PFIFO_INTR_DMA_PUSHER; 429 } 430 431 if (stat & NV_PFIFO_INTR_SEMAPHORE) { 432 stat &= ~NV_PFIFO_INTR_SEMAPHORE; 433 nvkm_wr32(device, NV03_PFIFO_INTR_0, NV_PFIFO_INTR_SEMAPHORE); 434 435 sem = nvkm_rd32(device, NV10_PFIFO_CACHE1_SEMAPHORE); 436 nvkm_wr32(device, NV10_PFIFO_CACHE1_SEMAPHORE, sem | 0x1); 437 438 nvkm_wr32(device, NV03_PFIFO_CACHE1_GET, get + 4); 439 nvkm_wr32(device, NV04_PFIFO_CACHE1_PULL0, 1); 440 } 441 442 if (device->card_type == NV_50) { 443 if (stat & 0x00000010) { 444 stat &= ~0x00000010; 445 nvkm_wr32(device, 0x002100, 0x00000010); 446 } 447 448 if (stat & 0x40000000) { 449 nvkm_wr32(device, 0x002100, 0x40000000); 450 nvkm_event_ntfy(&fifo->nonstall.event, 0, NVKM_FIFO_NONSTALL_EVENT); 451 stat &= ~0x40000000; 452 } 453 } 454 455 if (stat) { 456 nvkm_warn(subdev, "intr %08x\n", stat); 457 nvkm_mask(device, NV03_PFIFO_INTR_EN_0, stat, 0x00000000); 458 nvkm_wr32(device, NV03_PFIFO_INTR_0, stat); 459 } 460 461 nvkm_wr32(device, NV03_PFIFO_CACHES, reassign); 462 return IRQ_HANDLED; 463 } 464 465 void 466 nv04_fifo_init(struct nvkm_fifo *fifo) 467 { 468 struct nvkm_device *device = fifo->engine.subdev.device; 469 struct nvkm_instmem *imem = device->imem; 470 struct nvkm_ramht *ramht = imem->ramht; 471 struct nvkm_memory *ramro = imem->ramro; 472 struct nvkm_memory *ramfc = imem->ramfc; 473 474 nvkm_wr32(device, NV04_PFIFO_DELAY_0, 0x000000ff); 475 nvkm_wr32(device, NV04_PFIFO_DMA_TIMESLICE, 0x0101ffff); 476 477 nvkm_wr32(device, NV03_PFIFO_RAMHT, (0x03 << 24) /* search 128 */ | 478 ((ramht->bits - 9) << 16) | 479 (ramht->gpuobj->addr >> 8)); 480 nvkm_wr32(device, NV03_PFIFO_RAMRO, nvkm_memory_addr(ramro) >> 8); 481 nvkm_wr32(device, NV03_PFIFO_RAMFC, nvkm_memory_addr(ramfc) >> 8); 482 483 nvkm_wr32(device, NV03_PFIFO_CACHE1_PUSH1, fifo->chid->mask); 484 485 nvkm_wr32(device, NV03_PFIFO_INTR_0, 0xffffffff); 486 nvkm_wr32(device, NV03_PFIFO_INTR_EN_0, 0xffffffff); 487 488 nvkm_wr32(device, NV03_PFIFO_CACHE1_PUSH0, 1); 489 nvkm_wr32(device, NV04_PFIFO_CACHE1_PULL0, 1); 490 nvkm_wr32(device, NV03_PFIFO_CACHES, 1); 491 } 492 493 int 494 nv04_fifo_runl_ctor(struct nvkm_fifo *fifo) 495 { 496 struct nvkm_runl *runl; 497 498 runl = nvkm_runl_new(fifo, 0, 0, 0); 499 if (IS_ERR(runl)) 500 return PTR_ERR(runl); 501 502 nvkm_runl_add(runl, 0, fifo->func->engn_sw, NVKM_ENGINE_SW, 0); 503 nvkm_runl_add(runl, 0, fifo->func->engn_sw, NVKM_ENGINE_DMAOBJ, 0); 504 nvkm_runl_add(runl, 1, fifo->func->engn , NVKM_ENGINE_GR, 0); 505 nvkm_runl_add(runl, 2, fifo->func->engn , NVKM_ENGINE_MPEG, 0); /* NV31- */ 506 return 0; 507 } 508 509 int 510 nv04_fifo_chid_ctor(struct nvkm_fifo *fifo, int nr) 511 { 512 /* The last CHID is reserved by HW as a "channel invalid" marker. */ 513 return nvkm_chid_new(&nvkm_chan_event, &fifo->engine.subdev, nr, 0, nr - 1, &fifo->chid); 514 } 515 516 static int 517 nv04_fifo_chid_nr(struct nvkm_fifo *fifo) 518 { 519 return 16; 520 } 521 522 static const struct nvkm_fifo_func 523 nv04_fifo = { 524 .chid_nr = nv04_fifo_chid_nr, 525 .chid_ctor = nv04_fifo_chid_ctor, 526 .runl_ctor = nv04_fifo_runl_ctor, 527 .init = nv04_fifo_init, 528 .intr = nv04_fifo_intr, 529 .pause = nv04_fifo_pause, 530 .start = nv04_fifo_start, 531 .runl = &nv04_runl, 532 .engn = &nv04_engn, 533 .engn_sw = &nv04_engn, 534 .cgrp = {{ }, &nv04_cgrp }, 535 .chan = {{ 0, 0, NV03_CHANNEL_DMA }, &nv04_chan }, 536 }; 537 538 int 539 nv04_fifo_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst, 540 struct nvkm_fifo **pfifo) 541 { 542 return nvkm_fifo_new_(&nv04_fifo, device, type, inst, pfifo); 543 } 544