1 /* 2 * Copyright 2012 Red Hat Inc. 3 * 4 * Permission is hereby granted, free of charge, to any person obtaining a 5 * copy of this software and associated documentation files (the "Software"), 6 * to deal in the Software without restriction, including without limitation 7 * the rights to use, copy, modify, merge, publish, distribute, sublicense, 8 * and/or sell copies of the Software, and to permit persons to whom the 9 * Software is furnished to do so, subject to the following conditions: 10 * 11 * The above copyright notice and this permission notice shall be included in 12 * all copies or substantial portions of the Software. 13 * 14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL 17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR 18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, 19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR 20 * OTHER DEALINGS IN THE SOFTWARE. 21 * 22 * Authors: Ben Skeggs 23 */ 24 #include "gk104.h" 25 26 #include <core/client.h> 27 #include <core/engctx.h> 28 #include <core/enum.h> 29 #include <core/handle.h> 30 #include <subdev/bar.h> 31 #include <subdev/fb.h> 32 #include <subdev/mmu.h> 33 #include <subdev/timer.h> 34 35 #include <nvif/class.h> 36 #include <nvif/unpack.h> 37 38 #define _(a,b) { (a), ((1ULL << (a)) | (b)) } 39 static const struct { 40 u64 subdev; 41 u64 mask; 42 } fifo_engine[] = { 43 _(NVDEV_ENGINE_GR , (1ULL << NVDEV_ENGINE_SW) | 44 (1ULL << NVDEV_ENGINE_CE2)), 45 _(NVDEV_ENGINE_MSPDEC , 0), 46 _(NVDEV_ENGINE_MSPPP , 0), 47 _(NVDEV_ENGINE_MSVLD , 0), 48 _(NVDEV_ENGINE_CE0 , 0), 49 _(NVDEV_ENGINE_CE1 , 0), 50 _(NVDEV_ENGINE_MSENC , 0), 51 }; 52 #undef _ 53 #define FIFO_ENGINE_NR ARRAY_SIZE(fifo_engine) 54 55 struct gk104_fifo_engn { 56 struct nvkm_gpuobj *runlist[2]; 57 int cur_runlist; 58 wait_queue_head_t wait; 59 }; 60 61 struct gk104_fifo { 62 struct nvkm_fifo base; 63 64 struct work_struct fault; 65 u64 mask; 66 67 struct gk104_fifo_engn engine[FIFO_ENGINE_NR]; 68 struct { 69 struct nvkm_gpuobj *mem; 70 struct nvkm_vma bar; 71 } user; 72 int spoon_nr; 73 }; 74 75 struct gk104_fifo_base { 76 struct nvkm_fifo_base base; 77 struct nvkm_gpuobj *pgd; 78 struct nvkm_vm *vm; 79 }; 80 81 struct gk104_fifo_chan { 82 struct nvkm_fifo_chan base; 83 u32 engine; 84 enum { 85 STOPPED, 86 RUNNING, 87 KILLED 88 } state; 89 }; 90 91 /******************************************************************************* 92 * FIFO channel objects 93 ******************************************************************************/ 94 95 static void 96 gk104_fifo_runlist_update(struct gk104_fifo *fifo, u32 engine) 97 { 98 struct gk104_fifo_engn *engn = &fifo->engine[engine]; 99 struct nvkm_subdev *subdev = &fifo->base.engine.subdev; 100 struct nvkm_device *device = subdev->device; 101 struct nvkm_bar *bar = device->bar; 102 struct nvkm_gpuobj *cur; 103 int i, p; 104 105 mutex_lock(&nv_subdev(fifo)->mutex); 106 cur = engn->runlist[engn->cur_runlist]; 107 engn->cur_runlist = !engn->cur_runlist; 108 109 for (i = 0, p = 0; i < fifo->base.max; i++) { 110 struct gk104_fifo_chan *chan = (void *)fifo->base.channel[i]; 111 if (chan && chan->state == RUNNING && chan->engine == engine) { 112 nv_wo32(cur, p + 0, i); 113 nv_wo32(cur, p + 4, 0x00000000); 114 p += 8; 115 } 116 } 117 bar->flush(bar); 118 119 nvkm_wr32(device, 0x002270, cur->addr >> 12); 120 nvkm_wr32(device, 0x002274, (engine << 20) | (p >> 3)); 121 122 if (wait_event_timeout(engn->wait, !(nvkm_rd32(device, 0x002284 + 123 (engine * 0x08)) & 0x00100000), 124 msecs_to_jiffies(2000)) == 0) 125 nvkm_error(subdev, "runlist %d update timeout\n", engine); 126 mutex_unlock(&nv_subdev(fifo)->mutex); 127 } 128 129 static int 130 gk104_fifo_context_attach(struct nvkm_object *parent, 131 struct nvkm_object *object) 132 { 133 struct nvkm_bar *bar = nvkm_bar(parent); 134 struct gk104_fifo_base *base = (void *)parent->parent; 135 struct nvkm_engctx *ectx = (void *)object; 136 u32 addr; 137 int ret; 138 139 switch (nv_engidx(object->engine)) { 140 case NVDEV_ENGINE_SW : 141 return 0; 142 case NVDEV_ENGINE_CE0: 143 case NVDEV_ENGINE_CE1: 144 case NVDEV_ENGINE_CE2: 145 nv_engctx(ectx)->addr = nv_gpuobj(base)->addr >> 12; 146 return 0; 147 case NVDEV_ENGINE_GR : addr = 0x0210; break; 148 case NVDEV_ENGINE_MSVLD : addr = 0x0270; break; 149 case NVDEV_ENGINE_MSPDEC: addr = 0x0250; break; 150 case NVDEV_ENGINE_MSPPP : addr = 0x0260; break; 151 default: 152 return -EINVAL; 153 } 154 155 if (!ectx->vma.node) { 156 ret = nvkm_gpuobj_map_vm(nv_gpuobj(ectx), base->vm, 157 NV_MEM_ACCESS_RW, &ectx->vma); 158 if (ret) 159 return ret; 160 161 nv_engctx(ectx)->addr = nv_gpuobj(base)->addr >> 12; 162 } 163 164 nv_wo32(base, addr + 0x00, lower_32_bits(ectx->vma.offset) | 4); 165 nv_wo32(base, addr + 0x04, upper_32_bits(ectx->vma.offset)); 166 bar->flush(bar); 167 return 0; 168 } 169 170 static int 171 gk104_fifo_chan_kick(struct gk104_fifo_chan *chan) 172 { 173 struct nvkm_object *obj = (void *)chan; 174 struct gk104_fifo *fifo = (void *)obj->engine; 175 struct nvkm_subdev *subdev = &fifo->base.engine.subdev; 176 struct nvkm_device *device = subdev->device; 177 178 nvkm_wr32(device, 0x002634, chan->base.chid); 179 if (nvkm_msec(device, 2000, 180 if (!(nvkm_rd32(device, 0x002634) & 0x00100000)) 181 break; 182 ) < 0) { 183 nvkm_error(subdev, "channel %d [%s] kick timeout\n", 184 chan->base.chid, nvkm_client_name(chan)); 185 return -EBUSY; 186 } 187 188 return 0; 189 } 190 191 static int 192 gk104_fifo_context_detach(struct nvkm_object *parent, bool suspend, 193 struct nvkm_object *object) 194 { 195 struct nvkm_bar *bar = nvkm_bar(parent); 196 struct gk104_fifo_base *base = (void *)parent->parent; 197 struct gk104_fifo_chan *chan = (void *)parent; 198 u32 addr; 199 int ret; 200 201 switch (nv_engidx(object->engine)) { 202 case NVDEV_ENGINE_SW : return 0; 203 case NVDEV_ENGINE_CE0 : 204 case NVDEV_ENGINE_CE1 : 205 case NVDEV_ENGINE_CE2 : addr = 0x0000; break; 206 case NVDEV_ENGINE_GR : addr = 0x0210; break; 207 case NVDEV_ENGINE_MSVLD : addr = 0x0270; break; 208 case NVDEV_ENGINE_MSPDEC: addr = 0x0250; break; 209 case NVDEV_ENGINE_MSPPP : addr = 0x0260; break; 210 default: 211 return -EINVAL; 212 } 213 214 ret = gk104_fifo_chan_kick(chan); 215 if (ret && suspend) 216 return ret; 217 218 if (addr) { 219 nv_wo32(base, addr + 0x00, 0x00000000); 220 nv_wo32(base, addr + 0x04, 0x00000000); 221 bar->flush(bar); 222 } 223 224 return 0; 225 } 226 227 static int 228 gk104_fifo_chan_ctor(struct nvkm_object *parent, struct nvkm_object *engine, 229 struct nvkm_oclass *oclass, void *data, u32 size, 230 struct nvkm_object **pobject) 231 { 232 union { 233 struct kepler_channel_gpfifo_a_v0 v0; 234 } *args = data; 235 struct nvkm_bar *bar = nvkm_bar(parent); 236 struct gk104_fifo *fifo = (void *)engine; 237 struct gk104_fifo_base *base = (void *)parent; 238 struct gk104_fifo_chan *chan; 239 struct nvkm_subdev *subdev = &fifo->base.engine.subdev; 240 u64 usermem, ioffset, ilength; 241 int ret, i; 242 243 nvif_ioctl(parent, "create channel gpfifo size %d\n", size); 244 if (nvif_unpack(args->v0, 0, 0, false)) { 245 nvif_ioctl(parent, "create channel gpfifo vers %d pushbuf %08x " 246 "ioffset %016llx ilength %08x engine %08x\n", 247 args->v0.version, args->v0.pushbuf, args->v0.ioffset, 248 args->v0.ilength, args->v0.engine); 249 } else 250 return ret; 251 252 for (i = 0; i < FIFO_ENGINE_NR; i++) { 253 if (args->v0.engine & (1 << i)) { 254 if (nvkm_engine(parent, fifo_engine[i].subdev)) { 255 args->v0.engine = (1 << i); 256 break; 257 } 258 } 259 } 260 261 if (i == FIFO_ENGINE_NR) { 262 nvkm_error(subdev, "unsupported engines %08x\n", 263 args->v0.engine); 264 return -ENODEV; 265 } 266 267 ret = nvkm_fifo_channel_create(parent, engine, oclass, 1, 268 fifo->user.bar.offset, 0x200, 269 args->v0.pushbuf, 270 fifo_engine[i].mask, &chan); 271 *pobject = nv_object(chan); 272 if (ret) 273 return ret; 274 275 args->v0.chid = chan->base.chid; 276 277 nv_parent(chan)->context_attach = gk104_fifo_context_attach; 278 nv_parent(chan)->context_detach = gk104_fifo_context_detach; 279 chan->engine = i; 280 281 usermem = chan->base.chid * 0x200; 282 ioffset = args->v0.ioffset; 283 ilength = order_base_2(args->v0.ilength / 8); 284 285 for (i = 0; i < 0x200; i += 4) 286 nv_wo32(fifo->user.mem, usermem + i, 0x00000000); 287 288 nv_wo32(base, 0x08, lower_32_bits(fifo->user.mem->addr + usermem)); 289 nv_wo32(base, 0x0c, upper_32_bits(fifo->user.mem->addr + usermem)); 290 nv_wo32(base, 0x10, 0x0000face); 291 nv_wo32(base, 0x30, 0xfffff902); 292 nv_wo32(base, 0x48, lower_32_bits(ioffset)); 293 nv_wo32(base, 0x4c, upper_32_bits(ioffset) | (ilength << 16)); 294 nv_wo32(base, 0x84, 0x20400000); 295 nv_wo32(base, 0x94, 0x30000001); 296 nv_wo32(base, 0x9c, 0x00000100); 297 nv_wo32(base, 0xac, 0x0000001f); 298 nv_wo32(base, 0xe8, chan->base.chid); 299 nv_wo32(base, 0xb8, 0xf8000000); 300 nv_wo32(base, 0xf8, 0x10003080); /* 0x002310 */ 301 nv_wo32(base, 0xfc, 0x10000010); /* 0x002350 */ 302 bar->flush(bar); 303 return 0; 304 } 305 306 static int 307 gk104_fifo_chan_init(struct nvkm_object *object) 308 { 309 struct nvkm_gpuobj *base = nv_gpuobj(object->parent); 310 struct gk104_fifo *fifo = (void *)object->engine; 311 struct gk104_fifo_chan *chan = (void *)object; 312 struct nvkm_device *device = fifo->base.engine.subdev.device; 313 u32 chid = chan->base.chid; 314 int ret; 315 316 ret = nvkm_fifo_channel_init(&chan->base); 317 if (ret) 318 return ret; 319 320 nvkm_mask(device, 0x800004 + (chid * 8), 0x000f0000, chan->engine << 16); 321 nvkm_wr32(device, 0x800000 + (chid * 8), 0x80000000 | base->addr >> 12); 322 323 if (chan->state == STOPPED && (chan->state = RUNNING) == RUNNING) { 324 nvkm_mask(device, 0x800004 + (chid * 8), 0x00000400, 0x00000400); 325 gk104_fifo_runlist_update(fifo, chan->engine); 326 nvkm_mask(device, 0x800004 + (chid * 8), 0x00000400, 0x00000400); 327 } 328 329 return 0; 330 } 331 332 static int 333 gk104_fifo_chan_fini(struct nvkm_object *object, bool suspend) 334 { 335 struct gk104_fifo *fifo = (void *)object->engine; 336 struct gk104_fifo_chan *chan = (void *)object; 337 struct nvkm_device *device = fifo->base.engine.subdev.device; 338 u32 chid = chan->base.chid; 339 340 if (chan->state == RUNNING && (chan->state = STOPPED) == STOPPED) { 341 nvkm_mask(device, 0x800004 + (chid * 8), 0x00000800, 0x00000800); 342 gk104_fifo_runlist_update(fifo, chan->engine); 343 } 344 345 nvkm_wr32(device, 0x800000 + (chid * 8), 0x00000000); 346 return nvkm_fifo_channel_fini(&chan->base, suspend); 347 } 348 349 struct nvkm_ofuncs 350 gk104_fifo_chan_ofuncs = { 351 .ctor = gk104_fifo_chan_ctor, 352 .dtor = _nvkm_fifo_channel_dtor, 353 .init = gk104_fifo_chan_init, 354 .fini = gk104_fifo_chan_fini, 355 .map = _nvkm_fifo_channel_map, 356 .rd32 = _nvkm_fifo_channel_rd32, 357 .wr32 = _nvkm_fifo_channel_wr32, 358 .ntfy = _nvkm_fifo_channel_ntfy 359 }; 360 361 static struct nvkm_oclass 362 gk104_fifo_sclass[] = { 363 { KEPLER_CHANNEL_GPFIFO_A, &gk104_fifo_chan_ofuncs }, 364 {} 365 }; 366 367 /******************************************************************************* 368 * FIFO context - instmem heap and vm setup 369 ******************************************************************************/ 370 371 static int 372 gk104_fifo_context_ctor(struct nvkm_object *parent, struct nvkm_object *engine, 373 struct nvkm_oclass *oclass, void *data, u32 size, 374 struct nvkm_object **pobject) 375 { 376 struct gk104_fifo_base *base; 377 int ret; 378 379 ret = nvkm_fifo_context_create(parent, engine, oclass, NULL, 0x1000, 380 0x1000, NVOBJ_FLAG_ZERO_ALLOC, &base); 381 *pobject = nv_object(base); 382 if (ret) 383 return ret; 384 385 ret = nvkm_gpuobj_new(nv_object(base), NULL, 0x10000, 0x1000, 0, 386 &base->pgd); 387 if (ret) 388 return ret; 389 390 nv_wo32(base, 0x0200, lower_32_bits(base->pgd->addr)); 391 nv_wo32(base, 0x0204, upper_32_bits(base->pgd->addr)); 392 nv_wo32(base, 0x0208, 0xffffffff); 393 nv_wo32(base, 0x020c, 0x000000ff); 394 395 ret = nvkm_vm_ref(nvkm_client(parent)->vm, &base->vm, base->pgd); 396 if (ret) 397 return ret; 398 399 return 0; 400 } 401 402 static void 403 gk104_fifo_context_dtor(struct nvkm_object *object) 404 { 405 struct gk104_fifo_base *base = (void *)object; 406 nvkm_vm_ref(NULL, &base->vm, base->pgd); 407 nvkm_gpuobj_ref(NULL, &base->pgd); 408 nvkm_fifo_context_destroy(&base->base); 409 } 410 411 static struct nvkm_oclass 412 gk104_fifo_cclass = { 413 .handle = NV_ENGCTX(FIFO, 0xe0), 414 .ofuncs = &(struct nvkm_ofuncs) { 415 .ctor = gk104_fifo_context_ctor, 416 .dtor = gk104_fifo_context_dtor, 417 .init = _nvkm_fifo_context_init, 418 .fini = _nvkm_fifo_context_fini, 419 .rd32 = _nvkm_fifo_context_rd32, 420 .wr32 = _nvkm_fifo_context_wr32, 421 }, 422 }; 423 424 /******************************************************************************* 425 * PFIFO engine 426 ******************************************************************************/ 427 428 static inline int 429 gk104_fifo_engidx(struct gk104_fifo *fifo, u32 engn) 430 { 431 switch (engn) { 432 case NVDEV_ENGINE_GR : 433 case NVDEV_ENGINE_CE2 : engn = 0; break; 434 case NVDEV_ENGINE_MSVLD : engn = 1; break; 435 case NVDEV_ENGINE_MSPPP : engn = 2; break; 436 case NVDEV_ENGINE_MSPDEC: engn = 3; break; 437 case NVDEV_ENGINE_CE0 : engn = 4; break; 438 case NVDEV_ENGINE_CE1 : engn = 5; break; 439 case NVDEV_ENGINE_MSENC : engn = 6; break; 440 default: 441 return -1; 442 } 443 444 return engn; 445 } 446 447 static inline struct nvkm_engine * 448 gk104_fifo_engine(struct gk104_fifo *fifo, u32 engn) 449 { 450 if (engn >= ARRAY_SIZE(fifo_engine)) 451 return NULL; 452 return nvkm_engine(fifo, fifo_engine[engn].subdev); 453 } 454 455 static void 456 gk104_fifo_recover_work(struct work_struct *work) 457 { 458 struct gk104_fifo *fifo = container_of(work, typeof(*fifo), fault); 459 struct nvkm_device *device = fifo->base.engine.subdev.device; 460 struct nvkm_object *engine; 461 unsigned long flags; 462 u32 engn, engm = 0; 463 u64 mask, todo; 464 465 spin_lock_irqsave(&fifo->base.lock, flags); 466 mask = fifo->mask; 467 fifo->mask = 0ULL; 468 spin_unlock_irqrestore(&fifo->base.lock, flags); 469 470 for (todo = mask; engn = __ffs64(todo), todo; todo &= ~(1 << engn)) 471 engm |= 1 << gk104_fifo_engidx(fifo, engn); 472 nvkm_mask(device, 0x002630, engm, engm); 473 474 for (todo = mask; engn = __ffs64(todo), todo; todo &= ~(1 << engn)) { 475 if ((engine = (void *)nvkm_engine(fifo, engn))) { 476 nv_ofuncs(engine)->fini(engine, false); 477 WARN_ON(nv_ofuncs(engine)->init(engine)); 478 } 479 gk104_fifo_runlist_update(fifo, gk104_fifo_engidx(fifo, engn)); 480 } 481 482 nvkm_wr32(device, 0x00262c, engm); 483 nvkm_mask(device, 0x002630, engm, 0x00000000); 484 } 485 486 static void 487 gk104_fifo_recover(struct gk104_fifo *fifo, struct nvkm_engine *engine, 488 struct gk104_fifo_chan *chan) 489 { 490 struct nvkm_subdev *subdev = &fifo->base.engine.subdev; 491 struct nvkm_device *device = subdev->device; 492 u32 chid = chan->base.chid; 493 unsigned long flags; 494 495 nvkm_error(subdev, "%s engine fault on channel %d, recovering...\n", 496 nv_subdev(engine)->name, chid); 497 498 nvkm_mask(device, 0x800004 + (chid * 0x08), 0x00000800, 0x00000800); 499 chan->state = KILLED; 500 501 spin_lock_irqsave(&fifo->base.lock, flags); 502 fifo->mask |= 1ULL << nv_engidx(engine); 503 spin_unlock_irqrestore(&fifo->base.lock, flags); 504 schedule_work(&fifo->fault); 505 } 506 507 static int 508 gk104_fifo_swmthd(struct gk104_fifo *fifo, u32 chid, u32 mthd, u32 data) 509 { 510 struct gk104_fifo_chan *chan = NULL; 511 struct nvkm_handle *bind; 512 unsigned long flags; 513 int ret = -EINVAL; 514 515 spin_lock_irqsave(&fifo->base.lock, flags); 516 if (likely(chid >= fifo->base.min && chid <= fifo->base.max)) 517 chan = (void *)fifo->base.channel[chid]; 518 if (unlikely(!chan)) 519 goto out; 520 521 bind = nvkm_namedb_get_class(nv_namedb(chan), 0x906e); 522 if (likely(bind)) { 523 if (!mthd || !nv_call(bind->object, mthd, data)) 524 ret = 0; 525 nvkm_namedb_put(bind); 526 } 527 528 out: 529 spin_unlock_irqrestore(&fifo->base.lock, flags); 530 return ret; 531 } 532 533 static const struct nvkm_enum 534 gk104_fifo_bind_reason[] = { 535 { 0x01, "BIND_NOT_UNBOUND" }, 536 { 0x02, "SNOOP_WITHOUT_BAR1" }, 537 { 0x03, "UNBIND_WHILE_RUNNING" }, 538 { 0x05, "INVALID_RUNLIST" }, 539 { 0x06, "INVALID_CTX_TGT" }, 540 { 0x0b, "UNBIND_WHILE_PARKED" }, 541 {} 542 }; 543 544 static void 545 gk104_fifo_intr_bind(struct gk104_fifo *fifo) 546 { 547 struct nvkm_subdev *subdev = &fifo->base.engine.subdev; 548 struct nvkm_device *device = subdev->device; 549 u32 intr = nvkm_rd32(device, 0x00252c); 550 u32 code = intr & 0x000000ff; 551 const struct nvkm_enum *en = 552 nvkm_enum_find(gk104_fifo_bind_reason, code); 553 554 nvkm_error(subdev, "BIND_ERROR %02x [%s]\n", code, en ? en->name : ""); 555 } 556 557 static const struct nvkm_enum 558 gk104_fifo_sched_reason[] = { 559 { 0x0a, "CTXSW_TIMEOUT" }, 560 {} 561 }; 562 563 static void 564 gk104_fifo_intr_sched_ctxsw(struct gk104_fifo *fifo) 565 { 566 struct nvkm_device *device = fifo->base.engine.subdev.device; 567 struct nvkm_engine *engine; 568 struct gk104_fifo_chan *chan; 569 u32 engn; 570 571 for (engn = 0; engn < ARRAY_SIZE(fifo_engine); engn++) { 572 u32 stat = nvkm_rd32(device, 0x002640 + (engn * 0x04)); 573 u32 busy = (stat & 0x80000000); 574 u32 next = (stat & 0x07ff0000) >> 16; 575 u32 chsw = (stat & 0x00008000); 576 u32 save = (stat & 0x00004000); 577 u32 load = (stat & 0x00002000); 578 u32 prev = (stat & 0x000007ff); 579 u32 chid = load ? next : prev; 580 (void)save; 581 582 if (busy && chsw) { 583 if (!(chan = (void *)fifo->base.channel[chid])) 584 continue; 585 if (!(engine = gk104_fifo_engine(fifo, engn))) 586 continue; 587 gk104_fifo_recover(fifo, engine, chan); 588 } 589 } 590 } 591 592 static void 593 gk104_fifo_intr_sched(struct gk104_fifo *fifo) 594 { 595 struct nvkm_subdev *subdev = &fifo->base.engine.subdev; 596 struct nvkm_device *device = subdev->device; 597 u32 intr = nvkm_rd32(device, 0x00254c); 598 u32 code = intr & 0x000000ff; 599 const struct nvkm_enum *en = 600 nvkm_enum_find(gk104_fifo_sched_reason, code); 601 602 nvkm_error(subdev, "SCHED_ERROR %02x [%s]\n", code, en ? en->name : ""); 603 604 switch (code) { 605 case 0x0a: 606 gk104_fifo_intr_sched_ctxsw(fifo); 607 break; 608 default: 609 break; 610 } 611 } 612 613 static void 614 gk104_fifo_intr_chsw(struct gk104_fifo *fifo) 615 { 616 struct nvkm_subdev *subdev = &fifo->base.engine.subdev; 617 struct nvkm_device *device = subdev->device; 618 u32 stat = nvkm_rd32(device, 0x00256c); 619 nvkm_error(subdev, "CHSW_ERROR %08x\n", stat); 620 nvkm_wr32(device, 0x00256c, stat); 621 } 622 623 static void 624 gk104_fifo_intr_dropped_fault(struct gk104_fifo *fifo) 625 { 626 struct nvkm_subdev *subdev = &fifo->base.engine.subdev; 627 struct nvkm_device *device = subdev->device; 628 u32 stat = nvkm_rd32(device, 0x00259c); 629 nvkm_error(subdev, "DROPPED_MMU_FAULT %08x\n", stat); 630 } 631 632 static const struct nvkm_enum 633 gk104_fifo_fault_engine[] = { 634 { 0x00, "GR", NULL, NVDEV_ENGINE_GR }, 635 { 0x03, "IFB", NULL, NVDEV_ENGINE_IFB }, 636 { 0x04, "BAR1", NULL, NVDEV_SUBDEV_BAR }, 637 { 0x05, "BAR3", NULL, NVDEV_SUBDEV_INSTMEM }, 638 { 0x07, "PBDMA0", NULL, NVDEV_ENGINE_FIFO }, 639 { 0x08, "PBDMA1", NULL, NVDEV_ENGINE_FIFO }, 640 { 0x09, "PBDMA2", NULL, NVDEV_ENGINE_FIFO }, 641 { 0x10, "MSVLD", NULL, NVDEV_ENGINE_MSVLD }, 642 { 0x11, "MSPPP", NULL, NVDEV_ENGINE_MSPPP }, 643 { 0x13, "PERF" }, 644 { 0x14, "MSPDEC", NULL, NVDEV_ENGINE_MSPDEC }, 645 { 0x15, "CE0", NULL, NVDEV_ENGINE_CE0 }, 646 { 0x16, "CE1", NULL, NVDEV_ENGINE_CE1 }, 647 { 0x17, "PMU" }, 648 { 0x19, "MSENC", NULL, NVDEV_ENGINE_MSENC }, 649 { 0x1b, "CE2", NULL, NVDEV_ENGINE_CE2 }, 650 {} 651 }; 652 653 static const struct nvkm_enum 654 gk104_fifo_fault_reason[] = { 655 { 0x00, "PDE" }, 656 { 0x01, "PDE_SIZE" }, 657 { 0x02, "PTE" }, 658 { 0x03, "VA_LIMIT_VIOLATION" }, 659 { 0x04, "UNBOUND_INST_BLOCK" }, 660 { 0x05, "PRIV_VIOLATION" }, 661 { 0x06, "RO_VIOLATION" }, 662 { 0x07, "WO_VIOLATION" }, 663 { 0x08, "PITCH_MASK_VIOLATION" }, 664 { 0x09, "WORK_CREATION" }, 665 { 0x0a, "UNSUPPORTED_APERTURE" }, 666 { 0x0b, "COMPRESSION_FAILURE" }, 667 { 0x0c, "UNSUPPORTED_KIND" }, 668 { 0x0d, "REGION_VIOLATION" }, 669 { 0x0e, "BOTH_PTES_VALID" }, 670 { 0x0f, "INFO_TYPE_POISONED" }, 671 {} 672 }; 673 674 static const struct nvkm_enum 675 gk104_fifo_fault_hubclient[] = { 676 { 0x00, "VIP" }, 677 { 0x01, "CE0" }, 678 { 0x02, "CE1" }, 679 { 0x03, "DNISO" }, 680 { 0x04, "FE" }, 681 { 0x05, "FECS" }, 682 { 0x06, "HOST" }, 683 { 0x07, "HOST_CPU" }, 684 { 0x08, "HOST_CPU_NB" }, 685 { 0x09, "ISO" }, 686 { 0x0a, "MMU" }, 687 { 0x0b, "MSPDEC" }, 688 { 0x0c, "MSPPP" }, 689 { 0x0d, "MSVLD" }, 690 { 0x0e, "NISO" }, 691 { 0x0f, "P2P" }, 692 { 0x10, "PD" }, 693 { 0x11, "PERF" }, 694 { 0x12, "PMU" }, 695 { 0x13, "RASTERTWOD" }, 696 { 0x14, "SCC" }, 697 { 0x15, "SCC_NB" }, 698 { 0x16, "SEC" }, 699 { 0x17, "SSYNC" }, 700 { 0x18, "GR_CE" }, 701 { 0x19, "CE2" }, 702 { 0x1a, "XV" }, 703 { 0x1b, "MMU_NB" }, 704 { 0x1c, "MSENC" }, 705 { 0x1d, "DFALCON" }, 706 { 0x1e, "SKED" }, 707 { 0x1f, "AFALCON" }, 708 {} 709 }; 710 711 static const struct nvkm_enum 712 gk104_fifo_fault_gpcclient[] = { 713 { 0x00, "L1_0" }, { 0x01, "T1_0" }, { 0x02, "PE_0" }, 714 { 0x03, "L1_1" }, { 0x04, "T1_1" }, { 0x05, "PE_1" }, 715 { 0x06, "L1_2" }, { 0x07, "T1_2" }, { 0x08, "PE_2" }, 716 { 0x09, "L1_3" }, { 0x0a, "T1_3" }, { 0x0b, "PE_3" }, 717 { 0x0c, "RAST" }, 718 { 0x0d, "GCC" }, 719 { 0x0e, "GPCCS" }, 720 { 0x0f, "PROP_0" }, 721 { 0x10, "PROP_1" }, 722 { 0x11, "PROP_2" }, 723 { 0x12, "PROP_3" }, 724 { 0x13, "L1_4" }, { 0x14, "T1_4" }, { 0x15, "PE_4" }, 725 { 0x16, "L1_5" }, { 0x17, "T1_5" }, { 0x18, "PE_5" }, 726 { 0x19, "L1_6" }, { 0x1a, "T1_6" }, { 0x1b, "PE_6" }, 727 { 0x1c, "L1_7" }, { 0x1d, "T1_7" }, { 0x1e, "PE_7" }, 728 { 0x1f, "GPM" }, 729 { 0x20, "LTP_UTLB_0" }, 730 { 0x21, "LTP_UTLB_1" }, 731 { 0x22, "LTP_UTLB_2" }, 732 { 0x23, "LTP_UTLB_3" }, 733 { 0x24, "GPC_RGG_UTLB" }, 734 {} 735 }; 736 737 static void 738 gk104_fifo_intr_fault(struct gk104_fifo *fifo, int unit) 739 { 740 struct nvkm_subdev *subdev = &fifo->base.engine.subdev; 741 struct nvkm_device *device = subdev->device; 742 u32 inst = nvkm_rd32(device, 0x002800 + (unit * 0x10)); 743 u32 valo = nvkm_rd32(device, 0x002804 + (unit * 0x10)); 744 u32 vahi = nvkm_rd32(device, 0x002808 + (unit * 0x10)); 745 u32 stat = nvkm_rd32(device, 0x00280c + (unit * 0x10)); 746 u32 gpc = (stat & 0x1f000000) >> 24; 747 u32 client = (stat & 0x00001f00) >> 8; 748 u32 write = (stat & 0x00000080); 749 u32 hub = (stat & 0x00000040); 750 u32 reason = (stat & 0x0000000f); 751 struct nvkm_object *engctx = NULL, *object; 752 struct nvkm_engine *engine = NULL; 753 const struct nvkm_enum *er, *eu, *ec; 754 char gpcid[8] = ""; 755 756 er = nvkm_enum_find(gk104_fifo_fault_reason, reason); 757 eu = nvkm_enum_find(gk104_fifo_fault_engine, unit); 758 if (hub) { 759 ec = nvkm_enum_find(gk104_fifo_fault_hubclient, client); 760 } else { 761 ec = nvkm_enum_find(gk104_fifo_fault_gpcclient, client); 762 snprintf(gpcid, sizeof(gpcid), "GPC%d/", gpc); 763 } 764 765 if (eu) { 766 switch (eu->data2) { 767 case NVDEV_SUBDEV_BAR: 768 nvkm_mask(device, 0x001704, 0x00000000, 0x00000000); 769 break; 770 case NVDEV_SUBDEV_INSTMEM: 771 nvkm_mask(device, 0x001714, 0x00000000, 0x00000000); 772 break; 773 case NVDEV_ENGINE_IFB: 774 nvkm_mask(device, 0x001718, 0x00000000, 0x00000000); 775 break; 776 default: 777 engine = nvkm_engine(fifo, eu->data2); 778 if (engine) 779 engctx = nvkm_engctx_get(engine, inst); 780 break; 781 } 782 } 783 784 nvkm_error(subdev, 785 "%s fault at %010llx engine %02x [%s] client %02x [%s%s] " 786 "reason %02x [%s] on channel %d [%010llx %s]\n", 787 write ? "write" : "read", (u64)vahi << 32 | valo, 788 unit, eu ? eu->name : "", client, gpcid, ec ? ec->name : "", 789 reason, er ? er->name : "", -1, (u64)inst << 12, 790 nvkm_client_name(engctx)); 791 792 object = engctx; 793 while (object) { 794 switch (nv_mclass(object)) { 795 case KEPLER_CHANNEL_GPFIFO_A: 796 case MAXWELL_CHANNEL_GPFIFO_A: 797 gk104_fifo_recover(fifo, engine, (void *)object); 798 break; 799 } 800 object = object->parent; 801 } 802 803 nvkm_engctx_put(engctx); 804 } 805 806 static const struct nvkm_bitfield gk104_fifo_pbdma_intr_0[] = { 807 { 0x00000001, "MEMREQ" }, 808 { 0x00000002, "MEMACK_TIMEOUT" }, 809 { 0x00000004, "MEMACK_EXTRA" }, 810 { 0x00000008, "MEMDAT_TIMEOUT" }, 811 { 0x00000010, "MEMDAT_EXTRA" }, 812 { 0x00000020, "MEMFLUSH" }, 813 { 0x00000040, "MEMOP" }, 814 { 0x00000080, "LBCONNECT" }, 815 { 0x00000100, "LBREQ" }, 816 { 0x00000200, "LBACK_TIMEOUT" }, 817 { 0x00000400, "LBACK_EXTRA" }, 818 { 0x00000800, "LBDAT_TIMEOUT" }, 819 { 0x00001000, "LBDAT_EXTRA" }, 820 { 0x00002000, "GPFIFO" }, 821 { 0x00004000, "GPPTR" }, 822 { 0x00008000, "GPENTRY" }, 823 { 0x00010000, "GPCRC" }, 824 { 0x00020000, "PBPTR" }, 825 { 0x00040000, "PBENTRY" }, 826 { 0x00080000, "PBCRC" }, 827 { 0x00100000, "XBARCONNECT" }, 828 { 0x00200000, "METHOD" }, 829 { 0x00400000, "METHODCRC" }, 830 { 0x00800000, "DEVICE" }, 831 { 0x02000000, "SEMAPHORE" }, 832 { 0x04000000, "ACQUIRE" }, 833 { 0x08000000, "PRI" }, 834 { 0x20000000, "NO_CTXSW_SEG" }, 835 { 0x40000000, "PBSEG" }, 836 { 0x80000000, "SIGNATURE" }, 837 {} 838 }; 839 840 static void 841 gk104_fifo_intr_pbdma_0(struct gk104_fifo *fifo, int unit) 842 { 843 struct nvkm_subdev *subdev = &fifo->base.engine.subdev; 844 struct nvkm_device *device = subdev->device; 845 u32 mask = nvkm_rd32(device, 0x04010c + (unit * 0x2000)); 846 u32 stat = nvkm_rd32(device, 0x040108 + (unit * 0x2000)) & mask; 847 u32 addr = nvkm_rd32(device, 0x0400c0 + (unit * 0x2000)); 848 u32 data = nvkm_rd32(device, 0x0400c4 + (unit * 0x2000)); 849 u32 chid = nvkm_rd32(device, 0x040120 + (unit * 0x2000)) & 0xfff; 850 u32 subc = (addr & 0x00070000) >> 16; 851 u32 mthd = (addr & 0x00003ffc); 852 u32 show = stat; 853 char msg[128]; 854 855 if (stat & 0x00800000) { 856 if (!gk104_fifo_swmthd(fifo, chid, mthd, data)) 857 show &= ~0x00800000; 858 nvkm_wr32(device, 0x0400c0 + (unit * 0x2000), 0x80600008); 859 } 860 861 if (show) { 862 nvkm_snprintbf(msg, sizeof(msg), gk104_fifo_pbdma_intr_0, show); 863 nvkm_error(subdev, "PBDMA%d: %08x [%s] ch %d [%s] subc %d " 864 "mthd %04x data %08x\n", 865 unit, show, msg, chid, 866 nvkm_client_name_for_fifo_chid(&fifo->base, chid), 867 subc, mthd, data); 868 } 869 870 nvkm_wr32(device, 0x040108 + (unit * 0x2000), stat); 871 } 872 873 static const struct nvkm_bitfield gk104_fifo_pbdma_intr_1[] = { 874 { 0x00000001, "HCE_RE_ILLEGAL_OP" }, 875 { 0x00000002, "HCE_RE_ALIGNB" }, 876 { 0x00000004, "HCE_PRIV" }, 877 { 0x00000008, "HCE_ILLEGAL_MTHD" }, 878 { 0x00000010, "HCE_ILLEGAL_CLASS" }, 879 {} 880 }; 881 882 static void 883 gk104_fifo_intr_pbdma_1(struct gk104_fifo *fifo, int unit) 884 { 885 struct nvkm_subdev *subdev = &fifo->base.engine.subdev; 886 struct nvkm_device *device = subdev->device; 887 u32 mask = nvkm_rd32(device, 0x04014c + (unit * 0x2000)); 888 u32 stat = nvkm_rd32(device, 0x040148 + (unit * 0x2000)) & mask; 889 u32 chid = nvkm_rd32(device, 0x040120 + (unit * 0x2000)) & 0xfff; 890 char msg[128]; 891 892 if (stat) { 893 nvkm_snprintbf(msg, sizeof(msg), gk104_fifo_pbdma_intr_1, stat); 894 nvkm_error(subdev, "PBDMA%d: %08x [%s] ch %d %08x %08x\n", 895 unit, stat, msg, chid, 896 nvkm_rd32(device, 0x040150 + (unit * 0x2000)), 897 nvkm_rd32(device, 0x040154 + (unit * 0x2000))); 898 } 899 900 nvkm_wr32(device, 0x040148 + (unit * 0x2000), stat); 901 } 902 903 static void 904 gk104_fifo_intr_runlist(struct gk104_fifo *fifo) 905 { 906 struct nvkm_device *device = fifo->base.engine.subdev.device; 907 u32 mask = nvkm_rd32(device, 0x002a00); 908 while (mask) { 909 u32 engn = __ffs(mask); 910 wake_up(&fifo->engine[engn].wait); 911 nvkm_wr32(device, 0x002a00, 1 << engn); 912 mask &= ~(1 << engn); 913 } 914 } 915 916 static void 917 gk104_fifo_intr_engine(struct gk104_fifo *fifo) 918 { 919 nvkm_fifo_uevent(&fifo->base); 920 } 921 922 static void 923 gk104_fifo_intr(struct nvkm_subdev *subdev) 924 { 925 struct gk104_fifo *fifo = (void *)subdev; 926 struct nvkm_device *device = fifo->base.engine.subdev.device; 927 u32 mask = nvkm_rd32(device, 0x002140); 928 u32 stat = nvkm_rd32(device, 0x002100) & mask; 929 930 if (stat & 0x00000001) { 931 gk104_fifo_intr_bind(fifo); 932 nvkm_wr32(device, 0x002100, 0x00000001); 933 stat &= ~0x00000001; 934 } 935 936 if (stat & 0x00000010) { 937 nvkm_error(subdev, "PIO_ERROR\n"); 938 nvkm_wr32(device, 0x002100, 0x00000010); 939 stat &= ~0x00000010; 940 } 941 942 if (stat & 0x00000100) { 943 gk104_fifo_intr_sched(fifo); 944 nvkm_wr32(device, 0x002100, 0x00000100); 945 stat &= ~0x00000100; 946 } 947 948 if (stat & 0x00010000) { 949 gk104_fifo_intr_chsw(fifo); 950 nvkm_wr32(device, 0x002100, 0x00010000); 951 stat &= ~0x00010000; 952 } 953 954 if (stat & 0x00800000) { 955 nvkm_error(subdev, "FB_FLUSH_TIMEOUT\n"); 956 nvkm_wr32(device, 0x002100, 0x00800000); 957 stat &= ~0x00800000; 958 } 959 960 if (stat & 0x01000000) { 961 nvkm_error(subdev, "LB_ERROR\n"); 962 nvkm_wr32(device, 0x002100, 0x01000000); 963 stat &= ~0x01000000; 964 } 965 966 if (stat & 0x08000000) { 967 gk104_fifo_intr_dropped_fault(fifo); 968 nvkm_wr32(device, 0x002100, 0x08000000); 969 stat &= ~0x08000000; 970 } 971 972 if (stat & 0x10000000) { 973 u32 mask = nvkm_rd32(device, 0x00259c); 974 while (mask) { 975 u32 unit = __ffs(mask); 976 gk104_fifo_intr_fault(fifo, unit); 977 nvkm_wr32(device, 0x00259c, (1 << unit)); 978 mask &= ~(1 << unit); 979 } 980 stat &= ~0x10000000; 981 } 982 983 if (stat & 0x20000000) { 984 u32 mask = nvkm_rd32(device, 0x0025a0); 985 while (mask) { 986 u32 unit = __ffs(mask); 987 gk104_fifo_intr_pbdma_0(fifo, unit); 988 gk104_fifo_intr_pbdma_1(fifo, unit); 989 nvkm_wr32(device, 0x0025a0, (1 << unit)); 990 mask &= ~(1 << unit); 991 } 992 stat &= ~0x20000000; 993 } 994 995 if (stat & 0x40000000) { 996 gk104_fifo_intr_runlist(fifo); 997 stat &= ~0x40000000; 998 } 999 1000 if (stat & 0x80000000) { 1001 nvkm_wr32(device, 0x002100, 0x80000000); 1002 gk104_fifo_intr_engine(fifo); 1003 stat &= ~0x80000000; 1004 } 1005 1006 if (stat) { 1007 nvkm_error(subdev, "INTR %08x\n", stat); 1008 nvkm_mask(device, 0x002140, stat, 0x00000000); 1009 nvkm_wr32(device, 0x002100, stat); 1010 } 1011 } 1012 1013 static void 1014 gk104_fifo_uevent_init(struct nvkm_event *event, int type, int index) 1015 { 1016 struct nvkm_fifo *fifo = container_of(event, typeof(*fifo), uevent); 1017 struct nvkm_device *device = fifo->engine.subdev.device; 1018 nvkm_mask(device, 0x002140, 0x80000000, 0x80000000); 1019 } 1020 1021 static void 1022 gk104_fifo_uevent_fini(struct nvkm_event *event, int type, int index) 1023 { 1024 struct nvkm_fifo *fifo = container_of(event, typeof(*fifo), uevent); 1025 struct nvkm_device *device = fifo->engine.subdev.device; 1026 nvkm_mask(device, 0x002140, 0x80000000, 0x00000000); 1027 } 1028 1029 static const struct nvkm_event_func 1030 gk104_fifo_uevent_func = { 1031 .ctor = nvkm_fifo_uevent_ctor, 1032 .init = gk104_fifo_uevent_init, 1033 .fini = gk104_fifo_uevent_fini, 1034 }; 1035 1036 int 1037 gk104_fifo_fini(struct nvkm_object *object, bool suspend) 1038 { 1039 struct gk104_fifo *fifo = (void *)object; 1040 struct nvkm_device *device = fifo->base.engine.subdev.device; 1041 int ret; 1042 1043 ret = nvkm_fifo_fini(&fifo->base, suspend); 1044 if (ret) 1045 return ret; 1046 1047 /* allow mmu fault interrupts, even when we're not using fifo */ 1048 nvkm_mask(device, 0x002140, 0x10000000, 0x10000000); 1049 return 0; 1050 } 1051 1052 int 1053 gk104_fifo_init(struct nvkm_object *object) 1054 { 1055 struct gk104_fifo *fifo = (void *)object; 1056 struct nvkm_subdev *subdev = &fifo->base.engine.subdev; 1057 struct nvkm_device *device = subdev->device; 1058 int ret, i; 1059 1060 ret = nvkm_fifo_init(&fifo->base); 1061 if (ret) 1062 return ret; 1063 1064 /* enable all available PBDMA units */ 1065 nvkm_wr32(device, 0x000204, 0xffffffff); 1066 fifo->spoon_nr = hweight32(nvkm_rd32(device, 0x000204)); 1067 nvkm_debug(subdev, "%d PBDMA unit(s)\n", fifo->spoon_nr); 1068 1069 /* PBDMA[n] */ 1070 for (i = 0; i < fifo->spoon_nr; i++) { 1071 nvkm_mask(device, 0x04013c + (i * 0x2000), 0x10000100, 0x00000000); 1072 nvkm_wr32(device, 0x040108 + (i * 0x2000), 0xffffffff); /* INTR */ 1073 nvkm_wr32(device, 0x04010c + (i * 0x2000), 0xfffffeff); /* INTREN */ 1074 } 1075 1076 /* PBDMA[n].HCE */ 1077 for (i = 0; i < fifo->spoon_nr; i++) { 1078 nvkm_wr32(device, 0x040148 + (i * 0x2000), 0xffffffff); /* INTR */ 1079 nvkm_wr32(device, 0x04014c + (i * 0x2000), 0xffffffff); /* INTREN */ 1080 } 1081 1082 nvkm_wr32(device, 0x002254, 0x10000000 | fifo->user.bar.offset >> 12); 1083 1084 nvkm_wr32(device, 0x002100, 0xffffffff); 1085 nvkm_wr32(device, 0x002140, 0x7fffffff); 1086 return 0; 1087 } 1088 1089 void 1090 gk104_fifo_dtor(struct nvkm_object *object) 1091 { 1092 struct gk104_fifo *fifo = (void *)object; 1093 int i; 1094 1095 nvkm_gpuobj_unmap(&fifo->user.bar); 1096 nvkm_gpuobj_ref(NULL, &fifo->user.mem); 1097 1098 for (i = 0; i < FIFO_ENGINE_NR; i++) { 1099 nvkm_gpuobj_ref(NULL, &fifo->engine[i].runlist[1]); 1100 nvkm_gpuobj_ref(NULL, &fifo->engine[i].runlist[0]); 1101 } 1102 1103 nvkm_fifo_destroy(&fifo->base); 1104 } 1105 1106 int 1107 gk104_fifo_ctor(struct nvkm_object *parent, struct nvkm_object *engine, 1108 struct nvkm_oclass *oclass, void *data, u32 size, 1109 struct nvkm_object **pobject) 1110 { 1111 struct gk104_fifo_impl *impl = (void *)oclass; 1112 struct gk104_fifo *fifo; 1113 int ret, i; 1114 1115 ret = nvkm_fifo_create(parent, engine, oclass, 0, 1116 impl->channels - 1, &fifo); 1117 *pobject = nv_object(fifo); 1118 if (ret) 1119 return ret; 1120 1121 INIT_WORK(&fifo->fault, gk104_fifo_recover_work); 1122 1123 for (i = 0; i < FIFO_ENGINE_NR; i++) { 1124 ret = nvkm_gpuobj_new(nv_object(fifo), NULL, 0x8000, 0x1000, 1125 0, &fifo->engine[i].runlist[0]); 1126 if (ret) 1127 return ret; 1128 1129 ret = nvkm_gpuobj_new(nv_object(fifo), NULL, 0x8000, 0x1000, 1130 0, &fifo->engine[i].runlist[1]); 1131 if (ret) 1132 return ret; 1133 1134 init_waitqueue_head(&fifo->engine[i].wait); 1135 } 1136 1137 ret = nvkm_gpuobj_new(nv_object(fifo), NULL, impl->channels * 0x200, 1138 0x1000, NVOBJ_FLAG_ZERO_ALLOC, &fifo->user.mem); 1139 if (ret) 1140 return ret; 1141 1142 ret = nvkm_gpuobj_map(fifo->user.mem, NV_MEM_ACCESS_RW, 1143 &fifo->user.bar); 1144 if (ret) 1145 return ret; 1146 1147 ret = nvkm_event_init(&gk104_fifo_uevent_func, 1, 1, &fifo->base.uevent); 1148 if (ret) 1149 return ret; 1150 1151 nv_subdev(fifo)->unit = 0x00000100; 1152 nv_subdev(fifo)->intr = gk104_fifo_intr; 1153 nv_engine(fifo)->cclass = &gk104_fifo_cclass; 1154 nv_engine(fifo)->sclass = gk104_fifo_sclass; 1155 return 0; 1156 } 1157 1158 struct nvkm_oclass * 1159 gk104_fifo_oclass = &(struct gk104_fifo_impl) { 1160 .base.handle = NV_ENGINE(FIFO, 0xe0), 1161 .base.ofuncs = &(struct nvkm_ofuncs) { 1162 .ctor = gk104_fifo_ctor, 1163 .dtor = gk104_fifo_dtor, 1164 .init = gk104_fifo_init, 1165 .fini = gk104_fifo_fini, 1166 }, 1167 .channels = 4096, 1168 }.base; 1169