/*
 * Copyright 2012 Red Hat Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Ben Skeggs
 */
#include "gk104.h"
#include "cgrp.h"
#include "changk104.h"

#include <core/client.h>
#include <core/gpuobj.h>
#include <subdev/bar.h>
#include <subdev/fault.h>
#include <subdev/timer.h>
#include <subdev/top.h>
#include <engine/sw.h>

#include <nvif/class.h>
#include <nvif/cl0080.h>

struct gk104_fifo_engine_status {
	bool busy;
	bool faulted;
	bool chsw;
	bool save;
	bool load;
	struct {
		bool tsg;
		u32 id;
	} prev, next, *chan;
};

/* Decode an engine's status register (0x002640 + engn * 0x08) into a SW
 * view of its channel-switch state.  The bit positions below follow the
 * decode in this function; they are inferred from the driver itself, not
 * taken from official documentation:
 *
 *   31     engine busy
 *   30     engine faulted
 *   28     next context is a TSG
 *   27:16  next context ID
 *   15     channel switch in progress
 *   14     context save pending
 *   13     context load pending
 *   12     previous context is a TSG
 *   11:0   previous context ID
 *
 * status->chan points at whichever of prev/next is believed to be on the
 * engine once the switch logic has been applied, or NULL when idle.
 */
static void
gk104_fifo_engine_status(struct gk104_fifo *fifo, int engn,
			 struct gk104_fifo_engine_status *status)
{
	struct nvkm_engine *engine = fifo->engine[engn].engine;
	struct nvkm_subdev *subdev = &fifo->base.engine.subdev;
	struct nvkm_device *device = subdev->device;
	u32 stat = nvkm_rd32(device, 0x002640 + (engn * 0x08));

	status->busy     = !!(stat & 0x80000000);
	status->faulted  = !!(stat & 0x40000000);
	status->next.tsg = !!(stat & 0x10000000);
	status->next.id  =   (stat & 0x0fff0000) >> 16;
	status->chsw     = !!(stat & 0x00008000);
	status->save     = !!(stat & 0x00004000);
	status->load     = !!(stat & 0x00002000);
	status->prev.tsg = !!(stat & 0x00001000);
	status->prev.id  =   (stat & 0x00000fff);
	status->chan     = NULL;

	if (status->busy && status->chsw) {
		if (status->load && status->save) {
			if (engine && nvkm_engine_chsw_load(engine))
				status->chan = &status->next;
			else
				status->chan = &status->prev;
		} else
		if (status->load) {
			status->chan = &status->next;
		} else {
			status->chan = &status->prev;
		}
	} else
	if (status->load) {
		status->chan = &status->prev;
	}

	nvkm_debug(subdev, "engine %02d: busy %d faulted %d chsw %d "
			   "save %d load %d %sid %d%s-> %sid %d%s\n",
		   engn, status->busy, status->faulted,
		   status->chsw, status->save, status->load,
		   status->prev.tsg ? "tsg" : "ch", status->prev.id,
		   status->chan == &status->prev ? "*" : " ",
		   status->next.tsg ? "tsg" : "ch", status->next.id,
		   status->chan == &status->next ? "*" : " ");
}

static int
gk104_fifo_class_new(struct nvkm_fifo *base, const struct nvkm_oclass *oclass,
		     void *argv, u32 argc, struct nvkm_object **pobject)
{
	struct gk104_fifo *fifo = gk104_fifo(base);
	if (oclass->engn == &fifo->func->chan) {
		const struct gk104_fifo_chan_user *user = oclass->engn;
		return user->ctor(fifo, oclass, argv, argc, pobject);
	}
	WARN_ON(1);
	return -EINVAL;
}

static int
gk104_fifo_class_get(struct nvkm_fifo *base, int index,
		     struct nvkm_oclass *oclass)
{
	struct gk104_fifo *fifo = gk104_fifo(base);
	int c = 0;

	if (fifo->func->chan.ctor && c++ == index) {
		oclass->base =  fifo->func->chan.user;
		oclass->engn = &fifo->func->chan;
		return 0;
	}

	return c;
}

static void
gk104_fifo_uevent_fini(struct nvkm_fifo *fifo)
{
	struct nvkm_device *device = fifo->engine.subdev.device;
	nvkm_mask(device, 0x002140, 0x80000000, 0x00000000);
}

static void
gk104_fifo_uevent_init(struct nvkm_fifo *fifo)
{
	struct nvkm_device *device = fifo->engine.subdev.device;
	nvkm_mask(device, 0x002140, 0x80000000, 0x80000000);
}

/* Rebuild and submit the runlist for runlist engine 'runl'.
 *
 * Runlists are double-buffered (mem[0]/mem[1], selected by ->next), so a
 * new list can be built while HW may still be reading the previous one.
 * The entries themselves are written by the per-chipset runlist functions
 * (fifo->func->runlist); submission happens by writing the list's base
 * address/target to 0x002270 and the (runlist, entry count) pair to
 * 0x002274, then polling 0x002284 until HW has finished processing it.
 */
void
gk104_fifo_runlist_commit(struct gk104_fifo *fifo, int runl)
{
	const struct gk104_fifo_runlist_func *func = fifo->func->runlist;
	struct gk104_fifo_chan *chan;
	struct nvkm_subdev *subdev = &fifo->base.engine.subdev;
	struct nvkm_device *device = subdev->device;
	struct nvkm_memory *mem;
	struct nvkm_fifo_cgrp *cgrp;
	int nr = 0;
	int target;

	mutex_lock(&subdev->mutex);
	mem = fifo->runlist[runl].mem[fifo->runlist[runl].next];
	fifo->runlist[runl].next = !fifo->runlist[runl].next;

	nvkm_kmap(mem);
	list_for_each_entry(chan, &fifo->runlist[runl].chan, head) {
		func->chan(chan, mem, nr++ * func->size);
	}

	list_for_each_entry(cgrp, &fifo->runlist[runl].cgrp, head) {
		func->cgrp(cgrp, mem, nr++ * func->size);
		list_for_each_entry(chan, &cgrp->chan, head) {
			func->chan(chan, mem, nr++ * func->size);
		}
	}
	nvkm_done(mem);

	switch (nvkm_memory_target(mem)) {
	case NVKM_MEM_TARGET_VRAM: target = 0; break;
	case NVKM_MEM_TARGET_NCOH: target = 3; break;
	default:
		WARN_ON(1);
		goto unlock;
	}

	nvkm_wr32(device, 0x002270, (nvkm_memory_addr(mem) >> 12) |
				    (target << 28));
	nvkm_wr32(device, 0x002274, (runl << 20) | nr);

	if (nvkm_msec(device, 2000,
		if (!(nvkm_rd32(device, 0x002284 + (runl * 0x08)) & 0x00100000))
			break;
	) < 0)
		nvkm_error(subdev, "runlist %d update timeout\n", runl);
unlock:
	mutex_unlock(&subdev->mutex);
}

void
gk104_fifo_runlist_remove(struct gk104_fifo *fifo, struct gk104_fifo_chan *chan)
{
	struct nvkm_fifo_cgrp *cgrp = chan->cgrp;
	mutex_lock(&fifo->base.engine.subdev.mutex);
	if (!list_empty(&chan->head)) {
		list_del_init(&chan->head);
		if (cgrp && !--cgrp->chan_nr)
			list_del_init(&cgrp->head);
	}
	mutex_unlock(&fifo->base.engine.subdev.mutex);
}

void
gk104_fifo_runlist_insert(struct gk104_fifo *fifo, struct gk104_fifo_chan *chan)
{
	struct nvkm_fifo_cgrp *cgrp = chan->cgrp;
	mutex_lock(&fifo->base.engine.subdev.mutex);
	if (cgrp) {
		if (!cgrp->chan_nr++)
			list_add_tail(&cgrp->head, &fifo->runlist[chan->runl].cgrp);
		list_add_tail(&chan->head, &cgrp->chan);
	} else {
		list_add_tail(&chan->head, &fifo->runlist[chan->runl].chan);
	}
	mutex_unlock(&fifo->base.engine.subdev.mutex);
}
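
/* Write a single runlist entry for a channel.  On GK104 each entry is
 * 8 bytes (gk104_fifo_runlist.size below): the channel ID in the first
 * word, zero in the second.  Other chipsets supply their own
 * gk104_fifo_runlist_func where the entry format differs.
 */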
void
gk104_fifo_runlist_chan(struct gk104_fifo_chan *chan,
			struct nvkm_memory *memory, u32 offset)
{
	nvkm_wo32(memory, offset + 0, chan->base.chid);
	nvkm_wo32(memory, offset + 4, 0x00000000);
}

const struct gk104_fifo_runlist_func
gk104_fifo_runlist = {
	.size = 8,
	.chan = gk104_fifo_runlist_chan,
};

static void
gk104_fifo_recover_work(struct work_struct *w)
{
	struct gk104_fifo *fifo = container_of(w, typeof(*fifo), recover.work);
	struct nvkm_device *device = fifo->base.engine.subdev.device;
	struct nvkm_engine *engine;
	unsigned long flags;
	u32 engm, runm, todo;
	int engn, runl;

	spin_lock_irqsave(&fifo->base.lock, flags);
	runm = fifo->recover.runm;
	engm = fifo->recover.engm;
	fifo->recover.engm = 0;
	fifo->recover.runm = 0;
	spin_unlock_irqrestore(&fifo->base.lock, flags);

	nvkm_mask(device, 0x002630, runm, runm);

	for (todo = engm; engn = __ffs(todo), todo; todo &= ~BIT(engn)) {
		if ((engine = fifo->engine[engn].engine)) {
			nvkm_subdev_fini(&engine->subdev, false);
			WARN_ON(nvkm_subdev_init(&engine->subdev));
		}
	}

	for (todo = runm; runl = __ffs(todo), todo; todo &= ~BIT(runl))
		gk104_fifo_runlist_commit(fifo, runl);

	nvkm_wr32(device, 0x00262c, runm);
	nvkm_mask(device, 0x002630, runm, 0x00000000);
}
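
/* Recovery is two-stage: the recover_runl()/recover_chan()/recover_engn()
 * helpers below run under fifo->base.lock and only mark what needs
 * recovering (fifo->recover.runm/engm), while gk104_fifo_recover_work()
 * above later resets the affected engines and resubmits the runlists
 * from process context.
 */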
static void gk104_fifo_recover_engn(struct gk104_fifo *fifo, int engn);

static void
gk104_fifo_recover_runl(struct gk104_fifo *fifo, int runl)
{
	struct nvkm_subdev *subdev = &fifo->base.engine.subdev;
	struct nvkm_device *device = subdev->device;
	const u32 runm = BIT(runl);

	assert_spin_locked(&fifo->base.lock);
	if (fifo->recover.runm & runm)
		return;
	fifo->recover.runm |= runm;

	/* Block runlist to prevent channel assignment(s) from changing. */
	nvkm_mask(device, 0x002630, runm, runm);

	/* Schedule recovery. */
	nvkm_warn(subdev, "runlist %d: scheduled for recovery\n", runl);
	schedule_work(&fifo->recover.work);
}

static void
gk104_fifo_recover_chan(struct nvkm_fifo *base, int chid)
{
	struct gk104_fifo *fifo = gk104_fifo(base);
	struct nvkm_subdev *subdev = &fifo->base.engine.subdev;
	struct nvkm_device *device = subdev->device;
	const u32  stat = nvkm_rd32(device, 0x800004 + (chid * 0x08));
	const u32  runl = (stat & 0x000f0000) >> 16;
	const bool used = (stat & 0x00000001);
	unsigned long engn, engm = fifo->runlist[runl].engm;
	struct gk104_fifo_chan *chan;

	assert_spin_locked(&fifo->base.lock);
	if (!used)
		return;

	/* Look up SW state for the channel, and mark it as dead. */
	list_for_each_entry(chan, &fifo->runlist[runl].chan, head) {
		if (chan->base.chid == chid) {
			list_del_init(&chan->head);
			chan->killed = true;
			nvkm_fifo_kevent(&fifo->base, chid);
			break;
		}
	}

	/* Disable channel. */
	nvkm_wr32(device, 0x800004 + (chid * 0x08), stat | 0x00000800);
	nvkm_warn(subdev, "channel %d: killed\n", chid);

	/* Block channel assignments from changing during recovery. */
	gk104_fifo_recover_runl(fifo, runl);

	/* Schedule recovery for any engines the channel is on. */
	for_each_set_bit(engn, &engm, fifo->engine_nr) {
		struct gk104_fifo_engine_status status;
		gk104_fifo_engine_status(fifo, engn, &status);
		if (!status.chan || status.chan->id != chid)
			continue;
		gk104_fifo_recover_engn(fifo, engn);
	}
}
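
/* Recover a single engine: kill whichever channel is currently on it,
 * trigger an MMU fault on the engine (see the comment within), then
 * schedule the reset work.  Caller must hold fifo->base.lock.
 */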
static void
gk104_fifo_recover_engn(struct gk104_fifo *fifo, int engn)
{
	struct nvkm_engine *engine = fifo->engine[engn].engine;
	struct nvkm_subdev *subdev = &fifo->base.engine.subdev;
	struct nvkm_device *device = subdev->device;
	const u32 runl = fifo->engine[engn].runl;
	const u32 engm = BIT(engn);
	struct gk104_fifo_engine_status status;
	int mmui = -1;

	assert_spin_locked(&fifo->base.lock);
	if (fifo->recover.engm & engm)
		return;
	fifo->recover.engm |= engm;

	/* Block channel assignments from changing during recovery. */
	gk104_fifo_recover_runl(fifo, runl);

	/* Determine which channel (if any) is currently on the engine. */
	gk104_fifo_engine_status(fifo, engn, &status);
	if (status.chan) {
		/* The channel is no longer viable, kill it. */
		gk104_fifo_recover_chan(&fifo->base, status.chan->id);
	}

	/* Determine MMU fault ID for the engine, if we're not being
	 * called from the fault handler already.
	 */
	if (!status.faulted && engine) {
		mmui = nvkm_top_fault_id(device, engine->subdev.index);
		if (mmui < 0) {
			const struct nvkm_enum *en = fifo->func->fault.engine;
			for (; en && en->name; en++) {
				if (en->data2 == engine->subdev.index) {
					mmui = en->value;
					break;
				}
			}
		}
		WARN_ON(mmui < 0);
	}

	/* Trigger an MMU fault for the engine.
	 *
	 * It's unclear why this is needed, but nvgpu does something similar,
	 * and it makes recovery from CTXSW_TIMEOUT a lot more reliable.
	 */
	if (mmui >= 0) {
		nvkm_wr32(device, 0x002a30 + (engn * 0x04), 0x00000100 | mmui);

		/* Wait for fault to trigger. */
		nvkm_msec(device, 2000,
			gk104_fifo_engine_status(fifo, engn, &status);
			if (status.faulted)
				break;
		);

		/* Release MMU fault trigger, and ACK the fault. */
		nvkm_wr32(device, 0x002a30 + (engn * 0x04), 0x00000000);
		nvkm_wr32(device, 0x00259c, BIT(mmui));
		nvkm_wr32(device, 0x002100, 0x10000000);
	}

	/* Schedule recovery. */
	nvkm_warn(subdev, "engine %d: scheduled for recovery\n", engn);
	schedule_work(&fifo->recover.work);
}

static void
gk104_fifo_fault(struct nvkm_fifo *base, struct nvkm_fault_data *info)
{
	struct gk104_fifo *fifo = gk104_fifo(base);
	struct nvkm_subdev *subdev = &fifo->base.engine.subdev;
	struct nvkm_device *device = subdev->device;
	const struct nvkm_enum *er, *ee, *ec, *ea;
	struct nvkm_engine *engine = NULL;
	struct nvkm_fifo_chan *chan;
	unsigned long flags;
	char ct[8] = "HUB/", en[16] = "";
	int engn;

	er = nvkm_enum_find(fifo->func->fault.reason, info->reason);
	ee = nvkm_enum_find(fifo->func->fault.engine, info->engine);
	if (info->hub) {
		ec = nvkm_enum_find(fifo->func->fault.hubclient, info->client);
	} else {
		ec = nvkm_enum_find(fifo->func->fault.gpcclient, info->client);
		snprintf(ct, sizeof(ct), "GPC%d/", info->gpc);
	}
	ea = nvkm_enum_find(fifo->func->fault.access, info->access);

	if (ee && ee->data2) {
		switch (ee->data2) {
		case NVKM_SUBDEV_BAR:
			nvkm_mask(device, 0x001704, 0x00000000, 0x00000000);
			break;
		case NVKM_SUBDEV_INSTMEM:
			nvkm_mask(device, 0x001714, 0x00000000, 0x00000000);
			break;
		case NVKM_ENGINE_IFB:
			nvkm_mask(device, 0x001718, 0x00000000, 0x00000000);
			break;
		default:
			engine = nvkm_device_engine(device, ee->data2);
			break;
		}
	}

	if (ee == NULL) {
		enum nvkm_devidx engidx = nvkm_top_fault(device, info->engine);
		if (engidx < NVKM_SUBDEV_NR) {
			const char *src = nvkm_subdev_name[engidx];
			char *dst = en;
			do {
				*dst++ = toupper(*src++);
			} while (*src);
			engine = nvkm_device_engine(device, engidx);
		}
	} else {
		snprintf(en, sizeof(en), "%s", ee->name);
	}

	spin_lock_irqsave(&fifo->base.lock, flags);
	chan = nvkm_fifo_chan_inst_locked(&fifo->base, info->inst);

	nvkm_error(subdev,
		   "fault %02x [%s] at %016llx engine %02x [%s] client %02x "
		   "[%s%s] reason %02x [%s] on channel %d [%010llx %s]\n",
		   info->access, ea ? ea->name : "", info->addr,
		   info->engine, ee ? ee->name : en,
		   info->client, ct, ec ? ec->name : "",
		   info->reason, er ? er->name : "", chan ? chan->chid : -1,
		   info->inst, chan ? chan->object.client->name : "unknown");

	/* Kill the channel that caused the fault. */
	if (chan)
		gk104_fifo_recover_chan(&fifo->base, chan->chid);

	/* Channel recovery will probably have already done this for the
	 * correct engine(s), but just in case we can't find the channel
	 * information...
	 */
	for (engn = 0; engn < fifo->engine_nr && engine; engn++) {
		if (fifo->engine[engn].engine == engine) {
			gk104_fifo_recover_engn(fifo, engn);
			break;
		}
	}

	spin_unlock_irqrestore(&fifo->base.lock, flags);
}
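
/* BIND_ERROR reason codes, as reported in the low byte of 0x00252c. */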
static const struct nvkm_enum
gk104_fifo_bind_reason[] = {
	{ 0x01, "BIND_NOT_UNBOUND" },
	{ 0x02, "SNOOP_WITHOUT_BAR1" },
	{ 0x03, "UNBIND_WHILE_RUNNING" },
	{ 0x05, "INVALID_RUNLIST" },
	{ 0x06, "INVALID_CTX_TGT" },
	{ 0x0b, "UNBIND_WHILE_PARKED" },
	{}
};

static void
gk104_fifo_intr_bind(struct gk104_fifo *fifo)
{
	struct nvkm_subdev *subdev = &fifo->base.engine.subdev;
	struct nvkm_device *device = subdev->device;
	u32 intr = nvkm_rd32(device, 0x00252c);
	u32 code = intr & 0x000000ff;
	const struct nvkm_enum *en =
		nvkm_enum_find(gk104_fifo_bind_reason, code);

	nvkm_error(subdev, "BIND_ERROR %02x [%s]\n", code, en ? en->name : "");
}

static const struct nvkm_enum
gk104_fifo_sched_reason[] = {
	{ 0x0a, "CTXSW_TIMEOUT" },
	{}
};

static void
gk104_fifo_intr_sched_ctxsw(struct gk104_fifo *fifo)
{
	struct nvkm_device *device = fifo->base.engine.subdev.device;
	unsigned long flags, engm = 0;
	u32 engn;

	/* We need to ACK the SCHED_ERROR here, and prevent it reasserting,
	 * as MMU_FAULT cannot be triggered while it's pending.
	 */
	spin_lock_irqsave(&fifo->base.lock, flags);
	nvkm_mask(device, 0x002140, 0x00000100, 0x00000000);
	nvkm_wr32(device, 0x002100, 0x00000100);

	for (engn = 0; engn < fifo->engine_nr; engn++) {
		struct gk104_fifo_engine_status status;

		gk104_fifo_engine_status(fifo, engn, &status);
		if (!status.busy || !status.chsw)
			continue;

		engm |= BIT(engn);
	}

	for_each_set_bit(engn, &engm, fifo->engine_nr)
		gk104_fifo_recover_engn(fifo, engn);

	nvkm_mask(device, 0x002140, 0x00000100, 0x00000100);
	spin_unlock_irqrestore(&fifo->base.lock, flags);
}

static void
gk104_fifo_intr_sched(struct gk104_fifo *fifo)
{
	struct nvkm_subdev *subdev = &fifo->base.engine.subdev;
	struct nvkm_device *device = subdev->device;
	u32 intr = nvkm_rd32(device, 0x00254c);
	u32 code = intr & 0x000000ff;
	const struct nvkm_enum *en =
		nvkm_enum_find(gk104_fifo_sched_reason, code);

	nvkm_error(subdev, "SCHED_ERROR %02x [%s]\n", code, en ? en->name : "");

	switch (code) {
	case 0x0a:
		gk104_fifo_intr_sched_ctxsw(fifo);
		break;
	default:
		break;
	}
}

static void
gk104_fifo_intr_chsw(struct gk104_fifo *fifo)
{
	struct nvkm_subdev *subdev = &fifo->base.engine.subdev;
	struct nvkm_device *device = subdev->device;
	u32 stat = nvkm_rd32(device, 0x00256c);
	nvkm_error(subdev, "CHSW_ERROR %08x\n", stat);
	nvkm_wr32(device, 0x00256c, stat);
}

static void
gk104_fifo_intr_dropped_fault(struct gk104_fifo *fifo)
{
	struct nvkm_subdev *subdev = &fifo->base.engine.subdev;
	struct nvkm_device *device = subdev->device;
	u32 stat = nvkm_rd32(device, 0x00259c);
	nvkm_error(subdev, "DROPPED_MMU_FAULT %08x\n", stat);
}
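
/* Read one MMU fault unit's info registers (0x002800 + unit * 0x10:
 * instance, address lo/hi, fault type) and forward the decoded fault to
 * the common nvkm_fifo_fault() handler.  The field positions mirror the
 * decode below; they are inferred from the driver, not from official
 * documentation.
 */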
static void
gk104_fifo_intr_fault(struct gk104_fifo *fifo, int unit)
{
	struct nvkm_subdev *subdev = &fifo->base.engine.subdev;
	struct nvkm_device *device = subdev->device;
	u32 inst = nvkm_rd32(device, 0x002800 + (unit * 0x10));
	u32 valo = nvkm_rd32(device, 0x002804 + (unit * 0x10));
	u32 vahi = nvkm_rd32(device, 0x002808 + (unit * 0x10));
	u32 type = nvkm_rd32(device, 0x00280c + (unit * 0x10));
	struct nvkm_fault_data info;

	info.inst   = (u64)inst << 12;
	info.addr   = ((u64)vahi << 32) | valo;
	info.time   = 0;
	info.engine = unit;
	info.valid  = 1;
	info.gpc    = (type & 0x1f000000) >> 24;
	info.client = (type & 0x00001f00) >> 8;
	info.access = (type & 0x00000080) >> 7;
	info.hub    = (type & 0x00000040) >> 6;
	info.reason = (type & 0x000000ff);

	nvkm_fifo_fault(&fifo->base, &info);
}

static const struct nvkm_bitfield gk104_fifo_pbdma_intr_0[] = {
	{ 0x00000001, "MEMREQ" },
	{ 0x00000002, "MEMACK_TIMEOUT" },
	{ 0x00000004, "MEMACK_EXTRA" },
	{ 0x00000008, "MEMDAT_TIMEOUT" },
	{ 0x00000010, "MEMDAT_EXTRA" },
	{ 0x00000020, "MEMFLUSH" },
	{ 0x00000040, "MEMOP" },
	{ 0x00000080, "LBCONNECT" },
	{ 0x00000100, "LBREQ" },
	{ 0x00000200, "LBACK_TIMEOUT" },
	{ 0x00000400, "LBACK_EXTRA" },
	{ 0x00000800, "LBDAT_TIMEOUT" },
	{ 0x00001000, "LBDAT_EXTRA" },
	{ 0x00002000, "GPFIFO" },
	{ 0x00004000, "GPPTR" },
	{ 0x00008000, "GPENTRY" },
	{ 0x00010000, "GPCRC" },
	{ 0x00020000, "PBPTR" },
	{ 0x00040000, "PBENTRY" },
	{ 0x00080000, "PBCRC" },
	{ 0x00100000, "XBARCONNECT" },
	{ 0x00200000, "METHOD" },
	{ 0x00400000, "METHODCRC" },
	{ 0x00800000, "DEVICE" },
	{ 0x02000000, "SEMAPHORE" },
	{ 0x04000000, "ACQUIRE" },
	{ 0x08000000, "PRI" },
	{ 0x20000000, "NO_CTXSW_SEG" },
	{ 0x40000000, "PBSEG" },
	{ 0x80000000, "SIGNATURE" },
	{}
};

static void
gk104_fifo_intr_pbdma_0(struct gk104_fifo *fifo, int unit)
{
	struct nvkm_subdev *subdev = &fifo->base.engine.subdev;
	struct nvkm_device *device = subdev->device;
	u32 mask = nvkm_rd32(device, 0x04010c + (unit * 0x2000));
	u32 stat = nvkm_rd32(device, 0x040108 + (unit * 0x2000)) & mask;
	u32 addr = nvkm_rd32(device, 0x0400c0 + (unit * 0x2000));
	u32 data = nvkm_rd32(device, 0x0400c4 + (unit * 0x2000));
	u32 chid = nvkm_rd32(device, 0x040120 + (unit * 0x2000)) & 0xfff;
	u32 subc = (addr & 0x00070000) >> 16;
	u32 mthd = (addr & 0x00003ffc);
	u32 show = stat;
	struct nvkm_fifo_chan *chan;
	unsigned long flags;
	char msg[128];

	if (stat & 0x00800000) {
		if (device->sw) {
			if (nvkm_sw_mthd(device->sw, chid, subc, mthd, data))
				show &= ~0x00800000;
		}
	}

	nvkm_wr32(device, 0x0400c0 + (unit * 0x2000), 0x80600008);

	if (show) {
		nvkm_snprintbf(msg, sizeof(msg), gk104_fifo_pbdma_intr_0, show);
		chan = nvkm_fifo_chan_chid(&fifo->base, chid, &flags);
		nvkm_error(subdev, "PBDMA%d: %08x [%s] ch %d [%010llx %s] "
				   "subc %d mthd %04x data %08x\n",
			   unit, show, msg, chid, chan ? chan->inst->addr : 0,
			   chan ? chan->object.client->name : "unknown",
			   subc, mthd, data);
		nvkm_fifo_chan_put(&fifo->base, flags, &chan);
	}

	nvkm_wr32(device, 0x040108 + (unit * 0x2000), stat);
}
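
/* Second PBDMA interrupt group (HCE_* errors); its registers use the
 * same per-unit stride (unit * 0x2000) as the PBDMA_INTR_0 group above.
 */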
static const struct nvkm_bitfield gk104_fifo_pbdma_intr_1[] = {
	{ 0x00000001, "HCE_RE_ILLEGAL_OP" },
	{ 0x00000002, "HCE_RE_ALIGNB" },
	{ 0x00000004, "HCE_PRIV" },
	{ 0x00000008, "HCE_ILLEGAL_MTHD" },
	{ 0x00000010, "HCE_ILLEGAL_CLASS" },
	{}
};

static void
gk104_fifo_intr_pbdma_1(struct gk104_fifo *fifo, int unit)
{
	struct nvkm_subdev *subdev = &fifo->base.engine.subdev;
	struct nvkm_device *device = subdev->device;
	u32 mask = nvkm_rd32(device, 0x04014c + (unit * 0x2000));
	u32 stat = nvkm_rd32(device, 0x040148 + (unit * 0x2000)) & mask;
	u32 chid = nvkm_rd32(device, 0x040120 + (unit * 0x2000)) & 0xfff;
	char msg[128];

	if (stat) {
		nvkm_snprintbf(msg, sizeof(msg), gk104_fifo_pbdma_intr_1, stat);
		nvkm_error(subdev, "PBDMA%d: %08x [%s] ch %d %08x %08x\n",
			   unit, stat, msg, chid,
			   nvkm_rd32(device, 0x040150 + (unit * 0x2000)),
			   nvkm_rd32(device, 0x040154 + (unit * 0x2000)));
	}

	nvkm_wr32(device, 0x040148 + (unit * 0x2000), stat);
}

static void
gk104_fifo_intr_runlist(struct gk104_fifo *fifo)
{
	struct nvkm_device *device = fifo->base.engine.subdev.device;
	u32 mask = nvkm_rd32(device, 0x002a00);
	while (mask) {
		int runl = __ffs(mask);
		wake_up(&fifo->runlist[runl].wait);
		nvkm_wr32(device, 0x002a00, 1 << runl);
		mask &= ~(1 << runl);
	}
}

static void
gk104_fifo_intr_engine(struct gk104_fifo *fifo)
{
	nvkm_fifo_uevent(&fifo->base);
}
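
/* Top-level FIFO interrupt handler: read the status register (0x002100)
 * masked by the enable register (0x002140), dispatch and ACK each known
 * source, then report and mask off anything unrecognised so a bad bit
 * can't storm the CPU.
 */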
static void
gk104_fifo_intr(struct nvkm_fifo *base)
{
	struct gk104_fifo *fifo = gk104_fifo(base);
	struct nvkm_subdev *subdev = &fifo->base.engine.subdev;
	struct nvkm_device *device = subdev->device;
	u32 mask = nvkm_rd32(device, 0x002140);
	u32 stat = nvkm_rd32(device, 0x002100) & mask;

	if (stat & 0x00000001) {
		gk104_fifo_intr_bind(fifo);
		nvkm_wr32(device, 0x002100, 0x00000001);
		stat &= ~0x00000001;
	}

	if (stat & 0x00000010) {
		nvkm_error(subdev, "PIO_ERROR\n");
		nvkm_wr32(device, 0x002100, 0x00000010);
		stat &= ~0x00000010;
	}

	if (stat & 0x00000100) {
		gk104_fifo_intr_sched(fifo);
		nvkm_wr32(device, 0x002100, 0x00000100);
		stat &= ~0x00000100;
	}

	if (stat & 0x00010000) {
		gk104_fifo_intr_chsw(fifo);
		nvkm_wr32(device, 0x002100, 0x00010000);
		stat &= ~0x00010000;
	}

	if (stat & 0x00800000) {
		nvkm_error(subdev, "FB_FLUSH_TIMEOUT\n");
		nvkm_wr32(device, 0x002100, 0x00800000);
		stat &= ~0x00800000;
	}

	if (stat & 0x01000000) {
		nvkm_error(subdev, "LB_ERROR\n");
		nvkm_wr32(device, 0x002100, 0x01000000);
		stat &= ~0x01000000;
	}

	if (stat & 0x08000000) {
		gk104_fifo_intr_dropped_fault(fifo);
		nvkm_wr32(device, 0x002100, 0x08000000);
		stat &= ~0x08000000;
	}

	if (stat & 0x10000000) {
		u32 mask = nvkm_rd32(device, 0x00259c);
		while (mask) {
			u32 unit = __ffs(mask);
			gk104_fifo_intr_fault(fifo, unit);
			nvkm_wr32(device, 0x00259c, (1 << unit));
			mask &= ~(1 << unit);
		}
		stat &= ~0x10000000;
	}

	if (stat & 0x20000000) {
		u32 mask = nvkm_rd32(device, 0x0025a0);
		while (mask) {
			u32 unit = __ffs(mask);
			gk104_fifo_intr_pbdma_0(fifo, unit);
			gk104_fifo_intr_pbdma_1(fifo, unit);
			nvkm_wr32(device, 0x0025a0, (1 << unit));
			mask &= ~(1 << unit);
		}
		stat &= ~0x20000000;
	}

	if (stat & 0x40000000) {
		gk104_fifo_intr_runlist(fifo);
		stat &= ~0x40000000;
	}

	if (stat & 0x80000000) {
		nvkm_wr32(device, 0x002100, 0x80000000);
		gk104_fifo_intr_engine(fifo);
		stat &= ~0x80000000;
	}

	if (stat) {
		nvkm_error(subdev, "INTR %08x\n", stat);
		nvkm_mask(device, 0x002140, stat, 0x00000000);
		nvkm_wr32(device, 0x002100, stat);
	}
}

static void
gk104_fifo_fini(struct nvkm_fifo *base)
{
	struct gk104_fifo *fifo = gk104_fifo(base);
	struct nvkm_device *device = fifo->base.engine.subdev.device;
	flush_work(&fifo->recover.work);
	/* allow mmu fault interrupts, even when we're not using fifo */
	nvkm_mask(device, 0x002140, 0x10000000, 0x10000000);
}

static int
gk104_fifo_info(struct nvkm_fifo *base, u64 mthd, u64 *data)
{
	struct gk104_fifo *fifo = gk104_fifo(base);
	switch (mthd) {
	case NV_DEVICE_FIFO_RUNLISTS:
		*data = (1ULL << fifo->runlist_nr) - 1;
		return 0;
	case NV_DEVICE_FIFO_RUNLIST_ENGINES(0)...
	     NV_DEVICE_FIFO_RUNLIST_ENGINES(63): {
		int runl = mthd - NV_DEVICE_FIFO_RUNLIST_ENGINES(0), engn;
		if (runl < fifo->runlist_nr) {
			unsigned long engm = fifo->runlist[runl].engm;
			struct nvkm_engine *engine;
			*data = 0;
			for_each_set_bit(engn, &engm, fifo->engine_nr) {
				if ((engine = fifo->engine[engn].engine))
					*data |= BIT_ULL(engine->subdev.index);
			}
			return 0;
		}
	}
		return -EINVAL;
	default:
		return -EINVAL;
	}
}

static int
gk104_fifo_oneinit(struct nvkm_fifo *base)
{
	struct gk104_fifo *fifo = gk104_fifo(base);
	struct nvkm_subdev *subdev = &fifo->base.engine.subdev;
	struct nvkm_device *device = subdev->device;
	struct nvkm_vmm *bar = nvkm_bar_bar1_vmm(device);
	int engn, runl, pbid, ret, i, j;
	enum nvkm_devidx engidx;
	u32 *map;

	/* Determine number of PBDMAs by checking valid enable bits. */
	nvkm_wr32(device, 0x000204, 0xffffffff);
	fifo->pbdma_nr = hweight32(nvkm_rd32(device, 0x000204));
	nvkm_debug(subdev, "%d PBDMA(s)\n", fifo->pbdma_nr);

	/* Read PBDMA->runlist(s) mapping from HW. */
	if (!(map = kzalloc(sizeof(*map) * fifo->pbdma_nr, GFP_KERNEL)))
		return -ENOMEM;

	for (i = 0; i < fifo->pbdma_nr; i++)
		map[i] = nvkm_rd32(device, 0x002390 + (i * 0x04));

	/* Determine runlist configuration from topology device info. */
	i = 0;
	while ((int)(engidx = nvkm_top_engine(device, i++, &runl, &engn)) >= 0) {
		/* Determine which PBDMA handles requests for this engine. */
		for (j = 0, pbid = -1; j < fifo->pbdma_nr; j++) {
			if (map[j] & (1 << runl)) {
				pbid = j;
				break;
			}
		}

		nvkm_debug(subdev, "engine %2d: runlist %2d pbdma %2d (%s)\n",
			   engn, runl, pbid, nvkm_subdev_name[engidx]);

		fifo->engine[engn].engine = nvkm_device_engine(device, engidx);
		fifo->engine[engn].runl = runl;
		fifo->engine[engn].pbid = pbid;
		fifo->engine_nr = max(fifo->engine_nr, engn + 1);
		fifo->runlist[runl].engm |= 1 << engn;
		fifo->runlist_nr = max(fifo->runlist_nr, runl + 1);
	}

	kfree(map);

	for (i = 0; i < fifo->runlist_nr; i++) {
		for (j = 0; j < ARRAY_SIZE(fifo->runlist[i].mem); j++) {
			ret = nvkm_memory_new(device, NVKM_MEM_TARGET_INST,
					      fifo->base.nr * 2 /* TSG+chan */ *
					      fifo->func->runlist->size,
					      0x1000, false,
					      &fifo->runlist[i].mem[j]);
			if (ret)
				return ret;
		}

		init_waitqueue_head(&fifo->runlist[i].wait);
		INIT_LIST_HEAD(&fifo->runlist[i].cgrp);
		INIT_LIST_HEAD(&fifo->runlist[i].chan);
	}

	ret = nvkm_memory_new(device, NVKM_MEM_TARGET_INST,
			      fifo->base.nr * 0x200, 0x1000, true,
			      &fifo->user.mem);
	if (ret)
		return ret;

	ret = nvkm_vmm_get(bar, 12, nvkm_memory_size(fifo->user.mem),
			   &fifo->user.bar);
	if (ret)
		return ret;

	return nvkm_memory_map(fifo->user.mem, 0, bar, fifo->user.bar, NULL, 0);
}
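
/* Runtime init: enable all PBDMAs present, reset and unmask their
 * interrupt sources, point HW at the BAR1 mapping of the user area
 * (fifo->user), and unmask the top-level FIFO interrupts.
 */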
static void
gk104_fifo_init(struct nvkm_fifo *base)
{
	struct gk104_fifo *fifo = gk104_fifo(base);
	struct nvkm_device *device = fifo->base.engine.subdev.device;
	int i;

	/* Enable PBDMAs. */
	nvkm_wr32(device, 0x000204, (1 << fifo->pbdma_nr) - 1);

	/* PBDMA[n] */
	for (i = 0; i < fifo->pbdma_nr; i++) {
		nvkm_mask(device, 0x04013c + (i * 0x2000), 0x10000100, 0x00000000);
		nvkm_wr32(device, 0x040108 + (i * 0x2000), 0xffffffff); /* INTR */
		nvkm_wr32(device, 0x04010c + (i * 0x2000), 0xfffffeff); /* INTREN */
	}

	/* PBDMA[n].HCE */
	for (i = 0; i < fifo->pbdma_nr; i++) {
		nvkm_wr32(device, 0x040148 + (i * 0x2000), 0xffffffff); /* INTR */
		nvkm_wr32(device, 0x04014c + (i * 0x2000), 0xffffffff); /* INTREN */
	}

	nvkm_wr32(device, 0x002254, 0x10000000 | fifo->user.bar->addr >> 12);

	if (fifo->func->init_pbdma_timeout)
		fifo->func->init_pbdma_timeout(fifo);

	nvkm_wr32(device, 0x002100, 0xffffffff);
	nvkm_wr32(device, 0x002140, 0x7fffffff);
}

static void *
gk104_fifo_dtor(struct nvkm_fifo *base)
{
	struct gk104_fifo *fifo = gk104_fifo(base);
	struct nvkm_device *device = fifo->base.engine.subdev.device;
	int i;

	nvkm_vmm_put(nvkm_bar_bar1_vmm(device), &fifo->user.bar);
	nvkm_memory_unref(&fifo->user.mem);

	for (i = 0; i < fifo->runlist_nr; i++) {
		nvkm_memory_unref(&fifo->runlist[i].mem[1]);
		nvkm_memory_unref(&fifo->runlist[i].mem[0]);
	}

	return fifo;
}

static const struct nvkm_fifo_func
gk104_fifo_ = {
	.dtor = gk104_fifo_dtor,
	.oneinit = gk104_fifo_oneinit,
	.info = gk104_fifo_info,
	.init = gk104_fifo_init,
	.fini = gk104_fifo_fini,
	.intr = gk104_fifo_intr,
	.fault = gk104_fifo_fault,
	.uevent_init = gk104_fifo_uevent_init,
	.uevent_fini = gk104_fifo_uevent_fini,
	.recover_chan = gk104_fifo_recover_chan,
	.class_get = gk104_fifo_class_get,
	.class_new = gk104_fifo_class_new,
};

int
gk104_fifo_new_(const struct gk104_fifo_func *func, struct nvkm_device *device,
		int index, int nr, struct nvkm_fifo **pfifo)
{
	struct gk104_fifo *fifo;

	if (!(fifo = kzalloc(sizeof(*fifo), GFP_KERNEL)))
		return -ENOMEM;
	fifo->func = func;
	INIT_WORK(&fifo->recover.work, gk104_fifo_recover_work);
	*pfifo = &fifo->base;

	return nvkm_fifo_ctor(&gk104_fifo_, device, index, nr, &fifo->base);
}
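
/* MMU fault decode tables consumed by gk104_fifo_fault() via
 * fifo->func->fault.*.  Non-static, presumably so that derived chipset
 * implementations can share them.
 */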
const struct nvkm_enum
gk104_fifo_fault_access[] = {
	{ 0x0, "READ" },
	{ 0x1, "WRITE" },
	{}
};

const struct nvkm_enum
gk104_fifo_fault_engine[] = {
	{ 0x00, "GR", NULL, NVKM_ENGINE_GR },
	{ 0x01, "DISPLAY" },
	{ 0x02, "CAPTURE" },
	{ 0x03, "IFB", NULL, NVKM_ENGINE_IFB },
	{ 0x04, "BAR1", NULL, NVKM_SUBDEV_BAR },
	{ 0x05, "BAR2", NULL, NVKM_SUBDEV_INSTMEM },
	{ 0x06, "SCHED" },
	{ 0x07, "HOST0", NULL, NVKM_ENGINE_FIFO },
	{ 0x08, "HOST1", NULL, NVKM_ENGINE_FIFO },
	{ 0x09, "HOST2", NULL, NVKM_ENGINE_FIFO },
	{ 0x0a, "HOST3", NULL, NVKM_ENGINE_FIFO },
	{ 0x0b, "HOST4", NULL, NVKM_ENGINE_FIFO },
	{ 0x0c, "HOST5", NULL, NVKM_ENGINE_FIFO },
	{ 0x0d, "HOST6", NULL, NVKM_ENGINE_FIFO },
	{ 0x0e, "HOST7", NULL, NVKM_ENGINE_FIFO },
	{ 0x0f, "HOSTSR" },
	{ 0x10, "MSVLD", NULL, NVKM_ENGINE_MSVLD },
	{ 0x11, "MSPPP", NULL, NVKM_ENGINE_MSPPP },
	{ 0x13, "PERF" },
	{ 0x14, "MSPDEC", NULL, NVKM_ENGINE_MSPDEC },
	{ 0x15, "CE0", NULL, NVKM_ENGINE_CE0 },
	{ 0x16, "CE1", NULL, NVKM_ENGINE_CE1 },
	{ 0x17, "PMU" },
	{ 0x18, "PTP" },
	{ 0x19, "MSENC", NULL, NVKM_ENGINE_MSENC },
	{ 0x1b, "CE2", NULL, NVKM_ENGINE_CE2 },
	{}
};

const struct nvkm_enum
gk104_fifo_fault_reason[] = {
	{ 0x00, "PDE" },
	{ 0x01, "PDE_SIZE" },
	{ 0x02, "PTE" },
	{ 0x03, "VA_LIMIT_VIOLATION" },
	{ 0x04, "UNBOUND_INST_BLOCK" },
	{ 0x05, "PRIV_VIOLATION" },
	{ 0x06, "RO_VIOLATION" },
	{ 0x07, "WO_VIOLATION" },
	{ 0x08, "PITCH_MASK_VIOLATION" },
	{ 0x09, "WORK_CREATION" },
	{ 0x0a, "UNSUPPORTED_APERTURE" },
	{ 0x0b, "COMPRESSION_FAILURE" },
	{ 0x0c, "UNSUPPORTED_KIND" },
	{ 0x0d, "REGION_VIOLATION" },
	{ 0x0e, "BOTH_PTES_VALID" },
	{ 0x0f, "INFO_TYPE_POISONED" },
	{}
};

const struct nvkm_enum
gk104_fifo_fault_hubclient[] = {
	{ 0x00, "VIP" },
	{ 0x01, "CE0" },
	{ 0x02, "CE1" },
	{ 0x03, "DNISO" },
	{ 0x04, "FE" },
	{ 0x05, "FECS" },
	{ 0x06, "HOST" },
	{ 0x07, "HOST_CPU" },
	{ 0x08, "HOST_CPU_NB" },
	{ 0x09, "ISO" },
	{ 0x0a, "MMU" },
	{ 0x0b, "MSPDEC" },
	{ 0x0c, "MSPPP" },
	{ 0x0d, "MSVLD" },
	{ 0x0e, "NISO" },
	{ 0x0f, "P2P" },
	{ 0x10, "PD" },
	{ 0x11, "PERF" },
	{ 0x12, "PMU" },
	{ 0x13, "RASTERTWOD" },
	{ 0x14, "SCC" },
	{ 0x15, "SCC_NB" },
	{ 0x16, "SEC" },
	{ 0x17, "SSYNC" },
	{ 0x18, "GR_CE" },
	{ 0x19, "CE2" },
	{ 0x1a, "XV" },
	{ 0x1b, "MMU_NB" },
	{ 0x1c, "MSENC" },
	{ 0x1d, "DFALCON" },
	{ 0x1e, "SKED" },
	{ 0x1f, "AFALCON" },
	{}
};

const struct nvkm_enum
gk104_fifo_fault_gpcclient[] = {
	{ 0x00, "L1_0" }, { 0x01, "T1_0" }, { 0x02, "PE_0" },
	{ 0x03, "L1_1" }, { 0x04, "T1_1" }, { 0x05, "PE_1" },
	{ 0x06, "L1_2" }, { 0x07, "T1_2" }, { 0x08, "PE_2" },
	{ 0x09, "L1_3" }, { 0x0a, "T1_3" }, { 0x0b, "PE_3" },
	{ 0x0c, "RAST" },
	{ 0x0d, "GCC" },
	{ 0x0e, "GPCCS" },
	{ 0x0f, "PROP_0" },
	{ 0x10, "PROP_1" },
	{ 0x11, "PROP_2" },
	{ 0x12, "PROP_3" },
	{ 0x13, "L1_4" }, { 0x14, "T1_4" }, { 0x15, "PE_4" },
	{ 0x16, "L1_5" }, { 0x17, "T1_5" }, { 0x18, "PE_5" },
	{ 0x19, "L1_6" }, { 0x1a, "T1_6" }, { 0x1b, "PE_6" },
	{ 0x1c, "L1_7" }, { 0x1d, "T1_7" }, { 0x1e, "PE_7" },
	{ 0x1f, "GPM" },
	{ 0x20, "LTP_UTLB_0" },
	{ 0x21, "LTP_UTLB_1" },
	{ 0x22, "LTP_UTLB_2" },
	{ 0x23, "LTP_UTLB_3" },
	{ 0x24, "GPC_RGG_UTLB" },
	{}
};

static const struct gk104_fifo_func
gk104_fifo = {
	.fault.access = gk104_fifo_fault_access,
	.fault.engine = gk104_fifo_fault_engine,
	.fault.reason = gk104_fifo_fault_reason,
	.fault.hubclient = gk104_fifo_fault_hubclient,
	.fault.gpcclient = gk104_fifo_fault_gpcclient,
	.runlist = &gk104_fifo_runlist,
	.chan = {{0,0,KEPLER_CHANNEL_GPFIFO_A}, gk104_fifo_gpfifo_new },
};

int
gk104_fifo_new(struct nvkm_device *device, int index, struct nvkm_fifo **pfifo)
{
	return gk104_fifo_new_(&gk104_fifo, device, index, 4096, pfifo);
}