1 /* 2 * Copyright 2012 Red Hat Inc. 3 * 4 * Permission is hereby granted, free of charge, to any person obtaining a 5 * copy of this software and associated documentation files (the "Software"), 6 * to deal in the Software without restriction, including without limitation 7 * the rights to use, copy, modify, merge, publish, distribute, sublicense, 8 * and/or sell copies of the Software, and to permit persons to whom the 9 * Software is furnished to do so, subject to the following conditions: 10 * 11 * The above copyright notice and this permission notice shall be included in 12 * all copies or substantial portions of the Software. 13 * 14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL 17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR 18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, 19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR 20 * OTHER DEALINGS IN THE SOFTWARE. 21 * 22 * Authors: Ben Skeggs 23 */ 24 #include "gk104.h" 25 #include "changk104.h" 26 27 #include <core/client.h> 28 #include <core/gpuobj.h> 29 #include <subdev/bar.h> 30 #include <subdev/fb.h> 31 #include <subdev/timer.h> 32 #include <subdev/top.h> 33 #include <engine/sw.h> 34 35 #include <nvif/class.h> 36 37 struct gk104_fifo_engine_status { 38 bool busy; 39 bool faulted; 40 bool chsw; 41 bool save; 42 bool load; 43 struct { 44 bool tsg; 45 u32 id; 46 } prev, next, *chan; 47 }; 48 49 static void 50 gk104_fifo_engine_status(struct gk104_fifo *fifo, int engn, 51 struct gk104_fifo_engine_status *status) 52 { 53 struct nvkm_engine *engine = fifo->engine[engn].engine; 54 struct nvkm_subdev *subdev = &fifo->base.engine.subdev; 55 struct nvkm_device *device = subdev->device; 56 u32 stat = nvkm_rd32(device, 0x002640 + (engn * 0x08)); 57 58 status->busy = !!(stat & 0x80000000); 59 status->faulted = !!(stat & 0x40000000); 60 status->next.tsg = !!(stat & 0x10000000); 61 status->next.id = (stat & 0x0fff0000) >> 16; 62 status->chsw = !!(stat & 0x00008000); 63 status->save = !!(stat & 0x00004000); 64 status->load = !!(stat & 0x00002000); 65 status->prev.tsg = !!(stat & 0x00001000); 66 status->prev.id = (stat & 0x00000fff); 67 status->chan = NULL; 68 69 if (status->busy && status->chsw) { 70 if (status->load && status->save) { 71 if (engine && nvkm_engine_chsw_load(engine)) 72 status->chan = &status->next; 73 else 74 status->chan = &status->prev; 75 } else 76 if (status->load) { 77 status->chan = &status->next; 78 } else { 79 status->chan = &status->prev; 80 } 81 } else 82 if (status->load) { 83 status->chan = &status->prev; 84 } 85 86 nvkm_debug(subdev, "engine %02d: busy %d faulted %d chsw %d " 87 "save %d load %d %sid %d%s-> %sid %d%s\n", 88 engn, status->busy, status->faulted, 89 status->chsw, status->save, status->load, 90 status->prev.tsg ? "tsg" : "ch", status->prev.id, 91 status->chan == &status->prev ? "*" : " ", 92 status->next.tsg ? "tsg" : "ch", status->next.id, 93 status->chan == &status->next ? "*" : " "); 94 } 95 96 static int 97 gk104_fifo_class_get(struct nvkm_fifo *base, int index, 98 const struct nvkm_fifo_chan_oclass **psclass) 99 { 100 struct gk104_fifo *fifo = gk104_fifo(base); 101 int c = 0; 102 103 while ((*psclass = fifo->func->chan[c])) { 104 if (c++ == index) 105 return 0; 106 } 107 108 return c; 109 } 110 111 static void 112 gk104_fifo_uevent_fini(struct nvkm_fifo *fifo) 113 { 114 struct nvkm_device *device = fifo->engine.subdev.device; 115 nvkm_mask(device, 0x002140, 0x80000000, 0x00000000); 116 } 117 118 static void 119 gk104_fifo_uevent_init(struct nvkm_fifo *fifo) 120 { 121 struct nvkm_device *device = fifo->engine.subdev.device; 122 nvkm_mask(device, 0x002140, 0x80000000, 0x80000000); 123 } 124 125 void 126 gk104_fifo_runlist_commit(struct gk104_fifo *fifo, int runl) 127 { 128 struct gk104_fifo_chan *chan; 129 struct nvkm_subdev *subdev = &fifo->base.engine.subdev; 130 struct nvkm_device *device = subdev->device; 131 struct nvkm_memory *mem; 132 int nr = 0; 133 int target; 134 135 mutex_lock(&subdev->mutex); 136 mem = fifo->runlist[runl].mem[fifo->runlist[runl].next]; 137 fifo->runlist[runl].next = !fifo->runlist[runl].next; 138 139 nvkm_kmap(mem); 140 list_for_each_entry(chan, &fifo->runlist[runl].chan, head) { 141 nvkm_wo32(mem, (nr * 8) + 0, chan->base.chid); 142 nvkm_wo32(mem, (nr * 8) + 4, 0x00000000); 143 nr++; 144 } 145 nvkm_done(mem); 146 147 switch (nvkm_memory_target(mem)) { 148 case NVKM_MEM_TARGET_VRAM: target = 0; break; 149 case NVKM_MEM_TARGET_NCOH: target = 3; break; 150 default: 151 WARN_ON(1); 152 goto unlock; 153 } 154 155 nvkm_wr32(device, 0x002270, (nvkm_memory_addr(mem) >> 12) | 156 (target << 28)); 157 nvkm_wr32(device, 0x002274, (runl << 20) | nr); 158 159 if (wait_event_timeout(fifo->runlist[runl].wait, 160 !(nvkm_rd32(device, 0x002284 + (runl * 0x08)) 161 & 0x00100000), 162 msecs_to_jiffies(2000)) == 0) 163 nvkm_error(subdev, "runlist %d update timeout\n", runl); 164 unlock: 165 mutex_unlock(&subdev->mutex); 166 } 167 168 void 169 gk104_fifo_runlist_remove(struct gk104_fifo *fifo, struct gk104_fifo_chan *chan) 170 { 171 mutex_lock(&fifo->base.engine.subdev.mutex); 172 list_del_init(&chan->head); 173 mutex_unlock(&fifo->base.engine.subdev.mutex); 174 } 175 176 void 177 gk104_fifo_runlist_insert(struct gk104_fifo *fifo, struct gk104_fifo_chan *chan) 178 { 179 mutex_lock(&fifo->base.engine.subdev.mutex); 180 list_add_tail(&chan->head, &fifo->runlist[chan->runl].chan); 181 mutex_unlock(&fifo->base.engine.subdev.mutex); 182 } 183 184 static void 185 gk104_fifo_recover_work(struct work_struct *w) 186 { 187 struct gk104_fifo *fifo = container_of(w, typeof(*fifo), recover.work); 188 struct nvkm_device *device = fifo->base.engine.subdev.device; 189 struct nvkm_engine *engine; 190 unsigned long flags; 191 u32 engm, runm, todo; 192 int engn, runl; 193 194 spin_lock_irqsave(&fifo->base.lock, flags); 195 runm = fifo->recover.runm; 196 engm = fifo->recover.engm; 197 fifo->recover.engm = 0; 198 fifo->recover.runm = 0; 199 spin_unlock_irqrestore(&fifo->base.lock, flags); 200 201 nvkm_mask(device, 0x002630, runm, runm); 202 203 for (todo = engm; engn = __ffs(todo), todo; todo &= ~BIT(engn)) { 204 if ((engine = fifo->engine[engn].engine)) { 205 nvkm_subdev_fini(&engine->subdev, false); 206 WARN_ON(nvkm_subdev_init(&engine->subdev)); 207 } 208 } 209 210 for (todo = runm; runl = __ffs(todo), todo; todo &= ~BIT(runl)) 211 gk104_fifo_runlist_commit(fifo, runl); 212 213 nvkm_wr32(device, 0x00262c, runm); 214 nvkm_mask(device, 0x002630, runm, 0x00000000); 215 } 216 217 static void gk104_fifo_recover_engn(struct gk104_fifo *fifo, int engn); 218 219 static void 220 gk104_fifo_recover_runl(struct gk104_fifo *fifo, int runl) 221 { 222 struct nvkm_subdev *subdev = &fifo->base.engine.subdev; 223 struct nvkm_device *device = subdev->device; 224 const u32 runm = BIT(runl); 225 226 assert_spin_locked(&fifo->base.lock); 227 if (fifo->recover.runm & runm) 228 return; 229 fifo->recover.runm |= runm; 230 231 /* Block runlist to prevent channel assignment(s) from changing. */ 232 nvkm_mask(device, 0x002630, runm, runm); 233 234 /* Schedule recovery. */ 235 nvkm_warn(subdev, "runlist %d: scheduled for recovery\n", runl); 236 schedule_work(&fifo->recover.work); 237 } 238 239 static void 240 gk104_fifo_recover_chan(struct nvkm_fifo *base, int chid) 241 { 242 struct gk104_fifo *fifo = gk104_fifo(base); 243 struct nvkm_subdev *subdev = &fifo->base.engine.subdev; 244 struct nvkm_device *device = subdev->device; 245 const u32 stat = nvkm_rd32(device, 0x800004 + (chid * 0x08)); 246 const u32 runl = (stat & 0x000f0000) >> 16; 247 const bool used = (stat & 0x00000001); 248 unsigned long engn, engm = fifo->runlist[runl].engm; 249 struct gk104_fifo_chan *chan; 250 251 assert_spin_locked(&fifo->base.lock); 252 if (!used) 253 return; 254 255 /* Lookup SW state for channel, and mark it as dead. */ 256 list_for_each_entry(chan, &fifo->runlist[runl].chan, head) { 257 if (chan->base.chid == chid) { 258 list_del_init(&chan->head); 259 chan->killed = true; 260 nvkm_fifo_kevent(&fifo->base, chid); 261 break; 262 } 263 } 264 265 /* Disable channel. */ 266 nvkm_wr32(device, 0x800004 + (chid * 0x08), stat | 0x00000800); 267 nvkm_warn(subdev, "channel %d: killed\n", chid); 268 269 /* Block channel assignments from changing during recovery. */ 270 gk104_fifo_recover_runl(fifo, runl); 271 272 /* Schedule recovery for any engines the channel is on. */ 273 for_each_set_bit(engn, &engm, fifo->engine_nr) { 274 struct gk104_fifo_engine_status status; 275 gk104_fifo_engine_status(fifo, engn, &status); 276 if (!status.chan || status.chan->id != chid) 277 continue; 278 gk104_fifo_recover_engn(fifo, engn); 279 } 280 } 281 282 static void 283 gk104_fifo_recover_engn(struct gk104_fifo *fifo, int engn) 284 { 285 struct nvkm_engine *engine = fifo->engine[engn].engine; 286 struct nvkm_subdev *subdev = &fifo->base.engine.subdev; 287 struct nvkm_device *device = subdev->device; 288 const u32 runl = fifo->engine[engn].runl; 289 const u32 engm = BIT(engn); 290 struct gk104_fifo_engine_status status; 291 int mmui = -1; 292 293 assert_spin_locked(&fifo->base.lock); 294 if (fifo->recover.engm & engm) 295 return; 296 fifo->recover.engm |= engm; 297 298 /* Block channel assignments from changing during recovery. */ 299 gk104_fifo_recover_runl(fifo, runl); 300 301 /* Determine which channel (if any) is currently on the engine. */ 302 gk104_fifo_engine_status(fifo, engn, &status); 303 if (status.chan) { 304 /* The channel is not longer viable, kill it. */ 305 gk104_fifo_recover_chan(&fifo->base, status.chan->id); 306 } 307 308 /* Determine MMU fault ID for the engine, if we're not being 309 * called from the fault handler already. 310 */ 311 if (!status.faulted && engine) { 312 mmui = nvkm_top_fault_id(device, engine->subdev.index); 313 if (mmui < 0) { 314 const struct nvkm_enum *en = fifo->func->fault.engine; 315 for (; en && en->name; en++) { 316 if (en->data2 == engine->subdev.index) { 317 mmui = en->value; 318 break; 319 } 320 } 321 } 322 WARN_ON(mmui < 0); 323 } 324 325 /* Trigger a MMU fault for the engine. 326 * 327 * No good idea why this is needed, but nvgpu does something similar, 328 * and it makes recovery from CTXSW_TIMEOUT a lot more reliable. 329 */ 330 if (mmui >= 0) { 331 nvkm_wr32(device, 0x002a30 + (engn * 0x04), 0x00000100 | mmui); 332 333 /* Wait for fault to trigger. */ 334 nvkm_msec(device, 2000, 335 gk104_fifo_engine_status(fifo, engn, &status); 336 if (status.faulted) 337 break; 338 ); 339 340 /* Release MMU fault trigger, and ACK the fault. */ 341 nvkm_wr32(device, 0x002a30 + (engn * 0x04), 0x00000000); 342 nvkm_wr32(device, 0x00259c, BIT(mmui)); 343 nvkm_wr32(device, 0x002100, 0x10000000); 344 } 345 346 /* Schedule recovery. */ 347 nvkm_warn(subdev, "engine %d: scheduled for recovery\n", engn); 348 schedule_work(&fifo->recover.work); 349 } 350 351 static const struct nvkm_enum 352 gk104_fifo_bind_reason[] = { 353 { 0x01, "BIND_NOT_UNBOUND" }, 354 { 0x02, "SNOOP_WITHOUT_BAR1" }, 355 { 0x03, "UNBIND_WHILE_RUNNING" }, 356 { 0x05, "INVALID_RUNLIST" }, 357 { 0x06, "INVALID_CTX_TGT" }, 358 { 0x0b, "UNBIND_WHILE_PARKED" }, 359 {} 360 }; 361 362 static void 363 gk104_fifo_intr_bind(struct gk104_fifo *fifo) 364 { 365 struct nvkm_subdev *subdev = &fifo->base.engine.subdev; 366 struct nvkm_device *device = subdev->device; 367 u32 intr = nvkm_rd32(device, 0x00252c); 368 u32 code = intr & 0x000000ff; 369 const struct nvkm_enum *en = 370 nvkm_enum_find(gk104_fifo_bind_reason, code); 371 372 nvkm_error(subdev, "BIND_ERROR %02x [%s]\n", code, en ? en->name : ""); 373 } 374 375 static const struct nvkm_enum 376 gk104_fifo_sched_reason[] = { 377 { 0x0a, "CTXSW_TIMEOUT" }, 378 {} 379 }; 380 381 static void 382 gk104_fifo_intr_sched_ctxsw(struct gk104_fifo *fifo) 383 { 384 struct nvkm_device *device = fifo->base.engine.subdev.device; 385 unsigned long flags, engm = 0; 386 u32 engn; 387 388 /* We need to ACK the SCHED_ERROR here, and prevent it reasserting, 389 * as MMU_FAULT cannot be triggered while it's pending. 390 */ 391 spin_lock_irqsave(&fifo->base.lock, flags); 392 nvkm_mask(device, 0x002140, 0x00000100, 0x00000000); 393 nvkm_wr32(device, 0x002100, 0x00000100); 394 395 for (engn = 0; engn < fifo->engine_nr; engn++) { 396 struct gk104_fifo_engine_status status; 397 398 gk104_fifo_engine_status(fifo, engn, &status); 399 if (!status.busy || !status.chsw) 400 continue; 401 402 engm |= BIT(engn); 403 } 404 405 for_each_set_bit(engn, &engm, fifo->engine_nr) 406 gk104_fifo_recover_engn(fifo, engn); 407 408 nvkm_mask(device, 0x002140, 0x00000100, 0x00000100); 409 spin_unlock_irqrestore(&fifo->base.lock, flags); 410 } 411 412 static void 413 gk104_fifo_intr_sched(struct gk104_fifo *fifo) 414 { 415 struct nvkm_subdev *subdev = &fifo->base.engine.subdev; 416 struct nvkm_device *device = subdev->device; 417 u32 intr = nvkm_rd32(device, 0x00254c); 418 u32 code = intr & 0x000000ff; 419 const struct nvkm_enum *en = 420 nvkm_enum_find(gk104_fifo_sched_reason, code); 421 422 nvkm_error(subdev, "SCHED_ERROR %02x [%s]\n", code, en ? en->name : ""); 423 424 switch (code) { 425 case 0x0a: 426 gk104_fifo_intr_sched_ctxsw(fifo); 427 break; 428 default: 429 break; 430 } 431 } 432 433 static void 434 gk104_fifo_intr_chsw(struct gk104_fifo *fifo) 435 { 436 struct nvkm_subdev *subdev = &fifo->base.engine.subdev; 437 struct nvkm_device *device = subdev->device; 438 u32 stat = nvkm_rd32(device, 0x00256c); 439 nvkm_error(subdev, "CHSW_ERROR %08x\n", stat); 440 nvkm_wr32(device, 0x00256c, stat); 441 } 442 443 static void 444 gk104_fifo_intr_dropped_fault(struct gk104_fifo *fifo) 445 { 446 struct nvkm_subdev *subdev = &fifo->base.engine.subdev; 447 struct nvkm_device *device = subdev->device; 448 u32 stat = nvkm_rd32(device, 0x00259c); 449 nvkm_error(subdev, "DROPPED_MMU_FAULT %08x\n", stat); 450 } 451 452 static void 453 gk104_fifo_intr_fault(struct gk104_fifo *fifo, int unit) 454 { 455 struct nvkm_subdev *subdev = &fifo->base.engine.subdev; 456 struct nvkm_device *device = subdev->device; 457 u32 inst = nvkm_rd32(device, 0x002800 + (unit * 0x10)); 458 u32 valo = nvkm_rd32(device, 0x002804 + (unit * 0x10)); 459 u32 vahi = nvkm_rd32(device, 0x002808 + (unit * 0x10)); 460 u32 stat = nvkm_rd32(device, 0x00280c + (unit * 0x10)); 461 u32 gpc = (stat & 0x1f000000) >> 24; 462 u32 client = (stat & 0x00001f00) >> 8; 463 u32 write = (stat & 0x00000080); 464 u32 hub = (stat & 0x00000040); 465 u32 reason = (stat & 0x0000000f); 466 const struct nvkm_enum *er, *eu, *ec; 467 struct nvkm_engine *engine = NULL; 468 struct nvkm_fifo_chan *chan; 469 unsigned long flags; 470 char gpcid[8] = "", en[16] = ""; 471 int engn; 472 473 er = nvkm_enum_find(fifo->func->fault.reason, reason); 474 eu = nvkm_enum_find(fifo->func->fault.engine, unit); 475 if (hub) { 476 ec = nvkm_enum_find(fifo->func->fault.hubclient, client); 477 } else { 478 ec = nvkm_enum_find(fifo->func->fault.gpcclient, client); 479 snprintf(gpcid, sizeof(gpcid), "GPC%d/", gpc); 480 } 481 482 if (eu && eu->data2) { 483 switch (eu->data2) { 484 case NVKM_SUBDEV_BAR: 485 nvkm_mask(device, 0x001704, 0x00000000, 0x00000000); 486 break; 487 case NVKM_SUBDEV_INSTMEM: 488 nvkm_mask(device, 0x001714, 0x00000000, 0x00000000); 489 break; 490 case NVKM_ENGINE_IFB: 491 nvkm_mask(device, 0x001718, 0x00000000, 0x00000000); 492 break; 493 default: 494 engine = nvkm_device_engine(device, eu->data2); 495 break; 496 } 497 } 498 499 if (eu == NULL) { 500 enum nvkm_devidx engidx = nvkm_top_fault(device, unit); 501 if (engidx < NVKM_SUBDEV_NR) { 502 const char *src = nvkm_subdev_name[engidx]; 503 char *dst = en; 504 do { 505 *dst++ = toupper(*src++); 506 } while(*src); 507 engine = nvkm_device_engine(device, engidx); 508 } 509 } else { 510 snprintf(en, sizeof(en), "%s", eu->name); 511 } 512 513 spin_lock_irqsave(&fifo->base.lock, flags); 514 chan = nvkm_fifo_chan_inst_locked(&fifo->base, (u64)inst << 12); 515 516 nvkm_error(subdev, 517 "%s fault at %010llx engine %02x [%s] client %02x [%s%s] " 518 "reason %02x [%s] on channel %d [%010llx %s]\n", 519 write ? "write" : "read", (u64)vahi << 32 | valo, 520 unit, en, client, gpcid, ec ? ec->name : "", 521 reason, er ? er->name : "", chan ? chan->chid : -1, 522 (u64)inst << 12, 523 chan ? chan->object.client->name : "unknown"); 524 525 526 /* Kill the channel that caused the fault. */ 527 if (chan) 528 gk104_fifo_recover_chan(&fifo->base, chan->chid); 529 530 /* Channel recovery will probably have already done this for the 531 * correct engine(s), but just in case we can't find the channel 532 * information... 533 */ 534 for (engn = 0; engn < fifo->engine_nr && engine; engn++) { 535 if (fifo->engine[engn].engine == engine) { 536 gk104_fifo_recover_engn(fifo, engn); 537 break; 538 } 539 } 540 541 spin_unlock_irqrestore(&fifo->base.lock, flags); 542 } 543 544 static const struct nvkm_bitfield gk104_fifo_pbdma_intr_0[] = { 545 { 0x00000001, "MEMREQ" }, 546 { 0x00000002, "MEMACK_TIMEOUT" }, 547 { 0x00000004, "MEMACK_EXTRA" }, 548 { 0x00000008, "MEMDAT_TIMEOUT" }, 549 { 0x00000010, "MEMDAT_EXTRA" }, 550 { 0x00000020, "MEMFLUSH" }, 551 { 0x00000040, "MEMOP" }, 552 { 0x00000080, "LBCONNECT" }, 553 { 0x00000100, "LBREQ" }, 554 { 0x00000200, "LBACK_TIMEOUT" }, 555 { 0x00000400, "LBACK_EXTRA" }, 556 { 0x00000800, "LBDAT_TIMEOUT" }, 557 { 0x00001000, "LBDAT_EXTRA" }, 558 { 0x00002000, "GPFIFO" }, 559 { 0x00004000, "GPPTR" }, 560 { 0x00008000, "GPENTRY" }, 561 { 0x00010000, "GPCRC" }, 562 { 0x00020000, "PBPTR" }, 563 { 0x00040000, "PBENTRY" }, 564 { 0x00080000, "PBCRC" }, 565 { 0x00100000, "XBARCONNECT" }, 566 { 0x00200000, "METHOD" }, 567 { 0x00400000, "METHODCRC" }, 568 { 0x00800000, "DEVICE" }, 569 { 0x02000000, "SEMAPHORE" }, 570 { 0x04000000, "ACQUIRE" }, 571 { 0x08000000, "PRI" }, 572 { 0x20000000, "NO_CTXSW_SEG" }, 573 { 0x40000000, "PBSEG" }, 574 { 0x80000000, "SIGNATURE" }, 575 {} 576 }; 577 578 static void 579 gk104_fifo_intr_pbdma_0(struct gk104_fifo *fifo, int unit) 580 { 581 struct nvkm_subdev *subdev = &fifo->base.engine.subdev; 582 struct nvkm_device *device = subdev->device; 583 u32 mask = nvkm_rd32(device, 0x04010c + (unit * 0x2000)); 584 u32 stat = nvkm_rd32(device, 0x040108 + (unit * 0x2000)) & mask; 585 u32 addr = nvkm_rd32(device, 0x0400c0 + (unit * 0x2000)); 586 u32 data = nvkm_rd32(device, 0x0400c4 + (unit * 0x2000)); 587 u32 chid = nvkm_rd32(device, 0x040120 + (unit * 0x2000)) & 0xfff; 588 u32 subc = (addr & 0x00070000) >> 16; 589 u32 mthd = (addr & 0x00003ffc); 590 u32 show = stat; 591 struct nvkm_fifo_chan *chan; 592 unsigned long flags; 593 char msg[128]; 594 595 if (stat & 0x00800000) { 596 if (device->sw) { 597 if (nvkm_sw_mthd(device->sw, chid, subc, mthd, data)) 598 show &= ~0x00800000; 599 } 600 } 601 602 nvkm_wr32(device, 0x0400c0 + (unit * 0x2000), 0x80600008); 603 604 if (show) { 605 nvkm_snprintbf(msg, sizeof(msg), gk104_fifo_pbdma_intr_0, show); 606 chan = nvkm_fifo_chan_chid(&fifo->base, chid, &flags); 607 nvkm_error(subdev, "PBDMA%d: %08x [%s] ch %d [%010llx %s] " 608 "subc %d mthd %04x data %08x\n", 609 unit, show, msg, chid, chan ? chan->inst->addr : 0, 610 chan ? chan->object.client->name : "unknown", 611 subc, mthd, data); 612 nvkm_fifo_chan_put(&fifo->base, flags, &chan); 613 } 614 615 nvkm_wr32(device, 0x040108 + (unit * 0x2000), stat); 616 } 617 618 static const struct nvkm_bitfield gk104_fifo_pbdma_intr_1[] = { 619 { 0x00000001, "HCE_RE_ILLEGAL_OP" }, 620 { 0x00000002, "HCE_RE_ALIGNB" }, 621 { 0x00000004, "HCE_PRIV" }, 622 { 0x00000008, "HCE_ILLEGAL_MTHD" }, 623 { 0x00000010, "HCE_ILLEGAL_CLASS" }, 624 {} 625 }; 626 627 static void 628 gk104_fifo_intr_pbdma_1(struct gk104_fifo *fifo, int unit) 629 { 630 struct nvkm_subdev *subdev = &fifo->base.engine.subdev; 631 struct nvkm_device *device = subdev->device; 632 u32 mask = nvkm_rd32(device, 0x04014c + (unit * 0x2000)); 633 u32 stat = nvkm_rd32(device, 0x040148 + (unit * 0x2000)) & mask; 634 u32 chid = nvkm_rd32(device, 0x040120 + (unit * 0x2000)) & 0xfff; 635 char msg[128]; 636 637 if (stat) { 638 nvkm_snprintbf(msg, sizeof(msg), gk104_fifo_pbdma_intr_1, stat); 639 nvkm_error(subdev, "PBDMA%d: %08x [%s] ch %d %08x %08x\n", 640 unit, stat, msg, chid, 641 nvkm_rd32(device, 0x040150 + (unit * 0x2000)), 642 nvkm_rd32(device, 0x040154 + (unit * 0x2000))); 643 } 644 645 nvkm_wr32(device, 0x040148 + (unit * 0x2000), stat); 646 } 647 648 static void 649 gk104_fifo_intr_runlist(struct gk104_fifo *fifo) 650 { 651 struct nvkm_device *device = fifo->base.engine.subdev.device; 652 u32 mask = nvkm_rd32(device, 0x002a00); 653 while (mask) { 654 int runl = __ffs(mask); 655 wake_up(&fifo->runlist[runl].wait); 656 nvkm_wr32(device, 0x002a00, 1 << runl); 657 mask &= ~(1 << runl); 658 } 659 } 660 661 static void 662 gk104_fifo_intr_engine(struct gk104_fifo *fifo) 663 { 664 nvkm_fifo_uevent(&fifo->base); 665 } 666 667 static void 668 gk104_fifo_intr(struct nvkm_fifo *base) 669 { 670 struct gk104_fifo *fifo = gk104_fifo(base); 671 struct nvkm_subdev *subdev = &fifo->base.engine.subdev; 672 struct nvkm_device *device = subdev->device; 673 u32 mask = nvkm_rd32(device, 0x002140); 674 u32 stat = nvkm_rd32(device, 0x002100) & mask; 675 676 if (stat & 0x00000001) { 677 gk104_fifo_intr_bind(fifo); 678 nvkm_wr32(device, 0x002100, 0x00000001); 679 stat &= ~0x00000001; 680 } 681 682 if (stat & 0x00000010) { 683 nvkm_error(subdev, "PIO_ERROR\n"); 684 nvkm_wr32(device, 0x002100, 0x00000010); 685 stat &= ~0x00000010; 686 } 687 688 if (stat & 0x00000100) { 689 gk104_fifo_intr_sched(fifo); 690 nvkm_wr32(device, 0x002100, 0x00000100); 691 stat &= ~0x00000100; 692 } 693 694 if (stat & 0x00010000) { 695 gk104_fifo_intr_chsw(fifo); 696 nvkm_wr32(device, 0x002100, 0x00010000); 697 stat &= ~0x00010000; 698 } 699 700 if (stat & 0x00800000) { 701 nvkm_error(subdev, "FB_FLUSH_TIMEOUT\n"); 702 nvkm_wr32(device, 0x002100, 0x00800000); 703 stat &= ~0x00800000; 704 } 705 706 if (stat & 0x01000000) { 707 nvkm_error(subdev, "LB_ERROR\n"); 708 nvkm_wr32(device, 0x002100, 0x01000000); 709 stat &= ~0x01000000; 710 } 711 712 if (stat & 0x08000000) { 713 gk104_fifo_intr_dropped_fault(fifo); 714 nvkm_wr32(device, 0x002100, 0x08000000); 715 stat &= ~0x08000000; 716 } 717 718 if (stat & 0x10000000) { 719 u32 mask = nvkm_rd32(device, 0x00259c); 720 while (mask) { 721 u32 unit = __ffs(mask); 722 gk104_fifo_intr_fault(fifo, unit); 723 nvkm_wr32(device, 0x00259c, (1 << unit)); 724 mask &= ~(1 << unit); 725 } 726 stat &= ~0x10000000; 727 } 728 729 if (stat & 0x20000000) { 730 u32 mask = nvkm_rd32(device, 0x0025a0); 731 while (mask) { 732 u32 unit = __ffs(mask); 733 gk104_fifo_intr_pbdma_0(fifo, unit); 734 gk104_fifo_intr_pbdma_1(fifo, unit); 735 nvkm_wr32(device, 0x0025a0, (1 << unit)); 736 mask &= ~(1 << unit); 737 } 738 stat &= ~0x20000000; 739 } 740 741 if (stat & 0x40000000) { 742 gk104_fifo_intr_runlist(fifo); 743 stat &= ~0x40000000; 744 } 745 746 if (stat & 0x80000000) { 747 nvkm_wr32(device, 0x002100, 0x80000000); 748 gk104_fifo_intr_engine(fifo); 749 stat &= ~0x80000000; 750 } 751 752 if (stat) { 753 nvkm_error(subdev, "INTR %08x\n", stat); 754 nvkm_mask(device, 0x002140, stat, 0x00000000); 755 nvkm_wr32(device, 0x002100, stat); 756 } 757 } 758 759 static void 760 gk104_fifo_fini(struct nvkm_fifo *base) 761 { 762 struct gk104_fifo *fifo = gk104_fifo(base); 763 struct nvkm_device *device = fifo->base.engine.subdev.device; 764 flush_work(&fifo->recover.work); 765 /* allow mmu fault interrupts, even when we're not using fifo */ 766 nvkm_mask(device, 0x002140, 0x10000000, 0x10000000); 767 } 768 769 static int 770 gk104_fifo_oneinit(struct nvkm_fifo *base) 771 { 772 struct gk104_fifo *fifo = gk104_fifo(base); 773 struct nvkm_subdev *subdev = &fifo->base.engine.subdev; 774 struct nvkm_device *device = subdev->device; 775 struct nvkm_vmm *bar = nvkm_bar_bar1_vmm(device); 776 int engn, runl, pbid, ret, i, j; 777 enum nvkm_devidx engidx; 778 u32 *map; 779 780 /* Determine number of PBDMAs by checking valid enable bits. */ 781 nvkm_wr32(device, 0x000204, 0xffffffff); 782 fifo->pbdma_nr = hweight32(nvkm_rd32(device, 0x000204)); 783 nvkm_debug(subdev, "%d PBDMA(s)\n", fifo->pbdma_nr); 784 785 /* Read PBDMA->runlist(s) mapping from HW. */ 786 if (!(map = kzalloc(sizeof(*map) * fifo->pbdma_nr, GFP_KERNEL))) 787 return -ENOMEM; 788 789 for (i = 0; i < fifo->pbdma_nr; i++) 790 map[i] = nvkm_rd32(device, 0x002390 + (i * 0x04)); 791 792 /* Determine runlist configuration from topology device info. */ 793 i = 0; 794 while ((int)(engidx = nvkm_top_engine(device, i++, &runl, &engn)) >= 0) { 795 /* Determine which PBDMA handles requests for this engine. */ 796 for (j = 0, pbid = -1; j < fifo->pbdma_nr; j++) { 797 if (map[j] & (1 << runl)) { 798 pbid = j; 799 break; 800 } 801 } 802 803 nvkm_debug(subdev, "engine %2d: runlist %2d pbdma %2d (%s)\n", 804 engn, runl, pbid, nvkm_subdev_name[engidx]); 805 806 fifo->engine[engn].engine = nvkm_device_engine(device, engidx); 807 fifo->engine[engn].runl = runl; 808 fifo->engine[engn].pbid = pbid; 809 fifo->engine_nr = max(fifo->engine_nr, engn + 1); 810 fifo->runlist[runl].engm |= 1 << engn; 811 fifo->runlist_nr = max(fifo->runlist_nr, runl + 1); 812 } 813 814 kfree(map); 815 816 for (i = 0; i < fifo->runlist_nr; i++) { 817 ret = nvkm_memory_new(device, NVKM_MEM_TARGET_INST, 818 0x8000, 0x1000, false, 819 &fifo->runlist[i].mem[0]); 820 if (ret) 821 return ret; 822 823 ret = nvkm_memory_new(device, NVKM_MEM_TARGET_INST, 824 0x8000, 0x1000, false, 825 &fifo->runlist[i].mem[1]); 826 if (ret) 827 return ret; 828 829 init_waitqueue_head(&fifo->runlist[i].wait); 830 INIT_LIST_HEAD(&fifo->runlist[i].chan); 831 } 832 833 ret = nvkm_memory_new(device, NVKM_MEM_TARGET_INST, 834 fifo->base.nr * 0x200, 0x1000, true, 835 &fifo->user.mem); 836 if (ret) 837 return ret; 838 839 ret = nvkm_vm_get(bar, nvkm_memory_size(fifo->user.mem), 12, 840 NV_MEM_ACCESS_RW, &fifo->user.bar); 841 if (ret) 842 return ret; 843 844 nvkm_memory_map(fifo->user.mem, &fifo->user.bar, 0); 845 return 0; 846 } 847 848 static void 849 gk104_fifo_init(struct nvkm_fifo *base) 850 { 851 struct gk104_fifo *fifo = gk104_fifo(base); 852 struct nvkm_device *device = fifo->base.engine.subdev.device; 853 int i; 854 855 /* Enable PBDMAs. */ 856 nvkm_wr32(device, 0x000204, (1 << fifo->pbdma_nr) - 1); 857 858 /* PBDMA[n] */ 859 for (i = 0; i < fifo->pbdma_nr; i++) { 860 nvkm_mask(device, 0x04013c + (i * 0x2000), 0x10000100, 0x00000000); 861 nvkm_wr32(device, 0x040108 + (i * 0x2000), 0xffffffff); /* INTR */ 862 nvkm_wr32(device, 0x04010c + (i * 0x2000), 0xfffffeff); /* INTREN */ 863 } 864 865 /* PBDMA[n].HCE */ 866 for (i = 0; i < fifo->pbdma_nr; i++) { 867 nvkm_wr32(device, 0x040148 + (i * 0x2000), 0xffffffff); /* INTR */ 868 nvkm_wr32(device, 0x04014c + (i * 0x2000), 0xffffffff); /* INTREN */ 869 } 870 871 nvkm_wr32(device, 0x002254, 0x10000000 | fifo->user.bar.offset >> 12); 872 873 nvkm_wr32(device, 0x002100, 0xffffffff); 874 nvkm_wr32(device, 0x002140, 0x7fffffff); 875 } 876 877 static void * 878 gk104_fifo_dtor(struct nvkm_fifo *base) 879 { 880 struct gk104_fifo *fifo = gk104_fifo(base); 881 int i; 882 883 nvkm_vm_put(&fifo->user.bar); 884 nvkm_memory_del(&fifo->user.mem); 885 886 for (i = 0; i < fifo->runlist_nr; i++) { 887 nvkm_memory_del(&fifo->runlist[i].mem[1]); 888 nvkm_memory_del(&fifo->runlist[i].mem[0]); 889 } 890 891 return fifo; 892 } 893 894 static const struct nvkm_fifo_func 895 gk104_fifo_ = { 896 .dtor = gk104_fifo_dtor, 897 .oneinit = gk104_fifo_oneinit, 898 .init = gk104_fifo_init, 899 .fini = gk104_fifo_fini, 900 .intr = gk104_fifo_intr, 901 .uevent_init = gk104_fifo_uevent_init, 902 .uevent_fini = gk104_fifo_uevent_fini, 903 .recover_chan = gk104_fifo_recover_chan, 904 .class_get = gk104_fifo_class_get, 905 }; 906 907 int 908 gk104_fifo_new_(const struct gk104_fifo_func *func, struct nvkm_device *device, 909 int index, int nr, struct nvkm_fifo **pfifo) 910 { 911 struct gk104_fifo *fifo; 912 913 if (!(fifo = kzalloc(sizeof(*fifo), GFP_KERNEL))) 914 return -ENOMEM; 915 fifo->func = func; 916 INIT_WORK(&fifo->recover.work, gk104_fifo_recover_work); 917 *pfifo = &fifo->base; 918 919 return nvkm_fifo_ctor(&gk104_fifo_, device, index, nr, &fifo->base); 920 } 921 922 const struct nvkm_enum 923 gk104_fifo_fault_engine[] = { 924 { 0x00, "GR", NULL, NVKM_ENGINE_GR }, 925 { 0x01, "DISPLAY" }, 926 { 0x02, "CAPTURE" }, 927 { 0x03, "IFB", NULL, NVKM_ENGINE_IFB }, 928 { 0x04, "BAR1", NULL, NVKM_SUBDEV_BAR }, 929 { 0x05, "BAR2", NULL, NVKM_SUBDEV_INSTMEM }, 930 { 0x06, "SCHED" }, 931 { 0x07, "HOST0", NULL, NVKM_ENGINE_FIFO }, 932 { 0x08, "HOST1", NULL, NVKM_ENGINE_FIFO }, 933 { 0x09, "HOST2", NULL, NVKM_ENGINE_FIFO }, 934 { 0x0a, "HOST3", NULL, NVKM_ENGINE_FIFO }, 935 { 0x0b, "HOST4", NULL, NVKM_ENGINE_FIFO }, 936 { 0x0c, "HOST5", NULL, NVKM_ENGINE_FIFO }, 937 { 0x0d, "HOST6", NULL, NVKM_ENGINE_FIFO }, 938 { 0x0e, "HOST7", NULL, NVKM_ENGINE_FIFO }, 939 { 0x0f, "HOSTSR" }, 940 { 0x10, "MSVLD", NULL, NVKM_ENGINE_MSVLD }, 941 { 0x11, "MSPPP", NULL, NVKM_ENGINE_MSPPP }, 942 { 0x13, "PERF" }, 943 { 0x14, "MSPDEC", NULL, NVKM_ENGINE_MSPDEC }, 944 { 0x15, "CE0", NULL, NVKM_ENGINE_CE0 }, 945 { 0x16, "CE1", NULL, NVKM_ENGINE_CE1 }, 946 { 0x17, "PMU" }, 947 { 0x18, "PTP" }, 948 { 0x19, "MSENC", NULL, NVKM_ENGINE_MSENC }, 949 { 0x1b, "CE2", NULL, NVKM_ENGINE_CE2 }, 950 {} 951 }; 952 953 const struct nvkm_enum 954 gk104_fifo_fault_reason[] = { 955 { 0x00, "PDE" }, 956 { 0x01, "PDE_SIZE" }, 957 { 0x02, "PTE" }, 958 { 0x03, "VA_LIMIT_VIOLATION" }, 959 { 0x04, "UNBOUND_INST_BLOCK" }, 960 { 0x05, "PRIV_VIOLATION" }, 961 { 0x06, "RO_VIOLATION" }, 962 { 0x07, "WO_VIOLATION" }, 963 { 0x08, "PITCH_MASK_VIOLATION" }, 964 { 0x09, "WORK_CREATION" }, 965 { 0x0a, "UNSUPPORTED_APERTURE" }, 966 { 0x0b, "COMPRESSION_FAILURE" }, 967 { 0x0c, "UNSUPPORTED_KIND" }, 968 { 0x0d, "REGION_VIOLATION" }, 969 { 0x0e, "BOTH_PTES_VALID" }, 970 { 0x0f, "INFO_TYPE_POISONED" }, 971 {} 972 }; 973 974 const struct nvkm_enum 975 gk104_fifo_fault_hubclient[] = { 976 { 0x00, "VIP" }, 977 { 0x01, "CE0" }, 978 { 0x02, "CE1" }, 979 { 0x03, "DNISO" }, 980 { 0x04, "FE" }, 981 { 0x05, "FECS" }, 982 { 0x06, "HOST" }, 983 { 0x07, "HOST_CPU" }, 984 { 0x08, "HOST_CPU_NB" }, 985 { 0x09, "ISO" }, 986 { 0x0a, "MMU" }, 987 { 0x0b, "MSPDEC" }, 988 { 0x0c, "MSPPP" }, 989 { 0x0d, "MSVLD" }, 990 { 0x0e, "NISO" }, 991 { 0x0f, "P2P" }, 992 { 0x10, "PD" }, 993 { 0x11, "PERF" }, 994 { 0x12, "PMU" }, 995 { 0x13, "RASTERTWOD" }, 996 { 0x14, "SCC" }, 997 { 0x15, "SCC_NB" }, 998 { 0x16, "SEC" }, 999 { 0x17, "SSYNC" }, 1000 { 0x18, "GR_CE" }, 1001 { 0x19, "CE2" }, 1002 { 0x1a, "XV" }, 1003 { 0x1b, "MMU_NB" }, 1004 { 0x1c, "MSENC" }, 1005 { 0x1d, "DFALCON" }, 1006 { 0x1e, "SKED" }, 1007 { 0x1f, "AFALCON" }, 1008 {} 1009 }; 1010 1011 const struct nvkm_enum 1012 gk104_fifo_fault_gpcclient[] = { 1013 { 0x00, "L1_0" }, { 0x01, "T1_0" }, { 0x02, "PE_0" }, 1014 { 0x03, "L1_1" }, { 0x04, "T1_1" }, { 0x05, "PE_1" }, 1015 { 0x06, "L1_2" }, { 0x07, "T1_2" }, { 0x08, "PE_2" }, 1016 { 0x09, "L1_3" }, { 0x0a, "T1_3" }, { 0x0b, "PE_3" }, 1017 { 0x0c, "RAST" }, 1018 { 0x0d, "GCC" }, 1019 { 0x0e, "GPCCS" }, 1020 { 0x0f, "PROP_0" }, 1021 { 0x10, "PROP_1" }, 1022 { 0x11, "PROP_2" }, 1023 { 0x12, "PROP_3" }, 1024 { 0x13, "L1_4" }, { 0x14, "T1_4" }, { 0x15, "PE_4" }, 1025 { 0x16, "L1_5" }, { 0x17, "T1_5" }, { 0x18, "PE_5" }, 1026 { 0x19, "L1_6" }, { 0x1a, "T1_6" }, { 0x1b, "PE_6" }, 1027 { 0x1c, "L1_7" }, { 0x1d, "T1_7" }, { 0x1e, "PE_7" }, 1028 { 0x1f, "GPM" }, 1029 { 0x20, "LTP_UTLB_0" }, 1030 { 0x21, "LTP_UTLB_1" }, 1031 { 0x22, "LTP_UTLB_2" }, 1032 { 0x23, "LTP_UTLB_3" }, 1033 { 0x24, "GPC_RGG_UTLB" }, 1034 {} 1035 }; 1036 1037 static const struct gk104_fifo_func 1038 gk104_fifo = { 1039 .fault.engine = gk104_fifo_fault_engine, 1040 .fault.reason = gk104_fifo_fault_reason, 1041 .fault.hubclient = gk104_fifo_fault_hubclient, 1042 .fault.gpcclient = gk104_fifo_fault_gpcclient, 1043 .chan = { 1044 &gk104_fifo_gpfifo_oclass, 1045 NULL 1046 }, 1047 }; 1048 1049 int 1050 gk104_fifo_new(struct nvkm_device *device, int index, struct nvkm_fifo **pfifo) 1051 { 1052 return gk104_fifo_new_(&gk104_fifo, device, index, 4096, pfifo); 1053 } 1054