/*
 * Copyright 2012 Red Hat Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Ben Skeggs
 */
#include "gk104.h"
#include "changk104.h"

#include <core/client.h>
#include <core/gpuobj.h>
#include <subdev/bar.h>
#include <subdev/timer.h>
#include <subdev/top.h>
#include <engine/sw.h>

#include <nvif/class.h>

struct gk104_fifo_engine_status {
	bool busy;
	bool faulted;
	bool chsw;
	bool save;
	bool load;
	struct {
		bool tsg;
		u32 id;
	} prev, next, *chan;
};

/* Decode the status register for engine <engn>, determining which
 * channel (if any) the engine is currently executing on behalf of,
 * taking an in-progress context switch into account.
 */
static void
gk104_fifo_engine_status(struct gk104_fifo *fifo, int engn,
			 struct gk104_fifo_engine_status *status)
{
	struct nvkm_engine *engine = fifo->engine[engn].engine;
	struct nvkm_subdev *subdev = &fifo->base.engine.subdev;
	struct nvkm_device *device = subdev->device;
	u32 stat = nvkm_rd32(device, 0x002640 + (engn * 0x08));

	status->busy     = !!(stat & 0x80000000);
	status->faulted  = !!(stat & 0x40000000);
	status->next.tsg = !!(stat & 0x10000000);
	status->next.id  =   (stat & 0x0fff0000) >> 16;
	status->chsw     = !!(stat & 0x00008000);
	status->save     = !!(stat & 0x00004000);
	status->load     = !!(stat & 0x00002000);
	status->prev.tsg = !!(stat & 0x00001000);
	status->prev.id  =   (stat & 0x00000fff);
	status->chan     = NULL;

	if (status->busy && status->chsw) {
		if (status->load && status->save) {
			if (engine && nvkm_engine_chsw_load(engine))
				status->chan = &status->next;
			else
				status->chan = &status->prev;
		} else
		if (status->load) {
			status->chan = &status->next;
		} else {
			status->chan = &status->prev;
		}
	} else
	if (status->load) {
		status->chan = &status->prev;
	}

	nvkm_debug(subdev, "engine %02d: busy %d faulted %d chsw %d "
			   "save %d load %d %sid %d%s-> %sid %d%s\n",
		   engn, status->busy, status->faulted,
		   status->chsw, status->save, status->load,
		   status->prev.tsg ? "tsg" : "ch", status->prev.id,
		   status->chan == &status->prev ? "*" : " ",
		   status->next.tsg ? "tsg" : "ch", status->next.id,
		   status->chan == &status->next ? "*" : " ");
}

/* Return the index'th entry of the NULL-terminated list of channel
 * classes supported by this FIFO implementation.
 */
static int
gk104_fifo_class_get(struct nvkm_fifo *base, int index,
		     const struct nvkm_fifo_chan_oclass **psclass)
{
	struct gk104_fifo *fifo = gk104_fifo(base);
	int c = 0;

	while ((*psclass = fifo->func->chan[c])) {
		if (c++ == index)
			return 0;
	}

	return c;
}

static void
gk104_fifo_uevent_fini(struct nvkm_fifo *fifo)
{
	struct nvkm_device *device = fifo->engine.subdev.device;
	nvkm_mask(device, 0x002140, 0x80000000, 0x00000000);
}

static void
gk104_fifo_uevent_init(struct nvkm_fifo *fifo)
{
	struct nvkm_device *device = fifo->engine.subdev.device;
	nvkm_mask(device, 0x002140, 0x80000000, 0x80000000);
}

/* Runlists are double-buffered: build the new list in the inactive
 * buffer, submit it, and wait for the hardware to acknowledge the
 * update.
 */
void
gk104_fifo_runlist_commit(struct gk104_fifo *fifo, int runl)
{
	struct gk104_fifo_chan *chan;
	struct nvkm_subdev *subdev = &fifo->base.engine.subdev;
	struct nvkm_device *device = subdev->device;
	struct nvkm_memory *mem;
	int nr = 0;
	int target;

	mutex_lock(&subdev->mutex);
	mem = fifo->runlist[runl].mem[fifo->runlist[runl].next];
	fifo->runlist[runl].next = !fifo->runlist[runl].next;

	nvkm_kmap(mem);
	list_for_each_entry(chan, &fifo->runlist[runl].chan, head) {
		nvkm_wo32(mem, (nr * 8) + 0, chan->base.chid);
		nvkm_wo32(mem, (nr * 8) + 4, 0x00000000);
		nr++;
	}
	nvkm_done(mem);

	switch (nvkm_memory_target(mem)) {
	case NVKM_MEM_TARGET_VRAM: target = 0; break;
	case NVKM_MEM_TARGET_NCOH: target = 3; break;
	default:
		WARN_ON(1);
		return;
	}

	nvkm_wr32(device, 0x002270, (nvkm_memory_addr(mem) >> 12) |
				    (target << 28));
	nvkm_wr32(device, 0x002274, (runl << 20) | nr);

	if (wait_event_timeout(fifo->runlist[runl].wait,
			       !(nvkm_rd32(device, 0x002284 + (runl * 0x08))
				 & 0x00100000),
			       msecs_to_jiffies(2000)) == 0)
		nvkm_error(subdev, "runlist %d update timeout\n", runl);
	mutex_unlock(&subdev->mutex);
}

void
gk104_fifo_runlist_remove(struct gk104_fifo *fifo, struct gk104_fifo_chan *chan)
{
	mutex_lock(&fifo->base.engine.subdev.mutex);
	list_del_init(&chan->head);
	mutex_unlock(&fifo->base.engine.subdev.mutex);
}

void
gk104_fifo_runlist_insert(struct gk104_fifo *fifo, struct gk104_fifo_chan *chan)
{
	mutex_lock(&fifo->base.engine.subdev.mutex);
	list_add_tail(&chan->head, &fifo->runlist[chan->runl].chan);
	mutex_unlock(&fifo->base.engine.subdev.mutex);
}

/* Recovery worker: reinitialise the engines flagged for recovery
 * (fini followed by init), then recommit and unblock the affected
 * runlists.
 */
static void
gk104_fifo_recover_work(struct work_struct *w)
{
	struct gk104_fifo *fifo = container_of(w, typeof(*fifo), recover.work);
	struct nvkm_device *device = fifo->base.engine.subdev.device;
	struct nvkm_engine *engine;
	unsigned long flags;
	u32 engm, runm, todo;
	int engn, runl;

	spin_lock_irqsave(&fifo->base.lock, flags);
	runm = fifo->recover.runm;
	engm = fifo->recover.engm;
	fifo->recover.engm = 0;
	fifo->recover.runm = 0;
	spin_unlock_irqrestore(&fifo->base.lock, flags);

	nvkm_mask(device, 0x002630, runm, runm);

	for (todo = engm; engn = __ffs(todo), todo; todo &= ~BIT(engn)) {
		if ((engine = fifo->engine[engn].engine)) {
			nvkm_subdev_fini(&engine->subdev, false);
			WARN_ON(nvkm_subdev_init(&engine->subdev));
		}
	}

	for (todo = runm; runl = __ffs(todo), todo; todo &= ~BIT(runl))
		gk104_fifo_runlist_commit(fifo, runl);

	nvkm_wr32(device, 0x00262c, runm);
	nvkm_mask(device, 0x002630, runm, 0x00000000);
}

static void gk104_fifo_recover_engn(struct gk104_fifo *fifo, int engn);
/* Mark a runlist for recovery: block further channel assignment on it
 * and schedule the recovery worker.  Caller must hold fifo->base.lock.
 */
static void
gk104_fifo_recover_runl(struct gk104_fifo *fifo, int runl)
{
	struct nvkm_subdev *subdev = &fifo->base.engine.subdev;
	struct nvkm_device *device = subdev->device;
	const u32 runm = BIT(runl);

	assert_spin_locked(&fifo->base.lock);
	if (fifo->recover.runm & runm)
		return;
	fifo->recover.runm |= runm;

	/* Block runlist to prevent channel assignment(s) from changing. */
	nvkm_mask(device, 0x002630, runm, runm);

	/* Schedule recovery. */
	nvkm_warn(subdev, "runlist %d: scheduled for recovery\n", runl);
	schedule_work(&fifo->recover.work);
}

static void
gk104_fifo_recover_chan(struct nvkm_fifo *base, int chid)
{
	struct gk104_fifo *fifo = gk104_fifo(base);
	struct nvkm_subdev *subdev = &fifo->base.engine.subdev;
	struct nvkm_device *device = subdev->device;
	const u32  stat = nvkm_rd32(device, 0x800004 + (chid * 0x08));
	const u32  runl = (stat & 0x000f0000) >> 16;
	const bool used = (stat & 0x00000001);
	unsigned long engn, engm = fifo->runlist[runl].engm;
	struct gk104_fifo_chan *chan;

	assert_spin_locked(&fifo->base.lock);
	if (!used)
		return;

	/* Lookup SW state for channel, and mark it as dead. */
	list_for_each_entry(chan, &fifo->runlist[runl].chan, head) {
		if (chan->base.chid == chid) {
			list_del_init(&chan->head);
			chan->killed = true;
			nvkm_fifo_kevent(&fifo->base, chid);
			break;
		}
	}

	/* Disable channel. */
	nvkm_wr32(device, 0x800004 + (chid * 0x08), stat | 0x00000800);
	nvkm_warn(subdev, "channel %d: killed\n", chid);

	/* Block channel assignments from changing during recovery. */
	gk104_fifo_recover_runl(fifo, runl);

	/* Schedule recovery for any engines the channel is on. */
	for_each_set_bit(engn, &engm, fifo->engine_nr) {
		struct gk104_fifo_engine_status status;
		gk104_fifo_engine_status(fifo, engn, &status);
		if (!status.chan || status.chan->id != chid)
			continue;
		gk104_fifo_recover_engn(fifo, engn);
	}
}

static void
gk104_fifo_recover_engn(struct gk104_fifo *fifo, int engn)
{
	struct nvkm_subdev *subdev = &fifo->base.engine.subdev;
	const u32 runl = fifo->engine[engn].runl;
	const u32 engm = BIT(engn);
	struct gk104_fifo_engine_status status;

	assert_spin_locked(&fifo->base.lock);
	if (fifo->recover.engm & engm)
		return;
	fifo->recover.engm |= engm;

	/* Block channel assignments from changing during recovery. */
	gk104_fifo_recover_runl(fifo, runl);

	/* Determine which channel (if any) is currently on the engine. */
	gk104_fifo_engine_status(fifo, engn, &status);
	if (status.chan) {
		/* The channel is no longer viable, kill it. */
		gk104_fifo_recover_chan(&fifo->base, status.chan->id);
	}

	/* Schedule recovery. */
	nvkm_warn(subdev, "engine %d: scheduled for recovery\n", engn);
	schedule_work(&fifo->recover.work);
}
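/* Decode table for BIND_ERROR interrupts; the error code is taken from
 * the low byte of register 0x00252c.  Unlisted codes are reported with
 * an empty name.
 */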
static const struct nvkm_enum
gk104_fifo_bind_reason[] = {
	{ 0x01, "BIND_NOT_UNBOUND" },
	{ 0x02, "SNOOP_WITHOUT_BAR1" },
	{ 0x03, "UNBIND_WHILE_RUNNING" },
	{ 0x05, "INVALID_RUNLIST" },
	{ 0x06, "INVALID_CTX_TGT" },
	{ 0x0b, "UNBIND_WHILE_PARKED" },
	{}
};

static void
gk104_fifo_intr_bind(struct gk104_fifo *fifo)
{
	struct nvkm_subdev *subdev = &fifo->base.engine.subdev;
	struct nvkm_device *device = subdev->device;
	u32 intr = nvkm_rd32(device, 0x00252c);
	u32 code = intr & 0x000000ff;
	const struct nvkm_enum *en =
		nvkm_enum_find(gk104_fifo_bind_reason, code);

	nvkm_error(subdev, "BIND_ERROR %02x [%s]\n", code, en ? en->name : "");
}

static const struct nvkm_enum
gk104_fifo_sched_reason[] = {
	{ 0x0a, "CTXSW_TIMEOUT" },
	{}
};

static void
gk104_fifo_intr_sched_ctxsw(struct gk104_fifo *fifo)
{
	unsigned long flags, engm = 0;
	u32 engn;

	spin_lock_irqsave(&fifo->base.lock, flags);
	for (engn = 0; engn < fifo->engine_nr; engn++) {
		struct gk104_fifo_engine_status status;

		gk104_fifo_engine_status(fifo, engn, &status);
		if (!status.busy || !status.chsw)
			continue;

		engm |= BIT(engn);
	}

	for_each_set_bit(engn, &engm, fifo->engine_nr)
		gk104_fifo_recover_engn(fifo, engn);

	spin_unlock_irqrestore(&fifo->base.lock, flags);
}

static void
gk104_fifo_intr_sched(struct gk104_fifo *fifo)
{
	struct nvkm_subdev *subdev = &fifo->base.engine.subdev;
	struct nvkm_device *device = subdev->device;
	u32 intr = nvkm_rd32(device, 0x00254c);
	u32 code = intr & 0x000000ff;
	const struct nvkm_enum *en =
		nvkm_enum_find(gk104_fifo_sched_reason, code);

	nvkm_error(subdev, "SCHED_ERROR %02x [%s]\n", code, en ? en->name : "");

	switch (code) {
	case 0x0a:
		gk104_fifo_intr_sched_ctxsw(fifo);
		break;
	default:
		break;
	}
}
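/* Channel-switch error: there is no recovery path here, so just log
 * and acknowledge the status register.
 */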
static void
gk104_fifo_intr_chsw(struct gk104_fifo *fifo)
{
	struct nvkm_subdev *subdev = &fifo->base.engine.subdev;
	struct nvkm_device *device = subdev->device;
	u32 stat = nvkm_rd32(device, 0x00256c);
	nvkm_error(subdev, "CHSW_ERROR %08x\n", stat);
	nvkm_wr32(device, 0x00256c, stat);
}

static void
gk104_fifo_intr_dropped_fault(struct gk104_fifo *fifo)
{
	struct nvkm_subdev *subdev = &fifo->base.engine.subdev;
	struct nvkm_device *device = subdev->device;
	u32 stat = nvkm_rd32(device, 0x00259c);
	nvkm_error(subdev, "DROPPED_MMU_FAULT %08x\n", stat);
}

static void
gk104_fifo_intr_fault(struct gk104_fifo *fifo, int unit)
{
	struct nvkm_subdev *subdev = &fifo->base.engine.subdev;
	struct nvkm_device *device = subdev->device;
	u32 inst = nvkm_rd32(device, 0x002800 + (unit * 0x10));
	u32 valo = nvkm_rd32(device, 0x002804 + (unit * 0x10));
	u32 vahi = nvkm_rd32(device, 0x002808 + (unit * 0x10));
	u32 stat = nvkm_rd32(device, 0x00280c + (unit * 0x10));
	u32 gpc    = (stat & 0x1f000000) >> 24;
	u32 client = (stat & 0x00001f00) >> 8;
	u32 write  = (stat & 0x00000080);
	u32 hub    = (stat & 0x00000040);
	u32 reason = (stat & 0x0000000f);
	const struct nvkm_enum *er, *eu, *ec;
	struct nvkm_engine *engine = NULL;
	struct nvkm_fifo_chan *chan;
	unsigned long flags;
	char gpcid[8] = "", en[16] = "";
	int engn;

	er = nvkm_enum_find(fifo->func->fault.reason, reason);
	eu = nvkm_enum_find(fifo->func->fault.engine, unit);
	if (hub) {
		ec = nvkm_enum_find(fifo->func->fault.hubclient, client);
	} else {
		ec = nvkm_enum_find(fifo->func->fault.gpcclient, client);
		snprintf(gpcid, sizeof(gpcid), "GPC%d/", gpc);
	}

	if (eu && eu->data2) {
		switch (eu->data2) {
		case NVKM_SUBDEV_BAR:
			nvkm_mask(device, 0x001704, 0x00000000, 0x00000000);
			break;
		case NVKM_SUBDEV_INSTMEM:
			nvkm_mask(device, 0x001714, 0x00000000, 0x00000000);
			break;
		case NVKM_ENGINE_IFB:
			nvkm_mask(device, 0x001718, 0x00000000, 0x00000000);
			break;
		default:
			engine = nvkm_device_engine(device, eu->data2);
			break;
		}
	}

	if (eu == NULL) {
		enum nvkm_devidx engidx = nvkm_top_fault(device, unit);
		if (engidx < NVKM_SUBDEV_NR) {
			const char *src = nvkm_subdev_name[engidx];
			char *dst = en;
			do {
				*dst++ = toupper(*src++);
			} while (*src);
			engine = nvkm_device_engine(device, engidx);
		}
	} else {
		snprintf(en, sizeof(en), "%s", eu->name);
	}

	spin_lock_irqsave(&fifo->base.lock, flags);
	chan = nvkm_fifo_chan_inst_locked(&fifo->base, (u64)inst << 12);

	nvkm_error(subdev,
		   "%s fault at %010llx engine %02x [%s] client %02x [%s%s] "
		   "reason %02x [%s] on channel %d [%010llx %s]\n",
		   write ? "write" : "read", (u64)vahi << 32 | valo,
		   unit, en, client, gpcid, ec ? ec->name : "",
		   reason, er ? er->name : "", chan ? chan->chid : -1,
		   (u64)inst << 12,
		   chan ? chan->object.client->name : "unknown");

	/* Kill the channel that caused the fault. */
	if (chan)
		gk104_fifo_recover_chan(&fifo->base, chan->chid);

	/* Channel recovery will probably have already done this for the
	 * correct engine(s), but just in case we can't find the channel
	 * information...
	 */
	for (engn = 0; engn < fifo->engine_nr && engine; engn++) {
		if (fifo->engine[engn].engine == engine) {
			gk104_fifo_recover_engn(fifo, engn);
			break;
		}
	}

	spin_unlock_irqrestore(&fifo->base.lock, flags);
}
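/* PBDMA interrupt status bits, as reported in register 0x040108. */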
static const struct nvkm_bitfield gk104_fifo_pbdma_intr_0[] = {
	{ 0x00000001, "MEMREQ" },
	{ 0x00000002, "MEMACK_TIMEOUT" },
	{ 0x00000004, "MEMACK_EXTRA" },
	{ 0x00000008, "MEMDAT_TIMEOUT" },
	{ 0x00000010, "MEMDAT_EXTRA" },
	{ 0x00000020, "MEMFLUSH" },
	{ 0x00000040, "MEMOP" },
	{ 0x00000080, "LBCONNECT" },
	{ 0x00000100, "LBREQ" },
	{ 0x00000200, "LBACK_TIMEOUT" },
	{ 0x00000400, "LBACK_EXTRA" },
	{ 0x00000800, "LBDAT_TIMEOUT" },
	{ 0x00001000, "LBDAT_EXTRA" },
	{ 0x00002000, "GPFIFO" },
	{ 0x00004000, "GPPTR" },
	{ 0x00008000, "GPENTRY" },
	{ 0x00010000, "GPCRC" },
	{ 0x00020000, "PBPTR" },
	{ 0x00040000, "PBENTRY" },
	{ 0x00080000, "PBCRC" },
	{ 0x00100000, "XBARCONNECT" },
	{ 0x00200000, "METHOD" },
	{ 0x00400000, "METHODCRC" },
	{ 0x00800000, "DEVICE" },
	{ 0x02000000, "SEMAPHORE" },
	{ 0x04000000, "ACQUIRE" },
	{ 0x08000000, "PRI" },
	{ 0x20000000, "NO_CTXSW_SEG" },
	{ 0x40000000, "PBSEG" },
	{ 0x80000000, "SIGNATURE" },
	{}
};

static void
gk104_fifo_intr_pbdma_0(struct gk104_fifo *fifo, int unit)
{
	struct nvkm_subdev *subdev = &fifo->base.engine.subdev;
	struct nvkm_device *device = subdev->device;
	u32 mask = nvkm_rd32(device, 0x04010c + (unit * 0x2000));
	u32 stat = nvkm_rd32(device, 0x040108 + (unit * 0x2000)) & mask;
	u32 addr = nvkm_rd32(device, 0x0400c0 + (unit * 0x2000));
	u32 data = nvkm_rd32(device, 0x0400c4 + (unit * 0x2000));
	u32 chid = nvkm_rd32(device, 0x040120 + (unit * 0x2000)) & 0xfff;
	u32 subc = (addr & 0x00070000) >> 16;
	u32 mthd = (addr & 0x00003ffc);
	u32 show = stat;
	struct nvkm_fifo_chan *chan;
	unsigned long flags;
	char msg[128];

	if (stat & 0x00800000) {
		if (device->sw) {
			if (nvkm_sw_mthd(device->sw, chid, subc, mthd, data))
				show &= ~0x00800000;
		}
	}

	nvkm_wr32(device, 0x0400c0 + (unit * 0x2000), 0x80600008);

	if (show) {
		nvkm_snprintbf(msg, sizeof(msg), gk104_fifo_pbdma_intr_0, show);
		chan = nvkm_fifo_chan_chid(&fifo->base, chid, &flags);
		nvkm_error(subdev, "PBDMA%d: %08x [%s] ch %d [%010llx %s] "
				   "subc %d mthd %04x data %08x\n",
			   unit, show, msg, chid, chan ? chan->inst->addr : 0,
			   chan ? chan->object.client->name : "unknown",
			   subc, mthd, data);
		nvkm_fifo_chan_put(&fifo->base, flags, &chan);
	}

	nvkm_wr32(device, 0x040108 + (unit * 0x2000), stat);
}
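/* Secondary PBDMA (HCE) interrupt status bits, as reported in register
 * 0x040148.
 */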
static const struct nvkm_bitfield gk104_fifo_pbdma_intr_1[] = {
	{ 0x00000001, "HCE_RE_ILLEGAL_OP" },
	{ 0x00000002, "HCE_RE_ALIGNB" },
	{ 0x00000004, "HCE_PRIV" },
	{ 0x00000008, "HCE_ILLEGAL_MTHD" },
	{ 0x00000010, "HCE_ILLEGAL_CLASS" },
	{}
};

static void
gk104_fifo_intr_pbdma_1(struct gk104_fifo *fifo, int unit)
{
	struct nvkm_subdev *subdev = &fifo->base.engine.subdev;
	struct nvkm_device *device = subdev->device;
	u32 mask = nvkm_rd32(device, 0x04014c + (unit * 0x2000));
	u32 stat = nvkm_rd32(device, 0x040148 + (unit * 0x2000)) & mask;
	u32 chid = nvkm_rd32(device, 0x040120 + (unit * 0x2000)) & 0xfff;
	char msg[128];

	if (stat) {
		nvkm_snprintbf(msg, sizeof(msg), gk104_fifo_pbdma_intr_1, stat);
		nvkm_error(subdev, "PBDMA%d: %08x [%s] ch %d %08x %08x\n",
			   unit, stat, msg, chid,
			   nvkm_rd32(device, 0x040150 + (unit * 0x2000)),
			   nvkm_rd32(device, 0x040154 + (unit * 0x2000)));
	}

	nvkm_wr32(device, 0x040148 + (unit * 0x2000), stat);
}

static void
gk104_fifo_intr_runlist(struct gk104_fifo *fifo)
{
	struct nvkm_device *device = fifo->base.engine.subdev.device;
	u32 mask = nvkm_rd32(device, 0x002a00);
	while (mask) {
		int runl = __ffs(mask);
		wake_up(&fifo->runlist[runl].wait);
		nvkm_wr32(device, 0x002a00, 1 << runl);
		mask &= ~(1 << runl);
	}
}

static void
gk104_fifo_intr_engine(struct gk104_fifo *fifo)
{
	nvkm_fifo_uevent(&fifo->base);
}

/* Top-level interrupt handler: dispatch each pending source to its
 * handler, acknowledging status bits as they are dealt with, and mask
 * off anything left unhandled.
 */
static void
gk104_fifo_intr(struct nvkm_fifo *base)
{
	struct gk104_fifo *fifo = gk104_fifo(base);
	struct nvkm_subdev *subdev = &fifo->base.engine.subdev;
	struct nvkm_device *device = subdev->device;
	u32 mask = nvkm_rd32(device, 0x002140);
	u32 stat = nvkm_rd32(device, 0x002100) & mask;

	if (stat & 0x00000001) {
		gk104_fifo_intr_bind(fifo);
		nvkm_wr32(device, 0x002100, 0x00000001);
		stat &= ~0x00000001;
	}

	if (stat & 0x00000010) {
		nvkm_error(subdev, "PIO_ERROR\n");
		nvkm_wr32(device, 0x002100, 0x00000010);
		stat &= ~0x00000010;
	}

	if (stat & 0x00000100) {
		gk104_fifo_intr_sched(fifo);
		nvkm_wr32(device, 0x002100, 0x00000100);
		stat &= ~0x00000100;
	}

	if (stat & 0x00010000) {
		gk104_fifo_intr_chsw(fifo);
		nvkm_wr32(device, 0x002100, 0x00010000);
		stat &= ~0x00010000;
	}

	if (stat & 0x00800000) {
		nvkm_error(subdev, "FB_FLUSH_TIMEOUT\n");
		nvkm_wr32(device, 0x002100, 0x00800000);
		stat &= ~0x00800000;
	}

	if (stat & 0x01000000) {
		nvkm_error(subdev, "LB_ERROR\n");
		nvkm_wr32(device, 0x002100, 0x01000000);
		stat &= ~0x01000000;
	}

	if (stat & 0x08000000) {
		gk104_fifo_intr_dropped_fault(fifo);
		nvkm_wr32(device, 0x002100, 0x08000000);
		stat &= ~0x08000000;
	}

	if (stat & 0x10000000) {
		u32 mask = nvkm_rd32(device, 0x00259c);
		while (mask) {
			u32 unit = __ffs(mask);
			gk104_fifo_intr_fault(fifo, unit);
			nvkm_wr32(device, 0x00259c, (1 << unit));
			mask &= ~(1 << unit);
		}
		stat &= ~0x10000000;
	}

	if (stat & 0x20000000) {
		u32 mask = nvkm_rd32(device, 0x0025a0);
		while (mask) {
			u32 unit = __ffs(mask);
			gk104_fifo_intr_pbdma_0(fifo, unit);
			gk104_fifo_intr_pbdma_1(fifo, unit);
			nvkm_wr32(device, 0x0025a0, (1 << unit));
			mask &= ~(1 << unit);
		}
		stat &= ~0x20000000;
	}

	if (stat & 0x40000000) {
		gk104_fifo_intr_runlist(fifo);
		stat &= ~0x40000000;
	}

	if (stat & 0x80000000) {
		nvkm_wr32(device, 0x002100, 0x80000000);
		gk104_fifo_intr_engine(fifo);
		stat &= ~0x80000000;
	}

	if (stat) {
		nvkm_error(subdev, "INTR %08x\n", stat);
		nvkm_mask(device, 0x002140, stat, 0x00000000);
		nvkm_wr32(device, 0x002100, stat);
	}
}
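/* Suspend: make sure the recovery worker has finished, and leave MMU
 * fault interrupts enabled so faults are still reported while the
 * FIFO is down.
 */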
static void
gk104_fifo_fini(struct nvkm_fifo *base)
{
	struct gk104_fifo *fifo = gk104_fifo(base);
	struct nvkm_device *device = fifo->base.engine.subdev.device;
	flush_work(&fifo->recover.work);
	/* allow mmu fault interrupts, even when we're not using fifo */
	nvkm_mask(device, 0x002140, 0x10000000, 0x10000000);
}

static int
gk104_fifo_oneinit(struct nvkm_fifo *base)
{
	struct gk104_fifo *fifo = gk104_fifo(base);
	struct nvkm_subdev *subdev = &fifo->base.engine.subdev;
	struct nvkm_device *device = subdev->device;
	int engn, runl, pbid, ret, i, j;
	enum nvkm_devidx engidx;
	u32 *map;

	/* Determine number of PBDMAs by checking valid enable bits. */
	nvkm_wr32(device, 0x000204, 0xffffffff);
	fifo->pbdma_nr = hweight32(nvkm_rd32(device, 0x000204));
	nvkm_debug(subdev, "%d PBDMA(s)\n", fifo->pbdma_nr);

	/* Read PBDMA->runlist(s) mapping from HW. */
	if (!(map = kzalloc(sizeof(*map) * fifo->pbdma_nr, GFP_KERNEL)))
		return -ENOMEM;

	for (i = 0; i < fifo->pbdma_nr; i++)
		map[i] = nvkm_rd32(device, 0x002390 + (i * 0x04));

	/* Determine runlist configuration from topology device info. */
	i = 0;
	while ((int)(engidx = nvkm_top_engine(device, i++, &runl, &engn)) >= 0) {
		/* Determine which PBDMA handles requests for this engine. */
		for (j = 0, pbid = -1; j < fifo->pbdma_nr; j++) {
			if (map[j] & (1 << runl)) {
				pbid = j;
				break;
			}
		}

		nvkm_debug(subdev, "engine %2d: runlist %2d pbdma %2d (%s)\n",
			   engn, runl, pbid, nvkm_subdev_name[engidx]);

		fifo->engine[engn].engine = nvkm_device_engine(device, engidx);
		fifo->engine[engn].runl = runl;
		fifo->engine[engn].pbid = pbid;
		fifo->engine_nr = max(fifo->engine_nr, engn + 1);
		fifo->runlist[runl].engm |= 1 << engn;
		fifo->runlist_nr = max(fifo->runlist_nr, runl + 1);
	}

	kfree(map);

	for (i = 0; i < fifo->runlist_nr; i++) {
		ret = nvkm_memory_new(device, NVKM_MEM_TARGET_INST,
				      0x8000, 0x1000, false,
				      &fifo->runlist[i].mem[0]);
		if (ret)
			return ret;

		ret = nvkm_memory_new(device, NVKM_MEM_TARGET_INST,
				      0x8000, 0x1000, false,
				      &fifo->runlist[i].mem[1]);
		if (ret)
			return ret;

		init_waitqueue_head(&fifo->runlist[i].wait);
		INIT_LIST_HEAD(&fifo->runlist[i].chan);
	}

	ret = nvkm_memory_new(device, NVKM_MEM_TARGET_INST,
			      fifo->base.nr * 0x200, 0x1000, true,
			      &fifo->user.mem);
	if (ret)
		return ret;

	ret = nvkm_bar_umap(device->bar, fifo->base.nr * 0x200, 12,
			    &fifo->user.bar);
	if (ret)
		return ret;

	nvkm_memory_map(fifo->user.mem, &fifo->user.bar, 0);
	return 0;
}
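/* Enable all PBDMA units, clear and unmask their interrupts, point the
 * hardware at the user area (fifo->user.bar), and unmask the top-level
 * interrupt sources.
 */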
static void
gk104_fifo_init(struct nvkm_fifo *base)
{
	struct gk104_fifo *fifo = gk104_fifo(base);
	struct nvkm_device *device = fifo->base.engine.subdev.device;
	int i;

	/* Enable PBDMAs. */
	nvkm_wr32(device, 0x000204, (1 << fifo->pbdma_nr) - 1);

	/* PBDMA[n] */
	for (i = 0; i < fifo->pbdma_nr; i++) {
		nvkm_mask(device, 0x04013c + (i * 0x2000), 0x10000100, 0x00000000);
		nvkm_wr32(device, 0x040108 + (i * 0x2000), 0xffffffff); /* INTR */
		nvkm_wr32(device, 0x04010c + (i * 0x2000), 0xfffffeff); /* INTREN */
	}

	/* PBDMA[n].HCE */
	for (i = 0; i < fifo->pbdma_nr; i++) {
		nvkm_wr32(device, 0x040148 + (i * 0x2000), 0xffffffff); /* INTR */
		nvkm_wr32(device, 0x04014c + (i * 0x2000), 0xffffffff); /* INTREN */
	}

	nvkm_wr32(device, 0x002254, 0x10000000 | fifo->user.bar.offset >> 12);

	nvkm_wr32(device, 0x002100, 0xffffffff);
	nvkm_wr32(device, 0x002140, 0x7fffffff);
}

static void *
gk104_fifo_dtor(struct nvkm_fifo *base)
{
	struct gk104_fifo *fifo = gk104_fifo(base);
	int i;

	nvkm_vm_put(&fifo->user.bar);
	nvkm_memory_del(&fifo->user.mem);

	for (i = 0; i < fifo->runlist_nr; i++) {
		nvkm_memory_del(&fifo->runlist[i].mem[1]);
		nvkm_memory_del(&fifo->runlist[i].mem[0]);
	}

	return fifo;
}

static const struct nvkm_fifo_func
gk104_fifo_ = {
	.dtor = gk104_fifo_dtor,
	.oneinit = gk104_fifo_oneinit,
	.init = gk104_fifo_init,
	.fini = gk104_fifo_fini,
	.intr = gk104_fifo_intr,
	.uevent_init = gk104_fifo_uevent_init,
	.uevent_fini = gk104_fifo_uevent_fini,
	.recover_chan = gk104_fifo_recover_chan,
	.class_get = gk104_fifo_class_get,
};

int
gk104_fifo_new_(const struct gk104_fifo_func *func, struct nvkm_device *device,
		int index, int nr, struct nvkm_fifo **pfifo)
{
	struct gk104_fifo *fifo;

	if (!(fifo = kzalloc(sizeof(*fifo), GFP_KERNEL)))
		return -ENOMEM;
	fifo->func = func;
	INIT_WORK(&fifo->recover.work, gk104_fifo_recover_work);
	*pfifo = &fifo->base;

	return nvkm_fifo_ctor(&gk104_fifo_, device, index, nr, &fifo->base);
}

/* Decode tables for MMU faults, referenced through gk104_fifo_func by
 * gk104_fifo_intr_fault().
 */
const struct nvkm_enum
gk104_fifo_fault_engine[] = {
	{ 0x00, "GR", NULL, NVKM_ENGINE_GR },
	{ 0x01, "DISPLAY" },
	{ 0x02, "CAPTURE" },
	{ 0x03, "IFB", NULL, NVKM_ENGINE_IFB },
	{ 0x04, "BAR1", NULL, NVKM_SUBDEV_BAR },
	{ 0x05, "BAR2", NULL, NVKM_SUBDEV_INSTMEM },
	{ 0x06, "SCHED" },
	{ 0x07, "HOST0", NULL, NVKM_ENGINE_FIFO },
	{ 0x08, "HOST1", NULL, NVKM_ENGINE_FIFO },
	{ 0x09, "HOST2", NULL, NVKM_ENGINE_FIFO },
	{ 0x0a, "HOST3", NULL, NVKM_ENGINE_FIFO },
	{ 0x0b, "HOST4", NULL, NVKM_ENGINE_FIFO },
	{ 0x0c, "HOST5", NULL, NVKM_ENGINE_FIFO },
	{ 0x0d, "HOST6", NULL, NVKM_ENGINE_FIFO },
	{ 0x0e, "HOST7", NULL, NVKM_ENGINE_FIFO },
	{ 0x0f, "HOSTSR" },
	{ 0x10, "MSVLD", NULL, NVKM_ENGINE_MSVLD },
	{ 0x11, "MSPPP", NULL, NVKM_ENGINE_MSPPP },
	{ 0x13, "PERF" },
	{ 0x14, "MSPDEC", NULL, NVKM_ENGINE_MSPDEC },
	{ 0x15, "CE0", NULL, NVKM_ENGINE_CE0 },
	{ 0x16, "CE1", NULL, NVKM_ENGINE_CE1 },
	{ 0x17, "PMU" },
	{ 0x18, "PTP" },
	{ 0x19, "MSENC", NULL, NVKM_ENGINE_MSENC },
	{ 0x1b, "CE2", NULL, NVKM_ENGINE_CE2 },
	{}
};

const struct nvkm_enum
gk104_fifo_fault_reason[] = {
	{ 0x00, "PDE" },
	{ 0x01, "PDE_SIZE" },
	{ 0x02, "PTE" },
	{ 0x03, "VA_LIMIT_VIOLATION" },
	{ 0x04, "UNBOUND_INST_BLOCK" },
	{ 0x05, "PRIV_VIOLATION" },
	{ 0x06, "RO_VIOLATION" },
	{ 0x07, "WO_VIOLATION" },
	{ 0x08, "PITCH_MASK_VIOLATION" },
	{ 0x09, "WORK_CREATION" },
	{ 0x0a, "UNSUPPORTED_APERTURE" },
	{ 0x0b, "COMPRESSION_FAILURE" },
	{ 0x0c, "UNSUPPORTED_KIND" },
	{ 0x0d, "REGION_VIOLATION" },
	{ 0x0e, "BOTH_PTES_VALID" },
	{ 0x0f, "INFO_TYPE_POISONED" },
	{}
};

const struct nvkm_enum
gk104_fifo_fault_hubclient[] = {
	{ 0x00, "VIP" },
	{ 0x01, "CE0" },
	{ 0x02, "CE1" },
	{ 0x03, "DNISO" },
	{ 0x04, "FE" },
	{ 0x05, "FECS" },
	{ 0x06, "HOST" },
	{ 0x07, "HOST_CPU" },
	{ 0x08, "HOST_CPU_NB" },
	{ 0x09, "ISO" },
	{ 0x0a, "MMU" },
	{ 0x0b, "MSPDEC" },
	{ 0x0c, "MSPPP" },
	{ 0x0d, "MSVLD" },
	{ 0x0e, "NISO" },
	{ 0x0f, "P2P" },
	{ 0x10, "PD" },
	{ 0x11, "PERF" },
	{ 0x12, "PMU" },
	{ 0x13, "RASTERTWOD" },
	{ 0x14, "SCC" },
	{ 0x15, "SCC_NB" },
	{ 0x16, "SEC" },
	{ 0x17, "SSYNC" },
	{ 0x18, "GR_CE" },
	{ 0x19, "CE2" },
	{ 0x1a, "XV" },
	{ 0x1b, "MMU_NB" },
	{ 0x1c, "MSENC" },
	{ 0x1d, "DFALCON" },
	{ 0x1e, "SKED" },
	{ 0x1f, "AFALCON" },
	{}
};

const struct nvkm_enum
gk104_fifo_fault_gpcclient[] = {
	{ 0x00, "L1_0" }, { 0x01, "T1_0" }, { 0x02, "PE_0" },
	{ 0x03, "L1_1" }, { 0x04, "T1_1" }, { 0x05, "PE_1" },
	{ 0x06, "L1_2" }, { 0x07, "T1_2" }, { 0x08, "PE_2" },
	{ 0x09, "L1_3" }, { 0x0a, "T1_3" }, { 0x0b, "PE_3" },
	{ 0x0c, "RAST" },
	{ 0x0d, "GCC" },
	{ 0x0e, "GPCCS" },
	{ 0x0f, "PROP_0" },
	{ 0x10, "PROP_1" },
	{ 0x11, "PROP_2" },
	{ 0x12, "PROP_3" },
	{ 0x13, "L1_4" }, { 0x14, "T1_4" }, { 0x15, "PE_4" },
	{ 0x16, "L1_5" }, { 0x17, "T1_5" }, { 0x18, "PE_5" },
	{ 0x19, "L1_6" }, { 0x1a, "T1_6" }, { 0x1b, "PE_6" },
	{ 0x1c, "L1_7" }, { 0x1d, "T1_7" }, { 0x1e, "PE_7" },
	{ 0x1f, "GPM" },
	{ 0x20, "LTP_UTLB_0" },
	{ 0x21, "LTP_UTLB_1" },
	{ 0x22, "LTP_UTLB_2" },
	{ 0x23, "LTP_UTLB_3" },
	{ 0x24, "GPC_RGG_UTLB" },
	{}
};

static const struct gk104_fifo_func
gk104_fifo = {
	.fault.engine = gk104_fifo_fault_engine,
	.fault.reason = gk104_fifo_fault_reason,
	.fault.hubclient = gk104_fifo_fault_hubclient,
	.fault.gpcclient = gk104_fifo_fault_gpcclient,
	.chan = {
		&gk104_fifo_gpfifo_oclass,
		NULL
	},
};

int
gk104_fifo_new(struct nvkm_device *device, int index, struct nvkm_fifo **pfifo)
{
	return gk104_fifo_new_(&gk104_fifo, device, index, 4096, pfifo);
}