1 /* 2 * Copyright 2012 Red Hat Inc. 3 * 4 * Permission is hereby granted, free of charge, to any person obtaining a 5 * copy of this software and associated documentation files (the "Software"), 6 * to deal in the Software without restriction, including without limitation 7 * the rights to use, copy, modify, merge, publish, distribute, sublicense, 8 * and/or sell copies of the Software, and to permit persons to whom the 9 * Software is furnished to do so, subject to the following conditions: 10 * 11 * The above copyright notice and this permission notice shall be included in 12 * all copies or substantial portions of the Software. 13 * 14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL 17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR 18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, 19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR 20 * OTHER DEALINGS IN THE SOFTWARE. 21 * 22 * Authors: Ben Skeggs 23 */ 24 #include "chan.h" 25 #include "chid.h" 26 #include "runl.h" 27 #include "runq.h" 28 29 #include "gk104.h" 30 #include "cgrp.h" 31 #include "changk104.h" 32 33 #include <core/gpuobj.h> 34 #include <subdev/mc.h> 35 #include <subdev/top.h> 36 37 #include <nvif/class.h> 38 39 void 40 gk104_chan_stop(struct nvkm_chan *chan) 41 { 42 struct nvkm_device *device = chan->cgrp->runl->fifo->engine.subdev.device; 43 44 nvkm_mask(device, 0x800004 + (chan->id * 8), 0x00000800, 0x00000800); 45 } 46 47 void 48 gk104_chan_start(struct nvkm_chan *chan) 49 { 50 struct nvkm_device *device = chan->cgrp->runl->fifo->engine.subdev.device; 51 52 nvkm_mask(device, 0x800004 + (chan->id * 8), 0x00000400, 0x00000400); 53 } 54 55 void 56 gk104_chan_unbind(struct nvkm_chan *chan) 57 { 58 struct nvkm_device *device = chan->cgrp->runl->fifo->engine.subdev.device; 59 60 nvkm_wr32(device, 0x800000 + (chan->id * 8), 0x00000000); 61 } 62 63 void 64 gk104_chan_bind_inst(struct nvkm_chan *chan) 65 { 66 struct nvkm_device *device = chan->cgrp->runl->fifo->engine.subdev.device; 67 68 nvkm_wr32(device, 0x800000 + (chan->id * 8), 0x80000000 | chan->inst->addr >> 12); 69 } 70 71 void 72 gk104_chan_bind(struct nvkm_chan *chan) 73 { 74 struct nvkm_runl *runl = chan->cgrp->runl; 75 struct nvkm_device *device = runl->fifo->engine.subdev.device; 76 77 nvkm_mask(device, 0x800004 + (chan->id * 8), 0x000f0000, runl->id << 16); 78 gk104_chan_bind_inst(chan); 79 } 80 81 const struct nvkm_chan_func_userd 82 gk104_chan_userd = { 83 .bar = 1, 84 .size = 0x200, 85 .clear = gf100_chan_userd_clear, 86 }; 87 88 static const struct nvkm_chan_func 89 gk104_chan = { 90 .inst = &gf100_chan_inst, 91 .userd = &gk104_chan_userd, 92 .bind = gk104_chan_bind, 93 .unbind = gk104_chan_unbind, 94 .start = gk104_chan_start, 95 .stop = gk104_chan_stop, 96 .preempt = gf100_chan_preempt, 97 }; 98 99 /*TODO: clean this up */ 100 struct gk104_engn_status { 101 bool busy; 102 bool faulted; 103 bool chsw; 104 bool save; 105 bool load; 106 struct { 107 bool tsg; 108 u32 id; 109 } prev, next, *chan; 110 }; 111 112 static void 113 gk104_engn_status(struct nvkm_engn *engn, struct gk104_engn_status *status) 114 { 115 u32 stat = nvkm_rd32(engn->runl->fifo->engine.subdev.device, 0x002640 + (engn->id * 0x08)); 116 117 status->busy = !!(stat & 0x80000000); 118 status->faulted = !!(stat & 0x40000000); 119 status->next.tsg = !!(stat & 0x10000000); 120 status->next.id = (stat & 0x0fff0000) >> 16; 121 status->chsw = !!(stat & 0x00008000); 122 status->save = !!(stat & 0x00004000); 123 status->load = !!(stat & 0x00002000); 124 status->prev.tsg = !!(stat & 0x00001000); 125 status->prev.id = (stat & 0x00000fff); 126 status->chan = NULL; 127 128 if (status->busy && status->chsw) { 129 if (status->load && status->save) { 130 if (nvkm_engine_chsw_load(engn->engine)) 131 status->chan = &status->next; 132 else 133 status->chan = &status->prev; 134 } else 135 if (status->load) { 136 status->chan = &status->next; 137 } else { 138 status->chan = &status->prev; 139 } 140 } else 141 if (status->load) { 142 status->chan = &status->prev; 143 } 144 145 ENGN_DEBUG(engn, "%08x: busy %d faulted %d chsw %d save %d load %d %sid %d%s-> %sid %d%s", 146 stat, status->busy, status->faulted, status->chsw, status->save, status->load, 147 status->prev.tsg ? "tsg" : "ch", status->prev.id, 148 status->chan == &status->prev ? "*" : " ", 149 status->next.tsg ? "tsg" : "ch", status->next.id, 150 status->chan == &status->next ? "*" : " "); 151 } 152 153 int 154 gk104_engn_cxid(struct nvkm_engn *engn, bool *cgid) 155 { 156 struct gk104_engn_status status; 157 158 gk104_engn_status(engn, &status); 159 if (status.chan) { 160 *cgid = status.chan->tsg; 161 return status.chan->id; 162 } 163 164 return -ENODEV; 165 } 166 167 bool 168 gk104_engn_chsw(struct nvkm_engn *engn) 169 { 170 struct gk104_engn_status status; 171 172 gk104_engn_status(engn, &status); 173 if (status.busy && status.chsw) 174 return true; 175 176 return false; 177 } 178 179 const struct nvkm_engn_func 180 gk104_engn = { 181 .chsw = gk104_engn_chsw, 182 .cxid = gk104_engn_cxid, 183 .mmu_fault_trigger = gf100_engn_mmu_fault_trigger, 184 .mmu_fault_triggered = gf100_engn_mmu_fault_triggered, 185 }; 186 187 const struct nvkm_engn_func 188 gk104_engn_ce = { 189 .chsw = gk104_engn_chsw, 190 .cxid = gk104_engn_cxid, 191 .mmu_fault_trigger = gf100_engn_mmu_fault_trigger, 192 .mmu_fault_triggered = gf100_engn_mmu_fault_triggered, 193 }; 194 195 bool 196 gk104_runq_idle(struct nvkm_runq *runq) 197 { 198 struct nvkm_device *device = runq->fifo->engine.subdev.device; 199 200 return !(nvkm_rd32(device, 0x003080 + (runq->id * 4)) & 0x0000e000); 201 } 202 203 static const struct nvkm_bitfield 204 gk104_runq_intr_1_names[] = { 205 { 0x00000001, "HCE_RE_ILLEGAL_OP" }, 206 { 0x00000002, "HCE_RE_ALIGNB" }, 207 { 0x00000004, "HCE_PRIV" }, 208 { 0x00000008, "HCE_ILLEGAL_MTHD" }, 209 { 0x00000010, "HCE_ILLEGAL_CLASS" }, 210 {} 211 }; 212 213 static bool 214 gk104_runq_intr_1(struct nvkm_runq *runq) 215 { 216 struct nvkm_subdev *subdev = &runq->fifo->engine.subdev; 217 struct nvkm_device *device = subdev->device; 218 u32 mask = nvkm_rd32(device, 0x04014c + (runq->id * 0x2000)); 219 u32 stat = nvkm_rd32(device, 0x040148 + (runq->id * 0x2000)) & mask; 220 u32 chid = nvkm_rd32(device, 0x040120 + (runq->id * 0x2000)) & 0xfff; 221 char msg[128]; 222 223 if (stat & 0x80000000) { 224 if (runq->func->intr_1_ctxnotvalid && 225 runq->func->intr_1_ctxnotvalid(runq, chid)) 226 stat &= ~0x80000000; 227 } 228 229 if (stat) { 230 nvkm_snprintbf(msg, sizeof(msg), gk104_runq_intr_1_names, stat); 231 nvkm_error(subdev, "PBDMA%d: %08x [%s] ch %d %08x %08x\n", 232 runq->id, stat, msg, chid, 233 nvkm_rd32(device, 0x040150 + (runq->id * 0x2000)), 234 nvkm_rd32(device, 0x040154 + (runq->id * 0x2000))); 235 } 236 237 nvkm_wr32(device, 0x040148 + (runq->id * 0x2000), stat); 238 return true; 239 } 240 241 const struct nvkm_bitfield 242 gk104_runq_intr_0_names[] = { 243 { 0x00000001, "MEMREQ" }, 244 { 0x00000002, "MEMACK_TIMEOUT" }, 245 { 0x00000004, "MEMACK_EXTRA" }, 246 { 0x00000008, "MEMDAT_TIMEOUT" }, 247 { 0x00000010, "MEMDAT_EXTRA" }, 248 { 0x00000020, "MEMFLUSH" }, 249 { 0x00000040, "MEMOP" }, 250 { 0x00000080, "LBCONNECT" }, 251 { 0x00000100, "LBREQ" }, 252 { 0x00000200, "LBACK_TIMEOUT" }, 253 { 0x00000400, "LBACK_EXTRA" }, 254 { 0x00000800, "LBDAT_TIMEOUT" }, 255 { 0x00001000, "LBDAT_EXTRA" }, 256 { 0x00002000, "GPFIFO" }, 257 { 0x00004000, "GPPTR" }, 258 { 0x00008000, "GPENTRY" }, 259 { 0x00010000, "GPCRC" }, 260 { 0x00020000, "PBPTR" }, 261 { 0x00040000, "PBENTRY" }, 262 { 0x00080000, "PBCRC" }, 263 { 0x00100000, "XBARCONNECT" }, 264 { 0x00200000, "METHOD" }, 265 { 0x00400000, "METHODCRC" }, 266 { 0x00800000, "DEVICE" }, 267 { 0x02000000, "SEMAPHORE" }, 268 { 0x04000000, "ACQUIRE" }, 269 { 0x08000000, "PRI" }, 270 { 0x20000000, "NO_CTXSW_SEG" }, 271 { 0x40000000, "PBSEG" }, 272 { 0x80000000, "SIGNATURE" }, 273 {} 274 }; 275 276 bool 277 gk104_runq_intr(struct nvkm_runq *runq, struct nvkm_runl *null) 278 { 279 bool intr0 = gf100_runq_intr(runq, NULL); 280 bool intr1 = gk104_runq_intr_1(runq); 281 282 return intr0 || intr1; 283 } 284 285 void 286 gk104_runq_init(struct nvkm_runq *runq) 287 { 288 struct nvkm_device *device = runq->fifo->engine.subdev.device; 289 290 gf100_runq_init(runq); 291 292 nvkm_wr32(device, 0x040148 + (runq->id * 0x2000), 0xffffffff); /* HCE.INTR */ 293 nvkm_wr32(device, 0x04014c + (runq->id * 0x2000), 0xffffffff); /* HCE.INTREN */ 294 } 295 296 static u32 297 gk104_runq_runm(struct nvkm_runq *runq) 298 { 299 return nvkm_rd32(runq->fifo->engine.subdev.device, 0x002390 + (runq->id * 0x04)); 300 } 301 302 const struct nvkm_runq_func 303 gk104_runq = { 304 .init = gk104_runq_init, 305 .intr = gk104_runq_intr, 306 .intr_0_names = gk104_runq_intr_0_names, 307 .idle = gk104_runq_idle, 308 }; 309 310 void 311 gk104_runl_fault_clear(struct nvkm_runl *runl) 312 { 313 nvkm_wr32(runl->fifo->engine.subdev.device, 0x00262c, BIT(runl->id)); 314 } 315 316 void 317 gk104_runl_allow(struct nvkm_runl *runl, u32 engm) 318 { 319 nvkm_mask(runl->fifo->engine.subdev.device, 0x002630, BIT(runl->id), 0x00000000); 320 } 321 322 void 323 gk104_runl_block(struct nvkm_runl *runl, u32 engm) 324 { 325 nvkm_mask(runl->fifo->engine.subdev.device, 0x002630, BIT(runl->id), BIT(runl->id)); 326 } 327 328 bool 329 gk104_runl_pending(struct nvkm_runl *runl) 330 { 331 struct nvkm_device *device = runl->fifo->engine.subdev.device; 332 333 return nvkm_rd32(device, 0x002284 + (runl->id * 0x08)) & 0x00100000; 334 } 335 336 void 337 gk104_runl_commit(struct nvkm_runl *runl, struct nvkm_memory *memory, u32 start, int count) 338 { 339 struct nvkm_fifo *fifo = runl->fifo; 340 struct nvkm_device *device = fifo->engine.subdev.device; 341 u64 addr = nvkm_memory_addr(memory) + start; 342 int target; 343 344 switch (nvkm_memory_target(memory)) { 345 case NVKM_MEM_TARGET_VRAM: target = 0; break; 346 case NVKM_MEM_TARGET_NCOH: target = 3; break; 347 default: 348 WARN_ON(1); 349 return; 350 } 351 352 spin_lock_irq(&fifo->lock); 353 nvkm_wr32(device, 0x002270, (target << 28) | (addr >> 12)); 354 nvkm_wr32(device, 0x002274, (runl->id << 20) | count); 355 spin_unlock_irq(&fifo->lock); 356 } 357 358 void 359 gk104_runl_insert_chan(struct nvkm_chan *chan, struct nvkm_memory *memory, u64 offset) 360 { 361 nvkm_wo32(memory, offset + 0, chan->id); 362 nvkm_wo32(memory, offset + 4, 0x00000000); 363 } 364 365 static const struct nvkm_runl_func 366 gk104_runl = { 367 .size = 8, 368 .update = nv50_runl_update, 369 .insert_chan = gk104_runl_insert_chan, 370 .commit = gk104_runl_commit, 371 .wait = nv50_runl_wait, 372 .pending = gk104_runl_pending, 373 .block = gk104_runl_block, 374 .allow = gk104_runl_allow, 375 .fault_clear = gk104_runl_fault_clear, 376 .preempt_pending = gf100_runl_preempt_pending, 377 }; 378 379 int 380 gk104_fifo_engine_id(struct nvkm_fifo *base, struct nvkm_engine *engine) 381 { 382 struct gk104_fifo *fifo = gk104_fifo(base); 383 int engn; 384 385 if (engine->subdev.type == NVKM_ENGINE_SW) 386 return GK104_FIFO_ENGN_SW; 387 388 for (engn = 0; engn < fifo->engine_nr && engine; engn++) { 389 if (fifo->engine[engn].engine == engine) 390 return engn; 391 } 392 393 WARN_ON(1); 394 return -1; 395 } 396 397 static const struct nvkm_enum 398 gk104_fifo_mmu_fault_engine[] = { 399 { 0x00, "GR", NULL, NVKM_ENGINE_GR }, 400 { 0x01, "DISPLAY" }, 401 { 0x02, "CAPTURE" }, 402 { 0x03, "IFB", NULL, NVKM_ENGINE_IFB }, 403 { 0x04, "BAR1", NULL, NVKM_SUBDEV_BAR }, 404 { 0x05, "BAR2", NULL, NVKM_SUBDEV_INSTMEM }, 405 { 0x06, "SCHED" }, 406 { 0x07, "HOST0" }, 407 { 0x08, "HOST1" }, 408 { 0x09, "HOST2" }, 409 { 0x0a, "HOST3" }, 410 { 0x0b, "HOST4" }, 411 { 0x0c, "HOST5" }, 412 { 0x0d, "HOST6" }, 413 { 0x0e, "HOST7" }, 414 { 0x0f, "HOSTSR" }, 415 { 0x10, "MSVLD", NULL, NVKM_ENGINE_MSVLD }, 416 { 0x11, "MSPPP", NULL, NVKM_ENGINE_MSPPP }, 417 { 0x13, "PERF" }, 418 { 0x14, "MSPDEC", NULL, NVKM_ENGINE_MSPDEC }, 419 { 0x15, "CE0", NULL, NVKM_ENGINE_CE, 0 }, 420 { 0x16, "CE1", NULL, NVKM_ENGINE_CE, 1 }, 421 { 0x17, "PMU" }, 422 { 0x18, "PTP" }, 423 { 0x19, "MSENC", NULL, NVKM_ENGINE_MSENC }, 424 { 0x1b, "CE2", NULL, NVKM_ENGINE_CE, 2 }, 425 {} 426 }; 427 428 const struct nvkm_enum 429 gk104_fifo_mmu_fault_reason[] = { 430 { 0x00, "PDE" }, 431 { 0x01, "PDE_SIZE" }, 432 { 0x02, "PTE" }, 433 { 0x03, "VA_LIMIT_VIOLATION" }, 434 { 0x04, "UNBOUND_INST_BLOCK" }, 435 { 0x05, "PRIV_VIOLATION" }, 436 { 0x06, "RO_VIOLATION" }, 437 { 0x07, "WO_VIOLATION" }, 438 { 0x08, "PITCH_MASK_VIOLATION" }, 439 { 0x09, "WORK_CREATION" }, 440 { 0x0a, "UNSUPPORTED_APERTURE" }, 441 { 0x0b, "COMPRESSION_FAILURE" }, 442 { 0x0c, "UNSUPPORTED_KIND" }, 443 { 0x0d, "REGION_VIOLATION" }, 444 { 0x0e, "BOTH_PTES_VALID" }, 445 { 0x0f, "INFO_TYPE_POISONED" }, 446 {} 447 }; 448 449 const struct nvkm_enum 450 gk104_fifo_mmu_fault_hubclient[] = { 451 { 0x00, "VIP" }, 452 { 0x01, "CE0" }, 453 { 0x02, "CE1" }, 454 { 0x03, "DNISO" }, 455 { 0x04, "FE" }, 456 { 0x05, "FECS" }, 457 { 0x06, "HOST" }, 458 { 0x07, "HOST_CPU" }, 459 { 0x08, "HOST_CPU_NB" }, 460 { 0x09, "ISO" }, 461 { 0x0a, "MMU" }, 462 { 0x0b, "MSPDEC" }, 463 { 0x0c, "MSPPP" }, 464 { 0x0d, "MSVLD" }, 465 { 0x0e, "NISO" }, 466 { 0x0f, "P2P" }, 467 { 0x10, "PD" }, 468 { 0x11, "PERF" }, 469 { 0x12, "PMU" }, 470 { 0x13, "RASTERTWOD" }, 471 { 0x14, "SCC" }, 472 { 0x15, "SCC_NB" }, 473 { 0x16, "SEC" }, 474 { 0x17, "SSYNC" }, 475 { 0x18, "GR_CE" }, 476 { 0x19, "CE2" }, 477 { 0x1a, "XV" }, 478 { 0x1b, "MMU_NB" }, 479 { 0x1c, "MSENC" }, 480 { 0x1d, "DFALCON" }, 481 { 0x1e, "SKED" }, 482 { 0x1f, "AFALCON" }, 483 {} 484 }; 485 486 const struct nvkm_enum 487 gk104_fifo_mmu_fault_gpcclient[] = { 488 { 0x00, "L1_0" }, { 0x01, "T1_0" }, { 0x02, "PE_0" }, 489 { 0x03, "L1_1" }, { 0x04, "T1_1" }, { 0x05, "PE_1" }, 490 { 0x06, "L1_2" }, { 0x07, "T1_2" }, { 0x08, "PE_2" }, 491 { 0x09, "L1_3" }, { 0x0a, "T1_3" }, { 0x0b, "PE_3" }, 492 { 0x0c, "RAST" }, 493 { 0x0d, "GCC" }, 494 { 0x0e, "GPCCS" }, 495 { 0x0f, "PROP_0" }, 496 { 0x10, "PROP_1" }, 497 { 0x11, "PROP_2" }, 498 { 0x12, "PROP_3" }, 499 { 0x13, "L1_4" }, { 0x14, "T1_4" }, { 0x15, "PE_4" }, 500 { 0x16, "L1_5" }, { 0x17, "T1_5" }, { 0x18, "PE_5" }, 501 { 0x19, "L1_6" }, { 0x1a, "T1_6" }, { 0x1b, "PE_6" }, 502 { 0x1c, "L1_7" }, { 0x1d, "T1_7" }, { 0x1e, "PE_7" }, 503 { 0x1f, "GPM" }, 504 { 0x20, "LTP_UTLB_0" }, 505 { 0x21, "LTP_UTLB_1" }, 506 { 0x22, "LTP_UTLB_2" }, 507 { 0x23, "LTP_UTLB_3" }, 508 { 0x24, "GPC_RGG_UTLB" }, 509 {} 510 }; 511 512 const struct nvkm_fifo_func_mmu_fault 513 gk104_fifo_mmu_fault = { 514 .recover = gf100_fifo_mmu_fault_recover, 515 .access = gf100_fifo_mmu_fault_access, 516 .engine = gk104_fifo_mmu_fault_engine, 517 .reason = gk104_fifo_mmu_fault_reason, 518 .hubclient = gk104_fifo_mmu_fault_hubclient, 519 .gpcclient = gk104_fifo_mmu_fault_gpcclient, 520 }; 521 522 static const struct nvkm_enum 523 gk104_fifo_intr_bind_reason[] = { 524 { 0x01, "BIND_NOT_UNBOUND" }, 525 { 0x02, "SNOOP_WITHOUT_BAR1" }, 526 { 0x03, "UNBIND_WHILE_RUNNING" }, 527 { 0x05, "INVALID_RUNLIST" }, 528 { 0x06, "INVALID_CTX_TGT" }, 529 { 0x0b, "UNBIND_WHILE_PARKED" }, 530 {} 531 }; 532 533 void 534 gk104_fifo_intr_bind(struct nvkm_fifo *fifo) 535 { 536 struct nvkm_subdev *subdev = &fifo->engine.subdev; 537 u32 intr = nvkm_rd32(subdev->device, 0x00252c); 538 u32 code = intr & 0x000000ff; 539 const struct nvkm_enum *en = nvkm_enum_find(gk104_fifo_intr_bind_reason, code); 540 541 nvkm_error(subdev, "BIND_ERROR %02x [%s]\n", code, en ? en->name : ""); 542 } 543 544 void 545 gk104_fifo_intr_chsw(struct nvkm_fifo *fifo) 546 { 547 struct nvkm_subdev *subdev = &fifo->engine.subdev; 548 struct nvkm_device *device = subdev->device; 549 u32 stat = nvkm_rd32(device, 0x00256c); 550 551 nvkm_error(subdev, "CHSW_ERROR %08x\n", stat); 552 nvkm_wr32(device, 0x00256c, stat); 553 } 554 555 static void 556 gk104_fifo_intr_dropped_fault(struct nvkm_fifo *fifo) 557 { 558 struct nvkm_subdev *subdev = &fifo->engine.subdev; 559 u32 stat = nvkm_rd32(subdev->device, 0x00259c); 560 561 nvkm_error(subdev, "DROPPED_MMU_FAULT %08x\n", stat); 562 } 563 564 void 565 gk104_fifo_intr_runlist(struct nvkm_fifo *fifo) 566 { 567 struct nvkm_device *device = fifo->engine.subdev.device; 568 struct nvkm_runl *runl; 569 u32 mask = nvkm_rd32(device, 0x002a00); 570 571 nvkm_runl_foreach_cond(runl, fifo, mask & BIT(runl->id)) { 572 nvkm_wr32(device, 0x002a00, BIT(runl->id)); 573 } 574 } 575 576 irqreturn_t 577 gk104_fifo_intr(struct nvkm_inth *inth) 578 { 579 struct nvkm_fifo *fifo = container_of(inth, typeof(*fifo), engine.subdev.inth); 580 struct nvkm_subdev *subdev = &fifo->engine.subdev; 581 struct nvkm_device *device = subdev->device; 582 u32 mask = nvkm_rd32(device, 0x002140); 583 u32 stat = nvkm_rd32(device, 0x002100) & mask; 584 585 if (stat & 0x00000001) { 586 gk104_fifo_intr_bind(fifo); 587 nvkm_wr32(device, 0x002100, 0x00000001); 588 stat &= ~0x00000001; 589 } 590 591 if (stat & 0x00000010) { 592 nvkm_error(subdev, "PIO_ERROR\n"); 593 nvkm_wr32(device, 0x002100, 0x00000010); 594 stat &= ~0x00000010; 595 } 596 597 if (stat & 0x00000100) { 598 gf100_fifo_intr_sched(fifo); 599 nvkm_wr32(device, 0x002100, 0x00000100); 600 stat &= ~0x00000100; 601 } 602 603 if (stat & 0x00010000) { 604 gk104_fifo_intr_chsw(fifo); 605 nvkm_wr32(device, 0x002100, 0x00010000); 606 stat &= ~0x00010000; 607 } 608 609 if (stat & 0x00800000) { 610 nvkm_error(subdev, "FB_FLUSH_TIMEOUT\n"); 611 nvkm_wr32(device, 0x002100, 0x00800000); 612 stat &= ~0x00800000; 613 } 614 615 if (stat & 0x01000000) { 616 nvkm_error(subdev, "LB_ERROR\n"); 617 nvkm_wr32(device, 0x002100, 0x01000000); 618 stat &= ~0x01000000; 619 } 620 621 if (stat & 0x08000000) { 622 gk104_fifo_intr_dropped_fault(fifo); 623 nvkm_wr32(device, 0x002100, 0x08000000); 624 stat &= ~0x08000000; 625 } 626 627 if (stat & 0x10000000) { 628 gf100_fifo_intr_mmu_fault(fifo); 629 stat &= ~0x10000000; 630 } 631 632 if (stat & 0x20000000) { 633 if (gf100_fifo_intr_pbdma(fifo)) 634 stat &= ~0x20000000; 635 } 636 637 if (stat & 0x40000000) { 638 gk104_fifo_intr_runlist(fifo); 639 stat &= ~0x40000000; 640 } 641 642 if (stat & 0x80000000) { 643 nvkm_wr32(device, 0x002100, 0x80000000); 644 nvkm_event_ntfy(&fifo->nonstall.event, 0, NVKM_FIFO_NONSTALL_EVENT); 645 stat &= ~0x80000000; 646 } 647 648 if (stat) { 649 nvkm_error(subdev, "INTR %08x\n", stat); 650 spin_lock(&fifo->lock); 651 nvkm_mask(device, 0x002140, stat, 0x00000000); 652 spin_unlock(&fifo->lock); 653 nvkm_wr32(device, 0x002100, stat); 654 } 655 656 return IRQ_HANDLED; 657 } 658 659 void 660 gk104_fifo_init_pbdmas(struct nvkm_fifo *fifo, u32 mask) 661 { 662 struct nvkm_device *device = fifo->engine.subdev.device; 663 664 nvkm_wr32(device, 0x000204, mask); 665 nvkm_mask(device, 0x002a04, 0xbfffffff, 0xbfffffff); 666 } 667 668 void 669 gk104_fifo_init(struct nvkm_fifo *fifo) 670 { 671 struct nvkm_device *device = fifo->engine.subdev.device; 672 673 if (fifo->func->chan.func->userd->bar == 1) 674 nvkm_wr32(device, 0x002254, 0x10000000 | fifo->userd.bar1->addr >> 12); 675 676 nvkm_wr32(device, 0x002100, 0xffffffff); 677 nvkm_wr32(device, 0x002140, 0x7fffffff); 678 } 679 680 int 681 gk104_fifo_runl_ctor(struct nvkm_fifo *fifo) 682 { 683 struct nvkm_device *device = fifo->engine.subdev.device; 684 struct nvkm_top_device *tdev; 685 struct nvkm_runl *runl; 686 struct nvkm_runq *runq; 687 const struct nvkm_engn_func *func; 688 689 nvkm_list_foreach(tdev, &device->top->device, head, tdev->runlist >= 0) { 690 runl = nvkm_runl_get(fifo, tdev->runlist, tdev->runlist); 691 if (!runl) { 692 runl = nvkm_runl_new(fifo, tdev->runlist, tdev->runlist, 0); 693 if (IS_ERR(runl)) 694 return PTR_ERR(runl); 695 696 nvkm_runq_foreach_cond(runq, fifo, gk104_runq_runm(runq) & BIT(runl->id)) { 697 if (WARN_ON(runl->runq_nr == ARRAY_SIZE(runl->runq))) 698 return -ENOMEM; 699 700 runl->runq[runl->runq_nr++] = runq; 701 } 702 703 } 704 705 if (tdev->engine < 0) 706 continue; 707 708 switch (tdev->type) { 709 case NVKM_ENGINE_CE: 710 func = fifo->func->engn_ce; 711 break; 712 case NVKM_ENGINE_GR: 713 nvkm_runl_add(runl, 15, &gf100_engn_sw, NVKM_ENGINE_SW, 0); 714 fallthrough; 715 default: 716 func = fifo->func->engn; 717 break; 718 } 719 720 nvkm_runl_add(runl, tdev->engine, func, tdev->type, tdev->inst); 721 } 722 723 return 0; 724 } 725 726 int 727 gk104_fifo_chid_nr(struct nvkm_fifo *fifo) 728 { 729 return 4096; 730 } 731 732 int 733 gk104_fifo_oneinit(struct nvkm_fifo *base) 734 { 735 struct gk104_fifo *fifo = gk104_fifo(base); 736 struct nvkm_subdev *subdev = &fifo->base.engine.subdev; 737 struct nvkm_device *device = subdev->device; 738 struct nvkm_top_device *tdev; 739 740 /* Determine runlist configuration from topology device info. */ 741 list_for_each_entry(tdev, &device->top->device, head) { 742 const int engn = tdev->engine; 743 744 if (engn < 0) 745 continue; 746 747 fifo->engine[engn].engine = nvkm_device_engine(device, tdev->type, tdev->inst); 748 fifo->engine_nr = max(fifo->engine_nr, engn + 1); 749 fifo->runlist[tdev->runlist].engm |= BIT(engn); 750 fifo->runlist[tdev->runlist].engm_sw |= BIT(engn); 751 if (tdev->type == NVKM_ENGINE_GR) 752 fifo->runlist[tdev->runlist].engm_sw |= BIT(GK104_FIFO_ENGN_SW); 753 fifo->runlist_nr = max(fifo->runlist_nr, tdev->runlist + 1); 754 } 755 756 return 0; 757 } 758 759 void * 760 gk104_fifo_dtor(struct nvkm_fifo *base) 761 { 762 struct gk104_fifo *fifo = gk104_fifo(base); 763 return fifo; 764 } 765 766 int 767 gk104_fifo_new_(const struct gk104_fifo_func *func, struct nvkm_device *device, 768 enum nvkm_subdev_type type, int inst, int nr, struct nvkm_fifo **pfifo) 769 { 770 struct gk104_fifo *fifo; 771 772 if (!(fifo = kzalloc(sizeof(*fifo), GFP_KERNEL))) 773 return -ENOMEM; 774 fifo->func = func; 775 *pfifo = &fifo->base; 776 777 return nvkm_fifo_ctor(func, device, type, inst, &fifo->base); 778 } 779 780 static const struct nvkm_fifo_func 781 gk104_fifo = { 782 .dtor = gk104_fifo_dtor, 783 .oneinit = gk104_fifo_oneinit, 784 .chid_nr = gk104_fifo_chid_nr, 785 .chid_ctor = gf100_fifo_chid_ctor, 786 .runq_nr = gf100_fifo_runq_nr, 787 .runl_ctor = gk104_fifo_runl_ctor, 788 .init = gk104_fifo_init, 789 .init_pbdmas = gk104_fifo_init_pbdmas, 790 .intr = gk104_fifo_intr, 791 .intr_mmu_fault_unit = gf100_fifo_intr_mmu_fault_unit, 792 .intr_ctxsw_timeout = gf100_fifo_intr_ctxsw_timeout, 793 .mmu_fault = &gk104_fifo_mmu_fault, 794 .engine_id = gk104_fifo_engine_id, 795 .nonstall = &gf100_fifo_nonstall, 796 .runl = &gk104_runl, 797 .runq = &gk104_runq, 798 .engn = &gk104_engn, 799 .engn_ce = &gk104_engn_ce, 800 .cgrp = {{ }, &nv04_cgrp }, 801 .chan = {{ 0, 0, KEPLER_CHANNEL_GPFIFO_A }, &gk104_chan, .ctor = &gk104_fifo_gpfifo_new }, 802 }; 803 804 int 805 gk104_fifo_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst, 806 struct nvkm_fifo **pfifo) 807 { 808 return gk104_fifo_new_(&gk104_fifo, device, type, inst, 0, pfifo); 809 } 810