1 /* 2 * Copyright 2012 Red Hat Inc. 3 * 4 * Permission is hereby granted, free of charge, to any person obtaining a 5 * copy of this software and associated documentation files (the "Software"), 6 * to deal in the Software without restriction, including without limitation 7 * the rights to use, copy, modify, merge, publish, distribute, sublicense, 8 * and/or sell copies of the Software, and to permit persons to whom the 9 * Software is furnished to do so, subject to the following conditions: 10 * 11 * The above copyright notice and this permission notice shall be included in 12 * all copies or substantial portions of the Software. 13 * 14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL 17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR 18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, 19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR 20 * OTHER DEALINGS IN THE SOFTWARE. 21 * 22 * Authors: Ben Skeggs 23 */ 24 #include "chan.h" 25 #include "chid.h" 26 #include "runl.h" 27 #include "runq.h" 28 29 #include "gk104.h" 30 #include "cgrp.h" 31 #include "changk104.h" 32 33 #include <core/gpuobj.h> 34 #include <subdev/mc.h> 35 #include <subdev/top.h> 36 37 #include <nvif/class.h> 38 39 void 40 gk104_chan_stop(struct nvkm_chan *chan) 41 { 42 struct nvkm_device *device = chan->cgrp->runl->fifo->engine.subdev.device; 43 44 nvkm_mask(device, 0x800004 + (chan->id * 8), 0x00000800, 0x00000800); 45 } 46 47 void 48 gk104_chan_start(struct nvkm_chan *chan) 49 { 50 struct nvkm_device *device = chan->cgrp->runl->fifo->engine.subdev.device; 51 52 nvkm_mask(device, 0x800004 + (chan->id * 8), 0x00000400, 0x00000400); 53 } 54 55 void 56 gk104_chan_unbind(struct nvkm_chan *chan) 57 { 58 struct nvkm_device *device = chan->cgrp->runl->fifo->engine.subdev.device; 59 60 nvkm_wr32(device, 0x800000 + (chan->id * 8), 0x00000000); 61 } 62 63 void 64 gk104_chan_bind_inst(struct nvkm_chan *chan) 65 { 66 struct nvkm_device *device = chan->cgrp->runl->fifo->engine.subdev.device; 67 68 nvkm_wr32(device, 0x800000 + (chan->id * 8), 0x80000000 | chan->inst->addr >> 12); 69 } 70 71 void 72 gk104_chan_bind(struct nvkm_chan *chan) 73 { 74 struct nvkm_runl *runl = chan->cgrp->runl; 75 struct nvkm_device *device = runl->fifo->engine.subdev.device; 76 77 nvkm_mask(device, 0x800004 + (chan->id * 8), 0x000f0000, runl->id << 16); 78 gk104_chan_bind_inst(chan); 79 } 80 81 static int 82 gk104_chan_ramfc_write(struct nvkm_chan *chan, u64 offset, u64 length, u32 devm, bool priv) 83 { 84 const u64 userd = nvkm_memory_addr(chan->userd.mem) + chan->userd.base; 85 const u32 limit2 = ilog2(length / 8); 86 87 nvkm_kmap(chan->inst); 88 nvkm_wo32(chan->inst, 0x08, lower_32_bits(userd)); 89 nvkm_wo32(chan->inst, 0x0c, upper_32_bits(userd)); 90 nvkm_wo32(chan->inst, 0x10, 0x0000face); 91 nvkm_wo32(chan->inst, 0x30, 0xfffff902); 92 nvkm_wo32(chan->inst, 0x48, lower_32_bits(offset)); 93 nvkm_wo32(chan->inst, 0x4c, upper_32_bits(offset) | (limit2 << 16)); 94 nvkm_wo32(chan->inst, 0x84, 0x20400000); 95 nvkm_wo32(chan->inst, 0x94, 0x30000000 | devm); 96 nvkm_wo32(chan->inst, 0x9c, 0x00000100); 97 nvkm_wo32(chan->inst, 0xac, 0x0000001f); 98 nvkm_wo32(chan->inst, 0xe4, priv ? 0x00000020 : 0x00000000); 99 nvkm_wo32(chan->inst, 0xe8, chan->id); 100 nvkm_wo32(chan->inst, 0xb8, 0xf8000000); 101 nvkm_wo32(chan->inst, 0xf8, 0x10003080); /* 0x002310 */ 102 nvkm_wo32(chan->inst, 0xfc, 0x10000010); /* 0x002350 */ 103 nvkm_done(chan->inst); 104 return 0; 105 } 106 107 const struct nvkm_chan_func_ramfc 108 gk104_chan_ramfc = { 109 .write = gk104_chan_ramfc_write, 110 .devm = 0xfff, 111 .priv = true, 112 }; 113 114 const struct nvkm_chan_func_userd 115 gk104_chan_userd = { 116 .bar = 1, 117 .size = 0x200, 118 .clear = gf100_chan_userd_clear, 119 }; 120 121 static const struct nvkm_chan_func 122 gk104_chan = { 123 .inst = &gf100_chan_inst, 124 .userd = &gk104_chan_userd, 125 .ramfc = &gk104_chan_ramfc, 126 .bind = gk104_chan_bind, 127 .unbind = gk104_chan_unbind, 128 .start = gk104_chan_start, 129 .stop = gk104_chan_stop, 130 .preempt = gf100_chan_preempt, 131 }; 132 133 /*TODO: clean this up */ 134 struct gk104_engn_status { 135 bool busy; 136 bool faulted; 137 bool chsw; 138 bool save; 139 bool load; 140 struct { 141 bool tsg; 142 u32 id; 143 } prev, next, *chan; 144 }; 145 146 static void 147 gk104_engn_status(struct nvkm_engn *engn, struct gk104_engn_status *status) 148 { 149 u32 stat = nvkm_rd32(engn->runl->fifo->engine.subdev.device, 0x002640 + (engn->id * 0x08)); 150 151 status->busy = !!(stat & 0x80000000); 152 status->faulted = !!(stat & 0x40000000); 153 status->next.tsg = !!(stat & 0x10000000); 154 status->next.id = (stat & 0x0fff0000) >> 16; 155 status->chsw = !!(stat & 0x00008000); 156 status->save = !!(stat & 0x00004000); 157 status->load = !!(stat & 0x00002000); 158 status->prev.tsg = !!(stat & 0x00001000); 159 status->prev.id = (stat & 0x00000fff); 160 status->chan = NULL; 161 162 if (status->busy && status->chsw) { 163 if (status->load && status->save) { 164 if (nvkm_engine_chsw_load(engn->engine)) 165 status->chan = &status->next; 166 else 167 status->chan = &status->prev; 168 } else 169 if (status->load) { 170 status->chan = &status->next; 171 } else { 172 status->chan = &status->prev; 173 } 174 } else 175 if (status->load) { 176 status->chan = &status->prev; 177 } 178 179 ENGN_DEBUG(engn, "%08x: busy %d faulted %d chsw %d save %d load %d %sid %d%s-> %sid %d%s", 180 stat, status->busy, status->faulted, status->chsw, status->save, status->load, 181 status->prev.tsg ? "tsg" : "ch", status->prev.id, 182 status->chan == &status->prev ? "*" : " ", 183 status->next.tsg ? "tsg" : "ch", status->next.id, 184 status->chan == &status->next ? "*" : " "); 185 } 186 187 int 188 gk104_engn_cxid(struct nvkm_engn *engn, bool *cgid) 189 { 190 struct gk104_engn_status status; 191 192 gk104_engn_status(engn, &status); 193 if (status.chan) { 194 *cgid = status.chan->tsg; 195 return status.chan->id; 196 } 197 198 return -ENODEV; 199 } 200 201 bool 202 gk104_engn_chsw(struct nvkm_engn *engn) 203 { 204 struct gk104_engn_status status; 205 206 gk104_engn_status(engn, &status); 207 if (status.busy && status.chsw) 208 return true; 209 210 return false; 211 } 212 213 const struct nvkm_engn_func 214 gk104_engn = { 215 .chsw = gk104_engn_chsw, 216 .cxid = gk104_engn_cxid, 217 .mmu_fault_trigger = gf100_engn_mmu_fault_trigger, 218 .mmu_fault_triggered = gf100_engn_mmu_fault_triggered, 219 }; 220 221 const struct nvkm_engn_func 222 gk104_engn_ce = { 223 .chsw = gk104_engn_chsw, 224 .cxid = gk104_engn_cxid, 225 .mmu_fault_trigger = gf100_engn_mmu_fault_trigger, 226 .mmu_fault_triggered = gf100_engn_mmu_fault_triggered, 227 }; 228 229 bool 230 gk104_runq_idle(struct nvkm_runq *runq) 231 { 232 struct nvkm_device *device = runq->fifo->engine.subdev.device; 233 234 return !(nvkm_rd32(device, 0x003080 + (runq->id * 4)) & 0x0000e000); 235 } 236 237 static const struct nvkm_bitfield 238 gk104_runq_intr_1_names[] = { 239 { 0x00000001, "HCE_RE_ILLEGAL_OP" }, 240 { 0x00000002, "HCE_RE_ALIGNB" }, 241 { 0x00000004, "HCE_PRIV" }, 242 { 0x00000008, "HCE_ILLEGAL_MTHD" }, 243 { 0x00000010, "HCE_ILLEGAL_CLASS" }, 244 {} 245 }; 246 247 static bool 248 gk104_runq_intr_1(struct nvkm_runq *runq) 249 { 250 struct nvkm_subdev *subdev = &runq->fifo->engine.subdev; 251 struct nvkm_device *device = subdev->device; 252 u32 mask = nvkm_rd32(device, 0x04014c + (runq->id * 0x2000)); 253 u32 stat = nvkm_rd32(device, 0x040148 + (runq->id * 0x2000)) & mask; 254 u32 chid = nvkm_rd32(device, 0x040120 + (runq->id * 0x2000)) & 0xfff; 255 char msg[128]; 256 257 if (stat & 0x80000000) { 258 if (runq->func->intr_1_ctxnotvalid && 259 runq->func->intr_1_ctxnotvalid(runq, chid)) 260 stat &= ~0x80000000; 261 } 262 263 if (stat) { 264 nvkm_snprintbf(msg, sizeof(msg), gk104_runq_intr_1_names, stat); 265 nvkm_error(subdev, "PBDMA%d: %08x [%s] ch %d %08x %08x\n", 266 runq->id, stat, msg, chid, 267 nvkm_rd32(device, 0x040150 + (runq->id * 0x2000)), 268 nvkm_rd32(device, 0x040154 + (runq->id * 0x2000))); 269 } 270 271 nvkm_wr32(device, 0x040148 + (runq->id * 0x2000), stat); 272 return true; 273 } 274 275 const struct nvkm_bitfield 276 gk104_runq_intr_0_names[] = { 277 { 0x00000001, "MEMREQ" }, 278 { 0x00000002, "MEMACK_TIMEOUT" }, 279 { 0x00000004, "MEMACK_EXTRA" }, 280 { 0x00000008, "MEMDAT_TIMEOUT" }, 281 { 0x00000010, "MEMDAT_EXTRA" }, 282 { 0x00000020, "MEMFLUSH" }, 283 { 0x00000040, "MEMOP" }, 284 { 0x00000080, "LBCONNECT" }, 285 { 0x00000100, "LBREQ" }, 286 { 0x00000200, "LBACK_TIMEOUT" }, 287 { 0x00000400, "LBACK_EXTRA" }, 288 { 0x00000800, "LBDAT_TIMEOUT" }, 289 { 0x00001000, "LBDAT_EXTRA" }, 290 { 0x00002000, "GPFIFO" }, 291 { 0x00004000, "GPPTR" }, 292 { 0x00008000, "GPENTRY" }, 293 { 0x00010000, "GPCRC" }, 294 { 0x00020000, "PBPTR" }, 295 { 0x00040000, "PBENTRY" }, 296 { 0x00080000, "PBCRC" }, 297 { 0x00100000, "XBARCONNECT" }, 298 { 0x00200000, "METHOD" }, 299 { 0x00400000, "METHODCRC" }, 300 { 0x00800000, "DEVICE" }, 301 { 0x02000000, "SEMAPHORE" }, 302 { 0x04000000, "ACQUIRE" }, 303 { 0x08000000, "PRI" }, 304 { 0x20000000, "NO_CTXSW_SEG" }, 305 { 0x40000000, "PBSEG" }, 306 { 0x80000000, "SIGNATURE" }, 307 {} 308 }; 309 310 bool 311 gk104_runq_intr(struct nvkm_runq *runq, struct nvkm_runl *null) 312 { 313 bool intr0 = gf100_runq_intr(runq, NULL); 314 bool intr1 = gk104_runq_intr_1(runq); 315 316 return intr0 || intr1; 317 } 318 319 void 320 gk104_runq_init(struct nvkm_runq *runq) 321 { 322 struct nvkm_device *device = runq->fifo->engine.subdev.device; 323 324 gf100_runq_init(runq); 325 326 nvkm_wr32(device, 0x040148 + (runq->id * 0x2000), 0xffffffff); /* HCE.INTR */ 327 nvkm_wr32(device, 0x04014c + (runq->id * 0x2000), 0xffffffff); /* HCE.INTREN */ 328 } 329 330 static u32 331 gk104_runq_runm(struct nvkm_runq *runq) 332 { 333 return nvkm_rd32(runq->fifo->engine.subdev.device, 0x002390 + (runq->id * 0x04)); 334 } 335 336 const struct nvkm_runq_func 337 gk104_runq = { 338 .init = gk104_runq_init, 339 .intr = gk104_runq_intr, 340 .intr_0_names = gk104_runq_intr_0_names, 341 .idle = gk104_runq_idle, 342 }; 343 344 void 345 gk104_runl_fault_clear(struct nvkm_runl *runl) 346 { 347 nvkm_wr32(runl->fifo->engine.subdev.device, 0x00262c, BIT(runl->id)); 348 } 349 350 void 351 gk104_runl_allow(struct nvkm_runl *runl, u32 engm) 352 { 353 nvkm_mask(runl->fifo->engine.subdev.device, 0x002630, BIT(runl->id), 0x00000000); 354 } 355 356 void 357 gk104_runl_block(struct nvkm_runl *runl, u32 engm) 358 { 359 nvkm_mask(runl->fifo->engine.subdev.device, 0x002630, BIT(runl->id), BIT(runl->id)); 360 } 361 362 bool 363 gk104_runl_pending(struct nvkm_runl *runl) 364 { 365 struct nvkm_device *device = runl->fifo->engine.subdev.device; 366 367 return nvkm_rd32(device, 0x002284 + (runl->id * 0x08)) & 0x00100000; 368 } 369 370 void 371 gk104_runl_commit(struct nvkm_runl *runl, struct nvkm_memory *memory, u32 start, int count) 372 { 373 struct nvkm_fifo *fifo = runl->fifo; 374 struct nvkm_device *device = fifo->engine.subdev.device; 375 u64 addr = nvkm_memory_addr(memory) + start; 376 int target; 377 378 switch (nvkm_memory_target(memory)) { 379 case NVKM_MEM_TARGET_VRAM: target = 0; break; 380 case NVKM_MEM_TARGET_NCOH: target = 3; break; 381 default: 382 WARN_ON(1); 383 return; 384 } 385 386 spin_lock_irq(&fifo->lock); 387 nvkm_wr32(device, 0x002270, (target << 28) | (addr >> 12)); 388 nvkm_wr32(device, 0x002274, (runl->id << 20) | count); 389 spin_unlock_irq(&fifo->lock); 390 } 391 392 void 393 gk104_runl_insert_chan(struct nvkm_chan *chan, struct nvkm_memory *memory, u64 offset) 394 { 395 nvkm_wo32(memory, offset + 0, chan->id); 396 nvkm_wo32(memory, offset + 4, 0x00000000); 397 } 398 399 static const struct nvkm_runl_func 400 gk104_runl = { 401 .size = 8, 402 .update = nv50_runl_update, 403 .insert_chan = gk104_runl_insert_chan, 404 .commit = gk104_runl_commit, 405 .wait = nv50_runl_wait, 406 .pending = gk104_runl_pending, 407 .block = gk104_runl_block, 408 .allow = gk104_runl_allow, 409 .fault_clear = gk104_runl_fault_clear, 410 .preempt_pending = gf100_runl_preempt_pending, 411 }; 412 413 int 414 gk104_fifo_engine_id(struct nvkm_fifo *base, struct nvkm_engine *engine) 415 { 416 struct gk104_fifo *fifo = gk104_fifo(base); 417 int engn; 418 419 if (engine->subdev.type == NVKM_ENGINE_SW) 420 return GK104_FIFO_ENGN_SW; 421 422 for (engn = 0; engn < fifo->engine_nr && engine; engn++) { 423 if (fifo->engine[engn].engine == engine) 424 return engn; 425 } 426 427 WARN_ON(1); 428 return -1; 429 } 430 431 static const struct nvkm_enum 432 gk104_fifo_mmu_fault_engine[] = { 433 { 0x00, "GR", NULL, NVKM_ENGINE_GR }, 434 { 0x01, "DISPLAY" }, 435 { 0x02, "CAPTURE" }, 436 { 0x03, "IFB", NULL, NVKM_ENGINE_IFB }, 437 { 0x04, "BAR1", NULL, NVKM_SUBDEV_BAR }, 438 { 0x05, "BAR2", NULL, NVKM_SUBDEV_INSTMEM }, 439 { 0x06, "SCHED" }, 440 { 0x07, "HOST0" }, 441 { 0x08, "HOST1" }, 442 { 0x09, "HOST2" }, 443 { 0x0a, "HOST3" }, 444 { 0x0b, "HOST4" }, 445 { 0x0c, "HOST5" }, 446 { 0x0d, "HOST6" }, 447 { 0x0e, "HOST7" }, 448 { 0x0f, "HOSTSR" }, 449 { 0x10, "MSVLD", NULL, NVKM_ENGINE_MSVLD }, 450 { 0x11, "MSPPP", NULL, NVKM_ENGINE_MSPPP }, 451 { 0x13, "PERF" }, 452 { 0x14, "MSPDEC", NULL, NVKM_ENGINE_MSPDEC }, 453 { 0x15, "CE0", NULL, NVKM_ENGINE_CE, 0 }, 454 { 0x16, "CE1", NULL, NVKM_ENGINE_CE, 1 }, 455 { 0x17, "PMU" }, 456 { 0x18, "PTP" }, 457 { 0x19, "MSENC", NULL, NVKM_ENGINE_MSENC }, 458 { 0x1b, "CE2", NULL, NVKM_ENGINE_CE, 2 }, 459 {} 460 }; 461 462 const struct nvkm_enum 463 gk104_fifo_mmu_fault_reason[] = { 464 { 0x00, "PDE" }, 465 { 0x01, "PDE_SIZE" }, 466 { 0x02, "PTE" }, 467 { 0x03, "VA_LIMIT_VIOLATION" }, 468 { 0x04, "UNBOUND_INST_BLOCK" }, 469 { 0x05, "PRIV_VIOLATION" }, 470 { 0x06, "RO_VIOLATION" }, 471 { 0x07, "WO_VIOLATION" }, 472 { 0x08, "PITCH_MASK_VIOLATION" }, 473 { 0x09, "WORK_CREATION" }, 474 { 0x0a, "UNSUPPORTED_APERTURE" }, 475 { 0x0b, "COMPRESSION_FAILURE" }, 476 { 0x0c, "UNSUPPORTED_KIND" }, 477 { 0x0d, "REGION_VIOLATION" }, 478 { 0x0e, "BOTH_PTES_VALID" }, 479 { 0x0f, "INFO_TYPE_POISONED" }, 480 {} 481 }; 482 483 const struct nvkm_enum 484 gk104_fifo_mmu_fault_hubclient[] = { 485 { 0x00, "VIP" }, 486 { 0x01, "CE0" }, 487 { 0x02, "CE1" }, 488 { 0x03, "DNISO" }, 489 { 0x04, "FE" }, 490 { 0x05, "FECS" }, 491 { 0x06, "HOST" }, 492 { 0x07, "HOST_CPU" }, 493 { 0x08, "HOST_CPU_NB" }, 494 { 0x09, "ISO" }, 495 { 0x0a, "MMU" }, 496 { 0x0b, "MSPDEC" }, 497 { 0x0c, "MSPPP" }, 498 { 0x0d, "MSVLD" }, 499 { 0x0e, "NISO" }, 500 { 0x0f, "P2P" }, 501 { 0x10, "PD" }, 502 { 0x11, "PERF" }, 503 { 0x12, "PMU" }, 504 { 0x13, "RASTERTWOD" }, 505 { 0x14, "SCC" }, 506 { 0x15, "SCC_NB" }, 507 { 0x16, "SEC" }, 508 { 0x17, "SSYNC" }, 509 { 0x18, "GR_CE" }, 510 { 0x19, "CE2" }, 511 { 0x1a, "XV" }, 512 { 0x1b, "MMU_NB" }, 513 { 0x1c, "MSENC" }, 514 { 0x1d, "DFALCON" }, 515 { 0x1e, "SKED" }, 516 { 0x1f, "AFALCON" }, 517 {} 518 }; 519 520 const struct nvkm_enum 521 gk104_fifo_mmu_fault_gpcclient[] = { 522 { 0x00, "L1_0" }, { 0x01, "T1_0" }, { 0x02, "PE_0" }, 523 { 0x03, "L1_1" }, { 0x04, "T1_1" }, { 0x05, "PE_1" }, 524 { 0x06, "L1_2" }, { 0x07, "T1_2" }, { 0x08, "PE_2" }, 525 { 0x09, "L1_3" }, { 0x0a, "T1_3" }, { 0x0b, "PE_3" }, 526 { 0x0c, "RAST" }, 527 { 0x0d, "GCC" }, 528 { 0x0e, "GPCCS" }, 529 { 0x0f, "PROP_0" }, 530 { 0x10, "PROP_1" }, 531 { 0x11, "PROP_2" }, 532 { 0x12, "PROP_3" }, 533 { 0x13, "L1_4" }, { 0x14, "T1_4" }, { 0x15, "PE_4" }, 534 { 0x16, "L1_5" }, { 0x17, "T1_5" }, { 0x18, "PE_5" }, 535 { 0x19, "L1_6" }, { 0x1a, "T1_6" }, { 0x1b, "PE_6" }, 536 { 0x1c, "L1_7" }, { 0x1d, "T1_7" }, { 0x1e, "PE_7" }, 537 { 0x1f, "GPM" }, 538 { 0x20, "LTP_UTLB_0" }, 539 { 0x21, "LTP_UTLB_1" }, 540 { 0x22, "LTP_UTLB_2" }, 541 { 0x23, "LTP_UTLB_3" }, 542 { 0x24, "GPC_RGG_UTLB" }, 543 {} 544 }; 545 546 const struct nvkm_fifo_func_mmu_fault 547 gk104_fifo_mmu_fault = { 548 .recover = gf100_fifo_mmu_fault_recover, 549 .access = gf100_fifo_mmu_fault_access, 550 .engine = gk104_fifo_mmu_fault_engine, 551 .reason = gk104_fifo_mmu_fault_reason, 552 .hubclient = gk104_fifo_mmu_fault_hubclient, 553 .gpcclient = gk104_fifo_mmu_fault_gpcclient, 554 }; 555 556 static const struct nvkm_enum 557 gk104_fifo_intr_bind_reason[] = { 558 { 0x01, "BIND_NOT_UNBOUND" }, 559 { 0x02, "SNOOP_WITHOUT_BAR1" }, 560 { 0x03, "UNBIND_WHILE_RUNNING" }, 561 { 0x05, "INVALID_RUNLIST" }, 562 { 0x06, "INVALID_CTX_TGT" }, 563 { 0x0b, "UNBIND_WHILE_PARKED" }, 564 {} 565 }; 566 567 void 568 gk104_fifo_intr_bind(struct nvkm_fifo *fifo) 569 { 570 struct nvkm_subdev *subdev = &fifo->engine.subdev; 571 u32 intr = nvkm_rd32(subdev->device, 0x00252c); 572 u32 code = intr & 0x000000ff; 573 const struct nvkm_enum *en = nvkm_enum_find(gk104_fifo_intr_bind_reason, code); 574 575 nvkm_error(subdev, "BIND_ERROR %02x [%s]\n", code, en ? en->name : ""); 576 } 577 578 void 579 gk104_fifo_intr_chsw(struct nvkm_fifo *fifo) 580 { 581 struct nvkm_subdev *subdev = &fifo->engine.subdev; 582 struct nvkm_device *device = subdev->device; 583 u32 stat = nvkm_rd32(device, 0x00256c); 584 585 nvkm_error(subdev, "CHSW_ERROR %08x\n", stat); 586 nvkm_wr32(device, 0x00256c, stat); 587 } 588 589 static void 590 gk104_fifo_intr_dropped_fault(struct nvkm_fifo *fifo) 591 { 592 struct nvkm_subdev *subdev = &fifo->engine.subdev; 593 u32 stat = nvkm_rd32(subdev->device, 0x00259c); 594 595 nvkm_error(subdev, "DROPPED_MMU_FAULT %08x\n", stat); 596 } 597 598 void 599 gk104_fifo_intr_runlist(struct nvkm_fifo *fifo) 600 { 601 struct nvkm_device *device = fifo->engine.subdev.device; 602 struct nvkm_runl *runl; 603 u32 mask = nvkm_rd32(device, 0x002a00); 604 605 nvkm_runl_foreach_cond(runl, fifo, mask & BIT(runl->id)) { 606 nvkm_wr32(device, 0x002a00, BIT(runl->id)); 607 } 608 } 609 610 irqreturn_t 611 gk104_fifo_intr(struct nvkm_inth *inth) 612 { 613 struct nvkm_fifo *fifo = container_of(inth, typeof(*fifo), engine.subdev.inth); 614 struct nvkm_subdev *subdev = &fifo->engine.subdev; 615 struct nvkm_device *device = subdev->device; 616 u32 mask = nvkm_rd32(device, 0x002140); 617 u32 stat = nvkm_rd32(device, 0x002100) & mask; 618 619 if (stat & 0x00000001) { 620 gk104_fifo_intr_bind(fifo); 621 nvkm_wr32(device, 0x002100, 0x00000001); 622 stat &= ~0x00000001; 623 } 624 625 if (stat & 0x00000010) { 626 nvkm_error(subdev, "PIO_ERROR\n"); 627 nvkm_wr32(device, 0x002100, 0x00000010); 628 stat &= ~0x00000010; 629 } 630 631 if (stat & 0x00000100) { 632 gf100_fifo_intr_sched(fifo); 633 nvkm_wr32(device, 0x002100, 0x00000100); 634 stat &= ~0x00000100; 635 } 636 637 if (stat & 0x00010000) { 638 gk104_fifo_intr_chsw(fifo); 639 nvkm_wr32(device, 0x002100, 0x00010000); 640 stat &= ~0x00010000; 641 } 642 643 if (stat & 0x00800000) { 644 nvkm_error(subdev, "FB_FLUSH_TIMEOUT\n"); 645 nvkm_wr32(device, 0x002100, 0x00800000); 646 stat &= ~0x00800000; 647 } 648 649 if (stat & 0x01000000) { 650 nvkm_error(subdev, "LB_ERROR\n"); 651 nvkm_wr32(device, 0x002100, 0x01000000); 652 stat &= ~0x01000000; 653 } 654 655 if (stat & 0x08000000) { 656 gk104_fifo_intr_dropped_fault(fifo); 657 nvkm_wr32(device, 0x002100, 0x08000000); 658 stat &= ~0x08000000; 659 } 660 661 if (stat & 0x10000000) { 662 gf100_fifo_intr_mmu_fault(fifo); 663 stat &= ~0x10000000; 664 } 665 666 if (stat & 0x20000000) { 667 if (gf100_fifo_intr_pbdma(fifo)) 668 stat &= ~0x20000000; 669 } 670 671 if (stat & 0x40000000) { 672 gk104_fifo_intr_runlist(fifo); 673 stat &= ~0x40000000; 674 } 675 676 if (stat & 0x80000000) { 677 nvkm_wr32(device, 0x002100, 0x80000000); 678 nvkm_event_ntfy(&fifo->nonstall.event, 0, NVKM_FIFO_NONSTALL_EVENT); 679 stat &= ~0x80000000; 680 } 681 682 if (stat) { 683 nvkm_error(subdev, "INTR %08x\n", stat); 684 spin_lock(&fifo->lock); 685 nvkm_mask(device, 0x002140, stat, 0x00000000); 686 spin_unlock(&fifo->lock); 687 nvkm_wr32(device, 0x002100, stat); 688 } 689 690 return IRQ_HANDLED; 691 } 692 693 void 694 gk104_fifo_init_pbdmas(struct nvkm_fifo *fifo, u32 mask) 695 { 696 struct nvkm_device *device = fifo->engine.subdev.device; 697 698 nvkm_wr32(device, 0x000204, mask); 699 nvkm_mask(device, 0x002a04, 0xbfffffff, 0xbfffffff); 700 } 701 702 void 703 gk104_fifo_init(struct nvkm_fifo *fifo) 704 { 705 struct nvkm_device *device = fifo->engine.subdev.device; 706 707 if (fifo->func->chan.func->userd->bar == 1) 708 nvkm_wr32(device, 0x002254, 0x10000000 | fifo->userd.bar1->addr >> 12); 709 710 nvkm_wr32(device, 0x002100, 0xffffffff); 711 nvkm_wr32(device, 0x002140, 0x7fffffff); 712 } 713 714 int 715 gk104_fifo_runl_ctor(struct nvkm_fifo *fifo) 716 { 717 struct nvkm_device *device = fifo->engine.subdev.device; 718 struct nvkm_top_device *tdev; 719 struct nvkm_runl *runl; 720 struct nvkm_runq *runq; 721 const struct nvkm_engn_func *func; 722 723 nvkm_list_foreach(tdev, &device->top->device, head, tdev->runlist >= 0) { 724 runl = nvkm_runl_get(fifo, tdev->runlist, tdev->runlist); 725 if (!runl) { 726 runl = nvkm_runl_new(fifo, tdev->runlist, tdev->runlist, 0); 727 if (IS_ERR(runl)) 728 return PTR_ERR(runl); 729 730 nvkm_runq_foreach_cond(runq, fifo, gk104_runq_runm(runq) & BIT(runl->id)) { 731 if (WARN_ON(runl->runq_nr == ARRAY_SIZE(runl->runq))) 732 return -ENOMEM; 733 734 runl->runq[runl->runq_nr++] = runq; 735 } 736 737 } 738 739 if (tdev->engine < 0) 740 continue; 741 742 switch (tdev->type) { 743 case NVKM_ENGINE_CE: 744 func = fifo->func->engn_ce; 745 break; 746 case NVKM_ENGINE_GR: 747 nvkm_runl_add(runl, 15, &gf100_engn_sw, NVKM_ENGINE_SW, 0); 748 fallthrough; 749 default: 750 func = fifo->func->engn; 751 break; 752 } 753 754 nvkm_runl_add(runl, tdev->engine, func, tdev->type, tdev->inst); 755 } 756 757 return 0; 758 } 759 760 int 761 gk104_fifo_chid_nr(struct nvkm_fifo *fifo) 762 { 763 return 4096; 764 } 765 766 int 767 gk104_fifo_oneinit(struct nvkm_fifo *base) 768 { 769 struct gk104_fifo *fifo = gk104_fifo(base); 770 struct nvkm_subdev *subdev = &fifo->base.engine.subdev; 771 struct nvkm_device *device = subdev->device; 772 struct nvkm_top_device *tdev; 773 774 /* Determine runlist configuration from topology device info. */ 775 list_for_each_entry(tdev, &device->top->device, head) { 776 const int engn = tdev->engine; 777 778 if (engn < 0) 779 continue; 780 781 fifo->engine[engn].engine = nvkm_device_engine(device, tdev->type, tdev->inst); 782 fifo->engine_nr = max(fifo->engine_nr, engn + 1); 783 fifo->runlist[tdev->runlist].engm |= BIT(engn); 784 fifo->runlist[tdev->runlist].engm_sw |= BIT(engn); 785 if (tdev->type == NVKM_ENGINE_GR) 786 fifo->runlist[tdev->runlist].engm_sw |= BIT(GK104_FIFO_ENGN_SW); 787 fifo->runlist_nr = max(fifo->runlist_nr, tdev->runlist + 1); 788 } 789 790 return 0; 791 } 792 793 void * 794 gk104_fifo_dtor(struct nvkm_fifo *base) 795 { 796 struct gk104_fifo *fifo = gk104_fifo(base); 797 return fifo; 798 } 799 800 int 801 gk104_fifo_new_(const struct gk104_fifo_func *func, struct nvkm_device *device, 802 enum nvkm_subdev_type type, int inst, int nr, struct nvkm_fifo **pfifo) 803 { 804 struct gk104_fifo *fifo; 805 806 if (!(fifo = kzalloc(sizeof(*fifo), GFP_KERNEL))) 807 return -ENOMEM; 808 fifo->func = func; 809 *pfifo = &fifo->base; 810 811 return nvkm_fifo_ctor(func, device, type, inst, &fifo->base); 812 } 813 814 static const struct nvkm_fifo_func 815 gk104_fifo = { 816 .dtor = gk104_fifo_dtor, 817 .oneinit = gk104_fifo_oneinit, 818 .chid_nr = gk104_fifo_chid_nr, 819 .chid_ctor = gf100_fifo_chid_ctor, 820 .runq_nr = gf100_fifo_runq_nr, 821 .runl_ctor = gk104_fifo_runl_ctor, 822 .init = gk104_fifo_init, 823 .init_pbdmas = gk104_fifo_init_pbdmas, 824 .intr = gk104_fifo_intr, 825 .intr_mmu_fault_unit = gf100_fifo_intr_mmu_fault_unit, 826 .intr_ctxsw_timeout = gf100_fifo_intr_ctxsw_timeout, 827 .mmu_fault = &gk104_fifo_mmu_fault, 828 .engine_id = gk104_fifo_engine_id, 829 .nonstall = &gf100_fifo_nonstall, 830 .runl = &gk104_runl, 831 .runq = &gk104_runq, 832 .engn = &gk104_engn, 833 .engn_ce = &gk104_engn_ce, 834 .cgrp = {{ }, &nv04_cgrp }, 835 .chan = {{ 0, 0, KEPLER_CHANNEL_GPFIFO_A }, &gk104_chan, .ctor = &gk104_fifo_gpfifo_new }, 836 }; 837 838 int 839 gk104_fifo_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst, 840 struct nvkm_fifo **pfifo) 841 { 842 return gk104_fifo_new_(&gk104_fifo, device, type, inst, 0, pfifo); 843 } 844