Lines Matching 0x10000010

43 nvkm_wr32(chan->cgrp->runl->fifo->engine.subdev.device, 0x002634, chan->id); in gf100_chan_preempt()
51 nvkm_mask(device, 0x003004 + (chan->id * 8), 0x00000001, 0x00000000); in gf100_chan_stop()
59 nvkm_wr32(device, 0x003004 + (chan->id * 8), 0x001f0001); in gf100_chan_start()
73 nvkm_wr32(device, 0x003000 + (chan->id * 8), 0x00000000); in gf100_chan_unbind()
81 nvkm_wr32(device, 0x003000 + (chan->id * 8), 0xc0000000 | chan->inst->addr >> 12); in gf100_chan_bind()
91 nvkm_wo32(chan->inst, 0x08, lower_32_bits(userd)); in gf100_chan_ramfc_write()
92 nvkm_wo32(chan->inst, 0x0c, upper_32_bits(userd)); in gf100_chan_ramfc_write()
93 nvkm_wo32(chan->inst, 0x10, 0x0000face); in gf100_chan_ramfc_write()
94 nvkm_wo32(chan->inst, 0x30, 0xfffff902); in gf100_chan_ramfc_write()
95 nvkm_wo32(chan->inst, 0x48, lower_32_bits(offset)); in gf100_chan_ramfc_write()
96 nvkm_wo32(chan->inst, 0x4c, upper_32_bits(offset) | (limit2 << 16)); in gf100_chan_ramfc_write()
97 nvkm_wo32(chan->inst, 0x54, 0x00000002); in gf100_chan_ramfc_write()
98 nvkm_wo32(chan->inst, 0x84, 0x20400000); in gf100_chan_ramfc_write()
99 nvkm_wo32(chan->inst, 0x94, 0x30000000 | devm); in gf100_chan_ramfc_write()
100 nvkm_wo32(chan->inst, 0x9c, 0x00000100); in gf100_chan_ramfc_write()
101 nvkm_wo32(chan->inst, 0xa4, 0x1f1f1f1f); in gf100_chan_ramfc_write()
102 nvkm_wo32(chan->inst, 0xa8, 0x1f1f1f1f); in gf100_chan_ramfc_write()
103 nvkm_wo32(chan->inst, 0xac, 0x0000001f); in gf100_chan_ramfc_write()
104 nvkm_wo32(chan->inst, 0xb8, 0xf8000000); in gf100_chan_ramfc_write()
105 nvkm_wo32(chan->inst, 0xf8, 0x10003080); /* 0x002310 */ in gf100_chan_ramfc_write()
106 nvkm_wo32(chan->inst, 0xfc, 0x10000010); /* 0x002350 */ in gf100_chan_ramfc_write()
108 return 0; in gf100_chan_ramfc_write()
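The 0x4c write above packs the upper bits of the GPFIFO base address together with a log2-encoded ring size in bits 16 and up. A minimal sketch of how such a limit2 value could be derived from a pushbuffer length, assuming 8-byte GPFIFO entries and a power-of-two length in bytes; the helper name and parameter are illustrative and not taken from the listing:

#include <linux/log2.h>
#include <linux/types.h>

/* Illustrative only: log2 of the GPFIFO entry count, i.e. the value
 * shifted into bits 16+ of the 0x4c RAMFC word, assuming `length` is
 * the pushbuffer size in bytes and each GPFIFO entry is 8 bytes. */
static u32 example_ramfc_limit2(u64 length)
{
	return ilog2(length / 8);
}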
114 .devm = 0xfff,
121 nvkm_wo32(chan->userd.mem, chan->userd.base + 0x040, 0x00000000); in gf100_chan_userd_clear()
122 nvkm_wo32(chan->userd.mem, chan->userd.base + 0x044, 0x00000000); in gf100_chan_userd_clear()
123 nvkm_wo32(chan->userd.mem, chan->userd.base + 0x048, 0x00000000); in gf100_chan_userd_clear()
124 nvkm_wo32(chan->userd.mem, chan->userd.base + 0x04c, 0x00000000); in gf100_chan_userd_clear()
125 nvkm_wo32(chan->userd.mem, chan->userd.base + 0x050, 0x00000000); in gf100_chan_userd_clear()
126 nvkm_wo32(chan->userd.mem, chan->userd.base + 0x058, 0x00000000); in gf100_chan_userd_clear()
127 nvkm_wo32(chan->userd.mem, chan->userd.base + 0x05c, 0x00000000); in gf100_chan_userd_clear()
128 nvkm_wo32(chan->userd.mem, chan->userd.base + 0x060, 0x00000000); in gf100_chan_userd_clear()
129 nvkm_wo32(chan->userd.mem, chan->userd.base + 0x088, 0x00000000); in gf100_chan_userd_clear()
130 nvkm_wo32(chan->userd.mem, chan->userd.base + 0x08c, 0x00000000); in gf100_chan_userd_clear()
137 .size = 0x1000,
143 .size = 0x1000,
163 u64 addr = 0ULL; in gf100_ectx_bind()
168 case NVKM_ENGINE_GR : ptr0 = 0x0210; break; in gf100_ectx_bind()
169 case NVKM_ENGINE_CE : ptr0 = 0x0230 + (engn->engine->subdev.inst * 0x10); break; in gf100_ectx_bind()
170 case NVKM_ENGINE_MSPDEC: ptr0 = 0x0250; break; in gf100_ectx_bind()
171 case NVKM_ENGINE_MSPPP : ptr0 = 0x0260; break; in gf100_ectx_bind()
172 case NVKM_ENGINE_MSVLD : ptr0 = 0x0270; break; in gf100_ectx_bind()
184 nvkm_wo32(chan->inst, ptr0 + 0, lower_32_bits(addr)); in gf100_ectx_bind()
198 return nvkm_memory_map(vctx->inst, 0, vctx->vmm, vctx->vma, NULL, 0); in gf100_ectx_ctor()
207 u32 data = nvkm_rd32(device, 0x002a30 + (engn->id * 4)); in gf100_engn_mmu_fault_triggered()
210 if (!(data & 0x00000100)) in gf100_engn_mmu_fault_triggered()
214 nvkm_mask(device, 0x002a30 + (engn->id * 4), 0x00000100, 0x00000000); in gf100_engn_mmu_fault_triggered()
216 nvkm_mask(device, 0x002140, 0x00000100, 0x00000100); in gf100_engn_mmu_fault_triggered()
228 ENGN_DEBUG(engn, "triggering mmu fault on 0x%02x", engn->fault); in gf100_engn_mmu_fault_trigger()
231 nvkm_mask(device, 0x002140, 0x00000100, 0x00000000); in gf100_engn_mmu_fault_trigger()
232 nvkm_wr32(device, 0x002100, 0x00000100); in gf100_engn_mmu_fault_trigger()
233 nvkm_wr32(device, 0x002a30 + (engn->id * 4), 0x00000100 | engn->fault); in gf100_engn_mmu_fault_trigger()
249 u32 stat = nvkm_rd32(engn->engine->subdev.device, 0x002640 + (engn->id * 4)); in gf100_engn_status()
251 status->busy = (stat & 0x10000000); in gf100_engn_status()
252 status->save = (stat & 0x00100000); in gf100_engn_status()
253 status->unk0 = (stat & 0x00004000); in gf100_engn_status()
254 status->unk1 = (stat & 0x00001000); in gf100_engn_status()
255 status->chid = (stat & 0x0000007f); in gf100_engn_status()
303 /* { 0x00008000, "" } seen with null ib push */
304 { 0x00200000, "ILLEGAL_MTHD" },
305 { 0x00800000, "EMPTY_SUBC" },
314 u32 mask = nvkm_rd32(device, 0x04010c + (runq->id * 0x2000)); in gf100_runq_intr()
315 u32 stat = nvkm_rd32(device, 0x040108 + (runq->id * 0x2000)) & mask; in gf100_runq_intr()
316 u32 addr = nvkm_rd32(device, 0x0400c0 + (runq->id * 0x2000)); in gf100_runq_intr()
317 u32 data = nvkm_rd32(device, 0x0400c4 + (runq->id * 0x2000)); in gf100_runq_intr()
318 u32 chid = nvkm_rd32(device, 0x040120 + (runq->id * 0x2000)) & runq->fifo->chid->mask; in gf100_runq_intr()
319 u32 subc = (addr & 0x00070000) >> 16; in gf100_runq_intr()
320 u32 mthd = (addr & 0x00003ffc); in gf100_runq_intr()
326 if (stat & 0x00800000) { in gf100_runq_intr()
329 show &= ~0x00800000; in gf100_runq_intr()
338 runq->id, show, msg, chid, chan ? chan->inst->addr : 0, in gf100_runq_intr()
342 if ((stat & 0xc67fe000) && chan) in gf100_runq_intr()
347 nvkm_wr32(device, 0x0400c0 + (runq->id * 0x2000), 0x80600008); in gf100_runq_intr()
348 nvkm_wr32(device, 0x040108 + (runq->id * 0x2000), stat); in gf100_runq_intr()
357 nvkm_mask(device, 0x04013c + (runq->id * 0x2000), 0x10000100, 0x00000000); in gf100_runq_init()
358 nvkm_wr32(device, 0x040108 + (runq->id * 0x2000), 0xffffffff); /* INTR */ in gf100_runq_init()
359 nvkm_wr32(device, 0x04010c + (runq->id * 0x2000), 0xfffffeff); /* INTREN */ in gf100_runq_init()
372 return nvkm_rd32(runl->fifo->engine.subdev.device, 0x002634) & 0x00100000; in gf100_runl_preempt_pending()
378 nvkm_mask(runl->fifo->engine.subdev.device, 0x00262c, 0x00000000, 0x00000000); in gf100_runl_fault_clear()
384 nvkm_mask(runl->fifo->engine.subdev.device, 0x002630, engm, 0x00000000); in gf100_runl_allow()
390 nvkm_mask(runl->fifo->engine.subdev.device, 0x002630, engm, engm); in gf100_runl_block()
396 return nvkm_rd32(runl->fifo->engine.subdev.device, 0x00227c) & 0x00100000; in gf100_runl_pending()
407 case NVKM_MEM_TARGET_VRAM: target = 0; break; in gf100_runl_commit()
414 nvkm_wr32(device, 0x002270, (target << 28) | (addr >> 12)); in gf100_runl_commit()
415 nvkm_wr32(device, 0x002274, 0x01f00000 | count); in gf100_runl_commit()
421 nvkm_wo32(memory, offset + 0, chan->id); in gf100_runl_insert_chan()
422 nvkm_wo32(memory, offset + 4, 0x00000004); in gf100_runl_insert_chan()
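Each runlist entry written above is two 32-bit words (the channel ID, then 0x00000004), i.e. 8 bytes per channel; gf100_runl_commit() then points 0x002270 at the buffer and writes the entry count to 0x002274. A hedged sketch of how a caller might fill a runlist buffer with such entries; the loop, helper name, and inputs are illustrative, not the driver's actual runlist builder:

/* Illustrative only: emit one 8-byte entry per channel, matching the two
 * nvkm_wo32() writes shown for gf100_runl_insert_chan(). `chan_ids` and
 * `nr` are assumed inputs, not taken from the listing. */
static u32 example_fill_runlist(struct nvkm_memory *memory, const u16 *chan_ids, int nr)
{
	u64 offset = 0;
	int i;

	nvkm_kmap(memory);
	for (i = 0; i < nr; i++) {
		nvkm_wo32(memory, offset + 0, chan_ids[i]); /* channel ID */
		nvkm_wo32(memory, offset + 4, 0x00000004);  /* entry flags */
		offset += 8;
	}
	nvkm_done(memory);
	return nr; /* count later written to 0x002274 */
}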
446 nvkm_mask(fifo->engine.subdev.device, 0x002140, 0x80000000, 0x80000000); in gf100_fifo_nonstall_allow()
457 nvkm_mask(fifo->engine.subdev.device, 0x002140, 0x80000000, 0x00000000); in gf100_fifo_nonstall_block()
469 { 0x00, "PGRAPH", NULL, NVKM_ENGINE_GR },
470 { 0x03, "PEEPHOLE", NULL, NVKM_ENGINE_IFB },
471 { 0x04, "BAR1", NULL, NVKM_SUBDEV_BAR },
472 { 0x05, "BAR3", NULL, NVKM_SUBDEV_INSTMEM },
473 { 0x07, "PFIFO" },
474 { 0x10, "PMSVLD", NULL, NVKM_ENGINE_MSVLD },
475 { 0x11, "PMSPPP", NULL, NVKM_ENGINE_MSPPP },
476 { 0x13, "PCOUNTER" },
477 { 0x14, "PMSPDEC", NULL, NVKM_ENGINE_MSPDEC },
478 { 0x15, "PCE0", NULL, NVKM_ENGINE_CE, 0 },
479 { 0x16, "PCE1", NULL, NVKM_ENGINE_CE, 1 },
480 { 0x17, "PMU" },
486 { 0x00, "PT_NOT_PRESENT" },
487 { 0x01, "PT_TOO_SHORT" },
488 { 0x02, "PAGE_NOT_PRESENT" },
489 { 0x03, "VM_LIMIT_EXCEEDED" },
490 { 0x04, "NO_CHANNEL" },
491 { 0x05, "PAGE_SYSTEM_ONLY" },
492 { 0x06, "PAGE_READ_ONLY" },
493 { 0x0a, "COMPRESSED_SYSRAM" },
494 { 0x0c, "INVALID_STORAGE_TYPE" },
500 { 0x01, "PCOPY0" },
501 { 0x02, "PCOPY1" },
502 { 0x04, "DISPATCH" },
503 { 0x05, "CTXCTL" },
504 { 0x06, "PFIFO" },
505 { 0x07, "BAR_READ" },
506 { 0x08, "BAR_WRITE" },
507 { 0x0b, "PVP" },
508 { 0x0c, "PMSPPP" },
509 { 0x0d, "PMSVLD" },
510 { 0x11, "PCOUNTER" },
511 { 0x12, "PMU" },
512 { 0x14, "CCACHE" },
513 { 0x15, "CCACHE_POST" },
519 { 0x01, "TEX" },
520 { 0x0c, "ESETUP" },
521 { 0x0e, "CTXCTL" },
522 { 0x0f, "PROP" },
528 { 0x00, "READ" },
529 { 0x01, "WRITE" },
582 nvkm_mask(device, 0x001718, 0x00000000, 0x00000000); in gf100_fifo_mmu_fault_recover()
631 if (id >= 0) { in gf100_fifo_intr_ctxsw_timeout()
649 u32 engm = 0; in gf100_fifo_intr_sched_ctxsw()
654 if (WARN_ON(engn->fault < 0) || !engn->func->chsw(engn)) in gf100_fifo_intr_sched_ctxsw()
669 { 0x0a, "CTXSW_TIMEOUT" },
678 u32 intr = nvkm_rd32(device, 0x00254c); in gf100_fifo_intr_sched()
679 u32 code = intr & 0x000000ff; in gf100_fifo_intr_sched()
687 case 0x0a: in gf100_fifo_intr_sched()
699 u32 inst = nvkm_rd32(device, 0x002800 + (unit * 0x10)); in gf100_fifo_intr_mmu_fault_unit()
700 u32 valo = nvkm_rd32(device, 0x002804 + (unit * 0x10)); in gf100_fifo_intr_mmu_fault_unit()
701 u32 vahi = nvkm_rd32(device, 0x002808 + (unit * 0x10)); in gf100_fifo_intr_mmu_fault_unit()
702 u32 type = nvkm_rd32(device, 0x00280c + (unit * 0x10)); in gf100_fifo_intr_mmu_fault_unit()
707 info.time = 0; in gf100_fifo_intr_mmu_fault_unit()
710 info.gpc = (type & 0x1f000000) >> 24; in gf100_fifo_intr_mmu_fault_unit()
711 info.client = (type & 0x00001f00) >> 8; in gf100_fifo_intr_mmu_fault_unit()
712 info.access = (type & 0x00000080) >> 7; in gf100_fifo_intr_mmu_fault_unit()
713 info.hub = (type & 0x00000040) >> 6; in gf100_fifo_intr_mmu_fault_unit()
714 info.reason = (type & 0x0000000f); in gf100_fifo_intr_mmu_fault_unit()
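The valo/vahi reads above are the low and high halves of the faulting virtual address reported by the unit. A minimal sketch of combining them into the 64-bit address carried in the fault info, assuming the two registers are simply concatenated:

/* Illustrative only: build the 64-bit fault address from the two 32-bit
 * halves read back at 0x002804 (low) and 0x002808 (high). */
static u64 example_fault_addr(u32 valo, u32 vahi)
{
	return ((u64)vahi << 32) | valo;
}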
723 unsigned long mask = nvkm_rd32(device, 0x00259c); in gf100_fifo_intr_mmu_fault()
728 nvkm_wr32(device, 0x00259c, BIT(unit)); in gf100_fifo_intr_mmu_fault()
737 u32 mask = nvkm_rd32(device, 0x0025a0); in gf100_fifo_intr_pbdma()
744 nvkm_wr32(device, 0x0025a0, BIT(runq->id)); in gf100_fifo_intr_pbdma()
755 u32 intr = nvkm_rd32(device, 0x002a00); in gf100_fifo_intr_runlist()
757 if (intr & 0x10000000) { in gf100_fifo_intr_runlist()
758 nvkm_wr32(device, 0x002a00, 0x10000000); in gf100_fifo_intr_runlist()
759 intr &= ~0x10000000; in gf100_fifo_intr_runlist()
764 nvkm_wr32(device, 0x002a00, intr); in gf100_fifo_intr_runlist()
773 u32 intr = nvkm_rd32(device, 0x0025a8 + (engn * 0x04)); in gf100_fifo_intr_engine_unit()
774 u32 inte = nvkm_rd32(device, 0x002628); in gf100_fifo_intr_engine_unit()
777 nvkm_wr32(device, 0x0025a8 + (engn * 0x04), intr); in gf100_fifo_intr_engine_unit()
779 for (unkn = 0; unkn < 8; unkn++) { in gf100_fifo_intr_engine_unit()
780 u32 ints = (intr >> (unkn * 0x04)) & inte; in gf100_fifo_intr_engine_unit()
781 if (ints & 0x1) { in gf100_fifo_intr_engine_unit()
782 nvkm_event_ntfy(&fifo->nonstall.event, 0, NVKM_FIFO_NONSTALL_EVENT); in gf100_fifo_intr_engine_unit()
787 nvkm_mask(device, 0x002628, ints, 0); in gf100_fifo_intr_engine_unit()
796 u32 mask = nvkm_rd32(device, 0x0025a4); in gf100_fifo_intr_engine()
811 u32 mask = nvkm_rd32(device, 0x002140); in gf100_fifo_intr()
812 u32 stat = nvkm_rd32(device, 0x002100) & mask; in gf100_fifo_intr()
814 if (stat & 0x00000001) { in gf100_fifo_intr()
815 u32 intr = nvkm_rd32(device, 0x00252c); in gf100_fifo_intr()
817 nvkm_wr32(device, 0x002100, 0x00000001); in gf100_fifo_intr()
818 stat &= ~0x00000001; in gf100_fifo_intr()
821 if (stat & 0x00000100) { in gf100_fifo_intr()
823 nvkm_wr32(device, 0x002100, 0x00000100); in gf100_fifo_intr()
824 stat &= ~0x00000100; in gf100_fifo_intr()
827 if (stat & 0x00010000) { in gf100_fifo_intr()
828 u32 intr = nvkm_rd32(device, 0x00256c); in gf100_fifo_intr()
830 nvkm_wr32(device, 0x002100, 0x00010000); in gf100_fifo_intr()
831 stat &= ~0x00010000; in gf100_fifo_intr()
834 if (stat & 0x01000000) { in gf100_fifo_intr()
835 u32 intr = nvkm_rd32(device, 0x00258c); in gf100_fifo_intr()
837 nvkm_wr32(device, 0x002100, 0x01000000); in gf100_fifo_intr()
838 stat &= ~0x01000000; in gf100_fifo_intr()
841 if (stat & 0x10000000) { in gf100_fifo_intr()
843 stat &= ~0x10000000; in gf100_fifo_intr()
846 if (stat & 0x20000000) { in gf100_fifo_intr()
848 stat &= ~0x20000000; in gf100_fifo_intr()
851 if (stat & 0x40000000) { in gf100_fifo_intr()
853 stat &= ~0x40000000; in gf100_fifo_intr()
856 if (stat & 0x80000000) { in gf100_fifo_intr()
858 stat &= ~0x80000000; in gf100_fifo_intr()
864 nvkm_mask(device, 0x002140, stat, 0x00000000); in gf100_fifo_intr()
866 nvkm_wr32(device, 0x002100, stat); in gf100_fifo_intr()
878 nvkm_wr32(device, 0x000204, mask); in gf100_fifo_init_pbdmas()
879 nvkm_wr32(device, 0x002204, mask); in gf100_fifo_init_pbdmas()
883 nvkm_wr32(device, 0x002208, ~(1 << 0)); /* PGRAPH */ in gf100_fifo_init_pbdmas()
884 nvkm_wr32(device, 0x00220c, ~(1 << 1)); /* PVP */ in gf100_fifo_init_pbdmas()
885 nvkm_wr32(device, 0x002210, ~(1 << 1)); /* PMSPP */ in gf100_fifo_init_pbdmas()
886 nvkm_wr32(device, 0x002214, ~(1 << 1)); /* PMSVLD */ in gf100_fifo_init_pbdmas()
887 nvkm_wr32(device, 0x002218, ~(1 << 2)); /* PCE0 */ in gf100_fifo_init_pbdmas()
888 nvkm_wr32(device, 0x00221c, ~(1 << 1)); /* PCE1 */ in gf100_fifo_init_pbdmas()
891 nvkm_mask(device, 0x002a04, 0xbfffffff, 0xbfffffff); in gf100_fifo_init_pbdmas()
899 nvkm_mask(device, 0x002200, 0x00000001, 0x00000001); in gf100_fifo_init()
900 nvkm_wr32(device, 0x002254, 0x10000000 | fifo->userd.bar1->addr >> 12); in gf100_fifo_init()
902 nvkm_wr32(device, 0x002100, 0xffffffff); in gf100_fifo_init()
903 nvkm_wr32(device, 0x002140, 0x7fffffff); in gf100_fifo_init()
904 nvkm_wr32(device, 0x002628, 0x00000001); /* ENGINE_INTR_EN */ in gf100_fifo_init()
912 runl = nvkm_runl_new(fifo, 0, 0, 0); in gf100_fifo_runl_ctor()
916 nvkm_runl_add(runl, 0, fifo->func->engn, NVKM_ENGINE_GR, 0); in gf100_fifo_runl_ctor()
917 nvkm_runl_add(runl, 1, fifo->func->engn, NVKM_ENGINE_MSPDEC, 0); in gf100_fifo_runl_ctor()
918 nvkm_runl_add(runl, 2, fifo->func->engn, NVKM_ENGINE_MSPPP, 0); in gf100_fifo_runl_ctor()
919 nvkm_runl_add(runl, 3, fifo->func->engn, NVKM_ENGINE_MSVLD, 0); in gf100_fifo_runl_ctor()
920 nvkm_runl_add(runl, 4, fifo->func->engn, NVKM_ENGINE_CE, 0); in gf100_fifo_runl_ctor()
922 nvkm_runl_add(runl, 15, &gf100_engn_sw, NVKM_ENGINE_SW, 0); in gf100_fifo_runl_ctor()
923 return 0; in gf100_fifo_runl_ctor()
933 save = nvkm_mask(device, 0x000204, 0xffffffff, 0xffffffff); in gf100_fifo_runq_nr()
934 save = nvkm_mask(device, 0x000204, 0xffffffff, save); in gf100_fifo_runq_nr()
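The two nvkm_mask() calls above probe 0x000204 by writing all-ones and then restoring the value read back; the bits that stick indicate which PBDMA units are present. A hedged sketch of turning that bitmask into a unit count, assuming a simple population count (hweight32() is the stock kernel helper for this):

#include <linux/bitops.h>

/* Illustrative only: count PBDMA units from the enable bitmask probed
 * at register 0x000204 by the nvkm_mask() sequence above. */
static int example_count_pbdmas(u32 pbdma_mask)
{
	return hweight32(pbdma_mask);
}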
941 return nvkm_chid_new(&nvkm_chan_event, &fifo->engine.subdev, nr, 0, nr, &fifo->chid); in gf100_fifo_chid_ctor()
961 .chan = {{ 0, 0, FERMI_CHANNEL_GPFIFO }, &gf100_chan },