Lines Matching full:fault
66 u8 fault; member
68 } **fault; member
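The two "member" hits above are the per-fault record and the buffer's pointer array. A minimal sketch of their shape, reconstructed from the field accesses listed further down (490-498 and 769); the field names follow the listing, but the layout is an assumption rather than a verbatim copy of nouveau_svm.c:

#include <linux/types.h>

/* Sketch only: field set inferred from the accesses at 490-498 and 769. */
struct nouveau_svmm;

struct nouveau_svm_fault {
	u64 inst;			/* channel instance that faulted */
	u64 addr;			/* faulting GPU virtual address */
	u64 time;			/* timestamp, hi/lo halves combined */
	u32 engine;
	u8  gpc;
	u8  hub;
	u8  access;			/* read/write/atomic/prefetch */
	u8  client;
	u8  fault;			/* raw fault/reason code */
	struct nouveau_svmm *svmm;	/* owning SVM mm, resolved later */
};

/* Per fault buffer: lazily populated array of cached fault pointers. */
struct nouveau_svm_fault **fault;
int fault_nr;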
163 * page fault) and maybe some other commands. in nouveau_svmm_bind()
382 /* Issue fault replay for GPU to retry accesses that faulted previously. */
393 /* Cancel a replayable fault that could not be handled.
395 * Cancelling the fault will trigger recovery to reset the engine
415 struct nouveau_svm_fault *fault) in nouveau_svm_fault_cancel_fault() argument
417 nouveau_svm_fault_cancel(svm, fault->inst, in nouveau_svm_fault_cancel_fault()
418 fault->hub, in nouveau_svm_fault_cancel_fault()
419 fault->gpc, in nouveau_svm_fault_cancel_fault()
420 fault->client); in nouveau_svm_fault_cancel_fault()
424 nouveau_svm_fault_priority(u8 fault) in nouveau_svm_fault_priority() argument
426 switch (fault) { in nouveau_svm_fault_priority()
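The priority helper at 424-426 pairs with the sort() call at 756 further down: pending faults are grouped so that one servicing pass can cover neighbouring entries. A minimal sketch of such a comparator, assuming the FAULT_ACCESS_* values, the priority table, and the example_ names shown here (all assumptions), and the struct sketched above:

#include <linux/sort.h>
#include <linux/types.h>

/* Values assumed for this sketch. */
#define FAULT_ACCESS_READ	0
#define FAULT_ACCESS_WRITE	1
#define FAULT_ACCESS_ATOMIC	2
#define FAULT_ACCESS_PREFETCH	3

static int
example_fault_priority(u8 access)
{
	switch (access) {
	case FAULT_ACCESS_PREFETCH:
		return 0;
	case FAULT_ACCESS_READ:
		return 1;
	case FAULT_ACCESS_WRITE:
		return 2;
	case FAULT_ACCESS_ATOMIC:
		return 3;
	default:
		return -1;
	}
}

/* Comparator for sort(): group by instance, then by address; ordering within
 * a group by access priority (most demanding first) is an assumption of this
 * sketch. */
static int
example_fault_cmp(const void *a, const void *b)
{
	const struct nouveau_svm_fault *fa = *(struct nouveau_svm_fault * const *)a;
	const struct nouveau_svm_fault *fb = *(struct nouveau_svm_fault * const *)b;

	if (fa->inst != fb->inst)
		return fa->inst < fb->inst ? -1 : 1;
	if (fa->addr != fb->addr)
		return fa->addr < fb->addr ? -1 : 1;
	return example_fault_priority(fb->access) -
	       example_fault_priority(fa->access);
}

Used roughly as the call at 756 shows: sort(buffer->fault, buffer->fault_nr, sizeof(*buffer->fault), example_fault_cmp, NULL);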
472 struct nouveau_svm_fault *fault; in nouveau_svm_fault_cache() local
480 if (!buffer->fault[buffer->fault_nr]) { in nouveau_svm_fault_cache()
481 fault = kmalloc(sizeof(*fault), GFP_KERNEL); in nouveau_svm_fault_cache()
482 if (WARN_ON(!fault)) { in nouveau_svm_fault_cache()
486 buffer->fault[buffer->fault_nr] = fault; in nouveau_svm_fault_cache()
489 fault = buffer->fault[buffer->fault_nr++]; in nouveau_svm_fault_cache()
490 fault->inst = inst; in nouveau_svm_fault_cache()
491 fault->addr = (u64)addrhi << 32 | addrlo; in nouveau_svm_fault_cache()
492 fault->time = (u64)timehi << 32 | timelo; in nouveau_svm_fault_cache()
493 fault->engine = engine; in nouveau_svm_fault_cache()
494 fault->gpc = gpc; in nouveau_svm_fault_cache()
495 fault->hub = hub; in nouveau_svm_fault_cache()
496 fault->access = (info & 0x000f0000) >> 16; in nouveau_svm_fault_cache()
497 fault->client = client; in nouveau_svm_fault_cache()
498 fault->fault = (info & 0x0000001f); in nouveau_svm_fault_cache()
500 SVM_DBG(svm, "fault %016llx %016llx %02x", in nouveau_svm_fault_cache()
501 fault->inst, fault->addr, fault->access); in nouveau_svm_fault_cache()
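The caching path at 490-498 reassembles 64-bit values from the two 32-bit halves read out of the hardware fault buffer and unpacks the access type and fault code from the packed info word; the decode step in isolation is just:

#include <linux/types.h>

/* Decode sketch only; the bit layout is what the masks above imply. */
static inline void
example_fault_decode(u32 addrlo, u32 addrhi, u32 timelo, u32 timehi, u32 info,
		     u64 *addr, u64 *time, u8 *access, u8 *fault)
{
	*addr   = (u64)addrhi << 32 | addrlo;	/* faulting address, hi:lo */
	*time   = (u64)timehi << 32 | timelo;	/* timestamp, hi:lo        */
	*access = (info & 0x000f0000) >> 16;	/* access type, bits 19:16 */
	*fault  = (info & 0x0000001f);		/* fault/reason, bits 4:0  */
}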
659 /* Have HMM fault pages within the fault window to the GPU. */ in nouveau_range_fault()
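The comment at 659 refers to the standard hmm_range_fault() retry pattern (Documentation/mm/hmm.rst): fault the CPU page tables for the window, and retry if a concurrent invalidation raced with the walk. A minimal sketch of that loop, not the exact body of nouveau_range_fault(); the function name and the single default flag are assumptions:

#include <linux/errno.h>
#include <linux/hmm.h>
#include <linux/mm.h>
#include <linux/mmu_notifier.h>

static int
example_fault_range(struct mmu_interval_notifier *notifier, struct mm_struct *mm,
		    unsigned long start, unsigned long end, unsigned long *pfns)
{
	struct hmm_range range = {
		.notifier	= notifier,
		.start		= start,
		.end		= end,
		.hmm_pfns	= pfns,
		.default_flags	= HMM_PFN_REQ_FAULT,
	};
	int ret;

	for (;;) {
		range.notifier_seq = mmu_interval_read_begin(notifier);
		mmap_read_lock(mm);
		ret = hmm_range_fault(&range);
		mmap_read_unlock(mm);
		if (ret) {
			if (ret == -EBUSY)
				continue;	/* raced with an invalidation, retry */
			return ret;
		}
		/* The real driver takes its notifier lock here before trusting
		 * pfns; rechecking the sequence catches a concurrent invalidate. */
		if (!mmu_interval_read_retry(notifier, range.notifier_seq))
			break;
	}
	return 0;
}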
731 /* Parse available fault buffer entries into a cache, and update in nouveau_svm_fault()
734 SVM_DBG(svm, "fault handler"); in nouveau_svm_fault()
750 SVM_DBG(svm, "%d fault(s) pending", buffer->fault_nr); in nouveau_svm_fault()
756 sort(buffer->fault, buffer->fault_nr, sizeof(*buffer->fault), in nouveau_svm_fault()
762 if (!svmm || buffer->fault[fi]->inst != inst) { in nouveau_svm_fault()
764 nouveau_ivmm_find(svm, buffer->fault[fi]->inst); in nouveau_svm_fault()
766 inst = buffer->fault[fi]->inst; in nouveau_svm_fault()
769 buffer->fault[fi]->svmm = svmm; in nouveau_svm_fault()
785 if (!(svmm = buffer->fault[fi]->svmm)) { in nouveau_svm_fault()
786 nouveau_svm_fault_cancel_fault(svm, buffer->fault[fi]); in nouveau_svm_fault()
789 SVMM_DBG(svmm, "addr %016llx", buffer->fault[fi]->addr); in nouveau_svm_fault()
794 start = buffer->fault[fi]->addr; in nouveau_svm_fault()
801 * fault window, determining required pages and access in nouveau_svm_fault()
808 * Determine required permissions based on GPU fault in nouveau_svm_fault()
811 switch (buffer->fault[fi]->access) { in nouveau_svm_fault()
828 nouveau_svm_fault_cancel_fault(svm, buffer->fault[fi]); in nouveau_svm_fault()
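The switch at 811 decides what the CPU page tables must provide before the range can be mirrored to the GPU, and unknown access types end in the cancel call at 828. A hedged sketch of such a mapping onto the hmm_range_fault() request flags from include/linux/hmm.h, reusing the FAULT_ACCESS_* values assumed earlier (the helper name is illustrative):

#include <linux/errno.h>
#include <linux/hmm.h>
#include <linux/types.h>

/* Sketch: translate the GPU-reported access type into HMM request flags. */
static int
example_access_to_hmm_flags(u8 access, unsigned long *hmm_flags, bool *atomic)
{
	*atomic = false;
	switch (access) {
	case FAULT_ACCESS_READ:
		*hmm_flags = HMM_PFN_REQ_FAULT;		/* present is enough */
		return 0;
	case FAULT_ACCESS_WRITE:
		*hmm_flags = HMM_PFN_REQ_FAULT | HMM_PFN_REQ_WRITE;
		return 0;
	case FAULT_ACCESS_ATOMIC:
		*hmm_flags = HMM_PFN_REQ_FAULT | HMM_PFN_REQ_WRITE;
		*atomic = true;				/* exclusive access needed */
		return 0;
	case FAULT_ACCESS_PREFETCH:
		*hmm_flags = 0;				/* opportunistic only */
		return 0;
	default:
		return -EINVAL;		/* caller cancels the fault, as at 828 */
	}
}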
852 * fault addresses have sufficient access permission. in nouveau_svm_fault()
854 if (buffer->fault[fn]->svmm != svmm || in nouveau_svm_fault()
855 buffer->fault[fn]->addr >= limit || in nouveau_svm_fault()
856 (buffer->fault[fi]->access == FAULT_ACCESS_READ && in nouveau_svm_fault()
858 (buffer->fault[fi]->access != FAULT_ACCESS_READ && in nouveau_svm_fault()
859 buffer->fault[fi]->access != FAULT_ACCESS_PREFETCH && in nouveau_svm_fault()
861 (buffer->fault[fi]->access != FAULT_ACCESS_READ && in nouveau_svm_fault()
862 buffer->fault[fi]->access != FAULT_ACCESS_WRITE && in nouveau_svm_fault()
863 buffer->fault[fi]->access != FAULT_ACCESS_PREFETCH && in nouveau_svm_fault()
871 struct nouveau_svm_fault *fault = in nouveau_svm_fault() local
872 buffer->fault[fi++]; in nouveau_svm_fault()
874 nouveau_svm_fault_cancel_fault(svm, fault); in nouveau_svm_fault()
880 /* Issue fault replay to the GPU. */ in nouveau_svm_fault()
975 if (buffer->fault) { in nouveau_svm_fault_buffer_dtor()
976 for (i = 0; i < buffer->entries && buffer->fault[i]; i++) in nouveau_svm_fault_buffer_dtor()
977 kfree(buffer->fault[i]); in nouveau_svm_fault_buffer_dtor()
978 kvfree(buffer->fault); in nouveau_svm_fault_buffer_dtor()
999 SVM_ERR(svm, "Fault buffer allocation failed: %d", ret); in nouveau_svm_fault_buffer_ctor()
1014 buffer->fault = kvcalloc(buffer->entries, sizeof(*buffer->fault), GFP_KERNEL); in nouveau_svm_fault_buffer_ctor()
1015 if (!buffer->fault) in nouveau_svm_fault_buffer_ctor()
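The pointer array allocated here with kvcalloc() starts out zero-filled and is populated lazily, one kmalloc() per cached fault, at 480-486; that is why the destructor's loop at 976-978 above may stop at the first NULL slot, provided the bounds check runs before the dereference. A minimal teardown sketch under that assumption (the helper name is illustrative):

#include <linux/mm.h>
#include <linux/slab.h>

/* Sketch: entries are allocated on demand, so the first NULL marks the end of
 * the populated prefix; check the bound before reading the slot. */
static void
example_fault_array_free(struct nouveau_svm_fault **fault, int entries)
{
	int i;

	for (i = 0; i < entries && fault[i]; i++)
		kfree(fault[i]);
	kvfree(fault);
}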
1075 SVM_DBG(svm, "No supported fault buffer class"); in nouveau_svm_init()