Lines matching full:fault

17 * struct iopf_queue - IO Page Fault queue
18 * @wq: the fault workqueue
29 * struct iopf_device_param - IO Page Fault data attached to a device
44 struct iommu_fault fault; member
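For orientation, here is a sketch of the three structures these fragments come from, reconstructed from the kernel-doc above and the upstream io-pgfault code; take the exact field list as indicative rather than authoritative:

struct iopf_queue {
	struct workqueue_struct	*wq;		/* the fault workqueue */
	struct list_head	devices;	/* attached iopf_device_param entries */
	struct mutex		lock;		/* protects the device list */
};

struct iopf_device_param {
	struct device		*dev;
	struct iopf_queue	*queue;
	struct list_head	queue_list;	/* entry in iopf_queue::devices */
	struct list_head	partial;	/* faults waiting for a "last" request */
};

struct iopf_fault {
	struct iommu_fault	fault;		/* the member matched at line 44 */
	struct list_head	list;
};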
60 .pasid = iopf->fault.prm.pasid, in iopf_complete_group()
61 .grpid = iopf->fault.prm.grpid, in iopf_complete_group()
65 if ((iopf->fault.prm.flags & IOMMU_FAULT_PAGE_REQUEST_PASID_VALID) && in iopf_complete_group()
66 (iopf->fault.prm.flags & IOMMU_FAULT_PAGE_RESPONSE_NEEDS_PASID)) in iopf_complete_group()
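Lines 60-66 are from iopf_complete_group(), which reports the outcome of a whole fault group back to the hardware. A condensed sketch of the function, assuming the upstream iommu_page_response() API; error handling and the version field are trimmed:

static int iopf_complete_group(struct device *dev, struct iopf_fault *iopf,
			       enum iommu_page_response_code status)
{
	struct iommu_page_response resp = {
		.pasid	= iopf->fault.prm.pasid,	/* line 60 */
		.grpid	= iopf->fault.prm.grpid,	/* line 61 */
		.code	= status,
	};

	/* Lines 65-66: only echo the PASID back if the device needs it. */
	if ((iopf->fault.prm.flags & IOMMU_FAULT_PAGE_REQUEST_PASID_VALID) &&
	    (iopf->fault.prm.flags & IOMMU_FAULT_PAGE_RESPONSE_NEEDS_PASID))
		resp.flags = IOMMU_PAGE_RESP_PASID_VALID;

	return iommu_page_response(dev, &resp);
}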
81 group->last_fault.fault.prm.pasid, 0); in iopf_handler()
91 status = domain->iopf_handler(&iopf->fault, in iopf_handler()
94 if (!(iopf->fault.prm.flags & in iopf_handler()
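Lines 81-94 are from iopf_handler(), the workqueue callback that resolves the target domain from the PASID of the group's last fault and feeds each fault to the handler installed on that domain. A condensed sketch of the flow, with locking and refcounting trimmed:

static void iopf_handler(struct work_struct *work)
{
	struct iopf_group *group = container_of(work, struct iopf_group, work);
	struct iopf_fault *iopf, *next;
	struct iommu_domain *domain;
	enum iommu_page_response_code status = IOMMU_PAGE_RESP_SUCCESS;

	/* Line 81: look up the domain bound to this PASID. */
	domain = iommu_get_domain_for_dev_pasid(group->dev,
				group->last_fault.fault.prm.pasid, 0);
	if (!domain || !domain->iopf_handler)
		status = IOMMU_PAGE_RESP_INVALID;

	list_for_each_entry_safe(iopf, next, &group->faults, list) {
		/* Errors are sticky: skip the rest of the group on failure. */
		if (status == IOMMU_PAGE_RESP_SUCCESS)
			status = domain->iopf_handler(&iopf->fault,	/* line 91 */
						      domain->fault_data);

		/* Line 94: partial faults were allocated separately; free them. */
		if (!(iopf->fault.prm.flags &
		      IOMMU_FAULT_PAGE_REQUEST_LAST_PAGE))
			kfree(iopf);
	}

	iopf_complete_group(group->dev, &group->last_fault, status);
	kfree(group);
}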
104 * iommu_queue_iopf - IO Page Fault handler
105 * @fault: fault event
108 * Add a fault to the device workqueue, to be handled by the mm fault handler.
127 * b. The IOMMU driver flushes all fault queues on unbind() before freeing the PASID.
134 * Any valid page fault will eventually be routed to an iommu domain and the
135 * page fault handler installed there will get called. The users of this
    * handling framework should guarantee that the iommu domain could only be
    * freed after the device has stopped generating page faults (or the IOMMU
    * hardware has been set to block the page faults) and all pending faults
    * have been flushed.
143 int iommu_queue_iopf(struct iommu_fault *fault, void *cookie) in iommu_queue_iopf() argument
155 if (fault->type != IOMMU_FAULT_PAGE_REQ) in iommu_queue_iopf()
156 /* Not a recoverable page fault */ in iommu_queue_iopf()
167 if (!(fault->prm.flags & IOMMU_FAULT_PAGE_REQUEST_LAST_PAGE)) { in iommu_queue_iopf()
172 iopf->fault = *fault; in iommu_queue_iopf()
192 group->last_fault.fault = *fault; in iommu_queue_iopf()
199 if (iopf->fault.prm.grpid == fault->prm.grpid) in iommu_queue_iopf()
200 /* Insert *before* the last fault */ in iommu_queue_iopf()
209 if (iopf->fault.prm.grpid == fault->prm.grpid) { in iommu_queue_iopf()
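The fragments at lines 143-209 cover the two paths through iommu_queue_iopf(). A fault that is not flagged LAST_PAGE is copied and stashed on the device's partial list; the last fault of a group allocates an iopf_group, moves the stashed entries with a matching grpid in front of it, and hands the group to the workqueue, where iopf_handler() above picks it up. A condensed sketch with locking and error unwinding trimmed:

int iommu_queue_iopf(struct iommu_fault *fault, void *cookie)
{
	struct device *dev = cookie;
	struct dev_iommu *param = dev->iommu;
	struct iopf_device_param *iopf_param = param->iopf_param;
	struct iopf_fault *iopf, *next;
	struct iopf_group *group;

	if (fault->type != IOMMU_FAULT_PAGE_REQ)
		return -EOPNOTSUPP;	/* not a recoverable page fault */

	if (!(fault->prm.flags & IOMMU_FAULT_PAGE_REQUEST_LAST_PAGE)) {
		/* Non-last request of a group: postpone until the last one. */
		iopf = kzalloc(sizeof(*iopf), GFP_KERNEL);
		if (!iopf)
			return -ENOMEM;

		iopf->fault = *fault;			/* line 172 */
		list_add(&iopf->list, &iopf_param->partial);
		return 0;
	}

	group = kzalloc(sizeof(*group), GFP_KERNEL);
	if (!group)
		return -ENOMEM;

	group->dev = dev;
	group->last_fault.fault = *fault;		/* line 192 */
	INIT_LIST_HEAD(&group->faults);
	list_add(&group->last_fault.list, &group->faults);
	INIT_WORK(&group->work, iopf_handler);

	/* Lines 199-209: collect stashed partial faults of the same group. */
	list_for_each_entry_safe(iopf, next, &iopf_param->partial, list) {
		if (iopf->fault.prm.grpid == fault->prm.grpid)
			/* Insert *before* the last fault */
			list_move(&iopf->list, &group->faults);
	}

	queue_work(iopf_param->queue->wq, &group->work);
	return 0;
}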
225 * that no new fault is added to the queue. In particular it must flush its
    * low-level queue before calling this function.
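For context, this line is from the iopf_queue_flush_dev() kernel-doc; once the driver has flushed its low-level queue, the function itself is essentially a flush of the shared workqueue. A minimal sketch, assuming the structures sketched earlier:

int iopf_queue_flush_dev(struct device *dev)
{
	int ret = 0;
	struct iopf_device_param *iopf_param;
	struct dev_iommu *param = dev->iommu;

	if (!param)
		return -ENODEV;

	mutex_lock(&param->lock);
	iopf_param = param->iopf_param;
	if (iopf_param)
		/* Wait for iopf_handler() to drain every queued group. */
		flush_workqueue(iopf_param->queue->wq);
	else
		ret = -ENODEV;
	mutex_unlock(&param->lock);

	return ret;
}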
252 * iopf_queue_discard_partial - Remove all pending partial faults
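iopf_queue_discard_partial() handles the case where stashed partial faults will never see their group's last request, for instance after the device's PRI queue overflows. A sketch that simply frees every partial fault on every device attached to the queue:

int iopf_queue_discard_partial(struct iopf_queue *queue)
{
	struct iopf_fault *iopf, *next;
	struct iopf_device_param *iopf_param;

	if (!queue)
		return -EINVAL;

	mutex_lock(&queue->lock);
	list_for_each_entry(iopf_param, &queue->devices, queue_list) {
		list_for_each_entry_safe(iopf, next, &iopf_param->partial, list) {
			list_del(&iopf->list);
			kfree(iopf);
		}
	}
	mutex_unlock(&queue->lock);
	return 0;
}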
283 * iopf_queue_add_device - Add producer to the fault queue
324 * iopf_queue_remove_device - Remove producer from the fault queue
366 * iopf_queue_alloc - Allocate and initialize a fault queue
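Lines 283-366 sketch the producer-side lifecycle: an IOMMU driver allocates one queue, attaches each PRI-capable device to it, and routes that device's faults into iommu_queue_iopf(). A hypothetical driver would glue it together roughly as below; the my_* names are invented for illustration, while the iopf_queue_* and iommu_*_device_fault_handler() calls are the upstream API:

/* Hypothetical driver glue; only the my_* names are invented. */
static struct iopf_queue *my_iopf_queue;

static int my_iommu_init(void)
{
	my_iopf_queue = iopf_queue_alloc("my-iommu-iopf");	/* line 366 */
	return my_iopf_queue ? 0 : -ENOMEM;
}

static int my_iommu_enable_pri(struct device *dev)
{
	int ret;

	ret = iopf_queue_add_device(my_iopf_queue, dev);	/* line 283 */
	if (ret)
		return ret;

	/* Deliver the device's page requests to iommu_queue_iopf(). */
	ret = iommu_register_device_fault_handler(dev, iommu_queue_iopf, dev);
	if (ret)
		iopf_queue_remove_device(my_iopf_queue, dev);	/* line 324 */
	return ret;
}

static void my_iommu_disable_pri(struct device *dev)
{
	iopf_queue_flush_dev(dev);		/* drain pending faults first */
	iommu_unregister_device_fault_handler(dev);
	iopf_queue_remove_device(my_iopf_queue, dev);
}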