// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Low-level SPU handling
 *
 * (C) Copyright IBM Deutschland Entwicklung GmbH 2005
 *
 * Author: Arnd Bergmann <arndb@de.ibm.com>
 */
#include <linux/sched/signal.h>
#include <linux/mm.h>

#include <asm/spu.h>
#include <asm/spu_csa.h>

#include "spufs.h"

/**
 * Handle an SPE event, depending on the context's SPU_CREATE_EVENTS_ENABLED
 * flag.
 *
 * If the context was created with events, we just set the return event.
 * Otherwise, send an appropriate signal to the process.
 */
static void spufs_handle_event(struct spu_context *ctx,
				unsigned long ea, int type)
{
	if (ctx->flags & SPU_CREATE_EVENTS_ENABLED) {
		ctx->event_return |= type;
		wake_up_all(&ctx->stop_wq);
		return;
	}

	switch (type) {
	case SPE_EVENT_INVALID_DMA:
		force_sig_fault(SIGBUS, BUS_OBJERR, NULL, current);
		break;
	case SPE_EVENT_SPE_DATA_STORAGE:
		ctx->ops->restart_dma(ctx);
		force_sig_fault(SIGSEGV, SEGV_ACCERR, (void __user *)ea,
				current);
		break;
	case SPE_EVENT_DMA_ALIGNMENT:
		/* DAR isn't set for an alignment fault :( */
		force_sig_fault(SIGBUS, BUS_ADRALN, NULL, current);
		break;
	case SPE_EVENT_SPE_ERROR:
		force_sig_fault(
			SIGILL, ILL_ILLOPC,
			(void __user *)(unsigned long)
			ctx->ops->npc_read(ctx) - 4, current);
		break;
	}
}

int spufs_handle_class0(struct spu_context *ctx)
{
	unsigned long stat = ctx->csa.class_0_pending & CLASS0_INTR_MASK;

	if (likely(!stat))
		return 0;

	if (stat & CLASS0_DMA_ALIGNMENT_INTR)
		spufs_handle_event(ctx, ctx->csa.class_0_dar,
			SPE_EVENT_DMA_ALIGNMENT);

	if (stat & CLASS0_INVALID_DMA_COMMAND_INTR)
		spufs_handle_event(ctx, ctx->csa.class_0_dar,
			SPE_EVENT_INVALID_DMA);

	if (stat & CLASS0_SPU_ERROR_INTR)
		spufs_handle_event(ctx, ctx->csa.class_0_dar,
			SPE_EVENT_SPE_ERROR);

	ctx->csa.class_0_pending = 0;

	return -EIO;
}

/*
 * Bottom half handler for page faults. We can't do this from
 * interrupt context, since we might need to sleep.
 * We also need to give up the mutex so we can get scheduled
 * out while waiting for the backing store.
 *
 * TODO: try calling hash_page from the interrupt handler first
 * in order to speed up the easy case.
 */
int spufs_handle_class1(struct spu_context *ctx)
{
	u64 ea, dsisr, access;
	unsigned long flags;
	vm_fault_t flt = 0;
	int ret;

	/*
	 * dar and dsisr get passed from the registers
	 * to the spu_context, to this function, but not
	 * back to the spu if it gets scheduled again.
	 *
	 * If we don't handle the fault for a saved context
	 * in time, we can expect to see the same fault again
	 * immediately after the context restore.
	 */
	ea = ctx->csa.class_1_dar;
	dsisr = ctx->csa.class_1_dsisr;

	if (!(dsisr & (MFC_DSISR_PTE_NOT_FOUND | MFC_DSISR_ACCESS_DENIED)))
		return 0;

	spuctx_switch_state(ctx, SPU_UTIL_IOWAIT);

	pr_debug("ctx %p: ea %016llx, dsisr %016llx state %d\n", ctx, ea,
		dsisr, ctx->state);

	ctx->stats.hash_flt++;
	if (ctx->state == SPU_STATE_RUNNABLE)
		ctx->spu->stats.hash_flt++;

	/* we must not hold the lock when entering copro_handle_mm_fault */
	spu_release(ctx);

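	/*
	 * Translate the MFC fault information into Linux page access flags:
	 * any DMA needs the page present and readable, and a PUT command
	 * (which writes local store data out to the effective address)
	 * additionally needs write access. hash_page() is tried first as a
	 * fast path (0x300 is the data storage exception vector); only if
	 * that fails do we fall back to the full mm fault handler below.
	 */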
	access = (_PAGE_PRESENT | _PAGE_READ);
	access |= (dsisr & MFC_DSISR_ACCESS_PUT) ? _PAGE_WRITE : 0UL;
	local_irq_save(flags);
	ret = hash_page(ea, access, 0x300, dsisr);
	local_irq_restore(flags);

	/* hashing failed, so try the actual fault handler */
	if (ret)
		ret = copro_handle_mm_fault(current->mm, ea, dsisr, &flt);

	/*
	 * This is nasty: we need the state_mutex for all the bookkeeping even
	 * if the syscall was interrupted by a signal. ewww.
	 */
	mutex_lock(&ctx->state_mutex);

	/*
	 * Clear dsisr under the context lock after handling the fault, so
	 * that time slicing will not preempt the context while the page
	 * fault handler is running. Context switch code removes mappings.
	 */
	ctx->csa.class_1_dar = ctx->csa.class_1_dsisr = 0;

	/*
	 * If we handled the fault successfully and are in runnable
	 * state, restart the DMA.
	 * In case of unhandled error report the problem to user space.
	 */
	if (!ret) {
		if (flt & VM_FAULT_MAJOR)
			ctx->stats.maj_flt++;
		else
			ctx->stats.min_flt++;
		if (ctx->state == SPU_STATE_RUNNABLE) {
			if (flt & VM_FAULT_MAJOR)
				ctx->spu->stats.maj_flt++;
			else
				ctx->spu->stats.min_flt++;
		}

		if (ctx->spu)
			ctx->ops->restart_dma(ctx);
	} else
		spufs_handle_event(ctx, ea, SPE_EVENT_SPE_DATA_STORAGE);

	spuctx_switch_state(ctx, SPU_UTIL_SYSTEM);
	return ret;
}