/*
 * Low-level SPU handling
 *
 * (C) Copyright IBM Deutschland Entwicklung GmbH 2005
 *
 * Author: Arnd Bergmann <arndb@de.ibm.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2, or (at your option)
 * any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
 */
#include <linux/sched/signal.h>
#include <linux/mm.h>

#include <asm/spu.h>
#include <asm/spu_csa.h>

#include "spufs.h"

/**
 * Handle an SPE event, depending on the context's SPU_CREATE_EVENTS_ENABLED
 * flag.
 *
 * If the context was created with events, we just set the return event.
 * Otherwise, send an appropriate signal to the process.
 */
static void spufs_handle_event(struct spu_context *ctx,
				unsigned long ea, int type)
{
	if (ctx->flags & SPU_CREATE_EVENTS_ENABLED) {
		ctx->event_return |= type;
		wake_up_all(&ctx->stop_wq);
		return;
	}

	switch (type) {
	case SPE_EVENT_INVALID_DMA:
		force_sig_fault(SIGBUS, BUS_OBJERR, NULL, current);
		break;
	case SPE_EVENT_SPE_DATA_STORAGE:
		ctx->ops->restart_dma(ctx);
		force_sig_fault(SIGSEGV, SEGV_ACCERR, (void __user *)ea,
				current);
		break;
	case SPE_EVENT_DMA_ALIGNMENT:
		/* DAR isn't set for an alignment fault :( */
		force_sig_fault(SIGBUS, BUS_ADRALN, NULL, current);
		break;
	case SPE_EVENT_SPE_ERROR:
		force_sig_fault(
			SIGILL, ILL_ILLOPC,
			(void __user *)(unsigned long)
			ctx->ops->npc_read(ctx) - 4, current);
		break;
	}
}

int spufs_handle_class0(struct spu_context *ctx)
{
	unsigned long stat = ctx->csa.class_0_pending & CLASS0_INTR_MASK;

	if (likely(!stat))
		return 0;

	if (stat & CLASS0_DMA_ALIGNMENT_INTR)
		spufs_handle_event(ctx, ctx->csa.class_0_dar,
			SPE_EVENT_DMA_ALIGNMENT);

	if (stat & CLASS0_INVALID_DMA_COMMAND_INTR)
		spufs_handle_event(ctx, ctx->csa.class_0_dar,
			SPE_EVENT_INVALID_DMA);

	if (stat & CLASS0_SPU_ERROR_INTR)
		spufs_handle_event(ctx, ctx->csa.class_0_dar,
			SPE_EVENT_SPE_ERROR);

	ctx->csa.class_0_pending = 0;

	return -EIO;
}

/*
 * Bottom half handler for page faults; we can't do this from
 * interrupt context, since we might need to sleep.
 * We also need to give up the mutex so we can get scheduled
 * out while waiting for the backing store.
 *
 * TODO: try calling hash_page from the interrupt handler first
 *       in order to speed up the easy case.
 */
int spufs_handle_class1(struct spu_context *ctx)
{
	u64 ea, dsisr, access;
	unsigned long flags;
	vm_fault_t flt = 0;
	int ret;

	/*
	 * dar and dsisr get passed from the registers
	 * to the spu_context, to this function, but not
	 * back to the spu if it gets scheduled again.
	 *
	 * If we don't handle the fault for a saved context
	 * in time, we can still expect to get the same fault
	 * immediately after the context restore.
	 */
	ea = ctx->csa.class_1_dar;
	dsisr = ctx->csa.class_1_dsisr;

	if (!(dsisr & (MFC_DSISR_PTE_NOT_FOUND | MFC_DSISR_ACCESS_DENIED)))
		return 0;

	spuctx_switch_state(ctx, SPU_UTIL_IOWAIT);

	pr_debug("ctx %p: ea %016llx, dsisr %016llx state %d\n", ctx, ea,
		dsisr, ctx->state);

	ctx->stats.hash_flt++;
	if (ctx->state == SPU_STATE_RUNNABLE)
		ctx->spu->stats.hash_flt++;

	/* we must not hold the lock when entering copro_handle_mm_fault */
	spu_release(ctx);

	access = (_PAGE_PRESENT | _PAGE_READ);
	access |= (dsisr & MFC_DSISR_ACCESS_PUT) ? _PAGE_WRITE : 0UL;
	local_irq_save(flags);
	ret = hash_page(ea, access, 0x300, dsisr);
	local_irq_restore(flags);

	/* hashing failed, so try the actual fault handler */
	if (ret)
		ret = copro_handle_mm_fault(current->mm, ea, dsisr, &flt);

	/*
	 * This is nasty: we need the state_mutex for all the bookkeeping even
	 * if the syscall was interrupted by a signal. ewww.
	 */
	mutex_lock(&ctx->state_mutex);

	/*
	 * Clear dsisr under the context lock after handling the fault, so
	 * that time slicing will not preempt the context while the page
	 * fault handler is running. Context switch code removes mappings.
	 */
	ctx->csa.class_1_dar = ctx->csa.class_1_dsisr = 0;

	/*
	 * If we handled the fault successfully and are in runnable
	 * state, restart the DMA.
	 * In case of unhandled error report the problem to user space.
	 */
	if (!ret) {
		if (flt & VM_FAULT_MAJOR)
			ctx->stats.maj_flt++;
		else
			ctx->stats.min_flt++;
		if (ctx->state == SPU_STATE_RUNNABLE) {
			if (flt & VM_FAULT_MAJOR)
				ctx->spu->stats.maj_flt++;
			else
				ctx->spu->stats.min_flt++;
		}

		if (ctx->spu)
			ctx->ops->restart_dma(ctx);
	} else
		spufs_handle_event(ctx, ea, SPE_EVENT_SPE_DATA_STORAGE);

	spuctx_switch_state(ctx, SPU_UTIL_SYSTEM);
	return ret;
}
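
/*
 * How the events set above reach user space (a minimal, hypothetical sketch,
 * kept entirely inside this comment and not part of the kernel build): a
 * context created through spu_create(2) with SPU_CREATE_EVENTS_ENABLED has
 * the mask built by spufs_handle_event() reported via the third argument of
 * spu_run(2) instead of receiving SIGBUS/SIGSEGV/SIGILL.  Neither syscall has
 * a glibc wrapper, so syscall(2) is used directly; the "/spu" mount point,
 * the context name, the flag value and the omitted error handling are all
 * assumptions made for illustration only.
 *
 *	#include <sys/syscall.h>
 *	#include <unistd.h>
 *	#include <stdio.h>
 *
 *	// Assumed value; must match the kernel's definition of
 *	// SPU_CREATE_EVENTS_ENABLED in <asm/spu.h>.
 *	#define SPU_CREATE_EVENTS_ENABLED	0x0001
 *
 *	int main(void)
 *	{
 *		unsigned int npc = 0, event = 0;
 *
 *		// spufs assumed mounted at /spu; mode 0700 chosen arbitrarily
 *		int fd = syscall(__NR_spu_create, "/spu/example",
 *				 SPU_CREATE_EVENTS_ENABLED, 0700);
 *		if (fd < 0)
 *			return 1;
 *
 *		// ... load an SPE program into the context's "mem" file ...
 *
 *		if (syscall(__NR_spu_run, fd, &npc, &event) < 0)
 *			return 1;
 *
 *		// event now carries bits such as SPE_EVENT_INVALID_DMA or
 *		// SPE_EVENT_SPE_ERROR, i.e. the types raised by
 *		// spufs_handle_class0() and spufs_handle_class1() above
 *		if (event)
 *			fprintf(stderr, "SPE event mask: 0x%x\n", event);
 *		return 0;
 *	}
 */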