/*
 * Low-level SPU handling
 *
 * (C) Copyright IBM Deutschland Entwicklung GmbH 2005
 *
 * Author: Arnd Bergmann <arndb@de.ibm.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2, or (at your option)
 * any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
 */
#include <linux/sched.h>
#include <linux/mm.h>
#include <linux/module.h>

#include <asm/spu.h>
#include <asm/spu_csa.h>

#include "spufs.h"

/*
 * This ought to be kept in sync with the powerpc specific do_page_fault
 * function. Fortunately, there are a few corner cases that we haven't
 * had to handle yet.
 */
static int spu_handle_mm_fault(struct mm_struct *mm, unsigned long ea,
		unsigned long dsisr, unsigned *flt)
{
	struct vm_area_struct *vma;
	unsigned long is_write;
	int ret;

#if 0
	if (!IS_VALID_EA(ea)) {
		return -EFAULT;
	}
#endif /* XXX */
	if (mm == NULL) {
		return -EFAULT;
	}
	if (mm->pgd == NULL) {
		return -EFAULT;
	}

	down_read(&mm->mmap_sem);
	vma = find_vma(mm, ea);
	if (!vma)
		goto bad_area;
	if (vma->vm_start <= ea)
		goto good_area;
	if (!(vma->vm_flags & VM_GROWSDOWN))
		goto bad_area;
	if (expand_stack(vma, ea))
		goto bad_area;
good_area:
	is_write = dsisr & MFC_DSISR_ACCESS_PUT;
	if (is_write) {
		if (!(vma->vm_flags & VM_WRITE))
			goto bad_area;
	} else {
		if (dsisr & MFC_DSISR_ACCESS_DENIED)
			goto bad_area;
		if (!(vma->vm_flags & (VM_READ | VM_EXEC)))
			goto bad_area;
	}
	ret = 0;
	*flt = handle_mm_fault(mm, vma, ea, is_write);
	if (unlikely(*flt & VM_FAULT_ERROR)) {
		if (*flt & VM_FAULT_OOM) {
			ret = -ENOMEM;
			goto bad_area;
		} else if (*flt & VM_FAULT_SIGBUS) {
			ret = -EFAULT;
			goto bad_area;
		}
		BUG();
	}
	if (*flt & VM_FAULT_MAJOR)
		current->maj_flt++;
	else
		current->min_flt++;
	up_read(&mm->mmap_sem);
	return ret;

bad_area:
	up_read(&mm->mmap_sem);
	return -EFAULT;
}

static void spufs_handle_dma_error(struct spu_context *ctx,
				unsigned long ea, int type)
{
	if (ctx->flags & SPU_CREATE_EVENTS_ENABLED) {
		ctx->event_return |= type;
		wake_up_all(&ctx->stop_wq);
	} else {
		siginfo_t info;
		memset(&info, 0, sizeof(info));

		switch (type) {
		case SPE_EVENT_INVALID_DMA:
			info.si_signo = SIGBUS;
			info.si_code = BUS_OBJERR;
			break;
		case SPE_EVENT_SPE_DATA_STORAGE:
			info.si_signo = SIGBUS;
			info.si_addr = (void __user *)ea;
			info.si_code = BUS_ADRERR;
			break;
		case SPE_EVENT_DMA_ALIGNMENT:
			info.si_signo = SIGBUS;
			/* DAR isn't set for an alignment fault :( */
			info.si_code = BUS_ADRALN;
			break;
		case SPE_EVENT_SPE_ERROR:
			info.si_signo = SIGILL;
			info.si_addr = (void __user *)(unsigned long)
				ctx->ops->npc_read(ctx) - 4;
			info.si_code = ILL_ILLOPC;
			break;
		}
		if (info.si_signo)
			force_sig_info(info.si_signo, &info, current);
	}
}
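/*
 * Entry point for the low-level SPU interrupt code (via spu->dma_callback):
 * report a DMA error on this SPU to the context that owns it. spu->dar
 * holds the effective address of the failed transfer, where the hardware
 * provides one; type is one of the SPE_EVENT_* codes handled above.
 */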
void
spufs_dma_callback(struct spu *spu, int type)
{
	spufs_handle_dma_error(spu->ctx, spu->dar, type);
}
EXPORT_SYMBOL_GPL(spufs_dma_callback);

/*
 * Bottom half handler for page faults; we can't do this from
 * interrupt context, since we might need to sleep.
 * We also need to give up the mutex so we can get scheduled
 * out while waiting for the backing store.
 *
 * TODO: try calling hash_page from the interrupt handler first
 * in order to speed up the easy case.
 */
int spufs_handle_class1(struct spu_context *ctx)
{
	u64 ea, dsisr, access;
	unsigned long flags;
	unsigned flt = 0;
	int ret;

	/*
	 * dar and dsisr get passed from the registers
	 * to the spu_context, to this function, but not
	 * back to the spu if it gets scheduled again.
	 *
	 * If we don't handle the fault for a saved context
	 * in time, we can still expect to get the same fault
	 * immediately after the context restore.
	 */
	if (ctx->state == SPU_STATE_RUNNABLE) {
		ea = ctx->spu->dar;
		dsisr = ctx->spu->dsisr;
		ctx->spu->dar = ctx->spu->dsisr = 0;
	} else {
		ea = ctx->csa.priv1.mfc_dar_RW;
		dsisr = ctx->csa.priv1.mfc_dsisr_RW;
		ctx->csa.priv1.mfc_dar_RW = 0;
		ctx->csa.priv1.mfc_dsisr_RW = 0;
	}

	if (!(dsisr & (MFC_DSISR_PTE_NOT_FOUND | MFC_DSISR_ACCESS_DENIED)))
		return 0;

	spuctx_switch_state(ctx, SPU_UTIL_IOWAIT);

	pr_debug("ctx %p: ea %016lx, dsisr %016lx state %d\n", ctx, ea,
		dsisr, ctx->state);

	ctx->stats.hash_flt++;
	if (ctx->state == SPU_STATE_RUNNABLE)
		ctx->spu->stats.hash_flt++;

	/* we must not hold the lock when entering spu_handle_mm_fault */
	spu_release(ctx);

	access = (_PAGE_PRESENT | _PAGE_USER);
	access |= (dsisr & MFC_DSISR_ACCESS_PUT) ? _PAGE_RW : 0UL;
	local_irq_save(flags);
	/* try the fast path first; 0x300 is the data storage exception vector */
	ret = hash_page(ea, access, 0x300);
	local_irq_restore(flags);

	/* hashing failed, so try the actual fault handler */
	if (ret)
		ret = spu_handle_mm_fault(current->mm, ea, dsisr, &flt);

	spu_acquire(ctx);
	/*
	 * If we handled the fault successfully and are in runnable
	 * state, restart the DMA.
	 * In case of unhandled error report the problem to user space.
	 */
	if (!ret) {
		if (flt & VM_FAULT_MAJOR)
			ctx->stats.maj_flt++;
		else
			ctx->stats.min_flt++;
		if (ctx->state == SPU_STATE_RUNNABLE) {
			if (flt & VM_FAULT_MAJOR)
				ctx->spu->stats.maj_flt++;
			else
				ctx->spu->stats.min_flt++;
		}

		if (ctx->spu)
			ctx->ops->restart_dma(ctx);
	} else
		spufs_handle_dma_error(ctx, ea, SPE_EVENT_SPE_DATA_STORAGE);

	spuctx_switch_state(ctx, SPU_UTIL_SYSTEM);
	return ret;
}
EXPORT_SYMBOL_GPL(spufs_handle_class1);
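
#if 0 /* example only, never built */
/*
 * A minimal sketch of how user space observes the error reporting
 * implemented above, assuming the raw syscalls described in the
 * spu_create(2) and spu_run(2) man pages; the "/spu/example" pathname
 * and the helper name are made up for illustration. With
 * SPU_CREATE_EVENTS_ENABLED, a DMA error handled by
 * spufs_handle_dma_error() comes back as SPE_EVENT_* bits in *event;
 * without the flag, the process gets SIGBUS/SIGILL instead.
 */
#include <sys/syscall.h>
#include <unistd.h>

static int example_spu_events(void)
{
	unsigned int npc = 0, event = 0;
	int fd;

	/* create a context that reports errors as events, not signals */
	fd = syscall(SYS_spu_create, "/spu/example",
		     SPU_CREATE_EVENTS_ENABLED, 0700);
	if (fd < 0)
		return -1;

	/* on return, event may contain e.g. SPE_EVENT_SPE_DATA_STORAGE */
	syscall(SYS_spu_run, fd, &npc, &event);
	return event;
}
#endif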