#define DEBUG

#include <linux/wait.h>
#include <linux/ptrace.h>

#include <asm/spu.h>
#include <asm/spu_priv1.h>
#include <asm/io.h>
#include <asm/unistd.h>

#include "spufs.h"

/* interrupt-level stop callback function. */
void spufs_stop_callback(struct spu *spu)
{
	struct spu_context *ctx = spu->ctx;

	wake_up_all(&ctx->stop_wq);
}

static inline int spu_stopped(struct spu_context *ctx, u32 *stat)
{
	struct spu *spu;
	u64 pte_fault;

	*stat = ctx->ops->status_read(ctx);
	if (ctx->state != SPU_STATE_RUNNABLE)
		return 1;
	spu = ctx->spu;
	pte_fault = spu->dsisr &
		(MFC_DSISR_PTE_NOT_FOUND | MFC_DSISR_ACCESS_DENIED);
	return (!(*stat & SPU_STATUS_RUNNING) || pte_fault ||
		spu->class_0_pending) ? 1 : 0;
}

static int spu_setup_isolated(struct spu_context *ctx)
{
	int ret;
	u64 __iomem *mfc_cntl;
	u64 sr1;
	u32 status;
	unsigned long timeout;
	const u32 status_loading = SPU_STATUS_RUNNING
		| SPU_STATUS_ISOLATED_STATE | SPU_STATUS_ISOLATED_LOAD_STATUS;

	ret = -ENODEV;
	if (!isolated_loader)
		goto out;

	/*
	 * We need to exclude userspace access to the context.
	 *
	 * To protect against memory access we invalidate all ptes
	 * and make sure the pagefault handlers block on the mutex.
	 */
	spu_unmap_mappings(ctx);

	mfc_cntl = &ctx->spu->priv2->mfc_control_RW;

	/* purge the MFC DMA queue to ensure no spurious accesses before we
	 * enter kernel mode */
	timeout = jiffies + HZ;
	out_be64(mfc_cntl, MFC_CNTL_PURGE_DMA_REQUEST);
	while ((in_be64(mfc_cntl) & MFC_CNTL_PURGE_DMA_STATUS_MASK)
			!= MFC_CNTL_PURGE_DMA_COMPLETE) {
		if (time_after(jiffies, timeout)) {
			printk(KERN_ERR "%s: timeout flushing MFC DMA queue\n",
					__FUNCTION__);
			ret = -EIO;
			goto out;
		}
		cond_resched();
	}

	/* put the SPE in kernel mode to allow access to the loader */
	sr1 = spu_mfc_sr1_get(ctx->spu);
	sr1 &= ~MFC_STATE1_PROBLEM_STATE_MASK;
	spu_mfc_sr1_set(ctx->spu, sr1);

	/* start the loader */
	ctx->ops->signal1_write(ctx, (unsigned long)isolated_loader >> 32);
	ctx->ops->signal2_write(ctx,
			(unsigned long)isolated_loader & 0xffffffff);

	ctx->ops->runcntl_write(ctx,
			SPU_RUNCNTL_RUNNABLE | SPU_RUNCNTL_ISOLATE);

	ret = 0;
	timeout = jiffies + HZ;
	while (((status = ctx->ops->status_read(ctx)) & status_loading) ==
			status_loading) {
		if (time_after(jiffies, timeout)) {
			printk(KERN_ERR "%s: timeout waiting for loader\n",
					__FUNCTION__);
			ret = -EIO;
			goto out_drop_priv;
		}
		cond_resched();
	}

	if (!(status & SPU_STATUS_RUNNING)) {
		/* If isolated LOAD has failed: run SPU, we will get a
		 * stop-and-signal later. */
		pr_debug("%s: isolated LOAD failed\n", __FUNCTION__);
		ctx->ops->runcntl_write(ctx, SPU_RUNCNTL_RUNNABLE);
		ret = -EACCES;
		goto out_drop_priv;
	}

	if (!(status & SPU_STATUS_ISOLATED_STATE)) {
		/* This isn't allowed by the CBEA, but check anyway */
		pr_debug("%s: SPU fell out of isolated mode?\n", __FUNCTION__);
		ctx->ops->runcntl_write(ctx, SPU_RUNCNTL_STOP);
		ret = -EINVAL;
		goto out_drop_priv;
	}

out_drop_priv:
	/* Finished accessing the loader. Drop kernel mode */
	sr1 |= MFC_STATE1_PROBLEM_STATE_MASK;
	spu_mfc_sr1_set(ctx->spu, sr1);

out:
	return ret;
}

static int spu_run_init(struct spu_context *ctx, u32 *npc)
{
	if (ctx->flags & SPU_CREATE_ISOLATE) {
		unsigned long runcntl;

		if (!(ctx->ops->status_read(ctx) & SPU_STATUS_ISOLATED_STATE)) {
			int ret = spu_setup_isolated(ctx);
			if (ret)
				return ret;
		}

		/* if userspace has set the runcntrl register (eg, to issue an
		 * isolated exit), we need to re-set it here */
		runcntl = ctx->ops->runcntl_read(ctx) &
			(SPU_RUNCNTL_RUNNABLE | SPU_RUNCNTL_ISOLATE);
		if (runcntl == 0)
			runcntl = SPU_RUNCNTL_RUNNABLE;
		ctx->ops->runcntl_write(ctx, runcntl);
	} else {
		spu_start_tick(ctx);
		ctx->ops->npc_write(ctx, *npc);
		ctx->ops->runcntl_write(ctx, SPU_RUNCNTL_RUNNABLE);
	}

	return 0;
}

static int spu_run_fini(struct spu_context *ctx, u32 *npc,
			u32 *status)
{
	int ret = 0;

	spu_stop_tick(ctx);
	*status = ctx->ops->status_read(ctx);
	*npc = ctx->ops->npc_read(ctx);
	spu_release(ctx);

	if (signal_pending(current))
		ret = -ERESTARTSYS;

	return ret;
}

static int spu_reacquire_runnable(struct spu_context *ctx, u32 *npc,
				  u32 *status)
{
	int ret;

	ret = spu_run_fini(ctx, npc, status);
	if (ret)
		return ret;

	if (*status & (SPU_STATUS_STOPPED_BY_STOP | SPU_STATUS_STOPPED_BY_HALT))
		return *status;

	ret = spu_acquire_runnable(ctx, 0);
	if (ret)
		return ret;

	ret = spu_run_init(ctx, npc);
	if (ret) {
		spu_release(ctx);
		return ret;
	}
	return 0;
}

/*
 * SPU syscall restarting is tricky because we violate the basic
 * assumption that the signal handler is running on the interrupted
 * thread. Here instead, the handler runs on PowerPC user space code,
 * while the syscall was called from the SPU.
 * This means we can only do a very rough approximation of POSIX
 * signal semantics.
 */
int spu_handle_restartsys(struct spu_context *ctx, long *spu_ret,
			  unsigned int *npc)
{
	int ret;

	switch (*spu_ret) {
	case -ERESTARTSYS:
	case -ERESTARTNOINTR:
		/*
		 * Enter the regular syscall restarting for
		 * sys_spu_run, then restart the SPU syscall
		 * callback.
		 */
		*npc -= 8;
		ret = -ERESTARTSYS;
		break;
	case -ERESTARTNOHAND:
	case -ERESTART_RESTARTBLOCK:
		/*
		 * Restart block is too hard for now, just return -EINTR
		 * to the SPU.
		 * ERESTARTNOHAND comes from sys_pause, we also return
		 * -EINTR from there.
		 * Assume that we need to be restarted ourselves though.
		 */
		*spu_ret = -EINTR;
		ret = -ERESTARTSYS;
		break;
	default:
		printk(KERN_WARNING "%s: unexpected return code %ld\n",
			__FUNCTION__, *spu_ret);
		ret = 0;
	}
	return ret;
}

int spu_process_callback(struct spu_context *ctx)
{
	struct spu_syscall_block s;
	u32 ls_pointer, npc;
	void __iomem *ls;
	long spu_ret;
	int ret;

	/* get syscall block from local store */
	npc = ctx->ops->npc_read(ctx) & ~3;
	ls = (void __iomem *)ctx->ops->get_ls(ctx);
	ls_pointer = in_be32(ls + npc);
	if (ls_pointer > (LS_SIZE - sizeof(s)))
		return -EFAULT;
	memcpy_fromio(&s, ls + ls_pointer, sizeof(s));

	/* do actual syscall without pinning the spu */
	ret = 0;
	spu_ret = -ENOSYS;
	npc += 4;

	if (s.nr_ret < __NR_syscalls) {
		spu_release(ctx);
		/* do actual system call from here */
		spu_ret = spu_sys_callback(&s);
		/* all -ERESTART* codes are numerically <= -ERESTARTSYS */
		if (spu_ret <= -ERESTARTSYS) {
			ret = spu_handle_restartsys(ctx, &spu_ret, &npc);
		}
		spu_acquire(ctx);
		if (ret == -ERESTARTSYS)
			return ret;
	}

	/* write result, jump over indirect pointer */
	memcpy_toio(ls + ls_pointer, &spu_ret, sizeof(spu_ret));
	ctx->ops->npc_write(ctx, npc);
	ctx->ops->runcntl_write(ctx, SPU_RUNCNTL_RUNNABLE);
	return ret;
}

static inline int spu_process_events(struct spu_context *ctx)
{
	struct spu *spu = ctx->spu;
	int ret = 0;

	if (spu->class_0_pending)
		ret = spu_irq_class_0_bottom(spu);
	if (!ret && signal_pending(current))
		ret = -ERESTARTSYS;
	return ret;
}

long spufs_run_spu(struct file *file, struct spu_context *ctx,
		   u32 *npc, u32 *event)
{
	int ret;
	u32 status;

	if (mutex_lock_interruptible(&ctx->run_mutex))
		return -ERESTARTSYS;

	ctx->ops->master_start(ctx);
	ctx->event_return = 0;

	ret = spu_acquire_runnable(ctx, 0);
	if (ret)
		goto out;	/* must not return with run_mutex held */

	ret = spu_run_init(ctx, npc);
	if (ret) {
		spu_release(ctx);
		goto out;
	}

	do {
		ret = spufs_wait(ctx->stop_wq, spu_stopped(ctx, &status));
		if (unlikely(ret))
			break;
		if ((status & SPU_STATUS_STOPPED_BY_STOP) &&
		    (status >> SPU_STOP_STATUS_SHIFT == 0x2104)) {
			/* stop code 0x2104: the SPU requested a PPE-assisted
			 * system call; handle it and keep running */
			ret = spu_process_callback(ctx);
			if (ret)
				break;
			status &= ~SPU_STATUS_STOPPED_BY_STOP;
		}
		ret = spufs_handle_class1(ctx);
		if (ret)
			break;

		if (unlikely(ctx->state != SPU_STATE_RUNNABLE)) {
			ret = spu_reacquire_runnable(ctx, npc, &status);
			if (ret) {
				spu_stop_tick(ctx);
				goto out2;
			}
			continue;
		}
		ret = spu_process_events(ctx);

	} while (!ret && !(status & (SPU_STATUS_STOPPED_BY_STOP |
				     SPU_STATUS_STOPPED_BY_HALT)));

	ctx->ops->master_stop(ctx);
	ret = spu_run_fini(ctx, npc, &status);
	spu_yield(ctx);

out2:
	if ((ret == 0) ||
	    ((ret == -ERESTARTSYS) &&
	     ((status & SPU_STATUS_STOPPED_BY_HALT) ||
	      ((status & SPU_STATUS_STOPPED_BY_STOP) &&
	       (status >> SPU_STOP_STATUS_SHIFT != 0x2104)))))
		ret = status;

	if ((status & SPU_STATUS_STOPPED_BY_STOP)
	    && (status >> SPU_STOP_STATUS_SHIFT) == 0x3fff) {
		/* stop code 0x3fff: breakpoint, report it as SIGTRAP */
		force_sig(SIGTRAP, current);
		ret = -ERESTARTSYS;
	}

out:
	*event = ctx->event_return;
	mutex_unlock(&ctx->run_mutex);
	return ret;
}