#define DEBUG

#include <linux/wait.h>
#include <linux/ptrace.h>

#include <asm/spu.h>
#include <asm/spu_priv1.h>
#include <asm/io.h>
#include <asm/unistd.h>

#include "spufs.h"

/* interrupt-level stop callback function. */
void spufs_stop_callback(struct spu *spu)
{
	struct spu_context *ctx = spu->ctx;

	wake_up_all(&ctx->stop_wq);
}

static inline int spu_stopped(struct spu_context *ctx, u32 *stat)
{
	struct spu *spu;
	u64 pte_fault;

	*stat = ctx->ops->status_read(ctx);

	spu = ctx->spu;
	if (ctx->state != SPU_STATE_RUNNABLE ||
	    test_bit(SPU_SCHED_NOTIFY_ACTIVE, &ctx->sched_flags))
		return 1;
	pte_fault = spu->dsisr &
	    (MFC_DSISR_PTE_NOT_FOUND | MFC_DSISR_ACCESS_DENIED);
	return (!(*stat & SPU_STATUS_RUNNING) || pte_fault || spu->class_0_pending) ?
		1 : 0;
}

static int spu_setup_isolated(struct spu_context *ctx)
{
	int ret;
	u64 __iomem *mfc_cntl;
	u64 sr1;
	u32 status;
	unsigned long timeout;
	const u32 status_loading = SPU_STATUS_RUNNING
		| SPU_STATUS_ISOLATED_STATE | SPU_STATUS_ISOLATED_LOAD_STATUS;

	ret = -ENODEV;
	if (!isolated_loader)
		goto out;

	/*
	 * We need to exclude userspace access to the context.
	 *
	 * To protect against memory access we invalidate all ptes
	 * and make sure the pagefault handlers block on the mutex.
	 */
	spu_unmap_mappings(ctx);

	mfc_cntl = &ctx->spu->priv2->mfc_control_RW;

	/* purge the MFC DMA queue to ensure no spurious accesses before we
	 * enter kernel mode */
	timeout = jiffies + HZ;
	out_be64(mfc_cntl, MFC_CNTL_PURGE_DMA_REQUEST);
	while ((in_be64(mfc_cntl) & MFC_CNTL_PURGE_DMA_STATUS_MASK)
			!= MFC_CNTL_PURGE_DMA_COMPLETE) {
		if (time_after(jiffies, timeout)) {
			printk(KERN_ERR "%s: timeout flushing MFC DMA queue\n",
					__FUNCTION__);
			ret = -EIO;
			goto out;
		}
		cond_resched();
	}

	/* put the SPE in kernel mode to allow access to the loader */
	sr1 = spu_mfc_sr1_get(ctx->spu);
	sr1 &= ~MFC_STATE1_PROBLEM_STATE_MASK;
	spu_mfc_sr1_set(ctx->spu, sr1);

	/* start the loader */
	ctx->ops->signal1_write(ctx, (unsigned long)isolated_loader >> 32);
	ctx->ops->signal2_write(ctx,
			(unsigned long)isolated_loader & 0xffffffff);

	ctx->ops->runcntl_write(ctx,
			SPU_RUNCNTL_RUNNABLE | SPU_RUNCNTL_ISOLATE);

	ret = 0;
	timeout = jiffies + HZ;
	while (((status = ctx->ops->status_read(ctx)) & status_loading) ==
				status_loading) {
		if (time_after(jiffies, timeout)) {
			printk(KERN_ERR "%s: timeout waiting for loader\n",
					__FUNCTION__);
			ret = -EIO;
			goto out_drop_priv;
		}
		cond_resched();
	}

	if (!(status & SPU_STATUS_RUNNING)) {
		/* If isolated LOAD has failed: run SPU, we will get a
		 * stop-and-signal later. */
		pr_debug("%s: isolated LOAD failed\n", __FUNCTION__);
		ctx->ops->runcntl_write(ctx, SPU_RUNCNTL_RUNNABLE);
		ret = -EACCES;
		goto out_drop_priv;
	}

	if (!(status & SPU_STATUS_ISOLATED_STATE)) {
		/* This isn't allowed by the CBEA, but check anyway */
		pr_debug("%s: SPU fell out of isolated mode?\n", __FUNCTION__);
		ctx->ops->runcntl_write(ctx, SPU_RUNCNTL_STOP);
		ret = -EINVAL;
		goto out_drop_priv;
	}

out_drop_priv:
	/* Finished accessing the loader. Drop kernel mode */
	sr1 |= MFC_STATE1_PROBLEM_STATE_MASK;
	spu_mfc_sr1_set(ctx->spu, sr1);

out:
	return ret;
}

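/*
 * Prepare a context for running: switch utilization accounting to
 * SPU_UTIL_SYSTEM, make sure isolated contexts have gone through
 * spu_setup_isolated() (keeping any run control bits userspace has
 * already set, e.g. for an isolated exit), otherwise write the requested
 * npc, select single-step mode when TIF_SINGLESTEP is set, and start the
 * SPU; accounting then moves to SPU_UTIL_USER.
 */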
static int spu_run_init(struct spu_context *ctx, u32 *npc)
{
	spuctx_switch_state(ctx, SPU_UTIL_SYSTEM);

	if (ctx->flags & SPU_CREATE_ISOLATE) {
		unsigned long runcntl;

		if (!(ctx->ops->status_read(ctx) & SPU_STATUS_ISOLATED_STATE)) {
			int ret = spu_setup_isolated(ctx);
			if (ret)
				return ret;
		}

		/* if userspace has set the runcntl register (eg, to issue an
		 * isolated exit), we need to re-set it here */
		runcntl = ctx->ops->runcntl_read(ctx) &
			(SPU_RUNCNTL_RUNNABLE | SPU_RUNCNTL_ISOLATE);
		if (runcntl == 0)
			runcntl = SPU_RUNCNTL_RUNNABLE;
		ctx->ops->runcntl_write(ctx, runcntl);
	} else {
		unsigned long mode = SPU_PRIVCNTL_MODE_NORMAL;
		ctx->ops->npc_write(ctx, *npc);
		if (test_thread_flag(TIF_SINGLESTEP))
			mode = SPU_PRIVCNTL_MODE_SINGLE_STEP;
		out_be64(&ctx->spu->priv2->spu_privcntl_RW, mode);
		ctx->ops->runcntl_write(ctx, SPU_RUNCNTL_RUNNABLE);
	}

	spuctx_switch_state(ctx, SPU_UTIL_USER);

	return 0;
}

static int spu_run_fini(struct spu_context *ctx, u32 *npc,
			u32 *status)
{
	int ret = 0;

	*status = ctx->ops->status_read(ctx);
	*npc = ctx->ops->npc_read(ctx);

	spuctx_switch_state(ctx, SPU_UTIL_IDLE_LOADED);
	spu_release(ctx);

	if (signal_pending(current))
		ret = -ERESTARTSYS;

	return ret;
}

static int spu_reacquire_runnable(struct spu_context *ctx, u32 *npc,
				  u32 *status)
{
	int ret;

	ret = spu_run_fini(ctx, npc, status);
	if (ret)
		return ret;

	if (*status & (SPU_STATUS_STOPPED_BY_STOP | SPU_STATUS_STOPPED_BY_HALT))
		return *status;

	ret = spu_acquire_runnable(ctx, 0);
	if (ret)
		return ret;

	spuctx_switch_state(ctx, SPU_UTIL_USER);
	return 0;
}

/*
 * SPU syscall restarting is tricky because we violate the basic
 * assumption that the signal handler is running on the interrupted
 * thread. Here instead, the handler runs in PowerPC user space,
 * while the system call was issued from the SPU.
 * This means we can only do a very rough approximation of POSIX
 * signal semantics.
 */
static int spu_handle_restartsys(struct spu_context *ctx, long *spu_ret,
				 unsigned int *npc)
{
	int ret;

	switch (*spu_ret) {
	case -ERESTARTSYS:
	case -ERESTARTNOINTR:
		/*
		 * Enter the regular syscall restarting for
		 * sys_spu_run, then restart the SPU syscall
		 * callback.
		 */
		*npc -= 8;
		ret = -ERESTARTSYS;
		break;
	case -ERESTARTNOHAND:
	case -ERESTART_RESTARTBLOCK:
		/*
		 * Restart block is too hard for now, just return -EINTR
		 * to the SPU.
		 * ERESTARTNOHAND comes from sys_pause, we also return
		 * -EINTR from there.
		 * Assume that we need to be restarted ourselves though.
		 */
		*spu_ret = -EINTR;
		ret = -ERESTARTSYS;
		break;
	default:
		printk(KERN_WARNING "%s: unexpected return code %ld\n",
			__FUNCTION__, *spu_ret);
		ret = 0;
	}
	return ret;
}

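/*
 * PPE-assisted system calls (callbacks from the SPU).
 *
 * The convention, as implied by this code and the 0x2104 check in
 * spufs_run_spu(), is roughly:
 *
 *	stop 0x2104		SPU stops, signalling a callback
 *	.word <LS offset>	offset of a struct spu_syscall_block
 *				in local store
 *
 * spu_process_callback() reads the word at the saved npc, bounds-checks
 * it against LS_SIZE, copies the syscall block out of local store, drops
 * the context lock while spu_sys_callback() performs the actual system
 * call, then writes the return value back over the block and restarts
 * the SPU just past the pointer word.
 */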
static int spu_process_callback(struct spu_context *ctx)
{
	struct spu_syscall_block s;
	u32 ls_pointer, npc;
	void __iomem *ls;
	long spu_ret;
	int ret;

	/* get syscall block from local store */
	npc = ctx->ops->npc_read(ctx) & ~3;
	ls = (void __iomem *)ctx->ops->get_ls(ctx);
	ls_pointer = in_be32(ls + npc);
	if (ls_pointer > (LS_SIZE - sizeof(s)))
		return -EFAULT;
	memcpy_fromio(&s, ls + ls_pointer, sizeof(s));

	/* do actual syscall without pinning the spu */
	ret = 0;
	spu_ret = -ENOSYS;
	npc += 4;

	if (s.nr_ret < __NR_syscalls) {
		spu_release(ctx);
		/* do actual system call from here */
		spu_ret = spu_sys_callback(&s);
		if (spu_ret <= -ERESTARTSYS) {
			ret = spu_handle_restartsys(ctx, &spu_ret, &npc);
		}
		spu_acquire(ctx);
		if (ret == -ERESTARTSYS)
			return ret;
	}

	/* write result, jump over indirect pointer */
	memcpy_toio(ls + ls_pointer, &spu_ret, sizeof(spu_ret));
	ctx->ops->npc_write(ctx, npc);
	ctx->ops->runcntl_write(ctx, SPU_RUNCNTL_RUNNABLE);
	return ret;
}

static inline int spu_process_events(struct spu_context *ctx)
{
	struct spu *spu = ctx->spu;
	int ret = 0;

	if (spu->class_0_pending)
		ret = spu_irq_class_0_bottom(spu);
	if (!ret && signal_pending(current))
		ret = -ERESTARTSYS;
	return ret;
}

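/*
 * Main entry point for sys_spu_run: hold ctx->run_mutex for the duration
 * of the run, activate a saved context (or refresh its scheduling info),
 * start the SPU via spu_run_init(), and then loop on spufs_wait() until
 * the SPU stops for good. Inside the loop we deliver scheduler
 * notifications, service 0x2104 syscall callbacks, let
 * spufs_handle_class1() resolve pending MFC faults, and reacquire the
 * context if it was scheduled out. On return *npc and *event are updated
 * and the SPU status word (or an errno such as -ERESTARTSYS) is passed
 * back to the caller; a 0x3fff stop code additionally raises SIGTRAP.
 */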
long spufs_run_spu(struct spu_context *ctx, u32 *npc, u32 *event)
{
	int ret;
	struct spu *spu;
	u32 status;

	if (mutex_lock_interruptible(&ctx->run_mutex))
		return -ERESTARTSYS;

	ctx->ops->master_start(ctx);
	ctx->event_return = 0;

	spu_acquire(ctx);
	if (ctx->state == SPU_STATE_SAVED) {
		__spu_update_sched_info(ctx);
		spu_set_timeslice(ctx);

		ret = spu_activate(ctx, 0);
		if (ret) {
			spu_release(ctx);
			goto out;
		}
	} else {
		/*
		 * We have to update the scheduling priority under active_mutex
		 * to protect against find_victim().
		 *
		 * No need to update the timeslice ASAP, it will get updated
		 * once the current one has expired.
		 */
		spu_update_sched_info(ctx);
	}

	ret = spu_run_init(ctx, npc);
	if (ret) {
		spu_release(ctx);
		goto out;
	}

	do {
		ret = spufs_wait(ctx->stop_wq, spu_stopped(ctx, &status));
		if (unlikely(ret))
			break;
		spu = ctx->spu;
		if (unlikely(test_and_clear_bit(SPU_SCHED_NOTIFY_ACTIVE,
						&ctx->sched_flags))) {
			if (!(status & SPU_STATUS_STOPPED_BY_STOP)) {
				spu_switch_notify(spu, ctx);
				continue;
			}
		}

		spuctx_switch_state(ctx, SPU_UTIL_SYSTEM);

		if ((status & SPU_STATUS_STOPPED_BY_STOP) &&
		    (status >> SPU_STOP_STATUS_SHIFT == 0x2104)) {
			ret = spu_process_callback(ctx);
			if (ret)
				break;
			status &= ~SPU_STATUS_STOPPED_BY_STOP;
		}
		ret = spufs_handle_class1(ctx);
		if (ret)
			break;

		if (unlikely(ctx->state != SPU_STATE_RUNNABLE)) {
			ret = spu_reacquire_runnable(ctx, npc, &status);
			if (ret)
				goto out2;
			continue;
		}
		ret = spu_process_events(ctx);

	} while (!ret && !(status & (SPU_STATUS_STOPPED_BY_STOP |
				      SPU_STATUS_STOPPED_BY_HALT |
				      SPU_STATUS_SINGLE_STEP)));

	if ((status & SPU_STATUS_STOPPED_BY_STOP) &&
	    (((status >> SPU_STOP_STATUS_SHIFT) & 0x3f00) == 0x2100) &&
	    (ctx->state == SPU_STATE_RUNNABLE))
		ctx->stats.libassist++;

	ctx->ops->master_stop(ctx);
	ret = spu_run_fini(ctx, npc, &status);
	spu_yield(ctx);

out2:
	if ((ret == 0) ||
	    ((ret == -ERESTARTSYS) &&
	     ((status & SPU_STATUS_STOPPED_BY_HALT) ||
	      (status & SPU_STATUS_SINGLE_STEP) ||
	      ((status & SPU_STATUS_STOPPED_BY_STOP) &&
	       (status >> SPU_STOP_STATUS_SHIFT != 0x2104)))))
		ret = status;

	/* Note: we don't need to force_sig SIGTRAP on single-step
	 * since we have TIF_SINGLESTEP set, thus the kernel will do
	 * it upon return from the syscall anyway.
	 */
	if ((status & SPU_STATUS_STOPPED_BY_STOP)
	    && (status >> SPU_STOP_STATUS_SHIFT) == 0x3fff) {
		force_sig(SIGTRAP, current);
		ret = -ERESTARTSYS;
	}

out:
	*event = ctx->event_return;
	mutex_unlock(&ctx->run_mutex);
	return ret;
}