#define DEBUG

#include <linux/wait.h>
#include <linux/ptrace.h>

#include <asm/spu.h>
#include <asm/spu_priv1.h>
#include <asm/io.h>
#include <asm/unistd.h>

#include "spufs.h"

/* interrupt-level stop callback function. */
void spufs_stop_callback(struct spu *spu)
{
	struct spu_context *ctx = spu->ctx;

	wake_up_all(&ctx->stop_wq);
}

static inline int spu_stopped(struct spu_context *ctx, u32 *stat)
{
	struct spu *spu;
	u64 pte_fault;

	*stat = ctx->ops->status_read(ctx);

	spu = ctx->spu;
	if (ctx->state != SPU_STATE_RUNNABLE ||
	    test_bit(SPU_SCHED_NOTIFY_ACTIVE, &ctx->sched_flags))
		return 1;
	pte_fault = spu->dsisr &
		    (MFC_DSISR_PTE_NOT_FOUND | MFC_DSISR_ACCESS_DENIED);
	return (!(*stat & SPU_STATUS_RUNNING) || pte_fault || spu->class_0_pending) ?
		1 : 0;
}

static int spu_setup_isolated(struct spu_context *ctx)
{
	int ret;
	u64 __iomem *mfc_cntl;
	u64 sr1;
	u32 status;
	unsigned long timeout;
	const u32 status_loading = SPU_STATUS_RUNNING
		| SPU_STATUS_ISOLATED_STATE | SPU_STATUS_ISOLATED_LOAD_STATUS;

	ret = -ENODEV;
	if (!isolated_loader)
		goto out;

	/*
	 * We need to exclude userspace access to the context.
	 *
	 * To protect against memory access we invalidate all ptes
	 * and make sure the pagefault handlers block on the mutex.
	 */
	spu_unmap_mappings(ctx);

	mfc_cntl = &ctx->spu->priv2->mfc_control_RW;

	/* purge the MFC DMA queue to ensure no spurious accesses before we
	 * enter kernel mode */
	timeout = jiffies + HZ;
	out_be64(mfc_cntl, MFC_CNTL_PURGE_DMA_REQUEST);
	while ((in_be64(mfc_cntl) & MFC_CNTL_PURGE_DMA_STATUS_MASK)
			!= MFC_CNTL_PURGE_DMA_COMPLETE) {
		if (time_after(jiffies, timeout)) {
			printk(KERN_ERR "%s: timeout flushing MFC DMA queue\n",
					__FUNCTION__);
			ret = -EIO;
			goto out;
		}
		cond_resched();
	}

	/* put the SPE in kernel mode to allow access to the loader */
	sr1 = spu_mfc_sr1_get(ctx->spu);
	sr1 &= ~MFC_STATE1_PROBLEM_STATE_MASK;
	spu_mfc_sr1_set(ctx->spu, sr1);

	/* start the loader */
	ctx->ops->signal1_write(ctx, (unsigned long)isolated_loader >> 32);
	ctx->ops->signal2_write(ctx,
			(unsigned long)isolated_loader & 0xffffffff);

	ctx->ops->runcntl_write(ctx,
			SPU_RUNCNTL_RUNNABLE | SPU_RUNCNTL_ISOLATE);

	ret = 0;
	timeout = jiffies + HZ;
	while (((status = ctx->ops->status_read(ctx)) & status_loading) ==
			status_loading) {
		if (time_after(jiffies, timeout)) {
			printk(KERN_ERR "%s: timeout waiting for loader\n",
					__FUNCTION__);
			ret = -EIO;
			goto out_drop_priv;
		}
		cond_resched();
	}

	if (!(status & SPU_STATUS_RUNNING)) {
		/* If isolated LOAD has failed: run SPU, we will get a stop-and
		 * signal later. */
		pr_debug("%s: isolated LOAD failed\n", __FUNCTION__);
		ctx->ops->runcntl_write(ctx, SPU_RUNCNTL_RUNNABLE);
		ret = -EACCES;
		goto out_drop_priv;
	}

	if (!(status & SPU_STATUS_ISOLATED_STATE)) {
		/* This isn't allowed by the CBEA, but check anyway */
		pr_debug("%s: SPU fell out of isolated mode?\n", __FUNCTION__);
		ctx->ops->runcntl_write(ctx, SPU_RUNCNTL_STOP);
		ret = -EINVAL;
		goto out_drop_priv;
	}

out_drop_priv:
	/* Finished accessing the loader. Drop kernel mode */
	sr1 |= MFC_STATE1_PROBLEM_STATE_MASK;
	spu_mfc_sr1_set(ctx->spu, sr1);

out:
	return ret;
}
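/*
 * Set up a context to run: isolated contexts are bootstrapped through
 * the isolation loader when needed and restarted with the runcntl bits
 * userspace may already have set (defaulting to RUNNABLE); all others
 * get the requested NPC written, normal or single-step privileged mode
 * selected, and the run control register set to SPU_RUNCNTL_RUNNABLE.
 */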
static int spu_run_init(struct spu_context *ctx, u32 *npc)
{
	spuctx_switch_state(ctx, SPU_UTIL_SYSTEM);

	if (ctx->flags & SPU_CREATE_ISOLATE) {
		unsigned long runcntl;

		if (!(ctx->ops->status_read(ctx) & SPU_STATUS_ISOLATED_STATE)) {
			int ret = spu_setup_isolated(ctx);
			if (ret)
				return ret;
		}

		/* if userspace has set the runcntrl register (eg, to issue an
		 * isolated exit), we need to re-set it here */
		runcntl = ctx->ops->runcntl_read(ctx) &
			(SPU_RUNCNTL_RUNNABLE | SPU_RUNCNTL_ISOLATE);
		if (runcntl == 0)
			runcntl = SPU_RUNCNTL_RUNNABLE;
		ctx->ops->runcntl_write(ctx, runcntl);
	} else {
		unsigned long mode = SPU_PRIVCNTL_MODE_NORMAL;
		ctx->ops->npc_write(ctx, *npc);
		if (test_thread_flag(TIF_SINGLESTEP))
			mode = SPU_PRIVCNTL_MODE_SINGLE_STEP;
		out_be64(&ctx->spu->priv2->spu_privcntl_RW, mode);
		ctx->ops->runcntl_write(ctx, SPU_RUNCNTL_RUNNABLE);
	}

	spuctx_switch_state(ctx, SPU_UTIL_USER);

	return 0;
}

static int spu_run_fini(struct spu_context *ctx, u32 *npc,
			u32 *status)
{
	int ret = 0;

	*status = ctx->ops->status_read(ctx);
	*npc = ctx->ops->npc_read(ctx);

	spuctx_switch_state(ctx, SPU_UTIL_IDLE_LOADED);
	spu_release(ctx);

	if (signal_pending(current))
		ret = -ERESTARTSYS;

	return ret;
}

static int spu_reacquire_runnable(struct spu_context *ctx, u32 *npc,
				  u32 *status)
{
	int ret;

	ret = spu_run_fini(ctx, npc, status);
	if (ret)
		return ret;

	if (*status & (SPU_STATUS_STOPPED_BY_STOP | SPU_STATUS_STOPPED_BY_HALT))
		return *status;

	ret = spu_acquire_runnable(ctx, 0);
	if (ret)
		return ret;

	ret = spu_run_init(ctx, npc);
	if (ret) {
		spu_release(ctx);
		return ret;
	}
	return 0;
}

/*
 * SPU syscall restarting is tricky because we violate the basic
 * assumption that the signal handler is running on the interrupted
 * thread. Here instead, the handler runs in PowerPC user space code,
 * while the syscall was issued from the SPU.
 * This means we can only do a very rough approximation of POSIX
 * signal semantics.
 */
int spu_handle_restartsys(struct spu_context *ctx, long *spu_ret,
			  unsigned int *npc)
{
	int ret;

	switch (*spu_ret) {
	case -ERESTARTSYS:
	case -ERESTARTNOINTR:
		/*
		 * Enter the regular syscall restarting for
		 * sys_spu_run, then restart the SPU syscall
		 * callback.
		 */
		*npc -= 8;
		ret = -ERESTARTSYS;
		break;
	case -ERESTARTNOHAND:
	case -ERESTART_RESTARTBLOCK:
		/*
		 * Restart block is too hard for now, just return -EINTR
		 * to the SPU.
		 * ERESTARTNOHAND comes from sys_pause, we also return
		 * -EINTR from there.
		 * Assume that we need to be restarted ourselves though.
		 */
		*spu_ret = -EINTR;
		ret = -ERESTARTSYS;
		break;
	default:
		printk(KERN_WARNING "%s: unexpected return code %ld\n",
			__FUNCTION__, *spu_ret);
		ret = 0;
	}
	return ret;
}
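/*
 * Handle a syscall callback from SPU code: the SPU has stopped with
 * code 0x2104 and left a 32-bit local-store pointer to a struct
 * spu_syscall_block at the stop address.  We copy the block out, run
 * the system call on the PowerPC side without pinning the SPU, write
 * the return value back into local store and restart the SPU just past
 * the pointer word.
 */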
int spu_process_callback(struct spu_context *ctx)
{
	struct spu_syscall_block s;
	u32 ls_pointer, npc;
	void __iomem *ls;
	long spu_ret;
	int ret;

	/* get syscall block from local store */
	npc = ctx->ops->npc_read(ctx) & ~3;
	ls = (void __iomem *)ctx->ops->get_ls(ctx);
	ls_pointer = in_be32(ls + npc);
	if (ls_pointer > (LS_SIZE - sizeof(s)))
		return -EFAULT;
	memcpy_fromio(&s, ls + ls_pointer, sizeof(s));

	/* do actual syscall without pinning the spu */
	ret = 0;
	spu_ret = -ENOSYS;
	npc += 4;

	if (s.nr_ret < __NR_syscalls) {
		spu_release(ctx);
		/* do actual system call from here */
		spu_ret = spu_sys_callback(&s);
		if (spu_ret <= -ERESTARTSYS) {
			ret = spu_handle_restartsys(ctx, &spu_ret, &npc);
		}
		spu_acquire(ctx);
		if (ret == -ERESTARTSYS)
			return ret;
	}

	/* write result, jump over indirect pointer */
	memcpy_toio(ls + ls_pointer, &spu_ret, sizeof(spu_ret));
	ctx->ops->npc_write(ctx, npc);
	ctx->ops->runcntl_write(ctx, SPU_RUNCNTL_RUNNABLE);
	return ret;
}

static inline int spu_process_events(struct spu_context *ctx)
{
	struct spu *spu = ctx->spu;
	int ret = 0;

	if (spu->class_0_pending)
		ret = spu_irq_class_0_bottom(spu);
	if (!ret && signal_pending(current))
		ret = -ERESTARTSYS;
	return ret;
}
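/*
 * Back end of the spu_run system call: schedule the context onto an
 * SPE, start it and wait for it to stop.  Syscall callbacks (stop code
 * 0x2104), class 1 faults and context rescheduling are handled inside
 * the loop; we return the SPU status word (or -ERESTARTSYS when
 * interrupted by a signal), report events through *event and raise
 * SIGTRAP for a 0x3fff breakpoint stop.
 */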
long spufs_run_spu(struct spu_context *ctx, u32 *npc, u32 *event)
{
	int ret;
	struct spu *spu;
	u32 status;

	if (mutex_lock_interruptible(&ctx->run_mutex))
		return -ERESTARTSYS;

	ctx->ops->master_start(ctx);
	ctx->event_return = 0;

	spu_acquire(ctx);
	if (ctx->state == SPU_STATE_SAVED) {
		__spu_update_sched_info(ctx);
		spu_set_timeslice(ctx);

		ret = spu_activate(ctx, 0);
		if (ret) {
			spu_release(ctx);
			goto out;
		}
	} else {
		/*
		 * We have to update the scheduling priority under active_mutex
		 * to protect against find_victim().
		 *
		 * No need to update the timeslice ASAP, it will get updated
		 * once the current one has expired.
		 */
		spu_update_sched_info(ctx);
	}

	ret = spu_run_init(ctx, npc);
	if (ret) {
		spu_release(ctx);
		goto out;
	}

	do {
		ret = spufs_wait(ctx->stop_wq, spu_stopped(ctx, &status));
		if (unlikely(ret))
			break;
		spu = ctx->spu;
		if (unlikely(test_and_clear_bit(SPU_SCHED_NOTIFY_ACTIVE,
						&ctx->sched_flags))) {
			if (!(status & SPU_STATUS_STOPPED_BY_STOP)) {
				spu_switch_notify(spu, ctx);
				continue;
			}
		}

		spuctx_switch_state(ctx, SPU_UTIL_SYSTEM);

		if ((status & SPU_STATUS_STOPPED_BY_STOP) &&
		    (status >> SPU_STOP_STATUS_SHIFT == 0x2104)) {
			ret = spu_process_callback(ctx);
			if (ret)
				break;
			status &= ~SPU_STATUS_STOPPED_BY_STOP;
		}
		ret = spufs_handle_class1(ctx);
		if (ret)
			break;

		if (unlikely(ctx->state != SPU_STATE_RUNNABLE)) {
			ret = spu_reacquire_runnable(ctx, npc, &status);
			if (ret)
				goto out2;
			continue;
		}
		ret = spu_process_events(ctx);

	} while (!ret && !(status & (SPU_STATUS_STOPPED_BY_STOP |
				     SPU_STATUS_STOPPED_BY_HALT |
				     SPU_STATUS_SINGLE_STEP)));

	if ((status & SPU_STATUS_STOPPED_BY_STOP) &&
	    (((status >> SPU_STOP_STATUS_SHIFT) & 0x3f00) == 0x2100) &&
	    (ctx->state == SPU_STATE_RUNNABLE))
		ctx->stats.libassist++;

	ctx->ops->master_stop(ctx);
	ret = spu_run_fini(ctx, npc, &status);
	spu_yield(ctx);

out2:
	if ((ret == 0) ||
	    ((ret == -ERESTARTSYS) &&
	     ((status & SPU_STATUS_STOPPED_BY_HALT) ||
	      (status & SPU_STATUS_SINGLE_STEP) ||
	      ((status & SPU_STATUS_STOPPED_BY_STOP) &&
	       (status >> SPU_STOP_STATUS_SHIFT != 0x2104)))))
		ret = status;

	/* Note: we don't need to force_sig SIGTRAP on single-step
	 * since we have TIF_SINGLESTEP set, thus the kernel will do
	 * it upon return from the syscall anyway.
	 */
	if ((status & SPU_STATUS_STOPPED_BY_STOP)
	    && (status >> SPU_STOP_STATUS_SHIFT) == 0x3fff) {
		force_sig(SIGTRAP, current);
		ret = -ERESTARTSYS;
	}

out:
	*event = ctx->event_return;
	mutex_unlock(&ctx->run_mutex);
	return ret;
}
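/*
 * Typical use from user space (illustrative sketch only: real
 * applications normally go through libspe, and the pathname, program
 * loading and error handling below are simplified assumptions):
 *
 *	int fd = syscall(__NR_spu_create, "/spu/myctx", 0, 0755);
 *	... write the SPE program image into the context's "mem" file ...
 *	u32 npc = 0, event = 0;
 *	int status = syscall(__NR_spu_run, fd, &npc, &event);
 *	... status holds the SPU status word returned by spufs_run_spu(),
 *	    event the contents of *event filled in above ...
 */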