#define DEBUG

#include <linux/wait.h>
#include <linux/ptrace.h>

#include <asm/spu.h>
#include <asm/spu_priv1.h>
#include <asm/io.h>
#include <asm/unistd.h>

#include "spufs.h"

/*
 * Interrupt-level stop callback function.
 *
 * Runs in interrupt context when an SPU raises a class 0/1/2 exception.
 * Copies the per-class fault state from the physical spu into the
 * context save area (csa) and wakes any thread sleeping on the
 * context's stop queue (spufs_run_spu's spufs_wait loop).
 */
void spufs_stop_callback(struct spu *spu, int irq)
{
	struct spu_context *ctx = spu->ctx;

	/*
	 * It should be impossible to preempt a context while an exception
	 * is being processed, since the context switch code is specially
	 * coded to deal with interrupts ... But, just in case, sanity check
	 * the context pointer.  It is OK to return doing nothing since
	 * the exception will be regenerated when the context is resumed.
	 */
	if (ctx) {
		/* Copy exception arguments into module specific structure */
		switch(irq) {
		case 0 :
			/* class 0: pending error condition + data address */
			ctx->csa.class_0_pending = spu->class_0_pending;
			ctx->csa.class_0_dar = spu->class_0_dar;
			break;
		case 1 :
			/* class 1: MFC translation fault (DSISR + DAR) */
			ctx->csa.class_1_dsisr = spu->class_1_dsisr;
			ctx->csa.class_1_dar = spu->class_1_dar;
			break;
		case 2 :
			/* class 2: no state to save; just wake the waiter */
			break;
		}

		/* ensure that the exception status has hit memory before a
		 * thread waiting on the context's stop queue is woken */
		smp_wmb();

		wake_up_all(&ctx->stop_wq);
	}
}

/*
 * Wait predicate for the spufs_wait() loop in spufs_run_spu().
 *
 * Returns 1 when the SPU needs attention from the controlling thread:
 * the SPU has stopped/halted/faulted, a scheduler notification is
 * pending, a class 1 translation fault was recorded, or a class 0
 * error is pending.  Returns 0 if the SPU is still running normally.
 * The stopped status value is stored through @stat in either case.
 */
int spu_stopped(struct spu_context *ctx, u32 *stat)
{
	u64 dsisr;
	u32 stopped;

	stopped = SPU_STATUS_INVALID_INSTR | SPU_STATUS_SINGLE_STEP |
		SPU_STATUS_STOPPED_BY_HALT | SPU_STATUS_STOPPED_BY_STOP;

top:
	*stat = ctx->ops->status_read(ctx);
	if (*stat & stopped) {
		/*
		 * If the spu hasn't finished stopping, we need to
		 * re-read the register to get the stopped value.
		 */
		if (*stat & SPU_STATUS_RUNNING)
			goto top;
		return 1;
	}

	if (test_bit(SPU_SCHED_NOTIFY_ACTIVE, &ctx->sched_flags))
		return 1;

	/* class 1 fault recorded by spufs_stop_callback()? */
	dsisr = ctx->csa.class_1_dsisr;
	if (dsisr & (MFC_DSISR_PTE_NOT_FOUND | MFC_DSISR_ACCESS_DENIED))
		return 1;

	if (ctx->csa.class_0_pending)
		return 1;

	return 0;
}

/*
 * Start the isolated-mode loader on the SPU.
 *
 * Purges the MFC DMA queue, temporarily drops the SPE into kernel
 * (privileged) mode via SR1 so the loader can be accessed, passes the
 * loader address through the signal notification registers, and starts
 * the SPU in isolated mode.  Privilege is restored on all exit paths
 * that reach out_drop_priv.
 *
 * Returns 0 on success, -ENODEV if no loader is available, -EIO on
 * hardware timeout, -EACCES if the isolated load failed, or -EINVAL
 * if the SPU unexpectedly left isolated state.
 */
static int spu_setup_isolated(struct spu_context *ctx)
{
	int ret;
	u64 __iomem *mfc_cntl;
	u64 sr1;
	u32 status;
	unsigned long timeout;
	const u32 status_loading = SPU_STATUS_RUNNING
		| SPU_STATUS_ISOLATED_STATE | SPU_STATUS_ISOLATED_LOAD_STATUS;

	ret = -ENODEV;
	if (!isolated_loader)
		goto out;

	/*
	 * We need to exclude userspace access to the context.
	 *
	 * To protect against memory access we invalidate all ptes
	 * and make sure the pagefault handlers block on the mutex.
	 */
	spu_unmap_mappings(ctx);

	mfc_cntl = &ctx->spu->priv2->mfc_control_RW;

	/* purge the MFC DMA queue to ensure no spurious accesses before we
	 * enter kernel mode */
	timeout = jiffies + HZ;
	out_be64(mfc_cntl, MFC_CNTL_PURGE_DMA_REQUEST);
	while ((in_be64(mfc_cntl) & MFC_CNTL_PURGE_DMA_STATUS_MASK)
			!= MFC_CNTL_PURGE_DMA_COMPLETE) {
		if (time_after(jiffies, timeout)) {
			printk(KERN_ERR "%s: timeout flushing MFC DMA queue\n",
					__func__);
			ret = -EIO;
			goto out;
		}
		cond_resched();
	}

	/* clear purge status */
	out_be64(mfc_cntl, 0);

	/* put the SPE in kernel mode to allow access to the loader */
	sr1 = spu_mfc_sr1_get(ctx->spu);
	sr1 &= ~MFC_STATE1_PROBLEM_STATE_MASK;
	spu_mfc_sr1_set(ctx->spu, sr1);

	/* start the loader: hand it its own address via the two 32-bit
	 * signal notification registers */
	ctx->ops->signal1_write(ctx, (unsigned long)isolated_loader >> 32);
	ctx->ops->signal2_write(ctx,
			(unsigned long)isolated_loader & 0xffffffff);

	ctx->ops->runcntl_write(ctx,
			SPU_RUNCNTL_RUNNABLE | SPU_RUNCNTL_ISOLATE);

	ret = 0;
	/* wait (up to 1s) for the loader to leave the "loading" state */
	timeout = jiffies + HZ;
	while (((status = ctx->ops->status_read(ctx)) & status_loading) ==
				status_loading) {
		if (time_after(jiffies, timeout)) {
			printk(KERN_ERR "%s: timeout waiting for loader\n",
					__func__);
			ret = -EIO;
			goto out_drop_priv;
		}
		cond_resched();
	}

	if (!(status & SPU_STATUS_RUNNING)) {
		/* If isolated LOAD has failed: run SPU, we will get a stop-and
		 * signal later. */
		pr_debug("%s: isolated LOAD failed\n", __func__);
		ctx->ops->runcntl_write(ctx, SPU_RUNCNTL_RUNNABLE);
		ret = -EACCES;
		goto out_drop_priv;
	}

	if (!(status & SPU_STATUS_ISOLATED_STATE)) {
		/* This isn't allowed by the CBEA, but check anyway */
		pr_debug("%s: SPU fell out of isolated mode?\n", __func__);
		ctx->ops->runcntl_write(ctx, SPU_RUNCNTL_STOP);
		ret = -EINVAL;
		goto out_drop_priv;
	}

out_drop_priv:
	/* Finished accessing the loader. Drop kernel mode */
	sr1 |= MFC_STATE1_PROBLEM_STATE_MASK;
	spu_mfc_sr1_set(ctx->spu, sr1);

out:
	return ret;
}

/*
 * Prepare and start a context for spufs_run_spu().
 *
 * Handles the three creation-flag variants: NOSCHED contexts are
 * activated synchronously before setup; ISOLATE contexts get the
 * isolated loader run and preserve any runcntl value userspace set;
 * normal contexts get privcntl (single-step vs normal, depending on
 * TIF_SINGLESTEP) and the start NPC written.  Finally the run control
 * register is written and utilization accounting is switched to USER.
 *
 * Returns 0 on success or a negative errno from activation/setup.
 */
static int spu_run_init(struct spu_context *ctx, u32 *npc)
{
	unsigned long runcntl = SPU_RUNCNTL_RUNNABLE;
	int ret;

	spuctx_switch_state(ctx, SPU_UTIL_SYSTEM);

	/*
	 * NOSCHED is synchronous scheduling with respect to the caller.
	 * The caller waits for the context to be loaded.
	 */
	if (ctx->flags & SPU_CREATE_NOSCHED) {
		if (ctx->state == SPU_STATE_SAVED) {
			ret = spu_activate(ctx, 0);
			if (ret)
				return ret;
		}
	}

	/*
	 * Apply special setup as required.
	 */
	if (ctx->flags & SPU_CREATE_ISOLATE) {
		if (!(ctx->ops->status_read(ctx) & SPU_STATUS_ISOLATED_STATE)) {
			ret = spu_setup_isolated(ctx);
			if (ret)
				return ret;
		}

		/*
		 * If userspace has set the runcntrl register (eg, to
		 * issue an isolated exit), we need to re-set it here
		 */
		runcntl = ctx->ops->runcntl_read(ctx) &
			(SPU_RUNCNTL_RUNNABLE | SPU_RUNCNTL_ISOLATE);
		if (runcntl == 0)
			runcntl = SPU_RUNCNTL_RUNNABLE;
	} else {
		unsigned long privcntl;

		/* honor ptrace single-stepping of the controlling thread */
		if (test_thread_flag(TIF_SINGLESTEP))
			privcntl = SPU_PRIVCNTL_MODE_SINGLE_STEP;
		else
			privcntl = SPU_PRIVCNTL_MODE_NORMAL;

		ctx->ops->privcntl_write(ctx, privcntl);
		ctx->ops->npc_write(ctx, *npc);
	}

	ctx->ops->runcntl_write(ctx, runcntl);

	if (ctx->flags & SPU_CREATE_NOSCHED) {
		/* already loaded above; account user time from here */
		spuctx_switch_state(ctx, SPU_UTIL_USER);
	} else {

		if (ctx->state == SPU_STATE_SAVED) {
			ret = spu_activate(ctx, 0);
			if (ret)
				return ret;
		} else {
			spuctx_switch_state(ctx, SPU_UTIL_USER);
		}
	}

	set_bit(SPU_SCHED_SPU_RUN, &ctx->sched_flags);
	return 0;
}

/*
 * Tear-down counterpart to spu_run_init().
 *
 * Reads back the final status and NPC for the caller, updates
 * scheduler/accounting state, emits the context-switch log entry and
 * releases the context (drops state_mutex).  Returns -ERESTARTSYS if
 * a signal is pending, 0 otherwise.
 */
static int spu_run_fini(struct spu_context *ctx, u32 *npc,
			u32 *status)
{
	int ret = 0;

	spu_del_from_rq(ctx);

	*status = ctx->ops->status_read(ctx);
	*npc = ctx->ops->npc_read(ctx);

	spuctx_switch_state(ctx, SPU_UTIL_IDLE_LOADED);
	clear_bit(SPU_SCHED_SPU_RUN, &ctx->sched_flags);
	spu_switch_log_notify(NULL, ctx, SWITCH_LOG_EXIT, *status);
	spu_release(ctx);

	if (signal_pending(current))
		ret = -ERESTARTSYS;

	return ret;
}

/*
 * SPU syscall restarting is tricky because we violate the basic
 * assumption that the signal handler is running on the interrupted
 * thread. Here instead, the handler runs on PowerPC user space code,
 * while the syscall was called from the SPU.
 * This means we can only do a very rough approximation of POSIX
 * signal semantics.
 */
static int spu_handle_restartsys(struct spu_context *ctx, long *spu_ret,
				 unsigned int *npc)
{
	int ret;

	switch (*spu_ret) {
	case -ERESTARTSYS:
	case -ERESTARTNOINTR:
		/*
		 * Enter the regular syscall restarting for
		 * sys_spu_run, then restart the SPU syscall
		 * callback.
		 */
		*npc -= 8;
		ret = -ERESTARTSYS;
		break;
	case -ERESTARTNOHAND:
	case -ERESTART_RESTARTBLOCK:
		/*
		 * Restart block is too hard for now, just return -EINTR
		 * to the SPU.
		 * ERESTARTNOHAND comes from sys_pause, we also return
		 * -EINTR from there.
		 * Assume that we need to be restarted ourselves though.
		 */
		*spu_ret = -EINTR;
		ret = -ERESTARTSYS;
		break;
	default:
		/* caller guarantees *spu_ret <= -ERESTARTSYS; anything
		 * else here is a kernel inconsistency worth logging */
		printk(KERN_WARNING "%s: unexpected return code %ld\n",
			__func__, *spu_ret);
		ret = 0;
	}
	return ret;
}

/*
 * Service a syscall request from the SPU (stop code 0x2104).
 *
 * Reads the spu_syscall_block out of local store (indirect through the
 * word at the current NPC), performs the system call on the PowerPC
 * side with the context released, then writes the result back into
 * local store and resumes the SPU past the indirect pointer.
 *
 * Returns 0 on success, -EFAULT for an out-of-bounds local-store
 * pointer, or -ERESTARTSYS if the syscall must be restarted.
 */
static int spu_process_callback(struct spu_context *ctx)
{
	struct spu_syscall_block s;
	u32 ls_pointer, npc;
	void __iomem *ls;
	long spu_ret;
	int ret;

	/* get syscall block from local store */
	npc = ctx->ops->npc_read(ctx) & ~3;
	ls = (void __iomem *)ctx->ops->get_ls(ctx);
	ls_pointer = in_be32(ls + npc);
	if (ls_pointer > (LS_SIZE - sizeof(s)))
		return -EFAULT;
	memcpy_fromio(&s, ls + ls_pointer, sizeof(s));

	/* do actual syscall without pinning the spu */
	ret = 0;
	spu_ret = -ENOSYS;
	npc += 4;

	if (s.nr_ret < NR_syscalls) {
		spu_release(ctx);
		/* do actual system call from here */
		spu_ret = spu_sys_callback(&s);
		if (spu_ret <= -ERESTARTSYS) {
			ret = spu_handle_restartsys(ctx, &spu_ret, &npc);
		}
		/* re-acquire before touching context state again */
		mutex_lock(&ctx->state_mutex);
		if (ret == -ERESTARTSYS)
			return ret;
	}

	/* need to re-get the ls, as it may have changed when we released the
	 * spu */
	ls = (void __iomem *)ctx->ops->get_ls(ctx);

	/* write result, jump over indirect pointer */
	memcpy_toio(ls + ls_pointer, &spu_ret, sizeof(spu_ret));
	ctx->ops->npc_write(ctx, npc);
	ctx->ops->runcntl_write(ctx, SPU_RUNCNTL_RUNNABLE);
	return ret;
}

/*
 * Main entry point for sys_spu_run: run an SPU context until it stops.
 *
 * Starts the context, then loops waiting for it to stop, dispatching
 * syscall callbacks (stop code 0x2104) and class 0/1 exception
 * handlers until the SPU stops/halts/single-steps or an error or
 * signal interrupts the run.  On return, *npc holds the final program
 * counter and *event the pending context events.
 *
 * Returns the SPU status word on a normal stop, or a negative errno
 * (typically -ERESTARTSYS on signal/single-step).
 */
long spufs_run_spu(struct spu_context *ctx, u32 *npc, u32 *event)
{
	int ret;
	struct spu *spu;
	u32 status;

	/* serialize against concurrent spu_run on the same context */
	if (mutex_lock_interruptible(&ctx->run_mutex))
		return -ERESTARTSYS;

	ctx->event_return = 0;

	ret = spu_acquire(ctx);
	if (ret)
		goto out_unlock;

	spu_enable_spu(ctx);

	spu_update_sched_info(ctx);

	ret = spu_run_init(ctx, npc);
	if (ret) {
		spu_release(ctx);
		goto out;
	}

	do {
		ret = spufs_wait(ctx->stop_wq, spu_stopped(ctx, &status));
		if (unlikely(ret)) {
			/*
			 * This is nasty: we need the state_mutex for all the
			 * bookkeeping even if the syscall was interrupted by
			 * a signal. ewww.
			 */
			mutex_lock(&ctx->state_mutex);
			break;
		}
		spu = ctx->spu;
		if (unlikely(test_and_clear_bit(SPU_SCHED_NOTIFY_ACTIVE,
						&ctx->sched_flags))) {
			if (!(status & SPU_STATUS_STOPPED_BY_STOP)) {
				/* notification only; keep running */
				spu_switch_notify(spu, ctx);
				continue;
			}
		}

		spuctx_switch_state(ctx, SPU_UTIL_SYSTEM);

		/* stop code 0x2104 is the SPU-side syscall request */
		if ((status & SPU_STATUS_STOPPED_BY_STOP) &&
		    (status >> SPU_STOP_STATUS_SHIFT == 0x2104)) {
			ret = spu_process_callback(ctx);
			if (ret)
				break;
			status &= ~SPU_STATUS_STOPPED_BY_STOP;
		}
		ret = spufs_handle_class1(ctx);
		if (ret)
			break;

		ret = spufs_handle_class0(ctx);
		if (ret)
			break;

		if (signal_pending(current))
			ret = -ERESTARTSYS;
	} while (!ret && !(status & (SPU_STATUS_STOPPED_BY_STOP |
				      SPU_STATUS_STOPPED_BY_HALT |
				      SPU_STATUS_SINGLE_STEP)));

	spu_disable_spu(ctx);
	ret = spu_run_fini(ctx, npc, &status);
	spu_yield(ctx);

	/* 0x21xx stop codes are library-assisted calls; count them */
	if ((status & SPU_STATUS_STOPPED_BY_STOP) &&
	    (((status >> SPU_STOP_STATUS_SHIFT) & 0x3f00) == 0x2100))
		ctx->stats.libassist++;

	/* report the raw status word to userspace unless the run was
	 * interrupted mid-syscall-callback (0x2104) */
	if ((ret == 0) ||
	    ((ret == -ERESTARTSYS) &&
	     ((status & SPU_STATUS_STOPPED_BY_HALT) ||
	      (status & SPU_STATUS_SINGLE_STEP) ||
	      ((status & SPU_STATUS_STOPPED_BY_STOP) &&
	       (status >> SPU_STOP_STATUS_SHIFT != 0x2104)))))
		ret = status;

	/* Note: we don't need to force_sig SIGTRAP on single-step
	 * since we have TIF_SINGLESTEP set, thus the kernel will do
	 * it upon return from the syscall anyway.
	 */
	if (unlikely(status & SPU_STATUS_SINGLE_STEP))
		ret = -ERESTARTSYS;

	else if (unlikely((status & SPU_STATUS_STOPPED_BY_STOP)
	    && (status >> SPU_STOP_STATUS_SHIFT) == 0x3fff)) {
		/* 0x3fff is the debugger breakpoint stop code */
		force_sig(SIGTRAP, current);
		ret = -ERESTARTSYS;
	}

out:
	*event = ctx->event_return;
out_unlock:
	mutex_unlock(&ctx->run_mutex);
	return ret;
}