1de6cc651SThomas Gleixner // SPDX-License-Identifier: GPL-2.0-or-later 25473af04SMark Nutter /* 35473af04SMark Nutter * spu_switch.c 45473af04SMark Nutter * 55473af04SMark Nutter * (C) Copyright IBM Corp. 2005 65473af04SMark Nutter * 75473af04SMark Nutter * Author: Mark Nutter <mnutter@us.ibm.com> 85473af04SMark Nutter * 95473af04SMark Nutter * Host-side part of SPU context switch sequence outlined in 105473af04SMark Nutter * Synergistic Processor Element, Book IV. 115473af04SMark Nutter * 125473af04SMark Nutter * A fully premptive switch of an SPE is very expensive in terms 135473af04SMark Nutter * of time and system resources. SPE Book IV indicates that SPE 145473af04SMark Nutter * allocation should follow a "serially reusable device" model, 155473af04SMark Nutter * in which the SPE is assigned a task until it completes. When 165473af04SMark Nutter * this is not possible, this sequence may be used to premptively 175473af04SMark Nutter * save, and then later (optionally) restore the context of a 185473af04SMark Nutter * program executing on an SPE. 
195473af04SMark Nutter */ 205473af04SMark Nutter 214b16f8e2SPaul Gortmaker #include <linux/export.h> 225473af04SMark Nutter #include <linux/errno.h> 23fae9ca79SArnd Bergmann #include <linux/hardirq.h> 245473af04SMark Nutter #include <linux/sched.h> 255473af04SMark Nutter #include <linux/kernel.h> 265473af04SMark Nutter #include <linux/mm.h> 275473af04SMark Nutter #include <linux/vmalloc.h> 285473af04SMark Nutter #include <linux/smp.h> 295473af04SMark Nutter #include <linux/stddef.h> 305473af04SMark Nutter #include <linux/unistd.h> 315473af04SMark Nutter 325473af04SMark Nutter #include <asm/io.h> 335473af04SMark Nutter #include <asm/spu.h> 34540270d8SGeoff Levand #include <asm/spu_priv1.h> 355473af04SMark Nutter #include <asm/spu_csa.h> 365473af04SMark Nutter #include <asm/mmu_context.h> 375473af04SMark Nutter 387cd58e43SJeremy Kerr #include "spufs.h" 397cd58e43SJeremy Kerr 405473af04SMark Nutter #include "spu_save_dump.h" 415473af04SMark Nutter #include "spu_restore_dump.h" 425473af04SMark Nutter 437c038749SMark Nutter #if 0 447c038749SMark Nutter #define POLL_WHILE_TRUE(_c) { \ 457c038749SMark Nutter do { \ 467c038749SMark Nutter } while (_c); \ 477c038749SMark Nutter } 487c038749SMark Nutter #else 497c038749SMark Nutter #define RELAX_SPIN_COUNT 1000 507c038749SMark Nutter #define POLL_WHILE_TRUE(_c) { \ 517c038749SMark Nutter do { \ 527c038749SMark Nutter int _i; \ 537c038749SMark Nutter for (_i=0; _i<RELAX_SPIN_COUNT && (_c); _i++) { \ 547c038749SMark Nutter cpu_relax(); \ 557c038749SMark Nutter } \ 567c038749SMark Nutter if (unlikely(_c)) yield(); \ 577c038749SMark Nutter else break; \ 587c038749SMark Nutter } while (_c); \ 597c038749SMark Nutter } 607c038749SMark Nutter #endif /* debug */ 617c038749SMark Nutter 627c038749SMark Nutter #define POLL_WHILE_FALSE(_c) POLL_WHILE_TRUE(!(_c)) 637c038749SMark Nutter 647c038749SMark Nutter static inline void acquire_spu_lock(struct spu *spu) 657c038749SMark Nutter { 667c038749SMark Nutter /* Save, Step 1: 
677c038749SMark Nutter * Restore, Step 1: 687c038749SMark Nutter * Acquire SPU-specific mutual exclusion lock. 697c038749SMark Nutter * TBD. 707c038749SMark Nutter */ 717c038749SMark Nutter } 727c038749SMark Nutter 737c038749SMark Nutter static inline void release_spu_lock(struct spu *spu) 747c038749SMark Nutter { 757c038749SMark Nutter /* Restore, Step 76: 767c038749SMark Nutter * Release SPU-specific mutual exclusion lock. 777c038749SMark Nutter * TBD. 787c038749SMark Nutter */ 797c038749SMark Nutter } 807c038749SMark Nutter 817c038749SMark Nutter static inline int check_spu_isolate(struct spu_state *csa, struct spu *spu) 827c038749SMark Nutter { 837c038749SMark Nutter struct spu_problem __iomem *prob = spu->problem; 847c038749SMark Nutter u32 isolate_state; 857c038749SMark Nutter 867c038749SMark Nutter /* Save, Step 2: 877c038749SMark Nutter * Save, Step 6: 887c038749SMark Nutter * If SPU_Status[E,L,IS] any field is '1', this 897c038749SMark Nutter * SPU is in isolate state and cannot be context 907c038749SMark Nutter * saved at this time. 917c038749SMark Nutter */ 927c038749SMark Nutter isolate_state = SPU_STATUS_ISOLATED_STATE | 93eb758ce5Sarnd@arndb.de SPU_STATUS_ISOLATED_LOAD_STATUS | SPU_STATUS_ISOLATED_EXIT_STATUS; 947c038749SMark Nutter return (in_be32(&prob->spu_status_R) & isolate_state) ? 1 : 0; 957c038749SMark Nutter } 967c038749SMark Nutter 977c038749SMark Nutter static inline void disable_interrupts(struct spu_state *csa, struct spu *spu) 987c038749SMark Nutter { 997c038749SMark Nutter /* Save, Step 3: 1007c038749SMark Nutter * Restore, Step 2: 1017c038749SMark Nutter * Save INT_Mask_class0 in CSA. 1027c038749SMark Nutter * Write INT_MASK_class0 with value of 0. 1037c038749SMark Nutter * Save INT_Mask_class1 in CSA. 1047c038749SMark Nutter * Write INT_MASK_class1 with value of 0. 1057c038749SMark Nutter * Save INT_Mask_class2 in CSA. 1067c038749SMark Nutter * Write INT_MASK_class2 with value of 0. 
107fae9ca79SArnd Bergmann * Synchronize all three interrupts to be sure 108fae9ca79SArnd Bergmann * we no longer execute a handler on another CPU. 1097c038749SMark Nutter */ 1107c038749SMark Nutter spin_lock_irq(&spu->register_lock); 1117c038749SMark Nutter if (csa) { 112f0831accSArnd Bergmann csa->priv1.int_mask_class0_RW = spu_int_mask_get(spu, 0); 113f0831accSArnd Bergmann csa->priv1.int_mask_class1_RW = spu_int_mask_get(spu, 1); 114f0831accSArnd Bergmann csa->priv1.int_mask_class2_RW = spu_int_mask_get(spu, 2); 1157c038749SMark Nutter } 116f0831accSArnd Bergmann spu_int_mask_set(spu, 0, 0ul); 117f0831accSArnd Bergmann spu_int_mask_set(spu, 1, 0ul); 118f0831accSArnd Bergmann spu_int_mask_set(spu, 2, 0ul); 1197c038749SMark Nutter eieio(); 1207c038749SMark Nutter spin_unlock_irq(&spu->register_lock); 121093c16bfSLuke Browning 122093c16bfSLuke Browning /* 123093c16bfSLuke Browning * This flag needs to be set before calling synchronize_irq so 124093c16bfSLuke Browning * that the update will be visible to the relevant handlers 125093c16bfSLuke Browning * via a simple load. 126093c16bfSLuke Browning */ 127093c16bfSLuke Browning set_bit(SPU_CONTEXT_SWITCH_PENDING, &spu->flags); 128de102892SLuke Browning clear_bit(SPU_CONTEXT_FAULT_PENDING, &spu->flags); 129fae9ca79SArnd Bergmann synchronize_irq(spu->irqs[0]); 130fae9ca79SArnd Bergmann synchronize_irq(spu->irqs[1]); 131fae9ca79SArnd Bergmann synchronize_irq(spu->irqs[2]); 1327c038749SMark Nutter } 1337c038749SMark Nutter 1347c038749SMark Nutter static inline void set_watchdog_timer(struct spu_state *csa, struct spu *spu) 1357c038749SMark Nutter { 1367c038749SMark Nutter /* Save, Step 4: 1377c038749SMark Nutter * Restore, Step 25. 1387c038749SMark Nutter * Set a software watchdog timer, which specifies the 1397c038749SMark Nutter * maximum allowable time for a context save sequence. 
1407c038749SMark Nutter * 1417c038749SMark Nutter * For present, this implementation will not set a global 1427c038749SMark Nutter * watchdog timer, as virtualization & variable system load 1437c038749SMark Nutter * may cause unpredictable execution times. 1447c038749SMark Nutter */ 1457c038749SMark Nutter } 1467c038749SMark Nutter 1477c038749SMark Nutter static inline void inhibit_user_access(struct spu_state *csa, struct spu *spu) 1487c038749SMark Nutter { 1497c038749SMark Nutter /* Save, Step 5: 1507c038749SMark Nutter * Restore, Step 3: 1517c038749SMark Nutter * Inhibit user-space access (if provided) to this 1527c038749SMark Nutter * SPU by unmapping the virtual pages assigned to 1537c038749SMark Nutter * the SPU memory-mapped I/O (MMIO) for problem 1547c038749SMark Nutter * state. TBD. 1557c038749SMark Nutter */ 1567c038749SMark Nutter } 1577c038749SMark Nutter 1587c038749SMark Nutter static inline void set_switch_pending(struct spu_state *csa, struct spu *spu) 1597c038749SMark Nutter { 1607c038749SMark Nutter /* Save, Step 7: 1617c038749SMark Nutter * Restore, Step 5: 1627c038749SMark Nutter * Set a software context switch pending flag. 163093c16bfSLuke Browning * Done above in Step 3 - disable_interrupts(). 1647c038749SMark Nutter */ 1657c038749SMark Nutter } 1667c038749SMark Nutter 1677c038749SMark Nutter static inline void save_mfc_cntl(struct spu_state *csa, struct spu *spu) 1687c038749SMark Nutter { 1697c038749SMark Nutter struct spu_priv2 __iomem *priv2 = spu->priv2; 1707c038749SMark Nutter 1717c038749SMark Nutter /* Save, Step 8: 1727f52eb00SGeoff Levand * Suspend DMA and save MFC_CNTL. 
1737c038749SMark Nutter */ 1747f52eb00SGeoff Levand switch (in_be64(&priv2->mfc_control_RW) & 1757f52eb00SGeoff Levand MFC_CNTL_SUSPEND_DMA_STATUS_MASK) { 1767f52eb00SGeoff Levand case MFC_CNTL_SUSPEND_IN_PROGRESS: 1777f52eb00SGeoff Levand POLL_WHILE_FALSE((in_be64(&priv2->mfc_control_RW) & 1787f52eb00SGeoff Levand MFC_CNTL_SUSPEND_DMA_STATUS_MASK) == 1797f52eb00SGeoff Levand MFC_CNTL_SUSPEND_COMPLETE); 1807f52eb00SGeoff Levand /* fall through */ 1817f52eb00SGeoff Levand case MFC_CNTL_SUSPEND_COMPLETE: 1821ca4264eSJeremy Kerr if (csa) 1837f52eb00SGeoff Levand csa->priv2.mfc_control_RW = 1841ca4264eSJeremy Kerr in_be64(&priv2->mfc_control_RW) | 1857f52eb00SGeoff Levand MFC_CNTL_SUSPEND_DMA_QUEUE; 1867f52eb00SGeoff Levand break; 1877f52eb00SGeoff Levand case MFC_CNTL_NORMAL_DMA_QUEUE_OPERATION: 1887f52eb00SGeoff Levand out_be64(&priv2->mfc_control_RW, MFC_CNTL_SUSPEND_DMA_QUEUE); 1897f52eb00SGeoff Levand POLL_WHILE_FALSE((in_be64(&priv2->mfc_control_RW) & 1907f52eb00SGeoff Levand MFC_CNTL_SUSPEND_DMA_STATUS_MASK) == 1917f52eb00SGeoff Levand MFC_CNTL_SUSPEND_COMPLETE); 1921ca4264eSJeremy Kerr if (csa) 1931ca4264eSJeremy Kerr csa->priv2.mfc_control_RW = 1941ca4264eSJeremy Kerr in_be64(&priv2->mfc_control_RW) & 1951ca4264eSJeremy Kerr ~MFC_CNTL_SUSPEND_DMA_QUEUE & 1961ca4264eSJeremy Kerr ~MFC_CNTL_SUSPEND_MASK; 1977f52eb00SGeoff Levand break; 1987c038749SMark Nutter } 1997c038749SMark Nutter } 2007c038749SMark Nutter 2017c038749SMark Nutter static inline void save_spu_runcntl(struct spu_state *csa, struct spu *spu) 2027c038749SMark Nutter { 2037c038749SMark Nutter struct spu_problem __iomem *prob = spu->problem; 2047c038749SMark Nutter 2057c038749SMark Nutter /* Save, Step 9: 2067c038749SMark Nutter * Save SPU_Runcntl in the CSA. This value contains 2077c038749SMark Nutter * the "Application Desired State". 
2087c038749SMark Nutter */ 2097c038749SMark Nutter csa->prob.spu_runcntl_RW = in_be32(&prob->spu_runcntl_RW); 2107c038749SMark Nutter } 2117c038749SMark Nutter 2127c038749SMark Nutter static inline void save_mfc_sr1(struct spu_state *csa, struct spu *spu) 2137c038749SMark Nutter { 2147c038749SMark Nutter /* Save, Step 10: 2157c038749SMark Nutter * Save MFC_SR1 in the CSA. 2167c038749SMark Nutter */ 217f0831accSArnd Bergmann csa->priv1.mfc_sr1_RW = spu_mfc_sr1_get(spu); 2187c038749SMark Nutter } 2197c038749SMark Nutter 2207c038749SMark Nutter static inline void save_spu_status(struct spu_state *csa, struct spu *spu) 2217c038749SMark Nutter { 2227c038749SMark Nutter struct spu_problem __iomem *prob = spu->problem; 2237c038749SMark Nutter 2247c038749SMark Nutter /* Save, Step 11: 2257c038749SMark Nutter * Read SPU_Status[R], and save to CSA. 2267c038749SMark Nutter */ 2277c038749SMark Nutter if ((in_be32(&prob->spu_status_R) & SPU_STATUS_RUNNING) == 0) { 2287c038749SMark Nutter csa->prob.spu_status_R = in_be32(&prob->spu_status_R); 2297c038749SMark Nutter } else { 2307c038749SMark Nutter u32 stopped; 2317c038749SMark Nutter 2327c038749SMark Nutter out_be32(&prob->spu_runcntl_RW, SPU_RUNCNTL_STOP); 2337c038749SMark Nutter eieio(); 2347c038749SMark Nutter POLL_WHILE_TRUE(in_be32(&prob->spu_status_R) & 2357c038749SMark Nutter SPU_STATUS_RUNNING); 2367c038749SMark Nutter stopped = 2377c038749SMark Nutter SPU_STATUS_INVALID_INSTR | SPU_STATUS_SINGLE_STEP | 2387c038749SMark Nutter SPU_STATUS_STOPPED_BY_HALT | SPU_STATUS_STOPPED_BY_STOP; 2397c038749SMark Nutter if ((in_be32(&prob->spu_status_R) & stopped) == 0) 2407c038749SMark Nutter csa->prob.spu_status_R = SPU_STATUS_RUNNING; 2417c038749SMark Nutter else 2427c038749SMark Nutter csa->prob.spu_status_R = in_be32(&prob->spu_status_R); 2437c038749SMark Nutter } 2447c038749SMark Nutter } 2457c038749SMark Nutter 24655d7cd74SJeremy Kerr static inline void save_mfc_stopped_status(struct spu_state *csa, 24755d7cd74SJeremy Kerr 
struct spu *spu) 2487c038749SMark Nutter { 2497c038749SMark Nutter struct spu_priv2 __iomem *priv2 = spu->priv2; 25055d7cd74SJeremy Kerr const u64 mask = MFC_CNTL_DECREMENTER_RUNNING | 25155d7cd74SJeremy Kerr MFC_CNTL_DMA_QUEUES_EMPTY; 2527c038749SMark Nutter 2537c038749SMark Nutter /* Save, Step 12: 2547c038749SMark Nutter * Read MFC_CNTL[Ds]. Update saved copy of 2557c038749SMark Nutter * CSA.MFC_CNTL[Ds]. 25655d7cd74SJeremy Kerr * 25755d7cd74SJeremy Kerr * update: do the same with MFC_CNTL[Q]. 2587c038749SMark Nutter */ 25955d7cd74SJeremy Kerr csa->priv2.mfc_control_RW &= ~mask; 26055d7cd74SJeremy Kerr csa->priv2.mfc_control_RW |= in_be64(&priv2->mfc_control_RW) & mask; 2617c038749SMark Nutter } 2627c038749SMark Nutter 2637c038749SMark Nutter static inline void halt_mfc_decr(struct spu_state *csa, struct spu *spu) 2647c038749SMark Nutter { 2657c038749SMark Nutter struct spu_priv2 __iomem *priv2 = spu->priv2; 2667c038749SMark Nutter 2677c038749SMark Nutter /* Save, Step 13: 2687c038749SMark Nutter * Write MFC_CNTL[Dh] set to a '1' to halt 2697c038749SMark Nutter * the decrementer. 2707c038749SMark Nutter */ 27149776d30SKazunori Asayama out_be64(&priv2->mfc_control_RW, 27249776d30SKazunori Asayama MFC_CNTL_DECREMENTER_HALTED | MFC_CNTL_SUSPEND_MASK); 2737c038749SMark Nutter eieio(); 2747c038749SMark Nutter } 2757c038749SMark Nutter 2767c038749SMark Nutter static inline void save_timebase(struct spu_state *csa, struct spu *spu) 2777c038749SMark Nutter { 2787c038749SMark Nutter /* Save, Step 14: 2797c038749SMark Nutter * Read PPE Timebase High and Timebase low registers 2807c038749SMark Nutter * and save in CSA. TBD. 
2817c038749SMark Nutter */ 2827c038749SMark Nutter csa->suspend_time = get_cycles(); 2837c038749SMark Nutter } 2847c038749SMark Nutter 2857c038749SMark Nutter static inline void remove_other_spu_access(struct spu_state *csa, 2867c038749SMark Nutter struct spu *spu) 2877c038749SMark Nutter { 2887c038749SMark Nutter /* Save, Step 15: 2897c038749SMark Nutter * Remove other SPU access to this SPU by unmapping 2907c038749SMark Nutter * this SPU's pages from their address space. TBD. 2917c038749SMark Nutter */ 2927c038749SMark Nutter } 2937c038749SMark Nutter 2947c038749SMark Nutter static inline void do_mfc_mssync(struct spu_state *csa, struct spu *spu) 2957c038749SMark Nutter { 2967c038749SMark Nutter struct spu_problem __iomem *prob = spu->problem; 2977c038749SMark Nutter 2987c038749SMark Nutter /* Save, Step 16: 2997c038749SMark Nutter * Restore, Step 11. 3007c038749SMark Nutter * Write SPU_MSSync register. Poll SPU_MSSync[P] 3017c038749SMark Nutter * for a value of 0. 3027c038749SMark Nutter */ 3037c038749SMark Nutter out_be64(&prob->spc_mssync_RW, 1UL); 3047c038749SMark Nutter POLL_WHILE_TRUE(in_be64(&prob->spc_mssync_RW) & MS_SYNC_PENDING); 3057c038749SMark Nutter } 3067c038749SMark Nutter 3077c038749SMark Nutter static inline void issue_mfc_tlbie(struct spu_state *csa, struct spu *spu) 3087c038749SMark Nutter { 3097c038749SMark Nutter /* Save, Step 17: 3107c038749SMark Nutter * Restore, Step 12. 3117c038749SMark Nutter * Restore, Step 48. 3127c038749SMark Nutter * Write TLB_Invalidate_Entry[IS,VPN,L,Lp]=0 register. 3137c038749SMark Nutter * Then issue a PPE sync instruction. 
3147c038749SMark Nutter */ 315f0831accSArnd Bergmann spu_tlb_invalidate(spu); 3167c038749SMark Nutter mb(); 3177c038749SMark Nutter } 3187c038749SMark Nutter 3197c038749SMark Nutter static inline void handle_pending_interrupts(struct spu_state *csa, 3207c038749SMark Nutter struct spu *spu) 3217c038749SMark Nutter { 3227c038749SMark Nutter /* Save, Step 18: 3237c038749SMark Nutter * Handle any pending interrupts from this SPU 3247c038749SMark Nutter * here. This is OS or hypervisor specific. One 3257c038749SMark Nutter * option is to re-enable interrupts to handle any 3267c038749SMark Nutter * pending interrupts, with the interrupt handlers 3277c038749SMark Nutter * recognizing the software Context Switch Pending 3287c038749SMark Nutter * flag, to ensure the SPU execution or MFC command 3297c038749SMark Nutter * queue is not restarted. TBD. 3307c038749SMark Nutter */ 3317c038749SMark Nutter } 3327c038749SMark Nutter 3337c038749SMark Nutter static inline void save_mfc_queues(struct spu_state *csa, struct spu *spu) 3347c038749SMark Nutter { 3357c038749SMark Nutter struct spu_priv2 __iomem *priv2 = spu->priv2; 3367c038749SMark Nutter int i; 3377c038749SMark Nutter 3387c038749SMark Nutter /* Save, Step 19: 3397c038749SMark Nutter * If MFC_Cntl[Se]=0 then save 3407c038749SMark Nutter * MFC command queues. 
3417c038749SMark Nutter */ 3427c038749SMark Nutter if ((in_be64(&priv2->mfc_control_RW) & MFC_CNTL_DMA_QUEUES_EMPTY) == 0) { 3437c038749SMark Nutter for (i = 0; i < 8; i++) { 3447c038749SMark Nutter csa->priv2.puq[i].mfc_cq_data0_RW = 3457c038749SMark Nutter in_be64(&priv2->puq[i].mfc_cq_data0_RW); 3467c038749SMark Nutter csa->priv2.puq[i].mfc_cq_data1_RW = 3477c038749SMark Nutter in_be64(&priv2->puq[i].mfc_cq_data1_RW); 3487c038749SMark Nutter csa->priv2.puq[i].mfc_cq_data2_RW = 3497c038749SMark Nutter in_be64(&priv2->puq[i].mfc_cq_data2_RW); 3507c038749SMark Nutter csa->priv2.puq[i].mfc_cq_data3_RW = 3517c038749SMark Nutter in_be64(&priv2->puq[i].mfc_cq_data3_RW); 3527c038749SMark Nutter } 3537c038749SMark Nutter for (i = 0; i < 16; i++) { 3547c038749SMark Nutter csa->priv2.spuq[i].mfc_cq_data0_RW = 3557c038749SMark Nutter in_be64(&priv2->spuq[i].mfc_cq_data0_RW); 3567c038749SMark Nutter csa->priv2.spuq[i].mfc_cq_data1_RW = 3577c038749SMark Nutter in_be64(&priv2->spuq[i].mfc_cq_data1_RW); 3587c038749SMark Nutter csa->priv2.spuq[i].mfc_cq_data2_RW = 3597c038749SMark Nutter in_be64(&priv2->spuq[i].mfc_cq_data2_RW); 3607c038749SMark Nutter csa->priv2.spuq[i].mfc_cq_data3_RW = 3617c038749SMark Nutter in_be64(&priv2->spuq[i].mfc_cq_data3_RW); 3627c038749SMark Nutter } 3637c038749SMark Nutter } 3647c038749SMark Nutter } 3657c038749SMark Nutter 3667c038749SMark Nutter static inline void save_ppu_querymask(struct spu_state *csa, struct spu *spu) 3677c038749SMark Nutter { 3687c038749SMark Nutter struct spu_problem __iomem *prob = spu->problem; 3697c038749SMark Nutter 3707c038749SMark Nutter /* Save, Step 20: 3717c038749SMark Nutter * Save the PPU_QueryMask register 3727c038749SMark Nutter * in the CSA. 
3737c038749SMark Nutter */ 3747c038749SMark Nutter csa->prob.dma_querymask_RW = in_be32(&prob->dma_querymask_RW); 3757c038749SMark Nutter } 3767c038749SMark Nutter 3777c038749SMark Nutter static inline void save_ppu_querytype(struct spu_state *csa, struct spu *spu) 3787c038749SMark Nutter { 3797c038749SMark Nutter struct spu_problem __iomem *prob = spu->problem; 3807c038749SMark Nutter 3817c038749SMark Nutter /* Save, Step 21: 3827c038749SMark Nutter * Save the PPU_QueryType register 3837c038749SMark Nutter * in the CSA. 3847c038749SMark Nutter */ 3857c038749SMark Nutter csa->prob.dma_querytype_RW = in_be32(&prob->dma_querytype_RW); 3867c038749SMark Nutter } 3877c038749SMark Nutter 3888d038e04SKazunori Asayama static inline void save_ppu_tagstatus(struct spu_state *csa, struct spu *spu) 3898d038e04SKazunori Asayama { 3908d038e04SKazunori Asayama struct spu_problem __iomem *prob = spu->problem; 3918d038e04SKazunori Asayama 3928d038e04SKazunori Asayama /* Save the Prxy_TagStatus register in the CSA. 3938d038e04SKazunori Asayama * 3948d038e04SKazunori Asayama * It is unnecessary to restore dma_tagstatus_R, however, 3958d038e04SKazunori Asayama * dma_tagstatus_R in the CSA is accessed via backing_ops, so 3968d038e04SKazunori Asayama * we must save it. 3978d038e04SKazunori Asayama */ 3988d038e04SKazunori Asayama csa->prob.dma_tagstatus_R = in_be32(&prob->dma_tagstatus_R); 3998d038e04SKazunori Asayama } 4008d038e04SKazunori Asayama 4017c038749SMark Nutter static inline void save_mfc_csr_tsq(struct spu_state *csa, struct spu *spu) 4027c038749SMark Nutter { 4037c038749SMark Nutter struct spu_priv2 __iomem *priv2 = spu->priv2; 4047c038749SMark Nutter 4057c038749SMark Nutter /* Save, Step 22: 4067c038749SMark Nutter * Save the MFC_CSR_TSQ register 4077c038749SMark Nutter * in the LSCSA. 
4087c038749SMark Nutter */ 4097c038749SMark Nutter csa->priv2.spu_tag_status_query_RW = 4107c038749SMark Nutter in_be64(&priv2->spu_tag_status_query_RW); 4117c038749SMark Nutter } 4127c038749SMark Nutter 4137c038749SMark Nutter static inline void save_mfc_csr_cmd(struct spu_state *csa, struct spu *spu) 4147c038749SMark Nutter { 4157c038749SMark Nutter struct spu_priv2 __iomem *priv2 = spu->priv2; 4167c038749SMark Nutter 4177c038749SMark Nutter /* Save, Step 23: 4187c038749SMark Nutter * Save the MFC_CSR_CMD1 and MFC_CSR_CMD2 4197c038749SMark Nutter * registers in the CSA. 4207c038749SMark Nutter */ 4217c038749SMark Nutter csa->priv2.spu_cmd_buf1_RW = in_be64(&priv2->spu_cmd_buf1_RW); 4227c038749SMark Nutter csa->priv2.spu_cmd_buf2_RW = in_be64(&priv2->spu_cmd_buf2_RW); 4237c038749SMark Nutter } 4247c038749SMark Nutter 4257c038749SMark Nutter static inline void save_mfc_csr_ato(struct spu_state *csa, struct spu *spu) 4267c038749SMark Nutter { 4277c038749SMark Nutter struct spu_priv2 __iomem *priv2 = spu->priv2; 4287c038749SMark Nutter 4297c038749SMark Nutter /* Save, Step 24: 4307c038749SMark Nutter * Save the MFC_CSR_ATO register in 4317c038749SMark Nutter * the CSA. 4327c038749SMark Nutter */ 4337c038749SMark Nutter csa->priv2.spu_atomic_status_RW = in_be64(&priv2->spu_atomic_status_RW); 4347c038749SMark Nutter } 4357c038749SMark Nutter 4367c038749SMark Nutter static inline void save_mfc_tclass_id(struct spu_state *csa, struct spu *spu) 4377c038749SMark Nutter { 4387c038749SMark Nutter /* Save, Step 25: 4397c038749SMark Nutter * Save the MFC_TCLASS_ID register in 4407c038749SMark Nutter * the CSA. 4417c038749SMark Nutter */ 442f0831accSArnd Bergmann csa->priv1.mfc_tclass_id_RW = spu_mfc_tclass_id_get(spu); 4437c038749SMark Nutter } 4447c038749SMark Nutter 4457c038749SMark Nutter static inline void set_mfc_tclass_id(struct spu_state *csa, struct spu *spu) 4467c038749SMark Nutter { 4477c038749SMark Nutter /* Save, Step 26: 4487c038749SMark Nutter * Restore, Step 23. 
4497c038749SMark Nutter * Write the MFC_TCLASS_ID register with 4507c038749SMark Nutter * the value 0x10000000. 4517c038749SMark Nutter */ 452f0831accSArnd Bergmann spu_mfc_tclass_id_set(spu, 0x10000000); 4537c038749SMark Nutter eieio(); 4547c038749SMark Nutter } 4557c038749SMark Nutter 4567c038749SMark Nutter static inline void purge_mfc_queue(struct spu_state *csa, struct spu *spu) 4577c038749SMark Nutter { 4587c038749SMark Nutter struct spu_priv2 __iomem *priv2 = spu->priv2; 4597c038749SMark Nutter 4607c038749SMark Nutter /* Save, Step 27: 4617c038749SMark Nutter * Restore, Step 14. 4627c038749SMark Nutter * Write MFC_CNTL[Pc]=1 (purge queue). 4637c038749SMark Nutter */ 464943906baSJeremy Kerr out_be64(&priv2->mfc_control_RW, 465943906baSJeremy Kerr MFC_CNTL_PURGE_DMA_REQUEST | 466943906baSJeremy Kerr MFC_CNTL_SUSPEND_MASK); 4677c038749SMark Nutter eieio(); 4687c038749SMark Nutter } 4697c038749SMark Nutter 4707c038749SMark Nutter static inline void wait_purge_complete(struct spu_state *csa, struct spu *spu) 4717c038749SMark Nutter { 4727c038749SMark Nutter struct spu_priv2 __iomem *priv2 = spu->priv2; 4737c038749SMark Nutter 4747c038749SMark Nutter /* Save, Step 28: 4757c038749SMark Nutter * Poll MFC_CNTL[Ps] until value '11' is read 4767c038749SMark Nutter * (purge complete). 4777c038749SMark Nutter */ 478910ab66bSBenjamin Herrenschmidt POLL_WHILE_FALSE((in_be64(&priv2->mfc_control_RW) & 479910ab66bSBenjamin Herrenschmidt MFC_CNTL_PURGE_DMA_STATUS_MASK) == 4807c038749SMark Nutter MFC_CNTL_PURGE_DMA_COMPLETE); 4817c038749SMark Nutter } 4827c038749SMark Nutter 4837c038749SMark Nutter static inline void setup_mfc_sr1(struct spu_state *csa, struct spu *spu) 4847c038749SMark Nutter { 4857c038749SMark Nutter /* Save, Step 30: 4867c038749SMark Nutter * Restore, Step 18: 4877c038749SMark Nutter * Write MFC_SR1 with MFC_SR1[D=0,S=1] and 4887c038749SMark Nutter * MFC_SR1[TL,R,Pr,T] set correctly for the 4897c038749SMark Nutter * OS specific environment. 
4907c038749SMark Nutter * 4917c038749SMark Nutter * Implementation note: The SPU-side code 4927c038749SMark Nutter * for save/restore is privileged, so the 4937c038749SMark Nutter * MFC_SR1[Pr] bit is not set. 4947c038749SMark Nutter * 4957c038749SMark Nutter */ 496f0831accSArnd Bergmann spu_mfc_sr1_set(spu, (MFC_STATE1_MASTER_RUN_CONTROL_MASK | 4977c038749SMark Nutter MFC_STATE1_RELOCATE_MASK | 4987c038749SMark Nutter MFC_STATE1_BUS_TLBIE_MASK)); 4997c038749SMark Nutter } 5007c038749SMark Nutter 5017c038749SMark Nutter static inline void save_spu_npc(struct spu_state *csa, struct spu *spu) 5027c038749SMark Nutter { 5037c038749SMark Nutter struct spu_problem __iomem *prob = spu->problem; 5047c038749SMark Nutter 5057c038749SMark Nutter /* Save, Step 31: 5067c038749SMark Nutter * Save SPU_NPC in the CSA. 5077c038749SMark Nutter */ 5087c038749SMark Nutter csa->prob.spu_npc_RW = in_be32(&prob->spu_npc_RW); 5097c038749SMark Nutter } 5107c038749SMark Nutter 5117c038749SMark Nutter static inline void save_spu_privcntl(struct spu_state *csa, struct spu *spu) 5127c038749SMark Nutter { 5137c038749SMark Nutter struct spu_priv2 __iomem *priv2 = spu->priv2; 5147c038749SMark Nutter 5157c038749SMark Nutter /* Save, Step 32: 5167c038749SMark Nutter * Save SPU_PrivCntl in the CSA. 5177c038749SMark Nutter */ 5187c038749SMark Nutter csa->priv2.spu_privcntl_RW = in_be64(&priv2->spu_privcntl_RW); 5197c038749SMark Nutter } 5207c038749SMark Nutter 5217c038749SMark Nutter static inline void reset_spu_privcntl(struct spu_state *csa, struct spu *spu) 5227c038749SMark Nutter { 5237c038749SMark Nutter struct spu_priv2 __iomem *priv2 = spu->priv2; 5247c038749SMark Nutter 5257c038749SMark Nutter /* Save, Step 33: 5267c038749SMark Nutter * Restore, Step 16: 5277c038749SMark Nutter * Write SPU_PrivCntl[S,Le,A] fields reset to 0. 
5287c038749SMark Nutter */ 5297c038749SMark Nutter out_be64(&priv2->spu_privcntl_RW, 0UL); 5307c038749SMark Nutter eieio(); 5317c038749SMark Nutter } 5327c038749SMark Nutter 5337c038749SMark Nutter static inline void save_spu_lslr(struct spu_state *csa, struct spu *spu) 5347c038749SMark Nutter { 5357c038749SMark Nutter struct spu_priv2 __iomem *priv2 = spu->priv2; 5367c038749SMark Nutter 5377c038749SMark Nutter /* Save, Step 34: 5387c038749SMark Nutter * Save SPU_LSLR in the CSA. 5397c038749SMark Nutter */ 5407c038749SMark Nutter csa->priv2.spu_lslr_RW = in_be64(&priv2->spu_lslr_RW); 5417c038749SMark Nutter } 5427c038749SMark Nutter 5437c038749SMark Nutter static inline void reset_spu_lslr(struct spu_state *csa, struct spu *spu) 5447c038749SMark Nutter { 5457c038749SMark Nutter struct spu_priv2 __iomem *priv2 = spu->priv2; 5467c038749SMark Nutter 5477c038749SMark Nutter /* Save, Step 35: 5487c038749SMark Nutter * Restore, Step 17. 5497c038749SMark Nutter * Reset SPU_LSLR. 5507c038749SMark Nutter */ 5517c038749SMark Nutter out_be64(&priv2->spu_lslr_RW, LS_ADDR_MASK); 5527c038749SMark Nutter eieio(); 5537c038749SMark Nutter } 5547c038749SMark Nutter 5557c038749SMark Nutter static inline void save_spu_cfg(struct spu_state *csa, struct spu *spu) 5567c038749SMark Nutter { 5577c038749SMark Nutter struct spu_priv2 __iomem *priv2 = spu->priv2; 5587c038749SMark Nutter 5597c038749SMark Nutter /* Save, Step 36: 5607c038749SMark Nutter * Save SPU_Cfg in the CSA. 5617c038749SMark Nutter */ 5627c038749SMark Nutter csa->priv2.spu_cfg_RW = in_be64(&priv2->spu_cfg_RW); 5637c038749SMark Nutter } 5647c038749SMark Nutter 5657c038749SMark Nutter static inline void save_pm_trace(struct spu_state *csa, struct spu *spu) 5667c038749SMark Nutter { 5677c038749SMark Nutter /* Save, Step 37: 5687c038749SMark Nutter * Save PM_Trace_Tag_Wait_Mask in the CSA. 5697c038749SMark Nutter * Not performed by this implementation. 
5707c038749SMark Nutter */ 5717c038749SMark Nutter } 5727c038749SMark Nutter 5737c038749SMark Nutter static inline void save_mfc_rag(struct spu_state *csa, struct spu *spu) 5747c038749SMark Nutter { 5757c038749SMark Nutter /* Save, Step 38: 5767c038749SMark Nutter * Save RA_GROUP_ID register and the 5777c038749SMark Nutter * RA_ENABLE reigster in the CSA. 5787c038749SMark Nutter */ 5797c038749SMark Nutter csa->priv1.resource_allocation_groupID_RW = 580f0831accSArnd Bergmann spu_resource_allocation_groupID_get(spu); 5817c038749SMark Nutter csa->priv1.resource_allocation_enable_RW = 582f0831accSArnd Bergmann spu_resource_allocation_enable_get(spu); 5837c038749SMark Nutter } 5847c038749SMark Nutter 5857c038749SMark Nutter static inline void save_ppu_mb_stat(struct spu_state *csa, struct spu *spu) 5867c038749SMark Nutter { 5877c038749SMark Nutter struct spu_problem __iomem *prob = spu->problem; 5887c038749SMark Nutter 5897c038749SMark Nutter /* Save, Step 39: 5907c038749SMark Nutter * Save MB_Stat register in the CSA. 5917c038749SMark Nutter */ 5927c038749SMark Nutter csa->prob.mb_stat_R = in_be32(&prob->mb_stat_R); 5937c038749SMark Nutter } 5947c038749SMark Nutter 5957c038749SMark Nutter static inline void save_ppu_mb(struct spu_state *csa, struct spu *spu) 5967c038749SMark Nutter { 5977c038749SMark Nutter struct spu_problem __iomem *prob = spu->problem; 5987c038749SMark Nutter 5997c038749SMark Nutter /* Save, Step 40: 6007c038749SMark Nutter * Save the PPU_MB register in the CSA. 6017c038749SMark Nutter */ 6027c038749SMark Nutter csa->prob.pu_mb_R = in_be32(&prob->pu_mb_R); 6037c038749SMark Nutter } 6047c038749SMark Nutter 6057c038749SMark Nutter static inline void save_ppuint_mb(struct spu_state *csa, struct spu *spu) 6067c038749SMark Nutter { 6077c038749SMark Nutter struct spu_priv2 __iomem *priv2 = spu->priv2; 6087c038749SMark Nutter 6097c038749SMark Nutter /* Save, Step 41: 6107c038749SMark Nutter * Save the PPUINT_MB register in the CSA. 
6117c038749SMark Nutter */ 6127c038749SMark Nutter csa->priv2.puint_mb_R = in_be64(&priv2->puint_mb_R); 6137c038749SMark Nutter } 6147c038749SMark Nutter 6157c038749SMark Nutter static inline void save_ch_part1(struct spu_state *csa, struct spu *spu) 6167c038749SMark Nutter { 6177c038749SMark Nutter struct spu_priv2 __iomem *priv2 = spu->priv2; 618daced0f7SJeremy Kerr u64 idx, ch_indices[] = { 0UL, 3UL, 4UL, 24UL, 25UL, 27UL }; 6197c038749SMark Nutter int i; 6207c038749SMark Nutter 6217c038749SMark Nutter /* Save, Step 42: 6227c038749SMark Nutter */ 623e46a0237Sarnd@arndb.de 624e46a0237Sarnd@arndb.de /* Save CH 1, without channel count */ 625e46a0237Sarnd@arndb.de out_be64(&priv2->spu_chnlcntptr_RW, 1); 626e46a0237Sarnd@arndb.de csa->spu_chnldata_RW[1] = in_be64(&priv2->spu_chnldata_RW); 627e46a0237Sarnd@arndb.de 628e46a0237Sarnd@arndb.de /* Save the following CH: [0,3,4,24,25,27] */ 629daced0f7SJeremy Kerr for (i = 0; i < ARRAY_SIZE(ch_indices); i++) { 6307c038749SMark Nutter idx = ch_indices[i]; 6317c038749SMark Nutter out_be64(&priv2->spu_chnlcntptr_RW, idx); 6327c038749SMark Nutter eieio(); 6337c038749SMark Nutter csa->spu_chnldata_RW[idx] = in_be64(&priv2->spu_chnldata_RW); 6347c038749SMark Nutter csa->spu_chnlcnt_RW[idx] = in_be64(&priv2->spu_chnlcnt_RW); 6357c038749SMark Nutter out_be64(&priv2->spu_chnldata_RW, 0UL); 6367c038749SMark Nutter out_be64(&priv2->spu_chnlcnt_RW, 0UL); 6377c038749SMark Nutter eieio(); 6387c038749SMark Nutter } 6397c038749SMark Nutter } 6407c038749SMark Nutter 6417c038749SMark Nutter static inline void save_spu_mb(struct spu_state *csa, struct spu *spu) 6427c038749SMark Nutter { 6437c038749SMark Nutter struct spu_priv2 __iomem *priv2 = spu->priv2; 6447c038749SMark Nutter int i; 6457c038749SMark Nutter 6467c038749SMark Nutter /* Save, Step 43: 6477c038749SMark Nutter * Save SPU Read Mailbox Channel. 
 */
	out_be64(&priv2->spu_chnlcntptr_RW, 29UL);
	eieio();
	csa->spu_chnlcnt_RW[29] = in_be64(&priv2->spu_chnlcnt_RW);
	/* Drain up to four mailbox entries into the CSA. */
	for (i = 0; i < 4; i++) {
		csa->spu_mailbox_data[i] = in_be64(&priv2->spu_chnldata_RW);
	}
	out_be64(&priv2->spu_chnlcnt_RW, 0UL);
	eieio();
}

static inline void save_mfc_cmd(struct spu_state *csa, struct spu *spu)
{
	struct spu_priv2 __iomem *priv2 = spu->priv2;

	/* Save, Step 44:
	 * Save MFC_CMD Channel.
	 */
	out_be64(&priv2->spu_chnlcntptr_RW, 21UL);
	eieio();
	csa->spu_chnlcnt_RW[21] = in_be64(&priv2->spu_chnlcnt_RW);
	eieio();
}

static inline void reset_ch(struct spu_state *csa, struct spu *spu)
{
	struct spu_priv2 __iomem *priv2 = spu->priv2;
	u64 ch_indices[4] = { 21UL, 23UL, 28UL, 30UL };
	u64 ch_counts[4] = { 16UL, 1UL, 1UL, 1UL };
	u64 idx;
	int i;

	/* Save, Step 45:
	 * Reset the following CH: [21, 23, 28, 30],
	 * restoring each channel's count to its reset value.
	 */
	for (i = 0; i < 4; i++) {
		idx = ch_indices[i];
		out_be64(&priv2->spu_chnlcntptr_RW, idx);
		eieio();
		out_be64(&priv2->spu_chnlcnt_RW, ch_counts[i]);
		eieio();
	}
}

static inline void resume_mfc_queue(struct spu_state *csa, struct spu *spu)
{
	struct spu_priv2 __iomem *priv2 = spu->priv2;

	/* Save, Step 46:
	 * Restore, Step 25.
	 * Write MFC_CNTL[Sc]=0 (resume queue processing).
	 */
	out_be64(&priv2->mfc_control_RW, MFC_CNTL_RESUME_DMA_QUEUE);
}

static inline void setup_mfc_slbs(struct spu_state *csa, struct spu *spu,
		unsigned int *code, int code_size)
{
	/* Save, Step 47:
	 * Restore, Step 30.
	 * If MFC_SR1[R]=1, write 0 to SLB_Invalidate_All
	 * register, then initialize SLB_VSID and SLB_ESID
	 * to provide access to SPU context save code and
	 * LSCSA.
	 *
	 * This implementation places both the context
	 * switch code and LSCSA in kernel address space.
	 *
	 * Further this implementation assumes that the
	 * MFC_SR1[R]=1 (in other words, assume that
	 * translation is desired by OS environment).
	 */
	spu_invalidate_slbs(spu);
	spu_setup_kernel_slbs(spu, csa->lscsa, code, code_size);
}

static inline void set_switch_active(struct spu_state *csa, struct spu *spu)
{
	/* Save, Step 48:
	 * Restore, Step 23.
	 * Change the software context switch pending flag
	 * to context switch active.  This implementation does
	 * not use a switch active flag.
	 *
	 * Now that we have saved the mfc in the csa, we can add in the
	 * restart command if an exception occurred.
	 */
	if (test_bit(SPU_CONTEXT_FAULT_PENDING, &spu->flags))
		csa->priv2.mfc_control_RW |= MFC_CNTL_RESTART_DMA_COMMAND;
	clear_bit(SPU_CONTEXT_SWITCH_PENDING, &spu->flags);
	mb();
}

static inline void enable_interrupts(struct spu_state *csa, struct spu *spu)
{
	unsigned long class1_mask = CLASS1_ENABLE_SEGMENT_FAULT_INTR |
	    CLASS1_ENABLE_STORAGE_FAULT_INTR;

	/* Save, Step 49:
	 * Restore, Step 22:
	 * Reset and then enable interrupts, as
	 * needed by OS.
	 *
	 * This implementation enables only class1
	 * (translation) interrupts.
	 */
	spin_lock_irq(&spu->register_lock);
	spu_int_stat_clear(spu, 0, CLASS0_INTR_MASK);
	spu_int_stat_clear(spu, 1, CLASS1_INTR_MASK);
	spu_int_stat_clear(spu, 2, CLASS2_INTR_MASK);
	spu_int_mask_set(spu, 0, 0ul);
	spu_int_mask_set(spu, 1, class1_mask);
	spu_int_mask_set(spu, 2, 0ul);
	spin_unlock_irq(&spu->register_lock);
}

/* Issue an MFC DMA command through the problem-state command registers,
 * splitting the transfer into MFC_MAX_DMA_SIZE chunks.  Always returns 0.
 */
static inline int send_mfc_dma(struct spu *spu, unsigned long ea,
			       unsigned int ls_offset, unsigned int size,
			       unsigned int tag, unsigned int rclass,
			       unsigned int cmd)
{
	struct spu_problem __iomem *prob = spu->problem;
	union mfc_tag_size_class_cmd command;
	unsigned int transfer_size;
	volatile unsigned int status = 0x0;

	while (size > 0) {
		transfer_size =
		    (size > MFC_MAX_DMA_SIZE) ? MFC_MAX_DMA_SIZE : size;
		command.u.mfc_size = transfer_size;
		command.u.mfc_tag = tag;
		command.u.mfc_rclassid = rclass;
		command.u.mfc_cmd = cmd;
		do {
			out_be32(&prob->mfc_lsa_W, ls_offset);
			out_be64(&prob->mfc_ea_W, ea);
			out_be64(&prob->mfc_union_W.all64, command.all64);
			status =
			    in_be32(&prob->mfc_union_W.by32.mfc_class_cmd32);
			/* NOTE(review): low two status bits appear to signal
			 * that the command was not accepted; retry until both
			 * clear — confirm against the MFC command issue spec.
			 */
			if (unlikely(status & 0x2)) {
				cpu_relax();
			}
		} while (status & 0x3);
		size -= transfer_size;
		ea += transfer_size;
		ls_offset += transfer_size;
	}
	return 0;
}

static inline void save_ls_16kb(struct spu_state *csa, struct spu *spu)
{
	unsigned long addr = (unsigned long)&csa->lscsa->ls[0];
	unsigned int ls_offset = 0x0;
	unsigned int size = 16384;
	unsigned int tag = 0;
	unsigned int rclass = 0;
	unsigned int cmd = MFC_PUT_CMD;

	/* Save, Step 50:
	 * Issue a DMA command to copy the first 16K bytes
	 * of local storage to the CSA.
 */
	send_mfc_dma(spu, addr, ls_offset, size, tag, rclass, cmd);
}

static inline void set_spu_npc(struct spu_state *csa, struct spu *spu)
{
	struct spu_problem __iomem *prob = spu->problem;

	/* Save, Step 51:
	 * Restore, Step 31.
	 * Write SPU_NPC[IE]=0 and SPU_NPC[LSA] to entry
	 * point address of context save code in local
	 * storage.
	 *
	 * This implementation uses SPU-side save/restore
	 * programs with entry points at LSA of 0.
	 */
	out_be32(&prob->spu_npc_RW, 0);
	eieio();
}

static inline void set_signot1(struct spu_state *csa, struct spu *spu)
{
	struct spu_problem __iomem *prob = spu->problem;
	union {
		u64 ull;
		u32 ui[2];
	} addr64;

	/* Save, Step 52:
	 * Restore, Step 32:
	 * Write SPU_Sig_Notify_1 register with upper 32-bits
	 * of the CSA.LSCSA effective address.
	 */
	addr64.ull = (u64) csa->lscsa;
	out_be32(&prob->signal_notify1, addr64.ui[0]);
	eieio();
}

static inline void set_signot2(struct spu_state *csa, struct spu *spu)
{
	struct spu_problem __iomem *prob = spu->problem;
	union {
		u64 ull;
		u32 ui[2];
	} addr64;

	/* Save, Step 53:
	 * Restore, Step 33:
	 * Write SPU_Sig_Notify_2 register with lower 32-bits
	 * of the CSA.LSCSA effective address.
	 */
	addr64.ull = (u64) csa->lscsa;
	out_be32(&prob->signal_notify2, addr64.ui[1]);
	eieio();
}

static inline void send_save_code(struct spu_state *csa, struct spu *spu)
{
	unsigned long addr = (unsigned long)&spu_save_code[0];
	unsigned int ls_offset = 0x0;
	unsigned int size = sizeof(spu_save_code);
	unsigned int tag = 0;
	unsigned int rclass = 0;
	unsigned int cmd = MFC_GETFS_CMD;

	/* Save, Step 54:
	 * Issue a DMA command to copy context save code
	 * to local storage and start SPU.
 */
	send_mfc_dma(spu, addr, ls_offset, size, tag, rclass, cmd);
}

static inline void set_ppu_querymask(struct spu_state *csa, struct spu *spu)
{
	struct spu_problem __iomem *prob = spu->problem;

	/* Save, Step 55:
	 * Restore, Step 38.
	 * Write PPU_QueryMask=1 (enable Tag Group 0)
	 * and issue eieio instruction.
	 */
	out_be32(&prob->dma_querymask_RW, MFC_TAGID_TO_TAGMASK(0));
	eieio();
}

static inline void wait_tag_complete(struct spu_state *csa, struct spu *spu)
{
	struct spu_problem __iomem *prob = spu->problem;
	u32 mask = MFC_TAGID_TO_TAGMASK(0);
	unsigned long flags;

	/* Save, Step 56:
	 * Restore, Step 39.
	 * Restore, Step 46.
	 * Poll PPU_TagStatus[gn] until 01 (Tag group 0 complete)
	 * or write PPU_QueryType[TS]=01 and wait for Tag Group
	 * Complete Interrupt.  Write INT_Stat_Class0 or
	 * INT_Stat_Class2 with value of 'handled'.
	 */
	POLL_WHILE_FALSE(in_be32(&prob->dma_tagstatus_R) & mask);

	/* Acknowledge any class 0/2 interrupt raised by the completion. */
	local_irq_save(flags);
	spu_int_stat_clear(spu, 0, CLASS0_INTR_MASK);
	spu_int_stat_clear(spu, 2, CLASS2_INTR_MASK);
	local_irq_restore(flags);
}

static inline void wait_spu_stopped(struct spu_state *csa, struct spu *spu)
{
	struct spu_problem __iomem *prob = spu->problem;
	unsigned long flags;

	/* Save, Step 57:
	 * Restore, Step 40.
	 * Poll until SPU_Status[R]=0 or wait for SPU Class 0
	 * or SPU Class 2 interrupt.  Write INT_Stat_class0
	 * or INT_Stat_class2 with value of handled.
	 */
	POLL_WHILE_TRUE(in_be32(&prob->spu_status_R) & SPU_STATUS_RUNNING);

	local_irq_save(flags);
	spu_int_stat_clear(spu, 0, CLASS0_INTR_MASK);
	spu_int_stat_clear(spu, 2, CLASS2_INTR_MASK);
	local_irq_restore(flags);
}

/* Returns 0 on success (SPU stopped with the save-complete stop code),
 * 1 on failure.
 */
static inline int check_save_status(struct spu_state *csa, struct spu *spu)
{
	struct spu_problem __iomem *prob = spu->problem;
	u32 complete;

	/* Save, Step 54:
	 * If SPU_Status[P]=1 and SPU_Status[SC] = "success",
	 * context save succeeded, otherwise context save
	 * failed.
	 */
	complete = ((SPU_SAVE_COMPLETE << SPU_STOP_STATUS_SHIFT) |
		    SPU_STATUS_STOPPED_BY_STOP);
	return (in_be32(&prob->spu_status_R) != complete) ? 1 : 0;
}

static inline void terminate_spu_app(struct spu_state *csa, struct spu *spu)
{
	/* Restore, Step 4:
	 * If required, notify the "using application" that
	 * the SPU task has been terminated.  TBD.
	 */
}

static inline void suspend_mfc_and_halt_decr(struct spu_state *csa,
		struct spu *spu)
{
	struct spu_priv2 __iomem *priv2 = spu->priv2;

	/* Restore, Step 7:
	 * Write MFC_Cntl[Dh,Sc,Sm]='1','1','0' to suspend
	 * the queue and halt the decrementer.
	 */
	out_be64(&priv2->mfc_control_RW, MFC_CNTL_SUSPEND_DMA_QUEUE |
		 MFC_CNTL_DECREMENTER_HALTED);
	eieio();
}

static inline void wait_suspend_mfc_complete(struct spu_state *csa,
					     struct spu *spu)
{
	struct spu_priv2 __iomem *priv2 = spu->priv2;

	/* Restore, Step 8:
	 * Restore, Step 47.
	 * Poll MFC_CNTL[Ss] until 11 is returned.
 */
	POLL_WHILE_FALSE((in_be64(&priv2->mfc_control_RW) &
			 MFC_CNTL_SUSPEND_DMA_STATUS_MASK) ==
			 MFC_CNTL_SUSPEND_COMPLETE);
}

static inline int suspend_spe(struct spu_state *csa, struct spu *spu)
{
	struct spu_problem __iomem *prob = spu->problem;

	/* Restore, Step 9:
	 * If SPU_Status[R]=1, stop SPU execution
	 * and wait for stop to complete.
	 *
	 * Returns       1 if SPU_Status[R]=1 on entry.
	 *               0 otherwise
	 */
	if (in_be32(&prob->spu_status_R) & SPU_STATUS_RUNNING) {
		if (in_be32(&prob->spu_status_R) &
		    SPU_STATUS_ISOLATED_EXIT_STATUS) {
			POLL_WHILE_TRUE(in_be32(&prob->spu_status_R) &
					SPU_STATUS_RUNNING);
		}
		if ((in_be32(&prob->spu_status_R) &
		     SPU_STATUS_ISOLATED_LOAD_STATUS)
		    || (in_be32(&prob->spu_status_R) &
			SPU_STATUS_ISOLATED_STATE)) {
			out_be32(&prob->spu_runcntl_RW, SPU_RUNCNTL_STOP);
			eieio();
			POLL_WHILE_TRUE(in_be32(&prob->spu_status_R) &
					SPU_STATUS_RUNNING);
			/* NOTE(review): 0x2 is a magic run-control value,
			 * presumably an isolate-exit request — confirm
			 * against the SPU run control register spec.
			 */
			out_be32(&prob->spu_runcntl_RW, 0x2);
			eieio();
			POLL_WHILE_TRUE(in_be32(&prob->spu_status_R) &
					SPU_STATUS_RUNNING);
		}
		if (in_be32(&prob->spu_status_R) &
		    SPU_STATUS_WAITING_FOR_CHANNEL) {
			out_be32(&prob->spu_runcntl_RW, SPU_RUNCNTL_STOP);
			eieio();
			POLL_WHILE_TRUE(in_be32(&prob->spu_status_R) &
					SPU_STATUS_RUNNING);
		}
		return 1;
	}
	return 0;
}

static inline void clear_spu_status(struct spu_state *csa, struct spu *spu)
{
	struct spu_problem __iomem *prob = spu->problem;

	/* Restore, Step 10:
	 * If SPU_Status[R]=0 and SPU_Status[E,L,IS]=1,
	 * release SPU from isolate state.
	 */
	if (!(in_be32(&prob->spu_status_R) & SPU_STATUS_RUNNING)) {
		if (in_be32(&prob->spu_status_R) &
		    SPU_STATUS_ISOLATED_EXIT_STATUS) {
			spu_mfc_sr1_set(spu,
					MFC_STATE1_MASTER_RUN_CONTROL_MASK);
			eieio();
			out_be32(&prob->spu_runcntl_RW, SPU_RUNCNTL_RUNNABLE);
			eieio();
			POLL_WHILE_TRUE(in_be32(&prob->spu_status_R) &
					SPU_STATUS_RUNNING);
		}
		if ((in_be32(&prob->spu_status_R) &
		     SPU_STATUS_ISOLATED_LOAD_STATUS)
		    || (in_be32(&prob->spu_status_R) &
			SPU_STATUS_ISOLATED_STATE)) {
			spu_mfc_sr1_set(spu,
					MFC_STATE1_MASTER_RUN_CONTROL_MASK);
			eieio();
			out_be32(&prob->spu_runcntl_RW, 0x2);
			eieio();
			POLL_WHILE_TRUE(in_be32(&prob->spu_status_R) &
					SPU_STATUS_RUNNING);
		}
	}
}

static inline void reset_ch_part1(struct spu_state *csa, struct spu *spu)
{
	struct spu_priv2 __iomem *priv2 = spu->priv2;
	u64 ch_indices[] = { 0UL, 3UL, 4UL, 24UL, 25UL, 27UL };
	u64 idx;
	int i;

	/* Restore, Step 20:
	 */

	/* Reset CH 1 */
	out_be64(&priv2->spu_chnlcntptr_RW, 1);
	out_be64(&priv2->spu_chnldata_RW, 0UL);

	/* Reset the following CH: [0,3,4,24,25,27] */
	for (i = 0; i < ARRAY_SIZE(ch_indices); i++) {
		idx = ch_indices[i];
		out_be64(&priv2->spu_chnlcntptr_RW, idx);
		eieio();
		out_be64(&priv2->spu_chnldata_RW, 0UL);
		out_be64(&priv2->spu_chnlcnt_RW, 0UL);
		eieio();
	}
}

static inline void reset_ch_part2(struct spu_state *csa, struct spu *spu)
{
	struct spu_priv2 __iomem *priv2 = spu->priv2;
	u64 ch_indices[5] = { 21UL, 23UL, 28UL, 29UL, 30UL };
	u64 ch_counts[5] = { 16UL, 1UL, 1UL, 0UL, 1UL };
	u64 idx;
	int i;

	/* Restore, Step 21:
	 * Reset the following CH: [21, 23, 28, 29, 30],
	 * restoring each channel's count to its reset value.
	 */
	for (i = 0; i < 5; i++) {
		idx = ch_indices[i];
		out_be64(&priv2->spu_chnlcntptr_RW, idx);
		eieio();
		out_be64(&priv2->spu_chnlcnt_RW, ch_counts[i]);
		eieio();
	}
}

static inline void setup_spu_status_part1(struct spu_state *csa,
					  struct spu *spu)
{
	u32 status_P = SPU_STATUS_STOPPED_BY_STOP;
	u32 status_I = SPU_STATUS_INVALID_INSTR;
	u32 status_H = SPU_STATUS_STOPPED_BY_HALT;
	u32 status_S = SPU_STATUS_SINGLE_STEP;
	u32 status_S_I = SPU_STATUS_SINGLE_STEP | SPU_STATUS_INVALID_INSTR;
	u32 status_S_P = SPU_STATUS_SINGLE_STEP | SPU_STATUS_STOPPED_BY_STOP;
	u32 status_P_H = SPU_STATUS_STOPPED_BY_HALT | SPU_STATUS_STOPPED_BY_STOP;
	u32 status_P_I = SPU_STATUS_STOPPED_BY_STOP | SPU_STATUS_INVALID_INSTR;
	u32 status_code;

	/* Restore, Step 27:
	 * If the CSA.SPU_Status[I,S,H,P]=1 then add the correct
	 * instruction sequence to the end of the SPU based restore
	 * code (after the "context restored" stop and signal) to
	 * restore the correct SPU status.
	 *
	 * NOTE: Rather than modifying the SPU executable, we
	 * instead add a new 'stopped_status' field to the
	 * LSCSA.  The SPU-side restore reads this field and
	 * takes the appropriate action when exiting.
 */

	/* Combination checks run most-specific first: two-bit
	 * combinations before single bits.
	 */
	status_code =
	    (csa->prob.spu_status_R >> SPU_STOP_STATUS_SHIFT) & 0xFFFF;
	if ((csa->prob.spu_status_R & status_P_I) == status_P_I) {

		/* SPU_Status[P,I]=1 - Illegal Instruction followed
		 * by Stop and Signal instruction, followed by 'br -4'.
		 *
		 */
		csa->lscsa->stopped_status.slot[0] = SPU_STOPPED_STATUS_P_I;
		csa->lscsa->stopped_status.slot[1] = status_code;

	} else if ((csa->prob.spu_status_R & status_P_H) == status_P_H) {

		/* SPU_Status[P,H]=1 - Halt Conditional, followed
		 * by Stop and Signal instruction, followed by
		 * 'br -4'.
		 */
		csa->lscsa->stopped_status.slot[0] = SPU_STOPPED_STATUS_P_H;
		csa->lscsa->stopped_status.slot[1] = status_code;

	} else if ((csa->prob.spu_status_R & status_S_P) == status_S_P) {

		/* SPU_Status[S,P]=1 - Stop and Signal instruction
		 * followed by 'br -4'.
		 */
		csa->lscsa->stopped_status.slot[0] = SPU_STOPPED_STATUS_S_P;
		csa->lscsa->stopped_status.slot[1] = status_code;

	} else if ((csa->prob.spu_status_R & status_S_I) == status_S_I) {

		/* SPU_Status[S,I]=1 - Illegal instruction followed
		 * by 'br -4'.
		 */
		csa->lscsa->stopped_status.slot[0] = SPU_STOPPED_STATUS_S_I;
		csa->lscsa->stopped_status.slot[1] = status_code;

	} else if ((csa->prob.spu_status_R & status_P) == status_P) {

		/* SPU_Status[P]=1 - Stop and Signal instruction
		 * followed by 'br -4'.
		 */
		csa->lscsa->stopped_status.slot[0] = SPU_STOPPED_STATUS_P;
		csa->lscsa->stopped_status.slot[1] = status_code;

	} else if ((csa->prob.spu_status_R & status_H) == status_H) {

		/* SPU_Status[H]=1 - Halt Conditional, followed
		 * by 'br -4'.
		 */
		csa->lscsa->stopped_status.slot[0] = SPU_STOPPED_STATUS_H;

	} else if ((csa->prob.spu_status_R & status_S) == status_S) {

		/* SPU_Status[S]=1 - Two nop instructions.
		 */
		csa->lscsa->stopped_status.slot[0] = SPU_STOPPED_STATUS_S;

	} else if ((csa->prob.spu_status_R & status_I) == status_I) {

		/* SPU_Status[I]=1 - Illegal instruction followed
		 * by 'br -4'.
		 */
		csa->lscsa->stopped_status.slot[0] = SPU_STOPPED_STATUS_I;

	}
}

static inline void setup_spu_status_part2(struct spu_state *csa,
					  struct spu *spu)
{
	u32 mask;

	/* Restore, Step 28:
	 * If the CSA.SPU_Status[I,S,H,P,R]=0 then
	 * add a 'br *' instruction to the end of
	 * the SPU based restore code.
	 *
	 * NOTE: Rather than modifying the SPU executable, we
	 * instead add a new 'stopped_status' field to the
	 * LSCSA.  The SPU-side restore reads this field and
	 * takes the appropriate action when exiting.
	 */
	mask = SPU_STATUS_INVALID_INSTR |
	    SPU_STATUS_SINGLE_STEP |
	    SPU_STATUS_STOPPED_BY_HALT |
	    SPU_STATUS_STOPPED_BY_STOP | SPU_STATUS_RUNNING;
	if (!(csa->prob.spu_status_R & mask)) {
		csa->lscsa->stopped_status.slot[0] = SPU_STOPPED_STATUS_R;
	}
}

static inline void restore_mfc_rag(struct spu_state *csa, struct spu *spu)
{
	/* Restore, Step 29:
	 * Restore RA_GROUP_ID register and the
	 * RA_ENABLE register from the CSA.
 */
	spu_resource_allocation_groupID_set(spu,
			csa->priv1.resource_allocation_groupID_RW);
	spu_resource_allocation_enable_set(spu,
			csa->priv1.resource_allocation_enable_RW);
}

static inline void send_restore_code(struct spu_state *csa, struct spu *spu)
{
	unsigned long addr = (unsigned long)&spu_restore_code[0];
	unsigned int ls_offset = 0x0;
	unsigned int size = sizeof(spu_restore_code);
	unsigned int tag = 0;
	unsigned int rclass = 0;
	unsigned int cmd = MFC_GETFS_CMD;

	/* Restore, Step 37:
	 * Issue MFC DMA command to copy context
	 * restore code to local storage.
	 */
	send_mfc_dma(spu, addr, ls_offset, size, tag, rclass, cmd);
}

static inline void setup_decr(struct spu_state *csa, struct spu *spu)
{
	/* Restore, Step 34:
	 * If CSA.MFC_CNTL[Ds]=1 (decrementer was
	 * running) then adjust decrementer, set
	 * decrementer running status in LSCSA,
	 * and set decrementer "wrapped" status
	 * in LSCSA.
	 */
	if (csa->priv2.mfc_control_RW & MFC_CNTL_DECREMENTER_RUNNING) {
		cycles_t resume_time = get_cycles();
		cycles_t delta_time = resume_time - csa->suspend_time;

		csa->lscsa->decr_status.slot[0] = SPU_DECR_STATUS_RUNNING;
		/* Subtracting the elapsed time would underflow the saved
		 * decrementer value: mark it as wrapped.
		 */
		if (csa->lscsa->decr.slot[0] < delta_time) {
			csa->lscsa->decr_status.slot[0] |=
				 SPU_DECR_STATUS_WRAPPED;
		}

		csa->lscsa->decr.slot[0] -= delta_time;
	} else {
		csa->lscsa->decr_status.slot[0] = 0;
	}
}

static inline void setup_ppu_mb(struct spu_state *csa, struct spu *spu)
{
	/* Restore, Step 35:
	 * Copy the CSA.PU_MB data into the LSCSA.
	 */
	csa->lscsa->ppu_mb.slot[0] = csa->prob.pu_mb_R;
}

static inline void setup_ppuint_mb(struct spu_state *csa, struct spu *spu)
{
	/* Restore, Step 36:
	 * Copy the CSA.PUINT_MB data into the LSCSA.
12917c038749SMark Nutter */ 12927c038749SMark Nutter csa->lscsa->ppuint_mb.slot[0] = csa->priv2.puint_mb_R; 12937c038749SMark Nutter } 12947c038749SMark Nutter 12957c038749SMark Nutter static inline int check_restore_status(struct spu_state *csa, struct spu *spu) 12967c038749SMark Nutter { 12977c038749SMark Nutter struct spu_problem __iomem *prob = spu->problem; 12987c038749SMark Nutter u32 complete; 12997c038749SMark Nutter 13007c038749SMark Nutter /* Restore, Step 40: 13017c038749SMark Nutter * If SPU_Status[P]=1 and SPU_Status[SC] = "success", 13027c038749SMark Nutter * context restore succeeded, otherwise context restore 13037c038749SMark Nutter * failed. 13047c038749SMark Nutter */ 13057c038749SMark Nutter complete = ((SPU_RESTORE_COMPLETE << SPU_STOP_STATUS_SHIFT) | 13067c038749SMark Nutter SPU_STATUS_STOPPED_BY_STOP); 13077c038749SMark Nutter return (in_be32(&prob->spu_status_R) != complete) ? 1 : 0; 13087c038749SMark Nutter } 13097c038749SMark Nutter 13107c038749SMark Nutter static inline void restore_spu_privcntl(struct spu_state *csa, struct spu *spu) 13117c038749SMark Nutter { 13127c038749SMark Nutter struct spu_priv2 __iomem *priv2 = spu->priv2; 13137c038749SMark Nutter 13147c038749SMark Nutter /* Restore, Step 41: 13157c038749SMark Nutter * Restore SPU_PrivCntl from the CSA. 13167c038749SMark Nutter */ 13177c038749SMark Nutter out_be64(&priv2->spu_privcntl_RW, csa->priv2.spu_privcntl_RW); 13187c038749SMark Nutter eieio(); 13197c038749SMark Nutter } 13207c038749SMark Nutter 13217c038749SMark Nutter static inline void restore_status_part1(struct spu_state *csa, struct spu *spu) 13227c038749SMark Nutter { 13237c038749SMark Nutter struct spu_problem __iomem *prob = spu->problem; 13247c038749SMark Nutter u32 mask; 13257c038749SMark Nutter 13267c038749SMark Nutter /* Restore, Step 42: 13277c038749SMark Nutter * If any CSA.SPU_Status[I,S,H,P]=1, then 13287c038749SMark Nutter * restore the error or single step state. 
13297c038749SMark Nutter */ 13307c038749SMark Nutter mask = SPU_STATUS_INVALID_INSTR | 13317c038749SMark Nutter SPU_STATUS_SINGLE_STEP | 13327c038749SMark Nutter SPU_STATUS_STOPPED_BY_HALT | SPU_STATUS_STOPPED_BY_STOP; 13337c038749SMark Nutter if (csa->prob.spu_status_R & mask) { 13347c038749SMark Nutter out_be32(&prob->spu_runcntl_RW, SPU_RUNCNTL_RUNNABLE); 13357c038749SMark Nutter eieio(); 13367c038749SMark Nutter POLL_WHILE_TRUE(in_be32(&prob->spu_status_R) & 13377c038749SMark Nutter SPU_STATUS_RUNNING); 13387c038749SMark Nutter } 13397c038749SMark Nutter } 13407c038749SMark Nutter 13417c038749SMark Nutter static inline void restore_status_part2(struct spu_state *csa, struct spu *spu) 13427c038749SMark Nutter { 13437c038749SMark Nutter struct spu_problem __iomem *prob = spu->problem; 13447c038749SMark Nutter u32 mask; 13457c038749SMark Nutter 13467c038749SMark Nutter /* Restore, Step 43: 13477c038749SMark Nutter * If all CSA.SPU_Status[I,S,H,P,R]=0 then write 13487c038749SMark Nutter * SPU_RunCntl[R0R1]='01', wait for SPU_Status[R]=1, 13497c038749SMark Nutter * then write '00' to SPU_RunCntl[R0R1] and wait 13507c038749SMark Nutter * for SPU_Status[R]=0. 
13517c038749SMark Nutter */ 13527c038749SMark Nutter mask = SPU_STATUS_INVALID_INSTR | 13537c038749SMark Nutter SPU_STATUS_SINGLE_STEP | 13547c038749SMark Nutter SPU_STATUS_STOPPED_BY_HALT | 13557c038749SMark Nutter SPU_STATUS_STOPPED_BY_STOP | SPU_STATUS_RUNNING; 13567c038749SMark Nutter if (!(csa->prob.spu_status_R & mask)) { 13577c038749SMark Nutter out_be32(&prob->spu_runcntl_RW, SPU_RUNCNTL_RUNNABLE); 13587c038749SMark Nutter eieio(); 13597c038749SMark Nutter POLL_WHILE_FALSE(in_be32(&prob->spu_status_R) & 13607c038749SMark Nutter SPU_STATUS_RUNNING); 13617c038749SMark Nutter out_be32(&prob->spu_runcntl_RW, SPU_RUNCNTL_STOP); 13627c038749SMark Nutter eieio(); 13637c038749SMark Nutter POLL_WHILE_TRUE(in_be32(&prob->spu_status_R) & 13647c038749SMark Nutter SPU_STATUS_RUNNING); 13657c038749SMark Nutter } 13667c038749SMark Nutter } 13677c038749SMark Nutter 13687c038749SMark Nutter static inline void restore_ls_16kb(struct spu_state *csa, struct spu *spu) 13697c038749SMark Nutter { 13707c038749SMark Nutter unsigned long addr = (unsigned long)&csa->lscsa->ls[0]; 13717c038749SMark Nutter unsigned int ls_offset = 0x0; 13727c038749SMark Nutter unsigned int size = 16384; 13737c038749SMark Nutter unsigned int tag = 0; 13747c038749SMark Nutter unsigned int rclass = 0; 13757c038749SMark Nutter unsigned int cmd = MFC_GET_CMD; 13767c038749SMark Nutter 13777c038749SMark Nutter /* Restore, Step 44: 13787c038749SMark Nutter * Issue a DMA command to restore the first 13797c038749SMark Nutter * 16kb of local storage from CSA. 13807c038749SMark Nutter */ 13817c038749SMark Nutter send_mfc_dma(spu, addr, ls_offset, size, tag, rclass, cmd); 13827c038749SMark Nutter } 13837c038749SMark Nutter 1384cf17df22SMasato Noguchi static inline void suspend_mfc(struct spu_state *csa, struct spu *spu) 1385cf17df22SMasato Noguchi { 1386cf17df22SMasato Noguchi struct spu_priv2 __iomem *priv2 = spu->priv2; 1387cf17df22SMasato Noguchi 1388cf17df22SMasato Noguchi /* Restore, Step 47. 
1389cf17df22SMasato Noguchi * Write MFC_Cntl[Sc,Sm]='1','0' to suspend 1390cf17df22SMasato Noguchi * the queue. 1391cf17df22SMasato Noguchi */ 1392cf17df22SMasato Noguchi out_be64(&priv2->mfc_control_RW, MFC_CNTL_SUSPEND_DMA_QUEUE); 1393cf17df22SMasato Noguchi eieio(); 1394cf17df22SMasato Noguchi } 1395cf17df22SMasato Noguchi 13967c038749SMark Nutter static inline void clear_interrupts(struct spu_state *csa, struct spu *spu) 13977c038749SMark Nutter { 13987c038749SMark Nutter /* Restore, Step 49: 13997c038749SMark Nutter * Write INT_MASK_class0 with value of 0. 14007c038749SMark Nutter * Write INT_MASK_class1 with value of 0. 14017c038749SMark Nutter * Write INT_MASK_class2 with value of 0. 14027c038749SMark Nutter * Write INT_STAT_class0 with value of -1. 14037c038749SMark Nutter * Write INT_STAT_class1 with value of -1. 14047c038749SMark Nutter * Write INT_STAT_class2 with value of -1. 14057c038749SMark Nutter */ 14067c038749SMark Nutter spin_lock_irq(&spu->register_lock); 1407f0831accSArnd Bergmann spu_int_mask_set(spu, 0, 0ul); 1408f0831accSArnd Bergmann spu_int_mask_set(spu, 1, 0ul); 1409f0831accSArnd Bergmann spu_int_mask_set(spu, 2, 0ul); 14109476141cSMasato Noguchi spu_int_stat_clear(spu, 0, CLASS0_INTR_MASK); 14119476141cSMasato Noguchi spu_int_stat_clear(spu, 1, CLASS1_INTR_MASK); 14129476141cSMasato Noguchi spu_int_stat_clear(spu, 2, CLASS2_INTR_MASK); 14137c038749SMark Nutter spin_unlock_irq(&spu->register_lock); 14147c038749SMark Nutter } 14157c038749SMark Nutter 14167c038749SMark Nutter static inline void restore_mfc_queues(struct spu_state *csa, struct spu *spu) 14177c038749SMark Nutter { 14187c038749SMark Nutter struct spu_priv2 __iomem *priv2 = spu->priv2; 14197c038749SMark Nutter int i; 14207c038749SMark Nutter 14217c038749SMark Nutter /* Restore, Step 50: 14227c038749SMark Nutter * If MFC_Cntl[Se]!=0 then restore 14237c038749SMark Nutter * MFC command queues. 
14247c038749SMark Nutter */ 14257c038749SMark Nutter if ((csa->priv2.mfc_control_RW & MFC_CNTL_DMA_QUEUES_EMPTY_MASK) == 0) { 14267c038749SMark Nutter for (i = 0; i < 8; i++) { 14277c038749SMark Nutter out_be64(&priv2->puq[i].mfc_cq_data0_RW, 14287c038749SMark Nutter csa->priv2.puq[i].mfc_cq_data0_RW); 14297c038749SMark Nutter out_be64(&priv2->puq[i].mfc_cq_data1_RW, 14307c038749SMark Nutter csa->priv2.puq[i].mfc_cq_data1_RW); 14317c038749SMark Nutter out_be64(&priv2->puq[i].mfc_cq_data2_RW, 14327c038749SMark Nutter csa->priv2.puq[i].mfc_cq_data2_RW); 14337c038749SMark Nutter out_be64(&priv2->puq[i].mfc_cq_data3_RW, 14347c038749SMark Nutter csa->priv2.puq[i].mfc_cq_data3_RW); 14357c038749SMark Nutter } 14367c038749SMark Nutter for (i = 0; i < 16; i++) { 14377c038749SMark Nutter out_be64(&priv2->spuq[i].mfc_cq_data0_RW, 14387c038749SMark Nutter csa->priv2.spuq[i].mfc_cq_data0_RW); 14397c038749SMark Nutter out_be64(&priv2->spuq[i].mfc_cq_data1_RW, 14407c038749SMark Nutter csa->priv2.spuq[i].mfc_cq_data1_RW); 14417c038749SMark Nutter out_be64(&priv2->spuq[i].mfc_cq_data2_RW, 14427c038749SMark Nutter csa->priv2.spuq[i].mfc_cq_data2_RW); 14437c038749SMark Nutter out_be64(&priv2->spuq[i].mfc_cq_data3_RW, 14447c038749SMark Nutter csa->priv2.spuq[i].mfc_cq_data3_RW); 14457c038749SMark Nutter } 14467c038749SMark Nutter } 14477c038749SMark Nutter eieio(); 14487c038749SMark Nutter } 14497c038749SMark Nutter 14507c038749SMark Nutter static inline void restore_ppu_querymask(struct spu_state *csa, struct spu *spu) 14517c038749SMark Nutter { 14527c038749SMark Nutter struct spu_problem __iomem *prob = spu->problem; 14537c038749SMark Nutter 14547c038749SMark Nutter /* Restore, Step 51: 14557c038749SMark Nutter * Restore the PPU_QueryMask register from CSA. 
14567c038749SMark Nutter */ 14577c038749SMark Nutter out_be32(&prob->dma_querymask_RW, csa->prob.dma_querymask_RW); 14587c038749SMark Nutter eieio(); 14597c038749SMark Nutter } 14607c038749SMark Nutter 14617c038749SMark Nutter static inline void restore_ppu_querytype(struct spu_state *csa, struct spu *spu) 14627c038749SMark Nutter { 14637c038749SMark Nutter struct spu_problem __iomem *prob = spu->problem; 14647c038749SMark Nutter 14657c038749SMark Nutter /* Restore, Step 52: 14667c038749SMark Nutter * Restore the PPU_QueryType register from CSA. 14677c038749SMark Nutter */ 14687c038749SMark Nutter out_be32(&prob->dma_querytype_RW, csa->prob.dma_querytype_RW); 14697c038749SMark Nutter eieio(); 14707c038749SMark Nutter } 14717c038749SMark Nutter 14727c038749SMark Nutter static inline void restore_mfc_csr_tsq(struct spu_state *csa, struct spu *spu) 14737c038749SMark Nutter { 14747c038749SMark Nutter struct spu_priv2 __iomem *priv2 = spu->priv2; 14757c038749SMark Nutter 14767c038749SMark Nutter /* Restore, Step 53: 14777c038749SMark Nutter * Restore the MFC_CSR_TSQ register from CSA. 14787c038749SMark Nutter */ 14797c038749SMark Nutter out_be64(&priv2->spu_tag_status_query_RW, 14807c038749SMark Nutter csa->priv2.spu_tag_status_query_RW); 14817c038749SMark Nutter eieio(); 14827c038749SMark Nutter } 14837c038749SMark Nutter 14847c038749SMark Nutter static inline void restore_mfc_csr_cmd(struct spu_state *csa, struct spu *spu) 14857c038749SMark Nutter { 14867c038749SMark Nutter struct spu_priv2 __iomem *priv2 = spu->priv2; 14877c038749SMark Nutter 14887c038749SMark Nutter /* Restore, Step 54: 14897c038749SMark Nutter * Restore the MFC_CSR_CMD1 and MFC_CSR_CMD2 14907c038749SMark Nutter * registers from CSA. 
14917c038749SMark Nutter */ 14927c038749SMark Nutter out_be64(&priv2->spu_cmd_buf1_RW, csa->priv2.spu_cmd_buf1_RW); 14937c038749SMark Nutter out_be64(&priv2->spu_cmd_buf2_RW, csa->priv2.spu_cmd_buf2_RW); 14947c038749SMark Nutter eieio(); 14957c038749SMark Nutter } 14967c038749SMark Nutter 14977c038749SMark Nutter static inline void restore_mfc_csr_ato(struct spu_state *csa, struct spu *spu) 14987c038749SMark Nutter { 14997c038749SMark Nutter struct spu_priv2 __iomem *priv2 = spu->priv2; 15007c038749SMark Nutter 15017c038749SMark Nutter /* Restore, Step 55: 15027c038749SMark Nutter * Restore the MFC_CSR_ATO register from CSA. 15037c038749SMark Nutter */ 15047c038749SMark Nutter out_be64(&priv2->spu_atomic_status_RW, csa->priv2.spu_atomic_status_RW); 15057c038749SMark Nutter } 15067c038749SMark Nutter 15077c038749SMark Nutter static inline void restore_mfc_tclass_id(struct spu_state *csa, struct spu *spu) 15087c038749SMark Nutter { 15097c038749SMark Nutter /* Restore, Step 56: 15107c038749SMark Nutter * Restore the MFC_TCLASS_ID register from CSA. 15117c038749SMark Nutter */ 1512f0831accSArnd Bergmann spu_mfc_tclass_id_set(spu, csa->priv1.mfc_tclass_id_RW); 15137c038749SMark Nutter eieio(); 15147c038749SMark Nutter } 15157c038749SMark Nutter 15167c038749SMark Nutter static inline void set_llr_event(struct spu_state *csa, struct spu *spu) 15177c038749SMark Nutter { 15187c038749SMark Nutter u64 ch0_cnt, ch0_data; 15197c038749SMark Nutter u64 ch1_data; 15207c038749SMark Nutter 15217c038749SMark Nutter /* Restore, Step 57: 15227c038749SMark Nutter * Set the Lock Line Reservation Lost Event by: 15237c038749SMark Nutter * 1. OR CSA.SPU_Event_Status with bit 21 (Lr) set to 1. 15247c038749SMark Nutter * 2. If CSA.SPU_Channel_0_Count=0 and 15257c038749SMark Nutter * CSA.SPU_Wr_Event_Mask[Lr]=1 and 15267c038749SMark Nutter * CSA.SPU_Event_Status[Lr]=0 then set 15277c038749SMark Nutter * CSA.SPU_Event_Status_Count=1. 
15287c038749SMark Nutter */ 15297c038749SMark Nutter ch0_cnt = csa->spu_chnlcnt_RW[0]; 15307c038749SMark Nutter ch0_data = csa->spu_chnldata_RW[0]; 15317c038749SMark Nutter ch1_data = csa->spu_chnldata_RW[1]; 15327c038749SMark Nutter csa->spu_chnldata_RW[0] |= MFC_LLR_LOST_EVENT; 15337c038749SMark Nutter if ((ch0_cnt == 0) && !(ch0_data & MFC_LLR_LOST_EVENT) && 15347c038749SMark Nutter (ch1_data & MFC_LLR_LOST_EVENT)) { 15357c038749SMark Nutter csa->spu_chnlcnt_RW[0] = 1; 15367c038749SMark Nutter } 15377c038749SMark Nutter } 15387c038749SMark Nutter 15397c038749SMark Nutter static inline void restore_decr_wrapped(struct spu_state *csa, struct spu *spu) 15407c038749SMark Nutter { 15417c038749SMark Nutter /* Restore, Step 58: 15427c038749SMark Nutter * If the status of the CSA software decrementer 15437c038749SMark Nutter * "wrapped" flag is set, OR in a '1' to 15447c038749SMark Nutter * CSA.SPU_Event_Status[Tm]. 15457c038749SMark Nutter */ 154605a059f3SJeremy Kerr if (!(csa->lscsa->decr_status.slot[0] & SPU_DECR_STATUS_WRAPPED)) 154705a059f3SJeremy Kerr return; 154805a059f3SJeremy Kerr 154905a059f3SJeremy Kerr if ((csa->spu_chnlcnt_RW[0] == 0) && 155005a059f3SJeremy Kerr (csa->spu_chnldata_RW[1] & 0x20) && 155105a059f3SJeremy Kerr !(csa->spu_chnldata_RW[0] & 0x20)) 15527c038749SMark Nutter csa->spu_chnlcnt_RW[0] = 1; 155305a059f3SJeremy Kerr 155405a059f3SJeremy Kerr csa->spu_chnldata_RW[0] |= 0x20; 15557c038749SMark Nutter } 15567c038749SMark Nutter 15577c038749SMark Nutter static inline void restore_ch_part1(struct spu_state *csa, struct spu *spu) 15587c038749SMark Nutter { 15597c038749SMark Nutter struct spu_priv2 __iomem *priv2 = spu->priv2; 1560daced0f7SJeremy Kerr u64 idx, ch_indices[] = { 0UL, 3UL, 4UL, 24UL, 25UL, 27UL }; 15617c038749SMark Nutter int i; 15627c038749SMark Nutter 15637c038749SMark Nutter /* Restore, Step 59: 1564cfd529b2SMasato Noguchi * Restore the following CH: [0,3,4,24,25,27] 15657c038749SMark Nutter */ 1566daced0f7SJeremy Kerr for (i = 0; 
i < ARRAY_SIZE(ch_indices); i++) { 15677c038749SMark Nutter idx = ch_indices[i]; 15687c038749SMark Nutter out_be64(&priv2->spu_chnlcntptr_RW, idx); 15697c038749SMark Nutter eieio(); 15707c038749SMark Nutter out_be64(&priv2->spu_chnldata_RW, csa->spu_chnldata_RW[idx]); 15717c038749SMark Nutter out_be64(&priv2->spu_chnlcnt_RW, csa->spu_chnlcnt_RW[idx]); 15727c038749SMark Nutter eieio(); 15737c038749SMark Nutter } 15747c038749SMark Nutter } 15757c038749SMark Nutter 15767c038749SMark Nutter static inline void restore_ch_part2(struct spu_state *csa, struct spu *spu) 15777c038749SMark Nutter { 15787c038749SMark Nutter struct spu_priv2 __iomem *priv2 = spu->priv2; 15797c038749SMark Nutter u64 ch_indices[3] = { 9UL, 21UL, 23UL }; 15807c038749SMark Nutter u64 ch_counts[3] = { 1UL, 16UL, 1UL }; 15817c038749SMark Nutter u64 idx; 15827c038749SMark Nutter int i; 15837c038749SMark Nutter 15847c038749SMark Nutter /* Restore, Step 60: 15857c038749SMark Nutter * Restore the following CH: [9,21,23]. 15867c038749SMark Nutter */ 15877c038749SMark Nutter ch_counts[0] = 1UL; 15887c038749SMark Nutter ch_counts[1] = csa->spu_chnlcnt_RW[21]; 15897c038749SMark Nutter ch_counts[2] = 1UL; 15907c038749SMark Nutter for (i = 0; i < 3; i++) { 15917c038749SMark Nutter idx = ch_indices[i]; 15927c038749SMark Nutter out_be64(&priv2->spu_chnlcntptr_RW, idx); 15937c038749SMark Nutter eieio(); 15947c038749SMark Nutter out_be64(&priv2->spu_chnlcnt_RW, ch_counts[i]); 15957c038749SMark Nutter eieio(); 15967c038749SMark Nutter } 15977c038749SMark Nutter } 15987c038749SMark Nutter 15997c038749SMark Nutter static inline void restore_spu_lslr(struct spu_state *csa, struct spu *spu) 16007c038749SMark Nutter { 16017c038749SMark Nutter struct spu_priv2 __iomem *priv2 = spu->priv2; 16027c038749SMark Nutter 16037c038749SMark Nutter /* Restore, Step 61: 16047c038749SMark Nutter * Restore the SPU_LSLR register from CSA. 
16057c038749SMark Nutter */ 16067c038749SMark Nutter out_be64(&priv2->spu_lslr_RW, csa->priv2.spu_lslr_RW); 16077c038749SMark Nutter eieio(); 16087c038749SMark Nutter } 16097c038749SMark Nutter 16107c038749SMark Nutter static inline void restore_spu_cfg(struct spu_state *csa, struct spu *spu) 16117c038749SMark Nutter { 16127c038749SMark Nutter struct spu_priv2 __iomem *priv2 = spu->priv2; 16137c038749SMark Nutter 16147c038749SMark Nutter /* Restore, Step 62: 16157c038749SMark Nutter * Restore the SPU_Cfg register from CSA. 16167c038749SMark Nutter */ 16177c038749SMark Nutter out_be64(&priv2->spu_cfg_RW, csa->priv2.spu_cfg_RW); 16187c038749SMark Nutter eieio(); 16197c038749SMark Nutter } 16207c038749SMark Nutter 16217c038749SMark Nutter static inline void restore_pm_trace(struct spu_state *csa, struct spu *spu) 16227c038749SMark Nutter { 16237c038749SMark Nutter /* Restore, Step 63: 16247c038749SMark Nutter * Restore PM_Trace_Tag_Wait_Mask from CSA. 16257c038749SMark Nutter * Not performed by this implementation. 16267c038749SMark Nutter */ 16277c038749SMark Nutter } 16287c038749SMark Nutter 16297c038749SMark Nutter static inline void restore_spu_npc(struct spu_state *csa, struct spu *spu) 16307c038749SMark Nutter { 16317c038749SMark Nutter struct spu_problem __iomem *prob = spu->problem; 16327c038749SMark Nutter 16337c038749SMark Nutter /* Restore, Step 64: 16347c038749SMark Nutter * Restore SPU_NPC from CSA. 16357c038749SMark Nutter */ 16367c038749SMark Nutter out_be32(&prob->spu_npc_RW, csa->prob.spu_npc_RW); 16377c038749SMark Nutter eieio(); 16387c038749SMark Nutter } 16397c038749SMark Nutter 16407c038749SMark Nutter static inline void restore_spu_mb(struct spu_state *csa, struct spu *spu) 16417c038749SMark Nutter { 16427c038749SMark Nutter struct spu_priv2 __iomem *priv2 = spu->priv2; 16437c038749SMark Nutter int i; 16447c038749SMark Nutter 16457c038749SMark Nutter /* Restore, Step 65: 16467c038749SMark Nutter * Restore MFC_RdSPU_MB from CSA. 
16477c038749SMark Nutter */ 16487c038749SMark Nutter out_be64(&priv2->spu_chnlcntptr_RW, 29UL); 16497c038749SMark Nutter eieio(); 16507c038749SMark Nutter out_be64(&priv2->spu_chnlcnt_RW, csa->spu_chnlcnt_RW[29]); 16517c038749SMark Nutter for (i = 0; i < 4; i++) { 16528b3d6663SArnd Bergmann out_be64(&priv2->spu_chnldata_RW, csa->spu_mailbox_data[i]); 16537c038749SMark Nutter } 16547c038749SMark Nutter eieio(); 16557c038749SMark Nutter } 16567c038749SMark Nutter 16577c038749SMark Nutter static inline void check_ppu_mb_stat(struct spu_state *csa, struct spu *spu) 16587c038749SMark Nutter { 16597c038749SMark Nutter struct spu_problem __iomem *prob = spu->problem; 16607c038749SMark Nutter u32 dummy = 0; 16617c038749SMark Nutter 16627c038749SMark Nutter /* Restore, Step 66: 16637c038749SMark Nutter * If CSA.MB_Stat[P]=0 (mailbox empty) then 16647c038749SMark Nutter * read from the PPU_MB register. 16657c038749SMark Nutter */ 16667c038749SMark Nutter if ((csa->prob.mb_stat_R & 0xFF) == 0) { 16677c038749SMark Nutter dummy = in_be32(&prob->pu_mb_R); 16687c038749SMark Nutter eieio(); 16697c038749SMark Nutter } 16707c038749SMark Nutter } 16717c038749SMark Nutter 16727c038749SMark Nutter static inline void check_ppuint_mb_stat(struct spu_state *csa, struct spu *spu) 16737c038749SMark Nutter { 16747c038749SMark Nutter struct spu_priv2 __iomem *priv2 = spu->priv2; 16757c038749SMark Nutter u64 dummy = 0UL; 16767c038749SMark Nutter 16777c038749SMark Nutter /* Restore, Step 66: 16787c038749SMark Nutter * If CSA.MB_Stat[I]=0 (mailbox empty) then 16797c038749SMark Nutter * read from the PPUINT_MB register. 
16807c038749SMark Nutter */ 16817c038749SMark Nutter if ((csa->prob.mb_stat_R & 0xFF0000) == 0) { 16827c038749SMark Nutter dummy = in_be64(&priv2->puint_mb_R); 16837c038749SMark Nutter eieio(); 1684f0831accSArnd Bergmann spu_int_stat_clear(spu, 2, CLASS2_ENABLE_MAILBOX_INTR); 16857c038749SMark Nutter eieio(); 16867c038749SMark Nutter } 16877c038749SMark Nutter } 16887c038749SMark Nutter 16897c038749SMark Nutter static inline void restore_mfc_sr1(struct spu_state *csa, struct spu *spu) 16907c038749SMark Nutter { 16917c038749SMark Nutter /* Restore, Step 69: 16927c038749SMark Nutter * Restore the MFC_SR1 register from CSA. 16937c038749SMark Nutter */ 1694f0831accSArnd Bergmann spu_mfc_sr1_set(spu, csa->priv1.mfc_sr1_RW); 16957c038749SMark Nutter eieio(); 16967c038749SMark Nutter } 16977c038749SMark Nutter 16987a214200SLuke Browning static inline void set_int_route(struct spu_state *csa, struct spu *spu) 16997a214200SLuke Browning { 17007a214200SLuke Browning struct spu_context *ctx = spu->ctx; 17017a214200SLuke Browning 17027a214200SLuke Browning spu_cpu_affinity_set(spu, ctx->last_ran); 17037a214200SLuke Browning } 17047a214200SLuke Browning 17057c038749SMark Nutter static inline void restore_other_spu_access(struct spu_state *csa, 17067c038749SMark Nutter struct spu *spu) 17077c038749SMark Nutter { 17087c038749SMark Nutter /* Restore, Step 70: 17097c038749SMark Nutter * Restore other SPU mappings to this SPU. TBD. 17107c038749SMark Nutter */ 17117c038749SMark Nutter } 17127c038749SMark Nutter 17137c038749SMark Nutter static inline void restore_spu_runcntl(struct spu_state *csa, struct spu *spu) 17147c038749SMark Nutter { 17157c038749SMark Nutter struct spu_problem __iomem *prob = spu->problem; 17167c038749SMark Nutter 17177c038749SMark Nutter /* Restore, Step 71: 17187c038749SMark Nutter * If CSA.SPU_Status[R]=1 then write 17197c038749SMark Nutter * SPU_RunCntl[R0R1]='01'. 
17207c038749SMark Nutter */ 17217c038749SMark Nutter if (csa->prob.spu_status_R & SPU_STATUS_RUNNING) { 17227c038749SMark Nutter out_be32(&prob->spu_runcntl_RW, SPU_RUNCNTL_RUNNABLE); 17237c038749SMark Nutter eieio(); 17247c038749SMark Nutter } 17257c038749SMark Nutter } 17267c038749SMark Nutter 17277c038749SMark Nutter static inline void restore_mfc_cntl(struct spu_state *csa, struct spu *spu) 17287c038749SMark Nutter { 17297c038749SMark Nutter struct spu_priv2 __iomem *priv2 = spu->priv2; 17307c038749SMark Nutter 17317c038749SMark Nutter /* Restore, Step 72: 17327c038749SMark Nutter * Restore the MFC_CNTL register for the CSA. 17337c038749SMark Nutter */ 17347c038749SMark Nutter out_be64(&priv2->mfc_control_RW, csa->priv2.mfc_control_RW); 17357c038749SMark Nutter eieio(); 1736de102892SLuke Browning 173728347bceSHyeonSeung Jang /* 1738de102892SLuke Browning * The queue is put back into the same state that was evident prior to 1739de102892SLuke Browning * the context switch. The suspend flag is added to the saved state in 1740de102892SLuke Browning * the csa, if the operational state was suspending or suspended. In 1741de102892SLuke Browning * this case, the code that suspended the mfc is responsible for 1742de102892SLuke Browning * continuing it. Note that SPE faults do not change the operational 1743de102892SLuke Browning * state of the spu. 174428347bceSHyeonSeung Jang */ 17457c038749SMark Nutter } 17467c038749SMark Nutter 17477c038749SMark Nutter static inline void enable_user_access(struct spu_state *csa, struct spu *spu) 17487c038749SMark Nutter { 17497c038749SMark Nutter /* Restore, Step 73: 17507c038749SMark Nutter * Enable user-space access (if provided) to this 17517c038749SMark Nutter * SPU by mapping the virtual pages assigned to 17527c038749SMark Nutter * the SPU memory-mapped I/O (MMIO) for problem 17537c038749SMark Nutter * state. TBD. 
17547c038749SMark Nutter */ 17557c038749SMark Nutter } 17567c038749SMark Nutter 17577c038749SMark Nutter static inline void reset_switch_active(struct spu_state *csa, struct spu *spu) 17587c038749SMark Nutter { 17597c038749SMark Nutter /* Restore, Step 74: 17607c038749SMark Nutter * Reset the "context switch active" flag. 176161b36fc1SAndre Detsch * Not performed by this implementation. 17627c038749SMark Nutter */ 17637c038749SMark Nutter } 17647c038749SMark Nutter 17657c038749SMark Nutter static inline void reenable_interrupts(struct spu_state *csa, struct spu *spu) 17667c038749SMark Nutter { 17677c038749SMark Nutter /* Restore, Step 75: 17687c038749SMark Nutter * Re-enable SPU interrupts. 17697c038749SMark Nutter */ 17707c038749SMark Nutter spin_lock_irq(&spu->register_lock); 1771f0831accSArnd Bergmann spu_int_mask_set(spu, 0, csa->priv1.int_mask_class0_RW); 1772f0831accSArnd Bergmann spu_int_mask_set(spu, 1, csa->priv1.int_mask_class1_RW); 1773f0831accSArnd Bergmann spu_int_mask_set(spu, 2, csa->priv1.int_mask_class2_RW); 17747c038749SMark Nutter spin_unlock_irq(&spu->register_lock); 17757c038749SMark Nutter } 17767c038749SMark Nutter 17777c038749SMark Nutter static int quiece_spu(struct spu_state *prev, struct spu *spu) 17787c038749SMark Nutter { 17797c038749SMark Nutter /* 17807c038749SMark Nutter * Combined steps 2-18 of SPU context save sequence, which 17817c038749SMark Nutter * quiesce the SPU state (disable SPU execution, MFC command 17827c038749SMark Nutter * queues, decrementer, SPU interrupts, etc.). 17837c038749SMark Nutter * 17847c038749SMark Nutter * Returns 0 on success. 17857c038749SMark Nutter * 2 if failed step 2. 17867c038749SMark Nutter * 6 if failed step 6. 17877c038749SMark Nutter */ 17887c038749SMark Nutter 17897c038749SMark Nutter if (check_spu_isolate(prev, spu)) { /* Step 2. */ 17907c038749SMark Nutter return 2; 17917c038749SMark Nutter } 17927c038749SMark Nutter disable_interrupts(prev, spu); /* Step 3. 
*/ 17937c038749SMark Nutter set_watchdog_timer(prev, spu); /* Step 4. */ 17947c038749SMark Nutter inhibit_user_access(prev, spu); /* Step 5. */ 17957c038749SMark Nutter if (check_spu_isolate(prev, spu)) { /* Step 6. */ 17967c038749SMark Nutter return 6; 17977c038749SMark Nutter } 17987c038749SMark Nutter set_switch_pending(prev, spu); /* Step 7. */ 17997c038749SMark Nutter save_mfc_cntl(prev, spu); /* Step 8. */ 18007c038749SMark Nutter save_spu_runcntl(prev, spu); /* Step 9. */ 18017c038749SMark Nutter save_mfc_sr1(prev, spu); /* Step 10. */ 18027c038749SMark Nutter save_spu_status(prev, spu); /* Step 11. */ 180355d7cd74SJeremy Kerr save_mfc_stopped_status(prev, spu); /* Step 12. */ 18047c038749SMark Nutter halt_mfc_decr(prev, spu); /* Step 13. */ 18057c038749SMark Nutter save_timebase(prev, spu); /* Step 14. */ 18067c038749SMark Nutter remove_other_spu_access(prev, spu); /* Step 15. */ 18077c038749SMark Nutter do_mfc_mssync(prev, spu); /* Step 16. */ 18087c038749SMark Nutter issue_mfc_tlbie(prev, spu); /* Step 17. */ 18097c038749SMark Nutter handle_pending_interrupts(prev, spu); /* Step 18. */ 18107c038749SMark Nutter 18117c038749SMark Nutter return 0; 18127c038749SMark Nutter } 18137c038749SMark Nutter 18147c038749SMark Nutter static void save_csa(struct spu_state *prev, struct spu *spu) 18157c038749SMark Nutter { 18167c038749SMark Nutter /* 18177c038749SMark Nutter * Combine steps 19-44 of SPU context save sequence, which 18187c038749SMark Nutter * save regions of the privileged & problem state areas. 18197c038749SMark Nutter */ 18207c038749SMark Nutter 18217c038749SMark Nutter save_mfc_queues(prev, spu); /* Step 19. */ 18227c038749SMark Nutter save_ppu_querymask(prev, spu); /* Step 20. */ 18237c038749SMark Nutter save_ppu_querytype(prev, spu); /* Step 21. */ 18248d038e04SKazunori Asayama save_ppu_tagstatus(prev, spu); /* NEW. */ 18257c038749SMark Nutter save_mfc_csr_tsq(prev, spu); /* Step 22. 
*/ 18267c038749SMark Nutter save_mfc_csr_cmd(prev, spu); /* Step 23. */ 18277c038749SMark Nutter save_mfc_csr_ato(prev, spu); /* Step 24. */ 18287c038749SMark Nutter save_mfc_tclass_id(prev, spu); /* Step 25. */ 18297c038749SMark Nutter set_mfc_tclass_id(prev, spu); /* Step 26. */ 183036d29edbSJeremy Kerr save_mfc_cmd(prev, spu); /* Step 26a - moved from 44. */ 18317c038749SMark Nutter purge_mfc_queue(prev, spu); /* Step 27. */ 18327c038749SMark Nutter wait_purge_complete(prev, spu); /* Step 28. */ 18337c038749SMark Nutter setup_mfc_sr1(prev, spu); /* Step 30. */ 18347c038749SMark Nutter save_spu_npc(prev, spu); /* Step 31. */ 18357c038749SMark Nutter save_spu_privcntl(prev, spu); /* Step 32. */ 18367c038749SMark Nutter reset_spu_privcntl(prev, spu); /* Step 33. */ 18377c038749SMark Nutter save_spu_lslr(prev, spu); /* Step 34. */ 18387c038749SMark Nutter reset_spu_lslr(prev, spu); /* Step 35. */ 18397c038749SMark Nutter save_spu_cfg(prev, spu); /* Step 36. */ 18407c038749SMark Nutter save_pm_trace(prev, spu); /* Step 37. */ 18417c038749SMark Nutter save_mfc_rag(prev, spu); /* Step 38. */ 18427c038749SMark Nutter save_ppu_mb_stat(prev, spu); /* Step 39. */ 18437c038749SMark Nutter save_ppu_mb(prev, spu); /* Step 40. */ 18447c038749SMark Nutter save_ppuint_mb(prev, spu); /* Step 41. */ 18457c038749SMark Nutter save_ch_part1(prev, spu); /* Step 42. */ 18467c038749SMark Nutter save_spu_mb(prev, spu); /* Step 43. */ 18477c038749SMark Nutter reset_ch(prev, spu); /* Step 45. */ 18487c038749SMark Nutter } 18497c038749SMark Nutter 18507c038749SMark Nutter static void save_lscsa(struct spu_state *prev, struct spu *spu) 18517c038749SMark Nutter { 18527c038749SMark Nutter /* 18537c038749SMark Nutter * Perform steps 46-57 of SPU context save sequence, 18547c038749SMark Nutter * which save regions of the local store and register 18557c038749SMark Nutter * file. 18567c038749SMark Nutter */ 18577c038749SMark Nutter 18587c038749SMark Nutter resume_mfc_queue(prev, spu); /* Step 46. 
*/ 1859684bd614SJeremy Kerr /* Step 47. */ 1860684bd614SJeremy Kerr setup_mfc_slbs(prev, spu, spu_save_code, sizeof(spu_save_code)); 18617c038749SMark Nutter set_switch_active(prev, spu); /* Step 48. */ 18627c038749SMark Nutter enable_interrupts(prev, spu); /* Step 49. */ 18637c038749SMark Nutter save_ls_16kb(prev, spu); /* Step 50. */ 18647c038749SMark Nutter set_spu_npc(prev, spu); /* Step 51. */ 18657c038749SMark Nutter set_signot1(prev, spu); /* Step 52. */ 18667c038749SMark Nutter set_signot2(prev, spu); /* Step 53. */ 18677c038749SMark Nutter send_save_code(prev, spu); /* Step 54. */ 18687c038749SMark Nutter set_ppu_querymask(prev, spu); /* Step 55. */ 18697c038749SMark Nutter wait_tag_complete(prev, spu); /* Step 56. */ 18707c038749SMark Nutter wait_spu_stopped(prev, spu); /* Step 57. */ 18717c038749SMark Nutter } 18727c038749SMark Nutter 18735737edd1SMark Nutter static void force_spu_isolate_exit(struct spu *spu) 18745737edd1SMark Nutter { 18755737edd1SMark Nutter struct spu_problem __iomem *prob = spu->problem; 18765737edd1SMark Nutter struct spu_priv2 __iomem *priv2 = spu->priv2; 18775737edd1SMark Nutter 18785737edd1SMark Nutter /* Stop SPE execution and wait for completion. */ 18795737edd1SMark Nutter out_be32(&prob->spu_runcntl_RW, SPU_RUNCNTL_STOP); 18805737edd1SMark Nutter iobarrier_rw(); 18815737edd1SMark Nutter POLL_WHILE_TRUE(in_be32(&prob->spu_status_R) & SPU_STATUS_RUNNING); 18825737edd1SMark Nutter 18835737edd1SMark Nutter /* Restart SPE master runcntl. */ 18845737edd1SMark Nutter spu_mfc_sr1_set(spu, MFC_STATE1_MASTER_RUN_CONTROL_MASK); 18855737edd1SMark Nutter iobarrier_w(); 18865737edd1SMark Nutter 18875737edd1SMark Nutter /* Initiate isolate exit request and wait for completion. 
 */
	out_be64(&priv2->spu_privcntl_RW, 4LL);
	iobarrier_w();
	out_be32(&prob->spu_runcntl_RW, 2);
	iobarrier_rw();
	/* Spin until the SPU reports it stopped at a stop-and-signal. */
	POLL_WHILE_FALSE((in_be32(&prob->spu_status_R)
			  & SPU_STATUS_STOPPED_BY_STOP));

	/* Reset load request to normal. */
	out_be64(&priv2->spu_privcntl_RW, SPU_PRIVCNT_LOAD_REQUEST_NORMAL);
	iobarrier_w();
}

/**
 * stop_spu_isolate
 *	Check SPU run-control state and force isolated
 *	exit function as necessary.
 */
static void stop_spu_isolate(struct spu *spu)
{
	struct spu_problem __iomem *prob = spu->problem;

	if (in_be32(&prob->spu_status_R) & SPU_STATUS_ISOLATED_STATE) {
		/* The SPU is in isolated state; the only way
		 * to get it out is to perform an isolated
		 * exit (clean) operation.
		 */
		force_spu_isolate_exit(spu);
	}
}

/*
 * harvest - reset an SPU to a known-clean state.
 * @prev: context save area of the outgoing context (may be NULL on
 *        first use, when there is nothing to preserve).
 * @spu:  SPU whose hardware state is reset.
 *
 * The step numbers below follow the SPU context restore sequence of
 * SPE Book IV; the order of these calls is prescribed by that sequence
 * and must not be rearranged.
 */
static void harvest(struct spu_state *prev, struct spu *spu)
{
	/*
	 * Perform steps 2-25 of SPU context restore sequence,
	 * which resets an SPU either after a failed save, or
	 * when using SPU for first time.
	 */

	disable_interrupts(prev, spu);		/* Step 2.
 */
	inhibit_user_access(prev, spu);		/* Step 3. */
	terminate_spu_app(prev, spu);		/* Step 4. */
	set_switch_pending(prev, spu);		/* Step 5. */
	stop_spu_isolate(spu);			/* NEW. */
	remove_other_spu_access(prev, spu);	/* Step 6. */
	suspend_mfc_and_halt_decr(prev, spu);	/* Step 7. */
	wait_suspend_mfc_complete(prev, spu);	/* Step 8. */
	if (!suspend_spe(prev, spu))		/* Step 9. */
		clear_spu_status(prev, spu);	/* Step 10. */
	do_mfc_mssync(prev, spu);		/* Step 11. */
	issue_mfc_tlbie(prev, spu);		/* Step 12. */
	handle_pending_interrupts(prev, spu);	/* Step 13. */
	purge_mfc_queue(prev, spu);		/* Step 14. */
	wait_purge_complete(prev, spu);		/* Step 15. */
	reset_spu_privcntl(prev, spu);		/* Step 16. */
	reset_spu_lslr(prev, spu);		/* Step 17. */
	setup_mfc_sr1(prev, spu);		/* Step 18. */
	spu_invalidate_slbs(spu);		/* Step 19. */
	reset_ch_part1(prev, spu);		/* Step 20. */
	reset_ch_part2(prev, spu);		/* Step 21. */
	enable_interrupts(prev, spu);		/* Step 22. */
	set_switch_active(prev, spu);		/* Step 23. */
	set_mfc_tclass_id(prev, spu);		/* Step 24. */
	resume_mfc_queue(prev, spu);		/* Step 25.
 */
}

/*
 * restore_lscsa - restore local store and register file from the LSCSA.
 * @next: context save area being restored onto the SPU.
 * @spu:  target SPU.
 *
 * Step numbers follow the SPE Book IV restore sequence; the SPU-side
 * restore program (spu_restore_code) does most of the work after it is
 * sent to the SPU (Step 37) and started.
 */
static void restore_lscsa(struct spu_state *next, struct spu *spu)
{
	/*
	 * Perform steps 26-40 of SPU context restore sequence,
	 * which restores regions of the local store and register
	 * file.
	 */

	set_watchdog_timer(next, spu);		/* Step 26. */
	setup_spu_status_part1(next, spu);	/* Step 27. */
	setup_spu_status_part2(next, spu);	/* Step 28. */
	restore_mfc_rag(next, spu);		/* Step 29. */
	/* Step 30. */
	setup_mfc_slbs(next, spu, spu_restore_code, sizeof(spu_restore_code));
	set_spu_npc(next, spu);			/* Step 31. */
	set_signot1(next, spu);			/* Step 32. */
	set_signot2(next, spu);			/* Step 33. */
	setup_decr(next, spu);			/* Step 34. */
	setup_ppu_mb(next, spu);		/* Step 35. */
	setup_ppuint_mb(next, spu);		/* Step 36. */
	send_restore_code(next, spu);		/* Step 37. */
	set_ppu_querymask(next, spu);		/* Step 38. */
	wait_tag_complete(next, spu);		/* Step 39. */
	wait_spu_stopped(next, spu);		/* Step 40. */
}

/*
 * restore_csa - restore privileged and problem state from the CSA.
 * @next: context save area being restored onto the SPU.
 * @spu:  target SPU.
 *
 * PPE-side portion of the restore; the call order is prescribed by the
 * Book IV sequence and must not be rearranged.
 */
static void restore_csa(struct spu_state *next, struct spu *spu)
{
	/*
	 * Combine steps 41-76 of SPU context restore sequence, which
	 * restore regions of the privileged & problem state areas.
	 */

	restore_spu_privcntl(next, spu);	/* Step 41. */
	restore_status_part1(next, spu);	/* Step 42. */
	restore_status_part2(next, spu);	/* Step 43. */
	restore_ls_16kb(next, spu);		/* Step 44. */
	wait_tag_complete(next, spu);		/* Step 45. */
	suspend_mfc(next, spu);			/* Step 46. */
	wait_suspend_mfc_complete(next, spu);	/* Step 47. */
	issue_mfc_tlbie(next, spu);		/* Step 48. */
	clear_interrupts(next, spu);		/* Step 49. */
	restore_mfc_queues(next, spu);		/* Step 50. */
	restore_ppu_querymask(next, spu);	/* Step 51. */
	restore_ppu_querytype(next, spu);	/* Step 52. */
	restore_mfc_csr_tsq(next, spu);		/* Step 53. */
	restore_mfc_csr_cmd(next, spu);		/* Step 54. */
	restore_mfc_csr_ato(next, spu);		/* Step 55. */
	restore_mfc_tclass_id(next, spu);	/* Step 56. */
	set_llr_event(next, spu);		/* Step 57. */
	restore_decr_wrapped(next, spu);	/* Step 58. */
	restore_ch_part1(next, spu);		/* Step 59. */
	restore_ch_part2(next, spu);		/* Step 60. */
	restore_spu_lslr(next, spu);		/* Step 61. */
	restore_spu_cfg(next, spu);		/* Step 62. */
	restore_pm_trace(next, spu);		/* Step 63. */
	restore_spu_npc(next, spu);		/* Step 64. */
	restore_spu_mb(next, spu);		/* Step 65. */
	check_ppu_mb_stat(next, spu);		/* Step 66. */
	check_ppuint_mb_stat(next, spu);	/* Step 67.
*/ 201394b2a439SBenjamin Herrenschmidt spu_invalidate_slbs(spu); /* Modified Step 68. */ 20147c038749SMark Nutter restore_mfc_sr1(next, spu); /* Step 69. */ 20157a214200SLuke Browning set_int_route(next, spu); /* NEW */ 20167c038749SMark Nutter restore_other_spu_access(next, spu); /* Step 70. */ 20177c038749SMark Nutter restore_spu_runcntl(next, spu); /* Step 71. */ 20187c038749SMark Nutter restore_mfc_cntl(next, spu); /* Step 72. */ 20197c038749SMark Nutter enable_user_access(next, spu); /* Step 73. */ 20207c038749SMark Nutter reset_switch_active(next, spu); /* Step 74. */ 20217c038749SMark Nutter reenable_interrupts(next, spu); /* Step 75. */ 20227c038749SMark Nutter } 20237c038749SMark Nutter 20247c038749SMark Nutter static int __do_spu_save(struct spu_state *prev, struct spu *spu) 20257c038749SMark Nutter { 20267c038749SMark Nutter int rc; 20277c038749SMark Nutter 20287c038749SMark Nutter /* 20297c038749SMark Nutter * SPU context save can be broken into three phases: 20307c038749SMark Nutter * 20317c038749SMark Nutter * (a) quiesce [steps 2-16]. 20327c038749SMark Nutter * (b) save of CSA, performed by PPE [steps 17-42] 20337c038749SMark Nutter * (c) save of LSCSA, mostly performed by SPU [steps 43-52]. 20347c038749SMark Nutter * 20357c038749SMark Nutter * Returns 0 on success. 20367c038749SMark Nutter * 2,6 if failed to quiece SPU 20377c038749SMark Nutter * 53 if SPU-side of save failed. 20387c038749SMark Nutter */ 20397c038749SMark Nutter 20407c038749SMark Nutter rc = quiece_spu(prev, spu); /* Steps 2-16. */ 20417c038749SMark Nutter switch (rc) { 20427c038749SMark Nutter default: 20437c038749SMark Nutter case 2: 20447c038749SMark Nutter case 6: 20457c038749SMark Nutter harvest(prev, spu); 20467c038749SMark Nutter return rc; 20477c038749SMark Nutter break; 20487c038749SMark Nutter case 0: 20497c038749SMark Nutter break; 20507c038749SMark Nutter } 20517c038749SMark Nutter save_csa(prev, spu); /* Steps 17-43. 
*/ 20527c038749SMark Nutter save_lscsa(prev, spu); /* Steps 44-53. */ 20537c038749SMark Nutter return check_save_status(prev, spu); /* Step 54. */ 20547c038749SMark Nutter } 20557c038749SMark Nutter 20567c038749SMark Nutter static int __do_spu_restore(struct spu_state *next, struct spu *spu) 20577c038749SMark Nutter { 20587c038749SMark Nutter int rc; 20597c038749SMark Nutter 20607c038749SMark Nutter /* 20617c038749SMark Nutter * SPU context restore can be broken into three phases: 20627c038749SMark Nutter * 20637c038749SMark Nutter * (a) harvest (or reset) SPU [steps 2-24]. 20647c038749SMark Nutter * (b) restore LSCSA [steps 25-40], mostly performed by SPU. 20657c038749SMark Nutter * (c) restore CSA [steps 41-76], performed by PPE. 20667c038749SMark Nutter * 20677c038749SMark Nutter * The 'harvest' step is not performed here, but rather 20687c038749SMark Nutter * as needed below. 20697c038749SMark Nutter */ 20707c038749SMark Nutter 20717c038749SMark Nutter restore_lscsa(next, spu); /* Steps 24-39. */ 20727c038749SMark Nutter rc = check_restore_status(next, spu); /* Step 40. */ 20737c038749SMark Nutter switch (rc) { 20747c038749SMark Nutter default: 20757c038749SMark Nutter /* Failed. Return now. */ 20767c038749SMark Nutter return rc; 20777c038749SMark Nutter break; 20787c038749SMark Nutter case 0: 20797c038749SMark Nutter /* Fall through to next step. */ 20807c038749SMark Nutter break; 20817c038749SMark Nutter } 20827c038749SMark Nutter restore_csa(next, spu); 20837c038749SMark Nutter 20847c038749SMark Nutter return 0; 20857c038749SMark Nutter } 20867c038749SMark Nutter 20875473af04SMark Nutter /** 20885473af04SMark Nutter * spu_save - SPU context save, with locking. 20895473af04SMark Nutter * @prev: pointer to SPU context save area, to be saved. 20905473af04SMark Nutter * @spu: pointer to SPU iomem structure. 20915473af04SMark Nutter * 20925473af04SMark Nutter * Acquire locks, perform the save operation then return. 
 */
int spu_save(struct spu_state *prev, struct spu *spu)
{
	int rc;

	acquire_spu_lock(spu);		/* Step 1. */
	rc = __do_spu_save(prev, spu);	/* Steps 2-53. */
	release_spu_lock(spu);
	/* rc == 2 or 6 means the quiesce failed and the SPU was
	 * harvested; that is tolerated here.  Anything else nonzero
	 * is unrecoverable.
	 */
	if (rc != 0 && rc != 2 && rc != 6) {
		panic("%s failed on SPU[%d], rc=%d.\n",
		      __func__, spu->number, rc);
	}
	/* Always reports success (failures either panic or are tolerated). */
	return 0;
}
EXPORT_SYMBOL_GPL(spu_save);

/**
 * spu_restore - SPU context restore, with harvest and locking.
 * @new: pointer to SPU context save area, to be restored.
 * @spu: pointer to SPU iomem structure.
 *
 * Perform harvest + restore, as we may not be coming
 * from a previous successful save operation, and the
 * hardware state is unknown.
 */
int spu_restore(struct spu_state *new, struct spu *spu)
{
	int rc;

	acquire_spu_lock(spu);
	/* Reset the SPU first; there is no previous state to keep. */
	harvest(NULL, spu);
	spu->slb_replace = 0;
	rc = __do_spu_restore(new, spu);
	release_spu_lock(spu);
	if (rc) {
		panic("%s failed on SPU[%d] rc=%d.\n",
		      __func__, spu->number, rc);
	}
	/* rc is always 0 here; nonzero panics above. */
	return rc;
}
EXPORT_SYMBOL_GPL(spu_restore);

/* Initialize the problem-state portion of a fresh CSA.
 * NOTE(review): the channel indices (9, 21, 23, 28, 30) and the
 * initial counts appear to be the architected reset channel counts
 * from SPE Book IV -- confirm against the spec before changing.
 */
static void init_prob(struct spu_state *csa)
{
	csa->spu_chnlcnt_RW[9] = 1;
	csa->spu_chnlcnt_RW[21] = 16;
	csa->spu_chnlcnt_RW[23] = 1;
	csa->spu_chnlcnt_RW[28] = 1;
	csa->spu_chnlcnt_RW[30] = 1;
	csa->prob.spu_runcntl_RW = SPU_RUNCNTL_STOP;
	/* presumably the architected mailbox-status reset value --
	 * TODO confirm.
	 */
	csa->prob.mb_stat_R = 0x000400;
}

/* Initialize the privilege-1 portion of a fresh CSA. */
static void init_priv1(struct spu_state *csa)
{
	/* Enable decode, relocate, tlbie response, master runcntl. */
	csa->priv1.mfc_sr1_RW = MFC_STATE1_LOCAL_STORAGE_DECODE_MASK |
	    MFC_STATE1_MASTER_RUN_CONTROL_MASK |
	    MFC_STATE1_PROBLEM_STATE_MASK |
	    MFC_STATE1_RELOCATE_MASK | MFC_STATE1_BUS_TLBIE_MASK;

	/* Enable OS-specific set of interrupts.
*/ 21555473af04SMark Nutter csa->priv1.int_mask_class0_RW = CLASS0_ENABLE_DMA_ALIGNMENT_INTR | 21565473af04SMark Nutter CLASS0_ENABLE_INVALID_DMA_COMMAND_INTR | 21575473af04SMark Nutter CLASS0_ENABLE_SPU_ERROR_INTR; 21585473af04SMark Nutter csa->priv1.int_mask_class1_RW = CLASS1_ENABLE_SEGMENT_FAULT_INTR | 21595473af04SMark Nutter CLASS1_ENABLE_STORAGE_FAULT_INTR; 21603a843d7cSArnd Bergmann csa->priv1.int_mask_class2_RW = CLASS2_ENABLE_SPU_STOP_INTR | 2161a33a7d73SArnd Bergmann CLASS2_ENABLE_SPU_HALT_INTR | 2162a33a7d73SArnd Bergmann CLASS2_ENABLE_SPU_DMA_TAG_GROUP_COMPLETE_INTR; 21635473af04SMark Nutter } 21645473af04SMark Nutter 21655473af04SMark Nutter static void init_priv2(struct spu_state *csa) 21665473af04SMark Nutter { 21675473af04SMark Nutter csa->priv2.spu_lslr_RW = LS_ADDR_MASK; 21685473af04SMark Nutter csa->priv2.mfc_control_RW = MFC_CNTL_RESUME_DMA_QUEUE | 21695473af04SMark Nutter MFC_CNTL_NORMAL_DMA_QUEUE_OPERATION | 21705473af04SMark Nutter MFC_CNTL_DMA_QUEUES_EMPTY_MASK; 21715473af04SMark Nutter } 21725473af04SMark Nutter 21735473af04SMark Nutter /** 21745473af04SMark Nutter * spu_alloc_csa - allocate and initialize an SPU context save area. 21755473af04SMark Nutter * 21765473af04SMark Nutter * Allocate and initialize the contents of an SPU context save area. 21775473af04SMark Nutter * This includes enabling address translation, interrupt masks, etc., 21785473af04SMark Nutter * as appropriate for the given OS environment. 21795473af04SMark Nutter * 21805473af04SMark Nutter * Note that storage for the 'lscsa' is allocated separately, 21815473af04SMark Nutter * as it is by far the largest of the context save regions, 21825473af04SMark Nutter * and may need to be pinned or otherwise specially aligned. 
21835473af04SMark Nutter */ 2184f1fa74f4SBenjamin Herrenschmidt int spu_init_csa(struct spu_state *csa) 21855473af04SMark Nutter { 2186f1fa74f4SBenjamin Herrenschmidt int rc; 21875473af04SMark Nutter 21885473af04SMark Nutter if (!csa) 2189f1fa74f4SBenjamin Herrenschmidt return -EINVAL; 21905473af04SMark Nutter memset(csa, 0, sizeof(struct spu_state)); 21915473af04SMark Nutter 2192f1fa74f4SBenjamin Herrenschmidt rc = spu_alloc_lscsa(csa); 2193f1fa74f4SBenjamin Herrenschmidt if (rc) 2194f1fa74f4SBenjamin Herrenschmidt return rc; 21955473af04SMark Nutter 219634af946aSIngo Molnar spin_lock_init(&csa->register_lock); 21978b3d6663SArnd Bergmann 21985473af04SMark Nutter init_prob(csa); 21995473af04SMark Nutter init_priv1(csa); 22005473af04SMark Nutter init_priv2(csa); 2201f1fa74f4SBenjamin Herrenschmidt 2202f1fa74f4SBenjamin Herrenschmidt return 0; 22035473af04SMark Nutter } 22045473af04SMark Nutter 22055473af04SMark Nutter void spu_fini_csa(struct spu_state *csa) 22065473af04SMark Nutter { 2207f1fa74f4SBenjamin Herrenschmidt spu_free_lscsa(csa); 22085473af04SMark Nutter } 2209