/*
 * Copyright IBM Corp. 1999, 2009
 *
 * Author(s): Martin Schwidefsky <schwidefsky@de.ibm.com>
 */

#ifndef __ASM_SWITCH_TO_H
#define __ASM_SWITCH_TO_H

#include <linux/thread_info.h>
#include <asm/ptrace.h>

/* Low-level task switch; implemented in assembly elsewhere in arch/s390. */
extern struct task_struct *__switch_to(void *, void *);
/* Re-load per-task control registers for @task (defined elsewhere). */
extern void update_cr_regs(struct task_struct *task);

/*
 * Save the CPU's floating point state into *fpregs.
 *
 * FP registers 0, 2, 4 and 6 are stored unconditionally; the FP control
 * register and the remaining registers 1, 3, 5, 7-15 are stored only when
 * the machine has the IEEE facility (MACHINE_HAS_IEEE).
 *
 * The %O0/%R0 operand modifiers split the "Q" memory operand into its
 * offset and base register so each "std" can address a fixed slot inside
 * the s390_fp_regs structure.
 */
static inline void save_fp_regs(s390_fp_regs *fpregs)
{
	asm volatile(
		" std 0,%O0+8(%R0)\n"
		" std 2,%O0+24(%R0)\n"
		" std 4,%O0+40(%R0)\n"
		" std 6,%O0+56(%R0)"
		: "=Q" (*fpregs) : "Q" (*fpregs));
	if (!MACHINE_HAS_IEEE)
		return;
	asm volatile(
		" stfpc %0\n"
		" std 1,%O0+16(%R0)\n"
		" std 3,%O0+32(%R0)\n"
		" std 5,%O0+48(%R0)\n"
		" std 7,%O0+64(%R0)\n"
		" std 8,%O0+72(%R0)\n"
		" std 9,%O0+80(%R0)\n"
		" std 10,%O0+88(%R0)\n"
		" std 11,%O0+96(%R0)\n"
		" std 12,%O0+104(%R0)\n"
		" std 13,%O0+112(%R0)\n"
		" std 14,%O0+120(%R0)\n"
		" std 15,%O0+128(%R0)\n"
		: "=Q" (*fpregs) : "Q" (*fpregs));
}

/*
 * Load the CPU's floating point state from *fpregs.
 *
 * Mirror image of save_fp_regs(): registers 0, 2, 4 and 6 are loaded
 * unconditionally; the FP control register and the remaining registers
 * only when MACHINE_HAS_IEEE is set.
 */
static inline void restore_fp_regs(s390_fp_regs *fpregs)
{
	asm volatile(
		" ld 0,%O0+8(%R0)\n"
		" ld 2,%O0+24(%R0)\n"
		" ld 4,%O0+40(%R0)\n"
		" ld 6,%O0+56(%R0)"
		: : "Q" (*fpregs));
	if (!MACHINE_HAS_IEEE)
		return;
	asm volatile(
		" lfpc %0\n"
		" ld 1,%O0+16(%R0)\n"
		" ld 3,%O0+32(%R0)\n"
		" ld 5,%O0+48(%R0)\n"
		" ld 7,%O0+64(%R0)\n"
		" ld 8,%O0+72(%R0)\n"
		" ld 9,%O0+80(%R0)\n"
		" ld 10,%O0+88(%R0)\n"
		" ld 11,%O0+96(%R0)\n"
		" ld 12,%O0+104(%R0)\n"
		" ld 13,%O0+112(%R0)\n"
		" ld 14,%O0+120(%R0)\n"
		" ld 15,%O0+128(%R0)\n"
		: : "Q" (*fpregs));
}

/*
 * Store access registers 0-15 into the array at @acrs.
 *
 * The local acrstype struct gives the "Q" memory operand the full
 * NUM_ACRS-word extent, so the compiler knows how much memory the
 * "stam" instruction writes.
 */
static inline void save_access_regs(unsigned int *acrs)
{
	typedef struct { int _[NUM_ACRS]; } acrstype;

	asm volatile("stam 0,15,%0" : "=Q" (*(acrstype *)acrs));
}

/*
 * Load access registers 0-15 from the array at @acrs.
 * Counterpart of save_access_regs(); same acrstype trick to describe
 * the full extent the "lam" instruction reads.
 */
static inline void restore_access_regs(unsigned int *acrs)
{
	typedef struct { int _[NUM_ACRS]; } acrstype;

	asm volatile("lam 0,15,%0" : : "Q" (*(acrstype *)acrs));
}

/*
 * Context switch from @prev to @next; @last receives the previously
 * running task as returned by __switch_to().
 *
 * FP, access and runtime-instrumentation state is saved/restored only
 * for tasks that have an mm — presumably kernel threads (mm == NULL)
 * never touch this state, so the save/restore can be skipped for them;
 * TODO(review): confirm against the callers/arch documentation.
 *
 * Order matters: prev's state must be saved and next's state restored
 * before __switch_to() transfers control to next's kernel stack.
 */
#define switch_to(prev,next,last) do {					\
	if (prev->mm) {							\
		save_fp_regs(&prev->thread.fp_regs);			\
		save_access_regs(&prev->thread.acrs[0]);		\
		save_ri_cb(prev->thread.ri_cb);				\
	}								\
	if (next->mm) {							\
		restore_fp_regs(&next->thread.fp_regs);			\
		restore_access_regs(&next->thread.acrs[0]);		\
		restore_ri_cb(next->thread.ri_cb, prev->thread.ri_cb);	\
		update_cr_regs(next);					\
	}								\
	prev = __switch_to(prev,next);					\
} while (0)

/*
 * Runs after the switch, on the incoming task's stack: re-establish the
 * task's saved address space mode (mm_segment) via set_fs().
 */
#define finish_arch_switch(prev) do {					\
	set_fs(current->thread.mm_segment);				\
} while (0)

#endif /* __ASM_SWITCH_TO_H */