/*
 * FPU support code, moved here from head.S so that it can be used
 * by chips which use other head-whatever.S files.
 *
 * Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org)
 * Copyright (C) 1996 Cort Dougan <cort@cs.nmt.edu>
 * Copyright (C) 1996 Paul Mackerras.
 * Copyright (C) 1997 Dan Malek (dmalek@jlc.net).
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 *
 */

#include <asm/reg.h>
#include <asm/page.h>
#include <asm/mmu.h>
#include <asm/pgtable.h>
#include <asm/cputable.h>
#include <asm/cache.h>
#include <asm/thread_info.h>
#include <asm/ppc_asm.h>
#include <asm/asm-offsets.h>
#include <asm/ptrace.h>
#include <asm/export.h>
#include <asm/asm-compat.h>
#include <asm/feature-fixups.h>

#ifdef CONFIG_VSX
/*
 * Save/restore all 32 FP registers, choosing the VSX form of the
 * save/restore when the CPU has VSX.  The BEGIN_FTR_SECTION /
 * END_FTR_SECTION_IFSET pair is patched at boot: on CPUs with
 * CPU_FTR_VSX the "b 2f" is live and we take the VSR path, otherwise
 * it is nopped out and we fall through to the plain FPR path.
 *
 * n    = first register number (always 0 here)
 * c    = scratch GPR used by the VSR save/restore macros
 * base = GPR holding the address of the save area
 */
#define __REST_32FPVSRS(n,c,base)					\
BEGIN_FTR_SECTION							\
	b	2f;							\
END_FTR_SECTION_IFSET(CPU_FTR_VSX);					\
	REST_32FPRS(n,base);						\
	b	3f;							\
2:	REST_32VSRS(n,c,base);						\
3:

#define __SAVE_32FPVSRS(n,c,base)					\
BEGIN_FTR_SECTION							\
	b	2f;							\
END_FTR_SECTION_IFSET(CPU_FTR_VSX);					\
	SAVE_32FPRS(n,base);						\
	b	3f;							\
2:	SAVE_32VSRS(n,c,base);						\
3:
#else
/* Without VSX the scratch-GPR argument is simply unused. */
#define __REST_32FPVSRS(n,b,base)	REST_32FPRS(n, base)
#define __SAVE_32FPVSRS(n,b,base)	SAVE_32FPRS(n, base)
#endif
/* Outer wrappers turn the symbolic register names (R4 etc.) into
 * the forms (__REG_R4 etc.) expected by the inner macros. */
#define REST_32FPVSRS(n,c,base) __REST_32FPVSRS(n,__REG_##c,__REG_##base)
#define SAVE_32FPVSRS(n,c,base) __SAVE_32FPVSRS(n,__REG_##c,__REG_##base)

/*
 * Load state from memory into FP registers including FPSCR.
 * Assumes the caller has enabled FP in the MSR.
 *
 * r3 = address of the FP state buffer (FPSCR image at FPSTATE_FPSCR(r3)).
 * Clobbers fr0 and uses r4 as scratch for the VSX restore path.
 */
_GLOBAL(load_fp_state)
	lfd	fr0,FPSTATE_FPSCR(r3)	/* fetch the saved FPSCR image */
	MTFSF_L(fr0)			/* restore all FPSCR fields from fr0 */
	REST_32FPVSRS(0, R4, R3)	/* reload fr0-fr31 (vsr0-vsr31 w/ VSX) */
	blr
EXPORT_SYMBOL(load_fp_state)
_ASM_NOKPROBE_SYMBOL(load_fp_state); /* used by restore_math */

/*
 * Store FP state into memory, including FPSCR
 * Assumes the caller has enabled FP in the MSR.
 *
 * r3 = address of the FP state buffer to fill.
 * Clobbers fr0 and uses r4 as scratch for the VSX save path.
 */
_GLOBAL(store_fp_state)
	SAVE_32FPVSRS(0, R4, R3)	/* dump fr0-fr31 (vsr0-vsr31 w/ VSX) */
	mffs	fr0			/* ...then capture the FPSCR... */
	stfd	fr0,FPSTATE_FPSCR(r3)	/* ...into its slot in the buffer */
	blr
EXPORT_SYMBOL(store_fp_state)

/*
 * This task wants to use the FPU now.
 * On UP, disable FP for the task which had the FPU previously,
 * and save its floating-point registers in its thread_struct.
 * Load up this task's FP registers from its thread_struct,
 * enable the FPU for the current task and return to the task.
 * Note that on 32-bit this can only use registers that will be
 * restored by fast_exception_return, i.e. r3 - r6, r10 and r11.
 *
 * NOTE(review): on entry, r9 (32-bit) / r12 (64-bit) are presumed to
 * hold the MSR image that the exception-return path will restore for
 * the interrupted task - set up by the FP-unavailable exception entry
 * code, not visible in this file.  The 64-bit path stores it back to
 * pt_regs (_MSR(r1)) below, which is what grounds that reading there.
 */
_GLOBAL(load_up_fpu)
	mfmsr	r5
	ori	r5,r5,MSR_FP		/* enable FP in the kernel right now */
#ifdef CONFIG_VSX
BEGIN_FTR_SECTION
	oris	r5,r5,MSR_VSX@h		/* ...and VSX too, if the CPU has it */
END_FTR_SECTION_IFSET(CPU_FTR_VSX)
#endif
	SYNC
	MTMSRD(r5)			/* enable use of fpu now */
	isync				/* context-synchronize the MSR change */
	/* enable use of FP after return */
#ifdef CONFIG_PPC32
	mfspr	r5,SPRN_SPRG_THREAD	/* current task's THREAD (phys) */
	lwz	r4,THREAD_FPEXC_MODE(r5)
	ori	r9,r9,MSR_FP		/* enable FP for current */
	or	r9,r9,r4		/* merge FP exception-mode MSR bits */
#else
	ld	r4,PACACURRENT(r13)	/* current task_struct from the PACA */
	addi	r5,r4,THREAD		/* Get THREAD */
	lwz	r4,THREAD_FPEXC_MODE(r5)
	ori	r12,r12,MSR_FP		/* enable FP in the return MSR */
	or	r12,r12,r4		/* merge FP exception-mode MSR bits */
	std	r12,_MSR(r1)		/* back into pt_regs for the return */
#endif
	/* Don't care if r4 overflows, this is desired behaviour */
	lbz	r4,THREAD_LOAD_FP(r5)	/* bump the thread's FP-use counter */
	addi	r4,r4,1
	stb	r4,THREAD_LOAD_FP(r5)
	addi	r10,r5,THREAD_FPSTATE	/* r10 = &thread->fp_state */
	lfd	fr0,FPSTATE_FPSCR(r10)
	MTFSF_L(fr0)			/* restore the task's FPSCR */
	REST_32FPVSRS(0, R4, R10)	/* restore fr0-fr31 / vsr0-vsr31 */
	/* restore registers and return */
	/* we haven't used ctr or xer or lr */
	blr

/*
 * save_fpu(tsk)
 * Save the floating-point registers in its thread_struct.
 * Enables the FPU for use in the kernel on return.
 *
 * r3 = task_struct pointer.  If the thread has a non-NULL alternate
 * save-area pointer (THREAD_FPSAVEAREA) the state is written there,
 * otherwise it goes to the thread's regular fp_state.
 */
_GLOBAL(save_fpu)
	addi	r3,r3,THREAD		/* want THREAD of task */
	PPC_LL	r6,THREAD_FPSAVEAREA(r3)	/* r6 = alternate save area, if any */
	PPC_LL	r5,PT_REGS(r3)		/* NOTE(review): r5 looks unused below -
					 * confirm before removing */
	PPC_LCMPI	0,r6,0
	bne	2f			/* alternate area set? use it as-is */
	addi	r6,r3,THREAD_FPSTATE	/* else save to thread->fp_state */
2:	SAVE_32FPVSRS(0, R4, R6)	/* dump fr0-fr31 / vsr0-vsr31 */
	mffs	fr0			/* ...plus the FPSCR */
	stfd	fr0,FPSTATE_FPSCR(r6)
	blr

/*
 * These are used in the alignment trap handler when emulating
 * single-precision loads and stores.
 */

/* cvt_fd(r3 = float *src, r4 = double *dst):
 * lfs widens single->double on load, stfd stores it unchanged. */
_GLOBAL(cvt_fd)
	lfs	0,0(r3)
	stfd	0,0(r4)
	blr

/* cvt_df(r3 = double *src, r4 = float *dst):
 * lfd loads the double, stfs rounds double->single on store. */
_GLOBAL(cvt_df)
	lfd	0,0(r3)
	stfs	0,0(r4)
	blr