/*
 * arch/xtensa/kernel/coprocessor.S
 *
 * Xtensa processor configuration-specific table of coprocessor and
 * other custom register layout information.
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License. See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 2003 - 2007 Tensilica Inc.
 */


#include <linux/linkage.h>
#include <asm/asm-offsets.h>
#include <asm/processor.h>
#include <asm/coprocessor.h>
#include <asm/thread_info.h>
#include <asm/uaccess.h>
#include <asm/unistd.h>
#include <asm/ptrace.h>
#include <asm/current.h>
#include <asm/pgtable.h>
#include <asm/page.h>
#include <asm/signal.h>
#include <asm/tlbflush.h>

/*
 * Entry condition:
 *
 *   a0:	trashed, original value saved on stack (PT_AREG0)
 *   a1:	a1
 *   a2:	new stack pointer, original in DEPC
 *   a3:	a3
 *   depc:	a2, original value saved on stack (PT_DEPC)
 *   excsave_1:	dispatch table
 *
 *   PT_DEPC >= VALID_DOUBLE_EXCEPTION_ADDRESS: double exception, DEPC
 *	     <  VALID_DOUBLE_EXCEPTION_ADDRESS: regular exception
 */

/* IO protection is currently unsupported. */

ENTRY(fast_io_protect)

	/* No handler: stash a0 and bail out through the unrecoverable-
	 * exception path (never returns).
	 */
	wsr	a0, excsave1
	movi	a0, unrecoverable_exception
	callx0	a0

ENDPROC(fast_io_protect)

#if XTENSA_HAVE_COPROCESSORS

/*
 * Macros for lazy context switch.
 *
 * SAVE_CP_REGS(x)/LOAD_CP_REGS(x) emit a tiny store/load stub for
 * coprocessor 'x' (empty stub if the CP is not configured).  The stubs
 * are not regular functions: they expect the context area address in a2,
 * may clobber a4..a7 as hal temporaries, and 'return' via jx a0.
 *
 * SAVE_CP_REGS_TAB(x)/LOAD_CP_REGS_TAB(x) emit one jump-table entry per
 * coprocessor.  NOTE: each entry is 8 bytes — a .long stub offset
 * (relative to the table base; 0 means "no such coprocessor") followed
 * by a .long THREAD_XTREGS_CP##x context-area offset — so the tables
 * must be indexed with an 8-byte stride.
 */

#define SAVE_CP_REGS(x)							\
	.align 4;							\
	.Lsave_cp_regs_cp##x:						\
	.if XTENSA_HAVE_COPROCESSOR(x);					\
		xchal_cp##x##_store a2 a4 a5 a6 a7;			\
	.endif;								\
	jx	a0

#define SAVE_CP_REGS_TAB(x)						\
	.if XTENSA_HAVE_COPROCESSOR(x);					\
		.long .Lsave_cp_regs_cp##x - .Lsave_cp_regs_jump_table;	\
	.else;								\
		.long 0;						\
	.endif;								\
	.long THREAD_XTREGS_CP##x


#define LOAD_CP_REGS(x)							\
	.align 4;							\
	.Lload_cp_regs_cp##x:						\
	.if XTENSA_HAVE_COPROCESSOR(x);					\
		xchal_cp##x##_load a2 a4 a5 a6 a7;			\
	.endif;								\
	jx	a0

#define LOAD_CP_REGS_TAB(x)						\
	.if XTENSA_HAVE_COPROCESSOR(x);					\
		.long .Lload_cp_regs_cp##x - .Lload_cp_regs_jump_table;	\
	.else;								\
		.long 0;						\
	.endif;								\
	.long THREAD_XTREGS_CP##x

	/* Store/load stubs for each possible coprocessor 0..7. */

	SAVE_CP_REGS(0)
	SAVE_CP_REGS(1)
	SAVE_CP_REGS(2)
	SAVE_CP_REGS(3)
	SAVE_CP_REGS(4)
	SAVE_CP_REGS(5)
	SAVE_CP_REGS(6)
	SAVE_CP_REGS(7)

	LOAD_CP_REGS(0)
	LOAD_CP_REGS(1)
	LOAD_CP_REGS(2)
	LOAD_CP_REGS(3)
	LOAD_CP_REGS(4)
	LOAD_CP_REGS(5)
	LOAD_CP_REGS(6)
	LOAD_CP_REGS(7)

	/* Jump tables: 8 entries of 8 bytes each (see note above). */

	.align 4
.Lsave_cp_regs_jump_table:
	SAVE_CP_REGS_TAB(0)
	SAVE_CP_REGS_TAB(1)
	SAVE_CP_REGS_TAB(2)
	SAVE_CP_REGS_TAB(3)
	SAVE_CP_REGS_TAB(4)
	SAVE_CP_REGS_TAB(5)
	SAVE_CP_REGS_TAB(6)
	SAVE_CP_REGS_TAB(7)

.Lload_cp_regs_jump_table:
	LOAD_CP_REGS_TAB(0)
	LOAD_CP_REGS_TAB(1)
	LOAD_CP_REGS_TAB(2)
	LOAD_CP_REGS_TAB(3)
	LOAD_CP_REGS_TAB(4)
	LOAD_CP_REGS_TAB(5)
	LOAD_CP_REGS_TAB(6)
	LOAD_CP_REGS_TAB(7)

/*
 * coprocessor_save(buffer, index)
 *                    a2      a3
 * coprocessor_load(buffer, index)
 *                    a2      a3
 *
 * Save or load coprocessor registers for coprocessor 'index'.
 * The register values are saved to or loaded from the 'buffer' address.
 *
 * Note that these functions don't update the coprocessor_owner information!
141 * 142 */ 143 144ENTRY(coprocessor_save) 145 146 entry a1, 32 147 s32i a0, a1, 0 148 movi a0, .Lsave_cp_regs_jump_table 149 addx8 a3, a3, a0 150 l32i a3, a3, 0 151 beqz a3, 1f 152 add a0, a0, a3 153 callx0 a0 1541: l32i a0, a1, 0 155 retw 156 157ENDPROC(coprocessor_save) 158 159ENTRY(coprocessor_load) 160 161 entry a1, 32 162 s32i a0, a1, 0 163 movi a0, .Lload_cp_regs_jump_table 164 addx4 a3, a3, a0 165 l32i a3, a3, 0 166 beqz a3, 1f 167 add a0, a0, a3 168 callx0 a0 1691: l32i a0, a1, 0 170 retw 171 172ENDPROC(coprocessor_load) 173 174/* 175 * coprocessor_flush(struct task_info*, index) 176 * a2 a3 177 * coprocessor_restore(struct task_info*, index) 178 * a2 a3 179 * 180 * Save or load coprocessor registers for coprocessor 'index'. 181 * The register values are saved to or loaded from the coprocessor area 182 * inside the task_info structure. 183 * 184 * Note that these functions don't update the coprocessor_owner information! 185 * 186 */ 187 188 189ENTRY(coprocessor_flush) 190 191 entry a1, 32 192 s32i a0, a1, 0 193 movi a0, .Lsave_cp_regs_jump_table 194 addx8 a3, a3, a0 195 l32i a4, a3, 4 196 l32i a3, a3, 0 197 add a2, a2, a4 198 beqz a3, 1f 199 add a0, a0, a3 200 callx0 a0 2011: l32i a0, a1, 0 202 retw 203 204ENDPROC(coprocessor_flush) 205 206ENTRY(coprocessor_restore) 207 entry a1, 32 208 s32i a0, a1, 0 209 movi a0, .Lload_cp_regs_jump_table 210 addx4 a3, a3, a0 211 l32i a4, a3, 4 212 l32i a3, a3, 0 213 add a2, a2, a4 214 beqz a3, 1f 215 add a0, a0, a3 216 callx0 a0 2171: l32i a0, a1, 0 218 retw 219 220ENDPROC(coprocessor_restore) 221 222/* 223 * Entry condition: 224 * 225 * a0: trashed, original value saved on stack (PT_AREG0) 226 * a1: a1 227 * a2: new stack pointer, original in DEPC 228 * a3: a3 229 * depc: a2, original value saved on stack (PT_DEPC) 230 * excsave_1: dispatch table 231 * 232 * PT_DEPC >= VALID_DOUBLE_EXCEPTION_ADDRESS: double exception, DEPC 233 * < VALID_DOUBLE_EXCEPTION_ADDRESS: regular exception 234 */ 235 
ENTRY(fast_coprocessor_double)

	/* A coprocessor exception inside a double-exception context is
	 * unrecoverable: stash a0 and never return.
	 */
	wsr	a0, excsave1
	movi	a0, unrecoverable_exception
	callx0	a0

ENDPROC(fast_coprocessor_double)

/*
 * Lazy coprocessor context switch, entered on a CP-disabled exception.
 * Enables the faulting coprocessor, saves the previous owner's CP state
 * (if any), loads the current thread's CP state, and records the current
 * thread as the new owner.  Returns directly to user with rfe.
 */
ENTRY(fast_coprocessor)

	/* Save remaining registers a1-a3 and SAR */

	s32i	a3, a2, PT_AREG3
	rsr	a3, sar
	s32i	a1, a2, PT_AREG1
	s32i	a3, a2, PT_SAR
	mov	a1, a2			# a1: exception stack frame from here on
	rsr	a2, depc		# original a2 was parked in DEPC on entry
	s32i	a2, a1, PT_AREG2

	/*
	 * The hal macros require up to 4 temporary registers. We use a3..a6.
	 */

	s32i	a4, a1, PT_AREG4
	s32i	a5, a1, PT_AREG5
	s32i	a6, a1, PT_AREG6

	/* Find coprocessor number. Subtract first CP EXCCAUSE from EXCCAUSE */

	rsr	a3, exccause
	addi	a3, a3, -EXCCAUSE_COPROCESSOR0_DISABLED

	/* Set corresponding CPENABLE bit -> (sar:cp-index, a3: 1<<cp-index)*/

	ssl	a3			# SAR: 32 - coprocessor_number
	movi	a2, 1
	rsr	a0, cpenable
	sll	a2, a2			# a2: 1 << cp-index (via SAR set above)
	or	a0, a0, a2
	wsr	a0, cpenable
	rsync				# make CPENABLE update visible

	/* Retrieve previous owner. (a3 still holds CP number) */

	movi	a0, coprocessor_owner	# list of owners
	addx4	a0, a3, a0		# entry for CP (one pointer per CP)
	l32i	a4, a0, 0

	beqz	a4, 1f			# skip 'save' if no previous owner

	/* Disable coprocessor for previous owner. (a2 = 1 << CP number) */

	l32i	a5, a4, THREAD_CPENABLE
	xor	a5, a5, a2		# (1 << cp-id) still in a2
	s32i	a5, a4, THREAD_CPENABLE

	/*
	 * Get context save area and 'call' save routine.
	 * (a4 still holds previous owner (thread_info), a3 CP number)
	 */

	movi	a5, .Lsave_cp_regs_jump_table
	movi	a0, 2f			# a0: 'return' address
	addx8	a3, a3, a5		# a3: coprocessor number (8-byte entries)
	l32i	a2, a3, 4		# a2: xtregs offset
	l32i	a3, a3, 0		# a3: jump offset
	add	a2, a2, a4		# a2: previous owner's CP save area
	add	a4, a3, a5		# a4: address of save routine
	jx	a4

	/* Note that only a0 and a1 were preserved.
	 * Recompute the CP number and owner slot, clobbered by the stub.
	 */

2:	rsr	a3, exccause
	addi	a3, a3, -EXCCAUSE_COPROCESSOR0_DISABLED
	movi	a0, coprocessor_owner
	addx4	a0, a3, a0

	/* Set new 'owner' (a0 points to the CP owner, a3 contains the CP nr) */

1:	GET_THREAD_INFO (a4, a1)
	s32i	a4, a0, 0

	/* Get context save area and 'call' load routine. */

	movi	a5, .Lload_cp_regs_jump_table
	movi	a0, 1f			# a0: 'return' address for the stub
	addx8	a3, a3, a5		# 8-byte table entries
	l32i	a2, a3, 4		# a2: xtregs offset
	l32i	a3, a3, 0		# a3: jump offset
	add	a2, a2, a4		# a2: current thread's CP save area
	add	a4, a3, a5		# a4: address of load routine
	jx	a4

	/* Restore all registers and return from exception handler. */

1:	l32i	a6, a1, PT_AREG6
	l32i	a5, a1, PT_AREG5
	l32i	a4, a1, PT_AREG4

	l32i	a0, a1, PT_SAR
	l32i	a3, a1, PT_AREG3
	l32i	a2, a1, PT_AREG2
	wsr	a0, sar
	l32i	a0, a1, PT_AREG0
	l32i	a1, a1, PT_AREG1	# restore a1 last; frame pointer dies here

	rfe

ENDPROC(fast_coprocessor)

	.data

/* One owner slot (thread_info pointer, or 0 = unowned) per coprocessor. */
ENTRY(coprocessor_owner)

	.fill XCHAL_CP_MAX, 4, 0

END(coprocessor_owner)

#endif /* XTENSA_HAVE_COPROCESSORS */