/*
 * Copyright (C) 1995-1999 Gary Thomas, Paul Mackerras, Cort Dougan.
 */
#ifndef _ASM_POWERPC_PPC_ASM_H
#define _ASM_POWERPC_PPC_ASM_H

#include <linux/stringify.h>
#include <asm/asm-compat.h>
#include <asm/processor.h>
#include <asm/ppc-opcode.h>
#include <asm/firmware.h>

#ifndef __ASSEMBLY__
#error __FILE__ should only be used in assembler files
#else

#define SZL			(BITS_PER_LONG/8)

/*
 * Stuff for accurate CPU time accounting.
 * These macros handle transitions between user and system state
 * in exception entry and exit and accumulate time to the
 * user_time and system_time fields in the paca.
 */

#ifndef CONFIG_VIRT_CPU_ACCOUNTING_NATIVE
#define ACCOUNT_CPU_USER_ENTRY(ptr, ra, rb)
#define ACCOUNT_CPU_USER_EXIT(ptr, ra, rb)
#define ACCOUNT_STOLEN_TIME
#else
#define ACCOUNT_CPU_USER_ENTRY(ptr, ra, rb)				\
	MFTB(ra);			/* get timebase */		\
	PPC_LL	rb, ACCOUNT_STARTTIME_USER(ptr);			\
	PPC_STL	ra, ACCOUNT_STARTTIME(ptr);				\
	subf	rb,rb,ra;		/* subtract start value */	\
	PPC_LL	ra, ACCOUNT_USER_TIME(ptr);				\
	add	ra,ra,rb;		/* add on to user time */	\
	PPC_STL	ra, ACCOUNT_USER_TIME(ptr)

#define ACCOUNT_CPU_USER_EXIT(ptr, ra, rb)				\
	MFTB(ra);			/* get timebase */		\
	PPC_LL	rb, ACCOUNT_STARTTIME(ptr);				\
	PPC_STL	ra, ACCOUNT_STARTTIME_USER(ptr);			\
	subf	rb,rb,ra;		/* subtract start value */	\
	PPC_LL	ra, ACCOUNT_SYSTEM_TIME(ptr);				\
	add	ra,ra,rb;		/* add on to system time */	\
	PPC_STL	ra, ACCOUNT_SYSTEM_TIME(ptr)

#ifdef CONFIG_PPC_SPLPAR
#define ACCOUNT_STOLEN_TIME						\
BEGIN_FW_FTR_SECTION;							\
	beq	33f;							\
	/* from user - see if there are any DTL entries to process */	\
	ld	r10,PACALPPACAPTR(r13);	/* get ptr to VPA */		\
	ld	r11,PACA_DTL_RIDX(r13);	/* get log read index */	\
	addi	r10,r10,LPPACA_DTLIDX;					\
	LDX_BE	r10,0,r10;		/* get log write index */	\
	cmpd	cr1,r11,r10;						\
	beq+	cr1,33f;						\
	bl	accumulate_stolen_time;					\
	ld	r12,_MSR(r1);						\
	andi.	r10,r12,MSR_PR;		/* Restore cr0 (coming from user) */ \
33:									\
END_FW_FTR_SECTION_IFSET(FW_FEATURE_SPLPAR)

#else  /* CONFIG_PPC_SPLPAR */
#define ACCOUNT_STOLEN_TIME

#endif /* CONFIG_PPC_SPLPAR */

#endif /* CONFIG_VIRT_CPU_ACCOUNTING_NATIVE */
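
/*
 * Illustrative sketch only -- the register choices below are
 * assumptions for the example, not a real call site: an exception
 * entry path with the PACA pointer in r13 could accumulate user time
 * on the way in and system time on the way out, using two scratch
 * registers:
 *
 *	ACCOUNT_CPU_USER_ENTRY(r13, r9, r10)
 *	... handle the exception ...
 *	ACCOUNT_CPU_USER_EXIT(r13, r9, r10)
 */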
/*
 * Macros for storing registers into and loading registers from
 * exception frames.
 */
#ifdef __powerpc64__
#define SAVE_GPR(n, base)	std	n,GPR0+8*(n)(base)
#define REST_GPR(n, base)	ld	n,GPR0+8*(n)(base)
#define SAVE_NVGPRS(base)	SAVE_8GPRS(14, base); SAVE_10GPRS(22, base)
#define REST_NVGPRS(base)	REST_8GPRS(14, base); REST_10GPRS(22, base)
#else
#define SAVE_GPR(n, base)	stw	n,GPR0+4*(n)(base)
#define REST_GPR(n, base)	lwz	n,GPR0+4*(n)(base)
#define SAVE_NVGPRS(base)	SAVE_GPR(13, base); SAVE_8GPRS(14, base); \
				SAVE_10GPRS(22, base)
#define REST_NVGPRS(base)	REST_GPR(13, base); REST_8GPRS(14, base); \
				REST_10GPRS(22, base)
#endif

#define SAVE_2GPRS(n, base)	SAVE_GPR(n, base); SAVE_GPR(n+1, base)
#define SAVE_4GPRS(n, base)	SAVE_2GPRS(n, base); SAVE_2GPRS(n+2, base)
#define SAVE_8GPRS(n, base)	SAVE_4GPRS(n, base); SAVE_4GPRS(n+4, base)
#define SAVE_10GPRS(n, base)	SAVE_8GPRS(n, base); SAVE_2GPRS(n+8, base)
#define REST_2GPRS(n, base)	REST_GPR(n, base); REST_GPR(n+1, base)
#define REST_4GPRS(n, base)	REST_2GPRS(n, base); REST_2GPRS(n+2, base)
#define REST_8GPRS(n, base)	REST_4GPRS(n, base); REST_4GPRS(n+4, base)
#define REST_10GPRS(n, base)	REST_8GPRS(n, base); REST_2GPRS(n+8, base)

#define SAVE_FPR(n, base)	stfd	n,8*TS_FPRWIDTH*(n)(base)
#define SAVE_2FPRS(n, base)	SAVE_FPR(n, base); SAVE_FPR(n+1, base)
#define SAVE_4FPRS(n, base)	SAVE_2FPRS(n, base); SAVE_2FPRS(n+2, base)
#define SAVE_8FPRS(n, base)	SAVE_4FPRS(n, base); SAVE_4FPRS(n+4, base)
#define SAVE_16FPRS(n, base)	SAVE_8FPRS(n, base); SAVE_8FPRS(n+8, base)
#define SAVE_32FPRS(n, base)	SAVE_16FPRS(n, base); SAVE_16FPRS(n+16, base)
#define REST_FPR(n, base)	lfd	n,8*TS_FPRWIDTH*(n)(base)
#define REST_2FPRS(n, base)	REST_FPR(n, base); REST_FPR(n+1, base)
#define REST_4FPRS(n, base)	REST_2FPRS(n, base); REST_2FPRS(n+2, base)
#define REST_8FPRS(n, base)	REST_4FPRS(n, base); REST_4FPRS(n+4, base)
#define REST_16FPRS(n, base)	REST_8FPRS(n, base); REST_8FPRS(n+8, base)
#define REST_32FPRS(n, base)	REST_16FPRS(n, base); REST_16FPRS(n+16, base)

#define SAVE_VR(n,b,base)	li b,16*(n); stvx n,base,b
#define SAVE_2VRS(n,b,base)	SAVE_VR(n,b,base); SAVE_VR(n+1,b,base)
#define SAVE_4VRS(n,b,base)	SAVE_2VRS(n,b,base); SAVE_2VRS(n+2,b,base)
#define SAVE_8VRS(n,b,base)	SAVE_4VRS(n,b,base); SAVE_4VRS(n+4,b,base)
#define SAVE_16VRS(n,b,base)	SAVE_8VRS(n,b,base); SAVE_8VRS(n+8,b,base)
#define SAVE_32VRS(n,b,base)	SAVE_16VRS(n,b,base); SAVE_16VRS(n+16,b,base)
#define REST_VR(n,b,base)	li b,16*(n); lvx n,base,b
#define REST_2VRS(n,b,base)	REST_VR(n,b,base); REST_VR(n+1,b,base)
#define REST_4VRS(n,b,base)	REST_2VRS(n,b,base); REST_2VRS(n+2,b,base)
#define REST_8VRS(n,b,base)	REST_4VRS(n,b,base); REST_4VRS(n+4,b,base)
#define REST_16VRS(n,b,base)	REST_8VRS(n,b,base); REST_8VRS(n+8,b,base)
#define REST_32VRS(n,b,base)	REST_16VRS(n,b,base); REST_16VRS(n+16,b,base)
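
/*
 * A quick illustration of how the doubling macros above expand (the
 * register numbers here are only an example): on 64-bit,
 *	SAVE_2GPRS(3, r1)
 * becomes
 *	std 3,GPR0+8*(3)(r1); std 3+1,GPR0+8*(3+1)(r1)
 * i.e. r3 and r4 are stored into their slots in the exception frame
 * addressed by r1.
 */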
#ifdef __BIG_ENDIAN__
#define STXVD2X_ROT(n,b,base)		STXVD2X(n,b,base)
#define LXVD2X_ROT(n,b,base)		LXVD2X(n,b,base)
#else
#define STXVD2X_ROT(n,b,base)		XXSWAPD(n,n);		\
					STXVD2X(n,b,base);	\
					XXSWAPD(n,n)

#define LXVD2X_ROT(n,b,base)		LXVD2X(n,b,base);	\
					XXSWAPD(n,n)
#endif
/* Save the lower 32 VSRs in the thread VSR region */
#define SAVE_VSR(n,b,base)	li b,16*(n);  STXVD2X_ROT(n,R##base,R##b)
#define SAVE_2VSRS(n,b,base)	SAVE_VSR(n,b,base); SAVE_VSR(n+1,b,base)
#define SAVE_4VSRS(n,b,base)	SAVE_2VSRS(n,b,base); SAVE_2VSRS(n+2,b,base)
#define SAVE_8VSRS(n,b,base)	SAVE_4VSRS(n,b,base); SAVE_4VSRS(n+4,b,base)
#define SAVE_16VSRS(n,b,base)	SAVE_8VSRS(n,b,base); SAVE_8VSRS(n+8,b,base)
#define SAVE_32VSRS(n,b,base)	SAVE_16VSRS(n,b,base); SAVE_16VSRS(n+16,b,base)
#define REST_VSR(n,b,base)	li b,16*(n); LXVD2X_ROT(n,R##base,R##b)
#define REST_2VSRS(n,b,base)	REST_VSR(n,b,base); REST_VSR(n+1,b,base)
#define REST_4VSRS(n,b,base)	REST_2VSRS(n,b,base); REST_2VSRS(n+2,b,base)
#define REST_8VSRS(n,b,base)	REST_4VSRS(n,b,base); REST_4VSRS(n+4,b,base)
#define REST_16VSRS(n,b,base)	REST_8VSRS(n,b,base); REST_8VSRS(n+8,b,base)
#define REST_32VSRS(n,b,base)	REST_16VSRS(n,b,base); REST_16VSRS(n+16,b,base)

/*
 * b = base register for addressing, o = base offset from register of 1st EVR
 * n = first EVR, s = scratch
 */
#define SAVE_EVR(n,s,b,o)	evmergehi s,s,n; stw s,o+4*(n)(b)
#define SAVE_2EVRS(n,s,b,o)	SAVE_EVR(n,s,b,o); SAVE_EVR(n+1,s,b,o)
#define SAVE_4EVRS(n,s,b,o)	SAVE_2EVRS(n,s,b,o); SAVE_2EVRS(n+2,s,b,o)
#define SAVE_8EVRS(n,s,b,o)	SAVE_4EVRS(n,s,b,o); SAVE_4EVRS(n+4,s,b,o)
#define SAVE_16EVRS(n,s,b,o)	SAVE_8EVRS(n,s,b,o); SAVE_8EVRS(n+8,s,b,o)
#define SAVE_32EVRS(n,s,b,o)	SAVE_16EVRS(n,s,b,o); SAVE_16EVRS(n+16,s,b,o)
#define REST_EVR(n,s,b,o)	lwz s,o+4*(n)(b); evmergelo n,s,n
#define REST_2EVRS(n,s,b,o)	REST_EVR(n,s,b,o); REST_EVR(n+1,s,b,o)
#define REST_4EVRS(n,s,b,o)	REST_2EVRS(n,s,b,o); REST_2EVRS(n+2,s,b,o)
#define REST_8EVRS(n,s,b,o)	REST_4EVRS(n,s,b,o); REST_4EVRS(n+4,s,b,o)
#define REST_16EVRS(n,s,b,o)	REST_8EVRS(n,s,b,o); REST_8EVRS(n+8,s,b,o)
#define REST_32EVRS(n,s,b,o)	REST_16EVRS(n,s,b,o); REST_16EVRS(n+16,s,b,o)

/* Macros to adjust thread priority for hardware multithreading */
#define HMT_VERY_LOW	or	31,31,31	# very low priority
#define HMT_LOW		or	1,1,1
#define HMT_MEDIUM_LOW	or	6,6,6		# medium low priority
#define HMT_MEDIUM	or	2,2,2
#define HMT_MEDIUM_HIGH	or	5,5,5		# medium high priority
#define HMT_HIGH	or	3,3,3
#define HMT_EXTRA_HIGH	or	7,7,7		# power7 only

#ifdef CONFIG_PPC64
#define ULONG_SIZE	8
#else
#define ULONG_SIZE	4
#endif
#define __VCPU_GPR(n)	(VCPU_GPRS + (n * ULONG_SIZE))
#define VCPU_GPR(n)	__VCPU_GPR(__REG_##n)

#ifdef __KERNEL__
#ifdef CONFIG_PPC64

#define STACKFRAMESIZE	256
#define __STK_REG(i)	(112 + ((i)-14)*8)
#define STK_REG(i)	__STK_REG(__REG_##i)

#ifdef PPC64_ELF_ABI_v2
#define STK_GOT		24
#define __STK_PARAM(i)	(32 + ((i)-3)*8)
#else
#define STK_GOT		40
#define __STK_PARAM(i)	(48 + ((i)-3)*8)
#endif
#define STK_PARAM(i)	__STK_PARAM(__REG_##i)
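
/*
 * Illustrative sketch only (the choice of r14 is an example): a 64-bit
 * asm routine that needs a nonvolatile register can allocate a frame
 * and spill into the STK_REG() save area, which is valid for r14-r31:
 *
 *	stdu	r1,-STACKFRAMESIZE(r1)
 *	std	r14,STK_REG(R14)(r1)
 *	...
 *	ld	r14,STK_REG(R14)(r1)
 *	addi	r1,r1,STACKFRAMESIZE
 */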
#ifdef PPC64_ELF_ABI_v2

#define _GLOBAL(name) \
	.section ".text"; \
	.align 2 ; \
	.type name,@function; \
	.globl name; \
name:

#define _GLOBAL_TOC(name) \
	.section ".text"; \
	.align 2 ; \
	.type name,@function; \
	.globl name; \
name: \
0:	addis r2,r12,(.TOC.-0b)@ha; \
	addi r2,r2,(.TOC.-0b)@l; \
	.localentry name,.-name

#define _KPROBE(name) \
	.section ".kprobes.text","a"; \
	.align 2 ; \
	.type name,@function; \
	.globl name; \
name:

#define DOTSYM(a)	a

#else

#define XGLUE(a,b) a##b
#define GLUE(a,b) XGLUE(a,b)

#define _GLOBAL(name) \
	.section ".text"; \
	.align 2 ; \
	.globl name; \
	.globl GLUE(.,name); \
	.section ".opd","aw"; \
name: \
	.quad GLUE(.,name); \
	.quad .TOC.@tocbase; \
	.quad 0; \
	.previous; \
	.type GLUE(.,name),@function; \
GLUE(.,name):

#define _GLOBAL_TOC(name) _GLOBAL(name)

#define _KPROBE(name) \
	.section ".kprobes.text","a"; \
	.align 2 ; \
	.globl name; \
	.globl GLUE(.,name); \
	.section ".opd","aw"; \
name: \
	.quad GLUE(.,name); \
	.quad .TOC.@tocbase; \
	.quad 0; \
	.previous; \
	.type GLUE(.,name),@function; \
GLUE(.,name):

#define DOTSYM(a)	GLUE(.,a)

#endif

#else /* 32-bit */

#define _ENTRY(n)	\
	.globl n;	\
n:

#define _GLOBAL(n)	\
	.text;		\
	.stabs __stringify(n:F-1),N_FUN,0,0,n;\
	.globl n;	\
n:

#define _GLOBAL_TOC(name) _GLOBAL(name)

#define _KPROBE(n)	\
	.section ".kprobes.text","a";	\
	.globl	n;	\
n:

#endif

#define FUNC_START(name)	_GLOBAL(name)
#define FUNC_END(name)
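
/*
 * Illustrative only ("my_func" is a made-up name, not kernel code):
 * a new assembler entry point is normally written as
 *
 *	_GLOBAL(my_func)
 *		...
 *		blr
 *
 * Use _GLOBAL_TOC() instead when the function needs r2 set up to
 * address the TOC/GOT on ELFv2.
 */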
/*
 * LOAD_REG_IMMEDIATE(rn, expr)
 *   Loads the value of the constant expression 'expr' into register 'rn'
 *   using immediate instructions only.  Use this when it's important not
 *   to reference other data (i.e. on ppc64 when the TOC pointer is not
 *   valid) and when 'expr' is a constant or absolute address.
 *
 * LOAD_REG_ADDR(rn, name)
 *   Loads the address of label 'name' into register 'rn'.  Use this when
 *   you don't particularly need immediate instructions only, but you need
 *   the whole address in one register (e.g. it's a structure address and
 *   you want to access various offsets within it).  On ppc32 this is
 *   identical to LOAD_REG_IMMEDIATE.
 *
 * LOAD_REG_ADDR_PIC(rn, name)
 *   Loads the address of label 'name' into register 'rn'.  Use this when
 *   the kernel doesn't run at the linked or relocated address.  Please
 *   note that this macro will clobber the lr register.
 *
 * LOAD_REG_ADDRBASE(rn, name)
 * ADDROFF(name)
 *   LOAD_REG_ADDRBASE loads part of the address of label 'name' into
 *   register 'rn'.  ADDROFF(name) returns the remainder of the address as
 *   a constant expression.  ADDROFF(name) is a signed expression < 16 bits
 *   in size, so is suitable for use directly as an offset in load and store
 *   instructions.  Use this when loading/storing a single word or less as:
 *      LOAD_REG_ADDRBASE(rX, name)
 *      ld	rY,ADDROFF(name)(rX)
 */

/* Be careful, this will clobber the lr register. */
#define LOAD_REG_ADDR_PIC(reg, name)		\
	bl	0f;				\
0:	mflr	reg;				\
	addis	reg,reg,(name - 0b)@ha;		\
	addi	reg,reg,(name - 0b)@l;

#ifdef __powerpc64__
#ifdef HAVE_AS_ATHIGH
#define __AS_ATHIGH high
#else
#define __AS_ATHIGH h
#endif
#define LOAD_REG_IMMEDIATE(reg,expr)		\
	lis     reg,(expr)@highest;		\
	ori     reg,reg,(expr)@higher;		\
	rldicr  reg,reg,32,31;			\
	oris    reg,reg,(expr)@__AS_ATHIGH;	\
	ori     reg,reg,(expr)@l;

#define LOAD_REG_ADDR(reg,name)			\
	ld	reg,name@got(r2)

#define LOAD_REG_ADDRBASE(reg,name)	LOAD_REG_ADDR(reg,name)
#define ADDROFF(name)			0

/* offsets for stack frame layout */
#define LRSAVE	16

#else /* 32-bit */

#define LOAD_REG_IMMEDIATE(reg,expr)		\
	lis	reg,(expr)@ha;			\
	addi	reg,reg,(expr)@l;

#define LOAD_REG_ADDR(reg,name)		LOAD_REG_IMMEDIATE(reg, name)

#define LOAD_REG_ADDRBASE(reg, name)	lis	reg,name@ha
#define ADDROFF(name)			name@l

/* offsets for stack frame layout */
#define LRSAVE	4

#endif
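
/*
 * For illustration (the constant and register are arbitrary): on ppc64,
 *	LOAD_REG_IMMEDIATE(r5, 0x1234567890abcdef)
 * builds the value 16 bits at a time:
 *	lis	r5,0x1234	# bits 63-48
 *	ori	r5,r5,0x5678	# bits 47-32
 *	rldicr	r5,r5,32,31	# shift the upper half into place
 *	oris	r5,r5,0x90ab	# bits 31-16
 *	ori	r5,r5,0xcdef	# bits 15-0
 */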
/* various errata or part fixups */
#ifdef CONFIG_PPC601_SYNC_FIX
#define SYNC				\
BEGIN_FTR_SECTION			\
	sync;				\
	isync;				\
END_FTR_SECTION_IFSET(CPU_FTR_601)
#define SYNC_601			\
BEGIN_FTR_SECTION			\
	sync;				\
END_FTR_SECTION_IFSET(CPU_FTR_601)
#define ISYNC_601			\
BEGIN_FTR_SECTION			\
	isync;				\
END_FTR_SECTION_IFSET(CPU_FTR_601)
#else
#define	SYNC
#define SYNC_601
#define ISYNC_601
#endif

#if defined(CONFIG_PPC_CELL) || defined(CONFIG_PPC_FSL_BOOK3E)
#define MFTB(dest)			\
90:	mfspr dest, SPRN_TBRL;		\
BEGIN_FTR_SECTION_NESTED(96);		\
	cmpwi dest,0;			\
	beq-  90b;			\
END_FTR_SECTION_NESTED(CPU_FTR_CELL_TB_BUG, CPU_FTR_CELL_TB_BUG, 96)
#elif defined(CONFIG_8xx)
#define MFTB(dest)			mftb dest
#else
#define MFTB(dest)			mfspr dest, SPRN_TBRL
#endif

#ifndef CONFIG_SMP
#define TLBSYNC
#else /* CONFIG_SMP */
/* tlbsync is not implemented on 601 */
#define TLBSYNC				\
BEGIN_FTR_SECTION			\
	tlbsync;			\
	sync;				\
END_FTR_SECTION_IFCLR(CPU_FTR_601)
#endif

#ifdef CONFIG_PPC64
#define MTOCRF(FXM, RS)			\
	BEGIN_FTR_SECTION_NESTED(848);	\
	mtcrf	(FXM), RS;		\
	FTR_SECTION_ELSE_NESTED(848);	\
	mtocrf	(FXM), RS;		\
	ALT_FTR_SECTION_END_NESTED_IFCLR(CPU_FTR_NOEXECUTE, 848)
#endif

/*
 * This instruction is not implemented on the PPC 603 or 601; however, on
 * the 403GCX and 405GP tlbia IS defined and tlbie is not.
 * All of these instructions exist in the 8xx, they have magical powers,
 * and they must be used.
 */

#if !defined(CONFIG_4xx) && !defined(CONFIG_8xx)
#define tlbia					\
	li	r4,1024;			\
	mtctr	r4;				\
	lis	r4,KERNELBASE@h;		\
	.machine push;				\
	.machine "power4";			\
0:	tlbie	r4;				\
	.machine pop;				\
	addi	r4,r4,0x1000;			\
	bdnz	0b
#endif


#ifdef CONFIG_IBM440EP_ERR42
#define PPC440EP_ERR42 isync
#else
#define PPC440EP_ERR42
#endif

/*
 * The following stops all load and store data streams associated with stream
 * ID (ie. streams created explicitly).  The embedded and server mnemonics for
 * dcbt are different so we use machine "power4" here explicitly.
 */
#define DCBT_STOP_ALL_STREAM_IDS(scratch)	\
.machine push ;					\
.machine "power4" ;				\
	lis	scratch,0x60000000@h;		\
	dcbt	r0,scratch,0b01010;		\
.machine pop

/*
 * toreal/fromreal/tophys/tovirt macros. 32-bit BookE makes them
 * keep the address intact to be compatible with code shared with
 * 32-bit classic.
 *
 * On the other hand, I find it useful to have them behave as expected
 * by their name (ie always do the addition) on 64-bit BookE.
 */
#if defined(CONFIG_BOOKE) && !defined(CONFIG_PPC64)
#define toreal(rd)
#define fromreal(rd)

/*
 * We use addis to ensure compatibility with the "classic" ppc versions of
 * these macros, which use rs = 0 to get the tophys offset in rd, rather than
 * converting the address in r0, and so this version has to do that too
 * (i.e. set register rd to 0 when rs == 0).
 */
#define tophys(rd,rs)				\
	addis	rd,rs,0

#define tovirt(rd,rs)				\
	addis	rd,rs,0

#elif defined(CONFIG_PPC64)
#define toreal(rd)		/* we can access c000... in real mode */
#define fromreal(rd)

#define tophys(rd,rs)				\
	clrldi	rd,rs,2

#define tovirt(rd,rs)				\
	rotldi	rd,rs,16;			\
	ori	rd,rd,((KERNELBASE>>48)&0xFFFF);\
	rotldi	rd,rd,48
#else
/*
 * On APUS (Amiga PowerPC cpu upgrade board), we don't know the
 * physical base address of RAM at compile time.
 */
#define toreal(rd)	tophys(rd,rd)
#define fromreal(rd)	tovirt(rd,rd)

#define tophys(rd,rs)				\
0:	addis	rd,rs,-PAGE_OFFSET@h;		\
	.section ".vtop_fixup","aw";		\
	.align	1;				\
	.long	0b;				\
	.previous

#define tovirt(rd,rs)				\
0:	addis	rd,rs,PAGE_OFFSET@h;		\
	.section ".ptov_fixup","aw";		\
	.align	1;				\
	.long	0b;				\
	.previous
#endif
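
/*
 * Illustrative sketch only (the registers and context are assumptions):
 * a 32-bit boot path running in real mode might convert a kernel
 * virtual address in r3 to a physical one, use it, and convert back:
 *
 *	tophys(r8,r3)
 *	lwz	r9,0(r8)
 *	tovirt(r3,r8)
 */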
#ifdef CONFIG_PPC_BOOK3S_64
#define RFI		rfid
#define MTMSRD(r)	mtmsrd	r
#define MTMSR_EERI(reg)	mtmsrd	reg,1
#else
#define FIX_SRR1(ra, rb)
#ifndef CONFIG_40x
#define	RFI		rfi
#else
#define RFI		rfi; b .	/* Prevent prefetch past rfi */
#endif
#define MTMSRD(r)	mtmsr	r
#define MTMSR_EERI(reg)	mtmsr	reg
#define CLR_TOP32(r)
#endif

#endif /* __KERNEL__ */

/* The boring bits... */

/* Condition Register Bit Fields */

#define	cr0	0
#define	cr1	1
#define	cr2	2
#define	cr3	3
#define	cr4	4
#define	cr5	5
#define	cr6	6
#define	cr7	7


/*
 * General Purpose Registers (GPRs)
 *
 * The lower case r0-r31 should be used in preference to the upper
 * case R0-R31 as they provide more error checking in the assembler.
 * Use R0-31 only when really necessary.
 */

#define	r0	%r0
#define	r1	%r1
#define	r2	%r2
#define	r3	%r3
#define	r4	%r4
#define	r5	%r5
#define	r6	%r6
#define	r7	%r7
#define	r8	%r8
#define	r9	%r9
#define	r10	%r10
#define	r11	%r11
#define	r12	%r12
#define	r13	%r13
#define	r14	%r14
#define	r15	%r15
#define	r16	%r16
#define	r17	%r17
#define	r18	%r18
#define	r19	%r19
#define	r20	%r20
#define	r21	%r21
#define	r22	%r22
#define	r23	%r23
#define	r24	%r24
#define	r25	%r25
#define	r26	%r26
#define	r27	%r27
#define	r28	%r28
#define	r29	%r29
#define	r30	%r30
#define	r31	%r31


/* Floating Point Registers (FPRs) */

#define	fr0	0
#define	fr1	1
#define	fr2	2
#define	fr3	3
#define	fr4	4
#define	fr5	5
#define	fr6	6
#define	fr7	7
#define	fr8	8
#define	fr9	9
#define	fr10	10
#define	fr11	11
#define	fr12	12
#define	fr13	13
#define	fr14	14
#define	fr15	15
#define	fr16	16
#define	fr17	17
#define	fr18	18
#define	fr19	19
#define	fr20	20
#define	fr21	21
#define	fr22	22
#define	fr23	23
#define	fr24	24
#define	fr25	25
#define	fr26	26
#define	fr27	27
#define	fr28	28
#define	fr29	29
#define	fr30	30
#define	fr31	31

/* AltiVec Registers (VPRs) */

#define	v0	0
#define	v1	1
#define	v2	2
#define	v3	3
#define	v4	4
#define	v5	5
#define	v6	6
#define	v7	7
#define	v8	8
#define	v9	9
#define	v10	10
#define	v11	11
#define	v12	12
#define	v13	13
#define	v14	14
#define	v15	15
#define	v16	16
#define	v17	17
#define	v18	18
#define	v19	19
#define	v20	20
#define	v21	21
#define	v22	22
#define	v23	23
#define	v24	24
#define	v25	25
#define	v26	26
#define	v27	27
#define	v28	28
#define	v29	29
#define	v30	30
#define	v31	31

/* VSX Registers (VSRs) */

#define	vs0	0
#define	vs1	1
#define	vs2	2
#define	vs3	3
#define	vs4	4
#define	vs5	5
#define	vs6	6
#define	vs7	7
#define	vs8	8
#define	vs9	9
#define	vs10	10
#define	vs11	11
#define	vs12	12
#define	vs13	13
#define	vs14	14
#define	vs15	15
#define	vs16	16
#define	vs17	17
#define	vs18	18
#define	vs19	19
#define	vs20	20
#define	vs21	21
#define	vs22	22
#define	vs23	23
#define	vs24	24
#define	vs25	25
#define	vs26	26
#define	vs27	27
#define	vs28	28
#define	vs29	29
#define	vs30	30
#define	vs31	31
#define	vs32	32
#define	vs33	33
#define	vs34	34
#define	vs35	35
#define	vs36	36
#define	vs37	37
#define	vs38	38
#define	vs39	39
#define	vs40	40
#define	vs41	41
#define	vs42	42
#define	vs43	43
#define	vs44	44
#define	vs45	45
#define	vs46	46
#define	vs47	47
#define	vs48	48
#define	vs49	49
#define	vs50	50
#define	vs51	51
#define	vs52	52
#define	vs53	53
#define	vs54	54
#define	vs55	55
#define	vs56	56
#define	vs57	57
#define	vs58	58
#define	vs59	59
#define	vs60	60
#define	vs61	61
#define	vs62	62
#define	vs63	63
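
/*
 * Note: in the VSX architecture vs0-vs31 overlay the FPRs fr0-fr31 and
 * vs32-vs63 overlay the AltiVec registers v0-v31, so e.g. an
 *	stxvd2x	vs34,0,r3
 * (register choice arbitrary) stores the same state that v2 names.
 */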
/* SPE Registers (EVPRs) */

#define	evr0	0
#define	evr1	1
#define	evr2	2
#define	evr3	3
#define	evr4	4
#define	evr5	5
#define	evr6	6
#define	evr7	7
#define	evr8	8
#define	evr9	9
#define	evr10	10
#define	evr11	11
#define	evr12	12
#define	evr13	13
#define	evr14	14
#define	evr15	15
#define	evr16	16
#define	evr17	17
#define	evr18	18
#define	evr19	19
#define	evr20	20
#define	evr21	21
#define	evr22	22
#define	evr23	23
#define	evr24	24
#define	evr25	25
#define	evr26	26
#define	evr27	27
#define	evr28	28
#define	evr29	29
#define	evr30	30
#define	evr31	31

/* some stab codes */
#define N_FUN	36
#define N_RSYM	64
#define N_SLINE	68
#define N_SO	100

/*
 * Create an endian fixup trampoline
 *
 * This starts with a "tdi 0,0,0x48" instruction which is
 * essentially a "trap never", and thus akin to a nop.
 *
 * The opcode for this instruction read with the wrong endian
 * however results in a b . + 8
 *
 * So essentially we use that trick to execute the following
 * trampoline in "reverse endian" if we are running with the
 * MSR_LE bit set the "wrong" way for whatever endianness the
 * kernel is built for.
 */

#ifdef CONFIG_PPC_BOOK3E
#define FIXUP_ENDIAN
#else
#define FIXUP_ENDIAN						   \
	tdi   0,0,0x48;	  /* Reverse endian of b . + 8 */	   \
	b     $+36;	  /* Skip trampoline if endian is good */  \
	.long 0x05009f42; /* bcl 20,31,$+4 */			   \
	.long 0xa602487d; /* mflr r10 */			   \
	.long 0x1c004a39; /* addi r10,r10,28 */			   \
	.long 0xa600607d; /* mfmsr r11 */			   \
	.long 0x01006b69; /* xori r11,r11,1 */			   \
	.long 0xa6035a7d; /* mtsrr0 r10 */			   \
	.long 0xa6037b7d; /* mtsrr1 r11 */			   \
	.long 0x2400004c  /* rfid */
#endif /* !CONFIG_PPC_BOOK3E */

#endif /* __ASSEMBLY__ */

#endif /* _ASM_POWERPC_PPC_ASM_H */