#ifndef _ASM_POWERPC_PROCESSOR_H
#define _ASM_POWERPC_PROCESSOR_H

/*
 * Copyright (C) 2001 PPC 64 Team, IBM Corp
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */

#include <asm/reg.h>

#ifdef CONFIG_VSX
#define TS_FPRWIDTH 2

#ifdef __BIG_ENDIAN__
#define TS_FPROFFSET 0
#define TS_VSRLOWOFFSET 1
#else
#define TS_FPROFFSET 1
#define TS_VSRLOWOFFSET 0
#endif

#else
#define TS_FPRWIDTH 1
#define TS_FPROFFSET 0
#endif

#ifdef CONFIG_PPC64
/* Default SMT priority is set to 3. Use bits 11-13 of the PPR to store it. */
#define PPR_PRIORITY 3
#ifdef __ASSEMBLY__
#define DEFAULT_PPR (PPR_PRIORITY << 50)
#else
#define DEFAULT_PPR ((u64)PPR_PRIORITY << 50)
#endif /* __ASSEMBLY__ */
#endif /* CONFIG_PPC64 */

#ifndef __ASSEMBLY__
#include <linux/types.h>
#include <asm/thread_info.h>
#include <asm/ptrace.h>
#include <asm/hw_breakpoint.h>

/* We do _not_ want to define new machine types at all, those must die
 * in favor of using the device-tree
 * -- BenH.
 */

/* PREP sub-platform types. Unused */
#define _PREP_Motorola 0x01    /* motorola prep */
#define _PREP_Firm     0x02    /* firmworks prep */
#define _PREP_IBM      0x00    /* ibm prep */
#define _PREP_Bull     0x03    /* bull prep */

/* CHRP sub-platform types. These are arbitrary */
#define _CHRP_Motorola 0x04    /* motorola chrp, the cobra */
#define _CHRP_IBM      0x05    /* IBM chrp, the longtrail and longtrail 2 */
#define _CHRP_Pegasos  0x06    /* Genesi/bplan's Pegasos and Pegasos2 */
#define _CHRP_briq     0x07    /* TotalImpact's briQ */

#if defined(__KERNEL__) && defined(CONFIG_PPC32)

extern int _chrp_type;

#endif /* defined(__KERNEL__) && defined(CONFIG_PPC32) */

/* Macros for adjusting thread priority (hardware multi-threading) */
#define HMT_very_low()    asm volatile("or 31,31,31   # very low priority")
#define HMT_low()         asm volatile("or 1,1,1      # low priority")
#define HMT_medium_low()  asm volatile("or 6,6,6      # medium low priority")
#define HMT_medium()      asm volatile("or 2,2,2      # medium priority")
#define HMT_medium_high() asm volatile("or 5,5,5      # medium high priority")
#define HMT_high()        asm volatile("or 3,3,3      # high priority")

#ifdef __KERNEL__

struct task_struct;
void start_thread(struct pt_regs *regs, unsigned long fdptr, unsigned long sp);
void release_thread(struct task_struct *);

#ifdef CONFIG_PPC32

#if CONFIG_TASK_SIZE > CONFIG_KERNEL_START
#error User TASK_SIZE overlaps with KERNEL_START address
#endif
#define TASK_SIZE (CONFIG_TASK_SIZE)

/* This decides where the kernel will search for a free chunk of vm
 * space during mmap's.
 */
#define TASK_UNMAPPED_BASE (TASK_SIZE / 8 * 3)
#endif

#ifdef CONFIG_PPC64
/*
 * 64-bit user address space can have multiple limits
 * For now supported values are:
 */
#define TASK_SIZE_64TB  (0x0000400000000000UL)
#define TASK_SIZE_128TB (0x0000800000000000UL)
#define TASK_SIZE_512TB (0x0002000000000000UL)
#define TASK_SIZE_1PB   (0x0004000000000000UL)
#define TASK_SIZE_2PB   (0x0008000000000000UL)
/*
 * With 52 bits in the address we can support
 * up to 4PB of range.
 */
#define TASK_SIZE_4PB   (0x0010000000000000UL)
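
/*
 * Illustrative sketch (not part of the original header): the limits
 * above are plain powers of two, e.g. TASK_SIZE_128TB is 1UL << 47 and
 * TASK_SIZE_4PB is 1UL << 52, so a function-scope sanity check could
 * be written as:
 *
 *	BUILD_BUG_ON(TASK_SIZE_128TB != (1UL << 47));
 *	BUILD_BUG_ON(TASK_SIZE_4PB != (1UL << 52));
 */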

/*
 * For now 512TB is only supported with book3s and 64K linux page size.
 */
#if defined(CONFIG_PPC_BOOK3S_64) && defined(CONFIG_PPC_64K_PAGES)
/*
 * Max value currently used:
 */
#define TASK_SIZE_USER64          TASK_SIZE_4PB
#define DEFAULT_MAP_WINDOW_USER64 TASK_SIZE_128TB
#define TASK_CONTEXT_SIZE         TASK_SIZE_512TB
#else
#define TASK_SIZE_USER64          TASK_SIZE_64TB
#define DEFAULT_MAP_WINDOW_USER64 TASK_SIZE_64TB
/*
 * We don't need to allocate extended context ids for 4K page size, because
 * we limit the max effective address on this config to 64TB.
 */
#define TASK_CONTEXT_SIZE         TASK_SIZE_64TB
#endif

/*
 * 32-bit user address space is 4GB - 1 page
 * (this 1 page is needed so referencing of 0xFFFFFFFF generates EFAULT)
 */
#define TASK_SIZE_USER32 (0x0000000100000000UL - (1 * PAGE_SIZE))

#define TASK_SIZE_OF(tsk) (test_tsk_thread_flag(tsk, TIF_32BIT) ? \
		TASK_SIZE_USER32 : TASK_SIZE_USER64)
#define TASK_SIZE TASK_SIZE_OF(current)
/* This decides where the kernel will search for a free chunk of vm
 * space during mmap's.
 */
#define TASK_UNMAPPED_BASE_USER32 (PAGE_ALIGN(TASK_SIZE_USER32 / 4))
#define TASK_UNMAPPED_BASE_USER64 (PAGE_ALIGN(DEFAULT_MAP_WINDOW_USER64 / 4))

#define TASK_UNMAPPED_BASE ((is_32bit_task()) ? \
		TASK_UNMAPPED_BASE_USER32 : TASK_UNMAPPED_BASE_USER64)
#endif

/*
 * Initial task size value for user applications. For book3s 64 we start
 * with 128TB and conditionally enable up to 512TB.
 */
#ifdef CONFIG_PPC_BOOK3S_64
#define DEFAULT_MAP_WINDOW ((is_32bit_task()) ? \
		TASK_SIZE_USER32 : DEFAULT_MAP_WINDOW_USER64)
#else
#define DEFAULT_MAP_WINDOW TASK_SIZE
#endif

#ifdef __powerpc64__

#define STACK_TOP_USER64 DEFAULT_MAP_WINDOW_USER64
#define STACK_TOP_USER32 TASK_SIZE_USER32

#define STACK_TOP (is_32bit_task() ? \
		STACK_TOP_USER32 : STACK_TOP_USER64)

#define STACK_TOP_MAX TASK_SIZE_USER64

#else /* __powerpc64__ */

#define STACK_TOP TASK_SIZE
#define STACK_TOP_MAX STACK_TOP

#endif /* __powerpc64__ */

typedef struct {
        unsigned long seg;
} mm_segment_t;

#define TS_FPR(i) fp_state.fpr[i][TS_FPROFFSET]
#define TS_CKFPR(i) ckfp_state.fpr[i][TS_FPROFFSET]

/* FP and VSX 0-31 register set */
struct thread_fp_state {
        u64 fpr[32][TS_FPRWIDTH] __attribute__((aligned(16)));
        u64 fpscr;              /* Floating point status */
};

/* Complete AltiVec register set including VSCR */
struct thread_vr_state {
        vector128 vr[32] __attribute__((aligned(16)));
        vector128 vscr __attribute__((aligned(16)));
};
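
/*
 * Illustrative sketch (not part of the original header): TS_FPR()
 * hides the FP-inside-VSX register layout, so common code can reach a
 * thread's FP registers without caring about CONFIG_VSX or endianness,
 * e.g. reading FPR 3 of a task:
 *
 *	u64 val = tsk->thread.TS_FPR(3);
 */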

struct debug_reg {
#ifdef CONFIG_PPC_ADV_DEBUG_REGS
        /*
         * The following help to manage the use of Debug Control Registers
         * on the BookE platforms.
         */
        uint32_t dbcr0;
        uint32_t dbcr1;
#ifdef CONFIG_BOOKE
        uint32_t dbcr2;
#endif
        /*
         * The stored value of the DBSR register will be the value at the
         * last debug interrupt. This register can only be read from user
         * code (it is never written to) and describes the reason for the
         * last debug trap.
         */
        uint32_t dbsr;
        /*
         * The following will contain addresses used by debug applications
         * to help trace and trap on particular address locations.
         * The bits in the Debug Control Registers above help define which
         * of the following registers will contain valid data and/or
         * addresses.
         */
        unsigned long iac1;
        unsigned long iac2;
#if CONFIG_PPC_ADV_DEBUG_IACS > 2
        unsigned long iac3;
        unsigned long iac4;
#endif
        unsigned long dac1;
        unsigned long dac2;
#if CONFIG_PPC_ADV_DEBUG_DVCS > 0
        unsigned long dvc1;
        unsigned long dvc2;
#endif
#endif
};
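
/*
 * Illustrative sketch (not part of the original header, assuming the
 * DBCR0_* flag definitions from <asm/reg_booke.h>): arming an
 * instruction breakpoint pairs an enable bit in dbcr0 with the
 * matching address register, e.g. given a struct thread_struct
 * pointer "thread":
 *
 *	thread->debug.iac1 = addr;
 *	thread->debug.dbcr0 |= DBCR0_IAC1 | DBCR0_IDM;
 */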

struct thread_struct {
        unsigned long ksp;              /* Kernel stack pointer */

#ifdef CONFIG_PPC64
        unsigned long ksp_vsid;
#endif
        struct pt_regs *regs;           /* Pointer to saved register state */
        mm_segment_t addr_limit;        /* for get_fs() validation */
#ifdef CONFIG_BOOKE
        /* BookE base exception scratch space; align on cacheline */
        unsigned long normsave[8] ____cacheline_aligned;
#endif
#ifdef CONFIG_PPC32
        void *pgdir;                    /* root of page-table tree */
        unsigned long ksp_limit;        /* if ksp <= ksp_limit stack overflow */
#endif
        /* Debug Registers */
        struct debug_reg debug;
        struct thread_fp_state fp_state;
        struct thread_fp_state *fp_save_area;
        int fpexc_mode;                 /* floating-point exception mode */
        unsigned int align_ctl;         /* alignment handling control */
#ifdef CONFIG_HAVE_HW_BREAKPOINT
        struct perf_event *ptrace_bps[HBP_NUM];
        /*
         * Helps identify source of single-step exception and subsequent
         * hw-breakpoint enablement
         */
        struct perf_event *last_hit_ubp;
#endif /* CONFIG_HAVE_HW_BREAKPOINT */
        struct arch_hw_breakpoint hw_brk; /* info on the hardware breakpoint */
        unsigned long trap_nr;          /* last trap # on this thread */
        u8 load_slb;                    /* Ages out SLB preload cache entries */
        u8 load_fp;
#ifdef CONFIG_ALTIVEC
        u8 load_vec;
        struct thread_vr_state vr_state;
        struct thread_vr_state *vr_save_area;
        unsigned long vrsave;
        int used_vr;                    /* set if process has used altivec */
#endif /* CONFIG_ALTIVEC */
#ifdef CONFIG_VSX
        /* VSR status */
        int used_vsr;                   /* set if process has used VSX */
#endif /* CONFIG_VSX */
#ifdef CONFIG_SPE
        unsigned long evr[32];          /* upper 32-bits of SPE regs */
        u64 acc;                        /* Accumulator */
        unsigned long spefscr;          /* SPE & eFP status */
        unsigned long spefscr_last;     /* SPEFSCR value on last prctl
                                           call or trap return */
        int used_spe;                   /* set if process has used spe */
#endif /* CONFIG_SPE */
#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
        u8 load_tm;
        u64 tm_tfhar;                   /* Transaction fail handler addr */
        u64 tm_texasr;                  /* Transaction exception & summary */
        u64 tm_tfiar;                   /* Transaction fail instr address reg */
        struct pt_regs ckpt_regs;       /* Checkpointed registers */

        unsigned long tm_tar;
        unsigned long tm_ppr;
        unsigned long tm_dscr;

        /*
         * Checkpointed FP and VSX 0-31 register set.
         *
         * When a transaction is active/signalled/scheduled etc., *regs is
         * the most recent set of (speculated) GPRs, with ckpt_regs being
         * the older checkpointed regs to which we roll back if the
         * transaction aborts.
         *
         * These are analogous to how ckpt_regs and pt_regs work.
         */
        struct thread_fp_state ckfp_state; /* Checkpointed FP state */
        struct thread_vr_state ckvr_state; /* Checkpointed VR state */
        unsigned long ckvrsave;         /* Checkpointed VRSAVE */
#endif /* CONFIG_PPC_TRANSACTIONAL_MEM */
#ifdef CONFIG_PPC_MEM_KEYS
        unsigned long amr;
        unsigned long iamr;
        unsigned long uamor;
#endif
#ifdef CONFIG_KVM_BOOK3S_32_HANDLER
        void *kvm_shadow_vcpu;          /* KVM internal data */
#endif /* CONFIG_KVM_BOOK3S_32_HANDLER */
#if defined(CONFIG_KVM) && defined(CONFIG_BOOKE)
        struct kvm_vcpu *kvm_vcpu;
#endif
#ifdef CONFIG_PPC64
        unsigned long dscr;
        unsigned long fscr;
        /*
         * dscr_inherit indicates that the process has explicitly changed
         * the DSCR value for itself, so the kernel will no longer use the
         * default CPU DSCR value from the PACA during context switch.
         * Once set, this behaviour is also inherited by all children of
         * the process from that point onwards.
         */
        int dscr_inherit;
        unsigned long tidr;
#endif
#ifdef CONFIG_PPC_BOOK3S_64
        unsigned long tar;
        unsigned long ebbrr;
        unsigned long ebbhr;
        unsigned long bescr;
        unsigned long siar;
        unsigned long sdar;
        unsigned long sier;
        unsigned long mmcr2;
        unsigned mmcr0;

        unsigned used_ebb;
        unsigned int used_vas;
#endif
};

#define ARCH_MIN_TASKALIGN 16

#define INIT_SP (sizeof(init_stack) + (unsigned long) &init_stack)
#define INIT_SP_LIMIT \
        (_ALIGN_UP(sizeof(init_thread_info), 16) + (unsigned long) &init_stack)

#ifdef CONFIG_SPE
#define SPEFSCR_INIT \
        .spefscr = SPEFSCR_FINVE | SPEFSCR_FDBZE | SPEFSCR_FUNFE | SPEFSCR_FOVFE, \
        .spefscr_last = SPEFSCR_FINVE | SPEFSCR_FDBZE | SPEFSCR_FUNFE | SPEFSCR_FOVFE,
#else
#define SPEFSCR_INIT
#endif

#ifdef CONFIG_PPC32
#define INIT_THREAD { \
        .ksp = INIT_SP, \
        .ksp_limit = INIT_SP_LIMIT, \
        .addr_limit = KERNEL_DS, \
        .pgdir = swapper_pg_dir, \
        .fpexc_mode = MSR_FE0 | MSR_FE1, \
        SPEFSCR_INIT \
}
#else
#define INIT_THREAD { \
        .ksp = INIT_SP, \
        .regs = (struct pt_regs *)INIT_SP - 1, /* XXX bogus, I think */ \
        .addr_limit = KERNEL_DS, \
        .fpexc_mode = 0, \
        .fscr = FSCR_TAR | FSCR_EBB \
}
#endif

#define task_pt_regs(tsk) ((struct pt_regs *)(tsk)->thread.regs)

unsigned long get_wchan(struct task_struct *p);

#define KSTK_EIP(tsk) ((tsk)->thread.regs? (tsk)->thread.regs->nip: 0)
#define KSTK_ESP(tsk) ((tsk)->thread.regs? (tsk)->thread.regs->gpr[1]: 0)

/* Get/set floating-point exception mode */
#define GET_FPEXC_CTL(tsk, adr) get_fpexc_mode((tsk), (adr))
#define SET_FPEXC_CTL(tsk, val) set_fpexc_mode((tsk), (val))

extern int get_fpexc_mode(struct task_struct *tsk, unsigned long adr);
extern int set_fpexc_mode(struct task_struct *tsk, unsigned int val);

#define GET_ENDIAN(tsk, adr) get_endian((tsk), (adr))
#define SET_ENDIAN(tsk, val) set_endian((tsk), (val))

extern int get_endian(struct task_struct *tsk, unsigned long adr);
extern int set_endian(struct task_struct *tsk, unsigned int val);

#define GET_UNALIGN_CTL(tsk, adr) get_unalign_ctl((tsk), (adr))
#define SET_UNALIGN_CTL(tsk, val) set_unalign_ctl((tsk), (val))

extern int get_unalign_ctl(struct task_struct *tsk, unsigned long adr);
extern int set_unalign_ctl(struct task_struct *tsk, unsigned int val);

extern void load_fp_state(struct thread_fp_state *fp);
extern void store_fp_state(struct thread_fp_state *fp);
extern void load_vr_state(struct thread_vr_state *vr);
extern void store_vr_state(struct thread_vr_state *vr);

static inline unsigned int __unpack_fe01(unsigned long msr_bits)
{
        return ((msr_bits & MSR_FE0) >> 10) | ((msr_bits & MSR_FE1) >> 8);
}

static inline unsigned long __pack_fe01(unsigned int fpmode)
{
        return ((fpmode << 10) & MSR_FE0) | ((fpmode << 8) & MSR_FE1);
}
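
/*
 * Illustrative sketch (not part of the original header): the two
 * helpers above translate between the two-bit prctl() FP exception
 * mode (PR_FP_EXC_DISABLED/NONRECOV/ASYNC/PRECISE, FE0 in bit 1 and
 * FE1 in bit 0) and the corresponding MSR bits, e.g.:
 *
 *	regs->msr = (regs->msr & ~(MSR_FE0 | MSR_FE1)) |
 *		    __pack_fe01(PR_FP_EXC_PRECISE);
 */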

#ifdef CONFIG_PPC64
#define cpu_relax() do { HMT_low(); HMT_medium(); barrier(); } while (0)

#define spin_begin() HMT_low()

#define spin_cpu_relax() barrier()

#define spin_cpu_yield() spin_cpu_relax()

#define spin_end() HMT_medium()

#define spin_until_cond(cond)                                   \
do {                                                            \
        if (unlikely(!(cond))) {                                \
                spin_begin();                                   \
                do {                                            \
                        spin_cpu_relax();                       \
                } while (!(cond));                              \
                spin_end();                                     \
        }                                                       \
} while (0)

#else
#define cpu_relax() barrier()
#endif
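
/*
 * Illustrative sketch (not part of the original header): the spin_*
 * helpers drop the SMT priority around a busy-wait so sibling threads
 * get more of the core; spin_until_cond() packages the whole pattern,
 * e.g. waiting on a hypothetical flag set by another CPU:
 *
 *	spin_until_cond(READ_ONCE(flag) != 0);
 */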

/* Check that a certain kernel stack pointer is valid in task_struct p */
int validate_sp(unsigned long sp, struct task_struct *p,
                unsigned long nbytes);

/*
 * Prefetch macros.
 */
#define ARCH_HAS_PREFETCH
#define ARCH_HAS_PREFETCHW
#define ARCH_HAS_SPINLOCK_PREFETCH

static inline void prefetch(const void *x)
{
        if (unlikely(!x))
                return;

        __asm__ __volatile__ ("dcbt 0,%0" : : "r" (x));
}

static inline void prefetchw(const void *x)
{
        if (unlikely(!x))
                return;

        __asm__ __volatile__ ("dcbtst 0,%0" : : "r" (x));
}

#define spin_lock_prefetch(x) prefetchw(x)

#define HAVE_ARCH_PICK_MMAP_LAYOUT

#ifdef CONFIG_PPC64
static inline unsigned long get_clean_sp(unsigned long sp, int is_32)
{
        if (is_32)
                return sp & 0x0ffffffffUL;
        return sp;
}
#else
static inline unsigned long get_clean_sp(unsigned long sp, int is_32)
{
        return sp;
}
#endif

extern unsigned long cpuidle_disable;
enum idle_boot_override {IDLE_NO_OVERRIDE = 0, IDLE_POWERSAVE_OFF};

extern int powersave_nap;       /* set if nap mode can be used in idle loop */
extern unsigned long power7_idle_insn(unsigned long type); /* PNV_THREAD_NAP/etc */
extern void power7_idle_type(unsigned long type);
extern unsigned long power9_idle_stop(unsigned long psscr_val);
extern unsigned long power9_offline_stop(unsigned long psscr_val);
extern void power9_idle_type(unsigned long stop_psscr_val,
                             unsigned long stop_psscr_mask);

extern void flush_instruction_cache(void);
extern void hard_reset_now(void);
extern void poweroff_now(void);
extern int fix_alignment(struct pt_regs *);
extern void cvt_fd(float *from, double *to);
extern void cvt_df(double *from, float *to);
extern void _nmask_and_or_msr(unsigned long nmask, unsigned long or_val);

#ifdef CONFIG_PPC64
/*
 * We handle most unaligned accesses in hardware. On the other hand
 * unaligned DMA can be very expensive on some ppc64 IO chips (it does
 * powers of 2 writes until it reaches sufficient alignment).
 *
 * Based on this we disable the IP header alignment in network drivers.
 */
#define NET_IP_ALIGN 0
#endif

#endif /* __KERNEL__ */
#endif /* __ASSEMBLY__ */
#endif /* _ASM_POWERPC_PROCESSOR_H */