/*
 * This control block defines the PACA which defines the processor
 * specific data for each logical processor on the system.
 * There are some pointers defined that are utilized by PLIC.
 *
 * C 2001 PPC 64 Team, IBM Corp
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */
#ifndef _ASM_POWERPC_PACA_H
#define _ASM_POWERPC_PACA_H
#ifdef __KERNEL__

#include <asm/types.h>
#include <asm/lppaca.h>
#include <asm/mmu.h>

register struct paca_struct *local_paca asm("r13");

#if defined(CONFIG_DEBUG_PREEMPT) && defined(CONFIG_SMP)
extern unsigned int debug_smp_processor_id(void); /* from linux/smp.h */
/*
 * Add standard checks that preemption cannot occur when using get_paca():
 * otherwise the paca_struct it points to may be the wrong one just after.
 */
#define get_paca()	((void) debug_smp_processor_id(), local_paca)
#else
#define get_paca()	local_paca
#endif

#define get_lppaca()	(get_paca()->lppaca_ptr)
#define get_slb_shadow()	(get_paca()->slb_shadow_ptr)

struct task_struct;

/*
 * Defines the layout of the paca.
 *
 * This structure is not directly accessed by firmware or the service
 * processor.
 */
struct paca_struct {
	/*
	 * Because hw_cpu_id, unlike other paca fields, is accessed
	 * routinely from other CPUs (from the IRQ code), we stick to
	 * read-only (after boot) fields in the first cacheline to
	 * avoid cacheline bouncing.
	 */

	struct lppaca *lppaca_ptr;	/* Pointer to LpPaca for PLIC */

	/*
	 * MAGIC: the spinlock functions in arch/powerpc/lib/locks.c
	 * load lock_token and paca_index with a single lwz
	 * instruction.  They must travel together and be properly
	 * aligned.
	 */
	u16 lock_token;			/* Constant 0x8000, used in locks */
	u16 paca_index;			/* Logical processor number */

	u64 kernel_toc;			/* Kernel TOC address */
	u64 stab_real;			/* Absolute address of segment table */
	u64 stab_addr;			/* Virtual address of segment table */
	void *emergency_sp;		/* pointer to emergency stack */
	u64 data_offset;		/* per cpu data offset */
	s16 hw_cpu_id;			/* Physical processor number */
	u8 cpu_start;			/* At startup, processor spins until */
					/* this becomes non-zero. */
	struct slb_shadow *slb_shadow_ptr;

	/*
	 * Now, starting in cacheline 2, the exception save areas
	 */
	/* used for most interrupts/exceptions */
	u64 exgen[10] __attribute__((aligned(0x80)));
	u64 exmc[10];			/* used for machine checks */
	u64 exslb[10];			/* used for SLB/segment table misses
					 * on the linear mapping */

	mm_context_t context;
	u16 vmalloc_sllp;
	u16 slb_cache_ptr;
	u16 slb_cache[SLB_CACHE_ENTRIES];

	/*
	 * then miscellaneous read-write fields
	 */
	struct task_struct *__current;	/* Pointer to current */
	u64 kstack;			/* Saved Kernel stack addr */
	u64 stab_rr;			/* stab/slb round-robin counter */
	u64 saved_r1;			/* r1 save for RTAS calls */
	u64 saved_msr;			/* MSR saved here by enter_rtas */
	u16 trap_save;			/* Used when bad stack is encountered */
	u8 soft_enabled;		/* irq soft-enable flag */
	u8 hard_enabled;		/* set if irqs are enabled in MSR */
	u8 io_sync;			/* writel() needs spin_unlock sync */

	/* Stuff for accurate time accounting */
	u64 user_time;			/* accumulated usermode TB ticks */
	u64 system_time;		/* accumulated system TB ticks */
	u64 startpurr;			/* PURR/TB value snapshot */
	u64 startspurr;			/* SPURR value snapshot */
};

extern struct paca_struct paca[];
extern void initialise_pacas(void);

#endif /* __KERNEL__ */
#endif /* _ASM_POWERPC_PACA_H */
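
/*
 * Illustrative sketch, not part of the header proper: one way a caller might
 * read a PACA field through get_paca().  Preemption is disabled around the
 * access so the task cannot migrate away from the CPU whose PACA r13 points
 * at; with CONFIG_DEBUG_PREEMPT the debug_smp_processor_id() check folded
 * into get_paca() warns if that rule is broken.  The function name
 * example_paca_index() is hypothetical; it is guarded by "#if 0" so it is
 * never compiled.
 */
#if 0	/* example only */
#include <linux/preempt.h>
#include <asm/paca.h>

static inline u16 example_paca_index(void)
{
	u16 index;

	preempt_disable();			/* stay on this CPU so local_paca (r13) remains ours */
	index = get_paca()->paca_index;		/* logical processor number of this CPU */
	preempt_enable();

	return index;
}
#endif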