/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
 * This control block defines the PACA which defines the processor
 * specific data for each logical processor on the system.
 * There are some pointers defined that are utilized by PLIC.
 *
 * C 2001 PPC 64 Team, IBM Corp
 */
#ifndef _ASM_POWERPC_PACA_H
#define _ASM_POWERPC_PACA_H
#ifdef __KERNEL__

#ifdef CONFIG_PPC64

#include <linux/cache.h>
#include <linux/string.h>
#include <asm/types.h>
#include <asm/mmu.h>
#include <asm/page.h>
#ifdef CONFIG_PPC_BOOK3E_64
#include <asm/exception-64e.h>
#else
#include <asm/exception-64s.h>
#endif
#ifdef CONFIG_KVM_BOOK3S_64_HANDLER
#include <asm/kvm_book3s_asm.h>
#endif
#include <asm/accounting.h>
#include <asm/hmi.h>
#include <asm/cpuidle.h>
#include <asm/atomic.h>
#include <asm/mce.h>

#include <asm-generic/mmiowb_types.h>

register struct paca_struct *local_paca asm("r13");

#if defined(CONFIG_DEBUG_PREEMPT) && defined(CONFIG_SMP)
extern unsigned int debug_smp_processor_id(void); /* from linux/smp.h */
/*
 * Add standard checks that preemption cannot occur when using get_paca():
 * otherwise the paca_struct it points to may be the wrong one just after.
 */
#define get_paca()	((void) debug_smp_processor_id(), local_paca)
#else
#define get_paca()	local_paca
#endif
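
/*
 * Illustrative usage sketch (not part of this header's API): callers are
 * expected to keep preemption disabled while the returned pointer is in
 * use, e.g.
 *
 *	preempt_disable();
 *	idx = get_paca()->paca_index;
 *	preempt_enable();
 *
 * With CONFIG_DEBUG_PREEMPT, debug_smp_processor_id() will warn if this
 * is done from preemptible context.
 */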

#define get_slb_shadow()	(get_paca()->slb_shadow_ptr)

struct task_struct;
struct rtas_args;
struct lppaca;

/*
 * Defines the layout of the paca.
 *
 * This structure is not directly accessed by firmware or the service
 * processor.
 */
struct paca_struct {
#ifdef CONFIG_PPC_PSERIES
	/*
	 * Because hw_cpu_id, unlike other paca fields, is accessed
	 * routinely from other CPUs (from the IRQ code), we stick to
	 * read-only (after boot) fields in the first cacheline to
	 * avoid cacheline bouncing.
	 */

	struct lppaca *lppaca_ptr;	/* Pointer to LpPaca for PLIC */
#endif /* CONFIG_PPC_PSERIES */
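	/*
	 * Note: hw_cpu_id, which the comment above refers to, is declared
	 * further down in this structure; other CPUs typically reach it
	 * through paca_ptrs[], e.g. get_hard_smp_processor_id(cpu) reads
	 * paca_ptrs[cpu]->hw_cpu_id.
	 */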

	/*
	 * MAGIC: the spinlock functions in arch/powerpc/lib/locks.c
	 * load lock_token and paca_index with a single lwz
	 * instruction. They must travel together and be properly
	 * aligned.
	 */
#ifdef __BIG_ENDIAN__
	u16 lock_token;			/* Constant 0x8000, used in locks */
	u16 paca_index;			/* Logical processor number */
#else
	u16 paca_index;			/* Logical processor number */
	u16 lock_token;			/* Constant 0x8000, used in locks */
#endif
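	/*
	 * Layout sketch for the note above: because the field order is
	 * swapped for little-endian, a single 32-bit load covering this
	 * pair yields the same register value on both endiannesses --
	 * lock_token (0x8000) in the upper halfword and paca_index in the
	 * lower halfword.
	 */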

#ifndef CONFIG_PPC_KERNEL_PCREL
	u64 kernel_toc;			/* Kernel TOC address */
#endif
	u64 kernelbase;			/* Base address of kernel */
	u64 kernel_msr;			/* MSR while running in kernel */
	void *emergency_sp;		/* pointer to emergency stack */
	u64 data_offset;		/* per cpu data offset */
	s16 hw_cpu_id;			/* Physical processor number */
	u8 cpu_start;			/* At startup, processor spins until */
					/* this becomes non-zero. */
	u8 kexec_state;			/* set while going down for kexec; irqs are off */
#ifdef CONFIG_PPC_BOOK3S_64
#ifdef CONFIG_PPC_64S_HASH_MMU
	struct slb_shadow *slb_shadow_ptr;
#endif
	struct dtl_entry *dispatch_log;
	struct dtl_entry *dispatch_log_end;
#endif
	u64 dscr_default;		/* per-CPU default DSCR */

#ifdef CONFIG_PPC_BOOK3S_64
	/*
	 * Now, starting in cacheline 2, the exception save areas
	 */
	/* used for most interrupts/exceptions */
	u64 exgen[EX_SIZE] __attribute__((aligned(0x80)));

#ifdef CONFIG_PPC_64S_HASH_MMU
	/* SLB related definitions */
	u16 vmalloc_sllp;
	u8 slb_cache_ptr;
	u8 stab_rr;			/* stab/slb round-robin counter */
#ifdef CONFIG_DEBUG_VM
	u8 in_kernel_slb_handler;
#endif
	u32 slb_used_bitmap;		/* Bitmaps for first 32 SLB entries. */
	u32 slb_kern_bitmap;
	u32 slb_cache[SLB_CACHE_ENTRIES];
#endif
#endif /* CONFIG_PPC_BOOK3S_64 */

#ifdef CONFIG_PPC_BOOK3E_64
	u64 exgen[8] __aligned(0x40);
	/* Keep pgd in the same cacheline as the start of extlb */
	pgd_t *pgd __aligned(0x40);	/* Current PGD */
	pgd_t *kernel_pgd;		/* Kernel PGD */

	/* Shared by all threads of a core -- points to tcd of first thread */
	struct tlb_core_data *tcd_ptr;

	/*
	 * We can have up to 3 levels of reentrancy in the TLB miss handler,
	 * in each of four exception levels (normal, crit, mcheck, debug).
	 */
	u64 extlb[12][EX_TLB_SIZE / sizeof(u64)];
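	/* i.e. 3 reentrancy levels x 4 exception levels = 12 save areas */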
	u64 exmc[8];			/* used for machine checks */
	u64 excrit[8];			/* used for crit interrupts */
	u64 exdbg[8];			/* used for debug interrupts */

	/* Kernel stack pointers for use by special exceptions */
	void *mc_kstack;
	void *crit_kstack;
	void *dbg_kstack;

	struct tlb_core_data tcd;
#endif /* CONFIG_PPC_BOOK3E_64 */

#ifdef CONFIG_PPC_64S_HASH_MMU
	unsigned char mm_ctx_low_slices_psize[BITS_PER_LONG / BITS_PER_BYTE];
	unsigned char mm_ctx_high_slices_psize[SLICE_ARRAY_SIZE];
#endif

	/*
	 * then miscellaneous read-write fields
	 */
	struct task_struct *__current;	/* Pointer to current */
	u64 kstack;			/* Saved Kernel stack addr */
	u64 saved_r1;			/* r1 save for RTAS calls or PM or EE=0 */
	u64 saved_msr;			/* MSR saved here by enter_rtas */
#ifdef CONFIG_PPC64
	u64 exit_save_r1;		/* Syscall/interrupt R1 save */
#endif
#ifdef CONFIG_PPC_BOOK3E_64
	u16 trap_save;			/* Used when bad stack is encountered */
#endif
#ifdef CONFIG_PPC_BOOK3S_64
	u8 hsrr_valid;			/* HSRRs set for HRFID */
	u8 srr_valid;			/* SRRs set for RFID */
#endif
	u8 irq_soft_mask;		/* mask for irq soft masking */
	u8 irq_happened;		/* irq happened while soft-disabled */
	u8 irq_work_pending;		/* IRQ_WORK interrupt while soft-disabled */
#ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
	u8 pmcregs_in_use;		/* pseries puts this in lppaca */
#endif
	u64 sprg_vdso;			/* Saved user-visible sprg */
#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
	u64 tm_scratch;			/* TM scratch area for reclaim */
#endif

#ifdef CONFIG_PPC_POWERNV
	/* PowerNV idle fields */
	/* PNV_CORE_IDLE_* bits, all siblings work on thread 0 paca */
	unsigned long idle_lock;	/* A value of 1 means acquired */
	unsigned long idle_state;
	union {
		/* P7/P8 specific fields */
		struct {
			/* PNV_THREAD_RUNNING/NAP/SLEEP */
			u8 thread_idle_state;
			/* Mask to denote subcore sibling threads */
			u8 subcore_sibling_mask;
		};

		/* P9 specific fields */
		struct {
#ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
			/* The PSSCR value that the kernel requested before going to stop */
			u64 requested_psscr;
			/* Flag to request this thread not to stop */
			atomic_t dont_stop;
#endif
		};
	};
#endif

#ifdef CONFIG_PPC_BOOK3S_64
	/* Non-maskable exceptions that are not performance critical */
	u64 exnmi[EX_SIZE];		/* used for system reset (nmi) */
	u64 exmc[EX_SIZE];		/* used for machine checks */
#endif
#ifdef CONFIG_PPC_BOOK3S_64
	/* Exclusive stacks for system reset and machine check exception. */
	void *nmi_emergency_sp;
	void *mc_emergency_sp;

	u16 in_nmi;			/* In nmi handler */

	/*
	 * Flag to check whether we are in machine check early handler
	 * and already using emergency stack.
	 */
	u16 in_mce;
	u8 hmi_event_available;		/* HMI event is available */
	u8 hmi_p9_special_emu;		/* HMI P9 special emulation */
	u32 hmi_irqs;			/* HMI irq stat */
#endif
	u8 ftrace_enabled;		/* Hard disable ftrace */

	/* Stuff for accurate time accounting */
	struct cpu_accounting_data accounting;
	u64 dtl_ridx;			/* read index in dispatch log */
	struct dtl_entry *dtl_curr;	/* pointer corresponding to dtl_ridx */

#ifdef CONFIG_KVM_BOOK3S_HANDLER
#ifdef CONFIG_KVM_BOOK3S_PR_POSSIBLE
	/* We use this to store guest state in */
	struct kvmppc_book3s_shadow_vcpu shadow_vcpu;
#endif
	struct kvmppc_host_state kvm_hstate;
#ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
	/*
	 * Bitmap for sibling subcore status. See kvm/book3s_hv_ras.c for
	 * more details
	 */
	struct sibling_subcore_state *sibling_subcore_state;
#endif
#endif
#ifdef CONFIG_PPC_BOOK3S_64
	/*
	 * rfi fallback flush must be in its own cacheline to prevent
	 * other paca data leaking into the L1d
	 */
	u64 exrfi[EX_SIZE] __aligned(0x80);
	void *rfi_flush_fallback_area;
	u64 l1d_flush_size;
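	/*
	 * Informational note: 0x80 is the 128-byte L1 cacheline size on
	 * 64-bit Book3S CPUs; the rfi/entry flush fallback code reads
	 * rfi_flush_fallback_area and l1d_flush_size from here to perform
	 * a displacement flush of the L1d.
	 */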
#endif
#ifdef CONFIG_PPC_PSERIES
	u8 *mce_data_buf;		/* buffer to hold per cpu rtas errlog */
#endif /* CONFIG_PPC_PSERIES */

#ifdef CONFIG_PPC_BOOK3S_64
#ifdef CONFIG_PPC_64S_HASH_MMU
	/* Capture SLB related old contents in MCE handler. */
	struct slb_entry *mce_faulty_slbs;
	u16 slb_save_cache_ptr;
#endif
#endif /* CONFIG_PPC_BOOK3S_64 */
#ifdef CONFIG_STACKPROTECTOR
	unsigned long canary;
#endif
#ifdef CONFIG_MMIOWB
	struct mmiowb_state mmiowb_state;
#endif
#ifdef CONFIG_PPC_BOOK3S_64
	struct mce_info *mce_info;
	u8 mce_pending_irq_work;
#endif /* CONFIG_PPC_BOOK3S_64 */
} ____cacheline_aligned;

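/*
 * Rough boot-time ordering (a sketch; see arch/powerpc/kernel/paca.c and
 * the setup code for the authoritative sequence): allocate_paca_ptrs()
 * sets up the paca_ptrs[] array, allocate_paca() allocates and
 * initialise_paca() fills in a CPU's entry, setup_paca() points r13
 * (local_paca) at the given paca, and free_unused_pacas() releases
 * space reserved for CPUs that will never come online.
 */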
extern void copy_mm_to_paca(struct mm_struct *mm);
extern struct paca_struct **paca_ptrs;
extern void initialise_paca(struct paca_struct *new_paca, int cpu);
extern void setup_paca(struct paca_struct *new_paca);
extern void allocate_paca_ptrs(void);
extern void allocate_paca(int cpu);
extern void free_unused_pacas(void);

#else /* CONFIG_PPC64 */

static inline void allocate_paca(int cpu) { }
static inline void free_unused_pacas(void) { }

#endif /* CONFIG_PPC64 */

#endif /* __KERNEL__ */
#endif /* _ASM_POWERPC_PACA_H */