// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * This program is used to generate definitions needed by
 * assembly language modules.
 *
 * We use the technique used in the OSF Mach kernel code:
 * generate asm statements containing #defines,
 * compile this file to assembler, and then extract the
 * #defines from the assembly-language output.
 */
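/*
 * Rough sketch of the mechanism, for orientation only (the real macros
 * live in <linux/kbuild.h> and the extraction step in the kbuild
 * makefiles): DEFINE() emits an .ascii marker whose operand uses an
 * "i" (immediate) constraint, so the constant value survives into the
 * generated assembly, roughly:
 *
 *	#define DEFINE(sym, val) \
 *		asm volatile("\n.ascii \"->" #sym " %0 " #val "\"" : : "i" (val))
 *	#define OFFSET(sym, str, mem) \
 *		DEFINE(sym, offsetof(struct str, mem))
 *
 * The build then pulls every "->" line out of the .s output and turns
 * it into a "#define sym value" entry in the generated asm-offsets.h,
 * which assembly files include.
 */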

#define GENERATING_ASM_OFFSETS	/* asm/smp.h */

#include <linux/compat.h>
#include <linux/signal.h>
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/string.h>
#include <linux/types.h>
#include <linux/mman.h>
#include <linux/mm.h>
#include <linux/suspend.h>
#include <linux/hrtimer.h>
#ifdef CONFIG_PPC64
#include <linux/time.h>
#include <linux/hardirq.h>
#endif
#include <linux/kbuild.h>

#include <asm/io.h>
#include <asm/page.h>
#include <asm/processor.h>
#include <asm/cputable.h>
#include <asm/thread_info.h>
#include <asm/rtas.h>
#include <asm/vdso_datapage.h>
#include <asm/dbell.h>
#ifdef CONFIG_PPC64
#include <asm/paca.h>
#include <asm/lppaca.h>
#include <asm/cache.h>
#include <asm/mmu.h>
#include <asm/hvcall.h>
#include <asm/xics.h>
#endif
#ifdef CONFIG_PPC_POWERNV
#include <asm/opal.h>
#endif
#if defined(CONFIG_KVM) || defined(CONFIG_KVM_GUEST)
#include <linux/kvm_host.h>
#endif
#if defined(CONFIG_KVM) && defined(CONFIG_PPC_BOOK3S)
#include <asm/kvm_book3s.h>
#include <asm/kvm_ppc.h>
#endif

#ifdef CONFIG_PPC32
#if defined(CONFIG_BOOKE) || defined(CONFIG_40x)
#include "head_booke.h"
#endif
#endif

#if defined(CONFIG_PPC_FSL_BOOK3E)
#include "../mm/mmu_decl.h"
#endif

#ifdef CONFIG_PPC_8xx
#include <asm/fixmap.h>
#endif

#ifdef CONFIG_XMON
#include "../xmon/xmon_bpts.h"
#endif

#define STACK_PT_REGS_OFFSET(sym, val)	\
	DEFINE(sym, STACK_FRAME_OVERHEAD + offsetof(struct pt_regs, val))
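
/*
 * Illustrative note (not part of the generated offsets): a use such as
 * STACK_PT_REGS_OFFSET(GPR3, gpr[3]) below makes GPR3 resolve to
 * STACK_FRAME_OVERHEAD + offsetof(struct pt_regs, gpr[3]), so assembly
 * can address the register save area relative to the stack pointer,
 * e.g. "std r3, GPR3(r1)" on 64-bit.
 */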

int main(void)
{
	OFFSET(THREAD, task_struct, thread);
	OFFSET(MM, task_struct, mm);
#ifdef CONFIG_STACKPROTECTOR
	OFFSET(TASK_CANARY, task_struct, stack_canary);
#ifdef CONFIG_PPC64
	OFFSET(PACA_CANARY, paca_struct, canary);
#endif
#endif
	OFFSET(MMCONTEXTID, mm_struct, context.id);
#ifdef CONFIG_PPC64
	DEFINE(SIGSEGV, SIGSEGV);
	DEFINE(NMI_MASK, NMI_MASK);
#else
	OFFSET(KSP_LIMIT, thread_struct, ksp_limit);
#ifdef CONFIG_PPC_RTAS
	OFFSET(RTAS_SP, thread_struct, rtas_sp);
#endif
#endif /* CONFIG_PPC64 */
	OFFSET(TASK_STACK, task_struct, stack);
#ifdef CONFIG_SMP
	OFFSET(TASK_CPU, task_struct, cpu);
#endif

#ifdef CONFIG_LIVEPATCH
	OFFSET(TI_livepatch_sp, thread_info, livepatch_sp);
#endif

	OFFSET(KSP, thread_struct, ksp);
	OFFSET(PT_REGS, thread_struct, regs);
#ifdef CONFIG_BOOKE
	OFFSET(THREAD_NORMSAVES, thread_struct, normsave[0]);
#endif
#ifdef CONFIG_PPC_FPU
	OFFSET(THREAD_FPEXC_MODE, thread_struct, fpexc_mode);
	OFFSET(THREAD_FPSTATE, thread_struct, fp_state.fpr);
	OFFSET(THREAD_FPSAVEAREA, thread_struct, fp_save_area);
#endif
	OFFSET(FPSTATE_FPSCR, thread_fp_state, fpscr);
	OFFSET(THREAD_LOAD_FP, thread_struct, load_fp);
#ifdef CONFIG_ALTIVEC
	OFFSET(THREAD_VRSTATE, thread_struct, vr_state.vr);
	OFFSET(THREAD_VRSAVEAREA, thread_struct, vr_save_area);
	OFFSET(THREAD_VRSAVE, thread_struct, vrsave);
	OFFSET(THREAD_USED_VR, thread_struct, used_vr);
	OFFSET(VRSTATE_VSCR, thread_vr_state, vscr);
	OFFSET(THREAD_LOAD_VEC, thread_struct, load_vec);
#endif /* CONFIG_ALTIVEC */
#ifdef CONFIG_VSX
	OFFSET(THREAD_USED_VSR, thread_struct, used_vsr);
#endif /* CONFIG_VSX */
#ifdef CONFIG_PPC64
	OFFSET(KSP_VSID, thread_struct, ksp_vsid);
#else /* CONFIG_PPC64 */
	OFFSET(PGDIR, thread_struct, pgdir);
#ifdef CONFIG_VMAP_STACK
	OFFSET(SRR0, thread_struct, srr0);
	OFFSET(SRR1, thread_struct, srr1);
	OFFSET(DAR, thread_struct, dar);
	OFFSET(DSISR, thread_struct, dsisr);
#ifdef CONFIG_PPC_BOOK3S_32
	OFFSET(THR0, thread_struct, r0);
	OFFSET(THR3, thread_struct, r3);
	OFFSET(THR4, thread_struct, r4);
	OFFSET(THR5, thread_struct, r5);
	OFFSET(THR6, thread_struct, r6);
	OFFSET(THR8, thread_struct, r8);
	OFFSET(THR9, thread_struct, r9);
	OFFSET(THR11, thread_struct, r11);
	OFFSET(THLR, thread_struct, lr);
	OFFSET(THCTR, thread_struct, ctr);
#endif
#endif
#ifdef CONFIG_SPE
	OFFSET(THREAD_EVR0, thread_struct, evr[0]);
	OFFSET(THREAD_ACC, thread_struct, acc);
	OFFSET(THREAD_SPEFSCR, thread_struct, spefscr);
	OFFSET(THREAD_USED_SPE, thread_struct, used_spe);
#endif /* CONFIG_SPE */
#endif /* CONFIG_PPC64 */
#if defined(CONFIG_4xx) || defined(CONFIG_BOOKE)
	OFFSET(THREAD_DBCR0, thread_struct, debug.dbcr0);
#endif
#ifdef CONFIG_KVM_BOOK3S_32_HANDLER
	OFFSET(THREAD_KVM_SVCPU, thread_struct, kvm_shadow_vcpu);
#endif
#if defined(CONFIG_KVM) && defined(CONFIG_BOOKE)
	OFFSET(THREAD_KVM_VCPU, thread_struct, kvm_vcpu);
#endif
#if defined(CONFIG_PPC_BOOK3S_32) && defined(CONFIG_PPC_KUAP)
	OFFSET(KUAP, thread_struct, kuap);
#endif

#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
	OFFSET(PACATMSCRATCH, paca_struct, tm_scratch);
	OFFSET(THREAD_TM_TFHAR, thread_struct, tm_tfhar);
	OFFSET(THREAD_TM_TEXASR, thread_struct, tm_texasr);
	OFFSET(THREAD_TM_TFIAR, thread_struct, tm_tfiar);
	OFFSET(THREAD_TM_TAR, thread_struct, tm_tar);
	OFFSET(THREAD_TM_PPR, thread_struct, tm_ppr);
	OFFSET(THREAD_TM_DSCR, thread_struct, tm_dscr);
	OFFSET(THREAD_TM_AMR, thread_struct, tm_amr);
	OFFSET(PT_CKPT_REGS, thread_struct, ckpt_regs);
	OFFSET(THREAD_CKVRSTATE, thread_struct, ckvr_state.vr);
	OFFSET(THREAD_CKVRSAVE, thread_struct, ckvrsave);
	OFFSET(THREAD_CKFPSTATE, thread_struct, ckfp_state.fpr);
	/* Local pt_regs on stack for Transactional Memory funcs. */
	DEFINE(TM_FRAME_SIZE, STACK_FRAME_OVERHEAD +
	       sizeof(struct pt_regs) + 16);
#endif /* CONFIG_PPC_TRANSACTIONAL_MEM */

	OFFSET(TI_FLAGS, thread_info, flags);
	OFFSET(TI_LOCAL_FLAGS, thread_info, local_flags);
	OFFSET(TI_PREEMPT, thread_info, preempt_count);

#ifdef CONFIG_PPC64
	OFFSET(DCACHEL1BLOCKSIZE, ppc64_caches, l1d.block_size);
	OFFSET(DCACHEL1LOGBLOCKSIZE, ppc64_caches, l1d.log_block_size);
	OFFSET(DCACHEL1BLOCKSPERPAGE, ppc64_caches, l1d.blocks_per_page);
	OFFSET(ICACHEL1BLOCKSIZE, ppc64_caches, l1i.block_size);
	OFFSET(ICACHEL1LOGBLOCKSIZE, ppc64_caches, l1i.log_block_size);
	OFFSET(ICACHEL1BLOCKSPERPAGE, ppc64_caches, l1i.blocks_per_page);
	/* paca */
	DEFINE(PACA_SIZE, sizeof(struct paca_struct));
	OFFSET(PACAPACAINDEX, paca_struct, paca_index);
	OFFSET(PACAPROCSTART, paca_struct, cpu_start);
	OFFSET(PACAKSAVE, paca_struct, kstack);
	OFFSET(PACACURRENT, paca_struct, __current);
	DEFINE(PACA_THREAD_INFO, offsetof(struct paca_struct, __current) +
				 offsetof(struct task_struct, thread_info));
	OFFSET(PACASAVEDMSR, paca_struct, saved_msr);
	OFFSET(PACAR1, paca_struct, saved_r1);
	OFFSET(PACATOC, paca_struct, kernel_toc);
	OFFSET(PACAKBASE, paca_struct, kernelbase);
	OFFSET(PACAKMSR, paca_struct, kernel_msr);
	OFFSET(PACAIRQSOFTMASK, paca_struct, irq_soft_mask);
	OFFSET(PACAIRQHAPPENED, paca_struct, irq_happened);
	OFFSET(PACA_FTRACE_ENABLED, paca_struct, ftrace_enabled);
#ifdef CONFIG_PPC_BOOK3S
	OFFSET(PACACONTEXTID, paca_struct, mm_ctx_id);
#ifdef CONFIG_PPC_MM_SLICES
	OFFSET(PACALOWSLICESPSIZE, paca_struct, mm_ctx_low_slices_psize);
	OFFSET(PACAHIGHSLICEPSIZE, paca_struct, mm_ctx_high_slices_psize);
	OFFSET(PACA_SLB_ADDR_LIMIT, paca_struct, mm_ctx_slb_addr_limit);
	DEFINE(MMUPSIZEDEFSIZE, sizeof(struct mmu_psize_def));
#endif /* CONFIG_PPC_MM_SLICES */
#endif

#ifdef CONFIG_PPC_BOOK3E
	OFFSET(PACAPGD, paca_struct, pgd);
	OFFSET(PACA_KERNELPGD, paca_struct, kernel_pgd);
	OFFSET(PACA_EXGEN, paca_struct, exgen);
	OFFSET(PACA_EXTLB, paca_struct, extlb);
	OFFSET(PACA_EXMC, paca_struct, exmc);
	OFFSET(PACA_EXCRIT, paca_struct, excrit);
	OFFSET(PACA_EXDBG, paca_struct, exdbg);
	OFFSET(PACA_MC_STACK, paca_struct, mc_kstack);
	OFFSET(PACA_CRIT_STACK, paca_struct, crit_kstack);
	OFFSET(PACA_DBG_STACK, paca_struct, dbg_kstack);
	OFFSET(PACA_TCD_PTR, paca_struct, tcd_ptr);

	OFFSET(TCD_ESEL_NEXT, tlb_core_data, esel_next);
	OFFSET(TCD_ESEL_MAX, tlb_core_data, esel_max);
	OFFSET(TCD_ESEL_FIRST, tlb_core_data, esel_first);
#endif /* CONFIG_PPC_BOOK3E */

#ifdef CONFIG_PPC_BOOK3S_64
	OFFSET(PACASLBCACHE, paca_struct, slb_cache);
	OFFSET(PACASLBCACHEPTR, paca_struct, slb_cache_ptr);
	OFFSET(PACASTABRR, paca_struct, stab_rr);
	OFFSET(PACAVMALLOCSLLP, paca_struct, vmalloc_sllp);
#ifdef CONFIG_PPC_MM_SLICES
	OFFSET(MMUPSIZESLLP, mmu_psize_def, sllp);
#else
	OFFSET(PACACONTEXTSLLP, paca_struct, mm_ctx_sllp);
#endif /* CONFIG_PPC_MM_SLICES */
	OFFSET(PACA_EXGEN, paca_struct, exgen);
	OFFSET(PACA_EXMC, paca_struct, exmc);
	OFFSET(PACA_EXNMI, paca_struct, exnmi);
#ifdef CONFIG_PPC_PSERIES
	OFFSET(PACALPPACAPTR, paca_struct, lppaca_ptr);
#endif
	OFFSET(PACA_SLBSHADOWPTR, paca_struct, slb_shadow_ptr);
	OFFSET(SLBSHADOW_STACKVSID, slb_shadow, save_area[SLB_NUM_BOLTED - 1].vsid);
	OFFSET(SLBSHADOW_STACKESID, slb_shadow, save_area[SLB_NUM_BOLTED - 1].esid);
	OFFSET(SLBSHADOW_SAVEAREA, slb_shadow, save_area);
	OFFSET(LPPACA_PMCINUSE, lppaca, pmcregs_in_use);
#ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
	OFFSET(PACA_PMCINUSE, paca_struct, pmcregs_in_use);
#endif
	OFFSET(LPPACA_DTLIDX, lppaca, dtl_idx);
	OFFSET(LPPACA_YIELDCOUNT, lppaca, yield_count);
	OFFSET(PACA_DTL_RIDX, paca_struct, dtl_ridx);
#endif /* CONFIG_PPC_BOOK3S_64 */
	OFFSET(PACAEMERGSP, paca_struct, emergency_sp);
#ifdef CONFIG_PPC_BOOK3S_64
	OFFSET(PACAMCEMERGSP, paca_struct, mc_emergency_sp);
	OFFSET(PACA_NMI_EMERG_SP, paca_struct, nmi_emergency_sp);
	OFFSET(PACA_IN_MCE, paca_struct, in_mce);
	OFFSET(PACA_IN_NMI, paca_struct, in_nmi);
	OFFSET(PACA_RFI_FLUSH_FALLBACK_AREA, paca_struct, rfi_flush_fallback_area);
	OFFSET(PACA_EXRFI, paca_struct, exrfi);
	OFFSET(PACA_L1D_FLUSH_SIZE, paca_struct, l1d_flush_size);

#endif
	OFFSET(PACAHWCPUID, paca_struct, hw_cpu_id);
	OFFSET(PACAKEXECSTATE, paca_struct, kexec_state);
	OFFSET(PACA_DSCR_DEFAULT, paca_struct, dscr_default);
	OFFSET(ACCOUNT_STARTTIME, paca_struct, accounting.starttime);
	OFFSET(ACCOUNT_STARTTIME_USER, paca_struct, accounting.starttime_user);
	OFFSET(ACCOUNT_USER_TIME, paca_struct, accounting.utime);
	OFFSET(ACCOUNT_SYSTEM_TIME, paca_struct, accounting.stime);
#ifdef CONFIG_PPC_BOOK3E
	OFFSET(PACA_TRAP_SAVE, paca_struct, trap_save);
#endif
	OFFSET(PACA_SPRG_VDSO, paca_struct, sprg_vdso);
#else /* CONFIG_PPC64 */
#ifdef CONFIG_VIRT_CPU_ACCOUNTING_NATIVE
	OFFSET(ACCOUNT_STARTTIME, thread_info, accounting.starttime);
	OFFSET(ACCOUNT_STARTTIME_USER, thread_info, accounting.starttime_user);
	OFFSET(ACCOUNT_USER_TIME, thread_info, accounting.utime);
	OFFSET(ACCOUNT_SYSTEM_TIME, thread_info, accounting.stime);
#endif
#endif /* CONFIG_PPC64 */

	/* RTAS */
	OFFSET(RTASBASE, rtas_t, base);
	OFFSET(RTASENTRY, rtas_t, entry);

	/* Interrupt register frame */
	DEFINE(INT_FRAME_SIZE, STACK_INT_FRAME_SIZE);
	DEFINE(SWITCH_FRAME_SIZE, STACK_FRAME_WITH_PT_REGS);
	STACK_PT_REGS_OFFSET(GPR0, gpr[0]);
	STACK_PT_REGS_OFFSET(GPR1, gpr[1]);
	STACK_PT_REGS_OFFSET(GPR2, gpr[2]);
	STACK_PT_REGS_OFFSET(GPR3, gpr[3]);
	STACK_PT_REGS_OFFSET(GPR4, gpr[4]);
	STACK_PT_REGS_OFFSET(GPR5, gpr[5]);
	STACK_PT_REGS_OFFSET(GPR6, gpr[6]);
	STACK_PT_REGS_OFFSET(GPR7, gpr[7]);
	STACK_PT_REGS_OFFSET(GPR8, gpr[8]);
	STACK_PT_REGS_OFFSET(GPR9, gpr[9]);
	STACK_PT_REGS_OFFSET(GPR10, gpr[10]);
	STACK_PT_REGS_OFFSET(GPR11, gpr[11]);
	STACK_PT_REGS_OFFSET(GPR12, gpr[12]);
	STACK_PT_REGS_OFFSET(GPR13, gpr[13]);
#ifndef CONFIG_PPC64
	STACK_PT_REGS_OFFSET(GPR14, gpr[14]);
#endif /* CONFIG_PPC64 */
	/*
	 * Note: these symbols include _ because they overlap with special
	 * register names
	 */
	STACK_PT_REGS_OFFSET(_NIP, nip);
	STACK_PT_REGS_OFFSET(_MSR, msr);
	STACK_PT_REGS_OFFSET(_CTR, ctr);
	STACK_PT_REGS_OFFSET(_LINK, link);
	STACK_PT_REGS_OFFSET(_CCR, ccr);
	STACK_PT_REGS_OFFSET(_XER, xer);
	STACK_PT_REGS_OFFSET(_DAR, dar);
	STACK_PT_REGS_OFFSET(_DSISR, dsisr);
	STACK_PT_REGS_OFFSET(ORIG_GPR3, orig_gpr3);
	STACK_PT_REGS_OFFSET(RESULT, result);
	STACK_PT_REGS_OFFSET(_TRAP, trap);
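	/*
	 * Illustrative usage (not part of the generated offsets): interrupt
	 * entry/exit assembly addresses the saved registers through these
	 * symbols, e.g. "ld r4, _NIP(r1)" on 64-bit reads the saved nip out
	 * of the pt_regs frame on the kernel stack.
	 */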
#ifndef CONFIG_PPC64
	/*
	 * The PowerPC 400-class & Book-E processors have neither the DAR
	 * nor the DSISR SPRs. Hence, we overload them to hold the similar
	 * DEAR and ESR SPRs for such processors.  For critical interrupts
	 * we use them to hold SRR0 and SRR1.
	 */
	STACK_PT_REGS_OFFSET(_DEAR, dar);
	STACK_PT_REGS_OFFSET(_ESR, dsisr);
#else /* CONFIG_PPC64 */
	STACK_PT_REGS_OFFSET(SOFTE, softe);
	STACK_PT_REGS_OFFSET(_PPR, ppr);
#endif /* CONFIG_PPC64 */

#ifdef CONFIG_PPC_PKEY
	STACK_PT_REGS_OFFSET(STACK_REGS_AMR, amr);
	STACK_PT_REGS_OFFSET(STACK_REGS_IAMR, iamr);
#endif
#ifdef CONFIG_PPC_KUAP
	STACK_PT_REGS_OFFSET(STACK_REGS_KUAP, kuap);
#endif


#if defined(CONFIG_PPC32)
#if defined(CONFIG_BOOKE) || defined(CONFIG_40x)
	DEFINE(EXC_LVL_SIZE, STACK_EXC_LVL_FRAME_SIZE);
	DEFINE(MAS0, STACK_INT_FRAME_SIZE+offsetof(struct exception_regs, mas0));
	/* we overload MMUCR for 44x on MAS0 since they are mutually exclusive */
	DEFINE(MMUCR, STACK_INT_FRAME_SIZE+offsetof(struct exception_regs, mas0));
	DEFINE(MAS1, STACK_INT_FRAME_SIZE+offsetof(struct exception_regs, mas1));
	DEFINE(MAS2, STACK_INT_FRAME_SIZE+offsetof(struct exception_regs, mas2));
	DEFINE(MAS3, STACK_INT_FRAME_SIZE+offsetof(struct exception_regs, mas3));
	DEFINE(MAS6, STACK_INT_FRAME_SIZE+offsetof(struct exception_regs, mas6));
	DEFINE(MAS7, STACK_INT_FRAME_SIZE+offsetof(struct exception_regs, mas7));
	DEFINE(_SRR0, STACK_INT_FRAME_SIZE+offsetof(struct exception_regs, srr0));
	DEFINE(_SRR1, STACK_INT_FRAME_SIZE+offsetof(struct exception_regs, srr1));
	DEFINE(_CSRR0, STACK_INT_FRAME_SIZE+offsetof(struct exception_regs, csrr0));
	DEFINE(_CSRR1, STACK_INT_FRAME_SIZE+offsetof(struct exception_regs, csrr1));
	DEFINE(_DSRR0, STACK_INT_FRAME_SIZE+offsetof(struct exception_regs, dsrr0));
	DEFINE(_DSRR1, STACK_INT_FRAME_SIZE+offsetof(struct exception_regs, dsrr1));
	DEFINE(SAVED_KSP_LIMIT, STACK_INT_FRAME_SIZE+offsetof(struct exception_regs, saved_ksp_limit));
#endif
#endif

#ifndef CONFIG_PPC64
	OFFSET(MM_PGD, mm_struct, pgd);
#endif /* ! CONFIG_PPC64 */

	/* About the CPU features table */
	OFFSET(CPU_SPEC_FEATURES, cpu_spec, cpu_features);
	OFFSET(CPU_SPEC_SETUP, cpu_spec, cpu_setup);
	OFFSET(CPU_SPEC_RESTORE, cpu_spec, cpu_restore);

	OFFSET(pbe_address, pbe, address);
	OFFSET(pbe_orig_address, pbe, orig_address);
	OFFSET(pbe_next, pbe, next);

#ifndef CONFIG_PPC64
	DEFINE(TASK_SIZE, TASK_SIZE);
	DEFINE(NUM_USER_SEGMENTS, TASK_SIZE>>28);
#endif /* ! CONFIG_PPC64 */

	/* datapage offsets for use by vdso */
	OFFSET(VDSO_DATA_OFFSET, vdso_arch_data, data);
	OFFSET(CFG_TB_TICKS_PER_SEC, vdso_arch_data, tb_ticks_per_sec);
#ifdef CONFIG_PPC64
	OFFSET(CFG_ICACHE_BLOCKSZ, vdso_arch_data, icache_block_size);
	OFFSET(CFG_DCACHE_BLOCKSZ, vdso_arch_data, dcache_block_size);
	OFFSET(CFG_ICACHE_LOGBLOCKSZ, vdso_arch_data, icache_log_block_size);
	OFFSET(CFG_DCACHE_LOGBLOCKSZ, vdso_arch_data, dcache_log_block_size);
	OFFSET(CFG_SYSCALL_MAP64, vdso_arch_data, syscall_map);
	OFFSET(CFG_SYSCALL_MAP32, vdso_arch_data, compat_syscall_map);
#else
	OFFSET(CFG_SYSCALL_MAP32, vdso_arch_data, syscall_map);
#endif

#ifdef CONFIG_BUG
	DEFINE(BUG_ENTRY_SIZE, sizeof(struct bug_entry));
#endif

#ifdef CONFIG_PPC_BOOK3S_64
	DEFINE(PGD_TABLE_SIZE, (sizeof(pgd_t) << max(RADIX_PGD_INDEX_SIZE, H_PGD_INDEX_SIZE)));
#else
	DEFINE(PGD_TABLE_SIZE, PGD_TABLE_SIZE);
#endif
	DEFINE(PTE_SIZE, sizeof(pte_t));

#ifdef CONFIG_KVM
	OFFSET(VCPU_HOST_STACK, kvm_vcpu, arch.host_stack);
	OFFSET(VCPU_HOST_PID, kvm_vcpu, arch.host_pid);
	OFFSET(VCPU_GUEST_PID, kvm_vcpu, arch.pid);
	OFFSET(VCPU_GPRS, kvm_vcpu, arch.regs.gpr);
	OFFSET(VCPU_VRSAVE, kvm_vcpu, arch.vrsave);
	OFFSET(VCPU_FPRS, kvm_vcpu, arch.fp.fpr);
#ifdef CONFIG_ALTIVEC
	OFFSET(VCPU_VRS, kvm_vcpu, arch.vr.vr);
#endif
	OFFSET(VCPU_XER, kvm_vcpu, arch.regs.xer);
	OFFSET(VCPU_CTR, kvm_vcpu, arch.regs.ctr);
	OFFSET(VCPU_LR, kvm_vcpu, arch.regs.link);
#ifdef CONFIG_PPC_BOOK3S
	OFFSET(VCPU_TAR, kvm_vcpu, arch.tar);
#endif
	OFFSET(VCPU_CR, kvm_vcpu, arch.regs.ccr);
	OFFSET(VCPU_PC, kvm_vcpu, arch.regs.nip);
#ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
	OFFSET(VCPU_MSR, kvm_vcpu, arch.shregs.msr);
	OFFSET(VCPU_SRR0, kvm_vcpu, arch.shregs.srr0);
	OFFSET(VCPU_SRR1, kvm_vcpu, arch.shregs.srr1);
	OFFSET(VCPU_SPRG0, kvm_vcpu, arch.shregs.sprg0);
	OFFSET(VCPU_SPRG1, kvm_vcpu, arch.shregs.sprg1);
	OFFSET(VCPU_SPRG2, kvm_vcpu, arch.shregs.sprg2);
	OFFSET(VCPU_SPRG3, kvm_vcpu, arch.shregs.sprg3);
#endif
#ifdef CONFIG_KVM_BOOK3S_HV_EXIT_TIMING
	OFFSET(VCPU_TB_RMENTRY, kvm_vcpu, arch.rm_entry);
	OFFSET(VCPU_TB_RMINTR, kvm_vcpu, arch.rm_intr);
	OFFSET(VCPU_TB_RMEXIT, kvm_vcpu, arch.rm_exit);
	OFFSET(VCPU_TB_GUEST, kvm_vcpu, arch.guest_time);
	OFFSET(VCPU_TB_CEDE, kvm_vcpu, arch.cede_time);
	OFFSET(VCPU_CUR_ACTIVITY, kvm_vcpu, arch.cur_activity);
	OFFSET(VCPU_ACTIVITY_START, kvm_vcpu, arch.cur_tb_start);
	OFFSET(TAS_SEQCOUNT, kvmhv_tb_accumulator, seqcount);
	OFFSET(TAS_TOTAL, kvmhv_tb_accumulator, tb_total);
	OFFSET(TAS_MIN, kvmhv_tb_accumulator, tb_min);
	OFFSET(TAS_MAX, kvmhv_tb_accumulator, tb_max);
#endif
	OFFSET(VCPU_SHARED_SPRG3, kvm_vcpu_arch_shared, sprg3);
	OFFSET(VCPU_SHARED_SPRG4, kvm_vcpu_arch_shared, sprg4);
	OFFSET(VCPU_SHARED_SPRG5, kvm_vcpu_arch_shared, sprg5);
	OFFSET(VCPU_SHARED_SPRG6, kvm_vcpu_arch_shared, sprg6);
	OFFSET(VCPU_SHARED_SPRG7, kvm_vcpu_arch_shared, sprg7);
	OFFSET(VCPU_SHADOW_PID, kvm_vcpu, arch.shadow_pid);
	OFFSET(VCPU_SHADOW_PID1, kvm_vcpu, arch.shadow_pid1);
	OFFSET(VCPU_SHARED, kvm_vcpu, arch.shared);
	OFFSET(VCPU_SHARED_MSR, kvm_vcpu_arch_shared, msr);
	OFFSET(VCPU_SHADOW_MSR, kvm_vcpu, arch.shadow_msr);
#if defined(CONFIG_PPC_BOOK3S_64) && defined(CONFIG_KVM_BOOK3S_PR_POSSIBLE)
	OFFSET(VCPU_SHAREDBE, kvm_vcpu, arch.shared_big_endian);
#endif

	OFFSET(VCPU_SHARED_MAS0, kvm_vcpu_arch_shared, mas0);
	OFFSET(VCPU_SHARED_MAS1, kvm_vcpu_arch_shared, mas1);
	OFFSET(VCPU_SHARED_MAS2, kvm_vcpu_arch_shared, mas2);
	OFFSET(VCPU_SHARED_MAS7_3, kvm_vcpu_arch_shared, mas7_3);
	OFFSET(VCPU_SHARED_MAS4, kvm_vcpu_arch_shared, mas4);
	OFFSET(VCPU_SHARED_MAS6, kvm_vcpu_arch_shared, mas6);

	OFFSET(VCPU_KVM, kvm_vcpu, kvm);
	OFFSET(KVM_LPID, kvm, arch.lpid);

	/* book3s */
#ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
	OFFSET(KVM_TLB_SETS, kvm, arch.tlb_sets);
	OFFSET(KVM_SDR1, kvm, arch.sdr1);
	OFFSET(KVM_HOST_LPID, kvm, arch.host_lpid);
	OFFSET(KVM_HOST_LPCR, kvm, arch.host_lpcr);
	OFFSET(KVM_HOST_SDR1, kvm, arch.host_sdr1);
	OFFSET(KVM_NEED_FLUSH, kvm, arch.need_tlb_flush.bits);
	OFFSET(KVM_ENABLED_HCALLS, kvm, arch.enabled_hcalls);
	OFFSET(KVM_VRMA_SLB_V, kvm, arch.vrma_slb_v);
	OFFSET(KVM_RADIX, kvm, arch.radix);
	OFFSET(KVM_FWNMI, kvm, arch.fwnmi_enabled);
	OFFSET(KVM_SECURE_GUEST, kvm, arch.secure_guest);
	OFFSET(VCPU_DSISR, kvm_vcpu, arch.shregs.dsisr);
	OFFSET(VCPU_DAR, kvm_vcpu, arch.shregs.dar);
	OFFSET(VCPU_VPA, kvm_vcpu, arch.vpa.pinned_addr);
	OFFSET(VCPU_VPA_DIRTY, kvm_vcpu, arch.vpa.dirty);
	OFFSET(VCPU_HEIR, kvm_vcpu, arch.emul_inst);
	OFFSET(VCPU_NESTED, kvm_vcpu, arch.nested);
	OFFSET(VCPU_CPU, kvm_vcpu, cpu);
	OFFSET(VCPU_THREAD_CPU, kvm_vcpu, arch.thread_cpu);
#endif
#ifdef CONFIG_PPC_BOOK3S
	OFFSET(VCPU_PURR, kvm_vcpu, arch.purr);
	OFFSET(VCPU_SPURR, kvm_vcpu, arch.spurr);
	OFFSET(VCPU_IC, kvm_vcpu, arch.ic);
	OFFSET(VCPU_DSCR, kvm_vcpu, arch.dscr);
	OFFSET(VCPU_AMR, kvm_vcpu, arch.amr);
	OFFSET(VCPU_UAMOR, kvm_vcpu, arch.uamor);
	OFFSET(VCPU_IAMR, kvm_vcpu, arch.iamr);
	OFFSET(VCPU_CTRL, kvm_vcpu, arch.ctrl);
	OFFSET(VCPU_DABR, kvm_vcpu, arch.dabr);
	OFFSET(VCPU_DABRX, kvm_vcpu, arch.dabrx);
	OFFSET(VCPU_DAWR0, kvm_vcpu, arch.dawr0);
	OFFSET(VCPU_DAWRX0, kvm_vcpu, arch.dawrx0);
	OFFSET(VCPU_DAWR1, kvm_vcpu, arch.dawr1);
	OFFSET(VCPU_DAWRX1, kvm_vcpu, arch.dawrx1);
	OFFSET(VCPU_CIABR, kvm_vcpu, arch.ciabr);
	OFFSET(VCPU_HFLAGS, kvm_vcpu, arch.hflags);
	OFFSET(VCPU_DEC, kvm_vcpu, arch.dec);
	OFFSET(VCPU_DEC_EXPIRES, kvm_vcpu, arch.dec_expires);
	OFFSET(VCPU_PENDING_EXC, kvm_vcpu, arch.pending_exceptions);
	OFFSET(VCPU_CEDED, kvm_vcpu, arch.ceded);
	OFFSET(VCPU_PRODDED, kvm_vcpu, arch.prodded);
	OFFSET(VCPU_IRQ_PENDING, kvm_vcpu, arch.irq_pending);
	OFFSET(VCPU_DBELL_REQ, kvm_vcpu, arch.doorbell_request);
	OFFSET(VCPU_MMCR, kvm_vcpu, arch.mmcr);
	OFFSET(VCPU_MMCRA, kvm_vcpu, arch.mmcra);
	OFFSET(VCPU_MMCRS, kvm_vcpu, arch.mmcrs);
	OFFSET(VCPU_PMC, kvm_vcpu, arch.pmc);
	OFFSET(VCPU_SPMC, kvm_vcpu, arch.spmc);
	OFFSET(VCPU_SIAR, kvm_vcpu, arch.siar);
	OFFSET(VCPU_SDAR, kvm_vcpu, arch.sdar);
	OFFSET(VCPU_SIER, kvm_vcpu, arch.sier);
	OFFSET(VCPU_SLB, kvm_vcpu, arch.slb);
	OFFSET(VCPU_SLB_MAX, kvm_vcpu, arch.slb_max);
	OFFSET(VCPU_SLB_NR, kvm_vcpu, arch.slb_nr);
	OFFSET(VCPU_FAULT_DSISR, kvm_vcpu, arch.fault_dsisr);
	OFFSET(VCPU_FAULT_DAR, kvm_vcpu, arch.fault_dar);
	OFFSET(VCPU_FAULT_GPA, kvm_vcpu, arch.fault_gpa);
	OFFSET(VCPU_INTR_MSR, kvm_vcpu, arch.intr_msr);
	OFFSET(VCPU_LAST_INST, kvm_vcpu, arch.last_inst);
	OFFSET(VCPU_TRAP, kvm_vcpu, arch.trap);
	OFFSET(VCPU_CFAR, kvm_vcpu, arch.cfar);
	OFFSET(VCPU_PPR, kvm_vcpu, arch.ppr);
	OFFSET(VCPU_FSCR, kvm_vcpu, arch.fscr);
	OFFSET(VCPU_PSPB, kvm_vcpu, arch.pspb);
	OFFSET(VCPU_EBBHR, kvm_vcpu, arch.ebbhr);
	OFFSET(VCPU_EBBRR, kvm_vcpu, arch.ebbrr);
	OFFSET(VCPU_BESCR, kvm_vcpu, arch.bescr);
	OFFSET(VCPU_CSIGR, kvm_vcpu, arch.csigr);
	OFFSET(VCPU_TACR, kvm_vcpu, arch.tacr);
	OFFSET(VCPU_TCSCR, kvm_vcpu, arch.tcscr);
	OFFSET(VCPU_ACOP, kvm_vcpu, arch.acop);
	OFFSET(VCPU_WORT, kvm_vcpu, arch.wort);
	OFFSET(VCPU_TID, kvm_vcpu, arch.tid);
	OFFSET(VCPU_PSSCR, kvm_vcpu, arch.psscr);
	OFFSET(VCPU_HFSCR, kvm_vcpu, arch.hfscr);
	OFFSET(VCORE_ENTRY_EXIT, kvmppc_vcore, entry_exit_map);
	OFFSET(VCORE_IN_GUEST, kvmppc_vcore, in_guest);
	OFFSET(VCORE_NAPPING_THREADS, kvmppc_vcore, napping_threads);
	OFFSET(VCORE_KVM, kvmppc_vcore, kvm);
	OFFSET(VCORE_TB_OFFSET, kvmppc_vcore, tb_offset);
	OFFSET(VCORE_TB_OFFSET_APPL, kvmppc_vcore, tb_offset_applied);
	OFFSET(VCORE_LPCR, kvmppc_vcore, lpcr);
	OFFSET(VCORE_PCR, kvmppc_vcore, pcr);
	OFFSET(VCORE_DPDES, kvmppc_vcore, dpdes);
	OFFSET(VCORE_VTB, kvmppc_vcore, vtb);
	OFFSET(VCPU_SLB_E, kvmppc_slb, orige);
	OFFSET(VCPU_SLB_V, kvmppc_slb, origv);
	DEFINE(VCPU_SLB_SIZE, sizeof(struct kvmppc_slb));
#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
	OFFSET(VCPU_TFHAR, kvm_vcpu, arch.tfhar);
	OFFSET(VCPU_TFIAR, kvm_vcpu, arch.tfiar);
	OFFSET(VCPU_TEXASR, kvm_vcpu, arch.texasr);
	OFFSET(VCPU_ORIG_TEXASR, kvm_vcpu, arch.orig_texasr);
	OFFSET(VCPU_GPR_TM, kvm_vcpu, arch.gpr_tm);
	OFFSET(VCPU_FPRS_TM, kvm_vcpu, arch.fp_tm.fpr);
	OFFSET(VCPU_VRS_TM, kvm_vcpu, arch.vr_tm.vr);
	OFFSET(VCPU_VRSAVE_TM, kvm_vcpu, arch.vrsave_tm);
	OFFSET(VCPU_CR_TM, kvm_vcpu, arch.cr_tm);
	OFFSET(VCPU_XER_TM, kvm_vcpu, arch.xer_tm);
	OFFSET(VCPU_LR_TM, kvm_vcpu, arch.lr_tm);
	OFFSET(VCPU_CTR_TM, kvm_vcpu, arch.ctr_tm);
	OFFSET(VCPU_AMR_TM, kvm_vcpu, arch.amr_tm);
	OFFSET(VCPU_PPR_TM, kvm_vcpu, arch.ppr_tm);
	OFFSET(VCPU_DSCR_TM, kvm_vcpu, arch.dscr_tm);
	OFFSET(VCPU_TAR_TM, kvm_vcpu, arch.tar_tm);
#endif

#ifdef CONFIG_PPC_BOOK3S_64
#ifdef CONFIG_KVM_BOOK3S_PR_POSSIBLE
	OFFSET(PACA_SVCPU, paca_struct, shadow_vcpu);
# define SVCPU_FIELD(x, f)	DEFINE(x, offsetof(struct paca_struct, shadow_vcpu.f))
#else
# define SVCPU_FIELD(x, f)
#endif
# define HSTATE_FIELD(x, f)	DEFINE(x, offsetof(struct paca_struct, kvm_hstate.f))
#else	/* 32-bit */
# define SVCPU_FIELD(x, f)	DEFINE(x, offsetof(struct kvmppc_book3s_shadow_vcpu, f))
# define HSTATE_FIELD(x, f)	DEFINE(x, offsetof(struct kvmppc_book3s_shadow_vcpu, hstate.f))
#endif

	SVCPU_FIELD(SVCPU_CR, cr);
	SVCPU_FIELD(SVCPU_XER, xer);
	SVCPU_FIELD(SVCPU_CTR, ctr);
	SVCPU_FIELD(SVCPU_LR, lr);
	SVCPU_FIELD(SVCPU_PC, pc);
	SVCPU_FIELD(SVCPU_R0, gpr[0]);
	SVCPU_FIELD(SVCPU_R1, gpr[1]);
	SVCPU_FIELD(SVCPU_R2, gpr[2]);
	SVCPU_FIELD(SVCPU_R3, gpr[3]);
	SVCPU_FIELD(SVCPU_R4, gpr[4]);
	SVCPU_FIELD(SVCPU_R5, gpr[5]);
	SVCPU_FIELD(SVCPU_R6, gpr[6]);
	SVCPU_FIELD(SVCPU_R7, gpr[7]);
	SVCPU_FIELD(SVCPU_R8, gpr[8]);
	SVCPU_FIELD(SVCPU_R9, gpr[9]);
	SVCPU_FIELD(SVCPU_R10, gpr[10]);
	SVCPU_FIELD(SVCPU_R11, gpr[11]);
	SVCPU_FIELD(SVCPU_R12, gpr[12]);
	SVCPU_FIELD(SVCPU_R13, gpr[13]);
	SVCPU_FIELD(SVCPU_FAULT_DSISR, fault_dsisr);
	SVCPU_FIELD(SVCPU_FAULT_DAR, fault_dar);
	SVCPU_FIELD(SVCPU_LAST_INST, last_inst);
	SVCPU_FIELD(SVCPU_SHADOW_SRR1, shadow_srr1);
#ifdef CONFIG_PPC_BOOK3S_32
	SVCPU_FIELD(SVCPU_SR, sr);
#endif
#ifdef CONFIG_PPC64
	SVCPU_FIELD(SVCPU_SLB, slb);
	SVCPU_FIELD(SVCPU_SLB_MAX, slb_max);
	SVCPU_FIELD(SVCPU_SHADOW_FSCR, shadow_fscr);
#endif

	HSTATE_FIELD(HSTATE_HOST_R1, host_r1);
	HSTATE_FIELD(HSTATE_HOST_R2, host_r2);
	HSTATE_FIELD(HSTATE_HOST_MSR, host_msr);
	HSTATE_FIELD(HSTATE_VMHANDLER, vmhandler);
	HSTATE_FIELD(HSTATE_SCRATCH0, scratch0);
	HSTATE_FIELD(HSTATE_SCRATCH1, scratch1);
	HSTATE_FIELD(HSTATE_SCRATCH2, scratch2);
	HSTATE_FIELD(HSTATE_IN_GUEST, in_guest);
	HSTATE_FIELD(HSTATE_RESTORE_HID5, restore_hid5);
	HSTATE_FIELD(HSTATE_NAPPING, napping);

#ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
	HSTATE_FIELD(HSTATE_HWTHREAD_REQ, hwthread_req);
	HSTATE_FIELD(HSTATE_HWTHREAD_STATE, hwthread_state);
	HSTATE_FIELD(HSTATE_KVM_VCPU, kvm_vcpu);
	HSTATE_FIELD(HSTATE_KVM_VCORE, kvm_vcore);
	HSTATE_FIELD(HSTATE_XICS_PHYS, xics_phys);
	HSTATE_FIELD(HSTATE_XIVE_TIMA_PHYS, xive_tima_phys);
	HSTATE_FIELD(HSTATE_XIVE_TIMA_VIRT, xive_tima_virt);
	HSTATE_FIELD(HSTATE_SAVED_XIRR, saved_xirr);
	HSTATE_FIELD(HSTATE_HOST_IPI, host_ipi);
	HSTATE_FIELD(HSTATE_PTID, ptid);
	HSTATE_FIELD(HSTATE_FAKE_SUSPEND, fake_suspend);
	HSTATE_FIELD(HSTATE_MMCR0, host_mmcr[0]);
	HSTATE_FIELD(HSTATE_MMCR1, host_mmcr[1]);
	HSTATE_FIELD(HSTATE_MMCRA, host_mmcr[2]);
	HSTATE_FIELD(HSTATE_SIAR, host_mmcr[3]);
	HSTATE_FIELD(HSTATE_SDAR, host_mmcr[4]);
	HSTATE_FIELD(HSTATE_MMCR2, host_mmcr[5]);
	HSTATE_FIELD(HSTATE_SIER, host_mmcr[6]);
	HSTATE_FIELD(HSTATE_MMCR3, host_mmcr[7]);
	HSTATE_FIELD(HSTATE_SIER2, host_mmcr[8]);
	HSTATE_FIELD(HSTATE_SIER3, host_mmcr[9]);
	HSTATE_FIELD(HSTATE_PMC1, host_pmc[0]);
	HSTATE_FIELD(HSTATE_PMC2, host_pmc[1]);
	HSTATE_FIELD(HSTATE_PMC3, host_pmc[2]);
	HSTATE_FIELD(HSTATE_PMC4, host_pmc[3]);
	HSTATE_FIELD(HSTATE_PMC5, host_pmc[4]);
	HSTATE_FIELD(HSTATE_PMC6, host_pmc[5]);
	HSTATE_FIELD(HSTATE_PURR, host_purr);
	HSTATE_FIELD(HSTATE_SPURR, host_spurr);
	HSTATE_FIELD(HSTATE_DSCR, host_dscr);
	HSTATE_FIELD(HSTATE_DABR, dabr);
	HSTATE_FIELD(HSTATE_DECEXP, dec_expires);
	HSTATE_FIELD(HSTATE_SPLIT_MODE, kvm_split_mode);
	DEFINE(IPI_PRIORITY, IPI_PRIORITY);
	OFFSET(KVM_SPLIT_RPR, kvm_split_mode, rpr);
	OFFSET(KVM_SPLIT_PMMAR, kvm_split_mode, pmmar);
	OFFSET(KVM_SPLIT_LDBAR, kvm_split_mode, ldbar);
	OFFSET(KVM_SPLIT_DO_NAP, kvm_split_mode, do_nap);
	OFFSET(KVM_SPLIT_NAPPED, kvm_split_mode, napped);
#endif /* CONFIG_KVM_BOOK3S_HV_POSSIBLE */

#ifdef CONFIG_PPC_BOOK3S_64
	HSTATE_FIELD(HSTATE_CFAR, cfar);
	HSTATE_FIELD(HSTATE_PPR, ppr);
	HSTATE_FIELD(HSTATE_HOST_FSCR, host_fscr);
#endif /* CONFIG_PPC_BOOK3S_64 */

#else /* CONFIG_PPC_BOOK3S */
	OFFSET(VCPU_CR, kvm_vcpu, arch.regs.ccr);
	OFFSET(VCPU_XER, kvm_vcpu, arch.regs.xer);
	OFFSET(VCPU_LR, kvm_vcpu, arch.regs.link);
	OFFSET(VCPU_CTR, kvm_vcpu, arch.regs.ctr);
	OFFSET(VCPU_PC, kvm_vcpu, arch.regs.nip);
	OFFSET(VCPU_SPRG9, kvm_vcpu, arch.sprg9);
	OFFSET(VCPU_LAST_INST, kvm_vcpu, arch.last_inst);
	OFFSET(VCPU_FAULT_DEAR, kvm_vcpu, arch.fault_dear);
	OFFSET(VCPU_FAULT_ESR, kvm_vcpu, arch.fault_esr);
	OFFSET(VCPU_CRIT_SAVE, kvm_vcpu, arch.crit_save);
#endif /* CONFIG_PPC_BOOK3S */
#endif /* CONFIG_KVM */

#ifdef CONFIG_KVM_GUEST
	OFFSET(KVM_MAGIC_SCRATCH1, kvm_vcpu_arch_shared, scratch1);
	OFFSET(KVM_MAGIC_SCRATCH2, kvm_vcpu_arch_shared, scratch2);
	OFFSET(KVM_MAGIC_SCRATCH3, kvm_vcpu_arch_shared, scratch3);
	OFFSET(KVM_MAGIC_INT, kvm_vcpu_arch_shared, int_pending);
	OFFSET(KVM_MAGIC_MSR, kvm_vcpu_arch_shared, msr);
	OFFSET(KVM_MAGIC_CRITICAL, kvm_vcpu_arch_shared, critical);
	OFFSET(KVM_MAGIC_SR, kvm_vcpu_arch_shared, sr);
#endif

#ifdef CONFIG_44x
	DEFINE(PGD_T_LOG2, PGD_T_LOG2);
	DEFINE(PTE_T_LOG2, PTE_T_LOG2);
#endif
#ifdef CONFIG_PPC_FSL_BOOK3E
	DEFINE(TLBCAM_SIZE, sizeof(struct tlbcam));
	OFFSET(TLBCAM_MAS0, tlbcam, MAS0);
	OFFSET(TLBCAM_MAS1, tlbcam, MAS1);
	OFFSET(TLBCAM_MAS2, tlbcam, MAS2);
	OFFSET(TLBCAM_MAS3, tlbcam, MAS3);
	OFFSET(TLBCAM_MAS7, tlbcam, MAS7);
#endif

#if defined(CONFIG_KVM) && defined(CONFIG_SPE)
	OFFSET(VCPU_EVR, kvm_vcpu, arch.evr[0]);
	OFFSET(VCPU_ACC, kvm_vcpu, arch.acc);
	OFFSET(VCPU_SPEFSCR, kvm_vcpu, arch.spefscr);
	OFFSET(VCPU_HOST_SPEFSCR, kvm_vcpu, arch.host_spefscr);
#endif

#ifdef CONFIG_KVM_BOOKE_HV
	OFFSET(VCPU_HOST_MAS4, kvm_vcpu, arch.host_mas4);
	OFFSET(VCPU_HOST_MAS6, kvm_vcpu, arch.host_mas6);
#endif

#ifdef CONFIG_KVM_XICS
	DEFINE(VCPU_XIVE_SAVED_STATE, offsetof(struct kvm_vcpu,
					       arch.xive_saved_state));
	DEFINE(VCPU_XIVE_CAM_WORD, offsetof(struct kvm_vcpu,
					    arch.xive_cam_word));
	DEFINE(VCPU_XIVE_PUSHED, offsetof(struct kvm_vcpu, arch.xive_pushed));
	DEFINE(VCPU_XIVE_ESC_ON, offsetof(struct kvm_vcpu, arch.xive_esc_on));
	DEFINE(VCPU_XIVE_ESC_RADDR, offsetof(struct kvm_vcpu, arch.xive_esc_raddr));
	DEFINE(VCPU_XIVE_ESC_VADDR, offsetof(struct kvm_vcpu, arch.xive_esc_vaddr));
#endif

#ifdef CONFIG_KVM_EXIT_TIMING
	OFFSET(VCPU_TIMING_EXIT_TBU, kvm_vcpu, arch.timing_exit.tv32.tbu);
	OFFSET(VCPU_TIMING_EXIT_TBL, kvm_vcpu, arch.timing_exit.tv32.tbl);
	OFFSET(VCPU_TIMING_LAST_ENTER_TBU, kvm_vcpu, arch.timing_last_enter.tv32.tbu);
	OFFSET(VCPU_TIMING_LAST_ENTER_TBL, kvm_vcpu, arch.timing_last_enter.tv32.tbl);
#endif

	DEFINE(PPC_DBELL_SERVER, PPC_DBELL_SERVER);
	DEFINE(PPC_DBELL_MSGTYPE, PPC_DBELL_MSGTYPE);

#ifdef CONFIG_PPC_8xx
	DEFINE(VIRT_IMMR_BASE, (u64)__fix_to_virt(FIX_IMMR_BASE));
#endif

#ifdef CONFIG_XMON
	DEFINE(BPT_SIZE, BPT_SIZE);
#endif

	return 0;
}