// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2012 Regents of the University of California
 * Copyright (C) 2017 SiFive
 */
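/*
 * This file is never linked into the kernel proper.  Kbuild compiles it to
 * assembly and post-processes the markers emitted by the OFFSET()/DEFINE()
 * macros into include/generated/asm-offsets.h, so that assembly sources can
 * refer to structure offsets and sizes symbolically.
 */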

#define GENERATING_ASM_OFFSETS

#include <linux/kbuild.h>
#include <linux/mm.h>
#include <linux/sched.h>
#include <asm/kvm_host.h>
#include <asm/thread_info.h>
#include <asm/ptrace.h>

void asm_offsets(void);

void asm_offsets(void)
{
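	/*
	 * Offsets into task_struct for the callee-saved context and the
	 * embedded thread_info.  These are consumed by the context-switch and
	 * kernel-entry assembly, which cannot use C structure member names
	 * directly.
	 */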
	OFFSET(TASK_THREAD_RA, task_struct, thread.ra);
	OFFSET(TASK_THREAD_SP, task_struct, thread.sp);
	OFFSET(TASK_THREAD_S0, task_struct, thread.s[0]);
	OFFSET(TASK_THREAD_S1, task_struct, thread.s[1]);
	OFFSET(TASK_THREAD_S2, task_struct, thread.s[2]);
	OFFSET(TASK_THREAD_S3, task_struct, thread.s[3]);
	OFFSET(TASK_THREAD_S4, task_struct, thread.s[4]);
	OFFSET(TASK_THREAD_S5, task_struct, thread.s[5]);
	OFFSET(TASK_THREAD_S6, task_struct, thread.s[6]);
	OFFSET(TASK_THREAD_S7, task_struct, thread.s[7]);
	OFFSET(TASK_THREAD_S8, task_struct, thread.s[8]);
	OFFSET(TASK_THREAD_S9, task_struct, thread.s[9]);
	OFFSET(TASK_THREAD_S10, task_struct, thread.s[10]);
	OFFSET(TASK_THREAD_S11, task_struct, thread.s[11]);
	OFFSET(TASK_TI_FLAGS, task_struct, thread_info.flags);
	OFFSET(TASK_TI_PREEMPT_COUNT, task_struct, thread_info.preempt_count);
	OFFSET(TASK_TI_KERNEL_SP, task_struct, thread_info.kernel_sp);
	OFFSET(TASK_TI_USER_SP, task_struct, thread_info.user_sp);

	OFFSET(TASK_THREAD_F0,  task_struct, thread.fstate.f[0]);
	OFFSET(TASK_THREAD_F1,  task_struct, thread.fstate.f[1]);
	OFFSET(TASK_THREAD_F2,  task_struct, thread.fstate.f[2]);
	OFFSET(TASK_THREAD_F3,  task_struct, thread.fstate.f[3]);
	OFFSET(TASK_THREAD_F4,  task_struct, thread.fstate.f[4]);
	OFFSET(TASK_THREAD_F5,  task_struct, thread.fstate.f[5]);
	OFFSET(TASK_THREAD_F6,  task_struct, thread.fstate.f[6]);
	OFFSET(TASK_THREAD_F7,  task_struct, thread.fstate.f[7]);
	OFFSET(TASK_THREAD_F8,  task_struct, thread.fstate.f[8]);
	OFFSET(TASK_THREAD_F9,  task_struct, thread.fstate.f[9]);
	OFFSET(TASK_THREAD_F10, task_struct, thread.fstate.f[10]);
	OFFSET(TASK_THREAD_F11, task_struct, thread.fstate.f[11]);
	OFFSET(TASK_THREAD_F12, task_struct, thread.fstate.f[12]);
	OFFSET(TASK_THREAD_F13, task_struct, thread.fstate.f[13]);
	OFFSET(TASK_THREAD_F14, task_struct, thread.fstate.f[14]);
	OFFSET(TASK_THREAD_F15, task_struct, thread.fstate.f[15]);
	OFFSET(TASK_THREAD_F16, task_struct, thread.fstate.f[16]);
	OFFSET(TASK_THREAD_F17, task_struct, thread.fstate.f[17]);
	OFFSET(TASK_THREAD_F18, task_struct, thread.fstate.f[18]);
	OFFSET(TASK_THREAD_F19, task_struct, thread.fstate.f[19]);
	OFFSET(TASK_THREAD_F20, task_struct, thread.fstate.f[20]);
	OFFSET(TASK_THREAD_F21, task_struct, thread.fstate.f[21]);
	OFFSET(TASK_THREAD_F22, task_struct, thread.fstate.f[22]);
	OFFSET(TASK_THREAD_F23, task_struct, thread.fstate.f[23]);
	OFFSET(TASK_THREAD_F24, task_struct, thread.fstate.f[24]);
	OFFSET(TASK_THREAD_F25, task_struct, thread.fstate.f[25]);
	OFFSET(TASK_THREAD_F26, task_struct, thread.fstate.f[26]);
	OFFSET(TASK_THREAD_F27, task_struct, thread.fstate.f[27]);
	OFFSET(TASK_THREAD_F28, task_struct, thread.fstate.f[28]);
	OFFSET(TASK_THREAD_F29, task_struct, thread.fstate.f[29]);
	OFFSET(TASK_THREAD_F30, task_struct, thread.fstate.f[30]);
	OFFSET(TASK_THREAD_F31, task_struct, thread.fstate.f[31]);
	OFFSET(TASK_THREAD_FCSR, task_struct, thread.fstate.fcsr);
#ifdef CONFIG_STACKPROTECTOR
	OFFSET(TSK_STACK_CANARY, task_struct, stack_canary);
#endif

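	/*
	 * Offsets into struct pt_regs, the trap frame that the exception
	 * entry code builds on the kernel stack.  Note that s0 doubles as
	 * the frame pointer, which is why both PT_FP and PT_S0 exist.
	 */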
	DEFINE(PT_SIZE, sizeof(struct pt_regs));
	OFFSET(PT_EPC, pt_regs, epc);
	OFFSET(PT_RA, pt_regs, ra);
	OFFSET(PT_FP, pt_regs, s0);
	OFFSET(PT_S0, pt_regs, s0);
	OFFSET(PT_S1, pt_regs, s1);
	OFFSET(PT_S2, pt_regs, s2);
	OFFSET(PT_S3, pt_regs, s3);
	OFFSET(PT_S4, pt_regs, s4);
	OFFSET(PT_S5, pt_regs, s5);
	OFFSET(PT_S6, pt_regs, s6);
	OFFSET(PT_S7, pt_regs, s7);
	OFFSET(PT_S8, pt_regs, s8);
	OFFSET(PT_S9, pt_regs, s9);
	OFFSET(PT_S10, pt_regs, s10);
	OFFSET(PT_S11, pt_regs, s11);
	OFFSET(PT_SP, pt_regs, sp);
	OFFSET(PT_TP, pt_regs, tp);
	OFFSET(PT_A0, pt_regs, a0);
	OFFSET(PT_A1, pt_regs, a1);
	OFFSET(PT_A2, pt_regs, a2);
	OFFSET(PT_A3, pt_regs, a3);
	OFFSET(PT_A4, pt_regs, a4);
	OFFSET(PT_A5, pt_regs, a5);
	OFFSET(PT_A6, pt_regs, a6);
	OFFSET(PT_A7, pt_regs, a7);
	OFFSET(PT_T0, pt_regs, t0);
	OFFSET(PT_T1, pt_regs, t1);
	OFFSET(PT_T2, pt_regs, t2);
	OFFSET(PT_T3, pt_regs, t3);
	OFFSET(PT_T4, pt_regs, t4);
	OFFSET(PT_T5, pt_regs, t5);
	OFFSET(PT_T6, pt_regs, t6);
	OFFSET(PT_GP, pt_regs, gp);
	OFFSET(PT_ORIG_A0, pt_regs, orig_a0);
	OFFSET(PT_STATUS, pt_regs, status);
	OFFSET(PT_BADADDR, pt_regs, badaddr);
	OFFSET(PT_CAUSE, pt_regs, cause);

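	/*
	 * Guest and host general-purpose register context for KVM, saved and
	 * restored by the RISC-V KVM world-switch assembly.
	 */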
	OFFSET(KVM_ARCH_GUEST_ZERO, kvm_vcpu_arch, guest_context.zero);
	OFFSET(KVM_ARCH_GUEST_RA, kvm_vcpu_arch, guest_context.ra);
	OFFSET(KVM_ARCH_GUEST_SP, kvm_vcpu_arch, guest_context.sp);
	OFFSET(KVM_ARCH_GUEST_GP, kvm_vcpu_arch, guest_context.gp);
	OFFSET(KVM_ARCH_GUEST_TP, kvm_vcpu_arch, guest_context.tp);
	OFFSET(KVM_ARCH_GUEST_T0, kvm_vcpu_arch, guest_context.t0);
	OFFSET(KVM_ARCH_GUEST_T1, kvm_vcpu_arch, guest_context.t1);
	OFFSET(KVM_ARCH_GUEST_T2, kvm_vcpu_arch, guest_context.t2);
	OFFSET(KVM_ARCH_GUEST_S0, kvm_vcpu_arch, guest_context.s0);
	OFFSET(KVM_ARCH_GUEST_S1, kvm_vcpu_arch, guest_context.s1);
	OFFSET(KVM_ARCH_GUEST_A0, kvm_vcpu_arch, guest_context.a0);
	OFFSET(KVM_ARCH_GUEST_A1, kvm_vcpu_arch, guest_context.a1);
	OFFSET(KVM_ARCH_GUEST_A2, kvm_vcpu_arch, guest_context.a2);
	OFFSET(KVM_ARCH_GUEST_A3, kvm_vcpu_arch, guest_context.a3);
	OFFSET(KVM_ARCH_GUEST_A4, kvm_vcpu_arch, guest_context.a4);
	OFFSET(KVM_ARCH_GUEST_A5, kvm_vcpu_arch, guest_context.a5);
	OFFSET(KVM_ARCH_GUEST_A6, kvm_vcpu_arch, guest_context.a6);
	OFFSET(KVM_ARCH_GUEST_A7, kvm_vcpu_arch, guest_context.a7);
	OFFSET(KVM_ARCH_GUEST_S2, kvm_vcpu_arch, guest_context.s2);
	OFFSET(KVM_ARCH_GUEST_S3, kvm_vcpu_arch, guest_context.s3);
	OFFSET(KVM_ARCH_GUEST_S4, kvm_vcpu_arch, guest_context.s4);
	OFFSET(KVM_ARCH_GUEST_S5, kvm_vcpu_arch, guest_context.s5);
	OFFSET(KVM_ARCH_GUEST_S6, kvm_vcpu_arch, guest_context.s6);
	OFFSET(KVM_ARCH_GUEST_S7, kvm_vcpu_arch, guest_context.s7);
	OFFSET(KVM_ARCH_GUEST_S8, kvm_vcpu_arch, guest_context.s8);
	OFFSET(KVM_ARCH_GUEST_S9, kvm_vcpu_arch, guest_context.s9);
	OFFSET(KVM_ARCH_GUEST_S10, kvm_vcpu_arch, guest_context.s10);
	OFFSET(KVM_ARCH_GUEST_S11, kvm_vcpu_arch, guest_context.s11);
	OFFSET(KVM_ARCH_GUEST_T3, kvm_vcpu_arch, guest_context.t3);
	OFFSET(KVM_ARCH_GUEST_T4, kvm_vcpu_arch, guest_context.t4);
	OFFSET(KVM_ARCH_GUEST_T5, kvm_vcpu_arch, guest_context.t5);
	OFFSET(KVM_ARCH_GUEST_T6, kvm_vcpu_arch, guest_context.t6);
	OFFSET(KVM_ARCH_GUEST_SEPC, kvm_vcpu_arch, guest_context.sepc);
	OFFSET(KVM_ARCH_GUEST_SSTATUS, kvm_vcpu_arch, guest_context.sstatus);
	OFFSET(KVM_ARCH_GUEST_HSTATUS, kvm_vcpu_arch, guest_context.hstatus);
	OFFSET(KVM_ARCH_GUEST_SCOUNTEREN, kvm_vcpu_arch, guest_csr.scounteren);

	OFFSET(KVM_ARCH_HOST_ZERO, kvm_vcpu_arch, host_context.zero);
	OFFSET(KVM_ARCH_HOST_RA, kvm_vcpu_arch, host_context.ra);
	OFFSET(KVM_ARCH_HOST_SP, kvm_vcpu_arch, host_context.sp);
	OFFSET(KVM_ARCH_HOST_GP, kvm_vcpu_arch, host_context.gp);
	OFFSET(KVM_ARCH_HOST_TP, kvm_vcpu_arch, host_context.tp);
	OFFSET(KVM_ARCH_HOST_T0, kvm_vcpu_arch, host_context.t0);
	OFFSET(KVM_ARCH_HOST_T1, kvm_vcpu_arch, host_context.t1);
	OFFSET(KVM_ARCH_HOST_T2, kvm_vcpu_arch, host_context.t2);
	OFFSET(KVM_ARCH_HOST_S0, kvm_vcpu_arch, host_context.s0);
	OFFSET(KVM_ARCH_HOST_S1, kvm_vcpu_arch, host_context.s1);
	OFFSET(KVM_ARCH_HOST_A0, kvm_vcpu_arch, host_context.a0);
	OFFSET(KVM_ARCH_HOST_A1, kvm_vcpu_arch, host_context.a1);
	OFFSET(KVM_ARCH_HOST_A2, kvm_vcpu_arch, host_context.a2);
	OFFSET(KVM_ARCH_HOST_A3, kvm_vcpu_arch, host_context.a3);
	OFFSET(KVM_ARCH_HOST_A4, kvm_vcpu_arch, host_context.a4);
	OFFSET(KVM_ARCH_HOST_A5, kvm_vcpu_arch, host_context.a5);
	OFFSET(KVM_ARCH_HOST_A6, kvm_vcpu_arch, host_context.a6);
	OFFSET(KVM_ARCH_HOST_A7, kvm_vcpu_arch, host_context.a7);
	OFFSET(KVM_ARCH_HOST_S2, kvm_vcpu_arch, host_context.s2);
	OFFSET(KVM_ARCH_HOST_S3, kvm_vcpu_arch, host_context.s3);
	OFFSET(KVM_ARCH_HOST_S4, kvm_vcpu_arch, host_context.s4);
	OFFSET(KVM_ARCH_HOST_S5, kvm_vcpu_arch, host_context.s5);
	OFFSET(KVM_ARCH_HOST_S6, kvm_vcpu_arch, host_context.s6);
	OFFSET(KVM_ARCH_HOST_S7, kvm_vcpu_arch, host_context.s7);
	OFFSET(KVM_ARCH_HOST_S8, kvm_vcpu_arch, host_context.s8);
	OFFSET(KVM_ARCH_HOST_S9, kvm_vcpu_arch, host_context.s9);
	OFFSET(KVM_ARCH_HOST_S10, kvm_vcpu_arch, host_context.s10);
	OFFSET(KVM_ARCH_HOST_S11, kvm_vcpu_arch, host_context.s11);
	OFFSET(KVM_ARCH_HOST_T3, kvm_vcpu_arch, host_context.t3);
	OFFSET(KVM_ARCH_HOST_T4, kvm_vcpu_arch, host_context.t4);
	OFFSET(KVM_ARCH_HOST_T5, kvm_vcpu_arch, host_context.t5);
	OFFSET(KVM_ARCH_HOST_T6, kvm_vcpu_arch, host_context.t6);
	OFFSET(KVM_ARCH_HOST_SEPC, kvm_vcpu_arch, host_context.sepc);
	OFFSET(KVM_ARCH_HOST_SSTATUS, kvm_vcpu_arch, host_context.sstatus);
	OFFSET(KVM_ARCH_HOST_HSTATUS, kvm_vcpu_arch, host_context.hstatus);
	OFFSET(KVM_ARCH_HOST_SSCRATCH, kvm_vcpu_arch, host_sscratch);
	OFFSET(KVM_ARCH_HOST_STVEC, kvm_vcpu_arch, host_stvec);
	OFFSET(KVM_ARCH_HOST_SCOUNTEREN, kvm_vcpu_arch, host_scounteren);

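	/* Trap details (struct kvm_cpu_trap) recorded when the guest exits. */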
	OFFSET(KVM_ARCH_TRAP_SEPC, kvm_cpu_trap, sepc);
	OFFSET(KVM_ARCH_TRAP_SCAUSE, kvm_cpu_trap, scause);
	OFFSET(KVM_ARCH_TRAP_STVAL, kvm_cpu_trap, stval);
	OFFSET(KVM_ARCH_TRAP_HTVAL, kvm_cpu_trap, htval);
	OFFSET(KVM_ARCH_TRAP_HTINST, kvm_cpu_trap, htinst);

	/* F extension */

	OFFSET(KVM_ARCH_FP_F_F0, kvm_cpu_context, fp.f.f[0]);
	OFFSET(KVM_ARCH_FP_F_F1, kvm_cpu_context, fp.f.f[1]);
	OFFSET(KVM_ARCH_FP_F_F2, kvm_cpu_context, fp.f.f[2]);
	OFFSET(KVM_ARCH_FP_F_F3, kvm_cpu_context, fp.f.f[3]);
	OFFSET(KVM_ARCH_FP_F_F4, kvm_cpu_context, fp.f.f[4]);
	OFFSET(KVM_ARCH_FP_F_F5, kvm_cpu_context, fp.f.f[5]);
	OFFSET(KVM_ARCH_FP_F_F6, kvm_cpu_context, fp.f.f[6]);
	OFFSET(KVM_ARCH_FP_F_F7, kvm_cpu_context, fp.f.f[7]);
	OFFSET(KVM_ARCH_FP_F_F8, kvm_cpu_context, fp.f.f[8]);
	OFFSET(KVM_ARCH_FP_F_F9, kvm_cpu_context, fp.f.f[9]);
	OFFSET(KVM_ARCH_FP_F_F10, kvm_cpu_context, fp.f.f[10]);
	OFFSET(KVM_ARCH_FP_F_F11, kvm_cpu_context, fp.f.f[11]);
	OFFSET(KVM_ARCH_FP_F_F12, kvm_cpu_context, fp.f.f[12]);
	OFFSET(KVM_ARCH_FP_F_F13, kvm_cpu_context, fp.f.f[13]);
	OFFSET(KVM_ARCH_FP_F_F14, kvm_cpu_context, fp.f.f[14]);
	OFFSET(KVM_ARCH_FP_F_F15, kvm_cpu_context, fp.f.f[15]);
	OFFSET(KVM_ARCH_FP_F_F16, kvm_cpu_context, fp.f.f[16]);
	OFFSET(KVM_ARCH_FP_F_F17, kvm_cpu_context, fp.f.f[17]);
	OFFSET(KVM_ARCH_FP_F_F18, kvm_cpu_context, fp.f.f[18]);
	OFFSET(KVM_ARCH_FP_F_F19, kvm_cpu_context, fp.f.f[19]);
	OFFSET(KVM_ARCH_FP_F_F20, kvm_cpu_context, fp.f.f[20]);
	OFFSET(KVM_ARCH_FP_F_F21, kvm_cpu_context, fp.f.f[21]);
	OFFSET(KVM_ARCH_FP_F_F22, kvm_cpu_context, fp.f.f[22]);
	OFFSET(KVM_ARCH_FP_F_F23, kvm_cpu_context, fp.f.f[23]);
	OFFSET(KVM_ARCH_FP_F_F24, kvm_cpu_context, fp.f.f[24]);
	OFFSET(KVM_ARCH_FP_F_F25, kvm_cpu_context, fp.f.f[25]);
	OFFSET(KVM_ARCH_FP_F_F26, kvm_cpu_context, fp.f.f[26]);
	OFFSET(KVM_ARCH_FP_F_F27, kvm_cpu_context, fp.f.f[27]);
	OFFSET(KVM_ARCH_FP_F_F28, kvm_cpu_context, fp.f.f[28]);
	OFFSET(KVM_ARCH_FP_F_F29, kvm_cpu_context, fp.f.f[29]);
	OFFSET(KVM_ARCH_FP_F_F30, kvm_cpu_context, fp.f.f[30]);
	OFFSET(KVM_ARCH_FP_F_F31, kvm_cpu_context, fp.f.f[31]);
	OFFSET(KVM_ARCH_FP_F_FCSR, kvm_cpu_context, fp.f.fcsr);

	/* D extension */

	OFFSET(KVM_ARCH_FP_D_F0, kvm_cpu_context, fp.d.f[0]);
	OFFSET(KVM_ARCH_FP_D_F1, kvm_cpu_context, fp.d.f[1]);
	OFFSET(KVM_ARCH_FP_D_F2, kvm_cpu_context, fp.d.f[2]);
	OFFSET(KVM_ARCH_FP_D_F3, kvm_cpu_context, fp.d.f[3]);
	OFFSET(KVM_ARCH_FP_D_F4, kvm_cpu_context, fp.d.f[4]);
	OFFSET(KVM_ARCH_FP_D_F5, kvm_cpu_context, fp.d.f[5]);
	OFFSET(KVM_ARCH_FP_D_F6, kvm_cpu_context, fp.d.f[6]);
	OFFSET(KVM_ARCH_FP_D_F7, kvm_cpu_context, fp.d.f[7]);
	OFFSET(KVM_ARCH_FP_D_F8, kvm_cpu_context, fp.d.f[8]);
	OFFSET(KVM_ARCH_FP_D_F9, kvm_cpu_context, fp.d.f[9]);
	OFFSET(KVM_ARCH_FP_D_F10, kvm_cpu_context, fp.d.f[10]);
	OFFSET(KVM_ARCH_FP_D_F11, kvm_cpu_context, fp.d.f[11]);
	OFFSET(KVM_ARCH_FP_D_F12, kvm_cpu_context, fp.d.f[12]);
	OFFSET(KVM_ARCH_FP_D_F13, kvm_cpu_context, fp.d.f[13]);
	OFFSET(KVM_ARCH_FP_D_F14, kvm_cpu_context, fp.d.f[14]);
	OFFSET(KVM_ARCH_FP_D_F15, kvm_cpu_context, fp.d.f[15]);
	OFFSET(KVM_ARCH_FP_D_F16, kvm_cpu_context, fp.d.f[16]);
	OFFSET(KVM_ARCH_FP_D_F17, kvm_cpu_context, fp.d.f[17]);
	OFFSET(KVM_ARCH_FP_D_F18, kvm_cpu_context, fp.d.f[18]);
	OFFSET(KVM_ARCH_FP_D_F19, kvm_cpu_context, fp.d.f[19]);
	OFFSET(KVM_ARCH_FP_D_F20, kvm_cpu_context, fp.d.f[20]);
	OFFSET(KVM_ARCH_FP_D_F21, kvm_cpu_context, fp.d.f[21]);
	OFFSET(KVM_ARCH_FP_D_F22, kvm_cpu_context, fp.d.f[22]);
	OFFSET(KVM_ARCH_FP_D_F23, kvm_cpu_context, fp.d.f[23]);
	OFFSET(KVM_ARCH_FP_D_F24, kvm_cpu_context, fp.d.f[24]);
	OFFSET(KVM_ARCH_FP_D_F25, kvm_cpu_context, fp.d.f[25]);
	OFFSET(KVM_ARCH_FP_D_F26, kvm_cpu_context, fp.d.f[26]);
	OFFSET(KVM_ARCH_FP_D_F27, kvm_cpu_context, fp.d.f[27]);
	OFFSET(KVM_ARCH_FP_D_F28, kvm_cpu_context, fp.d.f[28]);
	OFFSET(KVM_ARCH_FP_D_F29, kvm_cpu_context, fp.d.f[29]);
	OFFSET(KVM_ARCH_FP_D_F30, kvm_cpu_context, fp.d.f[30]);
	OFFSET(KVM_ARCH_FP_D_F31, kvm_cpu_context, fp.d.f[31]);
	OFFSET(KVM_ARCH_FP_D_FCSR, kvm_cpu_context, fp.d.fcsr);

	/*
	 * THREAD_{F,X}* might be larger than an S-type offset can handle, but
	 * these are used in performance-sensitive assembly so we can't resort
	 * to loading the long immediate every time.
	 */
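	/*
	 * Instead, the assembly loads the large base offset into a register
	 * once and then addresses the individual members with these small
	 * deltas.  Roughly (a sketch of the pattern, not the exact kernel
	 * code):
	 *
	 *	li	a4, TASK_THREAD_RA
	 *	add	a3, a0, a4		# a3 = &prev->thread.ra
	 *	REG_S	ra, TASK_THREAD_RA_RA(a3)
	 *	REG_S	sp, TASK_THREAD_SP_RA(a3)
	 */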
	DEFINE(TASK_THREAD_RA_RA,
		  offsetof(struct task_struct, thread.ra)
		- offsetof(struct task_struct, thread.ra)
	);
	DEFINE(TASK_THREAD_SP_RA,
		  offsetof(struct task_struct, thread.sp)
		- offsetof(struct task_struct, thread.ra)
	);
	DEFINE(TASK_THREAD_S0_RA,
		  offsetof(struct task_struct, thread.s[0])
		- offsetof(struct task_struct, thread.ra)
	);
	DEFINE(TASK_THREAD_S1_RA,
		  offsetof(struct task_struct, thread.s[1])
		- offsetof(struct task_struct, thread.ra)
	);
	DEFINE(TASK_THREAD_S2_RA,
		  offsetof(struct task_struct, thread.s[2])
		- offsetof(struct task_struct, thread.ra)
	);
	DEFINE(TASK_THREAD_S3_RA,
		  offsetof(struct task_struct, thread.s[3])
		- offsetof(struct task_struct, thread.ra)
	);
	DEFINE(TASK_THREAD_S4_RA,
		  offsetof(struct task_struct, thread.s[4])
		- offsetof(struct task_struct, thread.ra)
	);
	DEFINE(TASK_THREAD_S5_RA,
		  offsetof(struct task_struct, thread.s[5])
		- offsetof(struct task_struct, thread.ra)
	);
	DEFINE(TASK_THREAD_S6_RA,
		  offsetof(struct task_struct, thread.s[6])
		- offsetof(struct task_struct, thread.ra)
	);
	DEFINE(TASK_THREAD_S7_RA,
		  offsetof(struct task_struct, thread.s[7])
		- offsetof(struct task_struct, thread.ra)
	);
	DEFINE(TASK_THREAD_S8_RA,
		  offsetof(struct task_struct, thread.s[8])
		- offsetof(struct task_struct, thread.ra)
	);
	DEFINE(TASK_THREAD_S9_RA,
		  offsetof(struct task_struct, thread.s[9])
		- offsetof(struct task_struct, thread.ra)
	);
	DEFINE(TASK_THREAD_S10_RA,
		  offsetof(struct task_struct, thread.s[10])
		- offsetof(struct task_struct, thread.ra)
	);
	DEFINE(TASK_THREAD_S11_RA,
		  offsetof(struct task_struct, thread.s[11])
		- offsetof(struct task_struct, thread.ra)
	);

	DEFINE(TASK_THREAD_F0_F0,
		  offsetof(struct task_struct, thread.fstate.f[0])
		- offsetof(struct task_struct, thread.fstate.f[0])
	);
	DEFINE(TASK_THREAD_F1_F0,
		  offsetof(struct task_struct, thread.fstate.f[1])
		- offsetof(struct task_struct, thread.fstate.f[0])
	);
	DEFINE(TASK_THREAD_F2_F0,
		  offsetof(struct task_struct, thread.fstate.f[2])
		- offsetof(struct task_struct, thread.fstate.f[0])
	);
	DEFINE(TASK_THREAD_F3_F0,
		  offsetof(struct task_struct, thread.fstate.f[3])
		- offsetof(struct task_struct, thread.fstate.f[0])
	);
	DEFINE(TASK_THREAD_F4_F0,
		  offsetof(struct task_struct, thread.fstate.f[4])
		- offsetof(struct task_struct, thread.fstate.f[0])
	);
	DEFINE(TASK_THREAD_F5_F0,
		  offsetof(struct task_struct, thread.fstate.f[5])
		- offsetof(struct task_struct, thread.fstate.f[0])
	);
	DEFINE(TASK_THREAD_F6_F0,
		  offsetof(struct task_struct, thread.fstate.f[6])
		- offsetof(struct task_struct, thread.fstate.f[0])
	);
	DEFINE(TASK_THREAD_F7_F0,
		  offsetof(struct task_struct, thread.fstate.f[7])
		- offsetof(struct task_struct, thread.fstate.f[0])
	);
	DEFINE(TASK_THREAD_F8_F0,
		  offsetof(struct task_struct, thread.fstate.f[8])
		- offsetof(struct task_struct, thread.fstate.f[0])
	);
	DEFINE(TASK_THREAD_F9_F0,
		  offsetof(struct task_struct, thread.fstate.f[9])
		- offsetof(struct task_struct, thread.fstate.f[0])
	);
	DEFINE(TASK_THREAD_F10_F0,
		  offsetof(struct task_struct, thread.fstate.f[10])
		- offsetof(struct task_struct, thread.fstate.f[0])
	);
	DEFINE(TASK_THREAD_F11_F0,
		  offsetof(struct task_struct, thread.fstate.f[11])
		- offsetof(struct task_struct, thread.fstate.f[0])
	);
	DEFINE(TASK_THREAD_F12_F0,
		  offsetof(struct task_struct, thread.fstate.f[12])
		- offsetof(struct task_struct, thread.fstate.f[0])
	);
	DEFINE(TASK_THREAD_F13_F0,
		  offsetof(struct task_struct, thread.fstate.f[13])
		- offsetof(struct task_struct, thread.fstate.f[0])
	);
	DEFINE(TASK_THREAD_F14_F0,
		  offsetof(struct task_struct, thread.fstate.f[14])
		- offsetof(struct task_struct, thread.fstate.f[0])
	);
	DEFINE(TASK_THREAD_F15_F0,
		  offsetof(struct task_struct, thread.fstate.f[15])
		- offsetof(struct task_struct, thread.fstate.f[0])
	);
	DEFINE(TASK_THREAD_F16_F0,
		  offsetof(struct task_struct, thread.fstate.f[16])
		- offsetof(struct task_struct, thread.fstate.f[0])
	);
	DEFINE(TASK_THREAD_F17_F0,
		  offsetof(struct task_struct, thread.fstate.f[17])
		- offsetof(struct task_struct, thread.fstate.f[0])
	);
	DEFINE(TASK_THREAD_F18_F0,
		  offsetof(struct task_struct, thread.fstate.f[18])
		- offsetof(struct task_struct, thread.fstate.f[0])
	);
	DEFINE(TASK_THREAD_F19_F0,
		  offsetof(struct task_struct, thread.fstate.f[19])
		- offsetof(struct task_struct, thread.fstate.f[0])
	);
	DEFINE(TASK_THREAD_F20_F0,
		  offsetof(struct task_struct, thread.fstate.f[20])
		- offsetof(struct task_struct, thread.fstate.f[0])
	);
	DEFINE(TASK_THREAD_F21_F0,
		  offsetof(struct task_struct, thread.fstate.f[21])
		- offsetof(struct task_struct, thread.fstate.f[0])
	);
	DEFINE(TASK_THREAD_F22_F0,
		  offsetof(struct task_struct, thread.fstate.f[22])
		- offsetof(struct task_struct, thread.fstate.f[0])
	);
	DEFINE(TASK_THREAD_F23_F0,
		  offsetof(struct task_struct, thread.fstate.f[23])
		- offsetof(struct task_struct, thread.fstate.f[0])
	);
	DEFINE(TASK_THREAD_F24_F0,
		  offsetof(struct task_struct, thread.fstate.f[24])
		- offsetof(struct task_struct, thread.fstate.f[0])
	);
	DEFINE(TASK_THREAD_F25_F0,
		  offsetof(struct task_struct, thread.fstate.f[25])
		- offsetof(struct task_struct, thread.fstate.f[0])
	);
	DEFINE(TASK_THREAD_F26_F0,
		  offsetof(struct task_struct, thread.fstate.f[26])
		- offsetof(struct task_struct, thread.fstate.f[0])
	);
	DEFINE(TASK_THREAD_F27_F0,
		  offsetof(struct task_struct, thread.fstate.f[27])
		- offsetof(struct task_struct, thread.fstate.f[0])
	);
	DEFINE(TASK_THREAD_F28_F0,
		  offsetof(struct task_struct, thread.fstate.f[28])
		- offsetof(struct task_struct, thread.fstate.f[0])
	);
	DEFINE(TASK_THREAD_F29_F0,
		  offsetof(struct task_struct, thread.fstate.f[29])
		- offsetof(struct task_struct, thread.fstate.f[0])
	);
	DEFINE(TASK_THREAD_F30_F0,
		  offsetof(struct task_struct, thread.fstate.f[30])
		- offsetof(struct task_struct, thread.fstate.f[0])
	);
	DEFINE(TASK_THREAD_F31_F0,
		  offsetof(struct task_struct, thread.fstate.f[31])
		- offsetof(struct task_struct, thread.fstate.f[0])
	);
	DEFINE(TASK_THREAD_FCSR_F0,
		  offsetof(struct task_struct, thread.fstate.fcsr)
		- offsetof(struct task_struct, thread.fstate.f[0])
	);

	/*
	 * We allocate a pt_regs on the stack when entering the kernel.  This
	 * ensures the alignment is sane.
	 */
	DEFINE(PT_SIZE_ON_STACK, ALIGN(sizeof(struct pt_regs), STACK_ALIGN));
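	/*
	 * The entry assembly carves this much space off the stack pointer and
	 * then fills it in using the PT_* offsets above, e.g. (sketch):
	 *
	 *	addi	sp, sp, -(PT_SIZE_ON_STACK)
	 *	REG_S	ra, PT_RA(sp)
	 */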

	OFFSET(KERNEL_MAP_VIRT_ADDR, kernel_mapping, virt_addr);
}