/*
 * Copyright (C) 1995-1999 Gary Thomas, Paul Mackerras, Cort Dougan.
 */
#ifndef _ASM_POWERPC_PPC_ASM_H
#define _ASM_POWERPC_PPC_ASM_H

#include <linux/stringify.h>
#include <asm/asm-compat.h>
#include <asm/processor.h>
#include <asm/ppc-opcode.h>
#include <asm/firmware.h>

#ifdef __ASSEMBLY__

#define SZL			(BITS_PER_LONG/8)

/*
 * Stuff for accurate CPU time accounting.
 * These macros handle transitions between user and system state
 * in exception entry and exit and accumulate time to the
 * user_time and system_time fields in the paca.
 */

#ifndef CONFIG_VIRT_CPU_ACCOUNTING_NATIVE
#define ACCOUNT_CPU_USER_ENTRY(ptr, ra, rb)
#define ACCOUNT_CPU_USER_EXIT(ptr, ra, rb)
#define ACCOUNT_STOLEN_TIME
#else
#define ACCOUNT_CPU_USER_ENTRY(ptr, ra, rb)				\
	MFTB(ra);			/* get timebase */		\
	PPC_LL	rb, ACCOUNT_STARTTIME_USER(ptr);			\
	PPC_STL	ra, ACCOUNT_STARTTIME(ptr);				\
	subf	rb,rb,ra;		/* subtract start value */	\
	PPC_LL	ra, ACCOUNT_USER_TIME(ptr);				\
	add	ra,ra,rb;		/* add on to user time */	\
	PPC_STL	ra, ACCOUNT_USER_TIME(ptr);				\

#define ACCOUNT_CPU_USER_EXIT(ptr, ra, rb)				\
	MFTB(ra);			/* get timebase */		\
	PPC_LL	rb, ACCOUNT_STARTTIME(ptr);				\
	PPC_STL	ra, ACCOUNT_STARTTIME_USER(ptr);			\
	subf	rb,rb,ra;		/* subtract start value */	\
	PPC_LL	ra, ACCOUNT_SYSTEM_TIME(ptr);				\
	add	ra,ra,rb;		/* add on to system time */	\
	PPC_STL	ra, ACCOUNT_SYSTEM_TIME(ptr)
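
/*
 * Illustrative use only (not taken from this file): a 64-bit exception
 * entry path, where r13 holds the paca pointer, might account the time
 * spent in userspace with something like
 *
 *	ACCOUNT_CPU_USER_ENTRY(r13, r11, r12)
 *
 * with the matching exit path using ACCOUNT_CPU_USER_EXIT(r13, r11, r12).
 * ra and rb are scratch registers that the macros clobber.
 */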

#ifdef CONFIG_PPC_SPLPAR
#define ACCOUNT_STOLEN_TIME						\
BEGIN_FW_FTR_SECTION;							\
	beq	33f;							\
	/* from user - see if there are any DTL entries to process */	\
	ld	r10,PACALPPACAPTR(r13);	/* get ptr to VPA */		\
	ld	r11,PACA_DTL_RIDX(r13);	/* get log read index */	\
	addi	r10,r10,LPPACA_DTLIDX;					\
	LDX_BE	r10,0,r10;		/* get log write index */	\
	cmpd	cr1,r11,r10;						\
	beq+	cr1,33f;						\
	bl	accumulate_stolen_time;				\
	ld	r12,_MSR(r1);						\
	andi.	r10,r12,MSR_PR;		/* Restore cr0 (coming from user) */ \
33:									\
END_FW_FTR_SECTION_IFSET(FW_FEATURE_SPLPAR)

#else  /* CONFIG_PPC_SPLPAR */
#define ACCOUNT_STOLEN_TIME

#endif /* CONFIG_PPC_SPLPAR */

#endif /* CONFIG_VIRT_CPU_ACCOUNTING_NATIVE */

/*
 * Macros for storing registers into and loading registers from
 * exception frames.
 */
#ifdef __powerpc64__
#define SAVE_GPR(n, base)	std	n,GPR0+8*(n)(base)
#define REST_GPR(n, base)	ld	n,GPR0+8*(n)(base)
#define SAVE_NVGPRS(base)	SAVE_8GPRS(14, base); SAVE_10GPRS(22, base)
#define REST_NVGPRS(base)	REST_8GPRS(14, base); REST_10GPRS(22, base)
#else
#define SAVE_GPR(n, base)	stw	n,GPR0+4*(n)(base)
#define REST_GPR(n, base)	lwz	n,GPR0+4*(n)(base)
#define SAVE_NVGPRS(base)	stmw	13, GPR0+4*13(base)
#define REST_NVGPRS(base)	lmw	13, GPR0+4*13(base)
#endif

#define SAVE_2GPRS(n, base)	SAVE_GPR(n, base); SAVE_GPR(n+1, base)
#define SAVE_4GPRS(n, base)	SAVE_2GPRS(n, base); SAVE_2GPRS(n+2, base)
#define SAVE_8GPRS(n, base)	SAVE_4GPRS(n, base); SAVE_4GPRS(n+4, base)
#define SAVE_10GPRS(n, base)	SAVE_8GPRS(n, base); SAVE_2GPRS(n+8, base)
#define REST_2GPRS(n, base)	REST_GPR(n, base); REST_GPR(n+1, base)
#define REST_4GPRS(n, base)	REST_2GPRS(n, base); REST_2GPRS(n+2, base)
#define REST_8GPRS(n, base)	REST_4GPRS(n, base); REST_4GPRS(n+4, base)
#define REST_10GPRS(n, base)	REST_8GPRS(n, base); REST_2GPRS(n+8, base)
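
/*
 * Illustrative use only: with r1 pointing at an exception frame, the
 * non-volatile GPRs can be saved and restored with
 *
 *	SAVE_NVGPRS(r1)
 *	...
 *	REST_NVGPRS(r1)
 *
 * and an individual register with e.g. SAVE_GPR(14, r1) / REST_GPR(14, r1).
 */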

#define SAVE_FPR(n, base)	stfd	n,8*TS_FPRWIDTH*(n)(base)
#define SAVE_2FPRS(n, base)	SAVE_FPR(n, base); SAVE_FPR(n+1, base)
#define SAVE_4FPRS(n, base)	SAVE_2FPRS(n, base); SAVE_2FPRS(n+2, base)
#define SAVE_8FPRS(n, base)	SAVE_4FPRS(n, base); SAVE_4FPRS(n+4, base)
#define SAVE_16FPRS(n, base)	SAVE_8FPRS(n, base); SAVE_8FPRS(n+8, base)
#define SAVE_32FPRS(n, base)	SAVE_16FPRS(n, base); SAVE_16FPRS(n+16, base)
#define REST_FPR(n, base)	lfd	n,8*TS_FPRWIDTH*(n)(base)
#define REST_2FPRS(n, base)	REST_FPR(n, base); REST_FPR(n+1, base)
#define REST_4FPRS(n, base)	REST_2FPRS(n, base); REST_2FPRS(n+2, base)
#define REST_8FPRS(n, base)	REST_4FPRS(n, base); REST_4FPRS(n+4, base)
#define REST_16FPRS(n, base)	REST_8FPRS(n, base); REST_8FPRS(n+8, base)
#define REST_32FPRS(n, base)	REST_16FPRS(n, base); REST_16FPRS(n+16, base)

#define SAVE_VR(n,b,base)	li b,16*(n);  stvx n,base,b
#define SAVE_2VRS(n,b,base)	SAVE_VR(n,b,base); SAVE_VR(n+1,b,base)
#define SAVE_4VRS(n,b,base)	SAVE_2VRS(n,b,base); SAVE_2VRS(n+2,b,base)
#define SAVE_8VRS(n,b,base)	SAVE_4VRS(n,b,base); SAVE_4VRS(n+4,b,base)
#define SAVE_16VRS(n,b,base)	SAVE_8VRS(n,b,base); SAVE_8VRS(n+8,b,base)
#define SAVE_32VRS(n,b,base)	SAVE_16VRS(n,b,base); SAVE_16VRS(n+16,b,base)
#define REST_VR(n,b,base)	li b,16*(n); lvx n,base,b
#define REST_2VRS(n,b,base)	REST_VR(n,b,base); REST_VR(n+1,b,base)
#define REST_4VRS(n,b,base)	REST_2VRS(n,b,base); REST_2VRS(n+2,b,base)
#define REST_8VRS(n,b,base)	REST_4VRS(n,b,base); REST_4VRS(n+4,b,base)
#define REST_16VRS(n,b,base)	REST_8VRS(n,b,base); REST_8VRS(n+8,b,base)
#define REST_32VRS(n,b,base)	REST_16VRS(n,b,base); REST_16VRS(n+16,b,base)

#ifdef __BIG_ENDIAN__
#define STXVD2X_ROT(n,b,base)		STXVD2X(n,b,base)
#define LXVD2X_ROT(n,b,base)		LXVD2X(n,b,base)
#else
#define STXVD2X_ROT(n,b,base)		XXSWAPD(n,n);		\
					STXVD2X(n,b,base);	\
					XXSWAPD(n,n)

#define LXVD2X_ROT(n,b,base)		LXVD2X(n,b,base);	\
					XXSWAPD(n,n)
#endif
/* Save the lower 32 VSRs in the thread VSR region */
#define SAVE_VSR(n,b,base)	li b,16*(n);  STXVD2X_ROT(n,R##base,R##b)
#define SAVE_2VSRS(n,b,base)	SAVE_VSR(n,b,base); SAVE_VSR(n+1,b,base)
#define SAVE_4VSRS(n,b,base)	SAVE_2VSRS(n,b,base); SAVE_2VSRS(n+2,b,base)
#define SAVE_8VSRS(n,b,base)	SAVE_4VSRS(n,b,base); SAVE_4VSRS(n+4,b,base)
#define SAVE_16VSRS(n,b,base)	SAVE_8VSRS(n,b,base); SAVE_8VSRS(n+8,b,base)
#define SAVE_32VSRS(n,b,base)	SAVE_16VSRS(n,b,base); SAVE_16VSRS(n+16,b,base)
#define REST_VSR(n,b,base)	li b,16*(n); LXVD2X_ROT(n,R##base,R##b)
#define REST_2VSRS(n,b,base)	REST_VSR(n,b,base); REST_VSR(n+1,b,base)
#define REST_4VSRS(n,b,base)	REST_2VSRS(n,b,base); REST_2VSRS(n+2,b,base)
#define REST_8VSRS(n,b,base)	REST_4VSRS(n,b,base); REST_4VSRS(n+4,b,base)
#define REST_16VSRS(n,b,base)	REST_8VSRS(n,b,base); REST_8VSRS(n+8,b,base)
#define REST_32VSRS(n,b,base)	REST_16VSRS(n,b,base); REST_16VSRS(n+16,b,base)
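
/*
 * Illustrative use only: with r8 pointing at a 16-byte aligned save area
 * and r4 free as a scratch index register, all 32 AltiVec registers can
 * be saved and later restored with
 *
 *	SAVE_32VRS(0, r4, r8)
 *	...
 *	REST_32VRS(0, r4, r8)
 */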

/*
 * b = base register for addressing, o = base offset from register of 1st EVR
 * n = first EVR, s = scratch
 */
#define SAVE_EVR(n,s,b,o)	evmergehi s,s,n; stw s,o+4*(n)(b)
#define SAVE_2EVRS(n,s,b,o)	SAVE_EVR(n,s,b,o); SAVE_EVR(n+1,s,b,o)
#define SAVE_4EVRS(n,s,b,o)	SAVE_2EVRS(n,s,b,o); SAVE_2EVRS(n+2,s,b,o)
#define SAVE_8EVRS(n,s,b,o)	SAVE_4EVRS(n,s,b,o); SAVE_4EVRS(n+4,s,b,o)
#define SAVE_16EVRS(n,s,b,o)	SAVE_8EVRS(n,s,b,o); SAVE_8EVRS(n+8,s,b,o)
#define SAVE_32EVRS(n,s,b,o)	SAVE_16EVRS(n,s,b,o); SAVE_16EVRS(n+16,s,b,o)
#define REST_EVR(n,s,b,o)	lwz s,o+4*(n)(b); evmergelo n,s,n
#define REST_2EVRS(n,s,b,o)	REST_EVR(n,s,b,o); REST_EVR(n+1,s,b,o)
#define REST_4EVRS(n,s,b,o)	REST_2EVRS(n,s,b,o); REST_2EVRS(n+2,s,b,o)
#define REST_8EVRS(n,s,b,o)	REST_4EVRS(n,s,b,o); REST_4EVRS(n+4,s,b,o)
#define REST_16EVRS(n,s,b,o)	REST_8EVRS(n,s,b,o); REST_8EVRS(n+8,s,b,o)
#define REST_32EVRS(n,s,b,o)	REST_16EVRS(n,s,b,o); REST_16EVRS(n+16,s,b,o)

/* Macros to adjust thread priority for hardware multithreading */
#define HMT_VERY_LOW	or	31,31,31	# very low priority
#define HMT_LOW		or	1,1,1
#define HMT_MEDIUM_LOW  or	6,6,6		# medium low priority
#define HMT_MEDIUM	or	2,2,2
#define HMT_MEDIUM_HIGH or	5,5,5		# medium high priority
#define HMT_HIGH	or	3,3,3
#define HMT_EXTRA_HIGH	or	7,7,7		# power7 only
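
/*
 * Illustrative use only: a busy-wait loop typically drops the SMT thread
 * priority while spinning and restores it afterwards, e.g.
 *
 * 1:	HMT_LOW
 *	lwz	r4,0(r3)	- poll some flag word (made-up example)
 *	cmpwi	r4,0
 *	beq	1b
 *	HMT_MEDIUM
 */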

#ifdef CONFIG_PPC64
#define ULONG_SIZE 	8
#else
#define ULONG_SIZE	4
#endif
#define __VCPU_GPR(n)	(VCPU_GPRS + (n * ULONG_SIZE))
#define VCPU_GPR(n)	__VCPU_GPR(__REG_##n)
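
/*
 * For example, VCPU_GPR(R3) expands to (VCPU_GPRS + 3 * ULONG_SIZE), the
 * offset of guest GPR3 within the vcpu structure, so (illustratively,
 * assuming r9 holds the vcpu pointer) a guest register could be loaded with
 *	PPC_LL	r4, VCPU_GPR(R3)(r9)
 */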

#ifdef __KERNEL__
#ifdef CONFIG_PPC64

#define STACKFRAMESIZE 256
#define __STK_REG(i)   (112 + ((i)-14)*8)
#define STK_REG(i)     __STK_REG(__REG_##i)

#ifdef PPC64_ELF_ABI_v2
#define STK_GOT		24
#define __STK_PARAM(i)	(32 + ((i)-3)*8)
#else
#define STK_GOT		40
#define __STK_PARAM(i)	(48 + ((i)-3)*8)
#endif
#define STK_PARAM(i)	__STK_PARAM(__REG_##i)

#ifdef PPC64_ELF_ABI_v2

#define _GLOBAL(name) \
	.align 2 ; \
	.type name,@function; \
	.globl name; \
name:

#define _GLOBAL_TOC(name) \
	.align 2 ; \
	.type name,@function; \
	.globl name; \
name: \
0:	addis r2,r12,(.TOC.-0b)@ha; \
	addi r2,r2,(.TOC.-0b)@l; \
	.localentry name,.-name

#define DOTSYM(a)	a

#else

#define XGLUE(a,b) a##b
#define GLUE(a,b) XGLUE(a,b)

#define _GLOBAL(name) \
	.align 2 ; \
	.globl name; \
	.globl GLUE(.,name); \
	.pushsection ".opd","aw"; \
name: \
	.quad GLUE(.,name); \
	.quad .TOC.@tocbase; \
	.quad 0; \
	.popsection; \
	.type GLUE(.,name),@function; \
GLUE(.,name):

#define _GLOBAL_TOC(name) _GLOBAL(name)

#define DOTSYM(a)	GLUE(.,a)

#endif

#else /* 32-bit */

#define _ENTRY(n)	\
	.globl n;	\
n:

#define _GLOBAL(n)	\
	.stabs __stringify(n:F-1),N_FUN,0,0,n;\
	.globl n;	\
n:

#define _GLOBAL_TOC(name) _GLOBAL(name)

#endif
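
/*
 * Illustrative use only: a plain assembly function is declared with
 *
 * _GLOBAL(my_asm_helper)		- hypothetical symbol name
 *	blr
 *
 * On ELFv2, _GLOBAL_TOC() should be used instead when the body needs r2
 * (the TOC pointer) and the function may be entered via its global entry
 * point.
 */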

/*
 * __kprobes (the C annotation) puts the symbol into the .kprobes.text
 * section, which gets emitted at the end of regular text.
 *
 * _ASM_NOKPROBE_SYMBOL and NOKPROBE_SYMBOL just add the symbol to
 * a blacklist. The former is for core kprobe functions/data, the
 * latter is for those that incidentally must be excluded from probing
 * and allows them to be linked at a more optimal location within text.
 */
#ifdef CONFIG_KPROBES
#define _ASM_NOKPROBE_SYMBOL(entry)			\
	.pushsection "_kprobe_blacklist","aw";		\
	PPC_LONG (entry) ;				\
	.popsection
#else
#define _ASM_NOKPROBE_SYMBOL(entry)
#endif
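
/*
 * Illustrative use only: the blacklist entry is emitted right after the
 * symbol it protects, e.g.
 *
 * _GLOBAL(critical_asm_code)		- hypothetical symbol name
 *	...
 *	blr
 * _ASM_NOKPROBE_SYMBOL(critical_asm_code)
 */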

#define FUNC_START(name)	_GLOBAL(name)
#define FUNC_END(name)

/*
 * LOAD_REG_IMMEDIATE(rn, expr)
 *   Loads the value of the constant expression 'expr' into register 'rn'
 *   using immediate instructions only.  Use this when it's important not
 *   to reference other data (i.e. on ppc64 when the TOC pointer is not
 *   valid) and when 'expr' is a constant or absolute address.
 *
 * LOAD_REG_ADDR(rn, name)
 *   Loads the address of label 'name' into register 'rn'.  Use this when
 *   you don't particularly need immediate instructions only, but you need
 *   the whole address in one register (e.g. it's a structure address and
 *   you want to access various offsets within it).  On ppc32 this is
 *   identical to LOAD_REG_IMMEDIATE.
 *
 * LOAD_REG_ADDR_PIC(rn, name)
 *   Loads the address of label 'name' into register 'rn'. Use this when
 *   the kernel doesn't run at the linked or relocated address. Please
 *   note that this macro will clobber the lr register.
 *
 * LOAD_REG_ADDRBASE(rn, name)
 * ADDROFF(name)
 *   LOAD_REG_ADDRBASE loads part of the address of label 'name' into
 *   register 'rn'.  ADDROFF(name) returns the remainder of the address as
 *   a constant expression.  ADDROFF(name) is a signed expression < 16 bits
 *   in size, so is suitable for use directly as an offset in load and store
 *   instructions.  Use this when loading/storing a single word or less as:
 *      LOAD_REG_ADDRBASE(rX, name)
 *      ld	rY,ADDROFF(name)(rX)
 */

/* Be careful, this will clobber the lr register. */
#define LOAD_REG_ADDR_PIC(reg, name)		\
	bl	0f;				\
0:	mflr	reg;				\
	addis	reg,reg,(name - 0b)@ha;		\
	addi	reg,reg,(name - 0b)@l;

#ifdef __powerpc64__
#ifdef HAVE_AS_ATHIGH
#define __AS_ATHIGH high
#else
#define __AS_ATHIGH h
#endif
#define LOAD_REG_IMMEDIATE(reg,expr)		\
	lis     reg,(expr)@highest;		\
	ori     reg,reg,(expr)@higher;	\
	rldicr  reg,reg,32,31;		\
	oris    reg,reg,(expr)@__AS_ATHIGH;	\
	ori     reg,reg,(expr)@l;

#define LOAD_REG_ADDR(reg,name)			\
	ld	reg,name@got(r2)

#define LOAD_REG_ADDRBASE(reg,name)	LOAD_REG_ADDR(reg,name)
#define ADDROFF(name)			0

/* offsets for stack frame layout */
#define LRSAVE	16

#else /* 32-bit */

#define LOAD_REG_IMMEDIATE(reg,expr)		\
	lis	reg,(expr)@ha;		\
	addi	reg,reg,(expr)@l;

#define LOAD_REG_ADDR(reg,name)		LOAD_REG_IMMEDIATE(reg, name)

#define LOAD_REG_ADDRBASE(reg, name)	lis	reg,name@ha
#define ADDROFF(name)			name@l

/* offsets for stack frame layout */
#define LRSAVE	4

#endif
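
/*
 * Illustrative use only (symbol and constant names are made up):
 *
 *	LOAD_REG_IMMEDIATE(r5, SOME_CONSTANT)	- no memory or TOC reference
 *	LOAD_REG_ADDR(r6, some_kernel_symbol)	- full address of a label
 *	LOAD_REG_ADDRBASE(r7, some_var)
 *	PPC_LL	r8, ADDROFF(some_var)(r7)	- single load via base+offset
 */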

/* various errata or part fixups */
#ifdef CONFIG_PPC601_SYNC_FIX
#define SYNC				\
BEGIN_FTR_SECTION			\
	sync;				\
	isync;				\
END_FTR_SECTION_IFSET(CPU_FTR_601)
#define SYNC_601			\
BEGIN_FTR_SECTION			\
	sync;				\
END_FTR_SECTION_IFSET(CPU_FTR_601)
#define ISYNC_601			\
BEGIN_FTR_SECTION			\
	isync;				\
END_FTR_SECTION_IFSET(CPU_FTR_601)
#else
#define	SYNC
#define SYNC_601
#define ISYNC_601
#endif

#if defined(CONFIG_PPC_CELL) || defined(CONFIG_PPC_FSL_BOOK3E)
#define MFTB(dest)			\
90:	mfspr dest, SPRN_TBRL;		\
BEGIN_FTR_SECTION_NESTED(96);		\
	cmpwi dest,0;			\
	beq-  90b;			\
END_FTR_SECTION_NESTED(CPU_FTR_CELL_TB_BUG, CPU_FTR_CELL_TB_BUG, 96)
#else
#define MFTB(dest)			MFTBL(dest)
#endif

#ifdef CONFIG_PPC_8xx
#define MFTBL(dest)			mftb dest
#define MFTBU(dest)			mftbu dest
#else
#define MFTBL(dest)			mfspr dest, SPRN_TBRL
#define MFTBU(dest)			mfspr dest, SPRN_TBRU
#endif

#ifndef CONFIG_SMP
#define TLBSYNC
#else /* CONFIG_SMP */
/* tlbsync is not implemented on 601 */
#define TLBSYNC				\
BEGIN_FTR_SECTION			\
	tlbsync;			\
	sync;				\
END_FTR_SECTION_IFCLR(CPU_FTR_601)
#endif

#ifdef CONFIG_PPC64
#define MTOCRF(FXM, RS)			\
	BEGIN_FTR_SECTION_NESTED(848);	\
	mtcrf	(FXM), RS;		\
	FTR_SECTION_ELSE_NESTED(848);	\
	mtocrf (FXM), RS;		\
	ALT_FTR_SECTION_END_NESTED_IFCLR(CPU_FTR_NOEXECUTE, 848)
#endif

/*
 * This instruction is not implemented on the PPC 603 or 601; however, on
 * the 403GCX and 405GP tlbia IS defined and tlbie is not.
 * All of these instructions exist in the 8xx, they have magical powers,
 * and they must be used.
 */

#if !defined(CONFIG_4xx) && !defined(CONFIG_PPC_8xx)
#define tlbia					\
	li	r4,1024;			\
	mtctr	r4;				\
	lis	r4,KERNELBASE@h;		\
	.machine push;				\
	.machine "power4";			\
0:	tlbie	r4;				\
	.machine pop;				\
	addi	r4,r4,0x1000;			\
	bdnz	0b
#endif


#ifdef CONFIG_IBM440EP_ERR42
#define PPC440EP_ERR42 isync
#else
#define PPC440EP_ERR42
#endif

/* The following stops all load and store data streams associated with stream
 * ID (i.e. streams created explicitly).  The embedded and server mnemonics for
 * dcbt are different, so this must only be used on server CPUs.
 */
#define DCBT_BOOK3S_STOP_ALL_STREAM_IDS(scratch)	\
       lis     scratch,0x60000000@h;			\
       dcbt    0,scratch,0b01010
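
/*
 * Illustrative use only: invoked with any free GPR as scratch, e.g.
 *	DCBT_BOOK3S_STOP_ALL_STREAM_IDS(r6)
 * The scratch register is overwritten with the 0x60000000 "stop all" value.
 */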

/*
 * toreal/fromreal/tophys/tovirt macros. 32-bit BookE makes them
 * keep the address intact to be compatible with code shared with
 * 32-bit classic.
 *
 * On the other hand, I find it useful to have them behave as expected
 * by their name (ie always do the addition) on 64-bit BookE
 */
#if defined(CONFIG_BOOKE) && !defined(CONFIG_PPC64)
#define toreal(rd)
#define fromreal(rd)

/*
 * We use addis to ensure compatibility with the "classic" ppc versions of
 * these macros, which use rs = 0 to get the tophys offset in rd, rather than
 * converting the address in r0, and so this version has to do that too
 * (i.e. set register rd to 0 when rs == 0).
 */
#define tophys(rd,rs)				\
	addis	rd,rs,0

#define tovirt(rd,rs)				\
	addis	rd,rs,0

#elif defined(CONFIG_PPC64)
#define toreal(rd)		/* we can access c000... in real mode */
#define fromreal(rd)

#define tophys(rd,rs)                           \
	clrldi	rd,rs,2

#define tovirt(rd,rs)                           \
	rotldi	rd,rs,16;			\
	ori	rd,rd,((KERNELBASE>>48)&0xFFFF);\
	rotldi	rd,rd,48
#else
/*
 * On APUS (Amiga PowerPC cpu upgrade board), we don't know the
 * physical base address of RAM at compile time.
 */
#define toreal(rd)	tophys(rd,rd)
#define fromreal(rd)	tovirt(rd,rd)

#define tophys(rd,rs)				\
0:	addis	rd,rs,-PAGE_OFFSET@h;		\
	.section ".vtop_fixup","aw";		\
	.align  1;				\
	.long   0b;				\
	.previous

#define tovirt(rd,rs)				\
0:	addis	rd,rs,PAGE_OFFSET@h;		\
	.section ".ptov_fixup","aw";		\
	.align  1;				\
	.long   0b;				\
	.previous
#endif
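
/*
 * Illustrative use only: converting a kernel virtual address in r3 to a
 * physical address in r4, and back again, looks like
 *	tophys(r4, r3)
 *	tovirt(r3, r4)
 * The same code builds into whichever variant above matches the platform.
 */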

#ifdef CONFIG_PPC_BOOK3S_64
#define RFI		rfid
#define MTMSRD(r)	mtmsrd	r
#define MTMSR_EERI(reg)	mtmsrd	reg,1
#else
#ifndef CONFIG_40x
#define	RFI		rfi
#else
#define RFI		rfi; b .	/* Prevent prefetch past rfi */
#endif
#define MTMSRD(r)	mtmsr	r
#define MTMSR_EERI(reg)	mtmsr	reg
#endif

#endif /* __KERNEL__ */

/* The boring bits... */

/* Condition Register Bit Fields */

#define	cr0	0
#define	cr1	1
#define	cr2	2
#define	cr3	3
#define	cr4	4
#define	cr5	5
#define	cr6	6
#define	cr7	7


/*
 * General Purpose Registers (GPRs)
 *
 * The lower case r0-r31 should be used in preference to the upper
 * case R0-R31 as they provide more error checking in the assembler.
 * Use R0-R31 only when really necessary.
 */

#define	r0	%r0
#define	r1	%r1
#define	r2	%r2
#define	r3	%r3
#define	r4	%r4
#define	r5	%r5
#define	r6	%r6
#define	r7	%r7
#define	r8	%r8
#define	r9	%r9
#define	r10	%r10
#define	r11	%r11
#define	r12	%r12
#define	r13	%r13
#define	r14	%r14
#define	r15	%r15
#define	r16	%r16
#define	r17	%r17
#define	r18	%r18
#define	r19	%r19
#define	r20	%r20
#define	r21	%r21
#define	r22	%r22
#define	r23	%r23
#define	r24	%r24
#define	r25	%r25
#define	r26	%r26
#define	r27	%r27
#define	r28	%r28
#define	r29	%r29
#define	r30	%r30
#define	r31	%r31


/* Floating Point Registers (FPRs) */

#define	fr0	0
#define	fr1	1
#define	fr2	2
#define	fr3	3
#define	fr4	4
#define	fr5	5
#define	fr6	6
#define	fr7	7
#define	fr8	8
#define	fr9	9
#define	fr10	10
#define	fr11	11
#define	fr12	12
#define	fr13	13
#define	fr14	14
#define	fr15	15
#define	fr16	16
#define	fr17	17
#define	fr18	18
#define	fr19	19
#define	fr20	20
#define	fr21	21
#define	fr22	22
#define	fr23	23
#define	fr24	24
#define	fr25	25
#define	fr26	26
#define	fr27	27
#define	fr28	28
#define	fr29	29
#define	fr30	30
#define	fr31	31

/* AltiVec Registers (VPRs) */

#define	v0	0
#define	v1	1
#define	v2	2
#define	v3	3
#define	v4	4
#define	v5	5
#define	v6	6
#define	v7	7
#define	v8	8
#define	v9	9
#define	v10	10
#define	v11	11
#define	v12	12
#define	v13	13
#define	v14	14
#define	v15	15
#define	v16	16
#define	v17	17
#define	v18	18
#define	v19	19
#define	v20	20
#define	v21	21
#define	v22	22
#define	v23	23
#define	v24	24
#define	v25	25
#define	v26	26
#define	v27	27
#define	v28	28
#define	v29	29
#define	v30	30
#define	v31	31

/* VSX Registers (VSRs) */

#define	vs0	0
#define	vs1	1
#define	vs2	2
#define	vs3	3
#define	vs4	4
#define	vs5	5
#define	vs6	6
#define	vs7	7
#define	vs8	8
#define	vs9	9
#define	vs10	10
#define	vs11	11
#define	vs12	12
#define	vs13	13
#define	vs14	14
#define	vs15	15
#define	vs16	16
#define	vs17	17
#define	vs18	18
#define	vs19	19
#define	vs20	20
#define	vs21	21
#define	vs22	22
#define	vs23	23
#define	vs24	24
#define	vs25	25
#define	vs26	26
#define	vs27	27
#define	vs28	28
#define	vs29	29
#define	vs30	30
#define	vs31	31
#define	vs32	32
#define	vs33	33
#define	vs34	34
#define	vs35	35
#define	vs36	36
#define	vs37	37
#define	vs38	38
#define	vs39	39
#define	vs40	40
#define	vs41	41
#define	vs42	42
#define	vs43	43
#define	vs44	44
#define	vs45	45
#define	vs46	46
#define	vs47	47
#define	vs48	48
#define	vs49	49
#define	vs50	50
#define	vs51	51
#define	vs52	52
#define	vs53	53
#define	vs54	54
#define	vs55	55
#define	vs56	56
#define	vs57	57
#define	vs58	58
#define	vs59	59
#define	vs60	60
#define	vs61	61
#define	vs62	62
#define	vs63	63

/* SPE Registers (EVPRs) */

#define	evr0	0
#define	evr1	1
#define	evr2	2
#define	evr3	3
#define	evr4	4
#define	evr5	5
#define	evr6	6
#define	evr7	7
#define	evr8	8
#define	evr9	9
#define	evr10	10
#define	evr11	11
#define	evr12	12
#define	evr13	13
#define	evr14	14
#define	evr15	15
#define	evr16	16
#define	evr17	17
#define	evr18	18
#define	evr19	19
#define	evr20	20
#define	evr21	21
#define	evr22	22
#define	evr23	23
#define	evr24	24
#define	evr25	25
#define	evr26	26
#define	evr27	27
#define	evr28	28
#define	evr29	29
#define	evr30	30
#define	evr31	31

/* some stab codes */
#define N_FUN	36
#define N_RSYM	64
#define N_SLINE	68
#define N_SO	100

/*
 * Create an endian fixup trampoline
 *
 * This starts with a "tdi 0,0,0x48" instruction which is
 * essentially a "trap never", and thus akin to a nop.
 *
 * The opcode for this instruction, read with the wrong endianness,
 * however results in a b . + 8.
 *
 * So essentially we use that trick to execute the following
 * trampoline in "reverse endian" if we are running with the
 * MSR_LE bit set the "wrong" way for whatever endianness the
 * kernel is built for.
 */

#ifdef CONFIG_PPC_BOOK3E
#define FIXUP_ENDIAN
#else
/*
 * This version may be used in HV or non-HV context.
 * MSR[EE] must be disabled.
 */
#define FIXUP_ENDIAN						   \
	tdi   0,0,0x48;	  /* Reverse endian of b . + 8		*/ \
	b     191f;	  /* Skip trampoline if endian is good	*/ \
	.long 0xa600607d; /* mfmsr r11				*/ \
	.long 0x01006b69; /* xori r11,r11,1			*/ \
	.long 0x00004039; /* li r10,0				*/ \
	.long 0x6401417d; /* mtmsrd r10,1			*/ \
	.long 0x05009f42; /* bcl 20,31,$+4			*/ \
	.long 0xa602487d; /* mflr r10				*/ \
	.long 0x14004a39; /* addi r10,r10,20			*/ \
	.long 0xa6035a7d; /* mtsrr0 r10				*/ \
	.long 0xa6037b7d; /* mtsrr1 r11				*/ \
	.long 0x2400004c; /* rfid				*/ \
191:

/*
 * This version may only be used with MSR[HV]=1
 * - Does not clear MSR[RI], so more robust.
 * - Slightly smaller and faster.
 */
#define FIXUP_ENDIAN_HV						   \
	tdi   0,0,0x48;	  /* Reverse endian of b . + 8		*/ \
	b     191f;	  /* Skip trampoline if endian is good	*/ \
	.long 0xa600607d; /* mfmsr r11				*/ \
	.long 0x01006b69; /* xori r11,r11,1			*/ \
	.long 0x05009f42; /* bcl 20,31,$+4			*/ \
	.long 0xa602487d; /* mflr r10				*/ \
	.long 0x14004a39; /* addi r10,r10,20			*/ \
	.long 0xa64b5a7d; /* mthsrr0 r10			*/ \
	.long 0xa64b7b7d; /* mthsrr1 r11			*/ \
	.long 0x2402004c; /* hrfid				*/ \
191:

#endif /* !CONFIG_PPC_BOOK3E */
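
/*
 * Illustrative use only: an entry point that may be reached in either
 * endianness (a firmware or kexec hand-off, for instance) starts with
 *
 * _GLOBAL(opaque_entry_point)		- hypothetical symbol name
 *	FIXUP_ENDIAN
 *	...
 */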

#endif /*  __ASSEMBLY__ */

/*
 * Helper macro for exception table entries
 */
#define EX_TABLE(_fault, _target)		\
	stringify_in_c(.section __ex_table,"a";)\
	stringify_in_c(.balign 4;)		\
	stringify_in_c(.long (_fault) - . ;)	\
	stringify_in_c(.long (_target) - . ;)	\
	stringify_in_c(.previous)
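
/*
 * Illustrative use only: a load that may fault is paired with a fixup
 * label, and EX_TABLE records the (fault, fixup) pair, e.g.
 *
 * 1:	lwz	r4,0(r3)	- may take a page fault
 *	...
 * 2:	li	r3,-EFAULT	- fixup path
 *	blr
 *	EX_TABLE(1b, 2b)
 */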

#endif /* _ASM_POWERPC_PPC_ASM_H */