xref: /openbmc/linux/arch/ia64/include/asm/asmmacro.h (revision d7a3d85e)
1 #ifndef _ASM_IA64_ASMMACRO_H
2 #define _ASM_IA64_ASMMACRO_H
3 
4 /*
5  * Copyright (C) 2000-2001, 2003-2004 Hewlett-Packard Co
6  *	David Mosberger-Tang <davidm@hpl.hp.com>
7  */
8 
9 
/*
 * ENTRY(name): open procedure "name".  Aligns the entry point to a
 * 32-byte boundary, emits the assembler's .proc directive (starts the
 * unwind-info region for the procedure), then the entry label itself.
 * Pair with END(name).
 */
10 #define ENTRY(name)				\
11 	.align 32;				\
12 	.proc name;				\
13 name:
14 
/*
 * ENTRY_MIN_ALIGN(name): like ENTRY(), but with only the minimal
 * 16-byte (bundle-pair) alignment instead of 32.  Pair with END(name).
 */
15 #define ENTRY_MIN_ALIGN(name)			\
16 	.align 16;				\
17 	.proc name;				\
18 name:
19 
/*
 * GLOBAL_ENTRY(name): ENTRY() for an externally visible procedure --
 * exports the symbol with .global before opening the procedure.
 */
20 #define GLOBAL_ENTRY(name)			\
21 	.global name;				\
22 	ENTRY(name)
23 
/*
 * END(name): close a procedure opened with ENTRY()/GLOBAL_ENTRY(),
 * terminating its .proc unwind region.
 */
24 #define END(name)				\
25 	.endp name
26 
27 /*
28  * Helper macros to make unwind directives more readable:
29  */
30 
31 /* prologue_gr: */
/*
 * Mask bits for the "prologue_gr" unwind descriptor's save mask,
 * saying which of rp / ar.pfs / psp / pr the prologue saves.
 * ASM_UNW_PRLG_GRSAVE(ninputs) evaluates to 32+ninputs, i.e. the
 * number of the first stacked general register past the procedure's
 * inputs (r32 is the first stacked register); NOTE(review): presumably
 * the register the prologue saves those values into -- confirm against
 * the ia64 unwind-descriptor spec.
 */
32 #define ASM_UNW_PRLG_RP			0x8
33 #define ASM_UNW_PRLG_PFS		0x4
34 #define ASM_UNW_PRLG_PSP		0x2
35 #define ASM_UNW_PRLG_PR			0x1
36 #define ASM_UNW_PRLG_GRSAVE(ninputs)	(32+(ninputs))
37 
38 /*
39  * Helper macros for accessing user memory.
40  *
41  * When adding any new .section/.previous entries here, make sure to
42  * also add it to the DISCARD section in arch/ia64/kernel/gate.lds.S or
43  * unpleasant things will happen.
44  */
45 
/*
 * EX(y, x...): emit instruction(s) x tagged with an exception-table
 * entry whose fixup address is y.  The .xdata4 directive stores two
 * self-relative 32-bit words into "__ex_table": the location of the
 * tagged instruction (local label 99) and the fixup target.
 *
 * EXCLR(y, x...): same, but adds 4 to the fixup offset.
 * NOTE(review): the +4 presumably encodes "also clear the destination
 * register" in the entry's low bits -- confirm against the ia64
 * exception-fixup code that consumes __ex_table.
 */
46 	.section "__ex_table", "a"		// declare section & section attributes
47 	.previous
48 
49 # define EX(y,x...)				\
50 	.xdata4 "__ex_table", 99f-., y-.;	\
51   [99:]	x
52 # define EXCLR(y,x...)				\
53 	.xdata4 "__ex_table", 99f-., y-.+4;	\
54   [99:]	x
55 
56 /*
57  * Tag MCA recoverable instruction ranges.
58  */
59 
60 	.section "__mca_table", "a"		// declare section & section attributes
61 	.previous
62 
/*
 * MCA_RECOVER_RANGE(y): record the instruction range [y, 99f) in
 * "__mca_table" as two self-relative 32-bit offsets (start, end),
 * marking it as recoverable after a machine-check abort.  The macro
 * drops local label 99 at the point of use, so the range ends where
 * the macro is expanded.
 */
63 # define MCA_RECOVER_RANGE(y)			\
64 	.xdata4 "__mca_table", y-., 99f-.;	\
65   [99:]
66 
67 /*
68  * Mark instructions that need a load of a virtual address patched to be
69  * a load of a physical address.  We use this either in critical performance
70  * path (ivt.S - TLB miss processing) or in places where it might not be
71  * safe to use a "tpa" instruction (mca_asm.S - error recovery).
72  */
73 	.section ".data..patch.vtop", "a"	// declare section & section attributes
74 	.previous
75 
/*
 * LOAD_PHYSICAL(pr, reg, obj): under qualifying predicate pr, load the
 * (virtual) address of obj into reg with movl, and record the
 * instruction's self-relative location in ".data..patch.vtop" so the
 * kernel can later patch it into a physical-address load (see the
 * comment above: used where "tpa" is not safe or too slow).
 */
76 #define	LOAD_PHYSICAL(pr, reg, obj)		\
77 [1:](pr)movl reg = obj;				\
78 	.xdata4 ".data..patch.vtop", 1b-.
79 
80 /*
81  * For now, we always put in the McKinley E9 workaround.  On CPUs that don't need it,
82  * we'll patch out the work-around bundles with NOPs, so their impact is minimal.
83  */
/* Always build the workaround in; unaffected CPUs get it patched to NOPs. */
84 #define DO_MCKINLEY_E9_WORKAROUND
85 
86 #ifdef DO_MCKINLEY_E9_WORKAROUND
87 	.section ".data..patch.mckinley_e9", "a"
88 	.previous
89 /* workaround for Itanium 2 Errata 9: */
/*
 * FSYS_RETURN: return from a light-weight (fsys) path.  Instead of a
 * bare "br.ret b6", emit two MIB bundles: the first saves ar.pfs in
 * r16 and br.call's to the second, which restores ar.pfs and performs
 * the real br.ret via b6.  The bundle address is recorded in
 * ".data..patch.mckinley_e9" so CPUs that don't need the errata
 * workaround can have it patched out at boot.
 */
90 # define FSYS_RETURN					\
91 	.xdata4 ".data..patch.mckinley_e9", 1f-.;	\
92 1:{ .mib;						\
93 	nop.m 0;					\
94 	mov r16=ar.pfs;					\
95 	br.call.sptk.many b7=2f;;			\
96   };							\
97 2:{ .mib;						\
98 	nop.m 0;					\
99 	mov ar.pfs=r16;					\
100 	br.ret.sptk.many b6;;				\
101   }
102 #else
/* No workaround: plain static-predicted return through b6. */
103 # define FSYS_RETURN	br.ret.sptk.many b6
104 #endif
105 
106 /*
107  * If physical stack register size is different from DEF_NUM_STACK_REG,
108  * dynamically patch the kernel for correct size.
109  */
110 	.section ".data..patch.phys_stack_reg", "a"
111 	.previous
/*
 * LOAD_PHYS_STACK_REG_SIZE(reg): load the default physical stack
 * register size in bytes (IA64_NUM_PHYS_STACK_REG*8+8) into reg via an
 * "adds reg=imm,r0", and record the instruction's location in
 * ".data..patch.phys_stack_reg" so the immediate can be patched at
 * boot on CPUs whose actual stacked-register count differs.
 */
112 #define LOAD_PHYS_STACK_REG_SIZE(reg)			\
113 [1:]	adds reg=IA64_NUM_PHYS_STACK_REG*8+8,r0;	\
114 	.xdata4 ".data..patch.phys_stack_reg", 1b-.
115 
116 /*
117  * Up until early 2004, use of .align within a function caused bad unwind info.
118  * TEXT_ALIGN(n) expands into ".align n" if a fixed GAS is available or into nothing
119  * otherwise.
120  */
121 #ifdef HAVE_WORKING_TEXT_ALIGN
/* GAS is new enough: in-function alignment keeps unwind info correct. */
122 # define TEXT_ALIGN(n)	.align n
123 #else
/* Old GAS: expand to nothing rather than risk bad unwind info. */
124 # define TEXT_ALIGN(n)
125 #endif
126 
/*
 * Data/instruction serialization points.  When the assembler supports
 * the .serialize.* directives, emit them; otherwise the macros expand
 * to nothing, so code can use them unconditionally.
 */
127 #ifdef HAVE_SERIALIZE_DIRECTIVE
128 # define dv_serialize_data		.serialize.data
129 # define dv_serialize_instruction	.serialize.instruction
130 #else
131 # define dv_serialize_data
132 # define dv_serialize_instruction
133 #endif
134 
135 #endif /* _ASM_IA64_ASMMACRO_H */
136