/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _ASM_IA64_ASMMACRO_H
#define _ASM_IA64_ASMMACRO_H

/*
 * Copyright (C) 2000-2001, 2003-2004 Hewlett-Packard Co
 *	David Mosberger-Tang <davidm@hpl.hp.com>
 */


#define ENTRY(name)				\
	.align 32;				\
	.proc name;				\
name:

#define ENTRY_MIN_ALIGN(name)			\
	.align 16;				\
	.proc name;				\
name:

#define GLOBAL_ENTRY(name)			\
	.global name;				\
	ENTRY(name)

#define END(name)				\
	.endp name

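/*
 * Illustrative usage sketch (not part of the original header): a small
 * global leaf routine bracketed by GLOBAL_ENTRY/END.  The routine name,
 * register choices and body are hypothetical.
 *
 *	GLOBAL_ENTRY(example_add_one)
 *		add r8=1,r32			// r8 = first argument + 1
 *		br.ret.sptk.many rp		// return to caller
 *	END(example_add_one)
 */
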
/*
 * Helper macros to make unwind directives more readable:
 */

/* prologue_gr: */
#define ASM_UNW_PRLG_RP			0x8
#define ASM_UNW_PRLG_PFS		0x4
#define ASM_UNW_PRLG_PSP		0x2
#define ASM_UNW_PRLG_PR			0x1
#define ASM_UNW_PRLG_GRSAVE(ninputs)	(32+(ninputs))

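/*
 * Illustrative sketch of how the prologue_gr helpers are typically
 * combined (the routine, frame shape and register names below are
 * hypothetical): the mask says which of rp/pfs/psp/pr the prologue saves,
 * and ASM_UNW_PRLG_GRSAVE names the first stacked register they are
 * saved into.
 *
 *	GLOBAL_ENTRY(example_call_wrapper)
 *		.prologue ASM_UNW_PRLG_RP|ASM_UNW_PRLG_PFS, ASM_UNW_PRLG_GRSAVE(2)
 *		alloc loc1=ar.pfs,2,2,0,0	// 2 inputs, 2 locals
 *		mov loc0=rp			// save return pointer
 *		.body
 *		...
 *	END(example_call_wrapper)
 */
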
/*
 * Helper macros for accessing user memory.
 *
 * When adding any new .section/.previous entries here, make sure to
 * also add them to the DISCARD section in arch/ia64/kernel/gate.lds.S or
 * unpleasant things will happen.
 */

	.section "__ex_table", "a"		// declare section & section attributes
	.previous

# define EX(y,x...)				\
	.xdata4 "__ex_table", 99f-., y-.;	\
  [99:]	x
# define EXCLR(y,x...)				\
	.xdata4 "__ex_table", 99f-., y-.+4;	\
  [99:]	x

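/*
 * Illustrative sketch (not part of the original header): EX() wraps a
 * single instruction that may fault on a user address and records a
 * fixup entry for it in __ex_table.  The label and register below are
 * hypothetical.
 *
 *	EX(.Lcopy_fault, ld8 r8=[r32])		// user load, recovery at .Lcopy_fault
 */
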
/*
 * Tag MCA recoverable instruction ranges.
 */

	.section "__mca_table", "a"		// declare section & section attributes
	.previous

# define MCA_RECOVER_RANGE(y)			\
	.xdata4 "__mca_table", y-., 99f-.;	\
  [99:]

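/*
 * Illustrative sketch (not part of the original header): the recorded
 * range runs from the label passed in up to the point where the macro is
 * expanded.  The instruction and label below are hypothetical.
 *
 *	1:	ld8 r8=[r31]			// access that may take a recoverable MCA
 *		MCA_RECOVER_RANGE(1b)		// record the range ending here in __mca_table
 */
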
/*
 * Mark instructions that need a load of a virtual address patched to be
 * a load of a physical address.  We use this either in a critical performance
 * path (ivt.S - TLB miss processing) or in places where it might not be
 * safe to use a "tpa" instruction (mca_asm.S - error recovery).
 */
	.section ".data..patch.vtop", "a"	// declare section & section attributes
	.previous

#define	LOAD_PHYSICAL(pr, reg, obj)		\
[1:](pr)movl reg = obj;				\
	.xdata4 ".data..patch.vtop", 1b-.

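/*
 * Illustrative sketch (not part of the original header): load the physical
 * address of a kernel object; the predicate, register and symbol below are
 * hypothetical.
 *
 *	LOAD_PHYSICAL(p0, r16, swapper_pg_dir)	// r16 = physical address of swapper_pg_dir
 */
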
/*
 * For now, we always put in the McKinley E9 workaround.  On CPUs that don't need it,
 * we'll patch out the work-around bundles with NOPs, so their impact is minimal.
 */
#define DO_MCKINLEY_E9_WORKAROUND

#ifdef DO_MCKINLEY_E9_WORKAROUND
	.section ".data..patch.mckinley_e9", "a"
	.previous
/* workaround for Itanium 2 Errata 9: */
# define FSYS_RETURN					\
	.xdata4 ".data..patch.mckinley_e9", 1f-.;	\
1:{ .mib;						\
	nop.m 0;					\
	mov r16=ar.pfs;					\
	br.call.sptk.many b7=2f;;			\
  };							\
2:{ .mib;						\
	nop.m 0;					\
	mov ar.pfs=r16;					\
	br.ret.sptk.many b6;;				\
  }
#else
# define FSYS_RETURN	br.ret.sptk.many b6
#endif

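/*
 * Illustrative sketch (not part of the original header): a fast-system-call
 * handler typically ends with FSYS_RETURN rather than a plain
 * "br.ret.sptk.many b6".  The handler name and body are hypothetical.
 *
 *	ENTRY(fsys_example)
 *		mov r8=0			// hypothetical result
 *		FSYS_RETURN
 *	END(fsys_example)
 */
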
/*
 * If the physical stack register size is different from DEF_NUM_STACK_REG,
 * dynamically patch the kernel with the correct size.
 */
	.section ".data..patch.phys_stack_reg", "a"
	.previous
#define LOAD_PHYS_STACK_REG_SIZE(reg)			\
[1:]	adds reg=IA64_NUM_PHYS_STACK_REG*8+8,r0;	\
	.xdata4 ".data..patch.phys_stack_reg", 1b-.

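/*
 * Illustrative sketch (not part of the original header): the macro loads a
 * constant derived from the default register-stack size and tags the
 * instruction so it can be re-patched at boot.  The register is hypothetical.
 *
 *	LOAD_PHYS_STACK_REG_SIZE(r17)		// r17 = IA64_NUM_PHYS_STACK_REG*8+8 (possibly patched)
 */
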
/*
 * Up until early 2004, use of .align within a function caused bad unwind info.
 * TEXT_ALIGN(n) expands into ".align n" if a fixed GAS is available or into nothing
 * otherwise.
 */
#ifdef HAVE_WORKING_TEXT_ALIGN
# define TEXT_ALIGN(n)	.align n
#else
# define TEXT_ALIGN(n)
#endif

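/*
 * Illustrative sketch (not part of the original header): aligning a hot
 * loop inside a function body; the label is hypothetical.
 *
 *		TEXT_ALIGN(32)
 *	.Lexample_loop:
 *		...
 */
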
#ifdef HAVE_SERIALIZE_DIRECTIVE
# define dv_serialize_data		.serialize.data
# define dv_serialize_instruction	.serialize.instruction
#else
# define dv_serialize_data
# define dv_serialize_instruction
#endif

#endif /* _ASM_IA64_ASMMACRO_H */