#ifndef _ASM_IA64_GCC_INTRIN_H
#define _ASM_IA64_GCC_INTRIN_H
/*
 *
 * Copyright (C) 2002,2003 Jun Nakajima <jun.nakajima@intel.com>
 * Copyright (C) 2002,2003 Suresh Siddha <suresh.b.siddha@intel.com>
 */

#include <linux/compiler.h>

/* define this macro to get some asm stmts included in 'c' files */
#define ASM_SUPPORTED

/* Optimization barrier */
/* The "volatile" is due to gcc bugs */
#define ia64_barrier()	asm volatile ("":::"memory")

#define ia64_stop()	asm volatile (";;"::)
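/*
 * Editorial note: ia64_barrier() is a compiler-level barrier only -- it
 * keeps gcc from reordering or caching memory accesses across it but emits
 * no instruction.  Ordering that other agents observe still needs a real
 * fence such as ia64_mf(), defined further down.  Illustrative sketch
 * (not part of the original header; data/flag are hypothetical):
 *
 *	data = 42;
 *	ia64_barrier();		// compiler may not reorder the two stores
 *	flag = 1;		// the CPU still might; use ia64_mf() for that
 */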

#define ia64_invala_gr(regnum)	asm volatile ("invala.e r%0" :: "i"(regnum))

#define ia64_invala_fr(regnum)	asm volatile ("invala.e f%0" :: "i"(regnum))

#define ia64_flushrs() asm volatile ("flushrs;;":::"memory")

#define ia64_loadrs() asm volatile ("loadrs;;":::"memory")

extern void ia64_bad_param_for_setreg (void);
extern void ia64_bad_param_for_getreg (void);

#ifdef __KERNEL__
register unsigned long ia64_r13 asm ("r13") __used;
#endif

#define ia64_native_setreg(regnum, val)						\
({										\
	switch (regnum) {							\
	    case _IA64_REG_PSR_L:						\
		    asm volatile ("mov psr.l=%0" :: "r"(val) : "memory");	\
		    break;							\
	    case _IA64_REG_AR_KR0 ... _IA64_REG_AR_EC:				\
		    asm volatile ("mov ar%0=%1" ::				\
					  "i" (regnum - _IA64_REG_AR_KR0),	\
					  "r"(val): "memory");			\
		    break;							\
	    case _IA64_REG_CR_DCR ... _IA64_REG_CR_LRR1:			\
		    asm volatile ("mov cr%0=%1" ::				\
					  "i" (regnum - _IA64_REG_CR_DCR),	\
					  "r"(val): "memory" );			\
		    break;							\
	    case _IA64_REG_SP:							\
		    asm volatile ("mov r12=%0" ::				\
					  "r"(val): "memory");			\
		    break;							\
	    case _IA64_REG_GP:							\
		    asm volatile ("mov gp=%0" :: "r"(val) : "memory");		\
		break;								\
	    default:								\
		    ia64_bad_param_for_setreg();				\
		    break;							\
	}									\
})

#define ia64_native_getreg(regnum)						\
({										\
	__u64 ia64_intri_res;							\
										\
	switch (regnum) {							\
	case _IA64_REG_GP:							\
		asm volatile ("mov %0=gp" : "=r"(ia64_intri_res));		\
		break;								\
	case _IA64_REG_IP:							\
		asm volatile ("mov %0=ip" : "=r"(ia64_intri_res));		\
		break;								\
	case _IA64_REG_PSR:							\
		asm volatile ("mov %0=psr" : "=r"(ia64_intri_res));		\
		break;								\
	case _IA64_REG_TP:	/* for current() */				\
		ia64_intri_res = ia64_r13;					\
		break;								\
	case _IA64_REG_AR_KR0 ... _IA64_REG_AR_EC:				\
		asm volatile ("mov %0=ar%1" : "=r" (ia64_intri_res)		\
				      : "i"(regnum - _IA64_REG_AR_KR0));	\
		break;								\
	case _IA64_REG_CR_DCR ... _IA64_REG_CR_LRR1:				\
		asm volatile ("mov %0=cr%1" : "=r" (ia64_intri_res)		\
				      : "i" (regnum - _IA64_REG_CR_DCR));	\
		break;								\
	case _IA64_REG_SP:							\
		asm volatile ("mov %0=sp" : "=r" (ia64_intri_res));		\
		break;								\
	default:								\
		ia64_bad_param_for_getreg();					\
		break;								\
	}									\
	ia64_intri_res;								\
})
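/*
 * Usage sketch (illustrative, not part of the original header): the
 * _IA64_REG_* selectors come from asm/ia64regs.h, and regnum must be a
 * compile-time constant so the switch collapses to a single mov.
 *
 *	unsigned long sp = ia64_native_getreg(_IA64_REG_SP);
 *	unsigned long tp = ia64_native_getreg(_IA64_REG_TP);	// current()
 */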

#define ia64_hint_pause 0

#define ia64_hint(mode)						\
({								\
	switch (mode) {						\
	case ia64_hint_pause:					\
		asm volatile ("hint @pause" ::: "memory");	\
		break;						\
	}							\
})
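/*
 * Usage sketch (illustrative only): the pause hint tells the core it is
 * in a spin-wait loop so it can yield resources to the other thread, e.g.
 *
 *	while (*(volatile int *)&lock_word)	// lock_word is hypothetical
 *		ia64_hint(ia64_hint_pause);
 */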


/* Integer values for mux1 instruction */
#define ia64_mux1_brcst 0
#define ia64_mux1_mix   8
#define ia64_mux1_shuf  9
#define ia64_mux1_alt  10
#define ia64_mux1_rev  11

#define ia64_mux1(x, mode)							\
({										\
	__u64 ia64_intri_res;							\
										\
	switch (mode) {								\
	case ia64_mux1_brcst:							\
		asm ("mux1 %0=%1,@brcst" : "=r" (ia64_intri_res) : "r" (x));	\
		break;								\
	case ia64_mux1_mix:							\
		asm ("mux1 %0=%1,@mix" : "=r" (ia64_intri_res) : "r" (x));	\
		break;								\
	case ia64_mux1_shuf:							\
		asm ("mux1 %0=%1,@shuf" : "=r" (ia64_intri_res) : "r" (x));	\
		break;								\
	case ia64_mux1_alt:							\
		asm ("mux1 %0=%1,@alt" : "=r" (ia64_intri_res) : "r" (x));	\
		break;								\
	case ia64_mux1_rev:							\
		asm ("mux1 %0=%1,@rev" : "=r" (ia64_intri_res) : "r" (x));	\
		break;								\
	}									\
	ia64_intri_res;								\
})
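/*
 * Illustrative sketch (not part of the original header): mux1 permutes the
 * eight bytes of a 64-bit value; @brcst replicates byte 0 into all bytes
 * and @rev reverses the byte order.  The mode must be a constant.
 *
 *	__u64 bytes   = ia64_mux1(0x01, ia64_mux1_brcst); // 0x0101010101010101
 *	__u64 swapped = ia64_mux1(x, ia64_mux1_rev);      // byte-reversed x
 */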

#if __GNUC__ >= 4 || (__GNUC__ == 3 && __GNUC_MINOR__ >= 4)
# define ia64_popcnt(x)		__builtin_popcountl(x)
#else
# define ia64_popcnt(x)						\
  ({								\
	__u64 ia64_intri_res;					\
	asm ("popcnt %0=%1" : "=r" (ia64_intri_res) : "r" (x));	\
								\
	ia64_intri_res;						\
  })
#endif
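/*
 * Illustrative sketch (not part of the original header): counts the set
 * bits of a 64-bit value, whichever implementation is selected above.
 *
 *	__u64 bits = ia64_popcnt(0xf0f0UL);	// 8
 */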

#define ia64_getf_exp(x)					\
({								\
	long ia64_intri_res;					\
								\
	asm ("getf.exp %0=%1" : "=r"(ia64_intri_res) : "f"(x));	\
								\
	ia64_intri_res;						\
})

#define ia64_shrp(a, b, count)								\
({											\
	__u64 ia64_intri_res;								\
	asm ("shrp %0=%1,%2,%3" : "=r"(ia64_intri_res) : "r"(a), "r"(b), "i"(count));	\
	ia64_intri_res;									\
})
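/*
 * Illustrative sketch (not part of the original header): shrp extracts 64
 * bits from the 128-bit concatenation a:b, so passing the same value twice
 * gives a rotate right; count must be a compile-time constant (0..63).
 *
 *	__u64 ror8 = ia64_shrp(x, x, 8);	// rotate x right by 8 bits
 */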

#define ia64_ldfs(regnum, x)					\
({								\
	register double __f__ asm ("f"#regnum);			\
	asm volatile ("ldfs %0=[%1]" :"=f"(__f__): "r"(x));	\
})

#define ia64_ldfd(regnum, x)					\
({								\
	register double __f__ asm ("f"#regnum);			\
	asm volatile ("ldfd %0=[%1]" :"=f"(__f__): "r"(x));	\
})

#define ia64_ldfe(regnum, x)					\
({								\
	register double __f__ asm ("f"#regnum);			\
	asm volatile ("ldfe %0=[%1]" :"=f"(__f__): "r"(x));	\
})

#define ia64_ldf8(regnum, x)					\
({								\
	register double __f__ asm ("f"#regnum);			\
	asm volatile ("ldf8 %0=[%1]" :"=f"(__f__): "r"(x));	\
})

#define ia64_ldf_fill(regnum, x)				\
({								\
	register double __f__ asm ("f"#regnum);			\
	asm volatile ("ldf.fill %0=[%1]" :"=f"(__f__): "r"(x));	\
})

#define ia64_st4_rel_nta(m, val)					\
({									\
	asm volatile ("st4.rel.nta [%0] = %1\n\t" :: "r"(m), "r"(val));	\
})

#define ia64_stfs(x, regnum)						\
({									\
	register double __f__ asm ("f"#regnum);				\
	asm volatile ("stfs [%0]=%1" :: "r"(x), "f"(__f__) : "memory");	\
})

#define ia64_stfd(x, regnum)						\
({									\
	register double __f__ asm ("f"#regnum);				\
	asm volatile ("stfd [%0]=%1" :: "r"(x), "f"(__f__) : "memory");	\
})

#define ia64_stfe(x, regnum)						\
({									\
	register double __f__ asm ("f"#regnum);				\
	asm volatile ("stfe [%0]=%1" :: "r"(x), "f"(__f__) : "memory");	\
})

#define ia64_stf8(x, regnum)						\
({									\
	register double __f__ asm ("f"#regnum);				\
	asm volatile ("stf8 [%0]=%1" :: "r"(x), "f"(__f__) : "memory");	\
})

#define ia64_stf_spill(x, regnum)						\
({										\
	register double __f__ asm ("f"#regnum);					\
	asm volatile ("stf.spill [%0]=%1" :: "r"(x), "f"(__f__) : "memory");	\
})
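/*
 * Usage sketch (illustrative only): the register number is pasted into the
 * asm template, so it must be a literal.  Spilling and restoring f6 through
 * a 16-byte-aligned buffer (buf is a hypothetical local):
 *
 *	ia64_stf_spill(buf, 6);		// save f6 in spill format
 *	...
 *	ia64_ldf_fill(6, buf);		// restore f6
 */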

#define ia64_fetchadd4_acq(p, inc)						\
({										\
										\
	__u64 ia64_intri_res;							\
	asm volatile ("fetchadd4.acq %0=[%1],%2"				\
				: "=r"(ia64_intri_res) : "r"(p), "i" (inc)	\
				: "memory");					\
										\
	ia64_intri_res;								\
})

#define ia64_fetchadd4_rel(p, inc)						\
({										\
	__u64 ia64_intri_res;							\
	asm volatile ("fetchadd4.rel %0=[%1],%2"				\
				: "=r"(ia64_intri_res) : "r"(p), "i" (inc)	\
				: "memory");					\
										\
	ia64_intri_res;								\
})

#define ia64_fetchadd8_acq(p, inc)						\
({										\
										\
	__u64 ia64_intri_res;							\
	asm volatile ("fetchadd8.acq %0=[%1],%2"				\
				: "=r"(ia64_intri_res) : "r"(p), "i" (inc)	\
				: "memory");					\
										\
	ia64_intri_res;								\
})

#define ia64_fetchadd8_rel(p, inc)						\
({										\
	__u64 ia64_intri_res;							\
	asm volatile ("fetchadd8.rel %0=[%1],%2"				\
				: "=r"(ia64_intri_res) : "r"(p), "i" (inc)	\
				: "memory");					\
										\
	ia64_intri_res;								\
})
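/*
 * Usage sketch (illustrative only): fetchadd returns the value the memory
 * location held before the add, and the increment must be one of the
 * immediates the instruction accepts (-16, -8, -4, -1, 1, 4, 8, 16).
 *
 *	static int counter;
 *	__u64 old = ia64_fetchadd4_acq(&counter, 1);	// counter++, acquire
 */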

#define ia64_xchg1(ptr,x)							\
({										\
	__u64 ia64_intri_res;							\
	asm volatile ("xchg1 %0=[%1],%2"					\
		      : "=r" (ia64_intri_res) : "r" (ptr), "r" (x) : "memory");	\
	ia64_intri_res;								\
})

#define ia64_xchg2(ptr,x)						\
({									\
	__u64 ia64_intri_res;						\
	asm volatile ("xchg2 %0=[%1],%2" : "=r" (ia64_intri_res)	\
		      : "r" (ptr), "r" (x) : "memory");			\
	ia64_intri_res;							\
})

#define ia64_xchg4(ptr,x)						\
({									\
	__u64 ia64_intri_res;						\
	asm volatile ("xchg4 %0=[%1],%2" : "=r" (ia64_intri_res)	\
		      : "r" (ptr), "r" (x) : "memory");			\
	ia64_intri_res;							\
})

#define ia64_xchg8(ptr,x)						\
({									\
	__u64 ia64_intri_res;						\
	asm volatile ("xchg8 %0=[%1],%2" : "=r" (ia64_intri_res)	\
		      : "r" (ptr), "r" (x) : "memory");			\
	ia64_intri_res;							\
})
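/*
 * Usage sketch (illustrative only): a minimal test-and-set lock built on
 * xchg4 plus the pause hint defined above (lock_word is hypothetical):
 *
 *	while (ia64_xchg4(&lock_word, 1) != 0)
 *		ia64_hint(ia64_hint_pause);
 *	...					// critical section
 *	ia64_st4_rel_nta(&lock_word, 0);	// release store
 */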

#define ia64_cmpxchg1_acq(ptr, new, old)						\
({											\
	__u64 ia64_intri_res;								\
	asm volatile ("mov ar.ccv=%0;;" :: "rO"(old));					\
	asm volatile ("cmpxchg1.acq %0=[%1],%2,ar.ccv":					\
			      "=r"(ia64_intri_res) : "r"(ptr), "r"(new) : "memory");	\
	ia64_intri_res;									\
})

#define ia64_cmpxchg1_rel(ptr, new, old)						\
({											\
	__u64 ia64_intri_res;								\
	asm volatile ("mov ar.ccv=%0;;" :: "rO"(old));					\
	asm volatile ("cmpxchg1.rel %0=[%1],%2,ar.ccv":					\
			      "=r"(ia64_intri_res) : "r"(ptr), "r"(new) : "memory");	\
	ia64_intri_res;									\
})

#define ia64_cmpxchg2_acq(ptr, new, old)						\
({											\
	__u64 ia64_intri_res;								\
	asm volatile ("mov ar.ccv=%0;;" :: "rO"(old));					\
	asm volatile ("cmpxchg2.acq %0=[%1],%2,ar.ccv":					\
			      "=r"(ia64_intri_res) : "r"(ptr), "r"(new) : "memory");	\
	ia64_intri_res;									\
})

#define ia64_cmpxchg2_rel(ptr, new, old)						\
({											\
	__u64 ia64_intri_res;								\
	asm volatile ("mov ar.ccv=%0;;" :: "rO"(old));					\
											\
	asm volatile ("cmpxchg2.rel %0=[%1],%2,ar.ccv":					\
			      "=r"(ia64_intri_res) : "r"(ptr), "r"(new) : "memory");	\
	ia64_intri_res;									\
})

#define ia64_cmpxchg4_acq(ptr, new, old)						\
({											\
	__u64 ia64_intri_res;								\
	asm volatile ("mov ar.ccv=%0;;" :: "rO"(old));					\
	asm volatile ("cmpxchg4.acq %0=[%1],%2,ar.ccv":					\
			      "=r"(ia64_intri_res) : "r"(ptr), "r"(new) : "memory");	\
	ia64_intri_res;									\
})

#define ia64_cmpxchg4_rel(ptr, new, old)						\
({											\
	__u64 ia64_intri_res;								\
	asm volatile ("mov ar.ccv=%0;;" :: "rO"(old));					\
	asm volatile ("cmpxchg4.rel %0=[%1],%2,ar.ccv":					\
			      "=r"(ia64_intri_res) : "r"(ptr), "r"(new) : "memory");	\
	ia64_intri_res;									\
})

#define ia64_cmpxchg8_acq(ptr, new, old)						\
({											\
	__u64 ia64_intri_res;								\
	asm volatile ("mov ar.ccv=%0;;" :: "rO"(old));					\
	asm volatile ("cmpxchg8.acq %0=[%1],%2,ar.ccv":					\
			      "=r"(ia64_intri_res) : "r"(ptr), "r"(new) : "memory");	\
	ia64_intri_res;									\
})

#define ia64_cmpxchg8_rel(ptr, new, old)						\
({											\
	__u64 ia64_intri_res;								\
	asm volatile ("mov ar.ccv=%0;;" :: "rO"(old));					\
											\
	asm volatile ("cmpxchg8.rel %0=[%1],%2,ar.ccv":					\
			      "=r"(ia64_intri_res) : "r"(ptr), "r"(new) : "memory");	\
	ia64_intri_res;									\
})
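/*
 * Usage sketch (illustrative only): the expected value is staged in ar.ccv
 * and the macro returns whatever was found in memory, so a compare-and-swap
 * loop retries until that matches the expected old value (p is a
 * hypothetical __u32 *):
 *
 *	__u32 old, new;
 *	do {
 *		old = *p;
 *		new = old + 1;
 *	} while (ia64_cmpxchg4_acq(p, new, old) != old);
 */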

#define ia64_mf()	asm volatile ("mf" ::: "memory")
#define ia64_mfa()	asm volatile ("mf.a" ::: "memory")

#define ia64_invala() asm volatile ("invala" ::: "memory")
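/*
 * Illustrative sketch (not part of the original header): mf is a full
 * memory fence visible to other agents, e.g. publishing data before a
 * flag (data_p/flag_p are hypothetical pointers):
 *
 *	*data_p = value;
 *	ia64_mf();		// order the data store before the flag store
 *	*flag_p = 1;
 */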

#define ia64_native_thash(addr)							\
({										\
	__u64 ia64_intri_res;							\
	asm volatile ("thash %0=%1" : "=r"(ia64_intri_res) : "r" (addr));	\
	ia64_intri_res;								\
})

#define ia64_srlz_i()	asm volatile (";; srlz.i ;;" ::: "memory")
#define ia64_srlz_d()	asm volatile (";; srlz.d" ::: "memory");

#ifdef HAVE_SERIALIZE_DIRECTIVE
# define ia64_dv_serialize_data()		asm volatile (".serialize.data");
# define ia64_dv_serialize_instruction()	asm volatile (".serialize.instruction");
#else
# define ia64_dv_serialize_data()
# define ia64_dv_serialize_instruction()
#endif

#define ia64_nop(x)	asm volatile ("nop %0"::"i"(x));

#define ia64_itci(addr)	asm volatile ("itc.i %0;;" :: "r"(addr) : "memory")

#define ia64_itcd(addr)	asm volatile ("itc.d %0;;" :: "r"(addr) : "memory")


#define ia64_itri(trnum, addr) asm volatile ("itr.i itr[%0]=%1"				\
					     :: "r"(trnum), "r"(addr) : "memory")

#define ia64_itrd(trnum, addr) asm volatile ("itr.d dtr[%0]=%1"				\
					     :: "r"(trnum), "r"(addr) : "memory")

#define ia64_tpa(addr)								\
({										\
	__u64 ia64_pa;								\
	asm volatile ("tpa %0 = %1" : "=r"(ia64_pa) : "r"(addr) : "memory");	\
	ia64_pa;								\
})
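/*
 * Usage sketch (illustrative only): tpa translates a virtual address to a
 * physical one through the data TLB, so a valid translation must be
 * reachable without faulting (virt is a hypothetical mapped address):
 *
 *	__u64 phys = ia64_tpa(virt);
 */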

#define __ia64_set_dbr(index, val)						\
	asm volatile ("mov dbr[%0]=%1" :: "r"(index), "r"(val) : "memory")

#define ia64_set_ibr(index, val)						\
	asm volatile ("mov ibr[%0]=%1" :: "r"(index), "r"(val) : "memory")

#define ia64_set_pkr(index, val)						\
	asm volatile ("mov pkr[%0]=%1" :: "r"(index), "r"(val) : "memory")

#define ia64_set_pmc(index, val)						\
	asm volatile ("mov pmc[%0]=%1" :: "r"(index), "r"(val) : "memory")

#define ia64_set_pmd(index, val)						\
	asm volatile ("mov pmd[%0]=%1" :: "r"(index), "r"(val) : "memory")

#define ia64_native_set_rr(index, val)							\
	asm volatile ("mov rr[%0]=%1" :: "r"(index), "r"(val) : "memory");

#define ia64_native_get_cpuid(index)							\
({											\
	__u64 ia64_intri_res;								\
	asm volatile ("mov %0=cpuid[%r1]" : "=r"(ia64_intri_res) : "rO"(index));	\
	ia64_intri_res;									\
})
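/*
 * Usage sketch (illustrative only): cpuid[3] holds the processor version
 * information (family/model/revision fields); the "rO" constraint lets the
 * index be a register or the constant 0.
 *
 *	__u64 version = ia64_native_get_cpuid(3);
 */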

#define __ia64_get_dbr(index)							\
({										\
	__u64 ia64_intri_res;							\
	asm volatile ("mov %0=dbr[%1]" : "=r"(ia64_intri_res) : "r"(index));	\
	ia64_intri_res;								\
})

#define ia64_get_ibr(index)							\
({										\
	__u64 ia64_intri_res;							\
	asm volatile ("mov %0=ibr[%1]" : "=r"(ia64_intri_res) : "r"(index));	\
	ia64_intri_res;								\
})

#define ia64_get_pkr(index)							\
({										\
	__u64 ia64_intri_res;							\
	asm volatile ("mov %0=pkr[%1]" : "=r"(ia64_intri_res) : "r"(index));	\
	ia64_intri_res;								\
})

#define ia64_get_pmc(index)							\
({										\
	__u64 ia64_intri_res;							\
	asm volatile ("mov %0=pmc[%1]" : "=r"(ia64_intri_res) : "r"(index));	\
	ia64_intri_res;								\
})


#define ia64_native_get_pmd(index)						\
({										\
	__u64 ia64_intri_res;							\
	asm volatile ("mov %0=pmd[%1]" : "=r"(ia64_intri_res) : "r"(index));	\
	ia64_intri_res;								\
})

#define ia64_native_get_rr(index)						\
({										\
	__u64 ia64_intri_res;							\
	asm volatile ("mov %0=rr[%1]" : "=r"(ia64_intri_res) : "r" (index));	\
	ia64_intri_res;								\
})

#define ia64_native_fc(addr)	asm volatile ("fc %0" :: "r"(addr) : "memory")


#define ia64_sync_i()	asm volatile (";; sync.i" ::: "memory")

#define ia64_native_ssm(mask)	asm volatile ("ssm %0":: "i"((mask)) : "memory")
#define ia64_native_rsm(mask)	asm volatile ("rsm %0":: "i"((mask)) : "memory")
#define ia64_sum(mask)	asm volatile ("sum %0":: "i"((mask)) : "memory")
#define ia64_rum(mask)	asm volatile ("rum %0":: "i"((mask)) : "memory")

#define ia64_ptce(addr)	asm volatile ("ptc.e %0" :: "r"(addr))

#define ia64_native_ptcga(addr, size)						\
do {										\
	asm volatile ("ptc.ga %0,%1" :: "r"(addr), "r"(size) : "memory");	\
	ia64_dv_serialize_data();						\
} while (0)

#define ia64_ptcl(addr, size)							\
do {										\
	asm volatile ("ptc.l %0,%1" :: "r"(addr), "r"(size) : "memory");	\
	ia64_dv_serialize_data();						\
} while (0)

#define ia64_ptri(addr, size)						\
	asm volatile ("ptr.i %0,%1" :: "r"(addr), "r"(size) : "memory")

#define ia64_ptrd(addr, size)						\
	asm volatile ("ptr.d %0,%1" :: "r"(addr), "r"(size) : "memory")

#define ia64_ttag(addr)							\
({									\
	__u64 ia64_intri_res;						\
	asm volatile ("ttag %0=%1" : "=r"(ia64_intri_res) : "r" (addr));	\
	ia64_intri_res;							\
})


/* Values for lfhint in ia64_lfetch and ia64_lfetch_fault */

#define ia64_lfhint_none   0
#define ia64_lfhint_nt1    1
#define ia64_lfhint_nt2    2
#define ia64_lfhint_nta    3

#define ia64_lfetch(lfhint, y)					\
({								\
        switch (lfhint) {					\
        case ia64_lfhint_none:					\
                asm volatile ("lfetch [%0]" : : "r"(y));	\
                break;						\
        case ia64_lfhint_nt1:					\
                asm volatile ("lfetch.nt1 [%0]" : : "r"(y));	\
                break;						\
        case ia64_lfhint_nt2:					\
                asm volatile ("lfetch.nt2 [%0]" : : "r"(y));	\
                break;						\
        case ia64_lfhint_nta:					\
                asm volatile ("lfetch.nta [%0]" : : "r"(y));	\
                break;						\
        }							\
})

#define ia64_lfetch_excl(lfhint, y)					\
({									\
        switch (lfhint) {						\
        case ia64_lfhint_none:						\
                asm volatile ("lfetch.excl [%0]" :: "r"(y));		\
                break;							\
        case ia64_lfhint_nt1:						\
                asm volatile ("lfetch.excl.nt1 [%0]" :: "r"(y));	\
                break;							\
        case ia64_lfhint_nt2:						\
                asm volatile ("lfetch.excl.nt2 [%0]" :: "r"(y));	\
                break;							\
        case ia64_lfhint_nta:						\
                asm volatile ("lfetch.excl.nta [%0]" :: "r"(y));	\
                break;							\
        }								\
})

#define ia64_lfetch_fault(lfhint, y)					\
({									\
        switch (lfhint) {						\
        case ia64_lfhint_none:						\
                asm volatile ("lfetch.fault [%0]" : : "r"(y));		\
                break;							\
        case ia64_lfhint_nt1:						\
                asm volatile ("lfetch.fault.nt1 [%0]" : : "r"(y));	\
                break;							\
        case ia64_lfhint_nt2:						\
                asm volatile ("lfetch.fault.nt2 [%0]" : : "r"(y));	\
                break;							\
        case ia64_lfhint_nta:						\
                asm volatile ("lfetch.fault.nta [%0]" : : "r"(y));	\
                break;							\
        }								\
})

#define ia64_lfetch_fault_excl(lfhint, y)				\
({									\
        switch (lfhint) {						\
        case ia64_lfhint_none:						\
                asm volatile ("lfetch.fault.excl [%0]" :: "r"(y));	\
                break;							\
        case ia64_lfhint_nt1:						\
                asm volatile ("lfetch.fault.excl.nt1 [%0]" :: "r"(y));	\
                break;							\
        case ia64_lfhint_nt2:						\
                asm volatile ("lfetch.fault.excl.nt2 [%0]" :: "r"(y));	\
                break;							\
        case ia64_lfhint_nta:						\
                asm volatile ("lfetch.fault.excl.nta [%0]" :: "r"(y));	\
                break;							\
        }								\
})
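/*
 * Usage sketch (illustrative only): prefetch a line ahead of a streaming
 * loop with a non-temporal hint (p is a hypothetical pointer):
 *
 *	ia64_lfetch(ia64_lfhint_nta, p + 64);
 *
 * The .fault forms may take a fault to bring the translation in; the plain
 * forms are dropped silently if the address would fault.
 */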

#define ia64_native_intrin_local_irq_restore(x)			\
do {								\
	asm volatile (";;   cmp.ne p6,p7=%0,r0;;"		\
		      "(p6) ssm psr.i;"				\
		      "(p7) rsm psr.i;;"			\
		      "(p6) srlz.d"				\
		      :: "r"((x)) : "p6", "p7", "memory");	\
} while (0)
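/*
 * Usage sketch (illustrative only): re-enables interrupts only if the
 * argument is non-zero, typically the saved psr.i bit.  IA64_PSR_I comes
 * from asm/kregs.h and psr is read with the getreg intrinsic above:
 *
 *	unsigned long psr = ia64_native_getreg(_IA64_REG_PSR);
 *	ia64_native_rsm(IA64_PSR_I);			// disable interrupts
 *	...						// critical section
 *	ia64_native_intrin_local_irq_restore(psr & IA64_PSR_I);
 */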

#endif /* _ASM_IA64_GCC_INTRIN_H */