1 /*
2  *
3  * Copyright (C) 2002,2003 Jun Nakajima <jun.nakajima@intel.com>
4  * Copyright (C) 2002,2003 Suresh Siddha <suresh.b.siddha@intel.com>
5  */
6 #ifndef _UAPI_ASM_IA64_GCC_INTRIN_H
7 #define _UAPI_ASM_IA64_GCC_INTRIN_H
8 
9 #include <linux/types.h>
10 #include <linux/compiler.h>
11 
12 /* define this macro to get some asm stmts included in 'c' files */
13 #define ASM_SUPPORTED
14 
15 /* Optimization barrier */
16 /* The "volatile" is due to gcc bugs */
17 #define ia64_barrier()	asm volatile ("":::"memory")
18 
19 #define ia64_stop()	asm volatile (";;"::)
20 
21 #define ia64_invala_gr(regnum)	asm volatile ("invala.e r%0" :: "i"(regnum))
22 
23 #define ia64_invala_fr(regnum)	asm volatile ("invala.e f%0" :: "i"(regnum))
24 
25 #define ia64_flushrs() asm volatile ("flushrs;;":::"memory")
26 
27 #define ia64_loadrs() asm volatile ("loadrs;;":::"memory")
28 
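/*
 * These helpers are deliberately never defined.  Each caller below is
 * a switch over a compile-time constant, so a register number the
 * compiler cannot constant-fold leaves a call behind and the build
 * fails at link time instead of silently generating wrong code.
 */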
29 extern void ia64_bad_param_for_setreg (void);
30 extern void ia64_bad_param_for_getreg (void);
31 
#define ia64_native_setreg(regnum, val)						\
({										\
	switch (regnum) {							\
	case _IA64_REG_PSR_L:							\
		asm volatile ("mov psr.l=%0" :: "r"(val) : "memory");		\
		break;								\
	case _IA64_REG_AR_KR0 ... _IA64_REG_AR_EC:				\
		asm volatile ("mov ar%0=%1" ::					\
			      "i" (regnum - _IA64_REG_AR_KR0),			\
			      "r"(val) : "memory");				\
		break;								\
	case _IA64_REG_CR_DCR ... _IA64_REG_CR_LRR1:				\
		asm volatile ("mov cr%0=%1" ::					\
			      "i" (regnum - _IA64_REG_CR_DCR),			\
			      "r"(val) : "memory");				\
		break;								\
	case _IA64_REG_SP:							\
		asm volatile ("mov r12=%0" :: "r"(val) : "memory");		\
		break;								\
	case _IA64_REG_GP:							\
		asm volatile ("mov gp=%0" :: "r"(val) : "memory");		\
		break;								\
	default:								\
		ia64_bad_param_for_setreg();					\
		break;								\
	}									\
})
61 
62 #define ia64_native_getreg(regnum)						\
63 ({										\
64 	__u64 ia64_intri_res;							\
65 										\
66 	switch (regnum) {							\
67 	case _IA64_REG_GP:							\
68 		asm volatile ("mov %0=gp" : "=r"(ia64_intri_res));		\
69 		break;								\
70 	case _IA64_REG_IP:							\
71 		asm volatile ("mov %0=ip" : "=r"(ia64_intri_res));		\
72 		break;								\
73 	case _IA64_REG_PSR:							\
74 		asm volatile ("mov %0=psr" : "=r"(ia64_intri_res));		\
75 		break;								\
76 	case _IA64_REG_TP:	/* for current() */				\
77 		ia64_intri_res = ia64_r13;					\
78 		break;								\
79 	case _IA64_REG_AR_KR0 ... _IA64_REG_AR_EC:				\
80 		asm volatile ("mov %0=ar%1" : "=r" (ia64_intri_res)		\
81 				      : "i"(regnum - _IA64_REG_AR_KR0));	\
82 		break;								\
83 	case _IA64_REG_CR_DCR ... _IA64_REG_CR_LRR1:				\
84 		asm volatile ("mov %0=cr%1" : "=r" (ia64_intri_res)		\
85 				      : "i" (regnum - _IA64_REG_CR_DCR));	\
86 		break;								\
87 	case _IA64_REG_SP:							\
88 		asm volatile ("mov %0=sp" : "=r" (ia64_intri_res));		\
89 		break;								\
90 	default:								\
91 		ia64_bad_param_for_getreg();					\
92 		break;								\
93 	}									\
94 	ia64_intri_res;								\
95 })
96 
97 #define ia64_hint_pause 0
98 
99 #define ia64_hint(mode)						\
100 ({								\
101 	switch (mode) {						\
102 	case ia64_hint_pause:					\
103 		asm volatile ("hint @pause" ::: "memory");	\
104 		break;						\
105 	}							\
106 })
107 
109 /* Integer values for mux1 instruction */
110 #define ia64_mux1_brcst 0
111 #define ia64_mux1_mix   8
112 #define ia64_mux1_shuf  9
113 #define ia64_mux1_alt  10
114 #define ia64_mux1_rev  11
115 
116 #define ia64_mux1(x, mode)							\
117 ({										\
118 	__u64 ia64_intri_res;							\
119 										\
120 	switch (mode) {								\
121 	case ia64_mux1_brcst:							\
122 		asm ("mux1 %0=%1,@brcst" : "=r" (ia64_intri_res) : "r" (x));	\
123 		break;								\
124 	case ia64_mux1_mix:							\
125 		asm ("mux1 %0=%1,@mix" : "=r" (ia64_intri_res) : "r" (x));	\
126 		break;								\
127 	case ia64_mux1_shuf:							\
128 		asm ("mux1 %0=%1,@shuf" : "=r" (ia64_intri_res) : "r" (x));	\
129 		break;								\
130 	case ia64_mux1_alt:							\
131 		asm ("mux1 %0=%1,@alt" : "=r" (ia64_intri_res) : "r" (x));	\
132 		break;								\
133 	case ia64_mux1_rev:							\
134 		asm ("mux1 %0=%1,@rev" : "=r" (ia64_intri_res) : "r" (x));	\
135 		break;								\
136 	}									\
137 	ia64_intri_res;								\
138 })
139 
140 #if __GNUC__ >= 4 || (__GNUC__ == 3 && __GNUC_MINOR__ >= 4)
141 # define ia64_popcnt(x)		__builtin_popcountl(x)
142 #else
143 # define ia64_popcnt(x)						\
144   ({								\
145 	__u64 ia64_intri_res;					\
146 	asm ("popcnt %0=%1" : "=r" (ia64_intri_res) : "r" (x));	\
147 								\
148 	ia64_intri_res;						\
149   })
150 #endif
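
/*
 * On gcc 3.4 and later __builtin_popcountl() should expand to the
 * same native popcnt instruction as the explicit asm above.
 */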
151 
152 #define ia64_getf_exp(x)					\
153 ({								\
154 	long ia64_intri_res;					\
155 								\
156 	asm ("getf.exp %0=%1" : "=r"(ia64_intri_res) : "f"(x));	\
157 								\
158 	ia64_intri_res;						\
159 })
160 
161 #define ia64_shrp(a, b, count)								\
162 ({											\
163 	__u64 ia64_intri_res;								\
164 	asm ("shrp %0=%1,%2,%3" : "=r"(ia64_intri_res) : "r"(a), "r"(b), "i"(count));	\
165 	ia64_intri_res;									\
166 })
167 
168 #define ia64_ldfs(regnum, x)					\
169 ({								\
170 	register double __f__ asm ("f"#regnum);			\
171 	asm volatile ("ldfs %0=[%1]" :"=f"(__f__): "r"(x));	\
172 })
173 
174 #define ia64_ldfd(regnum, x)					\
175 ({								\
176 	register double __f__ asm ("f"#regnum);			\
177 	asm volatile ("ldfd %0=[%1]" :"=f"(__f__): "r"(x));	\
178 })
179 
180 #define ia64_ldfe(regnum, x)					\
181 ({								\
182 	register double __f__ asm ("f"#regnum);			\
183 	asm volatile ("ldfe %0=[%1]" :"=f"(__f__): "r"(x));	\
184 })
185 
186 #define ia64_ldf8(regnum, x)					\
187 ({								\
188 	register double __f__ asm ("f"#regnum);			\
189 	asm volatile ("ldf8 %0=[%1]" :"=f"(__f__): "r"(x));	\
190 })
191 
192 #define ia64_ldf_fill(regnum, x)				\
193 ({								\
194 	register double __f__ asm ("f"#regnum);			\
195 	asm volatile ("ldf.fill %0=[%1]" :"=f"(__f__): "r"(x));	\
196 })
197 
198 #define ia64_st4_rel_nta(m, val)					\
199 ({									\
200 	asm volatile ("st4.rel.nta [%0] = %1\n\t" :: "r"(m), "r"(val));	\
201 })
202 
203 #define ia64_stfs(x, regnum)						\
204 ({									\
205 	register double __f__ asm ("f"#regnum);				\
206 	asm volatile ("stfs [%0]=%1" :: "r"(x), "f"(__f__) : "memory");	\
207 })
208 
209 #define ia64_stfd(x, regnum)						\
210 ({									\
211 	register double __f__ asm ("f"#regnum);				\
212 	asm volatile ("stfd [%0]=%1" :: "r"(x), "f"(__f__) : "memory");	\
213 })
214 
215 #define ia64_stfe(x, regnum)						\
216 ({									\
217 	register double __f__ asm ("f"#regnum);				\
218 	asm volatile ("stfe [%0]=%1" :: "r"(x), "f"(__f__) : "memory");	\
219 })
220 
221 #define ia64_stf8(x, regnum)						\
222 ({									\
223 	register double __f__ asm ("f"#regnum);				\
224 	asm volatile ("stf8 [%0]=%1" :: "r"(x), "f"(__f__) : "memory");	\
225 })
226 
227 #define ia64_stf_spill(x, regnum)						\
228 ({										\
229 	register double __f__ asm ("f"#regnum);					\
230 	asm volatile ("stf.spill [%0]=%1" :: "r"(x), "f"(__f__) : "memory");	\
231 })
232 
233 #define ia64_fetchadd4_acq(p, inc)						\
234 ({										\
235 										\
236 	__u64 ia64_intri_res;							\
237 	asm volatile ("fetchadd4.acq %0=[%1],%2"				\
238 				: "=r"(ia64_intri_res) : "r"(p), "i" (inc)	\
239 				: "memory");					\
240 										\
241 	ia64_intri_res;								\
242 })
243 
244 #define ia64_fetchadd4_rel(p, inc)						\
245 ({										\
246 	__u64 ia64_intri_res;							\
247 	asm volatile ("fetchadd4.rel %0=[%1],%2"				\
248 				: "=r"(ia64_intri_res) : "r"(p), "i" (inc)	\
249 				: "memory");					\
250 										\
251 	ia64_intri_res;								\
252 })
253 
254 #define ia64_fetchadd8_acq(p, inc)						\
255 ({										\
256 										\
257 	__u64 ia64_intri_res;							\
258 	asm volatile ("fetchadd8.acq %0=[%1],%2"				\
259 				: "=r"(ia64_intri_res) : "r"(p), "i" (inc)	\
260 				: "memory");					\
261 										\
262 	ia64_intri_res;								\
263 })
264 
265 #define ia64_fetchadd8_rel(p, inc)						\
266 ({										\
267 	__u64 ia64_intri_res;							\
268 	asm volatile ("fetchadd8.rel %0=[%1],%2"				\
269 				: "=r"(ia64_intri_res) : "r"(p), "i" (inc)	\
270 				: "memory");					\
271 										\
272 	ia64_intri_res;								\
273 })
274 
275 #define ia64_xchg1(ptr,x)							\
276 ({										\
277 	__u64 ia64_intri_res;							\
278 	asm volatile ("xchg1 %0=[%1],%2"					\
279 		      : "=r" (ia64_intri_res) : "r" (ptr), "r" (x) : "memory");	\
280 	ia64_intri_res;								\
281 })
282 
283 #define ia64_xchg2(ptr,x)						\
284 ({									\
285 	__u64 ia64_intri_res;						\
286 	asm volatile ("xchg2 %0=[%1],%2" : "=r" (ia64_intri_res)	\
287 		      : "r" (ptr), "r" (x) : "memory");			\
288 	ia64_intri_res;							\
289 })
290 
291 #define ia64_xchg4(ptr,x)						\
292 ({									\
293 	__u64 ia64_intri_res;						\
294 	asm volatile ("xchg4 %0=[%1],%2" : "=r" (ia64_intri_res)	\
295 		      : "r" (ptr), "r" (x) : "memory");			\
296 	ia64_intri_res;							\
297 })
298 
299 #define ia64_xchg8(ptr,x)						\
300 ({									\
301 	__u64 ia64_intri_res;						\
302 	asm volatile ("xchg8 %0=[%1],%2" : "=r" (ia64_intri_res)	\
303 		      : "r" (ptr), "r" (x) : "memory");			\
304 	ia64_intri_res;							\
305 })
306 
307 #define ia64_cmpxchg1_acq(ptr, new, old)						\
308 ({											\
309 	__u64 ia64_intri_res;								\
310 	asm volatile ("mov ar.ccv=%0;;" :: "rO"(old));					\
311 	asm volatile ("cmpxchg1.acq %0=[%1],%2,ar.ccv":					\
312 			      "=r"(ia64_intri_res) : "r"(ptr), "r"(new) : "memory");	\
313 	ia64_intri_res;									\
314 })
315 
316 #define ia64_cmpxchg1_rel(ptr, new, old)						\
317 ({											\
318 	__u64 ia64_intri_res;								\
319 	asm volatile ("mov ar.ccv=%0;;" :: "rO"(old));					\
320 	asm volatile ("cmpxchg1.rel %0=[%1],%2,ar.ccv":					\
321 			      "=r"(ia64_intri_res) : "r"(ptr), "r"(new) : "memory");	\
322 	ia64_intri_res;									\
323 })
324 
325 #define ia64_cmpxchg2_acq(ptr, new, old)						\
326 ({											\
327 	__u64 ia64_intri_res;								\
328 	asm volatile ("mov ar.ccv=%0;;" :: "rO"(old));					\
329 	asm volatile ("cmpxchg2.acq %0=[%1],%2,ar.ccv":					\
330 			      "=r"(ia64_intri_res) : "r"(ptr), "r"(new) : "memory");	\
331 	ia64_intri_res;									\
332 })
333 
334 #define ia64_cmpxchg2_rel(ptr, new, old)						\
335 ({											\
336 	__u64 ia64_intri_res;								\
337 	asm volatile ("mov ar.ccv=%0;;" :: "rO"(old));					\
338 											\
339 	asm volatile ("cmpxchg2.rel %0=[%1],%2,ar.ccv":					\
340 			      "=r"(ia64_intri_res) : "r"(ptr), "r"(new) : "memory");	\
341 	ia64_intri_res;									\
342 })
343 
344 #define ia64_cmpxchg4_acq(ptr, new, old)						\
345 ({											\
346 	__u64 ia64_intri_res;								\
347 	asm volatile ("mov ar.ccv=%0;;" :: "rO"(old));					\
348 	asm volatile ("cmpxchg4.acq %0=[%1],%2,ar.ccv":					\
349 			      "=r"(ia64_intri_res) : "r"(ptr), "r"(new) : "memory");	\
350 	ia64_intri_res;									\
351 })
352 
353 #define ia64_cmpxchg4_rel(ptr, new, old)						\
354 ({											\
355 	__u64 ia64_intri_res;								\
356 	asm volatile ("mov ar.ccv=%0;;" :: "rO"(old));					\
357 	asm volatile ("cmpxchg4.rel %0=[%1],%2,ar.ccv":					\
358 			      "=r"(ia64_intri_res) : "r"(ptr), "r"(new) : "memory");	\
359 	ia64_intri_res;									\
360 })
361 
362 #define ia64_cmpxchg8_acq(ptr, new, old)						\
363 ({											\
364 	__u64 ia64_intri_res;								\
365 	asm volatile ("mov ar.ccv=%0;;" :: "rO"(old));					\
366 	asm volatile ("cmpxchg8.acq %0=[%1],%2,ar.ccv":					\
367 			      "=r"(ia64_intri_res) : "r"(ptr), "r"(new) : "memory");	\
368 	ia64_intri_res;									\
369 })
370 
371 #define ia64_cmpxchg8_rel(ptr, new, old)						\
372 ({											\
373 	__u64 ia64_intri_res;								\
374 	asm volatile ("mov ar.ccv=%0;;" :: "rO"(old));					\
375 											\
376 	asm volatile ("cmpxchg8.rel %0=[%1],%2,ar.ccv":					\
377 			      "=r"(ia64_intri_res) : "r"(ptr), "r"(new) : "memory");	\
378 	ia64_intri_res;									\
379 })
380 
381 #define ia64_mf()	asm volatile ("mf" ::: "memory")
382 #define ia64_mfa()	asm volatile ("mf.a" ::: "memory")
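
/*
 * mf orders earlier memory operations before later ones; mf.a in
 * addition waits for outstanding accesses to uncacheable memory to be
 * accepted by the platform.
 */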
383 
384 #define ia64_invala() asm volatile ("invala" ::: "memory")
385 
386 #define ia64_native_thash(addr)							\
387 ({										\
388 	unsigned long ia64_intri_res;						\
389 	asm volatile ("thash %0=%1" : "=r"(ia64_intri_res) : "r" (addr));	\
390 	ia64_intri_res;								\
391 })
392 
393 #define ia64_srlz_i()	asm volatile (";; srlz.i ;;" ::: "memory")
#define ia64_srlz_d()	asm volatile (";; srlz.d" ::: "memory")
395 
396 #ifdef HAVE_SERIALIZE_DIRECTIVE
# define ia64_dv_serialize_data()		asm volatile (".serialize.data")
# define ia64_dv_serialize_instruction()	asm volatile (".serialize.instruction")
399 #else
400 # define ia64_dv_serialize_data()
401 # define ia64_dv_serialize_instruction()
402 #endif
403 
#define ia64_nop(x)	asm volatile ("nop %0" :: "i"(x))
405 
406 #define ia64_itci(addr)	asm volatile ("itc.i %0;;" :: "r"(addr) : "memory")
407 
408 #define ia64_itcd(addr)	asm volatile ("itc.d %0;;" :: "r"(addr) : "memory")
409 
411 #define ia64_itri(trnum, addr) asm volatile ("itr.i itr[%0]=%1"				\
412 					     :: "r"(trnum), "r"(addr) : "memory")
413 
414 #define ia64_itrd(trnum, addr) asm volatile ("itr.d dtr[%0]=%1"				\
415 					     :: "r"(trnum), "r"(addr) : "memory")
416 
417 #define ia64_tpa(addr)								\
418 ({										\
419 	unsigned long ia64_pa;							\
420 	asm volatile ("tpa %0 = %1" : "=r"(ia64_pa) : "r"(addr) : "memory");	\
421 	ia64_pa;								\
422 })
423 
424 #define __ia64_set_dbr(index, val)						\
425 	asm volatile ("mov dbr[%0]=%1" :: "r"(index), "r"(val) : "memory")
426 
427 #define ia64_set_ibr(index, val)						\
428 	asm volatile ("mov ibr[%0]=%1" :: "r"(index), "r"(val) : "memory")
429 
430 #define ia64_set_pkr(index, val)						\
431 	asm volatile ("mov pkr[%0]=%1" :: "r"(index), "r"(val) : "memory")
432 
433 #define ia64_set_pmc(index, val)						\
434 	asm volatile ("mov pmc[%0]=%1" :: "r"(index), "r"(val) : "memory")
435 
436 #define ia64_set_pmd(index, val)						\
437 	asm volatile ("mov pmd[%0]=%1" :: "r"(index), "r"(val) : "memory")
438 
#define ia64_native_set_rr(index, val)							\
	asm volatile ("mov rr[%0]=%1" :: "r"(index), "r"(val) : "memory")
441 
442 #define ia64_native_get_cpuid(index)							\
443 ({											\
444 	unsigned long ia64_intri_res;							\
445 	asm volatile ("mov %0=cpuid[%r1]" : "=r"(ia64_intri_res) : "rO"(index));	\
446 	ia64_intri_res;									\
447 })
448 
449 #define __ia64_get_dbr(index)							\
450 ({										\
451 	unsigned long ia64_intri_res;						\
452 	asm volatile ("mov %0=dbr[%1]" : "=r"(ia64_intri_res) : "r"(index));	\
453 	ia64_intri_res;								\
454 })
455 
456 #define ia64_get_ibr(index)							\
457 ({										\
458 	unsigned long ia64_intri_res;						\
459 	asm volatile ("mov %0=ibr[%1]" : "=r"(ia64_intri_res) : "r"(index));	\
460 	ia64_intri_res;								\
461 })
462 
463 #define ia64_get_pkr(index)							\
464 ({										\
465 	unsigned long ia64_intri_res;						\
466 	asm volatile ("mov %0=pkr[%1]" : "=r"(ia64_intri_res) : "r"(index));	\
467 	ia64_intri_res;								\
468 })
469 
470 #define ia64_get_pmc(index)							\
471 ({										\
472 	unsigned long ia64_intri_res;						\
473 	asm volatile ("mov %0=pmc[%1]" : "=r"(ia64_intri_res) : "r"(index));	\
474 	ia64_intri_res;								\
475 })
476 
478 #define ia64_native_get_pmd(index)						\
479 ({										\
480 	unsigned long ia64_intri_res;						\
481 	asm volatile ("mov %0=pmd[%1]" : "=r"(ia64_intri_res) : "r"(index));	\
482 	ia64_intri_res;								\
483 })
484 
485 #define ia64_native_get_rr(index)						\
486 ({										\
487 	unsigned long ia64_intri_res;						\
488 	asm volatile ("mov %0=rr[%1]" : "=r"(ia64_intri_res) : "r" (index));	\
489 	ia64_intri_res;								\
490 })
491 
492 #define ia64_native_fc(addr)	asm volatile ("fc %0" :: "r"(addr) : "memory")
493 
495 #define ia64_sync_i()	asm volatile (";; sync.i" ::: "memory")
496 
497 #define ia64_native_ssm(mask)	asm volatile ("ssm %0":: "i"((mask)) : "memory")
498 #define ia64_native_rsm(mask)	asm volatile ("rsm %0":: "i"((mask)) : "memory")
499 #define ia64_sum(mask)	asm volatile ("sum %0":: "i"((mask)) : "memory")
500 #define ia64_rum(mask)	asm volatile ("rum %0":: "i"((mask)) : "memory")
501 
502 #define ia64_ptce(addr)	asm volatile ("ptc.e %0" :: "r"(addr))
503 
504 #define ia64_native_ptcga(addr, size)						\
505 do {										\
506 	asm volatile ("ptc.ga %0,%1" :: "r"(addr), "r"(size) : "memory");	\
507 	ia64_dv_serialize_data();						\
508 } while (0)
509 
510 #define ia64_ptcl(addr, size)							\
511 do {										\
512 	asm volatile ("ptc.l %0,%1" :: "r"(addr), "r"(size) : "memory");	\
513 	ia64_dv_serialize_data();						\
514 } while (0)
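
/*
 * ptc.ga broadcasts the purge to every processor in the coherence
 * domain, while ptc.l affects only the local TLB.  The trailing
 * .serialize.data annotation (when available) tells the assembler the
 * data dependency has been serialized.
 */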
515 
516 #define ia64_ptri(addr, size)						\
517 	asm volatile ("ptr.i %0,%1" :: "r"(addr), "r"(size) : "memory")
518 
519 #define ia64_ptrd(addr, size)						\
520 	asm volatile ("ptr.d %0,%1" :: "r"(addr), "r"(size) : "memory")
521 
#define ia64_ttag(addr)								\
({										\
	__u64 ia64_intri_res;							\
	asm volatile ("ttag %0=%1" : "=r"(ia64_intri_res) : "r" (addr));	\
	ia64_intri_res;								\
})

530 /* Values for lfhint in ia64_lfetch and ia64_lfetch_fault */
531 
532 #define ia64_lfhint_none   0
533 #define ia64_lfhint_nt1    1
534 #define ia64_lfhint_nt2    2
535 #define ia64_lfhint_nta    3
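
/*
 * The hints encode expected temporal locality: .nt1, .nt2 and .nta
 * mark the line as non-temporal for progressively more of the cache
 * hierarchy, with .nta bypassing it as far as the implementation
 * allows.
 */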
536 
537 #define ia64_lfetch(lfhint, y)					\
538 ({								\
539         switch (lfhint) {					\
540         case ia64_lfhint_none:					\
541                 asm volatile ("lfetch [%0]" : : "r"(y));	\
542                 break;						\
543         case ia64_lfhint_nt1:					\
544                 asm volatile ("lfetch.nt1 [%0]" : : "r"(y));	\
545                 break;						\
546         case ia64_lfhint_nt2:					\
547                 asm volatile ("lfetch.nt2 [%0]" : : "r"(y));	\
548                 break;						\
549         case ia64_lfhint_nta:					\
550                 asm volatile ("lfetch.nta [%0]" : : "r"(y));	\
551                 break;						\
552         }							\
553 })
554 
555 #define ia64_lfetch_excl(lfhint, y)					\
556 ({									\
557         switch (lfhint) {						\
558         case ia64_lfhint_none:						\
559                 asm volatile ("lfetch.excl [%0]" :: "r"(y));		\
560                 break;							\
561         case ia64_lfhint_nt1:						\
562                 asm volatile ("lfetch.excl.nt1 [%0]" :: "r"(y));	\
563                 break;							\
564         case ia64_lfhint_nt2:						\
565                 asm volatile ("lfetch.excl.nt2 [%0]" :: "r"(y));	\
566                 break;							\
567         case ia64_lfhint_nta:						\
568                 asm volatile ("lfetch.excl.nta [%0]" :: "r"(y));	\
569                 break;							\
570         }								\
571 })
572 
573 #define ia64_lfetch_fault(lfhint, y)					\
574 ({									\
575         switch (lfhint) {						\
576         case ia64_lfhint_none:						\
577                 asm volatile ("lfetch.fault [%0]" : : "r"(y));		\
578                 break;							\
579         case ia64_lfhint_nt1:						\
580                 asm volatile ("lfetch.fault.nt1 [%0]" : : "r"(y));	\
581                 break;							\
582         case ia64_lfhint_nt2:						\
583                 asm volatile ("lfetch.fault.nt2 [%0]" : : "r"(y));	\
584                 break;							\
585         case ia64_lfhint_nta:						\
586                 asm volatile ("lfetch.fault.nta [%0]" : : "r"(y));	\
587                 break;							\
588         }								\
589 })
590 
591 #define ia64_lfetch_fault_excl(lfhint, y)				\
592 ({									\
593         switch (lfhint) {						\
594         case ia64_lfhint_none:						\
595                 asm volatile ("lfetch.fault.excl [%0]" :: "r"(y));	\
596                 break;							\
597         case ia64_lfhint_nt1:						\
598                 asm volatile ("lfetch.fault.excl.nt1 [%0]" :: "r"(y));	\
599                 break;							\
600         case ia64_lfhint_nt2:						\
601                 asm volatile ("lfetch.fault.excl.nt2 [%0]" :: "r"(y));	\
602                 break;							\
603         case ia64_lfhint_nta:						\
604                 asm volatile ("lfetch.fault.excl.nta [%0]" :: "r"(y));	\
605                 break;							\
606         }								\
607 })
608 
609 #define ia64_native_intrin_local_irq_restore(x)			\
610 do {								\
611 	asm volatile (";;   cmp.ne p6,p7=%0,r0;;"		\
612 		      "(p6) ssm psr.i;"				\
613 		      "(p7) rsm psr.i;;"			\
614 		      "(p6) srlz.d"				\
615 		      :: "r"((x)) : "p6", "p7", "memory");	\
616 } while (0)
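
/*
 * Restores the interrupt state saved in x: p6/p7 are set from the
 * test "x != 0", so exactly one of the predicated ssm/rsm executes,
 * re-enabling or masking PSR.i, and srlz.d makes the re-enable take
 * effect before the code that follows.
 */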
617 
618 #endif /* _UAPI_ASM_IA64_GCC_INTRIN_H */
619