#ifndef _ASM_IA64_GCC_INTRIN_H
#define _ASM_IA64_GCC_INTRIN_H
/*
 *
 * Copyright (C) 2002,2003 Jun Nakajima <jun.nakajima@intel.com>
 * Copyright (C) 2002,2003 Suresh Siddha <suresh.b.siddha@intel.com>
 */

#include <linux/compiler.h>

/* Define this macro to get inline asm statements included in C files. */
#define ASM_SUPPORTED

/* Optimization barrier */
/* The "volatile" is due to gcc bugs */
#define ia64_barrier()	asm volatile ("":::"memory")

#define ia64_stop()	asm volatile (";;"::)

#define ia64_invala_gr(regnum)	asm volatile ("invala.e r%0" :: "i"(regnum))

#define ia64_invala_fr(regnum)	asm volatile ("invala.e f%0" :: "i"(regnum))
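
/*
 * Usage note (illustrative, not part of the original header):
 * ia64_barrier() is a compiler-only barrier: it emits no instruction but
 * keeps gcc from moving memory accesses across it.  Ordering that must be
 * visible to other CPUs or to devices needs a real fence, e.g.:
 *
 *	shared_flag = 1;		(hypothetical shared variable)
 *	ia64_barrier();			(compiler ordering only)
 *	ia64_mf();			(full memory fence, defined below)
 */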

extern void ia64_bad_param_for_setreg (void);
extern void ia64_bad_param_for_getreg (void);

register unsigned long ia64_r13 asm ("r13") __attribute_used__;

#define ia64_setreg(regnum, val)						\
({										\
	switch (regnum) {							\
	    case _IA64_REG_PSR_L:						\
		    asm volatile ("mov psr.l=%0" :: "r"(val) : "memory");	\
		    break;							\
	    case _IA64_REG_AR_KR0 ... _IA64_REG_AR_EC:				\
		    asm volatile ("mov ar%0=%1" ::				\
		    			  "i" (regnum - _IA64_REG_AR_KR0),	\
					  "r"(val): "memory");			\
		    break;							\
	    case _IA64_REG_CR_DCR ... _IA64_REG_CR_LRR1:			\
		    asm volatile ("mov cr%0=%1" ::				\
				          "i" (regnum - _IA64_REG_CR_DCR),	\
					  "r"(val): "memory" );			\
		    break;							\
	    case _IA64_REG_SP:							\
		    asm volatile ("mov r12=%0" ::				\
			    		  "r"(val): "memory");			\
		    break;							\
	    case _IA64_REG_GP:							\
		    asm volatile ("mov gp=%0" :: "r"(val) : "memory");		\
		break;								\
	    default:								\
		    ia64_bad_param_for_setreg();				\
		    break;							\
	}									\
})

#define ia64_getreg(regnum)							\
({										\
	__u64 ia64_intri_res;							\
										\
	switch (regnum) {							\
	case _IA64_REG_GP:							\
		asm volatile ("mov %0=gp" : "=r"(ia64_intri_res));		\
		break;								\
	case _IA64_REG_IP:							\
		asm volatile ("mov %0=ip" : "=r"(ia64_intri_res));		\
		break;								\
	case _IA64_REG_PSR:							\
		asm volatile ("mov %0=psr" : "=r"(ia64_intri_res));		\
		break;								\
	case _IA64_REG_TP:	/* for current() */				\
		ia64_intri_res = ia64_r13;					\
		break;								\
	case _IA64_REG_AR_KR0 ... _IA64_REG_AR_EC:				\
		asm volatile ("mov %0=ar%1" : "=r" (ia64_intri_res)		\
				      : "i"(regnum - _IA64_REG_AR_KR0));	\
		break;								\
	case _IA64_REG_CR_DCR ... _IA64_REG_CR_LRR1:				\
		asm volatile ("mov %0=cr%1" : "=r" (ia64_intri_res)		\
				      : "i" (regnum - _IA64_REG_CR_DCR));	\
		break;								\
	case _IA64_REG_SP:							\
		asm volatile ("mov %0=sp" : "=r" (ia64_intri_res));		\
		break;								\
	default:								\
		ia64_bad_param_for_getreg();					\
		break;								\
	}									\
	ia64_intri_res;								\
})
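
/*
 * Usage sketch (illustrative; the _IA64_REG_* selectors come from
 * asm/ia64regs.h).  The selector must be a compile-time constant so each
 * use collapses to a single mov, e.g.:
 *
 *	ia64_setreg(_IA64_REG_AR_KR0, val);
 *	sp = ia64_getreg(_IA64_REG_SP);
 *
 * A non-constant selector falls into the default case and calls
 * ia64_bad_param_for_setreg()/ia64_bad_param_for_getreg(), which have no
 * definition, so the mistake shows up as a link error.
 */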

#define ia64_hint_pause 0

#define ia64_hint(mode)						\
({								\
	switch (mode) {						\
	case ia64_hint_pause:					\
		asm volatile ("hint @pause" ::: "memory");	\
		break;						\
	}							\
})
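
/*
 * Usage sketch (illustrative): "hint @pause" tells the core the code is
 * busy-waiting, so a spin loop would typically look like:
 *
 *	while (!lock_is_free(lock))		(hypothetical helper)
 *		ia64_hint(ia64_hint_pause);
 */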


/* Integer values for mux1 instruction */
#define ia64_mux1_brcst 0
#define ia64_mux1_mix   8
#define ia64_mux1_shuf  9
#define ia64_mux1_alt  10
#define ia64_mux1_rev  11

#define ia64_mux1(x, mode)							\
({										\
	__u64 ia64_intri_res;							\
										\
	switch (mode) {								\
	case ia64_mux1_brcst:							\
		asm ("mux1 %0=%1,@brcst" : "=r" (ia64_intri_res) : "r" (x));	\
		break;								\
	case ia64_mux1_mix:							\
		asm ("mux1 %0=%1,@mix" : "=r" (ia64_intri_res) : "r" (x));	\
		break;								\
	case ia64_mux1_shuf:							\
		asm ("mux1 %0=%1,@shuf" : "=r" (ia64_intri_res) : "r" (x));	\
		break;								\
	case ia64_mux1_alt:							\
		asm ("mux1 %0=%1,@alt" : "=r" (ia64_intri_res) : "r" (x));	\
		break;								\
	case ia64_mux1_rev:							\
		asm ("mux1 %0=%1,@rev" : "=r" (ia64_intri_res) : "r" (x));	\
		break;								\
	}									\
	ia64_intri_res;								\
})
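
/*
 * Usage sketch (illustrative): mux1 permutes the eight bytes of a general
 * register; the @rev form reverses byte order, so a 64-bit byte swap is a
 * single instruction:
 *
 *	static inline __u64 swab64_example(__u64 x)
 *	{
 *		return ia64_mux1(x, ia64_mux1_rev);
 *	}
 *
 * The mode must be a compile-time constant for the switch to collapse.
 */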

#define ia64_popcnt(x)						\
({								\
	__u64 ia64_intri_res;					\
	asm ("popcnt %0=%1" : "=r" (ia64_intri_res) : "r" (x));	\
								\
	ia64_intri_res;						\
})
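
/*
 * Usage sketch (illustrative): hardware population count, e.g. a Hamming
 * weight helper:
 *
 *	static inline unsigned int hweight64_example(__u64 w)
 *	{
 *		return ia64_popcnt(w);
 *	}
 */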

#define ia64_getf_exp(x)					\
({								\
	long ia64_intri_res;					\
								\
	asm ("getf.exp %0=%1" : "=r"(ia64_intri_res) : "f"(x));	\
								\
	ia64_intri_res;						\
})

#define ia64_shrp(a, b, count)								\
({											\
	__u64 ia64_intri_res;								\
	asm ("shrp %0=%1,%2,%3" : "=r"(ia64_intri_res) : "r"(a), "r"(b), "i"(count));	\
	ia64_intri_res;									\
})

#define ia64_ldfs(regnum, x)					\
({								\
	register double __f__ asm ("f"#regnum);			\
	asm volatile ("ldfs %0=[%1]" :"=f"(__f__): "r"(x));	\
})

#define ia64_ldfd(regnum, x)					\
({								\
	register double __f__ asm ("f"#regnum);			\
	asm volatile ("ldfd %0=[%1]" :"=f"(__f__): "r"(x));	\
})

#define ia64_ldfe(regnum, x)					\
({								\
	register double __f__ asm ("f"#regnum);			\
	asm volatile ("ldfe %0=[%1]" :"=f"(__f__): "r"(x));	\
})

#define ia64_ldf8(regnum, x)					\
({								\
	register double __f__ asm ("f"#regnum);			\
	asm volatile ("ldf8 %0=[%1]" :"=f"(__f__): "r"(x));	\
})

#define ia64_ldf_fill(regnum, x)				\
({								\
	register double __f__ asm ("f"#regnum);			\
	asm volatile ("ldf.fill %0=[%1]" :"=f"(__f__): "r"(x));	\
})

#define ia64_stfs(x, regnum)						\
({									\
	register double __f__ asm ("f"#regnum);				\
	asm volatile ("stfs [%0]=%1" :: "r"(x), "f"(__f__) : "memory");	\
})

#define ia64_stfd(x, regnum)						\
({									\
	register double __f__ asm ("f"#regnum);				\
	asm volatile ("stfd [%0]=%1" :: "r"(x), "f"(__f__) : "memory");	\
})

#define ia64_stfe(x, regnum)						\
({									\
	register double __f__ asm ("f"#regnum);				\
	asm volatile ("stfe [%0]=%1" :: "r"(x), "f"(__f__) : "memory");	\
})

#define ia64_stf8(x, regnum)						\
({									\
	register double __f__ asm ("f"#regnum);				\
	asm volatile ("stf8 [%0]=%1" :: "r"(x), "f"(__f__) : "memory");	\
})

#define ia64_stf_spill(x, regnum)						\
({										\
	register double __f__ asm ("f"#regnum);					\
	asm volatile ("stf.spill [%0]=%1" :: "r"(x), "f"(__f__) : "memory");	\
})
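
/*
 * Usage sketch (illustrative): stf.spill/ldf.fill save and restore a
 * floating-point register in its 16-byte spill format, NaT bit included,
 * so an FP register can be preserved around code that clobbers it:
 *
 *	struct ia64_fpreg tmp;		(16-byte aligned area, asm/fpu.h)
 *	ia64_stf_spill(&tmp, 6);	(save f6)
 *	...
 *	ia64_ldf_fill(6, &tmp);		(restore f6)
 *
 * The register number must be a literal constant because it is pasted
 * into the "f"#regnum register name.
 */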

#define ia64_fetchadd4_acq(p, inc)						\
({										\
										\
	__u64 ia64_intri_res;							\
	asm volatile ("fetchadd4.acq %0=[%1],%2"				\
				: "=r"(ia64_intri_res) : "r"(p), "i" (inc)	\
				: "memory");					\
										\
	ia64_intri_res;								\
})

#define ia64_fetchadd4_rel(p, inc)						\
({										\
	__u64 ia64_intri_res;							\
	asm volatile ("fetchadd4.rel %0=[%1],%2"				\
				: "=r"(ia64_intri_res) : "r"(p), "i" (inc)	\
				: "memory");					\
										\
	ia64_intri_res;								\
})

#define ia64_fetchadd8_acq(p, inc)						\
({										\
										\
	__u64 ia64_intri_res;							\
	asm volatile ("fetchadd8.acq %0=[%1],%2"				\
				: "=r"(ia64_intri_res) : "r"(p), "i" (inc)	\
				: "memory");					\
										\
	ia64_intri_res;								\
})

#define ia64_fetchadd8_rel(p, inc)						\
({										\
	__u64 ia64_intri_res;							\
	asm volatile ("fetchadd8.rel %0=[%1],%2"				\
				: "=r"(ia64_intri_res) : "r"(p), "i" (inc)	\
				: "memory");					\
										\
	ia64_intri_res;								\
})
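
/*
 * Usage sketch (illustrative): fetchadd atomically adds an immediate to a
 * 4- or 8-byte word and returns the previous value.  Because of the "i"
 * constraint, inc must be one of the increments the instruction can
 * encode: -16, -8, -4, -1, 1, 4, 8, 16.  For example:
 *
 *	old = ia64_fetchadd4_acq(&counter, 1);	(counter is a 4-byte int)
 */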

#define ia64_xchg1(ptr,x)						\
({									\
	__u64 ia64_intri_res;						\
	asm __volatile ("xchg1 %0=[%1],%2" : "=r" (ia64_intri_res)	\
			    : "r" (ptr), "r" (x) : "memory");		\
	ia64_intri_res;							\
})

#define ia64_xchg2(ptr,x)						\
({									\
	__u64 ia64_intri_res;						\
	asm __volatile ("xchg2 %0=[%1],%2" : "=r" (ia64_intri_res)	\
			    : "r" (ptr), "r" (x) : "memory");		\
	ia64_intri_res;							\
})

#define ia64_xchg4(ptr,x)						\
({									\
	__u64 ia64_intri_res;						\
	asm __volatile ("xchg4 %0=[%1],%2" : "=r" (ia64_intri_res)	\
			    : "r" (ptr), "r" (x) : "memory");		\
	ia64_intri_res;							\
})

#define ia64_xchg8(ptr,x)						\
({									\
	__u64 ia64_intri_res;						\
	asm __volatile ("xchg8 %0=[%1],%2" : "=r" (ia64_intri_res)	\
			    : "r" (ptr), "r" (x) : "memory");		\
	ia64_intri_res;							\
})
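
/*
 * Usage sketch (illustrative): xchg atomically stores the new value and
 * returns the previous contents; on ia64 it always has acquire semantics,
 * so a simple test-and-set lock needs no extra fence on the lock path:
 *
 *	while (ia64_xchg4(&lock_word, 1) != 0)	(lock_word: 4-byte word)
 *		ia64_hint(ia64_hint_pause);
 */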

#define ia64_cmpxchg1_acq(ptr, new, old)						\
({											\
	__u64 ia64_intri_res;								\
	asm volatile ("mov ar.ccv=%0;;" :: "rO"(old));					\
	asm volatile ("cmpxchg1.acq %0=[%1],%2,ar.ccv":					\
			      "=r"(ia64_intri_res) : "r"(ptr), "r"(new) : "memory");	\
	ia64_intri_res;									\
})

#define ia64_cmpxchg1_rel(ptr, new, old)						\
({											\
	__u64 ia64_intri_res;								\
	asm volatile ("mov ar.ccv=%0;;" :: "rO"(old));					\
	asm volatile ("cmpxchg1.rel %0=[%1],%2,ar.ccv":					\
			      "=r"(ia64_intri_res) : "r"(ptr), "r"(new) : "memory");	\
	ia64_intri_res;									\
})

#define ia64_cmpxchg2_acq(ptr, new, old)						\
({											\
	__u64 ia64_intri_res;								\
	asm volatile ("mov ar.ccv=%0;;" :: "rO"(old));					\
	asm volatile ("cmpxchg2.acq %0=[%1],%2,ar.ccv":					\
			      "=r"(ia64_intri_res) : "r"(ptr), "r"(new) : "memory");	\
	ia64_intri_res;									\
})

#define ia64_cmpxchg2_rel(ptr, new, old)						\
({											\
	__u64 ia64_intri_res;								\
	asm volatile ("mov ar.ccv=%0;;" :: "rO"(old));					\
											\
	asm volatile ("cmpxchg2.rel %0=[%1],%2,ar.ccv":					\
			      "=r"(ia64_intri_res) : "r"(ptr), "r"(new) : "memory");	\
	ia64_intri_res;									\
})

#define ia64_cmpxchg4_acq(ptr, new, old)						\
({											\
	__u64 ia64_intri_res;								\
	asm volatile ("mov ar.ccv=%0;;" :: "rO"(old));					\
	asm volatile ("cmpxchg4.acq %0=[%1],%2,ar.ccv":					\
			      "=r"(ia64_intri_res) : "r"(ptr), "r"(new) : "memory");	\
	ia64_intri_res;									\
})

#define ia64_cmpxchg4_rel(ptr, new, old)						\
({											\
	__u64 ia64_intri_res;								\
	asm volatile ("mov ar.ccv=%0;;" :: "rO"(old));					\
	asm volatile ("cmpxchg4.rel %0=[%1],%2,ar.ccv":					\
			      "=r"(ia64_intri_res) : "r"(ptr), "r"(new) : "memory");	\
	ia64_intri_res;									\
})

#define ia64_cmpxchg8_acq(ptr, new, old)						\
({											\
	__u64 ia64_intri_res;								\
	asm volatile ("mov ar.ccv=%0;;" :: "rO"(old));					\
	asm volatile ("cmpxchg8.acq %0=[%1],%2,ar.ccv":					\
			      "=r"(ia64_intri_res) : "r"(ptr), "r"(new) : "memory");	\
	ia64_intri_res;									\
})

#define ia64_cmpxchg8_rel(ptr, new, old)						\
({											\
	__u64 ia64_intri_res;								\
	asm volatile ("mov ar.ccv=%0;;" :: "rO"(old));					\
											\
	asm volatile ("cmpxchg8.rel %0=[%1],%2,ar.ccv":					\
			      "=r"(ia64_intri_res) : "r"(ptr), "r"(new) : "memory");	\
	ia64_intri_res;									\
})
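
/*
 * Usage sketch (illustrative): the first asm loads ar.ccv with the
 * expected old value; cmpxchg then stores 'new' only if memory still
 * equals ar.ccv, and returns the prior memory contents either way.  A
 * typical compare-and-swap loop:
 *
 *	do {
 *		old = *p;
 *		new = old | bit;
 *	} while (ia64_cmpxchg8_acq(p, new, old) != old);
 */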

#define ia64_mf()	asm volatile ("mf" ::: "memory")
#define ia64_mfa()	asm volatile ("mf.a" ::: "memory")

#define ia64_invala() asm volatile ("invala" ::: "memory")

#define ia64_thash(addr)							\
({										\
	__u64 ia64_intri_res;							\
	asm volatile ("thash %0=%1" : "=r"(ia64_intri_res) : "r" (addr));	\
	ia64_intri_res;								\
})

#define ia64_srlz_i()	asm volatile (";; srlz.i ;;" ::: "memory")
#define ia64_srlz_d()	asm volatile (";; srlz.d" ::: "memory")

#ifdef HAVE_SERIALIZE_DIRECTIVE
# define ia64_dv_serialize_data()		asm volatile (".serialize.data")
# define ia64_dv_serialize_instruction()	asm volatile (".serialize.instruction")
#else
# define ia64_dv_serialize_data()
# define ia64_dv_serialize_instruction()
#endif

#define ia64_nop(x)	asm volatile ("nop %0"::"i"(x))

#define ia64_itci(addr)	asm volatile ("itc.i %0;;" :: "r"(addr) : "memory")

#define ia64_itcd(addr)	asm volatile ("itc.d %0;;" :: "r"(addr) : "memory")


#define ia64_itri(trnum, addr) asm volatile ("itr.i itr[%0]=%1"				\
					     :: "r"(trnum), "r"(addr) : "memory")

#define ia64_itrd(trnum, addr) asm volatile ("itr.d dtr[%0]=%1"				\
					     :: "r"(trnum), "r"(addr) : "memory")

#define ia64_tpa(addr)								\
({										\
	__u64 ia64_pa;								\
	asm volatile ("tpa %0 = %1" : "=r"(ia64_pa) : "r"(addr) : "memory");	\
	ia64_pa;								\
})

#define __ia64_set_dbr(index, val)						\
	asm volatile ("mov dbr[%0]=%1" :: "r"(index), "r"(val) : "memory")

#define ia64_set_ibr(index, val)						\
	asm volatile ("mov ibr[%0]=%1" :: "r"(index), "r"(val) : "memory")

#define ia64_set_pkr(index, val)						\
	asm volatile ("mov pkr[%0]=%1" :: "r"(index), "r"(val) : "memory")

#define ia64_set_pmc(index, val)						\
	asm volatile ("mov pmc[%0]=%1" :: "r"(index), "r"(val) : "memory")

#define ia64_set_pmd(index, val)						\
	asm volatile ("mov pmd[%0]=%1" :: "r"(index), "r"(val) : "memory")

#define ia64_set_rr(index, val)							\
	asm volatile ("mov rr[%0]=%1" :: "r"(index), "r"(val) : "memory")

#define ia64_get_cpuid(index)								\
({											\
	__u64 ia64_intri_res;								\
	asm volatile ("mov %0=cpuid[%r1]" : "=r"(ia64_intri_res) : "rO"(index));	\
	ia64_intri_res;									\
})

#define __ia64_get_dbr(index)							\
({										\
	__u64 ia64_intri_res;							\
	asm volatile ("mov %0=dbr[%1]" : "=r"(ia64_intri_res) : "r"(index));	\
	ia64_intri_res;								\
})

#define ia64_get_ibr(index)							\
({										\
	__u64 ia64_intri_res;							\
	asm volatile ("mov %0=ibr[%1]" : "=r"(ia64_intri_res) : "r"(index));	\
	ia64_intri_res;								\
})

#define ia64_get_pkr(index)							\
({										\
	__u64 ia64_intri_res;							\
	asm volatile ("mov %0=pkr[%1]" : "=r"(ia64_intri_res) : "r"(index));	\
	ia64_intri_res;								\
})

#define ia64_get_pmc(index)							\
({										\
	__u64 ia64_intri_res;							\
	asm volatile ("mov %0=pmc[%1]" : "=r"(ia64_intri_res) : "r"(index));	\
	ia64_intri_res;								\
})


#define ia64_get_pmd(index)							\
({										\
	__u64 ia64_intri_res;							\
	asm volatile ("mov %0=pmd[%1]" : "=r"(ia64_intri_res) : "r"(index));	\
	ia64_intri_res;								\
})

#define ia64_get_rr(index)							\
({										\
	__u64 ia64_intri_res;							\
	asm volatile ("mov %0=rr[%1]" : "=r"(ia64_intri_res) : "r" (index));	\
	ia64_intri_res;								\
})

#define ia64_fc(addr)	asm volatile ("fc %0" :: "r"(addr) : "memory")


#define ia64_sync_i()	asm volatile (";; sync.i" ::: "memory")

#define ia64_ssm(mask)	asm volatile ("ssm %0":: "i"((mask)) : "memory")
#define ia64_rsm(mask)	asm volatile ("rsm %0":: "i"((mask)) : "memory")
#define ia64_sum(mask)	asm volatile ("sum %0":: "i"((mask)) : "memory")
#define ia64_rum(mask)	asm volatile ("rum %0":: "i"((mask)) : "memory")

#define ia64_ptce(addr)	asm volatile ("ptc.e %0" :: "r"(addr))

#define ia64_ptcga(addr, size)							\
do {										\
	asm volatile ("ptc.ga %0,%1" :: "r"(addr), "r"(size) : "memory");	\
	ia64_dv_serialize_data();						\
} while (0)

#define ia64_ptcl(addr, size)							\
do {										\
	asm volatile ("ptc.l %0,%1" :: "r"(addr), "r"(size) : "memory");	\
	ia64_dv_serialize_data();						\
} while (0)

#define ia64_ptri(addr, size)						\
	asm volatile ("ptr.i %0,%1" :: "r"(addr), "r"(size) : "memory")

#define ia64_ptrd(addr, size)						\
	asm volatile ("ptr.d %0,%1" :: "r"(addr), "r"(size) : "memory")

/* Values for lfhint in the ia64_lfetch*() macros below */

#define ia64_lfhint_none   0
#define ia64_lfhint_nt1    1
#define ia64_lfhint_nt2    2
#define ia64_lfhint_nta    3

#define ia64_lfetch(lfhint, y)					\
({								\
        switch (lfhint) {					\
        case ia64_lfhint_none:					\
                asm volatile ("lfetch [%0]" : : "r"(y));	\
                break;						\
        case ia64_lfhint_nt1:					\
                asm volatile ("lfetch.nt1 [%0]" : : "r"(y));	\
                break;						\
        case ia64_lfhint_nt2:					\
                asm volatile ("lfetch.nt2 [%0]" : : "r"(y));	\
                break;						\
        case ia64_lfhint_nta:					\
                asm volatile ("lfetch.nta [%0]" : : "r"(y));	\
                break;						\
        }							\
})

#define ia64_lfetch_excl(lfhint, y)					\
({									\
        switch (lfhint) {						\
        case ia64_lfhint_none:						\
                asm volatile ("lfetch.excl [%0]" :: "r"(y));		\
                break;							\
        case ia64_lfhint_nt1:						\
                asm volatile ("lfetch.excl.nt1 [%0]" :: "r"(y));	\
                break;							\
        case ia64_lfhint_nt2:						\
                asm volatile ("lfetch.excl.nt2 [%0]" :: "r"(y));	\
                break;							\
        case ia64_lfhint_nta:						\
                asm volatile ("lfetch.excl.nta [%0]" :: "r"(y));	\
                break;							\
        }								\
})

#define ia64_lfetch_fault(lfhint, y)					\
({									\
        switch (lfhint) {						\
        case ia64_lfhint_none:						\
                asm volatile ("lfetch.fault [%0]" : : "r"(y));		\
                break;							\
        case ia64_lfhint_nt1:						\
                asm volatile ("lfetch.fault.nt1 [%0]" : : "r"(y));	\
                break;							\
        case ia64_lfhint_nt2:						\
                asm volatile ("lfetch.fault.nt2 [%0]" : : "r"(y));	\
                break;							\
        case ia64_lfhint_nta:						\
                asm volatile ("lfetch.fault.nta [%0]" : : "r"(y));	\
                break;							\
        }								\
})

#define ia64_lfetch_fault_excl(lfhint, y)				\
({									\
        switch (lfhint) {						\
        case ia64_lfhint_none:						\
                asm volatile ("lfetch.fault.excl [%0]" :: "r"(y));	\
                break;							\
        case ia64_lfhint_nt1:						\
                asm volatile ("lfetch.fault.excl.nt1 [%0]" :: "r"(y));	\
                break;							\
        case ia64_lfhint_nt2:						\
                asm volatile ("lfetch.fault.excl.nt2 [%0]" :: "r"(y));	\
                break;							\
        case ia64_lfhint_nta:						\
                asm volatile ("lfetch.fault.excl.nta [%0]" :: "r"(y));	\
                break;							\
        }								\
})
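
/*
 * Usage sketch (illustrative): the generic prefetch helpers are typically
 * built on these wrappers, e.g.
 *
 *	#define prefetch(x)	ia64_lfetch(ia64_lfhint_none, x)
 *	#define prefetchw(x)	ia64_lfetch_excl(ia64_lfhint_none, x)
 *
 * The .fault forms raise faults (e.g. on a TLB miss) instead of being
 * silently dropped, so they are only safe on addresses known to be mapped.
 */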

#define ia64_intrin_local_irq_restore(x)			\
do {								\
	asm volatile (";;   cmp.ne p6,p7=%0,r0;;"		\
		      "(p6) ssm psr.i;"				\
		      "(p7) rsm psr.i;;"			\
		      "(p6) srlz.d"				\
		      :: "r"((x)) : "p6", "p7", "memory");	\
} while (0)
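
/*
 * Usage sketch (illustrative): the argument is a previously saved psr.i
 * state (non-zero means interrupts were enabled), so a save/restore
 * sequence looks roughly like:
 *
 *	psr = ia64_getreg(_IA64_REG_PSR);		(save)
 *	ia64_rsm(IA64_PSR_I);				(mask interrupts;
 *							 IA64_PSR_I defined
 *							 elsewhere)
 *	...
 *	ia64_intrin_local_irq_restore(psr & IA64_PSR_I);
 *
 * p6/p7 are clobbered, as declared in the asm clobber list.
 */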

#endif /* _ASM_IA64_GCC_INTRIN_H */