/*
 * arch/ia64/kernel/ivt.S
 *
 * Copyright (C) 1998-2001, 2003, 2005 Hewlett-Packard Co
 *	Stephane Eranian <eranian@hpl.hp.com>
 *	David Mosberger <davidm@hpl.hp.com>
 * Copyright (C) 2000, 2002-2003 Intel Co
 *	Asit Mallick <asit.k.mallick@intel.com>
 *	Suresh Siddha <suresh.b.siddha@intel.com>
 *	Kenneth Chen <kenneth.w.chen@intel.com>
 *	Fenghua Yu <fenghua.yu@intel.com>
 *
 * 00/08/23 Asit Mallick <asit.k.mallick@intel.com> TLB handling for SMP
 * 00/12/20 David Mosberger-Tang <davidm@hpl.hp.com> DTLB/ITLB handler now uses virtual PT.
 */
/*
 * This file defines the interruption vector table used by the CPU.
 * It does not include one entry per possible cause of interruption.
 *
 * The first 20 entries of the table contain 64 bundles each while the
 * remaining 48 entries contain only 16 bundles each.
 *
 * The 64 bundles are used to allow inlining the whole handler for critical
 * interruptions like TLB misses.
 *
 * For each entry, the comment is as follows:
 *
 *	// 0x1c00 Entry 7 (size 64 bundles) Data Key Miss (12,51)
 *	entry offset ----/     /         /                  /  /
 *	entry number ---------/         /                  /  /
 *	size of the entry -------------/                  /  /
 *	vector name -------------------------------------/  /
 *	interruptions triggering this vector ----------------/
 *
 * The table is 32KB in size and must be aligned on a 32KB boundary.
 * (The CPU ignores the 15 lower bits of the address.)
 *
 * Table is based upon EAS2.6 (Oct 1999)
 */

#include <linux/config.h>

#include <asm/asmmacro.h>
#include <asm/break.h>
#include <asm/ia32.h>
#include <asm/kregs.h>
#include <asm/offsets.h>
#include <asm/pgtable.h>
#include <asm/processor.h>
#include <asm/ptrace.h>
#include <asm/system.h>
#include <asm/thread_info.h>
#include <asm/unistd.h>
#include <asm/errno.h>

#if 1
# define PSR_DEFAULT_BITS	psr.ac
#else
# define PSR_DEFAULT_BITS	0
#endif

#if 0
  /*
   * This lets you track the last eight faults that occurred on the CPU.  Make sure ar.k2 isn't
   * needed for something else before enabling this...
   */
# define DBG_FAULT(i)	mov r16=ar.k2;; shl r16=r16,8;; add r16=(i),r16;; mov ar.k2=r16
#else
# define DBG_FAULT(i)
#endif

#define MINSTATE_VIRT	/* needed by minstate.h */
#include "minstate.h"

#define FAULT(n)							\
	mov r31=pr;							\
	mov r19=n;;	/* prepare to save predicates */		\
	br.sptk.many dispatch_to_fault_handler

	.section .text.ivt,"ax"

	.align 32768	// align on 32KB boundary
	.global ia64_ivt
ia64_ivt:
/////////////////////////////////////////////////////////////////////////////////////////
// 0x0000 Entry 0 (size 64 bundles) VHPT Translation (8,20,47)
ENTRY(vhpt_miss)
	DBG_FAULT(0)
	/*
	 * The VHPT vector is invoked when the TLB entry for the virtual page table
	 * is missing.  This happens only as a result of a previous
	 * (the "original") TLB miss, which may either be caused by an instruction
	 * fetch or a data access (or non-access).
	 *
	 * What we do here is normal TLB miss handling for the _original_ miss, followed
	 * by inserting the TLB entry for the virtual page table page that the VHPT
	 * walker was attempting to access.  The latter gets inserted as long
	 * as both the L1 and L2 entries have valid mappings for the faulting address.
	 * The TLB entry for the original miss gets inserted only if
	 * the L3 entry indicates that the page is present.
	 *
	 * do_page_fault gets invoked in the following cases:
	 *	- the faulting virtual address uses unimplemented address bits
	 *	- the faulting virtual address has no L1, L2, or L3 mapping
	 */
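	/*
	 * For reference, the register-level walk below corresponds roughly to
	 * the following C-style pseudocode.  This is a sketch only: the helper
	 * names (unimplemented_bits, page_of, insert) are illustrative, not
	 * real kernel functions, and the hugepage and region-5 special cases
	 * handled inline are ignored:
	 *
	 *	pgd = pgd_base(region(ifa)) + pgd_index(ifa);
	 *	if (unimplemented_bits(ifa) || *pgd == 0)
	 *		goto page_fault;
	 *	pmd = page_of(*pgd) + pmd_index(ifa);
	 *	if (*pmd == 0)
	 *		goto page_fault;
	 *	pte = page_of(*pmd) + pte_index(ifa);
	 *	if (!(*pte & _PAGE_P))
	 *		goto page_fault;
	 *	insert(*pte);				// itc.i or itc.d, the original miss
	 *	insert_d(page_of(*pmd) | dirty_rw_bits);	// the VHPT page itself
	 */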
	mov r16=cr.ifa				// get address that caused the TLB miss
#ifdef CONFIG_HUGETLB_PAGE
	movl r18=PAGE_SHIFT
	mov r25=cr.itir
#endif
	;;
	rsm psr.dt				// use physical addressing for data
	mov r31=pr				// save the predicate registers
	mov r19=IA64_KR(PT_BASE)		// get page table base address
	shl r21=r16,3				// shift bit 60 into sign bit
	shr.u r17=r16,61			// get the region number into r17
	;;
	shr r22=r21,3
#ifdef CONFIG_HUGETLB_PAGE
	extr.u r26=r25,2,6
	;;
	cmp.ne p8,p0=r18,r26
	sub r27=r26,r18
	;;
(p8)	dep r25=r18,r25,2,6
(p8)	shr r22=r22,r27
#endif
	;;
	cmp.eq p6,p7=5,r17			// is IFA pointing into region 5?
	shr.u r18=r22,PGDIR_SHIFT		// get bits 33-63 of the faulting address
	;;
(p7)	dep r17=r17,r19,(PAGE_SHIFT-3),3	// put region number bits in place

	srlz.d
	LOAD_PHYSICAL(p6, r19, swapper_pg_dir)	// region 5 is rooted at swapper_pg_dir

	.pred.rel "mutex", p6, p7
(p6)	shr.u r21=r21,PGDIR_SHIFT+PAGE_SHIFT
(p7)	shr.u r21=r21,PGDIR_SHIFT+PAGE_SHIFT-3
	;;
(p6)	dep r17=r18,r19,3,(PAGE_SHIFT-3)	// r17=PTA + IFA(33,42)*8
(p7)	dep r17=r18,r17,3,(PAGE_SHIFT-6)	// r17=PTA + (((IFA(61,63) << 7) | IFA(33,39))*8)
	cmp.eq p7,p6=0,r21			// unused address bits all zeroes?
	shr.u r18=r22,PMD_SHIFT			// shift L2 index into position
	;;
	ld8 r17=[r17]				// fetch the L1 entry (may be 0)
	;;
(p7)	cmp.eq p6,p7=r17,r0			// was L1 entry NULL?
	dep r17=r18,r17,3,(PAGE_SHIFT-3)	// compute address of L2 page table entry
	;;
(p7)	ld8 r20=[r17]				// fetch the L2 entry (may be 0)
	shr.u r19=r22,PAGE_SHIFT		// shift L3 index into position
	;;
(p7)	cmp.eq.or.andcm p6,p7=r20,r0		// was L2 entry NULL?
	dep r21=r19,r20,3,(PAGE_SHIFT-3)	// compute address of L3 page table entry
	;;
(p7)	ld8 r18=[r21]				// read the L3 PTE
	mov r19=cr.isr				// cr.isr bit 32 tells us if this is an insn miss
	;;
(p7)	tbit.z p6,p7=r18,_PAGE_P_BIT		// page present bit cleared?
	mov r22=cr.iha				// get the VHPT address that caused the TLB miss
	;;					// avoid RAW on p7
(p7)	tbit.nz.unc p10,p11=r19,32		// is it an instruction TLB miss?
	dep r23=0,r20,0,PAGE_SHIFT		// clear low bits to get page address
	;;
(p10)	itc.i r18				// insert the instruction TLB entry
(p11)	itc.d r18				// insert the data TLB entry
(p6)	br.cond.spnt.many page_fault		// handle bad address/page not present (page fault)
	mov cr.ifa=r22

#ifdef CONFIG_HUGETLB_PAGE
(p8)	mov cr.itir=r25				// change to default page-size for VHPT
#endif

	/*
	 * Now compute and insert the TLB entry for the virtual page table.  We never
	 * execute in a page table page so there is no need to set the exception deferral
	 * bit.
	 */
	adds r24=__DIRTY_BITS_NO_ED|_PAGE_PL_0|_PAGE_AR_RW,r23
	;;
(p7)	itc.d r24
	;;
#ifdef CONFIG_SMP
	/*
	 * Tell the assembler's dependency-violation checker that the above "itc" instructions
	 * cannot possibly affect the following loads:
	 */
	dv_serialize_data

	/*
	 * Re-check the L2 and L3 page-table entries.  If they changed, we may have
	 * received a ptc.g between reading the page table and the "itc".  If so,
	 * flush the entry we inserted and retry.
	 */
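	/*
	 * Illustrative interleaving of the race being closed here (a sketch,
	 * not an exhaustive enumeration):
	 *
	 *	this CPU			other CPU
	 *	---------------------------	-----------------------------
	 *	ld8 r18=[r21]	// read PTE
	 *					clears the PTE, issues ptc.g
	 *	itc.d r18	// insert stale PTE
	 *	ld8 r25=[r21]	// re-read PTE
	 *	cmp/ptc.l	// mismatch: purge the stale entry;
	 *			// the rfi below will then re-fault
	 */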
	ld8 r25=[r21]				// read L3 PTE again
	ld8 r26=[r17]				// read L2 entry again
	;;
	cmp.ne p6,p7=r26,r20			// did L2 entry change?
	mov r27=PAGE_SHIFT<<2
	;;
(p6)	ptc.l r22,r27				// purge PTE page translation
(p7)	cmp.ne.or.andcm p6,p7=r25,r18		// did L3 PTE change?
	;;
(p6)	ptc.l r16,r27				// purge translation
#endif

	mov pr=r31,-1				// restore predicate registers
	rfi
END(vhpt_miss)

	.org ia64_ivt+0x400
/////////////////////////////////////////////////////////////////////////////////////////
// 0x0400 Entry 1 (size 64 bundles) ITLB (21)
ENTRY(itlb_miss)
	DBG_FAULT(1)
	/*
	 * The ITLB handler accesses the L3 PTE via the virtually mapped linear
	 * page table.  If a nested TLB miss occurs, we switch into physical
	 * mode, walk the page table, and then re-execute the L3 PTE read
	 * and go on normally after that.
	 */
	mov r16=cr.ifa				// get virtual address
	mov r29=b0				// save b0
	mov r31=pr				// save predicates
.itlb_fault:
	mov r17=cr.iha				// get virtual address of L3 PTE
	movl r30=1f				// load nested fault continuation point
	;;
1:	ld8 r18=[r17]				// read L3 PTE
	;;
	mov b0=r29
	tbit.z p6,p0=r18,_PAGE_P_BIT		// page present bit cleared?
(p6)	br.cond.spnt page_fault
	;;
	itc.i r18
	;;
#ifdef CONFIG_SMP
	/*
	 * Tell the assembler's dependency-violation checker that the above "itc" instruction
	 * cannot possibly affect the following load:
	 */
	dv_serialize_data

	ld8 r19=[r17]				// read L3 PTE again and see if same
	mov r20=PAGE_SHIFT<<2			// setup page size for purge
	;;
	cmp.ne p7,p0=r18,r19
	;;
(p7)	ptc.l r16,r20
#endif
	mov pr=r31,-1
	rfi
END(itlb_miss)

	.org ia64_ivt+0x0800
/////////////////////////////////////////////////////////////////////////////////////////
// 0x0800 Entry 2 (size 64 bundles) DTLB (9,48)
ENTRY(dtlb_miss)
	DBG_FAULT(2)
	/*
	 * The DTLB handler accesses the L3 PTE via the virtually mapped linear
	 * page table.  If a nested TLB miss occurs, we switch into physical
	 * mode, walk the page table, and then re-execute the L3 PTE read
	 * and go on normally after that.
	 */
	mov r16=cr.ifa				// get virtual address
	mov r29=b0				// save b0
	mov r31=pr				// save predicates
dtlb_fault:
	mov r17=cr.iha				// get virtual address of L3 PTE
	movl r30=1f				// load nested fault continuation point
	;;
1:	ld8 r18=[r17]				// read L3 PTE
	;;
	mov b0=r29
	tbit.z p6,p0=r18,_PAGE_P_BIT		// page present bit cleared?
(p6)	br.cond.spnt page_fault
	;;
	itc.d r18
	;;
#ifdef CONFIG_SMP
	/*
	 * Tell the assembler's dependency-violation checker that the above "itc" instruction
	 * cannot possibly affect the following load:
	 */
	dv_serialize_data

	ld8 r19=[r17]				// read L3 PTE again and see if same
	mov r20=PAGE_SHIFT<<2			// setup page size for purge
	;;
	cmp.ne p7,p0=r18,r19
	;;
(p7)	ptc.l r16,r20
#endif
	mov pr=r31,-1
	rfi
END(dtlb_miss)
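	/*
	 * Note on the fast paths above: cr.iha already holds the address of the
	 * L3 PTE within the virtually mapped linear page table (the same value
	 * "thash" would compute for cr.ifa), so each handler can fetch the PTE
	 * with a single ld8.  Only if the TLB entry covering that linear page
	 * table page is itself missing do we take a nested DTLB miss and fall
	 * back to the physical-mode walk in nested_dtlb_miss, which returns via
	 * the continuation point loaded into r30 (label 1 above).
	 */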

	.org ia64_ivt+0x0c00
/////////////////////////////////////////////////////////////////////////////////////////
// 0x0c00 Entry 3 (size 64 bundles) Alt ITLB (19)
ENTRY(alt_itlb_miss)
	DBG_FAULT(3)
	mov r16=cr.ifa				// get address that caused the TLB miss
	movl r17=PAGE_KERNEL
	mov r21=cr.ipsr
	movl r19=(((1 << IA64_MAX_PHYS_BITS) - 1) & ~0xfff)
	mov r31=pr
	;;
#ifdef CONFIG_DISABLE_VHPT
	shr.u r22=r16,61			// get the region number into r22
	;;
	cmp.gt p8,p0=6,r22			// user mode
	;;
(p8)	thash r17=r16
	;;
(p8)	mov cr.iha=r17
(p8)	mov r29=b0				// save b0
(p8)	br.cond.dptk .itlb_fault
#endif
	extr.u r23=r21,IA64_PSR_CPL0_BIT,2	// extract psr.cpl
	and r19=r19,r16				// clear ed, reserved bits, and PTE control bits
	shr.u r18=r16,57			// move address bit 61 to bit 4
	;;
	andcm r18=0x10,r18			// bit 4=~address-bit(61)
	cmp.ne p8,p0=r0,r23			// psr.cpl != 0?
	or r19=r17,r19				// insert PTE control bits into r19
	;;
	or r19=r19,r18				// set bit 4 (uncached) if the access was to region 6
(p8)	br.cond.spnt page_fault
	;;
	itc.i r19				// insert the TLB entry
	mov pr=r31,-1
	rfi
END(alt_itlb_miss)

	.org ia64_ivt+0x1000
/////////////////////////////////////////////////////////////////////////////////////////
// 0x1000 Entry 4 (size 64 bundles) Alt DTLB (7,46)
ENTRY(alt_dtlb_miss)
	DBG_FAULT(4)
	mov r16=cr.ifa				// get address that caused the TLB miss
	movl r17=PAGE_KERNEL
	mov r20=cr.isr
	movl r19=(((1 << IA64_MAX_PHYS_BITS) - 1) & ~0xfff)
	mov r21=cr.ipsr
	mov r31=pr
	;;
#ifdef CONFIG_DISABLE_VHPT
	shr.u r22=r16,61			// get the region number into r22
	;;
	cmp.gt p8,p0=6,r22			// access to region 0-5
	;;
(p8)	thash r17=r16
	;;
(p8)	mov cr.iha=r17
(p8)	mov r29=b0				// save b0
(p8)	br.cond.dptk dtlb_fault
#endif
	extr.u r23=r21,IA64_PSR_CPL0_BIT,2	// extract psr.cpl
	and r22=IA64_ISR_CODE_MASK,r20		// get the isr.code field
	tbit.nz p6,p7=r20,IA64_ISR_SP_BIT	// is speculation bit on?
	shr.u r18=r16,57			// move address bit 61 to bit 4
	and r19=r19,r16				// clear ed, reserved bits, and PTE control bits
	tbit.nz p9,p0=r20,IA64_ISR_NA_BIT	// is non-access bit on?
	;;
	andcm r18=0x10,r18			// bit 4=~address-bit(61)
	cmp.ne p8,p0=r0,r23
(p9)	cmp.eq.or.andcm p6,p7=IA64_ISR_CODE_LFETCH,r22	// check isr.code field
(p8)	br.cond.spnt page_fault

	dep r21=-1,r21,IA64_PSR_ED_BIT,1
	or r19=r19,r17				// insert PTE control bits into r19
	;;
	or r19=r19,r18				// set bit 4 (uncached) if the access was to region 6
(p6)	mov cr.ipsr=r21
	;;
(p7)	itc.d r19				// insert the TLB entry
	mov pr=r31,-1
	rfi
END(alt_dtlb_miss)
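	/*
	 * Worked example of the identity-mapping trick used by the two
	 * alternate miss handlers above: shifting the faulting address right
	 * by 57 moves address bit 61 into bit position 4, and "andcm 0x10,r18"
	 * then yields bit 4 = ~bit 61.  For a region-6 address (bits 63-61 =
	 * 110) bit 61 is 0, so bit 4 of the PTE is set and the mapping is
	 * uncacheable; for region 7 (bits 63-61 = 111) bit 61 is 1, bit 4
	 * stays clear, and the mapping is cacheable.
	 */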

	.org ia64_ivt+0x1400
/////////////////////////////////////////////////////////////////////////////////////////
// 0x1400 Entry 5 (size 64 bundles) Data nested TLB (6,45)
ENTRY(nested_dtlb_miss)
	/*
	 * In the absence of kernel bugs, we get here when the virtually mapped linear
	 * page table is accessed non-speculatively (e.g., in the Dirty-bit, Instruction
	 * Access-bit, or Data Access-bit faults).  If the DTLB entry for the virtual page
	 * table is missing, a nested TLB miss fault is triggered and control is
	 * transferred to this point.  When this happens, we look up the PTE for the
	 * faulting address by walking the page table in physical mode and return to the
	 * continuation point passed in register r30 (or call page_fault if the address is
	 * not mapped).
	 *
	 * Input:	r16:	faulting address
	 *		r29:	saved b0
	 *		r30:	continuation address
	 *		r31:	saved pr
	 *
	 * Output:	r17:	physical address of L3 PTE of faulting address
	 *		r29:	saved b0
	 *		r30:	continuation address
	 *		r31:	saved pr
	 *
	 * Clobbered:	b0, r18, r19, r21, psr.dt (cleared)
	 */
	rsm psr.dt				// switch to using physical data addressing
	mov r19=IA64_KR(PT_BASE)		// get the page table base address
	shl r21=r16,3				// shift bit 60 into sign bit
	;;
	shr.u r17=r16,61			// get the region number into r17
	;;
	cmp.eq p6,p7=5,r17			// is faulting address in region 5?
	shr.u r18=r16,PGDIR_SHIFT		// get bits 33-63 of faulting address
	;;
(p7)	dep r17=r17,r19,(PAGE_SHIFT-3),3	// put region number bits in place

	srlz.d
	LOAD_PHYSICAL(p6, r19, swapper_pg_dir)	// region 5 is rooted at swapper_pg_dir

	.pred.rel "mutex", p6, p7
(p6)	shr.u r21=r21,PGDIR_SHIFT+PAGE_SHIFT
(p7)	shr.u r21=r21,PGDIR_SHIFT+PAGE_SHIFT-3
	;;
(p6)	dep r17=r18,r19,3,(PAGE_SHIFT-3)	// r17=PTA + IFA(33,42)*8
(p7)	dep r17=r18,r17,3,(PAGE_SHIFT-6)	// r17=PTA + (((IFA(61,63) << 7) | IFA(33,39))*8)
	cmp.eq p7,p6=0,r21			// unused address bits all zeroes?
	shr.u r18=r16,PMD_SHIFT			// shift L2 index into position
	;;
	ld8 r17=[r17]				// fetch the L1 entry (may be 0)
	;;
(p7)	cmp.eq p6,p7=r17,r0			// was L1 entry NULL?
	dep r17=r18,r17,3,(PAGE_SHIFT-3)	// compute address of L2 page table entry
	;;
(p7)	ld8 r17=[r17]				// fetch the L2 entry (may be 0)
	shr.u r19=r16,PAGE_SHIFT		// shift L3 index into position
	;;
(p7)	cmp.eq.or.andcm p6,p7=r17,r0		// was L2 entry NULL?
	dep r17=r19,r17,3,(PAGE_SHIFT-3)	// compute address of L3 page table entry
(p6)	br.cond.spnt page_fault
	mov b0=r30
	br.sptk.many b0				// return to continuation point
END(nested_dtlb_miss)

	.org ia64_ivt+0x1800
/////////////////////////////////////////////////////////////////////////////////////////
// 0x1800 Entry 6 (size 64 bundles) Instruction Key Miss (24)
ENTRY(ikey_miss)
	DBG_FAULT(6)
	FAULT(6)
END(ikey_miss)

	//-----------------------------------------------------------------------------------
	// call do_page_fault (predicates are in r31, psr.dt may be off, r16 is faulting address)
ENTRY(page_fault)
	ssm psr.dt
	;;
	srlz.i
	;;
	SAVE_MIN_WITH_COVER
	alloc r15=ar.pfs,0,0,3,0
	mov out0=cr.ifa
	mov out1=cr.isr
	adds r3=8,r2				// set up second base pointer
	;;
	ssm psr.ic | PSR_DEFAULT_BITS
	;;
	srlz.i					// guarantee that interruption collection is on
	;;
(p15)	ssm psr.i				// restore psr.i
	movl r14=ia64_leave_kernel
	;;
	SAVE_REST
	mov rp=r14
	;;
	adds out2=16,r12			// out2 = pointer to pt_regs
	br.call.sptk.many b6=ia64_do_page_fault	// ignore return address
END(page_fault)

	.org ia64_ivt+0x1c00
/////////////////////////////////////////////////////////////////////////////////////////
// 0x1c00 Entry 7 (size 64 bundles) Data Key Miss (12,51)
ENTRY(dkey_miss)
	DBG_FAULT(7)
	FAULT(7)
END(dkey_miss)

	.org ia64_ivt+0x2000
/////////////////////////////////////////////////////////////////////////////////////////
// 0x2000 Entry 8 (size 64 bundles) Dirty-bit (54)
ENTRY(dirty_bit)
	DBG_FAULT(8)
	/*
	 * What we do here is simply to turn on the dirty bit in the PTE.  We need to
	 * update both the page table and the TLB entry.  To efficiently access the PTE,
	 * we address it through the virtual page table.  Most likely, the TLB entry for
	 * the relevant virtual page table page is still present in the TLB so we can
	 * normally do this without additional TLB misses.  In case the necessary virtual
	 * page table TLB entry isn't present, we take a nested TLB miss hit where we look
	 * up the physical address of the L3 PTE and then continue at label 1 below.
	 */
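	/*
	 * In C-like pseudocode, the SMP path below is roughly the following
	 * (a sketch only; "insert" and "purge" stand in for itc.d and ptc.l):
	 *
	 *	old = *pte;
	 *	new = old | _PAGE_D | _PAGE_A;
	 *	if (cmpxchg(pte, old, new) == old)
	 *		insert(new);		// itc.d
	 *	if (*pte != new)		// lost a race (cmpxchg or ptc.g)
	 *		purge(ifa);		// ptc.l; the rfi will re-fault
	 */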
	mov r16=cr.ifa				// get the address that caused the fault
	movl r30=1f				// load continuation point in case of nested fault
	;;
	thash r17=r16				// compute virtual address of L3 PTE
	mov r29=b0				// save b0 in case of nested fault
	mov r31=pr				// save pr
#ifdef CONFIG_SMP
	mov r28=ar.ccv				// save ar.ccv
	;;
1:	ld8 r18=[r17]
	;;					// avoid RAW on r18
	mov ar.ccv=r18				// set compare value for cmpxchg
	or r25=_PAGE_D|_PAGE_A,r18		// set the dirty and accessed bits
	;;
	cmpxchg8.acq r26=[r17],r25,ar.ccv
	mov r24=PAGE_SHIFT<<2
	;;
	cmp.eq p6,p7=r26,r18
	;;
(p6)	itc.d r25				// install updated PTE
	;;
	/*
	 * Tell the assembler's dependency-violation checker that the above "itc" instruction
	 * cannot possibly affect the following load:
	 */
	dv_serialize_data

	ld8 r18=[r17]				// read PTE again
	;;
	cmp.eq p6,p7=r18,r25			// is it the same as the newly installed PTE?
	;;
(p7)	ptc.l r16,r24
	mov b0=r29				// restore b0
	mov ar.ccv=r28
#else
	;;
1:	ld8 r18=[r17]
	;;					// avoid RAW on r18
	or r18=_PAGE_D|_PAGE_A,r18		// set the dirty and accessed bits
	mov b0=r29				// restore b0
	;;
	st8 [r17]=r18				// store back updated PTE
	itc.d r18				// install updated PTE
#endif
	mov pr=r31,-1				// restore pr
	rfi
END(dirty_bit)

	.org ia64_ivt+0x2400
/////////////////////////////////////////////////////////////////////////////////////////
// 0x2400 Entry 9 (size 64 bundles) Instruction Access-bit (27)
ENTRY(iaccess_bit)
	DBG_FAULT(9)
	// Like Entry 8, except for instruction access
	mov r16=cr.ifa				// get the address that caused the fault
	movl r30=1f				// load continuation point in case of nested fault
	mov r31=pr				// save predicates
#ifdef CONFIG_ITANIUM
	/*
	 * Erratum 10 (IFA may contain incorrect address) has "NoFix" status.
	 */
	mov r17=cr.ipsr
	;;
	mov r18=cr.iip
	tbit.z p6,p0=r17,IA64_PSR_IS_BIT	// IA64 instruction set?
	;;
(p6)	mov r16=r18				// if so, use cr.iip instead of cr.ifa
#endif /* CONFIG_ITANIUM */
	;;
	thash r17=r16				// compute virtual address of L3 PTE
	mov r29=b0				// save b0 in case of nested fault
#ifdef CONFIG_SMP
	mov r28=ar.ccv				// save ar.ccv
	;;
1:	ld8 r18=[r17]
	;;
	mov ar.ccv=r18				// set compare value for cmpxchg
	or r25=_PAGE_A,r18			// set the accessed bit
	;;
	cmpxchg8.acq r26=[r17],r25,ar.ccv
	mov r24=PAGE_SHIFT<<2
	;;
	cmp.eq p6,p7=r26,r18
	;;
(p6)	itc.i r25				// install updated PTE
	;;
	/*
	 * Tell the assembler's dependency-violation checker that the above "itc" instruction
	 * cannot possibly affect the following load:
	 */
	dv_serialize_data

	ld8 r18=[r17]				// read PTE again
	;;
	cmp.eq p6,p7=r18,r25			// is it the same as the newly installed PTE?
	;;
(p7)	ptc.l r16,r24
	mov b0=r29				// restore b0
	mov ar.ccv=r28
#else /* !CONFIG_SMP */
	;;
1:	ld8 r18=[r17]
	;;
	or r18=_PAGE_A,r18			// set the accessed bit
	mov b0=r29				// restore b0
	;;
	st8 [r17]=r18				// store back updated PTE
	itc.i r18				// install updated PTE
#endif /* !CONFIG_SMP */
	mov pr=r31,-1
	rfi
END(iaccess_bit)

	.org ia64_ivt+0x2800
/////////////////////////////////////////////////////////////////////////////////////////
// 0x2800 Entry 10 (size 64 bundles) Data Access-bit (15,55)
ENTRY(daccess_bit)
	DBG_FAULT(10)
	// Like Entry 8, except for data access
	mov r16=cr.ifa				// get the address that caused the fault
	movl r30=1f				// load continuation point in case of nested fault
	;;
	thash r17=r16				// compute virtual address of L3 PTE
	mov r31=pr
	mov r29=b0				// save b0 in case of nested fault
#ifdef CONFIG_SMP
	mov r28=ar.ccv				// save ar.ccv
	;;
1:	ld8 r18=[r17]
	;;					// avoid RAW on r18
	mov ar.ccv=r18				// set compare value for cmpxchg
	or r25=_PAGE_A,r18			// set the accessed bit
	;;
	cmpxchg8.acq r26=[r17],r25,ar.ccv
	mov r24=PAGE_SHIFT<<2
	;;
	cmp.eq p6,p7=r26,r18
	;;
(p6)	itc.d r25				// install updated PTE
	/*
	 * Tell the assembler's dependency-violation checker that the above "itc" instruction
	 * cannot possibly affect the following load:
	 */
	dv_serialize_data
	;;
	ld8 r18=[r17]				// read PTE again
	;;
	cmp.eq p6,p7=r18,r25			// is it the same as the newly installed PTE?
	;;
(p7)	ptc.l r16,r24
	mov ar.ccv=r28
#else
	;;
1:	ld8 r18=[r17]
	;;					// avoid RAW on r18
	or r18=_PAGE_A,r18			// set the accessed bit
	;;
	st8 [r17]=r18				// store back updated PTE
	itc.d r18				// install updated PTE
#endif
	mov b0=r29				// restore b0
	mov pr=r31,-1
	rfi
END(daccess_bit)

	.org ia64_ivt+0x2c00
/////////////////////////////////////////////////////////////////////////////////////////
// 0x2c00 Entry 11 (size 64 bundles) Break instruction (33)
ENTRY(break_fault)
	/*
	 * The streamlined system call entry/exit paths only save/restore the initial part
	 * of pt_regs.  This implies that the callers of system calls must adhere to the
	 * normal procedure-calling conventions.
	 *
	 *   Registers to be saved & restored:
	 *	CR registers: cr.ipsr, cr.iip, cr.ifs
	 *	AR registers: ar.unat, ar.pfs, ar.rsc, ar.rnat, ar.bspstore, ar.fpsr
	 *	others: pr, b0, b6, loadrs, r1, r11, r12, r13, r15
	 *   Registers to be restored only:
	 *	r8-r11: output value from the system call.
	 *
	 * During system call exit, scratch registers (including r15) are modified/cleared
	 * to prevent leaking bits from kernel to user level.
	 */
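	/*
	 * Rough shape of the fast syscall path below, as C-like pseudocode
	 * (a sketch only; the error, NaT, and restart details are omitted):
	 *
	 *	if (cr.iim != __IA64_BREAK_SYSCALL)
	 *		goto non_syscall;
	 *	skip_break_insn();			// advance cr.iip / cr.ipsr.ei
	 *	ia64_syscall_setup();			// build the partial pt_regs
	 *	if (nr - 1024 < NR_syscalls)		// syscall # arrives in r15
	 *		f = sys_call_table[nr - 1024];
	 *	else
	 *		f = sys_ni_syscall;
	 *	if (current_thread_info()->flags & _TIF_SYSCALL_TRACEAUDIT)
	 *		ia64_trace_syscall();
	 *	else
	 *		f(...);			// returns to ia64_ret_from_syscall
	 */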
	DBG_FAULT(11)
	mov r16=IA64_KR(CURRENT)		// r16 = current task; 12 cycle read lat.
	mov r17=cr.iim
	mov r18=__IA64_BREAK_SYSCALL
	mov r21=ar.fpsr
	mov r29=cr.ipsr
	mov r19=b6
	mov r25=ar.unat
	mov r27=ar.rsc
	mov r26=ar.pfs
	mov r28=cr.iip
	mov r31=pr				// prepare to save predicates
	mov r20=r1
	;;
	adds r16=IA64_TASK_THREAD_ON_USTACK_OFFSET,r16
	cmp.eq p0,p7=r18,r17			// is this a system call? (p7 <- false, if so)
(p7)	br.cond.spnt non_syscall
	;;
	ld1 r17=[r16]				// load current->thread.on_ustack flag
	st1 [r16]=r0				// clear current->thread.on_ustack flag
	add r1=-IA64_TASK_THREAD_ON_USTACK_OFFSET,r16	// set r1 for MINSTATE_START_SAVE_MIN_VIRT
	;;
	invala

	/* adjust return address so we skip over the break instruction: */

	extr.u r8=r29,41,2			// extract ei field from cr.ipsr
	;;
	cmp.eq p6,p7=2,r8			// ipsr.ei==2?
	mov r2=r1				// setup r2 for ia64_syscall_setup
	;;
(p6)	mov r8=0				// clear ei to 0
(p6)	adds r28=16,r28				// switch cr.iip to next bundle (cr.ipsr.ei wrapped)
(p7)	adds r8=1,r8				// increment ei to next slot
	;;
	cmp.eq pKStk,pUStk=r0,r17		// are we in kernel mode already?
	dep r29=r8,r29,41,2			// insert new ei into cr.ipsr
	;;

	// switch from user to kernel RBS:
	MINSTATE_START_SAVE_MIN_VIRT
	br.call.sptk.many b7=ia64_syscall_setup
	;;
	MINSTATE_END_SAVE_MIN_VIRT		// switch to bank 1
	ssm psr.ic | PSR_DEFAULT_BITS
	;;
	srlz.i					// guarantee that interruption collection is on
	mov r3=NR_syscalls - 1
	;;
(p15)	ssm psr.i				// restore psr.i
	// p10==true means out registers are more than 8 or r15's NaT is true
(p10)	br.cond.spnt.many ia64_ret_from_syscall
	;;
	movl r16=sys_call_table

	adds r15=-1024,r15			// r15 contains the syscall number---subtract 1024
	movl r2=ia64_ret_from_syscall
	;;
	shladd r20=r15,3,r16			// r20 = sys_call_table + 8*(syscall-1024)
	cmp.leu p6,p7=r15,r3			// (syscall > 0 && syscall < 1024 + NR_syscalls) ?
	mov rp=r2				// set the real return addr
	;;
(p6)	ld8 r20=[r20]				// load address of syscall entry point
(p7)	movl r20=sys_ni_syscall

	add r2=TI_FLAGS+IA64_TASK_SIZE,r13
	;;
	ld4 r2=[r2]				// r2 = current_thread_info()->flags
	;;
	and r2=_TIF_SYSCALL_TRACEAUDIT,r2	// mask trace or audit
	;;
	cmp.eq p8,p0=r2,r0
	mov b6=r20
	;;
(p8)	br.call.sptk.many b6=b6			// ignore this return addr
	br.cond.sptk ia64_trace_syscall
	// NOT REACHED
END(break_fault)

	.org ia64_ivt+0x3000
/////////////////////////////////////////////////////////////////////////////////////////
// 0x3000 Entry 12 (size 64 bundles) External Interrupt (4)
ENTRY(interrupt)
	DBG_FAULT(12)
	mov r31=pr				// prepare to save predicates
	;;
	SAVE_MIN_WITH_COVER			// uses r31; defines r2 and r3
	ssm psr.ic | PSR_DEFAULT_BITS
	;;
	adds r3=8,r2				// set up second base pointer for SAVE_REST
	srlz.i					// ensure everybody knows psr.ic is back on
	;;
	SAVE_REST
	;;
	alloc r14=ar.pfs,0,0,2,0		// must be first in an insn group
	mov out0=cr.ivr				// pass cr.ivr as first arg
	add out1=16,sp				// pass pointer to pt_regs as second arg
	;;
	srlz.d					// make sure we see the effect of cr.ivr
	movl r14=ia64_leave_kernel
	;;
	mov rp=r14
	br.call.sptk.many b6=ia64_handle_irq
END(interrupt)

	.org ia64_ivt+0x3400
/////////////////////////////////////////////////////////////////////////////////////////
// 0x3400 Entry 13 (size 64 bundles) Reserved
	DBG_FAULT(13)
	FAULT(13)

	.org ia64_ivt+0x3800
/////////////////////////////////////////////////////////////////////////////////////////
// 0x3800 Entry 14 (size 64 bundles) Reserved
	DBG_FAULT(14)
	FAULT(14)

	/*
	 * There is no particular reason for this code to be here, other than that
	 * there happens to be space here that would go unused otherwise.  If this
	 * fault ever gets "unreserved", simply move the following code to a more
	 * suitable spot...
	 *
	 * ia64_syscall_setup() is a separate subroutine so that it can
	 * allocate stacked registers so it can safely demine any
	 * potential NaT values from the input registers.
	 *
	 * On entry:
	 *	- executing on bank 0 or bank 1 register set (doesn't matter)
	 *	-  r1: stack pointer
	 *	-  r2: current task pointer
	 *	-  r3: preserved
	 *	- r11: original contents (saved ar.pfs to be saved)
	 *	- r12: original contents (sp to be saved)
	 *	- r13: original contents (tp to be saved)
	 *	- r15: original contents (syscall # to be saved)
	 *	- r18: saved bsp (after switching to kernel stack)
	 *	- r19: saved b6
	 *	- r20: saved r1 (gp)
	 *	- r21: saved ar.fpsr
	 *	- r22: kernel's register backing store base (krbs_base)
	 *	- r23: saved ar.bspstore
	 *	- r24: saved ar.rnat
	 *	- r25: saved ar.unat
	 *	- r26: saved ar.pfs
	 *	- r27: saved ar.rsc
	 *	- r28: saved cr.iip
	 *	- r29: saved cr.ipsr
	 *	- r31: saved pr
	 *	-  b0: original contents (to be saved)
	 * On exit:
	 *	- executing on bank 1 registers
	 *	- psr.ic enabled, interrupts restored
	 *	- p10: TRUE if syscall is invoked with more than 8 out
	 *	  registers or r15's NaT is true
	 *	-  r1: kernel's gp
	 *	-  r3: preserved (same as on entry)
	 *	-  r8: -EINVAL if p10 is true
	 *	- r12: points to kernel stack
	 *	- r13: points to current task
	 *	- p15: TRUE if interrupts need to be re-enabled
	 *	- ar.fpsr: set to kernel settings
	 */
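	/*
	 * Note on "demining": because the caller's in0-in7 may hold NaT
	 * values, ia64_syscall_setup tests each one with tnat.nz and
	 * predicates a "mov inN=-1" on the result, so that later uses of the
	 * input registers cannot raise NaT-consumption faults.  Per argument
	 * register, conceptually (a sketch):
	 *
	 *	if (is_nat(inN))	// tnat.nz pX,p0=inN
	 *		inN = -1;	// (pX) mov inN=-1
	 */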
GLOBAL_ENTRY(ia64_syscall_setup)
#if PT(B6) != 0
# error This code assumes that b6 is the first field in pt_regs.
#endif
	st8 [r1]=r19				// save b6
	add r16=PT(CR_IPSR),r1			// initialize first base pointer
	add r17=PT(R11),r1			// initialize second base pointer
	;;
	alloc r19=ar.pfs,8,0,0,0		// ensure in0-in7 are writable
	st8 [r16]=r29,PT(AR_PFS)-PT(CR_IPSR)	// save cr.ipsr
	tnat.nz p8,p0=in0

	st8.spill [r17]=r11,PT(CR_IIP)-PT(R11)	// save r11
	tnat.nz p9,p0=in1
(pKStk)	mov r18=r0				// make sure r18 isn't NaT
	;;

	st8 [r16]=r26,PT(CR_IFS)-PT(AR_PFS)	// save ar.pfs
	st8 [r17]=r28,PT(AR_UNAT)-PT(CR_IIP)	// save cr.iip
	mov r28=b0				// save b0 (2 cyc)
	;;

	st8 [r17]=r25,PT(AR_RSC)-PT(AR_UNAT)	// save ar.unat
	dep r19=0,r19,38,26			// clear all bits but 0..37 [I0]
(p8)	mov in0=-1
	;;

	st8 [r16]=r19,PT(AR_RNAT)-PT(CR_IFS)	// store ar.pfs.pfm in cr.ifs
	extr.u r11=r19,7,7	// I0		// get sol of ar.pfs
	and r8=0x7f,r19		// A		// get sof of ar.pfs

	st8 [r17]=r27,PT(AR_BSPSTORE)-PT(AR_RSC)// save ar.rsc
	tbit.nz p15,p0=r29,IA64_PSR_I_BIT // I0
(p9)	mov in1=-1
	;;

(pUStk) sub r18=r18,r22				// r18=RSE.ndirty*8
	tnat.nz p10,p0=in2
	add r11=8,r11
	;;
(pKStk) adds r16=PT(PR)-PT(AR_RNAT),r16		// skip over ar_rnat field
(pKStk) adds r17=PT(B0)-PT(AR_BSPSTORE),r17	// skip over ar_bspstore field
	tnat.nz p11,p0=in3
	;;
(p10)	mov in2=-1
	tnat.nz p12,p0=in4			// [I0]
(p11)	mov in3=-1
	;;
(pUStk) st8 [r16]=r24,PT(PR)-PT(AR_RNAT)	// save ar.rnat
(pUStk) st8 [r17]=r23,PT(B0)-PT(AR_BSPSTORE)	// save ar.bspstore
	shl r18=r18,16				// compute ar.rsc to be used for "loadrs"
	;;
	st8 [r16]=r31,PT(LOADRS)-PT(PR)		// save predicates
	st8 [r17]=r28,PT(R1)-PT(B0)		// save b0
	tnat.nz p13,p0=in5			// [I0]
	;;
	st8 [r16]=r18,PT(R12)-PT(LOADRS)	// save ar.rsc value for "loadrs"
	st8.spill [r17]=r20,PT(R13)-PT(R1)	// save original r1
(p12)	mov in4=-1
	;;

.mem.offset 0,0;	st8.spill [r16]=r12,PT(AR_FPSR)-PT(R12)	// save r12
.mem.offset 8,0;	st8.spill [r17]=r13,PT(R15)-PT(R13)	// save r13
(p13)	mov in5=-1
	;;
	st8 [r16]=r21,PT(R8)-PT(AR_FPSR)	// save ar.fpsr
	tnat.nz p14,p0=in6
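	/*
	 * The next compare sanity-checks the caller's register frame: r11 by
	 * now holds sol+8 (locals plus the 8 syscall argument slots) and r8
	 * holds sof (total frame size), both extracted from ar.pfs above.  A
	 * frame with more than 8 output registers makes r11 < r8, setting p10
	 * and causing the syscall to be rejected with -EINVAL below.
	 */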
	cmp.lt p10,p9=r11,r8	// frame size can't be more than local+8
	;;
	mov r8=1
(p9)	tnat.nz p10,p0=r15
	adds r12=-16,r1		// switch to kernel memory stack (with 16 bytes of scratch)

	st8.spill [r17]=r15			// save r15
	tnat.nz p8,p0=in7
	nop.i 0

	mov r13=r2				// establish `current'
	movl r1=__gp				// establish kernel global pointer
	;;
	st8 [r16]=r8		// ensure pt_regs.r8 != 0 (see handle_syscall_error)
(p14)	mov in6=-1
(p8)	mov in7=-1

	cmp.eq pSys,pNonSys=r0,r0		// set pSys=1, pNonSys=0
	movl r17=FPSR_DEFAULT
	;;
	mov.m ar.fpsr=r17			// set ar.fpsr to kernel default value
(p10)	mov r8=-EINVAL
	br.ret.sptk.many b7
END(ia64_syscall_setup)

	.org ia64_ivt+0x3c00
/////////////////////////////////////////////////////////////////////////////////////////
// 0x3c00 Entry 15 (size 64 bundles) Reserved
	DBG_FAULT(15)
	FAULT(15)

	/*
	 * Squatting in this space ...
	 *
	 * This special-case dispatcher for illegal operation faults allows preserved
	 * registers to be modified through a callback function (asm only) that is handed
	 * back from the fault handler in r8.  Up to three arguments can be passed to the
	 * callback function by returning an aggregate with the callback as its first
	 * element, followed by the arguments.
	 */
ENTRY(dispatch_illegal_op_fault)
	.prologue
	.body
	SAVE_MIN_WITH_COVER
	ssm psr.ic | PSR_DEFAULT_BITS
	;;
	srlz.i					// guarantee that interruption collection is on
	;;
(p15)	ssm psr.i				// restore psr.i
	adds r3=8,r2				// set up second base pointer for SAVE_REST
	;;
	alloc r14=ar.pfs,0,0,1,0		// must be first in insn group
	mov out0=ar.ec
	;;
	SAVE_REST
	PT_REGS_UNWIND_INFO(0)
	;;
	br.call.sptk.many rp=ia64_illegal_op_fault
.ret0:	;;
	alloc r14=ar.pfs,0,0,3,0		// must be first in insn group
	mov out0=r9
	mov out1=r10
	mov out2=r11
	movl r15=ia64_leave_kernel
	;;
	mov rp=r15
	mov b6=r8
	;;
	cmp.ne p6,p0=0,r8
(p6)	br.call.dpnt.many b6=b6			// call returns to ia64_leave_kernel
	br.sptk.many ia64_leave_kernel
END(dispatch_illegal_op_fault)

	.org ia64_ivt+0x4000
/////////////////////////////////////////////////////////////////////////////////////////
// 0x4000 Entry 16 (size 64 bundles) Reserved
	DBG_FAULT(16)
	FAULT(16)

	.org ia64_ivt+0x4400
/////////////////////////////////////////////////////////////////////////////////////////
// 0x4400 Entry 17 (size 64 bundles) Reserved
	DBG_FAULT(17)
	FAULT(17)

ENTRY(non_syscall)
	SAVE_MIN_WITH_COVER

	// There is no particular reason for this code to be here, other than that
	// there happens to be space here that would go unused otherwise.  If this
	// fault ever gets "unreserved", simply move the following code to a more
	// suitable spot...

	alloc r14=ar.pfs,0,0,2,0
	mov out0=cr.iim
	add out1=16,sp
	adds r3=8,r2				// set up second base pointer for SAVE_REST

	ssm psr.ic | PSR_DEFAULT_BITS
	;;
	srlz.i					// guarantee that interruption collection is on
	;;
(p15)	ssm psr.i				// restore psr.i
	movl r15=ia64_leave_kernel
	;;
	SAVE_REST
	mov rp=r15
	;;
	br.call.sptk.many b6=ia64_bad_break	// avoid WAW on CFM and ignore return addr
END(non_syscall)

	.org ia64_ivt+0x4800
/////////////////////////////////////////////////////////////////////////////////////////
// 0x4800 Entry 18 (size 64 bundles) Reserved
	DBG_FAULT(18)
	FAULT(18)

	/*
	 * There is no particular reason for this code to be here, other than that
	 * there happens to be space here that would go unused otherwise.  If this
	 * fault ever gets "unreserved", simply move the following code to a more
	 * suitable spot...
	 */

ENTRY(dispatch_unaligned_handler)
	SAVE_MIN_WITH_COVER
	;;
	alloc r14=ar.pfs,0,0,2,0		// now it's safe (must be first in insn group!)
	mov out0=cr.ifa
	adds out1=16,sp

	ssm psr.ic | PSR_DEFAULT_BITS
	;;
	srlz.i					// guarantee that interruption collection is on
	;;
(p15)	ssm psr.i				// restore psr.i
	adds r3=8,r2				// set up second base pointer
	;;
	SAVE_REST
	movl r14=ia64_leave_kernel
	;;
	mov rp=r14
	br.sptk.many ia64_prepare_handle_unaligned
END(dispatch_unaligned_handler)

	.org ia64_ivt+0x4c00
/////////////////////////////////////////////////////////////////////////////////////////
// 0x4c00 Entry 19 (size 64 bundles) Reserved
	DBG_FAULT(19)
	FAULT(19)

	/*
	 * There is no particular reason for this code to be here, other than that
	 * there happens to be space here that would go unused otherwise.  If this
	 * fault ever gets "unreserved", simply move the following code to a more
	 * suitable spot...
	 */

ENTRY(dispatch_to_fault_handler)
	/*
	 * Input:
	 *	psr.ic:	off
	 *	r19:	fault vector number (e.g., 24 for General Exception)
	 *	r31:	contains saved predicates (pr)
	 */
	SAVE_MIN_WITH_COVER_R19
	alloc r14=ar.pfs,0,0,5,0
	mov out0=r15
	mov out1=cr.isr
	mov out2=cr.ifa
	mov out3=cr.iim
	mov out4=cr.itir
	;;
	ssm psr.ic | PSR_DEFAULT_BITS
	;;
	srlz.i					// guarantee that interruption collection is on
	;;
(p15)	ssm psr.i				// restore psr.i
	adds r3=8,r2				// set up second base pointer for SAVE_REST
	;;
	SAVE_REST
	movl r14=ia64_leave_kernel
	;;
	mov rp=r14
	br.call.sptk.many b6=ia64_fault
END(dispatch_to_fault_handler)

//
// --- End of long entries, beginning of short entries
//

	.org ia64_ivt+0x5000
/////////////////////////////////////////////////////////////////////////////////////////
// 0x5000 Entry 20 (size 16 bundles) Page Not Present (10,22,49)
ENTRY(page_not_present)
	DBG_FAULT(20)
	mov r16=cr.ifa
	rsm psr.dt
	/*
	 * The Linux page fault handler doesn't expect non-present pages to be in
	 * the TLB.  Flush the existing entry now, so we meet that expectation.
	 */
	mov r17=PAGE_SHIFT<<2
	;;
	ptc.l r16,r17
	;;
	mov r31=pr
	srlz.d
	br.sptk.many page_fault
END(page_not_present)

	.org ia64_ivt+0x5100
/////////////////////////////////////////////////////////////////////////////////////////
// 0x5100 Entry 21 (size 16 bundles) Key Permission (13,25,52)
ENTRY(key_permission)
	DBG_FAULT(21)
	mov r16=cr.ifa
	rsm psr.dt
	mov r31=pr
	;;
	srlz.d
	br.sptk.many page_fault
END(key_permission)

	.org ia64_ivt+0x5200
/////////////////////////////////////////////////////////////////////////////////////////
// 0x5200 Entry 22 (size 16 bundles) Instruction Access Rights (26)
ENTRY(iaccess_rights)
	DBG_FAULT(22)
	mov r16=cr.ifa
	rsm psr.dt
	mov r31=pr
	;;
	srlz.d
	br.sptk.many page_fault
END(iaccess_rights)

	.org ia64_ivt+0x5300
/////////////////////////////////////////////////////////////////////////////////////////
// 0x5300 Entry 23 (size 16 bundles) Data Access Rights (14,53)
ENTRY(daccess_rights)
	DBG_FAULT(23)
	mov r16=cr.ifa
	rsm psr.dt
	mov r31=pr
	;;
	srlz.d
	br.sptk.many page_fault
END(daccess_rights)

	.org ia64_ivt+0x5400
/////////////////////////////////////////////////////////////////////////////////////////
// 0x5400 Entry 24 (size 16 bundles) General Exception (5,32,34,36,38,39)
ENTRY(general_exception)
	DBG_FAULT(24)
	mov r16=cr.isr
	mov r31=pr
	;;
	cmp4.eq p6,p0=0,r16
(p6)	br.sptk.many dispatch_illegal_op_fault
	;;
	mov r19=24				// fault number
	br.sptk.many dispatch_to_fault_handler
END(general_exception)

	.org ia64_ivt+0x5500
/////////////////////////////////////////////////////////////////////////////////////////
// 0x5500 Entry 25 (size 16 bundles) Disabled FP-Register (35)
ENTRY(disabled_fp_reg)
	DBG_FAULT(25)
	rsm psr.dfh				// ensure we can access fph
	;;
	srlz.d
	mov r31=pr
	mov r19=25
	br.sptk.many dispatch_to_fault_handler
END(disabled_fp_reg)

	.org ia64_ivt+0x5600
/////////////////////////////////////////////////////////////////////////////////////////
// 0x5600 Entry 26 (size 16 bundles) NaT Consumption (11,23,37,50)
ENTRY(nat_consumption)
	DBG_FAULT(26)
	FAULT(26)
END(nat_consumption)

	.org ia64_ivt+0x5700
/////////////////////////////////////////////////////////////////////////////////////////
// 0x5700 Entry 27 (size 16 bundles) Speculation (40)
ENTRY(speculation_vector)
	DBG_FAULT(27)
	/*
	 * A [f]chk.[as] instruction needs to take the branch to the recovery code but
	 * this part of the architecture is not implemented in hardware on some CPUs, such
	 * as Itanium.  Thus, in general we need to emulate the behavior.  IIM contains
	 * the relative target (not yet sign extended).  So after sign extending it we
	 * simply add it to IIP.  We also need to reset the EI field of the IPSR to zero,
	 * i.e., the slot to restart into.
	 *
	 * cr.iim contains zero_ext(imm21)
	 */
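	/*
	 * Worked example of the sign-extension arithmetic below: cr.iim holds
	 * zero_ext(imm21).  Shifting left by 43 (= 64-21) moves the sign bit
	 * of imm21 into bit 63; the arithmetic shift right by 39 (= 43-4)
	 * then yields sign_ext(imm21) << 4, i.e., the branch offset in bytes,
	 * since imm21 counts 16-byte bundles.  Adding that offset to cr.iip
	 * gives the recovery-code address.
	 */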
	mov r18=cr.iim
	;;
	mov r17=cr.iip
	shl r18=r18,43				// put sign bit in position (43=64-21)
	;;

	mov r16=cr.ipsr
	shr r18=r18,39				// sign extend (39=43-4)
	;;

	add r17=r17,r18				// now add the offset
	;;
	mov cr.iip=r17
	dep r16=0,r16,41,2			// clear EI
	;;

	mov cr.ipsr=r16
	;;

	rfi					// and go back
END(speculation_vector)

	.org ia64_ivt+0x5800
/////////////////////////////////////////////////////////////////////////////////////////
// 0x5800 Entry 28 (size 16 bundles) Reserved
	DBG_FAULT(28)
	FAULT(28)

	.org ia64_ivt+0x5900
/////////////////////////////////////////////////////////////////////////////////////////
// 0x5900 Entry 29 (size 16 bundles) Debug (16,28,56)
ENTRY(debug_vector)
	DBG_FAULT(29)
	FAULT(29)
END(debug_vector)

	.org ia64_ivt+0x5a00
/////////////////////////////////////////////////////////////////////////////////////////
// 0x5a00 Entry 30 (size 16 bundles) Unaligned Reference (57)
ENTRY(unaligned_access)
	DBG_FAULT(30)
	mov r16=cr.ipsr
	mov r31=pr				// prepare to save predicates
	;;
	br.sptk.many dispatch_unaligned_handler
END(unaligned_access)

	.org ia64_ivt+0x5b00
/////////////////////////////////////////////////////////////////////////////////////////
// 0x5b00 Entry 31 (size 16 bundles) Unsupported Data Reference (57)
ENTRY(unsupported_data_reference)
	DBG_FAULT(31)
	FAULT(31)
END(unsupported_data_reference)

	.org ia64_ivt+0x5c00
/////////////////////////////////////////////////////////////////////////////////////////
// 0x5c00 Entry 32 (size 16 bundles) Floating-Point Fault (64)
ENTRY(floating_point_fault)
	DBG_FAULT(32)
	FAULT(32)
END(floating_point_fault)

	.org ia64_ivt+0x5d00
/////////////////////////////////////////////////////////////////////////////////////////
// 0x5d00 Entry 33 (size 16 bundles) Floating-Point Trap (66)
ENTRY(floating_point_trap)
	DBG_FAULT(33)
	FAULT(33)
END(floating_point_trap)

	.org ia64_ivt+0x5e00
/////////////////////////////////////////////////////////////////////////////////////////
// 0x5e00 Entry 34 (size 16 bundles) Lower Privilege Transfer Trap (66)
ENTRY(lower_privilege_trap)
	DBG_FAULT(34)
	FAULT(34)
END(lower_privilege_trap)

	.org ia64_ivt+0x5f00
/////////////////////////////////////////////////////////////////////////////////////////
// 0x5f00 Entry 35 (size 16 bundles) Taken Branch Trap (68)
ENTRY(taken_branch_trap)
	DBG_FAULT(35)
	FAULT(35)
END(taken_branch_trap)

	.org ia64_ivt+0x6000
/////////////////////////////////////////////////////////////////////////////////////////
// 0x6000 Entry 36 (size 16 bundles) Single Step Trap (69)
ENTRY(single_step_trap)
	DBG_FAULT(36)
	FAULT(36)
END(single_step_trap)

	.org ia64_ivt+0x6100
/////////////////////////////////////////////////////////////////////////////////////////
// 0x6100 Entry 37 (size 16 bundles) Reserved
	DBG_FAULT(37)
	FAULT(37)

	.org ia64_ivt+0x6200
/////////////////////////////////////////////////////////////////////////////////////////
// 0x6200 Entry 38 (size 16 bundles) Reserved
	DBG_FAULT(38)
	FAULT(38)

	.org ia64_ivt+0x6300
/////////////////////////////////////////////////////////////////////////////////////////
// 0x6300 Entry 39 (size 16 bundles) Reserved
	DBG_FAULT(39)
	FAULT(39)

	.org ia64_ivt+0x6400
/////////////////////////////////////////////////////////////////////////////////////////
// 0x6400 Entry 40 (size 16 bundles) Reserved
	DBG_FAULT(40)
	FAULT(40)

	.org ia64_ivt+0x6500
/////////////////////////////////////////////////////////////////////////////////////////
// 0x6500 Entry 41 (size 16 bundles) Reserved
	DBG_FAULT(41)
	FAULT(41)

	.org ia64_ivt+0x6600
/////////////////////////////////////////////////////////////////////////////////////////
// 0x6600 Entry 42 (size 16 bundles) Reserved
	DBG_FAULT(42)
	FAULT(42)

	.org ia64_ivt+0x6700
/////////////////////////////////////////////////////////////////////////////////////////
// 0x6700 Entry 43 (size 16 bundles) Reserved
	DBG_FAULT(43)
	FAULT(43)

	.org ia64_ivt+0x6800
/////////////////////////////////////////////////////////////////////////////////////////
// 0x6800 Entry 44 (size 16 bundles) Reserved
	DBG_FAULT(44)
	FAULT(44)

	.org ia64_ivt+0x6900
/////////////////////////////////////////////////////////////////////////////////////////
// 0x6900 Entry 45 (size 16 bundles) IA-32 Exception (17,18,29,41,42,43,44,58,60,61,62,72,73,75,76,77)
ENTRY(ia32_exception)
	DBG_FAULT(45)
	FAULT(45)
END(ia32_exception)

	.org ia64_ivt+0x6a00
/////////////////////////////////////////////////////////////////////////////////////////
// 0x6a00 Entry 46 (size 16 bundles) IA-32 Intercept (30,31,59,70,71)
ENTRY(ia32_intercept)
	DBG_FAULT(46)
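	/*
	 * Fast path below: an IA-32 system-flag intercept (intercept code 2,
	 * extracted from cr.isr) that was raised because the EFLAGS.ac bit
	 * (bit 18) changed needs no emulation, so it is absorbed with a plain
	 * rfi.  Every other intercept falls through to the generic FAULT(46)
	 * handling.
	 */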
#ifdef CONFIG_IA32_SUPPORT
	mov r31=pr
	mov r16=cr.isr
	;;
	extr.u r17=r16,16,8			// get ISR.code
	mov r18=ar.eflag
	mov r19=cr.iim				// old eflag value
	;;
	cmp.ne p6,p0=2,r17
(p6)	br.cond.spnt 1f				// not a system flag fault
	xor r16=r18,r19
	;;
	extr.u r17=r16,18,1			// get the eflags.ac bit
	;;
	cmp.eq p6,p0=0,r17
(p6)	br.cond.spnt 1f				// eflags.ac bit didn't change
	;;
	mov pr=r31,-1				// restore predicate registers
	rfi

1:
#endif	// CONFIG_IA32_SUPPORT
	FAULT(46)
END(ia32_intercept)

	.org ia64_ivt+0x6b00
/////////////////////////////////////////////////////////////////////////////////////////
// 0x6b00 Entry 47 (size 16 bundles) IA-32 Interrupt (74)
ENTRY(ia32_interrupt)
	DBG_FAULT(47)
#ifdef CONFIG_IA32_SUPPORT
	mov r31=pr
	br.sptk.many dispatch_to_ia32_handler
#else
	FAULT(47)
#endif
END(ia32_interrupt)

	.org ia64_ivt+0x6c00
/////////////////////////////////////////////////////////////////////////////////////////
// 0x6c00 Entry 48 (size 16 bundles) Reserved
	DBG_FAULT(48)
	FAULT(48)

	.org ia64_ivt+0x6d00
/////////////////////////////////////////////////////////////////////////////////////////
// 0x6d00 Entry 49 (size 16 bundles) Reserved
	DBG_FAULT(49)
	FAULT(49)

	.org ia64_ivt+0x6e00
/////////////////////////////////////////////////////////////////////////////////////////
// 0x6e00 Entry 50 (size 16 bundles) Reserved
	DBG_FAULT(50)
	FAULT(50)

	.org ia64_ivt+0x6f00
/////////////////////////////////////////////////////////////////////////////////////////
// 0x6f00 Entry 51 (size 16 bundles) Reserved
	DBG_FAULT(51)
	FAULT(51)

	.org ia64_ivt+0x7000
/////////////////////////////////////////////////////////////////////////////////////////
// 0x7000 Entry 52 (size 16 bundles) Reserved
	DBG_FAULT(52)
	FAULT(52)

	.org ia64_ivt+0x7100
/////////////////////////////////////////////////////////////////////////////////////////
// 0x7100 Entry 53 (size 16 bundles) Reserved
	DBG_FAULT(53)
	FAULT(53)

	.org ia64_ivt+0x7200
/////////////////////////////////////////////////////////////////////////////////////////
// 0x7200 Entry 54 (size 16 bundles) Reserved
	DBG_FAULT(54)
	FAULT(54)

	.org ia64_ivt+0x7300
/////////////////////////////////////////////////////////////////////////////////////////
// 0x7300 Entry 55 (size 16 bundles) Reserved
	DBG_FAULT(55)
	FAULT(55)

	.org ia64_ivt+0x7400
/////////////////////////////////////////////////////////////////////////////////////////
// 0x7400 Entry 56 (size 16 bundles) Reserved
	DBG_FAULT(56)
	FAULT(56)

	.org ia64_ivt+0x7500
/////////////////////////////////////////////////////////////////////////////////////////
// 0x7500 Entry 57 (size 16 bundles) Reserved
	DBG_FAULT(57)
	FAULT(57)

	.org ia64_ivt+0x7600
/////////////////////////////////////////////////////////////////////////////////////////
// 0x7600 Entry 58 (size 16 bundles) Reserved
	DBG_FAULT(58)
	FAULT(58)

	.org ia64_ivt+0x7700
/////////////////////////////////////////////////////////////////////////////////////////
// 0x7700 Entry 59 (size 16 bundles) Reserved
	DBG_FAULT(59)
	FAULT(59)

	.org ia64_ivt+0x7800
/////////////////////////////////////////////////////////////////////////////////////////
// 0x7800 Entry 60 (size 16 bundles) Reserved
	DBG_FAULT(60)
	FAULT(60)

	.org ia64_ivt+0x7900
/////////////////////////////////////////////////////////////////////////////////////////
// 0x7900 Entry 61 (size 16 bundles) Reserved
	DBG_FAULT(61)
	FAULT(61)

	.org ia64_ivt+0x7a00
/////////////////////////////////////////////////////////////////////////////////////////
// 0x7a00 Entry 62 (size 16 bundles) Reserved
	DBG_FAULT(62)
	FAULT(62)

	.org ia64_ivt+0x7b00
/////////////////////////////////////////////////////////////////////////////////////////
// 0x7b00 Entry 63 (size 16 bundles) Reserved
	DBG_FAULT(63)
	FAULT(63)

	.org ia64_ivt+0x7c00
/////////////////////////////////////////////////////////////////////////////////////////
// 0x7c00 Entry 64 (size 16 bundles) Reserved
	DBG_FAULT(64)
	FAULT(64)

	.org ia64_ivt+0x7d00
/////////////////////////////////////////////////////////////////////////////////////////
// 0x7d00 Entry 65 (size 16 bundles) Reserved
	DBG_FAULT(65)
	FAULT(65)

	.org ia64_ivt+0x7e00
/////////////////////////////////////////////////////////////////////////////////////////
// 0x7e00 Entry 66 (size 16 bundles) Reserved
	DBG_FAULT(66)
	FAULT(66)

	.org ia64_ivt+0x7f00
/////////////////////////////////////////////////////////////////////////////////////////
// 0x7f00 Entry 67 (size 16 bundles) Reserved
	DBG_FAULT(67)
	FAULT(67)

#ifdef CONFIG_IA32_SUPPORT

	/*
	 * There is no particular reason for this code to be here, other than that
	 * there happens to be space here that would go unused otherwise.  If this
	 * fault ever gets "unreserved", simply move the following code to a more
	 * suitable spot...
	 */

	// IA32 interrupt entry point

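	/*
	 * For reference, the int 0x80 dispatch below maps the IA-32 syscall
	 * registers (spilled into pt_regs by SAVE_MIN/SAVE_REST) onto the
	 * ia64 calling convention as follows:
	 *
	 *	eax (r8)  -> syscall number	ebx (r11) -> out0
	 *	ecx (r9)  -> out1		edx (r10) -> out2
	 *	esi (r14) -> out3		edi (r15) -> out4
	 *	ebp (r13) -> out5
	 */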
ENTRY(dispatch_to_ia32_handler)
	SAVE_MIN
	;;
	mov r14=cr.isr
	ssm psr.ic | PSR_DEFAULT_BITS
	;;
	srlz.i					// guarantee that interruption collection is on
	;;
(p15)	ssm psr.i
	adds r3=8,r2				// base pointer for SAVE_REST
	;;
	SAVE_REST
	;;
	mov r15=0x80
	shr r14=r14,16				// get interrupt number
	;;
	cmp.ne p6,p0=r14,r15
(p6)	br.call.dpnt.many b6=non_ia32_syscall

	adds r14=IA64_PT_REGS_R8_OFFSET + 16,sp	// 16 byte hole per SW conventions
	adds r15=IA64_PT_REGS_R1_OFFSET + 16,sp
	;;
	cmp.eq pSys,pNonSys=r0,r0		// set pSys=1, pNonSys=0
	ld8 r8=[r14]				// get r8
	;;
	st8 [r15]=r8				// save original EAX in r1 (IA32 procs don't use the GP)
	;;
	alloc r15=ar.pfs,0,0,6,0		// must be first in an insn group
	;;
	ld4 r8=[r14],8				// r8 == eax (syscall number)
	mov r15=IA32_NR_syscalls
	;;
	cmp.ltu.unc p6,p7=r8,r15
	ld4 out1=[r14],8			// r9 == ecx
	;;
	ld4 out2=[r14],8			// r10 == edx
	;;
	ld4 out0=[r14]				// r11 == ebx
	adds r14=(IA64_PT_REGS_R13_OFFSET) + 16,sp
	;;
	ld4 out5=[r14],PT(R14)-PT(R13)		// r13 == ebp
	;;
	ld4 out3=[r14],PT(R15)-PT(R14)		// r14 == esi
	adds r2=TI_FLAGS+IA64_TASK_SIZE,r13
	;;
	ld4 out4=[r14]				// r15 == edi
	movl r16=ia32_syscall_table
	;;
(p6)	shladd r16=r8,3,r16			// force ni_syscall if not valid syscall number
	ld4 r2=[r2]				// r2 = current_thread_info()->flags
	;;
	ld8 r16=[r16]
	and r2=_TIF_SYSCALL_TRACEAUDIT,r2	// mask trace or audit
	;;
	mov b6=r16
	movl r15=ia32_ret_from_syscall
	cmp.eq p8,p0=r2,r0
	;;
	mov rp=r15
(p8)	br.call.sptk.many b6=b6
	br.cond.sptk ia32_trace_syscall

non_ia32_syscall:
	alloc r15=ar.pfs,0,0,2,0
	mov out0=r14				// interrupt #
	add out1=16,sp				// pointer to pt_regs
	;;					// avoid WAW on CFM
	br.call.sptk.many rp=ia32_bad_interrupt
.ret1:	movl r15=ia64_leave_kernel
	;;
	mov rp=r15
	br.ret.sptk.many rp
END(dispatch_to_ia32_handler)

#endif /* CONFIG_IA32_SUPPORT */