/*
 * arch/ia64/kernel/ivt.S
 *
 * Copyright (C) 1998-2001, 2003, 2005 Hewlett-Packard Co
 *	Stephane Eranian <eranian@hpl.hp.com>
 *	David Mosberger <davidm@hpl.hp.com>
 * Copyright (C) 2000, 2002-2003 Intel Co
 *	Asit Mallick <asit.k.mallick@intel.com>
 *	Suresh Siddha <suresh.b.siddha@intel.com>
 *	Kenneth Chen <kenneth.w.chen@intel.com>
 *	Fenghua Yu <fenghua.yu@intel.com>
 *
 * 00/08/23 Asit Mallick <asit.k.mallick@intel.com> TLB handling for SMP
 * 00/12/20 David Mosberger-Tang <davidm@hpl.hp.com> DTLB/ITLB handler now uses virtual PT.
 */
/*
 * This file defines the interruption vector table used by the CPU.
 * It does not include one entry per possible cause of interruption.
 *
 * The first 20 entries of the table contain 64 bundles each while the
 * remaining 48 entries contain only 16 bundles each.
 *
 * The 64 bundles are used to allow inlining the whole handler for critical
 * interruptions like TLB misses.
 *
 * For each entry, the comment is as follows:
 *
 *		// 0x1c00 Entry 7 (size 64 bundles) Data Key Miss (12,51)
 *		entry offset ----/     /         /                  /  /
 *		entry number ---------/         /                  /  /
 *		size of the entry -------------/                  /  /
 *		vector name -------------------------------------/  /
 *		interruptions triggering this vector ----------------/
 *
 * The table is 32KB in size and must be aligned on 32KB boundary.
 * (The CPU ignores the 15 lower bits of the address)
 *
 * Table is based upon EAS2.6 (Oct 1999)
 */

#include <linux/config.h>

#include <asm/asmmacro.h>
#include <asm/break.h>
#include <asm/ia32.h>
#include <asm/kregs.h>
#include <asm/asm-offsets.h>
#include <asm/pgtable.h>
#include <asm/processor.h>
#include <asm/ptrace.h>
#include <asm/system.h>
#include <asm/thread_info.h>
#include <asm/unistd.h>
#include <asm/errno.h>

#if 1
# define PSR_DEFAULT_BITS	psr.ac
#else
# define PSR_DEFAULT_BITS	0
#endif

#if 0
  /*
   * This lets you track the last eight faults that occurred on the CPU.  Make sure ar.k2 isn't
   * needed for something else before enabling this...
   */
# define DBG_FAULT(i)	mov r16=ar.k2;;	shl r16=r16,8;;	add r16=(i),r16;;mov ar.k2=r16
#else
# define DBG_FAULT(i)
#endif

#include "minstate.h"

#define FAULT(n)									\
	mov r31=pr;									\
	mov r19=n;;			/* prepare to save predicates */		\
	br.sptk.many dispatch_to_fault_handler

	.section .text.ivt,"ax"

	.align 32768	// align on 32KB boundary
	.global ia64_ivt
ia64_ivt:
/////////////////////////////////////////////////////////////////////////////////////////
// 0x0000 Entry 0 (size 64 bundles) VHPT Translation (8,20,47)
ENTRY(vhpt_miss)
	DBG_FAULT(0)
	/*
	 * The VHPT vector is invoked when the TLB entry for the virtual page table
	 * is missing.  This happens only as a result of a previous
	 * (the "original") TLB miss, which may either be caused by an instruction
	 * fetch or a data access (or non-access).
	 *
	 * What we do here is normal TLB miss handling for the _original_ miss, followed
	 * by inserting the TLB entry for the virtual page table page that the VHPT
	 * walker was attempting to access.  The latter gets inserted as long
	 * as both L1 and L2 have valid mappings for the faulting address.
	 * The TLB entry for the original miss gets inserted only if
	 * the L3 entry indicates that the page is present.
	 *
	 * do_page_fault gets invoked in the following cases:
	 *	- the faulting virtual address uses unimplemented address bits
	 *	- the faulting virtual address has no L1, L2, or L3 mapping
	 */
	mov r16=cr.ifa				// get address that caused the TLB miss
#ifdef CONFIG_HUGETLB_PAGE
	movl r18=PAGE_SHIFT
	mov r25=cr.itir
#endif
	;;
	rsm psr.dt				// use physical addressing for data
	mov r31=pr				// save the predicate registers
	mov r19=IA64_KR(PT_BASE)		// get page table base address
	shl r21=r16,3				// shift bit 60 into sign bit
	shr.u r17=r16,61			// get the region number into r17
	;;
	shr r22=r21,3
#ifdef CONFIG_HUGETLB_PAGE
	extr.u r26=r25,2,6			// extract the faulting page size from cr.itir
	;;
	cmp.ne p8,p0=r18,r26			// p8 = faulting page size != PAGE_SHIFT (huge page)
	sub r27=r26,r18
	;;
(p8)	dep r25=r18,r25,2,6			// rewrite itir page size to the default PAGE_SHIFT
(p8)	shr r22=r22,r27				// rescale address for the huge page size
#endif
	;;
	cmp.eq p6,p7=5,r17			// is IFA pointing into to region 5?
	shr.u r18=r22,PGDIR_SHIFT		// get bits 33-63 of the faulting address
	;;
(p7)	dep r17=r17,r19,(PAGE_SHIFT-3),3	// put region number bits in place

	srlz.d					// ensure "rsm psr.dt" has taken effect
	LOAD_PHYSICAL(p6, r19, swapper_pg_dir)	// region 5 is rooted at swapper_pg_dir

	.pred.rel "mutex", p6, p7
(p6)	shr.u r21=r21,PGDIR_SHIFT+PAGE_SHIFT
(p7)	shr.u r21=r21,PGDIR_SHIFT+PAGE_SHIFT-3
	;;
(p6)	dep r17=r18,r19,3,(PAGE_SHIFT-3)	// r17=PTA + IFA(33,42)*8
(p7)	dep r17=r18,r17,3,(PAGE_SHIFT-6)	// r17=PTA + (((IFA(61,63) << 7) | IFA(33,39))*8)
	cmp.eq p7,p6=0,r21			// unused address bits all zeroes?
	shr.u r18=r22,PMD_SHIFT			// shift L2 index into position
	;;
	ld8 r17=[r17]				// fetch the L1 entry (may be 0)
	;;
(p7)	cmp.eq p6,p7=r17,r0			// was L1 entry NULL?
	dep r17=r18,r17,3,(PAGE_SHIFT-3)	// compute address of L2 page table entry
	;;
(p7)	ld8 r20=[r17]				// fetch the L2 entry (may be 0)
	shr.u r19=r22,PAGE_SHIFT		// shift L3 index into position
	;;
(p7)	cmp.eq.or.andcm p6,p7=r20,r0		// was L2 entry NULL?
	dep r21=r19,r20,3,(PAGE_SHIFT-3)	// compute address of L3 page table entry
	;;
(p7)	ld8 r18=[r21]				// read the L3 PTE
	mov r19=cr.isr				// cr.isr bit 0 tells us if this is an insn miss
	;;
(p7)	tbit.z p6,p7=r18,_PAGE_P_BIT		// page present bit cleared?
	mov r22=cr.iha				// get the VHPT address that caused the TLB miss
	;;					// avoid RAW on p7
(p7)	tbit.nz.unc p10,p11=r19,32		// is it an instruction TLB miss?
	dep r23=0,r20,0,PAGE_SHIFT		// clear low bits to get page address
	;;
(p10)	itc.i r18				// insert the instruction TLB entry
(p11)	itc.d r18				// insert the data TLB entry
(p6)	br.cond.spnt.many page_fault		// handle bad address/page not present (page fault)
	mov cr.ifa=r22				// point IFA at the VHPT address for the second insert

#ifdef CONFIG_HUGETLB_PAGE
(p8)	mov cr.itir=r25				// change to default page-size for VHPT
#endif

	/*
	 * Now compute and insert the TLB entry for the virtual page table.  We never
	 * execute in a page table page so there is no need to set the exception deferral
	 * bit.
	 */
	adds r24=__DIRTY_BITS_NO_ED|_PAGE_PL_0|_PAGE_AR_RW,r23
	;;
(p7)	itc.d r24				// insert the VHPT translation
	;;
#ifdef CONFIG_SMP
	/*
	 * Tell the assembler's dependency-violation checker that the above "itc" instructions
	 * cannot possibly affect the following loads:
	 */
	dv_serialize_data

	/*
	 * Re-check L2 and L3 pagetable.  If they changed, we may have received a ptc.g
	 * between reading the pagetable and the "itc".  If so, flush the entry we
	 * inserted and retry.
	 */
	ld8 r25=[r21]				// read L3 PTE again
	ld8 r26=[r17]				// read L2 entry again
	;;
	cmp.ne p6,p7=r26,r20			// did L2 entry change
	mov r27=PAGE_SHIFT<<2
	;;
(p6)	ptc.l r22,r27				// purge PTE page translation
(p7)	cmp.ne.or.andcm p6,p7=r25,r18		// did L3 PTE change
	;;
(p6)	ptc.l r16,r27				// purge translation
#endif

	mov pr=r31,-1				// restore predicate registers
	rfi
END(vhpt_miss)

	.org ia64_ivt+0x400
/////////////////////////////////////////////////////////////////////////////////////////
// 0x0400 Entry 1 (size 64 bundles) ITLB (21)
ENTRY(itlb_miss)
	DBG_FAULT(1)
	/*
	 * The ITLB handler accesses the L3 PTE via the virtually mapped linear
	 * page table.  If a nested TLB miss occurs, we switch into physical
	 * mode, walk the page table, and then re-execute the L3 PTE read
	 * and go on normally after that.
	 */
	mov r16=cr.ifa				// get virtual address
	mov r29=b0				// save b0
	mov r31=pr				// save predicates
.itlb_fault:
	mov r17=cr.iha				// get virtual address of L3 PTE
	movl r30=1f				// load nested fault continuation point
	;;
1:	ld8 r18=[r17]				// read L3 PTE
	;;
	mov b0=r29
	tbit.z p6,p0=r18,_PAGE_P_BIT		// page present bit cleared?
(p6)	br.cond.spnt page_fault
	;;
	itc.i r18				// insert the instruction TLB entry
	;;
#ifdef CONFIG_SMP
	/*
	 * Tell the assembler's dependency-violation checker that the above "itc" instructions
	 * cannot possibly affect the following loads:
	 */
	dv_serialize_data

	ld8 r19=[r17]				// read L3 PTE again and see if same
	mov r20=PAGE_SHIFT<<2			// setup page size for purge
	;;
	cmp.ne p7,p0=r18,r19			// a ptc.g may have raced with us; if so, purge
	;;
(p7)	ptc.l r16,r20
#endif
	mov pr=r31,-1
	rfi
END(itlb_miss)

	.org ia64_ivt+0x0800
/////////////////////////////////////////////////////////////////////////////////////////
// 0x0800 Entry 2 (size 64 bundles) DTLB (9,48)
ENTRY(dtlb_miss)
	DBG_FAULT(2)
	/*
	 * The DTLB handler accesses the L3 PTE via the virtually mapped linear
	 * page table.  If a nested TLB miss occurs, we switch into physical
	 * mode, walk the page table, and then re-execute the L3 PTE read
	 * and go on normally after that.
	 */
	mov r16=cr.ifa				// get virtual address
	mov r29=b0				// save b0
	mov r31=pr				// save predicates
dtlb_fault:
	mov r17=cr.iha				// get virtual address of L3 PTE
	movl r30=1f				// load nested fault continuation point
	;;
1:	ld8 r18=[r17]				// read L3 PTE
	;;
	mov b0=r29
	tbit.z p6,p0=r18,_PAGE_P_BIT		// page present bit cleared?
(p6)	br.cond.spnt page_fault
	;;
	itc.d r18				// insert the data TLB entry
	;;
#ifdef CONFIG_SMP
	/*
	 * Tell the assembler's dependency-violation checker that the above "itc" instructions
	 * cannot possibly affect the following loads:
	 */
	dv_serialize_data

	ld8 r19=[r17]				// read L3 PTE again and see if same
	mov r20=PAGE_SHIFT<<2			// setup page size for purge
	;;
	cmp.ne p7,p0=r18,r19			// a ptc.g may have raced with us; if so, purge
	;;
(p7)	ptc.l r16,r20
#endif
	mov pr=r31,-1
	rfi
END(dtlb_miss)

	.org ia64_ivt+0x0c00
/////////////////////////////////////////////////////////////////////////////////////////
// 0x0c00 Entry 3 (size 64 bundles) Alt ITLB (19)
ENTRY(alt_itlb_miss)
	DBG_FAULT(3)
	mov r16=cr.ifa				// get address that caused the TLB miss
	movl r17=PAGE_KERNEL
	mov r21=cr.ipsr
	movl r19=(((1 << IA64_MAX_PHYS_BITS) - 1) & ~0xfff)
	mov r31=pr
	;;
#ifdef CONFIG_DISABLE_VHPT
	shr.u r22=r16,61			// get the region number into r22
	;;
	cmp.gt p8,p0=6,r22			// user mode
	;;
(p8)	thash r17=r16
	;;
(p8)	mov cr.iha=r17
(p8)	mov r29=b0				// save b0
(p8)	br.cond.dptk .itlb_fault
#endif
	extr.u r23=r21,IA64_PSR_CPL0_BIT,2	// extract psr.cpl
	and r19=r19,r16				// clear ed, reserved bits, and PTE control bits
	shr.u r18=r16,57			// move address bit 61 to bit 4
	;;
	andcm r18=0x10,r18			// bit 4=~address-bit(61)
	cmp.ne p8,p0=r0,r23			// psr.cpl != 0?
	or r19=r17,r19				// insert PTE control bits into r19
	;;
	or r19=r19,r18				// set bit 4 (uncached) if the access was to region 6
(p8)	br.cond.spnt page_fault			// user-level faults here always go to the slow path
	;;
	itc.i r19				// insert the TLB entry
	mov pr=r31,-1
	rfi
END(alt_itlb_miss)

	.org ia64_ivt+0x1000
/////////////////////////////////////////////////////////////////////////////////////////
// 0x1000 Entry 4 (size 64 bundles) Alt DTLB (7,46)
ENTRY(alt_dtlb_miss)
	DBG_FAULT(4)
	mov r16=cr.ifa				// get address that caused the TLB miss
	movl r17=PAGE_KERNEL
	mov r20=cr.isr
	movl r19=(((1 << IA64_MAX_PHYS_BITS) - 1) & ~0xfff)
	mov r21=cr.ipsr
	mov r31=pr
	;;
#ifdef CONFIG_DISABLE_VHPT
	shr.u r22=r16,61			// get the region number into r22
	;;
	cmp.gt p8,p0=6,r22			// access to region 0-5
	;;
(p8)	thash r17=r16
	;;
(p8)	mov cr.iha=r17
(p8)	mov r29=b0				// save b0
(p8)	br.cond.dptk dtlb_fault
#endif
	extr.u r23=r21,IA64_PSR_CPL0_BIT,2	// extract psr.cpl
	and r22=IA64_ISR_CODE_MASK,r20		// get the isr.code field
	tbit.nz p6,p7=r20,IA64_ISR_SP_BIT	// is speculation bit on?
	shr.u r18=r16,57			// move address bit 61 to bit 4
	and r19=r19,r16				// clear ed, reserved bits, and PTE control bits
	tbit.nz p9,p0=r20,IA64_ISR_NA_BIT	// is non-access bit on?
	;;
	andcm r18=0x10,r18			// bit 4=~address-bit(61)
	cmp.ne p8,p0=r0,r23			// psr.cpl != 0?
(p9)	cmp.eq.or.andcm p6,p7=IA64_ISR_CODE_LFETCH,r22	// check isr.code field
(p8)	br.cond.spnt page_fault

	dep r21=-1,r21,IA64_PSR_ED_BIT,1	// speculative load: just defer the exception
	or r19=r19,r17				// insert PTE control bits into r19
	;;
	or r19=r19,r18				// set bit 4 (uncached) if the access was to region 6
(p6)	mov cr.ipsr=r21
	;;
(p7)	itc.d r19				// insert the TLB entry
	mov pr=r31,-1
	rfi
END(alt_dtlb_miss)

	.org ia64_ivt+0x1400
/////////////////////////////////////////////////////////////////////////////////////////
// 0x1400 Entry 5 (size 64 bundles) Data nested TLB (6,45)
ENTRY(nested_dtlb_miss)
	/*
	 * In the absence of kernel bugs, we get here when the virtually mapped linear
	 * page table is accessed non-speculatively (e.g., in the Dirty-bit, Instruction
	 * Access-bit, or Data Access-bit faults).  If the DTLB entry for the virtual page
	 * table is missing, a nested TLB miss fault is triggered and control is
	 * transferred to this point.  When this happens, we lookup the pte for the
	 * faulting address by walking the page table in physical mode and return to the
	 * continuation point passed in register r30 (or call page_fault if the address is
	 * not mapped).
	 *
	 * Input:	r16:	faulting address
	 *		r29:	saved b0
	 *		r30:	continuation address
	 *		r31:	saved pr
	 *
	 * Output:	r17:	physical address of L3 PTE of faulting address
	 *		r29:	saved b0
	 *		r30:	continuation address
	 *		r31:	saved pr
	 *
	 * Clobbered:	b0, r18, r19, r21, r22, psr.dt (cleared)
	 */
	rsm psr.dt				// switch to using physical data addressing
	mov r19=IA64_KR(PT_BASE)		// get the page table base address
	shl r21=r16,3				// shift bit 60 into sign bit
	mov r18=cr.itir
	;;
	shr.u r17=r16,61			// get the region number into r17
	extr.u r18=r18,2,6			// get the faulting page size
	;;
	cmp.eq p6,p7=5,r17			// is faulting address in region 5?
	add r22=-PAGE_SHIFT,r18			// adjustment for hugetlb address
	add r18=PGDIR_SHIFT-PAGE_SHIFT,r18
	;;
	shr.u r22=r16,r22
	shr.u r18=r16,r18
(p7)	dep r17=r17,r19,(PAGE_SHIFT-3),3	// put region number bits in place

	srlz.d					// ensure "rsm psr.dt" has taken effect
	LOAD_PHYSICAL(p6, r19, swapper_pg_dir)	// region 5 is rooted at swapper_pg_dir

	.pred.rel "mutex", p6, p7
(p6)	shr.u r21=r21,PGDIR_SHIFT+PAGE_SHIFT
(p7)	shr.u r21=r21,PGDIR_SHIFT+PAGE_SHIFT-3
	;;
(p6)	dep r17=r18,r19,3,(PAGE_SHIFT-3)	// r17=PTA + IFA(33,42)*8
(p7)	dep r17=r18,r17,3,(PAGE_SHIFT-6)	// r17=PTA + (((IFA(61,63) << 7) | IFA(33,39))*8)
	cmp.eq p7,p6=0,r21			// unused address bits all zeroes?
	shr.u r18=r22,PMD_SHIFT			// shift L2 index into position
	;;
	ld8 r17=[r17]				// fetch the L1 entry (may be 0)
	;;
(p7)	cmp.eq p6,p7=r17,r0			// was L1 entry NULL?
	dep r17=r18,r17,3,(PAGE_SHIFT-3)	// compute address of L2 page table entry
	;;
(p7)	ld8 r17=[r17]				// fetch the L2 entry (may be 0)
	shr.u r19=r22,PAGE_SHIFT		// shift L3 index into position
	;;
(p7)	cmp.eq.or.andcm p6,p7=r17,r0		// was L2 entry NULL?
	dep r17=r19,r17,3,(PAGE_SHIFT-3)	// compute address of L3 page table entry
(p6)	br.cond.spnt page_fault
	mov b0=r30
	br.sptk.many b0				// return to continuation point
END(nested_dtlb_miss)

	.org ia64_ivt+0x1800
/////////////////////////////////////////////////////////////////////////////////////////
// 0x1800 Entry 6 (size 64 bundles) Instruction Key Miss (24)
ENTRY(ikey_miss)
	DBG_FAULT(6)
	FAULT(6)
END(ikey_miss)

	//-----------------------------------------------------------------------------------
	// call do_page_fault (predicates are in r31, psr.dt may be off, r16 is faulting address)
ENTRY(page_fault)
	ssm psr.dt				// re-enable virtual data addressing
	;;
	srlz.i
	;;
	SAVE_MIN_WITH_COVER
	alloc r15=ar.pfs,0,0,3,0
	mov out0=cr.ifa
	mov out1=cr.isr
	adds r3=8,r2				// set up second base pointer
	;;
	ssm psr.ic | PSR_DEFAULT_BITS
	;;
	srlz.i					// guarantee that interruption collection is on
	;;
(p15)	ssm psr.i				// restore psr.i
	movl r14=ia64_leave_kernel
	;;
	SAVE_REST
	mov rp=r14
	;;
	adds out2=16,r12			// out2 = pointer to pt_regs
	br.call.sptk.many b6=ia64_do_page_fault	// ignore return address
END(page_fault)

	.org ia64_ivt+0x1c00
/////////////////////////////////////////////////////////////////////////////////////////
// 0x1c00 Entry 7 (size 64 bundles) Data Key Miss (12,51)
ENTRY(dkey_miss)
	DBG_FAULT(7)
	FAULT(7)
END(dkey_miss)

	.org ia64_ivt+0x2000
/////////////////////////////////////////////////////////////////////////////////////////
// 0x2000 Entry 8 (size 64 bundles) Dirty-bit (54)
ENTRY(dirty_bit)
	DBG_FAULT(8)
	/*
	 * What we do here is to simply turn on the dirty bit in the PTE.  We need to
	 * update both the page-table and the TLB entry.  To efficiently access the PTE,
	 * we address it through the virtual page table.
	 * Most likely, the TLB entry for
	 * the relevant virtual page table page is still present in the TLB so we can
	 * normally do this without additional TLB misses.  In case the necessary virtual
	 * page table TLB entry isn't present, we take a nested TLB miss hit where we look
	 * up the physical address of the L3 PTE and then continue at label 1 below.
	 */
	mov r16=cr.ifa				// get the address that caused the fault
	movl r30=1f				// load continuation point in case of nested fault
	;;
	thash r17=r16				// compute virtual address of L3 PTE
	mov r29=b0				// save b0 in case of nested fault
	mov r31=pr				// save pr
#ifdef CONFIG_SMP
	mov r28=ar.ccv				// save ar.ccv
	;;
1:	ld8 r18=[r17]
	;;					// avoid RAW on r18
	mov ar.ccv=r18				// set compare value for cmpxchg
	or r25=_PAGE_D|_PAGE_A,r18		// set the dirty and accessed bits
	;;
	cmpxchg8.acq r26=[r17],r25,ar.ccv	// only update if the PTE is unchanged
	mov r24=PAGE_SHIFT<<2
	;;
	cmp.eq p6,p7=r26,r18			// did the cmpxchg succeed?
	;;
(p6)	itc.d r25				// install updated PTE
	;;
	/*
	 * Tell the assembler's dependency-violation checker that the above "itc" instructions
	 * cannot possibly affect the following loads:
	 */
	dv_serialize_data

	ld8 r18=[r17]				// read PTE again
	;;
	cmp.eq p6,p7=r18,r25			// is it same as the newly installed
	;;
(p7)	ptc.l r16,r24				// raced with a ptc.g: purge what we inserted
	mov b0=r29				// restore b0
	mov ar.ccv=r28
#else
	;;
1:	ld8 r18=[r17]
	;;					// avoid RAW on r18
	or r18=_PAGE_D|_PAGE_A,r18		// set the dirty and accessed bits
	mov b0=r29				// restore b0
	;;
	st8 [r17]=r18				// store back updated PTE
	itc.d r18				// install updated PTE
#endif
	mov pr=r31,-1				// restore pr
	rfi
END(dirty_bit)

	.org ia64_ivt+0x2400
/////////////////////////////////////////////////////////////////////////////////////////
// 0x2400 Entry 9 (size 64 bundles) Instruction Access-bit (27)
ENTRY(iaccess_bit)
	DBG_FAULT(9)
	// Like Entry 8, except for instruction access
	mov r16=cr.ifa				// get the address that caused the fault
	movl r30=1f				// load continuation point in case of nested fault
	mov r31=pr				// save predicates
#ifdef CONFIG_ITANIUM
	/*
	 * Erratum 10 (IFA may contain incorrect address) has "NoFix" status.
	 */
	mov r17=cr.ipsr
	;;
	mov r18=cr.iip
	tbit.z p6,p0=r17,IA64_PSR_IS_BIT	// IA64 instruction set?
	;;
(p6)	mov r16=r18				// if so, use cr.iip instead of cr.ifa
#endif /* CONFIG_ITANIUM */
	;;
	thash r17=r16				// compute virtual address of L3 PTE
	mov r29=b0				// save b0 in case of nested fault
#ifdef CONFIG_SMP
	mov r28=ar.ccv				// save ar.ccv
	;;
1:	ld8 r18=[r17]
	;;
	mov ar.ccv=r18				// set compare value for cmpxchg
	or r25=_PAGE_A,r18			// set the accessed bit
	;;
	cmpxchg8.acq r26=[r17],r25,ar.ccv	// only update if the PTE is unchanged
	mov r24=PAGE_SHIFT<<2
	;;
	cmp.eq p6,p7=r26,r18			// did the cmpxchg succeed?
	;;
(p6)	itc.i r25				// install updated PTE
	;;
	/*
	 * Tell the assembler's dependency-violation checker that the above "itc" instructions
	 * cannot possibly affect the following loads:
	 */
	dv_serialize_data

	ld8 r18=[r17]				// read PTE again
	;;
	cmp.eq p6,p7=r18,r25			// is it same as the newly installed
	;;
(p7)	ptc.l r16,r24				// raced with a ptc.g: purge what we inserted
	mov b0=r29				// restore b0
	mov ar.ccv=r28
#else /* !CONFIG_SMP */
	;;
1:	ld8 r18=[r17]
	;;
	or r18=_PAGE_A,r18			// set the accessed bit
	mov b0=r29				// restore b0
	;;
	st8 [r17]=r18				// store back updated PTE
	itc.i r18				// install updated PTE
#endif /* !CONFIG_SMP */
	mov pr=r31,-1
	rfi
END(iaccess_bit)

	.org ia64_ivt+0x2800
/////////////////////////////////////////////////////////////////////////////////////////
// 0x2800 Entry 10 (size 64 bundles) Data Access-bit (15,55)
ENTRY(daccess_bit)
	DBG_FAULT(10)
	// Like Entry 8, except for data access
	mov r16=cr.ifa				// get the address that caused the fault
	movl r30=1f				// load continuation point in case of nested fault
	;;
	thash r17=r16				// compute virtual address of L3 PTE
	mov r31=pr
	mov r29=b0				// save b0 in case of nested fault
#ifdef CONFIG_SMP
	mov r28=ar.ccv				// save ar.ccv
	;;
1:	ld8 r18=[r17]
	;;					// avoid RAW on r18
	mov ar.ccv=r18				// set compare value for cmpxchg
	or r25=_PAGE_A,r18			// set the accessed bit
	;;
	cmpxchg8.acq r26=[r17],r25,ar.ccv	// only update if the PTE is unchanged
	mov r24=PAGE_SHIFT<<2
	;;
	cmp.eq p6,p7=r26,r18			// did the cmpxchg succeed?
	;;
(p6)	itc.d r25				// install updated PTE
	/*
	 * Tell the assembler's dependency-violation checker that the above "itc" instructions
	 * cannot possibly affect the following loads:
	 */
	dv_serialize_data
	;;
	ld8 r18=[r17]				// read PTE again
	;;
	cmp.eq p6,p7=r18,r25			// is it same as the newly installed
	;;
(p7)	ptc.l r16,r24				// raced with a ptc.g: purge what we inserted
	mov ar.ccv=r28
#else
	;;
1:	ld8 r18=[r17]
	;;					// avoid RAW on r18
	or r18=_PAGE_A,r18			// set the accessed bit
	;;
	st8 [r17]=r18				// store back updated PTE
	itc.d r18				// install updated PTE
#endif
	mov b0=r29				// restore b0
	mov pr=r31,-1
	rfi
END(daccess_bit)

	.org ia64_ivt+0x2c00
/////////////////////////////////////////////////////////////////////////////////////////
// 0x2c00 Entry 11 (size 64 bundles) Break instruction (33)
ENTRY(break_fault)
	/*
	 * The streamlined system call entry/exit paths only save/restore the initial part
	 * of pt_regs.  This implies that the callers of system-calls must adhere to the
	 * normal procedure calling conventions.
	 *
	 *   Registers to be saved & restored:
	 *	CR registers: cr.ipsr, cr.iip, cr.ifs
	 *	AR registers: ar.unat, ar.pfs, ar.rsc, ar.rnat, ar.bspstore, ar.fpsr
	 *	others: pr, b0, b6, loadrs, r1, r11, r12, r13, r15
	 *   Registers to be restored only:
	 *	r8-r11: output value from the system call.
	 *
	 * During system call exit, scratch registers (including r15) are modified/cleared
	 * to prevent leaking bits from kernel to user level.
	 */
	DBG_FAULT(11)
	mov.m r16=IA64_KR(CURRENT)		// M2 r16 <- current task (12 cyc)
	mov r29=cr.ipsr				// M2 (12 cyc)
	mov r31=pr				// I0 (2 cyc)

	mov r17=cr.iim				// M2 (2 cyc)
	mov.m r27=ar.rsc			// M2 (12 cyc)
	mov r18=__IA64_BREAK_SYSCALL		// A

	mov.m ar.rsc=0				// M2
	mov.m r21=ar.fpsr			// M2 (12 cyc)
	mov r19=b6				// I0 (2 cyc)
	;;
	mov.m r23=ar.bspstore			// M2 (12 cyc)
	mov.m r24=ar.rnat			// M2 (5 cyc)
	mov.i r26=ar.pfs			// I0 (2 cyc)

	invala					// M0|1
	nop.m 0					// M
	mov r20=r1				// A			save r1

	nop.m 0
	movl r30=sys_call_table			// X

	mov r28=cr.iip				// M2 (2 cyc)
	cmp.eq p0,p7=r18,r17			// I0 is this a system call?
(p7)	br.cond.spnt non_syscall		// B  no ->
	//
	// From this point on, we are definitely on the syscall-path
	// and we can use (non-banked) scratch registers.
	//
///////////////////////////////////////////////////////////////////////
	mov r1=r16				// A    move task-pointer to "addl"-addressable reg
	mov r2=r16				// A    setup r2 for ia64_syscall_setup
	add r9=TI_FLAGS+IA64_TASK_SIZE,r16	// A    r9 = &current_thread_info()->flags

	adds r16=IA64_TASK_THREAD_ON_USTACK_OFFSET,r16
	adds r15=-1024,r15			// A    subtract 1024 from syscall number
	mov r3=NR_syscalls - 1
	;;
	ld1.bias r17=[r16]			// M0|1 r17 = current->thread.on_ustack flag
	ld4 r9=[r9]				// M0|1 r9 = current_thread_info()->flags
	extr.u r8=r29,41,2			// I0   extract ei field from cr.ipsr

	shladd r30=r15,3,r30			// A    r30 = sys_call_table + 8*(syscall-1024)
	addl r22=IA64_RBS_OFFSET,r1		// A    compute base of RBS
	cmp.leu p6,p7=r15,r3			// A    syscall number in range?
	;;

	lfetch.fault.excl.nt1 [r22]		// M0|1 prefetch RBS
(p6)	ld8 r30=[r30]				// M0|1 load address of syscall entry point
	tnat.nz.or p7,p0=r15			// I0	is syscall nr a NaT?

	mov.m ar.bspstore=r22			// M2   switch to kernel RBS
	cmp.eq p8,p9=2,r8			// A    isr.ei==2?
	;;

(p8)	mov r8=0				// A    clear ei to 0
(p7)	movl r30=sys_ni_syscall			// X

(p8)	adds r28=16,r28				// A    switch cr.iip to next bundle
(p9)	adds r8=1,r8				// A    increment ei to next slot
	nop.i 0
	;;

	mov.m r25=ar.unat			// M2 (5 cyc)
	dep r29=r8,r29,41,2			// I0   insert new ei into cr.ipsr
	adds r15=1024,r15			// A    restore original syscall number
	//
	// If any of the above loads miss in L1D, we'll stall here until
	// the data arrives.
	//
///////////////////////////////////////////////////////////////////////
	st1 [r16]=r0				// M2|3 clear current->thread.on_ustack flag
	mov b6=r30				// I0   setup syscall handler branch reg early
	cmp.eq pKStk,pUStk=r0,r17		// A    were we on kernel stacks already?

	and r9=_TIF_SYSCALL_TRACEAUDIT,r9	// A    mask trace or audit
	mov r18=ar.bsp				// M2 (12 cyc)
(pKStk)	br.cond.spnt .break_fixup		// B	we're already in kernel-mode -- fix up RBS
	;;
.back_from_break_fixup:
(pUStk)	addl r1=IA64_STK_OFFSET-IA64_PT_REGS_SIZE,r1	// A    compute base of memory stack
	cmp.eq p14,p0=r9,r0			// A    are syscalls being traced/audited?
	br.call.sptk.many b7=ia64_syscall_setup	// B
1:
	mov ar.rsc=0x3				// M2   set eager mode, pl 0, LE, loadrs=0
	nop 0
	bsw.1					// B (6 cyc) regs are saved, switch to bank 1
	;;

	ssm psr.ic | PSR_DEFAULT_BITS		// M2	now it's safe to re-enable intr.-collection
	movl r3=ia64_ret_from_syscall		// X
	;;

	srlz.i					// M0   ensure interruption collection is on
	mov rp=r3				// I0   set the real return addr
(p10)	br.cond.spnt.many ia64_ret_from_syscall	// B    return if bad call-frame or r15 is a NaT

(p15)	ssm psr.i				// M2   restore psr.i
(p14)	br.call.sptk.many b6=b6			// B    invoke syscall-handler (ignore return addr)
	br.cond.spnt.many ia64_trace_syscall	// B	do syscall-tracing thingamagic
	// NOT REACHED
///////////////////////////////////////////////////////////////////////
	// On entry, we optimistically assumed that we're coming from user-space.
	// For the rare cases where a system-call is done from within the kernel,
	// we fix things up at this point:
.break_fixup:
	add r1=-IA64_PT_REGS_SIZE,sp		// A    allocate space for pt_regs structure
	mov ar.rnat=r24				// M2	restore kernel's AR.RNAT
	;;
	mov ar.bspstore=r23			// M2	restore kernel's AR.BSPSTORE
	br.cond.sptk .back_from_break_fixup
END(break_fault)

	.org ia64_ivt+0x3000
/////////////////////////////////////////////////////////////////////////////////////////
// 0x3000 Entry 12 (size 64 bundles) External Interrupt (4)
ENTRY(interrupt)
	DBG_FAULT(12)
	mov r31=pr				// prepare to save predicates
	;;
	SAVE_MIN_WITH_COVER			// uses r31; defines r2 and r3
	ssm psr.ic | PSR_DEFAULT_BITS
	;;
	adds r3=8,r2				// set up second base pointer for SAVE_REST
	srlz.i					// ensure everybody knows psr.ic is back on
	;;
	SAVE_REST
	;;
	alloc r14=ar.pfs,0,0,2,0		// must be first in an insn group
	mov out0=cr.ivr				// pass cr.ivr as first arg
	add out1=16,sp				// pass pointer to pt_regs as second arg
	;;
	srlz.d					// make sure we see the effect of cr.ivr
see the effect of cr.ivr 828 movl r14=ia64_leave_kernel 829 ;; 830 mov rp=r14 831 br.call.sptk.many b6=ia64_handle_irq 832END(interrupt) 833 834 .org ia64_ivt+0x3400 835///////////////////////////////////////////////////////////////////////////////////////// 836// 0x3400 Entry 13 (size 64 bundles) Reserved 837 DBG_FAULT(13) 838 FAULT(13) 839 840 .org ia64_ivt+0x3800 841///////////////////////////////////////////////////////////////////////////////////////// 842// 0x3800 Entry 14 (size 64 bundles) Reserved 843 DBG_FAULT(14) 844 FAULT(14) 845 846 /* 847 * There is no particular reason for this code to be here, other than that 848 * there happens to be space here that would go unused otherwise. If this 849 * fault ever gets "unreserved", simply moved the following code to a more 850 * suitable spot... 851 * 852 * ia64_syscall_setup() is a separate subroutine so that it can 853 * allocate stacked registers so it can safely demine any 854 * potential NaT values from the input registers. 855 * 856 * On entry: 857 * - executing on bank 0 or bank 1 register set (doesn't matter) 858 * - r1: stack pointer 859 * - r2: current task pointer 860 * - r3: preserved 861 * - r11: original contents (saved ar.pfs to be saved) 862 * - r12: original contents (sp to be saved) 863 * - r13: original contents (tp to be saved) 864 * - r15: original contents (syscall # to be saved) 865 * - r18: saved bsp (after switching to kernel stack) 866 * - r19: saved b6 867 * - r20: saved r1 (gp) 868 * - r21: saved ar.fpsr 869 * - r22: kernel's register backing store base (krbs_base) 870 * - r23: saved ar.bspstore 871 * - r24: saved ar.rnat 872 * - r25: saved ar.unat 873 * - r26: saved ar.pfs 874 * - r27: saved ar.rsc 875 * - r28: saved cr.iip 876 * - r29: saved cr.ipsr 877 * - r31: saved pr 878 * - b0: original contents (to be saved) 879 * On exit: 880 * - p10: TRUE if syscall is invoked with more than 8 out 881 * registers or r15's Nat is true 882 * - r1: kernel's gp 883 * - r3: preserved (same as on 
entry)
 * - r8: -EINVAL if p10 is true
 * - r12: points to kernel stack
 * - r13: points to current task
 * - r14: preserved (same as on entry)
 * - p13: preserved
 * - p15: TRUE if interrupts need to be re-enabled
 * - ar.fpsr: set to kernel settings
 * - b6: preserved (same as on entry)
 */
GLOBAL_ENTRY(ia64_syscall_setup)
	/*
	 * NOTE(review): the input-register contract (r19=b6, r29=cr.ipsr, r28=cr.iip,
	 * r26=ar.pfs, r25=ar.unat, r27=ar.rsc, r24=ar.rnat, r23=ar.bspstore,
	 * r21=ar.fpsr, r20=r1, r31=pr, r22/r18 RSE state) is established by the
	 * SAVE_MIN machinery -- confirm against minstate.h.
	 */
#if PT(B6) != 0
# error This code assumes that b6 is the first field in pt_regs.
#endif
	st8 [r1]=r19					// save b6
	add r16=PT(CR_IPSR),r1				// initialize first base pointer
	add r17=PT(R11),r1				// initialize second base pointer
	;;
	alloc r19=ar.pfs,8,0,0,0			// ensure in0-in7 are writable
	st8 [r16]=r29,PT(AR_PFS)-PT(CR_IPSR)		// save cr.ipsr
	tnat.nz p8,p0=in0				// p8 <- arg0 is a NaT

	st8.spill [r17]=r11,PT(CR_IIP)-PT(R11)		// save r11
	tnat.nz p9,p0=in1				// p9 <- arg1 is a NaT
(pKStk)	mov r18=r0					// make sure r18 isn't NaT
	;;

	st8 [r16]=r26,PT(CR_IFS)-PT(AR_PFS)		// save ar.pfs
	st8 [r17]=r28,PT(AR_UNAT)-PT(CR_IIP)		// save cr.iip
	mov r28=b0					// save b0 (2 cyc)
	;;

	st8 [r17]=r25,PT(AR_RSC)-PT(AR_UNAT)		// save ar.unat
	dep r19=0,r19,38,26				// clear all bits but 0..37 [I0]
(p8)	mov in0=-1					// NaT argument becomes -1
	;;

	st8 [r16]=r19,PT(AR_RNAT)-PT(CR_IFS)		// store ar.pfs.pfm in cr.ifs
	extr.u r11=r19,7,7	// I0			// get sol of ar.pfs
	and r8=0x7f,r19		// A			// get sof of ar.pfs

	st8 [r17]=r27,PT(AR_BSPSTORE)-PT(AR_RSC)	// save ar.rsc
	tbit.nz p15,p0=r29,IA64_PSR_I_BIT // I0		// p15 <- interrupts were enabled
(p9)	mov in1=-1
	;;

(pUStk)	sub r18=r18,r22					// r18=RSE.ndirty*8
	tnat.nz p10,p0=in2
	add r11=8,r11					// r11 = sol + 8 (for frame-size check below)
	;;
(pKStk)	adds r16=PT(PR)-PT(AR_RNAT),r16			// skip over ar_rnat field
(pKStk)	adds r17=PT(B0)-PT(AR_BSPSTORE),r17		// skip over ar_bspstore field
	tnat.nz p11,p0=in3
	;;
(p10)	mov in2=-1
	tnat.nz p12,p0=in4				// [I0]
(p11)	mov in3=-1
	;;
(pUStk)	st8 [r16]=r24,PT(PR)-PT(AR_RNAT)		// save ar.rnat
(pUStk)	st8 [r17]=r23,PT(B0)-PT(AR_BSPSTORE)		// save ar.bspstore
	shl r18=r18,16					// compute ar.rsc to be used for "loadrs"
	;;
	st8 [r16]=r31,PT(LOADRS)-PT(PR)			// save predicates
	st8 [r17]=r28,PT(R1)-PT(B0)			// save b0
	tnat.nz p13,p0=in5				// [I0]
	;;
	st8 [r16]=r18,PT(R12)-PT(LOADRS)		// save ar.rsc value for "loadrs"
	st8.spill [r17]=r20,PT(R13)-PT(R1)		// save original r1
(p12)	mov in4=-1
	;;

.mem.offset 0,0;	st8.spill [r16]=r12,PT(AR_FPSR)-PT(R12)	// save r12
.mem.offset 8,0;	st8.spill [r17]=r13,PT(R15)-PT(R13)	// save r13
(p13)	mov in5=-1
	;;
	st8 [r16]=r21,PT(R8)-PT(AR_FPSR)		// save ar.fpsr
	tnat.nz p13,p0=in6
	cmp.lt p10,p9=r11,r8	// frame size can't be more than local+8
	;;
	mov r8=1
(p9)	tnat.nz p10,p0=r15				// (frame size OK) also reject a NaT syscall number
	adds r12=-16,r1		// switch to kernel memory stack (with 16 bytes of scratch)

	st8.spill [r17]=r15				// save r15
	tnat.nz p8,p0=in7
	nop.i 0

	mov r13=r2					// establish `current'
	movl r1=__gp					// establish kernel global pointer
	;;
	st8 [r16]=r8		// ensure pt_regs.r8 != 0 (see handle_syscall_error)
(p13)	mov in6=-1
(p8)	mov in7=-1

	cmp.eq pSys,pNonSys=r0,r0			// set pSys=1, pNonSys=0
	movl r17=FPSR_DEFAULT
	;;
	mov.m ar.fpsr=r17				// set ar.fpsr to kernel default value
(p10)	mov r8=-EINVAL					// bad frame or NaT syscall number
	br.ret.sptk.many b7
END(ia64_syscall_setup)

	.org ia64_ivt+0x3c00
/////////////////////////////////////////////////////////////////////////////////////////
// 0x3c00 Entry 15 (size 64 bundles) Reserved
	DBG_FAULT(15)
	FAULT(15)

	/*
	 * Squatting in this space ...
	 *
	 * This special case dispatcher for illegal operation faults allows preserved
	 * registers to be modified through a callback function (asm only) that is handed
	 * back from the fault handler in r8. Up to three arguments can be passed to the
	 * callback function by returning an aggregate with the callback as its first
	 * element, followed by the arguments.
 */
ENTRY(dispatch_illegal_op_fault)
	.prologue
	.body
	SAVE_MIN_WITH_COVER
	ssm psr.ic | PSR_DEFAULT_BITS
	;;
	srlz.i		// guarantee that interruption collection is on
	;;
(p15)	ssm psr.i	// restore psr.i
	adds r3=8,r2	// set up second base pointer for SAVE_REST
	;;
	alloc r14=ar.pfs,0,0,1,0	// must be first in insn group
	mov out0=ar.ec
	;;
	SAVE_REST
	PT_REGS_UNWIND_INFO(0)
	;;
	br.call.sptk.many rp=ia64_illegal_op_fault
.ret0:	;;
	alloc r14=ar.pfs,0,0,3,0	// must be first in insn group
	mov out0=r9
	mov out1=r10
	mov out2=r11
	movl r15=ia64_leave_kernel
	;;
	mov rp=r15
	mov b6=r8
	;;
	// r8 != 0 means the C handler returned a callback (see comment above);
	// invoke it with the three arguments, otherwise leave the kernel directly.
	cmp.ne p6,p0=0,r8
(p6)	br.call.dpnt.many b6=b6		// call returns to ia64_leave_kernel
	br.sptk.many ia64_leave_kernel
END(dispatch_illegal_op_fault)

	.org ia64_ivt+0x4000
/////////////////////////////////////////////////////////////////////////////////////////
// 0x4000 Entry 16 (size 64 bundles) Reserved
	DBG_FAULT(16)
	FAULT(16)

	.org ia64_ivt+0x4400
/////////////////////////////////////////////////////////////////////////////////////////
// 0x4400 Entry 17 (size 64 bundles) Reserved
	DBG_FAULT(17)
	FAULT(17)

ENTRY(non_syscall)
	mov ar.rsc=r27			// restore ar.rsc before SAVE_MIN_WITH_COVER
	;;
	SAVE_MIN_WITH_COVER

	// There is no particular reason for this code to be here, other than that
	// there happens to be space here that would go unused otherwise.  If this
	// fault ever gets "unreserved", simply move the following code to a more
	// suitable spot...

	alloc r14=ar.pfs,0,0,2,0
	mov out0=cr.iim			// break immediate -> first arg of ia64_bad_break
	add out1=16,sp			// pointer to pt_regs
	adds r3=8,r2			// set up second base pointer for SAVE_REST

	ssm psr.ic | PSR_DEFAULT_BITS
	;;
	srlz.i				// guarantee that interruption collection is on
	;;
(p15)	ssm psr.i			// restore psr.i
	movl r15=ia64_leave_kernel
	;;
	SAVE_REST
	mov rp=r15
	;;
	br.call.sptk.many b6=ia64_bad_break	// avoid WAW on CFM and ignore return addr
END(non_syscall)

	.org ia64_ivt+0x4800
/////////////////////////////////////////////////////////////////////////////////////////
// 0x4800 Entry 18 (size 64 bundles) Reserved
	DBG_FAULT(18)
	FAULT(18)

	/*
	 * There is no particular reason for this code to be here, other than that
	 * there happens to be space here that would go unused otherwise.  If this
	 * fault ever gets "unreserved", simply move the following code to a more
	 * suitable spot...
	 */

ENTRY(dispatch_unaligned_handler)
	SAVE_MIN_WITH_COVER
	;;
	alloc r14=ar.pfs,0,0,2,0		// now it's safe (must be first in insn group!)
	mov out0=cr.ifa				// faulting address
	adds out1=16,sp				// pointer to pt_regs

	ssm psr.ic | PSR_DEFAULT_BITS
	;;
	srlz.i					// guarantee that interruption collection is on
	;;
(p15)	ssm psr.i				// restore psr.i
	adds r3=8,r2				// set up second base pointer
	;;
	SAVE_REST
	movl r14=ia64_leave_kernel
	;;
	mov rp=r14
	br.sptk.many ia64_prepare_handle_unaligned
END(dispatch_unaligned_handler)

	.org ia64_ivt+0x4c00
/////////////////////////////////////////////////////////////////////////////////////////
// 0x4c00 Entry 19 (size 64 bundles) Reserved
	DBG_FAULT(19)
	FAULT(19)

	/*
	 * There is no particular reason for this code to be here, other than that
	 * there happens to be space here that would go unused otherwise.
 *  If this
	 * fault ever gets "unreserved", simply move the following code to a more
	 * suitable spot...
	 */

ENTRY(dispatch_to_fault_handler)
	/*
	 * Common dispatcher for all vectors handled by the C function ia64_fault().
	 *
	 * Input:
	 *	psr.ic:	off
	 *	r19:	fault vector number (e.g., 24 for General Exception)
	 *	r31:	contains saved predicates (pr)
	 */
	SAVE_MIN_WITH_COVER_R19
	alloc r14=ar.pfs,0,0,5,0
	mov out0=r15				// vector number (from r19, via SAVE_MIN_WITH_COVER_R19)
	mov out1=cr.isr
	mov out2=cr.ifa
	mov out3=cr.iim
	mov out4=cr.itir
	;;
	ssm psr.ic | PSR_DEFAULT_BITS
	;;
	srlz.i					// guarantee that interruption collection is on
	;;
(p15)	ssm psr.i				// restore psr.i
	adds r3=8,r2				// set up second base pointer for SAVE_REST
	;;
	SAVE_REST
	movl r14=ia64_leave_kernel
	;;
	mov rp=r14
	br.call.sptk.many b6=ia64_fault
END(dispatch_to_fault_handler)

//
// --- End of long entries, Beginning of short entries
//

	.org ia64_ivt+0x5000
/////////////////////////////////////////////////////////////////////////////////////////
// 0x5000 Entry 20 (size 16 bundles) Page Not Present (10,22,49)
ENTRY(page_not_present)
	DBG_FAULT(20)
	mov r16=cr.ifa
	rsm psr.dt
	/*
	 * The Linux page fault handler doesn't expect non-present pages to be in
	 * the TLB.  Flush the existing entry now, so we meet that expectation.
	 */
	mov r17=PAGE_SHIFT<<2
	;;
	ptc.l r16,r17			// purge the stale local TLB entry for the faulting page
	;;
	mov r31=pr
	srlz.d
	br.sptk.many page_fault
END(page_not_present)

	.org ia64_ivt+0x5100
/////////////////////////////////////////////////////////////////////////////////////////
// 0x5100 Entry 21 (size 16 bundles) Key Permission (13,25,52)
ENTRY(key_permission)
	DBG_FAULT(21)
	mov r16=cr.ifa
	rsm psr.dt
	mov r31=pr
	;;
	srlz.d
	br.sptk.many page_fault
END(key_permission)

	.org ia64_ivt+0x5200
/////////////////////////////////////////////////////////////////////////////////////////
// 0x5200 Entry 22 (size 16 bundles) Instruction Access Rights (26)
ENTRY(iaccess_rights)
	DBG_FAULT(22)
	mov r16=cr.ifa
	rsm psr.dt
	mov r31=pr
	;;
	srlz.d
	br.sptk.many page_fault
END(iaccess_rights)

	.org ia64_ivt+0x5300
/////////////////////////////////////////////////////////////////////////////////////////
// 0x5300 Entry 23 (size 16 bundles) Data Access Rights (14,53)
ENTRY(daccess_rights)
	DBG_FAULT(23)
	mov r16=cr.ifa
	rsm psr.dt
	mov r31=pr
	;;
	srlz.d
	br.sptk.many page_fault
END(daccess_rights)

	.org ia64_ivt+0x5400
/////////////////////////////////////////////////////////////////////////////////////////
// 0x5400 Entry 24 (size 16 bundles) General Exception (5,32,34,36,38,39)
ENTRY(general_exception)
	DBG_FAULT(24)
	mov r16=cr.isr
	mov r31=pr
	;;
	cmp4.eq p6,p0=0,r16		// ISR code 0 => illegal operation fault
(p6)	br.sptk.many dispatch_illegal_op_fault
	;;
	mov r19=24		// fault number
	br.sptk.many dispatch_to_fault_handler
END(general_exception)

	.org ia64_ivt+0x5500
/////////////////////////////////////////////////////////////////////////////////////////
// 0x5500 Entry 25 (size 16 bundles) Disabled FP-Register (35)
ENTRY(disabled_fp_reg)
	DBG_FAULT(25)
	rsm psr.dfh		// ensure we can access fph
	;;
	srlz.d
	mov r31=pr
	mov r19=25
	br.sptk.many dispatch_to_fault_handler
END(disabled_fp_reg)

	.org ia64_ivt+0x5600
/////////////////////////////////////////////////////////////////////////////////////////
// 0x5600 Entry 26 (size 16 bundles) Nat Consumption (11,23,37,50)
ENTRY(nat_consumption)
	DBG_FAULT(26)

	mov r16=cr.ipsr
	mov r17=cr.isr
	mov r31=pr				// save PR
	;;
	and r18=0xf,r17				// r18 = cr.isr.code{3:0}
	tbit.z p6,p0=r17,IA64_ISR_NA_BIT
	;;
	cmp.ne.or p6,p0=IA64_ISR_CODE_LFETCH,r18
	dep r16=-1,r16,IA64_PSR_ED_BIT,1
(p6)	br.cond.spnt 1f		// branch if (cr.isr.na == 0 || cr.isr.code{3:0} != LFETCH)
	;;
	// Non-faulting lfetch consumed a NaT: set cr.ipsr.ed and rfi so the
	// retried lfetch completes with the exception deferred.
	mov cr.ipsr=r16
	mov pr=r31,-1
	;;
	rfi

1:	mov pr=r31,-1
	;;
	FAULT(26)
END(nat_consumption)

	.org ia64_ivt+0x5700
/////////////////////////////////////////////////////////////////////////////////////////
// 0x5700 Entry 27 (size 16 bundles) Speculation (40)
ENTRY(speculation_vector)
	DBG_FAULT(27)
	/*
	 * A [f]chk.[as] instruction needs to take the branch to the recovery code but
	 * this part of the architecture is not implemented in hardware on some CPUs, such
	 * as Itanium.  Thus, in general we need to emulate the behavior.  IIM contains
	 * the relative target (not yet sign extended).  So after sign extending it we
	 * simply add it to IIP.  We also need to reset the EI field of the IPSR to zero,
	 * i.e., the slot to restart into.
	 *
	 * cr.iim contains zero_ext(imm21)
	 */
	mov r18=cr.iim
	;;
	mov r17=cr.iip
	shl r18=r18,43			// put sign bit in position (43=64-21)
	;;

	mov r16=cr.ipsr
	shr r18=r18,39			// sign extend (39=43-4)
	;;

	add r17=r17,r18			// now add the offset
	;;
	mov cr.iip=r17
	dep r16=0,r16,41,2		// clear EI
	;;

	mov cr.ipsr=r16
	;;

	rfi				// and go back
END(speculation_vector)

	.org ia64_ivt+0x5800
/////////////////////////////////////////////////////////////////////////////////////////
// 0x5800 Entry 28 (size 16 bundles) Reserved
	DBG_FAULT(28)
	FAULT(28)

	.org ia64_ivt+0x5900
/////////////////////////////////////////////////////////////////////////////////////////
// 0x5900 Entry 29 (size 16 bundles) Debug (16,28,56)
ENTRY(debug_vector)
	DBG_FAULT(29)
	FAULT(29)
END(debug_vector)

	.org ia64_ivt+0x5a00
/////////////////////////////////////////////////////////////////////////////////////////
// 0x5a00 Entry 30 (size 16 bundles) Unaligned Reference (57)
ENTRY(unaligned_access)
	DBG_FAULT(30)
	mov r16=cr.ipsr
	mov r31=pr		// prepare to save predicates
	;;
	br.sptk.many dispatch_unaligned_handler
END(unaligned_access)

	.org ia64_ivt+0x5b00
/////////////////////////////////////////////////////////////////////////////////////////
// 0x5b00 Entry 31 (size 16 bundles) Unsupported Data Reference (57)
ENTRY(unsupported_data_reference)
	DBG_FAULT(31)
	FAULT(31)
END(unsupported_data_reference)

	.org ia64_ivt+0x5c00
/////////////////////////////////////////////////////////////////////////////////////////
// 0x5c00 Entry 32 (size 16 bundles) Floating-Point Fault (64)
ENTRY(floating_point_fault)
	DBG_FAULT(32)
	FAULT(32)
END(floating_point_fault)

	.org ia64_ivt+0x5d00
/////////////////////////////////////////////////////////////////////////////////////////
// 0x5d00 Entry 33 (size 16 bundles) Floating Point Trap (66)
ENTRY(floating_point_trap)
	DBG_FAULT(33)
	FAULT(33)
END(floating_point_trap)

	.org ia64_ivt+0x5e00
/////////////////////////////////////////////////////////////////////////////////////////
// 0x5e00 Entry 34 (size 16 bundles) Lower Privilege Transfer Trap (66)
ENTRY(lower_privilege_trap)
	DBG_FAULT(34)
	FAULT(34)
END(lower_privilege_trap)

	.org ia64_ivt+0x5f00
/////////////////////////////////////////////////////////////////////////////////////////
// 0x5f00 Entry 35 (size 16 bundles) Taken Branch Trap (68)
ENTRY(taken_branch_trap)
	DBG_FAULT(35)
	FAULT(35)
END(taken_branch_trap)

	.org ia64_ivt+0x6000
/////////////////////////////////////////////////////////////////////////////////////////
// 0x6000 Entry 36 (size 16 bundles) Single Step Trap (69)
ENTRY(single_step_trap)
	DBG_FAULT(36)
	FAULT(36)
END(single_step_trap)

	.org ia64_ivt+0x6100
/////////////////////////////////////////////////////////////////////////////////////////
// 0x6100 Entry 37 (size 16 bundles) Reserved
	DBG_FAULT(37)
	FAULT(37)

	.org ia64_ivt+0x6200
/////////////////////////////////////////////////////////////////////////////////////////
// 0x6200 Entry 38 (size 16 bundles) Reserved
	DBG_FAULT(38)
	FAULT(38)

	.org ia64_ivt+0x6300
/////////////////////////////////////////////////////////////////////////////////////////
// 0x6300 Entry 39 (size 16 bundles) Reserved
	DBG_FAULT(39)
	FAULT(39)

	.org ia64_ivt+0x6400
/////////////////////////////////////////////////////////////////////////////////////////
// 0x6400 Entry 40 (size 16 bundles) Reserved
	DBG_FAULT(40)
	FAULT(40)

	.org ia64_ivt+0x6500
/////////////////////////////////////////////////////////////////////////////////////////
// 0x6500 Entry 41 (size 16 bundles) Reserved
	DBG_FAULT(41)
	FAULT(41)

	.org ia64_ivt+0x6600
/////////////////////////////////////////////////////////////////////////////////////////
// 0x6600 Entry 42 (size 16 bundles) Reserved
	DBG_FAULT(42)
	FAULT(42)

	.org ia64_ivt+0x6700
/////////////////////////////////////////////////////////////////////////////////////////
// 0x6700 Entry 43 (size 16 bundles) Reserved
	DBG_FAULT(43)
	FAULT(43)

	.org ia64_ivt+0x6800
/////////////////////////////////////////////////////////////////////////////////////////
// 0x6800 Entry 44 (size 16 bundles) Reserved
	DBG_FAULT(44)
	FAULT(44)

	.org ia64_ivt+0x6900
/////////////////////////////////////////////////////////////////////////////////////////
// 0x6900 Entry 45 (size 16 bundles) IA-32 Exception (17,18,29,41,42,43,44,58,60,61,62,72,73,75,76,77)
ENTRY(ia32_exception)
	DBG_FAULT(45)
	FAULT(45)
END(ia32_exception)

	.org ia64_ivt+0x6a00
/////////////////////////////////////////////////////////////////////////////////////////
// 0x6a00 Entry 46 (size 16 bundles) IA-32 Intercept (30,31,59,70,71)
ENTRY(ia32_intercept)
	DBG_FAULT(46)
#ifdef CONFIG_IA32_SUPPORT
	mov r31=pr
	mov r16=cr.isr
	;;
	extr.u r17=r16,16,8	// get ISR.code
	mov r18=ar.eflag
	mov r19=cr.iim		// old eflag value
	;;
	cmp.ne p6,p0=2,r17
(p6)	br.cond.spnt 1f		// not a system flag fault
	xor r16=r18,r19		// diff between old and new eflag values
	;;
	extr.u r17=r16,18,1	// get the eflags.ac bit
	;;
	cmp.eq p6,p0=0,r17
(p6)	br.cond.spnt 1f		// eflags.ac bit didn't change
	;;
	// Only the AC bit changed: the intercept can be ignored, resume directly.
	mov pr=r31,-1		// restore predicate registers
	rfi

1:
#endif	// CONFIG_IA32_SUPPORT
	FAULT(46)
END(ia32_intercept)

	.org ia64_ivt+0x6b00
/////////////////////////////////////////////////////////////////////////////////////////
// 0x6b00 Entry 47 (size 16 bundles) IA-32 Interrupt (74)
ENTRY(ia32_interrupt)
	DBG_FAULT(47)
#ifdef CONFIG_IA32_SUPPORT
	mov r31=pr			// save predicates for the dispatcher
	br.sptk.many dispatch_to_ia32_handler
#else
	FAULT(47)
#endif
END(ia32_interrupt)

	.org ia64_ivt+0x6c00
/////////////////////////////////////////////////////////////////////////////////////////
// 0x6c00 Entry 48 (size 16 bundles) Reserved
	DBG_FAULT(48)
	FAULT(48)

	.org ia64_ivt+0x6d00
/////////////////////////////////////////////////////////////////////////////////////////
// 0x6d00 Entry 49 (size 16 bundles) Reserved
	DBG_FAULT(49)
	FAULT(49)

	.org ia64_ivt+0x6e00
/////////////////////////////////////////////////////////////////////////////////////////
// 0x6e00 Entry 50 (size 16 bundles) Reserved
	DBG_FAULT(50)
	FAULT(50)

	.org ia64_ivt+0x6f00
/////////////////////////////////////////////////////////////////////////////////////////
// 0x6f00 Entry 51 (size 16 bundles) Reserved
	DBG_FAULT(51)
	FAULT(51)

	.org ia64_ivt+0x7000
/////////////////////////////////////////////////////////////////////////////////////////
// 0x7000 Entry 52 (size 16 bundles) Reserved
	DBG_FAULT(52)
	FAULT(52)

	.org ia64_ivt+0x7100
/////////////////////////////////////////////////////////////////////////////////////////
// 0x7100 Entry 53 (size 16 bundles) Reserved
	DBG_FAULT(53)
	FAULT(53)

	.org ia64_ivt+0x7200
/////////////////////////////////////////////////////////////////////////////////////////
// 0x7200 Entry 54 (size 16 bundles) Reserved
	DBG_FAULT(54)
	FAULT(54)

	.org ia64_ivt+0x7300
/////////////////////////////////////////////////////////////////////////////////////////
// 0x7300 Entry 55 (size 16 bundles) Reserved
	DBG_FAULT(55)
	FAULT(55)

	.org ia64_ivt+0x7400
/////////////////////////////////////////////////////////////////////////////////////////
// 0x7400 Entry 56 (size 16 bundles) Reserved
	DBG_FAULT(56)
	FAULT(56)

	.org ia64_ivt+0x7500
/////////////////////////////////////////////////////////////////////////////////////////
// 0x7500 Entry 57 (size 16 bundles) Reserved
	DBG_FAULT(57)
	FAULT(57)

	.org ia64_ivt+0x7600
/////////////////////////////////////////////////////////////////////////////////////////
// 0x7600 Entry 58 (size 16 bundles) Reserved
	DBG_FAULT(58)
	FAULT(58)

	.org ia64_ivt+0x7700
/////////////////////////////////////////////////////////////////////////////////////////
// 0x7700 Entry 59 (size 16 bundles) Reserved
	DBG_FAULT(59)
	FAULT(59)

	.org ia64_ivt+0x7800
/////////////////////////////////////////////////////////////////////////////////////////
// 0x7800 Entry 60 (size 16 bundles) Reserved
	DBG_FAULT(60)
	FAULT(60)

	.org ia64_ivt+0x7900
/////////////////////////////////////////////////////////////////////////////////////////
// 0x7900 Entry 61 (size 16 bundles) Reserved
	DBG_FAULT(61)
	FAULT(61)

	.org ia64_ivt+0x7a00
/////////////////////////////////////////////////////////////////////////////////////////
// 0x7a00 Entry 62 (size 16 bundles) Reserved
	DBG_FAULT(62)
	FAULT(62)

	.org ia64_ivt+0x7b00
/////////////////////////////////////////////////////////////////////////////////////////
// 0x7b00 Entry 63 (size 16 bundles) Reserved
	DBG_FAULT(63)
	FAULT(63)

	.org ia64_ivt+0x7c00
/////////////////////////////////////////////////////////////////////////////////////////
// 0x7c00 Entry 64 (size 16 bundles) Reserved
	DBG_FAULT(64)
	FAULT(64)

	.org ia64_ivt+0x7d00
/////////////////////////////////////////////////////////////////////////////////////////
// 0x7d00 Entry 65 (size 16 bundles) Reserved
	DBG_FAULT(65)
	FAULT(65)

	.org ia64_ivt+0x7e00
/////////////////////////////////////////////////////////////////////////////////////////
// 0x7e00 Entry 66 (size 16 bundles) Reserved
	DBG_FAULT(66)
	FAULT(66)

	.org ia64_ivt+0x7f00
/////////////////////////////////////////////////////////////////////////////////////////
// 0x7f00 Entry 67 (size 16 bundles) Reserved
	DBG_FAULT(67)
	FAULT(67)

#ifdef CONFIG_IA32_SUPPORT

	/*
	 * There is no particular reason for this code to be here, other than that
	 * there happens to be space here that would go unused otherwise.  If this
	 * fault ever gets "unreserved", simply move the following code to a more
	 * suitable spot...
	 */

	// IA32 interrupt entry point

ENTRY(dispatch_to_ia32_handler)
	SAVE_MIN
	;;
	mov r14=cr.isr
	ssm psr.ic | PSR_DEFAULT_BITS
	;;
	srlz.i					// guarantee that interruption collection is on
	;;
(p15)	ssm psr.i
	adds r3=8,r2				// Base pointer for SAVE_REST
	;;
	SAVE_REST
	;;
	mov r15=0x80
	shr r14=r14,16				// Get interrupt number
	;;
	cmp.ne p6,p0=r14,r15			// interrupt 0x80 is the IA-32 system-call vector
(p6)	br.call.dpnt.many b6=non_ia32_syscall

	adds r14=IA64_PT_REGS_R8_OFFSET + 16,sp	// 16 byte hole per SW conventions
	adds r15=IA64_PT_REGS_R1_OFFSET + 16,sp
	;;
	cmp.eq pSys,pNonSys=r0,r0		// set pSys=1, pNonSys=0
	ld8 r8=[r14]				// get r8
	;;
	st8 [r15]=r8				// save original EAX in r1 (IA32 procs don't use the GP)
	;;
	alloc r15=ar.pfs,0,0,6,0		// must be first in an insn group
	;;
	ld4 r8=[r14],8				// r8 == eax (syscall number)
	mov r15=IA32_NR_syscalls
	;;
	cmp.ltu.unc p6,p7=r8,r15		// p6 <- syscall number in range
	ld4 out1=[r14],8			// r9 == ecx
	;;
	ld4 out2=[r14],8			// r10 == edx
	;;
	ld4 out0=[r14]				// r11 == ebx
	adds r14=(IA64_PT_REGS_R13_OFFSET) + 16,sp
	;;
	ld4 out5=[r14],PT(R14)-PT(R13)		// r13 == ebp
	;;
	ld4 out3=[r14],PT(R15)-PT(R14)		// r14 == esi
	adds r2=TI_FLAGS+IA64_TASK_SIZE,r13
	;;
	ld4 out4=[r14]				// r15 == edi
	movl r16=ia32_syscall_table
	;;
(p6)	shladd r16=r8,3,r16	// force ni_syscall if not valid syscall number
	ld4 r2=[r2]		// r2 = current_thread_info()->flags
	;;
	ld8 r16=[r16]
	and r2=_TIF_SYSCALL_TRACEAUDIT,r2	// mask trace or audit
	;;
	mov b6=r16
	movl r15=ia32_ret_from_syscall
	cmp.eq p8,p0=r2,r0			// p8 <- not being traced/audited
	;;
	mov rp=r15
(p8)	br.call.sptk.many b6=b6			// fast path: call the handler directly
	br.cond.sptk ia32_trace_syscall		// traced: go through the ptrace/audit path

non_ia32_syscall:
	alloc r15=ar.pfs,0,0,2,0
	mov out0=r14				// interrupt #
	add out1=16,sp				// pointer to pt_regs
	;;					// avoid WAW on CFM
	br.call.sptk.many rp=ia32_bad_interrupt
.ret1:	movl r15=ia64_leave_kernel
	;;
	mov rp=r15
	br.ret.sptk.many rp
END(dispatch_to_ia32_handler)

#endif /* CONFIG_IA32_SUPPORT */