/*
 * arch/ia64/kernel/ivt.S
 *
 * Copyright (C) 1998-2001, 2003, 2005 Hewlett-Packard Co
 *	Stephane Eranian <eranian@hpl.hp.com>
 *	David Mosberger <davidm@hpl.hp.com>
 * Copyright (C) 2000, 2002-2003 Intel Co
 *	Asit Mallick <asit.k.mallick@intel.com>
 *	Suresh Siddha <suresh.b.siddha@intel.com>
 *	Kenneth Chen <kenneth.w.chen@intel.com>
 *	Fenghua Yu <fenghua.yu@intel.com>
 *
 * 00/08/23 Asit Mallick <asit.k.mallick@intel.com> TLB handling for SMP
 * 00/12/20 David Mosberger-Tang <davidm@hpl.hp.com> DTLB/ITLB handler now uses virtual PT.
 */
/*
 * This file defines the interruption vector table used by the CPU.
 * It does not include one entry per possible cause of interruption.
 *
 * The first 20 entries of the table contain 64 bundles each while the
 * remaining 48 entries contain only 16 bundles each.
 *
 * The 64 bundles are used to allow inlining the whole handler for critical
 * interruptions like TLB misses.
 *
 * For each entry, the comment is as follows:
 *
 *	// 0x1c00 Entry 7 (size 64 bundles) Data Key Miss (12,51)
 *	entry offset ----/     /         /                  /  /
 *	entry number ---------/         /                  /  /
 *	size of the entry -------------/                  /  /
 *	vector name -------------------------------------/  /
 *	interruptions triggering this vector ----------------/
 *
 * The table is 32KB in size and must be aligned on 32KB boundary.
 * (The CPU ignores the 15 lower bits of the address)
 *
 * Table is based upon EAS2.6 (Oct 1999)
 */

#include <linux/config.h>

#include <asm/asmmacro.h>
#include <asm/break.h>
#include <asm/ia32.h>
#include <asm/kregs.h>
#include <asm/asm-offsets.h>
#include <asm/pgtable.h>
#include <asm/processor.h>
#include <asm/ptrace.h>
#include <asm/system.h>
#include <asm/thread_info.h>
#include <asm/unistd.h>
#include <asm/errno.h>

#if 1
# define PSR_DEFAULT_BITS	psr.ac
#else
# define PSR_DEFAULT_BITS	0
#endif

#if 0
  /*
   * This lets you track the last eight faults that occurred on the CPU.  Make sure ar.k2 isn't
   * needed for something else before enabling this...
   */
# define DBG_FAULT(i)	mov r16=ar.k2;; shl r16=r16,8;; add r16=(i),r16;; mov ar.k2=r16
#else
# define DBG_FAULT(i)
#endif

#include "minstate.h"

#define FAULT(n)								\
	mov r31=pr;								\
	mov r19=n;;		/* prepare to save predicates */		\
	br.sptk.many dispatch_to_fault_handler

	.section .text.ivt,"ax"

	.align 32768	// align on 32KB boundary
	.global ia64_ivt
ia64_ivt:
/////////////////////////////////////////////////////////////////////////////////////////
// 0x0000 Entry 0 (size 64 bundles) VHPT Translation (8,20,47)
ENTRY(vhpt_miss)
	DBG_FAULT(0)
	/*
	 * The VHPT vector is invoked when the TLB entry for the virtual page table
	 * is missing.  This happens only as a result of a previous
	 * (the "original") TLB miss, which may either be caused by an instruction
	 * fetch or a data access (or non-access).
	 *
	 * What we do here is normal TLB miss handling for the _original_ miss,
	 * followed by inserting the TLB entry for the virtual page table page
	 * that the VHPT walker was attempting to access.  The latter gets
	 * inserted as long as the page table entries above the pte level have
	 * valid mappings for the faulting address.  The TLB entry for the
	 * original miss gets inserted only if the pte entry indicates that the
	 * page is present.
	 *
	 * do_page_fault gets invoked in the following cases:
	 *	- the faulting virtual address uses unimplemented address bits
	 *	- the faulting virtual address has no valid page table mapping
	 */
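	/*
	 * For orientation only: the physical-mode walk performed below is
	 * roughly equivalent to the following C sketch (the pud level exists
	 * only with CONFIG_PGTABLE_4; helper names follow the usual kernel
	 * page-table API):
	 *
	 *	pgd_t *pgd = pgd_offset(mm, ifa);	// swapper_pg_dir for region 5
	 *	if (pgd_none(*pgd))
	 *		return page_fault();
	 *	pmd_t *pmd = pmd_offset(pud_offset(pgd, ifa), ifa);
	 *	if (pmd_none(*pmd))
	 *		return page_fault();
	 *	pte_t pte = *pte_offset(pmd, ifa);
	 *	if (!pte_present(pte))
	 *		return page_fault();
	 *	itc(pte);		// itc.i or itc.d, depending on cr.isr
	 */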
	mov r16=cr.ifa				// get address that caused the TLB miss
#ifdef CONFIG_HUGETLB_PAGE
	movl r18=PAGE_SHIFT
	mov r25=cr.itir
#endif
	;;
	rsm psr.dt				// use physical addressing for data
	mov r31=pr				// save the predicate registers
	mov r19=IA64_KR(PT_BASE)		// get page table base address
	shl r21=r16,3				// shift bit 60 into sign bit
	shr.u r17=r16,61			// get the region number into r17
	;;
	shr.u r22=r21,3
#ifdef CONFIG_HUGETLB_PAGE
	extr.u r26=r25,2,6
	;;
	cmp.ne p8,p0=r18,r26
	sub r27=r26,r18
	;;
(p8)	dep r25=r18,r25,2,6
(p8)	shr r22=r22,r27
#endif
	;;
	cmp.eq p6,p7=5,r17			// is IFA pointing into region 5?
	shr.u r18=r22,PGDIR_SHIFT		// get bottom portion of pgd index bit
	;;
(p7)	dep r17=r17,r19,(PAGE_SHIFT-3),3	// put region number bits in place

	srlz.d
	LOAD_PHYSICAL(p6, r19, swapper_pg_dir)	// region 5 is rooted at swapper_pg_dir

	.pred.rel "mutex", p6, p7
(p6)	shr.u r21=r21,PGDIR_SHIFT+PAGE_SHIFT
(p7)	shr.u r21=r21,PGDIR_SHIFT+PAGE_SHIFT-3
	;;
(p6)	dep r17=r18,r19,3,(PAGE_SHIFT-3)	// r17=pgd_offset for region 5
(p7)	dep r17=r18,r17,3,(PAGE_SHIFT-6)	// r17=pgd_offset for region[0-4]
	cmp.eq p7,p6=0,r21			// unused address bits all zeroes?
#ifdef CONFIG_PGTABLE_4
	shr.u r28=r22,PUD_SHIFT			// shift pud index into position
#else
	shr.u r18=r22,PMD_SHIFT			// shift pmd index into position
#endif
	;;
	ld8 r17=[r17]				// get *pgd (may be 0)
	;;
(p7)	cmp.eq p6,p7=r17,r0			// was pgd_present(*pgd) == NULL?
#ifdef CONFIG_PGTABLE_4
	dep r28=r28,r17,3,(PAGE_SHIFT-3)	// r28=pud_offset(pgd,addr)
	;;
	shr.u r18=r22,PMD_SHIFT			// shift pmd index into position
(p7)	ld8 r29=[r28]				// get *pud (may be 0)
	;;
(p7)	cmp.eq.or.andcm p6,p7=r29,r0		// was pud_present(*pud) == NULL?
	dep r17=r18,r29,3,(PAGE_SHIFT-3)	// r17=pmd_offset(pud,addr)
#else
	dep r17=r18,r17,3,(PAGE_SHIFT-3)	// r17=pmd_offset(pgd,addr)
#endif
	;;
(p7)	ld8 r20=[r17]				// get *pmd (may be 0)
	shr.u r19=r22,PAGE_SHIFT		// shift pte index into position
	;;
(p7)	cmp.eq.or.andcm p6,p7=r20,r0		// was pmd_present(*pmd) == NULL?
	dep r21=r19,r20,3,(PAGE_SHIFT-3)	// r21=pte_offset(pmd,addr)
	;;
(p7)	ld8 r18=[r21]				// read *pte
	mov r19=cr.isr				// cr.isr bit 32 tells us if this is an insn miss
	;;
(p7)	tbit.z p6,p7=r18,_PAGE_P_BIT		// page present bit cleared?
	mov r22=cr.iha				// get the VHPT address that caused the TLB miss
	;;					// avoid RAW on p7
(p7)	tbit.nz.unc p10,p11=r19,32		// is it an instruction TLB miss?
	dep r23=0,r20,0,PAGE_SHIFT		// clear low bits to get page address
	;;
(p10)	itc.i r18				// insert the instruction TLB entry
(p11)	itc.d r18				// insert the data TLB entry
(p6)	br.cond.spnt.many page_fault		// handle bad address/page not present (page fault)
	mov cr.ifa=r22

#ifdef CONFIG_HUGETLB_PAGE
(p8)	mov cr.itir=r25				// change to default page-size for VHPT
#endif

	/*
	 * Now compute and insert the TLB entry for the virtual page table.  We never
	 * execute in a page table page so there is no need to set the exception deferral
	 * bit.
	 */
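	/*
	 * Sketch of what the next instruction builds (r23 holds the physical
	 * address of the pte page, i.e., *pmd with the low bits cleared):
	 *
	 *	vpt_pte = r23 | __DIRTY_BITS_NO_ED | _PAGE_PL_0 | _PAGE_AR_RW;
	 *
	 * It is then inserted for cr.iha (moved into cr.ifa above), the
	 * virtual-page-table address the VHPT walker faulted on.
	 */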
	adds r24=__DIRTY_BITS_NO_ED|_PAGE_PL_0|_PAGE_AR_RW,r23
	;;
(p7)	itc.d r24
	;;
#ifdef CONFIG_SMP
	/*
	 * Tell the assembler's dependency-violation checker that the above "itc" instructions
	 * cannot possibly affect the following loads:
	 */
	dv_serialize_data

	/*
	 * Re-check the pagetable entries.  If they changed, we may have received a ptc.g
	 * between reading the pagetable and the "itc".  If so, flush the entry we
	 * inserted and retry.  At this point, we have:
	 *
	 *	r28 = equivalent of pud_offset(pgd, ifa)
	 *	r17 = equivalent of pmd_offset(pud, ifa)
	 *	r21 = equivalent of pte_offset(pmd, ifa)
	 *
	 *	r29 = *pud
	 *	r20 = *pmd
	 *	r18 = *pte
	 */
	ld8 r25=[r21]				// read *pte again
	ld8 r26=[r17]				// read *pmd again
#ifdef CONFIG_PGTABLE_4
	ld8 r19=[r28]				// read *pud again
#endif
	cmp.ne p6,p7=r0,r0
	;;
	cmp.ne.or.andcm p6,p7=r26,r20		// did *pmd change
#ifdef CONFIG_PGTABLE_4
	cmp.ne.or.andcm p6,p7=r19,r29		// did *pud change
#endif
	mov r27=PAGE_SHIFT<<2
	;;
(p6)	ptc.l r22,r27				// purge PTE page translation
(p7)	cmp.ne.or.andcm p6,p7=r25,r18		// did *pte change
	;;
(p6)	ptc.l r16,r27				// purge translation
#endif

	mov pr=r31,-1				// restore predicate registers
	rfi
END(vhpt_miss)

	.org ia64_ivt+0x400
/////////////////////////////////////////////////////////////////////////////////////////
// 0x0400 Entry 1 (size 64 bundles) ITLB (21)
ENTRY(itlb_miss)
	DBG_FAULT(1)
	/*
	 * The ITLB handler accesses the PTE via the virtually mapped linear
	 * page table.  If a nested TLB miss occurs, we switch into physical
	 * mode, walk the page table, and then re-execute the PTE read and
	 * go on normally after that.
	 */
	mov r16=cr.ifa				// get virtual address
	mov r29=b0				// save b0
	mov r31=pr				// save predicates
.itlb_fault:
	mov r17=cr.iha				// get virtual address of PTE
	movl r30=1f				// load nested fault continuation point
	;;
1:	ld8 r18=[r17]				// read *pte
	;;
	mov b0=r29
	tbit.z p6,p0=r18,_PAGE_P_BIT		// page present bit cleared?
(p6)	br.cond.spnt page_fault
	;;
	itc.i r18
	;;
#ifdef CONFIG_SMP
	/*
	 * Tell the assembler's dependency-violation checker that the above "itc" instructions
	 * cannot possibly affect the following loads:
	 */
	dv_serialize_data

	ld8 r19=[r17]				// read *pte again and see if same
	mov r20=PAGE_SHIFT<<2			// setup page size for purge
	;;
	cmp.ne p7,p0=r18,r19
	;;
(p7)	ptc.l r16,r20
#endif
	mov pr=r31,-1
	rfi
END(itlb_miss)
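	/*
	 * Note on the linear page table used by the ITLB/DTLB handlers: cr.iha
	 * is precomputed by the hardware walker.  For the short-format linear
	 * page table it is, roughly (a sketch, not the architected formula):
	 *
	 *	iha = vpt_base(region_of(ifa)) + ((ifa >> PAGE_SHIFT) << 3);
	 *
	 * i.e., the virtual address of the 8-byte PTE that maps ifa.
	 */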
	.org ia64_ivt+0x0800
/////////////////////////////////////////////////////////////////////////////////////////
// 0x0800 Entry 2 (size 64 bundles) DTLB (9,48)
ENTRY(dtlb_miss)
	DBG_FAULT(2)
	/*
	 * The DTLB handler accesses the PTE via the virtually mapped linear
	 * page table.  If a nested TLB miss occurs, we switch into physical
	 * mode, walk the page table, and then re-execute the PTE read and
	 * go on normally after that.
	 */
	mov r16=cr.ifa				// get virtual address
	mov r29=b0				// save b0
	mov r31=pr				// save predicates
dtlb_fault:
	mov r17=cr.iha				// get virtual address of PTE
	movl r30=1f				// load nested fault continuation point
	;;
1:	ld8 r18=[r17]				// read *pte
	;;
	mov b0=r29
	tbit.z p6,p0=r18,_PAGE_P_BIT		// page present bit cleared?
(p6)	br.cond.spnt page_fault
	;;
	itc.d r18
	;;
#ifdef CONFIG_SMP
	/*
	 * Tell the assembler's dependency-violation checker that the above "itc" instructions
	 * cannot possibly affect the following loads:
	 */
	dv_serialize_data

	ld8 r19=[r17]				// read *pte again and see if same
	mov r20=PAGE_SHIFT<<2			// setup page size for purge
	;;
	cmp.ne p7,p0=r18,r19
	;;
(p7)	ptc.l r16,r20
#endif
	mov pr=r31,-1
	rfi
END(dtlb_miss)

	.org ia64_ivt+0x0c00
/////////////////////////////////////////////////////////////////////////////////////////
// 0x0c00 Entry 3 (size 64 bundles) Alt ITLB (19)
ENTRY(alt_itlb_miss)
	DBG_FAULT(3)
	mov r16=cr.ifa				// get address that caused the TLB miss
	movl r17=PAGE_KERNEL
	mov r21=cr.ipsr
	movl r19=(((1 << IA64_MAX_PHYS_BITS) - 1) & ~0xfff)
	mov r31=pr
	;;
#ifdef CONFIG_DISABLE_VHPT
	shr.u r22=r16,61			// get the region number into r22
	;;
	cmp.gt p8,p0=6,r22			// user mode
	;;
(p8)	thash r17=r16
	;;
(p8)	mov cr.iha=r17
(p8)	mov r29=b0				// save b0
(p8)	br.cond.dptk .itlb_fault
#endif
	extr.u r23=r21,IA64_PSR_CPL0_BIT,2	// extract psr.cpl
	and r19=r19,r16				// clear ed, reserved bits, and PTE control bits
	shr.u r18=r16,57			// move address bit 61 to bit 4
	;;
	andcm r18=0x10,r18			// bit 4=~address-bit(61)
	cmp.ne p8,p0=r0,r23			// psr.cpl != 0?
	or r19=r17,r19				// insert PTE control bits into r19
	;;
	or r19=r19,r18				// set bit 4 (uncached) if the access was to region 6
(p8)	br.cond.spnt page_fault
	;;
	itc.i r19				// insert the TLB entry
	mov pr=r31,-1
	rfi
END(alt_itlb_miss)
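	/*
	 * The alternate miss handlers above and below install an identity
	 * mapping for the unmapped kernel regions.  In rough C (a sketch; the
	 * masks mirror the code above, not a formal definition):
	 *
	 *	pte = (ifa & ((1UL << IA64_MAX_PHYS_BITS) - 1) & ~0xfffUL)
	 *	      | PAGE_KERNEL;
	 *	if (!(ifa & (1UL << 61)))	// region 6: bit 61 clear
	 *		pte |= 0x10;		// memory attribute = uncacheable
	 *	itc(pte);
	 *
	 * User-level accesses (psr.cpl != 0) are sent to page_fault instead.
	 */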
	.org ia64_ivt+0x1000
/////////////////////////////////////////////////////////////////////////////////////////
// 0x1000 Entry 4 (size 64 bundles) Alt DTLB (7,46)
ENTRY(alt_dtlb_miss)
	DBG_FAULT(4)
	mov r16=cr.ifa				// get address that caused the TLB miss
	movl r17=PAGE_KERNEL
	mov r20=cr.isr
	movl r19=(((1 << IA64_MAX_PHYS_BITS) - 1) & ~0xfff)
	mov r21=cr.ipsr
	mov r31=pr
	;;
#ifdef CONFIG_DISABLE_VHPT
	shr.u r22=r16,61			// get the region number into r22
	;;
	cmp.gt p8,p0=6,r22			// access to region 0-5
	;;
(p8)	thash r17=r16
	;;
(p8)	mov cr.iha=r17
(p8)	mov r29=b0				// save b0
(p8)	br.cond.dptk dtlb_fault
#endif
	extr.u r23=r21,IA64_PSR_CPL0_BIT,2	// extract psr.cpl
	and r22=IA64_ISR_CODE_MASK,r20		// get the isr.code field
	tbit.nz p6,p7=r20,IA64_ISR_SP_BIT	// is speculation bit on?
	shr.u r18=r16,57			// move address bit 61 to bit 4
	and r19=r19,r16				// clear ed, reserved bits, and PTE control bits
	tbit.nz p9,p0=r20,IA64_ISR_NA_BIT	// is non-access bit on?
	;;
	andcm r18=0x10,r18			// bit 4=~address-bit(61)
	cmp.ne p8,p0=r0,r23
(p9)	cmp.eq.or.andcm p6,p7=IA64_ISR_CODE_LFETCH,r22	// check isr.code field
(p8)	br.cond.spnt page_fault

	dep r21=-1,r21,IA64_PSR_ED_BIT,1
	or r19=r19,r17				// insert PTE control bits into r19
	;;
	or r19=r19,r18				// set bit 4 (uncached) if the access was to region 6
(p6)	mov cr.ipsr=r21
	;;
(p7)	itc.d r19				// insert the TLB entry
	mov pr=r31,-1
	rfi
END(alt_dtlb_miss)

	.org ia64_ivt+0x1400
/////////////////////////////////////////////////////////////////////////////////////////
// 0x1400 Entry 5 (size 64 bundles) Data nested TLB (6,45)
ENTRY(nested_dtlb_miss)
	/*
	 * In the absence of kernel bugs, we get here when the virtually mapped linear
	 * page table is accessed non-speculatively (e.g., in the Dirty-bit, Instruction
	 * Access-bit, or Data Access-bit faults).  If the DTLB entry for the virtual page
	 * table is missing, a nested TLB miss fault is triggered and control is
	 * transferred to this point.  When this happens, we look up the pte for the
	 * faulting address by walking the page table in physical mode and return to the
	 * continuation point passed in register r30 (or call page_fault if the address is
	 * not mapped).
	 *
	 * Input:	r16:	faulting address
	 *		r29:	saved b0
	 *		r30:	continuation address
	 *		r31:	saved pr
	 *
	 * Output:	r17:	physical address of PTE of faulting address
	 *		r29:	saved b0
	 *		r30:	continuation address
	 *		r31:	saved pr
	 *
	 * Clobbered:	b0, r18, r19, r21, r22, psr.dt (cleared)
	 */
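	/*
	 * In effect (a C sketch of the contract, using the usual page-table
	 * helper names):
	 *
	 *	r17 = pa(pte_offset(pmd_offset(pud_offset(pgd_offset(ifa), ifa),
	 *				       ifa), ifa));
	 *	goto *r30;	// or page_fault() if any level is missing
	 */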
	rsm psr.dt				// switch to using physical data addressing
	mov r19=IA64_KR(PT_BASE)		// get the page table base address
	shl r21=r16,3				// shift bit 60 into sign bit
	mov r18=cr.itir
	;;
	shr.u r17=r16,61			// get the region number into r17
	extr.u r18=r18,2,6			// get the faulting page size
	;;
	cmp.eq p6,p7=5,r17			// is faulting address in region 5?
	add r22=-PAGE_SHIFT,r18			// adjustment for hugetlb address
	add r18=PGDIR_SHIFT-PAGE_SHIFT,r18
	;;
	shr.u r22=r16,r22
	shr.u r18=r16,r18
(p7)	dep r17=r17,r19,(PAGE_SHIFT-3),3	// put region number bits in place

	srlz.d
	LOAD_PHYSICAL(p6, r19, swapper_pg_dir)	// region 5 is rooted at swapper_pg_dir

	.pred.rel "mutex", p6, p7
(p6)	shr.u r21=r21,PGDIR_SHIFT+PAGE_SHIFT
(p7)	shr.u r21=r21,PGDIR_SHIFT+PAGE_SHIFT-3
	;;
(p6)	dep r17=r18,r19,3,(PAGE_SHIFT-3)	// r17=pgd_offset for region 5
(p7)	dep r17=r18,r17,3,(PAGE_SHIFT-6)	// r17=pgd_offset for region[0-4]
	cmp.eq p7,p6=0,r21			// unused address bits all zeroes?
#ifdef CONFIG_PGTABLE_4
	shr.u r18=r22,PUD_SHIFT			// shift pud index into position
#else
	shr.u r18=r22,PMD_SHIFT			// shift pmd index into position
#endif
	;;
	ld8 r17=[r17]				// get *pgd (may be 0)
	;;
(p7)	cmp.eq p6,p7=r17,r0			// was pgd_present(*pgd) == NULL?
	dep r17=r18,r17,3,(PAGE_SHIFT-3)	// r17=p[u|m]d_offset(pgd,addr)
	;;
#ifdef CONFIG_PGTABLE_4
(p7)	ld8 r17=[r17]				// get *pud (may be 0)
	shr.u r18=r22,PMD_SHIFT			// shift pmd index into position
	;;
(p7)	cmp.eq.or.andcm p6,p7=r17,r0		// was pud_present(*pud) == NULL?
	dep r17=r18,r17,3,(PAGE_SHIFT-3)	// r17=pmd_offset(pud,addr)
	;;
#endif
(p7)	ld8 r17=[r17]				// get *pmd (may be 0)
	shr.u r19=r22,PAGE_SHIFT		// shift pte index into position
	;;
(p7)	cmp.eq.or.andcm p6,p7=r17,r0		// was pmd_present(*pmd) == NULL?
	dep r17=r19,r17,3,(PAGE_SHIFT-3)	// r17=pte_offset(pmd,addr)
(p6)	br.cond.spnt page_fault
	mov b0=r30
	br.sptk.many b0				// return to continuation point
END(nested_dtlb_miss)

	.org ia64_ivt+0x1800
/////////////////////////////////////////////////////////////////////////////////////////
// 0x1800 Entry 6 (size 64 bundles) Instruction Key Miss (24)
ENTRY(ikey_miss)
	DBG_FAULT(6)
	FAULT(6)
END(ikey_miss)

	//-----------------------------------------------------------------------------------
	// call do_page_fault (predicates are in r31, psr.dt may be off, r16 is faulting address)
ENTRY(page_fault)
	ssm psr.dt
	;;
	srlz.i
	;;
	SAVE_MIN_WITH_COVER
	alloc r15=ar.pfs,0,0,3,0
	mov out0=cr.ifa
	mov out1=cr.isr
	adds r3=8,r2				// set up second base pointer
	;;
	ssm psr.ic | PSR_DEFAULT_BITS
	;;
	srlz.i					// guarantee that interruption collection is on
	;;
(p15)	ssm psr.i				// restore psr.i
	movl r14=ia64_leave_kernel
	;;
	SAVE_REST
	mov rp=r14
	;;
	adds out2=16,r12			// out2 = pointer to pt_regs
	br.call.sptk.many b6=ia64_do_page_fault	// ignore return address
END(page_fault)

	.org ia64_ivt+0x1c00
/////////////////////////////////////////////////////////////////////////////////////////
// 0x1c00 Entry 7 (size 64 bundles) Data Key Miss (12,51)
ENTRY(dkey_miss)
	DBG_FAULT(7)
	FAULT(7)
END(dkey_miss)

	.org ia64_ivt+0x2000
/////////////////////////////////////////////////////////////////////////////////////////
// 0x2000 Entry 8 (size 64 bundles) Dirty-bit (54)
ENTRY(dirty_bit)
	DBG_FAULT(8)
	/*
	 * What we do here is to simply turn on the dirty bit in the PTE.  We need to
	 * update both the page-table and the TLB entry.  To efficiently access the PTE,
	 * we address it through the virtual page table.  Most likely, the TLB entry for
	 * the relevant virtual page table page is still present in the TLB so we can
	 * normally do this without additional TLB misses.  In case the necessary virtual
	 * page table TLB entry isn't present, we take a nested TLB miss hit where we look
	 * up the physical address of the L3 PTE and then continue at label 1 below.
	 */
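	/*
	 * On SMP the update below is, in rough C (a sketch of the lock-free
	 * retry logic, not kernel API):
	 *
	 *	old = *pte;
	 *	new = old | _PAGE_D | _PAGE_A;
	 *	if (cmpxchg(pte, old, new) == old)
	 *		itc_d(new);			// install in the TLB
	 *	if (*pte != new)			// lost a race with ptc.g?
	 *		ptc_l(ifa, page_size);		// purge what we inserted
	 */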
	mov r16=cr.ifa				// get the address that caused the fault
	movl r30=1f				// load continuation point in case of nested fault
	;;
	thash r17=r16				// compute virtual address of L3 PTE
	mov r29=b0				// save b0 in case of nested fault
	mov r31=pr				// save pr
#ifdef CONFIG_SMP
	mov r28=ar.ccv				// save ar.ccv
	;;
1:	ld8 r18=[r17]
	;;					// avoid RAW on r18
	mov ar.ccv=r18				// set compare value for cmpxchg
	or r25=_PAGE_D|_PAGE_A,r18		// set the dirty and accessed bits
	;;
	cmpxchg8.acq r26=[r17],r25,ar.ccv
	mov r24=PAGE_SHIFT<<2
	;;
	cmp.eq p6,p7=r26,r18
	;;
(p6)	itc.d r25				// install updated PTE
	;;
	/*
	 * Tell the assembler's dependency-violation checker that the above "itc" instructions
	 * cannot possibly affect the following loads:
	 */
	dv_serialize_data

	ld8 r18=[r17]				// read PTE again
	;;
	cmp.eq p6,p7=r18,r25			// is it same as the newly installed
	;;
(p7)	ptc.l r16,r24
	mov b0=r29				// restore b0
	mov ar.ccv=r28
#else
	;;
1:	ld8 r18=[r17]
	;;					// avoid RAW on r18
	or r18=_PAGE_D|_PAGE_A,r18		// set the dirty and accessed bits
	mov b0=r29				// restore b0
	;;
	st8 [r17]=r18				// store back updated PTE
	itc.d r18				// install updated PTE
#endif
	mov pr=r31,-1				// restore pr
	rfi
END(dirty_bit)

	.org ia64_ivt+0x2400
/////////////////////////////////////////////////////////////////////////////////////////
// 0x2400 Entry 9 (size 64 bundles) Instruction Access-bit (27)
ENTRY(iaccess_bit)
	DBG_FAULT(9)
	// Like Entry 8, except for instruction access
	mov r16=cr.ifa				// get the address that caused the fault
	movl r30=1f				// load continuation point in case of nested fault
	mov r31=pr				// save predicates
#ifdef CONFIG_ITANIUM
	/*
	 * Erratum 10 (IFA may contain incorrect address) has "NoFix" status.
	 */
	mov r17=cr.ipsr
	;;
	mov r18=cr.iip
	tbit.z p6,p0=r17,IA64_PSR_IS_BIT	// IA64 instruction set?
	;;
(p6)	mov r16=r18				// if so, use cr.iip instead of cr.ifa
#endif /* CONFIG_ITANIUM */
	;;
	thash r17=r16				// compute virtual address of L3 PTE
	mov r29=b0				// save b0 in case of nested fault
#ifdef CONFIG_SMP
	mov r28=ar.ccv				// save ar.ccv
	;;
1:	ld8 r18=[r17]
	;;
	mov ar.ccv=r18				// set compare value for cmpxchg
	or r25=_PAGE_A,r18			// set the accessed bit
	;;
	cmpxchg8.acq r26=[r17],r25,ar.ccv
	mov r24=PAGE_SHIFT<<2
	;;
	cmp.eq p6,p7=r26,r18
	;;
(p6)	itc.i r25				// install updated PTE
	;;
	/*
	 * Tell the assembler's dependency-violation checker that the above "itc" instructions
	 * cannot possibly affect the following loads:
	 */
	dv_serialize_data

	ld8 r18=[r17]				// read PTE again
	;;
	cmp.eq p6,p7=r18,r25			// is it same as the newly installed
	;;
(p7)	ptc.l r16,r24
	mov b0=r29				// restore b0
	mov ar.ccv=r28
#else /* !CONFIG_SMP */
	;;
1:	ld8 r18=[r17]
	;;
	or r18=_PAGE_A,r18			// set the accessed bit
	mov b0=r29				// restore b0
	;;
	st8 [r17]=r18				// store back updated PTE
	itc.i r18				// install updated PTE
#endif /* !CONFIG_SMP */
	mov pr=r31,-1
	rfi
END(iaccess_bit)

	.org ia64_ivt+0x2800
/////////////////////////////////////////////////////////////////////////////////////////
// 0x2800 Entry 10 (size 64 bundles) Data Access-bit (15,55)
ENTRY(daccess_bit)
	DBG_FAULT(10)
	// Like Entry 8, except for data access
	mov r16=cr.ifa				// get the address that caused the fault
	movl r30=1f				// load continuation point in case of nested fault
	;;
	thash r17=r16				// compute virtual address of L3 PTE
	mov r31=pr
	mov r29=b0				// save b0 in case of nested fault
#ifdef CONFIG_SMP
	mov r28=ar.ccv				// save ar.ccv
	;;
1:	ld8 r18=[r17]
	;;					// avoid RAW on r18
	mov ar.ccv=r18				// set compare value for cmpxchg
	or r25=_PAGE_A,r18			// set the accessed bit
	;;
	cmpxchg8.acq r26=[r17],r25,ar.ccv
	mov r24=PAGE_SHIFT<<2
	;;
	cmp.eq p6,p7=r26,r18
	;;
(p6)	itc.d r25				// install updated PTE
	/*
	 * Tell the assembler's dependency-violation checker that the above "itc" instructions
	 * cannot possibly affect the following loads:
	 */
	dv_serialize_data
	;;
	ld8 r18=[r17]				// read PTE again
	;;
	cmp.eq p6,p7=r18,r25			// is it same as the newly installed
	;;
(p7)	ptc.l r16,r24
	mov ar.ccv=r28
#else
	;;
1:	ld8 r18=[r17]
	;;					// avoid RAW on r18
	or r18=_PAGE_A,r18			// set the accessed bit
	;;
	st8 [r17]=r18				// store back updated PTE
	itc.d r18				// install updated PTE
#endif
	mov b0=r29				// restore b0
	mov pr=r31,-1
	rfi
END(daccess_bit)

	.org ia64_ivt+0x2c00
/////////////////////////////////////////////////////////////////////////////////////////
// 0x2c00 Entry 11 (size 64 bundles) Break instruction (33)
ENTRY(break_fault)
	/*
	 * The streamlined system call entry/exit paths only save/restore the initial part
	 * of pt_regs.  This implies that the callers of system-calls must adhere to the
	 * normal procedure calling conventions.
	 *
	 *   Registers to be saved & restored:
	 *	CR registers: cr.ipsr, cr.iip, cr.ifs
	 *	AR registers: ar.unat, ar.pfs, ar.rsc, ar.rnat, ar.bspstore, ar.fpsr
	 *	others: pr, b0, b6, loadrs, r1, r11, r12, r13, r15
	 *   Registers to be restored only:
	 *	r8-r11: output value from the system call.
	 *
	 * During system call exit, scratch registers (including r15) are modified/cleared
	 * to prevent leaking bits from kernel to user level.
	 */
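	/*
	 * For reference, the user-side convention this path services is the
	 * standard ia64 Linux break-based system call (C-flavored sketch; a
	 * libc stub normally emits the real sequence):
	 *
	 *	r15 = __NR_xyz;			// syscall number
	 *	out0..out7 = arguments;		// in the caller's register frame
	 *	break 0x100000;			// __IA64_BREAK_SYSCALL
	 *	// afterwards: r8 = result, r10 = 0 on success / -1 on error
	 */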
	DBG_FAULT(11)
	mov.m r16=IA64_KR(CURRENT)		// M2 r16 <- current task (12 cyc)
	mov r29=cr.ipsr				// M2 (12 cyc)
	mov r31=pr				// I0 (2 cyc)

	mov r17=cr.iim				// M2 (2 cyc)
	mov.m r27=ar.rsc			// M2 (12 cyc)
	mov r18=__IA64_BREAK_SYSCALL		// A

	mov.m ar.rsc=0				// M2
	mov.m r21=ar.fpsr			// M2 (12 cyc)
	mov r19=b6				// I0 (2 cyc)
	;;
	mov.m r23=ar.bspstore			// M2 (12 cyc)
	mov.m r24=ar.rnat			// M2 (5 cyc)
	mov.i r26=ar.pfs			// I0 (2 cyc)

	invala					// M0|1
	nop.m 0					// M
	mov r20=r1				// A	save r1

	nop.m 0
	movl r30=sys_call_table			// X

	mov r28=cr.iip				// M2 (2 cyc)
	cmp.eq p0,p7=r18,r17			// I0	is this a system call?
(p7)	br.cond.spnt non_syscall		// B	no ->
	//
	// From this point on, we are definitely on the syscall-path
	// and we can use (non-banked) scratch registers.
	//
///////////////////////////////////////////////////////////////////////
	mov r1=r16				// A	move task-pointer to "addl"-addressable reg
	mov r2=r16				// A	setup r2 for ia64_syscall_setup
	add r9=TI_FLAGS+IA64_TASK_SIZE,r16	// A	r9 = &current_thread_info()->flags

	adds r16=IA64_TASK_THREAD_ON_USTACK_OFFSET,r16
	adds r15=-1024,r15			// A	subtract 1024 from syscall number
	mov r3=NR_syscalls - 1
	;;
	ld1.bias r17=[r16]			// M0|1	r17 = current->thread.on_ustack flag
	ld4 r9=[r9]				// M0|1	r9 = current_thread_info()->flags
	extr.u r8=r29,41,2			// I0	extract ei field from cr.ipsr

	shladd r30=r15,3,r30			// A	r30 = sys_call_table + 8*(syscall-1024)
	addl r22=IA64_RBS_OFFSET,r1		// A	compute base of RBS
	cmp.leu p6,p7=r15,r3			// A	syscall number in range?
	;;

	lfetch.fault.excl.nt1 [r22]		// M0|1	prefetch RBS
(p6)	ld8 r30=[r30]				// M0|1	load address of syscall entry point
	tnat.nz.or p7,p0=r15			// I0	is syscall nr a NaT?

	mov.m ar.bspstore=r22			// M2	switch to kernel RBS
	cmp.eq p8,p9=2,r8			// A	isr.ei==2?
	;;

(p8)	mov r8=0				// A	clear ei to 0
(p7)	movl r30=sys_ni_syscall			// X

(p8)	adds r28=16,r28				// A	switch cr.iip to next bundle
(p9)	adds r8=1,r8				// A	increment ei to next slot
	nop.i 0
	;;

	mov.m r25=ar.unat			// M2 (5 cyc)
	dep r29=r8,r29,41,2			// I0	insert new ei into cr.ipsr
	adds r15=1024,r15			// A	restore original syscall number
	//
	// If any of the above loads miss in L1D, we'll stall here until
	// the data arrives.
	//
///////////////////////////////////////////////////////////////////////
	st1 [r16]=r0				// M2|3	clear current->thread.on_ustack flag
	mov b6=r30				// I0	setup syscall handler branch reg early
	cmp.eq pKStk,pUStk=r0,r17		// A	were we on kernel stacks already?

	and r9=_TIF_SYSCALL_TRACEAUDIT,r9	// A	mask trace or audit
	mov r18=ar.bsp				// M2 (12 cyc)
(pKStk)	br.cond.spnt .break_fixup		// B	we're already in kernel-mode -- fix up RBS
	;;
.back_from_break_fixup:
(pUStk)	addl r1=IA64_STK_OFFSET-IA64_PT_REGS_SIZE,r1	// A	compute base of memory stack
	cmp.eq p14,p0=r9,r0			// A	are syscalls being traced/audited?
	br.call.sptk.many b7=ia64_syscall_setup	// B
1:
	mov ar.rsc=0x3				// M2	set eager mode, pl 0, LE, loadrs=0
	nop 0
	bsw.1					// B (6 cyc) regs are saved, switch to bank 1
	;;

	ssm psr.ic | PSR_DEFAULT_BITS		// M2	now it's safe to re-enable intr.-collection
	movl r3=ia64_ret_from_syscall		// X
	;;

	srlz.i					// M0	ensure interruption collection is on
	mov rp=r3				// I0	set the real return addr
(p10)	br.cond.spnt.many ia64_ret_from_syscall	// B	return if bad call-frame or r15 is a NaT

(p15)	ssm psr.i				// M2	restore psr.i
(p14)	br.call.sptk.many b6=b6			// B	invoke syscall-handler (ignore return addr)
	br.cond.spnt.many ia64_trace_syscall	// B	do syscall-tracing thingamajig
	// NOT REACHED
///////////////////////////////////////////////////////////////////////
	// On entry, we optimistically assumed that we're coming from user-space.
	// For the rare cases where a system-call is done from within the kernel,
	// we fix things up at this point:
.break_fixup:
	add r1=-IA64_PT_REGS_SIZE,sp		// A	allocate space for pt_regs structure
	mov ar.rnat=r24				// M2	restore kernel's AR.RNAT
	;;
	mov ar.bspstore=r23			// M2	restore kernel's AR.BSPSTORE
	br.cond.sptk .back_from_break_fixup
END(break_fault)

	.org ia64_ivt+0x3000
/////////////////////////////////////////////////////////////////////////////////////////
// 0x3000 Entry 12 (size 64 bundles) External Interrupt (4)
ENTRY(interrupt)
	DBG_FAULT(12)
	mov r31=pr		// prepare to save predicates
	;;
	SAVE_MIN_WITH_COVER	// uses r31; defines r2 and r3
	ssm psr.ic | PSR_DEFAULT_BITS
	;;
	adds r3=8,r2		// set up second base pointer for SAVE_REST
	srlz.i			// ensure everybody knows psr.ic is back on
	;;
	SAVE_REST
	;;
	alloc r14=ar.pfs,0,0,2,0	// must be first in an insn group
	mov out0=cr.ivr		// pass cr.ivr as first arg
	add out1=16,sp		// pass pointer to pt_regs as second arg
	;;
	srlz.d			// make sure we see the effect of cr.ivr
	movl r14=ia64_leave_kernel
	;;
	mov rp=r14
	br.call.sptk.many b6=ia64_handle_irq
END(interrupt)

	.org ia64_ivt+0x3400
/////////////////////////////////////////////////////////////////////////////////////////
// 0x3400 Entry 13 (size 64 bundles) Reserved
	DBG_FAULT(13)
	FAULT(13)

	.org ia64_ivt+0x3800
/////////////////////////////////////////////////////////////////////////////////////////
// 0x3800 Entry 14 (size 64 bundles) Reserved
	DBG_FAULT(14)
	FAULT(14)

	/*
	 * There is no particular reason for this code to be here, other than that
	 * there happens to be space here that would go unused otherwise.  If this
	 * fault ever gets "unreserved", simply move the following code to a more
	 * suitable spot...
	 *
	 * ia64_syscall_setup() is a separate subroutine so that it can
	 * allocate stacked registers and safely demine any potential NaT
	 * values from the input registers.
	 *
	 * On entry:
	 *	- executing on bank 0 or bank 1 register set (doesn't matter)
	 *	- r1: stack pointer
	 *	- r2: current task pointer
	 *	- r3: preserved
	 *	- r11: original contents (saved ar.pfs to be saved)
	 *	- r12: original contents (sp to be saved)
	 *	- r13: original contents (tp to be saved)
	 *	- r15: original contents (syscall # to be saved)
	 *	- r18: saved bsp (after switching to kernel stack)
	 *	- r19: saved b6
	 *	- r20: saved r1 (gp)
	 *	- r21: saved ar.fpsr
	 *	- r22: kernel's register backing store base (krbs_base)
	 *	- r23: saved ar.bspstore
	 *	- r24: saved ar.rnat
	 *	- r25: saved ar.unat
	 *	- r26: saved ar.pfs
	 *	- r27: saved ar.rsc
	 *	- r28: saved cr.iip
	 *	- r29: saved cr.ipsr
	 *	- r31: saved pr
	 *	- b0: original contents (to be saved)
	 * On exit:
	 *	- p10: TRUE if syscall is invoked with more than 8 out
	 *	       registers or r15's Nat is true
	 *	- r1: kernel's gp
	 *	- r3: preserved (same as on entry)
	 *	- r8: -EINVAL if p10 is true
	 *	- r12: points to kernel stack
	 *	- r13: points to current task
	 *	- r14: preserved (same as on entry)
	 *	- p13: preserved
	 *	- p15: TRUE if interrupts need to be re-enabled
	 *	- ar.fpsr: set to kernel settings
	 *	- b6: preserved (same as on entry)
	 */
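	/*
	 * "Demining" below means each potentially-NaT syscall argument is
	 * forced to a well-defined value, roughly (a C-flavored sketch):
	 *
	 *	for (i = 0; i < 8; i++)
	 *		if (is_nat(in[i]))
	 *			in[i] = -1;
	 *
	 * implemented as one tnat.nz/mov pair per input register, interleaved
	 * with the pt_regs stores.
	 */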
GLOBAL_ENTRY(ia64_syscall_setup)
#if PT(B6) != 0
# error This code assumes that b6 is the first field in pt_regs.
#endif
	st8 [r1]=r19				// save b6
	add r16=PT(CR_IPSR),r1			// initialize first base pointer
	add r17=PT(R11),r1			// initialize second base pointer
	;;
	alloc r19=ar.pfs,8,0,0,0		// ensure in0-in7 are writable
	st8 [r16]=r29,PT(AR_PFS)-PT(CR_IPSR)	// save cr.ipsr
	tnat.nz p8,p0=in0

	st8.spill [r17]=r11,PT(CR_IIP)-PT(R11)	// save r11
	tnat.nz p9,p0=in1
(pKStk)	mov r18=r0				// make sure r18 isn't NaT
	;;

	st8 [r16]=r26,PT(CR_IFS)-PT(AR_PFS)	// save ar.pfs
	st8 [r17]=r28,PT(AR_UNAT)-PT(CR_IIP)	// save cr.iip
	mov r28=b0				// save b0 (2 cyc)
	;;

	st8 [r17]=r25,PT(AR_RSC)-PT(AR_UNAT)	// save ar.unat
	dep r19=0,r19,38,26			// clear all bits but 0..37 [I0]
(p8)	mov in0=-1
	;;

	st8 [r16]=r19,PT(AR_RNAT)-PT(CR_IFS)	// store ar.pfs.pfm in cr.ifs
	extr.u r11=r19,7,7	// I0		// get sol of ar.pfs
	and r8=0x7f,r19		// A		// get sof of ar.pfs

	st8 [r17]=r27,PT(AR_BSPSTORE)-PT(AR_RSC)// save ar.rsc
	tbit.nz p15,p0=r29,IA64_PSR_I_BIT	// I0
(p9)	mov in1=-1
	;;

(pUStk)	sub r18=r18,r22				// r18=RSE.ndirty*8
	tnat.nz p10,p0=in2
	add r11=8,r11
	;;
(pKStk)	adds r16=PT(PR)-PT(AR_RNAT),r16		// skip over ar_rnat field
(pKStk)	adds r17=PT(B0)-PT(AR_BSPSTORE),r17	// skip over ar_bspstore field
	tnat.nz p11,p0=in3
	;;
(p10)	mov in2=-1
	tnat.nz p12,p0=in4			// [I0]
(p11)	mov in3=-1
	;;
(pUStk)	st8 [r16]=r24,PT(PR)-PT(AR_RNAT)	// save ar.rnat
(pUStk)	st8 [r17]=r23,PT(B0)-PT(AR_BSPSTORE)	// save ar.bspstore
	shl r18=r18,16				// compute ar.rsc to be used for "loadrs"
	;;
	st8 [r16]=r31,PT(LOADRS)-PT(PR)		// save predicates
	st8 [r17]=r28,PT(R1)-PT(B0)		// save b0
	tnat.nz p13,p0=in5			// [I0]
	;;
	st8 [r16]=r18,PT(R12)-PT(LOADRS)	// save ar.rsc value for "loadrs"
	st8.spill [r17]=r20,PT(R13)-PT(R1)	// save original r1
(p12)	mov in4=-1
	;;

.mem.offset 0,0; st8.spill [r16]=r12,PT(AR_FPSR)-PT(R12)	// save r12
.mem.offset 8,0; st8.spill [r17]=r13,PT(R15)-PT(R13)		// save r13
(p13)	mov in5=-1
	;;
	st8 [r16]=r21,PT(R8)-PT(AR_FPSR)	// save ar.fpsr
	tnat.nz p13,p0=in6
	cmp.lt p10,p9=r11,r8	// frame size can't be more than local+8
	;;
	mov r8=1
(p9)	tnat.nz p10,p0=r15
	adds r12=-16,r1		// switch to kernel memory stack (with 16 bytes of scratch)

	st8.spill [r17]=r15			// save r15
	tnat.nz p8,p0=in7
	nop.i 0

	mov r13=r2				// establish `current'
	movl r1=__gp				// establish kernel global pointer
	;;
	st8 [r16]=r8		// ensure pt_regs.r8 != 0 (see handle_syscall_error)
(p13)	mov in6=-1
(p8)	mov in7=-1

	cmp.eq pSys,pNonSys=r0,r0		// set pSys=1, pNonSys=0
	movl r17=FPSR_DEFAULT
	;;
	mov.m ar.fpsr=r17			// set ar.fpsr to kernel default value
(p10)	mov r8=-EINVAL
	br.ret.sptk.many b7
END(ia64_syscall_setup)

	.org ia64_ivt+0x3c00
/////////////////////////////////////////////////////////////////////////////////////////
// 0x3c00 Entry 15 (size 64 bundles) Reserved
	DBG_FAULT(15)
	FAULT(15)

	/*
	 * Squatting in this space ...
	 *
	 * This special case dispatcher for illegal operation faults allows preserved
	 * registers to be modified through a callback function (asm only) that is handed
	 * back from the fault handler in r8.  Up to three arguments can be passed to the
	 * callback function by returning an aggregate with the callback as its first
	 * element, followed by the arguments.
	 */
ENTRY(dispatch_illegal_op_fault)
	.prologue
	.body
	SAVE_MIN_WITH_COVER
	ssm psr.ic | PSR_DEFAULT_BITS
	;;
	srlz.i			// guarantee that interruption collection is on
	;;
(p15)	ssm psr.i		// restore psr.i
	adds r3=8,r2		// set up second base pointer for SAVE_REST
	;;
	alloc r14=ar.pfs,0,0,1,0	// must be first in insn group
	mov out0=ar.ec
	;;
	SAVE_REST
	PT_REGS_UNWIND_INFO(0)
	;;
	br.call.sptk.many rp=ia64_illegal_op_fault
.ret0:	;;
	alloc r14=ar.pfs,0,0,3,0	// must be first in insn group
	mov out0=r9
	mov out1=r10
	mov out2=r11
	movl r15=ia64_leave_kernel
	;;
	mov rp=r15
	mov b6=r8
	;;
	cmp.ne p6,p0=0,r8
(p6)	br.call.dpnt.many b6=b6		// call returns to ia64_leave_kernel
	br.sptk.many ia64_leave_kernel
END(dispatch_illegal_op_fault)

	.org ia64_ivt+0x4000
/////////////////////////////////////////////////////////////////////////////////////////
// 0x4000 Entry 16 (size 64 bundles) Reserved
	DBG_FAULT(16)
	FAULT(16)

	.org ia64_ivt+0x4400
/////////////////////////////////////////////////////////////////////////////////////////
// 0x4400 Entry 17 (size 64 bundles) Reserved
	DBG_FAULT(17)
	FAULT(17)

ENTRY(non_syscall)
	mov ar.rsc=r27			// restore ar.rsc before SAVE_MIN_WITH_COVER
	;;
	SAVE_MIN_WITH_COVER

	// There is no particular reason for this code to be here, other than that
	// there happens to be space here that would go unused otherwise.  If this
	// fault ever gets "unreserved", simply move the following code to a more
	// suitable spot...
	alloc r14=ar.pfs,0,0,2,0
	mov out0=cr.iim
	add out1=16,sp
	adds r3=8,r2			// set up second base pointer for SAVE_REST

	ssm psr.ic | PSR_DEFAULT_BITS
	;;
	srlz.i				// guarantee that interruption collection is on
	;;
(p15)	ssm psr.i			// restore psr.i
	movl r15=ia64_leave_kernel
	;;
	SAVE_REST
	mov rp=r15
	;;
	br.call.sptk.many b6=ia64_bad_break	// avoid WAW on CFM and ignore return addr
END(non_syscall)

	.org ia64_ivt+0x4800
/////////////////////////////////////////////////////////////////////////////////////////
// 0x4800 Entry 18 (size 64 bundles) Reserved
	DBG_FAULT(18)
	FAULT(18)

	/*
	 * There is no particular reason for this code to be here, other than that
	 * there happens to be space here that would go unused otherwise.  If this
	 * fault ever gets "unreserved", simply move the following code to a more
	 * suitable spot...
	 */

ENTRY(dispatch_unaligned_handler)
	SAVE_MIN_WITH_COVER
	;;
	alloc r14=ar.pfs,0,0,2,0	// now it's safe (must be first in insn group!)
	mov out0=cr.ifa
	adds out1=16,sp

	ssm psr.ic | PSR_DEFAULT_BITS
	;;
	srlz.i				// guarantee that interruption collection is on
	;;
(p15)	ssm psr.i			// restore psr.i
	adds r3=8,r2			// set up second base pointer
	;;
	SAVE_REST
	movl r14=ia64_leave_kernel
	;;
	mov rp=r14
	br.sptk.many ia64_prepare_handle_unaligned
END(dispatch_unaligned_handler)

	.org ia64_ivt+0x4c00
/////////////////////////////////////////////////////////////////////////////////////////
// 0x4c00 Entry 19 (size 64 bundles) Reserved
	DBG_FAULT(19)
	FAULT(19)

	/*
	 * There is no particular reason for this code to be here, other than that
	 * there happens to be space here that would go unused otherwise.  If this
	 * fault ever gets "unreserved", simply move the following code to a more
	 * suitable spot...
	 */

ENTRY(dispatch_to_fault_handler)
	/*
	 * Input:
	 *	psr.ic:	off
	 *	r19:	fault vector number (e.g., 24 for General Exception)
	 *	r31:	contains saved predicates (pr)
	 */
	SAVE_MIN_WITH_COVER_R19
	alloc r14=ar.pfs,0,0,5,0
	mov out0=r15
	mov out1=cr.isr
	mov out2=cr.ifa
	mov out3=cr.iim
	mov out4=cr.itir
	;;
	ssm psr.ic | PSR_DEFAULT_BITS
	;;
	srlz.i				// guarantee that interruption collection is on
	;;
(p15)	ssm psr.i			// restore psr.i
	adds r3=8,r2			// set up second base pointer for SAVE_REST
	;;
	SAVE_REST
	movl r14=ia64_leave_kernel
	;;
	mov rp=r14
	br.call.sptk.many b6=ia64_fault
END(dispatch_to_fault_handler)

//
// --- End of long entries, Beginning of short entries
//

	.org ia64_ivt+0x5000
/////////////////////////////////////////////////////////////////////////////////////////
// 0x5000 Entry 20 (size 16 bundles) Page Not Present (10,22,49)
ENTRY(page_not_present)
	DBG_FAULT(20)
	mov r16=cr.ifa
	rsm psr.dt
	/*
	 * The Linux page fault handler doesn't expect non-present pages to be in
	 * the TLB.  Flush the existing entry now, so we meet that expectation.
	 */
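	/*
	 * In other words (sketch): purge_local_tlb(ifa, PAGE_SHIFT << 2)
	 * before branching to page_fault, so do_page_fault never sees a
	 * stale not-present translation for this address.
	 */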
	mov r17=PAGE_SHIFT<<2
	;;
	ptc.l r16,r17
	;;
	mov r31=pr
	srlz.d
	br.sptk.many page_fault
END(page_not_present)

	.org ia64_ivt+0x5100
/////////////////////////////////////////////////////////////////////////////////////////
// 0x5100 Entry 21 (size 16 bundles) Key Permission (13,25,52)
ENTRY(key_permission)
	DBG_FAULT(21)
	mov r16=cr.ifa
	rsm psr.dt
	mov r31=pr
	;;
	srlz.d
	br.sptk.many page_fault
END(key_permission)

	.org ia64_ivt+0x5200
/////////////////////////////////////////////////////////////////////////////////////////
// 0x5200 Entry 22 (size 16 bundles) Instruction Access Rights (26)
ENTRY(iaccess_rights)
	DBG_FAULT(22)
	mov r16=cr.ifa
	rsm psr.dt
	mov r31=pr
	;;
	srlz.d
	br.sptk.many page_fault
END(iaccess_rights)

	.org ia64_ivt+0x5300
/////////////////////////////////////////////////////////////////////////////////////////
// 0x5300 Entry 23 (size 16 bundles) Data Access Rights (14,53)
ENTRY(daccess_rights)
	DBG_FAULT(23)
	mov r16=cr.ifa
	rsm psr.dt
	mov r31=pr
	;;
	srlz.d
	br.sptk.many page_fault
END(daccess_rights)

	.org ia64_ivt+0x5400
/////////////////////////////////////////////////////////////////////////////////////////
// 0x5400 Entry 24 (size 16 bundles) General Exception (5,32,34,36,38,39)
ENTRY(general_exception)
	DBG_FAULT(24)
	mov r16=cr.isr
	mov r31=pr
	;;
	cmp4.eq p6,p0=0,r16
(p6)	br.sptk.many dispatch_illegal_op_fault
	;;
	mov r19=24		// fault number
	br.sptk.many dispatch_to_fault_handler
END(general_exception)

	.org ia64_ivt+0x5500
/////////////////////////////////////////////////////////////////////////////////////////
// 0x5500 Entry 25 (size 16 bundles) Disabled FP-Register (35)
ENTRY(disabled_fp_reg)
	DBG_FAULT(25)
	rsm psr.dfh		// ensure we can access fph
	;;
	srlz.d
	mov r31=pr
	mov r19=25
	br.sptk.many dispatch_to_fault_handler
END(disabled_fp_reg)

	.org ia64_ivt+0x5600
/////////////////////////////////////////////////////////////////////////////////////////
// 0x5600 Entry 26 (size 16 bundles) Nat Consumption (11,23,37,50)
ENTRY(nat_consumption)
	DBG_FAULT(26)

	mov r16=cr.ipsr
	mov r17=cr.isr
	mov r31=pr				// save PR
	;;
	and r18=0xf,r17				// r18 = cr.isr.code{3:0}
	tbit.z p6,p0=r17,IA64_ISR_NA_BIT
	;;
	cmp.ne.or p6,p0=IA64_ISR_CODE_LFETCH,r18
	dep r16=-1,r16,IA64_PSR_ED_BIT,1
(p6)	br.cond.spnt 1f		// branch if (cr.isr.na == 0 || cr.isr.code{3:0} != LFETCH)
	;;
	mov cr.ipsr=r16		// set cr.ipsr.ed
	mov pr=r31,-1
	;;
	rfi

1:	mov pr=r31,-1
	;;
	FAULT(26)
END(nat_consumption)

	.org ia64_ivt+0x5700
/////////////////////////////////////////////////////////////////////////////////////////
// 0x5700 Entry 27 (size 16 bundles) Speculation (40)
ENTRY(speculation_vector)
	DBG_FAULT(27)
	/*
	 * A [f]chk.[as] instruction needs to take the branch to the recovery code but
	 * this part of the architecture is not implemented in hardware on some CPUs, such
	 * as Itanium.  Thus, in general we need to emulate the behavior.  IIM contains
	 * the relative target (not yet sign extended).  So after sign extending it we
	 * simply add it to IIP.  We also need to reset the EI field of the IPSR to zero,
	 * i.e., the slot to restart into.
	 *
	 * cr.iim contains zero_ext(imm21)
	 */
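	/*
	 * Net effect, as C-flavored pseudocode (a sketch; imm21 is in units of
	 * 16-byte bundles, hence the 43/39 shift pair below, which yields the
	 * sign-extended immediate scaled by 16):
	 *
	 *	cr.iip  += ((long)(iim << 43)) >> 39;	// iip += sext(imm21) * 16
	 *	cr.ipsr &= ~EI;				// restart in slot 0
	 *	rfi();
	 */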
	mov r18=cr.iim
	;;
	mov r17=cr.iip
	shl r18=r18,43			// put sign bit in position (43=64-21)
	;;

	mov r16=cr.ipsr
	shr r18=r18,39			// sign extend (39=43-4)
	;;

	add r17=r17,r18			// now add the offset
	;;
	mov cr.iip=r17
	dep r16=0,r16,41,2		// clear EI
	;;

	mov cr.ipsr=r16
	;;

	rfi				// and go back
END(speculation_vector)

	.org ia64_ivt+0x5800
/////////////////////////////////////////////////////////////////////////////////////////
// 0x5800 Entry 28 (size 16 bundles) Reserved
	DBG_FAULT(28)
	FAULT(28)

	.org ia64_ivt+0x5900
/////////////////////////////////////////////////////////////////////////////////////////
// 0x5900 Entry 29 (size 16 bundles) Debug (16,28,56)
ENTRY(debug_vector)
	DBG_FAULT(29)
	FAULT(29)
END(debug_vector)

	.org ia64_ivt+0x5a00
/////////////////////////////////////////////////////////////////////////////////////////
// 0x5a00 Entry 30 (size 16 bundles) Unaligned Reference (57)
ENTRY(unaligned_access)
	DBG_FAULT(30)
	mov r16=cr.ipsr
	mov r31=pr		// prepare to save predicates
	;;
	br.sptk.many dispatch_unaligned_handler
END(unaligned_access)

	.org ia64_ivt+0x5b00
/////////////////////////////////////////////////////////////////////////////////////////
// 0x5b00 Entry 31 (size 16 bundles) Unsupported Data Reference (57)
ENTRY(unsupported_data_reference)
	DBG_FAULT(31)
	FAULT(31)
END(unsupported_data_reference)

	.org ia64_ivt+0x5c00
/////////////////////////////////////////////////////////////////////////////////////////
// 0x5c00 Entry 32 (size 16 bundles) Floating-Point Fault (64)
ENTRY(floating_point_fault)
	DBG_FAULT(32)
	FAULT(32)
END(floating_point_fault)

	.org ia64_ivt+0x5d00
/////////////////////////////////////////////////////////////////////////////////////////
// 0x5d00 Entry 33 (size 16 bundles) Floating Point Trap (66)
ENTRY(floating_point_trap)
	DBG_FAULT(33)
	FAULT(33)
END(floating_point_trap)

	.org ia64_ivt+0x5e00
/////////////////////////////////////////////////////////////////////////////////////////
// 0x5e00 Entry 34 (size 16 bundles) Lower Privilege Transfer Trap (66)
ENTRY(lower_privilege_trap)
	DBG_FAULT(34)
	FAULT(34)
END(lower_privilege_trap)

	.org ia64_ivt+0x5f00
/////////////////////////////////////////////////////////////////////////////////////////
// 0x5f00 Entry 35 (size 16 bundles) Taken Branch Trap (68)
ENTRY(taken_branch_trap)
	DBG_FAULT(35)
	FAULT(35)
END(taken_branch_trap)

	.org ia64_ivt+0x6000
/////////////////////////////////////////////////////////////////////////////////////////
// 0x6000 Entry 36 (size 16 bundles) Single Step Trap (69)
ENTRY(single_step_trap)
	DBG_FAULT(36)
	FAULT(36)
END(single_step_trap)

	.org ia64_ivt+0x6100
/////////////////////////////////////////////////////////////////////////////////////////
// 0x6100 Entry 37 (size 16 bundles) Reserved
	DBG_FAULT(37)
	FAULT(37)

	.org ia64_ivt+0x6200
/////////////////////////////////////////////////////////////////////////////////////////
// 0x6200 Entry 38 (size 16 bundles) Reserved
	DBG_FAULT(38)
	FAULT(38)

	.org ia64_ivt+0x6300
/////////////////////////////////////////////////////////////////////////////////////////
// 0x6300 Entry 39 (size 16 bundles) Reserved
	DBG_FAULT(39)
	FAULT(39)

	.org ia64_ivt+0x6400
/////////////////////////////////////////////////////////////////////////////////////////
// 0x6400 Entry 40 (size 16 bundles) Reserved
	DBG_FAULT(40)
	FAULT(40)

	.org ia64_ivt+0x6500
/////////////////////////////////////////////////////////////////////////////////////////
// 0x6500 Entry 41 (size 16 bundles) Reserved
	DBG_FAULT(41)
	FAULT(41)

	.org ia64_ivt+0x6600
/////////////////////////////////////////////////////////////////////////////////////////
// 0x6600 Entry 42 (size 16 bundles) Reserved
	DBG_FAULT(42)
	FAULT(42)

	.org ia64_ivt+0x6700
/////////////////////////////////////////////////////////////////////////////////////////
// 0x6700 Entry 43 (size 16 bundles) Reserved
	DBG_FAULT(43)
	FAULT(43)

	.org ia64_ivt+0x6800
/////////////////////////////////////////////////////////////////////////////////////////
// 0x6800 Entry 44 (size 16 bundles) Reserved
	DBG_FAULT(44)
	FAULT(44)

	.org ia64_ivt+0x6900
/////////////////////////////////////////////////////////////////////////////////////////
// 0x6900 Entry 45 (size 16 bundles) IA-32 Exception (17,18,29,41,42,43,44,58,60,61,62,72,73,75,76,77)
ENTRY(ia32_exception)
	DBG_FAULT(45)
	FAULT(45)
END(ia32_exception)

	.org ia64_ivt+0x6a00
/////////////////////////////////////////////////////////////////////////////////////////
// 0x6a00 Entry 46 (size 16 bundles) IA-32 Intercept (30,31,59,70,71)
ENTRY(ia32_intercept)
	DBG_FAULT(46)
#ifdef CONFIG_IA32_SUPPORT
	mov r31=pr
	mov r16=cr.isr
	;;
	extr.u r17=r16,16,8	// get ISR.code
	mov r18=ar.eflag
	mov r19=cr.iim		// old eflag value
	;;
	cmp.ne p6,p0=2,r17
(p6)	br.cond.spnt 1f		// not a system flag fault
	xor r16=r18,r19
	;;
	extr.u r17=r16,18,1	// get the eflags.ac bit
	;;
	cmp.eq p6,p0=0,r17
(p6)	br.cond.spnt 1f		// eflags.ac bit didn't change
	;;
	mov pr=r31,-1		// restore predicate registers
	rfi

1:
#endif	// CONFIG_IA32_SUPPORT
	FAULT(46)
END(ia32_intercept)

	.org ia64_ivt+0x6b00
/////////////////////////////////////////////////////////////////////////////////////////
// 0x6b00 Entry 47 (size 16 bundles) IA-32 Interrupt (74)
ENTRY(ia32_interrupt)
	DBG_FAULT(47)
#ifdef CONFIG_IA32_SUPPORT
	mov r31=pr
	br.sptk.many dispatch_to_ia32_handler
#else
	FAULT(47)
#endif
END(ia32_interrupt)
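	/*
	 * The system-flag intercept above boils down to (a C sketch; bit 18 of
	 * eflags is AC):
	 *
	 *	if (isr.code == 2 && (((ar.eflag ^ cr.iim) >> 18) & 1))
	 *		rfi();		// only eflags.ac changed: let it through
	 *	else
	 *		FAULT(46);	// anything else is a real fault
	 */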
	.org ia64_ivt+0x6c00
/////////////////////////////////////////////////////////////////////////////////////////
// 0x6c00 Entry 48 (size 16 bundles) Reserved
	DBG_FAULT(48)
	FAULT(48)

	.org ia64_ivt+0x6d00
/////////////////////////////////////////////////////////////////////////////////////////
// 0x6d00 Entry 49 (size 16 bundles) Reserved
	DBG_FAULT(49)
	FAULT(49)

	.org ia64_ivt+0x6e00
/////////////////////////////////////////////////////////////////////////////////////////
// 0x6e00 Entry 50 (size 16 bundles) Reserved
	DBG_FAULT(50)
	FAULT(50)

	.org ia64_ivt+0x6f00
/////////////////////////////////////////////////////////////////////////////////////////
// 0x6f00 Entry 51 (size 16 bundles) Reserved
	DBG_FAULT(51)
	FAULT(51)

	.org ia64_ivt+0x7000
/////////////////////////////////////////////////////////////////////////////////////////
// 0x7000 Entry 52 (size 16 bundles) Reserved
	DBG_FAULT(52)
	FAULT(52)

	.org ia64_ivt+0x7100
/////////////////////////////////////////////////////////////////////////////////////////
// 0x7100 Entry 53 (size 16 bundles) Reserved
	DBG_FAULT(53)
	FAULT(53)

	.org ia64_ivt+0x7200
/////////////////////////////////////////////////////////////////////////////////////////
// 0x7200 Entry 54 (size 16 bundles) Reserved
	DBG_FAULT(54)
	FAULT(54)

	.org ia64_ivt+0x7300
/////////////////////////////////////////////////////////////////////////////////////////
// 0x7300 Entry 55 (size 16 bundles) Reserved
	DBG_FAULT(55)
	FAULT(55)

	.org ia64_ivt+0x7400
/////////////////////////////////////////////////////////////////////////////////////////
// 0x7400 Entry 56 (size 16 bundles) Reserved
	DBG_FAULT(56)
	FAULT(56)

	.org ia64_ivt+0x7500
/////////////////////////////////////////////////////////////////////////////////////////
// 0x7500 Entry 57 (size 16 bundles) Reserved
	DBG_FAULT(57)
	FAULT(57)

	.org ia64_ivt+0x7600
/////////////////////////////////////////////////////////////////////////////////////////
// 0x7600 Entry 58 (size 16 bundles) Reserved
	DBG_FAULT(58)
	FAULT(58)

	.org ia64_ivt+0x7700
/////////////////////////////////////////////////////////////////////////////////////////
// 0x7700 Entry 59 (size 16 bundles) Reserved
	DBG_FAULT(59)
	FAULT(59)

	.org ia64_ivt+0x7800
/////////////////////////////////////////////////////////////////////////////////////////
// 0x7800 Entry 60 (size 16 bundles) Reserved
	DBG_FAULT(60)
	FAULT(60)

	.org ia64_ivt+0x7900
/////////////////////////////////////////////////////////////////////////////////////////
// 0x7900 Entry 61 (size 16 bundles) Reserved
	DBG_FAULT(61)
	FAULT(61)

	.org ia64_ivt+0x7a00
/////////////////////////////////////////////////////////////////////////////////////////
// 0x7a00 Entry 62 (size 16 bundles) Reserved
	DBG_FAULT(62)
	FAULT(62)

	.org ia64_ivt+0x7b00
/////////////////////////////////////////////////////////////////////////////////////////
// 0x7b00 Entry 63 (size 16 bundles) Reserved
	DBG_FAULT(63)
	FAULT(63)

	.org ia64_ivt+0x7c00
/////////////////////////////////////////////////////////////////////////////////////////
// 0x7c00 Entry 64 (size 16 bundles) Reserved
	DBG_FAULT(64)
	FAULT(64)

	.org ia64_ivt+0x7d00
/////////////////////////////////////////////////////////////////////////////////////////
// 0x7d00 Entry 65 (size 16 bundles) Reserved
	DBG_FAULT(65)
	FAULT(65)

	.org ia64_ivt+0x7e00
/////////////////////////////////////////////////////////////////////////////////////////
// 0x7e00 Entry 66 (size 16 bundles) Reserved
	DBG_FAULT(66)
	FAULT(66)

	.org ia64_ivt+0x7f00
/////////////////////////////////////////////////////////////////////////////////////////
// 0x7f00 Entry 67 (size 16 bundles) Reserved
	DBG_FAULT(67)
	FAULT(67)

#ifdef CONFIG_IA32_SUPPORT

	/*
	 * There is no particular reason for this code to be here, other than that
	 * there happens to be space here that would go unused otherwise.  If this
	 * fault ever gets "unreserved", simply move the following code to a more
	 * suitable spot...
	 */

	// IA32 interrupt entry point
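	/*
	 * Register mapping used below for int 0x80 system calls (a sketch of
	 * how the i386 ABI is marshalled into ia64 argument registers):
	 *
	 *	eax (r8)  -> syscall number	ebx (r11) -> out0
	 *	ecx (r9)  -> out1		edx (r10) -> out2
	 *	esi (r14) -> out3		edi (r15) -> out4
	 *	ebp (r13) -> out5
	 */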
ENTRY(dispatch_to_ia32_handler)
	SAVE_MIN
	;;
	mov r14=cr.isr
	ssm psr.ic | PSR_DEFAULT_BITS
	;;
	srlz.i				// guarantee that interruption collection is on
	;;
(p15)	ssm psr.i
	adds r3=8,r2			// Base pointer for SAVE_REST
	;;
	SAVE_REST
	;;
	mov r15=0x80
	shr r14=r14,16			// Get interrupt number
	;;
	cmp.ne p6,p0=r14,r15
(p6)	br.call.dpnt.many b6=non_ia32_syscall

	adds r14=IA64_PT_REGS_R8_OFFSET + 16,sp	// 16 byte hole per SW conventions
	adds r15=IA64_PT_REGS_R1_OFFSET + 16,sp
	;;
	cmp.eq pSys,pNonSys=r0,r0	// set pSys=1, pNonSys=0
	ld8 r8=[r14]			// get r8
	;;
	st8 [r15]=r8			// save original EAX in r1 (IA32 procs don't use the GP)
	;;
	alloc r15=ar.pfs,0,0,6,0	// must be first in an insn group
	;;
	ld4 r8=[r14],8			// r8 == eax (syscall number)
	mov r15=IA32_NR_syscalls
	;;
	cmp.ltu.unc p6,p7=r8,r15
	ld4 out1=[r14],8		// r9 == ecx
	;;
	ld4 out2=[r14],8		// r10 == edx
	;;
	ld4 out0=[r14]			// r11 == ebx
	adds r14=(IA64_PT_REGS_R13_OFFSET) + 16,sp
	;;
	ld4 out5=[r14],PT(R14)-PT(R13)	// r13 == ebp
	;;
	ld4 out3=[r14],PT(R15)-PT(R14)	// r14 == esi
	adds r2=TI_FLAGS+IA64_TASK_SIZE,r13
	;;
	ld4 out4=[r14]			// r15 == edi
	movl r16=ia32_syscall_table
	;;
(p6)	shladd r16=r8,3,r16		// force ni_syscall if not valid syscall number
	ld4 r2=[r2]			// r2 = current_thread_info()->flags
	;;
	ld8 r16=[r16]
	and r2=_TIF_SYSCALL_TRACEAUDIT,r2	// mask trace or audit
	;;
	mov b6=r16
	movl r15=ia32_ret_from_syscall
	cmp.eq p8,p0=r2,r0
	;;
	mov rp=r15
(p8)	br.call.sptk.many b6=b6
	br.cond.sptk ia32_trace_syscall

non_ia32_syscall:
	alloc r15=ar.pfs,0,0,2,0
	mov out0=r14			// interrupt #
	add out1=16,sp			// pointer to pt_regs
	;;				// avoid WAW on CFM
	br.call.sptk.many rp=ia32_bad_interrupt
.ret1:	movl r15=ia64_leave_kernel
	;;
	mov rp=r15
	br.ret.sptk.many rp
END(dispatch_to_ia32_handler)

#endif /* CONFIG_IA32_SUPPORT */