1/* SPDX-License-Identifier: GPL-2.0-or-later */ 2/* 3 * Linux/PA-RISC Project (http://www.parisc-linux.org/) 4 * 5 * kernel entry points (interruptions, system call wrappers) 6 * Copyright (C) 1999,2000 Philipp Rumpf 7 * Copyright (C) 1999 SuSE GmbH Nuernberg 8 * Copyright (C) 2000 Hewlett-Packard (John Marvin) 9 * Copyright (C) 1999 Hewlett-Packard (Frank Rowand) 10 */ 11 12#include <asm/asm-offsets.h> 13 14/* we have the following possibilities to act on an interruption: 15 * - handle in assembly and use shadowed registers only 16 * - save registers to kernel stack and handle in assembly or C */ 17 18 19#include <asm/psw.h> 20#include <asm/cache.h> /* for L1_CACHE_SHIFT */ 21#include <asm/assembly.h> /* for LDREG/STREG defines */ 22#include <asm/signal.h> 23#include <asm/unistd.h> 24#include <asm/ldcw.h> 25#include <asm/traps.h> 26#include <asm/thread_info.h> 27#include <asm/alternative.h> 28#include <asm/spinlock_types.h> 29 30#include <linux/linkage.h> 31#include <linux/pgtable.h> 32 33#ifdef CONFIG_64BIT 34 .level 2.0w 35#else 36 .level 2.0 37#endif 38 39/* 40 * We need seven instructions after a TLB insert for it to take effect. 41 * The PA8800/PA8900 processors are an exception and need 12 instructions. 42 * The RFI changes both IAOQ_Back and IAOQ_Front, so it counts as one. 43 */ 44#ifdef CONFIG_64BIT 45#define NUM_PIPELINE_INSNS 12 46#else 47#define NUM_PIPELINE_INSNS 7 48#endif 49 50 /* Insert num nops */ 51 .macro insert_nops num 52 .rept \num 53 nop 54 .endr 55 .endm 56 57 /* Get aligned page_table_lock address for this mm from cr28/tr4 */ 58 .macro get_ptl reg 59 mfctl %cr28,\reg 60 .endm 61 62 /* space_to_prot macro creates a prot id from a space id */ 63 64#if (SPACEID_SHIFT) == 0 65 .macro space_to_prot spc prot 66 depd,z \spc,62,31,\prot 67 .endm 68#else 69 .macro space_to_prot spc prot 70 extrd,u \spc,(64 - (SPACEID_SHIFT)),32,\prot 71 .endm 72#endif 73 /* 74 * The "get_stack" macros are responsible for determining the 75 * kernel stack value. 76 * 77 * If sr7 == 0 78 * Already using a kernel stack, so call the 79 * get_stack_use_r30 macro to push a pt_regs structure 80 * on the stack, and store registers there. 81 * else 82 * Need to set up a kernel stack, so call the 83 * get_stack_use_cr30 macro to set up a pointer 84 * to the pt_regs structure contained within the 85 * task pointer pointed to by cr30. Load the stack 86 * pointer from the task structure. 87 * 88 * Note that we use shadowed registers for temps until 89 * we can save %r26 and %r29. %r26 is used to preserve 90 * %r8 (a shadowed register) which temporarily contained 91 * either the fault type ("code") or the eirr. We need 92 * to use a non-shadowed register to carry the value over 93 * the rfir in virt_map. We use %r26 since this value winds 94 * up being passed as the argument to either do_cpu_irq_mask 95 * or handle_interruption. %r29 is used to hold a pointer 96 * the register save area, and once again, it needs to 97 * be a non-shadowed register so that it survives the rfir. 98 */ 99 100 .macro get_stack_use_cr30 101 102 /* we save the registers in the task struct */ 103 104 copy %r30, %r17 105 mfctl %cr30, %r1 106 tophys %r1,%r9 /* task_struct */ 107 LDREG TASK_STACK(%r9),%r30 108 ldo PT_SZ_ALGN(%r30),%r30 109 mtsp %r0,%sr7 /* clear sr7 after kernel stack was set! */ 110 mtsp %r16,%sr3 111 ldo TASK_REGS(%r9),%r9 112 STREG %r17,PT_GR30(%r9) 113 STREG %r29,PT_GR29(%r9) 114 STREG %r26,PT_GR26(%r9) 115 STREG %r16,PT_SR7(%r9) 116 copy %r9,%r29 117 .endm 118 119 .macro get_stack_use_r30 120 121 /* we put a struct pt_regs on the stack and save the registers there */ 122 123 tophys %r30,%r9 124 copy %r30,%r1 125 ldo PT_SZ_ALGN(%r30),%r30 126 STREG %r1,PT_GR30(%r9) 127 STREG %r29,PT_GR29(%r9) 128 STREG %r26,PT_GR26(%r9) 129 STREG %r16,PT_SR7(%r9) 130 copy %r9,%r29 131 .endm 132 133 .macro rest_stack 134 LDREG PT_GR1(%r29), %r1 135 LDREG PT_GR30(%r29),%r30 136 LDREG PT_GR29(%r29),%r29 137 .endm 138 139 /* default interruption handler 140 * (calls traps.c:handle_interruption) */ 141 .macro def code 142 b intr_save 143 ldi \code, %r8 144 .align 32 145 .endm 146 147 /* Interrupt interruption handler 148 * (calls irq.c:do_cpu_irq_mask) */ 149 .macro extint code 150 b intr_extint 151 mfsp %sr7,%r16 152 .align 32 153 .endm 154 155 .import os_hpmc, code 156 157 /* HPMC handler */ 158 .macro hpmc code 159 nop /* must be a NOP, will be patched later */ 160 load32 PA(os_hpmc), %r3 161 bv,n 0(%r3) 162 nop 163 .word 0 /* checksum (will be patched) */ 164 .word 0 /* address of handler */ 165 .word 0 /* length of handler */ 166 .endm 167 168 /* 169 * Performance Note: Instructions will be moved up into 170 * this part of the code later on, once we are sure 171 * that the tlb miss handlers are close to final form. 172 */ 173 174 /* Register definitions for tlb miss handler macros */ 175 176 va = r8 /* virtual address for which the trap occurred */ 177 spc = r24 /* space for which the trap occurred */ 178 179#ifndef CONFIG_64BIT 180 181 /* 182 * itlb miss interruption handler (parisc 1.1 - 32 bit) 183 */ 184 185 .macro itlb_11 code 186 187 mfctl %pcsq, spc 188 b itlb_miss_11 189 mfctl %pcoq, va 190 191 .align 32 192 .endm 193#endif 194 195 /* 196 * itlb miss interruption handler (parisc 2.0) 197 */ 198 199 .macro itlb_20 code 200 mfctl %pcsq, spc 201#ifdef CONFIG_64BIT 202 b itlb_miss_20w 203#else 204 b itlb_miss_20 205#endif 206 mfctl %pcoq, va 207 208 .align 32 209 .endm 210 211#ifndef CONFIG_64BIT 212 /* 213 * naitlb miss interruption handler (parisc 1.1 - 32 bit) 214 */ 215 216 .macro naitlb_11 code 217 218 mfctl %isr,spc 219 b naitlb_miss_11 220 mfctl %ior,va 221 222 .align 32 223 .endm 224#endif 225 226 /* 227 * naitlb miss interruption handler (parisc 2.0) 228 */ 229 230 .macro naitlb_20 code 231 232 mfctl %isr,spc 233#ifdef CONFIG_64BIT 234 b naitlb_miss_20w 235#else 236 b naitlb_miss_20 237#endif 238 mfctl %ior,va 239 240 .align 32 241 .endm 242 243#ifndef CONFIG_64BIT 244 /* 245 * dtlb miss interruption handler (parisc 1.1 - 32 bit) 246 */ 247 248 .macro dtlb_11 code 249 250 mfctl %isr, spc 251 b dtlb_miss_11 252 mfctl %ior, va 253 254 .align 32 255 .endm 256#endif 257 258 /* 259 * dtlb miss interruption handler (parisc 2.0) 260 */ 261 262 .macro dtlb_20 code 263 264 mfctl %isr, spc 265#ifdef CONFIG_64BIT 266 b dtlb_miss_20w 267#else 268 b dtlb_miss_20 269#endif 270 mfctl %ior, va 271 272 .align 32 273 .endm 274 275#ifndef CONFIG_64BIT 276 /* nadtlb miss interruption handler (parisc 1.1 - 32 bit) */ 277 278 .macro nadtlb_11 code 279 280 mfctl %isr,spc 281 b nadtlb_miss_11 282 mfctl %ior,va 283 284 .align 32 285 .endm 286#endif 287 288 /* nadtlb miss interruption handler (parisc 2.0) */ 289 290 .macro nadtlb_20 code 291 292 mfctl %isr,spc 293#ifdef CONFIG_64BIT 294 b nadtlb_miss_20w 295#else 296 b nadtlb_miss_20 297#endif 298 mfctl %ior,va 299 300 .align 32 301 .endm 302 303#ifndef CONFIG_64BIT 304 /* 305 * dirty bit trap interruption handler (parisc 1.1 - 32 bit) 306 */ 307 308 .macro dbit_11 code 309 310 mfctl %isr,spc 311 b dbit_trap_11 312 mfctl %ior,va 313 314 .align 32 315 .endm 316#endif 317 318 /* 319 * dirty bit trap interruption handler (parisc 2.0) 320 */ 321 322 .macro dbit_20 code 323 324 mfctl %isr,spc 325#ifdef CONFIG_64BIT 326 b dbit_trap_20w 327#else 328 b dbit_trap_20 329#endif 330 mfctl %ior,va 331 332 .align 32 333 .endm 334 335 /* In LP64, the space contains part of the upper 32 bits of the 336 * fault. We have to extract this and place it in the va, 337 * zeroing the corresponding bits in the space register */ 338 .macro space_adjust spc,va,tmp 339#ifdef CONFIG_64BIT 340 extrd,u \spc,63,SPACEID_SHIFT,\tmp 341 depd %r0,63,SPACEID_SHIFT,\spc 342 depd \tmp,31,SPACEID_SHIFT,\va 343#endif 344 .endm 345 346 .import swapper_pg_dir,code 347 348 /* Get the pgd. For faults on space zero (kernel space), this 349 * is simply swapper_pg_dir. For user space faults, the 350 * pgd is stored in %cr25 */ 351 .macro get_pgd spc,reg 352 ldil L%PA(swapper_pg_dir),\reg 353 ldo R%PA(swapper_pg_dir)(\reg),\reg 354 or,COND(=) %r0,\spc,%r0 355 mfctl %cr25,\reg 356 .endm 357 358 /* 359 space_check(spc,tmp,fault) 360 361 spc - The space we saw the fault with. 362 tmp - The place to store the current space. 363 fault - Function to call on failure. 364 365 Only allow faults on different spaces from the 366 currently active one if we're the kernel 367 368 */ 369 .macro space_check spc,tmp,fault 370 mfsp %sr7,\tmp 371 /* check against %r0 which is same value as LINUX_GATEWAY_SPACE */ 372 or,COND(<>) %r0,\spc,%r0 /* user may execute gateway page 373 * as kernel, so defeat the space 374 * check if it is */ 375 copy \spc,\tmp 376 or,COND(=) %r0,\tmp,%r0 /* nullify if executing as kernel */ 377 cmpb,COND(<>),n \tmp,\spc,\fault 378 .endm 379 380 /* Look up a PTE in a 2-Level scheme (faulting at each 381 * level if the entry isn't present 382 * 383 * NOTE: we use ldw even for LP64, since the short pointers 384 * can address up to 1TB 385 */ 386 .macro L2_ptep pmd,pte,index,va,fault 387#if CONFIG_PGTABLE_LEVELS == 3 388 extru_safe \va,31-ASM_PMD_SHIFT,ASM_BITS_PER_PMD,\index 389#else 390 extru_safe \va,31-ASM_PGDIR_SHIFT,ASM_BITS_PER_PGD,\index 391#endif 392 dep %r0,31,PAGE_SHIFT,\pmd /* clear offset */ 393#if CONFIG_PGTABLE_LEVELS < 3 394 copy %r0,\pte 395#endif 396 ldw,s \index(\pmd),\pmd 397 bb,>=,n \pmd,_PxD_PRESENT_BIT,\fault 398 dep %r0,31,PxD_FLAG_SHIFT,\pmd /* clear flags */ 399 SHLREG \pmd,PxD_VALUE_SHIFT,\pmd 400 extru_safe \va,31-PAGE_SHIFT,ASM_BITS_PER_PTE,\index 401 dep %r0,31,PAGE_SHIFT,\pmd /* clear offset */ 402 shladd \index,BITS_PER_PTE_ENTRY,\pmd,\pmd /* pmd is now pte */ 403 .endm 404 405 /* Look up PTE in a 3-Level scheme. */ 406 .macro L3_ptep pgd,pte,index,va,fault 407#if CONFIG_PGTABLE_LEVELS == 3 408 copy %r0,\pte 409 extrd,u \va,63-ASM_PGDIR_SHIFT,ASM_BITS_PER_PGD,\index 410 ldw,s \index(\pgd),\pgd 411 bb,>=,n \pgd,_PxD_PRESENT_BIT,\fault 412 shld \pgd,PxD_VALUE_SHIFT,\pgd 413#endif 414 L2_ptep \pgd,\pte,\index,\va,\fault 415 .endm 416 417 /* Acquire page_table_lock and check page is present. */ 418 .macro ptl_lock spc,ptp,pte,tmp,tmp1,fault 419#ifdef CONFIG_TLB_PTLOCK 42098: cmpib,COND(=),n 0,\spc,2f 421 get_ptl \tmp 4221: LDCW 0(\tmp),\tmp1 423 cmpib,COND(=) 0,\tmp1,1b 424 nop 425 LDREG 0(\ptp),\pte 426 bb,<,n \pte,_PAGE_PRESENT_BIT,3f 427 b \fault 428 stw \tmp1,0(\tmp) 42999: ALTERNATIVE(98b, 99b, ALT_COND_NO_SMP, INSN_NOP) 430#endif 4312: LDREG 0(\ptp),\pte 432 bb,>=,n \pte,_PAGE_PRESENT_BIT,\fault 4333: 434 .endm 435 436 /* Release page_table_lock if for user space. We use an ordered 437 store to ensure all prior accesses are performed prior to 438 releasing the lock. Note stw may not be executed, so we 439 provide one extra nop when CONFIG_TLB_PTLOCK is defined. */ 440 .macro ptl_unlock spc,tmp,tmp2 441#ifdef CONFIG_TLB_PTLOCK 44298: get_ptl \tmp 443 ldi __ARCH_SPIN_LOCK_UNLOCKED_VAL, \tmp2 444 or,COND(=) %r0,\spc,%r0 445 stw,ma \tmp2,0(\tmp) 44699: ALTERNATIVE(98b, 99b, ALT_COND_NO_SMP, INSN_NOP) 447 insert_nops NUM_PIPELINE_INSNS - 4 448#else 449 insert_nops NUM_PIPELINE_INSNS - 1 450#endif 451 .endm 452 453 /* Set the _PAGE_ACCESSED bit of the PTE. Be clever and 454 * don't needlessly dirty the cache line if it was already set */ 455 .macro update_accessed ptp,pte,tmp,tmp1 456 ldi _PAGE_ACCESSED,\tmp1 457 or \tmp1,\pte,\tmp 458 and,COND(<>) \tmp1,\pte,%r0 459 STREG \tmp,0(\ptp) 460 .endm 461 462 /* Set the dirty bit (and accessed bit). No need to be 463 * clever, this is only used from the dirty fault */ 464 .macro update_dirty ptp,pte,tmp 465 ldi _PAGE_ACCESSED|_PAGE_DIRTY,\tmp 466 or \tmp,\pte,\pte 467 STREG \pte,0(\ptp) 468 .endm 469 470 /* We have (depending on the page size): 471 * - 38 to 52-bit Physical Page Number 472 * - 12 to 26-bit page offset 473 */ 474 /* bitshift difference between a PFN (based on kernel's PAGE_SIZE) 475 * to a CPU TLB 4k PFN (4k => 12 bits to shift) */ 476 #define PAGE_ADD_SHIFT (PAGE_SHIFT-12) 477 #define PAGE_ADD_HUGE_SHIFT (REAL_HPAGE_SHIFT-12) 478 479 /* Drop prot bits and convert to page addr for iitlbt and idtlbt */ 480 .macro convert_for_tlb_insert20 pte,tmp 481#ifdef CONFIG_HUGETLB_PAGE 482 copy \pte,\tmp 483 extrd,u \tmp,(63-ASM_PFN_PTE_SHIFT)+(63-58)+PAGE_ADD_SHIFT,\ 484 64-PAGE_SHIFT-PAGE_ADD_SHIFT,\pte 485 486 depdi _PAGE_SIZE_ENCODING_DEFAULT,63,\ 487 (63-58)+PAGE_ADD_SHIFT,\pte 488 extrd,u,*= \tmp,_PAGE_HPAGE_BIT+32,1,%r0 489 depdi _HUGE_PAGE_SIZE_ENCODING_DEFAULT,63,\ 490 (63-58)+PAGE_ADD_HUGE_SHIFT,\pte 491#else /* Huge pages disabled */ 492 extrd,u \pte,(63-ASM_PFN_PTE_SHIFT)+(63-58)+PAGE_ADD_SHIFT,\ 493 64-PAGE_SHIFT-PAGE_ADD_SHIFT,\pte 494 depdi _PAGE_SIZE_ENCODING_DEFAULT,63,\ 495 (63-58)+PAGE_ADD_SHIFT,\pte 496#endif 497 .endm 498 499 /* Convert the pte and prot to tlb insertion values. How 500 * this happens is quite subtle, read below */ 501 .macro make_insert_tlb spc,pte,prot,tmp 502 space_to_prot \spc \prot /* create prot id from space */ 503 /* The following is the real subtlety. This is depositing 504 * T <-> _PAGE_REFTRAP 505 * D <-> _PAGE_DIRTY 506 * B <-> _PAGE_DMB (memory break) 507 * 508 * Then incredible subtlety: The access rights are 509 * _PAGE_GATEWAY, _PAGE_EXEC and _PAGE_WRITE 510 * See 3-14 of the parisc 2.0 manual 511 * 512 * Finally, _PAGE_READ goes in the top bit of PL1 (so we 513 * trigger an access rights trap in user space if the user 514 * tries to read an unreadable page */ 515#if _PAGE_SPECIAL_BIT == _PAGE_DMB_BIT 516 /* need to drop DMB bit, as it's used as SPECIAL flag */ 517 depi 0,_PAGE_SPECIAL_BIT,1,\pte 518#endif 519 depd \pte,8,7,\prot 520 521 /* PAGE_USER indicates the page can be read with user privileges, 522 * so deposit X1|11 to PL1|PL2 (remember the upper bit of PL1 523 * contains _PAGE_READ) */ 524 extrd,u,*= \pte,_PAGE_USER_BIT+32,1,%r0 525 depdi 7,11,3,\prot 526 /* If we're a gateway page, drop PL2 back to zero for promotion 527 * to kernel privilege (so we can execute the page as kernel). 528 * Any privilege promotion page always denys read and write */ 529 extrd,u,*= \pte,_PAGE_GATEWAY_BIT+32,1,%r0 530 depd %r0,11,2,\prot /* If Gateway, Set PL2 to 0 */ 531 532 /* Enforce uncacheable pages. 533 * This should ONLY be use for MMIO on PA 2.0 machines. 534 * Memory/DMA is cache coherent on all PA2.0 machines we support 535 * (that means T-class is NOT supported) and the memory controllers 536 * on most of those machines only handles cache transactions. 537 */ 538 extrd,u,*= \pte,_PAGE_NO_CACHE_BIT+32,1,%r0 539 depdi 1,12,1,\prot 540 541 /* Drop prot bits and convert to page addr for iitlbt and idtlbt */ 542 convert_for_tlb_insert20 \pte \tmp 543 .endm 544 545 /* Identical macro to make_insert_tlb above, except it 546 * makes the tlb entry for the differently formatted pa11 547 * insertion instructions */ 548 .macro make_insert_tlb_11 spc,pte,prot 549#if _PAGE_SPECIAL_BIT == _PAGE_DMB_BIT 550 /* need to drop DMB bit, as it's used as SPECIAL flag */ 551 depi 0,_PAGE_SPECIAL_BIT,1,\pte 552#endif 553 zdep \spc,30,15,\prot 554 dep \pte,8,7,\prot 555 extru,= \pte,_PAGE_NO_CACHE_BIT,1,%r0 556 depi 1,12,1,\prot 557 extru,= \pte,_PAGE_USER_BIT,1,%r0 558 depi 7,11,3,\prot /* Set for user space (1 rsvd for read) */ 559 extru,= \pte,_PAGE_GATEWAY_BIT,1,%r0 560 depi 0,11,2,\prot /* If Gateway, Set PL2 to 0 */ 561 562 /* Get rid of prot bits and convert to page addr for iitlba */ 563 564 depi 0,31,ASM_PFN_PTE_SHIFT,\pte 565 SHRREG \pte,(ASM_PFN_PTE_SHIFT-(31-26)),\pte 566 .endm 567 568 /* This is for ILP32 PA2.0 only. The TLB insertion needs 569 * to extend into I/O space if the address is 0xfXXXXXXX 570 * so we extend the f's into the top word of the pte in 571 * this case */ 572 .macro f_extend pte,tmp 573 extrd,s \pte,42,4,\tmp 574 addi,<> 1,\tmp,%r0 575 extrd,s \pte,63,25,\pte 576 .endm 577 578 /* The alias region is comprised of a pair of 4 MB regions 579 * aligned to 8 MB. It is used to clear/copy/flush user pages 580 * using kernel virtual addresses congruent with the user 581 * virtual address. 582 * 583 * To use the alias page, you set %r26 up with the to TLB 584 * entry (identifying the physical page) and %r23 up with 585 * the from tlb entry (or nothing if only a to entry---for 586 * clear_user_page_asm) */ 587 .macro do_alias spc,tmp,tmp1,va,pte,prot,fault,patype 588 cmpib,COND(<>),n 0,\spc,\fault 589 ldil L%(TMPALIAS_MAP_START),\tmp 590 copy \va,\tmp1 591 depi_safe 0,31,TMPALIAS_SIZE_BITS+1,\tmp1 592 cmpb,COND(<>),n \tmp,\tmp1,\fault 593 mfctl %cr19,\tmp /* iir */ 594 /* get the opcode (first six bits) into \tmp */ 595 extrw,u \tmp,5,6,\tmp 596 /* 597 * Only setting the T bit prevents data cache movein 598 * Setting access rights to zero prevents instruction cache movein 599 * 600 * Note subtlety here: _PAGE_GATEWAY, _PAGE_EXEC and _PAGE_WRITE go 601 * to type field and _PAGE_READ goes to top bit of PL1 602 */ 603 ldi (_PAGE_REFTRAP|_PAGE_READ|_PAGE_WRITE),\prot 604 /* 605 * so if the opcode is one (i.e. this is a memory management 606 * instruction) nullify the next load so \prot is only T. 607 * Otherwise this is a normal data operation 608 */ 609 cmpiclr,= 0x01,\tmp,%r0 610 ldi (_PAGE_DIRTY|_PAGE_READ|_PAGE_WRITE),\prot 611.ifc \patype,20 612 depd,z \prot,8,7,\prot 613.else 614.ifc \patype,11 615 depw,z \prot,8,7,\prot 616.else 617 .error "undefined PA type to do_alias" 618.endif 619.endif 620 /* 621 * OK, it is in the temp alias region, check whether "from" or "to". 622 * Check "subtle" note in pacache.S re: r23/r26. 623 */ 624 extrw,u,= \va,31-TMPALIAS_SIZE_BITS,1,%r0 625 or,COND(tr) %r23,%r0,\pte 626 or %r26,%r0,\pte 627 628 /* convert phys addr in \pte (from r23 or r26) to tlb insert format */ 629 SHRREG \pte,PAGE_SHIFT+PAGE_ADD_SHIFT-5, \pte 630 depi_safe _PAGE_SIZE_ENCODING_DEFAULT, 31,5, \pte 631 .endm 632 633 634 /* 635 * Fault_vectors are architecturally required to be aligned on a 2K 636 * boundary 637 */ 638 639 .section .text.hot 640 .align 2048 641 642ENTRY(fault_vector_20) 643 /* First vector is invalid (0) */ 644 .ascii "cows can fly" 645 .byte 0 646 .align 32 647 648 hpmc 1 649 def 2 650 def 3 651 extint 4 652 def 5 653 itlb_20 PARISC_ITLB_TRAP 654 def 7 655 def 8 656 def 9 657 def 10 658 def 11 659 def 12 660 def 13 661 def 14 662 dtlb_20 15 663 naitlb_20 16 664 nadtlb_20 17 665 def 18 666 def 19 667 dbit_20 20 668 def 21 669 def 22 670 def 23 671 def 24 672 def 25 673 def 26 674 def 27 675 def 28 676 def 29 677 def 30 678 def 31 679END(fault_vector_20) 680 681#ifndef CONFIG_64BIT 682 683 .align 2048 684 685ENTRY(fault_vector_11) 686 /* First vector is invalid (0) */ 687 .ascii "cows can fly" 688 .byte 0 689 .align 32 690 691 hpmc 1 692 def 2 693 def 3 694 extint 4 695 def 5 696 itlb_11 PARISC_ITLB_TRAP 697 def 7 698 def 8 699 def 9 700 def 10 701 def 11 702 def 12 703 def 13 704 def 14 705 dtlb_11 15 706 naitlb_11 16 707 nadtlb_11 17 708 def 18 709 def 19 710 dbit_11 20 711 def 21 712 def 22 713 def 23 714 def 24 715 def 25 716 def 26 717 def 27 718 def 28 719 def 29 720 def 30 721 def 31 722END(fault_vector_11) 723 724#endif 725 /* Fault vector is separately protected and *must* be on its own page */ 726 .align PAGE_SIZE 727 728 .import handle_interruption,code 729 .import do_cpu_irq_mask,code 730 731 /* 732 * Child Returns here 733 * 734 * copy_thread moved args into task save area. 735 */ 736 737ENTRY(ret_from_kernel_thread) 738 /* Call schedule_tail first though */ 739 BL schedule_tail, %r2 740 nop 741 742 mfctl %cr30,%r1 /* task_struct */ 743 LDREG TASK_PT_GR25(%r1), %r26 744#ifdef CONFIG_64BIT 745 LDREG TASK_PT_GR27(%r1), %r27 746#endif 747 LDREG TASK_PT_GR26(%r1), %r1 748 ble 0(%sr7, %r1) 749 copy %r31, %r2 750 b finish_child_return 751 nop 752END(ret_from_kernel_thread) 753 754 755 /* 756 * struct task_struct *_switch_to(struct task_struct *prev, 757 * struct task_struct *next) 758 * 759 * switch kernel stacks and return prev */ 760ENTRY_CFI(_switch_to) 761 STREG %r2, -RP_OFFSET(%r30) 762 763 callee_save_float 764 callee_save 765 766 load32 _switch_to_ret, %r2 767 768 STREG %r2, TASK_PT_KPC(%r26) 769 LDREG TASK_PT_KPC(%r25), %r2 770 771 STREG %r30, TASK_PT_KSP(%r26) 772 LDREG TASK_PT_KSP(%r25), %r30 773 bv %r0(%r2) 774 mtctl %r25,%cr30 775 776ENTRY(_switch_to_ret) 777 mtctl %r0, %cr0 /* Needed for single stepping */ 778 callee_rest 779 callee_rest_float 780 781 LDREG -RP_OFFSET(%r30), %r2 782 bv %r0(%r2) 783 copy %r26, %r28 784ENDPROC_CFI(_switch_to) 785 786 /* 787 * Common rfi return path for interruptions, kernel execve, and 788 * sys_rt_sigreturn (sometimes). The sys_rt_sigreturn syscall will 789 * return via this path if the signal was received when the process 790 * was running; if the process was blocked on a syscall then the 791 * normal syscall_exit path is used. All syscalls for traced 792 * proceses exit via intr_restore. 793 * 794 * XXX If any syscalls that change a processes space id ever exit 795 * this way, then we will need to copy %sr3 in to PT_SR[3..7], and 796 * adjust IASQ[0..1]. 797 * 798 */ 799 800 .align PAGE_SIZE 801 802ENTRY_CFI(syscall_exit_rfi) 803 mfctl %cr30,%r16 /* task_struct */ 804 ldo TASK_REGS(%r16),%r16 805 /* Force iaoq to userspace, as the user has had access to our current 806 * context via sigcontext. Also Filter the PSW for the same reason. 807 */ 808 LDREG PT_IAOQ0(%r16),%r19 809 depi PRIV_USER,31,2,%r19 810 STREG %r19,PT_IAOQ0(%r16) 811 LDREG PT_IAOQ1(%r16),%r19 812 depi PRIV_USER,31,2,%r19 813 STREG %r19,PT_IAOQ1(%r16) 814 LDREG PT_PSW(%r16),%r19 815 load32 USER_PSW_MASK,%r1 816#ifdef CONFIG_64BIT 817 load32 USER_PSW_HI_MASK,%r20 818 depd %r20,31,32,%r1 819#endif 820 and %r19,%r1,%r19 /* Mask out bits that user shouldn't play with */ 821 load32 USER_PSW,%r1 822 or %r19,%r1,%r19 /* Make sure default USER_PSW bits are set */ 823 STREG %r19,PT_PSW(%r16) 824 825 /* 826 * If we aren't being traced, we never saved space registers 827 * (we don't store them in the sigcontext), so set them 828 * to "proper" values now (otherwise we'll wind up restoring 829 * whatever was last stored in the task structure, which might 830 * be inconsistent if an interrupt occurred while on the gateway 831 * page). Note that we may be "trashing" values the user put in 832 * them, but we don't support the user changing them. 833 */ 834 835 STREG %r0,PT_SR2(%r16) 836 mfsp %sr3,%r19 837 STREG %r19,PT_SR0(%r16) 838 STREG %r19,PT_SR1(%r16) 839 STREG %r19,PT_SR3(%r16) 840 STREG %r19,PT_SR4(%r16) 841 STREG %r19,PT_SR5(%r16) 842 STREG %r19,PT_SR6(%r16) 843 STREG %r19,PT_SR7(%r16) 844 845ENTRY(intr_return) 846 /* check for reschedule */ 847 mfctl %cr30,%r1 848 LDREG TASK_TI_FLAGS(%r1),%r19 /* sched.h: TIF_NEED_RESCHED */ 849 bb,<,n %r19,31-TIF_NEED_RESCHED,intr_do_resched /* forward */ 850 851 .import do_notify_resume,code 852intr_check_sig: 853 /* As above */ 854 mfctl %cr30,%r1 855 LDREG TASK_TI_FLAGS(%r1),%r19 856 ldi (_TIF_USER_WORK_MASK & ~_TIF_NEED_RESCHED), %r20 857 and,COND(<>) %r19, %r20, %r0 858 b,n intr_restore /* skip past if we've nothing to do */ 859 860 /* This check is critical to having LWS 861 * working. The IASQ is zero on the gateway 862 * page and we cannot deliver any signals until 863 * we get off the gateway page. 864 * 865 * Only do signals if we are returning to user space 866 */ 867 LDREG PT_IASQ0(%r16), %r20 868 cmpib,COND(=),n LINUX_GATEWAY_SPACE, %r20, intr_restore /* forward */ 869 LDREG PT_IASQ1(%r16), %r20 870 cmpib,COND(=),n LINUX_GATEWAY_SPACE, %r20, intr_restore /* forward */ 871 872 copy %r0, %r25 /* long in_syscall = 0 */ 873#ifdef CONFIG_64BIT 874 ldo -16(%r30),%r29 /* Reference param save area */ 875#endif 876 877 /* NOTE: We need to enable interrupts if we have to deliver 878 * signals. We used to do this earlier but it caused kernel 879 * stack overflows. */ 880 ssm PSW_SM_I, %r0 881 882 BL do_notify_resume,%r2 883 copy %r16, %r26 /* struct pt_regs *regs */ 884 885 b,n intr_check_sig 886 887intr_restore: 888 copy %r16,%r29 889 ldo PT_FR31(%r29),%r1 890 rest_fp %r1 891 rest_general %r29 892 893 /* inverse of virt_map */ 894 pcxt_ssm_bug 895 rsm PSW_SM_QUIET,%r0 /* prepare for rfi */ 896 tophys_r1 %r29 897 898 /* Restore space id's and special cr's from PT_REGS 899 * structure pointed to by r29 900 */ 901 rest_specials %r29 902 903 /* IMPORTANT: rest_stack restores r29 last (we are using it)! 904 * It also restores r1 and r30. 905 */ 906 rest_stack 907 908 rfi 909 nop 910 911#ifndef CONFIG_PREEMPTION 912# define intr_do_preempt intr_restore 913#endif /* !CONFIG_PREEMPTION */ 914 915 .import schedule,code 916intr_do_resched: 917 /* Only call schedule on return to userspace. If we're returning 918 * to kernel space, we may schedule if CONFIG_PREEMPTION, otherwise 919 * we jump back to intr_restore. 920 */ 921 LDREG PT_IASQ0(%r16), %r20 922 cmpib,COND(=) 0, %r20, intr_do_preempt 923 nop 924 LDREG PT_IASQ1(%r16), %r20 925 cmpib,COND(=) 0, %r20, intr_do_preempt 926 nop 927 928 /* NOTE: We need to enable interrupts if we schedule. We used 929 * to do this earlier but it caused kernel stack overflows. */ 930 ssm PSW_SM_I, %r0 931 932#ifdef CONFIG_64BIT 933 ldo -16(%r30),%r29 /* Reference param save area */ 934#endif 935 936 ldil L%intr_check_sig, %r2 937#ifndef CONFIG_64BIT 938 b schedule 939#else 940 load32 schedule, %r20 941 bv %r0(%r20) 942#endif 943 ldo R%intr_check_sig(%r2), %r2 944 945 /* preempt the current task on returning to kernel 946 * mode from an interrupt, iff need_resched is set, 947 * and preempt_count is 0. otherwise, we continue on 948 * our merry way back to the current running task. 949 */ 950#ifdef CONFIG_PREEMPTION 951 .import preempt_schedule_irq,code 952intr_do_preempt: 953 rsm PSW_SM_I, %r0 /* disable interrupts */ 954 955 /* current_thread_info()->preempt_count */ 956 mfctl %cr30, %r1 957 ldw TI_PRE_COUNT(%r1), %r19 958 cmpib,<> 0, %r19, intr_restore /* if preempt_count > 0 */ 959 nop /* prev insn branched backwards */ 960 961 /* check if we interrupted a critical path */ 962 LDREG PT_PSW(%r16), %r20 963 bb,<,n %r20, 31 - PSW_SM_I, intr_restore 964 nop 965 966 /* ssm PSW_SM_I done later in intr_restore */ 967#ifdef CONFIG_MLONGCALLS 968 ldil L%intr_restore, %r2 969 load32 preempt_schedule_irq, %r1 970 bv %r0(%r1) 971 ldo R%intr_restore(%r2), %r2 972#else 973 ldil L%intr_restore, %r1 974 BL preempt_schedule_irq, %r2 975 ldo R%intr_restore(%r1), %r2 976#endif 977#endif /* CONFIG_PREEMPTION */ 978 979 /* 980 * External interrupts. 981 */ 982 983intr_extint: 984 cmpib,COND(=),n 0,%r16,1f 985 986 get_stack_use_cr30 987 b,n 2f 988 9891: 990 get_stack_use_r30 9912: 992 save_specials %r29 993 virt_map 994 save_general %r29 995 996 ldo PT_FR0(%r29), %r24 997 save_fp %r24 998 999 loadgp 1000 1001 copy %r29, %r26 /* arg0 is pt_regs */ 1002 copy %r29, %r16 /* save pt_regs */ 1003 1004 ldil L%intr_return, %r2 1005 1006#ifdef CONFIG_64BIT 1007 ldo -16(%r30),%r29 /* Reference param save area */ 1008#endif 1009 1010 b do_cpu_irq_mask 1011 ldo R%intr_return(%r2), %r2 /* return to intr_return, not here */ 1012ENDPROC_CFI(syscall_exit_rfi) 1013 1014 1015 /* Generic interruptions (illegal insn, unaligned, page fault, etc) */ 1016 1017ENTRY_CFI(intr_save) /* for os_hpmc */ 1018 mfsp %sr7,%r16 1019 cmpib,COND(=),n 0,%r16,1f 1020 get_stack_use_cr30 1021 b 2f 1022 copy %r8,%r26 1023 10241: 1025 get_stack_use_r30 1026 copy %r8,%r26 1027 10282: 1029 save_specials %r29 1030 1031 /* If this trap is a itlb miss, skip saving/adjusting isr/ior */ 1032 cmpib,COND(=),n PARISC_ITLB_TRAP,%r26,skip_save_ior 1033 1034 1035 mfctl %isr, %r16 1036 nop /* serialize mfctl on PA 2.0 to avoid 4 cycle penalty */ 1037 mfctl %ior, %r17 1038 1039 1040#ifdef CONFIG_64BIT 1041 /* 1042 * If the interrupted code was running with W bit off (32 bit), 1043 * clear the b bits (bits 0 & 1) in the ior. 1044 * save_specials left ipsw value in r8 for us to test. 1045 */ 1046 extrd,u,*<> %r8,PSW_W_BIT,1,%r0 1047 depdi 0,1,2,%r17 1048 1049 /* adjust isr/ior: get high bits from isr and deposit in ior */ 1050 space_adjust %r16,%r17,%r1 1051#endif 1052 STREG %r16, PT_ISR(%r29) 1053 STREG %r17, PT_IOR(%r29) 1054 1055#if 0 && defined(CONFIG_64BIT) 1056 /* Revisit when we have 64-bit code above 4Gb */ 1057 b,n intr_save2 1058 1059skip_save_ior: 1060 /* We have a itlb miss, and when executing code above 4 Gb on ILP64, we 1061 * need to adjust iasq/iaoq here in the same way we adjusted isr/ior 1062 * above. 1063 */ 1064 extrd,u,* %r8,PSW_W_BIT,1,%r1 1065 cmpib,COND(=),n 1,%r1,intr_save2 1066 LDREG PT_IASQ0(%r29), %r16 1067 LDREG PT_IAOQ0(%r29), %r17 1068 /* adjust iasq/iaoq */ 1069 space_adjust %r16,%r17,%r1 1070 STREG %r16, PT_IASQ0(%r29) 1071 STREG %r17, PT_IAOQ0(%r29) 1072#else 1073skip_save_ior: 1074#endif 1075 1076intr_save2: 1077 virt_map 1078 save_general %r29 1079 1080 ldo PT_FR0(%r29), %r25 1081 save_fp %r25 1082 1083 loadgp 1084 1085 copy %r29, %r25 /* arg1 is pt_regs */ 1086#ifdef CONFIG_64BIT 1087 ldo -16(%r30),%r29 /* Reference param save area */ 1088#endif 1089 1090 ldil L%intr_check_sig, %r2 1091 copy %r25, %r16 /* save pt_regs */ 1092 1093 b handle_interruption 1094 ldo R%intr_check_sig(%r2), %r2 1095ENDPROC_CFI(intr_save) 1096 1097 1098 /* 1099 * Note for all tlb miss handlers: 1100 * 1101 * cr24 contains a pointer to the kernel address space 1102 * page directory. 1103 * 1104 * cr25 contains a pointer to the current user address 1105 * space page directory. 1106 * 1107 * sr3 will contain the space id of the user address space 1108 * of the current running thread while that thread is 1109 * running in the kernel. 1110 */ 1111 1112 /* 1113 * register number allocations. Note that these are all 1114 * in the shadowed registers 1115 */ 1116 1117 t0 = r1 /* temporary register 0 */ 1118 va = r8 /* virtual address for which the trap occurred */ 1119 t1 = r9 /* temporary register 1 */ 1120 pte = r16 /* pte/phys page # */ 1121 prot = r17 /* prot bits */ 1122 spc = r24 /* space for which the trap occurred */ 1123 ptp = r25 /* page directory/page table pointer */ 1124 1125#ifdef CONFIG_64BIT 1126 1127dtlb_miss_20w: 1128 space_adjust spc,va,t0 1129 get_pgd spc,ptp 1130 space_check spc,t0,dtlb_fault 1131 1132 L3_ptep ptp,pte,t0,va,dtlb_check_alias_20w 1133 1134 ptl_lock spc,ptp,pte,t0,t1,dtlb_check_alias_20w 1135 update_accessed ptp,pte,t0,t1 1136 1137 make_insert_tlb spc,pte,prot,t1 1138 1139 idtlbt pte,prot 1140 1141 ptl_unlock spc,t0,t1 1142 rfir 1143 nop 1144 1145dtlb_check_alias_20w: 1146 do_alias spc,t0,t1,va,pte,prot,dtlb_fault,20 1147 1148 idtlbt pte,prot 1149 1150 insert_nops NUM_PIPELINE_INSNS - 1 1151 rfir 1152 nop 1153 1154nadtlb_miss_20w: 1155 space_adjust spc,va,t0 1156 get_pgd spc,ptp 1157 space_check spc,t0,nadtlb_fault 1158 1159 L3_ptep ptp,pte,t0,va,nadtlb_check_alias_20w 1160 1161 ptl_lock spc,ptp,pte,t0,t1,nadtlb_check_alias_20w 1162 update_accessed ptp,pte,t0,t1 1163 1164 make_insert_tlb spc,pte,prot,t1 1165 1166 idtlbt pte,prot 1167 1168 ptl_unlock spc,t0,t1 1169 rfir 1170 nop 1171 1172nadtlb_check_alias_20w: 1173 do_alias spc,t0,t1,va,pte,prot,nadtlb_emulate,20 1174 1175 idtlbt pte,prot 1176 1177 insert_nops NUM_PIPELINE_INSNS - 1 1178 rfir 1179 nop 1180 1181#else 1182 1183dtlb_miss_11: 1184 get_pgd spc,ptp 1185 1186 space_check spc,t0,dtlb_fault 1187 1188 L2_ptep ptp,pte,t0,va,dtlb_check_alias_11 1189 1190 ptl_lock spc,ptp,pte,t0,t1,dtlb_check_alias_11 1191 update_accessed ptp,pte,t0,t1 1192 1193 make_insert_tlb_11 spc,pte,prot 1194 1195 mfsp %sr1,t1 /* Save sr1 so we can use it in tlb inserts */ 1196 mtsp spc,%sr1 1197 1198 idtlba pte,(%sr1,va) 1199 idtlbp prot,(%sr1,va) 1200 1201 mtsp t1, %sr1 /* Restore sr1 */ 1202 1203 ptl_unlock spc,t0,t1 1204 rfir 1205 nop 1206 1207dtlb_check_alias_11: 1208 do_alias spc,t0,t1,va,pte,prot,dtlb_fault,11 1209 1210 idtlba pte,(va) 1211 idtlbp prot,(va) 1212 1213 insert_nops NUM_PIPELINE_INSNS - 1 1214 rfir 1215 nop 1216 1217nadtlb_miss_11: 1218 get_pgd spc,ptp 1219 1220 space_check spc,t0,nadtlb_fault 1221 1222 L2_ptep ptp,pte,t0,va,nadtlb_check_alias_11 1223 1224 ptl_lock spc,ptp,pte,t0,t1,nadtlb_check_alias_11 1225 update_accessed ptp,pte,t0,t1 1226 1227 make_insert_tlb_11 spc,pte,prot 1228 1229 mfsp %sr1,t1 /* Save sr1 so we can use it in tlb inserts */ 1230 mtsp spc,%sr1 1231 1232 idtlba pte,(%sr1,va) 1233 idtlbp prot,(%sr1,va) 1234 1235 mtsp t1, %sr1 /* Restore sr1 */ 1236 1237 ptl_unlock spc,t0,t1 1238 rfir 1239 nop 1240 1241nadtlb_check_alias_11: 1242 do_alias spc,t0,t1,va,pte,prot,nadtlb_emulate,11 1243 1244 idtlba pte,(va) 1245 idtlbp prot,(va) 1246 1247 insert_nops NUM_PIPELINE_INSNS - 1 1248 rfir 1249 nop 1250 1251dtlb_miss_20: 1252 space_adjust spc,va,t0 1253 get_pgd spc,ptp 1254 space_check spc,t0,dtlb_fault 1255 1256 L2_ptep ptp,pte,t0,va,dtlb_check_alias_20 1257 1258 ptl_lock spc,ptp,pte,t0,t1,dtlb_check_alias_20 1259 update_accessed ptp,pte,t0,t1 1260 1261 make_insert_tlb spc,pte,prot,t1 1262 1263 f_extend pte,t1 1264 1265 idtlbt pte,prot 1266 1267 ptl_unlock spc,t0,t1 1268 rfir 1269 nop 1270 1271dtlb_check_alias_20: 1272 do_alias spc,t0,t1,va,pte,prot,dtlb_fault,20 1273 1274 idtlbt pte,prot 1275 1276 insert_nops NUM_PIPELINE_INSNS - 1 1277 rfir 1278 nop 1279 1280nadtlb_miss_20: 1281 get_pgd spc,ptp 1282 1283 space_check spc,t0,nadtlb_fault 1284 1285 L2_ptep ptp,pte,t0,va,nadtlb_check_alias_20 1286 1287 ptl_lock spc,ptp,pte,t0,t1,nadtlb_check_alias_20 1288 update_accessed ptp,pte,t0,t1 1289 1290 make_insert_tlb spc,pte,prot,t1 1291 1292 f_extend pte,t1 1293 1294 idtlbt pte,prot 1295 1296 ptl_unlock spc,t0,t1 1297 rfir 1298 nop 1299 1300nadtlb_check_alias_20: 1301 do_alias spc,t0,t1,va,pte,prot,nadtlb_emulate,20 1302 1303 idtlbt pte,prot 1304 1305 insert_nops NUM_PIPELINE_INSNS - 1 1306 rfir 1307 nop 1308 1309#endif 1310 1311nadtlb_emulate: 1312 1313 /* 1314 * Non-access misses can be caused by fdc,fic,pdc,lpa,probe and 1315 * probei instructions. The kernel no longer faults doing flushes. 1316 * Use of lpa and probe instructions is rare. Given the issue 1317 * with shadow registers, we defer everything to the "slow" path. 1318 */ 1319 b,n nadtlb_fault 1320 1321#ifdef CONFIG_64BIT 1322itlb_miss_20w: 1323 1324 /* 1325 * I miss is a little different, since we allow users to fault 1326 * on the gateway page which is in the kernel address space. 1327 */ 1328 1329 space_adjust spc,va,t0 1330 get_pgd spc,ptp 1331 space_check spc,t0,itlb_fault 1332 1333 L3_ptep ptp,pte,t0,va,itlb_fault 1334 1335 ptl_lock spc,ptp,pte,t0,t1,itlb_fault 1336 update_accessed ptp,pte,t0,t1 1337 1338 make_insert_tlb spc,pte,prot,t1 1339 1340 iitlbt pte,prot 1341 1342 ptl_unlock spc,t0,t1 1343 rfir 1344 nop 1345 1346naitlb_miss_20w: 1347 1348 /* 1349 * I miss is a little different, since we allow users to fault 1350 * on the gateway page which is in the kernel address space. 1351 */ 1352 1353 space_adjust spc,va,t0 1354 get_pgd spc,ptp 1355 space_check spc,t0,naitlb_fault 1356 1357 L3_ptep ptp,pte,t0,va,naitlb_check_alias_20w 1358 1359 ptl_lock spc,ptp,pte,t0,t1,naitlb_check_alias_20w 1360 update_accessed ptp,pte,t0,t1 1361 1362 make_insert_tlb spc,pte,prot,t1 1363 1364 iitlbt pte,prot 1365 1366 ptl_unlock spc,t0,t1 1367 rfir 1368 nop 1369 1370naitlb_check_alias_20w: 1371 do_alias spc,t0,t1,va,pte,prot,naitlb_fault,20 1372 1373 iitlbt pte,prot 1374 1375 insert_nops NUM_PIPELINE_INSNS - 1 1376 rfir 1377 nop 1378 1379#else 1380 1381itlb_miss_11: 1382 get_pgd spc,ptp 1383 1384 space_check spc,t0,itlb_fault 1385 1386 L2_ptep ptp,pte,t0,va,itlb_fault 1387 1388 ptl_lock spc,ptp,pte,t0,t1,itlb_fault 1389 update_accessed ptp,pte,t0,t1 1390 1391 make_insert_tlb_11 spc,pte,prot 1392 1393 mfsp %sr1,t1 /* Save sr1 so we can use it in tlb inserts */ 1394 mtsp spc,%sr1 1395 1396 iitlba pte,(%sr1,va) 1397 iitlbp prot,(%sr1,va) 1398 1399 mtsp t1, %sr1 /* Restore sr1 */ 1400 1401 ptl_unlock spc,t0,t1 1402 rfir 1403 nop 1404 1405naitlb_miss_11: 1406 get_pgd spc,ptp 1407 1408 space_check spc,t0,naitlb_fault 1409 1410 L2_ptep ptp,pte,t0,va,naitlb_check_alias_11 1411 1412 ptl_lock spc,ptp,pte,t0,t1,naitlb_check_alias_11 1413 update_accessed ptp,pte,t0,t1 1414 1415 make_insert_tlb_11 spc,pte,prot 1416 1417 mfsp %sr1,t1 /* Save sr1 so we can use it in tlb inserts */ 1418 mtsp spc,%sr1 1419 1420 iitlba pte,(%sr1,va) 1421 iitlbp prot,(%sr1,va) 1422 1423 mtsp t1, %sr1 /* Restore sr1 */ 1424 1425 ptl_unlock spc,t0,t1 1426 rfir 1427 nop 1428 1429naitlb_check_alias_11: 1430 do_alias spc,t0,t1,va,pte,prot,itlb_fault,11 1431 1432 iitlba pte,(%sr0, va) 1433 iitlbp prot,(%sr0, va) 1434 1435 insert_nops NUM_PIPELINE_INSNS - 1 1436 rfir 1437 nop 1438 1439 1440itlb_miss_20: 1441 get_pgd spc,ptp 1442 1443 space_check spc,t0,itlb_fault 1444 1445 L2_ptep ptp,pte,t0,va,itlb_fault 1446 1447 ptl_lock spc,ptp,pte,t0,t1,itlb_fault 1448 update_accessed ptp,pte,t0,t1 1449 1450 make_insert_tlb spc,pte,prot,t1 1451 1452 f_extend pte,t1 1453 1454 iitlbt pte,prot 1455 1456 ptl_unlock spc,t0,t1 1457 rfir 1458 nop 1459 1460naitlb_miss_20: 1461 get_pgd spc,ptp 1462 1463 space_check spc,t0,naitlb_fault 1464 1465 L2_ptep ptp,pte,t0,va,naitlb_check_alias_20 1466 1467 ptl_lock spc,ptp,pte,t0,t1,naitlb_check_alias_20 1468 update_accessed ptp,pte,t0,t1 1469 1470 make_insert_tlb spc,pte,prot,t1 1471 1472 f_extend pte,t1 1473 1474 iitlbt pte,prot 1475 1476 ptl_unlock spc,t0,t1 1477 rfir 1478 nop 1479 1480naitlb_check_alias_20: 1481 do_alias spc,t0,t1,va,pte,prot,naitlb_fault,20 1482 1483 iitlbt pte,prot 1484 1485 insert_nops NUM_PIPELINE_INSNS - 1 1486 rfir 1487 nop 1488 1489#endif 1490 1491#ifdef CONFIG_64BIT 1492 1493dbit_trap_20w: 1494 space_adjust spc,va,t0 1495 get_pgd spc,ptp 1496 space_check spc,t0,dbit_fault 1497 1498 L3_ptep ptp,pte,t0,va,dbit_fault 1499 1500 ptl_lock spc,ptp,pte,t0,t1,dbit_fault 1501 update_dirty ptp,pte,t1 1502 1503 make_insert_tlb spc,pte,prot,t1 1504 1505 idtlbt pte,prot 1506 1507 ptl_unlock spc,t0,t1 1508 rfir 1509 nop 1510#else 1511 1512dbit_trap_11: 1513 1514 get_pgd spc,ptp 1515 1516 space_check spc,t0,dbit_fault 1517 1518 L2_ptep ptp,pte,t0,va,dbit_fault 1519 1520 ptl_lock spc,ptp,pte,t0,t1,dbit_fault 1521 update_dirty ptp,pte,t1 1522 1523 make_insert_tlb_11 spc,pte,prot 1524 1525 mfsp %sr1,t1 /* Save sr1 so we can use it in tlb inserts */ 1526 mtsp spc,%sr1 1527 1528 idtlba pte,(%sr1,va) 1529 idtlbp prot,(%sr1,va) 1530 1531 mtsp t1, %sr1 /* Restore sr1 */ 1532 1533 ptl_unlock spc,t0,t1 1534 rfir 1535 nop 1536 1537dbit_trap_20: 1538 get_pgd spc,ptp 1539 1540 space_check spc,t0,dbit_fault 1541 1542 L2_ptep ptp,pte,t0,va,dbit_fault 1543 1544 ptl_lock spc,ptp,pte,t0,t1,dbit_fault 1545 update_dirty ptp,pte,t1 1546 1547 make_insert_tlb spc,pte,prot,t1 1548 1549 f_extend pte,t1 1550 1551 idtlbt pte,prot 1552 1553 ptl_unlock spc,t0,t1 1554 rfir 1555 nop 1556#endif 1557 1558 .import handle_interruption,code 1559 1560kernel_bad_space: 1561 b intr_save 1562 ldi 31,%r8 /* Use an unused code */ 1563 1564dbit_fault: 1565 b intr_save 1566 ldi 20,%r8 1567 1568itlb_fault: 1569 b intr_save 1570 ldi PARISC_ITLB_TRAP,%r8 1571 1572nadtlb_fault: 1573 b intr_save 1574 ldi 17,%r8 1575 1576naitlb_fault: 1577 b intr_save 1578 ldi 16,%r8 1579 1580dtlb_fault: 1581 b intr_save 1582 ldi 15,%r8 1583 1584 /* Register saving semantics for system calls: 1585 1586 %r1 clobbered by system call macro in userspace 1587 %r2 saved in PT_REGS by gateway page 1588 %r3 - %r18 preserved by C code (saved by signal code) 1589 %r19 - %r20 saved in PT_REGS by gateway page 1590 %r21 - %r22 non-standard syscall args 1591 stored in kernel stack by gateway page 1592 %r23 - %r26 arg3-arg0, saved in PT_REGS by gateway page 1593 %r27 - %r30 saved in PT_REGS by gateway page 1594 %r31 syscall return pointer 1595 */ 1596 1597 /* Floating point registers (FIXME: what do we do with these?) 1598 1599 %fr0 - %fr3 status/exception, not preserved 1600 %fr4 - %fr7 arguments 1601 %fr8 - %fr11 not preserved by C code 1602 %fr12 - %fr21 preserved by C code 1603 %fr22 - %fr31 not preserved by C code 1604 */ 1605 1606 .macro reg_save regs 1607 STREG %r3, PT_GR3(\regs) 1608 STREG %r4, PT_GR4(\regs) 1609 STREG %r5, PT_GR5(\regs) 1610 STREG %r6, PT_GR6(\regs) 1611 STREG %r7, PT_GR7(\regs) 1612 STREG %r8, PT_GR8(\regs) 1613 STREG %r9, PT_GR9(\regs) 1614 STREG %r10,PT_GR10(\regs) 1615 STREG %r11,PT_GR11(\regs) 1616 STREG %r12,PT_GR12(\regs) 1617 STREG %r13,PT_GR13(\regs) 1618 STREG %r14,PT_GR14(\regs) 1619 STREG %r15,PT_GR15(\regs) 1620 STREG %r16,PT_GR16(\regs) 1621 STREG %r17,PT_GR17(\regs) 1622 STREG %r18,PT_GR18(\regs) 1623 .endm 1624 1625 .macro reg_restore regs 1626 LDREG PT_GR3(\regs), %r3 1627 LDREG PT_GR4(\regs), %r4 1628 LDREG PT_GR5(\regs), %r5 1629 LDREG PT_GR6(\regs), %r6 1630 LDREG PT_GR7(\regs), %r7 1631 LDREG PT_GR8(\regs), %r8 1632 LDREG PT_GR9(\regs), %r9 1633 LDREG PT_GR10(\regs),%r10 1634 LDREG PT_GR11(\regs),%r11 1635 LDREG PT_GR12(\regs),%r12 1636 LDREG PT_GR13(\regs),%r13 1637 LDREG PT_GR14(\regs),%r14 1638 LDREG PT_GR15(\regs),%r15 1639 LDREG PT_GR16(\regs),%r16 1640 LDREG PT_GR17(\regs),%r17 1641 LDREG PT_GR18(\regs),%r18 1642 .endm 1643 1644 .macro fork_like name 1645ENTRY_CFI(sys_\name\()_wrapper) 1646 mfctl %cr30,%r1 1647 ldo TASK_REGS(%r1),%r1 1648 reg_save %r1 1649 mfctl %cr27, %r28 1650 ldil L%sys_\name, %r31 1651 be R%sys_\name(%sr4,%r31) 1652 STREG %r28, PT_CR27(%r1) 1653ENDPROC_CFI(sys_\name\()_wrapper) 1654 .endm 1655 1656fork_like clone 1657fork_like clone3 1658fork_like fork 1659fork_like vfork 1660 1661 /* Set the return value for the child */ 1662ENTRY(child_return) 1663 BL schedule_tail, %r2 1664 nop 1665finish_child_return: 1666 mfctl %cr30,%r1 1667 ldo TASK_REGS(%r1),%r1 /* get pt regs */ 1668 1669 LDREG PT_CR27(%r1), %r3 1670 mtctl %r3, %cr27 1671 reg_restore %r1 1672 b syscall_exit 1673 copy %r0,%r28 1674END(child_return) 1675 1676ENTRY_CFI(sys_rt_sigreturn_wrapper) 1677 mfctl %cr30,%r26 1678 ldo TASK_REGS(%r26),%r26 /* get pt regs */ 1679 /* Don't save regs, we are going to restore them from sigcontext. */ 1680 STREG %r2, -RP_OFFSET(%r30) 1681#ifdef CONFIG_64BIT 1682 ldo FRAME_SIZE(%r30), %r30 1683 BL sys_rt_sigreturn,%r2 1684 ldo -16(%r30),%r29 /* Reference param save area */ 1685#else 1686 BL sys_rt_sigreturn,%r2 1687 ldo FRAME_SIZE(%r30), %r30 1688#endif 1689 1690 ldo -FRAME_SIZE(%r30), %r30 1691 LDREG -RP_OFFSET(%r30), %r2 1692 1693 /* FIXME: I think we need to restore a few more things here. */ 1694 mfctl %cr30,%r1 1695 ldo TASK_REGS(%r1),%r1 /* get pt regs */ 1696 reg_restore %r1 1697 1698 /* If the signal was received while the process was blocked on a 1699 * syscall, then r2 will take us to syscall_exit; otherwise r2 will 1700 * take us to syscall_exit_rfi and on to intr_return. 1701 */ 1702 bv %r0(%r2) 1703 LDREG PT_GR28(%r1),%r28 /* reload original r28 for syscall_exit */ 1704ENDPROC_CFI(sys_rt_sigreturn_wrapper) 1705 1706ENTRY(syscall_exit) 1707 /* NOTE: Not all syscalls exit this way. rt_sigreturn will exit 1708 * via syscall_exit_rfi if the signal was received while the process 1709 * was running. 1710 */ 1711 1712 /* save return value now */ 1713 mfctl %cr30, %r1 1714 STREG %r28,TASK_PT_GR28(%r1) 1715 1716 /* Seems to me that dp could be wrong here, if the syscall involved 1717 * calling a module, and nothing got round to restoring dp on return. 1718 */ 1719 loadgp 1720 1721syscall_check_resched: 1722 1723 /* check for reschedule */ 1724 mfctl %cr30,%r19 1725 LDREG TASK_TI_FLAGS(%r19),%r19 /* long */ 1726 bb,<,n %r19, 31-TIF_NEED_RESCHED, syscall_do_resched /* forward */ 1727 1728 .import do_signal,code 1729syscall_check_sig: 1730 mfctl %cr30,%r19 1731 LDREG TASK_TI_FLAGS(%r19),%r19 1732 ldi (_TIF_USER_WORK_MASK & ~_TIF_NEED_RESCHED), %r26 1733 and,COND(<>) %r19, %r26, %r0 1734 b,n syscall_restore /* skip past if we've nothing to do */ 1735 1736syscall_do_signal: 1737 /* Save callee-save registers (for sigcontext). 1738 * FIXME: After this point the process structure should be 1739 * consistent with all the relevant state of the process 1740 * before the syscall. We need to verify this. 1741 */ 1742 mfctl %cr30,%r1 1743 ldo TASK_REGS(%r1), %r26 /* struct pt_regs *regs */ 1744 reg_save %r26 1745 1746#ifdef CONFIG_64BIT 1747 ldo -16(%r30),%r29 /* Reference param save area */ 1748#endif 1749 1750 BL do_notify_resume,%r2 1751 ldi 1, %r25 /* long in_syscall = 1 */ 1752 1753 mfctl %cr30,%r1 1754 ldo TASK_REGS(%r1), %r20 /* reload pt_regs */ 1755 reg_restore %r20 1756 1757 b,n syscall_check_sig 1758 1759syscall_restore: 1760 mfctl %cr30,%r1 1761 1762 /* Are we being ptraced? */ 1763 LDREG TASK_TI_FLAGS(%r1),%r19 1764 ldi _TIF_SINGLESTEP|_TIF_BLOCKSTEP,%r2 1765 and,COND(=) %r19,%r2,%r0 1766 b,n syscall_restore_rfi 1767 1768 ldo TASK_PT_FR31(%r1),%r19 /* reload fpregs */ 1769 rest_fp %r19 1770 1771 LDREG TASK_PT_SAR(%r1),%r19 /* restore SAR */ 1772 mtsar %r19 1773 1774 LDREG TASK_PT_GR2(%r1),%r2 /* restore user rp */ 1775 LDREG TASK_PT_GR19(%r1),%r19 1776 LDREG TASK_PT_GR20(%r1),%r20 1777 LDREG TASK_PT_GR21(%r1),%r21 1778 LDREG TASK_PT_GR22(%r1),%r22 1779 LDREG TASK_PT_GR23(%r1),%r23 1780 LDREG TASK_PT_GR24(%r1),%r24 1781 LDREG TASK_PT_GR25(%r1),%r25 1782 LDREG TASK_PT_GR26(%r1),%r26 1783 LDREG TASK_PT_GR27(%r1),%r27 /* restore user dp */ 1784 LDREG TASK_PT_GR28(%r1),%r28 /* syscall return value */ 1785 LDREG TASK_PT_GR29(%r1),%r29 1786 LDREG TASK_PT_GR31(%r1),%r31 /* restore syscall rp */ 1787 1788 /* NOTE: We use rsm/ssm pair to make this operation atomic */ 1789 LDREG TASK_PT_GR30(%r1),%r1 /* Get user sp */ 1790 rsm PSW_SM_I, %r0 1791 copy %r1,%r30 /* Restore user sp */ 1792 mfsp %sr3,%r1 /* Get user space id */ 1793 mtsp %r1,%sr7 /* Restore sr7 */ 1794 ssm PSW_SM_I, %r0 1795 1796 /* Set sr2 to zero for userspace syscalls to work. */ 1797 mtsp %r0,%sr2 1798 mtsp %r1,%sr4 /* Restore sr4 */ 1799 mtsp %r1,%sr5 /* Restore sr5 */ 1800 mtsp %r1,%sr6 /* Restore sr6 */ 1801 1802 depi PRIV_USER,31,2,%r31 /* ensure return to user mode. */ 1803 1804#ifdef CONFIG_64BIT 1805 /* decide whether to reset the wide mode bit 1806 * 1807 * For a syscall, the W bit is stored in the lowest bit 1808 * of sp. Extract it and reset W if it is zero */ 1809 extrd,u,*<> %r30,63,1,%r1 1810 rsm PSW_SM_W, %r0 1811 /* now reset the lowest bit of sp if it was set */ 1812 xor %r30,%r1,%r30 1813#endif 1814 be,n 0(%sr3,%r31) /* return to user space */ 1815 1816 /* We have to return via an RFI, so that PSW T and R bits can be set 1817 * appropriately. 1818 * This sets up pt_regs so we can return via intr_restore, which is not 1819 * the most efficient way of doing things, but it works. 1820 */ 1821syscall_restore_rfi: 1822 ldo -1(%r0),%r2 /* Set recovery cntr to -1 */ 1823 mtctl %r2,%cr0 /* for immediate trap */ 1824 LDREG TASK_PT_PSW(%r1),%r2 /* Get old PSW */ 1825 ldi 0x0b,%r20 /* Create new PSW */ 1826 depi -1,13,1,%r20 /* C, Q, D, and I bits */ 1827 1828 /* The values of SINGLESTEP_BIT and BLOCKSTEP_BIT are 1829 * set in thread_info.h and converted to PA bitmap 1830 * numbers in asm-offsets.c */ 1831 1832 /* if ((%r19.SINGLESTEP_BIT)) { %r20.27=1} */ 1833 extru,= %r19,TIF_SINGLESTEP_PA_BIT,1,%r0 1834 depi -1,27,1,%r20 /* R bit */ 1835 1836 /* if ((%r19.BLOCKSTEP_BIT)) { %r20.7=1} */ 1837 extru,= %r19,TIF_BLOCKSTEP_PA_BIT,1,%r0 1838 depi -1,7,1,%r20 /* T bit */ 1839 1840 STREG %r20,TASK_PT_PSW(%r1) 1841 1842 /* Always store space registers, since sr3 can be changed (e.g. fork) */ 1843 1844 mfsp %sr3,%r25 1845 STREG %r25,TASK_PT_SR3(%r1) 1846 STREG %r25,TASK_PT_SR4(%r1) 1847 STREG %r25,TASK_PT_SR5(%r1) 1848 STREG %r25,TASK_PT_SR6(%r1) 1849 STREG %r25,TASK_PT_SR7(%r1) 1850 STREG %r25,TASK_PT_IASQ0(%r1) 1851 STREG %r25,TASK_PT_IASQ1(%r1) 1852 1853 /* XXX W bit??? */ 1854 /* Now if old D bit is clear, it means we didn't save all registers 1855 * on syscall entry, so do that now. This only happens on TRACEME 1856 * calls, or if someone attached to us while we were on a syscall. 1857 * We could make this more efficient by not saving r3-r18, but 1858 * then we wouldn't be able to use the common intr_restore path. 1859 * It is only for traced processes anyway, so performance is not 1860 * an issue. 1861 */ 1862 bb,< %r2,30,pt_regs_ok /* Branch if D set */ 1863 ldo TASK_REGS(%r1),%r25 1864 reg_save %r25 /* Save r3 to r18 */ 1865 1866 /* Save the current sr */ 1867 mfsp %sr0,%r2 1868 STREG %r2,TASK_PT_SR0(%r1) 1869 1870 /* Save the scratch sr */ 1871 mfsp %sr1,%r2 1872 STREG %r2,TASK_PT_SR1(%r1) 1873 1874 /* sr2 should be set to zero for userspace syscalls */ 1875 STREG %r0,TASK_PT_SR2(%r1) 1876 1877 LDREG TASK_PT_GR31(%r1),%r2 1878 depi PRIV_USER,31,2,%r2 /* ensure return to user mode. */ 1879 STREG %r2,TASK_PT_IAOQ0(%r1) 1880 ldo 4(%r2),%r2 1881 STREG %r2,TASK_PT_IAOQ1(%r1) 1882 b intr_restore 1883 copy %r25,%r16 1884 1885pt_regs_ok: 1886 LDREG TASK_PT_IAOQ0(%r1),%r2 1887 depi PRIV_USER,31,2,%r2 /* ensure return to user mode. */ 1888 STREG %r2,TASK_PT_IAOQ0(%r1) 1889 LDREG TASK_PT_IAOQ1(%r1),%r2 1890 depi PRIV_USER,31,2,%r2 1891 STREG %r2,TASK_PT_IAOQ1(%r1) 1892 b intr_restore 1893 copy %r25,%r16 1894 1895syscall_do_resched: 1896 load32 syscall_check_resched,%r2 /* if resched, we start over again */ 1897 load32 schedule,%r19 1898 bv %r0(%r19) /* jumps to schedule() */ 1899#ifdef CONFIG_64BIT 1900 ldo -16(%r30),%r29 /* Reference param save area */ 1901#else 1902 nop 1903#endif 1904END(syscall_exit) 1905 1906 1907#ifdef CONFIG_FUNCTION_TRACER 1908 1909 .import ftrace_function_trampoline,code 1910 .align L1_CACHE_BYTES 1911ENTRY_CFI(mcount, caller) 1912_mcount: 1913 .export _mcount,data 1914 /* 1915 * The 64bit mcount() function pointer needs 4 dwords, of which the 1916 * first two are free. We optimize it here and put 2 instructions for 1917 * calling mcount(), and 2 instructions for ftrace_stub(). That way we 1918 * have all on one L1 cacheline. 1919 */ 1920 ldi 0, %arg3 1921 b ftrace_function_trampoline 1922 copy %r3, %arg2 /* caller original %sp */ 1923ftrace_stub: 1924 .globl ftrace_stub 1925 .type ftrace_stub, @function 1926#ifdef CONFIG_64BIT 1927 bve (%rp) 1928#else 1929 bv %r0(%rp) 1930#endif 1931 nop 1932#ifdef CONFIG_64BIT 1933 .dword mcount 1934 .dword 0 /* code in head.S puts value of global gp here */ 1935#endif 1936ENDPROC_CFI(mcount) 1937 1938#ifdef CONFIG_DYNAMIC_FTRACE 1939 1940#ifdef CONFIG_64BIT 1941#define FTRACE_FRAME_SIZE (2*FRAME_SIZE) 1942#else 1943#define FTRACE_FRAME_SIZE FRAME_SIZE 1944#endif 1945ENTRY_CFI(ftrace_caller, caller,frame=FTRACE_FRAME_SIZE,CALLS,SAVE_RP,SAVE_SP) 1946ftrace_caller: 1947 .global ftrace_caller 1948 1949 STREG %r3, -FTRACE_FRAME_SIZE+1*REG_SZ(%sp) 1950 ldo -FTRACE_FRAME_SIZE(%sp), %r3 1951 STREG %rp, -RP_OFFSET(%r3) 1952 1953 /* Offset 0 is already allocated for %r1 */ 1954 STREG %r23, 2*REG_SZ(%r3) 1955 STREG %r24, 3*REG_SZ(%r3) 1956 STREG %r25, 4*REG_SZ(%r3) 1957 STREG %r26, 5*REG_SZ(%r3) 1958 STREG %r28, 6*REG_SZ(%r3) 1959 STREG %r29, 7*REG_SZ(%r3) 1960#ifdef CONFIG_64BIT 1961 STREG %r19, 8*REG_SZ(%r3) 1962 STREG %r20, 9*REG_SZ(%r3) 1963 STREG %r21, 10*REG_SZ(%r3) 1964 STREG %r22, 11*REG_SZ(%r3) 1965 STREG %r27, 12*REG_SZ(%r3) 1966 STREG %r31, 13*REG_SZ(%r3) 1967 loadgp 1968 ldo -16(%sp),%r29 1969#endif 1970 LDREG 0(%r3), %r25 1971 copy %rp, %r26 1972 ldo -8(%r25), %r25 1973 ldi 0, %r23 /* no pt_regs */ 1974 b,l ftrace_function_trampoline, %rp 1975 copy %r3, %r24 1976 1977 LDREG -RP_OFFSET(%r3), %rp 1978 LDREG 2*REG_SZ(%r3), %r23 1979 LDREG 3*REG_SZ(%r3), %r24 1980 LDREG 4*REG_SZ(%r3), %r25 1981 LDREG 5*REG_SZ(%r3), %r26 1982 LDREG 6*REG_SZ(%r3), %r28 1983 LDREG 7*REG_SZ(%r3), %r29 1984#ifdef CONFIG_64BIT 1985 LDREG 8*REG_SZ(%r3), %r19 1986 LDREG 9*REG_SZ(%r3), %r20 1987 LDREG 10*REG_SZ(%r3), %r21 1988 LDREG 11*REG_SZ(%r3), %r22 1989 LDREG 12*REG_SZ(%r3), %r27 1990 LDREG 13*REG_SZ(%r3), %r31 1991#endif 1992 LDREG 1*REG_SZ(%r3), %r3 1993 1994 LDREGM -FTRACE_FRAME_SIZE(%sp), %r1 1995 /* Adjust return point to jump back to beginning of traced function */ 1996 ldo -4(%r1), %r1 1997 bv,n (%r1) 1998 1999ENDPROC_CFI(ftrace_caller) 2000 2001#ifdef CONFIG_HAVE_DYNAMIC_FTRACE_WITH_REGS 2002ENTRY_CFI(ftrace_regs_caller,caller,frame=FTRACE_FRAME_SIZE+PT_SZ_ALGN, 2003 CALLS,SAVE_RP,SAVE_SP) 2004ftrace_regs_caller: 2005 .global ftrace_regs_caller 2006 2007 ldo -FTRACE_FRAME_SIZE(%sp), %r1 2008 STREG %rp, -RP_OFFSET(%r1) 2009 2010 copy %sp, %r1 2011 ldo PT_SZ_ALGN(%sp), %sp 2012 2013 STREG %rp, PT_GR2(%r1) 2014 STREG %r3, PT_GR3(%r1) 2015 STREG %r4, PT_GR4(%r1) 2016 STREG %r5, PT_GR5(%r1) 2017 STREG %r6, PT_GR6(%r1) 2018 STREG %r7, PT_GR7(%r1) 2019 STREG %r8, PT_GR8(%r1) 2020 STREG %r9, PT_GR9(%r1) 2021 STREG %r10, PT_GR10(%r1) 2022 STREG %r11, PT_GR11(%r1) 2023 STREG %r12, PT_GR12(%r1) 2024 STREG %r13, PT_GR13(%r1) 2025 STREG %r14, PT_GR14(%r1) 2026 STREG %r15, PT_GR15(%r1) 2027 STREG %r16, PT_GR16(%r1) 2028 STREG %r17, PT_GR17(%r1) 2029 STREG %r18, PT_GR18(%r1) 2030 STREG %r19, PT_GR19(%r1) 2031 STREG %r20, PT_GR20(%r1) 2032 STREG %r21, PT_GR21(%r1) 2033 STREG %r22, PT_GR22(%r1) 2034 STREG %r23, PT_GR23(%r1) 2035 STREG %r24, PT_GR24(%r1) 2036 STREG %r25, PT_GR25(%r1) 2037 STREG %r26, PT_GR26(%r1) 2038 STREG %r27, PT_GR27(%r1) 2039 STREG %r28, PT_GR28(%r1) 2040 STREG %r29, PT_GR29(%r1) 2041 STREG %r30, PT_GR30(%r1) 2042 STREG %r31, PT_GR31(%r1) 2043 mfctl %cr11, %r26 2044 STREG %r26, PT_SAR(%r1) 2045 2046 copy %rp, %r26 2047 LDREG -FTRACE_FRAME_SIZE-PT_SZ_ALGN(%sp), %r25 2048 ldo -8(%r25), %r25 2049 ldo -FTRACE_FRAME_SIZE(%r1), %arg2 2050 b,l ftrace_function_trampoline, %rp 2051 copy %r1, %arg3 /* struct pt_regs */ 2052 2053 ldo -PT_SZ_ALGN(%sp), %r1 2054 2055 LDREG PT_SAR(%r1), %rp 2056 mtctl %rp, %cr11 2057 2058 LDREG PT_GR2(%r1), %rp 2059 LDREG PT_GR3(%r1), %r3 2060 LDREG PT_GR4(%r1), %r4 2061 LDREG PT_GR5(%r1), %r5 2062 LDREG PT_GR6(%r1), %r6 2063 LDREG PT_GR7(%r1), %r7 2064 LDREG PT_GR8(%r1), %r8 2065 LDREG PT_GR9(%r1), %r9 2066 LDREG PT_GR10(%r1),%r10 2067 LDREG PT_GR11(%r1),%r11 2068 LDREG PT_GR12(%r1),%r12 2069 LDREG PT_GR13(%r1),%r13 2070 LDREG PT_GR14(%r1),%r14 2071 LDREG PT_GR15(%r1),%r15 2072 LDREG PT_GR16(%r1),%r16 2073 LDREG PT_GR17(%r1),%r17 2074 LDREG PT_GR18(%r1),%r18 2075 LDREG PT_GR19(%r1),%r19 2076 LDREG PT_GR20(%r1),%r20 2077 LDREG PT_GR21(%r1),%r21 2078 LDREG PT_GR22(%r1),%r22 2079 LDREG PT_GR23(%r1),%r23 2080 LDREG PT_GR24(%r1),%r24 2081 LDREG PT_GR25(%r1),%r25 2082 LDREG PT_GR26(%r1),%r26 2083 LDREG PT_GR27(%r1),%r27 2084 LDREG PT_GR28(%r1),%r28 2085 LDREG PT_GR29(%r1),%r29 2086 LDREG PT_GR30(%r1),%r30 2087 LDREG PT_GR31(%r1),%r31 2088 2089 ldo -PT_SZ_ALGN(%sp), %sp 2090 LDREGM -FTRACE_FRAME_SIZE(%sp), %r1 2091 /* Adjust return point to jump back to beginning of traced function */ 2092 ldo -4(%r1), %r1 2093 bv,n (%r1) 2094 2095ENDPROC_CFI(ftrace_regs_caller) 2096 2097#endif 2098#endif 2099 2100#ifdef CONFIG_FUNCTION_GRAPH_TRACER 2101 .align 8 2102ENTRY_CFI(return_to_handler, caller,frame=FRAME_SIZE) 2103 .export parisc_return_to_handler,data 2104parisc_return_to_handler: 2105 copy %r3,%r1 2106 STREG %r0,-RP_OFFSET(%sp) /* store 0 as %rp */ 2107 copy %sp,%r3 2108 STREGM %r1,FRAME_SIZE(%sp) 2109 STREG %ret0,8(%r3) 2110 STREG %ret1,16(%r3) 2111 2112#ifdef CONFIG_64BIT 2113 loadgp 2114#endif 2115 2116 /* call ftrace_return_to_handler(0) */ 2117 .import ftrace_return_to_handler,code 2118 load32 ftrace_return_to_handler,%ret0 2119 load32 .Lftrace_ret,%r2 2120#ifdef CONFIG_64BIT 2121 ldo -16(%sp),%ret1 /* Reference param save area */ 2122 bve (%ret0) 2123#else 2124 bv %r0(%ret0) 2125#endif 2126 ldi 0,%r26 2127.Lftrace_ret: 2128 copy %ret0,%rp 2129 2130 /* restore original return values */ 2131 LDREG 8(%r3),%ret0 2132 LDREG 16(%r3),%ret1 2133 2134 /* return from function */ 2135#ifdef CONFIG_64BIT 2136 bve (%rp) 2137#else 2138 bv %r0(%rp) 2139#endif 2140 LDREGM -FRAME_SIZE(%sp),%r3 2141ENDPROC_CFI(return_to_handler) 2142 2143#endif /* CONFIG_FUNCTION_GRAPH_TRACER */ 2144 2145#endif /* CONFIG_FUNCTION_TRACER */ 2146 2147#ifdef CONFIG_IRQSTACKS 2148/* void call_on_stack(unsigned long param1, void *func, 2149 unsigned long new_stack) */ 2150ENTRY_CFI(call_on_stack, FRAME=2*FRAME_SIZE,CALLS,SAVE_RP,SAVE_SP) 2151ENTRY(_call_on_stack) 2152 copy %sp, %r1 2153 2154 /* Regarding the HPPA calling conventions for function pointers, 2155 we assume the PIC register is not changed across call. For 2156 CONFIG_64BIT, the argument pointer is left to point at the 2157 argument region allocated for the call to call_on_stack. */ 2158 2159 /* Switch to new stack. We allocate two frames. */ 2160 ldo 2*FRAME_SIZE(%arg2), %sp 2161# ifdef CONFIG_64BIT 2162 /* Save previous stack pointer and return pointer in frame marker */ 2163 STREG %rp, -FRAME_SIZE-RP_OFFSET(%sp) 2164 /* Calls always use function descriptor */ 2165 LDREG 16(%arg1), %arg1 2166 bve,l (%arg1), %rp 2167 STREG %r1, -FRAME_SIZE-REG_SZ(%sp) 2168 LDREG -FRAME_SIZE-RP_OFFSET(%sp), %rp 2169 bve (%rp) 2170 LDREG -FRAME_SIZE-REG_SZ(%sp), %sp 2171# else 2172 /* Save previous stack pointer and return pointer in frame marker */ 2173 STREG %r1, -FRAME_SIZE-REG_SZ(%sp) 2174 STREG %rp, -FRAME_SIZE-RP_OFFSET(%sp) 2175 /* Calls use function descriptor if PLABEL bit is set */ 2176 bb,>=,n %arg1, 30, 1f 2177 depwi 0,31,2, %arg1 2178 LDREG 0(%arg1), %arg1 21791: 2180 be,l 0(%sr4,%arg1), %sr0, %r31 2181 copy %r31, %rp 2182 LDREG -FRAME_SIZE-RP_OFFSET(%sp), %rp 2183 bv (%rp) 2184 LDREG -FRAME_SIZE-REG_SZ(%sp), %sp 2185# endif /* CONFIG_64BIT */ 2186ENDPROC_CFI(call_on_stack) 2187#endif /* CONFIG_IRQSTACKS */ 2188 2189ENTRY_CFI(get_register) 2190 /* 2191 * get_register is used by the non access tlb miss handlers to 2192 * copy the value of the general register specified in r8 into 2193 * r1. This routine can't be used for shadowed registers, since 2194 * the rfir will restore the original value. So, for the shadowed 2195 * registers we put a -1 into r1 to indicate that the register 2196 * should not be used (the register being copied could also have 2197 * a -1 in it, but that is OK, it just means that we will have 2198 * to use the slow path instead). 2199 */ 2200 blr %r8,%r0 2201 nop 2202 bv %r0(%r25) /* r0 */ 2203 copy %r0,%r1 2204 bv %r0(%r25) /* r1 - shadowed */ 2205 ldi -1,%r1 2206 bv %r0(%r25) /* r2 */ 2207 copy %r2,%r1 2208 bv %r0(%r25) /* r3 */ 2209 copy %r3,%r1 2210 bv %r0(%r25) /* r4 */ 2211 copy %r4,%r1 2212 bv %r0(%r25) /* r5 */ 2213 copy %r5,%r1 2214 bv %r0(%r25) /* r6 */ 2215 copy %r6,%r1 2216 bv %r0(%r25) /* r7 */ 2217 copy %r7,%r1 2218 bv %r0(%r25) /* r8 - shadowed */ 2219 ldi -1,%r1 2220 bv %r0(%r25) /* r9 - shadowed */ 2221 ldi -1,%r1 2222 bv %r0(%r25) /* r10 */ 2223 copy %r10,%r1 2224 bv %r0(%r25) /* r11 */ 2225 copy %r11,%r1 2226 bv %r0(%r25) /* r12 */ 2227 copy %r12,%r1 2228 bv %r0(%r25) /* r13 */ 2229 copy %r13,%r1 2230 bv %r0(%r25) /* r14 */ 2231 copy %r14,%r1 2232 bv %r0(%r25) /* r15 */ 2233 copy %r15,%r1 2234 bv %r0(%r25) /* r16 - shadowed */ 2235 ldi -1,%r1 2236 bv %r0(%r25) /* r17 - shadowed */ 2237 ldi -1,%r1 2238 bv %r0(%r25) /* r18 */ 2239 copy %r18,%r1 2240 bv %r0(%r25) /* r19 */ 2241 copy %r19,%r1 2242 bv %r0(%r25) /* r20 */ 2243 copy %r20,%r1 2244 bv %r0(%r25) /* r21 */ 2245 copy %r21,%r1 2246 bv %r0(%r25) /* r22 */ 2247 copy %r22,%r1 2248 bv %r0(%r25) /* r23 */ 2249 copy %r23,%r1 2250 bv %r0(%r25) /* r24 - shadowed */ 2251 ldi -1,%r1 2252 bv %r0(%r25) /* r25 - shadowed */ 2253 ldi -1,%r1 2254 bv %r0(%r25) /* r26 */ 2255 copy %r26,%r1 2256 bv %r0(%r25) /* r27 */ 2257 copy %r27,%r1 2258 bv %r0(%r25) /* r28 */ 2259 copy %r28,%r1 2260 bv %r0(%r25) /* r29 */ 2261 copy %r29,%r1 2262 bv %r0(%r25) /* r30 */ 2263 copy %r30,%r1 2264 bv %r0(%r25) /* r31 */ 2265 copy %r31,%r1 2266ENDPROC_CFI(get_register) 2267 2268 2269ENTRY_CFI(set_register) 2270 /* 2271 * set_register is used by the non access tlb miss handlers to 2272 * copy the value of r1 into the general register specified in 2273 * r8. 2274 */ 2275 blr %r8,%r0 2276 nop 2277 bv %r0(%r25) /* r0 (silly, but it is a place holder) */ 2278 copy %r1,%r0 2279 bv %r0(%r25) /* r1 */ 2280 copy %r1,%r1 2281 bv %r0(%r25) /* r2 */ 2282 copy %r1,%r2 2283 bv %r0(%r25) /* r3 */ 2284 copy %r1,%r3 2285 bv %r0(%r25) /* r4 */ 2286 copy %r1,%r4 2287 bv %r0(%r25) /* r5 */ 2288 copy %r1,%r5 2289 bv %r0(%r25) /* r6 */ 2290 copy %r1,%r6 2291 bv %r0(%r25) /* r7 */ 2292 copy %r1,%r7 2293 bv %r0(%r25) /* r8 */ 2294 copy %r1,%r8 2295 bv %r0(%r25) /* r9 */ 2296 copy %r1,%r9 2297 bv %r0(%r25) /* r10 */ 2298 copy %r1,%r10 2299 bv %r0(%r25) /* r11 */ 2300 copy %r1,%r11 2301 bv %r0(%r25) /* r12 */ 2302 copy %r1,%r12 2303 bv %r0(%r25) /* r13 */ 2304 copy %r1,%r13 2305 bv %r0(%r25) /* r14 */ 2306 copy %r1,%r14 2307 bv %r0(%r25) /* r15 */ 2308 copy %r1,%r15 2309 bv %r0(%r25) /* r16 */ 2310 copy %r1,%r16 2311 bv %r0(%r25) /* r17 */ 2312 copy %r1,%r17 2313 bv %r0(%r25) /* r18 */ 2314 copy %r1,%r18 2315 bv %r0(%r25) /* r19 */ 2316 copy %r1,%r19 2317 bv %r0(%r25) /* r20 */ 2318 copy %r1,%r20 2319 bv %r0(%r25) /* r21 */ 2320 copy %r1,%r21 2321 bv %r0(%r25) /* r22 */ 2322 copy %r1,%r22 2323 bv %r0(%r25) /* r23 */ 2324 copy %r1,%r23 2325 bv %r0(%r25) /* r24 */ 2326 copy %r1,%r24 2327 bv %r0(%r25) /* r25 */ 2328 copy %r1,%r25 2329 bv %r0(%r25) /* r26 */ 2330 copy %r1,%r26 2331 bv %r0(%r25) /* r27 */ 2332 copy %r1,%r27 2333 bv %r0(%r25) /* r28 */ 2334 copy %r1,%r28 2335 bv %r0(%r25) /* r29 */ 2336 copy %r1,%r29 2337 bv %r0(%r25) /* r30 */ 2338 copy %r1,%r30 2339 bv %r0(%r25) /* r31 */ 2340 copy %r1,%r31 2341ENDPROC_CFI(set_register) 2342 2343