/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
 * Linux/PA-RISC Project (http://www.parisc-linux.org/)
 *
 * kernel entry points (interruptions, system call wrappers)
 *  Copyright (C) 1999,2000 Philipp Rumpf
 *  Copyright (C) 1999 SuSE GmbH Nuernberg
 *  Copyright (C) 2000 Hewlett-Packard (John Marvin)
 *  Copyright (C) 1999 Hewlett-Packard (Frank Rowand)
 */

#include <asm/asm-offsets.h>

/* we have the following possibilities to act on an interruption:
 *  - handle in assembly and use shadowed registers only
 *  - save registers to kernel stack and handle in assembly or C */


#include <asm/psw.h>
#include <asm/cache.h>		/* for L1_CACHE_SHIFT */
#include <asm/assembly.h>	/* for LDREG/STREG defines */
#include <asm/signal.h>
#include <asm/unistd.h>
#include <asm/ldcw.h>
#include <asm/traps.h>
#include <asm/thread_info.h>
#include <asm/alternative.h>

#include <linux/linkage.h>
#include <linux/pgtable.h>

#ifdef CONFIG_64BIT
	.level 2.0w
#else
	.level 2.0
#endif

	/* Get aligned page_table_lock address for this mm from cr28/tr4 */
	.macro	get_ptl reg
	mfctl	%cr28,\reg
	.endm

	/* space_to_prot macro creates a prot id from a space id */

#if (SPACEID_SHIFT) == 0
	.macro	space_to_prot spc prot
	depd,z	\spc,62,31,\prot
	.endm
#else
	.macro	space_to_prot spc prot
	extrd,u	\spc,(64 - (SPACEID_SHIFT)),32,\prot
	.endm
#endif
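
	/* Illustrative note (not generated code): in the SPACEID_SHIFT == 0
	 * case, depd,z deposits the low 31 bits of the space id one bit to
	 * the left, i.e. prot = (spc & 0x7fffffff) << 1, so e.g. space
	 * 0x1234 yields protection id 0x2468; the low bit of a PA
	 * protection id is its write-disable bit and stays clear here. */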
	/*
	 * The "get_stack" macros are responsible for determining the
	 * kernel stack value.
	 *
	 *	If sr7 == 0
	 *		Already using a kernel stack, so call the
	 *		get_stack_use_r30 macro to push a pt_regs structure
	 *		on the stack, and store registers there.
	 *	else
	 *		Need to set up a kernel stack, so call the
	 *		get_stack_use_cr30 macro to set up a pointer
	 *		to the pt_regs structure contained within the
	 *		task pointer pointed to by cr30. Load the stack
	 *		pointer from the task structure.
	 *
	 * Note that we use shadowed registers for temps until
	 * we can save %r26 and %r29. %r26 is used to preserve
	 * %r8 (a shadowed register) which temporarily contained
	 * either the fault type ("code") or the eirr. We need
	 * to use a non-shadowed register to carry the value over
	 * the rfir in virt_map. We use %r26 since this value winds
	 * up being passed as the argument to either do_cpu_irq_mask
	 * or handle_interruption. %r29 is used to hold a pointer
	 * to the register save area, and once again, it needs to
	 * be a non-shadowed register so that it survives the rfir.
	 */

	.macro	get_stack_use_cr30

	/* we save the registers in the task struct */

	copy	%r30, %r17
	mfctl	%cr30, %r1
	tophys	%r1,%r9		/* task_struct */
	LDREG	TASK_STACK(%r9),%r30
	ldo	PT_SZ_ALGN(%r30),%r30
	mtsp	%r0,%sr7	/* clear sr7 after kernel stack was set! */
	mtsp	%r16,%sr3
	ldo	TASK_REGS(%r9),%r9
	STREG	%r17,PT_GR30(%r9)
	STREG	%r29,PT_GR29(%r9)
	STREG	%r26,PT_GR26(%r9)
	STREG	%r16,PT_SR7(%r9)
	copy	%r9,%r29
	.endm

	.macro	get_stack_use_r30

	/* we put a struct pt_regs on the stack and save the registers there */

	tophys	%r30,%r9
	copy	%r30,%r1
	ldo	PT_SZ_ALGN(%r30),%r30
	STREG	%r1,PT_GR30(%r9)
	STREG	%r29,PT_GR29(%r9)
	STREG	%r26,PT_GR26(%r9)
	STREG	%r16,PT_SR7(%r9)
	copy	%r9,%r29
	.endm

	.macro	rest_stack
	LDREG	PT_GR1(%r29), %r1
	LDREG	PT_GR30(%r29),%r30
	LDREG	PT_GR29(%r29),%r29
	.endm

	/* default interruption handler
	 * (calls traps.c:handle_interruption) */
	.macro	def code
	b	intr_save
	ldi	\code, %r8
	.align	32
	.endm

	/* Interrupt interruption handler
	 * (calls irq.c:do_cpu_irq_mask) */
	.macro	extint code
	b	intr_extint
	mfsp	%sr7,%r16
	.align	32
	.endm

	.import	os_hpmc, code

	/* HPMC handler */
	.macro	hpmc code
	nop			/* must be a NOP, will be patched later */
	load32	PA(os_hpmc), %r3
	bv,n	0(%r3)
	nop
	.word	0		/* checksum (will be patched) */
	.word	0		/* address of handler */
	.word	0		/* length of handler */
	.endm

	/*
	 * Performance Note: Instructions will be moved up into
	 * this part of the code later on, once we are sure
	 * that the tlb miss handlers are close to final form.
	 */

	/* Register definitions for tlb miss handler macros */

	va  = r8	/* virtual address for which the trap occurred */
	spc = r24	/* space for which the trap occurred */

#ifndef CONFIG_64BIT

	/*
	 * itlb miss interruption handler (parisc 1.1 - 32 bit)
	 */

	.macro	itlb_11 code

	mfctl	%pcsq, spc
	b	itlb_miss_11
	mfctl	%pcoq, va

	.align		32
	.endm
#endif

	/*
	 * itlb miss interruption handler (parisc 2.0)
	 */

	.macro	itlb_20 code
	mfctl	%pcsq, spc
#ifdef CONFIG_64BIT
	b	itlb_miss_20w
#else
	b	itlb_miss_20
#endif
	mfctl	%pcoq, va

	.align		32
	.endm

#ifndef CONFIG_64BIT
	/*
	 * naitlb miss interruption handler (parisc 1.1 - 32 bit)
	 */

	.macro	naitlb_11 code

	mfctl	%isr,spc
	b	naitlb_miss_11
	mfctl	%ior,va

	.align		32
	.endm
#endif

	/*
	 * naitlb miss interruption handler (parisc 2.0)
	 */

	.macro	naitlb_20 code

	mfctl	%isr,spc
#ifdef CONFIG_64BIT
	b	naitlb_miss_20w
#else
	b	naitlb_miss_20
#endif
	mfctl	%ior,va

	.align		32
	.endm

#ifndef CONFIG_64BIT
	/*
	 * dtlb miss interruption handler (parisc 1.1 - 32 bit)
	 */

	.macro	dtlb_11 code

	mfctl	%isr, spc
	b	dtlb_miss_11
	mfctl	%ior, va

	.align		32
	.endm
#endif

	/*
	 * dtlb miss interruption handler (parisc 2.0)
	 */

	.macro	dtlb_20 code

	mfctl	%isr, spc
#ifdef CONFIG_64BIT
	b	dtlb_miss_20w
#else
	b	dtlb_miss_20
#endif
	mfctl	%ior, va

	.align		32
	.endm

#ifndef CONFIG_64BIT
	/* nadtlb miss interruption handler (parisc 1.1 - 32 bit) */

	.macro	nadtlb_11 code

	mfctl	%isr,spc
	b	nadtlb_miss_11
	mfctl	%ior,va

	.align		32
	.endm
#endif

	/* nadtlb miss interruption handler (parisc 2.0) */

	.macro	nadtlb_20 code

	mfctl	%isr,spc
#ifdef CONFIG_64BIT
	b	nadtlb_miss_20w
#else
	b	nadtlb_miss_20
#endif
	mfctl	%ior,va

	.align		32
	.endm

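	/* For reference (illustrative, just restating the macros above):
	 * each vector stub fits its 32-byte slot by fetching the faulting
	 * space/offset from control registers and branching, so e.g.
	 * "dtlb_20 15" emits roughly:
	 *	mfctl	%isr, spc
	 *	b	dtlb_miss_20w		(dtlb_miss_20 on 32-bit)
	 *	mfctl	%ior, va		(in the branch delay slot)
	 */
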
#ifndef CONFIG_64BIT
	/*
	 * dirty bit trap interruption handler (parisc 1.1 - 32 bit)
	 */

	.macro	dbit_11 code

	mfctl	%isr,spc
	b	dbit_trap_11
	mfctl	%ior,va

	.align		32
	.endm
#endif

	/*
	 * dirty bit trap interruption handler (parisc 2.0)
	 */

	.macro	dbit_20 code

	mfctl	%isr,spc
#ifdef CONFIG_64BIT
	b	dbit_trap_20w
#else
	b	dbit_trap_20
#endif
	mfctl	%ior,va

	.align		32
	.endm

	/* In LP64, the space contains part of the upper 32 bits of the
	 * fault.  We have to extract this and place it in the va,
	 * zeroing the corresponding bits in the space register */
	.macro	space_adjust	spc,va,tmp
#ifdef CONFIG_64BIT
	extrd,u	\spc,63,SPACEID_SHIFT,\tmp
	depd	%r0,63,SPACEID_SHIFT,\spc
	depd	\tmp,31,SPACEID_SHIFT,\va
#endif
	.endm

	.import swapper_pg_dir,code

	/* Get the pgd.  For faults on space zero (kernel space), this
	 * is simply swapper_pg_dir.  For user space faults, the
	 * pgd is stored in %cr25 */
	.macro	get_pgd		spc,reg
	ldil		L%PA(swapper_pg_dir),\reg
	ldo		R%PA(swapper_pg_dir)(\reg),\reg
	or,COND(=)	%r0,\spc,%r0
	mfctl		%cr25,\reg
	.endm

	/*
		space_check(spc,tmp,fault)

		spc - The space we saw the fault with.
		tmp - The place to store the current space.
		fault - Function to call on failure.

		Only allow faults on different spaces from the
		currently active one if we're the kernel

	*/
	.macro	space_check	spc,tmp,fault
	mfsp	%sr7,\tmp
	/* check against %r0 which is same value as LINUX_GATEWAY_SPACE */
	or,COND(<>)	%r0,\spc,%r0	/* user may execute gateway page
					 * as kernel, so defeat the space
					 * check if it is */
	copy	\spc,\tmp
	or,COND(=)	%r0,\tmp,%r0	/* nullify if executing as kernel */
	cmpb,COND(<>),n	\tmp,\spc,\fault
	.endm

	/* Look up a PTE in a 2-Level scheme (faulting at each
	 * level if the entry isn't present)
	 *
	 * NOTE: we use ldw even for LP64, since the short pointers
	 * can address up to 1TB
	 */
	.macro	L2_ptep	pmd,pte,index,va,fault
#if CONFIG_PGTABLE_LEVELS == 3
	extru_safe	\va,31-ASM_PMD_SHIFT,ASM_BITS_PER_PMD,\index
#else
	extru_safe	\va,31-ASM_PGDIR_SHIFT,ASM_BITS_PER_PGD,\index
#endif
	dep		%r0,31,PAGE_SHIFT,\pmd	/* clear offset */
#if CONFIG_PGTABLE_LEVELS < 3
	copy		%r0,\pte
#endif
	ldw,s		\index(\pmd),\pmd
	bb,>=,n		\pmd,_PxD_PRESENT_BIT,\fault
	dep		%r0,31,PxD_FLAG_SHIFT,\pmd /* clear flags */
	SHLREG		\pmd,PxD_VALUE_SHIFT,\pmd
	extru_safe	\va,31-PAGE_SHIFT,ASM_BITS_PER_PTE,\index
	dep		%r0,31,PAGE_SHIFT,\pmd	/* clear offset */
	shladd		\index,BITS_PER_PTE_ENTRY,\pmd,\pmd /* pmd is now pte */
	.endm

	/* Look up PTE in a 3-Level scheme. */
	.macro	L3_ptep pgd,pte,index,va,fault
#if CONFIG_PGTABLE_LEVELS == 3
	copy	%r0,\pte
	extrd,u	\va,63-ASM_PGDIR_SHIFT,ASM_BITS_PER_PGD,\index
	ldw,s	\index(\pgd),\pgd
	bb,>=,n	\pgd,_PxD_PRESENT_BIT,\fault
	shld	\pgd,PxD_VALUE_SHIFT,\pgd
#endif
	L2_ptep	\pgd,\pte,\index,\va,\fault
	.endm

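	/* Rough C-level sketch of the two-level walk above (illustrative
	 * only, not generated code):
	 *
	 *	pmd = ((u32 *)pmd_base)[index(va)];
	 *	if (!(pmd & (1 << _PxD_PRESENT_BIT_from_lsb)))
	 *		goto fault;
	 *	pmd &= ~((1 << PxD_FLAG_SHIFT) - 1);	// clear flag bits
	 *	pte_base = pmd << PxD_VALUE_SHIFT;	// physical pte page
	 *	pte = ((pte_t *)pte_base)[pte_index(va)];
	 */
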
	/* Acquire page_table_lock and check page is present. */
	.macro	ptl_lock	spc,ptp,pte,tmp,tmp1,fault
#ifdef CONFIG_TLB_PTLOCK
98:	cmpib,COND(=),n	0,\spc,2f
	get_ptl		\tmp
1:	LDCW		0(\tmp),\tmp1
	cmpib,COND(=)	0,\tmp1,1b
	nop
	LDREG		0(\ptp),\pte
	bb,<,n		\pte,_PAGE_PRESENT_BIT,3f
	b		\fault
	stw		\spc,0(\tmp)
99:	ALTERNATIVE(98b, 99b, ALT_COND_NO_SMP, INSN_NOP)
#endif
2:	LDREG		0(\ptp),\pte
	bb,>=,n		\pte,_PAGE_PRESENT_BIT,\fault
3:
	.endm

	/* Release page_table_lock without reloading lock address.
	   Note that the values in the register spc are limited to
	   NR_SPACE_IDS (262144). Thus, the stw instruction always
	   stores a nonzero value even when register spc is 64 bits.
	   We use an ordered store to ensure all prior accesses are
	   performed prior to releasing the lock. */
	.macro	ptl_unlock0	spc,tmp
#ifdef CONFIG_TLB_PTLOCK
98:	or,COND(=)	%r0,\spc,%r0
	stw,ma		\spc,0(\tmp)
99:	ALTERNATIVE(98b, 99b, ALT_COND_NO_SMP, INSN_NOP)
#endif
	.endm

	/* Release page_table_lock. */
	.macro	ptl_unlock1	spc,tmp
#ifdef CONFIG_TLB_PTLOCK
98:	get_ptl		\tmp
	ptl_unlock0	\spc,\tmp
99:	ALTERNATIVE(98b, 99b, ALT_COND_NO_SMP, INSN_NOP)
#endif
	.endm

	/* Set the _PAGE_ACCESSED bit of the PTE.  Be clever and
	 * don't needlessly dirty the cache line if it was already set */
	.macro	update_accessed	ptp,pte,tmp,tmp1
	ldi	_PAGE_ACCESSED,\tmp1
	or	\tmp1,\pte,\tmp
	and,COND(<>)	\tmp1,\pte,%r0
	STREG	\tmp,0(\ptp)
	.endm

	/* Set the dirty bit (and accessed bit).  No need to be
	 * clever, this is only used from the dirty fault */
	.macro	update_dirty	ptp,pte,tmp
	ldi	_PAGE_ACCESSED|_PAGE_DIRTY,\tmp
	or	\tmp,\pte,\pte
	STREG	\pte,0(\ptp)
	.endm

	/* We have (depending on the page size):
	 * - 38 to 52-bit Physical Page Number
	 * - 12 to 26-bit page offset
	 */
	/* bitshift difference between a PFN (based on kernel's PAGE_SIZE)
	 * and a CPU TLB 4k PFN (4k => 12 bits to shift) */
	#define PAGE_ADD_SHIFT		(PAGE_SHIFT-12)
	#define PAGE_ADD_HUGE_SHIFT	(REAL_HPAGE_SHIFT-12)

	/* Drop prot bits and convert to page addr for iitlbt and idtlbt */
	.macro	convert_for_tlb_insert20 pte,tmp
#ifdef CONFIG_HUGETLB_PAGE
	copy		\pte,\tmp
	extrd,u		\tmp,(63-ASM_PFN_PTE_SHIFT)+(63-58)+PAGE_ADD_SHIFT,\
				64-PAGE_SHIFT-PAGE_ADD_SHIFT,\pte

	depdi		_PAGE_SIZE_ENCODING_DEFAULT,63,\
				(63-58)+PAGE_ADD_SHIFT,\pte
	extrd,u,*=	\tmp,_PAGE_HPAGE_BIT+32,1,%r0
	depdi		_HUGE_PAGE_SIZE_ENCODING_DEFAULT,63,\
				(63-58)+PAGE_ADD_HUGE_SHIFT,\pte
#else /* Huge pages disabled */
	extrd,u		\pte,(63-ASM_PFN_PTE_SHIFT)+(63-58)+PAGE_ADD_SHIFT,\
				64-PAGE_SHIFT-PAGE_ADD_SHIFT,\pte
	depdi		_PAGE_SIZE_ENCODING_DEFAULT,63,\
				(63-58)+PAGE_ADD_SHIFT,\pte
#endif
	.endm

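	/* Worked example (illustrative): with 4k kernel pages
	 * (PAGE_SHIFT == 12) PAGE_ADD_SHIFT is 0 and a kernel PFN is
	 * already in CPU TLB units; with 16k pages (PAGE_SHIFT == 14) it
	 * is 2, so a kernel PFN must be shifted left by 2 to count the
	 * 4k pages the TLB insert instructions expect. */
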
	/* Convert the pte and prot to tlb insertion values.  How
	 * this happens is quite subtle, read below */
	.macro	make_insert_tlb	spc,pte,prot,tmp
	space_to_prot	\spc \prot	/* create prot id from space */
	/* The following is the real subtlety.  This is depositing
	 * T <-> _PAGE_REFTRAP
	 * D <-> _PAGE_DIRTY
	 * B <-> _PAGE_DMB (memory break)
	 *
	 * Then incredible subtlety: The access rights are
	 * _PAGE_GATEWAY, _PAGE_EXEC and _PAGE_WRITE
	 * See 3-14 of the parisc 2.0 manual
	 *
	 * Finally, _PAGE_READ goes in the top bit of PL1 (so we
	 * trigger an access rights trap in user space if the user
	 * tries to read an unreadable page) */
	depd		\pte,8,7,\prot

	/* PAGE_USER indicates the page can be read with user privileges,
	 * so deposit X1|11 to PL1|PL2 (remember the upper bit of PL1
	 * contains _PAGE_READ) */
	extrd,u,*=	\pte,_PAGE_USER_BIT+32,1,%r0
	depdi		7,11,3,\prot
	/* If we're a gateway page, drop PL2 back to zero for promotion
	 * to kernel privilege (so we can execute the page as kernel).
	 * Any privilege promotion page always denies read and write */
	extrd,u,*=	\pte,_PAGE_GATEWAY_BIT+32,1,%r0
	depd		%r0,11,2,\prot	/* If Gateway, Set PL2 to 0 */

	/* Enforce uncacheable pages.
	 * This should ONLY be used for MMIO on PA 2.0 machines.
	 * Memory/DMA is cache coherent on all PA2.0 machines we support
	 * (that means T-class is NOT supported) and the memory controllers
	 * on most of those machines only handle cache transactions.
	 */
	extrd,u,*=	\pte,_PAGE_NO_CACHE_BIT+32,1,%r0
	depdi		1,12,1,\prot

	/* Drop prot bits and convert to page addr for iitlbt and idtlbt */
	convert_for_tlb_insert20 \pte \tmp
	.endm

	/* Identical macro to make_insert_tlb above, except it
	 * makes the tlb entry for the differently formatted pa11
	 * insertion instructions */
	.macro	make_insert_tlb_11	spc,pte,prot
	zdep	\spc,30,15,\prot
	dep	\pte,8,7,\prot
	extru,=	\pte,_PAGE_NO_CACHE_BIT,1,%r0
	depi	1,12,1,\prot
	extru,=	\pte,_PAGE_USER_BIT,1,%r0
	depi	7,11,3,\prot	/* Set for user space (1 rsvd for read) */
	extru,=	\pte,_PAGE_GATEWAY_BIT,1,%r0
	depi	0,11,2,\prot	/* If Gateway, Set PL2 to 0 */

	/* Get rid of prot bits and convert to page addr for iitlba */

	depi	0,31,ASM_PFN_PTE_SHIFT,\pte
	SHRREG	\pte,(ASM_PFN_PTE_SHIFT-(31-26)),\pte
	.endm

	/* This is for ILP32 PA2.0 only.  The TLB insertion needs
	 * to extend into I/O space if the address is 0xfXXXXXXX
	 * so we extend the f's into the top word of the pte in
	 * this case */
	.macro	f_extend	pte,tmp
	extrd,s		\pte,42,4,\tmp
	addi,<>		1,\tmp,%r0
	extrd,s		\pte,63,25,\pte
	.endm

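	/* Illustrative note: after the PFN conversion, the four bits
	 * tested above are all ones exactly when the original 32-bit
	 * address was of the form 0xfXXXXXXX; only then does the final
	 * sign-extension run, replicating the f's into the upper word so
	 * idtlbt/iitlbt see the full I/O-space physical address. */
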
	/* The alias region is an 8MB-aligned 16MB region used to clear
	 * and copy user pages at addresses congruent with the user
	 * virtual address.
	 *
	 * To use the alias page, you set %r26 up with the "to" TLB
	 * entry (identifying the physical page) and %r23 up with
	 * the "from" tlb entry (or nothing if only a "to" entry---for
	 * clear_user_page_asm) */
	.macro	do_alias	spc,tmp,tmp1,va,pte,prot,fault,patype
	cmpib,COND(<>),n 0,\spc,\fault
	ldil		L%(TMPALIAS_MAP_START),\tmp
#if defined(CONFIG_64BIT) && (TMPALIAS_MAP_START >= 0x80000000)
	/* on LP64, ldi will sign extend into the upper 32 bits,
	 * which is behaviour we don't want */
	depdi		0,31,32,\tmp
#endif
	copy		\va,\tmp1
	depi		0,31,23,\tmp1
	cmpb,COND(<>),n	\tmp,\tmp1,\fault
	mfctl		%cr19,\tmp	/* iir */
	/* get the opcode (first six bits) into \tmp */
	extrw,u		\tmp,5,6,\tmp
	/*
	 * Only setting the T bit prevents data cache movein
	 * Setting access rights to zero prevents instruction cache movein
	 *
	 * Note subtlety here: _PAGE_GATEWAY, _PAGE_EXEC and _PAGE_WRITE go
	 * to type field and _PAGE_READ goes to top bit of PL1
	 */
	ldi		(_PAGE_REFTRAP|_PAGE_READ|_PAGE_WRITE),\prot
	/*
	 * so if the opcode is one (i.e. this is a memory management
	 * instruction) nullify the next load so \prot is only T.
	 * Otherwise this is a normal data operation
	 */
	cmpiclr,=	0x01,\tmp,%r0
	ldi		(_PAGE_DIRTY|_PAGE_READ|_PAGE_WRITE),\prot
.ifc \patype,20
	depd,z		\prot,8,7,\prot
.else
.ifc \patype,11
	depw,z		\prot,8,7,\prot
.else
	.error "undefined PA type to do_alias"
.endif
.endif
	/*
	 * OK, it is in the temp alias region, check whether "from" or "to".
	 * Check "subtle" note in pacache.S re: r23/r26.
	 */
#ifdef CONFIG_64BIT
	extrd,u,*=	\va,41,1,%r0
#else
	extrw,u,=	\va,9,1,%r0
#endif
	or,COND(tr)	%r23,%r0,\pte
	or		%r26,%r0,\pte
	.endm


	/*
	 * Fault_vectors are architecturally required to be aligned on a 2K
	 * boundary
	 */

	.section .text.hot
	.align 2048

ENTRY(fault_vector_20)
	/* First vector is invalid (0) */
	.ascii	"cows can fly"
	.byte 0
	.align 32

	hpmc		 1
	def		 2
	def		 3
	extint		 4
	def		 5
	itlb_20		 PARISC_ITLB_TRAP
	def		 7
	def		 8
	def		 9
	def		10
	def		11
	def		12
	def		13
	def		14
	dtlb_20		15
	naitlb_20	16
	nadtlb_20	17
	def		18
	def		19
	dbit_20		20
	def		21
	def		22
	def		23
	def		24
	def		25
	def		26
	def		27
	def		28
	def		29
	def		30
	def		31
END(fault_vector_20)

#ifndef CONFIG_64BIT

	.align 2048

ENTRY(fault_vector_11)
	/* First vector is invalid (0) */
	.ascii	"cows can fly"
	.byte 0
	.align 32

	hpmc		 1
	def		 2
	def		 3
	extint		 4
	def		 5
	itlb_11		 PARISC_ITLB_TRAP
	def		 7
	def		 8
	def		 9
	def		10
	def		11
	def		12
	def		13
	def		14
	dtlb_11		15
	naitlb_11	16
	nadtlb_11	17
	def		18
	def		19
	dbit_11		20
	def		21
	def		22
	def		23
	def		24
	def		25
	def		26
	def		27
	def		28
	def		29
	def		30
	def		31
END(fault_vector_11)

#endif
	/* Fault vector is separately protected and *must* be on its own page */
	.align		PAGE_SIZE

	.import		handle_interruption,code
	.import		do_cpu_irq_mask,code

	/*
	 * Child Returns here
	 *
	 * copy_thread moved args into task save area.
	 */

ENTRY(ret_from_kernel_thread)
	/* Call schedule_tail first though */
	BL	schedule_tail, %r2
	nop

	mfctl	%cr30,%r1	/* task_struct */
	LDREG	TASK_PT_GR25(%r1), %r26
#ifdef CONFIG_64BIT
	LDREG	TASK_PT_GR27(%r1), %r27
#endif
	LDREG	TASK_PT_GR26(%r1), %r1
	ble	0(%sr7, %r1)
	copy	%r31, %r2
	b	finish_child_return
	nop
END(ret_from_kernel_thread)


	/*
	 * struct task_struct *_switch_to(struct task_struct *prev,
	 *	struct task_struct *next)
	 *
	 * switch kernel stacks and return prev */
ENTRY_CFI(_switch_to)
	STREG	%r2, -RP_OFFSET(%r30)

	callee_save_float
	callee_save

	load32	_switch_to_ret, %r2

	STREG	%r2, TASK_PT_KPC(%r26)
	LDREG	TASK_PT_KPC(%r25), %r2

	STREG	%r30, TASK_PT_KSP(%r26)
	LDREG	TASK_PT_KSP(%r25), %r30
	bv	%r0(%r2)
	mtctl	%r25,%cr30

ENTRY(_switch_to_ret)
	mtctl	%r0, %cr0		/* Needed for single stepping */
	callee_rest
	callee_rest_float

	LDREG	-RP_OFFSET(%r30), %r2
	bv	%r0(%r2)
	copy	%r26, %r28
ENDPROC_CFI(_switch_to)

	/*
	 * Common rfi return path for interruptions, kernel execve, and
	 * sys_rt_sigreturn (sometimes).  The sys_rt_sigreturn syscall will
	 * return via this path if the signal was received when the process
	 * was running; if the process was blocked on a syscall then the
	 * normal syscall_exit path is used.  All syscalls for traced
	 * processes exit via intr_restore.
	 *
	 * XXX If any syscalls that change a process's space id ever exit
	 * this way, then we will need to copy %sr3 in to PT_SR[3..7], and
	 * adjust IASQ[0..1].
	 *
	 */

	.align	PAGE_SIZE

ENTRY_CFI(syscall_exit_rfi)
	mfctl	%cr30,%r16		/* task_struct */
	ldo	TASK_REGS(%r16),%r16
	/* Force iaoq to userspace, as the user has had access to our current
	 * context via sigcontext. Also filter the PSW for the same reason.
	 */
	LDREG	PT_IAOQ0(%r16),%r19
	depi	PRIV_USER,31,2,%r19
	STREG	%r19,PT_IAOQ0(%r16)
	LDREG	PT_IAOQ1(%r16),%r19
	depi	PRIV_USER,31,2,%r19
	STREG	%r19,PT_IAOQ1(%r16)
	LDREG	PT_PSW(%r16),%r19
	load32	USER_PSW_MASK,%r1
#ifdef CONFIG_64BIT
	load32	USER_PSW_HI_MASK,%r20
	depd	%r20,31,32,%r1
#endif
	and	%r19,%r1,%r19 /* Mask out bits that user shouldn't play with */
	load32	USER_PSW,%r1
	or	%r19,%r1,%r19 /* Make sure default USER_PSW bits are set */
	STREG	%r19,PT_PSW(%r16)

	/*
	 * If we aren't being traced, we never saved space registers
	 * (we don't store them in the sigcontext), so set them
	 * to "proper" values now (otherwise we'll wind up restoring
	 * whatever was last stored in the task structure, which might
	 * be inconsistent if an interrupt occurred while on the gateway
	 * page). Note that we may be "trashing" values the user put in
	 * them, but we don't support the user changing them.
	 */

	STREG	%r0,PT_SR2(%r16)
	mfsp	%sr3,%r19
	STREG	%r19,PT_SR0(%r16)
	STREG	%r19,PT_SR1(%r16)
	STREG	%r19,PT_SR3(%r16)
	STREG	%r19,PT_SR4(%r16)
	STREG	%r19,PT_SR5(%r16)
	STREG	%r19,PT_SR6(%r16)
	STREG	%r19,PT_SR7(%r16)

ENTRY(intr_return)
	/* check for reschedule */
	mfctl	%cr30,%r1
	LDREG	TASK_TI_FLAGS(%r1),%r19	/* sched.h: TIF_NEED_RESCHED */
	bb,<,n	%r19,31-TIF_NEED_RESCHED,intr_do_resched /* forward */

	.import do_notify_resume,code
intr_check_sig:
	/* As above */
	mfctl	%cr30,%r1
	LDREG	TASK_TI_FLAGS(%r1),%r19
	ldi	(_TIF_USER_WORK_MASK & ~_TIF_NEED_RESCHED), %r20
	and,COND(<>)	%r19, %r20, %r0
	b,n	intr_restore	/* skip past if we've nothing to do */

	/* This check is critical to having LWS
	 * working. The IASQ is zero on the gateway
	 * page and we cannot deliver any signals until
	 * we get off the gateway page.
	 *
	 * Only do signals if we are returning to user space
	 */
	LDREG	PT_IASQ0(%r16), %r20
	cmpib,COND(=),n LINUX_GATEWAY_SPACE, %r20, intr_restore /* forward */
	LDREG	PT_IASQ1(%r16), %r20
	cmpib,COND(=),n LINUX_GATEWAY_SPACE, %r20, intr_restore /* forward */

	copy	%r0, %r25			/* long in_syscall = 0 */
#ifdef CONFIG_64BIT
	ldo	-16(%r30),%r29			/* Reference param save area */
#endif

	/* NOTE: We need to enable interrupts if we have to deliver
	 * signals. We used to do this earlier but it caused kernel
	 * stack overflows. */
	ssm	PSW_SM_I, %r0

	BL	do_notify_resume,%r2
	copy	%r16, %r26			/* struct pt_regs *regs */

	b,n	intr_check_sig

intr_restore:
	copy		%r16,%r29
	ldo		PT_FR31(%r29),%r1
	rest_fp		%r1
	rest_general	%r29

	/* inverse of virt_map */
	pcxt_ssm_bug
	rsm		PSW_SM_QUIET,%r0	/* prepare for rfi */
	tophys_r1	%r29

	/* Restore space id's and special cr's from PT_REGS
	 * structure pointed to by r29
	 */
	rest_specials	%r29

	/* IMPORTANT: rest_stack restores r29 last (we are using it)!
	 * It also restores r1 and r30.
	 */
	rest_stack

	rfi
	nop

#ifndef CONFIG_PREEMPTION
# define intr_do_preempt	intr_restore
#endif /* !CONFIG_PREEMPTION */

	.import schedule,code
intr_do_resched:
	/* Only call schedule on return to userspace. If we're returning
	 * to kernel space, we may schedule if CONFIG_PREEMPTION, otherwise
	 * we jump back to intr_restore.
	 */
	LDREG	PT_IASQ0(%r16), %r20
	cmpib,COND(=)	0, %r20, intr_do_preempt
	nop
	LDREG	PT_IASQ1(%r16), %r20
	cmpib,COND(=)	0, %r20, intr_do_preempt
	nop

	/* NOTE: We need to enable interrupts if we schedule.  We used
	 * to do this earlier but it caused kernel stack overflows. */
	ssm	PSW_SM_I, %r0

#ifdef CONFIG_64BIT
	ldo	-16(%r30),%r29		/* Reference param save area */
#endif

	ldil	L%intr_check_sig, %r2
#ifndef CONFIG_64BIT
	b	schedule
#else
	load32	schedule, %r20
	bv	%r0(%r20)
#endif
	ldo	R%intr_check_sig(%r2), %r2

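	/* Aside (illustrative): the ldil L%sym / ldo R%sym(reg) pair above
	 * is the standard PA-RISC way to build a full 32-bit address in
	 * two instructions: L% yields the left (upper) 21 bits of sym and
	 * R% the right (lower) 11 bits, so reg == sym afterwards. */
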
	/* preempt the current task on returning to kernel
	 * mode from an interrupt, iff need_resched is set,
	 * and preempt_count is 0. Otherwise, we continue on
	 * our merry way back to the current running task.
	 */
#ifdef CONFIG_PREEMPTION
	.import preempt_schedule_irq,code
intr_do_preempt:
	rsm	PSW_SM_I, %r0		/* disable interrupts */

	/* current_thread_info()->preempt_count */
	mfctl	%cr30, %r1
	ldw	TI_PRE_COUNT(%r1), %r19
	cmpib,<>	0, %r19, intr_restore	/* if preempt_count > 0 */
	nop				/* prev insn branched backwards */

	/* check if we interrupted a critical path */
	LDREG	PT_PSW(%r16), %r20
	bb,<,n	%r20, 31 - PSW_SM_I, intr_restore
	nop

	/* ssm PSW_SM_I done later in intr_restore */
#ifdef CONFIG_MLONGCALLS
	ldil	L%intr_restore, %r2
	load32	preempt_schedule_irq, %r1
	bv	%r0(%r1)
	ldo	R%intr_restore(%r2), %r2
#else
	ldil	L%intr_restore, %r1
	BL	preempt_schedule_irq, %r2
	ldo	R%intr_restore(%r1), %r2
#endif
#endif /* CONFIG_PREEMPTION */

	/*
	 * External interrupts.
	 */

intr_extint:
	cmpib,COND(=),n 0,%r16,1f

	get_stack_use_cr30
	b,n 2f

1:
	get_stack_use_r30
2:
	save_specials	%r29
	virt_map
	save_general	%r29

	ldo	PT_FR0(%r29), %r24
	save_fp	%r24

	loadgp

	copy	%r29, %r26	/* arg0 is pt_regs */
	copy	%r29, %r16	/* save pt_regs */

	ldil	L%intr_return, %r2

#ifdef CONFIG_64BIT
	ldo	-16(%r30),%r29	/* Reference param save area */
#endif

	b	do_cpu_irq_mask
	ldo	R%intr_return(%r2), %r2	/* return to intr_return, not here */
ENDPROC_CFI(syscall_exit_rfi)


	/* Generic interruptions (illegal insn, unaligned, page fault, etc) */

ENTRY_CFI(intr_save)		/* for os_hpmc */
	mfsp	%sr7,%r16
	cmpib,COND(=),n 0,%r16,1f
	get_stack_use_cr30
	b	2f
	copy	%r8,%r26

1:
	get_stack_use_r30
	copy	%r8,%r26

2:
	save_specials	%r29

	/* If this trap is an itlb miss, skip saving/adjusting isr/ior */
	cmpib,COND(=),n	PARISC_ITLB_TRAP,%r26,skip_save_ior


	mfctl	%isr, %r16
	nop		/* serialize mfctl on PA 2.0 to avoid 4 cycle penalty */
	mfctl	%ior, %r17


#ifdef CONFIG_64BIT
	/*
	 * If the interrupted code was running with W bit off (32 bit),
	 * clear the b bits (bits 0 & 1) in the ior.
	 * save_specials left ipsw value in r8 for us to test.
	 */
	extrd,u,*<>	%r8,PSW_W_BIT,1,%r0
	depdi		0,1,2,%r17

	/* adjust isr/ior: get high bits from isr and deposit in ior */
	space_adjust	%r16,%r17,%r1
#endif
	STREG	%r16, PT_ISR(%r29)
	STREG	%r17, PT_IOR(%r29)

#if 0 && defined(CONFIG_64BIT)
	/* Revisit when we have 64-bit code above 4Gb */
	b,n		intr_save2

skip_save_ior:
	/* We have an itlb miss, and when executing code above 4 Gb on ILP64, we
	 * need to adjust iasq/iaoq here in the same way we adjusted isr/ior
	 * above.
	 */
	extrd,u,*	%r8,PSW_W_BIT,1,%r1
	cmpib,COND(=),n	1,%r1,intr_save2
	LDREG		PT_IASQ0(%r29), %r16
	LDREG		PT_IAOQ0(%r29), %r17
	/* adjust iasq/iaoq */
	space_adjust	%r16,%r17,%r1
	STREG	%r16, PT_IASQ0(%r29)
	STREG	%r17, PT_IAOQ0(%r29)
#else
skip_save_ior:
#endif

intr_save2:
	virt_map
	save_general	%r29

	ldo	PT_FR0(%r29), %r25
	save_fp	%r25

	loadgp

	copy	%r29, %r25	/* arg1 is pt_regs */
#ifdef CONFIG_64BIT
	ldo	-16(%r30),%r29	/* Reference param save area */
#endif

	ldil	L%intr_check_sig, %r2
	copy	%r25, %r16	/* save pt_regs */

	b	handle_interruption
	ldo	R%intr_check_sig(%r2), %r2
ENDPROC_CFI(intr_save)


	/*
	 * Note for all tlb miss handlers:
	 *
	 * cr24 contains a pointer to the kernel address space
	 * page directory.
	 *
	 * cr25 contains a pointer to the current user address
	 * space page directory.
	 *
	 * sr3 will contain the space id of the user address space
	 * of the current running thread while that thread is
	 * running in the kernel.
	 */

	/*
	 * register number allocations.  Note that these are all
	 * in the shadowed registers
	 */

	t0 = r1		/* temporary register 0 */
	va = r8		/* virtual address for which the trap occurred */
	t1 = r9		/* temporary register 1 */
	pte  = r16	/* pte/phys page # */
	prot = r17	/* prot bits */
	spc  = r24	/* space for which the trap occurred */
	ptp = r25	/* page directory/page table pointer */

#ifdef CONFIG_64BIT

dtlb_miss_20w:
	space_adjust	spc,va,t0
	get_pgd		spc,ptp
	space_check	spc,t0,dtlb_fault

	L3_ptep		ptp,pte,t0,va,dtlb_check_alias_20w

	ptl_lock	spc,ptp,pte,t0,t1,dtlb_check_alias_20w
	update_accessed	ptp,pte,t0,t1

	make_insert_tlb	spc,pte,prot,t1

	idtlbt		pte,prot

	ptl_unlock1	spc,t0
	rfir
	nop

dtlb_check_alias_20w:
	do_alias	spc,t0,t1,va,pte,prot,dtlb_fault,20

	idtlbt		pte,prot

	rfir
	nop

nadtlb_miss_20w:
	space_adjust	spc,va,t0
	get_pgd		spc,ptp
	space_check	spc,t0,nadtlb_fault

	L3_ptep		ptp,pte,t0,va,nadtlb_check_alias_20w

	ptl_lock	spc,ptp,pte,t0,t1,nadtlb_check_alias_20w
	update_accessed	ptp,pte,t0,t1

	make_insert_tlb	spc,pte,prot,t1

	idtlbt		pte,prot

	ptl_unlock1	spc,t0
	rfir
	nop

nadtlb_check_alias_20w:
	do_alias	spc,t0,t1,va,pte,prot,nadtlb_emulate,20

	idtlbt		pte,prot

	rfir
	nop

#else

dtlb_miss_11:
	get_pgd		spc,ptp

	space_check	spc,t0,dtlb_fault

	L2_ptep		ptp,pte,t0,va,dtlb_check_alias_11

	ptl_lock	spc,ptp,pte,t0,t1,dtlb_check_alias_11
	update_accessed	ptp,pte,t0,t1

	make_insert_tlb_11	spc,pte,prot

	mfsp		%sr1,t1  /* Save sr1 so we can use it in tlb inserts */
	mtsp		spc,%sr1

	idtlba		pte,(%sr1,va)
	idtlbp		prot,(%sr1,va)

	mtsp		t1, %sr1	/* Restore sr1 */

	ptl_unlock1	spc,t0
	rfir
	nop

dtlb_check_alias_11:
	do_alias	spc,t0,t1,va,pte,prot,dtlb_fault,11

	idtlba		pte,(va)
	idtlbp		prot,(va)

	rfir
	nop

nadtlb_miss_11:
	get_pgd		spc,ptp

	space_check	spc,t0,nadtlb_fault

	L2_ptep		ptp,pte,t0,va,nadtlb_check_alias_11

	ptl_lock	spc,ptp,pte,t0,t1,nadtlb_check_alias_11
	update_accessed	ptp,pte,t0,t1

	make_insert_tlb_11	spc,pte,prot

	mfsp		%sr1,t1  /* Save sr1 so we can use it in tlb inserts */
	mtsp		spc,%sr1

	idtlba		pte,(%sr1,va)
	idtlbp		prot,(%sr1,va)

	mtsp		t1, %sr1	/* Restore sr1 */

	ptl_unlock1	spc,t0
	rfir
	nop

nadtlb_check_alias_11:
	do_alias	spc,t0,t1,va,pte,prot,nadtlb_emulate,11

	idtlba		pte,(va)
	idtlbp		prot,(va)

	rfir
	nop

dtlb_miss_20:
	space_adjust	spc,va,t0
	get_pgd		spc,ptp
	space_check	spc,t0,dtlb_fault

	L2_ptep		ptp,pte,t0,va,dtlb_check_alias_20

	ptl_lock	spc,ptp,pte,t0,t1,dtlb_check_alias_20
	update_accessed	ptp,pte,t0,t1

	make_insert_tlb	spc,pte,prot,t1

	f_extend	pte,t1

	idtlbt		pte,prot

	ptl_unlock1	spc,t0
	rfir
	nop

dtlb_check_alias_20:
	do_alias	spc,t0,t1,va,pte,prot,dtlb_fault,20

	idtlbt		pte,prot

	rfir
	nop

nadtlb_miss_20:
	get_pgd		spc,ptp

	space_check	spc,t0,nadtlb_fault

	L2_ptep		ptp,pte,t0,va,nadtlb_check_alias_20

	ptl_lock	spc,ptp,pte,t0,t1,nadtlb_check_alias_20
	update_accessed	ptp,pte,t0,t1

	make_insert_tlb	spc,pte,prot,t1

	f_extend	pte,t1

	idtlbt		pte,prot

	ptl_unlock1	spc,t0
	rfir
	nop

nadtlb_check_alias_20:
	do_alias	spc,t0,t1,va,pte,prot,nadtlb_emulate,20

	idtlbt		pte,prot

	rfir
	nop

#endif

nadtlb_emulate:

	/*
	 * Non-access misses can be caused by fdc,fic,pdc,lpa,probe and
	 * probei instructions. The kernel no longer faults doing flushes.
	 * Use of lpa and probe instructions is rare. Given the issue
	 * with shadow registers, we defer everything to the "slow" path.
	 */
	b,n		nadtlb_fault

#ifdef CONFIG_64BIT
itlb_miss_20w:

	/*
	 * I miss is a little different, since we allow users to fault
	 * on the gateway page which is in the kernel address space.
	 */

	space_adjust	spc,va,t0
	get_pgd		spc,ptp
	space_check	spc,t0,itlb_fault

	L3_ptep		ptp,pte,t0,va,itlb_fault

	ptl_lock	spc,ptp,pte,t0,t1,itlb_fault
	update_accessed	ptp,pte,t0,t1

	make_insert_tlb	spc,pte,prot,t1

	iitlbt		pte,prot

	ptl_unlock1	spc,t0
	rfir
	nop

naitlb_miss_20w:

	/*
	 * I miss is a little different, since we allow users to fault
	 * on the gateway page which is in the kernel address space.
	 */

	space_adjust	spc,va,t0
	get_pgd		spc,ptp
	space_check	spc,t0,naitlb_fault

	L3_ptep		ptp,pte,t0,va,naitlb_check_alias_20w

	ptl_lock	spc,ptp,pte,t0,t1,naitlb_check_alias_20w
	update_accessed	ptp,pte,t0,t1

	make_insert_tlb	spc,pte,prot,t1

	iitlbt		pte,prot

	ptl_unlock1	spc,t0
	rfir
	nop

naitlb_check_alias_20w:
	do_alias	spc,t0,t1,va,pte,prot,naitlb_fault,20

	iitlbt		pte,prot

	rfir
	nop

#else

itlb_miss_11:
	get_pgd		spc,ptp

	space_check	spc,t0,itlb_fault

	L2_ptep		ptp,pte,t0,va,itlb_fault

	ptl_lock	spc,ptp,pte,t0,t1,itlb_fault
	update_accessed	ptp,pte,t0,t1

	make_insert_tlb_11	spc,pte,prot

	mfsp		%sr1,t1  /* Save sr1 so we can use it in tlb inserts */
	mtsp		spc,%sr1

	iitlba		pte,(%sr1,va)
	iitlbp		prot,(%sr1,va)

	mtsp		t1, %sr1	/* Restore sr1 */

	ptl_unlock1	spc,t0
	rfir
	nop

naitlb_miss_11:
	get_pgd		spc,ptp

	space_check	spc,t0,naitlb_fault

	L2_ptep		ptp,pte,t0,va,naitlb_check_alias_11

	ptl_lock	spc,ptp,pte,t0,t1,naitlb_check_alias_11
	update_accessed	ptp,pte,t0,t1

	make_insert_tlb_11	spc,pte,prot

	mfsp		%sr1,t1  /* Save sr1 so we can use it in tlb inserts */
	mtsp		spc,%sr1

	iitlba		pte,(%sr1,va)
	iitlbp		prot,(%sr1,va)

	mtsp		t1, %sr1	/* Restore sr1 */

	ptl_unlock1	spc,t0
	rfir
	nop

naitlb_check_alias_11:
	do_alias	spc,t0,t1,va,pte,prot,itlb_fault,11

	iitlba		pte,(%sr0, va)
	iitlbp		prot,(%sr0, va)

	rfir
	nop


itlb_miss_20:
	get_pgd		spc,ptp

	space_check	spc,t0,itlb_fault

	L2_ptep		ptp,pte,t0,va,itlb_fault

	ptl_lock	spc,ptp,pte,t0,t1,itlb_fault
	update_accessed	ptp,pte,t0,t1

	make_insert_tlb	spc,pte,prot,t1

	f_extend	pte,t1

	iitlbt		pte,prot

	ptl_unlock1	spc,t0
	rfir
	nop

naitlb_miss_20:
	get_pgd		spc,ptp

	space_check	spc,t0,naitlb_fault

	L2_ptep		ptp,pte,t0,va,naitlb_check_alias_20

	ptl_lock	spc,ptp,pte,t0,t1,naitlb_check_alias_20
	update_accessed	ptp,pte,t0,t1

	make_insert_tlb	spc,pte,prot,t1

	f_extend	pte,t1

	iitlbt		pte,prot

	ptl_unlock1	spc,t0
	rfir
	nop

naitlb_check_alias_20:
	do_alias	spc,t0,t1,va,pte,prot,naitlb_fault,20

	iitlbt		pte,prot

	rfir
	nop

#endif

#ifdef CONFIG_64BIT

dbit_trap_20w:
	space_adjust	spc,va,t0
	get_pgd		spc,ptp
	space_check	spc,t0,dbit_fault

	L3_ptep		ptp,pte,t0,va,dbit_fault

	ptl_lock	spc,ptp,pte,t0,t1,dbit_fault
	update_dirty	ptp,pte,t1

	make_insert_tlb	spc,pte,prot,t1

	idtlbt		pte,prot

	ptl_unlock0	spc,t0
	rfir
	nop
#else

dbit_trap_11:

	get_pgd		spc,ptp

	space_check	spc,t0,dbit_fault

	L2_ptep		ptp,pte,t0,va,dbit_fault

	ptl_lock	spc,ptp,pte,t0,t1,dbit_fault
	update_dirty	ptp,pte,t1

	make_insert_tlb_11	spc,pte,prot

	mfsp		%sr1,t1  /* Save sr1 so we can use it in tlb inserts */
	mtsp		spc,%sr1

	idtlba		pte,(%sr1,va)
	idtlbp		prot,(%sr1,va)

	mtsp		t1, %sr1	/* Restore sr1 */

	ptl_unlock0	spc,t0
	rfir
	nop

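	/* Aside (illustrative): the PA 1.1 idtlba/idtlbp forms above take
	 * a (space,offset) operand pair, so each 1.1 handler briefly parks
	 * the faulting space id in %sr1, performs the two inserts, and
	 * restores the old %sr1 before the rfir. */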

dbit_trap_20:
	get_pgd		spc,ptp

	space_check	spc,t0,dbit_fault

	L2_ptep		ptp,pte,t0,va,dbit_fault

	ptl_lock	spc,ptp,pte,t0,t1,dbit_fault
	update_dirty	ptp,pte,t1

	make_insert_tlb	spc,pte,prot,t1

	f_extend	pte,t1

	idtlbt		pte,prot

	ptl_unlock0	spc,t0
	rfir
	nop
#endif

	.import handle_interruption,code

kernel_bad_space:
	b	intr_save
	ldi	31,%r8	/* Use an unused code */

dbit_fault:
	b	intr_save
	ldi	20,%r8

itlb_fault:
	b	intr_save
	ldi	PARISC_ITLB_TRAP,%r8

nadtlb_fault:
	b	intr_save
	ldi	17,%r8

naitlb_fault:
	b	intr_save
	ldi	16,%r8

dtlb_fault:
	b	intr_save
	ldi	15,%r8

	/* Register saving semantics for system calls:

	   %r1		   clobbered by system call macro in userspace
	   %r2		   saved in PT_REGS by gateway page
	   %r3  - %r18	   preserved by C code (saved by signal code)
	   %r19 - %r20	   saved in PT_REGS by gateway page
	   %r21 - %r22	   non-standard syscall args
			   stored in kernel stack by gateway page
	   %r23 - %r26	   arg3-arg0, saved in PT_REGS by gateway page
	   %r27 - %r30	   saved in PT_REGS by gateway page
	   %r31		   syscall return pointer
	 */

	/* Floating point registers (FIXME: what do we do with these?)

	   %fr0  - %fr3	   status/exception, not preserved
	   %fr4  - %fr7	   arguments
	   %fr8  - %fr11   not preserved by C code
	   %fr12 - %fr21   preserved by C code
	   %fr22 - %fr31   not preserved by C code
	 */

	.macro	reg_save regs
	STREG	%r3, PT_GR3(\regs)
	STREG	%r4, PT_GR4(\regs)
	STREG	%r5, PT_GR5(\regs)
	STREG	%r6, PT_GR6(\regs)
	STREG	%r7, PT_GR7(\regs)
	STREG	%r8, PT_GR8(\regs)
	STREG	%r9, PT_GR9(\regs)
	STREG	%r10,PT_GR10(\regs)
	STREG	%r11,PT_GR11(\regs)
	STREG	%r12,PT_GR12(\regs)
	STREG	%r13,PT_GR13(\regs)
	STREG	%r14,PT_GR14(\regs)
	STREG	%r15,PT_GR15(\regs)
	STREG	%r16,PT_GR16(\regs)
	STREG	%r17,PT_GR17(\regs)
	STREG	%r18,PT_GR18(\regs)
	.endm

	.macro	reg_restore regs
	LDREG	PT_GR3(\regs), %r3
	LDREG	PT_GR4(\regs), %r4
	LDREG	PT_GR5(\regs), %r5
	LDREG	PT_GR6(\regs), %r6
	LDREG	PT_GR7(\regs), %r7
	LDREG	PT_GR8(\regs), %r8
	LDREG	PT_GR9(\regs), %r9
	LDREG	PT_GR10(\regs),%r10
	LDREG	PT_GR11(\regs),%r11
	LDREG	PT_GR12(\regs),%r12
	LDREG	PT_GR13(\regs),%r13
	LDREG	PT_GR14(\regs),%r14
	LDREG	PT_GR15(\regs),%r15
	LDREG	PT_GR16(\regs),%r16
	LDREG	PT_GR17(\regs),%r17
	LDREG	PT_GR18(\regs),%r18
	.endm

	.macro	fork_like name
ENTRY_CFI(sys_\name\()_wrapper)
	mfctl	%cr30,%r1
	ldo	TASK_REGS(%r1),%r1
	reg_save %r1
	mfctl	%cr27, %r28
	ldil	L%sys_\name, %r31
	be	R%sys_\name(%sr4,%r31)
	STREG	%r28, PT_CR27(%r1)
ENDPROC_CFI(sys_\name\()_wrapper)
	.endm

fork_like clone
fork_like clone3
fork_like fork
fork_like vfork

	/* Set the return value for the child */
ENTRY(child_return)
	BL	schedule_tail, %r2
	nop
finish_child_return:
	mfctl	%cr30,%r1
	ldo	TASK_REGS(%r1),%r1	 /* get pt regs */

	LDREG	PT_CR27(%r1), %r3
	mtctl	%r3, %cr27
	reg_restore %r1
	b	syscall_exit
	copy	%r0,%r28
END(child_return)

ENTRY_CFI(sys_rt_sigreturn_wrapper)
	mfctl	%cr30,%r26
	ldo	TASK_REGS(%r26),%r26	/* get pt regs */
	/* Don't save regs, we are going to restore them from sigcontext. */
	STREG	%r2, -RP_OFFSET(%r30)
#ifdef CONFIG_64BIT
	ldo	FRAME_SIZE(%r30), %r30
	BL	sys_rt_sigreturn,%r2
	ldo	-16(%r30),%r29		/* Reference param save area */
#else
	BL	sys_rt_sigreturn,%r2
	ldo	FRAME_SIZE(%r30), %r30
#endif

	ldo	-FRAME_SIZE(%r30), %r30
	LDREG	-RP_OFFSET(%r30), %r2

	/* FIXME: I think we need to restore a few more things here. */
	mfctl	%cr30,%r1
	ldo	TASK_REGS(%r1),%r1	/* get pt regs */
	reg_restore %r1

	/* If the signal was received while the process was blocked on a
	 * syscall, then r2 will take us to syscall_exit; otherwise r2 will
	 * take us to syscall_exit_rfi and on to intr_return.
	 */
	bv	%r0(%r2)
	LDREG	PT_GR28(%r1),%r28  /* reload original r28 for syscall_exit */
ENDPROC_CFI(sys_rt_sigreturn_wrapper)

ENTRY(syscall_exit)
	/* NOTE: Not all syscalls exit this way.  rt_sigreturn will exit
	 * via syscall_exit_rfi if the signal was received while the process
	 * was running.
	 */

	/* save return value now */
	mfctl	%cr30, %r1
	STREG	%r28,TASK_PT_GR28(%r1)

	/* Seems to me that dp could be wrong here, if the syscall involved
	 * calling a module, and nothing got round to restoring dp on return.
	 */
	loadgp

syscall_check_resched:

	/* check for reschedule */
	mfctl	%cr30,%r19
	LDREG	TASK_TI_FLAGS(%r19),%r19	/* long */
	bb,<,n	%r19, 31-TIF_NEED_RESCHED, syscall_do_resched /* forward */

	.import do_signal,code
syscall_check_sig:
	mfctl	%cr30,%r19
	LDREG	TASK_TI_FLAGS(%r19),%r19
	ldi	(_TIF_USER_WORK_MASK & ~_TIF_NEED_RESCHED), %r26
	and,COND(<>)	%r19, %r26, %r0
	b,n	syscall_restore	/* skip past if we've nothing to do */

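	/* Aside (illustrative): the and,COND(<>) / b,n pairing above is a
	 * common nullification idiom: the AND targets %r0 (no result kept)
	 * and nullifies the following branch when any work bit remains
	 * set, so the branch to syscall_restore is taken only when no
	 * user work is pending. */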
syscall_do_signal:
	/* Save callee-save registers (for sigcontext).
	 * FIXME: After this point the process structure should be
	 * consistent with all the relevant state of the process
	 * before the syscall.  We need to verify this.
	 */
	mfctl	%cr30,%r1
	ldo	TASK_REGS(%r1), %r26		/* struct pt_regs *regs */
	reg_save %r26

#ifdef CONFIG_64BIT
	ldo	-16(%r30),%r29			/* Reference param save area */
#endif

	BL	do_notify_resume,%r2
	ldi	1, %r25				/* long in_syscall = 1 */

	mfctl	%cr30,%r1
	ldo	TASK_REGS(%r1), %r20		/* reload pt_regs */
	reg_restore %r20

	b,n	syscall_check_sig

syscall_restore:
	mfctl	%cr30,%r1

	/* Are we being ptraced? */
	LDREG	TASK_TI_FLAGS(%r1),%r19
	ldi	_TIF_SINGLESTEP|_TIF_BLOCKSTEP,%r2
	and,COND(=)	%r19,%r2,%r0
	b,n	syscall_restore_rfi

	ldo	TASK_PT_FR31(%r1),%r19		/* reload fpregs */
	rest_fp	%r19

	LDREG	TASK_PT_SAR(%r1),%r19		/* restore SAR */
	mtsar	%r19

	LDREG	TASK_PT_GR2(%r1),%r2		/* restore user rp */
	LDREG	TASK_PT_GR19(%r1),%r19
	LDREG	TASK_PT_GR20(%r1),%r20
	LDREG	TASK_PT_GR21(%r1),%r21
	LDREG	TASK_PT_GR22(%r1),%r22
	LDREG	TASK_PT_GR23(%r1),%r23
	LDREG	TASK_PT_GR24(%r1),%r24
	LDREG	TASK_PT_GR25(%r1),%r25
	LDREG	TASK_PT_GR26(%r1),%r26
	LDREG	TASK_PT_GR27(%r1),%r27		/* restore user dp */
	LDREG	TASK_PT_GR28(%r1),%r28		/* syscall return value */
	LDREG	TASK_PT_GR29(%r1),%r29
	LDREG	TASK_PT_GR31(%r1),%r31		/* restore syscall rp */

	/* NOTE: We use rsm/ssm pair to make this operation atomic */
	LDREG	TASK_PT_GR30(%r1),%r1		/* Get user sp */
	rsm	PSW_SM_I, %r0
	copy	%r1,%r30			/* Restore user sp */
	mfsp	%sr3,%r1			/* Get user space id */
	mtsp	%r1,%sr7			/* Restore sr7 */
	ssm	PSW_SM_I, %r0

	/* Set sr2 to zero for userspace syscalls to work. */
	mtsp	%r0,%sr2
	mtsp	%r1,%sr4			/* Restore sr4 */
	mtsp	%r1,%sr5			/* Restore sr5 */
	mtsp	%r1,%sr6			/* Restore sr6 */

	depi	PRIV_USER,31,2,%r31	/* ensure return to user mode. */

#ifdef CONFIG_64BIT
	/* decide whether to reset the wide mode bit
	 *
	 * For a syscall, the W bit is stored in the lowest bit
	 * of sp.  Extract it and reset W if it is zero */
	extrd,u,*<>	%r30,63,1,%r1
	rsm	PSW_SM_W, %r0
	/* now reset the lowest bit of sp if it was set */
	xor	%r30,%r1,%r30
#endif
	be,n	0(%sr3,%r31)			/* return to user space */

	/* We have to return via an RFI, so that PSW T and R bits can be set
	 * appropriately.
	 * This sets up pt_regs so we can return via intr_restore, which is not
	 * the most efficient way of doing things, but it works.
	 */
syscall_restore_rfi:
	ldo	-1(%r0),%r2			/* Set recovery cntr to -1 */
	mtctl	%r2,%cr0			/* for immediate trap */
	LDREG	TASK_PT_PSW(%r1),%r2		/* Get old PSW */
	ldi	0x0b,%r20			/* Create new PSW */
	depi	-1,13,1,%r20			/* C, Q, D, and I bits */

	/* The values of SINGLESTEP_BIT and BLOCKSTEP_BIT are
	 * set in thread_info.h and converted to PA bitmap
	 * numbers in asm-offsets.c */

	/* if ((%r19.SINGLESTEP_BIT)) { %r20.27=1} */
	extru,=	%r19,TIF_SINGLESTEP_PA_BIT,1,%r0
	depi	-1,27,1,%r20			/* R bit */

	/* if ((%r19.BLOCKSTEP_BIT)) { %r20.7=1} */
	extru,=	%r19,TIF_BLOCKSTEP_PA_BIT,1,%r0
	depi	-1,7,1,%r20			/* T bit */

	STREG	%r20,TASK_PT_PSW(%r1)

	/* Always store space registers, since sr3 can be changed (e.g. fork) */

	mfsp	%sr3,%r25
	STREG	%r25,TASK_PT_SR3(%r1)
	STREG	%r25,TASK_PT_SR4(%r1)
	STREG	%r25,TASK_PT_SR5(%r1)
	STREG	%r25,TASK_PT_SR6(%r1)
	STREG	%r25,TASK_PT_SR7(%r1)
	STREG	%r25,TASK_PT_IASQ0(%r1)
	STREG	%r25,TASK_PT_IASQ1(%r1)

	/* XXX W bit??? */
	/* Now if old D bit is clear, it means we didn't save all registers
	 * on syscall entry, so do that now.  This only happens on TRACEME
	 * calls, or if someone attached to us while we were on a syscall.
	 * We could make this more efficient by not saving r3-r18, but
	 * then we wouldn't be able to use the common intr_restore path.
	 * It is only for traced processes anyway, so performance is not
	 * an issue.
	 */
	bb,<	%r2,30,pt_regs_ok		/* Branch if D set */
	ldo	TASK_REGS(%r1),%r25
	reg_save %r25				/* Save r3 to r18 */

	/* Save the current sr */
	mfsp	%sr0,%r2
	STREG	%r2,TASK_PT_SR0(%r1)

	/* Save the scratch sr */
	mfsp	%sr1,%r2
	STREG	%r2,TASK_PT_SR1(%r1)

	/* sr2 should be set to zero for userspace syscalls */
	STREG	%r0,TASK_PT_SR2(%r1)

	LDREG	TASK_PT_GR31(%r1),%r2
	depi	PRIV_USER,31,2,%r2	/* ensure return to user mode. */
	STREG	%r2,TASK_PT_IAOQ0(%r1)
	ldo	4(%r2),%r2
	STREG	%r2,TASK_PT_IAOQ1(%r1)
	b	intr_restore
	copy	%r25,%r16

pt_regs_ok:
	LDREG	TASK_PT_IAOQ0(%r1),%r2
	depi	PRIV_USER,31,2,%r2	/* ensure return to user mode. */
	STREG	%r2,TASK_PT_IAOQ0(%r1)
	LDREG	TASK_PT_IAOQ1(%r1),%r2
	depi	PRIV_USER,31,2,%r2
	STREG	%r2,TASK_PT_IAOQ1(%r1)
	b	intr_restore
	copy	%r25,%r16

syscall_do_resched:
	load32	syscall_check_resched,%r2 /* if resched, we start over again */
	load32	schedule,%r19
	bv	%r0(%r19)		/* jumps to schedule() */
#ifdef CONFIG_64BIT
	ldo	-16(%r30),%r29		/* Reference param save area */
#else
	nop
#endif
END(syscall_exit)


#ifdef CONFIG_FUNCTION_TRACER

	.import ftrace_function_trampoline,code
	.align L1_CACHE_BYTES
ENTRY_CFI(mcount, caller)
_mcount:
	.export _mcount,data
	/*
	 * The 64bit mcount() function pointer needs 4 dwords, of which the
	 * first two are free.  We optimize it here and put 2 instructions for
	 * calling mcount(), and 2 instructions for ftrace_stub().  That way we
	 * have all on one L1 cacheline.
	 */
	ldi	0, %arg3
	b	ftrace_function_trampoline
	copy	%r3, %arg2	/* caller original %sp */
ftrace_stub:
	.globl ftrace_stub
	.type  ftrace_stub, @function
#ifdef CONFIG_64BIT
	bve	(%rp)
#else
	bv	%r0(%rp)
#endif
	nop
#ifdef CONFIG_64BIT
	.dword mcount
	.dword 0 /* code in head.S puts value of global gp here */
#endif
ENDPROC_CFI(mcount)

#ifdef CONFIG_DYNAMIC_FTRACE

#ifdef CONFIG_64BIT
#define FTRACE_FRAME_SIZE (2*FRAME_SIZE)
#else
#define FTRACE_FRAME_SIZE FRAME_SIZE
#endif
ENTRY_CFI(ftrace_caller, caller,frame=FTRACE_FRAME_SIZE,CALLS,SAVE_RP,SAVE_SP)
ftrace_caller:
	.global ftrace_caller

	STREG	%r3, -FTRACE_FRAME_SIZE+1*REG_SZ(%sp)
	ldo	-FTRACE_FRAME_SIZE(%sp), %r3
	STREG	%rp, -RP_OFFSET(%r3)

	/* Offset 0 is already allocated for %r1 */
	STREG	%r23, 2*REG_SZ(%r3)
	STREG	%r24, 3*REG_SZ(%r3)
	STREG	%r25, 4*REG_SZ(%r3)
	STREG	%r26, 5*REG_SZ(%r3)
	STREG	%r28, 6*REG_SZ(%r3)
	STREG	%r29, 7*REG_SZ(%r3)
#ifdef CONFIG_64BIT
	STREG	%r19, 8*REG_SZ(%r3)
	STREG	%r20, 9*REG_SZ(%r3)
	STREG	%r21, 10*REG_SZ(%r3)
	STREG	%r22, 11*REG_SZ(%r3)
	STREG	%r27, 12*REG_SZ(%r3)
	STREG	%r31, 13*REG_SZ(%r3)
	loadgp
	ldo	-16(%sp),%r29
#endif
	LDREG	0(%r3), %r25
	copy	%rp, %r26
	ldo	-8(%r25), %r25
	ldi	0, %r23		/* no pt_regs */
	b,l	ftrace_function_trampoline, %rp
	copy	%r3, %r24

	LDREG	-RP_OFFSET(%r3), %rp
	LDREG	2*REG_SZ(%r3), %r23
	LDREG	3*REG_SZ(%r3), %r24
	LDREG	4*REG_SZ(%r3), %r25
	LDREG	5*REG_SZ(%r3), %r26
	LDREG	6*REG_SZ(%r3), %r28
	LDREG	7*REG_SZ(%r3), %r29
#ifdef CONFIG_64BIT
	LDREG	8*REG_SZ(%r3), %r19
	LDREG	9*REG_SZ(%r3), %r20
	LDREG	10*REG_SZ(%r3), %r21
	LDREG	11*REG_SZ(%r3), %r22
	LDREG	12*REG_SZ(%r3), %r27
	LDREG	13*REG_SZ(%r3), %r31
#endif
	LDREG	1*REG_SZ(%r3), %r3

	LDREGM	-FTRACE_FRAME_SIZE(%sp), %r1
	/* Adjust return point to jump back to beginning of traced function */
	ldo	-4(%r1), %r1
	bv,n	(%r1)

ENDPROC_CFI(ftrace_caller)

#ifdef CONFIG_HAVE_DYNAMIC_FTRACE_WITH_REGS
ENTRY_CFI(ftrace_regs_caller,caller,frame=FTRACE_FRAME_SIZE+PT_SZ_ALGN,
	CALLS,SAVE_RP,SAVE_SP)
ftrace_regs_caller:
	.global ftrace_regs_caller

	ldo	-FTRACE_FRAME_SIZE(%sp), %r1
	STREG	%rp, -RP_OFFSET(%r1)

	copy	%sp, %r1
	ldo	PT_SZ_ALGN(%sp), %sp

	STREG	%rp, PT_GR2(%r1)
	STREG	%r3, PT_GR3(%r1)
	STREG	%r4, PT_GR4(%r1)
	STREG	%r5, PT_GR5(%r1)
	STREG	%r6, PT_GR6(%r1)
	STREG	%r7, PT_GR7(%r1)
	STREG	%r8, PT_GR8(%r1)
	STREG	%r9, PT_GR9(%r1)
	STREG	%r10, PT_GR10(%r1)
	STREG	%r11, PT_GR11(%r1)
	STREG	%r12, PT_GR12(%r1)
	STREG	%r13, PT_GR13(%r1)
	STREG	%r14, PT_GR14(%r1)
	STREG	%r15, PT_GR15(%r1)
	STREG	%r16, PT_GR16(%r1)
	STREG	%r17, PT_GR17(%r1)
	STREG	%r18, PT_GR18(%r1)
	STREG	%r19, PT_GR19(%r1)
	STREG	%r20, PT_GR20(%r1)
	STREG	%r21, PT_GR21(%r1)
	STREG	%r22, PT_GR22(%r1)
	STREG	%r23, PT_GR23(%r1)
	STREG	%r24, PT_GR24(%r1)
	STREG	%r25, PT_GR25(%r1)
	STREG	%r26, PT_GR26(%r1)
	STREG	%r27, PT_GR27(%r1)
	STREG	%r28, PT_GR28(%r1)
	STREG	%r29, PT_GR29(%r1)
	STREG	%r30, PT_GR30(%r1)
	STREG	%r31, PT_GR31(%r1)
	mfctl	%cr11, %r26
	STREG	%r26, PT_SAR(%r1)

	copy	%rp, %r26
	LDREG	-FTRACE_FRAME_SIZE-PT_SZ_ALGN(%sp), %r25
	ldo	-8(%r25), %r25
	ldo	-FTRACE_FRAME_SIZE(%r1), %arg2
	b,l	ftrace_function_trampoline, %rp
	copy	%r1, %arg3 /* struct pt_regs */

	ldo	-PT_SZ_ALGN(%sp), %r1

	LDREG	PT_SAR(%r1), %rp
	mtctl	%rp, %cr11

	LDREG	PT_GR2(%r1), %rp
	LDREG	PT_GR3(%r1), %r3
	LDREG	PT_GR4(%r1), %r4
	LDREG	PT_GR5(%r1), %r5
	LDREG	PT_GR6(%r1), %r6
	LDREG	PT_GR7(%r1), %r7
	LDREG	PT_GR8(%r1), %r8
	LDREG	PT_GR9(%r1), %r9
	LDREG	PT_GR10(%r1),%r10
	LDREG	PT_GR11(%r1),%r11
	LDREG	PT_GR12(%r1),%r12
	LDREG	PT_GR13(%r1),%r13
	LDREG	PT_GR14(%r1),%r14
	LDREG	PT_GR15(%r1),%r15
	LDREG	PT_GR16(%r1),%r16
	LDREG	PT_GR17(%r1),%r17
	LDREG	PT_GR18(%r1),%r18
	LDREG	PT_GR19(%r1),%r19
	LDREG	PT_GR20(%r1),%r20
	LDREG	PT_GR21(%r1),%r21
	LDREG	PT_GR22(%r1),%r22
	LDREG	PT_GR23(%r1),%r23
	LDREG	PT_GR24(%r1),%r24
	LDREG	PT_GR25(%r1),%r25
	LDREG	PT_GR26(%r1),%r26
	LDREG	PT_GR27(%r1),%r27
	LDREG	PT_GR28(%r1),%r28
	LDREG	PT_GR29(%r1),%r29
	LDREG	PT_GR30(%r1),%r30
	LDREG	PT_GR31(%r1),%r31

	ldo	-PT_SZ_ALGN(%sp), %sp
	LDREGM	-FTRACE_FRAME_SIZE(%sp), %r1
	/* Adjust return point to jump back to beginning of traced function */
	ldo	-4(%r1), %r1
	bv,n	(%r1)

ENDPROC_CFI(ftrace_regs_caller)

#endif
#endif

#ifdef CONFIG_FUNCTION_GRAPH_TRACER
	.align 8
ENTRY_CFI(return_to_handler, caller,frame=FRAME_SIZE)
	.export parisc_return_to_handler,data
parisc_return_to_handler:
	copy %r3,%r1
	STREG %r0,-RP_OFFSET(%sp)	/* store 0 as %rp */
	copy %sp,%r3
	STREGM %r1,FRAME_SIZE(%sp)
	STREG %ret0,8(%r3)
	STREG %ret1,16(%r3)

#ifdef CONFIG_64BIT
	loadgp
#endif

	/* call ftrace_return_to_handler(0) */
	.import ftrace_return_to_handler,code
	load32 ftrace_return_to_handler,%ret0
	load32 .Lftrace_ret,%r2
#ifdef CONFIG_64BIT
	ldo -16(%sp),%ret1		/* Reference param save area */
	bve	(%ret0)
#else
	bv	%r0(%ret0)
#endif
	ldi 0,%r26
.Lftrace_ret:
	copy %ret0,%rp

	/* restore original return values */
	LDREG 8(%r3),%ret0
	LDREG 16(%r3),%ret1

	/* return from function */
#ifdef CONFIG_64BIT
	bve	(%rp)
#else
	bv	%r0(%rp)
#endif
	LDREGM -FRAME_SIZE(%sp),%r3
ENDPROC_CFI(return_to_handler)

#endif /* CONFIG_FUNCTION_GRAPH_TRACER */

#endif /* CONFIG_FUNCTION_TRACER */

#ifdef CONFIG_IRQSTACKS
/* void call_on_stack(unsigned long param1, void *func,
		      unsigned long new_stack) */
ENTRY_CFI(call_on_stack, FRAME=2*FRAME_SIZE,CALLS,SAVE_RP,SAVE_SP)
ENTRY(_call_on_stack)
	copy	%sp, %r1

	/* Regarding the HPPA calling conventions for function pointers,
	   we assume the PIC register is not changed across call.  For
	   CONFIG_64BIT, the argument pointer is left to point at the
	   argument region allocated for the call to call_on_stack. */

	/* Switch to new stack.  We allocate two frames.  */
	ldo	2*FRAME_SIZE(%arg2), %sp
# ifdef CONFIG_64BIT
	/* Save previous stack pointer and return pointer in frame marker */
	STREG	%rp, -FRAME_SIZE-RP_OFFSET(%sp)
	/* Calls always use function descriptor */
	LDREG	16(%arg1), %arg1
	bve,l	(%arg1), %rp
	STREG	%r1, -FRAME_SIZE-REG_SZ(%sp)
	LDREG	-FRAME_SIZE-RP_OFFSET(%sp), %rp
	bve	(%rp)
	LDREG	-FRAME_SIZE-REG_SZ(%sp), %sp
# else
	/* Save previous stack pointer and return pointer in frame marker */
	STREG	%r1, -FRAME_SIZE-REG_SZ(%sp)
	STREG	%rp, -FRAME_SIZE-RP_OFFSET(%sp)
	/* Calls use function descriptor if PLABEL bit is set */
	bb,>=,n	%arg1, 30, 1f
	depwi	0,31,2, %arg1
	LDREG	0(%arg1), %arg1
1:
	be,l	0(%sr4,%arg1), %sr0, %r31
	copy	%r31, %rp
	LDREG	-FRAME_SIZE-RP_OFFSET(%sp), %rp
	bv	(%rp)
	LDREG	-FRAME_SIZE-REG_SZ(%sp), %sp
# endif /* CONFIG_64BIT */
ENDPROC_CFI(call_on_stack)
#endif /* CONFIG_IRQSTACKS */

ENTRY_CFI(get_register)
	/*
	 * get_register is used by the non access tlb miss handlers to
	 * copy the value of the general register specified in r8 into
	 * r1. This routine can't be used for shadowed registers, since
	 * the rfir will restore the original value. So, for the shadowed
	 * registers we put a -1 into r1 to indicate that the register
	 * should not be used (the register being copied could also have
	 * a -1 in it, but that is OK, it just means that we will have
	 * to use the slow path instead).
	 */
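	/* Aside (illustrative): "blr %r8,%r0" below branches to
	 * PC + 8 + (%r8 << 3), i.e. it indexes a jump table whose slots
	 * are exactly 8 bytes: one bv back to the caller plus one
	 * copy/ldi in its delay slot per register. */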
	blr	%r8,%r0
	nop
	bv	%r0(%r25)    /* r0 */
	copy	%r0,%r1
	bv	%r0(%r25)    /* r1 - shadowed */
	ldi	-1,%r1
	bv	%r0(%r25)    /* r2 */
	copy	%r2,%r1
	bv	%r0(%r25)    /* r3 */
	copy	%r3,%r1
	bv	%r0(%r25)    /* r4 */
	copy	%r4,%r1
	bv	%r0(%r25)    /* r5 */
	copy	%r5,%r1
	bv	%r0(%r25)    /* r6 */
	copy	%r6,%r1
	bv	%r0(%r25)    /* r7 */
	copy	%r7,%r1
	bv	%r0(%r25)    /* r8 - shadowed */
	ldi	-1,%r1
	bv	%r0(%r25)    /* r9 - shadowed */
	ldi	-1,%r1
	bv	%r0(%r25)    /* r10 */
	copy	%r10,%r1
	bv	%r0(%r25)    /* r11 */
	copy	%r11,%r1
	bv	%r0(%r25)    /* r12 */
	copy	%r12,%r1
	bv	%r0(%r25)    /* r13 */
	copy	%r13,%r1
	bv	%r0(%r25)    /* r14 */
	copy	%r14,%r1
	bv	%r0(%r25)    /* r15 */
	copy	%r15,%r1
	bv	%r0(%r25)    /* r16 - shadowed */
	ldi	-1,%r1
	bv	%r0(%r25)    /* r17 - shadowed */
	ldi	-1,%r1
	bv	%r0(%r25)    /* r18 */
	copy	%r18,%r1
	bv	%r0(%r25)    /* r19 */
	copy	%r19,%r1
	bv	%r0(%r25)    /* r20 */
	copy	%r20,%r1
	bv	%r0(%r25)    /* r21 */
	copy	%r21,%r1
	bv	%r0(%r25)    /* r22 */
	copy	%r22,%r1
	bv	%r0(%r25)    /* r23 */
	copy	%r23,%r1
	bv	%r0(%r25)    /* r24 - shadowed */
	ldi	-1,%r1
	bv	%r0(%r25)    /* r25 - shadowed */
	ldi	-1,%r1
	bv	%r0(%r25)    /* r26 */
	copy	%r26,%r1
	bv	%r0(%r25)    /* r27 */
	copy	%r27,%r1
	bv	%r0(%r25)    /* r28 */
	copy	%r28,%r1
	bv	%r0(%r25)    /* r29 */
	copy	%r29,%r1
	bv	%r0(%r25)    /* r30 */
	copy	%r30,%r1
	bv	%r0(%r25)    /* r31 */
	copy	%r31,%r1
ENDPROC_CFI(get_register)


ENTRY_CFI(set_register)
	/*
	 * set_register is used by the non access tlb miss handlers to
	 * copy the value of r1 into the general register specified in
	 * r8.
	 */
	blr	%r8,%r0
	nop
	bv	%r0(%r25)    /* r0 (silly, but it is a place holder) */
	copy	%r1,%r0
	bv	%r0(%r25)    /* r1 */
	copy	%r1,%r1
	bv	%r0(%r25)    /* r2 */
	copy	%r1,%r2
	bv	%r0(%r25)    /* r3 */
	copy	%r1,%r3
	bv	%r0(%r25)    /* r4 */
	copy	%r1,%r4
	bv	%r0(%r25)    /* r5 */
	copy	%r1,%r5
	bv	%r0(%r25)    /* r6 */
	copy	%r1,%r6
	bv	%r0(%r25)    /* r7 */
	copy	%r1,%r7
	bv	%r0(%r25)    /* r8 */
	copy	%r1,%r8
	bv	%r0(%r25)    /* r9 */
	copy	%r1,%r9
	bv	%r0(%r25)    /* r10 */
	copy	%r1,%r10
	bv	%r0(%r25)    /* r11 */
	copy	%r1,%r11
	bv	%r0(%r25)    /* r12 */
	copy	%r1,%r12
	bv	%r0(%r25)    /* r13 */
	copy	%r1,%r13
	bv	%r0(%r25)    /* r14 */
	copy	%r1,%r14
	bv	%r0(%r25)    /* r15 */
	copy	%r1,%r15
	bv	%r0(%r25)    /* r16 */
	copy	%r1,%r16
	bv	%r0(%r25)    /* r17 */
	copy	%r1,%r17
	bv	%r0(%r25)    /* r18 */
	copy	%r1,%r18
	bv	%r0(%r25)    /* r19 */
	copy	%r1,%r19
	bv	%r0(%r25)    /* r20 */
	copy	%r1,%r20
	bv	%r0(%r25)    /* r21 */
	copy	%r1,%r21
	bv	%r0(%r25)    /* r22 */
	copy	%r1,%r22
	bv	%r0(%r25)    /* r23 */
	copy	%r1,%r23
	bv	%r0(%r25)    /* r24 */
	copy	%r1,%r24
	bv	%r0(%r25)    /* r25 */
	copy	%r1,%r25
	bv	%r0(%r25)    /* r26 */
	copy	%r1,%r26
	bv	%r0(%r25)    /* r27 */
	copy	%r1,%r27
	bv	%r0(%r25)    /* r28 */
	copy	%r1,%r28
	bv	%r0(%r25)    /* r29 */
	copy	%r1,%r29
	bv	%r0(%r25)    /* r30 */
	copy	%r1,%r30
	bv	%r0(%r25)    /* r31 */
	copy	%r1,%r31
ENDPROC_CFI(set_register)