/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
 * Linux/PA-RISC Project (http://www.parisc-linux.org/)
 *
 * kernel entry points (interruptions, system call wrappers)
 * Copyright (C) 1999,2000 Philipp Rumpf
 * Copyright (C) 1999 SuSE GmbH Nuernberg
 * Copyright (C) 2000 Hewlett-Packard (John Marvin)
 * Copyright (C) 1999 Hewlett-Packard (Frank Rowand)
 */

#include <asm/asm-offsets.h>

/* we have the following possibilities to act on an interruption:
 * - handle in assembly and use shadowed registers only
 * - save registers to kernel stack and handle in assembly or C */


#include <asm/psw.h>
#include <asm/cache.h>		/* for L1_CACHE_SHIFT */
#include <asm/assembly.h>	/* for LDREG/STREG defines */
#include <asm/signal.h>
#include <asm/unistd.h>
#include <asm/ldcw.h>
#include <asm/traps.h>
#include <asm/thread_info.h>
#include <asm/alternative.h>

#include <linux/linkage.h>
#include <linux/pgtable.h>

#ifdef CONFIG_64BIT
	.level 2.0w
#else
	.level 2.0
#endif

	/* Get aligned page_table_lock address for this mm from cr28/tr4 */
	.macro	get_ptl reg
	mfctl	%cr28,\reg
	.endm

	/* space_to_prot macro creates a prot id from a space id */

#if (SPACEID_SHIFT) == 0
	.macro	space_to_prot spc prot
	depd,z	\spc,62,31,\prot
	.endm
#else
	.macro	space_to_prot spc prot
	extrd,u	\spc,(64 - (SPACEID_SHIFT)),32,\prot
	.endm
#endif

	/* Switch to virtual mapping, trashing only %r1 */
	.macro	virt_map
	/* pcxt_ssm_bug */
	rsm	PSW_SM_I, %r0	/* barrier for "Relied upon Translation" */
	mtsp	%r0, %sr4
	mtsp	%r0, %sr5
	mtsp	%r0, %sr6
	tovirt_r1 %r29
	load32	KERNEL_PSW, %r1

	rsm	PSW_SM_QUIET,%r0	/* second "heavy weight" ctl op */
	mtctl	%r0, %cr17	/* Clear IIASQ tail */
	mtctl	%r0, %cr17	/* Clear IIASQ head */
	mtctl	%r1, %ipsw
	load32	4f, %r1
	mtctl	%r1, %cr18	/* Set IIAOQ tail */
	ldo	4(%r1), %r1
	mtctl	%r1, %cr18	/* Set IIAOQ head */
	rfir
	nop
4:
	.endm

	/*
	 * The "get_stack" macros are responsible for determining the
	 * kernel stack value.
	 *
	 *	If sr7 == 0
	 *	    Already using a kernel stack, so call the
	 *	    get_stack_use_r30 macro to push a pt_regs structure
	 *	    on the stack, and store registers there.
	 *	else
	 *	    Need to set up a kernel stack, so call the
	 *	    get_stack_use_cr30 macro to set up a pointer
	 *	    to the pt_regs structure contained within the
	 *	    task pointer pointed to by cr30. Set the stack
	 *	    pointer to point to the end of the task structure.
	 *
	 * Note that we use shadowed registers for temps until
	 * we can save %r26 and %r29. %r26 is used to preserve
	 * %r8 (a shadowed register) which temporarily contained
	 * either the fault type ("code") or the eirr. We need
	 * to use a non-shadowed register to carry the value over
	 * the rfir in virt_map. We use %r26 since this value winds
	 * up being passed as the argument to either do_cpu_irq_mask
	 * or handle_interruption. %r29 is used to hold a pointer
	 * to the register save area, and once again, it needs to
	 * be a non-shadowed register so that it survives the rfir.
	 *
	 * N.B. TASK_SZ_ALGN and PT_SZ_ALGN include space for a stack frame.
	 */
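	/*
	 * Illustrative only (not part of the original flow): the
	 * dispatch described above, as C-like pseudocode.  The real
	 * selection is done by the callers below (e.g. intr_extint,
	 * intr_save) testing %sr7:
	 *
	 *	if (sr7 == 0)			// already on a kernel stack
	 *		regs = get_stack_use_r30();   // push pt_regs on %r30
	 *	else				// coming from user space
	 *		regs = get_stack_use_cr30();  // pt_regs inside task (cr30)
	 */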

	.macro	get_stack_use_cr30

	/* we save the registers in the task struct */

	copy	%r30, %r17
	mfctl	%cr30, %r1
	ldo	THREAD_SZ_ALGN(%r1), %r30
	mtsp	%r0,%sr7
	mtsp	%r16,%sr3
	tophys	%r1,%r9
	LDREG	TI_TASK(%r9), %r1	/* thread_info -> task_struct */
	tophys	%r1,%r9
	ldo	TASK_REGS(%r9),%r9
	STREG	%r17,PT_GR30(%r9)
	STREG	%r29,PT_GR29(%r9)
	STREG	%r26,PT_GR26(%r9)
	STREG	%r16,PT_SR7(%r9)
	copy	%r9,%r29
	.endm

	.macro	get_stack_use_r30

	/* we put a struct pt_regs on the stack and save the registers there */

	tophys	%r30,%r9
	copy	%r30,%r1
	ldo	PT_SZ_ALGN(%r30),%r30
	STREG	%r1,PT_GR30(%r9)
	STREG	%r29,PT_GR29(%r9)
	STREG	%r26,PT_GR26(%r9)
	STREG	%r16,PT_SR7(%r9)
	copy	%r9,%r29
	.endm

	.macro	rest_stack
	LDREG	PT_GR1(%r29), %r1
	LDREG	PT_GR30(%r29),%r30
	LDREG	PT_GR29(%r29),%r29
	.endm

	/* default interruption handler
	 * (calls traps.c:handle_interruption) */
	.macro	def code
	b	intr_save
	ldi	\code, %r8
	.align	32
	.endm

	/* Interrupt interruption handler
	 * (calls irq.c:do_cpu_irq_mask) */
	.macro	extint code
	b	intr_extint
	mfsp	%sr7,%r16
	.align	32
	.endm

	.import	os_hpmc, code

	/* HPMC handler */
	.macro	hpmc code
	nop			/* must be a NOP, will be patched later */
	load32	PA(os_hpmc), %r3
	bv,n	0(%r3)
	nop
	.word	0		/* checksum (will be patched) */
	.word	0		/* address of handler */
	.word	0		/* length of handler */
	.endm

	/*
	 * Performance Note: Instructions will be moved up into
	 * this part of the code later on, once we are sure
	 * that the tlb miss handlers are close to final form.
	 */

	/* Register definitions for tlb miss handler macros */

	va  = r8	/* virtual address for which the trap occurred */
	spc = r24	/* space for which the trap occurred */

#ifndef CONFIG_64BIT

	/*
	 * itlb miss interruption handler (parisc 1.1 - 32 bit)
	 */

	.macro	itlb_11 code

	mfctl	%pcsq, spc
	b	itlb_miss_11
	mfctl	%pcoq, va

	.align	32
	.endm
#endif

	/*
	 * itlb miss interruption handler (parisc 2.0)
	 */

	.macro	itlb_20 code
	mfctl	%pcsq, spc
#ifdef CONFIG_64BIT
	b	itlb_miss_20w
#else
	b	itlb_miss_20
#endif
	mfctl	%pcoq, va

	.align	32
	.endm

#ifndef CONFIG_64BIT
	/*
	 * naitlb miss interruption handler (parisc 1.1 - 32 bit)
	 */

	.macro	naitlb_11 code

	mfctl	%isr,spc
	b	naitlb_miss_11
	mfctl	%ior,va

	.align	32
	.endm
#endif

	/*
	 * naitlb miss interruption handler (parisc 2.0)
	 */

	.macro	naitlb_20 code

	mfctl	%isr,spc
#ifdef CONFIG_64BIT
	b	naitlb_miss_20w
#else
	b	naitlb_miss_20
#endif
	mfctl	%ior,va

	.align	32
	.endm

#ifndef CONFIG_64BIT
	/*
	 * dtlb miss interruption handler (parisc 1.1 - 32 bit)
	 */

	.macro	dtlb_11 code

	mfctl	%isr, spc
	b	dtlb_miss_11
	mfctl	%ior, va

	.align	32
	.endm
#endif

	/*
	 * dtlb miss interruption handler (parisc 2.0)
	 */

	.macro	dtlb_20 code

	mfctl	%isr, spc
#ifdef CONFIG_64BIT
	b	dtlb_miss_20w
#else
	b	dtlb_miss_20
#endif
	mfctl	%ior, va

	.align	32
	.endm

#ifndef CONFIG_64BIT
	/* nadtlb miss interruption handler (parisc 1.1 - 32 bit) */

	.macro	nadtlb_11 code

	mfctl	%isr,spc
	b	nadtlb_miss_11
	mfctl	%ior,va

	.align	32
	.endm
#endif
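	/*
	 * Layout note (illustrative, not from the original source): each
	 * vector macro above pads itself to 32 bytes (.align 32), i.e.
	 * eight instruction slots, which is the architected spacing of
	 * PA-RISC interruption vector entries.  An entry therefore only
	 * has room to capture its arguments in shadowed registers (using
	 * the branch delay slot) and jump to the real handler.
	 */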

	/* nadtlb miss interruption handler (parisc 2.0) */

	.macro	nadtlb_20 code

	mfctl	%isr,spc
#ifdef CONFIG_64BIT
	b	nadtlb_miss_20w
#else
	b	nadtlb_miss_20
#endif
	mfctl	%ior,va

	.align	32
	.endm

#ifndef CONFIG_64BIT
	/*
	 * dirty bit trap interruption handler (parisc 1.1 - 32 bit)
	 */

	.macro	dbit_11 code

	mfctl	%isr,spc
	b	dbit_trap_11
	mfctl	%ior,va

	.align	32
	.endm
#endif

	/*
	 * dirty bit trap interruption handler (parisc 2.0)
	 */

	.macro	dbit_20 code

	mfctl	%isr,spc
#ifdef CONFIG_64BIT
	b	dbit_trap_20w
#else
	b	dbit_trap_20
#endif
	mfctl	%ior,va

	.align	32
	.endm

	/* In LP64, the space contains part of the upper 32 bits of the
	 * fault.  We have to extract this and place it in the va,
	 * zeroing the corresponding bits in the space register */
	.macro	space_adjust	spc,va,tmp
#ifdef CONFIG_64BIT
	extrd,u	\spc,63,SPACEID_SHIFT,\tmp
	depd	%r0,63,SPACEID_SHIFT,\spc
	depd	\tmp,31,SPACEID_SHIFT,\va
#endif
	.endm

	.import swapper_pg_dir,code

	/* Get the pgd.  For faults on space zero (kernel space), this
	 * is simply swapper_pg_dir.  For user space faults, the
	 * pgd is stored in %cr25 */
	.macro	get_pgd	spc,reg
	ldil	L%PA(swapper_pg_dir),\reg
	ldo	R%PA(swapper_pg_dir)(\reg),\reg
	or,COND(=) %r0,\spc,%r0
	mfctl	%cr25,\reg
	.endm

	/*
		space_check(spc,tmp,fault)

		spc - The space we saw the fault with.
		tmp - The place to store the current space.
		fault - Function to call on failure.

		Only allow faults on different spaces from the
		currently active one if we're the kernel

	*/
	.macro	space_check	spc,tmp,fault
	mfsp	%sr7,\tmp
	/* check against %r0 which is same value as LINUX_GATEWAY_SPACE */
	or,COND(<>) %r0,\spc,%r0	/* user may execute gateway page
					 * as kernel, so defeat the space
					 * check if it is */
	copy	\spc,\tmp
	or,COND(=) %r0,\tmp,%r0		/* nullify if executing as kernel */
	cmpb,COND(<>),n \tmp,\spc,\fault
	.endm

	/* Look up a PTE in a 2-Level scheme (faulting at each
	 * level if the entry isn't present)
	 *
	 * NOTE: we use ldw even for LP64, since the short pointers
	 * can address up to 1TB
	 */
	.macro	L2_ptep	pmd,pte,index,va,fault
#if CONFIG_PGTABLE_LEVELS == 3
	extru	\va,31-ASM_PMD_SHIFT,ASM_BITS_PER_PMD,\index
#else
# if defined(CONFIG_64BIT)
	extrd,u	\va,63-ASM_PGDIR_SHIFT,ASM_BITS_PER_PGD,\index
# else
#  if PAGE_SIZE > 4096
	extru	\va,31-ASM_PGDIR_SHIFT,32-ASM_PGDIR_SHIFT,\index
#  else
	extru	\va,31-ASM_PGDIR_SHIFT,ASM_BITS_PER_PGD,\index
#  endif
# endif
#endif
	dep	%r0,31,PAGE_SHIFT,\pmd	/* clear offset */
#if CONFIG_PGTABLE_LEVELS < 3
	copy	%r0,\pte
#endif
	ldw,s	\index(\pmd),\pmd
	bb,>=,n	\pmd,_PxD_PRESENT_BIT,\fault
	dep	%r0,31,PxD_FLAG_SHIFT,\pmd /* clear flags */
	SHLREG	\pmd,PxD_VALUE_SHIFT,\pmd
	extru	\va,31-PAGE_SHIFT,ASM_BITS_PER_PTE,\index
	dep	%r0,31,PAGE_SHIFT,\pmd	/* clear offset */
	shladd	\index,BITS_PER_PTE_ENTRY,\pmd,\pmd /* pmd is now pte */
	.endm
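	/*
	 * For reference, a C-like sketch of the walk L2_ptep performs
	 * (illustrative only; the real field widths and shift values
	 * come from asm-offsets.c and pgtable.h):
	 *
	 *	pmd = base[pmd_index(va)];	// ldw,s scaled load
	 *	if (!(pmd & _PxD_PRESENT))
	 *		goto fault;
	 *	pte_base = (pmd & ~flags) << PxD_VALUE_SHIFT;
	 *	pte = &pte_base[pte_index(va)];	// shladd
	 */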

	/* Look up PTE in a 3-Level scheme. */
	.macro	L3_ptep pgd,pte,index,va,fault
#if CONFIG_PGTABLE_LEVELS == 3
	copy	%r0,\pte
	extrd,u	\va,63-ASM_PGDIR_SHIFT,ASM_BITS_PER_PGD,\index
	ldw,s	\index(\pgd),\pgd
	bb,>=,n	\pgd,_PxD_PRESENT_BIT,\fault
	shld	\pgd,PxD_VALUE_SHIFT,\pgd
#endif
	L2_ptep	\pgd,\pte,\index,\va,\fault
	.endm

	/* Acquire page_table_lock and check page is present. */
	.macro	ptl_lock	spc,ptp,pte,tmp,tmp1,fault
#ifdef CONFIG_TLB_PTLOCK
98:	cmpib,COND(=),n	0,\spc,2f
	get_ptl		\tmp
1:	LDCW		0(\tmp),\tmp1
	cmpib,COND(=)	0,\tmp1,1b
	nop
	LDREG		0(\ptp),\pte
	bb,<,n		\pte,_PAGE_PRESENT_BIT,3f
	b		\fault
	stw		\spc,0(\tmp)
99:	ALTERNATIVE(98b, 99b, ALT_COND_NO_SMP, INSN_NOP)
#endif
2:	LDREG		0(\ptp),\pte
	bb,>=,n		\pte,_PAGE_PRESENT_BIT,\fault
3:
	.endm

	/* Release page_table_lock without reloading lock address.
	   Note that the values in the register spc are limited to
	   NR_SPACE_IDS (262144). Thus, the stw instruction always
	   stores a nonzero value even when register spc is 64 bits.
	   We use an ordered store to ensure all prior accesses are
	   performed prior to releasing the lock. */
	.macro	ptl_unlock0	spc,tmp
#ifdef CONFIG_TLB_PTLOCK
98:	or,COND(=)	%r0,\spc,%r0
	stw,ma		\spc,0(\tmp)
99:	ALTERNATIVE(98b, 99b, ALT_COND_NO_SMP, INSN_NOP)
#endif
	.endm

	/* Release page_table_lock. */
	.macro	ptl_unlock1	spc,tmp
#ifdef CONFIG_TLB_PTLOCK
98:	get_ptl		\tmp
	ptl_unlock0	\spc,\tmp
99:	ALTERNATIVE(98b, 99b, ALT_COND_NO_SMP, INSN_NOP)
#endif
	.endm

	/* Set the _PAGE_ACCESSED bit of the PTE.  Be clever and
	 * don't needlessly dirty the cache line if it was already set */
	.macro	update_accessed	ptp,pte,tmp,tmp1
	ldi	_PAGE_ACCESSED,\tmp1
	or	\tmp1,\pte,\tmp
	and,COND(<>)	\tmp1,\pte,%r0
	STREG	\tmp,0(\ptp)
	.endm

	/* Set the dirty bit (and accessed bit).  No need to be
	 * clever, this is only used from the dirty fault */
	.macro	update_dirty	ptp,pte,tmp
	ldi	_PAGE_ACCESSED|_PAGE_DIRTY,\tmp
	or	\tmp,\pte,\pte
	STREG	\pte,0(\ptp)
	.endm

	/* We have (depending on the page size):
	 * - 38 to 52-bit Physical Page Number
	 * - 12 to 26-bit page offset
	 */
	/* bitshift difference between a PFN (based on kernel's PAGE_SIZE)
	 * to a CPU TLB 4k PFN (4k => 12 bits to shift) */
	#define PAGE_ADD_SHIFT		(PAGE_SHIFT-12)
	#define PAGE_ADD_HUGE_SHIFT	(REAL_HPAGE_SHIFT-12)

	/* Drop prot bits and convert to page addr for iitlbt and idtlbt */
	.macro	convert_for_tlb_insert20 pte,tmp
#ifdef CONFIG_HUGETLB_PAGE
	copy		\pte,\tmp
	extrd,u		\tmp,(63-ASM_PFN_PTE_SHIFT)+(63-58)+PAGE_ADD_SHIFT,\
				64-PAGE_SHIFT-PAGE_ADD_SHIFT,\pte

	depdi		_PAGE_SIZE_ENCODING_DEFAULT,63,\
				(63-58)+PAGE_ADD_SHIFT,\pte
	extrd,u,*=	\tmp,_PAGE_HPAGE_BIT+32,1,%r0
	depdi		_HUGE_PAGE_SIZE_ENCODING_DEFAULT,63,\
				(63-58)+PAGE_ADD_HUGE_SHIFT,\pte
#else /* Huge pages disabled */
	extrd,u		\pte,(63-ASM_PFN_PTE_SHIFT)+(63-58)+PAGE_ADD_SHIFT,\
				64-PAGE_SHIFT-PAGE_ADD_SHIFT,\pte
	depdi		_PAGE_SIZE_ENCODING_DEFAULT,63,\
				(63-58)+PAGE_ADD_SHIFT,\pte
#endif
	.endm
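	/*
	 * Worked example (illustrative): with the default 4 kB kernel
	 * page size, PAGE_SHIFT == 12, so PAGE_ADD_SHIFT == 0 and the
	 * extrd,u above simply shifts the PFN into the position that
	 * iitlbt/idtlbt expect.  With 16 kB kernel pages PAGE_SHIFT ==
	 * 14, so PAGE_ADD_SHIFT == 2 and the kernel PFN is shifted two
	 * extra bits to produce the CPU's 4 kB-based PFN.
	 */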

	/* Convert the pte and prot to tlb insertion values.  How
	 * this happens is quite subtle, read below */
	.macro	make_insert_tlb	spc,pte,prot,tmp
	space_to_prot	\spc \prot	/* create prot id from space */
	/* The following is the real subtlety.  This is depositing
	 * T <-> _PAGE_REFTRAP
	 * D <-> _PAGE_DIRTY
	 * B <-> _PAGE_DMB (memory break)
	 *
	 * Then incredible subtlety: The access rights are
	 * _PAGE_GATEWAY, _PAGE_EXEC and _PAGE_WRITE
	 * See 3-14 of the parisc 2.0 manual
	 *
	 * Finally, _PAGE_READ goes in the top bit of PL1 (so we
	 * trigger an access rights trap in user space if the user
	 * tries to read an unreadable page) */
	depd	\pte,8,7,\prot

	/* PAGE_USER indicates the page can be read with user privileges,
	 * so deposit X1|11 to PL1|PL2 (remember the upper bit of PL1
	 * contains _PAGE_READ) */
	extrd,u,*=	\pte,_PAGE_USER_BIT+32,1,%r0
	depdi	7,11,3,\prot
	/* If we're a gateway page, drop PL2 back to zero for promotion
	 * to kernel privilege (so we can execute the page as kernel).
	 * Any privilege promotion page always denies read and write */
	extrd,u,*=	\pte,_PAGE_GATEWAY_BIT+32,1,%r0
	depd	%r0,11,2,\prot	/* If Gateway, Set PL2 to 0 */

	/* Enforce uncacheable pages.
	 * This should ONLY be used for MMIO on PA 2.0 machines.
	 * Memory/DMA is cache coherent on all PA2.0 machines we support
	 * (that means T-class is NOT supported) and the memory controllers
	 * on most of those machines only handle cache transactions.
	 */
	extrd,u,*=	\pte,_PAGE_NO_CACHE_BIT+32,1,%r0
	depdi	1,12,1,\prot

	/* Drop prot bits and convert to page addr for iitlbt and idtlbt */
	convert_for_tlb_insert20 \pte \tmp
	.endm

	/* Identical macro to make_insert_tlb above, except it
	 * makes the tlb entry for the differently formatted pa11
	 * insertion instructions */
	.macro	make_insert_tlb_11	spc,pte,prot
	zdep	\spc,30,15,\prot
	dep	\pte,8,7,\prot
	extru,=	\pte,_PAGE_NO_CACHE_BIT,1,%r0
	depi	1,12,1,\prot
	extru,=	\pte,_PAGE_USER_BIT,1,%r0
	depi	7,11,3,\prot	/* Set for user space (1 rsvd for read) */
	extru,=	\pte,_PAGE_GATEWAY_BIT,1,%r0
	depi	0,11,2,\prot	/* If Gateway, Set PL2 to 0 */

	/* Get rid of prot bits and convert to page addr for iitlba */

	depi	0,31,ASM_PFN_PTE_SHIFT,\pte
	SHRREG	\pte,(ASM_PFN_PTE_SHIFT-(31-26)),\pte
	.endm

	/* This is for ILP32 PA2.0 only.  The TLB insertion needs
	 * to extend into I/O space if the address is 0xfXXXXXXX
	 * so we extend the f's into the top word of the pte in
	 * this case */
	.macro	f_extend	pte,tmp
	extrd,s	\pte,42,4,\tmp
	addi,<>	1,\tmp,%r0
	extrd,s	\pte,63,25,\pte
	.endm
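	/*
	 * Illustrative example (not from the original source): a page
	 * address in the 0xfXXXXXXX I/O range carries 1's in its top
	 * nibble, so the conditional extrd,s above replicates those f's
	 * into the upper word of the pte before the TLB insert, landing
	 * the entry in the architected I/O space; ordinary memory
	 * addresses fail the nibble test and are left unchanged.
	 */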

	/* The alias region is an 8 MB aligned 16 MB region, used to do
	 * clear and copy user pages at addresses congruent with the
	 * user virtual address.
	 *
	 * To use the alias page, you set %r26 up with the "to" TLB
	 * entry (identifying the physical page) and %r23 up with
	 * the "from" tlb entry (or nothing if only a "to" entry---for
	 * clear_user_page_asm) */
	.macro	do_alias	spc,tmp,tmp1,va,pte,prot,fault,patype
	cmpib,COND(<>),n 0,\spc,\fault
	ldil	L%(TMPALIAS_MAP_START),\tmp
#if defined(CONFIG_64BIT) && (TMPALIAS_MAP_START >= 0x80000000)
	/* on LP64, ldi will sign extend into the upper 32 bits,
	 * which is behaviour we don't want */
	depdi	0,31,32,\tmp
#endif
	copy	\va,\tmp1
	depi	0,31,23,\tmp1
	cmpb,COND(<>),n	\tmp,\tmp1,\fault
	mfctl	%cr19,\tmp	/* iir */
	/* get the opcode (first six bits) into \tmp */
	extrw,u	\tmp,5,6,\tmp
	/*
	 * Only setting the T bit prevents data cache movein
	 * Setting access rights to zero prevents instruction cache movein
	 *
	 * Note subtlety here: _PAGE_GATEWAY, _PAGE_EXEC and _PAGE_WRITE go
	 * to type field and _PAGE_READ goes to top bit of PL1
	 */
	ldi	(_PAGE_REFTRAP|_PAGE_READ|_PAGE_WRITE),\prot
	/*
	 * so if the opcode is one (i.e. this is a memory management
	 * instruction) nullify the next load so \prot is only T.
	 * Otherwise this is a normal data operation
	 */
	cmpiclr,=	0x01,\tmp,%r0
	ldi		(_PAGE_DIRTY|_PAGE_READ|_PAGE_WRITE),\prot
.ifc \patype,20
	depd,z	\prot,8,7,\prot
.else
.ifc \patype,11
	depw,z	\prot,8,7,\prot
.else
	.error "undefined PA type to do_alias"
.endif
.endif
	/*
	 * OK, it is in the temp alias region, check whether "from" or "to".
	 * Check "subtle" note in pacache.S re: r23/r26.
	 */
#ifdef CONFIG_64BIT
	extrd,u,*=	\va,41,1,%r0
#else
	extrw,u,=	\va,9,1,%r0
#endif
	or,COND(tr)	%r23,%r0,\pte
	or		%r26,%r0,\pte
	.endm


	/*
	 * Fault_vectors are architecturally required to be aligned on a 2K
	 * boundary
	 */

	.section .text.hot
	.align 2048

ENTRY(fault_vector_20)
	/* First vector is invalid (0) */
	.ascii	"cows can fly"
	.byte 0
	.align 32

	hpmc		 1
	def		 2
	def		 3
	extint		 4
	def		 5
	itlb_20		 PARISC_ITLB_TRAP
	def		 7
	def		 8
	def		 9
	def		10
	def		11
	def		12
	def		13
	def		14
	dtlb_20		15
	naitlb_20	16
	nadtlb_20	17
	def		18
	def		19
	dbit_20		20
	def		21
	def		22
	def		23
	def		24
	def		25
	def		26
	def		27
	def		28
	def		29
	def		30
	def		31
END(fault_vector_20)

#ifndef CONFIG_64BIT

	.align 2048

ENTRY(fault_vector_11)
	/* First vector is invalid (0) */
	.ascii	"cows can fly"
	.byte 0
	.align 32

	hpmc		 1
	def		 2
	def		 3
	extint		 4
	def		 5
	itlb_11		 PARISC_ITLB_TRAP
	def		 7
	def		 8
	def		 9
	def		10
	def		11
	def		12
	def		13
	def		14
	dtlb_11		15
	naitlb_11	16
	nadtlb_11	17
	def		18
	def		19
	dbit_11		20
	def		21
	def		22
	def		23
	def		24
	def		25
	def		26
	def		27
	def		28
	def		29
	def		30
	def		31
END(fault_vector_11)

#endif
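	/*
	 * Numbering note (illustrative, not from the original source):
	 * the slot each macro occupies in the tables above is the
	 * PA-RISC interruption number it services, e.g. slot 6 is the
	 * instruction TLB miss fault (hence PARISC_ITLB_TRAP), slot 15
	 * the data TLB miss fault and slot 20 the TLB dirty bit trap.
	 * Slot 0 does not exist architecturally, which is why the table
	 * starts with filler text.
	 */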
	/* Fault vector is separately protected and *must* be on its own page */
	.align		PAGE_SIZE

	.import		handle_interruption,code
	.import		do_cpu_irq_mask,code

	/*
	 * Child Returns here
	 *
	 * copy_thread moved args into task save area.
	 */

ENTRY(ret_from_kernel_thread)
	/* Call schedule_tail first though */
	BL	schedule_tail, %r2
	nop

	LDREG	TI_TASK-THREAD_SZ_ALGN-FRAME_SIZE(%r30), %r1
	LDREG	TASK_PT_GR25(%r1), %r26
#ifdef CONFIG_64BIT
	LDREG	TASK_PT_GR27(%r1), %r27
#endif
	LDREG	TASK_PT_GR26(%r1), %r1
	ble	0(%sr7, %r1)
	copy	%r31, %r2
	b	finish_child_return
	nop
END(ret_from_kernel_thread)


	/*
	 * struct task_struct *_switch_to(struct task_struct *prev,
	 *	struct task_struct *next)
	 *
	 * switch kernel stacks and return prev */
ENTRY_CFI(_switch_to)
	STREG	%r2, -RP_OFFSET(%r30)

	callee_save_float
	callee_save

	load32	_switch_to_ret, %r2

	STREG	%r2, TASK_PT_KPC(%r26)
	LDREG	TASK_PT_KPC(%r25), %r2

	STREG	%r30, TASK_PT_KSP(%r26)
	LDREG	TASK_PT_KSP(%r25), %r30
	LDREG	TASK_THREAD_INFO(%r25), %r25
	bv	%r0(%r2)
	mtctl	%r25,%cr30

ENTRY(_switch_to_ret)
	mtctl	%r0, %cr0		/* Needed for single stepping */
	callee_rest
	callee_rest_float

	LDREG	-RP_OFFSET(%r30), %r2
	bv	%r0(%r2)
	copy	%r26, %r28
ENDPROC_CFI(_switch_to)

	/*
	 * Common rfi return path for interruptions, kernel execve, and
	 * sys_rt_sigreturn (sometimes).  The sys_rt_sigreturn syscall will
	 * return via this path if the signal was received when the process
	 * was running; if the process was blocked on a syscall then the
	 * normal syscall_exit path is used.  All syscalls for traced
	 * processes exit via intr_restore.
	 *
	 * XXX If any syscalls that change a process's space id ever exit
	 * this way, then we will need to copy %sr3 in to PT_SR[3..7], and
	 * adjust IASQ[0..1].
	 *
	 */

	.align	PAGE_SIZE

ENTRY_CFI(syscall_exit_rfi)
	mfctl	%cr30,%r16
	LDREG	TI_TASK(%r16), %r16	/* thread_info -> task_struct */
	ldo	TASK_REGS(%r16),%r16
	/* Force iaoq to userspace, as the user has had access to our current
	 * context via sigcontext. Also Filter the PSW for the same reason.
	 */
	LDREG	PT_IAOQ0(%r16),%r19
	depi	3,31,2,%r19
	STREG	%r19,PT_IAOQ0(%r16)
	LDREG	PT_IAOQ1(%r16),%r19
	depi	3,31,2,%r19
	STREG	%r19,PT_IAOQ1(%r16)
	LDREG	PT_PSW(%r16),%r19
	load32	USER_PSW_MASK,%r1
#ifdef CONFIG_64BIT
	load32	USER_PSW_HI_MASK,%r20
	depd	%r20,31,32,%r1
#endif
	and	%r19,%r1,%r19 /* Mask out bits that user shouldn't play with */
	load32	USER_PSW,%r1
	or	%r19,%r1,%r19 /* Make sure default USER_PSW bits are set */
	STREG	%r19,PT_PSW(%r16)
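	/*
	 * Illustrative note (not from the original source): on PA-RISC
	 * the two low bits of an instruction address queue entry are the
	 * privilege level, so "depi 3,31,2" above forces PL 3, the least
	 * privileged (user) level.  E.g. a queued address of 0x11000
	 * becomes 0x11003, and the eventual rfi returns to user privilege.
	 */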

	/*
	 * If we aren't being traced, we never saved space registers
	 * (we don't store them in the sigcontext), so set them
	 * to "proper" values now (otherwise we'll wind up restoring
	 * whatever was last stored in the task structure, which might
	 * be inconsistent if an interrupt occurred while on the gateway
	 * page). Note that we may be "trashing" values the user put in
	 * them, but we don't support the user changing them.
	 */

	STREG	%r0,PT_SR2(%r16)
	mfsp	%sr3,%r19
	STREG	%r19,PT_SR0(%r16)
	STREG	%r19,PT_SR1(%r16)
	STREG	%r19,PT_SR3(%r16)
	STREG	%r19,PT_SR4(%r16)
	STREG	%r19,PT_SR5(%r16)
	STREG	%r19,PT_SR6(%r16)
	STREG	%r19,PT_SR7(%r16)

ENTRY(intr_return)
	/* check for reschedule */
	mfctl	%cr30,%r1
	LDREG	TI_FLAGS(%r1),%r19	/* sched.h: TIF_NEED_RESCHED */
	bb,<,n	%r19,31-TIF_NEED_RESCHED,intr_do_resched /* forward */

	.import do_notify_resume,code
intr_check_sig:
	/* As above */
	mfctl	%cr30,%r1
	LDREG	TI_FLAGS(%r1),%r19
	ldi	(_TIF_USER_WORK_MASK & ~_TIF_NEED_RESCHED), %r20
	and,COND(<>)	%r19, %r20, %r0
	b,n	intr_restore	/* skip past if we've nothing to do */

	/* This check is critical to having LWS
	 * working. The IASQ is zero on the gateway
	 * page and we cannot deliver any signals until
	 * we get off the gateway page.
	 *
	 * Only do signals if we are returning to user space
	 */
	LDREG	PT_IASQ0(%r16), %r20
	cmpib,COND(=),n LINUX_GATEWAY_SPACE, %r20, intr_restore /* forward */
	LDREG	PT_IASQ1(%r16), %r20
	cmpib,COND(=),n LINUX_GATEWAY_SPACE, %r20, intr_restore /* forward */

	copy	%r0, %r25			/* long in_syscall = 0 */
#ifdef CONFIG_64BIT
	ldo	-16(%r30),%r29			/* Reference param save area */
#endif

	/* NOTE: We need to enable interrupts if we have to deliver
	 * signals. We used to do this earlier but it caused kernel
	 * stack overflows. */
	ssm	PSW_SM_I, %r0

	BL	do_notify_resume,%r2
	copy	%r16, %r26			/* struct pt_regs *regs */

	b,n	intr_check_sig

intr_restore:
	copy		%r16,%r29
	ldo		PT_FR31(%r29),%r1
	rest_fp		%r1
	rest_general	%r29

	/* inverse of virt_map */
	pcxt_ssm_bug
	rsm		PSW_SM_QUIET,%r0	/* prepare for rfi */
	tophys_r1	%r29

	/* Restore space id's and special cr's from PT_REGS
	 * structure pointed to by r29
	 */
	rest_specials	%r29

	/* IMPORTANT: rest_stack restores r29 last (we are using it)!
	 * It also restores r1 and r30.
	 */
	rest_stack

	rfi
	nop

#ifndef CONFIG_PREEMPTION
# define intr_do_preempt	intr_restore
#endif /* !CONFIG_PREEMPTION */

	.import schedule,code
intr_do_resched:
	/* Only call schedule on return to userspace. If we're returning
	 * to kernel space, we may schedule if CONFIG_PREEMPTION, otherwise
	 * we jump back to intr_restore.
	 */
	LDREG	PT_IASQ0(%r16), %r20
	cmpib,COND(=)	0, %r20, intr_do_preempt
	nop
	LDREG	PT_IASQ1(%r16), %r20
	cmpib,COND(=)	0, %r20, intr_do_preempt
	nop

	/* NOTE: We need to enable interrupts if we schedule.  We used
	 * to do this earlier but it caused kernel stack overflows. */
	ssm	PSW_SM_I, %r0

#ifdef CONFIG_64BIT
	ldo	-16(%r30),%r29		/* Reference param save area */
#endif

	ldil	L%intr_check_sig, %r2
#ifndef CONFIG_64BIT
	b	schedule
#else
	load32	schedule, %r20
	bv	%r0(%r20)
#endif
	ldo	R%intr_check_sig(%r2), %r2
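	/*
	 * Illustrative note (not from the original source): like the
	 * other "b/bv ... ldo" pairs in this file, the ldo above sits in
	 * the delay slot of the branch, so %r2 is loaded with the return
	 * address (intr_check_sig) before control reaches schedule().
	 * PA-RISC branches always execute the following instruction
	 * unless explicitly nullified with ",n".
	 */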

	/* preempt the current task on returning to kernel
	 * mode from an interrupt, iff need_resched is set,
	 * and preempt_count is 0. otherwise, we continue on
	 * our merry way back to the current running task.
	 */
#ifdef CONFIG_PREEMPTION
	.import preempt_schedule_irq,code
intr_do_preempt:
	rsm	PSW_SM_I, %r0		/* disable interrupts */

	/* current_thread_info()->preempt_count */
	mfctl	%cr30, %r1
	LDREG	TI_PRE_COUNT(%r1), %r19
	cmpib,COND(<>)	0, %r19, intr_restore	/* if preempt_count > 0 */
	nop				/* prev insn branched backwards */

	/* check if we interrupted a critical path */
	LDREG	PT_PSW(%r16), %r20
	bb,<,n	%r20, 31 - PSW_SM_I, intr_restore
	nop

	/* ssm PSW_SM_I done later in intr_restore */
#ifdef CONFIG_MLONGCALLS
	ldil	L%intr_restore, %r2
	load32	preempt_schedule_irq, %r1
	bv	%r0(%r1)
	ldo	R%intr_restore(%r2), %r2
#else
	ldil	L%intr_restore, %r1
	BL	preempt_schedule_irq, %r2
	ldo	R%intr_restore(%r1), %r2
#endif
#endif /* CONFIG_PREEMPTION */

	/*
	 * External interrupts.
	 */

intr_extint:
	cmpib,COND(=),n 0,%r16,1f

	get_stack_use_cr30
	b,n 2f

1:
	get_stack_use_r30
2:
	save_specials	%r29
	virt_map
	save_general	%r29

	ldo	PT_FR0(%r29), %r24
	save_fp	%r24

	loadgp

	copy	%r29, %r26	/* arg0 is pt_regs */
	copy	%r29, %r16	/* save pt_regs */

	ldil	L%intr_return, %r2

#ifdef CONFIG_64BIT
	ldo	-16(%r30),%r29	/* Reference param save area */
#endif

	b	do_cpu_irq_mask
	ldo	R%intr_return(%r2), %r2	/* return to intr_return, not here */
ENDPROC_CFI(syscall_exit_rfi)


	/* Generic interruptions (illegal insn, unaligned, page fault, etc) */

ENTRY_CFI(intr_save)		/* for os_hpmc */
	mfsp	%sr7,%r16
	cmpib,COND(=),n 0,%r16,1f
	get_stack_use_cr30
	b	2f
	copy	%r8,%r26

1:
	get_stack_use_r30
	copy	%r8,%r26

2:
	save_specials	%r29

	/* If this trap is a itlb miss, skip saving/adjusting isr/ior */
	cmpib,COND(=),n	PARISC_ITLB_TRAP,%r26,skip_save_ior


	mfctl	%isr, %r16
	nop		/* serialize mfctl on PA 2.0 to avoid 4 cycle penalty */
	mfctl	%ior, %r17


#ifdef CONFIG_64BIT
	/*
	 * If the interrupted code was running with W bit off (32 bit),
	 * clear the b bits (bits 0 & 1) in the ior.
	 * save_specials left ipsw value in r8 for us to test.
	 */
	extrd,u,*<>	%r8,PSW_W_BIT,1,%r0
	depdi	0,1,2,%r17

	/* adjust isr/ior: get high bits from isr and deposit in ior */
	space_adjust	%r16,%r17,%r1
#endif
	STREG	%r16, PT_ISR(%r29)
	STREG	%r17, PT_IOR(%r29)

#if 0 && defined(CONFIG_64BIT)
	/* Revisit when we have 64-bit code above 4Gb */
	b,n	intr_save2

skip_save_ior:
	/* We have a itlb miss, and when executing code above 4 Gb on ILP64, we
	 * need to adjust iasq/iaoq here in the same way we adjusted isr/ior
	 * above.
	 */
	extrd,u,*	%r8,PSW_W_BIT,1,%r1
	cmpib,COND(=),n	1,%r1,intr_save2
	LDREG	PT_IASQ0(%r29), %r16
	LDREG	PT_IAOQ0(%r29), %r17
	/* adjust iasq/iaoq */
	space_adjust	%r16,%r17,%r1
	STREG	%r16, PT_IASQ0(%r29)
	STREG	%r17, PT_IAOQ0(%r29)
#else
skip_save_ior:
#endif

intr_save2:
	virt_map
	save_general	%r29

	ldo	PT_FR0(%r29), %r25
	save_fp	%r25

	loadgp

	copy	%r29, %r25	/* arg1 is pt_regs */
#ifdef CONFIG_64BIT
	ldo	-16(%r30),%r29	/* Reference param save area */
#endif

	ldil	L%intr_check_sig, %r2
	copy	%r25, %r16	/* save pt_regs */

	b	handle_interruption
	ldo	R%intr_check_sig(%r2), %r2
ENDPROC_CFI(intr_save)


	/*
	 * Note for all tlb miss handlers:
	 *
	 * cr24 contains a pointer to the kernel address space
	 * page directory.
	 *
	 * cr25 contains a pointer to the current user address
	 * space page directory.
	 *
	 * sr3 will contain the space id of the user address space
	 * of the current running thread while that thread is
	 * running in the kernel.
	 */

	/*
	 * register number allocations.  Note that these are all
	 * in the shadowed registers
	 */

	t0 = r1		/* temporary register 0 */
	va = r8		/* virtual address for which the trap occurred */
	t1 = r9		/* temporary register 1 */
	pte  = r16	/* pte/phys page # */
	prot = r17	/* prot bits */
	spc  = r24	/* space for which the trap occurred */
	ptp = r25	/* page directory/page table pointer */
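	/*
	 * For orientation, every miss handler below follows the same
	 * pattern; a C-like sketch (illustrative only):
	 *
	 *	space_adjust/get_pgd/space_check;  // find & sanity-check pgd
	 *	pte = walk(va);                    // L2_ptep / L3_ptep
	 *	ptl_lock();                        // if CONFIG_TLB_PTLOCK
	 *	update_accessed(pte);              // or update_dirty()
	 *	insert into TLB;                   // i/dtlba+i/dtlbp or i/dtlbt
	 *	ptl_unlock(); rfir;                // restart faulting insn
	 *
	 * A failed walk branches to the per-handler alias check or to
	 * the corresponding *_fault stub near the end of this section.
	 */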

#ifdef CONFIG_64BIT

dtlb_miss_20w:
	space_adjust	spc,va,t0
	get_pgd		spc,ptp
	space_check	spc,t0,dtlb_fault

	L3_ptep		ptp,pte,t0,va,dtlb_check_alias_20w

	ptl_lock	spc,ptp,pte,t0,t1,dtlb_check_alias_20w
	update_accessed	ptp,pte,t0,t1

	make_insert_tlb	spc,pte,prot,t1

	idtlbt		pte,prot

	ptl_unlock1	spc,t0
	rfir
	nop

dtlb_check_alias_20w:
	do_alias	spc,t0,t1,va,pte,prot,dtlb_fault,20

	idtlbt		pte,prot

	rfir
	nop

nadtlb_miss_20w:
	space_adjust	spc,va,t0
	get_pgd		spc,ptp
	space_check	spc,t0,nadtlb_fault

	L3_ptep		ptp,pte,t0,va,nadtlb_check_alias_20w

	ptl_lock	spc,ptp,pte,t0,t1,nadtlb_check_alias_20w
	update_accessed	ptp,pte,t0,t1

	make_insert_tlb	spc,pte,prot,t1

	idtlbt		pte,prot

	ptl_unlock1	spc,t0
	rfir
	nop

nadtlb_check_alias_20w:
	do_alias	spc,t0,t1,va,pte,prot,nadtlb_emulate,20

	idtlbt		pte,prot

	rfir
	nop

#else

dtlb_miss_11:
	get_pgd		spc,ptp

	space_check	spc,t0,dtlb_fault

	L2_ptep		ptp,pte,t0,va,dtlb_check_alias_11

	ptl_lock	spc,ptp,pte,t0,t1,dtlb_check_alias_11
	update_accessed	ptp,pte,t0,t1

	make_insert_tlb_11	spc,pte,prot

	mfsp		%sr1,t1  /* Save sr1 so we can use it in tlb inserts */
	mtsp		spc,%sr1

	idtlba		pte,(%sr1,va)
	idtlbp		prot,(%sr1,va)

	mtsp		t1, %sr1	/* Restore sr1 */

	ptl_unlock1	spc,t0
	rfir
	nop

dtlb_check_alias_11:
	do_alias	spc,t0,t1,va,pte,prot,dtlb_fault,11

	idtlba		pte,(va)
	idtlbp		prot,(va)

	rfir
	nop

nadtlb_miss_11:
	get_pgd		spc,ptp

	space_check	spc,t0,nadtlb_fault

	L2_ptep		ptp,pte,t0,va,nadtlb_check_alias_11

	ptl_lock	spc,ptp,pte,t0,t1,nadtlb_check_alias_11
	update_accessed	ptp,pte,t0,t1

	make_insert_tlb_11	spc,pte,prot

	mfsp		%sr1,t1  /* Save sr1 so we can use it in tlb inserts */
	mtsp		spc,%sr1

	idtlba		pte,(%sr1,va)
	idtlbp		prot,(%sr1,va)

	mtsp		t1, %sr1	/* Restore sr1 */

	ptl_unlock1	spc,t0
	rfir
	nop

nadtlb_check_alias_11:
	do_alias	spc,t0,t1,va,pte,prot,nadtlb_emulate,11

	idtlba		pte,(va)
	idtlbp		prot,(va)

	rfir
	nop

dtlb_miss_20:
	space_adjust	spc,va,t0
	get_pgd		spc,ptp
	space_check	spc,t0,dtlb_fault

	L2_ptep		ptp,pte,t0,va,dtlb_check_alias_20

	ptl_lock	spc,ptp,pte,t0,t1,dtlb_check_alias_20
	update_accessed	ptp,pte,t0,t1

	make_insert_tlb	spc,pte,prot,t1

	f_extend	pte,t1

	idtlbt		pte,prot

	ptl_unlock1	spc,t0
	rfir
	nop

dtlb_check_alias_20:
	do_alias	spc,t0,t1,va,pte,prot,dtlb_fault,20

	idtlbt		pte,prot

	rfir
	nop

nadtlb_miss_20:
	get_pgd		spc,ptp

	space_check	spc,t0,nadtlb_fault

	L2_ptep		ptp,pte,t0,va,nadtlb_check_alias_20

	ptl_lock	spc,ptp,pte,t0,t1,nadtlb_check_alias_20
	update_accessed	ptp,pte,t0,t1

	make_insert_tlb	spc,pte,prot,t1

	f_extend	pte,t1

	idtlbt		pte,prot

	ptl_unlock1	spc,t0
	rfir
	nop

nadtlb_check_alias_20:
	do_alias	spc,t0,t1,va,pte,prot,nadtlb_emulate,20

	idtlbt		pte,prot

	rfir
	nop

#endif

nadtlb_emulate:

	/*
	 * Non-access misses can be caused by fdc,fic,pdc,lpa,probe and
	 * probei instructions. We don't want to fault for these
	 * instructions (not only does it not make sense, it can cause
	 * deadlocks, since some flushes are done with the mmap
	 * semaphore held). If the translation doesn't exist, we can't
	 * insert a translation, so have to emulate the side effects
	 * of the instruction. Since we don't insert a translation
	 * we can get a lot of faults during a flush loop, so it makes
	 * sense to try to do it here with minimum overhead. We only
	 * emulate fdc,fic,pdc,probew,prober instructions whose base
	 * and index registers are not shadowed. We defer everything
	 * else to the "slow" path.
	 */

	mfctl	%cr19,%r9 /* Get iir */

	/* PA 2.0 Arch Ref. Book pg 382 has a good description of the insn bits.
	   Checks for fdc,fdce,pdc,"fic,4f",prober,probeir,probew, probeiw */

	/* Checks for fdc,fdce,pdc,"fic,4f" only */
	ldi		0x280,%r16
	and		%r9,%r16,%r17
	cmpb,<>,n	%r16,%r17,nadtlb_probe_check
	bb,>=,n		%r9,26,nadtlb_nullify  /* m bit not set, just nullify */
	BL		get_register,%r25
	extrw,u		%r9,15,5,%r8           /* Get index register # */
	cmpib,COND(=),n	-1,%r1,nadtlb_fault    /* have to use slow path */
	copy		%r1,%r24
	BL		get_register,%r25
	extrw,u		%r9,10,5,%r8           /* Get base register # */
	cmpib,COND(=),n	-1,%r1,nadtlb_fault    /* have to use slow path */
	BL		set_register,%r25
	add,l		%r1,%r24,%r1           /* doesn't affect c/b bits */

nadtlb_nullify:
	mfctl	%ipsw,%r8
	ldil	L%PSW_N,%r9
	or	%r8,%r9,%r8            /* Set PSW_N */
	mtctl	%r8,%ipsw

	rfir
	nop
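	/*
	 * Illustrative sketch (not from the original source) of the
	 * emulation above, in C-like pseudocode:
	 *
	 *	if (insn is fdc/fdce/pdc/"fic,4f" && insn.m_bit)
	 *		reg[base] += reg[index];   // base-modify side effect
	 *	ipsw |= PSW_N;                     // nullify the faulting insn
	 *	rfir;                              // resume after it
	 *
	 * get_register() yields -1 in %r1 for shadowed registers, in
	 * which case we fall back to the slow path (nadtlb_fault).
	 */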

	/*
	   When there is no translation for the probe address then we
	   must nullify the insn and return zero in the target register.
	   This will indicate to the calling code that it does not have
	   write/read privileges to this address.

	   This should technically work for prober and probew in PA 1.1,
	   and also probe,r and probe,w in PA 2.0

	   WARNING: USE ONLY NON-SHADOWED REGISTERS WITH PROBE INSN!
	   THE SLOW-PATH EMULATION HAS NOT BEEN WRITTEN YET.

	 */
nadtlb_probe_check:
	ldi		0x80,%r16
	and		%r9,%r16,%r17
	cmpb,<>,n	%r16,%r17,nadtlb_fault /* Must be probe,[rw]*/
	BL		get_register,%r25      /* Find the target register */
	extrw,u		%r9,31,5,%r8           /* Get target register */
	cmpib,COND(=),n	-1,%r1,nadtlb_fault    /* have to use slow path */
	BL		set_register,%r25
	copy		%r0,%r1                /* Write zero to target register */
	b		nadtlb_nullify         /* Nullify return insn */
	nop


#ifdef CONFIG_64BIT
itlb_miss_20w:

	/*
	 * I miss is a little different, since we allow users to fault
	 * on the gateway page which is in the kernel address space.
	 */

	space_adjust	spc,va,t0
	get_pgd		spc,ptp
	space_check	spc,t0,itlb_fault

	L3_ptep		ptp,pte,t0,va,itlb_fault

	ptl_lock	spc,ptp,pte,t0,t1,itlb_fault
	update_accessed	ptp,pte,t0,t1

	make_insert_tlb	spc,pte,prot,t1

	iitlbt		pte,prot

	ptl_unlock1	spc,t0
	rfir
	nop

naitlb_miss_20w:

	/*
	 * I miss is a little different, since we allow users to fault
	 * on the gateway page which is in the kernel address space.
	 */

	space_adjust	spc,va,t0
	get_pgd		spc,ptp
	space_check	spc,t0,naitlb_fault

	L3_ptep		ptp,pte,t0,va,naitlb_check_alias_20w

	ptl_lock	spc,ptp,pte,t0,t1,naitlb_check_alias_20w
	update_accessed	ptp,pte,t0,t1

	make_insert_tlb	spc,pte,prot,t1

	iitlbt		pte,prot

	ptl_unlock1	spc,t0
	rfir
	nop

naitlb_check_alias_20w:
	do_alias	spc,t0,t1,va,pte,prot,naitlb_fault,20

	iitlbt		pte,prot

	rfir
	nop

#else

itlb_miss_11:
	get_pgd		spc,ptp

	space_check	spc,t0,itlb_fault

	L2_ptep		ptp,pte,t0,va,itlb_fault

	ptl_lock	spc,ptp,pte,t0,t1,itlb_fault
	update_accessed	ptp,pte,t0,t1

	make_insert_tlb_11	spc,pte,prot

	mfsp		%sr1,t1  /* Save sr1 so we can use it in tlb inserts */
	mtsp		spc,%sr1

	iitlba		pte,(%sr1,va)
	iitlbp		prot,(%sr1,va)

	mtsp		t1, %sr1	/* Restore sr1 */

	ptl_unlock1	spc,t0
	rfir
	nop

naitlb_miss_11:
	get_pgd		spc,ptp

	space_check	spc,t0,naitlb_fault

	L2_ptep		ptp,pte,t0,va,naitlb_check_alias_11

	ptl_lock	spc,ptp,pte,t0,t1,naitlb_check_alias_11
	update_accessed	ptp,pte,t0,t1

	make_insert_tlb_11	spc,pte,prot

	mfsp		%sr1,t1  /* Save sr1 so we can use it in tlb inserts */
	mtsp		spc,%sr1

	iitlba		pte,(%sr1,va)
	iitlbp		prot,(%sr1,va)

	mtsp		t1, %sr1	/* Restore sr1 */

	ptl_unlock1	spc,t0
	rfir
	nop

naitlb_check_alias_11:
	do_alias	spc,t0,t1,va,pte,prot,itlb_fault,11

	iitlba		pte,(%sr0, va)
	iitlbp		prot,(%sr0, va)

	rfir
	nop


itlb_miss_20:
	get_pgd		spc,ptp

	space_check	spc,t0,itlb_fault

	L2_ptep		ptp,pte,t0,va,itlb_fault

	ptl_lock	spc,ptp,pte,t0,t1,itlb_fault
	update_accessed	ptp,pte,t0,t1

	make_insert_tlb	spc,pte,prot,t1

	f_extend	pte,t1

	iitlbt		pte,prot

	ptl_unlock1	spc,t0
	rfir
	nop

naitlb_miss_20:
	get_pgd		spc,ptp

	space_check	spc,t0,naitlb_fault

	L2_ptep		ptp,pte,t0,va,naitlb_check_alias_20

	ptl_lock	spc,ptp,pte,t0,t1,naitlb_check_alias_20
	update_accessed	ptp,pte,t0,t1

	make_insert_tlb	spc,pte,prot,t1

	f_extend	pte,t1

	iitlbt		pte,prot

	ptl_unlock1	spc,t0
	rfir
	nop

naitlb_check_alias_20:
	do_alias	spc,t0,t1,va,pte,prot,naitlb_fault,20

	iitlbt		pte,prot

	rfir
	nop

#endif

#ifdef CONFIG_64BIT

dbit_trap_20w:
	space_adjust	spc,va,t0
	get_pgd		spc,ptp
	space_check	spc,t0,dbit_fault

	L3_ptep		ptp,pte,t0,va,dbit_fault

	ptl_lock	spc,ptp,pte,t0,t1,dbit_fault
	update_dirty	ptp,pte,t1

	make_insert_tlb	spc,pte,prot,t1

	idtlbt		pte,prot

	ptl_unlock0	spc,t0
	rfir
	nop
#else

dbit_trap_11:

	get_pgd		spc,ptp

	space_check	spc,t0,dbit_fault

	L2_ptep		ptp,pte,t0,va,dbit_fault

	ptl_lock	spc,ptp,pte,t0,t1,dbit_fault
	update_dirty	ptp,pte,t1

	make_insert_tlb_11	spc,pte,prot

	mfsp		%sr1,t1  /* Save sr1 so we can use it in tlb inserts */
	mtsp		spc,%sr1

	idtlba		pte,(%sr1,va)
	idtlbp		prot,(%sr1,va)

	mtsp		t1, %sr1	/* Restore sr1 */

	ptl_unlock0	spc,t0
	rfir
	nop

dbit_trap_20:
	get_pgd		spc,ptp

	space_check	spc,t0,dbit_fault

	L2_ptep		ptp,pte,t0,va,dbit_fault

	ptl_lock	spc,ptp,pte,t0,t1,dbit_fault
	update_dirty	ptp,pte,t1

	make_insert_tlb	spc,pte,prot,t1

	f_extend	pte,t1

	idtlbt		pte,prot

	ptl_unlock0	spc,t0
	rfir
	nop
#endif

	.import handle_interruption,code

kernel_bad_space:
	b	intr_save
	ldi	31,%r8	/* Use an unused code */

dbit_fault:
	b	intr_save
	ldi	20,%r8

itlb_fault:
	b	intr_save
	ldi	PARISC_ITLB_TRAP,%r8

nadtlb_fault:
	b	intr_save
	ldi	17,%r8

naitlb_fault:
	b	intr_save
	ldi	16,%r8

dtlb_fault:
	b	intr_save
	ldi	15,%r8

	/* Register saving semantics for system calls:

	   %r1		   clobbered by system call macro in userspace
	   %r2		   saved in PT_REGS by gateway page
	   %r3  - %r18	   preserved by C code (saved by signal code)
	   %r19 - %r20	   saved in PT_REGS by gateway page
	   %r21 - %r22	   non-standard syscall args
			   stored in kernel stack by gateway page
	   %r23 - %r26	   arg3-arg0, saved in PT_REGS by gateway page
	   %r27 - %r30	   saved in PT_REGS by gateway page
	   %r31		   syscall return pointer
	 */

	/* Floating point registers (FIXME: what do we do with these?)

	   %fr0  - %fr3	   status/exception, not preserved
	   %fr4  - %fr7	   arguments
	   %fr8	 - %fr11   not preserved by C code
	   %fr12 - %fr21   preserved by C code
	   %fr22 - %fr31   not preserved by C code
	 */

	.macro	reg_save regs
	STREG	%r3, PT_GR3(\regs)
	STREG	%r4, PT_GR4(\regs)
	STREG	%r5, PT_GR5(\regs)
	STREG	%r6, PT_GR6(\regs)
	STREG	%r7, PT_GR7(\regs)
	STREG	%r8, PT_GR8(\regs)
	STREG	%r9, PT_GR9(\regs)
	STREG	%r10,PT_GR10(\regs)
	STREG	%r11,PT_GR11(\regs)
	STREG	%r12,PT_GR12(\regs)
	STREG	%r13,PT_GR13(\regs)
	STREG	%r14,PT_GR14(\regs)
	STREG	%r15,PT_GR15(\regs)
	STREG	%r16,PT_GR16(\regs)
	STREG	%r17,PT_GR17(\regs)
	STREG	%r18,PT_GR18(\regs)
	.endm

	.macro	reg_restore regs
	LDREG	PT_GR3(\regs), %r3
	LDREG	PT_GR4(\regs), %r4
	LDREG	PT_GR5(\regs), %r5
	LDREG	PT_GR6(\regs), %r6
	LDREG	PT_GR7(\regs), %r7
	LDREG	PT_GR8(\regs), %r8
	LDREG	PT_GR9(\regs), %r9
	LDREG	PT_GR10(\regs),%r10
	LDREG	PT_GR11(\regs),%r11
	LDREG	PT_GR12(\regs),%r12
	LDREG	PT_GR13(\regs),%r13
	LDREG	PT_GR14(\regs),%r14
	LDREG	PT_GR15(\regs),%r15
	LDREG	PT_GR16(\regs),%r16
	LDREG	PT_GR17(\regs),%r17
	LDREG	PT_GR18(\regs),%r18
	.endm

	.macro	fork_like name
ENTRY_CFI(sys_\name\()_wrapper)
	LDREG	TI_TASK-THREAD_SZ_ALGN-FRAME_SIZE(%r30), %r1
	ldo	TASK_REGS(%r1),%r1
	reg_save %r1
	mfctl	%cr27, %r28
	ldil	L%sys_\name, %r31
	be	R%sys_\name(%sr4,%r31)
	STREG	%r28, PT_CR27(%r1)
ENDPROC_CFI(sys_\name\()_wrapper)
	.endm

fork_like clone
fork_like clone3
fork_like fork
fork_like vfork

	/* Set the return value for the child */
ENTRY(child_return)
	BL	schedule_tail, %r2
	nop
finish_child_return:
	LDREG	TI_TASK-THREAD_SZ_ALGN-FRAME_SIZE(%r30), %r1
	ldo	TASK_REGS(%r1),%r1	/* get pt regs */

	LDREG	PT_CR27(%r1), %r3
	mtctl	%r3, %cr27
	reg_restore %r1
	b	syscall_exit
	copy	%r0,%r28
END(child_return)

ENTRY_CFI(sys_rt_sigreturn_wrapper)
	LDREG	TI_TASK-THREAD_SZ_ALGN-FRAME_SIZE(%r30),%r26
	ldo	TASK_REGS(%r26),%r26	/* get pt regs */
	/* Don't save regs, we are going to restore them from sigcontext. */
	STREG	%r2, -RP_OFFSET(%r30)
#ifdef CONFIG_64BIT
	ldo	FRAME_SIZE(%r30), %r30
	BL	sys_rt_sigreturn,%r2
	ldo	-16(%r30),%r29		/* Reference param save area */
#else
	BL	sys_rt_sigreturn,%r2
	ldo	FRAME_SIZE(%r30), %r30
#endif

	ldo	-FRAME_SIZE(%r30), %r30
	LDREG	-RP_OFFSET(%r30), %r2

	/* FIXME: I think we need to restore a few more things here. */
	LDREG	TI_TASK-THREAD_SZ_ALGN-FRAME_SIZE(%r30),%r1
	ldo	TASK_REGS(%r1),%r1	/* get pt regs */
	reg_restore %r1

	/* If the signal was received while the process was blocked on a
	 * syscall, then r2 will take us to syscall_exit; otherwise r2 will
	 * take us to syscall_exit_rfi and on to intr_return.
	 */
	bv	%r0(%r2)
	LDREG	PT_GR28(%r1),%r28  /* reload original r28 for syscall_exit */
ENDPROC_CFI(sys_rt_sigreturn_wrapper)

ENTRY(syscall_exit)
	/* NOTE: Not all syscalls exit this way.  rt_sigreturn will exit
	 * via syscall_exit_rfi if the signal was received while the process
	 * was running.
	 */

	/* save return value now */

	mfctl	%cr30, %r1
	LDREG	TI_TASK(%r1),%r1
	STREG	%r28,TASK_PT_GR28(%r1)

	/* Seems to me that dp could be wrong here, if the syscall involved
	 * calling a module, and nothing got round to restoring dp on return.
	 */
	loadgp

syscall_check_resched:

	/* check for reschedule */

	LDREG	TI_FLAGS-THREAD_SZ_ALGN-FRAME_SIZE(%r30),%r19	/* long */
	bb,<,n	%r19, 31-TIF_NEED_RESCHED, syscall_do_resched /* forward */

	.import do_signal,code
syscall_check_sig:
	LDREG	TI_FLAGS-THREAD_SZ_ALGN-FRAME_SIZE(%r30),%r19
	ldi	(_TIF_USER_WORK_MASK & ~_TIF_NEED_RESCHED), %r26
	and,COND(<>)	%r19, %r26, %r0
	b,n	syscall_restore	/* skip past if we've nothing to do */

syscall_do_signal:
	/* Save callee-save registers (for sigcontext).
	 * FIXME: After this point the process structure should be
	 * consistent with all the relevant state of the process
	 * before the syscall.  We need to verify this.
	 */
	LDREG	TI_TASK-THREAD_SZ_ALGN-FRAME_SIZE(%r30),%r1
	ldo	TASK_REGS(%r1), %r26		/* struct pt_regs *regs */
	reg_save %r26

#ifdef CONFIG_64BIT
	ldo	-16(%r30),%r29			/* Reference param save area */
#endif

	BL	do_notify_resume,%r2
	ldi	1, %r25				/* long in_syscall = 1 */

	LDREG	TI_TASK-THREAD_SZ_ALGN-FRAME_SIZE(%r30),%r1
	ldo	TASK_REGS(%r1), %r20		/* reload pt_regs */
	reg_restore %r20

	b,n	syscall_check_sig

syscall_restore:
	LDREG	TI_TASK-THREAD_SZ_ALGN-FRAME_SIZE(%r30),%r1

	/* Are we being ptraced? */
	ldw	TASK_FLAGS(%r1),%r19
	ldi	_TIF_SYSCALL_TRACE_MASK,%r2
	and,COND(=)	%r19,%r2,%r0
	b,n	syscall_restore_rfi

	ldo	TASK_PT_FR31(%r1),%r19		/* reload fpregs */
	rest_fp	%r19

	LDREG	TASK_PT_SAR(%r1),%r19		/* restore SAR */
	mtsar	%r19

	LDREG	TASK_PT_GR2(%r1),%r2		/* restore user rp */
	LDREG	TASK_PT_GR19(%r1),%r19
	LDREG	TASK_PT_GR20(%r1),%r20
	LDREG	TASK_PT_GR21(%r1),%r21
	LDREG	TASK_PT_GR22(%r1),%r22
	LDREG	TASK_PT_GR23(%r1),%r23
	LDREG	TASK_PT_GR24(%r1),%r24
	LDREG	TASK_PT_GR25(%r1),%r25
	LDREG	TASK_PT_GR26(%r1),%r26
	LDREG	TASK_PT_GR27(%r1),%r27		/* restore user dp */
	LDREG	TASK_PT_GR28(%r1),%r28		/* syscall return value */
	LDREG	TASK_PT_GR29(%r1),%r29
	LDREG	TASK_PT_GR31(%r1),%r31		/* restore syscall rp */

	/* NOTE: We use rsm/ssm pair to make this operation atomic */
	LDREG	TASK_PT_GR30(%r1),%r1		/* Get user sp */
	rsm	PSW_SM_I, %r0
	copy	%r1,%r30			/* Restore user sp */
	mfsp	%sr3,%r1			/* Get user space id */
	mtsp	%r1,%sr7			/* Restore sr7 */
	ssm	PSW_SM_I, %r0

	/* Set sr2 to zero for userspace syscalls to work. */
	mtsp	%r0,%sr2
	mtsp	%r1,%sr4			/* Restore sr4 */
	mtsp	%r1,%sr5			/* Restore sr5 */
	mtsp	%r1,%sr6			/* Restore sr6 */

	depi	3,31,2,%r31			/* ensure return to user mode. */

#ifdef CONFIG_64BIT
	/* decide whether to reset the wide mode bit
	 *
	 * For a syscall, the W bit is stored in the lowest bit
	 * of sp.  Extract it and reset W if it is zero */
	extrd,u,*<>	%r30,63,1,%r1
	rsm	PSW_SM_W, %r0
	/* now reset the lowest bit of sp if it was set */
	xor	%r30,%r1,%r30
#endif
	be,n	0(%sr3,%r31)			/* return to user space */
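	/*
	 * Illustrative note (not from the original source): a wide-mode
	 * caller enters the gateway page with the lowest bit of %sp set
	 * (e.g. sp == 0x...01).  The extrd,u above copies that bit into
	 * %r1 and, when it is 1, nullifies the rsm so PSW_W stays set;
	 * the xor then clears the flag bit so the restored %sp is the
	 * real stack pointer again.
	 */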

	/* We have to return via an RFI, so that PSW T and R bits can be set
	 * appropriately.
	 * This sets up pt_regs so we can return via intr_restore, which is not
	 * the most efficient way of doing things, but it works.
	 */
syscall_restore_rfi:
	ldo	-1(%r0),%r2			/* Set recovery cntr to -1 */
	mtctl	%r2,%cr0			/*   for immediate trap */
	LDREG	TASK_PT_PSW(%r1),%r2		/* Get old PSW */
	ldi	0x0b,%r20			/* Create new PSW */
	depi	-1,13,1,%r20			/* C, Q, D, and I bits */

	/* The values of SINGLESTEP_BIT and BLOCKSTEP_BIT are
	 * set in thread_info.h and converted to PA bitmap
	 * numbers in asm-offsets.c */

	/* if ((%r19.SINGLESTEP_BIT)) { %r20.27=1} */
	extru,=	%r19,TIF_SINGLESTEP_PA_BIT,1,%r0
	depi	-1,27,1,%r20			/* R bit */

	/* if ((%r19.BLOCKSTEP_BIT)) { %r20.7=1} */
	extru,=	%r19,TIF_BLOCKSTEP_PA_BIT,1,%r0
	depi	-1,7,1,%r20			/* T bit */

	STREG	%r20,TASK_PT_PSW(%r1)

	/* Always store space registers, since sr3 can be changed (e.g. fork) */

	mfsp	%sr3,%r25
	STREG	%r25,TASK_PT_SR3(%r1)
	STREG	%r25,TASK_PT_SR4(%r1)
	STREG	%r25,TASK_PT_SR5(%r1)
	STREG	%r25,TASK_PT_SR6(%r1)
	STREG	%r25,TASK_PT_SR7(%r1)
	STREG	%r25,TASK_PT_IASQ0(%r1)
	STREG	%r25,TASK_PT_IASQ1(%r1)

	/* XXX W bit??? */
	/* Now if old D bit is clear, it means we didn't save all registers
	 * on syscall entry, so do that now.  This only happens on TRACEME
	 * calls, or if someone attached to us while we were on a syscall.
	 * We could make this more efficient by not saving r3-r18, but
	 * then we wouldn't be able to use the common intr_restore path.
	 * It is only for traced processes anyway, so performance is not
	 * an issue.
	 */
	bb,<	%r2,30,pt_regs_ok		/* Branch if D set */
	ldo	TASK_REGS(%r1),%r25
	reg_save %r25				/* Save r3 to r18 */

	/* Save the current sr */
	mfsp	%sr0,%r2
	STREG	%r2,TASK_PT_SR0(%r1)

	/* Save the scratch sr */
	mfsp	%sr1,%r2
	STREG	%r2,TASK_PT_SR1(%r1)

	/* sr2 should be set to zero for userspace syscalls */
	STREG	%r0,TASK_PT_SR2(%r1)

	LDREG	TASK_PT_GR31(%r1),%r2
	depi	3,31,2,%r2		/* ensure return to user mode. */
	STREG	%r2,TASK_PT_IAOQ0(%r1)
	ldo	4(%r2),%r2
	STREG	%r2,TASK_PT_IAOQ1(%r1)
	b	intr_restore
	copy	%r25,%r16

pt_regs_ok:
	LDREG	TASK_PT_IAOQ0(%r1),%r2
	depi	3,31,2,%r2		/* ensure return to user mode. */
	STREG	%r2,TASK_PT_IAOQ0(%r1)
	LDREG	TASK_PT_IAOQ1(%r1),%r2
	depi	3,31,2,%r2
	STREG	%r2,TASK_PT_IAOQ1(%r1)
	b	intr_restore
	copy	%r25,%r16

syscall_do_resched:
	load32	syscall_check_resched,%r2 /* if resched, we start over again */
	load32	schedule,%r19
	bv	%r0(%r19)		/* jumps to schedule() */
#ifdef CONFIG_64BIT
	ldo	-16(%r30),%r29		/* Reference param save area */
#else
	nop
#endif
END(syscall_exit)


#ifdef CONFIG_FUNCTION_TRACER

	.import ftrace_function_trampoline,code
	.align L1_CACHE_BYTES
ENTRY_CFI(mcount, caller)
_mcount:
	.export _mcount,data
	/*
	 * The 64bit mcount() function pointer needs 4 dwords, of which the
	 * first two are free.  We optimize it here and put 2 instructions for
	 * calling mcount(), and 2 instructions for ftrace_stub().  That way we
	 * have all on one L1 cacheline.
	 */
	ldi	0, %arg3
	b	ftrace_function_trampoline
	copy	%r3, %arg2	/* caller original %sp */
ftrace_stub:
	.globl ftrace_stub
	.type  ftrace_stub, @function
#ifdef CONFIG_64BIT
	bve	(%rp)
#else
	bv	%r0(%rp)
#endif
	nop
#ifdef CONFIG_64BIT
	.dword mcount
	.dword 0 /* code in head.S puts value of global gp here */
#endif
ENDPROC_CFI(mcount)

#ifdef CONFIG_DYNAMIC_FTRACE

#ifdef CONFIG_64BIT
#define FTRACE_FRAME_SIZE (2*FRAME_SIZE)
#else
#define FTRACE_FRAME_SIZE FRAME_SIZE
#endif
ENTRY_CFI(ftrace_caller, caller,frame=FTRACE_FRAME_SIZE,CALLS,SAVE_RP,SAVE_SP)
ftrace_caller:
	.global ftrace_caller

	STREG	%r3, -FTRACE_FRAME_SIZE+1*REG_SZ(%sp)
	ldo	-FTRACE_FRAME_SIZE(%sp), %r3
	STREG	%rp, -RP_OFFSET(%r3)

	/* Offset 0 is already allocated for %r1 */
	STREG	%r23, 2*REG_SZ(%r3)
	STREG	%r24, 3*REG_SZ(%r3)
	STREG	%r25, 4*REG_SZ(%r3)
	STREG	%r26, 5*REG_SZ(%r3)
	STREG	%r28, 6*REG_SZ(%r3)
	STREG	%r29, 7*REG_SZ(%r3)
#ifdef CONFIG_64BIT
	STREG	%r19, 8*REG_SZ(%r3)
	STREG	%r20, 9*REG_SZ(%r3)
	STREG	%r21, 10*REG_SZ(%r3)
	STREG	%r22, 11*REG_SZ(%r3)
	STREG	%r27, 12*REG_SZ(%r3)
	STREG	%r31, 13*REG_SZ(%r3)
	loadgp
	ldo	-16(%sp),%r29
#endif
	LDREG	0(%r3), %r25
	copy	%rp, %r26
	ldo	-8(%r25), %r25
	ldi	0, %r23		/* no pt_regs */
	b,l	ftrace_function_trampoline, %rp
	copy	%r3, %r24

	LDREG	-RP_OFFSET(%r3), %rp
	LDREG	2*REG_SZ(%r3), %r23
	LDREG	3*REG_SZ(%r3), %r24
	LDREG	4*REG_SZ(%r3), %r25
	LDREG	5*REG_SZ(%r3), %r26
	LDREG	6*REG_SZ(%r3), %r28
	LDREG	7*REG_SZ(%r3), %r29
#ifdef CONFIG_64BIT
	LDREG	8*REG_SZ(%r3), %r19
	LDREG	9*REG_SZ(%r3), %r20
	LDREG	10*REG_SZ(%r3), %r21
	LDREG	11*REG_SZ(%r3), %r22
	LDREG	12*REG_SZ(%r3), %r27
	LDREG	13*REG_SZ(%r3), %r31
#endif
	LDREG	1*REG_SZ(%r3), %r3

	LDREGM	-FTRACE_FRAME_SIZE(%sp), %r1
	/* Adjust return point to jump back to beginning of traced function */
	ldo	-4(%r1), %r1
	bv,n	(%r1)

ENDPROC_CFI(ftrace_caller)

#ifdef CONFIG_HAVE_DYNAMIC_FTRACE_WITH_REGS
ENTRY_CFI(ftrace_regs_caller,caller,frame=FTRACE_FRAME_SIZE+PT_SZ_ALGN,
	CALLS,SAVE_RP,SAVE_SP)
ftrace_regs_caller:
	.global ftrace_regs_caller

	ldo	-FTRACE_FRAME_SIZE(%sp), %r1
	STREG	%rp, -RP_OFFSET(%r1)

	copy	%sp, %r1
	ldo	PT_SZ_ALGN(%sp), %sp

	STREG	%rp, PT_GR2(%r1)
	STREG	%r3, PT_GR3(%r1)
	STREG	%r4, PT_GR4(%r1)
	STREG	%r5, PT_GR5(%r1)
	STREG	%r6, PT_GR6(%r1)
	STREG	%r7, PT_GR7(%r1)
	STREG	%r8, PT_GR8(%r1)
	STREG	%r9, PT_GR9(%r1)
	STREG	%r10, PT_GR10(%r1)
	STREG	%r11, PT_GR11(%r1)
	STREG	%r12, PT_GR12(%r1)
	STREG	%r13, PT_GR13(%r1)
	STREG	%r14, PT_GR14(%r1)
	STREG	%r15, PT_GR15(%r1)
	STREG	%r16, PT_GR16(%r1)
	STREG	%r17, PT_GR17(%r1)
	STREG	%r18, PT_GR18(%r1)
	STREG	%r19, PT_GR19(%r1)
	STREG	%r20, PT_GR20(%r1)
	STREG	%r21, PT_GR21(%r1)
	STREG	%r22, PT_GR22(%r1)
	STREG	%r23, PT_GR23(%r1)
	STREG	%r24, PT_GR24(%r1)
	STREG	%r25, PT_GR25(%r1)
	STREG	%r26, PT_GR26(%r1)
	STREG	%r27, PT_GR27(%r1)
	STREG	%r28, PT_GR28(%r1)
	STREG	%r29, PT_GR29(%r1)
	STREG	%r30, PT_GR30(%r1)
	STREG	%r31, PT_GR31(%r1)
	mfctl	%cr11, %r26
	STREG	%r26, PT_SAR(%r1)

	copy	%rp, %r26
	LDREG	-FTRACE_FRAME_SIZE-PT_SZ_ALGN(%sp), %r25
	ldo	-8(%r25), %r25
	ldo	-FTRACE_FRAME_SIZE(%r1), %arg2
	b,l	ftrace_function_trampoline, %rp
	copy	%r1, %arg3 /* struct pt_regs */

	ldo	-PT_SZ_ALGN(%sp), %r1

	LDREG	PT_SAR(%r1), %rp
	mtctl	%rp, %cr11

	LDREG	PT_GR2(%r1), %rp
	LDREG	PT_GR3(%r1), %r3
	LDREG	PT_GR4(%r1), %r4
	LDREG	PT_GR5(%r1), %r5
	LDREG	PT_GR6(%r1), %r6
	LDREG	PT_GR7(%r1), %r7
	LDREG	PT_GR8(%r1), %r8
	LDREG	PT_GR9(%r1), %r9
	LDREG	PT_GR10(%r1),%r10
	LDREG	PT_GR11(%r1),%r11
	LDREG	PT_GR12(%r1),%r12
	LDREG	PT_GR13(%r1),%r13
	LDREG	PT_GR14(%r1),%r14
	LDREG	PT_GR15(%r1),%r15
	LDREG	PT_GR16(%r1),%r16
	LDREG	PT_GR17(%r1),%r17
	LDREG	PT_GR18(%r1),%r18
	LDREG	PT_GR19(%r1),%r19
	LDREG	PT_GR20(%r1),%r20
	LDREG	PT_GR21(%r1),%r21
	LDREG	PT_GR22(%r1),%r22
	LDREG	PT_GR23(%r1),%r23
	LDREG	PT_GR24(%r1),%r24
	LDREG	PT_GR25(%r1),%r25
	LDREG	PT_GR26(%r1),%r26
	LDREG	PT_GR27(%r1),%r27
	LDREG	PT_GR28(%r1),%r28
	LDREG	PT_GR29(%r1),%r29
	LDREG	PT_GR30(%r1),%r30
	LDREG	PT_GR31(%r1),%r31

	ldo	-PT_SZ_ALGN(%sp), %sp
	LDREGM	-FTRACE_FRAME_SIZE(%sp), %r1
	/* Adjust return point to jump back to beginning of traced function */
	ldo	-4(%r1), %r1
	bv,n	(%r1)

ENDPROC_CFI(ftrace_regs_caller)

#endif
#endif

#ifdef CONFIG_FUNCTION_GRAPH_TRACER
	.align 8
ENTRY_CFI(return_to_handler, caller,frame=FRAME_SIZE)
	.export parisc_return_to_handler,data
parisc_return_to_handler:
	copy	%r3,%r1
	STREG	%r0,-RP_OFFSET(%sp)	/* store 0 as %rp */
	copy	%sp,%r3
	STREGM	%r1,FRAME_SIZE(%sp)
	STREG	%ret0,8(%r3)
	STREG	%ret1,16(%r3)

#ifdef CONFIG_64BIT
	loadgp
#endif

	/* call ftrace_return_to_handler(0) */
	.import ftrace_return_to_handler,code
	load32	ftrace_return_to_handler,%ret0
	load32	.Lftrace_ret,%r2
#ifdef CONFIG_64BIT
	ldo	-16(%sp),%ret1		/* Reference param save area */
	bve	(%ret0)
#else
	bv	%r0(%ret0)
#endif
	ldi	0,%r26
.Lftrace_ret:
	copy	%ret0,%rp

	/* restore original return values */
	LDREG	8(%r3),%ret0
	LDREG	16(%r3),%ret1

	/* return from function */
#ifdef CONFIG_64BIT
	bve	(%rp)
#else
	bv	%r0(%rp)
#endif
	LDREGM	-FRAME_SIZE(%sp),%r3
ENDPROC_CFI(return_to_handler)

#endif /* CONFIG_FUNCTION_GRAPH_TRACER */

#endif /* CONFIG_FUNCTION_TRACER */

#ifdef CONFIG_IRQSTACKS
/* void call_on_stack(unsigned long param1, void *func,
		      unsigned long new_stack) */
ENTRY_CFI(call_on_stack, FRAME=2*FRAME_SIZE,CALLS,SAVE_RP,SAVE_SP)
ENTRY(_call_on_stack)
	copy	%sp, %r1

	/* Regarding the HPPA calling conventions for function pointers,
	   we assume the PIC register is not changed across call.  For
	   CONFIG_64BIT, the argument pointer is left to point at the
	   argument region allocated for the call to call_on_stack. */

	/* Switch to new stack.  We allocate two frames.  */
	ldo	2*FRAME_SIZE(%arg2), %sp
# ifdef CONFIG_64BIT
	/* Save previous stack pointer and return pointer in frame marker */
	STREG	%rp, -FRAME_SIZE-RP_OFFSET(%sp)
	/* Calls always use function descriptor */
	LDREG	16(%arg1), %arg1
	bve,l	(%arg1), %rp
	STREG	%r1, -FRAME_SIZE-REG_SZ(%sp)
	LDREG	-FRAME_SIZE-RP_OFFSET(%sp), %rp
	bve	(%rp)
	LDREG	-FRAME_SIZE-REG_SZ(%sp), %sp
# else
	/* Save previous stack pointer and return pointer in frame marker */
	STREG	%r1, -FRAME_SIZE-REG_SZ(%sp)
	STREG	%rp, -FRAME_SIZE-RP_OFFSET(%sp)
	/* Calls use function descriptor if PLABEL bit is set */
	bb,>=,n	%arg1, 30, 1f
	depwi	0,31,2, %arg1
	LDREG	0(%arg1), %arg1
1:
	be,l	0(%sr4,%arg1), %sr0, %r31
	copy	%r31, %rp
	LDREG	-FRAME_SIZE-RP_OFFSET(%sp), %rp
	bv	(%rp)
	LDREG	-FRAME_SIZE-REG_SZ(%sp), %sp
# endif /* CONFIG_64BIT */
ENDPROC_CFI(call_on_stack)
#endif /* CONFIG_IRQSTACKS */
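/*
 * Illustrative usage from the C side (a hedged sketch, not from this
 * file): callers invoke the routine above through the prototype in the
 * comment preceding it, roughly
 *
 *	void call_on_stack(unsigned long param1, void *func,
 *			   unsigned long new_stack);
 *	...
 *	call_on_stack(param, handler, top_of_irq_stack);
 *
 * so %arg0/%arg1/%arg2 carry param1/func/new_stack on entry to
 * _call_on_stack; the IRQ code uses this to run handlers on a
 * separate per-CPU interrupt stack.
 */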

ENTRY_CFI(get_register)
	/*
	 * get_register is used by the non access tlb miss handlers to
	 * copy the value of the general register specified in r8 into
	 * r1. This routine can't be used for shadowed registers, since
	 * the rfir will restore the original value. So, for the shadowed
	 * registers we put a -1 into r1 to indicate that the register
	 * should not be used (the register being copied could also have
	 * a -1 in it, but that is OK, it just means that we will have
	 * to use the slow path instead).
	 */
	blr	%r8,%r0
	nop
	bv	%r0(%r25)    /* r0 */
	copy	%r0,%r1
	bv	%r0(%r25)    /* r1 - shadowed */
	ldi	-1,%r1
	bv	%r0(%r25)    /* r2 */
	copy	%r2,%r1
	bv	%r0(%r25)    /* r3 */
	copy	%r3,%r1
	bv	%r0(%r25)    /* r4 */
	copy	%r4,%r1
	bv	%r0(%r25)    /* r5 */
	copy	%r5,%r1
	bv	%r0(%r25)    /* r6 */
	copy	%r6,%r1
	bv	%r0(%r25)    /* r7 */
	copy	%r7,%r1
	bv	%r0(%r25)    /* r8 - shadowed */
	ldi	-1,%r1
	bv	%r0(%r25)    /* r9 - shadowed */
	ldi	-1,%r1
	bv	%r0(%r25)    /* r10 */
	copy	%r10,%r1
	bv	%r0(%r25)    /* r11 */
	copy	%r11,%r1
	bv	%r0(%r25)    /* r12 */
	copy	%r12,%r1
	bv	%r0(%r25)    /* r13 */
	copy	%r13,%r1
	bv	%r0(%r25)    /* r14 */
	copy	%r14,%r1
	bv	%r0(%r25)    /* r15 */
	copy	%r15,%r1
	bv	%r0(%r25)    /* r16 - shadowed */
	ldi	-1,%r1
	bv	%r0(%r25)    /* r17 - shadowed */
	ldi	-1,%r1
	bv	%r0(%r25)    /* r18 */
	copy	%r18,%r1
	bv	%r0(%r25)    /* r19 */
	copy	%r19,%r1
	bv	%r0(%r25)    /* r20 */
	copy	%r20,%r1
	bv	%r0(%r25)    /* r21 */
	copy	%r21,%r1
	bv	%r0(%r25)    /* r22 */
	copy	%r22,%r1
	bv	%r0(%r25)    /* r23 */
	copy	%r23,%r1
	bv	%r0(%r25)    /* r24 - shadowed */
	ldi	-1,%r1
	bv	%r0(%r25)    /* r25 - shadowed */
	ldi	-1,%r1
	bv	%r0(%r25)    /* r26 */
	copy	%r26,%r1
	bv	%r0(%r25)    /* r27 */
	copy	%r27,%r1
	bv	%r0(%r25)    /* r28 */
	copy	%r28,%r1
	bv	%r0(%r25)    /* r29 */
	copy	%r29,%r1
	bv	%r0(%r25)    /* r30 */
	copy	%r30,%r1
	bv	%r0(%r25)    /* r31 */
	copy	%r31,%r1
ENDPROC_CFI(get_register)


ENTRY_CFI(set_register)
	/*
	 * set_register is used by the non access tlb miss handlers to
	 * copy the value of r1 into the general register specified in
	 * r8.
	 */
	blr	%r8,%r0
	nop
	bv	%r0(%r25)    /* r0 (silly, but it is a place holder) */
	copy	%r1,%r0
	bv	%r0(%r25)    /* r1 */
	copy	%r1,%r1
	bv	%r0(%r25)    /* r2 */
	copy	%r1,%r2
	bv	%r0(%r25)    /* r3 */
	copy	%r1,%r3
	bv	%r0(%r25)    /* r4 */
	copy	%r1,%r4
	bv	%r0(%r25)    /* r5 */
	copy	%r1,%r5
	bv	%r0(%r25)    /* r6 */
	copy	%r1,%r6
	bv	%r0(%r25)    /* r7 */
	copy	%r1,%r7
	bv	%r0(%r25)    /* r8 */
	copy	%r1,%r8
	bv	%r0(%r25)    /* r9 */
	copy	%r1,%r9
	bv	%r0(%r25)    /* r10 */
	copy	%r1,%r10
	bv	%r0(%r25)    /* r11 */
	copy	%r1,%r11
	bv	%r0(%r25)    /* r12 */
	copy	%r1,%r12
	bv	%r0(%r25)    /* r13 */
	copy	%r1,%r13
	bv	%r0(%r25)    /* r14 */
	copy	%r1,%r14
	bv	%r0(%r25)    /* r15 */
	copy	%r1,%r15
	bv	%r0(%r25)    /* r16 */
	copy	%r1,%r16
	bv	%r0(%r25)    /* r17 */
	copy	%r1,%r17
	bv	%r0(%r25)    /* r18 */
	copy	%r1,%r18
	bv	%r0(%r25)    /* r19 */
	copy	%r1,%r19
	bv	%r0(%r25)    /* r20 */
	copy	%r1,%r20
	bv	%r0(%r25)    /* r21 */
	copy	%r1,%r21
	bv	%r0(%r25)    /* r22 */
	copy	%r1,%r22
	bv	%r0(%r25)    /* r23 */
	copy	%r1,%r23
	bv	%r0(%r25)    /* r24 */
	copy	%r1,%r24
	bv	%r0(%r25)    /* r25 */
	copy	%r1,%r25
	bv	%r0(%r25)    /* r26 */
	copy	%r1,%r26
	bv	%r0(%r25)    /* r27 */
	copy	%r1,%r27
	bv	%r0(%r25)    /* r28 */
	copy	%r1,%r28
	bv	%r0(%r25)    /* r29 */
	copy	%r1,%r29
	bv	%r0(%r25)    /* r30 */
	copy	%r1,%r30
	bv	%r0(%r25)    /* r31 */
	copy	%r1,%r31
ENDPROC_CFI(set_register)
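
	/*
	 * Illustrative note (not from the original source): the two
	 * routines above are computed-branch jump tables.  "blr %r8,%r0"
	 * branches %r8 * 8 bytes past the delay-slot instruction and
	 * discards the return link, so each register gets exactly one
	 * two-instruction (8-byte) slot: a "bv" back to the caller with
	 * the actual copy/ldi in its delay slot.
	 */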