/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
 * Linux/PA-RISC Project (http://www.parisc-linux.org/)
 *
 * kernel entry points (interruptions, system call wrappers)
 * Copyright (C) 1999,2000 Philipp Rumpf
 * Copyright (C) 1999 SuSE GmbH Nuernberg
 * Copyright (C) 2000 Hewlett-Packard (John Marvin)
 * Copyright (C) 1999 Hewlett-Packard (Frank Rowand)
 */

#include <asm/asm-offsets.h>

/* we have the following possibilities to act on an interruption:
 * - handle in assembly and use shadowed registers only
 * - save registers to kernel stack and handle in assembly or C */


#include <asm/psw.h>
#include <asm/cache.h>		/* for L1_CACHE_SHIFT */
#include <asm/assembly.h>	/* for LDREG/STREG defines */
#include <asm/signal.h>
#include <asm/unistd.h>
#include <asm/ldcw.h>
#include <asm/traps.h>
#include <asm/thread_info.h>
#include <asm/alternative.h>

#include <linux/linkage.h>
#include <linux/pgtable.h>

#ifdef CONFIG_64BIT
	.level 2.0w
#else
	.level 2.0
#endif

	.import		pa_tlb_lock,data
	.macro	load_pa_tlb_lock reg
	mfctl		%cr25,\reg
	addil		L%(PAGE_SIZE << (PGD_ALLOC_ORDER - 1)),\reg
	.endm

	/* space_to_prot macro creates a prot id from a space id */

#if (SPACEID_SHIFT) == 0
	.macro	space_to_prot spc prot
	depd,z	\spc,62,31,\prot
	.endm
#else
	.macro	space_to_prot spc prot
	extrd,u	\spc,(64 - (SPACEID_SHIFT)),32,\prot
	.endm
#endif

	/* Switch to virtual mapping, trashing only %r1 */
	.macro	virt_map
	/* pcxt_ssm_bug */
	rsm	PSW_SM_I, %r0	/* barrier for "Relied upon Translation" */
	mtsp	%r0, %sr4
	mtsp	%r0, %sr5
	mtsp	%r0, %sr6
	tovirt_r1 %r29
	load32	KERNEL_PSW, %r1

	rsm	PSW_SM_QUIET,%r0	/* second "heavy weight" ctl op */
	mtctl	%r0, %cr17	/* Clear IIASQ tail */
	mtctl	%r0, %cr17	/* Clear IIASQ head */
	mtctl	%r1, %ipsw
	load32	4f, %r1
	mtctl	%r1, %cr18	/* Set IIAOQ tail */
	ldo	4(%r1), %r1
	mtctl	%r1, %cr18	/* Set IIAOQ head */
	rfir
	nop
4:
	.endm

	/*
	 * The "get_stack" macros are responsible for determining the
	 * kernel stack value.
	 *
	 *	If sr7 == 0
	 *	    Already using a kernel stack, so call the
	 *	    get_stack_use_r30 macro to push a pt_regs structure
	 *	    on the stack, and store registers there.
	 *	else
	 *	    Need to set up a kernel stack, so call the
	 *	    get_stack_use_cr30 macro to set up a pointer
	 *	    to the pt_regs structure contained within the
	 *	    task pointer pointed to by cr30.  Set the stack
	 *	    pointer to point to the end of the task structure.
	 *
	 * Note that we use shadowed registers for temps until
	 * we can save %r26 and %r29.  %r26 is used to preserve
	 * %r8 (a shadowed register) which temporarily contained
	 * either the fault type ("code") or the eirr.  We need
	 * to use a non-shadowed register to carry the value over
	 * the rfir in virt_map.  We use %r26 since this value winds
	 * up being passed as the argument to either do_cpu_irq_mask
	 * or handle_interruption.  %r29 is used to hold a pointer
	 * to the register save area, and once again, it needs to
	 * be a non-shadowed register so that it survives the rfir.
	 *
	 * N.B. TASK_SZ_ALGN and PT_SZ_ALGN include space for a stack frame.
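	 *
	 * As a rough C sketch of the stack selection above (illustrative
	 * only; the names follow the asm-offsets used below, but this is
	 * not real kernel code):
	 *
	 *	if (mfsp(%sr7) == 0) {		// already on a kernel stack
	 *		regs = (struct pt_regs *)sp;	// get_stack_use_r30
	 *		sp  += PT_SZ_ALGN;
	 *	} else {				// get_stack_use_cr30
	 *		ti   = (struct thread_info *)mfctl(%cr30);
	 *		regs = &ti->task->thread.regs;	// TASK_REGS
	 *		sp   = (unsigned long)ti + THREAD_SZ_ALGN;
	 *	}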
	 */

	.macro	get_stack_use_cr30

	/* we save the registers in the task struct */

	copy	%r30, %r17
	mfctl	%cr30, %r1
	ldo	THREAD_SZ_ALGN(%r1), %r30
	mtsp	%r0,%sr7
	mtsp	%r16,%sr3
	tophys	%r1,%r9
	LDREG	TI_TASK(%r9), %r1	/* thread_info -> task_struct */
	tophys	%r1,%r9
	ldo	TASK_REGS(%r9),%r9
	STREG	%r17,PT_GR30(%r9)
	STREG	%r29,PT_GR29(%r9)
	STREG	%r26,PT_GR26(%r9)
	STREG	%r16,PT_SR7(%r9)
	copy	%r9,%r29
	.endm

	.macro	get_stack_use_r30

	/* we put a struct pt_regs on the stack and save the registers there */

	tophys	%r30,%r9
	copy	%r30,%r1
	ldo	PT_SZ_ALGN(%r30),%r30
	STREG	%r1,PT_GR30(%r9)
	STREG	%r29,PT_GR29(%r9)
	STREG	%r26,PT_GR26(%r9)
	STREG	%r16,PT_SR7(%r9)
	copy	%r9,%r29
	.endm

	.macro	rest_stack
	LDREG	PT_GR1(%r29), %r1
	LDREG	PT_GR30(%r29),%r30
	LDREG	PT_GR29(%r29),%r29
	.endm

	/* default interruption handler
	 * (calls traps.c:handle_interruption) */
	.macro	def code
	b	intr_save
	ldi	\code, %r8
	.align	32
	.endm

	/* Interrupt interruption handler
	 * (calls irq.c:do_cpu_irq_mask) */
	.macro	extint code
	b	intr_extint
	mfsp	%sr7,%r16
	.align	32
	.endm

	.import	os_hpmc, code

	/* HPMC handler */
	.macro	hpmc code
	nop			/* must be a NOP, will be patched later */
	load32	PA(os_hpmc), %r3
	bv,n	0(%r3)
	nop
	.word	0		/* checksum (will be patched) */
	.word	0		/* address of handler */
	.word	0		/* length of handler */
	.endm

	/*
	 * Performance Note: Instructions will be moved up into
	 * this part of the code later on, once we are sure
	 * that the tlb miss handlers are close to final form.
	 */

	/* Register definitions for tlb miss handler macros */

	va  = r8	/* virtual address for which the trap occurred */
	spc = r24	/* space for which the trap occurred */

#ifndef CONFIG_64BIT

	/*
	 * itlb miss interruption handler (parisc 1.1 - 32 bit)
	 */

	.macro	itlb_11 code

	mfctl	%pcsq, spc
	b	itlb_miss_11
	mfctl	%pcoq, va

	.align	32
	.endm
#endif

	/*
	 * itlb miss interruption handler (parisc 2.0)
	 */

	.macro	itlb_20 code
	mfctl	%pcsq, spc
#ifdef CONFIG_64BIT
	b	itlb_miss_20w
#else
	b	itlb_miss_20
#endif
	mfctl	%pcoq, va

	.align	32
	.endm

#ifndef CONFIG_64BIT
	/*
	 * naitlb miss interruption handler (parisc 1.1 - 32 bit)
	 */

	.macro	naitlb_11 code

	mfctl	%isr,spc
	b	naitlb_miss_11
	mfctl	%ior,va

	.align	32
	.endm
#endif

	/*
	 * naitlb miss interruption handler (parisc 2.0)
	 */

	.macro	naitlb_20 code

	mfctl	%isr,spc
#ifdef CONFIG_64BIT
	b	naitlb_miss_20w
#else
	b	naitlb_miss_20
#endif
	mfctl	%ior,va

	.align	32
	.endm

#ifndef CONFIG_64BIT
	/*
	 * dtlb miss interruption handler (parisc 1.1 - 32 bit)
	 */

	.macro	dtlb_11 code

	mfctl	%isr, spc
	b	dtlb_miss_11
	mfctl	%ior, va

	.align	32
	.endm
#endif

	/*
	 * dtlb miss interruption handler (parisc 2.0)
	 */

	.macro	dtlb_20 code

	mfctl	%isr, spc
#ifdef CONFIG_64BIT
	b	dtlb_miss_20w
#else
	b	dtlb_miss_20
#endif
	mfctl	%ior, va

	.align	32
	.endm

#ifndef CONFIG_64BIT
	/* nadtlb miss interruption handler (parisc 1.1 - 32 bit) */

	.macro	nadtlb_11 code

	mfctl	%isr,spc
	b	nadtlb_miss_11
	mfctl	%ior,va

	.align	32
	.endm
#endif

	/* nadtlb miss interruption handler (parisc 2.0) */

	.macro	nadtlb_20 code

	mfctl	%isr,spc
#ifdef CONFIG_64BIT
	b	nadtlb_miss_20w
#else
	b	nadtlb_miss_20
#endif
	mfctl	%ior,va

	.align	32
	.endm

#ifndef CONFIG_64BIT
	/*
	 * dirty bit trap interruption handler (parisc 1.1 - 32 bit)
	 */

	.macro	dbit_11 code

	mfctl	%isr,spc
	b	dbit_trap_11
	mfctl	%ior,va

	.align	32
	.endm
#endif

	/*
	 * dirty bit trap interruption handler (parisc 2.0)
	 */

	.macro	dbit_20 code

	mfctl	%isr,spc
#ifdef CONFIG_64BIT
	b	dbit_trap_20w
#else
	b	dbit_trap_20
#endif
	mfctl	%ior,va

	.align	32
	.endm

	/* In LP64, the space contains part of the upper 32 bits of the
	 * fault.  We have to extract this and place it in the va,
	 * zeroing the corresponding bits in the space register */
	.macro	space_adjust	spc,va,tmp
#ifdef CONFIG_64BIT
	extrd,u	\spc,63,SPACEID_SHIFT,\tmp
	depd	%r0,63,SPACEID_SHIFT,\spc
	depd	\tmp,31,SPACEID_SHIFT,\va
#endif
	.endm

	.import swapper_pg_dir,code

	/* Get the pgd.  For faults on space zero (kernel space), this
	 * is simply swapper_pg_dir.  For user space faults, the
	 * pgd is stored in %cr25 */
	.macro	get_pgd		spc,reg
	ldil		L%PA(swapper_pg_dir),\reg
	ldo		R%PA(swapper_pg_dir)(\reg),\reg
	or,COND(=)	%r0,\spc,%r0
	mfctl		%cr25,\reg
	.endm

	/*
		space_check(spc,tmp,fault)

		spc   - The space we saw the fault with.
		tmp   - The place to store the current space.
		fault - Function to call on failure.

		Only allow faults on different spaces from the
		currently active one if we're the kernel

	*/
	.macro	space_check	spc,tmp,fault
	mfsp	%sr7,\tmp
	/* check against %r0 which is same value as LINUX_GATEWAY_SPACE */
	or,COND(<>)	%r0,\spc,%r0	/* user may execute gateway page
					 * as kernel, so defeat the space
					 * check if it is */
	copy	\spc,\tmp
	or,COND(=)	%r0,\tmp,%r0	/* nullify if executing as kernel */
	cmpb,COND(<>),n	\tmp,\spc,\fault
	.endm

	/* Look up a PTE in a 2-Level scheme (faulting at each
	 * level if the entry isn't present)
	 *
	 * NOTE: we use ldw even for LP64, since the short pointers
	 * can address up to 1TB
	 */
	.macro	L2_ptep	pmd,pte,index,va,fault
#if CONFIG_PGTABLE_LEVELS == 3
	extru	\va,31-ASM_PMD_SHIFT,ASM_BITS_PER_PMD,\index
#else
# if defined(CONFIG_64BIT)
	extrd,u	\va,63-ASM_PGDIR_SHIFT,ASM_BITS_PER_PGD,\index
# else
#  if PAGE_SIZE > 4096
	extru	\va,31-ASM_PGDIR_SHIFT,32-ASM_PGDIR_SHIFT,\index
#  else
	extru	\va,31-ASM_PGDIR_SHIFT,ASM_BITS_PER_PGD,\index
#  endif
# endif
#endif
	dep	%r0,31,PAGE_SHIFT,\pmd	/* clear offset */
	copy	%r0,\pte
	ldw,s	\index(\pmd),\pmd
	bb,>=,n	\pmd,_PxD_PRESENT_BIT,\fault
	dep	%r0,31,PxD_FLAG_SHIFT,\pmd	/* clear flags */
	SHLREG	\pmd,PxD_VALUE_SHIFT,\pmd
	extru	\va,31-PAGE_SHIFT,ASM_BITS_PER_PTE,\index
	dep	%r0,31,PAGE_SHIFT,\pmd	/* clear offset */
	shladd	\index,BITS_PER_PTE_ENTRY,\pmd,\pmd	/* pmd is now pte */
	.endm

	/* Look up PTE in a 3-Level scheme.
	 *
	 * Here we implement a Hybrid L2/L3 scheme: we allocate the
	 * first pmd adjacent to the pgd.  This means that we can
	 * subtract a constant offset to get to it.  The pmd and pgd
	 * sizes are arranged so that a single pmd covers 4GB (giving
	 * a full LP64 process access to 8TB) so our lookups are
	 * effectively L2 for the first 4GB of the kernel (i.e. for
	 * all ILP32 processes and all the kernel for machines with
	 * under 4GB of memory) */
	.macro	L3_ptep	pgd,pte,index,va,fault
#if CONFIG_PGTABLE_LEVELS == 3	/* we might have a 2-Level scheme, e.g. with 16KB page size */
	extrd,u	\va,63-ASM_PGDIR_SHIFT,ASM_BITS_PER_PGD,\index
	extrd,u,*=	\va,63-ASM_PGDIR_SHIFT,64-ASM_PGDIR_SHIFT,%r0
	ldw,s	\index(\pgd),\pgd
	extrd,u,*=	\va,63-ASM_PGDIR_SHIFT,64-ASM_PGDIR_SHIFT,%r0
	bb,>=,n	\pgd,_PxD_PRESENT_BIT,\fault
	extrd,u,*=	\va,63-ASM_PGDIR_SHIFT,64-ASM_PGDIR_SHIFT,%r0
	shld	\pgd,PxD_VALUE_SHIFT,\index
	extrd,u,*=	\va,63-ASM_PGDIR_SHIFT,64-ASM_PGDIR_SHIFT,%r0
	copy	\index,\pgd
	extrd,u,*<>	\va,63-ASM_PGDIR_SHIFT,64-ASM_PGDIR_SHIFT,%r0
	ldo	ASM_PGD_PMD_OFFSET(\pgd),\pgd
#endif
	L2_ptep	\pgd,\pte,\index,\va,\fault
	.endm

	/* Acquire the pa_tlb_lock and check that the page is present. */
	.macro	tlb_lock	spc,ptp,pte,tmp,tmp1,fault
#ifdef CONFIG_SMP
98:	cmpib,COND(=),n	0,\spc,2f
	load_pa_tlb_lock \tmp
1:	LDCW		0(\tmp),\tmp1
	cmpib,COND(=)	0,\tmp1,1b
	nop
	LDREG		0(\ptp),\pte
	bb,<,n		\pte,_PAGE_PRESENT_BIT,3f
	b		\fault
	stw		\spc,0(\tmp)
99:	ALTERNATIVE(98b, 99b, ALT_COND_NO_SMP, INSN_NOP)
#endif
2:	LDREG		0(\ptp),\pte
	bb,>=,n		\pte,_PAGE_PRESENT_BIT,\fault
3:
	.endm

	/* Release the pa_tlb_lock without reloading the lock address.
	   Note that the values in the register spc are limited to
	   NR_SPACE_IDS (262144).  Thus, the stw instruction always
	   stores a nonzero value even when register spc is 64 bits.
	   We use an ordered store to ensure all prior accesses are
	   performed prior to releasing the lock. */
	.macro	tlb_unlock0	spc,tmp
#ifdef CONFIG_SMP
98:	or,COND(=)	%r0,\spc,%r0
	stw,ma		\spc,0(\tmp)
99:	ALTERNATIVE(98b, 99b, ALT_COND_NO_SMP, INSN_NOP)
#endif
	.endm

	/* Release the pa_tlb_lock. */
	.macro	tlb_unlock1	spc,tmp
#ifdef CONFIG_SMP
98:	load_pa_tlb_lock \tmp
99:	ALTERNATIVE(98b, 99b, ALT_COND_NO_SMP, INSN_NOP)
	tlb_unlock0	\spc,\tmp
#endif
	.endm

	/* Set the _PAGE_ACCESSED bit of the PTE.  Be clever and
	 * don't needlessly dirty the cache line if it was already set */
	.macro	update_accessed	ptp,pte,tmp,tmp1
	ldi	_PAGE_ACCESSED,\tmp1
	or	\tmp1,\pte,\tmp
	and,COND(<>)	\tmp1,\pte,%r0
	STREG	\tmp,0(\ptp)
	.endm

	/* Set the dirty bit (and accessed bit).  No need to be
	 * clever, this is only used from the dirty fault */
	.macro	update_dirty	ptp,pte,tmp
	ldi	_PAGE_ACCESSED|_PAGE_DIRTY,\tmp
	or	\tmp,\pte,\pte
	STREG	\pte,0(\ptp)
	.endm

	/* We have (depending on the page size):
	 * - 38 to 52-bit Physical Page Number
	 * - 12 to 26-bit page offset
	 */
	/* bitshift difference between a PFN (based on kernel's PAGE_SIZE)
	 * to a CPU TLB 4k PFN (4k => 12 bits to shift) */
	#define PAGE_ADD_SHIFT		(PAGE_SHIFT-12)
	#define PAGE_ADD_HUGE_SHIFT	(REAL_HPAGE_SHIFT-12)

	/* Drop prot bits and convert to page addr for iitlbt and idtlbt */
	.macro	convert_for_tlb_insert20 pte,tmp
#ifdef CONFIG_HUGETLB_PAGE
	copy		\pte,\tmp
	extrd,u		\tmp,(63-ASM_PFN_PTE_SHIFT)+(63-58)+PAGE_ADD_SHIFT,\
				64-PAGE_SHIFT-PAGE_ADD_SHIFT,\pte

	depdi		_PAGE_SIZE_ENCODING_DEFAULT,63,\
				(63-58)+PAGE_ADD_SHIFT,\pte
	extrd,u,*=	\tmp,_PAGE_HPAGE_BIT+32,1,%r0
	depdi		_HUGE_PAGE_SIZE_ENCODING_DEFAULT,63,\
				(63-58)+PAGE_ADD_HUGE_SHIFT,\pte
#else /* Huge pages disabled */
	extrd,u		\pte,(63-ASM_PFN_PTE_SHIFT)+(63-58)+PAGE_ADD_SHIFT,\
				64-PAGE_SHIFT-PAGE_ADD_SHIFT,\pte
	depdi		_PAGE_SIZE_ENCODING_DEFAULT,63,\
				(63-58)+PAGE_ADD_SHIFT,\pte
#endif
	.endm

	/* Convert the pte and prot to tlb insertion values.  How
	 * this happens is quite subtle, read below */
	.macro	make_insert_tlb	spc,pte,prot,tmp
	space_to_prot	\spc \prot	/* create prot id from space */
	/* The following is the real subtlety.  This is depositing
	 * T <-> _PAGE_REFTRAP
	 * D <-> _PAGE_DIRTY
	 * B <-> _PAGE_DMB (memory break)
	 *
	 * Then incredible subtlety: The access rights are
	 * _PAGE_GATEWAY, _PAGE_EXEC and _PAGE_WRITE
	 * See 3-14 of the parisc 2.0 manual
	 *
	 * Finally, _PAGE_READ goes in the top bit of PL1 (so we
	 * trigger an access rights trap in user space if the user
	 * tries to read an unreadable page) */
	depd		\pte,8,7,\prot

	/* PAGE_USER indicates the page can be read with user privileges,
	 * so deposit X1|11 to PL1|PL2 (remember the upper bit of PL1
	 * contains _PAGE_READ) */
	extrd,u,*=	\pte,_PAGE_USER_BIT+32,1,%r0
	depdi		7,11,3,\prot
	/* If we're a gateway page, drop PL2 back to zero for promotion
	 * to kernel privilege (so we can execute the page as kernel).
	 * Any privilege promotion page always denies read and write */
	extrd,u,*=	\pte,_PAGE_GATEWAY_BIT+32,1,%r0
	depd		%r0,11,2,\prot	/* If Gateway, Set PL2 to 0 */

	/* Enforce uncacheable pages.
	 * This should ONLY be used for MMIO on PA 2.0 machines.
	 * Memory/DMA is cache coherent on all PA2.0 machines we support
	 * (that means T-class is NOT supported) and the memory controllers
	 * on most of those machines only handle cache transactions.
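	 *
	 * In rough C terms the test below amounts to (illustrative only;
	 * U_BIT is just a stand-in name for the TLB uncacheable bit that
	 * depdi 1,12,1 sets in the prot word):
	 *
	 *	if (pte & _PAGE_NO_CACHE)
	 *		prot |= U_BIT;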
	 */
	extrd,u,*=	\pte,_PAGE_NO_CACHE_BIT+32,1,%r0
	depdi		1,12,1,\prot

	/* Drop prot bits and convert to page addr for iitlbt and idtlbt */
	convert_for_tlb_insert20 \pte \tmp
	.endm

	/* Identical macro to make_insert_tlb above, except it
	 * makes the tlb entry for the differently formatted pa11
	 * insertion instructions */
	.macro	make_insert_tlb_11	spc,pte,prot
	zdep	\spc,30,15,\prot
	dep	\pte,8,7,\prot
	extru,=	\pte,_PAGE_NO_CACHE_BIT,1,%r0
	depi	1,12,1,\prot
	extru,=	\pte,_PAGE_USER_BIT,1,%r0
	depi	7,11,3,\prot	/* Set for user space (1 rsvd for read) */
	extru,=	\pte,_PAGE_GATEWAY_BIT,1,%r0
	depi	0,11,2,\prot	/* If Gateway, Set PL2 to 0 */

	/* Get rid of prot bits and convert to page addr for iitlba */

	depi	0,31,ASM_PFN_PTE_SHIFT,\pte
	SHRREG	\pte,(ASM_PFN_PTE_SHIFT-(31-26)),\pte
	.endm

	/* This is for ILP32 PA2.0 only.  The TLB insertion needs
	 * to extend into I/O space if the address is 0xfXXXXXXX
	 * so we extend the f's into the top word of the pte in
	 * this case */
	.macro	f_extend	pte,tmp
	extrd,s	\pte,42,4,\tmp
	addi,<>	1,\tmp,%r0
	extrd,s	\pte,63,25,\pte
	.endm

	/* The alias region is an 8MB aligned 16MB region used to do
	 * clear and copy user pages at addresses congruent with the
	 * user virtual address.
	 *
	 * To use the alias page, you set %r26 up with the "to" TLB
	 * entry (identifying the physical page) and %r23 up with
	 * the "from" tlb entry (or nothing if only a "to" entry---for
	 * clear_user_page_asm) */
	.macro	do_alias	spc,tmp,tmp1,va,pte,prot,fault,patype
	cmpib,COND(<>),n 0,\spc,\fault
	ldil		L%(TMPALIAS_MAP_START),\tmp
#if defined(CONFIG_64BIT) && (TMPALIAS_MAP_START >= 0x80000000)
	/* on LP64, ldi will sign extend into the upper 32 bits,
	 * which is behaviour we don't want */
	depdi		0,31,32,\tmp
#endif
	copy		\va,\tmp1
	depi		0,31,23,\tmp1
	cmpb,COND(<>),n	\tmp,\tmp1,\fault
	mfctl		%cr19,\tmp	/* iir */
	/* get the opcode (first six bits) into \tmp */
	extrw,u		\tmp,5,6,\tmp
	/*
	 * Only setting the T bit prevents data cache movein
	 * Setting access rights to zero prevents instruction cache movein
	 *
	 * Note subtlety here: _PAGE_GATEWAY, _PAGE_EXEC and _PAGE_WRITE go
	 * to type field and _PAGE_READ goes to top bit of PL1
	 */
	ldi		(_PAGE_REFTRAP|_PAGE_READ|_PAGE_WRITE),\prot
	/*
	 * so if the opcode is one (i.e. this is a memory management
	 * instruction) nullify the next load so \prot is only T.
	 * Otherwise this is a normal data operation
	 */
	cmpiclr,=	0x01,\tmp,%r0
	ldi		(_PAGE_DIRTY|_PAGE_READ|_PAGE_WRITE),\prot
.ifc \patype,20
	depd,z		\prot,8,7,\prot
.else
.ifc \patype,11
	depw,z		\prot,8,7,\prot
.else
	.error "undefined PA type to do_alias"
.endif
.endif
	/*
	 * OK, it is in the temp alias region, check whether "from" or "to".
	 * Check "subtle" note in pacache.S re: r23/r26.
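	 *
	 * In rough C terms (illustrative only; FROM_HALF_BIT is a stand-in
	 * name for the single va bit tested below):
	 *
	 *	pte = (va & FROM_HALF_BIT) ? r23	// "from" entry
	 *				   : r26;	// "to" entry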
	 */
#ifdef CONFIG_64BIT
	extrd,u,*=	\va,41,1,%r0
#else
	extrw,u,=	\va,9,1,%r0
#endif
	or,COND(tr)	%r23,%r0,\pte
	or		%r26,%r0,\pte
	.endm


	/*
	 * Fault_vectors are architecturally required to be aligned on a 2K
	 * boundary
	 */

	.section .text.hot
	.align 2048

ENTRY(fault_vector_20)
	/* First vector is invalid (0) */
	.ascii	"cows can fly"
	.byte 0
	.align 32

	hpmc		 1
	def		 2
	def		 3
	extint		 4
	def		 5
	itlb_20		 PARISC_ITLB_TRAP
	def		 7
	def		 8
	def		 9
	def		10
	def		11
	def		12
	def		13
	def		14
	dtlb_20		15
	naitlb_20	16
	nadtlb_20	17
	def		18
	def		19
	dbit_20		20
	def		21
	def		22
	def		23
	def		24
	def		25
	def		26
	def		27
	def		28
	def		29
	def		30
	def		31
END(fault_vector_20)

#ifndef CONFIG_64BIT

	.align 2048

ENTRY(fault_vector_11)
	/* First vector is invalid (0) */
	.ascii	"cows can fly"
	.byte 0
	.align 32

	hpmc		 1
	def		 2
	def		 3
	extint		 4
	def		 5
	itlb_11		 PARISC_ITLB_TRAP
	def		 7
	def		 8
	def		 9
	def		10
	def		11
	def		12
	def		13
	def		14
	dtlb_11		15
	naitlb_11	16
	nadtlb_11	17
	def		18
	def		19
	dbit_11		20
	def		21
	def		22
	def		23
	def		24
	def		25
	def		26
	def		27
	def		28
	def		29
	def		30
	def		31
END(fault_vector_11)

#endif
	/* Fault vector is separately protected and *must* be on its own page */
	.align		PAGE_SIZE

	.import		handle_interruption,code
	.import		do_cpu_irq_mask,code

	/*
	 * Child Returns here
	 *
	 * copy_thread moved args into task save area.
	 */

ENTRY(ret_from_kernel_thread)
	/* Call schedule_tail first though */
	BL	schedule_tail, %r2
	nop

	LDREG	TI_TASK-THREAD_SZ_ALGN-FRAME_SIZE(%r30), %r1
	LDREG	TASK_PT_GR25(%r1), %r26
#ifdef CONFIG_64BIT
	LDREG	TASK_PT_GR27(%r1), %r27
#endif
	LDREG	TASK_PT_GR26(%r1), %r1
	ble	0(%sr7, %r1)
	copy	%r31, %r2
	b	finish_child_return
	nop
END(ret_from_kernel_thread)


	/*
	 * struct task_struct *_switch_to(struct task_struct *prev,
	 *	struct task_struct *next)
	 *
	 * switch kernel stacks and return prev */
ENTRY_CFI(_switch_to)
	STREG	%r2, -RP_OFFSET(%r30)

	callee_save_float
	callee_save

	load32	_switch_to_ret, %r2

	STREG	%r2, TASK_PT_KPC(%r26)
	LDREG	TASK_PT_KPC(%r25), %r2

	STREG	%r30, TASK_PT_KSP(%r26)
	LDREG	TASK_PT_KSP(%r25), %r30
	LDREG	TASK_THREAD_INFO(%r25), %r25
	bv	%r0(%r2)
	mtctl	%r25,%cr30

ENTRY(_switch_to_ret)
	mtctl	%r0, %cr0		/* Needed for single stepping */
	callee_rest
	callee_rest_float

	LDREG	-RP_OFFSET(%r30), %r2
	bv	%r0(%r2)
	copy	%r26, %r28
ENDPROC_CFI(_switch_to)

	/*
	 * Common rfi return path for interruptions, kernel execve, and
	 * sys_rt_sigreturn (sometimes).  The sys_rt_sigreturn syscall will
	 * return via this path if the signal was received when the process
	 * was running; if the process was blocked on a syscall then the
	 * normal syscall_exit path is used.  All syscalls for traced
	 * processes exit via intr_restore.
	 *
	 * XXX If any syscalls that change a process's space id ever exit
	 * this way, then we will need to copy %sr3 in to PT_SR[3..7], and
	 * adjust IASQ[0..1].
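	 *
	 * In rough C terms the PSW/IAOQ filtering done at the top of
	 * syscall_exit_rfi below is (illustrative only):
	 *
	 *	regs->iaoq[0] |= 3;	// force user privilege level
	 *	regs->iaoq[1] |= 3;
	 *	regs->psw = (regs->psw & USER_PSW_MASK) | USER_PSW;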
	 *
	 */

	.align	PAGE_SIZE

ENTRY_CFI(syscall_exit_rfi)
	mfctl	%cr30,%r16
	LDREG	TI_TASK(%r16), %r16	/* thread_info -> task_struct */
	ldo	TASK_REGS(%r16),%r16
	/* Force iaoq to userspace, as the user has had access to our current
	 * context via sigcontext.  Also Filter the PSW for the same reason.
	 */
	LDREG	PT_IAOQ0(%r16),%r19
	depi	3,31,2,%r19
	STREG	%r19,PT_IAOQ0(%r16)
	LDREG	PT_IAOQ1(%r16),%r19
	depi	3,31,2,%r19
	STREG	%r19,PT_IAOQ1(%r16)
	LDREG	PT_PSW(%r16),%r19
	load32	USER_PSW_MASK,%r1
#ifdef CONFIG_64BIT
	load32	USER_PSW_HI_MASK,%r20
	depd	%r20,31,32,%r1
#endif
	and	%r19,%r1,%r19	/* Mask out bits that user shouldn't play with */
	load32	USER_PSW,%r1
	or	%r19,%r1,%r19	/* Make sure default USER_PSW bits are set */
	STREG	%r19,PT_PSW(%r16)

	/*
	 * If we aren't being traced, we never saved space registers
	 * (we don't store them in the sigcontext), so set them
	 * to "proper" values now (otherwise we'll wind up restoring
	 * whatever was last stored in the task structure, which might
	 * be inconsistent if an interrupt occurred while on the gateway
	 * page).  Note that we may be "trashing" values the user put in
	 * them, but we don't support the user changing them.
	 */

	STREG	%r0,PT_SR2(%r16)
	mfsp	%sr3,%r19
	STREG	%r19,PT_SR0(%r16)
	STREG	%r19,PT_SR1(%r16)
	STREG	%r19,PT_SR3(%r16)
	STREG	%r19,PT_SR4(%r16)
	STREG	%r19,PT_SR5(%r16)
	STREG	%r19,PT_SR6(%r16)
	STREG	%r19,PT_SR7(%r16)

ENTRY(intr_return)
	/* check for reschedule */
	mfctl	%cr30,%r1
	LDREG	TI_FLAGS(%r1),%r19	/* sched.h: TIF_NEED_RESCHED */
	bb,<,n	%r19,31-TIF_NEED_RESCHED,intr_do_resched	/* forward */

	.import do_notify_resume,code
intr_check_sig:
	/* As above */
	mfctl	%cr30,%r1
	LDREG	TI_FLAGS(%r1),%r19
	ldi	(_TIF_USER_WORK_MASK & ~_TIF_NEED_RESCHED), %r20
	and,COND(<>)	%r19, %r20, %r0
	b,n	intr_restore	/* skip past if we've nothing to do */

	/* This check is critical to having LWS
	 * working.  The IASQ is zero on the gateway
	 * page and we cannot deliver any signals until
	 * we get off the gateway page.
	 *
	 * Only do signals if we are returning to user space
	 */
	LDREG	PT_IASQ0(%r16), %r20
	cmpib,COND(=),n	LINUX_GATEWAY_SPACE, %r20, intr_restore	/* forward */
	LDREG	PT_IASQ1(%r16), %r20
	cmpib,COND(=),n	LINUX_GATEWAY_SPACE, %r20, intr_restore	/* forward */

	copy	%r0, %r25			/* long in_syscall = 0 */
#ifdef CONFIG_64BIT
	ldo	-16(%r30),%r29			/* Reference param save area */
#endif

	/* NOTE: We need to enable interrupts if we have to deliver
	 * signals.  We used to do this earlier but it caused kernel
	 * stack overflows. */
	ssm	PSW_SM_I, %r0

	BL	do_notify_resume,%r2
	copy	%r16, %r26			/* struct pt_regs *regs */

	b,n	intr_check_sig

intr_restore:
	copy		%r16,%r29
	ldo		PT_FR31(%r29),%r1
	rest_fp		%r1
	rest_general	%r29

	/* inverse of virt_map */
	pcxt_ssm_bug
	rsm		PSW_SM_QUIET,%r0	/* prepare for rfi */
	tophys_r1	%r29

	/* Restore space id's and special cr's from PT_REGS
	 * structure pointed to by r29
	 */
	rest_specials	%r29

	/* IMPORTANT: rest_stack restores r29 last (we are using it)!
	 * It also restores r1 and r30.
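	 *
	 * As a sketch, the ordering constraint is simply (illustrative):
	 *
	 *	r1  = regs->gr[1];
	 *	r30 = regs->gr[30];
	 *	r29 = regs->gr[29];	// the base pointer goes last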
	 */
	rest_stack

	rfi
	nop

#ifndef CONFIG_PREEMPTION
# define intr_do_preempt	intr_restore
#endif /* !CONFIG_PREEMPTION */

	.import schedule,code
intr_do_resched:
	/* Only call schedule on return to userspace.  If we're returning
	 * to kernel space, we may schedule if CONFIG_PREEMPTION, otherwise
	 * we jump back to intr_restore.
	 */
	LDREG	PT_IASQ0(%r16), %r20
	cmpib,COND(=)	0, %r20, intr_do_preempt
	nop
	LDREG	PT_IASQ1(%r16), %r20
	cmpib,COND(=)	0, %r20, intr_do_preempt
	nop

	/* NOTE: We need to enable interrupts if we schedule.  We used
	 * to do this earlier but it caused kernel stack overflows. */
	ssm	PSW_SM_I, %r0

#ifdef CONFIG_64BIT
	ldo	-16(%r30),%r29		/* Reference param save area */
#endif

	ldil	L%intr_check_sig, %r2
#ifndef CONFIG_64BIT
	b	schedule
#else
	load32	schedule, %r20
	bv	%r0(%r20)
#endif
	ldo	R%intr_check_sig(%r2), %r2

	/* preempt the current task on returning to kernel
	 * mode from an interrupt, iff need_resched is set,
	 * and preempt_count is 0.  otherwise, we continue on
	 * our merry way back to the current running task.
	 */
#ifdef CONFIG_PREEMPTION
	.import preempt_schedule_irq,code
intr_do_preempt:
	rsm	PSW_SM_I, %r0		/* disable interrupts */

	/* current_thread_info()->preempt_count */
	mfctl	%cr30, %r1
	LDREG	TI_PRE_COUNT(%r1), %r19
	cmpib,COND(<>)	0, %r19, intr_restore	/* if preempt_count > 0 */
	nop				/* prev insn branched backwards */

	/* check if we interrupted a critical path */
	LDREG	PT_PSW(%r16), %r20
	bb,<,n	%r20, 31 - PSW_SM_I, intr_restore
	nop

	BL	preempt_schedule_irq, %r2
	nop

	b,n	intr_restore		/* ssm PSW_SM_I done by intr_restore */
#endif /* CONFIG_PREEMPTION */

	/*
	 * External interrupts.
	 */

intr_extint:
	cmpib,COND(=),n 0,%r16,1f

	get_stack_use_cr30
	b,n 2f

1:
	get_stack_use_r30
2:
	save_specials	%r29
	virt_map
	save_general	%r29

	ldo	PT_FR0(%r29), %r24
	save_fp	%r24

	loadgp

	copy	%r29, %r26	/* arg0 is pt_regs */
	copy	%r29, %r16	/* save pt_regs */

	ldil	L%intr_return, %r2

#ifdef CONFIG_64BIT
	ldo	-16(%r30),%r29	/* Reference param save area */
#endif

	b	do_cpu_irq_mask
	ldo	R%intr_return(%r2), %r2	/* return to intr_return, not here */
ENDPROC_CFI(syscall_exit_rfi)


	/* Generic interruptions (illegal insn, unaligned, page fault, etc) */

ENTRY_CFI(intr_save)		/* for os_hpmc */
	mfsp	%sr7,%r16
	cmpib,COND(=),n 0,%r16,1f
	get_stack_use_cr30
	b	2f
	copy	%r8,%r26

1:
	get_stack_use_r30
	copy	%r8,%r26

2:
	save_specials	%r29

	/* If this trap is an itlb miss, skip saving/adjusting isr/ior */
	cmpib,COND(=),n	PARISC_ITLB_TRAP,%r26,skip_save_ior


	mfctl	%isr, %r16
	nop		/* serialize mfctl on PA 2.0 to avoid 4 cycle penalty */
	mfctl	%ior, %r17


#ifdef CONFIG_64BIT
	/*
	 * If the interrupted code was running with W bit off (32 bit),
	 * clear the b bits (bits 0 & 1) in the ior.
	 * save_specials left ipsw value in r8 for us to test.
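	 *
	 * Roughly, in C (illustrative only):
	 *
	 *	if (!(ipsw & PSW_W))
	 *		ior &= ~(3UL << 62);	// clear the two topmost bits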
	 */
	extrd,u,*<>	%r8,PSW_W_BIT,1,%r0
	depdi		0,1,2,%r17

	/* adjust isr/ior: get high bits from isr and deposit in ior */
	space_adjust	%r16,%r17,%r1
#endif
	STREG	%r16, PT_ISR(%r29)
	STREG	%r17, PT_IOR(%r29)

#if 0 && defined(CONFIG_64BIT)
	/* Revisit when we have 64-bit code above 4Gb */
	b,n	intr_save2

skip_save_ior:
	/* We have an itlb miss, and when executing code above 4 Gb on ILP64, we
	 * need to adjust iasq/iaoq here in the same way we adjusted isr/ior
	 * above.
	 */
	extrd,u,*	%r8,PSW_W_BIT,1,%r1
	cmpib,COND(=),n	1,%r1,intr_save2
	LDREG	PT_IASQ0(%r29), %r16
	LDREG	PT_IAOQ0(%r29), %r17
	/* adjust iasq/iaoq */
	space_adjust	%r16,%r17,%r1
	STREG	%r16, PT_IASQ0(%r29)
	STREG	%r17, PT_IAOQ0(%r29)
#else
skip_save_ior:
#endif

intr_save2:
	virt_map
	save_general	%r29

	ldo	PT_FR0(%r29), %r25
	save_fp	%r25

	loadgp

	copy	%r29, %r25	/* arg1 is pt_regs */
#ifdef CONFIG_64BIT
	ldo	-16(%r30),%r29	/* Reference param save area */
#endif

	ldil	L%intr_check_sig, %r2
	copy	%r25, %r16	/* save pt_regs */

	b	handle_interruption
	ldo	R%intr_check_sig(%r2), %r2
ENDPROC_CFI(intr_save)


	/*
	 * Note for all tlb miss handlers:
	 *
	 * cr24 contains a pointer to the kernel address space
	 * page directory.
	 *
	 * cr25 contains a pointer to the current user address
	 * space page directory.
	 *
	 * sr3 will contain the space id of the user address space
	 * of the current running thread while that thread is
	 * running in the kernel.
	 */

	/*
	 * register number allocations.  Note that these are all
	 * in the shadowed registers
	 */

	t0 = r1		/* temporary register 0 */
	va = r8		/* virtual address for which the trap occurred */
	t1 = r9		/* temporary register 1 */
	pte  = r16	/* pte/phys page # */
	prot = r17	/* prot bits */
	spc  = r24	/* space for which the trap occurred */
	ptp = r25	/* page directory/page table pointer */

#ifdef CONFIG_64BIT

dtlb_miss_20w:
	space_adjust	spc,va,t0
	get_pgd		spc,ptp
	space_check	spc,t0,dtlb_fault

	L3_ptep		ptp,pte,t0,va,dtlb_check_alias_20w

	tlb_lock	spc,ptp,pte,t0,t1,dtlb_check_alias_20w
	update_accessed	ptp,pte,t0,t1

	make_insert_tlb	spc,pte,prot,t1

	idtlbt		pte,prot

	tlb_unlock1	spc,t0
	rfir
	nop

dtlb_check_alias_20w:
	do_alias	spc,t0,t1,va,pte,prot,dtlb_fault,20

	idtlbt		pte,prot

	rfir
	nop

nadtlb_miss_20w:
	space_adjust	spc,va,t0
	get_pgd		spc,ptp
	space_check	spc,t0,nadtlb_fault

	L3_ptep		ptp,pte,t0,va,nadtlb_check_alias_20w

	tlb_lock	spc,ptp,pte,t0,t1,nadtlb_check_alias_20w
	update_accessed	ptp,pte,t0,t1

	make_insert_tlb	spc,pte,prot,t1

	idtlbt		pte,prot

	tlb_unlock1	spc,t0
	rfir
	nop

nadtlb_check_alias_20w:
	do_alias	spc,t0,t1,va,pte,prot,nadtlb_emulate,20

	idtlbt		pte,prot

	rfir
	nop

#else

dtlb_miss_11:
	get_pgd		spc,ptp

	space_check	spc,t0,dtlb_fault

	L2_ptep		ptp,pte,t0,va,dtlb_check_alias_11

	tlb_lock	spc,ptp,pte,t0,t1,dtlb_check_alias_11
	update_accessed	ptp,pte,t0,t1

	make_insert_tlb_11	spc,pte,prot

	mfsp		%sr1,t1	 /* Save sr1 so we can use it in tlb inserts */
	mtsp		spc,%sr1

	idtlba		pte,(%sr1,va)
	idtlbp		prot,(%sr1,va)

	mtsp		t1, %sr1	/* Restore sr1 */

	tlb_unlock1	spc,t0
	rfir
	nop

dtlb_check_alias_11:
	do_alias	spc,t0,t1,va,pte,prot,dtlb_fault,11

	idtlba		pte,(va)
	idtlbp		prot,(va)

	rfir
	nop

nadtlb_miss_11:
	get_pgd		spc,ptp

	space_check	spc,t0,nadtlb_fault

	L2_ptep		ptp,pte,t0,va,nadtlb_check_alias_11

	tlb_lock	spc,ptp,pte,t0,t1,nadtlb_check_alias_11
	update_accessed	ptp,pte,t0,t1

	make_insert_tlb_11	spc,pte,prot

	mfsp		%sr1,t1	 /* Save sr1 so we can use it in tlb inserts */
	mtsp		spc,%sr1

	idtlba		pte,(%sr1,va)
	idtlbp		prot,(%sr1,va)

	mtsp		t1, %sr1	/* Restore sr1 */

	tlb_unlock1	spc,t0
	rfir
	nop

nadtlb_check_alias_11:
	do_alias	spc,t0,t1,va,pte,prot,nadtlb_emulate,11

	idtlba		pte,(va)
	idtlbp		prot,(va)

	rfir
	nop

dtlb_miss_20:
	space_adjust	spc,va,t0
	get_pgd		spc,ptp
	space_check	spc,t0,dtlb_fault

	L2_ptep		ptp,pte,t0,va,dtlb_check_alias_20

	tlb_lock	spc,ptp,pte,t0,t1,dtlb_check_alias_20
	update_accessed	ptp,pte,t0,t1

	make_insert_tlb	spc,pte,prot,t1

	f_extend	pte,t1

	idtlbt		pte,prot

	tlb_unlock1	spc,t0
	rfir
	nop

dtlb_check_alias_20:
	do_alias	spc,t0,t1,va,pte,prot,dtlb_fault,20

	idtlbt		pte,prot

	rfir
	nop

nadtlb_miss_20:
	get_pgd		spc,ptp

	space_check	spc,t0,nadtlb_fault

	L2_ptep		ptp,pte,t0,va,nadtlb_check_alias_20

	tlb_lock	spc,ptp,pte,t0,t1,nadtlb_check_alias_20
	update_accessed	ptp,pte,t0,t1

	make_insert_tlb	spc,pte,prot,t1

	f_extend	pte,t1

	idtlbt		pte,prot

	tlb_unlock1	spc,t0
	rfir
	nop

nadtlb_check_alias_20:
	do_alias	spc,t0,t1,va,pte,prot,nadtlb_emulate,20

	idtlbt		pte,prot

	rfir
	nop

#endif

nadtlb_emulate:

	/*
	 * Non-access misses can be caused by fdc,fic,pdc,lpa,probe and
	 * probei instructions.  We don't want to fault for these
	 * instructions (not only does it not make sense, it can cause
	 * deadlocks, since some flushes are done with the mmap
	 * semaphore held).  If the translation doesn't exist, we can't
	 * insert a translation, so have to emulate the side effects
	 * of the instruction.  Since we don't insert a translation
	 * we can get a lot of faults during a flush loop, so it makes
	 * sense to try to do it here with minimum overhead.  We only
	 * emulate fdc,fic,pdc,probew,prober instructions whose base
	 * and index registers are not shadowed.  We defer everything
	 * else to the "slow" path.
	 */

	mfctl	%cr19,%r9	/* Get iir */

	/* PA 2.0 Arch Ref. Book pg 382 has a good description of the insn bits.
	   Checks for fdc,fdce,pdc,"fic,4f",prober,probeir,probew, probeiw */

	/* Checks for fdc,fdce,pdc,"fic,4f" only */
	ldi		0x280,%r16
	and		%r9,%r16,%r17
	cmpb,<>,n	%r16,%r17,nadtlb_probe_check
	bb,>=,n		%r9,26,nadtlb_nullify	/* m bit not set, just nullify */
	BL		get_register,%r25
	extrw,u		%r9,15,5,%r8		/* Get index register # */
	cmpib,COND(=),n	-1,%r1,nadtlb_fault	/* have to use slow path */
	copy		%r1,%r24
	BL		get_register,%r25
	extrw,u		%r9,10,5,%r8		/* Get base register # */
	cmpib,COND(=),n	-1,%r1,nadtlb_fault	/* have to use slow path */
	BL		set_register,%r25
	add,l		%r1,%r24,%r1		/* doesn't affect c/b bits */

nadtlb_nullify:
	mfctl		%ipsw,%r8
	ldil		L%PSW_N,%r9
	or		%r8,%r9,%r8		/* Set PSW_N */
	mtctl		%r8,%ipsw

	rfir
	nop

	/*
		When there is no translation for the probe address, we
		must nullify the insn and return zero in the target register.
		This will indicate to the calling code that it does not have
		write/read privileges to this address.

		This should technically work for prober and probew in PA 1.1,
		and also probe,r and probe,w in PA 2.0

		WARNING: USE ONLY NON-SHADOWED REGISTERS WITH PROBE INSN!
		THE SLOW-PATH EMULATION HAS NOT BEEN WRITTEN YET.

	*/
nadtlb_probe_check:
	ldi		0x80,%r16
	and		%r9,%r16,%r17
	cmpb,<>,n	%r16,%r17,nadtlb_fault	/* Must be probe,[rw] */
	BL		get_register,%r25	/* Find the target register */
	extrw,u		%r9,31,5,%r8		/* Get target register */
	cmpib,COND(=),n	-1,%r1,nadtlb_fault	/* have to use slow path */
	BL		set_register,%r25
	copy		%r0,%r1			/* Write zero to target register */
	b		nadtlb_nullify		/* Nullify return insn */
	nop


#ifdef CONFIG_64BIT
itlb_miss_20w:

	/*
	 * I miss is a little different, since we allow users to fault
	 * on the gateway page which is in the kernel address space.
	 */

	space_adjust	spc,va,t0
	get_pgd		spc,ptp
	space_check	spc,t0,itlb_fault

	L3_ptep		ptp,pte,t0,va,itlb_fault

	tlb_lock	spc,ptp,pte,t0,t1,itlb_fault
	update_accessed	ptp,pte,t0,t1

	make_insert_tlb	spc,pte,prot,t1

	iitlbt		pte,prot

	tlb_unlock1	spc,t0
	rfir
	nop

naitlb_miss_20w:

	/*
	 * I miss is a little different, since we allow users to fault
	 * on the gateway page which is in the kernel address space.
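	 *
	 * space_check above is what permits this: its check is defeated
	 * for space 0.  Roughly, in illustrative C:
	 *
	 *	tmp = mfsp(%sr7);
	 *	if (spc == 0)		// gateway page executes as kernel
	 *		tmp = spc;
	 *	if (tmp != 0 && tmp != spc)
	 *		goto fault;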
	 */

	space_adjust	spc,va,t0
	get_pgd		spc,ptp
	space_check	spc,t0,naitlb_fault

	L3_ptep		ptp,pte,t0,va,naitlb_check_alias_20w

	tlb_lock	spc,ptp,pte,t0,t1,naitlb_check_alias_20w
	update_accessed	ptp,pte,t0,t1

	make_insert_tlb	spc,pte,prot,t1

	iitlbt		pte,prot

	tlb_unlock1	spc,t0
	rfir
	nop

naitlb_check_alias_20w:
	do_alias	spc,t0,t1,va,pte,prot,naitlb_fault,20

	iitlbt		pte,prot

	rfir
	nop

#else

itlb_miss_11:
	get_pgd		spc,ptp

	space_check	spc,t0,itlb_fault

	L2_ptep		ptp,pte,t0,va,itlb_fault

	tlb_lock	spc,ptp,pte,t0,t1,itlb_fault
	update_accessed	ptp,pte,t0,t1

	make_insert_tlb_11	spc,pte,prot

	mfsp		%sr1,t1	 /* Save sr1 so we can use it in tlb inserts */
	mtsp		spc,%sr1

	iitlba		pte,(%sr1,va)
	iitlbp		prot,(%sr1,va)

	mtsp		t1, %sr1	/* Restore sr1 */

	tlb_unlock1	spc,t0
	rfir
	nop

naitlb_miss_11:
	get_pgd		spc,ptp

	space_check	spc,t0,naitlb_fault

	L2_ptep		ptp,pte,t0,va,naitlb_check_alias_11

	tlb_lock	spc,ptp,pte,t0,t1,naitlb_check_alias_11
	update_accessed	ptp,pte,t0,t1

	make_insert_tlb_11	spc,pte,prot

	mfsp		%sr1,t1	 /* Save sr1 so we can use it in tlb inserts */
	mtsp		spc,%sr1

	iitlba		pte,(%sr1,va)
	iitlbp		prot,(%sr1,va)

	mtsp		t1, %sr1	/* Restore sr1 */

	tlb_unlock1	spc,t0
	rfir
	nop

naitlb_check_alias_11:
	do_alias	spc,t0,t1,va,pte,prot,itlb_fault,11

	iitlba		pte,(%sr0, va)
	iitlbp		prot,(%sr0, va)

	rfir
	nop


itlb_miss_20:
	get_pgd		spc,ptp

	space_check	spc,t0,itlb_fault

	L2_ptep		ptp,pte,t0,va,itlb_fault

	tlb_lock	spc,ptp,pte,t0,t1,itlb_fault
	update_accessed	ptp,pte,t0,t1

	make_insert_tlb	spc,pte,prot,t1

	f_extend	pte,t1

	iitlbt		pte,prot

	tlb_unlock1	spc,t0
	rfir
	nop

naitlb_miss_20:
	get_pgd		spc,ptp

	space_check	spc,t0,naitlb_fault

	L2_ptep		ptp,pte,t0,va,naitlb_check_alias_20

	tlb_lock	spc,ptp,pte,t0,t1,naitlb_check_alias_20
	update_accessed	ptp,pte,t0,t1

	make_insert_tlb	spc,pte,prot,t1

	f_extend	pte,t1

	iitlbt		pte,prot

	tlb_unlock1	spc,t0
	rfir
	nop

naitlb_check_alias_20:
	do_alias	spc,t0,t1,va,pte,prot,naitlb_fault,20

	iitlbt		pte,prot

	rfir
	nop

#endif

#ifdef CONFIG_64BIT

dbit_trap_20w:
	space_adjust	spc,va,t0
	get_pgd		spc,ptp
	space_check	spc,t0,dbit_fault

	L3_ptep		ptp,pte,t0,va,dbit_fault

	tlb_lock	spc,ptp,pte,t0,t1,dbit_fault
	update_dirty	ptp,pte,t1

	make_insert_tlb	spc,pte,prot,t1

	idtlbt		pte,prot

	tlb_unlock0	spc,t0
	rfir
	nop
#else

dbit_trap_11:

	get_pgd		spc,ptp

	space_check	spc,t0,dbit_fault

	L2_ptep		ptp,pte,t0,va,dbit_fault

	tlb_lock	spc,ptp,pte,t0,t1,dbit_fault
	update_dirty	ptp,pte,t1

	make_insert_tlb_11	spc,pte,prot

	mfsp		%sr1,t1	 /* Save sr1 so we can use it in tlb inserts */
	mtsp		spc,%sr1

	idtlba		pte,(%sr1,va)
	idtlbp		prot,(%sr1,va)

	mtsp		t1, %sr1	/* Restore sr1 */

	tlb_unlock0	spc,t0
	rfir
	nop

dbit_trap_20:
	get_pgd		spc,ptp

	space_check	spc,t0,dbit_fault

	L2_ptep		ptp,pte,t0,va,dbit_fault

	tlb_lock	spc,ptp,pte,t0,t1,dbit_fault
	update_dirty	ptp,pte,t1

	make_insert_tlb	spc,pte,prot,t1

	f_extend	pte,t1

	idtlbt		pte,prot

	tlb_unlock0	spc,t0
	rfir
	nop
#endif

	.import handle_interruption,code

kernel_bad_space:
	b	intr_save
	ldi	31,%r8	/* Use an unused code */

dbit_fault:
	b	intr_save
	ldi	20,%r8

itlb_fault:
	b	intr_save
	ldi	PARISC_ITLB_TRAP,%r8

nadtlb_fault:
	b	intr_save
	ldi	17,%r8

naitlb_fault:
	b	intr_save
	ldi	16,%r8

dtlb_fault:
	b	intr_save
	ldi	15,%r8

	/* Register saving semantics for system calls:

	   %r1		   clobbered by system call macro in userspace
	   %r2		   saved in PT_REGS by gateway page
	   %r3  - %r18	   preserved by C code (saved by signal code)
	   %r19 - %r20	   saved in PT_REGS by gateway page
	   %r21 - %r22	   non-standard syscall args
			   stored in kernel stack by gateway page
	   %r23 - %r26	   arg3-arg0, saved in PT_REGS by gateway page
	   %r27 - %r30	   saved in PT_REGS by gateway page
	   %r31		   syscall return pointer
	 */

	/* Floating point registers (FIXME: what do we do with these?)

	   %fr0  - %fr3	   status/exception, not preserved
	   %fr4  - %fr7	   arguments
	   %fr8  - %fr11   not preserved by C code
	   %fr12 - %fr21   preserved by C code
	   %fr22 - %fr31   not preserved by C code
	 */

	.macro	reg_save regs
	STREG	%r3, PT_GR3(\regs)
	STREG	%r4, PT_GR4(\regs)
	STREG	%r5, PT_GR5(\regs)
	STREG	%r6, PT_GR6(\regs)
	STREG	%r7, PT_GR7(\regs)
	STREG	%r8, PT_GR8(\regs)
	STREG	%r9, PT_GR9(\regs)
	STREG	%r10,PT_GR10(\regs)
	STREG	%r11,PT_GR11(\regs)
	STREG	%r12,PT_GR12(\regs)
	STREG	%r13,PT_GR13(\regs)
	STREG	%r14,PT_GR14(\regs)
	STREG	%r15,PT_GR15(\regs)
	STREG	%r16,PT_GR16(\regs)
	STREG	%r17,PT_GR17(\regs)
	STREG	%r18,PT_GR18(\regs)
	.endm

	.macro	reg_restore regs
	LDREG	PT_GR3(\regs), %r3
	LDREG	PT_GR4(\regs), %r4
	LDREG	PT_GR5(\regs), %r5
	LDREG	PT_GR6(\regs), %r6
	LDREG	PT_GR7(\regs), %r7
	LDREG	PT_GR8(\regs), %r8
	LDREG	PT_GR9(\regs), %r9
	LDREG	PT_GR10(\regs),%r10
	LDREG	PT_GR11(\regs),%r11
	LDREG	PT_GR12(\regs),%r12
	LDREG	PT_GR13(\regs),%r13
	LDREG	PT_GR14(\regs),%r14
	LDREG	PT_GR15(\regs),%r15
	LDREG	PT_GR16(\regs),%r16
	LDREG	PT_GR17(\regs),%r17
	LDREG	PT_GR18(\regs),%r18
	.endm

	.macro	fork_like name
ENTRY_CFI(sys_\name\()_wrapper)
	LDREG	TI_TASK-THREAD_SZ_ALGN-FRAME_SIZE(%r30), %r1
	ldo	TASK_REGS(%r1),%r1
	reg_save %r1
	mfctl	%cr27, %r28
	ldil	L%sys_\name, %r31
	be	R%sys_\name(%sr4,%r31)
	STREG	%r28, PT_CR27(%r1)
ENDPROC_CFI(sys_\name\()_wrapper)
	.endm

fork_like clone
fork_like clone3
fork_like fork
fork_like vfork

	/* Set the return value for the child */
ENTRY(child_return)
	BL	schedule_tail, %r2
	nop
finish_child_return:
	LDREG	TI_TASK-THREAD_SZ_ALGN-FRAME_SIZE(%r30), %r1
	ldo	TASK_REGS(%r1),%r1	/* get pt regs */

	LDREG	PT_CR27(%r1), %r3
	mtctl	%r3, %cr27
	reg_restore %r1
	b	syscall_exit
	copy	%r0,%r28
END(child_return)

ENTRY_CFI(sys_rt_sigreturn_wrapper)
	LDREG	TI_TASK-THREAD_SZ_ALGN-FRAME_SIZE(%r30),%r26
	ldo	TASK_REGS(%r26),%r26	/* get pt regs */
	/* Don't save regs, we are going to restore them from sigcontext.
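	 *
	 * In rough C terms (illustrative only):
	 *
	 *	regs = task_pt_regs(current);
	 *	sys_rt_sigreturn();	// rewrites *regs from the user
	 *				// sigcontext, so nothing saved
	 *				// here would survive anyway
	 *	reg_restore(regs);	// pick up the rewritten values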
	 */
	STREG	%r2, -RP_OFFSET(%r30)
#ifdef CONFIG_64BIT
	ldo	FRAME_SIZE(%r30), %r30
	BL	sys_rt_sigreturn,%r2
	ldo	-16(%r30),%r29		/* Reference param save area */
#else
	BL	sys_rt_sigreturn,%r2
	ldo	FRAME_SIZE(%r30), %r30
#endif

	ldo	-FRAME_SIZE(%r30), %r30
	LDREG	-RP_OFFSET(%r30), %r2

	/* FIXME: I think we need to restore a few more things here. */
	LDREG	TI_TASK-THREAD_SZ_ALGN-FRAME_SIZE(%r30),%r1
	ldo	TASK_REGS(%r1),%r1	/* get pt regs */
	reg_restore %r1

	/* If the signal was received while the process was blocked on a
	 * syscall, then r2 will take us to syscall_exit; otherwise r2 will
	 * take us to syscall_exit_rfi and on to intr_return.
	 */
	bv	%r0(%r2)
	LDREG	PT_GR28(%r1),%r28	/* reload original r28 for syscall_exit */
ENDPROC_CFI(sys_rt_sigreturn_wrapper)

ENTRY(syscall_exit)
	/* NOTE: Not all syscalls exit this way.  rt_sigreturn will exit
	 * via syscall_exit_rfi if the signal was received while the process
	 * was running.
	 */

	/* save return value now */

	mfctl	%cr30, %r1
	LDREG	TI_TASK(%r1),%r1
	STREG	%r28,TASK_PT_GR28(%r1)

	/* Seems to me that dp could be wrong here, if the syscall involved
	 * calling a module, and nothing got round to restoring dp on return.
	 */
	loadgp

syscall_check_resched:

	/* check for reschedule */

	LDREG	TI_FLAGS-THREAD_SZ_ALGN-FRAME_SIZE(%r30),%r19	/* long */
	bb,<,n	%r19, 31-TIF_NEED_RESCHED, syscall_do_resched	/* forward */

	.import do_signal,code
syscall_check_sig:
	LDREG	TI_FLAGS-THREAD_SZ_ALGN-FRAME_SIZE(%r30),%r19
	ldi	(_TIF_USER_WORK_MASK & ~_TIF_NEED_RESCHED), %r26
	and,COND(<>)	%r19, %r26, %r0
	b,n	syscall_restore	/* skip past if we've nothing to do */

syscall_do_signal:
	/* Save callee-save registers (for sigcontext).
	 * FIXME: After this point the process structure should be
	 * consistent with all the relevant state of the process
	 * before the syscall.  We need to verify this.
	 */
	LDREG	TI_TASK-THREAD_SZ_ALGN-FRAME_SIZE(%r30),%r1
	ldo	TASK_REGS(%r1), %r26		/* struct pt_regs *regs */
	reg_save %r26

#ifdef CONFIG_64BIT
	ldo	-16(%r30),%r29			/* Reference param save area */
#endif

	BL	do_notify_resume,%r2
	ldi	1, %r25				/* long in_syscall = 1 */

	LDREG	TI_TASK-THREAD_SZ_ALGN-FRAME_SIZE(%r30),%r1
	ldo	TASK_REGS(%r1), %r20		/* reload pt_regs */
	reg_restore %r20

	b,n	syscall_check_sig

syscall_restore:
	LDREG	TI_TASK-THREAD_SZ_ALGN-FRAME_SIZE(%r30),%r1

	/* Are we being ptraced?
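	 *
	 * Illustrative C for the test below (hedged sketch; the flag
	 * word is the thread-info flags reached via the TASK_FLAGS
	 * asm-offset):
	 *
	 *	if (flags & _TIF_SYSCALL_TRACE_MASK)
	 *		goto syscall_restore_rfi;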
	 */
	ldw	TASK_FLAGS(%r1),%r19
	ldi	_TIF_SYSCALL_TRACE_MASK,%r2
	and,COND(=)	%r19,%r2,%r0
	b,n	syscall_restore_rfi

	ldo	TASK_PT_FR31(%r1),%r19		/* reload fpregs */
	rest_fp	%r19

	LDREG	TASK_PT_SAR(%r1),%r19		/* restore SAR */
	mtsar	%r19

	LDREG	TASK_PT_GR2(%r1),%r2		/* restore user rp */
	LDREG	TASK_PT_GR19(%r1),%r19
	LDREG	TASK_PT_GR20(%r1),%r20
	LDREG	TASK_PT_GR21(%r1),%r21
	LDREG	TASK_PT_GR22(%r1),%r22
	LDREG	TASK_PT_GR23(%r1),%r23
	LDREG	TASK_PT_GR24(%r1),%r24
	LDREG	TASK_PT_GR25(%r1),%r25
	LDREG	TASK_PT_GR26(%r1),%r26
	LDREG	TASK_PT_GR27(%r1),%r27		/* restore user dp */
	LDREG	TASK_PT_GR28(%r1),%r28		/* syscall return value */
	LDREG	TASK_PT_GR29(%r1),%r29
	LDREG	TASK_PT_GR31(%r1),%r31		/* restore syscall rp */

	/* NOTE: We use rsm/ssm pair to make this operation atomic */
	LDREG	TASK_PT_GR30(%r1),%r1		/* Get user sp */
	rsm	PSW_SM_I, %r0
	copy	%r1,%r30			/* Restore user sp */
	mfsp	%sr3,%r1			/* Get user space id */
	mtsp	%r1,%sr7			/* Restore sr7 */
	ssm	PSW_SM_I, %r0

	/* Set sr2 to zero for userspace syscalls to work. */
	mtsp	%r0,%sr2
	mtsp	%r1,%sr4			/* Restore sr4 */
	mtsp	%r1,%sr5			/* Restore sr5 */
	mtsp	%r1,%sr6			/* Restore sr6 */

	depi	3,31,2,%r31			/* ensure return to user mode. */

#ifdef CONFIG_64BIT
	/* decide whether to reset the wide mode bit
	 *
	 * For a syscall, the W bit is stored in the lowest bit
	 * of sp.  Extract it and reset W if it is zero */
	extrd,u,*<>	%r30,63,1,%r1
	rsm	PSW_SM_W, %r0
	/* now reset the lowest bit of sp if it was set */
	xor	%r30,%r1,%r30
#endif
	be,n	0(%sr3,%r31)			/* return to user space */

	/* We have to return via an RFI, so that PSW T and R bits can be set
	 * appropriately.
	 * This sets up pt_regs so we can return via intr_restore, which is not
	 * the most efficient way of doing things, but it works.
	 */
syscall_restore_rfi:
	ldo	-1(%r0),%r2			/* Set recovery cntr to -1 */
	mtctl	%r2,%cr0			/*   for immediate trap */
	LDREG	TASK_PT_PSW(%r1),%r2		/* Get old PSW */
	ldi	0x0b,%r20			/* Create new PSW */
	depi	-1,13,1,%r20			/* C, Q, D, and I bits */

	/* The values of SINGLESTEP_BIT and BLOCKSTEP_BIT are
	 * set in thread_info.h and converted to PA bitmap
	 * numbers in asm-offsets.c */

	/* if ((%r19.SINGLESTEP_BIT)) { %r20.27=1} */
	extru,=	%r19,TIF_SINGLESTEP_PA_BIT,1,%r0
	depi	-1,27,1,%r20			/* R bit */

	/* if ((%r19.BLOCKSTEP_BIT)) { %r20.7=1} */
	extru,=	%r19,TIF_BLOCKSTEP_PA_BIT,1,%r0
	depi	-1,7,1,%r20			/* T bit */

	STREG	%r20,TASK_PT_PSW(%r1)

	/* Always store space registers, since sr3 can be changed (e.g. fork) */

	mfsp	%sr3,%r25
	STREG	%r25,TASK_PT_SR3(%r1)
	STREG	%r25,TASK_PT_SR4(%r1)
	STREG	%r25,TASK_PT_SR5(%r1)
	STREG	%r25,TASK_PT_SR6(%r1)
	STREG	%r25,TASK_PT_SR7(%r1)
	STREG	%r25,TASK_PT_IASQ0(%r1)
	STREG	%r25,TASK_PT_IASQ1(%r1)

	/* XXX W bit??? */
	/* Now if old D bit is clear, it means we didn't save all registers
	 * on syscall entry, so do that now.  This only happens on TRACEME
	 * calls, or if someone attached to us while we were on a syscall.
	 * We could make this more efficient by not saving r3-r18, but
	 * then we wouldn't be able to use the common intr_restore path.
	 * It is only for traced processes anyway, so performance is not
	 * an issue.
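	 *
	 * Roughly (illustrative C):
	 *
	 *	if (!(old_psw & PSW_D)) {	// entry path skipped full save
	 *		reg_save(regs);		// store r3-r18 now
	 *		regs->sr[0] = mfsp(%sr0);
	 *		regs->sr[1] = mfsp(%sr1);
	 *		regs->sr[2] = 0;	// sr2 must be 0 in userspace
	 *	}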
	 */
	bb,<	%r2,30,pt_regs_ok		/* Branch if D set */
	ldo	TASK_REGS(%r1),%r25
	reg_save %r25				/* Save r3 to r18 */

	/* Save the current sr */
	mfsp	%sr0,%r2
	STREG	%r2,TASK_PT_SR0(%r1)

	/* Save the scratch sr */
	mfsp	%sr1,%r2
	STREG	%r2,TASK_PT_SR1(%r1)

	/* sr2 should be set to zero for userspace syscalls */
	STREG	%r0,TASK_PT_SR2(%r1)

	LDREG	TASK_PT_GR31(%r1),%r2
	depi	3,31,2,%r2		/* ensure return to user mode. */
	STREG	%r2,TASK_PT_IAOQ0(%r1)
	ldo	4(%r2),%r2
	STREG	%r2,TASK_PT_IAOQ1(%r1)
	b	intr_restore
	copy	%r25,%r16

pt_regs_ok:
	LDREG	TASK_PT_IAOQ0(%r1),%r2
	depi	3,31,2,%r2		/* ensure return to user mode. */
	STREG	%r2,TASK_PT_IAOQ0(%r1)
	LDREG	TASK_PT_IAOQ1(%r1),%r2
	depi	3,31,2,%r2
	STREG	%r2,TASK_PT_IAOQ1(%r1)
	b	intr_restore
	copy	%r25,%r16

syscall_do_resched:
	load32	syscall_check_resched,%r2	/* if resched, we start over again */
	load32	schedule,%r19
	bv	%r0(%r19)		/* jumps to schedule() */
#ifdef CONFIG_64BIT
	ldo	-16(%r30),%r29		/* Reference param save area */
#else
	nop
#endif
END(syscall_exit)


#ifdef CONFIG_FUNCTION_TRACER

	.import ftrace_function_trampoline,code
	.align L1_CACHE_BYTES
ENTRY_CFI(mcount, caller)
_mcount:
	.export _mcount,data
	/*
	 * The 64bit mcount() function pointer needs 4 dwords, of which the
	 * first two are free.  We optimize it here and put 2 instructions for
	 * calling mcount(), and 2 instructions for ftrace_stub().  That way we
	 * have all on one L1 cacheline.
	 */
	ldi	0, %arg3
	b	ftrace_function_trampoline
	copy	%r3, %arg2	/* caller original %sp */
ftrace_stub:
	.globl ftrace_stub
	.type  ftrace_stub, @function
#ifdef CONFIG_64BIT
	bve	(%rp)
#else
	bv	%r0(%rp)
#endif
	nop
#ifdef CONFIG_64BIT
	.dword mcount
	.dword 0 /* code in head.S puts value of global gp here */
#endif
ENDPROC_CFI(mcount)

#ifdef CONFIG_DYNAMIC_FTRACE

#ifdef CONFIG_64BIT
#define FTRACE_FRAME_SIZE (2*FRAME_SIZE)
#else
#define FTRACE_FRAME_SIZE FRAME_SIZE
#endif
ENTRY_CFI(ftrace_caller, caller,frame=FTRACE_FRAME_SIZE,CALLS,SAVE_RP,SAVE_SP)
ftrace_caller:
	.global ftrace_caller

	STREG	%r3, -FTRACE_FRAME_SIZE+1*REG_SZ(%sp)
	ldo	-FTRACE_FRAME_SIZE(%sp), %r3
	STREG	%rp, -RP_OFFSET(%r3)

	/* Offset 0 is already allocated for %r1 */
	STREG	%r23, 2*REG_SZ(%r3)
	STREG	%r24, 3*REG_SZ(%r3)
	STREG	%r25, 4*REG_SZ(%r3)
	STREG	%r26, 5*REG_SZ(%r3)
	STREG	%r28, 6*REG_SZ(%r3)
	STREG	%r29, 7*REG_SZ(%r3)
#ifdef CONFIG_64BIT
	STREG	%r19, 8*REG_SZ(%r3)
	STREG	%r20, 9*REG_SZ(%r3)
	STREG	%r21, 10*REG_SZ(%r3)
	STREG	%r22, 11*REG_SZ(%r3)
	STREG	%r27, 12*REG_SZ(%r3)
	STREG	%r31, 13*REG_SZ(%r3)
	loadgp
	ldo	-16(%sp),%r29
#endif
	LDREG	0(%r3), %r25
	copy	%rp, %r26
	ldo	-8(%r25), %r25
	ldi	0, %r23		/* no pt_regs */
	b,l	ftrace_function_trampoline, %rp
	copy	%r3, %r24

	LDREG	-RP_OFFSET(%r3), %rp
	LDREG	2*REG_SZ(%r3), %r23
	LDREG	3*REG_SZ(%r3), %r24
	LDREG	4*REG_SZ(%r3), %r25
	LDREG	5*REG_SZ(%r3), %r26
	LDREG	6*REG_SZ(%r3), %r28
	LDREG	7*REG_SZ(%r3), %r29
#ifdef CONFIG_64BIT
	LDREG	8*REG_SZ(%r3), %r19
	LDREG	9*REG_SZ(%r3), %r20
	LDREG	10*REG_SZ(%r3), %r21
	LDREG	11*REG_SZ(%r3), %r22
	LDREG	12*REG_SZ(%r3), %r27
	LDREG	13*REG_SZ(%r3), %r31
#endif
	LDREG	1*REG_SZ(%r3), %r3

	LDREGM	-FTRACE_FRAME_SIZE(%sp), %r1
	/* Adjust return point to jump back to beginning of traced function */
	ldo	-4(%r1), %r1
	bv,n	(%r1)

ENDPROC_CFI(ftrace_caller)

#ifdef CONFIG_HAVE_DYNAMIC_FTRACE_WITH_REGS
ENTRY_CFI(ftrace_regs_caller,caller,frame=FTRACE_FRAME_SIZE+PT_SZ_ALGN,
	CALLS,SAVE_RP,SAVE_SP)
ftrace_regs_caller:
	.global ftrace_regs_caller

	ldo	-FTRACE_FRAME_SIZE(%sp), %r1
	STREG	%rp, -RP_OFFSET(%r1)

	copy	%sp, %r1
	ldo	PT_SZ_ALGN(%sp), %sp

	STREG	%rp, PT_GR2(%r1)
	STREG	%r3, PT_GR3(%r1)
	STREG	%r4, PT_GR4(%r1)
	STREG	%r5, PT_GR5(%r1)
	STREG	%r6, PT_GR6(%r1)
	STREG	%r7, PT_GR7(%r1)
	STREG	%r8, PT_GR8(%r1)
	STREG	%r9, PT_GR9(%r1)
	STREG	%r10, PT_GR10(%r1)
	STREG	%r11, PT_GR11(%r1)
	STREG	%r12, PT_GR12(%r1)
	STREG	%r13, PT_GR13(%r1)
	STREG	%r14, PT_GR14(%r1)
	STREG	%r15, PT_GR15(%r1)
	STREG	%r16, PT_GR16(%r1)
	STREG	%r17, PT_GR17(%r1)
	STREG	%r18, PT_GR18(%r1)
	STREG	%r19, PT_GR19(%r1)
	STREG	%r20, PT_GR20(%r1)
	STREG	%r21, PT_GR21(%r1)
	STREG	%r22, PT_GR22(%r1)
	STREG	%r23, PT_GR23(%r1)
	STREG	%r24, PT_GR24(%r1)
	STREG	%r25, PT_GR25(%r1)
	STREG	%r26, PT_GR26(%r1)
	STREG	%r27, PT_GR27(%r1)
	STREG	%r28, PT_GR28(%r1)
	STREG	%r29, PT_GR29(%r1)
	STREG	%r30, PT_GR30(%r1)
	STREG	%r31, PT_GR31(%r1)
	mfctl	%cr11, %r26
	STREG	%r26, PT_SAR(%r1)

	copy	%rp, %r26
	LDREG	-FTRACE_FRAME_SIZE-PT_SZ_ALGN(%sp), %r25
	ldo	-8(%r25), %r25
	ldo	-FTRACE_FRAME_SIZE(%r1), %arg2
	b,l	ftrace_function_trampoline, %rp
	copy	%r1, %arg3 /* struct pt_regs */

	ldo	-PT_SZ_ALGN(%sp), %r1

	LDREG	PT_SAR(%r1), %rp
	mtctl	%rp, %cr11

	LDREG	PT_GR2(%r1), %rp
	LDREG	PT_GR3(%r1), %r3
	LDREG	PT_GR4(%r1), %r4
	LDREG	PT_GR5(%r1), %r5
	LDREG	PT_GR6(%r1), %r6
	LDREG	PT_GR7(%r1), %r7
	LDREG	PT_GR8(%r1), %r8
	LDREG	PT_GR9(%r1), %r9
	LDREG	PT_GR10(%r1),%r10
	LDREG	PT_GR11(%r1),%r11
	LDREG	PT_GR12(%r1),%r12
	LDREG	PT_GR13(%r1),%r13
	LDREG	PT_GR14(%r1),%r14
	LDREG	PT_GR15(%r1),%r15
	LDREG	PT_GR16(%r1),%r16
	LDREG	PT_GR17(%r1),%r17
	LDREG	PT_GR18(%r1),%r18
	LDREG	PT_GR19(%r1),%r19
	LDREG	PT_GR20(%r1),%r20
	LDREG	PT_GR21(%r1),%r21
	LDREG	PT_GR22(%r1),%r22
	LDREG	PT_GR23(%r1),%r23
	LDREG	PT_GR24(%r1),%r24
	LDREG	PT_GR25(%r1),%r25
	LDREG	PT_GR26(%r1),%r26
	LDREG	PT_GR27(%r1),%r27
	LDREG	PT_GR28(%r1),%r28
	LDREG	PT_GR29(%r1),%r29
	LDREG	PT_GR30(%r1),%r30
	LDREG	PT_GR31(%r1),%r31

	ldo	-PT_SZ_ALGN(%sp), %sp
	LDREGM	-FTRACE_FRAME_SIZE(%sp), %r1
	/* Adjust return point to jump back to beginning of traced function */
	ldo	-4(%r1), %r1
	bv,n	(%r1)

ENDPROC_CFI(ftrace_regs_caller)

#endif
#endif

#ifdef CONFIG_FUNCTION_GRAPH_TRACER
	.align 8
ENTRY_CFI(return_to_handler, caller,frame=FRAME_SIZE)
	.export parisc_return_to_handler,data
parisc_return_to_handler:
	copy	%r3,%r1
	STREG	%r0,-RP_OFFSET(%sp)	/* store 0 as %rp */
	copy	%sp,%r3
	STREGM	%r1,FRAME_SIZE(%sp)
	STREG	%ret0,8(%r3)
	STREG	%ret1,16(%r3)

#ifdef CONFIG_64BIT
	loadgp
#endif

	/* call ftrace_return_to_handler(0) */
	.import ftrace_return_to_handler,code
	load32	ftrace_return_to_handler,%ret0
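	/* Hand-rolled call: %r2 (%rp) is pointed at .Lftrace_ret below, so
	 * the callee "returns" there while %ret0 carries the branch target.
	 * Roughly, in illustrative C: rp = ftrace_return_to_handler(0);
	 * i.e. the callee hands back the original return address.
	 */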
#ifdef CONFIG_FUNCTION_GRAPH_TRACER
	.align 8
ENTRY_CFI(return_to_handler, caller,frame=FRAME_SIZE)
	.export parisc_return_to_handler,data
parisc_return_to_handler:
	copy	%r3,%r1
	STREG	%r0,-RP_OFFSET(%sp)	/* store 0 as %rp */
	copy	%sp,%r3
	STREGM	%r1,FRAME_SIZE(%sp)
	STREG	%ret0,8(%r3)
	STREG	%ret1,16(%r3)

#ifdef CONFIG_64BIT
	loadgp
#endif

	/* call ftrace_return_to_handler(0) */
	.import ftrace_return_to_handler,code
	load32	ftrace_return_to_handler,%ret0
	load32	.Lftrace_ret,%r2
#ifdef CONFIG_64BIT
	ldo	-16(%sp),%ret1		/* Reference param save area */
	bve	(%ret0)
#else
	bv	%r0(%ret0)
#endif
	ldi	0,%r26
.Lftrace_ret:
	copy	%ret0,%rp

	/* restore original return values */
	LDREG	8(%r3),%ret0
	LDREG	16(%r3),%ret1

	/* return from function */
#ifdef CONFIG_64BIT
	bve	(%rp)
#else
	bv	%r0(%rp)
#endif
	LDREGM	-FRAME_SIZE(%sp),%r3
ENDPROC_CFI(return_to_handler)

#endif /* CONFIG_FUNCTION_GRAPH_TRACER */

#endif /* CONFIG_FUNCTION_TRACER */

#ifdef CONFIG_IRQSTACKS
/* void call_on_stack(unsigned long param1, void *func,
		      unsigned long new_stack) */
ENTRY_CFI(call_on_stack, FRAME=2*FRAME_SIZE,CALLS,SAVE_RP,SAVE_SP)
ENTRY(_call_on_stack)
	copy	%sp, %r1

	/* Regarding the HPPA calling conventions for function pointers,
	   we assume the PIC register is not changed across the call.  For
	   CONFIG_64BIT, the argument pointer is left to point at the
	   argument region allocated for the call to call_on_stack. */

	/* Switch to new stack.  We allocate two frames.  */
	ldo	2*FRAME_SIZE(%arg2), %sp
# ifdef CONFIG_64BIT
	/* Save previous stack pointer and return pointer in frame marker */
	STREG	%rp, -FRAME_SIZE-RP_OFFSET(%sp)
	/* Calls always use function descriptor */
	LDREG	16(%arg1), %arg1
	bve,l	(%arg1), %rp
	STREG	%r1, -FRAME_SIZE-REG_SZ(%sp)
	LDREG	-FRAME_SIZE-RP_OFFSET(%sp), %rp
	bve	(%rp)
	LDREG	-FRAME_SIZE-REG_SZ(%sp), %sp
# else
	/* Save previous stack pointer and return pointer in frame marker */
	STREG	%r1, -FRAME_SIZE-REG_SZ(%sp)
	STREG	%rp, -FRAME_SIZE-RP_OFFSET(%sp)
	/* Calls use function descriptor if PLABEL bit is set */
	bb,>=,n	%arg1, 30, 1f
	depwi	0,31,2, %arg1
	LDREG	0(%arg1), %arg1
1:
	be,l	0(%sr4,%arg1), %sr0, %r31
	copy	%r31, %rp
	LDREG	-FRAME_SIZE-RP_OFFSET(%sp), %rp
	bv	(%rp)
	LDREG	-FRAME_SIZE-REG_SZ(%sp), %sp
# endif /* CONFIG_64BIT */
ENDPROC_CFI(call_on_stack)
#endif /* CONFIG_IRQSTACKS */
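/*
 * A hedged usage sketch for call_on_stack above.  The wrapper below is
 * hypothetical and not part of this file (the real caller is the IRQ
 * code in arch/parisc/kernel/irq.c):
 *
 *	extern void call_on_stack(unsigned long param1, void *func,
 *				  unsigned long new_stack);
 *
 *	static void run_on_irq_stack(void (*handler)(unsigned long),
 *				     unsigned long arg,
 *				     unsigned long stack_base)
 *	{
 *		// Stacks grow upward on parisc, so the (aligned) base
 *		// of the dedicated stack region is handed over.
 *		call_on_stack(arg, handler, stack_base);
 *	}
 */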
ENTRY_CFI(get_register)
	/*
	 * get_register is used by the non-access tlb miss handlers to
	 * copy the value of the general register specified in r8 into
	 * r1.  This routine can't be used for shadowed registers, since
	 * the rfir will restore the original value.  So, for the shadowed
	 * registers we put a -1 into r1 to indicate that the register
	 * should not be used (the register being copied could also have
	 * a -1 in it, but that is OK, it just means that we will have
	 * to use the slow path instead).  A C rendering of this dispatch
	 * table follows set_register below.
	 */
	blr	%r8,%r0
	nop
	bv	%r0(%r25)	/* r0 */
	copy	%r0,%r1
	bv	%r0(%r25)	/* r1 - shadowed */
	ldi	-1,%r1
	bv	%r0(%r25)	/* r2 */
	copy	%r2,%r1
	bv	%r0(%r25)	/* r3 */
	copy	%r3,%r1
	bv	%r0(%r25)	/* r4 */
	copy	%r4,%r1
	bv	%r0(%r25)	/* r5 */
	copy	%r5,%r1
	bv	%r0(%r25)	/* r6 */
	copy	%r6,%r1
	bv	%r0(%r25)	/* r7 */
	copy	%r7,%r1
	bv	%r0(%r25)	/* r8 - shadowed */
	ldi	-1,%r1
	bv	%r0(%r25)	/* r9 - shadowed */
	ldi	-1,%r1
	bv	%r0(%r25)	/* r10 */
	copy	%r10,%r1
	bv	%r0(%r25)	/* r11 */
	copy	%r11,%r1
	bv	%r0(%r25)	/* r12 */
	copy	%r12,%r1
	bv	%r0(%r25)	/* r13 */
	copy	%r13,%r1
	bv	%r0(%r25)	/* r14 */
	copy	%r14,%r1
	bv	%r0(%r25)	/* r15 */
	copy	%r15,%r1
	bv	%r0(%r25)	/* r16 - shadowed */
	ldi	-1,%r1
	bv	%r0(%r25)	/* r17 - shadowed */
	ldi	-1,%r1
	bv	%r0(%r25)	/* r18 */
	copy	%r18,%r1
	bv	%r0(%r25)	/* r19 */
	copy	%r19,%r1
	bv	%r0(%r25)	/* r20 */
	copy	%r20,%r1
	bv	%r0(%r25)	/* r21 */
	copy	%r21,%r1
	bv	%r0(%r25)	/* r22 */
	copy	%r22,%r1
	bv	%r0(%r25)	/* r23 */
	copy	%r23,%r1
	bv	%r0(%r25)	/* r24 - shadowed */
	ldi	-1,%r1
	bv	%r0(%r25)	/* r25 - shadowed */
	ldi	-1,%r1
	bv	%r0(%r25)	/* r26 */
	copy	%r26,%r1
	bv	%r0(%r25)	/* r27 */
	copy	%r27,%r1
	bv	%r0(%r25)	/* r28 */
	copy	%r28,%r1
	bv	%r0(%r25)	/* r29 */
	copy	%r29,%r1
	bv	%r0(%r25)	/* r30 */
	copy	%r30,%r1
	bv	%r0(%r25)	/* r31 */
	copy	%r31,%r1
ENDPROC_CFI(get_register)


ENTRY_CFI(set_register)
	/*
	 * set_register is used by the non-access tlb miss handlers to
	 * copy the value of r1 into the general register specified in
	 * r8.
	 */
	blr	%r8,%r0
	nop
	bv	%r0(%r25)	/* r0 (silly, but it is a place holder) */
	copy	%r1,%r0
	bv	%r0(%r25)	/* r1 */
	copy	%r1,%r1
	bv	%r0(%r25)	/* r2 */
	copy	%r1,%r2
	bv	%r0(%r25)	/* r3 */
	copy	%r1,%r3
	bv	%r0(%r25)	/* r4 */
	copy	%r1,%r4
	bv	%r0(%r25)	/* r5 */
	copy	%r1,%r5
	bv	%r0(%r25)	/* r6 */
	copy	%r1,%r6
	bv	%r0(%r25)	/* r7 */
	copy	%r1,%r7
	bv	%r0(%r25)	/* r8 */
	copy	%r1,%r8
	bv	%r0(%r25)	/* r9 */
	copy	%r1,%r9
	bv	%r0(%r25)	/* r10 */
	copy	%r1,%r10
	bv	%r0(%r25)	/* r11 */
	copy	%r1,%r11
	bv	%r0(%r25)	/* r12 */
	copy	%r1,%r12
	bv	%r0(%r25)	/* r13 */
	copy	%r1,%r13
	bv	%r0(%r25)	/* r14 */
	copy	%r1,%r14
	bv	%r0(%r25)	/* r15 */
	copy	%r1,%r15
	bv	%r0(%r25)	/* r16 */
	copy	%r1,%r16
	bv	%r0(%r25)	/* r17 */
	copy	%r1,%r17
	bv	%r0(%r25)	/* r18 */
	copy	%r1,%r18
	bv	%r0(%r25)	/* r19 */
	copy	%r1,%r19
	bv	%r0(%r25)	/* r20 */
	copy	%r1,%r20
	bv	%r0(%r25)	/* r21 */
	copy	%r1,%r21
	bv	%r0(%r25)	/* r22 */
	copy	%r1,%r22
	bv	%r0(%r25)	/* r23 */
	copy	%r1,%r23
	bv	%r0(%r25)	/* r24 */
	copy	%r1,%r24
	bv	%r0(%r25)	/* r25 */
	copy	%r1,%r25
	bv	%r0(%r25)	/* r26 */
	copy	%r1,%r26
	bv	%r0(%r25)	/* r27 */
	copy	%r1,%r27
	bv	%r0(%r25)	/* r28 */
	copy	%r1,%r28
	bv	%r0(%r25)	/* r29 */
	copy	%r1,%r29
	bv	%r0(%r25)	/* r30 */
	copy	%r1,%r30
	bv	%r0(%r25)	/* r31 */
	copy	%r1,%r31
ENDPROC_CFI(set_register)
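	/*
	 * A hedged C rendering of the two dispatch tables above, for
	 * illustration only.  blr computes its target as the address
	 * following its delay slot plus %r8 * 8, so each register number
	 * selects one 8-byte slot: a "bv" back to the caller plus one
	 * copy/ldi in the delay slot.  Roughly (reg[] standing in for the
	 * general registers):
	 *
	 *	long get_register(int regnum)	// result lands in %r1
	 *	{
	 *		switch (regnum) {
	 *		case 1: case 8: case 9: case 16:
	 *		case 17: case 24: case 25:
	 *			return -1;	// shadowed, unusable here
	 *		default:
	 *			return reg[regnum];
	 *		}
	 *	}
	 *
	 * set_register is the mirror image, storing %r1 into reg[regnum],
	 * with no shadowed-register special case.
	 */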