1/* 2 * Linux/PA-RISC Project (http://www.parisc-linux.org/) 3 * 4 * kernel entry points (interruptions, system call wrappers) 5 * Copyright (C) 1999,2000 Philipp Rumpf 6 * Copyright (C) 1999 SuSE GmbH Nuernberg 7 * Copyright (C) 2000 Hewlett-Packard (John Marvin) 8 * Copyright (C) 1999 Hewlett-Packard (Frank Rowand) 9 * 10 * This program is free software; you can redistribute it and/or modify 11 * it under the terms of the GNU General Public License as published by 12 * the Free Software Foundation; either version 2, or (at your option) 13 * any later version. 14 * 15 * This program is distributed in the hope that it will be useful, 16 * but WITHOUT ANY WARRANTY; without even the implied warranty of 17 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 18 * GNU General Public License for more details. 19 * 20 * You should have received a copy of the GNU General Public License 21 * along with this program; if not, write to the Free Software 22 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. 
 */

#include <linux/config.h>
#include <asm/offsets.h>

/* we have the following possibilities to act on an interruption:
 * - handle in assembly and use shadowed registers only
 * - save registers to kernel stack and handle in assembly or C */


#include <asm/assembly.h>	/* for LDREG/STREG defines */
#include <asm/pgtable.h>
#include <asm/psw.h>
#include <asm/signal.h>
#include <asm/unistd.h>
#include <asm/thread_info.h>

/* 64-bit kernels use the PA 2.0 doubleword ("wide") completers on
 * compare/branch instructions; 32-bit kernels use the word forms. */
#ifdef __LP64__
#define CMPIB           cmpib,*
#define CMPB            cmpb,*
#define COND(x)         *x

	.level 2.0w
#else
#define CMPIB           cmpib,
#define CMPB            cmpb,
#define COND(x)         x

	.level 2.0
#endif

	.import         pa_dbit_lock,data

	/* space_to_prot macro creates a prot id from a space id */

#if (SPACEID_SHIFT) == 0
	.macro  space_to_prot spc prot
	depd,z  \spc,62,31,\prot
	.endm
#else
	.macro  space_to_prot spc prot
	extrd,u \spc,(64 - (SPACEID_SHIFT)),32,\prot
	.endm
#endif

	/* Switch to virtual mapping, trashing only %r1 */
	.macro  virt_map
	/* PSW Q bit off while the interruption queues are rewritten */
	rsm     PSW_SM_Q,%r0
	tovirt_r1 %r29
	mfsp    %sr7, %r1
	or,=    %r0,%r1,%r0	/* Only save sr7 in sr3 if sr7 != 0 */
	mtsp    %r1, %sr3
	mtsp    %r0, %sr4
	mtsp    %r0, %sr5
	mtsp    %r0, %sr6
	mtsp    %r0, %sr7
	load32  KERNEL_PSW, %r1
	mtctl   %r1, %cr22
	mtctl   %r0, %cr17	/* Clear IIASQ tail */
	mtctl   %r0, %cr17	/* Clear IIASQ head */
	load32  4f, %r1
	mtctl   %r1, %cr18	/* Set IIAOQ tail */
	ldo     4(%r1), %r1
	mtctl   %r1, %cr18	/* Set IIAOQ head */
	rfir			/* resumes at 4: below, now virtually mapped */
	nop
4:
	.endm

	/*
	 * The "get_stack" macros are responsible for determining the
	 * kernel stack value.
	 *
	 * For Faults:
	 *      If sr7 == 0
	 *          Already using a kernel stack, so call the
	 *          get_stack_use_r30 macro to push a pt_regs structure
	 *          on the stack, and store registers there.
	 *      else
	 *          Need to set up a kernel stack, so call the
	 *          get_stack_use_cr30 macro to set up a pointer
	 *          to the pt_regs structure contained within the
	 *          task pointer pointed to by cr30. Set the stack
	 *          pointer to point to the end of the task structure.
	 *
	 * For Interrupts:
	 *      If sr7 == 0
	 *          Already using a kernel stack, check to see if r30
	 *          is already pointing to the per processor interrupt
	 *          stack. If it is, call the get_stack_use_r30 macro
	 *          to push a pt_regs structure on the stack, and store
	 *          registers there. Otherwise, call get_stack_use_cr31
	 *          to get a pointer to the base of the interrupt stack
	 *          and push a pt_regs structure on that stack.
	 *      else
	 *          Need to set up a kernel stack, so call the
	 *          get_stack_use_cr30 macro to set up a pointer
	 *          to the pt_regs structure contained within the
	 *          task pointer pointed to by cr30. Set the stack
	 *          pointer to point to the end of the task structure.
	 *          N.B: We don't use the interrupt stack for the
	 *          first interrupt from userland, because signals/
	 *          resched's are processed when returning to userland,
	 *          and we can sleep in those cases.
	 *
	 * Note that we use shadowed registers for temps until
	 * we can save %r26 and %r29. %r26 is used to preserve
	 * %r8 (a shadowed register) which temporarily contained
	 * either the fault type ("code") or the eirr. We need
	 * to use a non-shadowed register to carry the value over
	 * the rfir in virt_map. We use %r26 since this value winds
	 * up being passed as the argument to either do_cpu_irq_mask
	 * or handle_interruption. %r29 is used to hold a pointer
	 * the register save area, and once again, it needs to
	 * be a non-shadowed register so that it survives the rfir.
	 *
	 * N.B. TASK_SZ_ALGN and PT_SZ_ALGN include space for a stack frame.
 */

	.macro  get_stack_use_cr30

	/* we save the registers in the task struct */

	mfctl   %cr30, %r1
	tophys  %r1,%r9
	LDREG   TI_TASK(%r9), %r1	/* thread_info -> task_struct */
	tophys  %r1,%r9
	ldo     TASK_REGS(%r9),%r9
	STREG   %r30, PT_GR30(%r9)
	STREG   %r29,PT_GR29(%r9)
	STREG   %r26,PT_GR26(%r9)
	copy    %r9,%r29		/* %r29 = &pt_regs (survives rfir) */
	mfctl   %cr30, %r1
	ldo     THREAD_SZ_ALGN(%r1), %r30	/* stack = end of task struct */
	.endm

	.macro  get_stack_use_r30

	/* we put a struct pt_regs on the stack and save the registers there */

	tophys  %r30,%r9
	STREG   %r30,PT_GR30(%r9)
	ldo     PT_SZ_ALGN(%r30),%r30
	STREG   %r29,PT_GR29(%r9)
	STREG   %r26,PT_GR26(%r9)
	copy    %r9,%r29		/* %r29 = &pt_regs (survives rfir) */
	.endm

	/* Undo either of the get_stack macros above: reload %r1, %r30
	 * and (last, since we are still using it) %r29 from pt_regs. */
	.macro  rest_stack
	LDREG   PT_GR1(%r29), %r1
	LDREG   PT_GR30(%r29),%r30
	LDREG   PT_GR29(%r29),%r29
	.endm

	/* default interruption handler
	 * (calls traps.c:handle_interruption) */
	.macro	def code
	b	intr_save
	ldi     \code, %r8		/* delay slot: code -> shadowed %r8 */
	.align	32			/* each vector entry is 32 bytes */
	.endm

	/* Interrupt interruption handler
	 * (calls irq.c:do_cpu_irq_mask) */
	.macro	extint code
	b	intr_extint
	mfsp    %sr7,%r16		/* delay slot: sr7 tells user/kernel */
	.align	32
	.endm

	.import	os_hpmc, code

	/* HPMC handler */
	.macro	hpmc code
	nop			/* must be a NOP, will be patched later */
	load32	PA(os_hpmc), %r3
	bv,n	0(%r3)
	nop
	.word	0		/* checksum (will be patched) */
	.word	PA(os_hpmc)	/* address of handler */
	.word	0		/* length of handler */
	.endm

	/*
	 * Performance Note: Instructions will be moved up into
	 * this part of the code later on, once we are sure
	 * that the tlb miss handlers are close to final form.
 */

	/* Register definitions for tlb miss handler macros */

	va  = r8	/* virtual address for which the trap occured */
	spc = r24	/* space for which the trap occured */

#ifndef __LP64__

	/*
	 * itlb miss interruption handler (parisc 1.1 - 32 bit)
	 */

	.macro	itlb_11 code

	mfctl	%pcsq, spc
	b	itlb_miss_11
	mfctl	%pcoq, va	/* delay slot: faulting IA offset */

	.align	32
	.endm
#endif

	/*
	 * itlb miss interruption handler (parisc 2.0)
	 */

	.macro	itlb_20 code
	mfctl	%pcsq, spc
#ifdef __LP64__
	b	itlb_miss_20w
#else
	b	itlb_miss_20
#endif
	mfctl	%pcoq, va	/* delay slot: faulting IA offset */

	.align	32
	.endm

#ifndef __LP64__
	/*
	 * naitlb miss interruption handler (parisc 1.1 - 32 bit)
	 *
	 * Note: naitlb misses will be treated
	 * as an ordinary itlb miss for now.
	 * However, note that naitlb misses
	 * have the faulting address in the
	 * IOR/ISR.
	 */

	.macro	naitlb_11 code

	mfctl	%isr,spc
	b	itlb_miss_11
	mfctl	%ior,va
	/* FIXME: If user causes a naitlb miss, the priv level may not be in
	 * lower bits of va, where the itlb miss handler is expecting them
	 */

	.align	32
	.endm
#endif

	/*
	 * naitlb miss interruption handler (parisc 2.0)
	 *
	 * Note: naitlb misses will be treated
	 * as an ordinary itlb miss for now.
	 * However, note that naitlb misses
	 * have the faulting address in the
	 * IOR/ISR.
	 */

	.macro	naitlb_20 code

	mfctl	%isr,spc
#ifdef __LP64__
	b	itlb_miss_20w
#else
	b	itlb_miss_20
#endif
	mfctl	%ior,va
	/* FIXME: If user causes a naitlb miss, the priv level may not be in
	 * lower bits of va, where the itlb miss handler is expecting them
	 */

	.align	32
	.endm

#ifndef __LP64__
	/*
	 * dtlb miss interruption handler (parisc 1.1 - 32 bit)
	 */

	.macro	dtlb_11 code

	mfctl	%isr, spc
	b	dtlb_miss_11
	mfctl	%ior, va	/* delay slot: faulting data address */

	.align	32
	.endm
#endif

	/*
	 * dtlb miss interruption handler (parisc 2.0)
	 */

	.macro	dtlb_20 code

	mfctl	%isr, spc
#ifdef __LP64__
	b	dtlb_miss_20w
#else
	b	dtlb_miss_20
#endif
	mfctl	%ior, va	/* delay slot: faulting data address */

	.align	32
	.endm

#ifndef __LP64__
	/* nadtlb miss interruption handler (parisc 1.1 - 32 bit) */

	.macro	nadtlb_11 code

	mfctl	%isr,spc
	b	nadtlb_miss_11
	mfctl	%ior,va

	.align	32
	.endm
#endif

	/* nadtlb miss interruption handler (parisc 2.0) */

	.macro	nadtlb_20 code

	mfctl	%isr,spc
#ifdef __LP64__
	b	nadtlb_miss_20w
#else
	b	nadtlb_miss_20
#endif
	mfctl	%ior,va

	.align	32
	.endm

#ifndef __LP64__
	/*
	 * dirty bit trap interruption handler (parisc 1.1 - 32 bit)
	 */

	.macro	dbit_11 code

	mfctl	%isr,spc
	b	dbit_trap_11
	mfctl	%ior,va

	.align	32
	.endm
#endif

	/*
	 * dirty bit trap interruption handler (parisc 2.0)
	 */

	.macro	dbit_20 code

	mfctl	%isr,spc
#ifdef __LP64__
	b	dbit_trap_20w
#else
	b	dbit_trap_20
#endif
	mfctl	%ior,va

	.align	32
	.endm

	/* The following are simple 32 vs 64 bit instruction
	 * abstractions for the macros */
	.macro	EXTR	reg1,start,length,reg2
#ifdef __LP64__
	extrd,u	\reg1,32+\start,\length,\reg2
#else
	extrw,u	\reg1,\start,\length,\reg2
#endif
	.endm

	.macro	DEP	reg1,start,length,reg2
#ifdef __LP64__
	depd	\reg1,32+\start,\length,\reg2
#else
	depw	\reg1,\start,\length,\reg2
#endif
	.endm

	.macro	DEPI	val,start,length,reg
#ifdef __LP64__
	depdi	\val,32+\start,\length,\reg
#else
	depwi	\val,\start,\length,\reg
#endif
	.endm

	/* In LP64, the space contains part of the upper 32 bits of the
	 * fault.  We have to extract this and place it in the va,
	 * zeroing the corresponding bits in the space register */
	.macro	space_adjust	spc,va,tmp
#ifdef __LP64__
	extrd,u	\spc,63,SPACEID_SHIFT,\tmp
	depd	%r0,63,SPACEID_SHIFT,\spc
	depd	\tmp,31,SPACEID_SHIFT,\va
#endif
	.endm

	/* NOTE(review): swapper_pg_dir is a data symbol; the "code"
	 * section type here looks odd -- confirm against head.S */
	.import	swapper_pg_dir,code

	/* Get the pgd.  For faults on space zero (kernel space), this
	 * is simply swapper_pg_dir.  For user space faults, the
	 * pgd is stored in %cr25 */
	.macro	get_pgd	spc,reg
	ldil	L%PA(swapper_pg_dir),\reg
	ldo	R%PA(swapper_pg_dir)(\reg),\reg
	or,COND(=)	%r0,\spc,%r0	/* nullify mfctl when spc == 0, so
					 * \reg keeps swapper_pg_dir */
	mfctl	%cr25,\reg
	.endm

	/*
		space_check(spc,tmp,fault)

		spc - The space we saw the fault with.
		tmp - The place to store the current space.
		fault - Function to call on failure.

		Only allow faults on different spaces from the
		currently active one if we're the kernel

	*/
	.macro	space_check	spc,tmp,fault
	mfsp	%sr7,\tmp
	or,COND(<>)	%r0,\spc,%r0	/* user may execute gateway page
					 * as kernel, so defeat the space
					 * check if it is */
	copy	\spc,\tmp
	or,COND(=)	%r0,\tmp,%r0	/* nullify if executing as kernel */
	cmpb,COND(<>),n	\tmp,\spc,\fault
	.endm

	/* Look up a PTE in a 2-Level scheme (faulting at each
	 * level if the entry isn't present
	 *
	 * NOTE: we use ldw even for LP64, since the short pointers
	 * can address up to 1TB
	 */
	.macro	L2_ptep	pmd,pte,index,va,fault
#if PT_NLEVELS == 3
	EXTR	\va,31-ASM_PMD_SHIFT,ASM_BITS_PER_PMD,\index
#else
	EXTR	\va,31-ASM_PGDIR_SHIFT,ASM_BITS_PER_PGD,\index
#endif
	DEP	%r0,31,PAGE_SHIFT,\pmd	/* clear offset */
	copy	%r0,\pte
	ldw,s	\index(\pmd),\pmd
	bb,>=,n	\pmd,_PxD_PRESENT_BIT,\fault
	DEP	%r0,31,PxD_FLAG_SHIFT,\pmd	/* clear flags */
	copy	\pmd,%r9
#ifdef __LP64__
	shld	%r9,PxD_VALUE_SHIFT,\pmd
#else
	shlw	%r9,PxD_VALUE_SHIFT,\pmd
#endif
	EXTR	\va,31-PAGE_SHIFT,ASM_BITS_PER_PTE,\index
	DEP	%r0,31,PAGE_SHIFT,\pmd	/* clear offset */
	shladd	\index,BITS_PER_PTE_ENTRY,\pmd,\pmd
	LDREG	%r0(\pmd),\pte		/* pmd is now pte */
	bb,>=,n	\pte,_PAGE_PRESENT_BIT,\fault
	.endm

	/* Look up PTE in a 3-Level scheme.
	 *
	 * Here we implement a Hybrid L2/L3 scheme: we allocate the
	 * first pmd adjacent to the pgd.  This means that we can
	 * subtract a constant offset to get to it.  The pmd and pgd
	 * sizes are arranged so that a single pmd covers 4GB (giving
	 * a full LP64 process access to 8TB) so our lookups are
	 * effectively L2 for the first 4GB of the kernel (i.e. for
	 * all ILP32 processes and all the kernel for machines with
	 * under 4GB of memory)
	 *
	 * The extrd,u,*[=|<>] \va,31,32,%r0 tests nullify the pgd
	 * lookup steps for va's in the low 4GB, falling straight
	 * through to the adjacent-pmd L2_ptep below. */
	.macro	L3_ptep	pgd,pte,index,va,fault
	extrd,u	\va,63-ASM_PGDIR_SHIFT,ASM_BITS_PER_PGD,\index
	copy	%r0,\pte
	extrd,u,*=	\va,31,32,%r0
	ldw,s	\index(\pgd),\pgd
	extrd,u,*=	\va,31,32,%r0
	bb,>=,n	\pgd,_PxD_PRESENT_BIT,\fault
	extrd,u,*=	\va,31,32,%r0
	shld	\pgd,PxD_VALUE_SHIFT,\index
	extrd,u,*=	\va,31,32,%r0
	copy	\index,\pgd
	extrd,u,*<>	\va,31,32,%r0
	ldo	ASM_PGD_PMD_OFFSET(\pgd),\pgd
	L2_ptep	\pgd,\pte,\index,\va,\fault
	.endm

	/* Set the _PAGE_ACCESSED bit of the PTE.  Be clever and
	 * don't needlessly dirty the cache line if it was already set */
	.macro	update_ptep	ptep,pte,tmp,tmp1
	ldi	_PAGE_ACCESSED,\tmp1
	or	\tmp1,\pte,\tmp
	and,COND(<>)	\tmp1,\pte,%r0	/* skip store if bit already set */
	STREG	\tmp,0(\ptep)
	.endm

	/* Set the dirty bit (and accessed bit).  No need to be
	 * clever, this is only used from the dirty fault */
	.macro	update_dirty	ptep,pte,tmp
	ldi	_PAGE_ACCESSED|_PAGE_DIRTY,\tmp
	or	\tmp,\pte,\pte
	STREG	\pte,0(\ptep)
	.endm

	/* Convert the pte and prot to tlb insertion values.  How
	 * this happens is quite subtle, read below */
	.macro	make_insert_tlb	spc,pte,prot
	space_to_prot	\spc \prot	/* create prot id from space */
	/* The following is the real subtlety.  This is depositing
	 * T <-> _PAGE_REFTRAP
	 * D <-> _PAGE_DIRTY
	 * B <-> _PAGE_DMB (memory break)
	 *
	 * Then incredible subtlety: The access rights are
	 * _PAGE_GATEWAY _PAGE_EXEC _PAGE_READ
	 * See 3-14 of the parisc 2.0 manual
	 *
	 * Finally, _PAGE_READ goes in the top bit of PL1 (so we
	 * trigger an access rights trap in user space if the user
	 * tries to read an unreadable page */
	depd	\pte,8,7,\prot

	/* PAGE_USER indicates the page can be read with user privileges,
	 * so deposit X1|11 to PL1|PL2 (remember the upper bit of PL1
	 * contains _PAGE_READ */
	extrd,u,*=	\pte,_PAGE_USER_BIT+32,1,%r0
	depdi	7,11,3,\prot
	/* If we're a gateway page, drop PL2 back to zero for promotion
	 * to kernel privilege (so we can execute the page as kernel).
	 * Any privilege promotion page always denys read and write */
	extrd,u,*=	\pte,_PAGE_GATEWAY_BIT+32,1,%r0
	depd	%r0,11,2,\prot	/* If Gateway, Set PL2 to 0 */

	/* Get rid of prot bits and convert to page addr for iitlbt */

	depd	%r0,63,PAGE_SHIFT,\pte
	extrd,u	\pte,56,32,\pte
	.endm

	/* Identical macro to make_insert_tlb above, except it
	 * makes the tlb entry for the differently formatted pa11
	 * insertion instructions */
	.macro	make_insert_tlb_11	spc,pte,prot
	zdep	\spc,30,15,\prot
	dep	\pte,8,7,\prot
	extru,=	\pte,_PAGE_NO_CACHE_BIT,1,%r0
	depi	1,12,1,\prot
	extru,=	\pte,_PAGE_USER_BIT,1,%r0
	depi	7,11,3,\prot	/* Set for user space (1 rsvd for read) */
	extru,=	\pte,_PAGE_GATEWAY_BIT,1,%r0
	depi	0,11,2,\prot	/* If Gateway, Set PL2 to 0 */

	/* Get rid of prot bits and convert to page addr for iitlba */

	depi	0,31,12,\pte
	extru	\pte,24,25,\pte

	.endm

	/* This is for ILP32 PA2.0 only.  The TLB insertion needs
	 * to extend into I/O space if the address is 0xfXXXXXXX
	 * so we extend the f's into the top word of the pte in
	 * this case */
	.macro	f_extend	pte,tmp
	extrd,s	\pte,42,4,\tmp
	addi,<>	1,\tmp,%r0	/* skip sign-extension unless \tmp == -1 */
	extrd,s	\pte,63,25,\pte
	.endm

	/* The alias region is an 8MB aligned 16MB to do clear and
	 * copy user pages at addresses congruent with the user
	 * virtual address.
	 *
	 * To use the alias page, you set %r26 up with the to TLB
	 * entry (identifying the physical page) and %r23 up with
	 * the from tlb entry (or nothing if only a to entry---for
	 * clear_user_page_asm) */
	.macro	do_alias	spc,tmp,tmp1,va,pte,prot,fault
	cmpib,COND(<>),n 0,\spc,\fault
	ldil	L%(TMPALIAS_MAP_START),\tmp
#if defined(__LP64__) && (TMPALIAS_MAP_START >= 0x80000000)
	/* on LP64, ldi will sign extend into the upper 32 bits,
	 * which is behaviour we don't want */
	depdi	0,31,32,\tmp
#endif
	copy	\va,\tmp1
	DEPI	0,31,23,\tmp1
	cmpb,COND(<>),n	\tmp,\tmp1,\fault
	ldi	(_PAGE_DIRTY|_PAGE_WRITE|_PAGE_READ),\prot
	depd,z	\prot,8,7,\prot
	/*
	 * OK, it is in the temp alias region, check whether "from" or "to".
	 * Check "subtle" note in pacache.S re: r23/r26.
	 */
#ifdef __LP64__
	extrd,u,*=	\va,41,1,%r0
#else
	extrw,u,=	\va,9,1,%r0
#endif
	or,COND(tr)	%r23,%r0,\pte	/* nullify-true: take "from" page */
	or	%r26,%r0,\pte		/* else "to" page */
	.endm


	/*
	 * Align fault_vector_20 on 4K boundary so that both
	 * fault_vector_11 and fault_vector_20 are on the
	 * same page. This is only necessary as long as we
	 * write protect the kernel text, which we may stop
	 * doing once we use large page translations to cover
	 * the static part of the kernel address space.
 */

	.export fault_vector_20

	.text

	.align 4096

fault_vector_20:
	/* First vector is invalid (0) */
	.ascii	"cows can fly"
	.byte 0
	.align 32

	hpmc		 1
	def		 2
	def		 3
	extint		 4
	def		 5
	itlb_20		 6
	def		 7
	def		 8
	def		 9
	def		10
	def		11
	def		12
	def		13
	def		14
	dtlb_20		15
#if 0
	naitlb_20	16
#else
	def		16
#endif
	nadtlb_20	17
	def		18
	def		19
	dbit_20		20
	def		21
	def		22
	def		23
	def		24
	def		25
	def		26
	def		27
	def		28
	def		29
	def		30
	def		31

#ifndef __LP64__

	.export fault_vector_11

	.align 2048

fault_vector_11:
	/* First vector is invalid (0) */
	.ascii	"cows can fly"
	.byte 0
	.align 32

	hpmc		 1
	def		 2
	def		 3
	extint		 4
	def		 5
	itlb_11		 6
	def		 7
	def		 8
	def		 9
	def		10
	def		11
	def		12
	def		13
	def		14
	dtlb_11		15
#if 0
	naitlb_11	16
#else
	def		16
#endif
	nadtlb_11	17
	def		18
	def		19
	dbit_11		20
	def		21
	def		22
	def		23
	def		24
	def		25
	def		26
	def		27
	def		28
	def		29
	def		30
	def		31

#endif

	.import		handle_interruption,code
	.import		do_cpu_irq_mask,code

	/*
	 * r26 = function to be called
	 * r25 = argument to pass in
	 * r24 = flags for do_fork()
	 *
	 * Kernel threads don't ever return, so they don't need
	 * a true register context. We just save away the arguments
	 * for copy_thread/ret_ to properly set up the child.
	 */

#define CLONE_VM 0x100	/* Must agree with <linux/sched.h> */
#define CLONE_UNTRACED 0x00800000

	.export __kernel_thread, code
	.import do_fork
__kernel_thread:
	STREG	%r2, -RP_OFFSET(%r30)

	copy	%r30, %r1
	ldo	PT_SZ_ALGN(%r30),%r30
#ifdef __LP64__
	/* Yo, function pointers in wide mode are little structs... -PB */
	ldd	24(%r26), %r2
	STREG	%r2, PT_GR27(%r1)	/* Store childs %dp */
	ldd	16(%r26), %r26

	STREG	%r22, PT_GR22(%r1)	/* save r22 (arg5) */
	copy	%r0, %r22		/* user_tid */
#endif
	STREG	%r26, PT_GR26(%r1)  /* Store function & argument for child */
	STREG	%r25, PT_GR25(%r1)
	ldil	L%CLONE_UNTRACED, %r26
	ldo	CLONE_VM(%r26), %r26	/* Force CLONE_VM since only init_mm */
	or	%r26, %r24, %r26	/* will have kernel mappings.	*/
	ldi	1, %r25			/* stack_start, signals kernel thread */
	stw	%r0, -52(%r30)		/* user_tid */
#ifdef __LP64__
	ldo	-16(%r30),%r29		/* Reference param save area */
#endif
	BL	do_fork, %r2
	copy	%r1, %r24		/* delay slot: pt_regs arg */

	/* Parent Returns here */

	LDREG	-PT_SZ_ALGN-RP_OFFSET(%r30), %r2
	ldo	-PT_SZ_ALGN(%r30), %r30
	bv	%r0(%r2)
	nop

	/*
	 * Child Returns here
	 *
	 * copy_thread moved args from temp save area set up above
	 * into task save area.
	 */

	.export	ret_from_kernel_thread
ret_from_kernel_thread:

	/* Call schedule_tail first though */
	BL	schedule_tail, %r2
	nop

	LDREG	TI_TASK-THREAD_SZ_ALGN(%r30), %r1
	LDREG	TASK_PT_GR25(%r1), %r26	/* argument for the thread fn */
#ifdef __LP64__
	LDREG	TASK_PT_GR27(%r1), %r27
	LDREG	TASK_PT_GR22(%r1), %r22
#endif
	LDREG	TASK_PT_GR26(%r1), %r1	/* thread function pointer */
	ble	0(%sr7, %r1)
	copy	%r31, %r2		/* delay slot: return link */

#ifdef __LP64__
	ldo	-16(%r30),%r29		/* Reference param save area */
	loadgp				/* Thread could have been in a module */
#endif
	b	sys_exit		/* thread fn returned: exit(0) */
	ldi	0, %r26

	.import	sys_execve, code
	.export	__execve, code
__execve:
	copy	%r2, %r15		/* save rp across sys_execve */
	copy	%r30, %r16		/* %r16 = pt_regs on our stack */
	ldo	PT_SZ_ALGN(%r30), %r30
	STREG	%r26, PT_GR26(%r16)
	STREG	%r25, PT_GR25(%r16)
	STREG	%r24, PT_GR24(%r16)
#ifdef __LP64__
	ldo	-16(%r30),%r29		/* Reference param save area */
#endif
	BL	sys_execve, %r2
	copy	%r16, %r26		/* delay slot: regs argument */

	cmpib,=,n 0,%r28,intr_return	/* forward */

	/* yes, this will trap and die.
 */
	copy	%r15, %r2		/* sys_execve failed: restore rp */
	copy	%r16, %r30		/* ... and the original stack */
	bv	%r0(%r2)
	nop

	.align 4

	/*
	 * struct task_struct *_switch_to(struct task_struct *prev,
	 *	struct task_struct *next)
	 *
	 * switch kernel stacks and return prev */
	.export	_switch_to, code
_switch_to:
	STREG	%r2, -RP_OFFSET(%r30)

	callee_save

	load32	_switch_to_ret, %r2

	STREG	%r2, TASK_PT_KPC(%r26)	/* prev resumes at _switch_to_ret */
	LDREG	TASK_PT_KPC(%r25), %r2	/* next's saved continuation */

	STREG	%r30, TASK_PT_KSP(%r26)
	LDREG	TASK_PT_KSP(%r25), %r30
	LDREG	TASK_THREAD_INFO(%r25), %r25
	bv	%r0(%r2)
	mtctl	%r25,%cr30		/* delay slot: cr30 = new thread_info */

_switch_to_ret:
	mtctl	%r0, %cr0		/* Needed for single stepping */
	callee_rest

	LDREG	-RP_OFFSET(%r30), %r2
	bv	%r0(%r2)
	copy	%r26, %r28		/* delay slot: return prev */

	/*
	 * Common rfi return path for interruptions, kernel execve, and
	 * sys_rt_sigreturn (sometimes).  The sys_rt_sigreturn syscall will
	 * return via this path if the signal was received when the process
	 * was running; if the process was blocked on a syscall then the
	 * normal syscall_exit path is used.  All syscalls for traced
	 * proceses exit via intr_restore.
	 *
	 * XXX If any syscalls that change a processes space id ever exit
	 * this way, then we will need to copy %sr3 in to PT_SR[3..7], and
	 * adjust IASQ[0..1].
	 *
	 * Note that the following code uses a "relied upon translation".
	 * See the parisc ACD for details. The ssm is necessary due to a
	 * PCXT bug.
	 */

	.align 4096

	.export	syscall_exit_rfi
syscall_exit_rfi:
	mfctl	%cr30,%r16
	LDREG	TI_TASK(%r16), %r16	/* thread_info -> task_struct */
	ldo	TASK_REGS(%r16),%r16
	/* Force iaoq to userspace, as the user has had access to our current
	 * context via sigcontext. Also Filter the PSW for the same reason.
	 */
	LDREG	PT_IAOQ0(%r16),%r19
	depi	3,31,2,%r19		/* privilege level 3 (user) */
	STREG	%r19,PT_IAOQ0(%r16)
	LDREG	PT_IAOQ1(%r16),%r19
	depi	3,31,2,%r19
	STREG	%r19,PT_IAOQ1(%r16)
	LDREG	PT_PSW(%r16),%r19
	load32	USER_PSW_MASK,%r1
#ifdef __LP64__
	load32	USER_PSW_HI_MASK,%r20
	depd	%r20,31,32,%r1
#endif
	and	%r19,%r1,%r19	/* Mask out bits that user shouldn't play with */
	load32	USER_PSW,%r1
	or	%r19,%r1,%r19	/* Make sure default USER_PSW bits are set */
	STREG	%r19,PT_PSW(%r16)

	/*
	 * If we aren't being traced, we never saved space registers
	 * (we don't store them in the sigcontext), so set them
	 * to "proper" values now (otherwise we'll wind up restoring
	 * whatever was last stored in the task structure, which might
	 * be inconsistent if an interrupt occured while on the gateway
	 * page) Note that we may be "trashing" values the user put in
	 * them, but we don't support the the user changing them.
	 */

	STREG	%r0,PT_SR2(%r16)	/* sr2: gateway page space (0) */
	mfsp	%sr3,%r19		/* user space id saved in sr3 */
	STREG	%r19,PT_SR0(%r16)
	STREG	%r19,PT_SR1(%r16)
	STREG	%r19,PT_SR3(%r16)
	STREG	%r19,PT_SR4(%r16)
	STREG	%r19,PT_SR5(%r16)
	STREG	%r19,PT_SR6(%r16)
	STREG	%r19,PT_SR7(%r16)

intr_return:
	/* NOTE: Need to enable interrupts incase we schedule. */
	ssm	PSW_SM_I, %r0

	/* Check for software interrupts */

	.import irq_stat,data

	load32	irq_stat,%r19
#ifdef CONFIG_SMP
	mfctl	%cr30,%r1
	ldw	TI_CPU(%r1),%r1	/* get cpu # - int */
	/* shift left ____cacheline_aligned (aka L1_CACHE_BYTES) amount
	** irq_stat[] is defined using ____cacheline_aligned.
	*/
#ifdef __LP64__
	shld	%r1, 6, %r20
#else
	shlw	%r1, 5, %r20
#endif
	add	%r19,%r20,%r19	/* now have &irq_stat[smp_processor_id()] */
#endif /* CONFIG_SMP */

	LDREG	IRQSTAT_SIRQ_PEND(%r19),%r20	/* hardirq.h: unsigned long */
	cmpib,<>,n 0,%r20,intr_do_softirq	/* forward */

intr_check_resched:

	/* check for reschedule */
	mfctl	%cr30,%r1
	LDREG	TI_FLAGS(%r1),%r19	/* sched.h: TIF_NEED_RESCHED */
	bb,<,n	%r19,31-TIF_NEED_RESCHED,intr_do_resched /* forward */

intr_check_sig:
	/* As above */
	mfctl	%cr30,%r1
	LDREG	TI_FLAGS(%r1),%r19	/* sched.h: TIF_SIGPENDING */
	bb,<,n	%r19, 31-TIF_SIGPENDING, intr_do_signal /* forward */

intr_restore:
	copy	%r16,%r29		/* %r29 = pt_regs for rest_* macros */
	ldo	PT_FR31(%r29),%r1
	rest_fp	%r1
	rest_general %r29

	/* Create a "relied upon translation" PA 2.0 Arch. F-5 */
	ssm	0,%r0
	nop
	nop
	nop
	nop
	nop
	nop
	nop
	tophys_r1 %r29
	rsm	(PSW_SM_Q|PSW_SM_P|PSW_SM_D|PSW_SM_I),%r0

	/* Restore space id's and special cr's from PT_REGS
	 * structure pointed to by r29 */
	rest_specials %r29

	/* Important: Note that rest_stack restores r29
	 * last (we are using it)! It also restores r1 and r30.
 */
	rest_stack

	rfi
	nop
	nop
	nop
	nop
	nop
	nop
	nop
	nop

	.import do_softirq,code
intr_do_softirq:
	bl	do_softirq,%r2
#ifdef __LP64__
	ldo	-16(%r30),%r29		/* Reference param save area */
#else
	nop
#endif
	b	intr_check_resched
	nop

	.import schedule,code
intr_do_resched:
	/* Only do reschedule if we are returning to user space */
	LDREG	PT_IASQ0(%r16), %r20
	CMPIB=	0,%r20,intr_restore	/* backward */
	nop
	LDREG	PT_IASQ1(%r16), %r20
	CMPIB=	0,%r20,intr_restore	/* backward */
	nop

#ifdef __LP64__
	ldo	-16(%r30),%r29		/* Reference param save area */
#endif

	ldil	L%intr_check_sig, %r2
	b	schedule
	ldo	R%intr_check_sig(%r2), %r2	/* delay slot: return addr */


	.import do_signal,code
intr_do_signal:
	/*
	This check is critical to having LWS
	working. The IASQ is zero on the gateway
	page and we cannot deliver any signals until
	we get off the gateway page.

	Only do signals if we are returning to user space
	*/
	LDREG	PT_IASQ0(%r16), %r20
	CMPIB=	0,%r20,intr_restore	/* backward */
	nop
	LDREG	PT_IASQ1(%r16), %r20
	CMPIB=	0,%r20,intr_restore	/* backward */
	nop

	copy	%r0, %r24		/* unsigned long in_syscall */
	copy	%r16, %r25		/* struct pt_regs *regs */
#ifdef __LP64__
	ldo	-16(%r30),%r29		/* Reference param save area */
#endif

	BL	do_signal,%r2
	copy	%r0, %r26		/* sigset_t *oldset = NULL */

	b	intr_check_sig
	nop

	/*
	 * External interrupts.
	 */

intr_extint:
	CMPIB=,n 0,%r16,1f		/* %r16 = sr7 (0 => was in kernel) */
	get_stack_use_cr30
	b,n	3f

1:
#if 0  /* Interrupt Stack support not working yet! */
	mfctl	%cr31,%r1
	copy	%r30,%r17
	/* FIXME!
depi below has hardcoded idea of interrupt stack size (32k)*/ 1091#ifdef __LP64__ 1092 depdi 0,63,15,%r17 1093#else 1094 depi 0,31,15,%r17 1095#endif 1096 CMPB=,n %r1,%r17,2f 1097 get_stack_use_cr31 1098 b,n 3f 1099#endif 11002: 1101 get_stack_use_r30 1102 11033: 1104 save_specials %r29 1105 virt_map 1106 save_general %r29 1107 1108 ldo PT_FR0(%r29), %r24 1109 save_fp %r24 1110 1111 loadgp 1112 1113 copy %r29, %r26 /* arg0 is pt_regs */ 1114 copy %r29, %r16 /* save pt_regs */ 1115 1116 ldil L%intr_return, %r2 1117 1118#ifdef __LP64__ 1119 ldo -16(%r30),%r29 /* Reference param save area */ 1120#endif 1121 1122 b do_cpu_irq_mask 1123 ldo R%intr_return(%r2), %r2 /* return to intr_return, not here */ 1124 1125 1126 /* Generic interruptions (illegal insn, unaligned, page fault, etc) */ 1127 1128 .export intr_save, code /* for os_hpmc */ 1129 1130intr_save: 1131 mfsp %sr7,%r16 1132 CMPIB=,n 0,%r16,1f 1133 get_stack_use_cr30 1134 b 2f 1135 copy %r8,%r26 1136 11371: 1138 get_stack_use_r30 1139 copy %r8,%r26 1140 11412: 1142 save_specials %r29 1143 1144 /* If this trap is a itlb miss, skip saving/adjusting isr/ior */ 1145 1146 /* 1147 * FIXME: 1) Use a #define for the hardwired "6" below (and in 1148 * traps.c. 1149 * 2) Once we start executing code above 4 Gb, we need 1150 * to adjust iasq/iaoq here in the same way we 1151 * adjust isr/ior below. 1152 */ 1153 1154 CMPIB=,n 6,%r26,skip_save_ior 1155 1156 /* save_specials left ipsw value in r8 for us to test */ 1157 1158 mfctl %cr20, %r16 /* isr */ 1159 mfctl %cr21, %r17 /* ior */ 1160 1161#ifdef __LP64__ 1162 /* 1163 * If the interrupted code was running with W bit off (32 bit), 1164 * clear the b bits (bits 0 & 1) in the ior. 1165 */ 1166 extrd,u,*<> %r8,PSW_W_BIT,1,%r0 1167 depdi 0,1,2,%r17 1168 1169 /* 1170 * FIXME: This code has hardwired assumptions about the split 1171 * between space bits and offset bits. This will change 1172 * when we allow alternate page sizes. 1173 */ 1174 1175 /* adjust isr/ior. 
*/ 1176 1177 extrd,u %r16,63,7,%r1 /* get high bits from isr for ior */ 1178 depd %r1,31,7,%r17 /* deposit them into ior */ 1179 depdi 0,63,7,%r16 /* clear them from isr */ 1180#endif 1181 STREG %r16, PT_ISR(%r29) 1182 STREG %r17, PT_IOR(%r29) 1183 1184 1185skip_save_ior: 1186 virt_map 1187 save_general %r29 1188 1189 ldo PT_FR0(%r29), %r25 1190 save_fp %r25 1191 1192 loadgp 1193 1194 copy %r29, %r25 /* arg1 is pt_regs */ 1195#ifdef __LP64__ 1196 ldo -16(%r30),%r29 /* Reference param save area */ 1197#endif 1198 1199 ldil L%intr_check_sig, %r2 1200 copy %r25, %r16 /* save pt_regs */ 1201 1202 b handle_interruption 1203 ldo R%intr_check_sig(%r2), %r2 1204 1205 1206 /* 1207 * Note for all tlb miss handlers: 1208 * 1209 * cr24 contains a pointer to the kernel address space 1210 * page directory. 1211 * 1212 * cr25 contains a pointer to the current user address 1213 * space page directory. 1214 * 1215 * sr3 will contain the space id of the user address space 1216 * of the current running thread while that thread is 1217 * running in the kernel. 1218 */ 1219 1220 /* 1221 * register number allocations. 
 * Note that these are all
 * in the shadowed registers
 */

	/* Register aliases for the TLB miss handlers below.  Only
	 * shadowed registers are used, so the handlers may exit with
	 * rfir (which restores the shadowed set) without having to
	 * save or restore any state. */

	t0 = r1		/* temporary register 0 */
	va = r8		/* virtual address for which the trap occurred */
	t1 = r9		/* temporary register 1 */
	pte  = r16	/* pte/phys page # */
	prot = r17	/* prot bits */
	spc  = r24	/* space for which the trap occurred */
	ptp = r25	/* page directory/page table pointer */

#ifdef __LP64__

	/* Data TLB miss, wide (PA 2.0 64-bit) kernel: walk the 3-level
	 * page table, mark the pte accessed, and insert the translation
	 * with idtlbt.  The *_fault / *_check_alias_* targets are taken
	 * from inside the macros (defined earlier in this file —
	 * presumably in the macro section; confirm there). */
dtlb_miss_20w:
	space_adjust	spc,va,t0
	get_pgd		spc,ptp
	space_check	spc,t0,dtlb_fault

	L3_ptep		ptp,pte,t0,va,dtlb_check_alias_20w

	update_ptep	ptp,pte,t0,t1

	make_insert_tlb	spc,pte,prot

	idtlbt		pte,prot

	rfir
	nop

	/* Fault was in the temporary alias region: build a translation
	 * from the alias information instead of the page tables. */
dtlb_check_alias_20w:
	do_alias	spc,t0,t1,va,pte,prot,dtlb_fault

	idtlbt		pte,prot

	rfir
	nop

	/* Non-access data TLB miss (fdc/fic/pdc/probe and friends),
	 * wide kernel. */
nadtlb_miss_20w:
	space_adjust	spc,va,t0
	get_pgd		spc,ptp
	space_check	spc,t0,nadtlb_fault

	L3_ptep		ptp,pte,t0,va,nadtlb_check_flush_20w

	update_ptep	ptp,pte,t0,t1

	make_insert_tlb	spc,pte,prot

	idtlbt		pte,prot

	rfir
	nop

nadtlb_check_flush_20w:
	/* If the FLUSH bit is clear there is no translation to build;
	 * fall through to the instruction emulation path. */
	bb,>=,n		pte,_PAGE_FLUSH_BIT,nadtlb_emulate

	/* Insert a "flush only" translation */

	depdi,z		7,7,3,prot
	depdi		1,10,1,prot

	/* Get rid of prot bits and convert to page addr for idtlbt */

	depdi		0,63,12,pte
	extrd,u		pte,56,52,pte
	idtlbt		pte,prot

	rfir
	nop

#else

	/* Data TLB miss, PA 1.1 (split I/D TLB): same walk as above but
	 * two-level page table and separate idtlba/idtlbp inserts, done
	 * through %sr1 which is saved/restored around the insert. */
dtlb_miss_11:
	get_pgd		spc,ptp

	space_check	spc,t0,dtlb_fault

	L2_ptep		ptp,pte,t0,va,dtlb_check_alias_11

	update_ptep	ptp,pte,t0,t1

	make_insert_tlb_11	spc,pte,prot

	mfsp		%sr1,t0		/* Save sr1 so we can use it in tlb inserts */
	mtsp		spc,%sr1

	idtlba		pte,(%sr1,va)
	idtlbp		prot,(%sr1,va)

	mtsp		t0, %sr1	/* Restore sr1 */

	rfir
	nop

dtlb_check_alias_11:

	/* Check to see if fault is in the temporary alias region */

	cmpib,<>,n	0,spc,dtlb_fault /* forward */
	ldil		L%(TMPALIAS_MAP_START),t0
	copy		va,t1
	depwi		0,31,23,t1
	cmpb,<>,n	t0,t1,dtlb_fault /* forward */
	ldi		(_PAGE_DIRTY|_PAGE_WRITE|_PAGE_READ),prot
	depw,z		prot,8,7,prot

	/*
	 * OK, it is in the temp alias region, check whether "from" or "to".
	 * Check "subtle" note in pacache.S re: r23/r26.
	 */

	extrw,u,=	va,9,1,r0
	or,tr		%r23,%r0,pte	/* If "from" use "from" page */
	or		%r26,%r0,pte	/* else "to", use "to" page */

	idtlba		pte,(va)
	idtlbp		prot,(va)

	rfir
	nop

	/* Non-access data TLB miss, PA 1.1. */
nadtlb_miss_11:
	get_pgd		spc,ptp

	space_check	spc,t0,nadtlb_fault

	L2_ptep		ptp,pte,t0,va,nadtlb_check_flush_11

	update_ptep	ptp,pte,t0,t1

	make_insert_tlb_11	spc,pte,prot


	mfsp		%sr1,t0		/* Save sr1 so we can use it in tlb inserts */
	mtsp		spc,%sr1

	idtlba		pte,(%sr1,va)
	idtlbp		prot,(%sr1,va)

	mtsp		t0, %sr1	/* Restore sr1 */

	rfir
	nop

nadtlb_check_flush_11:
	/* No FLUSH bit -> emulate the faulting instruction instead. */
	bb,>=,n		pte,_PAGE_FLUSH_BIT,nadtlb_emulate

	/* Insert a "flush only" translation */

	zdepi		7,7,3,prot
	depi		1,10,1,prot

	/* Get rid of prot bits and convert to page addr for idtlba */

	depi		0,31,12,pte
	extru		pte,24,25,pte

	mfsp		%sr1,t0		/* Save sr1 so we can use it in tlb inserts */
	mtsp		spc,%sr1

	idtlba		pte,(%sr1,va)
	idtlbp		prot,(%sr1,va)

	mtsp		t0, %sr1	/* Restore sr1 */

	rfir
	nop

	/* Data TLB miss, PA 2.0 narrow kernel: two-level walk, combined
	 * idtlbt insert; f_extend widens the physical address bits. */
dtlb_miss_20:
	space_adjust	spc,va,t0
	get_pgd		spc,ptp
	space_check	spc,t0,dtlb_fault

	L2_ptep		ptp,pte,t0,va,dtlb_check_alias_20

	update_ptep	ptp,pte,t0,t1

	make_insert_tlb	spc,pte,prot

	f_extend	pte,t0

	idtlbt		pte,prot

	rfir
	nop

dtlb_check_alias_20:
	do_alias	spc,t0,t1,va,pte,prot,dtlb_fault

	idtlbt		pte,prot

	rfir
	nop

	/* Non-access data TLB miss, PA 2.0 narrow kernel. */
nadtlb_miss_20:
	get_pgd		spc,ptp

	space_check	spc,t0,nadtlb_fault

	L2_ptep		ptp,pte,t0,va,nadtlb_check_flush_20

	update_ptep	ptp,pte,t0,t1

	make_insert_tlb	spc,pte,prot

	f_extend	pte,t0

	idtlbt		pte,prot

	rfir
	nop

nadtlb_check_flush_20:
	/* No FLUSH bit set: nothing to insert, emulate the insn. */
	bb,>=,n		pte,_PAGE_FLUSH_BIT,nadtlb_emulate

	/* Insert a "flush only" translation */

	depdi,z		7,7,3,prot
	depdi		1,10,1,prot

	/* Get rid of prot bits and convert to page addr for idtlbt */

	depdi		0,63,12,pte
	extrd,u		pte,56,32,pte
	idtlbt		pte,prot

	rfir
	nop
#endif

nadtlb_emulate:

	/*
	 * Non access misses can be caused by fdc,fic,pdc,lpa,probe and
	 * probei instructions. We don't want to fault for these
	 * instructions (not only does it not make sense, it can cause
	 * deadlocks, since some flushes are done with the mmap
	 * semaphore held). If the translation doesn't exist, we can't
	 * insert a translation, so have to emulate the side effects
	 * of the instruction. Since we don't insert a translation
	 * we can get a lot of faults during a flush loop, so it makes
	 * sense to try to do it here with minimum overhead. We only
	 * emulate fdc,fic,pdc,probew,prober instructions whose base
	 * and index registers are not shadowed. We defer everything
	 * else to the "slow" path.
	 */

	mfctl		%cr19,%r9 /* Get iir */

	/* PA 2.0 Arch Ref. Book pg 382 has a good description of the insn bits.
	   Checks for fdc,fdce,pdc,"fic,4f",prober,probeir,probew, probeiw */

	/* Checks for fdc,fdce,pdc,"fic,4f" only */
	ldi		0x280,%r16
	and		%r9,%r16,%r17
	cmpb,<>,n	%r16,%r17,nadtlb_probe_check
	bb,>=,n		%r9,26,nadtlb_nullify  /* m bit not set, just nullify */

	/* Base-modify form: compute base+index and write it back via
	 * the get_register/set_register helpers (r1 == -1 means the
	 * register is shadowed and we must take the slow path). */
	BL		get_register,%r25
	extrw,u		%r9,15,5,%r8	       /* Get index register # */
	CMPIB=,n	-1,%r1,nadtlb_fault    /* have to use slow path */
	copy		%r1,%r24
	BL		get_register,%r25
	extrw,u		%r9,10,5,%r8	       /* Get base register # */
	CMPIB=,n	-1,%r1,nadtlb_fault    /* have to use slow path */
	BL		set_register,%r25
	add,l		%r1,%r24,%r1	       /* doesn't affect c/b bits */

nadtlb_nullify:
	/* Set the N (nullify) bit in the interrupted PSW so the
	 * faulting instruction is skipped on rfir. */
	mfctl		%cr22,%r8	       /* Get ipsw */
	ldil		L%PSW_N,%r9
	or		%r8,%r9,%r8	       /* Set PSW_N */
	mtctl		%r8,%cr22

	rfir
	nop

	/*
	   When there is no translation for the probe address then we
	   must nullify the insn and return zero in the target register.
	   This will indicate to the calling code that it does not have
	   write/read privileges to this address.

	   This should technically work for prober and probew in PA 1.1,
	   and also probe,r and probe,w in PA 2.0

	   WARNING: USE ONLY NON-SHADOW REGISTERS WITH PROBE INSN!
	   THE SLOW-PATH EMULATION HAS NOT BEEN WRITTEN YET.

	 */
nadtlb_probe_check:
	ldi		0x80,%r16
	and		%r9,%r16,%r17
	cmpb,<>,n	%r16,%r17,nadtlb_fault /* Must be probe,[rw] */
	BL		get_register,%r25      /* Find the target register */
	extrw,u		%r9,31,5,%r8	       /* Get target register */
	CMPIB=,n	-1,%r1,nadtlb_fault    /* have to use slow path */
	BL		set_register,%r25
	copy		%r0,%r1		       /* Write zero to target register */
	b		nadtlb_nullify	       /* Nullify return insn */
	nop


#ifdef __LP64__
itlb_miss_20w:

	/*
	 * I miss is a little different, since we allow users to fault
	 * on the gateway page which is in the kernel address space.
	 */

	space_adjust	spc,va,t0
	get_pgd		spc,ptp
	space_check	spc,t0,itlb_fault

	L3_ptep		ptp,pte,t0,va,itlb_fault

	update_ptep	ptp,pte,t0,t1

	make_insert_tlb	spc,pte,prot

	iitlbt		pte,prot

	rfir
	nop

#else

	/* Instruction TLB miss, PA 1.1: separate iitlba/iitlbp inserts
	 * through %sr1 (saved/restored around the insert). */
itlb_miss_11:
	get_pgd		spc,ptp

	space_check	spc,t0,itlb_fault

	L2_ptep		ptp,pte,t0,va,itlb_fault

	update_ptep	ptp,pte,t0,t1

	make_insert_tlb_11	spc,pte,prot

	mfsp		%sr1,t0		/* Save sr1 so we can use it in tlb inserts */
	mtsp		spc,%sr1

	iitlba		pte,(%sr1,va)
	iitlbp		prot,(%sr1,va)

	mtsp		t0, %sr1	/* Restore sr1 */

	rfir
	nop

	/* Instruction TLB miss, PA 2.0 narrow kernel. */
itlb_miss_20:
	get_pgd		spc,ptp

	space_check	spc,t0,itlb_fault

	L2_ptep		ptp,pte,t0,va,itlb_fault

	update_ptep	ptp,pte,t0,t1

	make_insert_tlb	spc,pte,prot

	f_extend	pte,t0

	iitlbt		pte,prot

	rfir
	nop

#endif

#ifdef __LP64__

	/* Dirty-bit trap, wide kernel: mark the pte dirty and re-insert
	 * the translation.  On SMP, pa_dbit_lock (an ldcw spinlock) is
	 * taken for user-space faults (spc != 0) to serialize the
	 * read-modify-write of the pte. */
dbit_trap_20w:
	space_adjust	spc,va,t0
	get_pgd		spc,ptp
	space_check	spc,t0,dbit_fault

	L3_ptep		ptp,pte,t0,va,dbit_fault

#ifdef CONFIG_SMP
	CMPIB=,n	0,spc,dbit_nolock_20w
	load32		PA(pa_dbit_lock),t0

dbit_spin_20w:
	ldcw		0(t0),t1
	cmpib,=		0,t1,dbit_spin_20w
	nop

dbit_nolock_20w:
#endif
	update_dirty	ptp,pte,t1

	make_insert_tlb	spc,pte,prot

	idtlbt		pte,prot
#ifdef CONFIG_SMP
	/* Release pa_dbit_lock (store of 1 unlocks an ldcw lock). */
	CMPIB=,n	0,spc,dbit_nounlock_20w
	ldi		1,t1
	stw		t1,0(t0)

dbit_nounlock_20w:
#endif

	rfir
	nop
#else

	/* Dirty-bit trap, PA 1.1. */
dbit_trap_11:

	get_pgd		spc,ptp

	space_check	spc,t0,dbit_fault

	L2_ptep		ptp,pte,t0,va,dbit_fault

#ifdef CONFIG_SMP
	CMPIB=,n	0,spc,dbit_nolock_11
	load32		PA(pa_dbit_lock),t0

dbit_spin_11:
	ldcw		0(t0),t1
	cmpib,=		0,t1,dbit_spin_11
	nop

dbit_nolock_11:
#endif
	update_dirty	ptp,pte,t1

	make_insert_tlb_11	spc,pte,prot

	mfsp		%sr1,t1		/* Save sr1 so we can use it in tlb inserts */
	mtsp		spc,%sr1

	idtlba		pte,(%sr1,va)
	idtlbp		prot,(%sr1,va)

	mtsp		t1, %sr1	/* Restore sr1 */
#ifdef CONFIG_SMP
	CMPIB=,n	0,spc,dbit_nounlock_11
	ldi		1,t1
	stw		t1,0(t0)

dbit_nounlock_11:
#endif

	rfir
	nop

	/* Dirty-bit trap, PA 2.0 narrow kernel. */
dbit_trap_20:
	get_pgd		spc,ptp

	space_check	spc,t0,dbit_fault

	L2_ptep		ptp,pte,t0,va,dbit_fault

#ifdef CONFIG_SMP
	CMPIB=,n	0,spc,dbit_nolock_20
	load32		PA(pa_dbit_lock),t0

dbit_spin_20:
	ldcw		0(t0),t1
	cmpib,=		0,t1,dbit_spin_20
	nop

dbit_nolock_20:
#endif
	update_dirty	ptp,pte,t1

	make_insert_tlb	spc,pte,prot

	f_extend	pte,t1

	idtlbt		pte,prot

#ifdef CONFIG_SMP
	CMPIB=,n	0,spc,dbit_nounlock_20
	ldi		1,t1
	stw		t1,0(t0)

dbit_nounlock_20:
#endif

	rfir
	nop
#endif

	.import handle_interruption,code

	/* Slow-path fault stubs: branch to intr_save with the
	 * interruption code number in %r8 (the branch delay slot
	 * loads the code). */
kernel_bad_space:
	b		intr_save
	ldi		31,%r8	/* Use an unused code */

dbit_fault:
	b		intr_save
	ldi		20,%r8

itlb_fault:
	b		intr_save
	ldi		6,%r8

nadtlb_fault:
	b		intr_save
	ldi		17,%r8

dtlb_fault:
	b		intr_save
	ldi		15,%r8

	/* Register saving semantics for system calls:

	   %r1	     clobbered by system call macro in userspace
	   %r2	     saved in PT_REGS by gateway page
	   %r3  - %r18	 preserved by C code (saved by signal code)
	   %r19 - %r20	 saved in PT_REGS by gateway page
	   %r21 - %r22	 non-standard syscall args
			 stored in kernel stack by gateway page
	   %r23 - %r26	 arg3-arg0, saved in PT_REGS by gateway page
	   %r27 - %r30	 saved in PT_REGS by gateway page
	   %r31	     syscall return pointer
	 */

	/* Floating point registers (FIXME: what do we do with these?

	   %fr0  - %fr3   status/exception, not preserved
	   %fr4  - %fr7   arguments
	   %fr8  - %fr11  not preserved by C code
	   %fr12 - %fr21  preserved by C code
	   %fr22 - %fr31  not preserved by C code
	 */

	/* Save the C-callee-saved registers %r3-%r18 into the pt_regs
	 * area pointed to by \regs (used before calling into C code
	 * that may need a full register snapshot, e.g. fork/signal). */
	.macro	reg_save regs
	STREG	%r3, PT_GR3(\regs)
	STREG	%r4, PT_GR4(\regs)
	STREG	%r5, PT_GR5(\regs)
	STREG	%r6, PT_GR6(\regs)
	STREG	%r7, PT_GR7(\regs)
	STREG	%r8, PT_GR8(\regs)
	STREG	%r9, PT_GR9(\regs)
	STREG	%r10,PT_GR10(\regs)
	STREG	%r11,PT_GR11(\regs)
	STREG	%r12,PT_GR12(\regs)
	STREG	%r13,PT_GR13(\regs)
	STREG	%r14,PT_GR14(\regs)
	STREG	%r15,PT_GR15(\regs)
	STREG	%r16,PT_GR16(\regs)
	STREG	%r17,PT_GR17(\regs)
	STREG	%r18,PT_GR18(\regs)
	.endm

	/* Inverse of reg_save: reload %r3-%r18 from pt_regs at \regs. */
	.macro	reg_restore regs
	LDREG	PT_GR3(\regs), %r3
	LDREG	PT_GR4(\regs), %r4
	LDREG	PT_GR5(\regs), %r5
	LDREG	PT_GR6(\regs), %r6
	LDREG	PT_GR7(\regs), %r7
	LDREG	PT_GR8(\regs), %r8
	LDREG	PT_GR9(\regs), %r9
	LDREG	PT_GR10(\regs),%r10
	LDREG	PT_GR11(\regs),%r11
	LDREG	PT_GR12(\regs),%r12
	LDREG	PT_GR13(\regs),%r13
	LDREG	PT_GR14(\regs),%r14
	LDREG	PT_GR15(\regs),%r15
	LDREG	PT_GR16(\regs),%r16
	LDREG	PT_GR17(\regs),%r17
	LDREG	PT_GR18(\regs),%r18
	.endm

	.export sys_fork_wrapper
	.export child_return
	/* fork() entry: snapshot callee-saved regs and %cr27 into the
	 * task's pt_regs, then call sys_clone(SIGCHLD, user_sp, regs). */
sys_fork_wrapper:
	LDREG	TI_TASK-THREAD_SZ_ALGN-FRAME_SIZE(%r30), %r1
	ldo	TASK_REGS(%r1),%r1
	reg_save %r1
	mfctl	%cr27, %r3
	STREG	%r3, PT_CR27(%r1)

	STREG	%r2,-RP_OFFSET(%r30)
	ldo	FRAME_SIZE(%r30),%r30
#ifdef __LP64__
	ldo	-16(%r30),%r29		/* Reference param save area */
#endif

	/* These are call-clobbered registers and therefore
	   also syscall-clobbered (we hope).
	 */

	STREG	%r2,PT_GR19(%r1)	/* save for child */
	STREG	%r30,PT_GR21(%r1)

	LDREG	PT_GR30(%r1),%r25
	copy	%r1,%r24
	BL	sys_clone,%r2
	ldi	SIGCHLD,%r26		/* delay slot: arg0 = SIGCHLD */

	LDREG	-RP_OFFSET-FRAME_SIZE(%r30),%r2
	/* Common exit for the fork/clone/vfork wrappers and the child:
	 * pop the frame, restore %cr27 and r3-r18 from pt_regs, and
	 * return via the saved rp with the syscall # preserved in r20
	 * (strace relies on that). */
wrapper_exit:
	ldo	-FRAME_SIZE(%r30),%r30		/* get the stackframe */
	LDREG	TI_TASK-THREAD_SZ_ALGN-FRAME_SIZE(%r30),%r1
	ldo	TASK_REGS(%r1),%r1	/* get pt regs */

	LDREG	PT_CR27(%r1), %r3
	mtctl	%r3, %cr27
	reg_restore %r1

	/* strace expects syscall # to be preserved in r20 */
	ldi	__NR_fork,%r20
	bv	%r0(%r2)
	STREG	%r20,PT_GR20(%r1)

	/* Set the return value for the child */
child_return:
	BL	schedule_tail, %r2
	nop

	LDREG	TI_TASK-THREAD_SZ_ALGN-FRAME_SIZE-FRAME_SIZE(%r30), %r1
	LDREG	TASK_PT_GR19(%r1),%r2
	b	wrapper_exit
	copy	%r0,%r28	/* delay slot: child returns 0 */


	.export sys_clone_wrapper
	/* clone() entry: like fork, but the user-supplied clone flags
	 * arrive in the gateway-saved registers; pt_regs ptr in r24. */
sys_clone_wrapper:
	LDREG	TI_TASK-THREAD_SZ_ALGN-FRAME_SIZE(%r30),%r1
	ldo	TASK_REGS(%r1),%r1	/* get pt regs */
	reg_save %r1
	mfctl	%cr27, %r3
	STREG	%r3, PT_CR27(%r1)

	STREG	%r2,-RP_OFFSET(%r30)
	ldo	FRAME_SIZE(%r30),%r30
#ifdef __LP64__
	ldo	-16(%r30),%r29		/* Reference param save area */
#endif

	STREG	%r2,PT_GR19(%r1)	/* save for child */
	STREG	%r30,PT_GR21(%r1)
	BL	sys_clone,%r2
	copy	%r1,%r24

	b	wrapper_exit
	LDREG	-RP_OFFSET-FRAME_SIZE(%r30),%r2

	.export sys_vfork_wrapper
	/* vfork() entry: pt_regs pointer is passed as arg0 to sys_vfork. */
sys_vfork_wrapper:
	LDREG	TI_TASK-THREAD_SZ_ALGN-FRAME_SIZE(%r30),%r1
	ldo	TASK_REGS(%r1),%r1	/* get pt regs */
	reg_save %r1
	mfctl	%cr27, %r3
	STREG	%r3, PT_CR27(%r1)

	STREG	%r2,-RP_OFFSET(%r30)
	ldo	FRAME_SIZE(%r30),%r30
#ifdef __LP64__
	ldo	-16(%r30),%r29		/* Reference param save area */
#endif

	STREG	%r2,PT_GR19(%r1)	/* save for child */
	STREG	%r30,PT_GR21(%r1)

	BL	sys_vfork,%r2
	copy	%r1,%r26

	b	wrapper_exit
	LDREG	-RP_OFFSET-FRAME_SIZE(%r30),%r2


	/* Shared body for the execve wrappers: call \execve with the
	 * task's pt_regs as arg0, then branch to the saved rp only if
	 * the return value is not an errno (unsigned compare against
	 * -1024); on success the new register state is already loaded. */
	.macro	execve_wrapper execve
	LDREG	TI_TASK-THREAD_SZ_ALGN-FRAME_SIZE(%r30),%r1
	ldo	TASK_REGS(%r1),%r1	/* get pt regs */

	/*
	 * Do we need to save/restore r3-r18 here?
	 * I don't think so. why would new thread need old
	 * thread's registers?
	 */

	/* %arg0 - %arg3 are already saved for us. */

	STREG	%r2,-RP_OFFSET(%r30)
	ldo	FRAME_SIZE(%r30),%r30
#ifdef __LP64__
	ldo	-16(%r30),%r29		/* Reference param save area */
#endif
	bl	\execve,%r2
	copy	%r1,%arg0

	ldo	-FRAME_SIZE(%r30),%r30
	LDREG	-RP_OFFSET(%r30),%r2

	/* If exec succeeded we need to load the args */

	ldo	-1024(%r0),%r1
	cmpb,>>= %r28,%r1,error_\execve
	copy	%r2,%r19

error_\execve:
	bv	%r0(%r19)
	nop
	.endm

	.export sys_execve_wrapper
	.import sys_execve

sys_execve_wrapper:
	execve_wrapper sys_execve

#ifdef __LP64__
	.export sys32_execve_wrapper
	.import sys32_execve

sys32_execve_wrapper:
	execve_wrapper sys32_execve
#endif

	.export sys_rt_sigreturn_wrapper
	/* rt_sigreturn(): pass pt_regs to the C handler, which rewrites
	 * them from the user sigcontext; afterwards we reload r3-r18
	 * from pt_regs rather than saving anything beforehand. */
sys_rt_sigreturn_wrapper:
	LDREG	TI_TASK-THREAD_SZ_ALGN-FRAME_SIZE(%r30),%r26
	ldo	TASK_REGS(%r26),%r26	/* get pt regs */
	/* Don't save regs, we are going to restore them from sigcontext. */
	STREG	%r2, -RP_OFFSET(%r30)
#ifdef __LP64__
	ldo	FRAME_SIZE(%r30), %r30
	BL	sys_rt_sigreturn,%r2
	ldo	-16(%r30),%r29		/* Reference param save area */
#else
	BL	sys_rt_sigreturn,%r2
	ldo	FRAME_SIZE(%r30), %r30
#endif

	ldo	-FRAME_SIZE(%r30), %r30
	LDREG	-RP_OFFSET(%r30), %r2

	/* FIXME: I think we need to restore a few more things here.
	 */
	LDREG	TI_TASK-THREAD_SZ_ALGN-FRAME_SIZE(%r30),%r1
	ldo	TASK_REGS(%r1),%r1	/* get pt regs */
	reg_restore %r1

	/* If the signal was received while the process was blocked on a
	 * syscall, then r2 will take us to syscall_exit; otherwise r2 will
	 * take us to syscall_exit_rfi and on to intr_return.
	 */
	bv	%r0(%r2)
	LDREG	PT_GR28(%r1),%r28	/* reload original r28 for syscall_exit */

	.export sys_sigaltstack_wrapper
	/* sigaltstack(): C handler additionally needs the user stack
	 * pointer, fetched from the task's saved pt_regs gr30. */
sys_sigaltstack_wrapper:
	/* Get the user stack pointer */
	LDREG	TI_TASK-THREAD_SZ_ALGN-FRAME_SIZE(%r30),%r1
	ldo	TASK_REGS(%r1),%r24	/* get pt regs */
	LDREG	TASK_PT_GR30(%r24),%r24
	STREG	%r2, -RP_OFFSET(%r30)
#ifdef __LP64__
	ldo	FRAME_SIZE(%r30), %r30
	b,l	do_sigaltstack,%r2
	ldo	-16(%r30),%r29		/* Reference param save area */
#else
	bl	do_sigaltstack,%r2
	ldo	FRAME_SIZE(%r30), %r30
#endif

	ldo	-FRAME_SIZE(%r30), %r30
	LDREG	-RP_OFFSET(%r30), %r2
	bv	%r0(%r2)
	nop

#ifdef __LP64__
	.export sys32_sigaltstack_wrapper
	/* 32-bit-compat sigaltstack on a wide kernel. */
sys32_sigaltstack_wrapper:
	/* Get the user stack pointer */
	LDREG	TI_TASK-THREAD_SZ_ALGN-FRAME_SIZE(%r30),%r24
	LDREG	TASK_PT_GR30(%r24),%r24
	STREG	%r2, -RP_OFFSET(%r30)
	ldo	FRAME_SIZE(%r30), %r30
	b,l	do_sigaltstack32,%r2
	ldo	-16(%r30),%r29		/* Reference param save area */

	ldo	-FRAME_SIZE(%r30), %r30
	LDREG	-RP_OFFSET(%r30), %r2
	bv	%r0(%r2)
	nop
#endif

	.export sys_rt_sigsuspend_wrapper
	/* rt_sigsuspend(): callee-saved regs are saved to (and restored
	 * from) pt_regs around the C call so a delivered signal sees a
	 * consistent register snapshot. */
sys_rt_sigsuspend_wrapper:
	LDREG	TI_TASK-THREAD_SZ_ALGN-FRAME_SIZE(%r30), %r1
	ldo	TASK_REGS(%r1),%r24
	reg_save %r24

	STREG	%r2, -RP_OFFSET(%r30)
#ifdef __LP64__
	ldo	FRAME_SIZE(%r30), %r30
	b,l	sys_rt_sigsuspend,%r2
	ldo	-16(%r30),%r29		/* Reference param save area */
#else
	bl	sys_rt_sigsuspend,%r2
	ldo	FRAME_SIZE(%r30), %r30
#endif

	ldo	-FRAME_SIZE(%r30), %r30
	LDREG	-RP_OFFSET(%r30), %r2

	LDREG	TI_TASK-THREAD_SZ_ALGN-FRAME_SIZE(%r30), %r1
	ldo	TASK_REGS(%r1),%r1
	reg_restore %r1

	bv	%r0(%r2)
	nop

	.export syscall_exit
syscall_exit:

	/* NOTE: HP-UX syscalls also come through here
	 * after hpux_syscall_exit fixes up return
	 * values. */

	/* NOTE: Not all syscalls exit this way.  rt_sigreturn will exit
	 * via syscall_exit_rfi if the signal was received while the process
	 * was running.
	 */

	/* save return value now */

	mfctl	%cr30, %r1
	LDREG	TI_TASK(%r1),%r1
	STREG	%r28,TASK_PT_GR28(%r1)

#ifdef CONFIG_HPUX

/* <linux/personality.h> cannot be easily included */
#define PER_HPUX 0x10
	LDREG	TASK_PERSONALITY(%r1),%r19

	/* We can't use "CMPIB<> PER_HPUX" since "im5" field is sign extended */
	ldo	-PER_HPUX(%r19), %r19
	CMPIB<>,n 0,%r19,1f

	/* Save other hpux returns if personality is PER_HPUX */
	STREG	%r22,TASK_PT_GR22(%r1)
	STREG	%r29,TASK_PT_GR29(%r1)
1:

#endif /* CONFIG_HPUX */

	/* Seems to me that dp could be wrong here, if the syscall involved
	 * calling a module, and nothing got round to restoring dp on return.
	 */
	loadgp

syscall_check_bh:

	/* Check for software interrupts */

	.import irq_stat,data

	load32	irq_stat,%r19

#ifdef CONFIG_SMP
	/* sched.h: int processor */
	/* %r26 is used as scratch register to index into irq_stat[] */
	ldw	TI_CPU-THREAD_SZ_ALGN-FRAME_SIZE(%r30),%r26 /* cpu # */

	/* shift left ____cacheline_aligned (aka L1_CACHE_BYTES) bits */
#ifdef __LP64__
	shld	%r26, 6, %r20
#else
	shlw	%r26, 5, %r20
#endif
	add	%r19,%r20,%r19	/* now have &irq_stat[smp_processor_id()] */
#endif /* CONFIG_SMP */

	LDREG	IRQSTAT_SIRQ_PEND(%r19),%r20	/* hardirq.h: unsigned long */
	cmpib,<>,n 0,%r20,syscall_do_softirq /* forward */

syscall_check_resched:

	/* check for reschedule */

	LDREG	TI_FLAGS-THREAD_SZ_ALGN-FRAME_SIZE(%r30),%r19	/* long */
	bb,<,n	%r19, 31-TIF_NEED_RESCHED, syscall_do_resched /* forward */

syscall_check_sig:
	LDREG	TI_FLAGS-THREAD_SZ_ALGN-FRAME_SIZE(%r30),%r19	/* get ti flags */
	bb,<,n	%r19, 31-TIF_SIGPENDING, syscall_do_signal /* forward */

	/* Fast path back to userspace: restore the user register state
	 * from the task's pt_regs and branch back through the syscall
	 * return pointer.  Ptraced tasks must instead leave via an RFI
	 * (syscall_restore_rfi) so the PSW trace bits can be set. */
syscall_restore:
	/* Are we being ptraced? */
	LDREG	TI_TASK-THREAD_SZ_ALGN-FRAME_SIZE(%r30),%r1

	LDREG	TASK_PTRACE(%r1), %r19
	bb,<	%r19,31,syscall_restore_rfi
	nop

	ldo	TASK_PT_FR31(%r1),%r19		/* reload fpregs */
	rest_fp	%r19

	LDREG	TASK_PT_SAR(%r1),%r19		/* restore SAR */
	mtsar	%r19

	LDREG	TASK_PT_GR2(%r1),%r2		/* restore user rp */
	LDREG	TASK_PT_GR19(%r1),%r19
	LDREG	TASK_PT_GR20(%r1),%r20
	LDREG	TASK_PT_GR21(%r1),%r21
	LDREG	TASK_PT_GR22(%r1),%r22
	LDREG	TASK_PT_GR23(%r1),%r23
	LDREG	TASK_PT_GR24(%r1),%r24
	LDREG	TASK_PT_GR25(%r1),%r25
	LDREG	TASK_PT_GR26(%r1),%r26
	LDREG	TASK_PT_GR27(%r1),%r27		/* restore user dp */
	LDREG	TASK_PT_GR28(%r1),%r28		/* syscall return value */
	LDREG	TASK_PT_GR29(%r1),%r29
	LDREG	TASK_PT_GR31(%r1),%r31		/* restore syscall rp */

	/* NOTE: We use rsm/ssm pair to make this operation atomic */
	rsm	PSW_SM_I, %r0
	LDREG	TASK_PT_GR30(%r1),%r30		/* restore user sp */
	mfsp	%sr3,%r1			/* Get user's space id */
	mtsp	%r1,%sr7			/* Restore sr7 */
	ssm	PSW_SM_I, %r0

	/* Set sr2 to zero for userspace syscalls to work. */
	mtsp	%r0,%sr2
	mtsp	%r1,%sr4			/* Restore sr4 */
	mtsp	%r1,%sr5			/* Restore sr5 */
	mtsp	%r1,%sr6			/* Restore sr6 */

	depi	3,31,2,%r31			/* ensure return to user mode. */

#ifdef __LP64__
	/* decide whether to reset the wide mode bit
	 *
	 * For a syscall, the W bit is stored in the lowest bit
	 * of sp.  Extract it and reset W if it is zero */
	extrd,u,*<>	%r30,63,1,%r1
	rsm	PSW_SM_W, %r0
	/* now reset the lowest bit of sp if it was set */
	xor	%r30,%r1,%r30
#endif
	be,n	0(%sr3,%r31)			/* return to user space */

	/* We have to return via an RFI, so that PSW T and R bits can be set
	 * appropriately.
	 * This sets up pt_regs so we can return via intr_restore, which is not
	 * the most efficient way of doing things, but it works.
	 */
syscall_restore_rfi:
	ldo	-1(%r0),%r2			/* Set recovery cntr to -1 */
	mtctl	%r2,%cr0			/* for immediate trap */
	LDREG	TASK_PT_PSW(%r1),%r2		/* Get old PSW */
	ldi	0x0b,%r20			/* Create new PSW */
	depi	-1,13,1,%r20			/* C, Q, D, and I bits */

	/* The values of PA_SINGLESTEP_BIT and PA_BLOCKSTEP_BIT are
	 * set in include/linux/ptrace.h and converted to PA bitmap
	 * numbers in asm-offsets.c */

	/* if ((%r19.PA_SINGLESTEP_BIT)) { %r20.27=1} */
	extru,=	%r19,PA_SINGLESTEP_BIT,1,%r0
	depi	-1,27,1,%r20			/* R bit */

	/* if ((%r19.PA_BLOCKSTEP_BIT)) { %r20.7=1} */
	extru,=	%r19,PA_BLOCKSTEP_BIT,1,%r0
	depi	-1,7,1,%r20			/* T bit */

	STREG	%r20,TASK_PT_PSW(%r1)

	/* Always store space registers, since sr3 can be changed (e.g. fork) */

	mfsp	%sr3,%r25
	STREG	%r25,TASK_PT_SR3(%r1)
	STREG	%r25,TASK_PT_SR4(%r1)
	STREG	%r25,TASK_PT_SR5(%r1)
	STREG	%r25,TASK_PT_SR6(%r1)
	STREG	%r25,TASK_PT_SR7(%r1)
	STREG	%r25,TASK_PT_IASQ0(%r1)
	STREG	%r25,TASK_PT_IASQ1(%r1)

	/* XXX W bit??? */
	/* Now if old D bit is clear, it means we didn't save all registers
	 * on syscall entry, so do that now.  This only happens on TRACEME
	 * calls, or if someone attached to us while we were on a syscall.
	 * We could make this more efficient by not saving r3-r18, but
	 * then we wouldn't be able to use the common intr_restore path.
	 * It is only for traced processes anyway, so performance is not
	 * an issue.
	 */
	bb,<	%r2,30,pt_regs_ok		/* Branch if D set */
	ldo	TASK_REGS(%r1),%r25
	reg_save %r25				/* Save r3 to r18 */

	/* Save the current sr */
	mfsp	%sr0,%r2
	STREG	%r2,TASK_PT_SR0(%r1)

	/* Save the scratch sr */
	mfsp	%sr1,%r2
	STREG	%r2,TASK_PT_SR1(%r1)

	/* sr2 should be set to zero for userspace syscalls */
	STREG	%r0,TASK_PT_SR2(%r1)

	/* Set the return address (IAOQ head/tail) to the instruction
	 * after the syscall, forced to user privilege, then leave via
	 * the common intr_restore path. */
pt_regs_ok:
	LDREG	TASK_PT_GR31(%r1),%r2
	depi	3,31,2,%r2			/* ensure return to user mode. */
	STREG	%r2,TASK_PT_IAOQ0(%r1)
	ldo	4(%r2),%r2
	STREG	%r2,TASK_PT_IAOQ1(%r1)
	copy	%r25,%r16
	b	intr_restore
	nop

	.import do_softirq,code
syscall_do_softirq:
	bl	do_softirq,%r2
	nop
	/* NOTE: We enable I-bit incase we schedule later,
	 * and we might be going back to userspace if we were
	 * traced. */
	b	syscall_check_resched
	ssm	PSW_SM_I, %r0	/* do_softirq returns with I bit off */

	.import schedule,code
syscall_do_resched:
	BL	schedule,%r2
#ifdef __LP64__
	ldo	-16(%r30),%r29		/* Reference param save area */
#else
	nop
#endif
	b	syscall_check_bh	/* if resched, we start over again */
	nop

	.import do_signal,code
syscall_do_signal:
	/* Save callee-save registers (for sigcontext).
	   FIXME: After this point the process structure should be
	   consistent with all the relevant state of the process
	   before the syscall.  We need to verify this.
	 */
	LDREG	TI_TASK-THREAD_SZ_ALGN-FRAME_SIZE(%r30),%r1
	ldo	TASK_REGS(%r1), %r25		/* struct pt_regs *regs */
	reg_save %r25

	ldi	1, %r24				/* unsigned long in_syscall */

#ifdef __LP64__
	ldo	-16(%r30),%r29			/* Reference param save area */
#endif
	BL	do_signal,%r2
	copy	%r0, %r26			/* sigset_t *oldset = NULL */

	LDREG	TI_TASK-THREAD_SZ_ALGN-FRAME_SIZE(%r30),%r1
	ldo	TASK_REGS(%r1), %r20		/* reload pt_regs */
	reg_restore %r20

	b,n	syscall_check_sig

	/*
	 * get_register is used by the non access tlb miss handlers to
	 * copy the value of the general register specified in r8 into
	 * r1. This routine can't be used for shadowed registers, since
	 * the rfir will restore the original value. So, for the shadowed
	 * registers we put a -1 into r1 to indicate that the register
	 * should not be used (the register being copied could also have
	 * a -1 in it, but that is OK, it just means that we will have
	 * to use the slow path instead).
	 */

	/* Branch table: blr indexes into the table by register number
	 * in %r8 (each entry is a bv + delay-slot copy pair); return
	 * address is in %r25.  Shadowed registers yield -1. */
get_register:
	blr	%r8,%r0
	nop
	bv	%r0(%r25)	/* r0 */
	copy	%r0,%r1
	bv	%r0(%r25)	/* r1 - shadowed */
	ldi	-1,%r1
	bv	%r0(%r25)	/* r2 */
	copy	%r2,%r1
	bv	%r0(%r25)	/* r3 */
	copy	%r3,%r1
	bv	%r0(%r25)	/* r4 */
	copy	%r4,%r1
	bv	%r0(%r25)	/* r5 */
	copy	%r5,%r1
	bv	%r0(%r25)	/* r6 */
	copy	%r6,%r1
	bv	%r0(%r25)	/* r7 */
	copy	%r7,%r1
	bv	%r0(%r25)	/* r8 - shadowed */
	ldi	-1,%r1
	bv	%r0(%r25)	/* r9 - shadowed */
	ldi	-1,%r1
	bv	%r0(%r25)	/* r10 */
	copy	%r10,%r1
	bv	%r0(%r25)	/* r11 */
	copy	%r11,%r1
	bv	%r0(%r25)	/* r12 */
	copy	%r12,%r1
	bv	%r0(%r25)	/* r13 */
	copy	%r13,%r1
	bv	%r0(%r25)	/* r14 */
	copy	%r14,%r1
	bv	%r0(%r25)	/* r15 */
	copy	%r15,%r1
	bv	%r0(%r25)	/* r16 - shadowed */
	ldi	-1,%r1
	bv	%r0(%r25)	/* r17 - shadowed */
	ldi	-1,%r1
	bv	%r0(%r25)	/* r18 */
	copy	%r18,%r1
	bv	%r0(%r25)	/* r19 */
	copy	%r19,%r1
	bv	%r0(%r25)	/* r20 */
	copy	%r20,%r1
	bv	%r0(%r25)	/* r21 */
	copy	%r21,%r1
	bv	%r0(%r25)	/* r22 */
	copy	%r22,%r1
	bv	%r0(%r25)	/* r23 */
	copy	%r23,%r1
	bv	%r0(%r25)	/* r24 - shadowed */
	ldi	-1,%r1
	bv	%r0(%r25)	/* r25 - shadowed */
	ldi	-1,%r1
	bv	%r0(%r25)	/* r26 */
	copy	%r26,%r1
	bv	%r0(%r25)	/* r27 */
	copy	%r27,%r1
	bv	%r0(%r25)	/* r28 */
	copy	%r28,%r1
	bv	%r0(%r25)	/* r29 */
	copy	%r29,%r1
	bv	%r0(%r25)	/* r30 */
	copy	%r30,%r1
	bv	%r0(%r25)	/* r31 */
	copy	%r31,%r1

	/*
	 * set_register is used by the non access tlb miss handlers to
	 * copy the value of r1 into the general register specified in
	 * r8.
	 */

	/* Branch table, same dispatch scheme as get_register: writes
	 * %r1 into the register numbered by %r8 and returns via %r25. */
set_register:
	blr	%r8,%r0
	nop
	bv	%r0(%r25)	/* r0 (silly, but it is a place holder) */
	copy	%r1,%r0
	bv	%r0(%r25)	/* r1 */
	copy	%r1,%r1
	bv	%r0(%r25)	/* r2 */
	copy	%r1,%r2
	bv	%r0(%r25)	/* r3 */
	copy	%r1,%r3
	bv	%r0(%r25)	/* r4 */
	copy	%r1,%r4
	bv	%r0(%r25)	/* r5 */
	copy	%r1,%r5
	bv	%r0(%r25)	/* r6 */
	copy	%r1,%r6
	bv	%r0(%r25)	/* r7 */
	copy	%r1,%r7
	bv	%r0(%r25)	/* r8 */
	copy	%r1,%r8
	bv	%r0(%r25)	/* r9 */
	copy	%r1,%r9
	bv	%r0(%r25)	/* r10 */
	copy	%r1,%r10
	bv	%r0(%r25)	/* r11 */
	copy	%r1,%r11
	bv	%r0(%r25)	/* r12 */
	copy	%r1,%r12
	bv	%r0(%r25)	/* r13 */
	copy	%r1,%r13
	bv	%r0(%r25)	/* r14 */
	copy	%r1,%r14
	bv	%r0(%r25)	/* r15 */
	copy	%r1,%r15
	bv	%r0(%r25)	/* r16 */
	copy	%r1,%r16
	bv	%r0(%r25)	/* r17 */
	copy	%r1,%r17
	bv	%r0(%r25)	/* r18 */
	copy	%r1,%r18
	bv	%r0(%r25)	/* r19 */
	copy	%r1,%r19
	bv	%r0(%r25)	/* r20 */
	copy	%r1,%r20
	bv	%r0(%r25)	/* r21 */
	copy	%r1,%r21
	bv	%r0(%r25)	/* r22 */
	copy	%r1,%r22
	bv	%r0(%r25)	/* r23 */
	copy	%r1,%r23
	bv	%r0(%r25)	/* r24 */
	copy	%r1,%r24
	bv	%r0(%r25)	/* r25 */
	copy	%r1,%r25
	bv	%r0(%r25)	/* r26 */
	copy	%r1,%r26
	bv	%r0(%r25)	/* r27 */
	copy	%r1,%r27
	bv	%r0(%r25)	/* r28 */
	copy	%r1,%r28
	bv	%r0(%r25)	/* r29 */
	copy	%r1,%r29
	bv	%r0(%r25)	/* r30 */
	copy	%r1,%r30
	bv	%r0(%r25)	/* r31 */
	copy	%r1,%r31