Lines Matching +full:inter +full:- +full:ic
1 /* SPDX-License-Identifier: GPL-2.0 */
7 * Copyright (C) 1998-2003, 2005 Hewlett-Packard Co
8 * David Mosberger-Tang <davidm@hpl.hp.com>
9 * Copyright (C) 1999, 2002-2003
46 #include <asm/asm-offsets.h>
75 sxt4 r8=r8 // return 64-bit result
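sxt4 sign-extends the low 32 bits of r8 into the full 64-bit register. In C terms this is the familiar double cast; a minimal sketch:

static long sign_extend_32(long r8)
{
	return (long)(int)r8;	/* bit 31 becomes the 64-bit sign bit */
}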
84 * security-sensitive state (e.g., if current->mm->dumpable is zero). However,
147 mov out2=16 // stacksize (compensates for 16-byte scratch area)
160 * prev_task <- ia64_switch_to(struct task_struct *next)
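The line above documents the calling convention: the context switch returns the task that was running previously. A minimal C sketch of a caller, assuming only the declared signature (the wrapper below is hypothetical, not the kernel's actual switch_to macro):

struct task_struct;	/* opaque here */
extern struct task_struct *ia64_switch_to(struct task_struct *next);

static void example_switch(struct task_struct *next, struct task_struct **last)
{
	/* prev_task comes back as the return value (in r8). */
	*last = ia64_switch_to(next);
}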
215 SSM_PSR_IC_AND_SRLZ_D(r8, r9) // re-enable the psr.ic bit
226 * specified at the call-site of save_switch_stack.
231 * - r16 holds ar.pfs
232 * - b7 holds address to return to
233 * - rp (b0) holds return address to save
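The SW() offsets used by the spills below index into struct switch_stack. An abridged sketch of that layout, in the order the offsets suggest (field list trimmed; treat this as illustrative, not the exact kernel definition):

struct switch_stack {
	unsigned long caller_unat;	/* NaT bits of caller's scratch regs */
	unsigned long ar_fpsr;
	/* f2..f31: preserved floating-point registers */
	unsigned long r4, r5, r6, r7;	/* preserved general registers */
	unsigned long b0, b1, b2, b3, b4, b5;
	unsigned long ar_pfs;
	unsigned long ar_lc;
	unsigned long ar_unat;		/* NaT bits for r4-r7 */
	unsigned long ar_rnat;
	unsigned long ar_bspstore;
	unsigned long pr;
};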
261 st8.spill [r14]=r4,SW(R6)-SW(R4) // spill r4 and prefetch offset 0x1c0
272 st8.spill [r15]=r5,SW(R7)-SW(R5) // spill r5
274 add r2=SW(F2)+16,sp // r2 = &sw->f2
276 st8.spill [r14]=r6,SW(B0)-SW(R6) // spill r6
278 add r3=SW(F3)+16,sp // r3 = &sw->f3
285 st8.spill [r15]=r7,SW(B2)-SW(R7) // spill r7
296 st8 [r14]=r21,SW(B1)-SW(B0) // save b0
297 st8 [r15]=r23,SW(B3)-SW(B2) // save b2
301 st8 [r14]=r22,SW(B4)-SW(B1) // save b1
302 st8 [r15]=r24,SW(AR_PFS)-SW(B3) // save b3
303 mov r21=ar.lc // I-unit
307 st8 [r14]=r25,SW(B5)-SW(B4) // save b4
308 st8 [r15]=r16,SW(AR_LC)-SW(AR_PFS) // save ar.pfs
335 stf.spill [r2]=f30,SW(AR_UNAT)-SW(F30)
336 stf.spill [r3]=f31,SW(PR)-SW(F31)
339 st8 [r2]=r29,SW(AR_RNAT)-SW(AR_UNAT) // save ar.unat
340 st8 [r14]=r17,SW(AR_FPSR)-SW(CALLER_UNAT) // save caller_unat
343 st8 [r2]=r19,SW(AR_BSPSTORE)-SW(AR_RNAT) // save ar.rnat
354 * - "invala" MUST be done at call site (normally in DO_LOAD_SWITCH_STACK)
355 * - b7 holds address to return to
356 * - must not touch r8-r11
370 ld8 r27=[r2],(SW(B0)-SW(AR_BSPSTORE)) // bspstore
371 ld8 r29=[r3],(SW(B1)-SW(AR_UNAT)) // unat
382 ld8 r16=[r2],(SW(PR)-SW(AR_PFS)) // ar.pfs
383 ld8 r17=[r3],(SW(AR_RNAT)-SW(AR_LC)) // ar.lc
419 mov ar.unat=r29 // establish unat holding the NaT bits for r4-r7
440 mov pr=r28,-1
461 * We need to preserve the scratch registers f6-f11 in case the system
480 (p6) br.cond.sptk strace_error // syscall failed ->
492 // the syscall number may have changed, so re-load it and re-calculate the
493 // syscall entry-point:
497 mov r3=NR_syscalls - 1
499 adds r15=-1024,r15
502 shladd r20=r15,3,r16 // r20 = sys_call_table + 8*(syscall-1024)
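In C, the entry-point calculation above is a bounds-checked table lookup: ia64 syscall numbers start at 1024 and each table slot is an 8-byte function pointer. A sketch, with the table type and the NR_syscalls value assumed for illustration:

typedef long (*syscall_fn_t)(long, long, long, long, long, long);

#define NR_SYSCALLS_SKETCH 1024			/* placeholder, not the real NR_syscalls */
extern syscall_fn_t sys_call_table[];
extern long sys_ni_syscall(long, long, long, long, long, long);

static syscall_fn_t lookup_syscall(unsigned long num)
{
	unsigned long idx = num - 1024;		/* adds r15=-1024,r15 */

	if (idx >= NR_SYSCALLS_SKETCH)		/* cmp against NR_syscalls-1 */
		return sys_ni_syscall;
	return sys_call_table[idx];		/* shladd: table + 8*idx */
}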
515 (p6) br.cond.sptk strace_error // syscall failed ->
522 (pUStk) cmp.eq.unc p6,p0=r0,r0 // p6 <- pUStk
533 (p6) mov r10=-1
557 /* call the kernel_thread payload; fn is in r4, arg in r5 */
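Because r4-r7 are preserved across the context switch, copy_thread() can park the thread function and its argument there; the first time the new thread runs, the payload call in C terms is just:

/* Sketch: hypothetical C view of the payload invocation. */
static int call_kernel_thread_payload(int (*fn)(void *), void *arg)
{
	return fn(arg);		/* fn from r4, arg from r5 */
}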
586 * Called by ia64_switch_to() after ia64_clone()->copy_thread(). r8 contains the
618 * the following known-to-be-safe values:
622 * r3: 1 (when returning to user-level)
623 * r8-r11: restored (syscall return value(s))
624 * r12: restored (user-level stack pointer)
625 * r13: restored (user-level thread pointer)
628 * r16-r17: cleared
629 * r18: user-level b6
631 * r20: user-level ar.fpsr
632 * r21: user-level b0
634 * r23: user-level ar.bspstore
635 * r24: user-level ar.rnat
636 * r25: user-level ar.unat
637 * r26: user-level ar.pfs
638 * r27: user-level ar.rsc
639 * r28: user-level ip
640 * r29: user-level psr
641 * r30: user-level cfm
642 * r31: user-level pr
643 * f6-f11: cleared
644 * pr: restored (user-level pr)
645 * b0: restored (user-level rp)
648 * ar.unat: restored (user-level ar.unat)
649 * ar.pfs: restored (user-level ar.pfs)
650 * ar.rsc: restored (user-level ar.rsc)
651 * ar.rnat: restored (user-level ar.rnat)
652 * ar.bspstore: restored (user-level ar.bspstore)
653 * ar.fpsr: restored (user-level ar.fpsr)
662 * user- or fsys-mode, hence we disable interrupts early on.
664 * p6 controls whether current_thread_info()->flags needs to be checked for
665 * extra work. We always check for extra work when returning to user-level.
668 * resumes at ia64_work_processed_syscall with p6 set to 1 if the extra-work-check
677 (pKStk) ld4 r21=[r20] // r21 <- preempt_count
678 (pUStk) mov r21=0 // r21 <- 0
680 cmp.eq p6,p0=r21,r0 // p6 <- pUStk || (preempt_count == 0)
684 (pUStk) cmp.eq.unc p6,p0=r0,r0 // p6 <- pUStk
693 (p6) ld4 r31=[r18] // load current_thread_info()->flags
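The predicate computation above reads naturally in C: when returning to user mode (pUStk) extra work is always checked; when staying in kernel mode (pKStk) only if the preempt count has dropped to zero. A sketch:

#include <stdbool.h>

/* Hypothetical C view of the p6 predicate in the exit path. */
static bool need_extra_work_check(bool returning_to_user,
				  unsigned int preempt_count)
{
	return returning_to_user || preempt_count == 0;
}

Only when this yields true are current_thread_info()->flags loaded into r31.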
694 ld8 r19=[r2],PT(B6)-PT(LOADRS) // load ar.rsc value for "loadrs"
702 (p6) ld4 r31=[r18] // load current_thread_info()->flags
703 ld8 r19=[r2],PT(B6)-PT(LOADRS) // load ar.rsc value for "loadrs"
708 ld8 r18=[r2],PT(R9)-PT(B6) // load b6
711 ld8 r23=[r3],PT(R11)-PT(AR_BSPSTORE) // load ar.bspstore (may be garbage)
716 ld8 r9=[r2],PT(CR_IPSR)-PT(R9)
717 ld8 r11=[r3],PT(CR_IIP)-PT(R11)
741 ld8 r26=[r2],PT(B0)-PT(AR_PFS) // M0|1 load ar.pfs
745 ld8 r21=[r2],PT(AR_RNAT)-PT(B0) // M0|1 load b0
746 ld8 r27=[r3],PT(PR)-PT(AR_RSC) // M0|1 load ar.rsc
749 ld8 r24=[r2],PT(AR_FPSR)-PT(AR_RNAT) // M0|1 load ar.rnat (may be garbage)
750 ld8 r31=[r3],PT(R1)-PT(PR) // M0|1 load predicates
753 ld8 r20=[r2],PT(R12)-PT(AR_FPSR) // M0|1 load ar.fpsr
806 * user- or fsys-mode, hence we disable interrupts early on.
808 * p6 controls whether current_thread_info()->flags needs to be checked for
809 * extra work. We always check for extra work when returning to user-level.
812 * resumes at .work_processed_syscall with p6 set to 1 if the extra-work-check
821 (pKStk) ld4 r21=[r20] // r21 <- preempt_count
822 (pUStk) mov r21=0 // r21 <- 0
824 cmp.eq p6,p0=r21,r0 // p6 <- pUStk || (preempt_count == 0)
828 (pUStk) cmp.eq.unc p6,p0=r0,r0 // p6 <- pUStk
833 (p6) ld4 r31=[r17] // load current_thread_info()->flags
837 lfetch [r21],PT(CR_IPSR)-PT(PR)
845 ld8.fill r16=[r3],PT(AR_CSD)-PT(R16)
861 ld8.fill r10=[r3],PT(R17)-PT(R10)
863 ld8.fill r11=[r2],PT(R18)-PT(R11)
891 ld8.fill r31=[r2],PT(F9)-PT(R31)
892 adds r3=PT(F10)-PT(F6),r3
894 ldf.fill f9=[r2],PT(F6)-PT(F9)
895 ldf.fill f10=[r3],PT(F8)-PT(F10)
897 ldf.fill f6=[r2],PT(F7)-PT(F6)
899 ldf.fill f7=[r2],PT(F11)-PT(F7)
902 srlz.d // ensure that inter. collection is off (VHPT is a don't-care, since text is pinned)
963 // mib : mov add br -> mib : ld8 add br
968 (pUStk) st1 [r18]=r17 // restore current->thread.on_ustack
977 (pUStk) st1 [r18]=r17 // restore current->thread.on_ustack
995 sub r16=r16,r18 // krbs = old bsp - size of dirty partition
999 add r18=64,r18 // don't force in0-in7 into memory...
1005 * To prevent leaking bits between the kernel and user-space,
1017 alloc loc0=ar.pfs,2,Nregs-2,2,0
1019 sub r17=r17,r18 // r17 = (physStackedSize + 8) - dirtySize
1030 alloc loc0=ar.pfs,2,Nregs-2,2,0
1032 add out0=-Nregs*8,in0
1057 alloc loc0=ar.pfs,2,Nregs-2,2,0
1059 add out0=-Nregs*8,in0
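The repeated alloc loc0=ar.pfs,2,Nregs-2,2,0 frames implement the leak prevention noted above: each recursion step makes the RSE allocate and zero another batch of Nregs stacked registers, scrubbing whatever kernel values were left beyond the user's dirty partition. A conceptual C analogue (illustrative only; on real hardware the RSE does this, not an explicit loop):

static void scrub_stacked_registers(unsigned long *rbs,
				    unsigned long dirty_regs,
				    unsigned long phys_stacked_regs)
{
	for (unsigned long i = dirty_regs; i < phys_stacked_regs; i++)
		rbs[i] = 0;	/* no kernel bits may reach user space */
}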
1088 (pLvSys)mov r19=r0 // A clear r19 for leave_syscall, no-op otherwise
1092 (pLvSys)mov r16=r0 // A clear r16 for leave_syscall, no-op otherwise
1096 (pLvSys)mov r17=r0 // A clear r17 for leave_syscall, no-op otherwise
1100 (pLvSys)mov r18=r0 // A clear r18 for leave_syscall, no-op otherwise
1111 mov pr=r31,-1 // I0
1116 * r20 = &current->thread_info->preempt_count (if CONFIG_PREEMPTION)
1117 * r31 = current->thread_info->flags
1119 * p6 = TRUE if work-pending-check needs to be redone
1125 add r2=-8,r2
1126 add r3=-8,r3
1134 .ret9: cmp.eq p6,p0=r0,r0 // p6 <- 1 (re-check)
1140 .ret10: cmp.ne p6,p0=r0,r0 // p6 <- 0 (don't re-check)
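The two continuation points above give the work loop its shape: after a reschedule the flags must be examined again, after notification work they need not be. A sketch with assumed helper names (schedule() is real; the flag bit and notify hook here are illustrative):

#include <stdbool.h>

#define TIF_NEED_RESCHED_SKETCH (1UL << 3)	/* illustrative bit */

extern void schedule(void);
extern void notify_resume_user_sketch(void);	/* assumed notify hook */

static bool handle_pending_work(unsigned long flags)
{
	if (flags & TIF_NEED_RESCHED_SKETCH) {
		schedule();			/* .ret9: p6 <- 1 */
		return true;			/* re-check flags */
	}
	notify_resume_user_sketch();		/* .ret10: p6 <- 0 */
	return false;
}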
1158 * must deposit a non-zero value in pt_regs.r8 to indicate an error. If
1166 (p7) mov r10=-1
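This is the ia64 syscall return convention the comment describes: r10 acts as the error flag and r8 carries the value, or a positive errno on failure. A consumer-side sketch (not taken from any particular libc):

static long syscall_result(long r8, long r10)
{
	if (r10 == -1)
		return -r8;	/* fold into the usual -errno form */
	return r8;
}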
1172 * Invoke schedule_tail(task) while preserving in0-in7, which may be needed
1201 adds out1=8,sp // out1=&sigscratch->ar_pfs
1207 st8 [sp]=r9,-16 // allocate space for ar.unat and save it
1208 st8 [out1]=loc1,-8 // save ar.pfs, out1=&sigscratch
1214 ld8 r9=[sp] // load new unat from sigscratch->scratch_unat
1230 adds sp=-16,sp
1235 * leave_kernel() restores f6-f11 from pt_regs, but since the streamlined
1236 * syscall-entry path does not save them, we save them here instead. Note: we
1237 * don't need to save any other registers that are not saved by the streamlined
1294 adds sp=-EXTRA_FRAME_SIZE,sp
1359 adds out0 = -MCOUNT_INSN_SIZE, out0
1390 adds out0 = -MCOUNT_INSN_SIZE, out0
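_mcount receives the return address of the instrumentation call; subtracting the size of the call sequence recovers the instrumented call site before handing it to the tracer. A sketch (MCOUNT_INSN_SIZE is arch-defined; the value here is a placeholder):

#define MCOUNT_INSN_SIZE_SKETCH 32	/* placeholder, arch-defined in reality */

static unsigned long mcount_call_site(unsigned long return_ip)
{
	return return_ip - MCOUNT_INSN_SIZE_SKETCH;	/* point at the caller */
}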