/*
 *  arch/s390/kernel/entry.S
 *    S390 low-level entry points.
 *
 *    Copyright (C) IBM Corp. 1999,2006
 *    Author(s): Martin Schwidefsky (schwidefsky@de.ibm.com),
 *		 Hartmut Penner (hp@de.ibm.com),
 *		 Denis Joseph Barrow (djbarrow@de.ibm.com,barrow_dj@yahoo.com),
 *		 Heiko Carstens <heiko.carstens@de.ibm.com>
 */

#include <linux/sys.h>
#include <linux/linkage.h>
#include <asm/cache.h>
#include <asm/lowcore.h>
#include <asm/errno.h>
#include <asm/ptrace.h>
#include <asm/thread_info.h>
#include <asm/asm-offsets.h>
#include <asm/unistd.h>
#include <asm/page.h>

/*
 * Stack layout for the system_call stack entry.
 * The first few entries are identical to the user_regs_struct.
 */
SP_PTREGS    =	STACK_FRAME_OVERHEAD
SP_ARGS      =	STACK_FRAME_OVERHEAD + __PT_ARGS
SP_PSW	     =	STACK_FRAME_OVERHEAD + __PT_PSW
SP_R0	     =	STACK_FRAME_OVERHEAD + __PT_GPRS
SP_R1	     =	STACK_FRAME_OVERHEAD + __PT_GPRS + 4
SP_R2	     =	STACK_FRAME_OVERHEAD + __PT_GPRS + 8
SP_R3	     =	STACK_FRAME_OVERHEAD + __PT_GPRS + 12
SP_R4	     =	STACK_FRAME_OVERHEAD + __PT_GPRS + 16
SP_R5	     =	STACK_FRAME_OVERHEAD + __PT_GPRS + 20
SP_R6	     =	STACK_FRAME_OVERHEAD + __PT_GPRS + 24
SP_R7	     =	STACK_FRAME_OVERHEAD + __PT_GPRS + 28
SP_R8	     =	STACK_FRAME_OVERHEAD + __PT_GPRS + 32
SP_R9	     =	STACK_FRAME_OVERHEAD + __PT_GPRS + 36
SP_R10	     =	STACK_FRAME_OVERHEAD + __PT_GPRS + 40
SP_R11	     =	STACK_FRAME_OVERHEAD + __PT_GPRS + 44
SP_R12	     =	STACK_FRAME_OVERHEAD + __PT_GPRS + 48
SP_R13	     =	STACK_FRAME_OVERHEAD + __PT_GPRS + 52
SP_R14	     =	STACK_FRAME_OVERHEAD + __PT_GPRS + 56
SP_R15	     =	STACK_FRAME_OVERHEAD + __PT_GPRS + 60
SP_ORIG_R2   =	STACK_FRAME_OVERHEAD + __PT_ORIG_GPR2
SP_ILC	     =	STACK_FRAME_OVERHEAD + __PT_ILC
SP_TRAP      =	STACK_FRAME_OVERHEAD + __PT_TRAP
SP_SIZE      =	STACK_FRAME_OVERHEAD + __PT_SIZE

_TIF_WORK_SVC = (_TIF_SIGPENDING | _TIF_RESTORE_SIGMASK | _TIF_NEED_RESCHED | \
		 _TIF_MCCK_PENDING | _TIF_RESTART_SVC | _TIF_SINGLE_STEP )
_TIF_WORK_INT = (_TIF_SIGPENDING | _TIF_RESTORE_SIGMASK | _TIF_NEED_RESCHED | \
		 _TIF_MCCK_PENDING)

STACK_SHIFT = PAGE_SHIFT + THREAD_ORDER
STACK_SIZE  = 1 << STACK_SHIFT

#define BASED(name) name-system_call(%r13)
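/*
 * %r13 always holds the address of system_call (SAVE_ALL_BASE loads it
 * from __LC_SVC_NEW_PSW+4), so BASED(x) addresses the literal or label x
 * as a base+displacement operand relative to that anchor.
 */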

#ifdef CONFIG_TRACE_IRQFLAGS
	.macro	TRACE_IRQS_ON
	l	%r1,BASED(.Ltrace_irq_on)
	basr	%r14,%r1
	.endm

	.macro	TRACE_IRQS_OFF
	l	%r1,BASED(.Ltrace_irq_off)
	basr	%r14,%r1
	.endm
#else
#define TRACE_IRQS_ON
#define TRACE_IRQS_OFF
#endif

/*
 * Register usage in interrupt handlers:
 *    R9  - pointer to current task structure
 *    R13 - pointer to literal pool
 *    R14 - return register for function calls
 *    R15 - kernel stack pointer
 */

	.macro	STORE_TIMER lc_offset
#ifdef CONFIG_VIRT_CPU_ACCOUNTING
	stpt	\lc_offset
#endif
	.endm

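#
# UPDATE_VTIME adds the 64-bit difference lc_from - lc_to to the 64-bit
# accumulator lc_sum. Only 32-bit register operations are used, so the
# conditional branches propagate the borrow of the subtraction and the
# carry of the addition into the high word by hand.
#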
#ifdef CONFIG_VIRT_CPU_ACCOUNTING
	.macro	UPDATE_VTIME lc_from,lc_to,lc_sum
	lm	%r10,%r11,\lc_from
	sl	%r10,\lc_to
	sl	%r11,\lc_to+4
	bc	3,BASED(0f)
	sl	%r10,BASED(.Lc_1)
0:	al	%r10,\lc_sum
	al	%r11,\lc_sum+4
	bc	12,BASED(1f)
	al	%r10,BASED(.Lc_1)
1:	stm	%r10,%r11,\lc_sum
	.endm
#endif

	.macro	SAVE_ALL_BASE savearea
	stm	%r12,%r15,\savearea
	l	%r13,__LC_SVC_NEW_PSW+4	# load &system_call to %r13
	.endm

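#
# The SAVE_ALL_* macros below differ only in how the stack is chosen:
# SAVE_ALL_SVC loads the kernel stack unconditionally, SAVE_ALL_SYNC
# switches to the kernel stack only if the old PSW was in problem state,
# and SAVE_ALL_ASYNC switches to the async stack unless the CPU is
# already running on it (after letting cleanup_critical repair a
# partially executed critical section if necessary). With
# CONFIG_CHECK_STACK, entries that stay on the current kernel stack also
# test it against the guard area and branch to stack_overflow if the
# stack has been exhausted.
#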
	.macro	SAVE_ALL_SVC psworg,savearea
	la	%r12,\psworg
	l	%r15,__LC_KERNEL_STACK	# problem state -> load ksp
	.endm

	.macro	SAVE_ALL_SYNC psworg,savearea
	la	%r12,\psworg
	tm	\psworg+1,0x01		# test problem state bit
	bz	BASED(2f)		# skip stack setup save
	l	%r15,__LC_KERNEL_STACK	# problem state -> load ksp
#ifdef CONFIG_CHECK_STACK
	b	BASED(3f)
2:	tml	%r15,STACK_SIZE - CONFIG_STACK_GUARD
	bz	BASED(stack_overflow)
3:
#endif
2:
	.endm

	.macro	SAVE_ALL_ASYNC psworg,savearea
	la	%r12,\psworg
	tm	\psworg+1,0x01		# test problem state bit
	bnz	BASED(1f)		# from user -> load async stack
	clc	\psworg+4(4),BASED(.Lcritical_end)
	bhe	BASED(0f)
	clc	\psworg+4(4),BASED(.Lcritical_start)
	bl	BASED(0f)
	l	%r14,BASED(.Lcleanup_critical)
	basr	%r14,%r14
	tm	1(%r12),0x01		# retest problem state after cleanup
	bnz	BASED(1f)
0:	l	%r14,__LC_ASYNC_STACK	# are we already on the async stack ?
	slr	%r14,%r15
	sra	%r14,STACK_SHIFT
	be	BASED(2f)
1:	l	%r15,__LC_ASYNC_STACK
#ifdef CONFIG_CHECK_STACK
	b	BASED(3f)
2:	tml	%r15,STACK_SIZE - CONFIG_STACK_GUARD
	bz	BASED(stack_overflow)
3:
#endif
2:
	.endm

	.macro	CREATE_STACK_FRAME psworg,savearea
	s	%r15,BASED(.Lc_spsize)	# make room for registers & psw
	mvc	SP_PSW(8,%r15),0(%r12)	# move user PSW to stack
	la	%r12,\psworg
	st	%r2,SP_ORIG_R2(%r15)	# store original content of gpr 2
	icm	%r12,12,__LC_SVC_ILC
	stm	%r0,%r11,SP_R0(%r15)	# store gprs %r0-%r11 to kernel stack
	st	%r12,SP_ILC(%r15)
	mvc	SP_R12(16,%r15),\savearea # move %r12-%r15 to stack
	la	%r12,0
	st	%r12,__SF_BACKCHAIN(%r15)	# clear back chain
	.endm

	.macro	RESTORE_ALL psworg,sync
	mvc	\psworg(8),SP_PSW(%r15) # move user PSW to lowcore
	.if !\sync
	ni	\psworg+1,0xfd		# clear wait state bit
	.endif
	lm	%r0,%r15,SP_R0(%r15)	# load gprs 0-15 of user
	STORE_TIMER __LC_EXIT_TIMER
	lpsw	\psworg			# back to caller
	.endm

/*
 * Scheduler resume function, called by switch_to
 *  gpr2 = (task_struct *) prev
 *  gpr3 = (task_struct *) next
 * Returns:
 *  gpr2 = prev
 */
	.globl	__switch_to
__switch_to:
	basr	%r1,0
__switch_to_base:
	tm	__THREAD_per(%r3),0xe8		# new process is using per ?
	bz	__switch_to_noper-__switch_to_base(%r1)	# if not we're fine
	stctl	%c9,%c11,__SF_EMPTY(%r15)	# We are using per stuff
	clc	__THREAD_per(12,%r3),__SF_EMPTY(%r15)
	be	__switch_to_noper-__switch_to_base(%r1)	# we got away w/o bashing TLB's
	lctl	%c9,%c11,__THREAD_per(%r3)	# Nope we didn't
__switch_to_noper:
	l	%r4,__THREAD_info(%r2)		# get thread_info of prev
	tm	__TI_flags+3(%r4),_TIF_MCCK_PENDING # machine check pending?
	bz	__switch_to_no_mcck-__switch_to_base(%r1)
	ni	__TI_flags+3(%r4),255-_TIF_MCCK_PENDING # clear flag in prev
	l	%r4,__THREAD_info(%r3)		# get thread_info of next
	oi	__TI_flags+3(%r4),_TIF_MCCK_PENDING # set it in next
__switch_to_no_mcck:
	stm	%r6,%r15,__SF_GPRS(%r15)# store __switch_to registers of prev task
	st	%r15,__THREAD_ksp(%r2)	# store kernel stack to prev->tss.ksp
	l	%r15,__THREAD_ksp(%r3)	# load kernel stack from next->tss.ksp
	lm	%r6,%r15,__SF_GPRS(%r15)# load __switch_to registers of next task
	st	%r3,__LC_CURRENT	# __LC_CURRENT = current task struct
	lctl	%c4,%c4,__TASK_pid(%r3) # load pid to control reg. 4
	l	%r3,__THREAD_info(%r3)	# load thread_info from task struct
	st	%r3,__LC_THREAD_INFO
	ahi	%r3,STACK_SIZE
	st	%r3,__LC_KERNEL_STACK	# __LC_KERNEL_STACK = new kernel stack
	br	%r14

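/*
 * The code between __critical_start and __critical_end saves or restores
 * registers in several steps and must not be re-entered half way through
 * by an asynchronous interrupt or a machine check. SAVE_ALL_ASYNC and
 * mcck_int_handler therefore compare the old PSW address against this
 * range and call cleanup_critical first to bring the interrupted work to
 * a consistent point.
 */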
__critical_start:
/*
 * SVC interrupt handler routine. System calls are synchronous events and
 * are executed with interrupts enabled.
 */

	.globl	system_call
system_call:
	STORE_TIMER __LC_SYNC_ENTER_TIMER
sysc_saveall:
	SAVE_ALL_BASE __LC_SAVE_AREA
	SAVE_ALL_SVC __LC_SVC_OLD_PSW,__LC_SAVE_AREA
	CREATE_STACK_FRAME __LC_SVC_OLD_PSW,__LC_SAVE_AREA
	lh	%r7,0x8a	  # get svc number from lowcore
#ifdef CONFIG_VIRT_CPU_ACCOUNTING
sysc_vtime:
	tm	SP_PSW+1(%r15),0x01	# interrupting from user ?
	bz	BASED(sysc_do_svc)
	UPDATE_VTIME __LC_EXIT_TIMER,__LC_SYNC_ENTER_TIMER,__LC_USER_TIMER
sysc_stime:
	UPDATE_VTIME __LC_LAST_UPDATE_TIMER,__LC_EXIT_TIMER,__LC_SYSTEM_TIMER
sysc_update:
	mvc	__LC_LAST_UPDATE_TIMER(8),__LC_SYNC_ENTER_TIMER
#endif
sysc_do_svc:
	l	%r9,__LC_THREAD_INFO	# load pointer to thread_info struct
	sla	%r7,2			# *4 and test for svc 0
	bnz	BASED(sysc_nr_ok)	# svc number > 0
	# svc 0: system call number in %r1
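	# (the svc instruction can only encode an 8-bit number, so calls
	#  with a higher number are issued as svc 0 with the real number
	#  passed in %r1)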
	cl	%r1,BASED(.Lnr_syscalls)
	bnl	BASED(sysc_nr_ok)
	lr	%r7,%r1 	  # copy svc number to %r7
	sla	%r7,2		  # *4
sysc_nr_ok:
	mvc	SP_ARGS(4,%r15),SP_R7(%r15)
sysc_do_restart:
	l	%r8,BASED(.Lsysc_table)
	tm	__TI_flags+3(%r9),(_TIF_SYSCALL_TRACE|_TIF_SYSCALL_AUDIT)
	l	%r8,0(%r7,%r8)	  # get system call addr.
	bnz	BASED(sysc_tracesys)
	basr	%r14,%r8	  # call sys_xxxx
	st	%r2,SP_R2(%r15)   # store return value (change R2 on stack)

sysc_return:
	tm	SP_PSW+1(%r15),0x01	# returning to user ?
	bno	BASED(sysc_leave)
	tm	__TI_flags+3(%r9),_TIF_WORK_SVC
	bnz	BASED(sysc_work)  # there is work to do (signals etc.)
sysc_leave:
	RESTORE_ALL __LC_RETURN_PSW,1

#
# recheck if there is more work to do
#
sysc_work_loop:
	tm	__TI_flags+3(%r9),_TIF_WORK_SVC
	bz	BASED(sysc_leave)	# there is no work to do
#
# One of the work bits is on. Find out which one.
#
sysc_work:
	tm	__TI_flags+3(%r9),_TIF_MCCK_PENDING
	bo	BASED(sysc_mcck_pending)
	tm	__TI_flags+3(%r9),_TIF_NEED_RESCHED
	bo	BASED(sysc_reschedule)
	tm	__TI_flags+3(%r9),(_TIF_SIGPENDING | _TIF_RESTORE_SIGMASK)
	bnz	BASED(sysc_sigpending)
	tm	__TI_flags+3(%r9),_TIF_RESTART_SVC
	bo	BASED(sysc_restart)
	tm	__TI_flags+3(%r9),_TIF_SINGLE_STEP
	bo	BASED(sysc_singlestep)
	b	BASED(sysc_leave)

#
# _TIF_NEED_RESCHED is set, call schedule
#
sysc_reschedule:
	l	%r1,BASED(.Lschedule)
	la	%r14,BASED(sysc_work_loop)
	br	%r1			# call scheduler

#
# _TIF_MCCK_PENDING is set, call handler
#
sysc_mcck_pending:
	l	%r1,BASED(.Ls390_handle_mcck)
	la	%r14,BASED(sysc_work_loop)
	br	%r1			# TIF bit will be cleared by handler

#
# _TIF_SIGPENDING or _TIF_RESTORE_SIGMASK is set, call do_signal
#
sysc_sigpending:
	ni	__TI_flags+3(%r9),255-_TIF_SINGLE_STEP # clear TIF_SINGLE_STEP
	la	%r2,SP_PTREGS(%r15)	# load pt_regs
	l	%r1,BASED(.Ldo_signal)
	basr	%r14,%r1		# call do_signal
	tm	__TI_flags+3(%r9),_TIF_RESTART_SVC
	bo	BASED(sysc_restart)
	tm	__TI_flags+3(%r9),_TIF_SINGLE_STEP
	bo	BASED(sysc_singlestep)
	b	BASED(sysc_work_loop)

#
# _TIF_RESTART_SVC is set, set up registers and restart svc
#
sysc_restart:
	ni	__TI_flags+3(%r9),255-_TIF_RESTART_SVC # clear TIF_RESTART_SVC
	l	%r7,SP_R2(%r15) 	# load new svc number
	sla	%r7,2
	mvc	SP_R2(4,%r15),SP_ORIG_R2(%r15) # restore first argument
	lm	%r2,%r6,SP_R2(%r15)	# load svc arguments
	b	BASED(sysc_do_restart)	# restart svc

#
# _TIF_SINGLE_STEP is set, call do_single_step
#
sysc_singlestep:
	ni	__TI_flags+3(%r9),255-_TIF_SINGLE_STEP # clear TIF_SINGLE_STEP
	mvi	SP_TRAP+1(%r15),0x28	# set trap indication to pgm check
	la	%r2,SP_PTREGS(%r15)	# address of register-save area
	l	%r1,BASED(.Lhandle_per)	# load adr. of per handler
	la	%r14,BASED(sysc_return)	# load adr. of system return
	br	%r1			# branch to do_single_step

#
# call trace before and after sys_call
#
sysc_tracesys:
	l	%r1,BASED(.Ltrace)
	la	%r2,SP_PTREGS(%r15)	# load pt_regs
	la	%r3,0
	srl	%r7,2
	st	%r7,SP_R2(%r15)
	basr	%r14,%r1
	clc	SP_R2(4,%r15),BASED(.Lnr_syscalls)
	bnl	BASED(sysc_tracenogo)
	l	%r8,BASED(.Lsysc_table)
	l	%r7,SP_R2(%r15) 	# strace might have changed the
	sll	%r7,2			#  system call
	l	%r8,0(%r7,%r8)
sysc_tracego:
	lm	%r3,%r6,SP_R3(%r15)
	l	%r2,SP_ORIG_R2(%r15)
	basr	%r14,%r8		# call sys_xxx
	st	%r2,SP_R2(%r15)		# store return value
sysc_tracenogo:
	tm	__TI_flags+3(%r9),(_TIF_SYSCALL_TRACE|_TIF_SYSCALL_AUDIT)
	bz	BASED(sysc_return)
	l	%r1,BASED(.Ltrace)
	la	%r2,SP_PTREGS(%r15)	# load pt_regs
	la	%r3,1
	la	%r14,BASED(sysc_return)
	br	%r1

#
# a new process exits the kernel with ret_from_fork
#
	.globl	ret_from_fork
ret_from_fork:
	l	%r13,__LC_SVC_NEW_PSW+4
	l	%r9,__LC_THREAD_INFO	# load pointer to thread_info struct
	tm	SP_PSW+1(%r15),0x01	# forking a kernel thread ?
	bo	BASED(0f)
	st	%r15,SP_R15(%r15)	# store stack pointer for new kthread
0:	l	%r1,BASED(.Lschedtail)
	basr	%r14,%r1
	TRACE_IRQS_ON
	stosm	__SF_EMPTY(%r15),0x03	# reenable interrupts
	b	BASED(sysc_return)

#
# kernel_execve function needs to deal with pt_regs that is not
# at the usual place
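# (a zeroed pt_regs is built on the caller's stack for do_execve; only
#  if do_execve succeeds is it copied to the bottom of the process'
#  kernel stack so that the normal sysc_return path can be used)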
#
	.globl	kernel_execve
kernel_execve:
	stm	%r12,%r15,48(%r15)
	lr	%r14,%r15
	l	%r13,__LC_SVC_NEW_PSW+4
	s	%r15,BASED(.Lc_spsize)
	st	%r14,__SF_BACKCHAIN(%r15)
	la	%r12,SP_PTREGS(%r15)
	xc	0(__PT_SIZE,%r12),0(%r12)
	l	%r1,BASED(.Ldo_execve)
	lr	%r5,%r12
	basr	%r14,%r1
	ltr	%r2,%r2
	be	BASED(0f)
	a	%r15,BASED(.Lc_spsize)
	lm	%r12,%r15,48(%r15)
	br	%r14
	# execve succeeded.
0:	stnsm	__SF_EMPTY(%r15),0xfc	# disable interrupts
	l	%r15,__LC_KERNEL_STACK	# load ksp
	s	%r15,BASED(.Lc_spsize)	# make room for registers & psw
	l	%r9,__LC_THREAD_INFO
	mvc	SP_PTREGS(__PT_SIZE,%r15),0(%r12)	# copy pt_regs
	xc	__SF_BACKCHAIN(4,%r15),__SF_BACKCHAIN(%r15)
	stosm	__SF_EMPTY(%r15),0x03	# reenable interrupts
	l	%r1,BASED(.Lexecve_tail)
	basr	%r14,%r1
	b	BASED(sysc_return)

/*
 * Program check handler routine
 */

	.globl	pgm_check_handler
pgm_check_handler:
/*
 * First we need to check for a special case:
 * Single stepping an instruction that disables the PER event mask will
 * cause a PER event AFTER the mask has been set. Example: SVC or LPSW.
 * For a single stepped SVC the program check handler gets control after
 * the SVC new PSW has been loaded. But we want to execute the SVC first and
 * then handle the PER event. Therefore we update the SVC old PSW to point
 * to the pgm_check_handler and branch to the SVC handler after we checked
 * if we have to load the kernel stack register.
 * For every other possible cause for PER event without the PER mask set
 * we just ignore the PER event (FIXME: is there anything we have to do
 * for LPSW?).
 */
	STORE_TIMER __LC_SYNC_ENTER_TIMER
	SAVE_ALL_BASE __LC_SAVE_AREA
	tm	__LC_PGM_INT_CODE+1,0x80 # check whether we got a per exception
	bnz	BASED(pgm_per)		# got per exception -> special case
	SAVE_ALL_SYNC __LC_PGM_OLD_PSW,__LC_SAVE_AREA
	CREATE_STACK_FRAME __LC_PGM_OLD_PSW,__LC_SAVE_AREA
#ifdef CONFIG_VIRT_CPU_ACCOUNTING
	tm	SP_PSW+1(%r15),0x01	# interrupting from user ?
	bz	BASED(pgm_no_vtime)
	UPDATE_VTIME __LC_EXIT_TIMER,__LC_SYNC_ENTER_TIMER,__LC_USER_TIMER
	UPDATE_VTIME __LC_LAST_UPDATE_TIMER,__LC_EXIT_TIMER,__LC_SYSTEM_TIMER
	mvc	__LC_LAST_UPDATE_TIMER(8),__LC_SYNC_ENTER_TIMER
pgm_no_vtime:
#endif
	l	%r9,__LC_THREAD_INFO	# load pointer to thread_info struct
	l	%r3,__LC_PGM_ILC	# load program interruption code
	la	%r8,0x7f
	nr	%r8,%r3
pgm_do_call:
	l	%r7,BASED(.Ljump_table)
	sll	%r8,2
	l	%r7,0(%r8,%r7)		# load address of handler routine
	la	%r2,SP_PTREGS(%r15)	# address of register-save area
	la	%r14,BASED(sysc_return)
	br	%r7			# branch to interrupt-handler

#
# handle per exception
#
pgm_per:
	tm	__LC_PGM_OLD_PSW,0x40	# test if per event recording is on
	bnz	BASED(pgm_per_std)	# ok, normal per event from user space
# ok its one of the special cases, now we need to find out which one
	clc	__LC_PGM_OLD_PSW(8),__LC_SVC_NEW_PSW
	be	BASED(pgm_svcper)
# no interesting special case, ignore PER event
	lm	%r12,%r15,__LC_SAVE_AREA
	lpsw	0x28
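	# (0x28 is the lowcore address of the program check old PSW, so
	#  this resumes the interrupted context unchanged)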

#
# Normal per exception
#
pgm_per_std:
	SAVE_ALL_SYNC __LC_PGM_OLD_PSW,__LC_SAVE_AREA
	CREATE_STACK_FRAME __LC_PGM_OLD_PSW,__LC_SAVE_AREA
#ifdef CONFIG_VIRT_CPU_ACCOUNTING
	tm	SP_PSW+1(%r15),0x01	# interrupting from user ?
	bz	BASED(pgm_no_vtime2)
	UPDATE_VTIME __LC_EXIT_TIMER,__LC_SYNC_ENTER_TIMER,__LC_USER_TIMER
	UPDATE_VTIME __LC_LAST_UPDATE_TIMER,__LC_EXIT_TIMER,__LC_SYSTEM_TIMER
	mvc	__LC_LAST_UPDATE_TIMER(8),__LC_SYNC_ENTER_TIMER
pgm_no_vtime2:
#endif
	l	%r9,__LC_THREAD_INFO	# load pointer to thread_info struct
	l	%r1,__TI_task(%r9)
	mvc	__THREAD_per+__PER_atmid(2,%r1),__LC_PER_ATMID
	mvc	__THREAD_per+__PER_address(4,%r1),__LC_PER_ADDRESS
	mvc	__THREAD_per+__PER_access_id(1,%r1),__LC_PER_ACCESS_ID
	oi	__TI_flags+3(%r9),_TIF_SINGLE_STEP # set TIF_SINGLE_STEP
	tm	SP_PSW+1(%r15),0x01	# kernel per event ?
	bz	BASED(kernel_per)
	l	%r3,__LC_PGM_ILC	# load program interruption code
	la	%r8,0x7f
	nr	%r8,%r3 		# clear per-event-bit and ilc
	be	BASED(sysc_return)	# only per or per+check ?
	b	BASED(pgm_do_call)

#
# it was a single stepped SVC that is causing all the trouble
#
pgm_svcper:
	SAVE_ALL_SYNC __LC_SVC_OLD_PSW,__LC_SAVE_AREA
	CREATE_STACK_FRAME __LC_SVC_OLD_PSW,__LC_SAVE_AREA
#ifdef CONFIG_VIRT_CPU_ACCOUNTING
	tm	SP_PSW+1(%r15),0x01	# interrupting from user ?
	bz	BASED(pgm_no_vtime3)
	UPDATE_VTIME __LC_EXIT_TIMER,__LC_SYNC_ENTER_TIMER,__LC_USER_TIMER
	UPDATE_VTIME __LC_LAST_UPDATE_TIMER,__LC_EXIT_TIMER,__LC_SYSTEM_TIMER
	mvc	__LC_LAST_UPDATE_TIMER(8),__LC_SYNC_ENTER_TIMER
pgm_no_vtime3:
#endif
	lh	%r7,0x8a		# get svc number from lowcore
	l	%r9,__LC_THREAD_INFO	# load pointer to thread_info struct
	l	%r1,__TI_task(%r9)
	mvc	__THREAD_per+__PER_atmid(2,%r1),__LC_PER_ATMID
	mvc	__THREAD_per+__PER_address(4,%r1),__LC_PER_ADDRESS
	mvc	__THREAD_per+__PER_access_id(1,%r1),__LC_PER_ACCESS_ID
	oi	__TI_flags+3(%r9),_TIF_SINGLE_STEP # set TIF_SINGLE_STEP
	TRACE_IRQS_ON
	stosm	__SF_EMPTY(%r15),0x03	# reenable interrupts
	b	BASED(sysc_do_svc)
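	# (the system call is executed first; the pending PER event is
	#  delivered afterwards via the _TIF_SINGLE_STEP check in sysc_work)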

#
# per was called from kernel, must be kprobes
#
kernel_per:
	mvi	SP_TRAP+1(%r15),0x28	# set trap indication to pgm check
	la	%r2,SP_PTREGS(%r15)	# address of register-save area
	l	%r1,BASED(.Lhandle_per)	# load adr. of per handler
	la	%r14,BASED(sysc_leave)	# load adr. of system return
	br	%r1			# branch to do_single_step

/*
 * IO interrupt handler routine
 */

	.globl io_int_handler
io_int_handler:
	STORE_TIMER __LC_ASYNC_ENTER_TIMER
	stck	__LC_INT_CLOCK
	SAVE_ALL_BASE __LC_SAVE_AREA+16
	SAVE_ALL_ASYNC __LC_IO_OLD_PSW,__LC_SAVE_AREA+16
	CREATE_STACK_FRAME __LC_IO_OLD_PSW,__LC_SAVE_AREA+16
#ifdef CONFIG_VIRT_CPU_ACCOUNTING
	tm	SP_PSW+1(%r15),0x01	# interrupting from user ?
	bz	BASED(io_no_vtime)
	UPDATE_VTIME __LC_EXIT_TIMER,__LC_ASYNC_ENTER_TIMER,__LC_USER_TIMER
	UPDATE_VTIME __LC_LAST_UPDATE_TIMER,__LC_EXIT_TIMER,__LC_SYSTEM_TIMER
	mvc	__LC_LAST_UPDATE_TIMER(8),__LC_ASYNC_ENTER_TIMER
io_no_vtime:
#endif
	l	%r9,__LC_THREAD_INFO	# load pointer to thread_info struct
	TRACE_IRQS_OFF
	l	%r1,BASED(.Ldo_IRQ)	# load address of do_IRQ
	la	%r2,SP_PTREGS(%r15)	# address of register-save area
	basr	%r14,%r1		# branch to standard irq handler
	TRACE_IRQS_ON

io_return:
	tm	SP_PSW+1(%r15),0x01	# returning to user ?
#ifdef CONFIG_PREEMPT
	bno	BASED(io_preempt)	# no -> check for preemptive scheduling
#else
	bno	BASED(io_leave) 	# no-> skip resched & signal
#endif
	tm	__TI_flags+3(%r9),_TIF_WORK_INT
	bnz	BASED(io_work)		# there is work to do (signals etc.)
io_leave:
	RESTORE_ALL __LC_RETURN_PSW,0
io_done:

#ifdef CONFIG_PREEMPT
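#
# kernel preemption: the interrupt returns to kernel code. If that code
# is preemptible (preempt_count is zero) and _TIF_NEED_RESCHED is set,
# move pt_regs onto the interrupted kernel stack, set the preempt count
# to PREEMPT_ACTIVE and call schedule with interrupts enabled as long as
# _TIF_NEED_RESCHED remains set.
#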
io_preempt:
	icm	%r0,15,__TI_precount(%r9)
	bnz	BASED(io_leave)
	l	%r1,SP_R15(%r15)
	s	%r1,BASED(.Lc_spsize)
	mvc	SP_PTREGS(__PT_SIZE,%r1),SP_PTREGS(%r15)
	xc	__SF_BACKCHAIN(4,%r1),__SF_BACKCHAIN(%r1) # clear back chain
	lr	%r15,%r1
io_resume_loop:
	tm	__TI_flags+3(%r9),_TIF_NEED_RESCHED
	bno	BASED(io_leave)
	mvc	__TI_precount(4,%r9),BASED(.Lc_pactive)
	stosm	__SF_EMPTY(%r15),0x03  # reenable interrupts
	l	%r1,BASED(.Lschedule)
	basr	%r14,%r1	       # call schedule
	stnsm	__SF_EMPTY(%r15),0xfc  # disable I/O and ext. interrupts
	xc	__TI_precount(4,%r9),__TI_precount(%r9)
	b	BASED(io_resume_loop)
#endif

#
# switch to kernel stack, then check the TIF bits
#
io_work:
	l	%r1,__LC_KERNEL_STACK
	s	%r1,BASED(.Lc_spsize)
	mvc	SP_PTREGS(__PT_SIZE,%r1),SP_PTREGS(%r15)
	xc	__SF_BACKCHAIN(4,%r1),__SF_BACKCHAIN(%r1) # clear back chain
	lr	%r15,%r1
#
# One of the work bits is on. Find out which one.
# Checked are: _TIF_SIGPENDING, _TIF_RESTORE_SIGMASK, _TIF_NEED_RESCHED
#		and _TIF_MCCK_PENDING
#
io_work_loop:
	tm	__TI_flags+3(%r9),_TIF_MCCK_PENDING
	bo	BASED(io_mcck_pending)
	tm	__TI_flags+3(%r9),_TIF_NEED_RESCHED
	bo	BASED(io_reschedule)
	tm	__TI_flags+3(%r9),(_TIF_SIGPENDING | _TIF_RESTORE_SIGMASK)
	bnz	BASED(io_sigpending)
	b	BASED(io_leave)

#
# _TIF_MCCK_PENDING is set, call handler
#
io_mcck_pending:
	TRACE_IRQS_OFF
	l	%r1,BASED(.Ls390_handle_mcck)
	basr	%r14,%r1		# TIF bit will be cleared by handler
	TRACE_IRQS_ON
	b	BASED(io_work_loop)

#
# _TIF_NEED_RESCHED is set, call schedule
#
io_reschedule:
	l	%r1,BASED(.Lschedule)
	stosm	__SF_EMPTY(%r15),0x03	# reenable interrupts
	basr	%r14,%r1		# call scheduler
	stnsm	__SF_EMPTY(%r15),0xfc	# disable I/O and ext. interrupts
	tm	__TI_flags+3(%r9),_TIF_WORK_INT
	bz	BASED(io_leave) 	# there is no work to do
	b	BASED(io_work_loop)

#
# _TIF_SIGPENDING or _TIF_RESTORE_SIGMASK is set, call do_signal
#
io_sigpending:
	stosm	__SF_EMPTY(%r15),0x03	# reenable interrupts
	la	%r2,SP_PTREGS(%r15)	# load pt_regs
	l	%r1,BASED(.Ldo_signal)
	basr	%r14,%r1		# call do_signal
	stnsm	__SF_EMPTY(%r15),0xfc	# disable I/O and ext. interrupts
	b	BASED(io_work_loop)

/*
 * External interrupt handler routine
 */

	.globl	ext_int_handler
ext_int_handler:
	STORE_TIMER __LC_ASYNC_ENTER_TIMER
	stck	__LC_INT_CLOCK
	SAVE_ALL_BASE __LC_SAVE_AREA+16
	SAVE_ALL_ASYNC __LC_EXT_OLD_PSW,__LC_SAVE_AREA+16
	CREATE_STACK_FRAME __LC_EXT_OLD_PSW,__LC_SAVE_AREA+16
#ifdef CONFIG_VIRT_CPU_ACCOUNTING
	tm	SP_PSW+1(%r15),0x01	# interrupting from user ?
	bz	BASED(ext_no_vtime)
	UPDATE_VTIME __LC_EXIT_TIMER,__LC_ASYNC_ENTER_TIMER,__LC_USER_TIMER
	UPDATE_VTIME __LC_LAST_UPDATE_TIMER,__LC_EXIT_TIMER,__LC_SYSTEM_TIMER
	mvc	__LC_LAST_UPDATE_TIMER(8),__LC_ASYNC_ENTER_TIMER
ext_no_vtime:
#endif
	l	%r9,__LC_THREAD_INFO	# load pointer to thread_info struct
	TRACE_IRQS_OFF
	la	%r2,SP_PTREGS(%r15)	# address of register-save area
	lh	%r3,__LC_EXT_INT_CODE	# get interruption code
	l	%r1,BASED(.Ldo_extint)
	basr	%r14,%r1
	TRACE_IRQS_ON
	b	BASED(io_return)

__critical_end:

/*
 * Machine check handler routines
 */

	.globl mcck_int_handler
mcck_int_handler:
	spt	__LC_CPU_TIMER_SAVE_AREA	# revalidate cpu timer
	lm	%r0,%r15,__LC_GPREGS_SAVE_AREA	# revalidate gprs
	SAVE_ALL_BASE __LC_SAVE_AREA+32
	la	%r12,__LC_MCK_OLD_PSW
	tm	__LC_MCCK_CODE,0x80	# system damage?
	bo	BASED(mcck_int_main)	# yes -> rest of mcck code invalid
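	# the machine check may have hit the cpu timer; if the value saved
	# by the machine check is not valid, fall back to the most recent
	# timer snapshot kept in lowcore (the cpu timer counts down, so the
	# smallest of the saved values is the newest one)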
#ifdef CONFIG_VIRT_CPU_ACCOUNTING
	mvc	__LC_SAVE_AREA+52(8),__LC_ASYNC_ENTER_TIMER
	mvc	__LC_ASYNC_ENTER_TIMER(8),__LC_CPU_TIMER_SAVE_AREA
	tm	__LC_MCCK_CODE+5,0x02	# stored cpu timer value valid?
	bo	BASED(1f)
	la	%r14,__LC_SYNC_ENTER_TIMER
	clc	0(8,%r14),__LC_ASYNC_ENTER_TIMER
	bl	BASED(0f)
	la	%r14,__LC_ASYNC_ENTER_TIMER
0:	clc	0(8,%r14),__LC_EXIT_TIMER
	bl	BASED(0f)
	la	%r14,__LC_EXIT_TIMER
0:	clc	0(8,%r14),__LC_LAST_UPDATE_TIMER
	bl	BASED(0f)
	la	%r14,__LC_LAST_UPDATE_TIMER
0:	spt	0(%r14)
	mvc	__LC_ASYNC_ENTER_TIMER(8),0(%r14)
1:
#endif
	tm	__LC_MCCK_CODE+2,0x09	# mwp + ia of old psw valid?
	bno	BASED(mcck_int_main)	# no -> skip cleanup critical
	tm	__LC_MCK_OLD_PSW+1,0x01	# test problem state bit
	bnz	BASED(mcck_int_main)	# from user -> load async stack
	clc	__LC_MCK_OLD_PSW+4(4),BASED(.Lcritical_end)
	bhe	BASED(mcck_int_main)
	clc	__LC_MCK_OLD_PSW+4(4),BASED(.Lcritical_start)
	bl	BASED(mcck_int_main)
	l	%r14,BASED(.Lcleanup_critical)
	basr	%r14,%r14
mcck_int_main:
	l	%r14,__LC_PANIC_STACK	# are we already on the panic stack?
	slr	%r14,%r15
	sra	%r14,PAGE_SHIFT
	be	BASED(0f)
	l	%r15,__LC_PANIC_STACK	# load panic stack
0:	CREATE_STACK_FRAME __LC_MCK_OLD_PSW,__LC_SAVE_AREA+32
#ifdef CONFIG_VIRT_CPU_ACCOUNTING
	tm	__LC_MCCK_CODE+2,0x08	# mwp of old psw valid?
	bno	BASED(mcck_no_vtime)	# no -> skip cleanup critical
	tm	SP_PSW+1(%r15),0x01	# interrupting from user ?
	bz	BASED(mcck_no_vtime)
	UPDATE_VTIME __LC_EXIT_TIMER,__LC_ASYNC_ENTER_TIMER,__LC_USER_TIMER
	UPDATE_VTIME __LC_LAST_UPDATE_TIMER,__LC_EXIT_TIMER,__LC_SYSTEM_TIMER
	mvc	__LC_LAST_UPDATE_TIMER(8),__LC_ASYNC_ENTER_TIMER
mcck_no_vtime:
#endif
	l	%r9,__LC_THREAD_INFO	# load pointer to thread_info struct
	la	%r2,SP_PTREGS(%r15)	# load pt_regs
	l	%r1,BASED(.Ls390_mcck)
	basr	%r14,%r1		# call machine check handler
	tm	SP_PSW+1(%r15),0x01	# returning to user ?
	bno	BASED(mcck_return)
	l	%r1,__LC_KERNEL_STACK	# switch to kernel stack
	s	%r1,BASED(.Lc_spsize)
	mvc	SP_PTREGS(__PT_SIZE,%r1),SP_PTREGS(%r15)
	xc	__SF_BACKCHAIN(4,%r1),__SF_BACKCHAIN(%r1) # clear back chain
	lr	%r15,%r1
	stosm	__SF_EMPTY(%r15),0x04	# turn dat on
	tm	__TI_flags+3(%r9),_TIF_MCCK_PENDING
	bno	BASED(mcck_return)
	TRACE_IRQS_OFF
	l	%r1,BASED(.Ls390_handle_mcck)
	basr	%r14,%r1		# call machine check handler
	TRACE_IRQS_ON
mcck_return:
	mvc	__LC_RETURN_MCCK_PSW(8),SP_PSW(%r15) # move return PSW
	ni	__LC_RETURN_MCCK_PSW+1,0xfd # clear wait state bit
#ifdef CONFIG_VIRT_CPU_ACCOUNTING
	mvc	__LC_ASYNC_ENTER_TIMER(8),__LC_SAVE_AREA+52
	tm	__LC_RETURN_MCCK_PSW+1,0x01 # returning to user ?
	bno	BASED(0f)
	lm	%r0,%r15,SP_R0(%r15)	# load gprs 0-15
	stpt	__LC_EXIT_TIMER
	lpsw	__LC_RETURN_MCCK_PSW	# back to caller
0:
#endif
	lm	%r0,%r15,SP_R0(%r15)	# load gprs 0-15
	lpsw	__LC_RETURN_MCCK_PSW	# back to caller

	RESTORE_ALL __LC_RETURN_MCCK_PSW,0

/*
 * Restart interruption handler, kick starter for additional CPUs
 */
#ifdef CONFIG_SMP
#ifndef CONFIG_HOTPLUG_CPU
	.section .init.text,"ax"
#endif
	.globl restart_int_handler
restart_int_handler:
	l	%r15,__LC_SAVE_AREA+60	# load ksp
	lctl	%c0,%c15,__LC_CREGS_SAVE_AREA # get new ctl regs
	lam	%a0,%a15,__LC_AREGS_SAVE_AREA
	lm	%r6,%r15,__SF_GPRS(%r15) # load registers from clone
	stosm	__SF_EMPTY(%r15),0x04	# now we can turn dat on
	basr	%r14,0
	l	%r14,restart_addr-.(%r14)
	br	%r14			# branch to start_secondary
restart_addr:
	.long	start_secondary
#ifndef CONFIG_HOTPLUG_CPU
	.previous
#endif
#else
/*
 * If we do not run with SMP enabled, let the new CPU crash ...
 */
	.globl restart_int_handler
restart_int_handler:
	basr	%r1,0
restart_base:
	lpsw	restart_crash-restart_base(%r1)
	.align	8
restart_crash:
	.long	0x000a0000,0x00000000
restart_go:
#endif

#ifdef CONFIG_CHECK_STACK
/*
 * The synchronous or the asynchronous stack overflowed. We are dead.
 * No need to properly save the registers, we are going to panic anyway.
 * Setup a pt_regs so that show_trace can provide a good call trace.
 */
stack_overflow:
	l	%r15,__LC_PANIC_STACK	# change to panic stack
	sl	%r15,BASED(.Lc_spsize)
	mvc	SP_PSW(8,%r15),0(%r12)	# move user PSW to stack
	stm	%r0,%r11,SP_R0(%r15)	# store gprs %r0-%r11 to kernel stack
	la	%r1,__LC_SAVE_AREA
	ch	%r12,BASED(.L0x020)	# old psw addr == __LC_SVC_OLD_PSW ?
	be	BASED(0f)
	ch	%r12,BASED(.L0x028)	# old psw addr == __LC_PGM_OLD_PSW ?
	be	BASED(0f)
	la	%r1,__LC_SAVE_AREA+16
0:	mvc	SP_R12(16,%r15),0(%r1)	# move %r12-%r15 to stack
	xc	__SF_BACKCHAIN(4,%r15),__SF_BACKCHAIN(%r15) # clear back chain
	l	%r1,BASED(1f)		# branch to kernel_stack_overflow
	la	%r2,SP_PTREGS(%r15)	# load pt_regs
	br	%r1
1:	.long	kernel_stack_overflow
#endif

cleanup_table_system_call:
	.long	system_call + 0x80000000, sysc_do_svc + 0x80000000
cleanup_table_sysc_return:
	.long	sysc_return + 0x80000000, sysc_leave + 0x80000000
cleanup_table_sysc_leave:
	.long	sysc_leave + 0x80000000, sysc_work_loop + 0x80000000
cleanup_table_sysc_work_loop:
	.long	sysc_work_loop + 0x80000000, sysc_reschedule + 0x80000000
cleanup_table_io_return:
	.long	io_return + 0x80000000, io_leave + 0x80000000
cleanup_table_io_leave:
	.long	io_leave + 0x80000000, io_done + 0x80000000
cleanup_table_io_work_loop:
	.long	io_work_loop + 0x80000000, io_mcck_pending + 0x80000000

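#
# cleanup_critical is entered with the address part of the interrupted
# PSW in 4(%r12). Each cleanup_table_* pair above brackets a piece of
# the critical section; if the old PSW points into one of these ranges
# the matching cleanup routine either completes the interrupted register
# save/restore by hand or rewinds __LC_RETURN_PSW to a point where the
# work will be redone, so that the interrupting handler finds a
# consistent stack frame.
#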
cleanup_critical:
	clc	4(4,%r12),BASED(cleanup_table_system_call)
	bl	BASED(0f)
	clc	4(4,%r12),BASED(cleanup_table_system_call+4)
	bl	BASED(cleanup_system_call)
0:
	clc	4(4,%r12),BASED(cleanup_table_sysc_return)
	bl	BASED(0f)
	clc	4(4,%r12),BASED(cleanup_table_sysc_return+4)
	bl	BASED(cleanup_sysc_return)
0:
	clc	4(4,%r12),BASED(cleanup_table_sysc_leave)
	bl	BASED(0f)
	clc	4(4,%r12),BASED(cleanup_table_sysc_leave+4)
	bl	BASED(cleanup_sysc_leave)
0:
	clc	4(4,%r12),BASED(cleanup_table_sysc_work_loop)
	bl	BASED(0f)
	clc	4(4,%r12),BASED(cleanup_table_sysc_work_loop+4)
	bl	BASED(cleanup_sysc_return)
0:
	clc	4(4,%r12),BASED(cleanup_table_io_return)
	bl	BASED(0f)
	clc	4(4,%r12),BASED(cleanup_table_io_return+4)
	bl	BASED(cleanup_io_return)
0:
	clc	4(4,%r12),BASED(cleanup_table_io_leave)
	bl	BASED(0f)
	clc	4(4,%r12),BASED(cleanup_table_io_leave+4)
	bl	BASED(cleanup_io_leave)
0:
	clc	4(4,%r12),BASED(cleanup_table_io_work_loop)
	bl	BASED(0f)
	clc	4(4,%r12),BASED(cleanup_table_io_work_loop+4)
	bl	BASED(cleanup_io_return)
0:
	br	%r14

cleanup_system_call:
	mvc	__LC_RETURN_PSW(8),0(%r12)
	c	%r12,BASED(.Lmck_old_psw)
	be	BASED(0f)
	la	%r12,__LC_SAVE_AREA+16
	b	BASED(1f)
0:	la	%r12,__LC_SAVE_AREA+32
1:
#ifdef CONFIG_VIRT_CPU_ACCOUNTING
	clc	__LC_RETURN_PSW+4(4),BASED(cleanup_system_call_insn+4)
	bh	BASED(0f)
	mvc	__LC_SYNC_ENTER_TIMER(8),__LC_ASYNC_ENTER_TIMER
0:	clc	__LC_RETURN_PSW+4(4),BASED(cleanup_system_call_insn+8)
	bhe	BASED(cleanup_vtime)
#endif
	clc	__LC_RETURN_PSW+4(4),BASED(cleanup_system_call_insn)
	bh	BASED(0f)
	mvc	__LC_SAVE_AREA(16),0(%r12)
0:	st	%r13,4(%r12)
	st	%r12,__LC_SAVE_AREA+48	# argh
	SAVE_ALL_SYNC __LC_SVC_OLD_PSW,__LC_SAVE_AREA
	CREATE_STACK_FRAME __LC_SVC_OLD_PSW,__LC_SAVE_AREA
	l	%r12,__LC_SAVE_AREA+48	# argh
	st	%r15,12(%r12)
	lh	%r7,0x8a
#ifdef CONFIG_VIRT_CPU_ACCOUNTING
cleanup_vtime:
	clc	__LC_RETURN_PSW+4(4),BASED(cleanup_system_call_insn+12)
	bhe	BASED(cleanup_stime)
	tm	SP_PSW+1(%r15),0x01	# interrupting from user ?
	bz	BASED(cleanup_novtime)
	UPDATE_VTIME __LC_EXIT_TIMER,__LC_SYNC_ENTER_TIMER,__LC_USER_TIMER
cleanup_stime:
	clc	__LC_RETURN_PSW+4(4),BASED(cleanup_system_call_insn+16)
	bh	BASED(cleanup_update)
	UPDATE_VTIME __LC_LAST_UPDATE_TIMER,__LC_EXIT_TIMER,__LC_SYSTEM_TIMER
cleanup_update:
	mvc	__LC_LAST_UPDATE_TIMER(8),__LC_SYNC_ENTER_TIMER
cleanup_novtime:
#endif
	mvc	__LC_RETURN_PSW+4(4),BASED(cleanup_table_system_call+4)
	la	%r12,__LC_RETURN_PSW
	br	%r14
cleanup_system_call_insn:
	.long	sysc_saveall + 0x80000000
#ifdef CONFIG_VIRT_CPU_ACCOUNTING
	.long	system_call + 0x80000000
	.long	sysc_vtime + 0x80000000
	.long	sysc_stime + 0x80000000
	.long	sysc_update + 0x80000000
#endif

cleanup_sysc_return:
	mvc	__LC_RETURN_PSW(4),0(%r12)
	mvc	__LC_RETURN_PSW+4(4),BASED(cleanup_table_sysc_return)
	la	%r12,__LC_RETURN_PSW
	br	%r14

cleanup_sysc_leave:
	clc	4(4,%r12),BASED(cleanup_sysc_leave_insn)
	be	BASED(2f)
#ifdef CONFIG_VIRT_CPU_ACCOUNTING
	mvc	__LC_EXIT_TIMER(8),__LC_ASYNC_ENTER_TIMER
	clc	4(4,%r12),BASED(cleanup_sysc_leave_insn+4)
	be	BASED(2f)
#endif
	mvc	__LC_RETURN_PSW(8),SP_PSW(%r15)
	c	%r12,BASED(.Lmck_old_psw)
	bne	BASED(0f)
	mvc	__LC_SAVE_AREA+32(16),SP_R12(%r15)
	b	BASED(1f)
0:	mvc	__LC_SAVE_AREA+16(16),SP_R12(%r15)
1:	lm	%r0,%r11,SP_R0(%r15)
	l	%r15,SP_R15(%r15)
2:	la	%r12,__LC_RETURN_PSW
	br	%r14
cleanup_sysc_leave_insn:
#ifdef CONFIG_VIRT_CPU_ACCOUNTING
	.long	sysc_leave + 14 + 0x80000000
#endif
	.long	sysc_leave + 10 + 0x80000000

cleanup_io_return:
	mvc	__LC_RETURN_PSW(4),0(%r12)
	mvc	__LC_RETURN_PSW+4(4),BASED(cleanup_table_io_work_loop)
	la	%r12,__LC_RETURN_PSW
	br	%r14

cleanup_io_leave:
	clc	4(4,%r12),BASED(cleanup_io_leave_insn)
	be	BASED(2f)
#ifdef CONFIG_VIRT_CPU_ACCOUNTING
	mvc	__LC_EXIT_TIMER(8),__LC_ASYNC_ENTER_TIMER
	clc	4(4,%r12),BASED(cleanup_io_leave_insn+4)
	be	BASED(2f)
#endif
	mvc	__LC_RETURN_PSW(8),SP_PSW(%r15)
	c	%r12,BASED(.Lmck_old_psw)
	bne	BASED(0f)
	mvc	__LC_SAVE_AREA+32(16),SP_R12(%r15)
	b	BASED(1f)
0:	mvc	__LC_SAVE_AREA+16(16),SP_R12(%r15)
1:	lm	%r0,%r11,SP_R0(%r15)
	l	%r15,SP_R15(%r15)
2:	la	%r12,__LC_RETURN_PSW
	br	%r14
cleanup_io_leave_insn:
#ifdef CONFIG_VIRT_CPU_ACCOUNTING
	.long	io_leave + 18 + 0x80000000
#endif
	.long	io_leave + 14 + 0x80000000

/*
 * Integer constants
 */
		.align	4
.Lc_spsize:	.long	SP_SIZE
.Lc_overhead:	.long	STACK_FRAME_OVERHEAD
.Lc_pactive:	.long	PREEMPT_ACTIVE
.Lnr_syscalls:	.long	NR_syscalls
.L0x018:	.short	0x018
.L0x020:	.short	0x020
.L0x028:	.short	0x028
.L0x030:	.short	0x030
.L0x038:	.short	0x038
.Lc_1:		.long	1

/*
 * Symbol constants
 */
.Ls390_mcck:	.long	s390_do_machine_check
.Ls390_handle_mcck:
		.long	s390_handle_mcck
.Lmck_old_psw:	.long	__LC_MCK_OLD_PSW
.Ldo_IRQ:	.long	do_IRQ
.Ldo_extint:	.long	do_extint
.Ldo_signal:	.long	do_signal
.Lhandle_per:	.long	do_single_step
.Ldo_execve:	.long	do_execve
.Lexecve_tail:	.long	execve_tail
.Ljump_table:	.long	pgm_check_table
.Lschedule:	.long	schedule
.Ltrace:	.long	syscall_trace
.Lschedtail:	.long	schedule_tail
.Lsysc_table:	.long	sys_call_table
#ifdef CONFIG_TRACE_IRQFLAGS
.Ltrace_irq_on: .long	trace_hardirqs_on
.Ltrace_irq_off:
		.long	trace_hardirqs_off
#endif
.Lcritical_start:
		.long	__critical_start + 0x80000000
.Lcritical_end:
		.long	__critical_end + 0x80000000
.Lcleanup_critical:
		.long	cleanup_critical

		.section .rodata, "a"
#define SYSCALL(esa,esame,emu)	.long esa
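# 31 bit kernel: pick the esa entry of each SYSCALL(esa,esame,emu)
# triplet in syscalls.S to build the table of system call handlers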
sys_call_table:
#include "syscalls.S"
#undef SYSCALL
