/* SPDX-License-Identifier: GPL-2.0 */
/*
 *  S390 version
 *    Copyright IBM Corp. 1999
 *    Author(s): Hartmut Penner (hp@de.ibm.com),
 *               Martin Schwidefsky (schwidefsky@de.ibm.com)
 *
 *  Derived from "include/asm-i386/processor.h"
 *    Copyright (C) 1994, Linus Torvalds
 */

#ifndef __ASM_S390_PROCESSOR_H
#define __ASM_S390_PROCESSOR_H

#include <linux/bits.h>

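/*
 * Bit numbers of per-CPU flags kept in S390_lowcore.cpu_flags. The _CIF_*
 * variants below are the corresponding bit masks; the flags are manipulated
 * with the cpu flag helpers further down in this file.
 */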
#define CIF_NOHZ_DELAY		2	/* delay HZ disable for a tick */
#define CIF_FPU			3	/* restore FPU registers */
#define CIF_ENABLED_WAIT	5	/* in enabled wait state */
#define CIF_MCCK_GUEST		6	/* machine check happening in guest */
#define CIF_DEDICATED_CPU	7	/* this CPU is dedicated */

#define _CIF_NOHZ_DELAY		BIT(CIF_NOHZ_DELAY)
#define _CIF_FPU		BIT(CIF_FPU)
#define _CIF_ENABLED_WAIT	BIT(CIF_ENABLED_WAIT)
#define _CIF_MCCK_GUEST		BIT(CIF_MCCK_GUEST)
#define _CIF_DEDICATED_CPU	BIT(CIF_DEDICATED_CPU)

#define RESTART_FLAG_CTLREGS	_AC(1 << 0, U)

#ifndef __ASSEMBLY__

#include <linux/cpumask.h>
#include <linux/linkage.h>
#include <linux/irqflags.h>
#include <asm/cpu.h>
#include <asm/page.h>
#include <asm/ptrace.h>
#include <asm/setup.h>
#include <asm/runtime_instr.h>
#include <asm/fpu/types.h>
#include <asm/fpu/internal.h>
#include <asm/irqflags.h>

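/* Type of an entry in the system call table; every handler takes the full pt_regs. */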
typedef long (*sys_call_ptr_t)(struct pt_regs *regs);

static __always_inline void set_cpu_flag(int flag)
{
	S390_lowcore.cpu_flags |= (1UL << flag);
}

static __always_inline void clear_cpu_flag(int flag)
{
	S390_lowcore.cpu_flags &= ~(1UL << flag);
}

static __always_inline bool test_cpu_flag(int flag)
{
	return S390_lowcore.cpu_flags & (1UL << flag);
}

static __always_inline bool test_and_set_cpu_flag(int flag)
{
	if (test_cpu_flag(flag))
		return true;
	set_cpu_flag(flag);
	return false;
}

static __always_inline bool test_and_clear_cpu_flag(int flag)
{
	if (!test_cpu_flag(flag))
		return false;
	clear_cpu_flag(flag);
	return true;
}
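
/*
 * Illustrative usage (not taken from this header): a caller typically
 * consumes a pending flag exactly once, e.g.
 *
 *	if (test_and_clear_cpu_flag(CIF_FPU))
 *		load_fpu_regs();
 *
 * where load_fpu_regs() only stands in for whatever handles the flag.
 */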

/*
 * Test CIF flag of another CPU. The caller needs to ensure that
 * CPU hotplug can not happen, e.g. by disabling preemption.
 */
static __always_inline bool test_cpu_flag_of(int flag, int cpu)
{
	struct lowcore *lc = lowcore_ptr[cpu];

	return lc->cpu_flags & (1UL << flag);
}

#define arch_needs_cpu() test_cpu_flag(CIF_NOHZ_DELAY)

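/* Store the CPU id (version code, identification number, machine type) of the executing CPU. */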
static inline void get_cpu_id(struct cpuid *ptr)
{
	asm volatile("stidp %0" : "=Q" (*ptr));
}

void s390_adjust_jiffies(void);
void s390_update_cpu_mhz(void);
void cpu_detect_mhz_feature(void);

extern const struct seq_operations cpuinfo_op;
extern void execve_tail(void);
unsigned long vdso_size(void);

/*
 * User space process size: 2GB for 31 bit, 4TB or 8PB for 64 bit.
 */

#define TASK_SIZE		(test_thread_flag(TIF_31BIT) ? \
					_REGION3_SIZE : TASK_SIZE_MAX)
#define TASK_UNMAPPED_BASE	(test_thread_flag(TIF_31BIT) ? \
					(_REGION3_SIZE >> 1) : (_REGION2_SIZE >> 1))
#define TASK_SIZE_MAX		(-PAGE_SIZE)

#define VDSO_BASE		(STACK_TOP + PAGE_SIZE)
#define VDSO_LIMIT		(test_thread_flag(TIF_31BIT) ? _REGION3_SIZE : _REGION2_SIZE)
#define STACK_TOP		(VDSO_LIMIT - vdso_size() - PAGE_SIZE)
#define STACK_TOP_MAX		(_REGION2_SIZE - vdso_size() - PAGE_SIZE)
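/*
 * The vdso thus occupies [VDSO_LIMIT - vdso_size(), VDSO_LIMIT), i.e. it
 * ends at the 2GB (31 bit) resp. 4TB (64 bit) boundary, with a one-page
 * gap between STACK_TOP and VDSO_BASE.
 */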

#define HAVE_ARCH_PICK_MMAP_LAYOUT

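/*
 * Poison the stack range [erase_low, erase_high) with the 8-byte poison
 * value: a single STG covers the 8-byte case, larger ranges store the
 * pattern once and replicate it in 256-byte blocks with overlapping MVCs,
 * and an EXECUTEd MVC takes care of the remainder.
 */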
#define __stackleak_poison __stackleak_poison
static __always_inline void __stackleak_poison(unsigned long erase_low,
					       unsigned long erase_high,
					       unsigned long poison)
{
	unsigned long tmp, count;

	count = erase_high - erase_low;
	if (!count)
		return;
	asm volatile(
		"	cghi	%[count],8\n"
		"	je	2f\n"
		"	aghi	%[count],-(8+1)\n"
		"	srlg	%[tmp],%[count],8\n"
		"	ltgr	%[tmp],%[tmp]\n"
		"	jz	1f\n"
		"0:	stg	%[poison],0(%[addr])\n"
		"	mvc	8(256-8,%[addr]),0(%[addr])\n"
		"	la	%[addr],256(%[addr])\n"
		"	brctg	%[tmp],0b\n"
		"1:	stg	%[poison],0(%[addr])\n"
		"	larl	%[tmp],3f\n"
		"	ex	%[count],0(%[tmp])\n"
		"	j	4f\n"
		"2:	stg	%[poison],0(%[addr])\n"
		"	j	4f\n"
		"3:	mvc	8(1,%[addr]),0(%[addr])\n"
		"4:\n"
		: [addr] "+&a" (erase_low), [count] "+&d" (count), [tmp] "=&a" (tmp)
		: [poison] "d" (poison)
		: "memory", "cc"
		);
}

/*
 * Thread structure
 */
struct thread_struct {
	unsigned int  acrs[NUM_ACRS];
	unsigned long ksp;			/* kernel stack pointer */
	unsigned long user_timer;		/* task cputime in user space */
	unsigned long guest_timer;		/* task cputime in kvm guest */
	unsigned long system_timer;		/* task cputime in kernel space */
	unsigned long hardirq_timer;		/* task cputime in hardirq context */
	unsigned long softirq_timer;		/* task cputime in softirq context */
	const sys_call_ptr_t *sys_call_table;	/* system call table address */
	unsigned long gmap_addr;		/* address of last gmap fault. */
	unsigned int gmap_write_flag;		/* gmap fault write indication */
	unsigned int gmap_int_code;		/* int code of last gmap fault */
	unsigned int gmap_pfault;		/* signal of a pending guest pfault */

	/* Per-thread information related to debugging */
	struct per_regs per_user;		/* User specified PER registers */
	struct per_event per_event;		/* Cause of the last PER trap */
	unsigned long per_flags;		/* Flags to control debug behavior */
	unsigned int system_call;		/* system call number in signal */
	unsigned long last_break;		/* last breaking-event-address. */
	/* pfault_wait is used to block the process on a pfault event */
	unsigned long pfault_wait;
	struct list_head list;
	/* cpu runtime instrumentation */
	struct runtime_instr_cb *ri_cb;
	struct gs_cb *gs_cb;			/* Current guarded storage cb */
	struct gs_cb *gs_bc_cb;			/* Broadcast guarded storage cb */
	struct pgm_tdb trap_tdb;		/* Transaction abort diagnose block */
	/*
	 * Warning: 'fpu' is dynamically-sized. It *MUST* be at
	 * the end.
	 */
	struct fpu fpu;			/* FP and VX register save area */
};

/* Flag to disable transactions. */
#define PER_FLAG_NO_TE			1UL
/* Flag to enable random transaction aborts. */
#define PER_FLAG_TE_ABORT_RAND		2UL
/* Flag to specify random transaction abort mode:
 * - abort each transaction at a random instruction before TEND if set.
 * - abort random transactions at a random instruction if cleared.
 */
#define PER_FLAG_TE_ABORT_RAND_TEND	4UL

typedef struct thread_struct thread_struct;

#define ARCH_MIN_TASKALIGN	8

#define INIT_THREAD {							\
	.ksp = sizeof(init_stack) + (unsigned long) &init_stack,	\
	.fpu.regs = (void *) init_task.thread.fpu.fprs,			\
	.last_break = 1,						\
}

/*
 * Do necessary setup to start up a new thread.
 */
#define start_thread(regs, new_psw, new_stackp) do {			\
	regs->psw.mask	= PSW_USER_BITS | PSW_MASK_EA | PSW_MASK_BA;	\
	regs->psw.addr	= new_psw;					\
	regs->gprs[15]	= new_stackp;					\
	execve_tail();							\
} while (0)

#define start_thread31(regs, new_psw, new_stackp) do {			\
	regs->psw.mask	= PSW_USER_BITS | PSW_MASK_BA;			\
	regs->psw.addr	= new_psw;					\
	regs->gprs[15]	= new_stackp;					\
	execve_tail();							\
} while (0)
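/*
 * start_thread() starts the new task with a 64-bit addressing mode user
 * PSW (PSW_MASK_EA | PSW_MASK_BA), start_thread31() with a 31-bit mode
 * PSW (PSW_MASK_BA only).
 */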

/* Forward declaration, a strange C thing */
struct task_struct;
struct mm_struct;
struct seq_file;
struct pt_regs;

void show_registers(struct pt_regs *regs);
void show_cacheinfo(struct seq_file *m);

/* Free guarded storage control block */
void guarded_storage_release(struct task_struct *tsk);
void gs_load_bc_cb(struct pt_regs *regs);

unsigned long __get_wchan(struct task_struct *p);
#define task_pt_regs(tsk) ((struct pt_regs *) \
        (task_stack_page(tsk) + THREAD_SIZE) - 1)
#define KSTK_EIP(tsk)	(task_pt_regs(tsk)->psw.addr)
#define KSTK_ESP(tsk)	(task_pt_regs(tsk)->gprs[15])

/* Has the task runtime instrumentation enabled? */
#define is_ri_task(tsk) (!!(tsk)->thread.ri_cb)

/* Avoid using a global register variable due to a gcc bug in versions < 8.4 */
#define current_stack_pointer (__current_stack_pointer())

static __always_inline unsigned long __current_stack_pointer(void)
{
	unsigned long sp;

	asm volatile("lgr %0,15" : "=d" (sp));
	return sp;
}

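/*
 * Return true if the current stack pointer lies within the THREAD_SIZE
 * aligned kernel stack of the current task, i.e. not on one of the other
 * (e.g. interrupt) stacks.
 */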
static __always_inline bool on_thread_stack(void)
{
	unsigned long ksp = S390_lowcore.kernel_stack;

	return !((ksp ^ current_stack_pointer) & ~(THREAD_SIZE - 1));
}

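/* stap: store the 16-bit CPU address of the executing CPU. */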
static __always_inline unsigned short stap(void)
{
	unsigned short cpu_address;

	asm volatile("stap %0" : "=Q" (cpu_address));
	return cpu_address;
}

#define cpu_relax() barrier()

#define ECAG_CACHE_ATTRIBUTE	0
#define ECAG_CPU_ATTRIBUTE	1

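/*
 * ecag: extract a CPU or cache attribute. asi selects the attribute space
 * (ECAG_CACHE_ATTRIBUTE or ECAG_CPU_ATTRIBUTE), parm the attribute within
 * that space.
 */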
static inline unsigned long __ecag(unsigned int asi, unsigned char parm)
{
	unsigned long val;

	asm volatile("ecag %0,0,0(%1)" : "=d" (val) : "a" (asi << 8 | parm));
	return val;
}

static inline void psw_set_key(unsigned int key)
{
	asm volatile("spka 0(%0)" : : "d" (key));
}

/*
 * Set PSW to specified value.
 */
static inline void __load_psw(psw_t psw)
{
	asm volatile("lpswe %0" : : "Q" (psw) : "cc");
}

/*
 * Set PSW mask to specified value, while leaving the
 * PSW addr pointing to the next instruction.
 */
static __always_inline void __load_psw_mask(unsigned long mask)
{
	unsigned long addr;
	psw_t psw;

	psw.mask = mask;

	asm volatile(
		"	larl	%0,1f\n"
		"	stg	%0,%1\n"
		"	lpswe	%2\n"
		"1:"
		: "=&d" (addr), "=Q" (psw.addr) : "Q" (psw) : "memory", "cc");
}

/*
 * Extract current PSW mask
 */
static inline unsigned long __extract_psw(void)
{
	unsigned int reg1, reg2;

	asm volatile("epsw %0,%1" : "=d" (reg1), "=a" (reg2));
	return (((unsigned long) reg1) << 32) | ((unsigned long) reg2);
}

static inline void local_mcck_enable(void)
{
	__load_psw_mask(__extract_psw() | PSW_MASK_MCHECK);
}

static inline void local_mcck_disable(void)
{
	__load_psw_mask(__extract_psw() & ~PSW_MASK_MCHECK);
}

/*
 * Rewind PSW instruction address by specified number of bytes.
 */
static inline unsigned long __rewind_psw(psw_t psw, unsigned long ilc)
{
	unsigned long mask;

	mask = (psw.mask & PSW_MASK_EA) ? -1UL :
	       (psw.mask & PSW_MASK_BA) ? (1UL << 31) - 1 :
					  (1UL << 24) - 1;
	return (psw.addr - ilc) & mask;
}

/*
 * Function to drop a processor into disabled wait state
 */
static __always_inline void __noreturn disabled_wait(void)
{
	psw_t psw;

	psw.mask = PSW_MASK_BASE | PSW_MASK_WAIT | PSW_MASK_BA | PSW_MASK_EA;
	psw.addr = _THIS_IP_;
	__load_psw(psw);
	while (1);
}

#define ARCH_LOW_ADDRESS_LIMIT	0x7fffffffUL

static __always_inline bool regs_irqs_disabled(struct pt_regs *regs)
{
	return arch_irqs_disabled_flags(regs->psw.mask);
}

#endif /* __ASSEMBLY__ */

#endif /* __ASM_S390_PROCESSOR_H */