/* SPDX-License-Identifier: GPL-2.0 */
/*
 * include/asm-sh/processor.h
 *
 * Copyright (C) 1999, 2000  Niibe Yutaka
 * Copyright (C) 2002, 2003  Paul Mundt
 */

#ifndef __ASM_SH_PROCESSOR_32_H
#define __ASM_SH_PROCESSOR_32_H

#include <linux/compiler.h>
#include <linux/linkage.h>
#include <asm/page.h>
#include <asm/types.h>
#include <asm/hw_breakpoint.h>

/* Core Processor Version Register */
#define CCN_PVR		0xff000030
#define CCN_CVR		0xff000040
#define CCN_PRR		0xff000044

/*
 * User space process size: 2GB.
 *
 * Since SH7709 and SH7750 have "area 7", we can't use 0x7c000000--0x7fffffff
 */
#define TASK_SIZE	0x7c000000UL

#define STACK_TOP	TASK_SIZE
#define STACK_TOP_MAX	STACK_TOP

/* This decides where the kernel will search for a free chunk of vm
 * space during mmap's.
 */
#define TASK_UNMAPPED_BASE	PAGE_ALIGN(TASK_SIZE / 3)

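/*
 * Illustrative arithmetic (not part of the original header), assuming the
 * common 4 KiB PAGE_SIZE: TASK_SIZE / 3 = 0x7c000000 / 3 = 0x29555555,
 * which PAGE_ALIGN() rounds up to 0x29556000, so mmap() searches begin
 * roughly a third of the way into the user address space.
 */
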
/*
 * Bits of the SR register
 *
 * FD-bit:
 *     When it is set, the processor is not allowed to use the FPU, and
 *     executing a floating-point operation raises an exception.
 *
 * IMASK-bits:
 *     Interrupt level mask
 */
#define SR_DSP		0x00001000
#define SR_IMASK	0x000000f0
#define SR_FD		0x00008000
#define SR_MD		0x40000000

#define SR_USER_MASK	0x00000303	// M, Q, S, T bits
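
/*
 * Illustrative sketch (not part of the original header): reading SR with
 * "stc" and masking against SR_FD shows whether a floating-point
 * instruction would currently trap.  The helper name is hypothetical.
 */
#if 0
static inline int fpu_currently_disabled(void)
{
	unsigned long sr;

	__asm__ __volatile__("stc	sr, %0" : "=r" (sr));
	return (sr & SR_FD) != 0;	/* non-zero: FP insns raise the FPU disable exception */
}
#endif
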
/*
 * DSP structure and data
 */
struct sh_dsp_struct {
	unsigned long dsp_regs[14];
	long status;
};

/*
 * FPU structure and data
 */

struct sh_fpu_hard_struct {
	unsigned long fp_regs[16];
	unsigned long xfp_regs[16];
	unsigned long fpscr;
	unsigned long fpul;

	long status; /* software status information */
};

/* Dummy fpu emulator */
struct sh_fpu_soft_struct {
	unsigned long fp_regs[16];
	unsigned long xfp_regs[16];
	unsigned long fpscr;
	unsigned long fpul;

	unsigned char lookahead;
	unsigned long entry_pc;
};

union thread_xstate {
	struct sh_fpu_hard_struct hardfpu;
	struct sh_fpu_soft_struct softfpu;
};

struct thread_struct {
	/* Saved registers when thread is descheduled */
	unsigned long sp;
	unsigned long pc;

	/* Various thread flags, see SH_THREAD_xxx */
	unsigned long flags;

	/* Save middle states of ptrace breakpoints */
	struct perf_event *ptrace_bps[HBP_NUM];

#ifdef CONFIG_SH_DSP
	/* DSP status information */
	struct sh_dsp_struct dsp_status;
#endif

	/* Extended processor state */
	union thread_xstate *xstate;

	/*
	 * fpu_counter contains the number of consecutive context switches
	 * during which the FPU was used.  If this is over a threshold, the
	 * lazy FPU saving becomes unlazy to save the trap.  This is an
	 * unsigned char so that after 256 times the counter wraps and the
	 * behavior turns lazy again; this is to deal with bursty apps that
	 * only use the FPU for a short time.
	 */
	unsigned char fpu_counter;
};
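
/*
 * Illustrative sketch (not part of the original header): one way a context
 * switch path could consume fpu_counter.  The threshold value and helper
 * name below are hypothetical, not kernel API.
 */
#if 0
#define FPU_EAGER_THRESHOLD	5

static inline int want_eager_fpu_restore(const struct thread_struct *t)
{
	/* After enough consecutive FPU-using switches, skip the lazy trap. */
	return t->fpu_counter > FPU_EAGER_THRESHOLD;
}
#endif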

#define INIT_THREAD  {						\
	.sp = sizeof(init_stack) + (long) &init_stack,		\
	.flags = 0,						\
}

/* Forward declaration, a strange C thing */
struct task_struct;

extern void start_thread(struct pt_regs *regs, unsigned long new_pc, unsigned long new_sp);

/*
 * FPU lazy state save handling.
 */

static __inline__ void disable_fpu(void)
{
	unsigned long __dummy;

	/* Set FD flag in SR */
	__asm__ __volatile__("stc	sr, %0\n\t"
			     "or	%1, %0\n\t"
			     "ldc	%0, sr"
			     : "=&r" (__dummy)
			     : "r" (SR_FD));
}

static __inline__ void enable_fpu(void)
{
	unsigned long __dummy;

	/* Clear out FD flag in SR */
	__asm__ __volatile__("stc	sr, %0\n\t"
			     "and	%1, %0\n\t"
			     "ldc	%0, sr"
			     : "=&r" (__dummy)
			     : "r" (~SR_FD));
}
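
/*
 * Illustrative sketch (not part of the original header) of how these two
 * helpers fit the lazy protocol: the context switch leaves FD set for the
 * incoming task, so its first FP instruction traps; the trap handler then
 * re-enables the FPU and reloads the saved state.  restore_fpu_state() is
 * a hypothetical stand-in for the real restore path.
 */
#if 0
static void example_fpu_disable_trap(struct thread_struct *t)
{
	enable_fpu();		/* clear SR.FD so FP instructions run again */
	restore_fpu_state(t);	/* hypothetical: reload t->xstate into the FPU */
	t->fpu_counter++;	/* feed the lazy/eager heuristic */
}
#endif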

/* Double precision, NaNs as NaNs, rounding to nearest, no exceptions */
#define FPSCR_INIT  0x00080000

#define	FPSCR_CAUSE_MASK	0x0001f000	/* Cause bits */
#define	FPSCR_FLAG_MASK		0x0000007c	/* Flag bits */
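
/*
 * Field breakdown of FPSCR_INIT above (illustrative, based on the SH-4
 * FPSCR layout): bit 19 (PR) set selects double precision, bits 1:0 (RM)
 * are 00 for round-to-nearest, and the exception enable bits are all
 * clear, matching the comment on FPSCR_INIT.
 */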

/*
 * Return saved PC of a blocked thread.
 */
#define thread_saved_pc(tsk)	(tsk->thread.pc)

void show_trace(struct task_struct *tsk, unsigned long *sp,
		struct pt_regs *regs, const char *loglvl);

#ifdef CONFIG_DUMP_CODE
void show_code(struct pt_regs *regs);
#else
static inline void show_code(struct pt_regs *regs)
{
}
#endif

extern unsigned long __get_wchan(struct task_struct *p);

#define KSTK_EIP(tsk)  (task_pt_regs(tsk)->pc)
#define KSTK_ESP(tsk)  (task_pt_regs(tsk)->regs[15])

#if defined(CONFIG_CPU_SH2A) || defined(CONFIG_CPU_SH4)

#define PREFETCH_STRIDE		L1_CACHE_BYTES
#define ARCH_HAS_PREFETCH
#define ARCH_HAS_PREFETCHW

static inline void prefetch(const void *x)
{
	__builtin_prefetch(x, 0, 3);
}

static inline void prefetchw(const void *x)
{
	__builtin_prefetch(x, 1, 3);
}
#endif
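
/*
 * Illustrative sketch (not part of the original header): a typical use of
 * prefetch() to start fetching a later element while the current one is
 * being summed.  The function and the lookahead distance are hypothetical.
 */
#if 0
static unsigned long example_sum(const unsigned long *p, unsigned int n)
{
	unsigned long sum = 0;
	unsigned int i;

	for (i = 0; i < n; i++) {
		if (i + 8 < n)
			prefetch(&p[i + 8]);	/* touch a cache line ahead of use */
		sum += p[i];
	}
	return sum;
}
#endif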

#endif /* __ASM_SH_PROCESSOR_32_H */