/*
 * arch/sh/include/asm/processor_32.h
 *
 * Copyright (C) 1999, 2000  Niibe Yutaka
 * Copyright (C) 2002, 2003  Paul Mundt
 */

#ifndef __ASM_SH_PROCESSOR_32_H
#define __ASM_SH_PROCESSOR_32_H
#ifdef __KERNEL__

#include <linux/compiler.h>
#include <linux/linkage.h>
#include <asm/page.h>
#include <asm/types.h>
#include <asm/hw_breakpoint.h>

/*
 * Default implementation of macro that returns current
 * instruction pointer ("program counter").
 */
#define current_text_addr() ({ void *pc; __asm__("mova	1f, %0\n.align 2\n1:":"=z" (pc)); pc; })
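/*
 * How it works: 'mova 1f, %0' computes the PC-relative address of the
 * local label 1: into r0 (the "z" constraint selects r0 on SH), so the
 * statement expression evaluates to the address just past the mova
 * instruction, i.e. the current instruction pointer.
 */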

/* Core Processor Version Register */
#define CCN_PVR		0xff000030
#define CCN_CVR		0xff000040
#define CCN_PRR		0xff000044

/*
 * User space process size: 2GB.
 *
 * Since SH7709 and SH7750 have "area 7", we can't use 0x7c000000--0x7fffffff
 */
#define TASK_SIZE	0x7c000000UL

#define STACK_TOP	TASK_SIZE
#define STACK_TOP_MAX	STACK_TOP

/* This decides where the kernel will search for a free chunk of vm
 * space during mmap's.
 */
#define TASK_UNMAPPED_BASE	PAGE_ALIGN(TASK_SIZE / 3)
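/*
 * Worked example, assuming the default 4 KiB PAGE_SIZE: 0x7c000000 / 3 ==
 * 0x29555555, which PAGE_ALIGN() rounds up to 0x29556000, so mmap()
 * searches start roughly a third of the way into the user address space.
 */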

/*
 * Bits in the SR register
 *
 * FD-bit:
 *     When set, the processor is not allowed to use the FPU, and any
 *     floating-point operation raises an exception.
 *
 * IMASK bits:
 *     Interrupt level mask
 */
#define SR_DSP		0x00001000
#define SR_IMASK	0x000000f0
#define SR_FD		0x00008000
#define SR_MD		0x40000000
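/*
 * For reference: SR_MD is the processor mode bit (set while running in
 * privileged/kernel mode), and SR_DSP enables the DSP unit on DSP-capable
 * parts.
 */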

/*
 * DSP structure and data
 */
struct sh_dsp_struct {
	unsigned long dsp_regs[14];
	long status;
};

/*
 * FPU structure and data
 */

struct sh_fpu_hard_struct {
	unsigned long fp_regs[16];
	unsigned long xfp_regs[16];
	unsigned long fpscr;
	unsigned long fpul;

	long status; /* software status information */
};

/* Dummy fpu emulator */
struct sh_fpu_soft_struct {
	unsigned long fp_regs[16];
	unsigned long xfp_regs[16];
	unsigned long fpscr;
	unsigned long fpul;

	unsigned char lookahead;
	unsigned long entry_pc;
};

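/*
 * Only one member is live for a given task: hardfpu when the CPU has a
 * hardware FPU, softfpu when floating point is handled by the emulator.
 */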
union thread_xstate {
	struct sh_fpu_hard_struct hardfpu;
	struct sh_fpu_soft_struct softfpu;
};

struct thread_struct {
	/* Saved registers when thread is descheduled */
	unsigned long sp;
	unsigned long pc;

	/* Various thread flags, see SH_THREAD_xxx */
	unsigned long flags;

	/* Save middle states of ptrace breakpoints */
	struct perf_event *ptrace_bps[HBP_NUM];

#ifdef CONFIG_SH_DSP
	/* DSP status information */
	struct sh_dsp_struct dsp_status;
#endif

	/* Extended processor state */
	union thread_xstate *xstate;

	/*
	 * fpu_counter contains the number of consecutive context switches
	 * during which the FPU has been used. Once it exceeds a threshold,
	 * lazy FPU saving becomes eager in order to avoid the trap overhead.
	 * Since this is an unsigned char, the counter wraps after 256 switches
	 * and the behaviour turns lazy again; this deals with bursty apps
	 * that only use the FPU for a short time.
	 */
	unsigned char fpu_counter;
};

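/*
 * Note that init_stack is the initial task's stack area; since the stack
 * grows downwards, the boot-time sp is set to the address just past its
 * end.
 */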
#define INIT_THREAD  {						\
	.sp = sizeof(init_stack) + (long) &init_stack,		\
	.flags = 0,						\
}

/* Forward declaration, a strange C thing */
struct task_struct;

extern void start_thread(struct pt_regs *regs, unsigned long new_pc, unsigned long new_sp);

/* Free all resources held by a thread. */
extern void release_thread(struct task_struct *);

/*
 * FPU lazy state save handling.
 */

static __inline__ void disable_fpu(void)
{
	unsigned long __dummy;

	/* Set FD flag in SR */
	__asm__ __volatile__("stc	sr, %0\n\t"
			     "or	%1, %0\n\t"
			     "ldc	%0, sr"
			     : "=&r" (__dummy)
			     : "r" (SR_FD));
}

static __inline__ void enable_fpu(void)
{
	unsigned long __dummy;

	/* Clear out FD flag in SR */
	__asm__ __volatile__("stc	sr, %0\n\t"
			     "and	%1, %0\n\t"
			     "ldc	%0, sr"
			     : "=&r" (__dummy)
			     : "r" (~SR_FD));
}
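/*
 * Sketch of how these helpers pair with thread_struct.fpu_counter at
 * context switch time (illustrative only; the threshold value and the real
 * logic live in the process/FPU management code, not in this header, and
 * FPU_EAGER_THRESHOLD is a made-up name for the threshold mentioned above):
 *
 *	if (next->thread.fpu_counter > FPU_EAGER_THRESHOLD)
 *		enable_fpu();	   heavy FPU user: keep the FPU usable
 *	else
 *		disable_fpu();	   lazy: the next FPU insn traps and restores
 */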

/* Double precision, NaNs as NaNs, rounding to nearest, no exceptions */
#define FPSCR_INIT  0x00080000
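/*
 * On SH-4 class FPUs this sets only the PR bit (bit 19, double precision);
 * the rounding-mode bits [1:0] stay zero (round to nearest) and all
 * exception enable bits stay clear, matching the description above.
 */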

#define	FPSCR_CAUSE_MASK	0x0001f000	/* Cause bits */
#define	FPSCR_FLAG_MASK		0x0000007c	/* Flag bits */

/*
 * Return saved PC of a blocked thread.
 */
#define thread_saved_pc(tsk)	(tsk->thread.pc)

void show_trace(struct task_struct *tsk, unsigned long *sp,
		struct pt_regs *regs);

#ifdef CONFIG_DUMP_CODE
void show_code(struct pt_regs *regs);
#else
static inline void show_code(struct pt_regs *regs)
{
}
#endif

extern unsigned long get_wchan(struct task_struct *p);

#define KSTK_EIP(tsk)  (task_pt_regs(tsk)->pc)
#define KSTK_ESP(tsk)  (task_pt_regs(tsk)->regs[15])

#if defined(CONFIG_CPU_SH2A) || defined(CONFIG_CPU_SH4)

#define PREFETCH_STRIDE		L1_CACHE_BYTES
#define ARCH_HAS_PREFETCH
#define ARCH_HAS_PREFETCHW

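/*
 * Both helpers map straight onto GCC's builtin: the second argument is the
 * access kind (0 = read, 1 = write) and the third is the expected temporal
 * locality (3 = keep the line in all cache levels).
 */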
static inline void prefetch(const void *x)
{
	__builtin_prefetch(x, 0, 3);
}

static inline void prefetchw(const void *x)
{
	__builtin_prefetch(x, 1, 3);
}
#endif

#endif /* __KERNEL__ */
#endif /* __ASM_SH_PROCESSOR_32_H */