xref: /openbmc/linux/arch/sh/kernel/cpu/init.c (revision 9d4436a6fbc8c5eccdfcb8f5884e0a7b4a57f6d2)
1 /*
2  * arch/sh/kernel/cpu/init.c
3  *
4  * CPU init code
5  *
6  * Copyright (C) 2002, 2003  Paul Mundt
7  * Copyright (C) 2003  Richard Curnow
8  *
9  * This file is subject to the terms and conditions of the GNU General Public
10  * License.  See the file "COPYING" in the main directory of this archive
11  * for more details.
12  */
13 #include <linux/init.h>
14 #include <linux/kernel.h>
15 #include <asm/processor.h>
16 #include <asm/uaccess.h>
17 #include <asm/page.h>
18 #include <asm/system.h>
19 #include <asm/cacheflush.h>
20 #include <asm/cache.h>
21 #include <asm/io.h>
22 
/* CPU/cache probing; implemented per processor family (see sh_cpu_init()). */
extern void detect_cpu_and_cache_system(void);
24 
/*
 * Generic wrapper for command line arguments to disable on-chip
 * peripherals (nofpu, nodsp, and so forth).
 *
 * onchip_setup(x) expands to:
 *   - an __initdata flag "x_disabled" (0 by default), and
 *   - an __init handler "x_setup()" that sets the flag and returns 1
 *     ("option consumed"), registered via __setup() for the "no<x>"
 *     kernel command line option.
 */
#define onchip_setup(x)				\
static int x##_disabled __initdata = 0;		\
						\
static int __init x##_setup(char *opts)		\
{						\
	x##_disabled = 1;			\
	return 1;				\
}						\
__setup("no" __stringify(x), x##_setup);

/* Provide the "nofpu" and "nodsp" options, checked in sh_cpu_init(). */
onchip_setup(fpu);
onchip_setup(dsp);
41 
/*
 * Generic first-level cache init.
 *
 * Purges anything the bootloader may have left dirty in the data cache,
 * then writes a fresh configuration to CCR: enable + invalidate, the
 * write policy (write-through or write-back), and the optional
 * EMODE/OC-RAM bits. Executes from the uncached P2 area, since CCR must
 * not be modified while running through the cache being reconfigured.
 */
static void __init cache_init(void)
{
	unsigned long ccr, flags;

	/* detect_cpu_and_cache_system() must have recognized the CPU. */
	if (cpu_data->type == CPU_SH_NONE)
		panic("Unknown CPU");

	jump_to_P2();
	ccr = ctrl_inl(CCR);

	/*
	 * At this point we don't know whether the cache is enabled or not - a
	 * bootloader may have enabled it.  There are at least 2 things that
	 * could be dirty in the cache at this point:
	 * 1. kernel command line set up by boot loader
	 * 2. spilled registers from the prolog of this function
	 * => before re-initialising the cache, we must do a purge of the whole
	 * cache out to memory for safety.  As long as nothing is spilled
	 * during the loop to lines that have already been done, this is safe.
	 * - RPC
	 */
	if (ccr & CCR_CACHE_ENABLE) {
		unsigned long ways, waysize, addrstart;

		waysize = cpu_data->dcache.sets;

#ifdef CCR_CACHE_ORA
		/*
		 * If the OC is already in RAM mode, we only have
		 * half of the entries to flush..
		 */
		if (ccr & CCR_CACHE_ORA)
			waysize >>= 1;
#endif

		/* sets -> bytes per way */
		waysize <<= cpu_data->dcache.entry_shift;

#ifdef CCR_CACHE_EMODE
		/* If EMODE is not set, we only have 1 way to flush. */
		if (!(ccr & CCR_CACHE_EMODE))
			ways = 1;
		else
#endif
			ways = cpu_data->dcache.ways;

		/*
		 * Walk every line of every way through the operand cache
		 * address array; storing 0 to an entry clears its
		 * valid/dirty bits, purging that line out to memory as
		 * described in the comment above.
		 */
		addrstart = CACHE_OC_ADDRESS_ARRAY;
		do {
			unsigned long addr;

			for (addr = addrstart;
			     addr < addrstart + waysize;
			     addr += cpu_data->dcache.linesz)
				ctrl_outl(0, addr);

			addrstart += cpu_data->dcache.way_incr;
		} while (--ways);
	}

	/*
	 * Default CCR values .. enable the caches
	 * and invalidate them immediately..
	 */
	flags = CCR_CACHE_ENABLE | CCR_CACHE_INVALIDATE;

#ifdef CCR_CACHE_EMODE
	/* Force EMODE if possible (i.e. on any multi-way cache). */
	if (cpu_data->dcache.ways > 1)
		flags |= CCR_CACHE_EMODE;
	else
		flags &= ~CCR_CACHE_EMODE;
#endif

#ifdef CONFIG_SH_WRITETHROUGH
	/* Turn on Write-through caching */
	flags |= CCR_CACHE_WT;
#else
	/* .. or default to Write-back */
	flags |= CCR_CACHE_CB;
#endif

#ifdef CONFIG_SH_OCRAM
	/*
	 * Turn on OCRAM -- halve the OC. Keep the geometry recorded in
	 * cpu_data consistent with the reduced cache size, since
	 * sh_cpu_init() derives shm_align_mask from dcache.way_size.
	 */
	flags |= CCR_CACHE_ORA;
	cpu_data->dcache.sets >>= 1;

	cpu_data->dcache.way_size = cpu_data->dcache.sets *
				    cpu_data->dcache.linesz;
#endif

	ctrl_outl(flags, CCR);
	back_to_P1();
}
137 
138 #ifdef CONFIG_SH_DSP
/*
 * Disable DSP access for this CPU by clearing the SR.DSP bit
 * (stc/and/ldc read-modify-write of the status register).
 */
static void __init release_dsp(void)
{
	unsigned long sr;

	/* Clear SR.DSP bit */
	__asm__ __volatile__ (
		"stc\tsr, %0\n\t"
		"and\t%1, %0\n\t"
		"ldc\t%0, sr\n\t"
		: "=&r" (sr)
		: "r" (~SR_DSP)
	);
}
152 
/*
 * Probe for an on-chip DSP: attempt to set SR.DSP and read SR back.
 * If the bit sticks, the CPU has a DSP and CPU_HAS_DSP is recorded in
 * cpu_data->flags; either way the bit is cleared again afterwards so
 * the kernel starts with the DSP released.
 */
static void __init dsp_init(void)
{
	unsigned long sr;

	/*
	 * Set the SR.DSP bit, wait for one instruction, and then read
	 * back the SR value.
	 */
	__asm__ __volatile__ (
		"stc\tsr, %0\n\t"
		"or\t%1, %0\n\t"
		"ldc\t%0, sr\n\t"
		"nop\n\t"
		"stc\tsr, %0\n\t"
		: "=&r" (sr)
		: "r" (SR_DSP)
	);

	/* If the DSP bit is still set, this CPU has a DSP */
	if (sr & SR_DSP)
		cpu_data->flags |= CPU_HAS_DSP;

	/* Now that we've determined the DSP status, clear the DSP bit. */
	release_dsp();
}
178 #endif /* CONFIG_SH_DSP */
179 
180 /**
181  * sh_cpu_init
182  *
183  * This is our initial entry point for each CPU, and is invoked on the boot
184  * CPU prior to calling start_kernel(). For SMP, a combination of this and
185  * start_secondary() will bring up each processor to a ready state prior
186  * to hand forking the idle loop.
187  *
188  * We do all of the basic processor init here, including setting up the
189  * caches, FPU, DSP, kicking the UBC, etc. By the time start_kernel() is
190  * hit (and subsequently platform_setup()) things like determining the
191  * CPU subtype and initial configuration will all be done.
192  *
193  * Each processor family is still responsible for doing its own probing
194  * and cache configuration in detect_cpu_and_cache_system().
195  */
196 asmlinkage void __init sh_cpu_init(void)
197 {
198 	/* First, probe the CPU */
199 	detect_cpu_and_cache_system();
200 
201 	/* Init the cache */
202 	cache_init();
203 
204 	shm_align_mask = max_t(unsigned long,
205 			       cpu_data->dcache.way_size - 1,
206 			       PAGE_SIZE - 1);
207 
208 	/* Disable the FPU */
209 	if (fpu_disabled) {
210 		printk("FPU Disabled\n");
211 		cpu_data->flags &= ~CPU_HAS_FPU;
212 		disable_fpu();
213 	}
214 
215 	/* FPU initialization */
216 	if ((cpu_data->flags & CPU_HAS_FPU)) {
217 		clear_thread_flag(TIF_USEDFPU);
218 		clear_used_math();
219 	}
220 
221 #ifdef CONFIG_SH_DSP
222 	/* Probe for DSP */
223 	dsp_init();
224 
225 	/* Disable the DSP */
226 	if (dsp_disabled) {
227 		printk("DSP Disabled\n");
228 		cpu_data->flags &= ~CPU_HAS_DSP;
229 		release_dsp();
230 	}
231 #endif
232 
233 #ifdef CONFIG_UBC_WAKEUP
234 	/*
235 	 * Some brain-damaged loaders decided it would be a good idea to put
236 	 * the UBC to sleep. This causes some issues when it comes to things
237 	 * like PTRACE_SINGLESTEP or doing hardware watchpoints in GDB.  So ..
238 	 * we wake it up and hope that all is well.
239 	 */
240 	ubc_wakeup();
241 #endif
242 }
243 
244