/*
 * arch/sh/kernel/cpu/init.c
 *
 * CPU init code
 *
 * Copyright (C) 2002 - 2007  Paul Mundt
 * Copyright (C) 2003  Richard Curnow
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 */
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <asm/mmu_context.h>
#include <asm/processor.h>
#include <asm/uaccess.h>
#include <asm/page.h>
#include <asm/system.h>
#include <asm/cacheflush.h>
#include <asm/cache.h>
#include <asm/io.h>
#include <asm/ubc.h>

/*
 * Generic wrapper for command line arguments to disable on-chip
 * peripherals (nofpu, nodsp, and so forth).
 */
#define onchip_setup(x)				\
static int x##_disabled __initdata = 0;		\
						\
static int __init x##_setup(char *opts)		\
{						\
	x##_disabled = 1;			\
	return 1;				\
}						\
__setup("no" __stringify(x), x##_setup);

onchip_setup(fpu);
onchip_setup(dsp);

#ifdef CONFIG_SPECULATIVE_EXECUTION
#define CPUOPM		0xff2f0000
#define CPUOPM_RABD	(1 << 5)

static void __init speculative_execution_init(void)
{
	/* Clear RABD */
	ctrl_outl(ctrl_inl(CPUOPM) & ~CPUOPM_RABD, CPUOPM);

	/* Flush the update */
	(void)ctrl_inl(CPUOPM);
	ctrl_barrier();
}
#else
#define speculative_execution_init()	do { } while (0)
#endif

/*
 * Generic first-level cache init
 */
static void __init cache_init(void)
{
	unsigned long ccr, flags;

	/* First setup the rest of the I-cache info */
	current_cpu_data.icache.entry_mask = current_cpu_data.icache.way_incr -
					     current_cpu_data.icache.linesz;

	current_cpu_data.icache.way_size = current_cpu_data.icache.sets *
					   current_cpu_data.icache.linesz;

	/* And the D-cache too */
	current_cpu_data.dcache.entry_mask = current_cpu_data.dcache.way_incr -
					     current_cpu_data.dcache.linesz;

	current_cpu_data.dcache.way_size = current_cpu_data.dcache.sets *
					   current_cpu_data.dcache.linesz;

	jump_to_P2();
	ccr = ctrl_inl(CCR);

	/*
	 * At this point we don't know whether the cache is enabled or not -
	 * a bootloader may have enabled it.  There are at least 2 things
	 * that could be dirty in the cache at this point:
	 *
	 *  1. kernel command line set up by boot loader
	 *  2. spilled registers from the prolog of this function
	 *
	 * => before re-initialising the cache, we must do a purge of the
	 * whole cache out to memory for safety.  As long as nothing is
	 * spilled during the loop to lines that have already been done,
	 * this is safe.
	 * - RPC
	 */
	if (ccr & CCR_CACHE_ENABLE) {
		unsigned long ways, waysize, addrstart;

		waysize = current_cpu_data.dcache.sets;

#ifdef CCR_CACHE_ORA
		/*
		 * If the OC is already in RAM mode, we only have
		 * half of the entries to flush..
		 */
		if (ccr & CCR_CACHE_ORA)
			waysize >>= 1;
#endif

		waysize <<= current_cpu_data.dcache.entry_shift;

#ifdef CCR_CACHE_EMODE
		/* If EMODE is not set, we only have 1 way to flush. */
		if (!(ccr & CCR_CACHE_EMODE))
			ways = 1;
		else
#endif
			ways = current_cpu_data.dcache.ways;

		addrstart = CACHE_OC_ADDRESS_ARRAY;
		do {
			unsigned long addr;

			for (addr = addrstart;
			     addr < addrstart + waysize;
			     addr += current_cpu_data.dcache.linesz)
				ctrl_outl(0, addr);

			addrstart += current_cpu_data.dcache.way_incr;
		} while (--ways);
	}

	/*
	 * Default CCR values .. enable the caches
	 * and invalidate them immediately..
	 */
	flags = CCR_CACHE_ENABLE | CCR_CACHE_INVALIDATE;

#ifdef CCR_CACHE_EMODE
	/* Force EMODE if possible */
	if (current_cpu_data.dcache.ways > 1)
		flags |= CCR_CACHE_EMODE;
	else
		flags &= ~CCR_CACHE_EMODE;
#endif

#ifdef CONFIG_SH_WRITETHROUGH
	/* Turn on Write-through caching */
	flags |= CCR_CACHE_WT;
#else
	/* .. or default to Write-back */
	flags |= CCR_CACHE_CB;
#endif

	ctrl_outl(flags, CCR);
	back_to_P1();
}
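
/*
 * DSP probe: SR.DSP only sticks on DSP-capable parts, so dsp_init()
 * below sets the bit, reads SR back after a nop, and treats a
 * still-set bit as evidence of a DSP. release_dsp() simply clears
 * the bit again once the probe (or a "nodsp" request) is done.
 */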
#ifdef CONFIG_SH_DSP
static void __init release_dsp(void)
{
	unsigned long sr;

	/* Clear SR.DSP bit */
	__asm__ __volatile__ (
		"stc\tsr, %0\n\t"
		"and\t%1, %0\n\t"
		"ldc\t%0, sr\n\t"
		: "=&r" (sr)
		: "r" (~SR_DSP)
	);
}

static void __init dsp_init(void)
{
	unsigned long sr;

	/*
	 * Set the SR.DSP bit, wait for one instruction, and then read
	 * back the SR value.
	 */
	__asm__ __volatile__ (
		"stc\tsr, %0\n\t"
		"or\t%1, %0\n\t"
		"ldc\t%0, sr\n\t"
		"nop\n\t"
		"stc\tsr, %0\n\t"
		: "=&r" (sr)
		: "r" (SR_DSP)
	);

	/* If the DSP bit is still set, this CPU has a DSP */
	if (sr & SR_DSP)
		current_cpu_data.flags |= CPU_HAS_DSP;

	/* Now that we've determined the DSP status, clear the DSP bit. */
	release_dsp();
}
#endif /* CONFIG_SH_DSP */

/**
 * sh_cpu_init
 *
 * This is our initial entry point for each CPU, and is invoked on the boot
 * CPU prior to calling start_kernel(). For SMP, a combination of this and
 * start_secondary() will bring up each processor to a ready state prior
 * to hand forking the idle loop.
 *
 * We do all of the basic processor init here, including setting up the
 * caches, FPU, DSP, kicking the UBC, etc. By the time start_kernel() is
 * hit (and subsequently platform_setup()) things like determining the
 * CPU subtype and initial configuration will all be done.
 *
 * Each processor family is still responsible for doing its own probing
 * and cache configuration in detect_cpu_and_cache_system().
 */
asmlinkage void __init sh_cpu_init(void)
{
	/* First, probe the CPU */
	detect_cpu_and_cache_system();

	if (current_cpu_data.type == CPU_SH_NONE)
		panic("Unknown CPU");

	/* Init the cache */
	cache_init();
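
	/*
	 * Shared mappings are aligned to the worst-case cache colour
	 * boundary: on parts where a D-cache way spans more than a
	 * page this avoids virtual aliases; otherwise plain page
	 * alignment suffices.
	 */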
	shm_align_mask = max_t(unsigned long,
			       current_cpu_data.dcache.way_size - 1,
			       PAGE_SIZE - 1);

	/* Disable the FPU */
	if (fpu_disabled) {
		printk("FPU Disabled\n");
		current_cpu_data.flags &= ~CPU_HAS_FPU;
		disable_fpu();
	}

	/* FPU initialization */
	if ((current_cpu_data.flags & CPU_HAS_FPU)) {
		clear_thread_flag(TIF_USEDFPU);
		clear_used_math();
	}

	/*
	 * Initialize the per-CPU ASID cache very early, since the
	 * TLB flushing routines depend on this being setup.
	 */
	current_cpu_data.asid_cache = NO_CONTEXT;

#ifdef CONFIG_SH_DSP
	/* Probe for DSP */
	dsp_init();

	/* Disable the DSP */
	if (dsp_disabled) {
		printk("DSP Disabled\n");
		current_cpu_data.flags &= ~CPU_HAS_DSP;
		release_dsp();
	}
#endif

	/*
	 * Some brain-damaged loaders decided it would be a good idea to put
	 * the UBC to sleep. This causes some issues when it comes to things
	 * like PTRACE_SINGLESTEP or doing hardware watchpoints in GDB.  So ..
	 * we wake it up and hope that all is well.
	 */
	ubc_wakeup();
	speculative_execution_init();
}