/*
 * arch/sh/kernel/cpu/init.c
 *
 * CPU init code
 *
 * Copyright (C) 2002, 2003 Paul Mundt
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License. See the file "COPYING" in the main directory of this archive
 * for more details.
 */
#include <linux/init.h>
#include <linux/kernel.h>
#include <asm/processor.h>
#include <asm/uaccess.h>
#include <asm/system.h>
#include <asm/cacheflush.h>
#include <asm/cache.h>
#include <asm/io.h>

extern void detect_cpu_and_cache_system(void);

/*
 * Generic wrapper for command line arguments to disable on-chip
 * peripherals (nofpu, nodsp, and so forth).
 *
 * The generated __setup() handler returns 1 so that the option is
 * marked as handled and not passed on to init.
 */
#define onchip_setup(x)					\
static int x##_disabled __initdata = 0;			\
							\
static int __init x##_setup(char *opts)			\
{							\
	x##_disabled = 1;				\
	return 1;					\
}							\
__setup("no" __stringify(x), x##_setup);

onchip_setup(fpu);
onchip_setup(dsp);
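/*
 * Illustrative only: given the macro above, onchip_setup(fpu) expands to
 * roughly the following ("no" __stringify(fpu) concatenates to "nofpu"),
 * which is what ties the "nofpu" command line option to the fpu_disabled
 * flag tested in sh_cpu_init() below:
 *
 *	static int fpu_disabled __initdata = 0;
 *
 *	static int __init fpu_setup(char *opts)
 *	{
 *		fpu_disabled = 1;
 *		return 1;
 *	}
 *	__setup("nofpu", fpu_setup);
 */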
/*
 * Generic first-level cache init
 */
static void __init cache_init(void)
{
	unsigned long ccr, flags;

	if (cpu_data->type == CPU_SH_NONE)
		panic("Unknown CPU");

	jump_to_P2();
	ccr = ctrl_inl(CCR);

	/*
	 * If the cache is already enabled .. flush it.
	 */
	if (ccr & CCR_CACHE_ENABLE) {
		unsigned long ways, waysize, addrstart;

		waysize = cpu_data->dcache.sets;

		/*
		 * If the OC is already in RAM mode, we only have
		 * half of the entries to flush..
		 */
		if (ccr & CCR_CACHE_ORA)
			waysize >>= 1;

		waysize <<= cpu_data->dcache.entry_shift;

#ifdef CCR_CACHE_EMODE
		/* If EMODE is not set, we only have 1 way to flush. */
		if (!(ccr & CCR_CACHE_EMODE))
			ways = 1;
		else
#endif
			ways = cpu_data->dcache.ways;

		addrstart = CACHE_OC_ADDRESS_ARRAY;
		do {
			unsigned long addr;

			for (addr = addrstart;
			     addr < addrstart + waysize;
			     addr += cpu_data->dcache.linesz)
				ctrl_outl(0, addr);

			addrstart += cpu_data->dcache.way_incr;
		} while (--ways);
	}

	/*
	 * Default CCR values .. enable the caches
	 * and invalidate them immediately..
	 */
	flags = CCR_CACHE_ENABLE | CCR_CACHE_INVALIDATE;

#ifdef CCR_CACHE_EMODE
	/* Force EMODE if possible */
	if (cpu_data->dcache.ways > 1)
		flags |= CCR_CACHE_EMODE;
#endif

#ifdef CONFIG_SH_WRITETHROUGH
	/* Turn on Write-through caching */
	flags |= CCR_CACHE_WT;
#else
	/* .. or default to Write-back */
	flags |= CCR_CACHE_CB;
#endif

#ifdef CONFIG_SH_OCRAM
	/* Turn on OCRAM -- halve the OC */
	flags |= CCR_CACHE_ORA;
	cpu_data->dcache.sets >>= 1;
#endif

	ctrl_outl(flags, CCR);
	back_to_P1();
}

#ifdef CONFIG_SH_DSP
static void __init release_dsp(void)
{
	unsigned long sr;

	/* Clear SR.DSP bit */
	__asm__ __volatile__ (
		"stc\tsr, %0\n\t"
		"and\t%1, %0\n\t"
		"ldc\t%0, sr\n\t"
		: "=&r" (sr)
		: "r" (~SR_DSP)
	);
}

static void __init dsp_init(void)
{
	unsigned long sr;

	/*
	 * Set the SR.DSP bit, wait for one instruction, and then read
	 * back the SR value.
	 */
	__asm__ __volatile__ (
		"stc\tsr, %0\n\t"
		"or\t%1, %0\n\t"
		"ldc\t%0, sr\n\t"
		"nop\n\t"
		"stc\tsr, %0\n\t"
		: "=&r" (sr)
		: "r" (SR_DSP)
	);

	/* If the DSP bit is still set, this CPU has a DSP */
	if (sr & SR_DSP)
		cpu_data->flags |= CPU_HAS_DSP;

	/* Now that we've determined the DSP status, clear the DSP bit. */
	release_dsp();
}
#endif /* CONFIG_SH_DSP */

/**
 * sh_cpu_init
 *
 * This is our initial entry point for each CPU, and is invoked on the boot
 * CPU prior to calling start_kernel(). For SMP, a combination of this and
 * start_secondary() will bring up each processor to a ready state prior
 * to hand forking the idle loop.
 *
 * We do all of the basic processor init here, including setting up the
 * caches, FPU, DSP, kicking the UBC, etc. By the time start_kernel() is
 * hit (and subsequently platform_setup()) things like determining the
 * CPU subtype and initial configuration will all be done.
 *
 * Each processor family is still responsible for doing its own probing
 * and cache configuration in detect_cpu_and_cache_system().
 */
asmlinkage void __init sh_cpu_init(void)
{
	/* First, probe the CPU */
	detect_cpu_and_cache_system();

	/* Init the cache */
	cache_init();

	/* Disable the FPU */
	if (fpu_disabled) {
		printk("FPU Disabled\n");
		cpu_data->flags &= ~CPU_HAS_FPU;
		disable_fpu();
	}

	/* FPU initialization */
	if (cpu_data->flags & CPU_HAS_FPU) {
		clear_thread_flag(TIF_USEDFPU);
		clear_used_math();
	}

#ifdef CONFIG_SH_DSP
	/* Probe for DSP */
	dsp_init();

	/* Disable the DSP */
	if (dsp_disabled) {
		printk("DSP Disabled\n");
		cpu_data->flags &= ~CPU_HAS_DSP;
		release_dsp();
	}
#endif

#ifdef CONFIG_UBC_WAKEUP
	/*
	 * Some brain-damaged loaders decided it would be a good idea to put
	 * the UBC to sleep. This causes some issues when it comes to things
	 * like PTRACE_SINGLESTEP or doing hardware watchpoints in GDB. So ..
	 * we wake it up and hope that all is well.
	 */
	ubc_wakeup();
#endif
}