/* smp.c: Sparc SMP support.
 *
 * Copyright (C) 1996 David S. Miller (davem@caip.rutgers.edu)
 * Copyright (C) 1998 Jakub Jelinek (jj@sunsite.mff.cuni.cz)
 * Copyright (C) 2004 Keith M Wesolowski (wesolows@foobazco.org)
 */

#include <asm/head.h>

#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/threads.h>
#include <linux/smp.h>
#include <linux/interrupt.h>
#include <linux/kernel_stat.h>
#include <linux/init.h>
#include <linux/spinlock.h>
#include <linux/mm.h>
#include <linux/fs.h>
#include <linux/seq_file.h>
#include <linux/cache.h>
#include <linux/delay.h>

#include <asm/ptrace.h>
#include <asm/atomic.h>

#include <asm/irq.h>
#include <asm/page.h>
#include <asm/pgalloc.h>
#include <asm/pgtable.h>
#include <asm/oplib.h>
#include <asm/cacheflush.h>
#include <asm/tlbflush.h>
#include <asm/cpudata.h>
#include <asm/leon.h>

#include "irq.h"

volatile unsigned long cpu_callin_map[NR_CPUS] __cpuinitdata = {0,};
unsigned char boot_cpu_id = 0;
unsigned char boot_cpu_id4 = 0; /* boot_cpu_id << 2 */

cpumask_t smp_commenced_mask = CPU_MASK_NONE;

/* The only guaranteed locking primitive available on all Sparc
 * processors is 'ldstub [%reg + immediate], %dest_reg' which atomically
 * places the current byte at the effective address into dest_reg and
 * places 0xff there afterwards.  Pretty lame locking primitive
 * compared to the Alpha and the Intel, no?  Most Sparcs have a 'swap'
 * instruction, which is much better...
 */
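/* Illustrative only: a minimal sketch of how 'ldstub' gives a
 * test-and-set primitive.  This helper is hypothetical, is not used
 * anywhere in this file, and is not the kernel's real spinlock
 * implementation (see asm/spinlock_32.h for that).
 */
static inline int ldstub_example_trylock(unsigned char *lock)
{
	unsigned char prev;

	/* Atomically fetch the old byte at *lock and store 0xff there. */
	__asm__ __volatile__("ldstub	[%1], %0"
			     : "=&r" (prev)
			     : "r" (lock)
			     : "memory");

	/* A previous value of zero means we acquired the lock. */
	return prev == 0;
}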
void __cpuinit smp_store_cpu_info(int id)
{
	int cpu_node;

	cpu_data(id).udelay_val = loops_per_jiffy;

	cpu_find_by_mid(id, &cpu_node);
	cpu_data(id).clock_tick = prom_getintdefault(cpu_node,
						     "clock-frequency", 0);
	cpu_data(id).prom_node = cpu_node;
	cpu_data(id).mid = cpu_get_hwmid(cpu_node);

	if (cpu_data(id).mid < 0)
		panic("No MID found for CPU%d at node 0x%08x", id, cpu_node);
}

void __init smp_cpus_done(unsigned int max_cpus)
{
	extern void smp4m_smp_done(void);
	extern void smp4d_smp_done(void);
	unsigned long bogosum = 0;
	int cpu, num = 0;

	for_each_online_cpu(cpu) {
		num++;
		bogosum += cpu_data(cpu).udelay_val;
	}

	printk("Total of %d processors activated (%lu.%02lu BogoMIPS).\n",
		num, bogosum/(500000/HZ),
		(bogosum/(5000/HZ))%100);

	switch (sparc_cpu_model) {
	case sun4:
		printk("SUN4\n");
		BUG();
		break;
	case sun4c:
		printk("SUN4C\n");
		BUG();
		break;
	case sun4m:
		smp4m_smp_done();
		break;
	case sun4d:
		smp4d_smp_done();
		break;
	case sparc_leon:
		leon_smp_done();
		break;
	case sun4e:
		printk("SUN4E\n");
		BUG();
		break;
	case sun4u:
		printk("SUN4U\n");
		BUG();
		break;
	default:
		printk("UNKNOWN!\n");
		BUG();
		break;
	}
}

void cpu_panic(void)
{
	printk("CPU[%d]: Returns from cpu_idle!\n", smp_processor_id());
	panic("SMP bolixed\n");
}

struct linux_prom_registers smp_penguin_ctable __cpuinitdata = { 0 };

void smp_send_reschedule(int cpu)
{
	/* See sparc64 */
}

void smp_send_stop(void)
{
}

void smp_flush_cache_all(void)
{
	xc0((smpfunc_t) BTFIXUP_CALL(local_flush_cache_all));
	local_flush_cache_all();
}

void smp_flush_tlb_all(void)
{
	xc0((smpfunc_t) BTFIXUP_CALL(local_flush_tlb_all));
	local_flush_tlb_all();
}
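/* The MM/VMA flush helpers below follow a common pattern: take a copy
 * of the mm's CPU mask, clear the calling CPU from it, cross-call the
 * remaining CPUs (if any) to run the corresponding local flush, and
 * finally perform the flush on this CPU as well.
 */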
void smp_flush_cache_mm(struct mm_struct *mm)
{
	if (mm->context != NO_CONTEXT) {
		cpumask_t cpu_mask = *mm_cpumask(mm);
		cpu_clear(smp_processor_id(), cpu_mask);
		if (!cpus_empty(cpu_mask))
			xc1((smpfunc_t) BTFIXUP_CALL(local_flush_cache_mm), (unsigned long) mm);
		local_flush_cache_mm(mm);
	}
}

void smp_flush_tlb_mm(struct mm_struct *mm)
{
	if (mm->context != NO_CONTEXT) {
		cpumask_t cpu_mask = *mm_cpumask(mm);
		cpu_clear(smp_processor_id(), cpu_mask);
		if (!cpus_empty(cpu_mask)) {
			xc1((smpfunc_t) BTFIXUP_CALL(local_flush_tlb_mm), (unsigned long) mm);
			if (atomic_read(&mm->mm_users) == 1 && current->active_mm == mm)
				cpumask_copy(mm_cpumask(mm),
					     cpumask_of(smp_processor_id()));
		}
		local_flush_tlb_mm(mm);
	}
}

void smp_flush_cache_range(struct vm_area_struct *vma, unsigned long start,
			   unsigned long end)
{
	struct mm_struct *mm = vma->vm_mm;

	if (mm->context != NO_CONTEXT) {
		cpumask_t cpu_mask = *mm_cpumask(mm);
		cpu_clear(smp_processor_id(), cpu_mask);
		if (!cpus_empty(cpu_mask))
			xc3((smpfunc_t) BTFIXUP_CALL(local_flush_cache_range), (unsigned long) vma, start, end);
		local_flush_cache_range(vma, start, end);
	}
}

void smp_flush_tlb_range(struct vm_area_struct *vma, unsigned long start,
			 unsigned long end)
{
	struct mm_struct *mm = vma->vm_mm;

	if (mm->context != NO_CONTEXT) {
		cpumask_t cpu_mask = *mm_cpumask(mm);
		cpu_clear(smp_processor_id(), cpu_mask);
		if (!cpus_empty(cpu_mask))
			xc3((smpfunc_t) BTFIXUP_CALL(local_flush_tlb_range), (unsigned long) vma, start, end);
		local_flush_tlb_range(vma, start, end);
	}
}

void smp_flush_cache_page(struct vm_area_struct *vma, unsigned long page)
{
	struct mm_struct *mm = vma->vm_mm;

	if (mm->context != NO_CONTEXT) {
		cpumask_t cpu_mask = *mm_cpumask(mm);
		cpu_clear(smp_processor_id(), cpu_mask);
		if (!cpus_empty(cpu_mask))
			xc2((smpfunc_t) BTFIXUP_CALL(local_flush_cache_page), (unsigned long) vma, page);
		local_flush_cache_page(vma, page);
	}
}

void smp_flush_tlb_page(struct vm_area_struct *vma, unsigned long page)
{
	struct mm_struct *mm = vma->vm_mm;

	if (mm->context != NO_CONTEXT) {
		cpumask_t cpu_mask = *mm_cpumask(mm);
		cpu_clear(smp_processor_id(), cpu_mask);
		if (!cpus_empty(cpu_mask))
			xc2((smpfunc_t) BTFIXUP_CALL(local_flush_tlb_page), (unsigned long) vma, page);
		local_flush_tlb_page(vma, page);
	}
}

void smp_reschedule_irq(void)
{
	set_need_resched();
}

void smp_flush_page_to_ram(unsigned long page)
{
	/* Current theory is that those who call this are the ones
	 * who have just dirtied their cache with the page's contents
	 * in kernel space, therefore we only run this on the local cpu.
	 *
	 * XXX This experiment failed, research further... -DaveM
	 */
#if 1
	xc1((smpfunc_t) BTFIXUP_CALL(local_flush_page_to_ram), page);
#endif
	local_flush_page_to_ram(page);
}

void smp_flush_sig_insns(struct mm_struct *mm, unsigned long insn_addr)
{
	cpumask_t cpu_mask = *mm_cpumask(mm);
	cpu_clear(smp_processor_id(), cpu_mask);
	if (!cpus_empty(cpu_mask))
		xc2((smpfunc_t) BTFIXUP_CALL(local_flush_sig_insns), (unsigned long) mm, insn_addr);
	local_flush_sig_insns(mm, insn_addr);
}

extern unsigned int lvl14_resolution;

/* /proc/profile writes can call this, don't __init it please. */
static DEFINE_SPINLOCK(prof_setup_lock);

int setup_profiling_timer(unsigned int multiplier)
{
	int i;
	unsigned long flags;

	/* Prevent level14 ticker IRQ flooding. */
	if ((!multiplier) || (lvl14_resolution / multiplier) < 500)
		return -EINVAL;

	spin_lock_irqsave(&prof_setup_lock, flags);
	for_each_possible_cpu(i) {
		load_profile_irq(i, lvl14_resolution / multiplier);
		prof_multiplier(i) = multiplier;
	}
	spin_unlock_irqrestore(&prof_setup_lock, flags);

	return 0;
}

void __init smp_prepare_cpus(unsigned int max_cpus)
{
	extern void __init smp4m_boot_cpus(void);
	extern void __init smp4d_boot_cpus(void);
	int i, cpuid, extra;

	printk("Entering SMP Mode...\n");

	extra = 0;
	for (i = 0; !cpu_find_by_instance(i, NULL, &cpuid); i++) {
		if (cpuid >= NR_CPUS)
			extra++;
	}
	/* i = number of cpus */
	if (extra && max_cpus > i - extra)
		printk("Warning: NR_CPUS is too low to start all cpus\n");

	smp_store_cpu_info(boot_cpu_id);

	switch (sparc_cpu_model) {
	case sun4:
		printk("SUN4\n");
		BUG();
		break;
	case sun4c:
		printk("SUN4C\n");
		BUG();
		break;
	case sun4m:
		smp4m_boot_cpus();
		break;
	case sun4d:
		smp4d_boot_cpus();
		break;
	case sparc_leon:
		leon_boot_cpus();
		break;
	case sun4e:
		printk("SUN4E\n");
		BUG();
		break;
	case sun4u:
		printk("SUN4U\n");
		BUG();
		break;
	default:
		printk("UNKNOWN!\n");
		BUG();
		break;
	}
}

/* Set this up early so that things like the scheduler can init
 * properly.  We use the same cpu mask for both the present and
 * possible cpu map.
 */
void __init smp_setup_cpu_possible_map(void)
{
	int instance, mid;

	instance = 0;
	while (!cpu_find_by_instance(instance, NULL, &mid)) {
		if (mid < NR_CPUS) {
			set_cpu_possible(mid, true);
			set_cpu_present(mid, true);
		}
		instance++;
	}
}

void __init smp_prepare_boot_cpu(void)
{
	int cpuid = hard_smp_processor_id();

	if (cpuid >= NR_CPUS) {
		prom_printf("Serious problem, boot cpu id >= NR_CPUS\n");
		prom_halt();
	}
	if (cpuid != 0)
		printk("boot cpu id != 0, this could work but is untested\n");

	current_thread_info()->cpu = cpuid;
	set_cpu_online(cpuid, true);
	set_cpu_possible(cpuid, true);
}

int __cpuinit __cpu_up(unsigned int cpu)
{
	extern int __cpuinit smp4m_boot_one_cpu(int);
	extern int __cpuinit smp4d_boot_one_cpu(int);
	int ret = 0;

	switch (sparc_cpu_model) {
	case sun4:
		printk("SUN4\n");
		BUG();
		break;
	case sun4c:
		printk("SUN4C\n");
		BUG();
		break;
	case sun4m:
		ret = smp4m_boot_one_cpu(cpu);
		break;
	case sun4d:
		ret = smp4d_boot_one_cpu(cpu);
		break;
	case sparc_leon:
		ret = leon_boot_one_cpu(cpu);
		break;
	case sun4e:
		printk("SUN4E\n");
		BUG();
		break;
	case sun4u:
		printk("SUN4U\n");
		BUG();
		break;
	default:
		printk("UNKNOWN!\n");
		BUG();
		break;
	}

	if (!ret) {
		/* Let the freshly booted CPU proceed, then wait until it
		 * has marked itself online.
		 */
		cpu_set(cpu, smp_commenced_mask);
		while (!cpu_online(cpu))
			mb();
	}
	return ret;
}

void smp_bogo(struct seq_file *m)
{
	int i;

	for_each_online_cpu(i) {
		seq_printf(m,
			   "Cpu%dBogo\t: %lu.%02lu\n",
			   i,
			   cpu_data(i).udelay_val/(500000/HZ),
			   (cpu_data(i).udelay_val/(5000/HZ))%100);
	}
}
void smp_info(struct seq_file *m)
{
	int i;

	seq_printf(m, "State:\n");
	for_each_online_cpu(i)
		seq_printf(m, "CPU%d\t\t: online\n", i);
}