/*
 * arch/sh/kernel/smp.c
 *
 * SMP support for the SuperH processors.
 *
 * Copyright (C) 2002 - 2008 Paul Mundt
 * Copyright (C) 2006 - 2007 Akio Idehara
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License. See the file "COPYING" in the main directory of this archive
 * for more details.
 */
#include <linux/err.h>
#include <linux/cache.h>
#include <linux/cpumask.h>
#include <linux/delay.h>
#include <linux/init.h>
#include <linux/spinlock.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/cpu.h>
#include <linux/interrupt.h>
#include <asm/atomic.h>
#include <asm/processor.h>
#include <asm/system.h>
#include <asm/mmu_context.h>
#include <asm/smp.h>
#include <asm/cacheflush.h>
#include <asm/sections.h>

int __cpu_number_map[NR_CPUS];		/* Map physical to logical */
int __cpu_logical_map[NR_CPUS];		/* Map logical to physical */

static inline void __init smp_store_cpu_info(unsigned int cpu)
{
	struct sh_cpuinfo *c = cpu_data + cpu;

	memcpy(c, &boot_cpu_data, sizeof(struct sh_cpuinfo));

	c->loops_per_jiffy = loops_per_jiffy;
}

void __init smp_prepare_cpus(unsigned int max_cpus)
{
	unsigned int cpu = smp_processor_id();

	init_new_context(current, &init_mm);
	current_thread_info()->cpu = cpu;
	plat_prepare_cpus(max_cpus);

#ifndef CONFIG_HOTPLUG_CPU
	init_cpu_present(&cpu_possible_map);
#endif
}

void __devinit smp_prepare_boot_cpu(void)
{
	unsigned int cpu = smp_processor_id();

	__cpu_number_map[0] = cpu;
	__cpu_logical_map[0] = cpu;

	set_cpu_online(cpu, true);
	set_cpu_possible(cpu, true);
}

asmlinkage void __cpuinit start_secondary(void)
{
	unsigned int cpu;
	struct mm_struct *mm = &init_mm;

	atomic_inc(&mm->mm_count);
	atomic_inc(&mm->mm_users);
	current->active_mm = mm;
	BUG_ON(current->mm);
	enter_lazy_tlb(mm, current);

	per_cpu_trap_init();

	preempt_disable();

	notify_cpu_starting(smp_processor_id());

	local_irq_enable();

	cpu = smp_processor_id();

	/* Enable local timers */
	local_timer_setup(cpu);
	calibrate_delay();

	smp_store_cpu_info(cpu);

	cpu_set(cpu, cpu_online_map);

	cpu_idle();
}

extern struct {
	unsigned long sp;
	unsigned long bss_start;
	unsigned long bss_end;
	void *start_kernel_fn;
	void *cpu_init_fn;
	void *thread_info;
} stack_start;

int __cpuinit __cpu_up(unsigned int cpu)
{
	struct task_struct *tsk;
	unsigned long timeout;

	tsk = fork_idle(cpu);
	if (IS_ERR(tsk)) {
		printk(KERN_ERR "Failed forking idle task for cpu %d\n", cpu);
		return PTR_ERR(tsk);
	}

	/* Fill in data in head.S for secondary cpus */
	stack_start.sp = tsk->thread.sp;
	stack_start.thread_info = tsk->stack;
	stack_start.bss_start = 0; /* don't clear bss for secondary cpus */
	stack_start.start_kernel_fn = start_secondary;

	flush_cache_all();

	plat_start_cpu(cpu, (unsigned long)_stext);

	timeout = jiffies + HZ;
	while (time_before(jiffies, timeout)) {
		if (cpu_online(cpu))
			break;

		udelay(10);
	}

	if (cpu_online(cpu))
		return 0;

	return -ENOENT;
}
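/*
 * Illustration only, not part of this file's build: __cpu_up() above hands
 * boot parameters to head.S through stack_start and then asks the platform
 * to release the secondary CPU via plat_start_cpu().  A platform
 * implementation might look roughly like the sketch below; RESET_VECTOR()
 * and CPU_RELEASE are hypothetical placeholder registers, not real SuperH
 * definitions, so consult the actual CPU/board documentation.
 */
#if 0
void plat_start_cpu(unsigned int cpu, unsigned long entry_point)
{
	/* Point the secondary CPU's reset vector at the kernel entry;
	 * head.S then consumes the stack_start block filled in above. */
	__raw_writel(entry_point, RESET_VECTOR(cpu));	/* hypothetical */

	/* Kick the CPU out of reset so it starts fetching at the vector. */
	__raw_writel(1 << cpu, CPU_RELEASE);		/* hypothetical */
}
#endif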
void __init smp_cpus_done(unsigned int max_cpus)
{
	unsigned long bogosum = 0;
	int cpu;

	for_each_online_cpu(cpu)
		bogosum += cpu_data[cpu].loops_per_jiffy;

	printk(KERN_INFO "SMP: Total of %d processors activated "
	       "(%lu.%02lu BogoMIPS).\n", num_online_cpus(),
	       bogosum / (500000/HZ),
	       (bogosum / (5000/HZ)) % 100);
}

void smp_send_reschedule(int cpu)
{
	plat_send_ipi(cpu, SMP_MSG_RESCHEDULE);
}

static void stop_this_cpu(void *unused)
{
	cpu_clear(smp_processor_id(), cpu_online_map);
	local_irq_disable();

	for (;;)
		cpu_relax();
}

void smp_send_stop(void)
{
	smp_call_function(stop_this_cpu, 0, 0);
}

void arch_send_call_function_ipi_mask(const struct cpumask *mask)
{
	int cpu;

	for_each_cpu(cpu, mask)
		plat_send_ipi(cpu, SMP_MSG_FUNCTION);
}

void arch_send_call_function_single_ipi(int cpu)
{
	plat_send_ipi(cpu, SMP_MSG_FUNCTION_SINGLE);
}

void smp_timer_broadcast(const struct cpumask *mask)
{
	int cpu;

	for_each_cpu(cpu, mask)
		plat_send_ipi(cpu, SMP_MSG_TIMER);
}

static void ipi_timer(void)
{
	irq_enter();
	local_timer_interrupt();
	irq_exit();
}

void smp_message_recv(unsigned int msg)
{
	switch (msg) {
	case SMP_MSG_FUNCTION:
		generic_smp_call_function_interrupt();
		break;
	case SMP_MSG_RESCHEDULE:
		break;
	case SMP_MSG_FUNCTION_SINGLE:
		generic_smp_call_function_single_interrupt();
		break;
	case SMP_MSG_TIMER:
		ipi_timer();
		break;
	default:
		printk(KERN_WARNING "SMP %d: %s(): unknown IPI %d\n",
		       smp_processor_id(), __func__, msg);
		break;
	}
}

/* Not really SMP stuff ... */
int setup_profiling_timer(unsigned int multiplier)
{
	return 0;
}
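/*
 * Illustration only, not part of this file's build: platform code is
 * expected to demultiplex its inter-processor interrupt sources and
 * forward the SMP_MSG_* value to smp_message_recv() above.  A hypothetical
 * handler, with the message type stashed in the dev_id cookie at
 * request_irq() time, could look like this; the cookie encoding is an
 * assumption for the sketch, not a fixed interface.
 */
#if 0
static irqreturn_t ipi_interrupt_handler(int irq, void *arg)
{
	/* Hypothetical: the SMP_MSG_* value was passed as the cookie. */
	unsigned int message = (unsigned int)(unsigned long)arg;

	smp_message_recv(message);

	return IRQ_HANDLED;
}
#endif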
static void flush_tlb_all_ipi(void *info)
{
	local_flush_tlb_all();
}

void flush_tlb_all(void)
{
	on_each_cpu(flush_tlb_all_ipi, 0, 1);
}

static void flush_tlb_mm_ipi(void *mm)
{
	local_flush_tlb_mm((struct mm_struct *)mm);
}

/*
 * The following tlb flush calls are invoked when old translations are
 * being torn down, or pte attributes are changing. For single threaded
 * address spaces, a new context is obtained on the current cpu, and tlb
 * context on other cpus are invalidated to force a new context allocation
 * at switch_mm time, should the mm ever be used on other cpus. For
 * multithreaded address spaces, intercpu interrupts have to be sent.
 * Another case where intercpu interrupts are required is when the target
 * mm might be active on another cpu (eg debuggers doing the flushes on
 * behalf of debugees, kswapd stealing pages from another process etc).
 * Kanoj 07/00.
 */

void flush_tlb_mm(struct mm_struct *mm)
{
	preempt_disable();

	if ((atomic_read(&mm->mm_users) != 1) || (current->mm != mm)) {
		smp_call_function(flush_tlb_mm_ipi, (void *)mm, 1);
	} else {
		int i;
		for (i = 0; i < num_online_cpus(); i++)
			if (smp_processor_id() != i)
				cpu_context(i, mm) = 0;
	}
	local_flush_tlb_mm(mm);

	preempt_enable();
}

struct flush_tlb_data {
	struct vm_area_struct *vma;
	unsigned long addr1;
	unsigned long addr2;
};

static void flush_tlb_range_ipi(void *info)
{
	struct flush_tlb_data *fd = (struct flush_tlb_data *)info;

	local_flush_tlb_range(fd->vma, fd->addr1, fd->addr2);
}

void flush_tlb_range(struct vm_area_struct *vma,
		     unsigned long start, unsigned long end)
{
	struct mm_struct *mm = vma->vm_mm;

	preempt_disable();
	if ((atomic_read(&mm->mm_users) != 1) || (current->mm != mm)) {
		struct flush_tlb_data fd;

		fd.vma = vma;
		fd.addr1 = start;
		fd.addr2 = end;
		smp_call_function(flush_tlb_range_ipi, (void *)&fd, 1);
	} else {
		int i;
		for (i = 0; i < num_online_cpus(); i++)
			if (smp_processor_id() != i)
				cpu_context(i, mm) = 0;
	}
	local_flush_tlb_range(vma, start, end);
	preempt_enable();
}

static void flush_tlb_kernel_range_ipi(void *info)
{
	struct flush_tlb_data *fd = (struct flush_tlb_data *)info;

	local_flush_tlb_kernel_range(fd->addr1, fd->addr2);
}

void flush_tlb_kernel_range(unsigned long start, unsigned long end)
{
	struct flush_tlb_data fd;

	fd.addr1 = start;
	fd.addr2 = end;
	on_each_cpu(flush_tlb_kernel_range_ipi, (void *)&fd, 1);
}

static void flush_tlb_page_ipi(void *info)
{
	struct flush_tlb_data *fd = (struct flush_tlb_data *)info;

	local_flush_tlb_page(fd->vma, fd->addr1);
}

void flush_tlb_page(struct vm_area_struct *vma, unsigned long page)
{
	preempt_disable();
	if ((atomic_read(&vma->vm_mm->mm_users) != 1) ||
	    (current->mm != vma->vm_mm)) {
		struct flush_tlb_data fd;

		fd.vma = vma;
		fd.addr1 = page;
		smp_call_function(flush_tlb_page_ipi, (void *)&fd, 1);
	} else {
		int i;
		for (i = 0; i < num_online_cpus(); i++)
			if (smp_processor_id() != i)
				cpu_context(i, vma->vm_mm) = 0;
	}
	local_flush_tlb_page(vma, page);
	preempt_enable();
}

static void flush_tlb_one_ipi(void *info)
{
	struct flush_tlb_data *fd = (struct flush_tlb_data *)info;
	local_flush_tlb_one(fd->addr1, fd->addr2);
}

void flush_tlb_one(unsigned long asid, unsigned long vaddr)
{
	struct flush_tlb_data fd;

	fd.addr1 = asid;
	fd.addr2 = vaddr;

	smp_call_function(flush_tlb_one_ipi, (void *)&fd, 1);
	local_flush_tlb_one(asid, vaddr);
}
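/*
 * Illustration only, not part of this file's build: the single-threaded
 * fast path in the flushers above clears cpu_context() on the other cpus
 * instead of sending IPIs, relying on the ASID allocator to notice the
 * stale value at switch_mm() time and hand out a fresh context, which
 * implicitly drops the old translations.  A simplified sketch of that
 * check follows; the real logic lives in asm/mmu_context.h and may differ
 * in detail.
 */
#if 0
static void get_mmu_context_sketch(struct mm_struct *mm, unsigned int cpu)
{
	unsigned long asid = asid_cache(cpu);

	/* A context from the current ASID generation is still valid. */
	if (((cpu_context(cpu, mm) ^ asid) & MMU_CONTEXT_VERSION_MASK) == 0)
		return;

	/* Otherwise (including cpu_context() == 0 as set by the flushers
	 * above), allocate a new ASID for this mm on this cpu. */
	cpu_context(cpu, mm) = ++asid_cache(cpu);	/* simplified */
}
#endif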