--- smp.c (f615136c06a791364f5afa8b8ba965315a6440f1)
+++ smp.c (49b424fedaf88d0fa9913082b8c1ccd012a8a972)
/*
 * Xtensa SMP support functions.
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 2008 - 2013 Tensilica Inc.

[... 26 unchanged lines hidden ...]

#include <asm/traps.h>

#ifdef CONFIG_SMP
# if XCHAL_HAVE_S32C1I == 0
#  error "The S32C1I option is required for SMP."
# endif
#endif

+static void system_invalidate_dcache_range(unsigned long start,
+		unsigned long size);
+static void system_flush_invalidate_dcache_range(unsigned long start,
+		unsigned long size);
+
/* IPI (Inter Process Interrupt) */

#define IPI_IRQ		0

static irqreturn_t ipi_interrupt(int irq, void *dev_id);
static struct irqaction ipi_irqaction = {
	.handler = ipi_interrupt,
	.flags = IRQF_PERCPU,

[... 50 unchanged lines hidden ...]

void __init smp_cpus_done(unsigned int max_cpus)
{
}

static int boot_secondary_processors = 1; /* Set with xt-gdb via .xt-gdb */
static DECLARE_COMPLETION(cpu_running);

-void __init secondary_start_kernel(void)
+void secondary_start_kernel(void)
{
	struct mm_struct *mm = &init_mm;
	unsigned int cpu = smp_processor_id();

	init_mmu();

#ifdef CONFIG_DEBUG_KERNEL
	if (boot_secondary_processors == 0) {

[... 51 unchanged lines hidden ...]

	unsigned cpu = (unsigned)p;
	unsigned long run_stall_mask = get_er(MPSCORE);

	set_er(run_stall_mask | (1u << cpu), MPSCORE);
	pr_debug("%s: cpu: %d, run_stall_mask: %lx ---> %lx\n",
		 __func__, cpu, run_stall_mask, get_er(MPSCORE));
}

+#ifdef CONFIG_HOTPLUG_CPU
+unsigned long cpu_start_id __cacheline_aligned;
+#endif
unsigned long cpu_start_ccount;

static int boot_secondary(unsigned int cpu, struct task_struct *ts)
{
	unsigned long timeout = jiffies + msecs_to_jiffies(1000);
	unsigned long ccount;
	int i;

+#ifdef CONFIG_HOTPLUG_CPU
+	cpu_start_id = cpu;
+	system_flush_invalidate_dcache_range(
+			(unsigned long)&cpu_start_id, sizeof(cpu_start_id));
+#endif
	smp_call_function_single(0, mx_cpu_start, (void *)cpu, 1);

	for (i = 0; i < 2; ++i) {
		do
			ccount = get_ccount();
		while (!ccount);

		cpu_start_ccount = ccount;

[... 36 unchanged lines hidden ...]

	}

	if (ret)
		pr_err("CPU %u failed to boot\n", cpu);

	return ret;
}

+#ifdef CONFIG_HOTPLUG_CPU
+
+/*
+ * __cpu_disable runs on the processor to be shutdown.
+ */
+int __cpu_disable(void)
+{
+	unsigned int cpu = smp_processor_id();
+
+	/*
+	 * Take this CPU offline.  Once we clear this, we can't return,
+	 * and we must not schedule until we're ready to give up the cpu.
+	 */
+	set_cpu_online(cpu, false);
+
+	/*
+	 * OK - migrate IRQs away from this CPU
+	 */
+	migrate_irqs();
+
+	/*
+	 * Flush user cache and TLB mappings, and then remove this CPU
+	 * from the vm mask set of all processes.
+	 */
+	local_flush_cache_all();
+	local_flush_tlb_all();
+	invalidate_page_directory();
+
+	clear_tasks_mm_cpumask(cpu);
+
+	return 0;
+}
+
+static void platform_cpu_kill(unsigned int cpu)
+{
+	smp_call_function_single(0, mx_cpu_stop, (void *)cpu, true);
+}
+
+/*
+ * called on the thread which is asking for a CPU to be shutdown -
+ * waits until shutdown has completed, or it is timed out.
+ */
+void __cpu_die(unsigned int cpu)
+{
+	unsigned long timeout = jiffies + msecs_to_jiffies(1000);
+	while (time_before(jiffies, timeout)) {
+		system_invalidate_dcache_range((unsigned long)&cpu_start_id,
+				sizeof(cpu_start_id));
+		if (cpu_start_id == -cpu) {
+			platform_cpu_kill(cpu);
+			return;
+		}
+	}
+	pr_err("CPU%u: unable to kill\n", cpu);
+}
+
+void arch_cpu_idle_dead(void)
+{
+	cpu_die();
+}
+/*
+ * Called from the idle thread for the CPU which has been shutdown.
+ *
+ * Note that we disable IRQs here, but do not re-enable them
+ * before returning to the caller. This is also the behaviour
+ * of the other hotplug-cpu capable cores, so presumably coming
+ * out of idle fixes this.
+ */
+void __ref cpu_die(void)
+{
+	idle_task_exit();
+	local_irq_disable();
+	__asm__ __volatile__(
+			"	movi	a2, cpu_restart\n"
+			"	jx	a2\n");
+}
+
+#endif /* CONFIG_HOTPLUG_CPU */
+
enum ipi_msg_type {
	IPI_RESCHEDULE = 0,
	IPI_CALL_FUNC,
	IPI_CPU_STOP,
	IPI_MAX
};

static const struct {

[... 213 unchanged lines hidden ...]

void flush_icache_range(unsigned long start, unsigned long end)
{
	struct flush_data fd = {
		.addr1 = start,
		.addr2 = end,
	};
	on_each_cpu(ipi_flush_icache_range, &fd, 1);
}
+
+/* ------------------------------------------------------------------------- */
+
+static void ipi_invalidate_dcache_range(void *arg)
+{
+	struct flush_data *fd = arg;
+	__invalidate_dcache_range(fd->addr1, fd->addr2);
+}
+
+static void system_invalidate_dcache_range(unsigned long start,
+		unsigned long size)
+{
+	struct flush_data fd = {
+		.addr1 = start,
+		.addr2 = size,
+	};
+	on_each_cpu(ipi_invalidate_dcache_range, &fd, 1);
+}
+
+static void ipi_flush_invalidate_dcache_range(void *arg)
+{
+	struct flush_data *fd = arg;
+	__flush_invalidate_dcache_range(fd->addr1, fd->addr2);
+}
+
+static void system_flush_invalidate_dcache_range(unsigned long start,
+		unsigned long size)
+{
+	struct flush_data fd = {
+		.addr1 = start,
+		.addr2 = size,
+	};
+	on_each_cpu(ipi_flush_invalidate_dcache_range, &fd, 1);
+}