/*
 * Xtensa SMP support functions.
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 2008 - 2013 Tensilica Inc.
 *
 * Chris Zankel <chris@zankel.net>
 * Joe Taylor <joe@tensilica.com>
 * Pete Delaney <piet@tensilica.com>
 */

#include <linux/cpu.h>
#include <linux/cpumask.h>
#include <linux/delay.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/irqdomain.h>
#include <linux/irq.h>
#include <linux/kdebug.h>
#include <linux/module.h>
#include <linux/reboot.h>
#include <linux/seq_file.h>
#include <linux/smp.h>
#include <linux/thread_info.h>

#include <asm/cacheflush.h>
#include <asm/kdebug.h>
#include <asm/mmu_context.h>
#include <asm/mxregs.h>
#include <asm/platform.h>
#include <asm/tlbflush.h>
#include <asm/traps.h>

#ifdef CONFIG_SMP
# if XCHAL_HAVE_S32C1I == 0
#  error "The S32C1I option is required for SMP."
# endif
#endif

/* IPI (Inter-Processor Interrupt) */

#define IPI_IRQ	0

static irqreturn_t ipi_interrupt(int irq, void *dev_id);
static struct irqaction ipi_irqaction = {
	.handler = ipi_interrupt,
	.flags = IRQF_PERCPU,
	.name = "ipi",
};

void ipi_init(void)
{
	unsigned irq = irq_create_mapping(NULL, IPI_IRQ);
	setup_irq(irq, &ipi_irqaction);
}

static inline unsigned int get_core_count(void)
{
	/* Bits 18..21 of SYSCFGID contain the core count minus 1. */
	unsigned int syscfgid = get_er(SYSCFGID);
	return ((syscfgid >> 18) & 0xf) + 1;
}

static inline int get_core_id(void)
{
	/* Bits 0..13 of SYSCFGID contain the core id. */
	unsigned int core_id = get_er(SYSCFGID);
	return core_id & 0x3fff;
}

void __init smp_prepare_cpus(unsigned int max_cpus)
{
	unsigned i;

	for (i = 0; i < max_cpus; ++i)
		set_cpu_present(i, true);
}

void __init smp_init_cpus(void)
{
	unsigned i;
	unsigned int ncpus = get_core_count();
	unsigned int core_id = get_core_id();

	pr_info("%s: Core Count = %d\n", __func__, ncpus);
	pr_info("%s: Core Id = %d\n", __func__, core_id);

	for (i = 0; i < ncpus; ++i)
		set_cpu_possible(i, true);
}

void __init smp_prepare_boot_cpu(void)
{
	unsigned int cpu = smp_processor_id();
	BUG_ON(cpu != 0);
	cpu_asid_cache(cpu) = ASID_USER_FIRST;
}

void __init smp_cpus_done(unsigned int max_cpus)
{
}

static int boot_secondary_processors = 1; /* Set with xt-gdb via .xt-gdb */
static DECLARE_COMPLETION(cpu_running);
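/*
 * Main entry point for a secondary core. The core arrives here with a
 * minimal environment: the MMU is set up below, then the core attaches
 * itself to init_mm, calibrates its delay loop, initializes its per-cpu
 * IRQs and timer, marks itself online and enters the idle loop.
 */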
void __init secondary_start_kernel(void)
{
	struct mm_struct *mm = &init_mm;
	unsigned int cpu = smp_processor_id();

	init_mmu();

#ifdef CONFIG_DEBUG_KERNEL
	if (boot_secondary_processors == 0) {
		pr_debug("%s: boot_secondary_processors:%d; Hanging cpu:%d\n",
			 __func__, boot_secondary_processors, cpu);
		for (;;)
			__asm__ __volatile__ ("waiti " __stringify(LOCKLEVEL));
	}

	pr_debug("%s: boot_secondary_processors:%d; Booting cpu:%d\n",
		 __func__, boot_secondary_processors, cpu);
#endif
	/* Init EXCSAVE1 */

	secondary_trap_init();

	/* All kernel threads share the same mm context. */

	atomic_inc(&mm->mm_users);
	atomic_inc(&mm->mm_count);
	current->active_mm = mm;
	cpumask_set_cpu(cpu, mm_cpumask(mm));
	enter_lazy_tlb(mm, current);

	preempt_disable();
	trace_hardirqs_off();

	calibrate_delay();

	notify_cpu_starting(cpu);

	secondary_init_irq();
	local_timer_setup(cpu);

	local_irq_enable();

	set_cpu_online(cpu, true);
	complete(&cpu_running);

	cpu_startup_entry(CPUHP_ONLINE);
}

static void mx_cpu_start(void *p)
{
	unsigned cpu = (unsigned)p;
	unsigned long run_stall_mask = get_er(MPSCORE);

	set_er(run_stall_mask & ~(1u << cpu), MPSCORE);
	pr_debug("%s: cpu: %d, run_stall_mask: %lx ---> %lx\n",
		 __func__, cpu, run_stall_mask, get_er(MPSCORE));
}

static void mx_cpu_stop(void *p)
{
	unsigned cpu = (unsigned)p;
	unsigned long run_stall_mask = get_er(MPSCORE);

	set_er(run_stall_mask | (1u << cpu), MPSCORE);
	pr_debug("%s: cpu: %d, run_stall_mask: %lx ---> %lx\n",
		 __func__, cpu, run_stall_mask, get_er(MPSCORE));
}

unsigned long cpu_start_ccount;

/*
 * Hand-shake with the secondary core over cpu_start_ccount: we publish a
 * non-zero ccount value and the secondary's boot code clears it once it
 * is running. If the value is still set when the timeout expires, the
 * core never started, so it is stalled again and we report -EIO.
 */
static int boot_secondary(unsigned int cpu, struct task_struct *ts)
{
	unsigned long timeout = jiffies + msecs_to_jiffies(1000);
	unsigned long ccount;
	int i;

	smp_call_function_single(0, mx_cpu_start, (void *)cpu, 1);

	for (i = 0; i < 2; ++i) {
		do
			ccount = get_ccount();
		while (!ccount);

		cpu_start_ccount = ccount;

		while (time_before(jiffies, timeout)) {
			mb();
			if (!cpu_start_ccount)
				break;
		}

		if (cpu_start_ccount) {
			smp_call_function_single(0, mx_cpu_stop,
						 (void *)cpu, 1);
			cpu_start_ccount = 0;
			return -EIO;
		}
	}
	return 0;
}

int __cpu_up(unsigned int cpu, struct task_struct *idle)
{
	int ret = 0;

	if (cpu_asid_cache(cpu) == 0)
		cpu_asid_cache(cpu) = ASID_USER_FIRST;

	start_info.stack = (unsigned long)task_pt_regs(idle);
	wmb();

	pr_debug("%s: Calling boot_secondary(cpu:%d, idle:%p, sp: %08lx)\n",
		 __func__, cpu, idle, start_info.stack);

	ret = boot_secondary(cpu, idle);
	if (ret == 0) {
		wait_for_completion_timeout(&cpu_running,
					    msecs_to_jiffies(1000));
		if (!cpu_online(cpu))
			ret = -EIO;
	}

	if (ret)
		pr_err("CPU %u failed to boot\n", cpu);

	return ret;
}

enum ipi_msg_type {
	IPI_RESCHEDULE = 0,
	IPI_CALL_FUNC,
	IPI_CPU_STOP,
	IPI_MAX
};

static const struct {
	const char *short_text;
	const char *long_text;
} ipi_text[] = {
	{ .short_text = "RES", .long_text = "Rescheduling interrupts" },
	{ .short_text = "CAL", .long_text = "Function call interrupts" },
	{ .short_text = "DIE", .long_text = "CPU shutdown interrupts" },
};

struct ipi_data {
	unsigned long ipi_count[IPI_MAX];
};

static DEFINE_PER_CPU(struct ipi_data, ipi_data);

static void send_ipi_message(const struct cpumask *callmask,
			     enum ipi_msg_type msg_id)
{
	int index;
	unsigned long mask = 0;

	for_each_cpu(index, callmask)
		if (index != smp_processor_id())
			mask |= 1 << index;

	set_er(mask, MIPISET(msg_id));
}

void arch_send_call_function_ipi_mask(const struct cpumask *mask)
{
	send_ipi_message(mask, IPI_CALL_FUNC);
}

void arch_send_call_function_single_ipi(int cpu)
{
	send_ipi_message(cpumask_of(cpu), IPI_CALL_FUNC);
}
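/*
 * Ask a remote core to reschedule. On the receiving side the IPI handler
 * only needs to call scheduler_ipi(); see ipi_interrupt() below.
 */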
void smp_send_reschedule(int cpu)
{
	send_ipi_message(cpumask_of(cpu), IPI_RESCHEDULE);
}

void smp_send_stop(void)
{
	struct cpumask targets;

	cpumask_copy(&targets, cpu_online_mask);
	cpumask_clear_cpu(smp_processor_id(), &targets);
	send_ipi_message(&targets, IPI_CPU_STOP);
}

static void ipi_cpu_stop(unsigned int cpu)
{
	set_cpu_online(cpu, false);
	machine_halt();
}

irqreturn_t ipi_interrupt(int irq, void *dev_id)
{
	unsigned int cpu = smp_processor_id();
	struct ipi_data *ipi = &per_cpu(ipi_data, cpu);
	unsigned int msg;
	unsigned i;

	msg = get_er(MIPICAUSE(cpu));
	for (i = 0; i < IPI_MAX; i++)
		if (msg & (1 << i)) {
			set_er(1 << i, MIPICAUSE(cpu));
			++ipi->ipi_count[i];
		}

	if (msg & (1 << IPI_RESCHEDULE))
		scheduler_ipi();
	if (msg & (1 << IPI_CALL_FUNC))
		generic_smp_call_function_interrupt();
	if (msg & (1 << IPI_CPU_STOP))
		ipi_cpu_stop(cpu);

	return IRQ_HANDLED;
}

void show_ipi_list(struct seq_file *p, int prec)
{
	unsigned int cpu;
	unsigned i;

	for (i = 0; i < IPI_MAX; ++i) {
		seq_printf(p, "%*s:", prec, ipi_text[i].short_text);
		for_each_online_cpu(cpu)
			seq_printf(p, " %10lu",
				   per_cpu(ipi_data, cpu).ipi_count[i]);
		seq_printf(p, " %s\n", ipi_text[i].long_text);
	}
}

int setup_profiling_timer(unsigned int multiplier)
{
	pr_debug("setup_profiling_timer %d\n", multiplier);
	return 0;
}

/* TLB flush functions */

struct flush_data {
	struct vm_area_struct *vma;
	unsigned long addr1;
	unsigned long addr2;
};

static void ipi_flush_tlb_all(void *arg)
{
	local_flush_tlb_all();
}

void flush_tlb_all(void)
{
	on_each_cpu(ipi_flush_tlb_all, NULL, 1);
}

static void ipi_flush_tlb_mm(void *arg)
{
	local_flush_tlb_mm(arg);
}

void flush_tlb_mm(struct mm_struct *mm)
{
	on_each_cpu(ipi_flush_tlb_mm, mm, 1);
}

static void ipi_flush_tlb_page(void *arg)
{
	struct flush_data *fd = arg;
	local_flush_tlb_page(fd->vma, fd->addr1);
}

void flush_tlb_page(struct vm_area_struct *vma, unsigned long addr)
{
	struct flush_data fd = {
		.vma = vma,
		.addr1 = addr,
	};
	on_each_cpu(ipi_flush_tlb_page, &fd, 1);
}

static void ipi_flush_tlb_range(void *arg)
{
	struct flush_data *fd = arg;
	local_flush_tlb_range(fd->vma, fd->addr1, fd->addr2);
}

void flush_tlb_range(struct vm_area_struct *vma,
		     unsigned long start, unsigned long end)
{
	struct flush_data fd = {
		.vma = vma,
		.addr1 = start,
		.addr2 = end,
	};
	on_each_cpu(ipi_flush_tlb_range, &fd, 1);
}

/* Cache flush functions */

static void ipi_flush_cache_all(void *arg)
{
	local_flush_cache_all();
}

void flush_cache_all(void)
{
	on_each_cpu(ipi_flush_cache_all, NULL, 1);
}

static void ipi_flush_cache_page(void *arg)
{
	struct flush_data *fd = arg;
	local_flush_cache_page(fd->vma, fd->addr1, fd->addr2);
}

void flush_cache_page(struct vm_area_struct *vma,
		      unsigned long address, unsigned long pfn)
{
	struct flush_data fd = {
		.vma = vma,
		.addr1 = address,
		.addr2 = pfn,
	};
	on_each_cpu(ipi_flush_cache_page, &fd, 1);
}
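/*
 * flush_cache_range() and flush_icache_range() below follow the same
 * pattern as the flushes above: TLBs and caches are per-core resources,
 * so each global flush broadcasts its local_* counterpart to all online
 * CPUs via on_each_cpu().
 */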
static void ipi_flush_cache_range(void *arg)
{
	struct flush_data *fd = arg;
	local_flush_cache_range(fd->vma, fd->addr1, fd->addr2);
}

void flush_cache_range(struct vm_area_struct *vma,
		       unsigned long start, unsigned long end)
{
	struct flush_data fd = {
		.vma = vma,
		.addr1 = start,
		.addr2 = end,
	};
	on_each_cpu(ipi_flush_cache_range, &fd, 1);
}

static void ipi_flush_icache_range(void *arg)
{
	struct flush_data *fd = arg;
	local_flush_icache_range(fd->addr1, fd->addr2);
}

void flush_icache_range(unsigned long start, unsigned long end)
{
	struct flush_data fd = {
		.addr1 = start,
		.addr2 = end,
	};
	on_each_cpu(ipi_flush_icache_range, &fd, 1);
}