/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 2004-2008, 2009, 2010 Cavium Networks
 */
#include <linux/cpu.h>
#include <linux/delay.h>
#include <linux/smp.h>
#include <linux/interrupt.h>
#include <linux/kernel_stat.h>
#include <linux/sched.h>
#include <linux/sched/hotplug.h>
#include <linux/init.h>
#include <linux/export.h>

#include <asm/mmu_context.h>
#include <asm/time.h>
#include <asm/setup.h>

#include <asm/octeon/octeon.h>

#include "octeon_boot.h"

volatile unsigned long octeon_processor_boot = 0xff;
volatile unsigned long octeon_processor_sp;
volatile unsigned long octeon_processor_gp;
#ifdef CONFIG_RELOCATABLE
volatile unsigned long octeon_processor_relocated_kernel_entry;
#endif /* CONFIG_RELOCATABLE */

#ifdef CONFIG_HOTPLUG_CPU
uint64_t octeon_bootloader_entry_addr;
EXPORT_SYMBOL(octeon_bootloader_entry_addr);
#endif

extern void kernel_entry(unsigned long arg1, ...);

static void octeon_icache_flush(void)
{
	asm volatile ("synci 0($0)\n");
}

static void (*octeon_message_functions[8])(void) = {
	scheduler_ipi,
	generic_smp_call_function_interrupt,
	octeon_icache_flush,
};

static irqreturn_t mailbox_interrupt(int irq, void *dev_id)
{
	u64 mbox_clrx = CVMX_CIU_MBOX_CLRX(cvmx_get_core_num());
	u64 action;
	int i;

	/*
	 * Make sure the function array initialization remains
	 * correct.
	 */
	BUILD_BUG_ON(SMP_RESCHEDULE_YOURSELF != (1 << 0));
	BUILD_BUG_ON(SMP_CALL_FUNCTION != (1 << 1));
	BUILD_BUG_ON(SMP_ICACHE_FLUSH != (1 << 2));

	/*
	 * Load the mailbox register to figure out what we're supposed
	 * to do.
	 */
	action = cvmx_read_csr(mbox_clrx);

	if (OCTEON_IS_MODEL(OCTEON_CN68XX))
		action &= 0xff;
	else
		action &= 0xffff;

	/* Clear the mailbox to clear the interrupt */
	cvmx_write_csr(mbox_clrx, action);

	for (i = 0; i < ARRAY_SIZE(octeon_message_functions) && action;) {
		if (action & 1) {
			void (*fn)(void) = octeon_message_functions[i];

			if (fn)
				fn();
		}
		action >>= 1;
		i++;
	}
	return IRQ_HANDLED;
}
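
/*
 * Each IPI type is a single bit in the per-core mailbox CSR, so one
 * interrupt can carry several requests at once.  As an illustrative
 * sketch (this is exactly what octeon_send_ipi_single() below does),
 * asking the core that owns this mailbox to reschedule would be:
 *
 *	cvmx_write_csr(CVMX_CIU_MBOX_SETX(coreid), SMP_RESCHEDULE_YOURSELF);
 *
 * which makes mailbox_interrupt() above dispatch to scheduler_ipi()
 * through slot 0 of octeon_message_functions[].
 */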

/**
 * Send an inter-processor interrupt (IPI) to a single CPU by setting
 * the requested action bits in that CPU's mailbox register.  The
 * receiving core handles the request in mailbox_interrupt().
 */
void octeon_send_ipi_single(int cpu, unsigned int action)
{
	int coreid = cpu_logical_map(cpu);
	/*
	pr_info("SMP: Mailbox send cpu=%d, coreid=%d, action=%u\n", cpu,
		coreid, action);
	*/
	cvmx_write_csr(CVMX_CIU_MBOX_SETX(coreid), action);
}

static inline void octeon_send_ipi_mask(const struct cpumask *mask,
					unsigned int action)
{
	unsigned int i;

	for_each_cpu(i, mask)
		octeon_send_ipi_single(i, action);
}

/**
 * Detect whether the bootloader supports CPU hotplug and, if so,
 * record its entry address for later use.
 */
static void octeon_smp_hotplug_setup(void)
{
#ifdef CONFIG_HOTPLUG_CPU
	struct linux_app_boot_info *labi;

	if (!setup_max_cpus)
		return;

	labi = (struct linux_app_boot_info *)PHYS_TO_XKSEG_CACHED(LABI_ADDR_IN_BOOTLOADER);
	if (labi->labi_signature != LABI_SIGNATURE) {
		pr_info("The bootloader on this board does not support HOTPLUG_CPU.\n");
		return;
	}

	octeon_bootloader_entry_addr = labi->InitTLBStart_addr;
#endif
}

/**
 * Detect available CPUs and populate cpu_possible_mask and
 * cpu_present_mask.
 */
static void __init octeon_smp_setup(void)
{
	const int coreid = cvmx_get_core_num();
	int cpus;
	int id;
	struct cvmx_sysinfo *sysinfo = cvmx_sysinfo_get();

#ifdef CONFIG_HOTPLUG_CPU
	int core_mask = octeon_get_boot_coremask();
	unsigned int num_cores = cvmx_octeon_num_cores();
#endif

	/* The present CPUs are initially just the boot cpu (CPU 0). */
	for (id = 0; id < NR_CPUS; id++) {
		set_cpu_possible(id, id == 0);
		set_cpu_present(id, id == 0);
	}

	__cpu_number_map[coreid] = 0;
	__cpu_logical_map[0] = coreid;

	/* The present CPUs get the lowest CPU numbers. */
	cpus = 1;
	for (id = 0; id < NR_CPUS; id++) {
		if ((id != coreid) && cvmx_coremask_is_core_set(&sysinfo->core_mask, id)) {
			set_cpu_possible(cpus, true);
			set_cpu_present(cpus, true);
			__cpu_number_map[id] = cpus;
			__cpu_logical_map[cpus] = id;
			cpus++;
		}
	}
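
	/*
	 * Illustrative example of the mapping built above (assuming a
	 * hypothetical configuration where the boot core is physical
	 * core 2 and cores 0-3 are present):
	 *
	 *	__cpu_logical_map[]: 2, 0, 1, 3
	 *	__cpu_number_map[]:  1, 2, 0, 3
	 *
	 * The boot core is always logical CPU 0; the other present
	 * cores get the lowest CPU numbers in physical core order.
	 */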

#ifdef CONFIG_HOTPLUG_CPU
	/*
	 * The possible CPUs are all those present on the chip.  We
	 * will assign CPU numbers for possible cores as well.  Cores
	 * are always consecutively numbered from 0.
	 */
	for (id = 0; setup_max_cpus && octeon_bootloader_entry_addr &&
		     id < num_cores && id < NR_CPUS; id++) {
		if (!(core_mask & (1 << id))) {
			set_cpu_possible(cpus, true);
			__cpu_number_map[id] = cpus;
			__cpu_logical_map[cpus] = id;
			cpus++;
		}
	}
#endif

	octeon_smp_hotplug_setup();
}


#ifdef CONFIG_RELOCATABLE
int plat_post_relocation(long offset)
{
	unsigned long entry = (unsigned long)kernel_entry;

	/* Send secondaries into relocated kernel */
	octeon_processor_relocated_kernel_entry = entry + offset;

	return 0;
}
#endif /* CONFIG_RELOCATABLE */

/**
 * Firmware CPU startup hook
 */
static void octeon_boot_secondary(int cpu, struct task_struct *idle)
{
	int count;

	pr_info("SMP: Booting CPU%02d (CoreId %2d)...\n", cpu,
		cpu_logical_map(cpu));

	octeon_processor_sp = __KSTK_TOS(idle);
	octeon_processor_gp = (unsigned long)(task_thread_info(idle));
	octeon_processor_boot = cpu_logical_map(cpu);
	mb();

	count = 10000;
	while (octeon_processor_sp && count) {
		/* Waiting for processor to get the SP and GP */
		udelay(1);
		count--;
	}
	if (count == 0)
		pr_err("Secondary boot timeout\n");
}

/**
 * After we've done initial boot, this function is called to allow the
 * board code to clean up state, if needed.
 */
static void octeon_init_secondary(void)
{
	unsigned int sr;

	sr = set_c0_status(ST0_BEV);
	write_c0_ebase((u32)ebase);
	write_c0_status(sr);

	octeon_check_cpu_bist();
	octeon_init_cvmcount();

	octeon_irq_setup_secondary();
}

/**
 * Callout to firmware before smp_init.
 */
static void __init octeon_prepare_cpus(unsigned int max_cpus)
{
	/*
	 * Only the low order mailbox bits are used for IPIs, leave
	 * the other bits alone.
	 */
	cvmx_write_csr(CVMX_CIU_MBOX_CLRX(cvmx_get_core_num()), 0xffff);
	if (request_irq(OCTEON_IRQ_MBOX0, mailbox_interrupt,
			IRQF_PERCPU | IRQF_NO_THREAD, "SMP-IPI",
			mailbox_interrupt)) {
		panic("Cannot request_irq(OCTEON_IRQ_MBOX0)");
	}
}

/**
 * Last chance for the board code to finish SMP initialization before
 * the CPU is "online".
 */
static void octeon_smp_finish(void)
{
	octeon_user_io_init();

	/* to generate the first CPU timer interrupt */
	write_c0_compare(read_c0_count() + mips_hpt_frequency / HZ);
	local_irq_enable();
}
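
/*
 * Overview of the hotplug path implemented below: octeon_cpu_disable()
 * marks a CPU offline and the dying CPU then spins in play_dead() with
 * its cpu_state set to CPU_DEAD.  octeon_cpu_die(), run from another
 * CPU, returns the core to the bootloader's available coremask and
 * pulses CVMX_CIU_PP_RST to reset it.  Plugging the CPU back in goes
 * through the CPUHP_MIPS_SOC_PREPARE callback
 * octeon_update_boot_vector(), which points the bootloader's boot
 * vector at start_after_reset() and wakes the core with an NMI.
 */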

#ifdef CONFIG_HOTPLUG_CPU

/* State of each CPU. */
DEFINE_PER_CPU(int, cpu_state);

static int octeon_cpu_disable(void)
{
	unsigned int cpu = smp_processor_id();

	if (cpu == 0)
		return -EBUSY;

	if (!octeon_bootloader_entry_addr)
		return -ENOTSUPP;

	set_cpu_online(cpu, false);
	calculate_cpu_foreign_map();
	octeon_fixup_irqs();

	__flush_cache_all();
	local_flush_tlb_all();

	return 0;
}

static void octeon_cpu_die(unsigned int cpu)
{
	int coreid = cpu_logical_map(cpu);
	uint32_t mask, new_mask;
	const struct cvmx_bootmem_named_block_desc *block_desc;

	while (per_cpu(cpu_state, cpu) != CPU_DEAD)
		cpu_relax();

	/*
	 * This is a somewhat complicated strategy for getting/setting
	 * the available coremask, copied from the bootloader.
	 */

	mask = 1 << coreid;
	/* LINUX_APP_BOOT_BLOCK is initialized in the bootoct binary */
	block_desc = cvmx_bootmem_find_named_block(LINUX_APP_BOOT_BLOCK_NAME);

	if (!block_desc) {
		struct linux_app_boot_info *labi;

		labi = (struct linux_app_boot_info *)PHYS_TO_XKSEG_CACHED(LABI_ADDR_IN_BOOTLOADER);

		labi->avail_coremask |= mask;
		new_mask = labi->avail_coremask;
	} else {		/* alternative, already initialized */
		uint32_t *p = (uint32_t *)PHYS_TO_XKSEG_CACHED(block_desc->base_addr +
							       AVAIL_COREMASK_OFFSET_IN_LINUX_APP_BOOT_BLOCK);
		*p |= mask;
		new_mask = *p;
	}

	pr_info("Reset core %d. Available Coremask = 0x%x\n", coreid, new_mask);
	mb();
	cvmx_write_csr(CVMX_CIU_PP_RST, 1 << coreid);
	cvmx_write_csr(CVMX_CIU_PP_RST, 0);
}

void play_dead(void)
{
	int cpu = cpu_number_map(cvmx_get_core_num());

	idle_task_exit();
	octeon_processor_boot = 0xff;
	per_cpu(cpu_state, cpu) = CPU_DEAD;

	mb();

	while (1)	/* core will be reset here */
		;
}

static void start_after_reset(void)
{
	kernel_entry(0, 0, 0);	/* set a2 = 0 for secondary core */
}

static int octeon_update_boot_vector(unsigned int cpu)
{
	int coreid = cpu_logical_map(cpu);
	uint32_t avail_coremask;
	const struct cvmx_bootmem_named_block_desc *block_desc;
	struct boot_init_vector *boot_vect =
		(struct boot_init_vector *)PHYS_TO_XKSEG_CACHED(BOOTLOADER_BOOT_VECTOR);

	block_desc = cvmx_bootmem_find_named_block(LINUX_APP_BOOT_BLOCK_NAME);

	if (!block_desc) {
		struct linux_app_boot_info *labi;

		labi = (struct linux_app_boot_info *)PHYS_TO_XKSEG_CACHED(LABI_ADDR_IN_BOOTLOADER);

		avail_coremask = labi->avail_coremask;
		labi->avail_coremask &= ~(1 << coreid);
	} else {		/* alternative, already initialized */
		avail_coremask = *(uint32_t *)PHYS_TO_XKSEG_CACHED(
			block_desc->base_addr + AVAIL_COREMASK_OFFSET_IN_LINUX_APP_BOOT_BLOCK);
	}

	if (!(avail_coremask & (1 << coreid))) {
		/* Core not available; assume it was claimed by the simple executive. */
		cvmx_write_csr(CVMX_CIU_PP_RST, 1 << coreid);
		cvmx_write_csr(CVMX_CIU_PP_RST, 0);
	}

	boot_vect[coreid].app_start_func_addr =
		(uint32_t) (unsigned long) start_after_reset;
	boot_vect[coreid].code_addr = octeon_bootloader_entry_addr;

	mb();

	cvmx_write_csr(CVMX_CIU_NMI, (1 << coreid) & avail_coremask);

	return 0;
}

static int register_cavium_notifier(void)
{
	return cpuhp_setup_state_nocalls(CPUHP_MIPS_SOC_PREPARE,
					 "mips/cavium:prepare",
					 octeon_update_boot_vector, NULL);
}
late_initcall(register_cavium_notifier);

#endif	/* CONFIG_HOTPLUG_CPU */
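
/*
 * SMP operations for pre-CIU3 chips: every IPI type shares the single
 * OCTEON_IRQ_MBOX0 interrupt and is demultiplexed by bit position in
 * mailbox_interrupt().
 */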
struct plat_smp_ops octeon_smp_ops = {
	.send_ipi_single	= octeon_send_ipi_single,
	.send_ipi_mask		= octeon_send_ipi_mask,
	.init_secondary		= octeon_init_secondary,
	.smp_finish		= octeon_smp_finish,
	.boot_secondary		= octeon_boot_secondary,
	.smp_setup		= octeon_smp_setup,
	.prepare_cpus		= octeon_prepare_cpus,
#ifdef CONFIG_HOTPLUG_CPU
	.cpu_disable		= octeon_cpu_disable,
	.cpu_die		= octeon_cpu_die,
#endif
};

static irqreturn_t octeon_78xx_resched_interrupt(int irq, void *dev_id)
{
	scheduler_ipi();
	return IRQ_HANDLED;
}

static irqreturn_t octeon_78xx_call_function_interrupt(int irq, void *dev_id)
{
	generic_smp_call_function_interrupt();
	return IRQ_HANDLED;
}

static irqreturn_t octeon_78xx_icache_flush_interrupt(int irq, void *dev_id)
{
	octeon_icache_flush();
	return IRQ_HANDLED;
}

/*
 * Callout to firmware before smp_init.  On CIU3 chips each IPI type
 * gets its own mailbox interrupt rather than sharing one vector.
 */
static void octeon_78xx_prepare_cpus(unsigned int max_cpus)
{
	if (request_irq(OCTEON_IRQ_MBOX0 + 0,
			octeon_78xx_resched_interrupt,
			IRQF_PERCPU | IRQF_NO_THREAD, "Scheduler",
			octeon_78xx_resched_interrupt)) {
		panic("Cannot request_irq for SchedulerIPI");
	}
	if (request_irq(OCTEON_IRQ_MBOX0 + 1,
			octeon_78xx_call_function_interrupt,
			IRQF_PERCPU | IRQF_NO_THREAD, "SMP-Call",
			octeon_78xx_call_function_interrupt)) {
		panic("Cannot request_irq for SMP-Call");
	}
	if (request_irq(OCTEON_IRQ_MBOX0 + 2,
			octeon_78xx_icache_flush_interrupt,
			IRQF_PERCPU | IRQF_NO_THREAD, "ICache-Flush",
			octeon_78xx_icache_flush_interrupt)) {
		panic("Cannot request_irq for ICache-Flush");
	}
}

static void octeon_78xx_send_ipi_single(int cpu, unsigned int action)
{
	int i;

	for (i = 0; i < 8; i++) {
		if (action & 1)
			octeon_ciu3_mbox_send(cpu, i);
		action >>= 1;
	}
}

static void octeon_78xx_send_ipi_mask(const struct cpumask *mask,
				      unsigned int action)
{
	unsigned int cpu;

	for_each_cpu(cpu, mask)
		octeon_78xx_send_ipi_single(cpu, action);
}

static struct plat_smp_ops octeon_78xx_smp_ops = {
	.send_ipi_single	= octeon_78xx_send_ipi_single,
	.send_ipi_mask		= octeon_78xx_send_ipi_mask,
	.init_secondary		= octeon_init_secondary,
	.smp_finish		= octeon_smp_finish,
	.boot_secondary		= octeon_boot_secondary,
	.smp_setup		= octeon_smp_setup,
	.prepare_cpus		= octeon_78xx_prepare_cpus,
#ifdef CONFIG_HOTPLUG_CPU
	.cpu_disable		= octeon_cpu_disable,
	.cpu_die		= octeon_cpu_die,
#endif
};

void __init octeon_setup_smp(void)
{
	struct plat_smp_ops *ops;

	if (octeon_has_feature(OCTEON_FEATURE_CIU3))
		ops = &octeon_78xx_smp_ops;
	else
		ops = &octeon_smp_ops;

	register_smp_ops(ops);
}
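
/*
 * For illustration: on a chip with OCTEON_FEATURE_CIU3 (e.g. CN78XX),
 * the call above registers octeon_78xx_smp_ops, so an IPI such as
 *
 *	octeon_78xx_send_ipi_single(cpu, SMP_CALL_FUNCTION);
 *
 * raises the dedicated OCTEON_IRQ_MBOX0 + 1 interrupt through
 * octeon_ciu3_mbox_send() rather than setting a bit in the shared
 * CVMX_CIU_MBOX_SETX CSR that the older path uses.
 */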