/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 2004-2008, 2009, 2010 Cavium Networks
 */
#include <linux/cpu.h>
#include <linux/init.h>
#include <linux/delay.h>
#include <linux/smp.h>
#include <linux/interrupt.h>
#include <linux/kernel_stat.h>
#include <linux/sched.h>
#include <linux/module.h>

#include <asm/mmu_context.h>
#include <asm/system.h>
#include <asm/time.h>

#include <asm/octeon/octeon.h>

#include "octeon_boot.h"

volatile unsigned long octeon_processor_boot = 0xff;
volatile unsigned long octeon_processor_sp;
volatile unsigned long octeon_processor_gp;

#ifdef CONFIG_HOTPLUG_CPU
uint64_t octeon_bootloader_entry_addr;
EXPORT_SYMBOL(octeon_bootloader_entry_addr);
#endif

static irqreturn_t mailbox_interrupt(int irq, void *dev_id)
{
	const int coreid = cvmx_get_core_num();
	uint64_t action;

	/* Load the mailbox register to figure out what we're supposed to do */
	action = cvmx_read_csr(CVMX_CIU_MBOX_CLRX(coreid));

	/* Clear the mailbox to clear the interrupt */
	cvmx_write_csr(CVMX_CIU_MBOX_CLRX(coreid), action);

	if (action & SMP_CALL_FUNCTION)
		smp_call_function_interrupt();

	/* Check if we've been told to flush the icache */
	if (action & SMP_ICACHE_FLUSH)
		asm volatile ("synci 0($0)\n");
	return IRQ_HANDLED;
}

/**
 * Send a mailbox IPI to the given cpu.  The action bits are set in the
 * target core's CIU mailbox register, which raises the mailbox interrupt
 * serviced by mailbox_interrupt() above.
 */
void octeon_send_ipi_single(int cpu, unsigned int action)
{
	int coreid = cpu_logical_map(cpu);
	/*
	pr_info("SMP: Mailbox send cpu=%d, coreid=%d, action=%u\n", cpu,
	       coreid, action);
	*/
	cvmx_write_csr(CVMX_CIU_MBOX_SETX(coreid), action);
}

static inline void octeon_send_ipi_mask(const struct cpumask *mask,
					unsigned int action)
{
	unsigned int i;

	for_each_cpu_mask(i, *mask)
		octeon_send_ipi_single(i, action);
}

/**
 * Read the bootloader's linux_app_boot_info block and record the
 * secondary-boot entry point needed for CPU hotplug.
 */
static void octeon_smp_hotplug_setup(void)
{
#ifdef CONFIG_HOTPLUG_CPU
	struct linux_app_boot_info *labi;

	labi = (struct linux_app_boot_info *)PHYS_TO_XKSEG_CACHED(LABI_ADDR_IN_BOOTLOADER);
	if (labi->labi_signature != LABI_SIGNATURE)
		panic("The bootloader version on this board is incorrect.");

	octeon_bootloader_entry_addr = labi->InitTLBStart_addr;
#endif
}

/**
 * Detect available CPUs, populate cpu_possible_map and cpu_present_map.
 */
static void octeon_smp_setup(void)
{
	const int coreid = cvmx_get_core_num();
	int cpus;
	int id;
	int core_mask = octeon_get_boot_coremask();
#ifdef CONFIG_HOTPLUG_CPU
	unsigned int num_cores = cvmx_octeon_num_cores();
#endif

	/* The present CPUs are initially just the boot cpu (CPU 0). */
	for (id = 0; id < NR_CPUS; id++) {
		set_cpu_possible(id, id == 0);
		set_cpu_present(id, id == 0);
	}

	__cpu_number_map[coreid] = 0;
	__cpu_logical_map[0] = coreid;

	/* The present CPUs get the lowest CPU numbers. */
*/ 114 cpus = 1; 115 for (id = 0; id < NR_CPUS; id++) { 116 if ((id != coreid) && (core_mask & (1 << id))) { 117 set_cpu_possible(cpus, true); 118 set_cpu_present(cpus, true); 119 __cpu_number_map[id] = cpus; 120 __cpu_logical_map[cpus] = id; 121 cpus++; 122 } 123 } 124 125 #ifdef CONFIG_HOTPLUG_CPU 126 /* 127 * The possible CPUs are all those present on the chip. We 128 * will assign CPU numbers for possible cores as well. Cores 129 * are always consecutively numberd from 0. 130 */ 131 for (id = 0; id < num_cores && id < NR_CPUS; id++) { 132 if (!(core_mask & (1 << id))) { 133 set_cpu_possible(cpus, true); 134 __cpu_number_map[id] = cpus; 135 __cpu_logical_map[cpus] = id; 136 cpus++; 137 } 138 } 139 #endif 140 141 octeon_smp_hotplug_setup(); 142 } 143 144 /** 145 * Firmware CPU startup hook 146 * 147 */ 148 static void octeon_boot_secondary(int cpu, struct task_struct *idle) 149 { 150 int count; 151 152 pr_info("SMP: Booting CPU%02d (CoreId %2d)...\n", cpu, 153 cpu_logical_map(cpu)); 154 155 octeon_processor_sp = __KSTK_TOS(idle); 156 octeon_processor_gp = (unsigned long)(task_thread_info(idle)); 157 octeon_processor_boot = cpu_logical_map(cpu); 158 mb(); 159 160 count = 10000; 161 while (octeon_processor_sp && count) { 162 /* Waiting for processor to get the SP and GP */ 163 udelay(1); 164 count--; 165 } 166 if (count == 0) 167 pr_err("Secondary boot timeout\n"); 168 } 169 170 /** 171 * After we've done initial boot, this function is called to allow the 172 * board code to clean up state, if needed 173 */ 174 static void octeon_init_secondary(void) 175 { 176 const int coreid = cvmx_get_core_num(); 177 union cvmx_ciu_intx_sum0 interrupt_enable; 178 unsigned int sr; 179 180 #ifdef CONFIG_HOTPLUG_CPU 181 struct linux_app_boot_info *labi; 182 183 labi = (struct linux_app_boot_info *)PHYS_TO_XKSEG_CACHED(LABI_ADDR_IN_BOOTLOADER); 184 185 if (labi->labi_signature != LABI_SIGNATURE) 186 panic("The bootloader version on this board is incorrect."); 187 #endif 188 189 sr = set_c0_status(ST0_BEV); 190 write_c0_ebase((u32)ebase); 191 write_c0_status(sr); 192 193 octeon_check_cpu_bist(); 194 octeon_init_cvmcount(); 195 /* 196 pr_info("SMP: CPU%d (CoreId %lu) started\n", cpu, coreid); 197 */ 198 /* Enable Mailbox interrupts to this core. These are the only 199 interrupts allowed on line 3 */ 200 cvmx_write_csr(CVMX_CIU_MBOX_CLRX(coreid), 0xffffffff); 201 interrupt_enable.u64 = 0; 202 interrupt_enable.s.mbox = 0x3; 203 cvmx_write_csr(CVMX_CIU_INTX_EN0((coreid * 2)), interrupt_enable.u64); 204 cvmx_write_csr(CVMX_CIU_INTX_EN0((coreid * 2 + 1)), 0); 205 cvmx_write_csr(CVMX_CIU_INTX_EN1((coreid * 2)), 0); 206 cvmx_write_csr(CVMX_CIU_INTX_EN1((coreid * 2 + 1)), 0); 207 /* Enable core interrupt processing for 2,3 and 7 */ 208 set_c0_status(0x8c01); 209 } 210 211 /** 212 * Callout to firmware before smp_init 213 * 214 */ 215 void octeon_prepare_cpus(unsigned int max_cpus) 216 { 217 cvmx_write_csr(CVMX_CIU_MBOX_CLRX(cvmx_get_core_num()), 0xffffffff); 218 if (request_irq(OCTEON_IRQ_MBOX0, mailbox_interrupt, IRQF_DISABLED, 219 "mailbox0", mailbox_interrupt)) { 220 panic("Cannot request_irq(OCTEON_IRQ_MBOX0)\n"); 221 } 222 if (request_irq(OCTEON_IRQ_MBOX1, mailbox_interrupt, IRQF_DISABLED, 223 "mailbox1", mailbox_interrupt)) { 224 panic("Cannot request_irq(OCTEON_IRQ_MBOX1)\n"); 225 } 226 } 227 228 /** 229 * Last chance for the board code to finish SMP initialization before 230 * the CPU is "online". 
static void octeon_smp_finish(void)
{
#ifdef CONFIG_CAVIUM_GDB
	unsigned long tmp;
	/*
	 * Pulse MCD0 signal on Ctrl-C to stop all the cores.  Also set
	 * MCD0 to be not masked by this core so we know the signal is
	 * received by someone.
	 */
	asm volatile ("dmfc0 %0, $22\n"
		      "ori %0, %0, 0x9100\n" "dmtc0 %0, $22\n" : "=r" (tmp));
#endif

	octeon_user_io_init();

	/* Generate the first CPU timer interrupt */
	write_c0_compare(read_c0_count() + mips_hpt_frequency / HZ);
}

/**
 * Hook for after all CPUs are online
 */
static void octeon_cpus_done(void)
{
#ifdef CONFIG_CAVIUM_GDB
	unsigned long tmp;
	/*
	 * Pulse MCD0 signal on Ctrl-C to stop all the cores.  Also set
	 * MCD0 to be not masked by this core so we know the signal is
	 * received by someone.
	 */
	asm volatile ("dmfc0 %0, $22\n"
		      "ori %0, %0, 0x9100\n" "dmtc0 %0, $22\n" : "=r" (tmp));
#endif
}

#ifdef CONFIG_HOTPLUG_CPU

/* State of each CPU. */
DEFINE_PER_CPU(int, cpu_state);

extern void fixup_irqs(void);

static DEFINE_SPINLOCK(smp_reserve_lock);

static int octeon_cpu_disable(void)
{
	unsigned int cpu = smp_processor_id();

	if (cpu == 0)
		return -EBUSY;

	spin_lock(&smp_reserve_lock);

	cpu_clear(cpu, cpu_online_map);
	cpu_clear(cpu, cpu_callin_map);
	local_irq_disable();
	fixup_irqs();
	local_irq_enable();

	flush_cache_all();
	local_flush_tlb_all();

	spin_unlock(&smp_reserve_lock);

	return 0;
}

static void octeon_cpu_die(unsigned int cpu)
{
	int coreid = cpu_logical_map(cpu);
	uint32_t mask, new_mask;
	const struct cvmx_bootmem_named_block_desc *block_desc;

	while (per_cpu(cpu_state, cpu) != CPU_DEAD)
		cpu_relax();

	/*
	 * This is a somewhat involved way of getting/setting the available
	 * core mask, copied from the bootloader.
	 */
	mask = 1 << coreid;
	/* LINUX_APP_BOOT_BLOCK is initialized in the bootoct binary */
	block_desc = cvmx_bootmem_find_named_block(LINUX_APP_BOOT_BLOCK_NAME);

	if (!block_desc) {
		struct linux_app_boot_info *labi;

		labi = (struct linux_app_boot_info *)PHYS_TO_XKSEG_CACHED(LABI_ADDR_IN_BOOTLOADER);

		labi->avail_coremask |= mask;
		new_mask = labi->avail_coremask;
	} else {		/* alternative: named block already initialized */
		uint32_t *p = (uint32_t *)PHYS_TO_XKSEG_CACHED(block_desc->base_addr +
			AVAIL_COREMASK_OFFSET_IN_LINUX_APP_BOOT_BLOCK);
		*p |= mask;
		new_mask = *p;
	}

	pr_info("Reset core %d. Available Coremask = 0x%x \n", coreid, new_mask);
	mb();
	cvmx_write_csr(CVMX_CIU_PP_RST, 1 << coreid);
	cvmx_write_csr(CVMX_CIU_PP_RST, 0);
}

void play_dead(void)
{
	int cpu = cpu_number_map(cvmx_get_core_num());

	idle_task_exit();
	octeon_processor_boot = 0xff;
	per_cpu(cpu_state, cpu) = CPU_DEAD;

	mb();

	while (1)	/* core will be reset here */
		;
}

extern void kernel_entry(unsigned long arg1, ...);

static void start_after_reset(void)
{
	kernel_entry(0, 0, 0);	/* set a2 = 0 for secondary core */
}

static int octeon_update_boot_vector(unsigned int cpu)
{
	int coreid = cpu_logical_map(cpu);
	uint32_t avail_coremask;
	const struct cvmx_bootmem_named_block_desc *block_desc;
	struct boot_init_vector *boot_vect =
		(struct boot_init_vector *)PHYS_TO_XKSEG_CACHED(BOOTLOADER_BOOT_VECTOR);

	block_desc = cvmx_bootmem_find_named_block(LINUX_APP_BOOT_BLOCK_NAME);

	if (!block_desc) {
		struct linux_app_boot_info *labi;

		labi = (struct linux_app_boot_info *)PHYS_TO_XKSEG_CACHED(LABI_ADDR_IN_BOOTLOADER);

		avail_coremask = labi->avail_coremask;
		labi->avail_coremask &= ~(1 << coreid);
	} else {		/* alternative: named block already initialized */
		avail_coremask = *(uint32_t *)PHYS_TO_XKSEG_CACHED(
			block_desc->base_addr + AVAIL_COREMASK_OFFSET_IN_LINUX_APP_BOOT_BLOCK);
	}

	if (!(avail_coremask & (1 << coreid))) {
		/* Core not available; assume it was caught by the simple executive */
		cvmx_write_csr(CVMX_CIU_PP_RST, 1 << coreid);
		cvmx_write_csr(CVMX_CIU_PP_RST, 0);
	}

	boot_vect[coreid].app_start_func_addr =
		(uint32_t) (unsigned long) start_after_reset;
	boot_vect[coreid].code_addr = octeon_bootloader_entry_addr;

	mb();

	cvmx_write_csr(CVMX_CIU_NMI, (1 << coreid) & avail_coremask);

	return 0;
}

static int __cpuinit octeon_cpu_callback(struct notifier_block *nfb,
					 unsigned long action, void *hcpu)
{
	unsigned int cpu = (unsigned long)hcpu;

	switch (action) {
	case CPU_UP_PREPARE:
		octeon_update_boot_vector(cpu);
		break;
	case CPU_ONLINE:
		pr_info("Cpu %d online\n", cpu);
		break;
	case CPU_DEAD:
		break;
	}

	return NOTIFY_OK;
}

static int __cpuinit register_cavium_notifier(void)
{
	hotcpu_notifier(octeon_cpu_callback, 0);
	return 0;
}
late_initcall(register_cavium_notifier);

#endif	/* CONFIG_HOTPLUG_CPU */

struct plat_smp_ops octeon_smp_ops = {
	.send_ipi_single	= octeon_send_ipi_single,
	.send_ipi_mask		= octeon_send_ipi_mask,
	.init_secondary		= octeon_init_secondary,
	.smp_finish		= octeon_smp_finish,
	.cpus_done		= octeon_cpus_done,
	.boot_secondary		= octeon_boot_secondary,
	.smp_setup		= octeon_smp_setup,
	.prepare_cpus		= octeon_prepare_cpus,
#ifdef CONFIG_HOTPLUG_CPU
	.cpu_disable		= octeon_cpu_disable,
	.cpu_die		= octeon_cpu_die,
#endif
};
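
/*
 * Note: nothing in this file consumes octeon_smp_ops directly; the
 * platform setup code is expected to hand it to register_smp_ops() so
 * that the generic MIPS SMP layer invokes the hooks above.  The
 * registration call lives in the Octeon board setup code, not here.
 */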