/*
 * This file contains Xilinx specific SMP code, used to start up
 * the second processor.
 *
 * Copyright (C) 2011-2013 Xilinx
 *
 * based on linux/arch/arm/mach-realview/platsmp.c
 *
 * Copyright (C) 2002 ARM Ltd.
 *
 * This software is licensed under the terms of the GNU General Public
 * License version 2, as published by the Free Software Foundation, and
 * may be copied, distributed, and modified under those terms.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 */

#include <linux/export.h>
#include <linux/jiffies.h>
#include <linux/init.h>
#include <linux/io.h>
#include <asm/cacheflush.h>
#include <asm/smp_scu.h>
#include <linux/irqchip/arm-gic.h>
#include "common.h"

/*
 * Store the number of cores in the system.
 * scu_get_core_count() is in the __init section and therefore cannot
 * be called from zynq_cpun_start(), which is not __init.
 */
static int ncores;

int zynq_cpun_start(u32 address, int cpu)
{
	u32 trampoline_code_size = &zynq_secondary_trampoline_end -
						&zynq_secondary_trampoline;

	if (cpu >= ncores) {
		pr_warn("CPU%d is not available in the system\n", cpu);
		return -1;
	}

	/* MS: Expectation that SLCR is directly mapped and accessible */
	/* Not possible to jump to a non-aligned address */
	if (!(address & 3) && (!address || (address >= trampoline_code_size))) {
		/* Store pointer to the ioremap area which maps address 0x0 */
		static u8 __iomem *zero;
		u32 trampoline_size = &zynq_secondary_trampoline_jump -
						&zynq_secondary_trampoline;

		zynq_slcr_cpu_stop(cpu);
		if (address) {
			if (__pa(PAGE_OFFSET)) {
				zero = ioremap(0, trampoline_code_size);
				if (!zero) {
					pr_warn("BOOTUP jump vectors not accessible\n");
					return -1;
				}
			} else {
				zero = (__force u8 __iomem *)PAGE_OFFSET;
			}

			/*
			 * This is an elegant way to jump to any address:
			 * 0x0: Load the address stored at 0x8 into r0
			 * 0x4: Jump to it via a mov instruction
			 * 0x8: Jumping address
			 */
			memcpy((__force void *)zero, &zynq_secondary_trampoline,
							trampoline_size);
			writel(address, zero + trampoline_size);

			flush_cache_all();
			outer_flush_range(0, trampoline_code_size);
			smp_wmb();

			if (__pa(PAGE_OFFSET))
				iounmap(zero);
		}
		zynq_slcr_cpu_start(cpu);

		return 0;
	}

	pr_warn("Can't start CPU%d: Wrong starting address 0x%x\n", cpu, address);

	return -1;
}
EXPORT_SYMBOL(zynq_cpun_start);

static int zynq_boot_secondary(unsigned int cpu,
				struct task_struct *idle)
{
	return zynq_cpun_start(virt_to_phys(secondary_startup), cpu);
}

/*
 * Initialise the CPU possible map early - this describes the CPUs
 * which may be present or become present in the system.
 */
static void __init zynq_smp_init_cpus(void)
{
	int i;

	ncores = scu_get_core_count(zynq_scu_base);

	for (i = 0; i < ncores && i < CONFIG_NR_CPUS; i++)
		set_cpu_possible(i, true);
}

static void __init zynq_smp_prepare_cpus(unsigned int max_cpus)
{
	int i;

	/*
	 * Initialise the present map, which describes the set of CPUs
	 * actually populated at the present time.
	 */
	for (i = 0; i < max_cpus; i++)
		set_cpu_present(i, true);

	scu_enable(zynq_scu_base);
}

struct smp_operations zynq_smp_ops __initdata = {
	.smp_init_cpus		= zynq_smp_init_cpus,
	.smp_prepare_cpus	= zynq_smp_prepare_cpus,
	.smp_boot_secondary	= zynq_boot_secondary,
#ifdef CONFIG_HOTPLUG_CPU
	.cpu_die		= zynq_platform_cpu_die,
#endif
};
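
/*
 * Illustrative note (a sketch added by the editor, not part of the original
 * file): after the memcpy() and writel() in zynq_cpun_start(), low memory is
 * expected to hold the trampoline followed by the target address, roughly:
 *
 *	0x0:	trampoline instruction that loads the word at 0x8 into r0
 *	0x4:	trampoline jump through r0 (e.g. "mov pc, r0")
 *	0x8:	<address> written by writel(address, zero + trampoline_size)
 *
 * The actual instructions live in zynq_secondary_trampoline (assembly,
 * defined outside this file); the exact encoding shown here is an assumption
 * that only restates the layout comment inside zynq_cpun_start().
 */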