// SPDX-License-Identifier: GPL-2.0+
/*
 * Copyright 2014-2015 Freescale Semiconductor, Inc.
 */

#include <common.h>
#include <asm/io.h>
#include <asm/system.h>
#include <asm/arch/mp.h>
#include <asm/arch/soc.h>
#include "cpu.h"
#include <asm/arch-fsl-layerscape/soc.h>

DECLARE_GLOBAL_DATA_PTR;

void *get_spin_tbl_addr(void)
{
	return &__spin_table;
}

phys_addr_t determine_mp_bootpg(void)
{
	return (phys_addr_t)&secondary_boot_code;
}

void update_os_arch_secondary_cores(uint8_t os_arch)
{
	u64 *table = get_spin_tbl_addr();
	int i;

	for (i = 1; i < CONFIG_MAX_CPUS; i++) {
		if (os_arch == IH_ARCH_DEFAULT)
			table[i * WORDS_PER_SPIN_TABLE_ENTRY +
				SPIN_TABLE_ELEM_ARCH_COMP_IDX] = OS_ARCH_SAME;
		else
			table[i * WORDS_PER_SPIN_TABLE_ENTRY +
				SPIN_TABLE_ELEM_ARCH_COMP_IDX] = OS_ARCH_DIFF;
	}
}
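
/*
 * Layout note (an illustrative sketch, not the authoritative definition):
 * the spin table is an array of CONFIG_MAX_CPUS entries, each
 * SPIN_TABLE_ELEM_SIZE bytes long and accessed as
 * WORDS_PER_SPIN_TABLE_ENTRY u64 words. Viewed as a struct, with
 * hypothetical field names, an entry looks roughly like:
 *
 *	struct spin_table_entry {
 *		u64 entry_addr;		[SPIN_TABLE_ELEM_ENTRY_ADDR_IDX]
 *		u64 status;		[SPIN_TABLE_ELEM_STATUS_IDX]
 *		u64 lpid;		[SPIN_TABLE_ELEM_LPID_IDX]
 *		u64 arch_comp;		[SPIN_TABLE_ELEM_ARCH_COMP_IDX]
 *		u64 pad[...];		padding up to SPIN_TABLE_ELEM_SIZE
 *	};
 *
 * The real word offsets are the *_IDX constants from asm/arch/mp.h.
 */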

#ifdef CONFIG_FSL_LSCH3
void wake_secondary_core_n(int cluster, int core, int cluster_cores)
{
	struct ccsr_gur __iomem *gur = (void *)(CONFIG_SYS_FSL_GUTS_ADDR);
	struct ccsr_reset __iomem *rst = (void *)(CONFIG_SYS_FSL_RST_ADDR);
	u32 mpidr = (cluster << 8) | core;

	/*
	 * The mpidr_el1 register value of the core to be released is
	 * written to the scratchrw[6] register.
	 */
	gur_out32(&gur->scratchrw[6], mpidr);
	asm volatile("dsb st" : : : "memory");
	rst->brrl |= 1 << ((cluster * cluster_cores) + core);
	asm volatile("dsb st" : : : "memory");
	/*
	 * Poll the scratchrw[6] register; once it reads back zero, the
	 * core is up and running and the next core can be released.
	 */
	while (gur_in32(&gur->scratchrw[6]) != 0)
		;
}
#endif

int fsl_layerscape_wake_seconday_cores(void)
{
	struct ccsr_gur __iomem *gur = (void *)(CONFIG_SYS_FSL_GUTS_ADDR);
#ifdef CONFIG_FSL_LSCH3
	struct ccsr_reset __iomem *rst = (void *)(CONFIG_SYS_FSL_RST_ADDR);
	u32 svr, ver, cluster, type;
	int j = 0, cluster_cores = 0;
#elif defined(CONFIG_FSL_LSCH2)
	struct ccsr_scfg __iomem *scfg = (void *)(CONFIG_SYS_FSL_SCFG_ADDR);
#endif
	u32 cores, cpu_up_mask = 1;
	int i, timeout = 10;
	u64 *table = get_spin_tbl_addr();

#ifdef COUNTER_FREQUENCY_REAL
	/* update for secondary cores */
	__real_cntfrq = COUNTER_FREQUENCY_REAL;
	flush_dcache_range((unsigned long)&__real_cntfrq,
			   (unsigned long)&__real_cntfrq + 8);
#endif

	cores = cpu_mask();
	/*
	 * Clear the spin table so that secondary processors observe the
	 * correct value after waking up from wfe.
	 */
	memset(table, 0, CONFIG_MAX_CPUS * SPIN_TABLE_ELEM_SIZE);
	flush_dcache_range((unsigned long)table,
			   (unsigned long)table +
			   (CONFIG_MAX_CPUS * SPIN_TABLE_ELEM_SIZE));

	printf("Waking secondary cores to start from %lx\n", gd->relocaddr);

#ifdef CONFIG_FSL_LSCH3
	gur_out32(&gur->bootlocptrh, (u32)(gd->relocaddr >> 32));
	gur_out32(&gur->bootlocptrl, (u32)gd->relocaddr);

	svr = gur_in32(&gur->svr);
	ver = SVR_SOC_VER(svr);
	if (ver == SVR_LS2080A || ver == SVR_LS2085A) {
		gur_out32(&gur->scratchrw[6], 1);
		asm volatile("dsb st" : : : "memory");
		rst->brrl = cores;
		asm volatile("dsb st" : : : "memory");
	} else {
		/*
		 * Release the cores out of reset one-at-a-time to avoid
		 * power spikes
		 */
		i = 0;
		cluster = in_le32(&gur->tp_cluster[i].lower);
		for (j = 0; j < TP_INIT_PER_CLUSTER; j++) {
			type = initiator_type(cluster, j);
			if (type &&
			    TP_ITYP_TYPE(type) == TP_ITYP_TYPE_ARM)
				cluster_cores++;
		}

		do {
			cluster = in_le32(&gur->tp_cluster[i].lower);
			for (j = 0; j < TP_INIT_PER_CLUSTER; j++) {
				type = initiator_type(cluster, j);
				if (type &&
				    TP_ITYP_TYPE(type) == TP_ITYP_TYPE_ARM)
					wake_secondary_core_n(i, j,
							      cluster_cores);
			}
			i++;
		} while ((cluster & TP_CLUSTER_EOC) != TP_CLUSTER_EOC);
	}
#elif defined(CONFIG_FSL_LSCH2)
	scfg_out32(&scfg->scratchrw[0], (u32)(gd->relocaddr >> 32));
	scfg_out32(&scfg->scratchrw[1], (u32)gd->relocaddr);
	asm volatile("dsb st" : : : "memory");
	gur_out32(&gur->brrl, cores);
	asm volatile("dsb st" : : : "memory");

	/* Bootup online cores */
	scfg_out32(&scfg->corebcr, cores);
#endif
	/*
	 * This is needed as a precautionary measure.
	 * If some code before this has accidentally released the secondary
	 * cores then the pre-bootloader code will trap them in a "wfe" unless
	 * the scratchrw[6] is set. In this case we need a sev here to get
	 * these cores moving again.
	 */
	asm volatile("sev");

	while (timeout--) {
		flush_dcache_range((unsigned long)table, (unsigned long)table +
				   CONFIG_MAX_CPUS * SPIN_TABLE_ELEM_SIZE);
		for (i = 1; i < CONFIG_MAX_CPUS; i++) {
			if (table[i * WORDS_PER_SPIN_TABLE_ENTRY +
					SPIN_TABLE_ELEM_STATUS_IDX])
				cpu_up_mask |= 1 << i;
		}
		if (hweight32(cpu_up_mask) == hweight32(cores))
			break;
		udelay(10);
	}
	if (timeout < 0) {
		printf("Not all cores (0x%x) are up (0x%x)\n",
		       cores, cpu_up_mask);
		return 1;
	}
	printf("All (%d) cores are up.\n", hweight32(cores));

	return 0;
}
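
/*
 * For context, a sketch of the secondary-core side of this handshake.
 * The real code is the assembly pre-bootloader at secondary_boot_code;
 * this C rendering is purely illustrative:
 *
 *	u64 *entry = (u64 *)get_spin_tbl_addr() +
 *		     core_pos * WORDS_PER_SPIN_TABLE_ENTRY;
 *
 *	entry[SPIN_TABLE_ELEM_STATUS_IDX] = 1;	(report "core is up"; on
 *						 LSCH3 the core also clears
 *						 gur->scratchrw[6], which
 *						 wake_secondary_core_n()
 *						 polls before releasing the
 *						 next core)
 *	while (!entry[SPIN_TABLE_ELEM_ENTRY_ADDR_IDX])
 *		wfe();				(woken by "sev")
 *	branch_to(entry[SPIN_TABLE_ELEM_ENTRY_ADDR_IDX]);
 *
 * This is why fsl_layerscape_wake_seconday_cores() polls the status word
 * and why cpu_release() below only has to publish an entry address and
 * issue "sev".
 */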

int is_core_valid(unsigned int core)
{
	return !!((1 << core) & cpu_mask());
}

static int is_pos_valid(unsigned int pos)
{
	return !!((1 << pos) & cpu_pos_mask());
}

int is_core_online(u64 cpu_id)
{
	u64 *table;
	int pos = id_to_core(cpu_id);

	table = (u64 *)get_spin_tbl_addr() + pos * WORDS_PER_SPIN_TABLE_ENTRY;
	return table[SPIN_TABLE_ELEM_STATUS_IDX] == 1;
}

int cpu_reset(u32 nr)
{
	puts("Feature is not implemented.\n");

	return 0;
}

int cpu_disable(u32 nr)
{
	puts("Feature is not implemented.\n");

	return 0;
}

/* Map a logical core number to its position in the spin table */
static int core_to_pos(int nr)
{
	u32 cores = cpu_pos_mask();
	int i, count = 0;

	if (nr == 0) {
		return 0;
	} else if (nr >= hweight32(cores)) {
		puts("Not a valid core number.\n");
		return -1;
	}

	for (i = 1; i < 32; i++) {
		if (is_pos_valid(i)) {
			count++;
			if (count == nr)
				break;
		}
	}

	if (count != nr)
		return -1;

	return i;
}

int cpu_status(u32 nr)
{
	u64 *table;
	int pos;

	if (nr == 0) {
		table = (u64 *)get_spin_tbl_addr();
		printf("table base @ 0x%p\n", table);
	} else {
		pos = core_to_pos(nr);
		if (pos < 0)
			return -1;
		table = (u64 *)get_spin_tbl_addr() + pos *
			WORDS_PER_SPIN_TABLE_ENTRY;
		printf("table @ 0x%p\n", table);
		printf("   addr - 0x%016llx\n",
		       table[SPIN_TABLE_ELEM_ENTRY_ADDR_IDX]);
		printf("   status - 0x%016llx\n",
		       table[SPIN_TABLE_ELEM_STATUS_IDX]);
		printf("   lpid - 0x%016llx\n",
		       table[SPIN_TABLE_ELEM_LPID_IDX]);
	}

	return 0;
}

int cpu_release(u32 nr, int argc, char * const argv[])
{
	u64 boot_addr;
	u64 *table = (u64 *)get_spin_tbl_addr();
	int pos;

	pos = core_to_pos(nr);
	if (pos <= 0)
		return -1;

	table += pos * WORDS_PER_SPIN_TABLE_ENTRY;
	boot_addr = simple_strtoull(argv[0], NULL, 16);
	table[SPIN_TABLE_ELEM_ENTRY_ADDR_IDX] = boot_addr;
	flush_dcache_range((unsigned long)table,
			   (unsigned long)table + SPIN_TABLE_ELEM_SIZE);
	asm volatile("dsb st");
	smp_kick_all_cpus();	/* only those with entry addr set will run */
	/*
	 * When the first release command runs, all cores are set to go. Those
	 * without a valid entry address will be trapped by "wfe". "sev" kicks
	 * them off to check the address again. When set, they continue to run.
	 */
	asm volatile("sev");

	return 0;
}
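
/*
 * Usage sketch: with U-Boot's generic "cpu" shell command enabled (an
 * assumption; it is what normally dispatches to cpu_status()/cpu_release()
 * above), releasing core 1 to an image loaded at the hypothetical address
 * 0x81000000 would look like:
 *
 *	=> cpu 1 release 81000000
 *
 * cpu_release() writes the address into the core's spin-table entry,
 * flushes the cache line, and issues "sev" so the spinning core re-checks
 * its entry and branches to the new entry point.
 */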