// SPDX-License-Identifier: GPL-2.0

#include <linux/module.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/sched.h>
#include <linux/kernel_stat.h>
#include <linux/notifier.h>
#include <linux/cpu.h>
#include <linux/percpu.h>
#include <linux/delay.h>
#include <linux/err.h>
#include <linux/irq.h>
#include <linux/irqdomain.h>
#include <linux/of.h>
#include <linux/sched/task_stack.h>
#include <linux/sched/mm.h>
#include <asm/irq.h>
#include <asm/traps.h>
#include <asm/sections.h>
#include <asm/mmu_context.h>
#include <asm/pgalloc.h>

/* Per-cpu bitmap of pending IPI messages, indexed by ipi_message_type. */
struct ipi_data_struct {
	unsigned long bits ____cacheline_aligned;
};
static DEFINE_PER_CPU(struct ipi_data_struct, ipi_data);

enum ipi_message_type {
	IPI_EMPTY,
	IPI_RESCHEDULE,
	IPI_CALL_FUNC,
	IPI_MAX
};

static irqreturn_t handle_ipi(int irq, void *dev)
{
	while (true) {
		unsigned long ops;

		/* Atomically claim all messages pending for this cpu. */
		ops = xchg(&this_cpu_ptr(&ipi_data)->bits, 0);
		if (ops == 0)
			return IRQ_HANDLED;

		if (ops & (1 << IPI_RESCHEDULE))
			scheduler_ipi();

		if (ops & (1 << IPI_CALL_FUNC))
			generic_smp_call_function_interrupt();

		/* Any bit at or above IPI_MAX is an unknown message type. */
		BUG_ON((ops >> IPI_MAX) != 0);
	}

	return IRQ_HANDLED;
}

static void (*send_arch_ipi)(const struct cpumask *mask);

static int ipi_irq;

/*
 * The interrupt controller driver registers its IPI trigger callback and
 * the per-cpu IPI IRQ here; only the first registration takes effect.
 */
void __init set_send_ipi(void (*func)(const struct cpumask *mask), int irq)
{
	if (send_arch_ipi)
		return;

	send_arch_ipi = func;
	ipi_irq = irq;
}

static void
send_ipi_message(const struct cpumask *to_whom, enum ipi_message_type operation)
{
	int i;

	for_each_cpu(i, to_whom)
		set_bit(operation, &per_cpu_ptr(&ipi_data, i)->bits);

	/* Make the message bits visible before raising the interrupt. */
	smp_mb();
	send_arch_ipi(to_whom);
}

void arch_send_call_function_ipi_mask(struct cpumask *mask)
{
	send_ipi_message(mask, IPI_CALL_FUNC);
}

void arch_send_call_function_single_ipi(int cpu)
{
	send_ipi_message(cpumask_of(cpu), IPI_CALL_FUNC);
}

static void ipi_stop(void *unused)
{
	while (1)
		cpu_relax();
}

void smp_send_stop(void)
{
	on_each_cpu(ipi_stop, NULL, 1);
}

void smp_send_reschedule(int cpu)
{
	send_ipi_message(cpumask_of(cpu), IPI_RESCHEDULE);
}

void __init smp_prepare_boot_cpu(void)
{
}

void __init smp_prepare_cpus(unsigned int max_cpus)
{
}

static void __init enable_smp_ipi(void)
{
	enable_percpu_irq(ipi_irq, 0);
}

static int ipi_dummy_dev;

void __init setup_smp_ipi(void)
{
	int rc;

	if (ipi_irq == 0)
		panic("%s IRQ mapping failed\n", __func__);

	rc = request_percpu_irq(ipi_irq, handle_ipi, "IPI Interrupt",
				&ipi_dummy_dev);
	if (rc)
		panic("%s IRQ request failed\n", __func__);

	enable_smp_ipi();
}

/* Walk the devicetree cpu nodes and mark each usable cpu possible/present. */
void __init setup_smp(void)
{
	struct device_node *node = NULL;
	unsigned int cpu;

	while ((node = of_find_node_by_type(node, "cpu"))) {
		if (!of_device_is_available(node))
			continue;

		/* The "reg" property holds the cpu's logical id. */
		if (of_property_read_u32(node, "reg", &cpu))
			continue;

		if (cpu >= NR_CPUS)
			continue;

		set_cpu_possible(cpu, true);
		set_cpu_present(cpu, true);
	}
}

extern void _start_smp_secondary(void);

/* Boot parameters handed from __cpu_up() to the secondary's entry code. */
volatile unsigned int secondary_hint;
volatile unsigned int secondary_ccr;
volatile unsigned int secondary_stack;

int __cpu_up(unsigned int cpu, struct task_struct *tidle)
{
	unsigned int tmp;

	/* Point the secondary at the top of its idle task's stack. */
	secondary_stack = (unsigned int)tidle->stack + THREAD_SIZE;
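
	/*
	 * Stash the boot cpu's hint (cr31) and cache-control (cr18)
	 * register values; the secondary copies them back into its own
	 * registers in csky_start_secondary().
	 */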
	secondary_hint = mfcr("cr31");
	secondary_ccr = mfcr("cr18");

	/*
	 * The other CPUs are still held in reset, so flush these values
	 * out of the cache to memory, where the secondary CPUs will pick
	 * them up in csky_start_secondary().
	 */
	mtcr("cr17", 0x22);

	/* Release the cpu from reset via the SMP reset control register. */
	tmp = mfcr("cr<29, 0>");
	tmp |= 1 << cpu;
	mtcr("cr<29, 0>", tmp);

	/* Wait for the secondary to mark itself online. */
	while (!cpu_online(cpu))
		cpu_relax();

	secondary_stack = 0;

	return 0;
}

void __init smp_cpus_done(unsigned int max_cpus)
{
}

int setup_profiling_timer(unsigned int multiplier)
{
	return -EINVAL;
}

void csky_start_secondary(void)
{
	struct mm_struct *mm = &init_mm;
	unsigned int cpu = smp_processor_id();

	/* Restore the hint and cache-control values saved by __cpu_up(). */
	mtcr("cr31", secondary_hint);
	mtcr("cr18", secondary_ccr);

	/* Point the vector base register at the kernel's vector table. */
	mtcr("vbr", vec_base);

	/* Start from a clean MMU: empty TLB, kernel page tables. */
	flush_tlb_all();
	write_mmu_pagemask(0);
	TLBMISS_HANDLER_SETUP_PGD(swapper_pg_dir);
	TLBMISS_HANDLER_SETUP_PGD_KERNEL(swapper_pg_dir);

	asid_cache(smp_processor_id()) = ASID_FIRST_VERSION;

#ifdef CONFIG_CPU_HAS_FPU
	init_fpu();
#endif

	enable_smp_ipi();

	/* Adopt init_mm as the active mm and take our references on it. */
	mmget(mm);
	mmgrab(mm);
	current->active_mm = mm;
	cpumask_set_cpu(cpu, mm_cpumask(mm));

	notify_cpu_starting(cpu);
	set_cpu_online(cpu, true);

	pr_info("CPU%u Online: %s...\n", cpu, __func__);

	local_irq_enable();
	preempt_disable();
	cpu_startup_entry(CPUHP_AP_ONLINE_IDLE);
}