xref: /openbmc/linux/arch/mips/cavium-octeon/smp.c (revision 1ab142d4)
/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 2004-2008, 2009, 2010 Cavium Networks
 */
#include <linux/cpu.h>
#include <linux/init.h>
#include <linux/delay.h>
#include <linux/smp.h>
#include <linux/interrupt.h>
#include <linux/kernel_stat.h>
#include <linux/sched.h>
#include <linux/module.h>

#include <asm/mmu_context.h>
#include <asm/system.h>
#include <asm/time.h>

#include <asm/octeon/octeon.h>

#include "octeon_boot.h"

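/*
 * Boot handshake with the secondary cores: the boot CPU publishes the
 * target core id, stack pointer and global pointer here, and the
 * secondary's startup code clears octeon_processor_sp once it has
 * loaded them (see the wait loop in octeon_boot_secondary() below).
 */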
volatile unsigned long octeon_processor_boot = 0xff;
volatile unsigned long octeon_processor_sp;
volatile unsigned long octeon_processor_gp;

#ifdef CONFIG_HOTPLUG_CPU
uint64_t octeon_bootloader_entry_addr;
EXPORT_SYMBOL(octeon_bootloader_entry_addr);
#endif

static irqreturn_t mailbox_interrupt(int irq, void *dev_id)
{
	const int coreid = cvmx_get_core_num();
	uint64_t action;

	/* Load the mailbox register to figure out what we're supposed to do */
	action = cvmx_read_csr(CVMX_CIU_MBOX_CLRX(coreid)) & 0xffff;

	/* Clear the mailbox to clear the interrupt */
	cvmx_write_csr(CVMX_CIU_MBOX_CLRX(coreid), action);

	if (action & SMP_CALL_FUNCTION)
		smp_call_function_interrupt();
	if (action & SMP_RESCHEDULE_YOURSELF)
		scheduler_ipi();

	/* Check if we've been told to flush the icache */
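	/*
	 * On Octeon, synci invalidates the entire icache, so the address
	 * operand below is effectively ignored.
	 */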
	if (action & SMP_ICACHE_FLUSH)
		asm volatile ("synci 0($0)\n");
	return IRQ_HANDLED;
}

/**
 * Request the actions encoded in @action on the given cpu by writing
 * them into that core's mailbox set register; the receiving core
 * handles them in mailbox_interrupt() above.
 */
void octeon_send_ipi_single(int cpu, unsigned int action)
{
	int coreid = cpu_logical_map(cpu);
	/*
	pr_info("SMP: Mailbox send cpu=%d, coreid=%d, action=%u\n", cpu,
	       coreid, action);
	*/
	cvmx_write_csr(CVMX_CIU_MBOX_SETX(coreid), action);
}

static inline void octeon_send_ipi_mask(const struct cpumask *mask,
					unsigned int action)
{
	unsigned int i;

	for_each_cpu_mask(i, *mask)
		octeon_send_ipi_single(i, action);
}

/**
 * Verify that a compatible bootloader is present and record its entry
 * point for later CPU hotplug use.
 */
static void octeon_smp_hotplug_setup(void)
{
#ifdef CONFIG_HOTPLUG_CPU
	struct linux_app_boot_info *labi;

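	/*
	 * The bootloader keeps its linux_app_boot_info structure at a
	 * fixed physical address; sanity check its signature.
	 */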
	labi = (struct linux_app_boot_info *)PHYS_TO_XKSEG_CACHED(LABI_ADDR_IN_BOOTLOADER);
	if (labi->labi_signature != LABI_SIGNATURE)
		panic("The bootloader version on this board is incorrect.");

	octeon_bootloader_entry_addr = labi->InitTLBStart_addr;
#endif
}

/**
 * Detect available CPUs, populate cpu_possible_map.
 */
static void octeon_smp_setup(void)
{
	const int coreid = cvmx_get_core_num();
	int cpus;
	int id;
	int core_mask = octeon_get_boot_coremask();
#ifdef CONFIG_HOTPLUG_CPU
	unsigned int num_cores = cvmx_octeon_num_cores();
#endif

	/* The present CPUs are initially just the boot cpu (CPU 0). */
	for (id = 0; id < NR_CPUS; id++) {
		set_cpu_possible(id, id == 0);
		set_cpu_present(id, id == 0);
	}

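	/*
	 * __cpu_number_map maps a hardware core id to a logical CPU
	 * number; __cpu_logical_map is its inverse.  The boot core is
	 * always logical CPU 0.
	 */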
	__cpu_number_map[coreid] = 0;
	__cpu_logical_map[0] = coreid;

	/* The present CPUs get the lowest CPU numbers. */
	cpus = 1;
	for (id = 0; id < NR_CPUS; id++) {
		if ((id != coreid) && (core_mask & (1 << id))) {
			set_cpu_possible(cpus, true);
			set_cpu_present(cpus, true);
			__cpu_number_map[id] = cpus;
			__cpu_logical_map[cpus] = id;
			cpus++;
		}
	}

#ifdef CONFIG_HOTPLUG_CPU
	/*
	 * The possible CPUs are all those present on the chip.  We
	 * will assign CPU numbers for possible cores as well.  Cores
	 * are always consecutively numbered from 0.
	 */
	for (id = 0; id < num_cores && id < NR_CPUS; id++) {
		if (!(core_mask & (1 << id))) {
			set_cpu_possible(cpus, true);
			__cpu_number_map[id] = cpus;
			__cpu_logical_map[cpus] = id;
			cpus++;
		}
	}
#endif

	octeon_smp_hotplug_setup();
}

/**
 * Firmware CPU startup hook
 */
static void octeon_boot_secondary(int cpu, struct task_struct *idle)
{
	int count;

	pr_info("SMP: Booting CPU%02d (CoreId %2d)...\n", cpu,
		cpu_logical_map(cpu));

	octeon_processor_sp = __KSTK_TOS(idle);
	octeon_processor_gp = (unsigned long)(task_thread_info(idle));
	octeon_processor_boot = cpu_logical_map(cpu);
	mb();

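	/* Give the secondary up to ~10ms (10000 x 1us) to pick up SP/GP. */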
	count = 10000;
	while (octeon_processor_sp && count) {
		/* Waiting for processor to get the SP and GP */
		udelay(1);
		count--;
	}
	if (count == 0)
		pr_err("Secondary boot timeout\n");
}

/**
 * After we've done initial boot, this function is called to allow the
 * board code to clean up state, if needed.
 */
static void __cpuinit octeon_init_secondary(void)
{
	unsigned int sr;

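	/*
	 * Point this core's exception base at the kernel's vectors.
	 * Status.BEV is raised while EBASE is rewritten so that any
	 * exception taken meanwhile uses the fixed bootstrap vectors
	 * rather than a half-updated EBASE.
	 */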
	sr = set_c0_status(ST0_BEV);
	write_c0_ebase((u32)ebase);
	write_c0_status(sr);

	octeon_check_cpu_bist();
	octeon_init_cvmcount();

	octeon_irq_setup_secondary();
	raw_local_irq_enable();
}

/**
 * Callout to firmware before smp_init
 */
void octeon_prepare_cpus(unsigned int max_cpus)
{
#ifdef CONFIG_HOTPLUG_CPU
	struct linux_app_boot_info *labi;

	labi = (struct linux_app_boot_info *)PHYS_TO_XKSEG_CACHED(LABI_ADDR_IN_BOOTLOADER);

	if (labi->labi_signature != LABI_SIGNATURE)
		panic("The bootloader version on this board is incorrect.");
#endif
	/*
	 * Only the low order mailbox bits are used for IPIs, leave
	 * the other bits alone.
	 */
	cvmx_write_csr(CVMX_CIU_MBOX_CLRX(cvmx_get_core_num()), 0xffff);
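	/*
	 * The handler itself is passed as dev_id, apparently only to
	 * give the IRQ a non-NULL cookie; it is never dereferenced.
	 */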
	if (request_irq(OCTEON_IRQ_MBOX0, mailbox_interrupt,
			IRQF_PERCPU | IRQF_NO_THREAD, "SMP-IPI",
			mailbox_interrupt)) {
		panic("Cannot request_irq(OCTEON_IRQ_MBOX0)");
	}
}

/**
 * Last chance for the board code to finish SMP initialization before
 * the CPU is "online".
 */
static void octeon_smp_finish(void)
{
#ifdef CONFIG_CAVIUM_GDB
	unsigned long tmp;
	/*
	 * Pulse the MCD0 signal on ctrl-C to stop all the cores, and
	 * leave MCD0 unmasked on this core so we know someone received
	 * the signal.
	 */
	asm volatile ("dmfc0 %0, $22\n"
		      "ori   %0, %0, 0x9100\n" "dmtc0 %0, $22\n" : "=r" (tmp));
#endif

	octeon_user_io_init();

	/*
	 * Arm the CP0 compare register to raise this CPU's first timer
	 * interrupt one tick from now.
	 */
	write_c0_compare(read_c0_count() + mips_hpt_frequency / HZ);
}

/**
 * Hook for after all CPUs are online
 */
static void octeon_cpus_done(void)
{
#ifdef CONFIG_CAVIUM_GDB
	unsigned long tmp;
	/*
	 * Pulse the MCD0 signal on ctrl-C to stop all the cores, and
	 * leave MCD0 unmasked on this core so we know someone received
	 * the signal.
	 */
	asm volatile ("dmfc0 %0, $22\n"
		      "ori   %0, %0, 0x9100\n" "dmtc0 %0, $22\n" : "=r" (tmp));
#endif
}

#ifdef CONFIG_HOTPLUG_CPU

/* State of each CPU. */
DEFINE_PER_CPU(int, cpu_state);

extern void fixup_irqs(void);

static DEFINE_SPINLOCK(smp_reserve_lock);

static int octeon_cpu_disable(void)
{
	unsigned int cpu = smp_processor_id();

	if (cpu == 0)
		return -EBUSY;

	spin_lock(&smp_reserve_lock);

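	/*
	 * Drop the CPU from the online and callin maps, then migrate
	 * its interrupts away before flushing caches and the TLB.
	 */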
	cpu_clear(cpu, cpu_online_map);
	cpu_clear(cpu, cpu_callin_map);
	local_irq_disable();
	fixup_irqs();
	local_irq_enable();

	flush_cache_all();
	local_flush_tlb_all();

	spin_unlock(&smp_reserve_lock);

	return 0;
}

static void octeon_cpu_die(unsigned int cpu)
{
	int coreid = cpu_logical_map(cpu);
	uint32_t mask, new_mask;
	const struct cvmx_bootmem_named_block_desc *block_desc;

	while (per_cpu(cpu_state, cpu) != CPU_DEAD)
		cpu_relax();

	/*
	 * Getting and setting the available core mask is a somewhat
	 * convoluted dance, copied from the bootloader.
	 */

	mask = 1 << coreid;
	/* LINUX_APP_BOOT_BLOCK is initialized in bootoct binary */
	block_desc = cvmx_bootmem_find_named_block(LINUX_APP_BOOT_BLOCK_NAME);

	if (!block_desc) {
		struct linux_app_boot_info *labi;

		labi = (struct linux_app_boot_info *)PHYS_TO_XKSEG_CACHED(LABI_ADDR_IN_BOOTLOADER);

		labi->avail_coremask |= mask;
		new_mask = labi->avail_coremask;
	} else {	/* alternative, already initialized */
		uint32_t *p = (uint32_t *)PHYS_TO_XKSEG_CACHED(block_desc->base_addr +
							       AVAIL_COREMASK_OFFSET_IN_LINUX_APP_BOOT_BLOCK);
		*p |= mask;
		new_mask = *p;
	}

	pr_info("Reset core %d. Available Coremask = 0x%x\n", coreid, new_mask);
	mb();
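	/* Pulse the core's reset line: assert, then deassert. */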
	cvmx_write_csr(CVMX_CIU_PP_RST, 1 << coreid);
	cvmx_write_csr(CVMX_CIU_PP_RST, 0);
}

void play_dead(void)
{
	int cpu = cpu_number_map(cvmx_get_core_num());

	idle_task_exit();
	octeon_processor_boot = 0xff;
	per_cpu(cpu_state, cpu) = CPU_DEAD;

	mb();

	while (1)	/* core will be reset here */
		;
}

extern void kernel_entry(unsigned long arg1, ...);

static void start_after_reset(void)
{
	kernel_entry(0, 0, 0);  /* set a2 = 0 for secondary core */
}

static int octeon_update_boot_vector(unsigned int cpu)
{
	int coreid = cpu_logical_map(cpu);
	uint32_t avail_coremask;
	const struct cvmx_bootmem_named_block_desc *block_desc;
	struct boot_init_vector *boot_vect =
		(struct boot_init_vector *)PHYS_TO_XKSEG_CACHED(BOOTLOADER_BOOT_VECTOR);

	block_desc = cvmx_bootmem_find_named_block(LINUX_APP_BOOT_BLOCK_NAME);

	if (!block_desc) {
		struct linux_app_boot_info *labi;

		labi = (struct linux_app_boot_info *)PHYS_TO_XKSEG_CACHED(LABI_ADDR_IN_BOOTLOADER);

		avail_coremask = labi->avail_coremask;
		labi->avail_coremask &= ~(1 << coreid);
	} else {	/* alternative, already initialized */
		avail_coremask = *(uint32_t *)PHYS_TO_XKSEG_CACHED(
			block_desc->base_addr + AVAIL_COREMASK_OFFSET_IN_LINUX_APP_BOOT_BLOCK);
	}

	if (!(avail_coremask & (1 << coreid))) {
		/* Core not available; assume it is held by the simple executive. */
		cvmx_write_csr(CVMX_CIU_PP_RST, 1 << coreid);
		cvmx_write_csr(CVMX_CIU_PP_RST, 0);
	}

	boot_vect[coreid].app_start_func_addr =
		(uint32_t) (unsigned long) start_after_reset;
	boot_vect[coreid].code_addr = octeon_bootloader_entry_addr;

	mb();

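	/*
	 * Kick the core with an NMI so it reenters the bootloader, but
	 * only if it is in the available coremask.
	 */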
	cvmx_write_csr(CVMX_CIU_NMI, (1 << coreid) & avail_coremask);

	return 0;
}

static int __cpuinit octeon_cpu_callback(struct notifier_block *nfb,
	unsigned long action, void *hcpu)
{
	unsigned int cpu = (unsigned long)hcpu;

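	/*
	 * A hot-plugged core reenters the kernel through the bootloader,
	 * so its boot vector must be rewritten before it is brought up.
	 */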
	switch (action) {
	case CPU_UP_PREPARE:
		octeon_update_boot_vector(cpu);
		break;
	case CPU_ONLINE:
		pr_info("Cpu %d online\n", cpu);
		break;
	case CPU_DEAD:
		break;
	}

	return NOTIFY_OK;
}

static int __cpuinit register_cavium_notifier(void)
{
	hotcpu_notifier(octeon_cpu_callback, 0);
	return 0;
}
late_initcall(register_cavium_notifier);

#endif  /* CONFIG_HOTPLUG_CPU */

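/*
 * Plumbed into the generic MIPS SMP layer; the platform setup code is
 * expected to hand this to register_smp_ops().
 */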
struct plat_smp_ops octeon_smp_ops = {
	.send_ipi_single	= octeon_send_ipi_single,
	.send_ipi_mask		= octeon_send_ipi_mask,
	.init_secondary		= octeon_init_secondary,
	.smp_finish		= octeon_smp_finish,
	.cpus_done		= octeon_cpus_done,
	.boot_secondary		= octeon_boot_secondary,
	.smp_setup		= octeon_smp_setup,
	.prepare_cpus		= octeon_prepare_cpus,
#ifdef CONFIG_HOTPLUG_CPU
	.cpu_disable		= octeon_cpu_disable,
	.cpu_die		= octeon_cpu_die,
#endif
};