xref: /openbmc/linux/arch/mips/kernel/smp.c (revision a1e58bbd)
/*
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version 2
 * of the License, or (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA  02111-1307, USA.
 *
 * Copyright (C) 2000, 2001 Kanoj Sarcar
 * Copyright (C) 2000, 2001 Ralf Baechle
 * Copyright (C) 2000, 2001 Silicon Graphics, Inc.
 * Copyright (C) 2000, 2001, 2003 Broadcom Corporation
 */
#include <linux/cache.h>
#include <linux/delay.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/spinlock.h>
#include <linux/threads.h>
#include <linux/module.h>
#include <linux/time.h>
#include <linux/timex.h>
#include <linux/sched.h>
#include <linux/cpumask.h>
#include <linux/cpu.h>
#include <linux/err.h>

#include <asm/atomic.h>
#include <asm/cpu.h>
#include <asm/processor.h>
#include <asm/system.h>
#include <asm/mmu_context.h>
#include <asm/time.h>

#ifdef CONFIG_MIPS_MT_SMTC
#include <asm/mipsmtregs.h>
#endif /* CONFIG_MIPS_MT_SMTC */

cpumask_t phys_cpu_present_map;		/* Bitmask of available CPUs */
volatile cpumask_t cpu_callin_map;	/* Bitmask of started secondaries */
cpumask_t cpu_online_map;		/* Bitmask of currently online CPUs */
int __cpu_number_map[NR_CPUS];		/* Map physical to logical */
int __cpu_logical_map[NR_CPUS];		/* Map logical to physical */

EXPORT_SYMBOL(phys_cpu_present_map);
EXPORT_SYMBOL(cpu_online_map);

extern void cpu_idle(void);

/* Number of TCs (or siblings in Intel speak) per CPU core */
int smp_num_siblings = 1;
EXPORT_SYMBOL(smp_num_siblings);

/* representing the TCs (or siblings in Intel speak) of each logical CPU */
cpumask_t cpu_sibling_map[NR_CPUS] __read_mostly;
EXPORT_SYMBOL(cpu_sibling_map);

/* representing cpus for which sibling maps can be computed */
static cpumask_t cpu_sibling_setup_map;

static inline void set_cpu_sibling_map(int cpu)
{
	int i;

	cpu_set(cpu, cpu_sibling_setup_map);

	if (smp_num_siblings > 1) {
		for_each_cpu_mask(i, cpu_sibling_setup_map) {
			if (cpu_data[cpu].core == cpu_data[i].core) {
				cpu_set(i, cpu_sibling_map[cpu]);
				cpu_set(cpu, cpu_sibling_map[i]);
			}
		}
	} else
		cpu_set(cpu, cpu_sibling_map[cpu]);
}

struct plat_smp_ops *mp_ops;

__cpuinit void register_smp_ops(struct plat_smp_ops *ops)
{
	if (mp_ops)
		printk(KERN_WARNING "Overriding previously set SMP ops\n");

	mp_ops = ops;
}
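
/*
 * Editor's example (compiled out, not part of the original file): how a
 * platform might register its SMP methods.  "myplat" and the
 * myplat_*() callbacks are hypothetical; only hooks that this file
 * actually invokes through mp_ops are listed.
 */
#if 0
static struct plat_smp_ops myplat_smp_ops = {
	.send_ipi_mask		= myplat_send_ipi_mask,
	.init_secondary		= myplat_init_secondary,
	.smp_finish		= myplat_smp_finish,
	.cpus_done		= myplat_cpus_done,
	.boot_secondary		= myplat_boot_secondary,
	.prepare_cpus		= myplat_prepare_cpus,
};

void __init myplat_smp_setup(void)
{
	register_smp_ops(&myplat_smp_ops);
}
#endif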

/*
 * First C code run on the secondary CPUs after being started up by
 * the master.
 */
asmlinkage __cpuinit void start_secondary(void)
{
	unsigned int cpu;

#ifdef CONFIG_MIPS_MT_SMTC
	/* Only do cpu_probe for first TC of CPU */
	if ((read_c0_tcbind() & TCBIND_CURTC) == 0)
#endif /* CONFIG_MIPS_MT_SMTC */
	cpu_probe();
	cpu_report();
	per_cpu_trap_init();
	mips_clockevent_init();
	mp_ops->init_secondary();

	/*
	 * XXX parity protection should be folded in here when it's converted
	 * to an option instead of something based on .cputype
	 */

	calibrate_delay();
	preempt_disable();
	cpu = smp_processor_id();
	cpu_data[cpu].udelay_val = loops_per_jiffy;

	mp_ops->smp_finish();
	set_cpu_sibling_map(cpu);

	cpu_set(cpu, cpu_callin_map);

	cpu_idle();
}

DEFINE_SPINLOCK(smp_call_lock);

struct call_data_struct *call_data;

/*
 * Run a function on all other CPUs.
 *
 *  <mask>	cpumask_t of all processors to run the function on.
 *  <func>      The function to run. This must be fast and non-blocking.
 *  <info>      An arbitrary pointer to pass to the function.
 *  <retry>     If true, keep retrying until ready.
 *  <wait>      If true, wait until function has completed on other CPUs.
 *  [RETURNS]   0 on success, else a negative status code.
 *
 * Does not return until remote CPUs are nearly ready to execute <func>
 * or have already executed it.
 *
 * You must not call this function with disabled interrupts or from a
 * hardware interrupt handler or from a bottom half handler:
 *
 * CPU A                               CPU B
 * Disable interrupts
 *                                     smp_call_function()
 *                                     Take call_lock
 *                                     Send IPIs
 *                                     Wait for all cpus to acknowledge IPI
 *                                     CPU A has not responded, spin waiting
 *                                     for cpu A to respond, holding call_lock
 * smp_call_function()
 * Spin waiting for call_lock
 * Deadlock                            Deadlock
 */
int smp_call_function_mask(cpumask_t mask, void (*func) (void *info),
	void *info, int retry, int wait)
{
	struct call_data_struct data;
	int cpu = smp_processor_id();
	int cpus;

	/*
	 * Can die spectacularly if this CPU isn't yet marked online
	 */
	BUG_ON(!cpu_online(cpu));

	cpu_clear(cpu, mask);
	cpus = cpus_weight(mask);
	if (!cpus)
		return 0;

	/* Can deadlock when called with interrupts disabled */
	WARN_ON(irqs_disabled());

	data.func = func;
	data.info = info;
	atomic_set(&data.started, 0);
	data.wait = wait;
	if (wait)
		atomic_set(&data.finished, 0);

	spin_lock(&smp_call_lock);
	call_data = &data;
	smp_mb();

	/* Send a message to all other CPUs and wait for them to respond */
	mp_ops->send_ipi_mask(mask, SMP_CALL_FUNCTION);

	/* Wait for response */
	/* FIXME: lock-up detection, backtrace on lock-up */
	while (atomic_read(&data.started) != cpus)
		barrier();

	if (wait)
		while (atomic_read(&data.finished) != cpus)
			barrier();
	call_data = NULL;
	spin_unlock(&smp_call_lock);

	return 0;
}

int smp_call_function(void (*func) (void *info), void *info, int retry,
	int wait)
{
	return smp_call_function_mask(cpu_online_map, func, info, retry, wait);
}
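
/*
 * Editor's example (compiled out, not part of the original file): a
 * hypothetical caller counting the other online CPUs.  Because "info"
 * points at on-stack data, wait must be 1 so this stack frame survives
 * until every remote handler has finished with it (see the out-of-scope
 * note in smp_call_function_interrupt() below).
 */
#if 0
static void count_me(void *info)
{
	atomic_inc((atomic_t *) info);
}

static int count_other_cpus(void)
{
	atomic_t count = ATOMIC_INIT(0);

	/* Interrupts must be enabled; never call this from IRQ context. */
	smp_call_function(count_me, &count, 0, 1);

	return atomic_read(&count);
}
#endif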

void smp_call_function_interrupt(void)
{
	void (*func) (void *info) = call_data->func;
	void *info = call_data->info;
	int wait = call_data->wait;

	/*
	 * Notify initiating CPU that I've grabbed the data and am
	 * about to execute the function.
	 */
	smp_mb();
	atomic_inc(&call_data->started);

	/*
	 * At this point the info structure may be out of scope unless wait==1.
	 */
	irq_enter();
	(*func)(info);
	irq_exit();

	if (wait) {
		smp_mb();
		atomic_inc(&call_data->finished);
	}
}

int smp_call_function_single(int cpu, void (*func) (void *info), void *info,
			     int retry, int wait)
{
	int ret, me;

	/*
	 * Can die spectacularly if this CPU isn't yet marked online
	 */
	if (!cpu_online(cpu))
		return 0;

	me = get_cpu();
	BUG_ON(!cpu_online(me));

	if (cpu == me) {
		local_irq_disable();
		func(info);
		local_irq_enable();
		put_cpu();
		return 0;
	}

	ret = smp_call_function_mask(cpumask_of_cpu(cpu), func, info, retry,
				     wait);

	put_cpu();
	return ret;
}
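
/*
 * Editor's example (compiled out, not part of the original file): use
 * smp_call_function_single() to ask one particular CPU to report its
 * ID.  report_cpu() and probe_cpu() are hypothetical helpers.
 */
#if 0
static void report_cpu(void *info)
{
	*(int *) info = smp_processor_id();
}

static int probe_cpu(int cpu)
{
	int ran_on = -1;

	smp_call_function_single(cpu, report_cpu, &ran_on, 0, 1);

	return ran_on;		/* Stays -1 if cpu was not online. */
}
#endif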

static void stop_this_cpu(void *dummy)
{
	/*
	 * Remove this CPU:
	 */
	cpu_clear(smp_processor_id(), cpu_online_map);
	local_irq_enable();	/* May need to service _machine_restart IPI */
	for (;;);		/* Spin forever; ideally use the wait instruction if available. */
}

void smp_send_stop(void)
{
	smp_call_function(stop_this_cpu, NULL, 1, 0);
}

void __init smp_cpus_done(unsigned int max_cpus)
{
	mp_ops->cpus_done();
}

/* called from main before smp_init() */
void __init smp_prepare_cpus(unsigned int max_cpus)
{
	init_new_context(current, &init_mm);
	current_thread_info()->cpu = 0;
	mp_ops->prepare_cpus(max_cpus);
	set_cpu_sibling_map(0);
#ifndef CONFIG_HOTPLUG_CPU
	cpu_present_map = cpu_possible_map;
#endif
}

/* preload SMP state for boot cpu */
void __devinit smp_prepare_boot_cpu(void)
{
	/*
	 * This assumes that bootup is always handled by the processor
	 * with the logical and physical number 0.
	 */
	__cpu_number_map[0] = 0;
	__cpu_logical_map[0] = 0;
	cpu_set(0, phys_cpu_present_map);
	cpu_set(0, cpu_online_map);
	cpu_set(0, cpu_callin_map);
}

/*
 * Called once for each "cpu_possible(cpu)".  Needs to spin up the cpu
 * and keep control until "cpu_online(cpu)" is set.  Note: cpu is
 * physical, not logical.
 */
int __cpuinit __cpu_up(unsigned int cpu)
{
	struct task_struct *idle;

	/*
	 * Processor goes to start_secondary(), sets online flag
	 * The following code is purely to make sure
	 * Linux can schedule processes on this slave.
	 */
	idle = fork_idle(cpu);
	if (IS_ERR(idle))
		panic("Fork failed for CPU %d", cpu);

	mp_ops->boot_secondary(cpu, idle);

	/*
	 * Trust is futile.  We should really have timeouts ...
	 */
	while (!cpu_isset(cpu, cpu_callin_map))
		udelay(100);

	cpu_set(cpu, cpu_online_map);

	return 0;
}
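
/*
 * Editor's sketch (compiled out, not part of the original file) of the
 * bounded wait that the "Trust is futile" comment above asks for: give
 * a secondary roughly one second to check in instead of spinning
 * forever.  The 10000 * 100us budget is an arbitrary illustration, not
 * a tested value.
 */
#if 0
static int wait_for_callin(unsigned int cpu)
{
	int timeout = 10000;

	while (!cpu_isset(cpu, cpu_callin_map)) {
		if (!--timeout) {
			printk(KERN_ERR "CPU%u failed to start\n", cpu);
			return -EIO;
		}
		udelay(100);
	}

	return 0;
}
#endif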

/* Not really SMP stuff ... */
int setup_profiling_timer(unsigned int multiplier)
{
	return 0;
}

static void flush_tlb_all_ipi(void *info)
{
	local_flush_tlb_all();
}

void flush_tlb_all(void)
{
	on_each_cpu(flush_tlb_all_ipi, NULL, 1, 1);
}

static void flush_tlb_mm_ipi(void *mm)
{
	local_flush_tlb_mm((struct mm_struct *)mm);
}

/*
 * Special Variant of smp_call_function for use by TLB functions:
 *
 *  o No return value
 *  o collapses to normal function call on UP kernels
 *  o collapses to normal function call on systems with a single shared
 *    primary cache.
 *  o CONFIG_MIPS_MT_SMTC currently implies there is only one physical core.
 */
static inline void smp_on_other_tlbs(void (*func) (void *info), void *info)
{
#ifndef CONFIG_MIPS_MT_SMTC
	smp_call_function(func, info, 1, 1);
#endif
}

static inline void smp_on_each_tlb(void (*func) (void *info), void *info)
{
	preempt_disable();

	smp_on_other_tlbs(func, info);
	func(info);

	preempt_enable();
}

/*
 * The following tlb flush calls are invoked when old translations are
 * being torn down, or pte attributes are changing. For single threaded
 * address spaces, a new context is obtained on the current cpu, and tlb
 * contexts on other cpus are invalidated to force a new context allocation
 * at switch_mm time, should the mm ever be used on other cpus. For
 * multithreaded address spaces, inter-cpu interrupts have to be sent.
 * Another case where inter-cpu interrupts are required is when the target
 * mm might be active on another cpu (eg debuggers doing the flushes on
 * behalf of debuggees, kswapd stealing pages from another process etc).
 * Kanoj 07/00.
 */
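
/*
 * Editor's sketch (compiled out, not part of the original file): the
 * lazy-invalidation step described above, which flush_tlb_mm(),
 * flush_tlb_range() and flush_tlb_page() below each open-code.
 * Clearing cpu_context() forces the affected CPUs to allocate a fresh
 * ASID at their next switch_mm() instead of taking an IPI now.
 */
#if 0
static void drop_other_mm_contexts(struct mm_struct *mm)
{
	cpumask_t mask = cpu_online_map;
	unsigned int cpu;

	cpu_clear(smp_processor_id(), mask);
	for_each_cpu_mask(cpu, mask)
		if (cpu_context(cpu, mm))
			cpu_context(cpu, mm) = 0;
}
#endif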

void flush_tlb_mm(struct mm_struct *mm)
{
	preempt_disable();

	if ((atomic_read(&mm->mm_users) != 1) || (current->mm != mm)) {
		smp_on_other_tlbs(flush_tlb_mm_ipi, mm);
	} else {
		cpumask_t mask = cpu_online_map;
		unsigned int cpu;

		cpu_clear(smp_processor_id(), mask);
		for_each_cpu_mask(cpu, mask)
			if (cpu_context(cpu, mm))
				cpu_context(cpu, mm) = 0;
	}
	local_flush_tlb_mm(mm);

	preempt_enable();
}

struct flush_tlb_data {
	struct vm_area_struct *vma;
	unsigned long addr1;
	unsigned long addr2;
};

static void flush_tlb_range_ipi(void *info)
{
	struct flush_tlb_data *fd = info;

	local_flush_tlb_range(fd->vma, fd->addr1, fd->addr2);
}

void flush_tlb_range(struct vm_area_struct *vma, unsigned long start, unsigned long end)
{
	struct mm_struct *mm = vma->vm_mm;

	preempt_disable();
	if ((atomic_read(&mm->mm_users) != 1) || (current->mm != mm)) {
		struct flush_tlb_data fd = {
			.vma = vma,
			.addr1 = start,
			.addr2 = end,
		};

		smp_on_other_tlbs(flush_tlb_range_ipi, &fd);
	} else {
		cpumask_t mask = cpu_online_map;
		unsigned int cpu;

		cpu_clear(smp_processor_id(), mask);
		for_each_cpu_mask(cpu, mask)
			if (cpu_context(cpu, mm))
				cpu_context(cpu, mm) = 0;
	}
	local_flush_tlb_range(vma, start, end);
	preempt_enable();
}

static void flush_tlb_kernel_range_ipi(void *info)
{
	struct flush_tlb_data *fd = info;

	local_flush_tlb_kernel_range(fd->addr1, fd->addr2);
}

void flush_tlb_kernel_range(unsigned long start, unsigned long end)
{
	struct flush_tlb_data fd = {
		.addr1 = start,
		.addr2 = end,
	};

	on_each_cpu(flush_tlb_kernel_range_ipi, &fd, 1, 1);
}

static void flush_tlb_page_ipi(void *info)
{
	struct flush_tlb_data *fd = info;

	local_flush_tlb_page(fd->vma, fd->addr1);
}

void flush_tlb_page(struct vm_area_struct *vma, unsigned long page)
{
	preempt_disable();
	if ((atomic_read(&vma->vm_mm->mm_users) != 1) || (current->mm != vma->vm_mm)) {
		struct flush_tlb_data fd = {
			.vma = vma,
			.addr1 = page,
		};

		smp_on_other_tlbs(flush_tlb_page_ipi, &fd);
	} else {
		cpumask_t mask = cpu_online_map;
		unsigned int cpu;

		cpu_clear(smp_processor_id(), mask);
		for_each_cpu_mask(cpu, mask)
			if (cpu_context(cpu, vma->vm_mm))
				cpu_context(cpu, vma->vm_mm) = 0;
	}
	local_flush_tlb_page(vma, page);
	preempt_enable();
}

static void flush_tlb_one_ipi(void *info)
{
	unsigned long vaddr = (unsigned long) info;

	local_flush_tlb_one(vaddr);
}

void flush_tlb_one(unsigned long vaddr)
{
	smp_on_each_tlb(flush_tlb_one_ipi, (void *) vaddr);
}

EXPORT_SYMBOL(flush_tlb_page);
EXPORT_SYMBOL(flush_tlb_one);