xref: /openbmc/linux/arch/mips/kernel/smp.c (revision 22246614)
/*
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version 2
 * of the License, or (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA  02111-1307, USA.
 *
 * Copyright (C) 2000, 2001 Kanoj Sarcar
 * Copyright (C) 2000, 2001 Ralf Baechle
 * Copyright (C) 2000, 2001 Silicon Graphics, Inc.
 * Copyright (C) 2000, 2001, 2003 Broadcom Corporation
 */
#include <linux/cache.h>
#include <linux/delay.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/spinlock.h>
#include <linux/threads.h>
#include <linux/module.h>
#include <linux/time.h>
#include <linux/timex.h>
#include <linux/sched.h>
#include <linux/cpumask.h>
#include <linux/cpu.h>
#include <linux/err.h>

#include <asm/atomic.h>
#include <asm/cpu.h>
#include <asm/processor.h>
#include <asm/r4k-timer.h>
#include <asm/system.h>
#include <asm/mmu_context.h>
#include <asm/time.h>

#ifdef CONFIG_MIPS_MT_SMTC
#include <asm/mipsmtregs.h>
#endif /* CONFIG_MIPS_MT_SMTC */

cpumask_t phys_cpu_present_map;		/* Bitmask of available CPUs */
volatile cpumask_t cpu_callin_map;	/* Bitmask of started secondaries */
cpumask_t cpu_online_map;		/* Bitmask of currently online CPUs */
int __cpu_number_map[NR_CPUS];		/* Map physical to logical */
int __cpu_logical_map[NR_CPUS];		/* Map logical to physical */

EXPORT_SYMBOL(phys_cpu_present_map);
EXPORT_SYMBOL(cpu_online_map);

extern void cpu_idle(void);

/* Number of TCs (or siblings in Intel speak) per CPU core */
int smp_num_siblings = 1;
EXPORT_SYMBOL(smp_num_siblings);

/* representing the TCs (or siblings in Intel speak) of each logical CPU */
cpumask_t cpu_sibling_map[NR_CPUS] __read_mostly;
EXPORT_SYMBOL(cpu_sibling_map);

/* representing cpus for which sibling maps can be computed */
static cpumask_t cpu_sibling_setup_map;

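/*
 * Record <cpu> in the setup map and mark every already set up CPU that
 * shares its physical core as a sibling (in both directions).  With a
 * single TC per core the CPU is simply its own sibling.
 */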
static inline void set_cpu_sibling_map(int cpu)
{
	int i;

	cpu_set(cpu, cpu_sibling_setup_map);

	if (smp_num_siblings > 1) {
		for_each_cpu_mask(i, cpu_sibling_setup_map) {
			if (cpu_data[cpu].core == cpu_data[i].core) {
				cpu_set(i, cpu_sibling_map[cpu]);
				cpu_set(cpu, cpu_sibling_map[i]);
			}
		}
	} else
		cpu_set(cpu, cpu_sibling_map[cpu]);
}

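/* Platform-specific SMP callbacks, installed by board code via register_smp_ops(). */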
struct plat_smp_ops *mp_ops;

__cpuinit void register_smp_ops(struct plat_smp_ops *ops)
{
	if (mp_ops)
		printk(KERN_WARNING "Overriding previously set SMP ops\n");

	mp_ops = ops;
}

/*
 * First C code run on the secondary CPUs after being started up by
 * the master.
 */
asmlinkage __cpuinit void start_secondary(void)
{
	unsigned int cpu;

#ifdef CONFIG_MIPS_MT_SMTC
	/* Only do cpu_probe for first TC of CPU */
	if ((read_c0_tcbind() & TCBIND_CURTC) == 0)
#endif /* CONFIG_MIPS_MT_SMTC */
	cpu_probe();
	cpu_report();
	per_cpu_trap_init();
	mips_clockevent_init();
	mp_ops->init_secondary();

	/*
	 * XXX parity protection should be folded in here when it's converted
	 * to an option instead of something based on .cputype
	 */

	calibrate_delay();
	preempt_disable();
	cpu = smp_processor_id();
	cpu_data[cpu].udelay_val = loops_per_jiffy;

	mp_ops->smp_finish();
	set_cpu_sibling_map(cpu);

	cpu_set(cpu, cpu_callin_map);

	synchronise_count_slave();

	cpu_idle();
}

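/*
 * smp_call_lock serialises cross-CPU function calls; call_data points at
 * the descriptor that smp_call_function_interrupt() reads on the target
 * CPUs while a call is in flight.
 */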
DEFINE_SPINLOCK(smp_call_lock);

struct call_data_struct *call_data;

/*
 * Run a function on all other CPUs.
 *
 *  <mask>	cpumask_t of all processors to run the function on.
 *  <func>      The function to run. This must be fast and non-blocking.
 *  <info>      An arbitrary pointer to pass to the function.
 *  <retry>     If true, keep retrying until ready.
 *  <wait>      If true, wait until function has completed on other CPUs.
 *  [RETURNS]   0 on success, else a negative status code.
 *
 * Does not return until the remote CPUs are nearly ready to execute <func>,
 * are executing it, or have already executed it.
 *
 * You must not call this function with disabled interrupts, from a
 * hardware interrupt handler or from a bottom half handler:
 *
 * CPU A                               CPU B
 * Disable interrupts
 *                                     smp_call_function()
 *                                     Take call_lock
 *                                     Send IPIs
 *                                     Wait for all cpus to acknowledge IPI
 *                                     CPU A has not responded, spin waiting
 *                                     for cpu A to respond, holding call_lock
 * smp_call_function()
 * Spin waiting for call_lock
 * Deadlock                            Deadlock
 */
int smp_call_function_mask(cpumask_t mask, void (*func) (void *info),
	void *info, int retry, int wait)
{
	struct call_data_struct data;
	int cpu = smp_processor_id();
	int cpus;

	/*
	 * Can die spectacularly if this CPU isn't yet marked online
	 */
	BUG_ON(!cpu_online(cpu));

	cpu_clear(cpu, mask);
	cpus = cpus_weight(mask);
	if (!cpus)
		return 0;

	/* Can deadlock when called with interrupts disabled */
	WARN_ON(irqs_disabled());

	data.func = func;
	data.info = info;
	atomic_set(&data.started, 0);
	data.wait = wait;
	if (wait)
		atomic_set(&data.finished, 0);

	spin_lock(&smp_call_lock);
	call_data = &data;
	smp_mb();

	/* Send a message to all other CPUs and wait for them to respond */
	mp_ops->send_ipi_mask(mask, SMP_CALL_FUNCTION);

	/* Wait for response */
	/* FIXME: lock-up detection, backtrace on lock-up */
	while (atomic_read(&data.started) != cpus)
		barrier();

	if (wait)
		while (atomic_read(&data.finished) != cpus)
			barrier();
	call_data = NULL;
	spin_unlock(&smp_call_lock);

	return 0;
}

int smp_call_function(void (*func) (void *info), void *info, int retry,
	int wait)
{
	return smp_call_function_mask(cpu_online_map, func, info, retry, wait);
}

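/*
 * IPI handler run on each target CPU: fetch the pending call from
 * call_data, acknowledge it and execute the function in interrupt context.
 */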
void smp_call_function_interrupt(void)
{
	void (*func) (void *info) = call_data->func;
	void *info = call_data->info;
	int wait = call_data->wait;

	/*
	 * Notify initiating CPU that I've grabbed the data and am
	 * about to execute the function.
	 */
	smp_mb();
	atomic_inc(&call_data->started);

	/*
	 * At this point the info structure may be out of scope unless wait==1.
	 */
	irq_enter();
	(*func)(info);
	irq_exit();

	if (wait) {
		smp_mb();
		atomic_inc(&call_data->finished);
	}
}

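/*
 * Run <func> on a single CPU.  If <cpu> is the calling CPU the function is
 * executed locally with interrupts disabled, otherwise the call is routed
 * through smp_call_function_mask().
 */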
int smp_call_function_single(int cpu, void (*func) (void *info), void *info,
			     int retry, int wait)
{
	int ret, me;

	/*
	 * Can die spectacularly if this CPU isn't yet marked online
	 */
	if (!cpu_online(cpu))
		return 0;

	me = get_cpu();
	BUG_ON(!cpu_online(me));

	if (cpu == me) {
		local_irq_disable();
		func(info);
		local_irq_enable();
		put_cpu();
		return 0;
	}

	ret = smp_call_function_mask(cpumask_of_cpu(cpu), func, info, retry,
				     wait);

	put_cpu();
	return ret;
}

static void stop_this_cpu(void *dummy)
{
	/*
	 * Remove this CPU:
	 */
	cpu_clear(smp_processor_id(), cpu_online_map);
	local_irq_enable();	/* May need to service _machine_restart IPI */
	for (;;);		/* Wait if available. */
}

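/* Take all other CPUs offline, e.g. ahead of a reboot or shutdown. */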
void smp_send_stop(void)
{
	smp_call_function(stop_this_cpu, NULL, 1, 0);
}

void __init smp_cpus_done(unsigned int max_cpus)
{
	mp_ops->cpus_done();
	synchronise_count_master();
}

/* called from main before smp_init() */
void __init smp_prepare_cpus(unsigned int max_cpus)
{
	init_new_context(current, &init_mm);
	current_thread_info()->cpu = 0;
	mp_ops->prepare_cpus(max_cpus);
	set_cpu_sibling_map(0);
#ifndef CONFIG_HOTPLUG_CPU
	cpu_present_map = cpu_possible_map;
#endif
}

/* preload SMP state for boot cpu */
void __devinit smp_prepare_boot_cpu(void)
{
	/*
	 * This assumes that bootup is always handled by the processor
	 * with the logical and physical number 0.
	 */
	__cpu_number_map[0] = 0;
	__cpu_logical_map[0] = 0;
	cpu_set(0, phys_cpu_present_map);
	cpu_set(0, cpu_online_map);
	cpu_set(0, cpu_callin_map);
}

/*
 * Called once for each "cpu_possible(cpu)".  Needs to spin up the cpu
 * and keep control until "cpu_online(cpu)" is set.  Note: cpu is the
 * logical CPU number, not the physical one.
 */
int __cpuinit __cpu_up(unsigned int cpu)
{
	struct task_struct *idle;

	/*
	 * The processor goes to start_secondary() and sets the callin flag.
	 * The following code is purely to make sure
	 * Linux can schedule processes on this slave.
	 */
	idle = fork_idle(cpu);
	if (IS_ERR(idle))
		panic("Fork failed for CPU %d", cpu);

	mp_ops->boot_secondary(cpu, idle);

	/*
	 * Trust is futile.  We should really have timeouts ...
	 */
	while (!cpu_isset(cpu, cpu_callin_map))
		udelay(100);

	cpu_set(cpu, cpu_online_map);

	return 0;
}

/* Not really SMP stuff ... */
int setup_profiling_timer(unsigned int multiplier)
{
	return 0;
}

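/* Flush the entire TLB on every online CPU, including the calling one. */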
static void flush_tlb_all_ipi(void *info)
{
	local_flush_tlb_all();
}

void flush_tlb_all(void)
{
	on_each_cpu(flush_tlb_all_ipi, NULL, 1, 1);
}

static void flush_tlb_mm_ipi(void *mm)
{
	local_flush_tlb_mm((struct mm_struct *)mm);
}

/*
 * Special Variant of smp_call_function for use by TLB functions:
 *
 *  o No return value
 *  o collapses to normal function call on UP kernels
 *  o collapses to normal function call on systems with a single shared
 *    primary cache.
 *  o CONFIG_MIPS_MT_SMTC currently implies there is only one physical core.
 */
static inline void smp_on_other_tlbs(void (*func) (void *info), void *info)
{
#ifndef CONFIG_MIPS_MT_SMTC
	smp_call_function(func, info, 1, 1);
#endif
}

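/*
 * Run <func> on every CPU: broadcast to the others via smp_on_other_tlbs()
 * and call it directly on the local CPU, with preemption disabled so the
 * current CPU cannot change underneath us.
 */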
static inline void smp_on_each_tlb(void (*func) (void *info), void *info)
{
	preempt_disable();

	smp_on_other_tlbs(func, info);
	func(info);

	preempt_enable();
}

/*
 * The following tlb flush calls are invoked when old translations are
 * being torn down, or pte attributes are changing. For single threaded
 * address spaces, a new context is obtained on the current cpu, and the
 * tlb contexts on other cpus are invalidated to force a new context
 * allocation at switch_mm time, should the mm ever be used on other cpus.
 * For multithreaded address spaces, intercpu interrupts have to be sent.
 * Another case where intercpu interrupts are required is when the target
 * mm might be active on another cpu (eg debuggers doing the flushes on
 * behalf of debuggees, kswapd stealing pages from another process etc).
 * Kanoj 07/00.
 */

void flush_tlb_mm(struct mm_struct *mm)
{
	preempt_disable();

	if ((atomic_read(&mm->mm_users) != 1) || (current->mm != mm)) {
		smp_on_other_tlbs(flush_tlb_mm_ipi, mm);
	} else {
		cpumask_t mask = cpu_online_map;
		unsigned int cpu;

		cpu_clear(smp_processor_id(), mask);
		for_each_cpu_mask(cpu, mask)
			if (cpu_context(cpu, mm))
				cpu_context(cpu, mm) = 0;
	}
	local_flush_tlb_mm(mm);

	preempt_enable();
}

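/* Argument block handed to the TLB flush IPI handlers below. */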
struct flush_tlb_data {
	struct vm_area_struct *vma;
	unsigned long addr1;
	unsigned long addr2;
};

static void flush_tlb_range_ipi(void *info)
{
	struct flush_tlb_data *fd = info;

	local_flush_tlb_range(fd->vma, fd->addr1, fd->addr2);
}

void flush_tlb_range(struct vm_area_struct *vma, unsigned long start, unsigned long end)
{
	struct mm_struct *mm = vma->vm_mm;

	preempt_disable();
	if ((atomic_read(&mm->mm_users) != 1) || (current->mm != mm)) {
		struct flush_tlb_data fd = {
			.vma = vma,
			.addr1 = start,
			.addr2 = end,
		};

		smp_on_other_tlbs(flush_tlb_range_ipi, &fd);
	} else {
		cpumask_t mask = cpu_online_map;
		unsigned int cpu;

		cpu_clear(smp_processor_id(), mask);
		for_each_cpu_mask(cpu, mask)
			if (cpu_context(cpu, mm))
				cpu_context(cpu, mm) = 0;
	}
	local_flush_tlb_range(vma, start, end);
	preempt_enable();
}

static void flush_tlb_kernel_range_ipi(void *info)
{
	struct flush_tlb_data *fd = info;

	local_flush_tlb_kernel_range(fd->addr1, fd->addr2);
}

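/*
 * Kernel mappings are shared by every CPU, so the flush is broadcast
 * unconditionally to all online CPUs via on_each_cpu().
 */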
void flush_tlb_kernel_range(unsigned long start, unsigned long end)
{
	struct flush_tlb_data fd = {
		.addr1 = start,
		.addr2 = end,
	};

	on_each_cpu(flush_tlb_kernel_range_ipi, &fd, 1, 1);
}

static void flush_tlb_page_ipi(void *info)
{
	struct flush_tlb_data *fd = info;

	local_flush_tlb_page(fd->vma, fd->addr1);
}

void flush_tlb_page(struct vm_area_struct *vma, unsigned long page)
{
	preempt_disable();
	if ((atomic_read(&vma->vm_mm->mm_users) != 1) || (current->mm != vma->vm_mm)) {
		struct flush_tlb_data fd = {
			.vma = vma,
			.addr1 = page,
		};

		smp_on_other_tlbs(flush_tlb_page_ipi, &fd);
	} else {
		cpumask_t mask = cpu_online_map;
		unsigned int cpu;

		cpu_clear(smp_processor_id(), mask);
		for_each_cpu_mask(cpu, mask)
			if (cpu_context(cpu, vma->vm_mm))
				cpu_context(cpu, vma->vm_mm) = 0;
	}
	local_flush_tlb_page(vma, page);
	preempt_enable();
}

static void flush_tlb_one_ipi(void *info)
{
	unsigned long vaddr = (unsigned long) info;

	local_flush_tlb_one(vaddr);
}

void flush_tlb_one(unsigned long vaddr)
{
	smp_on_each_tlb(flush_tlb_one_ipi, (void *) vaddr);
}

EXPORT_SYMBOL(flush_tlb_page);
EXPORT_SYMBOL(flush_tlb_one);