xref: /openbmc/linux/arch/mips/kernel/smp.c (revision d5cb9783536a41df9f9cba5b0a1d78047ed787f7)
/*
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version 2
 * of the License, or (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA  02111-1307, USA.
 *
 * Copyright (C) 2000, 2001 Kanoj Sarcar
 * Copyright (C) 2000, 2001 Ralf Baechle
 * Copyright (C) 2000, 2001 Silicon Graphics, Inc.
 * Copyright (C) 2000, 2001, 2003 Broadcom Corporation
 */
#include <linux/cache.h>
#include <linux/delay.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/spinlock.h>
#include <linux/threads.h>
#include <linux/module.h>
#include <linux/time.h>
#include <linux/timex.h>
#include <linux/sched.h>
#include <linux/cpumask.h>

#include <asm/atomic.h>
#include <asm/cpu.h>
#include <asm/processor.h>
#include <asm/system.h>
#include <asm/mmu_context.h>
#include <asm/smp.h>

cpumask_t phys_cpu_present_map;		/* Bitmask of available CPUs */
volatile cpumask_t cpu_callin_map;	/* Bitmask of started secondaries */
cpumask_t cpu_online_map;		/* Bitmask of currently online CPUs */
int __cpu_number_map[NR_CPUS];		/* Map physical to logical */
int __cpu_logical_map[NR_CPUS];		/* Map logical to physical */

EXPORT_SYMBOL(phys_cpu_present_map);
EXPORT_SYMBOL(cpu_online_map);

static void smp_tune_scheduling(void)
{
	struct cache_desc *cd = &current_cpu_data.scache;
	unsigned long cachesize;	/* bytes */
	unsigned long cpu_khz;

	/*
	 * Crude estimate until we actually measure ...
	 */
	cpu_khz = loops_per_jiffy * 2 * HZ / 1000;

	/*
	 * Rough estimate for SMP scheduling; this is the number of
	 * cycles it takes for a fully memory-limited process to flush
	 * the SMP-local cache.
	 *
	 * (For a P5 this pretty much means we will choose another idle
	 *  CPU almost always at wakeup time (this is due to the small
	 *  L1 cache), on PIIs it's around 50-100 usecs, depending on
	 *  the cache size)
	 */
	if (!cpu_khz)
		return;

	/* Computed but currently unused beyond this point. */
	cachesize = cd->linesz * cd->sets * cd->ways;
}

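/*
 * Worked example for the cpu_khz estimate above (illustrative values,
 * not from this file): reading the formula, each delay-loop iteration
 * is assumed to cost two CPU cycles.  With HZ = 100 and
 * loops_per_jiffy = 500000:
 *
 *	cpu_khz = 500000 * 2 * 100 / 1000 = 100000	(i.e. ~100 MHz)
 */
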
extern void __init calibrate_delay(void);
extern ATTRIB_NORET void cpu_idle(void);

/*
 * First C code run on the secondary CPUs after being started up by
 * the master.
 */
asmlinkage void start_secondary(void)
{
	unsigned int cpu = smp_processor_id();

	cpu_probe();
	cpu_report();
	per_cpu_trap_init();
	prom_init_secondary();

	/*
	 * XXX parity protection should be folded in here when it's converted
	 * to an option instead of something based on .cputype
	 */

	calibrate_delay();
	cpu_data[cpu].udelay_val = loops_per_jiffy;

	prom_smp_finish();

	/* Tell __cpu_up(), which is spinning on cpu_callin_map, that
	   we have called in and may be marked online. */
	cpu_set(cpu, cpu_callin_map);

	cpu_idle();
}

DEFINE_SPINLOCK(smp_call_lock);

struct call_data_struct *call_data;

/*
 * Run a function on all other CPUs.
 *  <func>      The function to run. This must be fast and non-blocking.
 *  <info>      An arbitrary pointer to pass to the function.
 *  <retry>     If true, keep retrying until ready.
 *  <wait>      If true, wait until function has completed on other CPUs.
 *  [RETURNS]   0 on success, else a negative status code.
 *
 * Does not return until remote CPUs are nearly ready to execute <func>,
 * are executing it, or have already executed it.
 *
 * You must not call this function with disabled interrupts or from a
 * hardware interrupt handler or from a bottom half handler, or the
 * following deadlock can occur:
 *
 * CPU A                               CPU B
 * Disable interrupts
 *                                     smp_call_function()
 *                                     Take call_lock
 *                                     Send IPIs
 *                                     Wait for all cpus to acknowledge IPI
 *                                     CPU A has not responded, spin waiting
 *                                     for cpu A to respond, holding call_lock
 * smp_call_function()
 * Spin waiting for call_lock
 * Deadlock                            Deadlock
 */
int smp_call_function(void (*func) (void *info), void *info, int retry,
		      int wait)
{
	struct call_data_struct data;
	int i, cpus = num_online_cpus() - 1;
	int cpu = smp_processor_id();

	/*
	 * Can die spectacularly if this CPU isn't yet marked online
	 */
	BUG_ON(!cpu_online(cpu));

	if (!cpus)
		return 0;

	/* Can deadlock when called with interrupts disabled */
	WARN_ON(irqs_disabled());

	data.func = func;
	data.info = info;
	atomic_set(&data.started, 0);
	data.wait = wait;
	if (wait)
		atomic_set(&data.finished, 0);

	spin_lock(&smp_call_lock);
	call_data = &data;
	mb();

	/* Send a message to all other CPUs and wait for them to respond */
	for (i = 0; i < NR_CPUS; i++)
		if (cpu_online(i) && i != cpu)
			core_send_ipi(i, SMP_CALL_FUNCTION);

	/* Wait for response */
	/* FIXME: lock-up detection, backtrace on lock-up */
	while (atomic_read(&data.started) != cpus)
		barrier();

	if (wait)
		while (atomic_read(&data.finished) != cpus)
			barrier();
	spin_unlock(&smp_call_lock);

	return 0;
}

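/*
 * Illustrative example (not part of this file): a typical caller of
 * smp_call_function().  The handler must be fast, must not block or
 * re-enable interrupts, and the call itself must be made with
 * interrupts enabled, as explained above.  The "foo" names are
 * hypothetical.
 */
#if 0	/* example only, never built */
static void invalidate_foo_ipi(void *info)
{
	/* Runs on every other online CPU, in IPI context. */
	local_invalidate_foo();
}

static void invalidate_all_foo(void)
{
	/* wait=1: do not return until all remote handlers have finished. */
	smp_call_function(invalidate_foo_ipi, NULL, 1, 1);
	local_invalidate_foo();		/* finally, the calling CPU itself */
}
#endif
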
void smp_call_function_interrupt(void)
{
	void (*func) (void *info) = call_data->func;
	void *info = call_data->info;
	int wait = call_data->wait;

	/*
	 * Notify initiating CPU that I've grabbed the data and am
	 * about to execute the function.
	 */
	mb();
	atomic_inc(&call_data->started);

	/*
	 * At this point the info structure may be out of scope unless wait==1.
	 */
	irq_enter();
	(*func)(info);
	irq_exit();

	if (wait) {
		mb();
		atomic_inc(&call_data->finished);
	}
}

static void stop_this_cpu(void *dummy)
{
	/*
	 * Remove this CPU:
	 */
	cpu_clear(smp_processor_id(), cpu_online_map);
	local_irq_enable();	/* May need to service _machine_restart IPI */
	for (;;);		/* Wait if available. */
}

void smp_send_stop(void)
{
	smp_call_function(stop_this_cpu, NULL, 1, 0);
}

void __init smp_cpus_done(unsigned int max_cpus)
{
	prom_cpus_done();
}

/* called from main before smp_init() */
void __init smp_prepare_cpus(unsigned int max_cpus)
{
	init_new_context(current, &init_mm);
	current_thread_info()->cpu = 0;
	smp_tune_scheduling();
	prom_prepare_cpus(max_cpus);
}

/* preload SMP state for boot cpu */
void __devinit smp_prepare_boot_cpu(void)
{
	/*
	 * This assumes that bootup is always handled by the processor
	 * with logical and physical number 0.
	 */
	__cpu_number_map[0] = 0;
	__cpu_logical_map[0] = 0;
	cpu_set(0, phys_cpu_present_map);
	cpu_set(0, cpu_online_map);
	cpu_set(0, cpu_callin_map);
}

/*
 * Called once for each "cpu_possible(cpu)".  Needs to spin up the cpu
 * and keep control until "cpu_online(cpu)" is set.  Note: cpu is
 * physical, not logical.
 */
int __devinit __cpu_up(unsigned int cpu)
{
	struct task_struct *idle;

	/*
	 * The processor goes to start_secondary(), which flags itself
	 * in cpu_callin_map; we mark it online below.  The fork is
	 * purely to make sure Linux can schedule processes on this slave.
	 */
	idle = fork_idle(cpu);
	if (IS_ERR(idle))
		panic("Fork failed for CPU %d", cpu);

	prom_boot_secondary(cpu, idle);

	/*
	 * Trust is futile.  We should really have timeouts ...
	 */
	while (!cpu_isset(cpu, cpu_callin_map))
		udelay(100);

	cpu_set(cpu, cpu_online_map);

	return 0;
}

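/*
 * Illustrative sketch (not part of this file): the unbounded wait
 * above could be given the timeout its comment asks for along these
 * lines.  The one-second budget is an arbitrary example value.
 */
#if 0	/* example only, never built */
	unsigned long timeout = jiffies + HZ;	/* allow roughly one second */

	while (!cpu_isset(cpu, cpu_callin_map)) {
		if (time_after(jiffies, timeout))
			return -EIO;		/* secondary never called in */
		udelay(100);
	}
#endif
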
/* Not really SMP stuff ... */
int setup_profiling_timer(unsigned int multiplier)
{
	return 0;
}

static void flush_tlb_all_ipi(void *info)
{
	local_flush_tlb_all();
}

void flush_tlb_all(void)
{
	on_each_cpu(flush_tlb_all_ipi, NULL, 1, 1);
}

static void flush_tlb_mm_ipi(void *mm)
{
	local_flush_tlb_mm((struct mm_struct *)mm);
}

/*
 * The following tlb flush calls are invoked when old translations are
 * being torn down, or pte attributes are changing. For single threaded
 * address spaces, a new context is obtained on the current cpu, and tlb
 * contexts on other cpus are invalidated to force a new context allocation
 * at switch_mm time, should the mm ever be used on other cpus. For
 * multithreaded address spaces, intercpu interrupts have to be sent.
 * Another case where intercpu interrupts are required is when the target
 * mm might be active on another cpu (eg debuggers doing the flushes on
 * behalf of debuggees, kswapd stealing pages from another process etc).
 * Kanoj 07/00.
 */

void flush_tlb_mm(struct mm_struct *mm)
{
	preempt_disable();

	if ((atomic_read(&mm->mm_users) != 1) || (current->mm != mm)) {
		smp_call_function(flush_tlb_mm_ipi, (void *)mm, 1, 1);
	} else {
		int i;
		for (i = 0; i < num_online_cpus(); i++)
			if (smp_processor_id() != i)
				cpu_context(i, mm) = 0;
	}
	local_flush_tlb_mm(mm);

	preempt_enable();
}

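/*
 * Illustrative note (not part of this file): the else-branch above is
 * the lazy invalidation described in the comment before flush_tlb_mm().
 * The same loop recurs in flush_tlb_range() and flush_tlb_page() below
 * and could be factored into a helper along these lines (hypothetical
 * name):
 */
#if 0	/* example only, never built */
static inline void drop_other_mmu_contexts(struct mm_struct *mm)
{
	int i;

	/*
	 * Zeroing cpu_context() forces switch_mm() to allocate a fresh
	 * context the next time this mm is scheduled on that CPU, so
	 * no IPI is needed while the mm cannot be running there.
	 */
	for (i = 0; i < num_online_cpus(); i++)
		if (smp_processor_id() != i)
			cpu_context(i, mm) = 0;
}
#endif
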
struct flush_tlb_data {
	struct vm_area_struct *vma;
	unsigned long addr1;
	unsigned long addr2;
};

static void flush_tlb_range_ipi(void *info)
{
	struct flush_tlb_data *fd = (struct flush_tlb_data *)info;

	local_flush_tlb_range(fd->vma, fd->addr1, fd->addr2);
}

void flush_tlb_range(struct vm_area_struct *vma, unsigned long start,
		     unsigned long end)
{
	struct mm_struct *mm = vma->vm_mm;

	preempt_disable();
	if ((atomic_read(&mm->mm_users) != 1) || (current->mm != mm)) {
		struct flush_tlb_data fd;

		fd.vma = vma;
		fd.addr1 = start;
		fd.addr2 = end;
		smp_call_function(flush_tlb_range_ipi, (void *)&fd, 1, 1);
	} else {
		int i;
		for (i = 0; i < num_online_cpus(); i++)
			if (smp_processor_id() != i)
				cpu_context(i, mm) = 0;
	}
	local_flush_tlb_range(vma, start, end);
	preempt_enable();
}

static void flush_tlb_kernel_range_ipi(void *info)
{
	struct flush_tlb_data *fd = (struct flush_tlb_data *)info;

	local_flush_tlb_kernel_range(fd->addr1, fd->addr2);
}

void flush_tlb_kernel_range(unsigned long start, unsigned long end)
{
	struct flush_tlb_data fd;

	fd.addr1 = start;
	fd.addr2 = end;
	on_each_cpu(flush_tlb_kernel_range_ipi, (void *)&fd, 1, 1);
}

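/*
 * Illustrative usage (hypothetical values): after kernel page table
 * entries for a range have been torn down, e.g. when a vmalloc
 * mapping is removed, stale translations must go on every CPU:
 *
 *	flush_tlb_kernel_range(addr, addr + size);
 */
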
static void flush_tlb_page_ipi(void *info)
{
	struct flush_tlb_data *fd = (struct flush_tlb_data *)info;

	local_flush_tlb_page(fd->vma, fd->addr1);
}

void flush_tlb_page(struct vm_area_struct *vma, unsigned long page)
{
	preempt_disable();
	if ((atomic_read(&vma->vm_mm->mm_users) != 1) ||
	    (current->mm != vma->vm_mm)) {
		struct flush_tlb_data fd;

		fd.vma = vma;
		fd.addr1 = page;
		smp_call_function(flush_tlb_page_ipi, (void *)&fd, 1, 1);
	} else {
		int i;
		for (i = 0; i < num_online_cpus(); i++)
			if (smp_processor_id() != i)
				cpu_context(i, vma->vm_mm) = 0;
	}
	local_flush_tlb_page(vma, page);
	preempt_enable();
}

static void flush_tlb_one_ipi(void *info)
{
	unsigned long vaddr = (unsigned long) info;

	local_flush_tlb_one(vaddr);
}

void flush_tlb_one(unsigned long vaddr)
{
	smp_call_function(flush_tlb_one_ipi, (void *) vaddr, 1, 1);
	local_flush_tlb_one(vaddr);
}

EXPORT_SYMBOL(flush_tlb_page);
EXPORT_SYMBOL(flush_tlb_one);
EXPORT_SYMBOL(cpu_data);
EXPORT_SYMBOL(synchronize_irq);