/*
 * arch/sh/kernel/smp.c
 *
 * SMP support for the SuperH processors.
 *
 * Copyright (C) 2002 - 2008 Paul Mundt
 * Copyright (C) 2006 - 2007 Akio Idehara
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 */
#include <linux/err.h>
#include <linux/cache.h>
#include <linux/cpumask.h>
#include <linux/delay.h>
#include <linux/init.h>
#include <linux/spinlock.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/cpu.h>
#include <linux/interrupt.h>
#include <asm/atomic.h>
#include <asm/processor.h>
#include <asm/system.h>
#include <asm/mmu_context.h>
#include <asm/smp.h>
#include <asm/cacheflush.h>
#include <asm/sections.h>

int __cpu_number_map[NR_CPUS];		/* Map physical to logical */
int __cpu_logical_map[NR_CPUS];		/* Map logical to physical */

cpumask_t cpu_possible_map;
EXPORT_SYMBOL(cpu_possible_map);

cpumask_t cpu_online_map;
EXPORT_SYMBOL(cpu_online_map);

static inline void __init smp_store_cpu_info(unsigned int cpu)
{
	struct sh_cpuinfo *c = cpu_data + cpu;

	c->loops_per_jiffy = loops_per_jiffy;
}

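/*
 * Boot-time SMP setup, run on the boot CPU before any secondaries are
 * started: initialize the MMU context for init_mm, record our CPU id,
 * and let the platform code do its own preparation.
 */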
void __init smp_prepare_cpus(unsigned int max_cpus)
{
	unsigned int cpu = smp_processor_id();

	init_new_context(current, &init_mm);
	current_thread_info()->cpu = cpu;
	plat_prepare_cpus(max_cpus);

#ifndef CONFIG_HOTPLUG_CPU
	cpu_present_map = cpu_possible_map;
#endif
}

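/*
 * Register the boot CPU in the maps and mark it online and possible,
 * so the generic code always sees at least one working CPU.
 */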
void __devinit smp_prepare_boot_cpu(void)
{
	unsigned int cpu = smp_processor_id();

	__cpu_number_map[0] = cpu;
	__cpu_logical_map[0] = cpu;

	cpu_set(cpu, cpu_online_map);
	cpu_set(cpu, cpu_possible_map);
}

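/*
 * First C code run on a secondary CPU, entered from head.S: take a
 * reference on init_mm, set up traps and the local timer, calibrate
 * the delay loop, mark ourselves online and drop into the idle loop.
 */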
asmlinkage void __cpuinit start_secondary(void)
{
	unsigned int cpu;
	struct mm_struct *mm = &init_mm;

	atomic_inc(&mm->mm_count);
	atomic_inc(&mm->mm_users);
	current->active_mm = mm;
	BUG_ON(current->mm);
	enter_lazy_tlb(mm, current);

	per_cpu_trap_init();

	preempt_disable();

	notify_cpu_starting(smp_processor_id());

	local_irq_enable();

	cpu = smp_processor_id();

	/* Enable local timers */
	local_timer_setup(cpu);
	calibrate_delay();

	smp_store_cpu_info(cpu);

	cpu_set(cpu, cpu_online_map);

	cpu_idle();
}

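/*
 * Boot parameter block shared with head.S; the layout here must match
 * what the assembly entry code expects. __cpu_up() fills it in before
 * kicking a secondary CPU.
 */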
extern struct {
	unsigned long sp;
	unsigned long bss_start;
	unsigned long bss_end;
	void *start_kernel_fn;
	void *cpu_init_fn;
	void *thread_info;
} stack_start;

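/*
 * Bring up one secondary CPU: fork its idle task, pass the stack and
 * entry point to head.S via stack_start, start the CPU through the
 * platform hook, then wait up to a second for it to come online.
 */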
int __cpuinit __cpu_up(unsigned int cpu)
{
	struct task_struct *tsk;
	unsigned long timeout;

	tsk = fork_idle(cpu);
	if (IS_ERR(tsk)) {
		printk(KERN_ERR "Failed forking idle task for cpu %d\n", cpu);
		return PTR_ERR(tsk);
	}

	/* Fill in data in head.S for secondary cpus */
	stack_start.sp = tsk->thread.sp;
	stack_start.thread_info = tsk->stack;
	stack_start.bss_start = 0; /* don't clear bss for secondary cpus */
	stack_start.start_kernel_fn = start_secondary;

	flush_cache_all();

	plat_start_cpu(cpu, (unsigned long)_stext);

	timeout = jiffies + HZ;
	while (time_before(jiffies, timeout)) {
		if (cpu_online(cpu))
			break;

		udelay(10);
	}

	if (cpu_online(cpu))
		return 0;

	return -ENOENT;
}

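/*
 * Called once all CPUs have been brought up; print a summary of how
 * many are online and their combined BogoMIPS rating.
 */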
void __init smp_cpus_done(unsigned int max_cpus)
{
	unsigned long bogosum = 0;
	int cpu;

	for_each_online_cpu(cpu)
		bogosum += cpu_data[cpu].loops_per_jiffy;

	printk(KERN_INFO "SMP: Total of %d processors activated "
	       "(%lu.%02lu BogoMIPS).\n", num_online_cpus(),
	       bogosum / (500000/HZ),
	       (bogosum / (5000/HZ)) % 100);
}

void smp_send_reschedule(int cpu)
{
	plat_send_ipi(cpu, SMP_MSG_RESCHEDULE);
}

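/*
 * Take the calling CPU offline and spin with interrupts disabled;
 * used by smp_send_stop() to quiesce the other CPUs, e.g. on halt
 * or reboot.
 */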
static void stop_this_cpu(void *unused)
{
	cpu_clear(smp_processor_id(), cpu_online_map);
	local_irq_disable();

	for (;;)
		cpu_relax();
}

void smp_send_stop(void)
{
	smp_call_function(stop_this_cpu, 0, 0);
}

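/*
 * Arch hooks for the generic smp_call_function() machinery: deliver
 * the cross-call request to the target CPUs as platform IPIs.
 */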
void arch_send_call_function_ipi(cpumask_t mask)
{
	int cpu;

	for_each_cpu_mask(cpu, mask)
		plat_send_ipi(cpu, SMP_MSG_FUNCTION);
}

void arch_send_call_function_single_ipi(int cpu)
{
	plat_send_ipi(cpu, SMP_MSG_FUNCTION_SINGLE);
}

void smp_timer_broadcast(cpumask_t mask)
{
	int cpu;

	for_each_cpu_mask(cpu, mask)
		plat_send_ipi(cpu, SMP_MSG_TIMER);
}

static void ipi_timer(void)
{
	irq_enter();
	local_timer_interrupt();
	irq_exit();
}

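/*
 * Central IPI demultiplexer, called from the platform interrupt code:
 * dispatch each SMP_MSG_* message to its handler. A reschedule IPI
 * needs no explicit work; returning from the interrupt is enough.
 */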
void smp_message_recv(unsigned int msg)
{
	switch (msg) {
	case SMP_MSG_FUNCTION:
		generic_smp_call_function_interrupt();
		break;
	case SMP_MSG_RESCHEDULE:
		break;
	case SMP_MSG_FUNCTION_SINGLE:
		generic_smp_call_function_single_interrupt();
		break;
	case SMP_MSG_TIMER:
		ipi_timer();
		break;
	default:
		printk(KERN_WARNING "SMP %d: %s(): unknown IPI %d\n",
		       smp_processor_id(), __func__, msg);
		break;
	}
}

/* Not really SMP stuff ... */
int setup_profiling_timer(unsigned int multiplier)
{
	return 0;
}

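/*
 * SMP TLB flushing. Each flush_tlb_*() entry point below broadcasts
 * to the other CPUs where needed and then performs the corresponding
 * local_flush_tlb_*() operation on the calling CPU.
 */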
static void flush_tlb_all_ipi(void *info)
{
	local_flush_tlb_all();
}

void flush_tlb_all(void)
{
	on_each_cpu(flush_tlb_all_ipi, 0, 1);
}

static void flush_tlb_mm_ipi(void *mm)
{
	local_flush_tlb_mm((struct mm_struct *)mm);
}

/*
 * The following TLB flush calls are invoked when old translations are
 * being torn down, or pte attributes are changing. For single threaded
 * address spaces, a new context is obtained on the current cpu, and TLB
 * contexts on other cpus are invalidated to force a new context
 * allocation at switch_mm time, should the mm ever be used on other
 * cpus. For multithreaded address spaces, inter-cpu interrupts have to
 * be sent. Another case where inter-cpu interrupts are required is when
 * the target mm might be active on another cpu (e.g. debuggers doing
 * the flushes on behalf of debuggees, kswapd stealing pages from
 * another process, etc.). Kanoj 07/00.
 */

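/*
 * Flush a whole address space. The fast path (mm is single threaded
 * and current on this cpu) just zaps the other CPUs' contexts so they
 * allocate a fresh ASID at their next switch_mm(); otherwise an IPI
 * runs flush_tlb_mm_ipi() everywhere the mm may be live.
 */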
void flush_tlb_mm(struct mm_struct *mm)
{
	preempt_disable();

	if ((atomic_read(&mm->mm_users) != 1) || (current->mm != mm)) {
		smp_call_function(flush_tlb_mm_ipi, (void *)mm, 1);
	} else {
		int i;
		for (i = 0; i < num_online_cpus(); i++)
			if (smp_processor_id() != i)
				cpu_context(i, mm) = 0;
	}
	local_flush_tlb_mm(mm);

	preempt_enable();
}

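/*
 * Argument block passed by pointer to the cross-CPU flush handlers;
 * the meaning of addr1/addr2 depends on the handler (range bounds,
 * a single page address, or an ASID/vaddr pair).
 */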
struct flush_tlb_data {
	struct vm_area_struct *vma;
	unsigned long addr1;
	unsigned long addr2;
};

static void flush_tlb_range_ipi(void *info)
{
	struct flush_tlb_data *fd = (struct flush_tlb_data *)info;

	local_flush_tlb_range(fd->vma, fd->addr1, fd->addr2);
}

void flush_tlb_range(struct vm_area_struct *vma,
		     unsigned long start, unsigned long end)
{
	struct mm_struct *mm = vma->vm_mm;

	preempt_disable();
	if ((atomic_read(&mm->mm_users) != 1) || (current->mm != mm)) {
		struct flush_tlb_data fd;

		fd.vma = vma;
		fd.addr1 = start;
		fd.addr2 = end;
		smp_call_function(flush_tlb_range_ipi, (void *)&fd, 1);
	} else {
		int i;
		for (i = 0; i < num_online_cpus(); i++)
			if (smp_processor_id() != i)
				cpu_context(i, mm) = 0;
	}
	local_flush_tlb_range(vma, start, end);
	preempt_enable();
}

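/*
 * Kernel mappings are shared by all CPUs, so a kernel-range flush is
 * unconditionally broadcast to every online CPU via on_each_cpu().
 */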
static void flush_tlb_kernel_range_ipi(void *info)
{
	struct flush_tlb_data *fd = (struct flush_tlb_data *)info;

	local_flush_tlb_kernel_range(fd->addr1, fd->addr2);
}

void flush_tlb_kernel_range(unsigned long start, unsigned long end)
{
	struct flush_tlb_data fd;

	fd.addr1 = start;
	fd.addr2 = end;
	on_each_cpu(flush_tlb_kernel_range_ipi, (void *)&fd, 1);
}

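/*
 * Flush a single user page, using the same single threaded fast path
 * versus IPI broadcast logic as flush_tlb_mm() above.
 */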
static void flush_tlb_page_ipi(void *info)
{
	struct flush_tlb_data *fd = (struct flush_tlb_data *)info;

	local_flush_tlb_page(fd->vma, fd->addr1);
}

void flush_tlb_page(struct vm_area_struct *vma, unsigned long page)
{
	preempt_disable();
	if ((atomic_read(&vma->vm_mm->mm_users) != 1) ||
	    (current->mm != vma->vm_mm)) {
		struct flush_tlb_data fd;

		fd.vma = vma;
		fd.addr1 = page;
		smp_call_function(flush_tlb_page_ipi, (void *)&fd, 1);
	} else {
		int i;
		for (i = 0; i < num_online_cpus(); i++)
			if (smp_processor_id() != i)
				cpu_context(i, vma->vm_mm) = 0;
	}
	local_flush_tlb_page(vma, page);
	preempt_enable();
}

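/*
 * Flush one (ASID, vaddr) translation on every CPU: the IPI handles
 * the other CPUs, then the caller flushes its own TLB entry.
 */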
static void flush_tlb_one_ipi(void *info)
{
	struct flush_tlb_data *fd = (struct flush_tlb_data *)info;
	local_flush_tlb_one(fd->addr1, fd->addr2);
}

void flush_tlb_one(unsigned long asid, unsigned long vaddr)
{
	struct flush_tlb_data fd;

	fd.addr1 = asid;
	fd.addr2 = vaddr;

	smp_call_function(flush_tlb_one_ipi, (void *)&fd, 1);
	local_flush_tlb_one(asid, vaddr);
}