/*
 * arch/sh/kernel/smp.c
 *
 * SMP support for the SuperH processors.
 *
 * Copyright (C) 2002, 2003 Paul Mundt
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the
 * Free Software Foundation; either version 2 of the License, or (at your
 * option) any later version.
 */
#include <linux/config.h>
#include <linux/cache.h>
#include <linux/cpumask.h>
#include <linux/delay.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/spinlock.h>
#include <linux/threads.h>
#include <linux/module.h>
#include <linux/time.h>
#include <linux/timex.h>
#include <linux/sched.h>

#include <asm/atomic.h>
#include <asm/processor.h>
#include <asm/system.h>
#include <asm/mmu_context.h>
#include <asm/smp.h>

/*
 * This was written with the Sega Saturn (SMP SH-2 7604) in mind, but is
 * designed to be usable regardless of whether an MMU is present or not.
 */
struct sh_cpuinfo cpu_data[NR_CPUS];

extern void per_cpu_trap_init(void);

/* Masks of the processors that may be brought up and that are online. */
cpumask_t cpu_possible_map;
cpumask_t cpu_online_map;

/* Count of processors that have completed booting. */
static atomic_t cpus_booted = ATOMIC_INIT(0);

/* These are defined by the board-specific code. */

/*
 * Send the given IPI action to the passed cpu.
 */
void __smp_send_ipi(unsigned int cpu, unsigned int action);

/*
 * Find the number of available processors.
 */
unsigned int __smp_probe_cpus(void);

/*
 * Start a particular processor.
 */
void __smp_slave_init(unsigned int cpu);

/*
 * Run the function described by smp_fn_call on the passed cpu.  When the
 * function has finished, increment the finished field of smp_fn_call.
 */
void __smp_call_function(unsigned int cpu);

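/*
 * A board port might provide the probe and IPI hooks along these lines.
 * This is only an illustrative sketch for a hypothetical fixed dual-cpu
 * board; the real definitions and the IPI mechanism are board-specific:
 *
 *	unsigned int __smp_probe_cpus(void)
 *	{
 *		return 2;
 *	}
 *
 *	void __smp_send_ipi(unsigned int cpu, unsigned int action)
 *	{
 *		// Poke the interrupt controller so that 'cpu' takes an
 *		// interrupt and handles 'action'.
 *	}
 */
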
/*
 * Save this cpu's loops_per_jiffy calibration value in cpu_data.
 */
static inline void __init smp_store_cpu_info(unsigned int cpu)
{
	cpu_data[cpu].loops_per_jiffy = loops_per_jiffy;
}

/*
 * Runs on the boot cpu: record its calibration data and mark every
 * processor reported by the board code as possible.
 */
void __init smp_prepare_cpus(unsigned int max_cpus)
{
	unsigned int cpu = smp_processor_id();
	int i;

	atomic_set(&cpus_booted, 1);
	smp_store_cpu_info(cpu);

	for (i = 0; i < __smp_probe_cpus(); i++)
		cpu_set(i, cpu_possible_map);
}

/*
 * Mark the boot cpu as online and possible.
 */
void __devinit smp_prepare_boot_cpu(void)
{
	unsigned int cpu = smp_processor_id();

	cpu_set(cpu, cpu_online_map);
	cpu_set(cpu, cpu_possible_map);
}

/*
 * Bring a secondary cpu online: fork its idle task, bind the task to the
 * cpu, and mark the cpu online.  Nothing here kicks the secondary itself;
 * the board code is expected to get it running in start_secondary().
 */
int __cpu_up(unsigned int cpu)
{
	struct task_struct *tsk;

	tsk = fork_idle(cpu);

	if (IS_ERR(tsk))
		panic("Failed forking idle task for cpu %d\n", cpu);

	tsk->thread_info->cpu = cpu;

	cpu_set(cpu, cpu_online_map);

	return 0;
}

/*
 * C entry point for a secondary cpu.  Borrow init_mm as the active
 * address space, run the board and per-cpu trap initialization, bump
 * the boot count, and drop into the idle loop.
 */
int start_secondary(void *unused)
{
	unsigned int cpu = smp_processor_id();

	atomic_inc(&init_mm.mm_count);
	current->active_mm = &init_mm;

	smp_store_cpu_info(cpu);

	__smp_slave_init(cpu);
	per_cpu_trap_init();

	atomic_inc(&cpus_booted);

	cpu_idle();
	return 0;
}

void __init smp_cpus_done(unsigned int max_cpus)
{
	smp_mb();
}

void smp_send_reschedule(int cpu)
{
	__smp_send_ipi(cpu, SMP_MSG_RESCHEDULE);
}

/*
 * Take this cpu out of the online map and spin with interrupts disabled.
 */
static void stop_this_cpu(void *unused)
{
	cpu_clear(smp_processor_id(), cpu_online_map);
	local_irq_disable();

	for (;;)
		cpu_relax();
}

/*
 * Stop all of the other online cpus.
 */
void smp_send_stop(void)
{
	smp_call_function(stop_this_cpu, 0, 1, 0);
}

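/*
 * struct smp_fn_call_struct is declared in <asm/smp.h>; judging from its
 * use below it looks roughly like the following (illustrative sketch only,
 * not the authoritative definition):
 *
 *	struct smp_fn_call_struct {
 *		spinlock_t lock;
 *		atomic_t finished;
 *		void (*fn)(void *);
 *		void *data;
 *	};
 */
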
struct smp_fn_call_struct smp_fn_call = {
	.lock		= SPIN_LOCK_UNLOCKED,
	.finished	= ATOMIC_INIT(0),
};

/*
 * The caller of this wants the passed function to run on every cpu.  If wait
 * is set, wait until all cpus have finished the function before returning.
 * The lock is here to protect the call structure.
 * You must not call this function with disabled interrupts or from a
 * hardware interrupt handler or from a bottom half handler.
 */
int smp_call_function(void (*func)(void *info), void *info, int retry, int wait)
{
	unsigned int nr_cpus = atomic_read(&cpus_booted);
	int i;

	if (nr_cpus < 2)
		return 0;

	/* Can deadlock when called with interrupts disabled */
	WARN_ON(irqs_disabled());

	spin_lock(&smp_fn_call.lock);

	atomic_set(&smp_fn_call.finished, 0);
	smp_fn_call.fn = func;
	smp_fn_call.data = info;

	for (i = 0; i < nr_cpus; i++)
		if (i != smp_processor_id())
			__smp_call_function(i);

	if (wait)
		while (atomic_read(&smp_fn_call.finished) != (nr_cpus - 1));

	spin_unlock(&smp_fn_call.lock);

	return 0;
}

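/*
 * Example of how a caller might use this.  The do_flush() helper here is
 * hypothetical and only illustrates the calling convention:
 *
 *	static void do_flush(void *unused)
 *	{
 *		flush_cache_all();
 *	}
 *
 *	void flush_all_cpus(void)
 *	{
 *		do_flush(NULL);				// this cpu
 *		smp_call_function(do_flush, NULL, 1, 1);	// all others, and wait
 *	}
 */
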
/* Not really SMP stuff ... */
int setup_profiling_timer(unsigned int multiplier)
{
	return 0;
}