xref: /openbmc/linux/arch/ia64/kernel/smp.c (revision 87c2ce3b)
/*
 * SMP Support
 *
 * Copyright (C) 1999 Walt Drummond <drummond@valinux.com>
 * Copyright (C) 1999, 2001, 2003 David Mosberger-Tang <davidm@hpl.hp.com>
 *
 * Lots of stuff stolen from arch/alpha/kernel/smp.c
 *
 * 01/05/16 Rohit Seth <rohit.seth@intel.com>  IA64-SMP functions. Reorganized
 * the existing code (along the lines of the x86 port).
 * 00/09/11 David Mosberger <davidm@hpl.hp.com> Do loops_per_jiffy
 * calibration on each CPU.
 * 00/08/23 Asit Mallick <asit.k.mallick@intel.com> fixed logical processor id
 * 00/03/31 Rohit Seth <rohit.seth@intel.com>	Fixes for Bootstrap Processor
 * & cpu_online_map now gets done here (instead of setup.c)
 * 99/10/05 davidm	Update to bring it in sync with new command-line processing
 *  scheme.
 * 10/13/00 Goutham Rao <goutham.rao@intel.com> Updated smp_call_function and
 *		smp_call_function_single to resend IPI on timeouts
 */
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/smp.h>
#include <linux/kernel_stat.h>
#include <linux/mm.h>
#include <linux/cache.h>
#include <linux/delay.h>
#include <linux/efi.h>
#include <linux/bitops.h>

#include <asm/atomic.h>
#include <asm/current.h>
#include <asm/delay.h>
#include <asm/machvec.h>
#include <asm/io.h>
#include <asm/irq.h>
#include <asm/page.h>
#include <asm/pgalloc.h>
#include <asm/pgtable.h>
#include <asm/processor.h>
#include <asm/ptrace.h>
#include <asm/sal.h>
#include <asm/system.h>
#include <asm/tlbflush.h>
#include <asm/unistd.h>
#include <asm/mca.h>

/*
 * Structure and data for smp_call_function(). This is designed to minimise static memory
 * requirements. It also looks cleaner.
 */
static __cacheline_aligned DEFINE_SPINLOCK(call_lock);

struct call_data_struct {
	void (*func) (void *info);
	void *info;
	long wait;
	atomic_t started;
	atomic_t finished;
};
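
/*
 * Handshake: the sender publishes <func>/<info> through call_data and each
 * target increments 'started' once it has copied them out; after that the
 * sender may reuse the (typically stack-allocated) structure, unless 'wait'
 * is set, in which case every target also increments 'finished' after
 * <func> returns and the sender spins until both counters catch up.
 */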

static volatile struct call_data_struct *call_data;

#define IPI_CALL_FUNC		0
#define IPI_CPU_STOP		1

/* This needs to be cacheline aligned because it is written to by *other* CPUs.  */
static DEFINE_PER_CPU(u64, ipi_operation) ____cacheline_aligned;
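
/*
 * To raise an IPI, the sender atomically sets the op bit in the target
 * CPU's ipi_operation word and then fires IA64_IPI_VECTOR; handle_IPI()
 * claims the whole word with xchg() and services every bit found set.
 */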

extern void cpu_halt (void);

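/*
 * These helpers let code outside this file (e.g. the CPU bring-up path)
 * take call_lock, so that a CPU coming online cannot race with an
 * in-flight smp_call_function() broadcast.
 */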
void
lock_ipi_calllock(void)
{
	spin_lock_irq(&call_lock);
}

void
unlock_ipi_calllock(void)
{
	spin_unlock_irq(&call_lock);
}

static void
stop_this_cpu (void)
{
	/*
	 * Remove this CPU:
	 */
	cpu_clear(smp_processor_id(), cpu_online_map);
	max_xtp();
	local_irq_disable();
	cpu_halt();
}

void
cpu_die(void)
{
	max_xtp();
	local_irq_disable();
	cpu_halt();
	/* Should never be here */
	BUG();
	for (;;);
}

irqreturn_t
handle_IPI (int irq, void *dev_id, struct pt_regs *regs)
{
	int this_cpu = get_cpu();
	unsigned long *pending_ipis = &__ia64_per_cpu_var(ipi_operation);
	unsigned long ops;

	mb();	/* Order interrupt and bit testing. */
	while ((ops = xchg(pending_ipis, 0)) != 0) {
		mb();	/* Order bit clearing and data access. */
		do {
			unsigned long which;

			which = ffz(~ops);
			ops &= ~(1UL << which);	/* 1UL: ops is an unsigned long bit mask */

			switch (which) {
			      case IPI_CALL_FUNC:
			      {
				      struct call_data_struct *data;
				      void (*func)(void *info);
				      void *info;
				      int wait;

				      /* release the 'pointer lock' */
				      data = (struct call_data_struct *) call_data;
				      func = data->func;
				      info = data->info;
				      wait = data->wait;

				      mb();
				      atomic_inc(&data->started);
				      /*
				       * At this point the structure may be gone unless
				       * wait is true.
				       */
				      (*func)(info);

				      /* Notify the sending CPU that the task is done.  */
				      mb();
				      if (wait)
					      atomic_inc(&data->finished);
			      }
			      break;

			      case IPI_CPU_STOP:
				stop_this_cpu();
				break;

			      default:
				printk(KERN_CRIT "Unknown IPI on CPU %d: %lu\n", this_cpu, which);
				break;
			}
		} while (ops);
		mb();	/* Order data access and bit testing. */
	}
	put_cpu();
	return IRQ_HANDLED;
}

/*
 * Called with preemption disabled.
 */
static inline void
send_IPI_single (int dest_cpu, int op)
{
	set_bit(op, &per_cpu(ipi_operation, dest_cpu));
	platform_send_ipi(dest_cpu, IA64_IPI_VECTOR, IA64_IPI_DM_INT, 0);
}

/*
 * Called with preemption disabled.
 */
static inline void
send_IPI_allbutself (int op)
{
	unsigned int i;

	for_each_online_cpu(i) {
		if (i != smp_processor_id())
			send_IPI_single(i, op);
	}
}

/*
 * Called with preemption disabled.
 */
static inline void
send_IPI_all (int op)
{
	int i;

	for_each_online_cpu(i) {
		send_IPI_single(i, op);
	}
}

/*
 * Called with preemption disabled.
 */
static inline void
send_IPI_self (int op)
{
	send_IPI_single(smp_processor_id(), op);
}

/*
 * Called with preemption disabled.
 */
void
smp_send_reschedule (int cpu)
{
	platform_send_ipi(cpu, IA64_IPI_RESCHEDULE, IA64_IPI_DM_INT, 0);
}

void
smp_flush_tlb_all (void)
{
	on_each_cpu((void (*)(void *))local_flush_tlb_all, NULL, 1, 1);
}

void
smp_flush_tlb_mm (struct mm_struct *mm)
{
	preempt_disable();
	/* this happens for the common case of a single-threaded fork():  */
	if (likely(mm == current->active_mm && atomic_read(&mm->mm_users) == 1))
	{
		local_finish_flush_tlb_mm(mm);
		preempt_enable();
		return;
	}

	preempt_enable();
	/*
	 * We could optimize this further by using mm->cpu_vm_mask to track which CPUs
	 * have been running in the address space.  It's not clear that this is worth the
	 * trouble though: to avoid races, we have to raise the IPI on the target CPU
	 * anyhow, and once a CPU is interrupted, the cost of local_flush_tlb_all() is
	 * rather trivial.
	 */
	on_each_cpu((void (*)(void *))local_finish_flush_tlb_mm, mm, 1, 1);
}

/*
 * Run a function on another CPU
 *  <func>	The function to run. This must be fast and non-blocking.
 *  <info>	An arbitrary pointer to pass to the function.
 *  <nonatomic>	Currently unused.
 *  <wait>	If true, wait until the function has completed on the other CPU.
 *  [RETURNS]   0 on success, else a negative status code.
 *
 * Does not return until the remote CPU is nearly ready to execute <func>,
 * or has already executed it.
 */

int
smp_call_function_single (int cpuid, void (*func) (void *info), void *info, int nonatomic,
			  int wait)
{
	struct call_data_struct data;
	int cpus = 1;
	int me = get_cpu(); /* prevent preemption and reschedule on another processor */

	if (cpuid == me) {
		printk(KERN_INFO "%s: trying to call self\n", __FUNCTION__);
		put_cpu();
		return -EBUSY;
	}

	data.func = func;
	data.info = info;
	atomic_set(&data.started, 0);
	data.wait = wait;
	if (wait)
		atomic_set(&data.finished, 0);

	spin_lock_bh(&call_lock);

	call_data = &data;
	mb();	/* ensure store to call_data precedes setting of IPI_CALL_FUNC */
	send_IPI_single(cpuid, IPI_CALL_FUNC);

	/* Wait for response */
	while (atomic_read(&data.started) != cpus)
		cpu_relax();

	if (wait)
		while (atomic_read(&data.finished) != cpus)
			cpu_relax();
	call_data = NULL;

	spin_unlock_bh(&call_lock);
	put_cpu();
	return 0;
}
EXPORT_SYMBOL(smp_call_function_single);
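
/*
 * Example: a minimal sketch of how a caller might use
 * smp_call_function_single().  The names below (example_ipi_callback,
 * example_poke_cpu, example_hits) are hypothetical.  The callback runs
 * in interrupt context on the target CPU, so it must be fast and must
 * not sleep.
 */
#if 0	/* illustration only */
static atomic_t example_hits = ATOMIC_INIT(0);

/* Runs on the remote CPU; 'info' is the pointer passed by the sender. */
static void
example_ipi_callback (void *info)
{
	atomic_inc((atomic_t *) info);
}

static int
example_poke_cpu (int cpu)
{
	/* nonatomic is unused; wait=1 spins until the callback has finished. */
	return smp_call_function_single(cpu, example_ipi_callback,
					&example_hits, 0, 1);
}
#endif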

/*
 * This function sends a 'generic call function' IPI to all other CPUs
 * in the system.
 */

/*
 *  [SUMMARY]	Run a function on all other CPUs.
 *  <func>	The function to run. This must be fast and non-blocking.
 *  <info>	An arbitrary pointer to pass to the function.
 *  <nonatomic>	Currently unused.
 *  <wait>	If true, wait (atomically) until the function has completed on all other CPUs.
 *  [RETURNS]   0 on success, else a negative status code.
 *
 * Does not return until the remote CPUs are nearly ready to execute <func>,
 * or have already executed it.
 *
 * You must not call this function with interrupts disabled, or from a
 * hardware interrupt handler, or from a bottom half handler.
 */
int
smp_call_function (void (*func) (void *info), void *info, int nonatomic, int wait)
{
	struct call_data_struct data;
	int cpus = num_online_cpus()-1;

	if (!cpus)
		return 0;

	/* Can deadlock when called with interrupts disabled */
	WARN_ON(irqs_disabled());

	data.func = func;
	data.info = info;
	atomic_set(&data.started, 0);
	data.wait = wait;
	if (wait)
		atomic_set(&data.finished, 0);

	spin_lock(&call_lock);

	call_data = &data;
	mb();	/* ensure store to call_data precedes setting of IPI_CALL_FUNC */
	send_IPI_allbutself(IPI_CALL_FUNC);

	/* Wait for response */
	while (atomic_read(&data.started) != cpus)
		cpu_relax();

	if (wait)
		while (atomic_read(&data.finished) != cpus)
			cpu_relax();
	call_data = NULL;

	spin_unlock(&call_lock);
	return 0;
}
EXPORT_SYMBOL(smp_call_function);
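
/*
 * Example: a minimal sketch of a broadcast caller.  The names
 * (example_sync_event, example_broadcast) are hypothetical.  Because
 * smp_call_function() spins waiting for the other CPUs, it must be called
 * with interrupts enabled and never from interrupt or bottom-half context.
 */
#if 0	/* illustration only */
/* Executed on every other online CPU, in interrupt context. */
static void
example_sync_event (void *info)
{
	printk(KERN_DEBUG "CPU %d saw event %p\n", smp_processor_id(), info);
}

static int
example_broadcast (void *cookie)
{
	/* wait=1: do not return until every target CPU has run the callback. */
	return smp_call_function(example_sync_event, cookie, 0, 1);
}
#endif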

/*
 * This function calls the 'stop' function on all other CPUs in the system.
 */
void
smp_send_stop (void)
{
	send_IPI_allbutself(IPI_CPU_STOP);
}

int __init
setup_profiling_timer (unsigned int multiplier)
{
	return -EINVAL;
}