/*
 * SMP boot-related support
 *
 * Copyright (C) 1998-2003, 2005 Hewlett-Packard Co
 *	David Mosberger-Tang <davidm@hpl.hp.com>
 * Copyright (C) 2001, 2004-2005 Intel Corp
 * 	Rohit Seth <rohit.seth@intel.com>
 * 	Suresh Siddha <suresh.b.siddha@intel.com>
 * 	Gordon Jin <gordon.jin@intel.com>
 *	Ashok Raj  <ashok.raj@intel.com>
 *
 * 01/05/16 Rohit Seth <rohit.seth@intel.com>	Moved SMP booting functions from smp.c to here.
 * 01/04/27 David Mosberger <davidm@hpl.hp.com>	Added ITC synching code.
 * 02/07/31 David Mosberger <davidm@hpl.hp.com>	Switch over to hotplug-CPU boot-sequence.
 *						smp_boot_cpus()/smp_commence() is replaced by
 *						smp_prepare_cpus()/__cpu_up()/smp_cpus_done().
 * 04/06/21 Ashok Raj		<ashok.raj@intel.com> Added CPU Hotplug Support
 * 04/12/26 Jin Gordon <gordon.jin@intel.com>
 * 04/12/26 Rohit Seth <rohit.seth@intel.com>
 *						Add multi-threading and multi-core detection
 * 05/01/30 Suresh Siddha <suresh.b.siddha@intel.com>
 *						Setup cpu_sibling_map and cpu_core_map
 */

#include <linux/module.h>
#include <linux/acpi.h>
#include <linux/bootmem.h>
#include <linux/cpu.h>
#include <linux/delay.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/irq.h>
#include <linux/kernel.h>
#include <linux/kernel_stat.h>
#include <linux/mm.h>
#include <linux/notifier.h>
#include <linux/smp.h>
#include <linux/spinlock.h>
#include <linux/efi.h>
#include <linux/percpu.h>
#include <linux/bitops.h>

#include <asm/atomic.h>
#include <asm/cache.h>
#include <asm/current.h>
#include <asm/delay.h>
#include <asm/io.h>
#include <asm/irq.h>
#include <asm/machvec.h>
#include <asm/mca.h>
#include <asm/page.h>
#include <asm/paravirt.h>
#include <asm/pgalloc.h>
#include <asm/pgtable.h>
#include <asm/processor.h>
#include <asm/ptrace.h>
#include <asm/sal.h>
#include <asm/system.h>
#include <asm/tlbflush.h>
#include <asm/unistd.h>
#include <asm/sn/arch.h>

#define SMP_DEBUG 0

#if SMP_DEBUG
#define Dprintk(x...)  printk(x)
#else
#define Dprintk(x...)  do { } while (0)
#endif

#ifdef CONFIG_HOTPLUG_CPU
#ifdef CONFIG_PERMIT_BSP_REMOVE
#define bsp_remove_ok	1
#else
#define bsp_remove_ok	0
#endif

/*
 * Store all idle threads so they can be reused across CPU hotplug
 * instead of creating a new thread for each bring-up.  This also avoids
 * complicated thread-destroy handling for idle threads.
 */
struct task_struct *idle_thread_array[NR_CPUS];

/*
 * Global array allocated for NR_CPUS at boot time
 */
struct sal_to_os_boot sal_boot_rendez_state[NR_CPUS];

/*
 * start_ap in head.S uses this to store the booting CPU's rendezvous
 * state.
 */
struct sal_to_os_boot *sal_state_for_booting_cpu = &sal_boot_rendez_state[0];

#define set_brendez_area(x) (sal_state_for_booting_cpu = &sal_boot_rendez_state[(x)])

#define get_idle_for_cpu(x)		(idle_thread_array[(x)])
#define set_idle_for_cpu(x,p)	(idle_thread_array[(x)] = (p))

#else

#define get_idle_for_cpu(x)		(NULL)
#define set_idle_for_cpu(x,p)
#define set_brendez_area(x)
#endif


/*
 * ITC synchronization related stuff:
 */
#define MASTER	(0)
#define SLAVE	(SMP_CACHE_BYTES/8)
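
/*
 * Note: MASTER and SLAVE index the go[] array below.  SLAVE is chosen so
 * that the two flags sit SMP_CACHE_BYTES apart (SMP_CACHE_BYTES/8 longs),
 * i.e. on different cache lines, so the two CPUs spinning on them don't
 * false-share a line.
 */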

#define NUM_ROUNDS	64	/* magic value */
#define NUM_ITERS	5	/* likewise */
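
/*
 * Each of the NUM_ROUNDS adjustment rounds in ia64_sync_itc() samples
 * NUM_ITERS round trips in get_delta() and keeps only the sample with the
 * shortest round-trip time, since that one bounds the error best.
 */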

static DEFINE_SPINLOCK(itc_sync_lock);
static volatile unsigned long go[SLAVE + 1];

#define DEBUG_ITC_SYNC	0

extern void start_ap (void);
extern unsigned long ia64_iobase;

struct task_struct *task_for_booting_cpu;

/*
 * State for each CPU
 */
DEFINE_PER_CPU(int, cpu_state);

cpumask_t cpu_core_map[NR_CPUS] __cacheline_aligned;
EXPORT_SYMBOL(cpu_core_map);
DEFINE_PER_CPU_SHARED_ALIGNED(cpumask_t, cpu_sibling_map);
EXPORT_PER_CPU_SYMBOL(cpu_sibling_map);

int smp_num_siblings = 1;

/* which logical CPU number maps to which CPU (physical SAPIC ID) */
volatile int ia64_cpu_to_sapicid[NR_CPUS];
EXPORT_SYMBOL(ia64_cpu_to_sapicid);

static volatile cpumask_t cpu_callin_map;

struct smp_boot_data smp_boot_data __initdata;

unsigned long ap_wakeup_vector = -1; /* external interrupt vector used to wake up APs */

char __initdata no_int_routing;

unsigned char smp_int_redirect; /* are INT and IPI redirectable by the chipset? */

#ifdef CONFIG_FORCE_CPEI_RETARGET
#define CPEI_OVERRIDE_DEFAULT	(1)
#else
#define CPEI_OVERRIDE_DEFAULT	(0)
#endif

unsigned int force_cpei_retarget = CPEI_OVERRIDE_DEFAULT;

static int __init
cmdl_force_cpei(char *str)
{
	int value = 0;

	get_option (&str, &value);
	force_cpei_retarget = value;

	return 1;
}

__setup("force_cpei=", cmdl_force_cpei);
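
/*
 * Example: booting with "force_cpei=1" enables CPEI retargeting even if
 * CONFIG_FORCE_CPEI_RETARGET is not set; "force_cpei=0" disables it.
 */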

static int __init
nointroute (char *str)
{
	no_int_routing = 1;
	printk(KERN_INFO "no_int_routing on\n");
	return 1;
}

__setup("nointroute", nointroute);

static void fix_b0_for_bsp(void)
{
#ifdef CONFIG_HOTPLUG_CPU
	int cpuid;
	static int fix_bsp_b0 = 1;

	cpuid = smp_processor_id();

	/*
	 * Cache the b0 value on the first AP that comes up
	 */
	if (!(fix_bsp_b0 && cpuid))
		return;

	sal_boot_rendez_state[0].br[0] = sal_boot_rendez_state[cpuid].br[0];
	printk(KERN_INFO "Fixed BSP b0 value from CPU %d\n", cpuid);

	fix_bsp_b0 = 0;
#endif
}

void
sync_master (void *arg)
{
	unsigned long flags, i;

	go[MASTER] = 0;

	local_irq_save(flags);
	{
		for (i = 0; i < NUM_ROUNDS*NUM_ITERS; ++i) {
			while (!go[MASTER])
				cpu_relax();
			go[MASTER] = 0;
			go[SLAVE] = ia64_get_itc();
		}
	}
	local_irq_restore(flags);
}

/*
 * Return the number of cycles by which our itc differs from the itc on the master
 * (time-keeper) CPU.  A positive number indicates our itc is ahead of the master,
 * negative that it is behind.
 */
static inline long
get_delta (long *rt, long *master)
{
	unsigned long best_t0 = 0, best_t1 = ~0UL, best_tm = 0;
	unsigned long tcenter, t0, t1, tm;
	long i;

	for (i = 0; i < NUM_ITERS; ++i) {
		t0 = ia64_get_itc();
		go[MASTER] = 1;
		while (!(tm = go[SLAVE]))
			cpu_relax();
		go[SLAVE] = 0;
		t1 = ia64_get_itc();

		if (t1 - t0 < best_t1 - best_t0)
			best_t0 = t0, best_t1 = t1, best_tm = tm;
	}

	*rt = best_t1 - best_t0;
	*master = best_tm - best_t0;

	/* average best_t0 and best_t1 without overflow: */
	tcenter = (best_t0/2 + best_t1/2);
	if (best_t0 % 2 + best_t1 % 2 == 2)
		++tcenter;
	return tcenter - best_tm;
}
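
/*
 * Illustration (made-up numbers): if the slave reads t0 = 1000 and
 * t1 = 1040 around a master reply of tm = 1015, the midpoint is
 * tcenter = 1020 and get_delta() returns +5, i.e. the slave's itc
 * appears 5 cycles ahead; ia64_sync_itc() then applies adj = -5.
 */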

/*
 * Synchronize ar.itc of the current (slave) CPU with the ar.itc of the MASTER CPU
 * (normally the time-keeper CPU).  We use a closed loop to eliminate the possibility of
 * unaccounted-for errors (such as getting a machine check in the middle of a calibration
 * step).  The basic idea is for the slave to ask the master what itc value it has and to
 * read its own itc before and after the master responds.  Each iteration gives us three
 * timestamps:
 *
 *	slave		master
 *
 *	t0 ---\
 *             ---\
 *		   --->
 *			tm
 *		   /---
 *	       /---
 *	t1 <---
 *
 *
 * The goal is to adjust the slave's ar.itc such that tm falls exactly half-way between t0
 * and t1.  If we achieve this, the clocks are synchronized provided the interconnect
 * between the slave and the master is symmetric.  Even if the interconnect were
 * asymmetric, we would still know that the synchronization error is smaller than the
 * round-trip latency (t1 - t0).
 *
 * When the interconnect is quiet and symmetric, this lets us synchronize the itc to
 * within one or two cycles.  However, we can only *guarantee* that the synchronization is
 * accurate to within a round-trip time, which is typically in the range of several
 * hundred cycles (e.g., ~500 cycles).  In practice, this means that the itcs are usually
 * almost perfectly synchronized, but we shouldn't assume that the accuracy is much better
 * than half a microsecond or so.
 */
void
ia64_sync_itc (unsigned int master)
{
	long i, delta, adj, adjust_latency = 0, done = 0;
	unsigned long flags, rt, master_time_stamp, bound;
#if DEBUG_ITC_SYNC
	struct {
		long rt;	/* roundtrip time */
		long master;	/* master's timestamp */
		long diff;	/* difference between midpoint and master's timestamp */
		long lat;	/* estimate of itc adjustment latency */
	} t[NUM_ROUNDS];
#endif

	/*
	 * Make sure local timer ticks are disabled while we sync.  If
	 * they were enabled, we'd have to worry about nasty issues
	 * like setting the ITC ahead of (or a long time before) the
	 * next scheduled tick.
	 */
	BUG_ON((ia64_get_itv() & (1 << 16)) == 0);

	go[MASTER] = 1;

	if (smp_call_function_single(master, sync_master, NULL, 0) < 0) {
		printk(KERN_ERR "sync_itc: failed to get attention of CPU %u!\n", master);
		return;
	}

	while (go[MASTER])
		cpu_relax();	/* wait for master to be ready */

	spin_lock_irqsave(&itc_sync_lock, flags);
	{
		for (i = 0; i < NUM_ROUNDS; ++i) {
			delta = get_delta(&rt, &master_time_stamp);
			if (delta == 0) {
				done = 1;	/* let's lock on to this... */
				bound = rt;
			}

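			/*
			 * The correction below is damped: adjust_latency
			 * accumulates -delta over the rounds and a quarter
			 * of that running sum is added back in as an
			 * estimate of the latency of writing the itc, which
			 * keeps the loop from oscillating.
			 */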
			if (!done) {
				if (i > 0) {
					adjust_latency += -delta;
					adj = -delta + adjust_latency/4;
				} else
					adj = -delta;

				ia64_set_itc(ia64_get_itc() + adj);
			}
#if DEBUG_ITC_SYNC
			t[i].rt = rt;
			t[i].master = master_time_stamp;
			t[i].diff = delta;
			t[i].lat = adjust_latency/4;
#endif
		}
	}
	spin_unlock_irqrestore(&itc_sync_lock, flags);

#if DEBUG_ITC_SYNC
	for (i = 0; i < NUM_ROUNDS; ++i)
		printk("rt=%5ld master=%5ld diff=%5ld adjlat=%5ld\n",
		       t[i].rt, t[i].master, t[i].diff, t[i].lat);
#endif

	printk(KERN_INFO "CPU %d: synchronized ITC with CPU %u (last diff %ld cycles, "
	       "maxerr %lu cycles)\n", smp_processor_id(), master, delta, rt);
}

/*
 * Ideally sets up per-cpu profiling hooks.  Doesn't do much now...
 */
static inline void __devinit
smp_setup_percpu_timer (void)
{
}

static void __cpuinit
smp_callin (void)
{
	int cpuid, phys_id, itc_master;
	struct cpuinfo_ia64 *last_cpuinfo, *this_cpuinfo;
	extern void ia64_init_itm(void);
	extern volatile int time_keeper_id;

#ifdef CONFIG_PERFMON
	extern void pfm_init_percpu(void);
#endif

	cpuid = smp_processor_id();
	phys_id = hard_smp_processor_id();
	itc_master = time_keeper_id;

	if (cpu_online(cpuid)) {
		printk(KERN_ERR "huh, phys CPU#0x%x, CPU#0x%x already present??\n",
		       phys_id, cpuid);
		BUG();
	}

	fix_b0_for_bsp();

	/*
	 * numa_node_id() works after this.
	 */
	set_numa_node(cpu_to_node_map[cpuid]);
	set_numa_mem(local_memory_node(cpu_to_node_map[cpuid]));

	ipi_call_lock_irq();
	spin_lock(&vector_lock);
	/* Setup the per cpu irq handling data structures */
	__setup_vector_irq(cpuid);
	notify_cpu_starting(cpuid);
	cpu_set(cpuid, cpu_online_map);
	per_cpu(cpu_state, cpuid) = CPU_ONLINE;
	spin_unlock(&vector_lock);
	ipi_call_unlock_irq();

	smp_setup_percpu_timer();

	ia64_mca_cmc_vector_setup();	/* Setup vector on AP */

#ifdef CONFIG_PERFMON
	pfm_init_percpu();
#endif

	local_irq_enable();

	if (!(sal_platform_features & IA64_SAL_PLATFORM_FEATURE_ITC_DRIFT)) {
		/*
		 * Synchronize the ITC with the BP.  Need to do this after irqs are
		 * enabled because ia64_sync_itc() calls smp_call_function_single(),
		 * which calls spin_unlock_bh(), which calls local_bh_enable(),
		 * which bugs out if irqs are not enabled...
		 */
		Dprintk("Going to sync up ITC with ITC Master.\n");
		ia64_sync_itc(itc_master);
	}

	/*
	 * Get our bogomips.
	 */
	ia64_init_itm();

	/*
	 * Delay calibration can be skipped if new processor is identical to the
	 * previous processor.
	 */
	last_cpuinfo = cpu_data(cpuid - 1);
	this_cpuinfo = local_cpu_data;
	if (last_cpuinfo->itc_freq != this_cpuinfo->itc_freq ||
	    last_cpuinfo->proc_freq != this_cpuinfo->proc_freq ||
	    last_cpuinfo->features != this_cpuinfo->features ||
	    last_cpuinfo->revision != this_cpuinfo->revision ||
	    last_cpuinfo->family != this_cpuinfo->family ||
	    last_cpuinfo->archrev != this_cpuinfo->archrev ||
	    last_cpuinfo->model != this_cpuinfo->model)
		calibrate_delay();
	local_cpu_data->loops_per_jiffy = loops_per_jiffy;

	/*
	 * Allow the master to continue.
	 */
	cpu_set(cpuid, cpu_callin_map);
	Dprintk("Stack on CPU %d at about %p\n", cpuid, &cpuid);
}


/*
 * Activate a secondary processor.  head.S calls this.
 */
int __cpuinit
start_secondary (void *unused)
{
	/* Early console may use I/O ports */
	ia64_set_kr(IA64_KR_IO_BASE, __pa(ia64_iobase));
#ifndef CONFIG_PRINTK_TIME
	Dprintk("start_secondary: starting CPU 0x%x\n", hard_smp_processor_id());
#endif
	efi_map_pal_code();
	cpu_init();
	preempt_disable();
	smp_callin();

	cpu_idle();
	return 0;
}

struct pt_regs * __cpuinit idle_regs(struct pt_regs *regs)
{
	return NULL;
}

struct create_idle {
	struct work_struct work;
	struct task_struct *idle;
	struct completion done;
	int cpu;
};

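/*
 * The idle task is forked via a workqueue item rather than directly from
 * __cpu_up(); presumably this is so fork_idle() runs in the context of a
 * clean kernel thread (keventd) instead of whatever process happens to be
 * initiating the hotplug operation.
 */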
void __cpuinit
do_fork_idle(struct work_struct *work)
{
	struct create_idle *c_idle =
		container_of(work, struct create_idle, work);

	c_idle->idle = fork_idle(c_idle->cpu);
	complete(&c_idle->done);
}

static int __cpuinit
do_boot_cpu (int sapicid, int cpu)
{
	int timeout;
	struct create_idle c_idle = {
		.work = __WORK_INITIALIZER(c_idle.work, do_fork_idle),
		.cpu	= cpu,
		.done	= COMPLETION_INITIALIZER(c_idle.done),
	};

	/*
	 * We can't use kernel_thread since we must avoid rescheduling
	 * the child.
	 */
	c_idle.idle = get_idle_for_cpu(cpu);
	if (c_idle.idle) {
		init_idle(c_idle.idle, cpu);
		goto do_rest;
	}

	schedule_work(&c_idle.work);
	wait_for_completion(&c_idle.done);

	if (IS_ERR(c_idle.idle))
		panic("failed fork for CPU %d", cpu);

	set_idle_for_cpu(cpu, c_idle.idle);

do_rest:
	task_for_booting_cpu = c_idle.idle;

	Dprintk("Sending wakeup vector %lu to AP 0x%x/0x%x.\n", ap_wakeup_vector, cpu, sapicid);

	set_brendez_area(cpu);
	platform_send_ipi(cpu, ap_wakeup_vector, IA64_IPI_DM_INT, 0);

	/*
	 * Wait 10s total for the AP to start
	 */
	Dprintk("Waiting on callin_map ...");
	for (timeout = 0; timeout < 100000; timeout++) {
		if (cpu_isset(cpu, cpu_callin_map))
			break;  /* It has booted */
		udelay(100);
	}
	Dprintk("\n");

	if (!cpu_isset(cpu, cpu_callin_map)) {
		printk(KERN_ERR "Processor 0x%x/0x%x is stuck.\n", cpu, sapicid);
		ia64_cpu_to_sapicid[cpu] = -1;
		cpu_clear(cpu, cpu_online_map);  /* was set in smp_callin() */
		return -EINVAL;
	}
	return 0;
}
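
/*
 * Rough sketch of the bring-up handshake implemented above together with
 * start_secondary() and smp_callin():
 *
 *   BSP: do_boot_cpu() --wakeup IPI--> AP: start_ap (head.S)
 *        polls cpu_callin_map              -> start_secondary()
 *        for up to 10s                     -> smp_callin(): set online bit,
 *                                             sync ITC, calibrate delay,
 *                                             set cpu_callin_map bit
 */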

static int __init
decay (char *str)
{
	int ticks;
	get_option (&str, &ticks);
	return 1;
}

__setup("decay=", decay);
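
/*
 * Note that "decay=" is parsed and then discarded; the handler seems to
 * exist only so that stale command lines carrying this old option don't
 * trip an unknown-parameter complaint.
 */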

/*
 * Initialize the logical CPU number to SAPICID mapping
 */
void __init
smp_build_cpu_map (void)
{
	int sapicid, cpu, i;
	int boot_cpu_id = hard_smp_processor_id();

	for (cpu = 0; cpu < NR_CPUS; cpu++) {
		ia64_cpu_to_sapicid[cpu] = -1;
	}

	ia64_cpu_to_sapicid[0] = boot_cpu_id;
	cpus_clear(cpu_present_map);
	set_cpu_present(0, true);
	set_cpu_possible(0, true);
	for (cpu = 1, i = 0; i < smp_boot_data.cpu_count; i++) {
		sapicid = smp_boot_data.cpu_phys_id[i];
		if (sapicid == boot_cpu_id)
			continue;
		set_cpu_present(cpu, true);
		set_cpu_possible(cpu, true);
		ia64_cpu_to_sapicid[cpu] = sapicid;
		cpu++;
	}
}
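
/*
 * For example (hypothetical IDs): if the BSP's SAPIC id is 0x10 and
 * smp_boot_data.cpu_phys_id[] lists { 0x10, 0x20, 0x30 }, the resulting
 * map is cpu0 -> 0x10, cpu1 -> 0x20, cpu2 -> 0x30; the BSP is always
 * logical CPU 0 and the other entries keep firmware discovery order.
 */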

/*
 * Cycle through the APs sending Wakeup IPIs to boot each.
 */
void __init
smp_prepare_cpus (unsigned int max_cpus)
{
	int boot_cpu_id = hard_smp_processor_id();

	/*
	 * Initialize the per-CPU profiling counter/multiplier
	 */

	smp_setup_percpu_timer();

	/*
	 * We have the boot CPU online for sure.
	 */
	cpu_set(0, cpu_online_map);
	cpu_set(0, cpu_callin_map);

	local_cpu_data->loops_per_jiffy = loops_per_jiffy;
	ia64_cpu_to_sapicid[0] = boot_cpu_id;

	printk(KERN_INFO "Boot processor id 0x%x/0x%x\n", 0, boot_cpu_id);

	current_thread_info()->cpu = 0;

	/*
	 * If SMP should be disabled, then really disable it!
	 */
	if (!max_cpus) {
		printk(KERN_INFO "SMP mode deactivated.\n");
		init_cpu_online(cpumask_of(0));
		init_cpu_present(cpumask_of(0));
		init_cpu_possible(cpumask_of(0));
		return;
	}
}

void __devinit smp_prepare_boot_cpu(void)
{
	cpu_set(smp_processor_id(), cpu_online_map);
	cpu_set(smp_processor_id(), cpu_callin_map);
	set_numa_node(cpu_to_node_map[smp_processor_id()]);
	per_cpu(cpu_state, smp_processor_id()) = CPU_ONLINE;
	paravirt_post_smp_prepare_boot_cpu();
}

#ifdef CONFIG_HOTPLUG_CPU
static inline void
clear_cpu_sibling_map(int cpu)
{
	int i;

	for_each_cpu_mask(i, per_cpu(cpu_sibling_map, cpu))
		cpu_clear(cpu, per_cpu(cpu_sibling_map, i));
	for_each_cpu_mask(i, cpu_core_map[cpu])
		cpu_clear(cpu, cpu_core_map[i]);

	per_cpu(cpu_sibling_map, cpu) = cpu_core_map[cpu] = CPU_MASK_NONE;
}

static void
remove_siblinginfo(int cpu)
{
	if (cpu_data(cpu)->threads_per_core == 1 &&
	    cpu_data(cpu)->cores_per_socket == 1) {
		cpu_clear(cpu, cpu_core_map[cpu]);
		cpu_clear(cpu, per_cpu(cpu_sibling_map, cpu));
		return;
	}

	/* remove it from all sibling maps */
	clear_cpu_sibling_map(cpu);
}

extern void fixup_irqs(void);

int migrate_platform_irqs(unsigned int cpu)
{
	int new_cpei_cpu;
	struct irq_desc *desc = NULL;
	const struct cpumask *mask;
	int retval = 0;

	/*
	 * Don't allow the CPEI target CPU to be removed.
	 */
	if (cpe_vector > 0 && is_cpu_cpei_target(cpu)) {
		printk(KERN_INFO "CPU (%d) is CPEI Target\n", cpu);
		if (can_cpei_retarget()) {
			/*
			 * Now re-target the CPEI to a different processor
			 */
			new_cpei_cpu = any_online_cpu(cpu_online_map);
			mask = cpumask_of(new_cpei_cpu);
			set_cpei_target_cpu(new_cpei_cpu);
			desc = irq_desc + ia64_cpe_irq;
			/*
			 * For now, switch the target immediately.  We would
			 * like to handle CPEI like any other interrupt (with
			 * a fake interrupt), but CPEI behaviour with polling
			 * needs to be studied before making that change.
			 */
			if (desc) {
				desc->chip->disable(ia64_cpe_irq);
				desc->chip->set_affinity(ia64_cpe_irq, mask);
				desc->chip->enable(ia64_cpe_irq);
				printk(KERN_INFO "Re-targeting CPEI to cpu %d\n", new_cpei_cpu);
			}
		}
		if (!desc) {
			printk(KERN_ERR "Unable to retarget CPEI; cannot offline CPU %d\n", cpu);
			retval = -EBUSY;
		}
	}
	return retval;
}

/* must be called with cpucontrol mutex held */
int __cpu_disable(void)
{
	int cpu = smp_processor_id();

	/*
	 * Don't allow removal of the boot processor unless the
	 * platform permits it.
	 */
	if (cpu == 0 && !bsp_remove_ok) {
		printk(KERN_ERR "Your platform does not support removal of the BSP\n");
		return -EBUSY;
	}

	if (ia64_platform_is("sn2")) {
		if (!sn_cpu_disable_allowed(cpu))
			return -EBUSY;
	}

	cpu_clear(cpu, cpu_online_map);

	if (migrate_platform_irqs(cpu)) {
		cpu_set(cpu, cpu_online_map);
		return -EBUSY;
	}

	remove_siblinginfo(cpu);
	fixup_irqs();
	local_flush_tlb_all();
	cpu_clear(cpu, cpu_callin_map);
	return 0;
}

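/*
 * Wait for the dying CPU to reach CPU_DEAD (set by play_dead on the dying
 * CPU itself); we poll for up to about 10 seconds (100 x 100 ms) before
 * giving up.
 */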
void __cpu_die(unsigned int cpu)
{
	unsigned int i;

	for (i = 0; i < 100; i++) {
		/* They ack this in play_dead by setting CPU_DEAD */
		if (per_cpu(cpu_state, cpu) == CPU_DEAD) {
			printk(KERN_INFO "CPU %d is now offline\n", cpu);
			return;
		}
		msleep(100);
	}
	printk(KERN_ERR "CPU %u didn't die...\n", cpu);
}
#endif /* CONFIG_HOTPLUG_CPU */

void
smp_cpus_done (unsigned int dummy)
{
	int cpu;
	unsigned long bogosum = 0;

	/*
	 * Allow the user to impress friends.
	 */

	for_each_online_cpu(cpu) {
		bogosum += cpu_data(cpu)->loops_per_jiffy;
	}

	printk(KERN_INFO "Total of %d processors activated (%lu.%02lu BogoMIPS).\n",
	       (int)num_online_cpus(), bogosum/(500000/HZ), (bogosum/(5000/HZ))%100);
}
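
/*
 * BogoMIPS = loops_per_jiffy * HZ / 500000.  Illustrative numbers only:
 * with HZ = 250 and four CPUs each at loops_per_jiffy = 1000000, bogosum
 * is 4000000 and the message above reports "2000.00 BogoMIPS".
 */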
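/*
 * Record @cpu's topology: every online CPU sharing the socket_id becomes
 * a member of cpu_core_map, and those that also share the core_id
 * (hardware threads of the same core) become cpu_sibling_map siblings.
 * Both directions are updated so the maps stay symmetric.
 */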
static inline void __devinit
set_cpu_sibling_map(int cpu)
{
	int i;

	for_each_online_cpu(i) {
		if (cpu_data(cpu)->socket_id == cpu_data(i)->socket_id) {
			cpu_set(i, cpu_core_map[cpu]);
			cpu_set(cpu, cpu_core_map[i]);
			if (cpu_data(cpu)->core_id == cpu_data(i)->core_id) {
				cpu_set(i, per_cpu(cpu_sibling_map, cpu));
				cpu_set(cpu, per_cpu(cpu_sibling_map, i));
			}
		}
	}
}

int __cpuinit
__cpu_up (unsigned int cpu)
{
	int ret;
	int sapicid;

	sapicid = ia64_cpu_to_sapicid[cpu];
	if (sapicid == -1)
		return -EINVAL;

	/*
	 * An already-booted CPU?  That is no longer valid: APs are not
	 * held in a tight idle-loop spin waiting to be re-onlined.
	 */
	if (cpu_isset(cpu, cpu_callin_map))
		return -EINVAL;

	per_cpu(cpu_state, cpu) = CPU_UP_PREPARE;
	/* Processor goes to start_secondary(), sets online flag */
	ret = do_boot_cpu(sapicid, cpu);
	if (ret < 0)
		return ret;

	if (cpu_data(cpu)->threads_per_core == 1 &&
	    cpu_data(cpu)->cores_per_socket == 1) {
		cpu_set(cpu, per_cpu(cpu_sibling_map, cpu));
		cpu_set(cpu, cpu_core_map[cpu]);
		return 0;
	}

	set_cpu_sibling_map(cpu);

	return 0;
}

/*
 * Assume that CPUs have been discovered by some platform-dependent interface.  For
 * SoftSDV/Lion, that would be ACPI.
 *
 * Setup of the IPI irq handler is done in irq.c:init_IRQ_SMP().
 */
void __init
init_smp_config(void)
{
	struct fptr {
		unsigned long fp;
		unsigned long gp;
	} *ap_startup;
	long sal_ret;

	/* Tell SAL where to drop the APs.  */
	ap_startup = (struct fptr *) start_ap;
	sal_ret = ia64_sal_set_vectors(SAL_VECTOR_OS_BOOT_RENDEZ,
				       ia64_tpa(ap_startup->fp), ia64_tpa(ap_startup->gp), 0, 0, 0, 0);
	if (sal_ret < 0)
		printk(KERN_ERR "SMP: Can't set SAL AP Boot Rendezvous: %s\n",
		       ia64_sal_strerror(sal_ret));
}

/*
 * identify_siblings(cpu) gets called from identify_cpu.  This populates the
 * logical-execution-unit information in the per-CPU data structure.
 */
void __devinit
identify_siblings(struct cpuinfo_ia64 *c)
{
	long status;
	u16 pltid;
	pal_logical_to_physical_t info;

	status = ia64_pal_logical_to_phys(-1, &info);
	if (status != PAL_STATUS_SUCCESS) {
		if (status != PAL_STATUS_UNIMPLEMENTED) {
			printk(KERN_ERR
				"ia64_pal_logical_to_phys failed with %ld\n",
				status);
			return;
		}

		info.overview_ppid = 0;
		info.overview_cpp  = 1;
		info.overview_tpc  = 1;
	}

	status = ia64_sal_physical_id_info(&pltid);
	if (status != PAL_STATUS_SUCCESS) {
		if (status != PAL_STATUS_UNIMPLEMENTED)
			printk(KERN_ERR
				"ia64_sal_pltid failed with %ld\n",
				status);
		return;
	}

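	/*
	 * Socket id: SAL platform id in the upper bits, PAL physical
	 * package id in the low 8 bits.
	 */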
	c->socket_id = (pltid << 8) | info.overview_ppid;

	if (info.overview_cpp == 1 && info.overview_tpc == 1)
		return;

	c->cores_per_socket = info.overview_cpp;
	c->threads_per_core = info.overview_tpc;
	c->num_log = info.overview_num_log;

	c->core_id = info.log1_cid;
	c->thread_id = info.log1_tid;
}

/*
 * Returns nonzero if multi-threading is enabled on at least one physical
 * package.  Due to CPU hotplug and maxcpus=, not all threads are
 * necessarily enabled even when the processor supports multi-threading.
 */
int is_multithreading_enabled(void)
{
	int i, j;

	for_each_present_cpu(i) {
		for_each_present_cpu(j) {
			if (j == i)
				continue;
			if (cpu_data(j)->socket_id == cpu_data(i)->socket_id) {
				if (cpu_data(j)->core_id == cpu_data(i)->core_id)
					return 1;
			}
		}
	}
	return 0;
}
EXPORT_SYMBOL_GPL(is_multithreading_enabled);
930