/*
 *  64-bit pSeries and RS/6000 setup code.
 *
 *  Copyright (C) 1995  Linus Torvalds
 *  Adapted from 'alpha' version by Gary Thomas
 *  Modified by Cort Dougan (cort@cs.nmt.edu)
 *  Modified by PPC64 Team, IBM Corp
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */

/*
 * bootup setup stuff..
 */

#undef DEBUG

#include <linux/config.h>
#include <linux/cpu.h>
#include <linux/errno.h>
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/stddef.h>
#include <linux/unistd.h>
#include <linux/slab.h>
#include <linux/user.h>
#include <linux/a.out.h>
#include <linux/tty.h>
#include <linux/major.h>
#include <linux/interrupt.h>
#include <linux/reboot.h>
#include <linux/init.h>
#include <linux/ioport.h>
#include <linux/console.h>
#include <linux/pci.h>
#include <linux/utsname.h>
#include <linux/adb.h>
#include <linux/module.h>
#include <linux/delay.h>
#include <linux/irq.h>
#include <linux/seq_file.h>
#include <linux/root_dev.h>

#include <asm/mmu.h>
#include <asm/processor.h>
#include <asm/io.h>
#include <asm/pgtable.h>
#include <asm/prom.h>
#include <asm/rtas.h>
#include <asm/pci-bridge.h>
#include <asm/iommu.h>
#include <asm/dma.h>
#include <asm/machdep.h>
#include <asm/irq.h>
#include <asm/kexec.h>
#include <asm/time.h>
#include <asm/nvram.h>
#include "xics.h"
#include <asm/firmware.h>
#include <asm/pmc.h>
#include <asm/mpic.h>
#include <asm/ppc-pci.h>
#include <asm/i8259.h>
#include <asm/udbg.h>
#include <asm/smp.h>

#include "plpar_wrappers.h"
#include "ras.h"

#ifdef DEBUG
#define DBG(fmt...) udbg_printf(fmt)
#else
#define DBG(fmt...)
#endif

extern void find_udbg_vterm(void);

int fwnmi_active;  /* TRUE if an FWNMI handler is present */

static void pseries_shared_idle(void);
static void pseries_dedicated_idle(void);

struct mpic *pSeries_mpic;

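/*
 * Report the machine model string from the device tree root node
 * for /proc/cpuinfo.
 */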
void pSeries_show_cpuinfo(struct seq_file *m)
{
	struct device_node *root;
	const char *model = "";

	root = of_find_node_by_path("/");
	if (root)
		model = get_property(root, "model", NULL);
	seq_printf(m, "machine\t\t: CHRP %s\n", model);
	of_node_put(root);
}

/* Initialize firmware assisted non-maskable interrupts if
 * the firmware supports this feature.
 */
static void __init fwnmi_init(void)
{
	unsigned long system_reset_addr, machine_check_addr;

	int ibm_nmi_register = rtas_token("ibm,nmi-register");
	if (ibm_nmi_register == RTAS_UNKNOWN_SERVICE)
		return;

	/* If the kernel's not linked at zero we point the firmware at low
	 * addresses anyway, and use a trampoline to get to the real code. */
	system_reset_addr  = __pa(system_reset_fwnmi) - PHYSICAL_START;
	machine_check_addr = __pa(machine_check_fwnmi) - PHYSICAL_START;

	if (0 == rtas_call(ibm_nmi_register, 2, 1, NULL, system_reset_addr,
				machine_check_addr))
		fwnmi_active = 1;
}

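/*
 * Finish MPIC initialization once all ISUs have been filled in, then
 * set up the legacy i8259 controller and hook it to the MPIC as a
 * cascade.
 */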
static void __init pSeries_init_mpic(void)
{
	unsigned int *addrp;
	struct device_node *np;
	unsigned long intack = 0;

	/* All ISUs are setup, complete initialization */
	mpic_init(pSeries_mpic);

	/* Check what kind of cascade ACK we have */
	if (!(np = of_find_node_by_name(NULL, "pci"))
	    || !(addrp = (unsigned int *)
		 get_property(np, "8259-interrupt-acknowledge", NULL)))
		printk(KERN_ERR "Cannot find pci to get ack address\n");
	else
		intack = addrp[prom_n_addr_cells(np)-1];
	of_node_put(np);

	/* Setup the legacy interrupts & controller */
	i8259_init(intack, 0);

	/* Hook cascade to mpic */
	mpic_setup_cascade(NUM_ISA_INTERRUPTS, i8259_irq_cascade, NULL);
}

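/*
 * Locate the OpenPIC from the "platform-open-pic" property of the
 * device tree root, fetch the interrupt sense values from OF and
 * allocate the primary MPIC, leaving the last four vectors for IPIs.
 */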
static void __init pSeries_setup_mpic(void)
{
	unsigned int *opprop;
	unsigned long openpic_addr = 0;
	unsigned char senses[NR_IRQS - NUM_ISA_INTERRUPTS];
	struct device_node *root;
	int irq_count;

	/* Find the Open PIC if present */
	root = of_find_node_by_path("/");
	opprop = (unsigned int *) get_property(root, "platform-open-pic", NULL);
	if (opprop != NULL) {
		int n = prom_n_addr_cells(root);

		for (openpic_addr = 0; n > 0; --n)
			openpic_addr = (openpic_addr << 32) + *opprop++;
		printk(KERN_DEBUG "OpenPIC addr: %lx\n", openpic_addr);
	}
	of_node_put(root);

	BUG_ON(openpic_addr == 0);

	/* Get the sense values from OF */
	prom_get_irq_senses(senses, NUM_ISA_INTERRUPTS, NR_IRQS);

	/* Setup the openpic driver */
	irq_count = NR_IRQS - NUM_ISA_INTERRUPTS - 4; /* leave room for IPIs */
	pSeries_mpic = mpic_alloc(openpic_addr, MPIC_PRIMARY,
				  16, 16, irq_count, /* isu size, irq offset, irq count */
				  NR_IRQS - 4, /* ipi offset */
				  senses, irq_count, /* sense & sense size */
				  " MPIC     ");
}

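/*
 * Enable the performance monitor counters for this partition via the
 * H_PERFMON hcall, and on shared-processor LPARs mark pmcregs_in_use
 * in the lppaca so the hypervisor maintains the PMCs for us.
 */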
static void pseries_lpar_enable_pmcs(void)
{
	unsigned long set, reset;

	power4_enable_pmcs();

	set = 1UL << 63;
	reset = 0;
	plpar_hcall_norets(H_PERFMON, set, reset);

	/* instruct hypervisor to maintain PMCs */
	if (firmware_has_feature(FW_FEATURE_SPLPAR))
		get_paca()->lppaca.pmcregs_in_use = 1;
}

static void __init pSeries_setup_arch(void)
{
	/* Fixup ppc_md depending on the type of interrupt controller */
	if (ppc64_interrupt_controller == IC_OPEN_PIC) {
		ppc_md.init_IRQ       = pSeries_init_mpic;
		ppc_md.get_irq        = mpic_get_irq;
		/* Allocate the mpic now, so that find_and_init_phbs() can
		 * fill the ISUs */
		pSeries_setup_mpic();
	} else {
		ppc_md.init_IRQ       = xics_init_IRQ;
		ppc_md.get_irq        = xics_get_irq;
	}

#ifdef CONFIG_SMP
	smp_init_pSeries();
#endif
	/* openpic global configuration register (64-bit format). */
	/* openpic Interrupt Source Unit pointer (64-bit format). */
	/* python0 facility area (mmio) (64-bit format) REAL address. */

	/* init to some ~sane value until calibrate_delay() runs */
	loops_per_jiffy = 50000000;

	if (ROOT_DEV == 0) {
		printk("No ramdisk, default root is /dev/sda2\n");
		ROOT_DEV = Root_SDA2;
	}

	fwnmi_init();

	/* Find and initialize PCI host bridges */
	init_pci_config_tokens();
	find_and_init_phbs();
	eeh_init();

	pSeries_nvram_init();

	/* Choose an idle loop */
	if (firmware_has_feature(FW_FEATURE_SPLPAR)) {
		vpa_init(boot_cpuid);
		if (get_paca()->lppaca.shared_proc) {
			printk(KERN_INFO "Using shared processor idle loop\n");
			ppc_md.idle_loop = pseries_shared_idle;
		} else {
			printk(KERN_INFO "Using dedicated idle loop\n");
			ppc_md.idle_loop = pseries_dedicated_idle;
		}
	} else {
		printk(KERN_INFO "Using default idle loop\n");
		ppc_md.idle_loop = default_idle;
	}

	if (platform_is_lpar())
		ppc_md.enable_pmcs = pseries_lpar_enable_pmcs;
	else
		ppc_md.enable_pmcs = power4_enable_pmcs;
}

static int __init pSeries_init_panel(void)
{
	/* Manually leave the kernel version on the panel. */
	ppc_md.progress("Linux ppc64\n", 0);
	ppc_md.progress(system_utsname.version, 0);

	return 0;
}
arch_initcall(pSeries_init_panel);


/* Build up the ppc64_firmware_features bitmask field
 * using contents of device-tree/ibm,hypertas-functions.
 * Ultimately this functionality may be moved into prom.c prom_init().
 */
static void __init fw_feature_init(void)
{
	struct device_node *dn;
	char *hypertas;
	unsigned int len;

	DBG(" -> fw_feature_init()\n");

	ppc64_firmware_features = 0;
	dn = of_find_node_by_path("/rtas");
	if (dn == NULL) {
		printk(KERN_ERR "WARNING! Cannot find RTAS in device-tree!\n");
		goto no_rtas;
	}

	hypertas = get_property(dn, "ibm,hypertas-functions", &len);
	if (hypertas) {
		while (len > 0) {
			int i, hypertas_len;
			/* check value against table of strings */
			for (i = 0; i < FIRMWARE_MAX_FEATURES; i++) {
				if ((firmware_features_table[i].name) &&
				    (strcmp(firmware_features_table[i].name, hypertas)) == 0) {
					/* we have a match */
					ppc64_firmware_features |=
						(firmware_features_table[i].val);
					break;
				}
			}
			hypertas_len = strlen(hypertas);
			len -= hypertas_len + 1;
			hypertas += hypertas_len + 1;
		}
	}

	of_node_put(dn);
no_rtas:

	DBG(" <- fw_feature_init()\n");
}


static void __init pSeries_discover_pic(void)
{
	struct device_node *np;
	char *typep;

	/*
	 * Setup interrupt mapping options that are needed for finish_device_tree
	 * to properly parse the OF interrupt tree & do the virtual irq mapping
	 */
	__irq_offset_value = NUM_ISA_INTERRUPTS;
	ppc64_interrupt_controller = IC_INVALID;
	for (np = NULL; (np = of_find_node_by_name(np, "interrupt-controller"));) {
		typep = (char *)get_property(np, "compatible", NULL);
		if (typep == NULL)
			continue;
		if (strstr(typep, "open-pic")) {
			ppc64_interrupt_controller = IC_OPEN_PIC;
			break;
		} else if (strstr(typep, "ppc-xicp")) {
			ppc64_interrupt_controller = IC_PPC_XIC;
			break;
		}
	}
	if (ppc64_interrupt_controller == IC_INVALID)
		printk("pSeries_discover_pic: failed to recognize"
			" interrupt-controller\n");
}

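/*
 * Called when this CPU is taken offline: clear the CPPR, then drop to
 * RTAS and ask it to stop the calling processor.  Should never return.
 */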
static void pSeries_mach_cpu_die(void)
{
	local_irq_disable();
	idle_task_exit();
	/* Some hardware requires clearing the CPPR, while other hardware
	 * does not; it is safe either way.
	 */
	pSeriesLP_cppr_info(0, 0);
	rtas_stop_self();
	/* Should never get here... */
	BUG();
	for (;;);
}

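/*
 * Set the data address breakpoint register through the hypervisor.
 * H_SET_XDABR additionally takes extended mode bits, which are used
 * here to trap accesses from both kernel and user space.
 */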
static int pseries_set_dabr(unsigned long dabr)
{
	return plpar_hcall_norets(H_SET_DABR, dabr);
}

static int pseries_set_xdabr(unsigned long dabr)
{
	/* We want to catch accesses from kernel and userspace */
	return plpar_hcall_norets(H_SET_XDABR, dabr,
			H_DABRX_KERNEL | H_DABRX_USER);
}

/*
 * Early initialization.  Relocation is on but do not reference unbolted pages
 */
static void __init pSeries_init_early(void)
{
	int iommu_off = 0;

	DBG(" -> pSeries_init_early()\n");

	fw_feature_init();

	if (platform_is_lpar())
		hpte_init_lpar();
	else {
		hpte_init_native();
		iommu_off = (of_chosen &&
			     get_property(of_chosen, "linux,iommu-off", NULL));
	}

	if (platform_is_lpar())
		find_udbg_vterm();

	if (firmware_has_feature(FW_FEATURE_DABR))
		ppc_md.set_dabr = pseries_set_dabr;
	else if (firmware_has_feature(FW_FEATURE_XDABR))
		ppc_md.set_dabr = pseries_set_xdabr;

	iommu_init_early_pSeries();

	pSeries_discover_pic();

	DBG(" <- pSeries_init_early()\n");
}


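/*
 * Legacy I/O ports (the i8042 keyboard controller and the floppy
 * controller) are only present if the corresponding device tree node
 * exists.
 */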
static int pSeries_check_legacy_ioport(unsigned int baseport)
{
	struct device_node *np;

#define I8042_DATA_REG	0x60
#define FDC_BASE	0x3f0

	switch (baseport) {
	case I8042_DATA_REG:
		np = of_find_node_by_type(NULL, "8042");
		if (np == NULL)
			return -ENODEV;
		of_node_put(np);
		break;
	case FDC_BASE:
		np = of_find_node_by_type(NULL, "fdc");
		if (np == NULL)
			return -ENODEV;
		of_node_put(np);
		break;
	}
	return 0;
}

/*
 * Called very early, MMU is off, device-tree isn't unflattened
 */
extern struct machdep_calls pSeries_md;

static int __init pSeries_probe(int platform)
{
	if (platform != PLATFORM_PSERIES &&
	    platform != PLATFORM_PSERIES_LPAR)
		return 0;

	/* if we have some ppc_md fixups for LPAR to do, do
	 * it here ...
	 */

	return 1;
}

DECLARE_PER_CPU(unsigned long, smt_snooze_delay);

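/*
 * Cede this thread to the hypervisor if its SMT sibling (cpu ^ 1) is
 * still busy, so the busy thread can use the whole core.  If the
 * sibling is idle too, just give the hypervisor a chance to poll.
 */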
static inline void dedicated_idle_sleep(unsigned int cpu)
{
	struct paca_struct *ppaca = &paca[cpu ^ 1];

	/* Only sleep if the other thread is not idle */
	if (!(ppaca->lppaca.idle)) {
		local_irq_disable();

		/*
		 * We are about to sleep the thread and so won't be polling
		 * any more.
		 */
		clear_thread_flag(TIF_POLLING_NRFLAG);
		smp_mb__after_clear_bit();

		/*
		 * SMT dynamic mode. Cede will result in this thread going
		 * dormant, if the partner thread is still doing work.  Thread
		 * wakes up if partner goes idle, an interrupt is presented, or
		 * a prod occurs.  Returning from the cede enables external
		 * interrupts.
		 */
		if (!need_resched())
			cede_processor();
		else
			local_irq_enable();
		set_thread_flag(TIF_POLLING_NRFLAG);
	} else {
		/*
		 * Give the HV an opportunity at the processor, since we are
		 * not doing any work.
		 */
		poll_pending();
	}
}

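/*
 * Idle loop for a dedicated-processor partition: spin at low thread
 * priority for smt_snooze_delay microseconds, then try to put the
 * thread to sleep via dedicated_idle_sleep() until there is work to do.
 */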
static void pseries_dedicated_idle(void)
{
	struct paca_struct *lpaca = get_paca();
	unsigned int cpu = smp_processor_id();
	unsigned long start_snooze;
	unsigned long *smt_snooze_delay = &__get_cpu_var(smt_snooze_delay);

	set_thread_flag(TIF_POLLING_NRFLAG);

	while (1) {
		/*
		 * Indicate to the HV that we are idle. Now would be
		 * a good time to find other work to dispatch.
		 */
		lpaca->lppaca.idle = 1;

		if (!need_resched()) {
			start_snooze = get_tb() +
				*smt_snooze_delay * tb_ticks_per_usec;

			while (!need_resched() && !cpu_is_offline(cpu)) {
				ppc64_runlatch_off();

				/*
				 * Go into low thread priority and possibly
				 * low power mode.
				 */
				HMT_low();
				HMT_very_low();

				if (*smt_snooze_delay != 0 &&
				    get_tb() > start_snooze) {
					HMT_medium();
					dedicated_idle_sleep(cpu);
				}
			}

			HMT_medium();
		}

		lpaca->lppaca.idle = 0;
		ppc64_runlatch_on();

		preempt_enable_no_resched();
		schedule();
		preempt_disable();

		if (cpu_is_offline(cpu) && system_state == SYSTEM_RUNNING)
			cpu_die();
	}
}

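/*
 * Idle loop for a shared-processor partition: cede the virtual
 * processor back to the hypervisor whenever there is nothing to run,
 * waking on an external interrupt or a prod from another processor.
 */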
static void pseries_shared_idle(void)
{
	struct paca_struct *lpaca = get_paca();
	unsigned int cpu = smp_processor_id();

	while (1) {
		/*
		 * Indicate to the HV that we are idle. Now would be
		 * a good time to find other work to dispatch.
		 */
		lpaca->lppaca.idle = 1;

		while (!need_resched() && !cpu_is_offline(cpu)) {
			local_irq_disable();
			ppc64_runlatch_off();

			/*
			 * Yield the processor to the hypervisor.  We return if
			 * an external interrupt occurs (which are driven prior
			 * to returning here) or if a prod occurs from another
			 * processor. When returning here, external interrupts
			 * are enabled.
			 *
			 * Check need_resched() again with interrupts disabled
			 * to avoid a race.
			 */
			if (!need_resched())
				cede_processor();
			else
				local_irq_enable();

			HMT_medium();
		}

		lpaca->lppaca.idle = 0;
		ppc64_runlatch_on();

		preempt_enable_no_resched();
		schedule();
		preempt_disable();

		if (cpu_is_offline(cpu) && system_state == SYSTEM_RUNNING)
			cpu_die();
	}
}

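/*
 * Under an LPAR, PCI devices are enumerated from the device tree
 * rather than by probing configuration space directly.
 */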
static int pSeries_pci_probe_mode(struct pci_bus *bus)
{
	if (platform_is_lpar())
		return PCI_PROBE_DEVTREE;
	return PCI_PROBE_NORMAL;
}

#ifdef CONFIG_KEXEC
static void pseries_kexec_cpu_down(int crash_shutdown, int secondary)
{
	/* Don't risk a hypervisor call if we're crashing */
	if (!crash_shutdown) {
		unsigned long vpa = __pa(&get_paca()->lppaca);

		if (unregister_vpa(hard_smp_processor_id(), vpa)) {
			printk("VPA deregistration of cpu %u (hw_cpu_id %d) "
					"failed\n", smp_processor_id(),
					hard_smp_processor_id());
		}
	}

	if (ppc64_interrupt_controller == IC_OPEN_PIC)
		mpic_teardown_this_cpu(secondary);
	else
		xics_teardown_cpu(secondary);
}
#endif

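/*
 * Machine description for pSeries and pSeries LPAR platforms, matched
 * at boot time by pSeries_probe().
 */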
struct machdep_calls __initdata pSeries_md = {
	.probe			= pSeries_probe,
	.setup_arch		= pSeries_setup_arch,
	.init_early		= pSeries_init_early,
	.show_cpuinfo		= pSeries_show_cpuinfo,
	.log_error		= pSeries_log_error,
	.pcibios_fixup		= pSeries_final_fixup,
	.pci_probe_mode		= pSeries_pci_probe_mode,
	.irq_bus_setup		= pSeries_irq_bus_setup,
	.restart		= rtas_restart,
	.power_off		= rtas_power_off,
	.halt			= rtas_halt,
	.panic			= rtas_os_term,
	.cpu_die		= pSeries_mach_cpu_die,
	.get_boot_time		= rtas_get_boot_time,
	.get_rtc_time		= rtas_get_rtc_time,
	.set_rtc_time		= rtas_set_rtc_time,
	.calibrate_decr		= generic_calibrate_decr,
	.progress		= rtas_progress,
	.check_legacy_ioport	= pSeries_check_legacy_ioport,
	.system_reset_exception = pSeries_system_reset_exception,
	.machine_check_exception = pSeries_machine_check_exception,
#ifdef CONFIG_KEXEC
	.kexec_cpu_down		= pseries_kexec_cpu_down,
	.machine_kexec		= default_machine_kexec,
	.machine_kexec_prepare	= default_machine_kexec_prepare,
	.machine_crash_shutdown	= default_machine_crash_shutdown,
#endif
};