xref: /openbmc/linux/arch/ia64/kernel/setup.c (revision acb04058)
11da177e4SLinus Torvalds /*
21da177e4SLinus Torvalds  * Architecture-specific setup.
31da177e4SLinus Torvalds  *
41da177e4SLinus Torvalds  * Copyright (C) 1998-2001, 2003-2004 Hewlett-Packard Co
51da177e4SLinus Torvalds  *	David Mosberger-Tang <davidm@hpl.hp.com>
61da177e4SLinus Torvalds  *	Stephane Eranian <eranian@hpl.hp.com>
7e927ecb0SSuresh Siddha  * Copyright (C) 2000, 2004 Intel Corp
8e927ecb0SSuresh Siddha  * 	Rohit Seth <rohit.seth@intel.com>
9e927ecb0SSuresh Siddha  * 	Suresh Siddha <suresh.b.siddha@intel.com>
10e927ecb0SSuresh Siddha  * 	Gordon Jin <gordon.jin@intel.com>
111da177e4SLinus Torvalds  * Copyright (C) 1999 VA Linux Systems
121da177e4SLinus Torvalds  * Copyright (C) 1999 Walt Drummond <drummond@valinux.com>
131da177e4SLinus Torvalds  *
14e927ecb0SSuresh Siddha  * 12/26/04 S.Siddha, G.Jin, R.Seth
15e927ecb0SSuresh Siddha  *			Add multi-threading and multi-core detection
161da177e4SLinus Torvalds  * 11/12/01 D.Mosberger Convert get_cpuinfo() to seq_file based show_cpuinfo().
171da177e4SLinus Torvalds  * 04/04/00 D.Mosberger renamed cpu_initialized to cpu_online_map
181da177e4SLinus Torvalds  * 03/31/00 R.Seth	cpu_initialized and current->processor fixes
191da177e4SLinus Torvalds  * 02/04/00 D.Mosberger	some more get_cpuinfo fixes...
201da177e4SLinus Torvalds  * 02/01/00 R.Seth	fixed get_cpuinfo for SMP
211da177e4SLinus Torvalds  * 01/07/99 S.Eranian	added the support for command line argument
221da177e4SLinus Torvalds  * 06/24/99 W.Drummond	added boot_cpu_data.
2308357f82SZoltan Menyhart  * 05/28/05 Z. Menyhart	Dynamic stride size for "flush_icache_range()"
241da177e4SLinus Torvalds  */
251da177e4SLinus Torvalds #include <linux/module.h>
261da177e4SLinus Torvalds #include <linux/init.h>
271da177e4SLinus Torvalds 
281da177e4SLinus Torvalds #include <linux/acpi.h>
291da177e4SLinus Torvalds #include <linux/bootmem.h>
301da177e4SLinus Torvalds #include <linux/console.h>
311da177e4SLinus Torvalds #include <linux/delay.h>
321da177e4SLinus Torvalds #include <linux/kernel.h>
331da177e4SLinus Torvalds #include <linux/reboot.h>
341da177e4SLinus Torvalds #include <linux/sched.h>
351da177e4SLinus Torvalds #include <linux/seq_file.h>
361da177e4SLinus Torvalds #include <linux/string.h>
371da177e4SLinus Torvalds #include <linux/threads.h>
38894673eeSJon Smirl #include <linux/screen_info.h>
393ed3bce8SMatt Domsch #include <linux/dmi.h>
401da177e4SLinus Torvalds #include <linux/serial.h>
411da177e4SLinus Torvalds #include <linux/serial_core.h>
421da177e4SLinus Torvalds #include <linux/efi.h>
431da177e4SLinus Torvalds #include <linux/initrd.h>
446c4fa560SVenkatesh Pallipadi #include <linux/pm.h>
4595235ca2SVenkatesh Pallipadi #include <linux/cpufreq.h>
46a7956113SZou Nan hai #include <linux/kexec.h>
47a7956113SZou Nan hai #include <linux/crash_dump.h>
481da177e4SLinus Torvalds 
491da177e4SLinus Torvalds #include <asm/machvec.h>
501da177e4SLinus Torvalds #include <asm/mca.h>
511da177e4SLinus Torvalds #include <asm/meminit.h>
521da177e4SLinus Torvalds #include <asm/page.h>
531da177e4SLinus Torvalds #include <asm/patch.h>
541da177e4SLinus Torvalds #include <asm/pgtable.h>
551da177e4SLinus Torvalds #include <asm/processor.h>
561da177e4SLinus Torvalds #include <asm/sal.h>
571da177e4SLinus Torvalds #include <asm/sections.h>
581da177e4SLinus Torvalds #include <asm/setup.h>
591da177e4SLinus Torvalds #include <asm/smp.h>
602046b94eSFenghua Yu #include <asm/tlbflush.h>
611da177e4SLinus Torvalds #include <asm/unistd.h>
628b713c67SPeter Chubb #include <asm/hpsim.h>
631da177e4SLinus Torvalds 
641da177e4SLinus Torvalds #if defined(CONFIG_SMP) && (IA64_CPU_SIZE > PAGE_SIZE)
651da177e4SLinus Torvalds # error "struct cpuinfo_ia64 too big!"
661da177e4SLinus Torvalds #endif
671da177e4SLinus Torvalds 
681da177e4SLinus Torvalds #ifdef CONFIG_SMP
691da177e4SLinus Torvalds unsigned long __per_cpu_offset[NR_CPUS];
701da177e4SLinus Torvalds EXPORT_SYMBOL(__per_cpu_offset);
711da177e4SLinus Torvalds #endif
721da177e4SLinus Torvalds 
73877105ccSTejun Heo DEFINE_PER_CPU(struct cpuinfo_ia64, ia64_cpu_info);
74e007c533SAl Viro EXPORT_SYMBOL(ia64_cpu_info);
751da177e4SLinus Torvalds DEFINE_PER_CPU(unsigned long, local_per_cpu_offset);
76e007c533SAl Viro #ifdef CONFIG_SMP
77e007c533SAl Viro EXPORT_SYMBOL(local_per_cpu_offset);
78e007c533SAl Viro #endif
791da177e4SLinus Torvalds unsigned long ia64_cycles_per_usec;
801da177e4SLinus Torvalds struct ia64_boot_param *ia64_boot_param;
811da177e4SLinus Torvalds struct screen_info screen_info;
8266b7f8a3SMark Maule unsigned long vga_console_iobase;
8366b7f8a3SMark Maule unsigned long vga_console_membase;
841da177e4SLinus Torvalds 
85be379124SKhalid Aziz static struct resource data_resource = {
86be379124SKhalid Aziz 	.name	= "Kernel data",
8703cb525eSToshi Kani 	.flags	= IORESOURCE_BUSY | IORESOURCE_SYSTEM_RAM
88be379124SKhalid Aziz };
89be379124SKhalid Aziz 
90be379124SKhalid Aziz static struct resource code_resource = {
91be379124SKhalid Aziz 	.name	= "Kernel code",
9203cb525eSToshi Kani 	.flags	= IORESOURCE_BUSY | IORESOURCE_SYSTEM_RAM
93be379124SKhalid Aziz };
9400bf4098SBernhard Walle 
9500bf4098SBernhard Walle static struct resource bss_resource = {
9600bf4098SBernhard Walle 	.name	= "Kernel bss",
9703cb525eSToshi Kani 	.flags	= IORESOURCE_BUSY | IORESOURCE_SYSTEM_RAM
9800bf4098SBernhard Walle };
99be379124SKhalid Aziz 
1001da177e4SLinus Torvalds unsigned long ia64_max_cacheline_size;
101e1531b42SJohn W. Linville 
1021da177e4SLinus Torvalds unsigned long ia64_iobase;	/* virtual address for I/O accesses */
1031da177e4SLinus Torvalds EXPORT_SYMBOL(ia64_iobase);
1041da177e4SLinus Torvalds struct io_space io_space[MAX_IO_SPACES];
1051da177e4SLinus Torvalds EXPORT_SYMBOL(io_space);
1061da177e4SLinus Torvalds unsigned int num_io_spaces;
1071da177e4SLinus Torvalds 
1081da177e4SLinus Torvalds /*
10908357f82SZoltan Menyhart  * "flush_icache_range()" needs to know what processor dependent stride size to use
11008357f82SZoltan Menyhart  * when it makes i-cache(s) coherent with d-caches.
11108357f82SZoltan Menyhart  */
11208357f82SZoltan Menyhart #define	I_CACHE_STRIDE_SHIFT	5	/* Safest way to go: 32 bytes by 32 bytes */
11308357f82SZoltan Menyhart unsigned long ia64_i_cache_stride_shift = ~0;
11462fdd767SFenghua Yu /*
11562fdd767SFenghua Yu  * "clflush_cache_range()" needs to know what processor dependent stride size to
11662fdd767SFenghua Yu  * use when it flushes cache lines including both d-cache and i-cache.
11762fdd767SFenghua Yu  */
11862fdd767SFenghua Yu /* Safest way to go: 32 bytes by 32 bytes */
11962fdd767SFenghua Yu #define	CACHE_STRIDE_SHIFT	5
12062fdd767SFenghua Yu unsigned long ia64_cache_stride_shift = ~0;
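/*
 * Illustrative sketch only, not part of the original file: roughly how the
 * two stride shifts above are consumed.  The real flush routines are
 * hand-written assembly (arch/ia64/lib/flush.S); this C approximation
 * assumes the fc.i/sync.i/srlz.i intrinsics from <asm/gcc_intrin.h>.
 */
#if 0	/* example only, never compiled */
static void example_flush_icache_range(unsigned long start, unsigned long end)
{
	unsigned long stride = 1UL << ia64_i_cache_stride_shift;
	unsigned long addr;

	/* walk the range one i-cache stride at a time, flushing each line */
	for (addr = start & ~(stride - 1); addr < end; addr += stride)
		ia64_fc_i((void *) addr);
	ia64_sync_i();		/* wait for the flushes to complete */
	ia64_srlz_i();		/* serialize the instruction stream */
}
#endif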
12108357f82SZoltan Menyhart 
12208357f82SZoltan Menyhart /*
1231da177e4SLinus Torvalds  * The merge_mask variable needs to be set to (max(iommu_page_size(iommu)) - 1).  This
1241da177e4SLinus Torvalds  * mask specifies a mask of address bits that must be 0 in order for two buffers to be
1251da177e4SLinus Torvalds  * mergeable by the I/O MMU (i.e., the end address of the first buffer and the start
1261da177e4SLinus Torvalds  * address of the second buffer must be aligned to (merge_mask+1) in order to be
1271da177e4SLinus Torvalds  * mergeable).  By default, we assume there is no I/O MMU which can merge physically
1281da177e4SLinus Torvalds  * discontiguous buffers, so we set the merge_mask to ~0UL, which corresponds to an iommu
1291da177e4SLinus Torvalds  * page-size of 2^64.
1301da177e4SLinus Torvalds  */
1311da177e4SLinus Torvalds unsigned long ia64_max_iommu_merge_mask = ~0UL;
1321da177e4SLinus Torvalds EXPORT_SYMBOL(ia64_max_iommu_merge_mask);
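/*
 * Illustrative sketch only, not part of the original file: how a DMA/block
 * layer could test two buffers against the merge mask exported above.
 * The helper name is hypothetical.
 */
#if 0	/* example only, never compiled */
static inline int example_buffers_mergeable(unsigned long end_of_first,
					    unsigned long start_of_second)
{
	/*
	 * Both boundary addresses must be aligned to (merge_mask + 1);
	 * with the default mask of ~0UL nothing is considered mergeable.
	 */
	return ((end_of_first | start_of_second) &
		ia64_max_iommu_merge_mask) == 0;
}
#endif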
1331da177e4SLinus Torvalds 
1341da177e4SLinus Torvalds /*
1351da177e4SLinus Torvalds  * We use a special marker for the end of memory and it uses the extra (+1) slot
1361da177e4SLinus Torvalds  */
137dae28066SChen, Kenneth W struct rsvd_region rsvd_region[IA64_MAX_RSVD_REGIONS + 1] __initdata;
138dae28066SChen, Kenneth W int num_rsvd_regions __initdata;
1391da177e4SLinus Torvalds 
1401da177e4SLinus Torvalds 
1411da177e4SLinus Torvalds /*
1421da177e4SLinus Torvalds  * Filter incoming memory segments based on the primitive map created from the boot
1431da177e4SLinus Torvalds  * parameters. Segments contained in the map are removed from the memory ranges. A
1441da177e4SLinus Torvalds  * caller-specified function is called with the memory ranges that remain after filtering.
1451da177e4SLinus Torvalds  * This routine does not assume the incoming segments are sorted.
1461da177e4SLinus Torvalds  */
147dae28066SChen, Kenneth W int __init
148e088a4adSMatthew Wilcox filter_rsvd_memory (u64 start, u64 end, void *arg)
1491da177e4SLinus Torvalds {
150e088a4adSMatthew Wilcox 	u64 range_start, range_end, prev_start;
1511da177e4SLinus Torvalds 	void (*func)(unsigned long, unsigned long, int);
1521da177e4SLinus Torvalds 	int i;
1531da177e4SLinus Torvalds 
1541da177e4SLinus Torvalds #if IGNORE_PFN0
1551da177e4SLinus Torvalds 	if (start == PAGE_OFFSET) {
1561da177e4SLinus Torvalds 		printk(KERN_WARNING "warning: skipping physical page 0\n");
1571da177e4SLinus Torvalds 		start += PAGE_SIZE;
1581da177e4SLinus Torvalds 		if (start >= end) return 0;
1591da177e4SLinus Torvalds 	}
1601da177e4SLinus Torvalds #endif
1611da177e4SLinus Torvalds 	/*
1621da177e4SLinus Torvalds 	 * lowest possible address (walker uses virtual)
1631da177e4SLinus Torvalds 	 */
1641da177e4SLinus Torvalds 	prev_start = PAGE_OFFSET;
1651da177e4SLinus Torvalds 	func = arg;
1661da177e4SLinus Torvalds 
1671da177e4SLinus Torvalds 	for (i = 0; i < num_rsvd_regions; ++i) {
1681da177e4SLinus Torvalds 		range_start = max(start, prev_start);
1691da177e4SLinus Torvalds 		range_end   = min(end, rsvd_region[i].start);
1701da177e4SLinus Torvalds 
1711da177e4SLinus Torvalds 		if (range_start < range_end)
1721da177e4SLinus Torvalds 			call_pernode_memory(__pa(range_start), range_end - range_start, func);
1731da177e4SLinus Torvalds 
1741da177e4SLinus Torvalds 		/* nothing more available in this segment */
1751da177e4SLinus Torvalds 		if (range_end == end) return 0;
1761da177e4SLinus Torvalds 
1771da177e4SLinus Torvalds 		prev_start = rsvd_region[i].end;
1781da177e4SLinus Torvalds 	}
1791da177e4SLinus Torvalds 	/* end of memory marker allows full processing inside loop body */
1801da177e4SLinus Torvalds 	return 0;
1811da177e4SLinus Torvalds }
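/*
 * Illustrative usage, not part of the original file: the routine above is
 * meant to be handed to the EFI memory-map walker together with a per-node
 * callback, roughly like this (the callback name is hypothetical):
 *
 *	efi_memmap_walk(filter_rsvd_memory, count_usable_pages);
 */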
1821da177e4SLinus Torvalds 
18398075d24SZoltan Menyhart /*
18498075d24SZoltan Menyhart  * Similar to "filter_rsvd_memory()", but the reserved memory ranges
18598075d24SZoltan Menyhart  * are not filtered out.
18698075d24SZoltan Menyhart  */
18798075d24SZoltan Menyhart int __init
188e088a4adSMatthew Wilcox filter_memory(u64 start, u64 end, void *arg)
18998075d24SZoltan Menyhart {
19098075d24SZoltan Menyhart 	void (*func)(unsigned long, unsigned long, int);
19198075d24SZoltan Menyhart 
19298075d24SZoltan Menyhart #if IGNORE_PFN0
19398075d24SZoltan Menyhart 	if (start == PAGE_OFFSET) {
19498075d24SZoltan Menyhart 		printk(KERN_WARNING "warning: skipping physical page 0\n");
19598075d24SZoltan Menyhart 		start += PAGE_SIZE;
19698075d24SZoltan Menyhart 		if (start >= end)
19798075d24SZoltan Menyhart 			return 0;
19898075d24SZoltan Menyhart 	}
19998075d24SZoltan Menyhart #endif
20098075d24SZoltan Menyhart 	func = arg;
20198075d24SZoltan Menyhart 	if (start < end)
20298075d24SZoltan Menyhart 		call_pernode_memory(__pa(start), end - start, func);
20398075d24SZoltan Menyhart 	return 0;
20498075d24SZoltan Menyhart }
20598075d24SZoltan Menyhart 
206dae28066SChen, Kenneth W static void __init
2071da177e4SLinus Torvalds sort_regions (struct rsvd_region *rsvd_region, int max)
2081da177e4SLinus Torvalds {
2091da177e4SLinus Torvalds 	int j;
2101da177e4SLinus Torvalds 
2111da177e4SLinus Torvalds 	/* simple bubble sorting */
2121da177e4SLinus Torvalds 	while (max--) {
2131da177e4SLinus Torvalds 		for (j = 0; j < max; ++j) {
2141da177e4SLinus Torvalds 			if (rsvd_region[j].start > rsvd_region[j+1].start) {
2151da177e4SLinus Torvalds 				struct rsvd_region tmp;
2161da177e4SLinus Torvalds 				tmp = rsvd_region[j];
2171da177e4SLinus Torvalds 				rsvd_region[j] = rsvd_region[j + 1];
2181da177e4SLinus Torvalds 				rsvd_region[j + 1] = tmp;
2191da177e4SLinus Torvalds 			}
2201da177e4SLinus Torvalds 		}
2211da177e4SLinus Torvalds 	}
2221da177e4SLinus Torvalds }
2231da177e4SLinus Torvalds 
22476d71ebdSPetr Tesarik /* merge overlaps */
22576d71ebdSPetr Tesarik static int __init
22676d71ebdSPetr Tesarik merge_regions (struct rsvd_region *rsvd_region, int max)
22776d71ebdSPetr Tesarik {
22876d71ebdSPetr Tesarik 	int i;
22976d71ebdSPetr Tesarik 	for (i = 1; i < max; ++i) {
23076d71ebdSPetr Tesarik 		if (rsvd_region[i].start >= rsvd_region[i-1].end)
23176d71ebdSPetr Tesarik 			continue;
23276d71ebdSPetr Tesarik 		if (rsvd_region[i].end > rsvd_region[i-1].end)
23376d71ebdSPetr Tesarik 			rsvd_region[i-1].end = rsvd_region[i].end;
23476d71ebdSPetr Tesarik 		--max;
23576d71ebdSPetr Tesarik 		memmove(&rsvd_region[i], &rsvd_region[i+1],
23676d71ebdSPetr Tesarik 			(max - i) * sizeof(struct rsvd_region));
23776d71ebdSPetr Tesarik 	}
23876d71ebdSPetr Tesarik 	return max;
23976d71ebdSPetr Tesarik }
24076d71ebdSPetr Tesarik 
241be379124SKhalid Aziz /*
242be379124SKhalid Aziz  * Request address space for all standard resources
243be379124SKhalid Aziz  */
244be379124SKhalid Aziz static int __init register_memory(void)
245be379124SKhalid Aziz {
246be379124SKhalid Aziz 	code_resource.start = ia64_tpa(_text);
247be379124SKhalid Aziz 	code_resource.end   = ia64_tpa(_etext) - 1;
248be379124SKhalid Aziz 	data_resource.start = ia64_tpa(_etext);
24900bf4098SBernhard Walle 	data_resource.end   = ia64_tpa(_edata) - 1;
250b898a424SBernhard Walle 	bss_resource.start  = ia64_tpa(__bss_start);
25100bf4098SBernhard Walle 	bss_resource.end    = ia64_tpa(_end) - 1;
25200bf4098SBernhard Walle 	efi_initialize_iomem_resources(&code_resource, &data_resource,
25300bf4098SBernhard Walle 			&bss_resource);
254be379124SKhalid Aziz 
255be379124SKhalid Aziz 	return 0;
256be379124SKhalid Aziz }
257be379124SKhalid Aziz 
258be379124SKhalid Aziz __initcall(register_memory);
259be379124SKhalid Aziz 
260cb380853SBernhard Walle 
261cb380853SBernhard Walle #ifdef CONFIG_KEXEC
2628a3360f0SBernhard Walle 
2638a3360f0SBernhard Walle /*
2648a3360f0SBernhard Walle  * This function checks if the reserved crashkernel is allowed on the specific
2658a3360f0SBernhard Walle  * IA64 machine flavour. Machines without an IO TLB use swiotlb and require
2668a3360f0SBernhard Walle  * some memory below 4 GB (i.e. in 32 bit area), see the implementation of
2678a3360f0SBernhard Walle  * lib/swiotlb.c. The hpzx1 architecture has an IO TLB but cannot use that
2688a3360f0SBernhard Walle  * in kdump case. See the comment in sba_init() in sba_iommu.c.
2698a3360f0SBernhard Walle  *
2708a3360f0SBernhard Walle  * So, the only machvecs that really support loading the kdump kernel
2718a3360f0SBernhard Walle  * above 4 GB are "sn2" and "uv".
2728a3360f0SBernhard Walle  */
2738a3360f0SBernhard Walle static int __init check_crashkernel_memory(unsigned long pbase, size_t size)
2748a3360f0SBernhard Walle {
2758a3360f0SBernhard Walle 	if (ia64_platform_is("sn2") || ia64_platform_is("uv"))
2768a3360f0SBernhard Walle 		return 1;
2778a3360f0SBernhard Walle 	else
2788a3360f0SBernhard Walle 		return pbase < (1UL << 32);
2798a3360f0SBernhard Walle }
2808a3360f0SBernhard Walle 
281cb380853SBernhard Walle static void __init setup_crashkernel(unsigned long total, int *n)
282cb380853SBernhard Walle {
283cb380853SBernhard Walle 	unsigned long long base = 0, size = 0;
284cb380853SBernhard Walle 	int ret;
285cb380853SBernhard Walle 
286cb380853SBernhard Walle 	ret = parse_crashkernel(boot_command_line, total,
287cb380853SBernhard Walle 			&size, &base);
288cb380853SBernhard Walle 	if (ret == 0 && size > 0) {
289cb380853SBernhard Walle 		if (!base) {
290cb380853SBernhard Walle 			sort_regions(rsvd_region, *n);
29176d71ebdSPetr Tesarik 			*n = merge_regions(rsvd_region, *n);
292cb380853SBernhard Walle 			base = kdump_find_rsvd_region(size,
293cb380853SBernhard Walle 					rsvd_region, *n);
294cb380853SBernhard Walle 		}
2958a3360f0SBernhard Walle 
2968a3360f0SBernhard Walle 		if (!check_crashkernel_memory(base, size)) {
2978a3360f0SBernhard Walle 			pr_warning("crashkernel: There would be kdump memory "
2988a3360f0SBernhard Walle 				"at %ld GB but this is unusable because it "
2998a3360f0SBernhard Walle 				"must\nbe below 4 GB. Change the memory "
3008a3360f0SBernhard Walle 				"configuration of the machine.\n",
3018a3360f0SBernhard Walle 				(unsigned long)(base >> 30));
3028a3360f0SBernhard Walle 			return;
3038a3360f0SBernhard Walle 		}
3048a3360f0SBernhard Walle 
305cb380853SBernhard Walle 		if (base != ~0UL) {
306cb380853SBernhard Walle 			printk(KERN_INFO "Reserving %ldMB of memory at %ldMB "
307cb380853SBernhard Walle 					"for crashkernel (System RAM: %ldMB)\n",
308cb380853SBernhard Walle 					(unsigned long)(size >> 20),
309cb380853SBernhard Walle 					(unsigned long)(base >> 20),
310cb380853SBernhard Walle 					(unsigned long)(total >> 20));
311cb380853SBernhard Walle 			rsvd_region[*n].start =
312cb380853SBernhard Walle 				(unsigned long)__va(base);
313cb380853SBernhard Walle 			rsvd_region[*n].end =
314cb380853SBernhard Walle 				(unsigned long)__va(base + size);
315cb380853SBernhard Walle 			(*n)++;
316cb380853SBernhard Walle 			crashk_res.start = base;
317cb380853SBernhard Walle 			crashk_res.end = base + size - 1;
318cb380853SBernhard Walle 		}
319cb380853SBernhard Walle 	}
320cb380853SBernhard Walle 	efi_memmap_res.start = ia64_boot_param->efi_memmap;
321cb380853SBernhard Walle 	efi_memmap_res.end = efi_memmap_res.start +
322cb380853SBernhard Walle 		ia64_boot_param->efi_memmap_size;
323cb380853SBernhard Walle 	boot_param_res.start = __pa(ia64_boot_param);
324cb380853SBernhard Walle 	boot_param_res.end = boot_param_res.start +
325cb380853SBernhard Walle 		sizeof(*ia64_boot_param);
326cb380853SBernhard Walle }
327cb380853SBernhard Walle #else
328cb380853SBernhard Walle static inline void __init setup_crashkernel(unsigned long total, int *n)
329cb380853SBernhard Walle {}
330cb380853SBernhard Walle #endif
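/*
 * Illustrative note, not part of the original file: the size/base handled
 * by setup_crashkernel() come from the standard "crashkernel=" boot option
 * parsed by parse_crashkernel(), e.g.
 *
 *	crashkernel=512M	reserve 512 MB, let the kernel pick the base
 *	crashkernel=512M@1024M	reserve 512 MB starting at 1 GB
 *
 * See Documentation/kdump/kdump.txt for the full syntax.
 */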
331cb380853SBernhard Walle 
3321da177e4SLinus Torvalds /**
3331da177e4SLinus Torvalds  * reserve_memory - setup reserved memory areas
3341da177e4SLinus Torvalds  *
3351da177e4SLinus Torvalds  * Setup the reserved memory areas set aside for the boot parameters,
3361da177e4SLinus Torvalds  * initrd, etc.  There are currently %IA64_MAX_RSVD_REGIONS defined,
3377f30491cSTony Luck  * see arch/ia64/include/asm/meminit.h if you need to define more.
3381da177e4SLinus Torvalds  */
339dae28066SChen, Kenneth W void __init
3401da177e4SLinus Torvalds reserve_memory (void)
3411da177e4SLinus Torvalds {
3421da177e4SLinus Torvalds 	int n = 0;
343cb380853SBernhard Walle 	unsigned long total_memory;
3441da177e4SLinus Torvalds 
3451da177e4SLinus Torvalds 	/*
3461da177e4SLinus Torvalds 	 * none of the entries in this table overlap
3471da177e4SLinus Torvalds 	 */
3481da177e4SLinus Torvalds 	rsvd_region[n].start = (unsigned long) ia64_boot_param;
3491da177e4SLinus Torvalds 	rsvd_region[n].end   = rsvd_region[n].start + sizeof(*ia64_boot_param);
3501da177e4SLinus Torvalds 	n++;
3511da177e4SLinus Torvalds 
3521da177e4SLinus Torvalds 	rsvd_region[n].start = (unsigned long) __va(ia64_boot_param->efi_memmap);
3531da177e4SLinus Torvalds 	rsvd_region[n].end   = rsvd_region[n].start + ia64_boot_param->efi_memmap_size;
3541da177e4SLinus Torvalds 	n++;
3551da177e4SLinus Torvalds 
3561da177e4SLinus Torvalds 	rsvd_region[n].start = (unsigned long) __va(ia64_boot_param->command_line);
3571da177e4SLinus Torvalds 	rsvd_region[n].end   = (rsvd_region[n].start
3581da177e4SLinus Torvalds 				+ strlen(__va(ia64_boot_param->command_line)) + 1);
3591da177e4SLinus Torvalds 	n++;
3601da177e4SLinus Torvalds 
3611da177e4SLinus Torvalds 	rsvd_region[n].start = (unsigned long) ia64_imva((void *)KERNEL_START);
3621da177e4SLinus Torvalds 	rsvd_region[n].end   = (unsigned long) ia64_imva(_end);
3631da177e4SLinus Torvalds 	n++;
3641da177e4SLinus Torvalds 
3651da177e4SLinus Torvalds #ifdef CONFIG_BLK_DEV_INITRD
3661da177e4SLinus Torvalds 	if (ia64_boot_param->initrd_start) {
3671da177e4SLinus Torvalds 		rsvd_region[n].start = (unsigned long)__va(ia64_boot_param->initrd_start);
3681da177e4SLinus Torvalds 		rsvd_region[n].end   = rsvd_region[n].start + ia64_boot_param->initrd_size;
3691da177e4SLinus Torvalds 		n++;
3701da177e4SLinus Torvalds 	}
3711da177e4SLinus Torvalds #endif
3721da177e4SLinus Torvalds 
37317c1f07eSJay Lan #ifdef CONFIG_CRASH_DUMP
374cee87af2SMagnus Damm 	if (reserve_elfcorehdr(&rsvd_region[n].start,
375cee87af2SMagnus Damm 			       &rsvd_region[n].end) == 0)
376cee87af2SMagnus Damm 		n++;
377cee87af2SMagnus Damm #endif
378cee87af2SMagnus Damm 
379cb380853SBernhard Walle 	total_memory = efi_memmap_init(&rsvd_region[n].start, &rsvd_region[n].end);
380d8c97d5fSTony Luck 	n++;
381d8c97d5fSTony Luck 
382cb380853SBernhard Walle 	setup_crashkernel(total_memory, &n);
383cb380853SBernhard Walle 
3841da177e4SLinus Torvalds 	/* end of memory marker */
3851da177e4SLinus Torvalds 	rsvd_region[n].start = ~0UL;
3861da177e4SLinus Torvalds 	rsvd_region[n].end   = ~0UL;
3871da177e4SLinus Torvalds 	n++;
3881da177e4SLinus Torvalds 
3891da177e4SLinus Torvalds 	num_rsvd_regions = n;
3905eb1d63fSAlex Williamson 	BUG_ON(IA64_MAX_RSVD_REGIONS + 1 < n);
3911da177e4SLinus Torvalds 
3921da177e4SLinus Torvalds 	sort_regions(rsvd_region, num_rsvd_regions);
39376d71ebdSPetr Tesarik 	num_rsvd_regions = merge_regions(rsvd_region, num_rsvd_regions);
3941da177e4SLinus Torvalds }
3951da177e4SLinus Torvalds 
396a7956113SZou Nan hai 
3971da177e4SLinus Torvalds /**
3981da177e4SLinus Torvalds  * find_initrd - get initrd parameters from the boot parameter structure
3991da177e4SLinus Torvalds  *
4001da177e4SLinus Torvalds  * Grab the initrd start and end from the boot parameter struct given us by
4011da177e4SLinus Torvalds  * the boot loader.
4021da177e4SLinus Torvalds  */
403dae28066SChen, Kenneth W void __init
4041da177e4SLinus Torvalds find_initrd (void)
4051da177e4SLinus Torvalds {
4061da177e4SLinus Torvalds #ifdef CONFIG_BLK_DEV_INITRD
4071da177e4SLinus Torvalds 	if (ia64_boot_param->initrd_start) {
4081da177e4SLinus Torvalds 		initrd_start = (unsigned long)__va(ia64_boot_param->initrd_start);
4091da177e4SLinus Torvalds 		initrd_end   = initrd_start+ia64_boot_param->initrd_size;
4101da177e4SLinus Torvalds 
411e088a4adSMatthew Wilcox 		printk(KERN_INFO "Initial ramdisk at: 0x%lx (%llu bytes)\n",
4121da177e4SLinus Torvalds 		       initrd_start, ia64_boot_param->initrd_size);
4131da177e4SLinus Torvalds 	}
4141da177e4SLinus Torvalds #endif
4151da177e4SLinus Torvalds }
4161da177e4SLinus Torvalds 
4171da177e4SLinus Torvalds static void __init
4181da177e4SLinus Torvalds io_port_init (void)
4191da177e4SLinus Torvalds {
4201da177e4SLinus Torvalds 	unsigned long phys_iobase;
4211da177e4SLinus Torvalds 
4221da177e4SLinus Torvalds 	/*
42344c45120SBjorn Helgaas 	 * Set `iobase' based on the EFI memory map or, failing that, the
42444c45120SBjorn Helgaas 	 * value firmware left in ar.k0.
4251da177e4SLinus Torvalds 	 *
42644c45120SBjorn Helgaas 	 * Note that in ia32 mode, IN/OUT instructions use ar.k0 to compute
42744c45120SBjorn Helgaas 	 * the port's virtual address, so ia32_load_state() loads it with a
42844c45120SBjorn Helgaas 	 * user virtual address.  But in ia64 mode, glibc uses the
42944c45120SBjorn Helgaas 	 * *physical* address in ar.k0 to mmap the appropriate area from
43044c45120SBjorn Helgaas 	 * /dev/mem, and the inX()/outX() interfaces use MMIO.  In both
43144c45120SBjorn Helgaas 	 * cases, user-mode can only use the legacy 0-64K I/O port space.
43244c45120SBjorn Helgaas 	 *
43344c45120SBjorn Helgaas 	 * ar.k0 is not involved in kernel I/O port accesses, which can use
43444c45120SBjorn Helgaas 	 * any of the I/O port spaces and are done via MMIO using the
43544c45120SBjorn Helgaas 	 * virtual mmio_base from the appropriate io_space[].
4361da177e4SLinus Torvalds 	 */
4371da177e4SLinus Torvalds 	phys_iobase = efi_get_iobase();
43844c45120SBjorn Helgaas 	if (!phys_iobase) {
4391da177e4SLinus Torvalds 		phys_iobase = ia64_get_kr(IA64_KR_IO_BASE);
44044c45120SBjorn Helgaas 		printk(KERN_INFO "No I/O port range found in EFI memory map, "
44144c45120SBjorn Helgaas 			"falling back to AR.KR0 (0x%lx)\n", phys_iobase);
4421da177e4SLinus Torvalds 	}
4431da177e4SLinus Torvalds 	ia64_iobase = (unsigned long) ioremap(phys_iobase, 0);
44444c45120SBjorn Helgaas 	ia64_set_kr(IA64_KR_IO_BASE, __pa(ia64_iobase));
4451da177e4SLinus Torvalds 
4461da177e4SLinus Torvalds 	/* setup legacy IO port space */
4471da177e4SLinus Torvalds 	io_space[0].mmio_base = ia64_iobase;
4481da177e4SLinus Torvalds 	io_space[0].sparse = 1;
4491da177e4SLinus Torvalds 	num_io_spaces = 1;
4501da177e4SLinus Torvalds }
4511da177e4SLinus Torvalds 
4521da177e4SLinus Torvalds /**
4531da177e4SLinus Torvalds  * early_console_setup - setup debugging console
4541da177e4SLinus Torvalds  *
4551da177e4SLinus Torvalds  * Consoles started here require little enough setup that we can start using
4561da177e4SLinus Torvalds  * them very early in the boot process, either right after the machine
4571da177e4SLinus Torvalds  * vector initialization, or even before if the drivers can detect their hw.
4581da177e4SLinus Torvalds  *
4591da177e4SLinus Torvalds  * Returns non-zero if a console couldn't be setup.
4601da177e4SLinus Torvalds  */
4611da177e4SLinus Torvalds static inline int __init
4621da177e4SLinus Torvalds early_console_setup (char *cmdline)
4631da177e4SLinus Torvalds {
46466b7f8a3SMark Maule 	int earlycons = 0;
46566b7f8a3SMark Maule 
4661da177e4SLinus Torvalds #ifdef CONFIG_SERIAL_SGI_L1_CONSOLE
4671da177e4SLinus Torvalds 	{
4681da177e4SLinus Torvalds 		extern int sn_serial_console_early_setup(void);
4691da177e4SLinus Torvalds 		if (!sn_serial_console_early_setup())
47066b7f8a3SMark Maule 			earlycons++;
4711da177e4SLinus Torvalds 	}
4721da177e4SLinus Torvalds #endif
4731da177e4SLinus Torvalds #ifdef CONFIG_EFI_PCDP
4741da177e4SLinus Torvalds 	if (!efi_setup_pcdp_console(cmdline))
47566b7f8a3SMark Maule 		earlycons++;
4761da177e4SLinus Torvalds #endif
4778b713c67SPeter Chubb 	if (!simcons_register())
478471e7a44SPeter Chubb 		earlycons++;
4791da177e4SLinus Torvalds 
48066b7f8a3SMark Maule 	return (earlycons) ? 0 : -1;
4811da177e4SLinus Torvalds }
4821da177e4SLinus Torvalds 
4831da177e4SLinus Torvalds static inline void
4841da177e4SLinus Torvalds mark_bsp_online (void)
4851da177e4SLinus Torvalds {
4861da177e4SLinus Torvalds #ifdef CONFIG_SMP
4871da177e4SLinus Torvalds 	/* If we register an early console, allow CPU 0 to printk */
4887d7f9848SSrivatsa S. Bhat 	set_cpu_online(smp_processor_id(), true);
4891da177e4SLinus Torvalds #endif
4901da177e4SLinus Torvalds }
4911da177e4SLinus Torvalds 
492a5b00bb4SHorms static __initdata int nomca;
493a5b00bb4SHorms static __init int setup_nomca(char *s)
494a5b00bb4SHorms {
495a5b00bb4SHorms 	nomca = 1;
496a5b00bb4SHorms 	return 0;
497a5b00bb4SHorms }
498a5b00bb4SHorms early_param("nomca", setup_nomca);
499a5b00bb4SHorms 
50057cac4d1SVivek Goyal #ifdef CONFIG_CRASH_DUMP
501e088a4adSMatthew Wilcox int __init reserve_elfcorehdr(u64 *start, u64 *end)
502cee87af2SMagnus Damm {
503e088a4adSMatthew Wilcox 	u64 length;
504cee87af2SMagnus Damm 
505cee87af2SMagnus Damm 	/* We get the address using the kernel command line,
506cee87af2SMagnus Damm 	 * but the size is extracted from the EFI tables.
507cee87af2SMagnus Damm 	 * Both address and size are required for reservation
508cee87af2SMagnus Damm 	 * to work properly.
509cee87af2SMagnus Damm 	 */
510cee87af2SMagnus Damm 
51185a0ee34SSimon Horman 	if (!is_vmcore_usable())
512cee87af2SMagnus Damm 		return -EINVAL;
513cee87af2SMagnus Damm 
514cee87af2SMagnus Damm 	if ((length = vmcore_find_descriptor_size(elfcorehdr_addr)) == 0) {
51585a0ee34SSimon Horman 		vmcore_unusable();
516cee87af2SMagnus Damm 		return -EINVAL;
517cee87af2SMagnus Damm 	}
518cee87af2SMagnus Damm 
519cee87af2SMagnus Damm 	*start = (unsigned long)__va(elfcorehdr_addr);
520cee87af2SMagnus Damm 	*end = *start + length;
521cee87af2SMagnus Damm 	return 0;
522cee87af2SMagnus Damm }
523cee87af2SMagnus Damm 
52445a98fc6SHorms #endif /* CONFIG_CRASH_DUMP */
52545a98fc6SHorms 
5261da177e4SLinus Torvalds void __init
5271da177e4SLinus Torvalds setup_arch (char **cmdline_p)
5281da177e4SLinus Torvalds {
5291da177e4SLinus Torvalds 	unw_init();
5301da177e4SLinus Torvalds 
5311da177e4SLinus Torvalds 	ia64_patch_vtop((u64) __start___vtop_patchlist, (u64) __end___vtop_patchlist);
5321da177e4SLinus Torvalds 
5331da177e4SLinus Torvalds 	*cmdline_p = __va(ia64_boot_param->command_line);
534a8d91b84SAlon Bar-Lev 	strlcpy(boot_command_line, *cmdline_p, COMMAND_LINE_SIZE);
5351da177e4SLinus Torvalds 
5361da177e4SLinus Torvalds 	efi_init();
5371da177e4SLinus Torvalds 	io_port_init();
5381da177e4SLinus Torvalds 
5391da177e4SLinus Torvalds #ifdef CONFIG_IA64_GENERIC
540a07ee862SHorms 	/* machvec needs to be parsed from the command line
541a07ee862SHorms 	 * before parse_early_param() is called to ensure
542a07ee862SHorms 	 * that ia64_mv is initialised before any command line
543a07ee862SHorms 	 * settings may cause console setup to occur
544a07ee862SHorms 	 */
545a07ee862SHorms 	machvec_init_from_cmdline(*cmdline_p);
5461da177e4SLinus Torvalds #endif
5471da177e4SLinus Torvalds 
548a07ee862SHorms 	parse_early_param();
549a07ee862SHorms 
5501da177e4SLinus Torvalds 	if (early_console_setup(*cmdline_p) == 0)
5511da177e4SLinus Torvalds 		mark_bsp_online();
5521da177e4SLinus Torvalds 
553888ba6c6SLen Brown #ifdef CONFIG_ACPI
5541da177e4SLinus Torvalds 	/* Initialize the ACPI boot-time table parser */
5551da177e4SLinus Torvalds 	acpi_table_init();
55662ee0540SDoug Chapman 	early_acpi_boot_init();
5571da177e4SLinus Torvalds # ifdef CONFIG_ACPI_NUMA
5581da177e4SLinus Torvalds 	acpi_numa_init();
559312521d0SRobert Richter 	acpi_numa_fixup();
56062ee0540SDoug Chapman #  ifdef CONFIG_ACPI_HOTPLUG_CPU
56162ee0540SDoug Chapman 	prefill_possible_map();
56262ee0540SDoug Chapman #  endif
5635d2068daSRusty Russell 	per_cpu_scan_finalize((cpumask_weight(&early_cpu_possible_map) == 0 ?
5645d2068daSRusty Russell 		32 : cpumask_weight(&early_cpu_possible_map)),
565dd4f0888STony Luck 		additional_cpus > 0 ? additional_cpus : 0);
5661da177e4SLinus Torvalds # endif
5671da177e4SLinus Torvalds #endif /* CONFIG_ACPI */
5681da177e4SLinus Torvalds 
56912cda817STejun Heo #ifdef CONFIG_SMP
57012cda817STejun Heo 	smp_build_cpu_map();
57112cda817STejun Heo #endif
5721da177e4SLinus Torvalds 	find_memory();
5731da177e4SLinus Torvalds 
5741da177e4SLinus Torvalds 	/* process SAL system table: */
575b2c99e3cSBjorn Helgaas 	ia64_sal_init(__va(efi.sal_systab));
5761da177e4SLinus Torvalds 
5774dcc29e1STony Luck #ifdef CONFIG_ITANIUM
5784dcc29e1STony Luck 	ia64_patch_rse((u64) __start___rse_patchlist, (u64) __end___rse_patchlist);
5794dcc29e1STony Luck #else
5804dcc29e1STony Luck 	{
581e088a4adSMatthew Wilcox 		unsigned long num_phys_stacked;
5824dcc29e1STony Luck 
5834dcc29e1STony Luck 		if (ia64_pal_rse_info(&num_phys_stacked, 0) == 0 && num_phys_stacked > 96)
5844dcc29e1STony Luck 			ia64_patch_rse((u64) __start___rse_patchlist, (u64) __end___rse_patchlist);
5854dcc29e1STony Luck 	}
5864dcc29e1STony Luck #endif
5874dcc29e1STony Luck 
5881da177e4SLinus Torvalds #ifdef CONFIG_SMP
5891da177e4SLinus Torvalds 	cpu_physical_id(0) = hard_smp_processor_id();
5901da177e4SLinus Torvalds #endif
5911da177e4SLinus Torvalds 
5921da177e4SLinus Torvalds 	cpu_init();	/* initialize the bootstrap CPU */
593dcc17d1bSPeter Keilty 	mmu_context_init();	/* initialize context_id bitmap */
5941da177e4SLinus Torvalds 
5951da177e4SLinus Torvalds #ifdef CONFIG_VT
5961da177e4SLinus Torvalds 	if (!conswitchp) {
5971da177e4SLinus Torvalds # if defined(CONFIG_DUMMY_CONSOLE)
5981da177e4SLinus Torvalds 		conswitchp = &dummy_con;
5991da177e4SLinus Torvalds # endif
6001da177e4SLinus Torvalds # if defined(CONFIG_VGA_CONSOLE)
6011da177e4SLinus Torvalds 		/*
6021da177e4SLinus Torvalds 		 * Non-legacy systems may route legacy VGA MMIO range to system
6031da177e4SLinus Torvalds 		 * memory.  vga_con probes the MMIO hole, so memory looks like
6041da177e4SLinus Torvalds 		 * a VGA device to it.  The EFI memory map can tell us if it's
6051da177e4SLinus Torvalds 		 * memory so we can avoid this problem.
6061da177e4SLinus Torvalds 		 */
6071da177e4SLinus Torvalds 		if (efi_mem_type(0xA0000) != EFI_CONVENTIONAL_MEMORY)
6081da177e4SLinus Torvalds 			conswitchp = &vga_con;
6091da177e4SLinus Torvalds # endif
6101da177e4SLinus Torvalds 	}
6111da177e4SLinus Torvalds #endif
6121da177e4SLinus Torvalds 
6131da177e4SLinus Torvalds 	/* enable IA-64 Machine Check Abort Handling unless disabled */
614a5b00bb4SHorms 	if (!nomca)
6151da177e4SLinus Torvalds 		ia64_mca_init();
6161da177e4SLinus Torvalds 
6171da177e4SLinus Torvalds 	platform_setup(cmdline_p);
61806f95ea8SAlex Chiang #ifndef CONFIG_IA64_HP_SIM
6192826f8c0SJes Sorensen 	check_sal_cache_flush();
62006f95ea8SAlex Chiang #endif
6211da177e4SLinus Torvalds 	paging_init();
622acb04058SPeter Zijlstra 
623acb04058SPeter Zijlstra 	clear_sched_clock_stable();
6241da177e4SLinus Torvalds }
6251da177e4SLinus Torvalds 
6261da177e4SLinus Torvalds /*
62772fdbdceSSimon Arlott  * Display cpu info for all CPUs.
6281da177e4SLinus Torvalds  */
6291da177e4SLinus Torvalds static int
6301da177e4SLinus Torvalds show_cpuinfo (struct seq_file *m, void *v)
6311da177e4SLinus Torvalds {
6321da177e4SLinus Torvalds #ifdef CONFIG_SMP
6331da177e4SLinus Torvalds #	define lpj	c->loops_per_jiffy
6341da177e4SLinus Torvalds #	define cpunum	c->cpu
6351da177e4SLinus Torvalds #else
6361da177e4SLinus Torvalds #	define lpj	loops_per_jiffy
6371da177e4SLinus Torvalds #	define cpunum	0
6381da177e4SLinus Torvalds #endif
6391da177e4SLinus Torvalds 	static struct {
6401da177e4SLinus Torvalds 		unsigned long mask;
6411da177e4SLinus Torvalds 		const char *feature_name;
6421da177e4SLinus Torvalds 	} feature_bits[] = {
6431da177e4SLinus Torvalds 		{ 1UL << 0, "branchlong" },
6441da177e4SLinus Torvalds 		{ 1UL << 1, "spontaneous deferral"},
6451da177e4SLinus Torvalds 		{ 1UL << 2, "16-byte atomic ops" }
6461da177e4SLinus Torvalds 	};
647ae0af3e3SAron Griffis 	char features[128], *cp, *sep;
6481da177e4SLinus Torvalds 	struct cpuinfo_ia64 *c = v;
6491da177e4SLinus Torvalds 	unsigned long mask;
65038c0b2c2STony Luck 	unsigned long proc_freq;
651ae0af3e3SAron Griffis 	int i, size;
6521da177e4SLinus Torvalds 
6531da177e4SLinus Torvalds 	mask = c->features;
6541da177e4SLinus Torvalds 
6551da177e4SLinus Torvalds 	/* build the feature string: */
656ae0af3e3SAron Griffis 	memcpy(features, "standard", 9);
6571da177e4SLinus Torvalds 	cp = features;
658ae0af3e3SAron Griffis 	size = sizeof(features);
659ae0af3e3SAron Griffis 	sep = "";
660ae0af3e3SAron Griffis 	for (i = 0; i < ARRAY_SIZE(feature_bits) && size > 1; ++i) {
6611da177e4SLinus Torvalds 		if (mask & feature_bits[i].mask) {
662ae0af3e3SAron Griffis 			cp += snprintf(cp, size, "%s%s", sep,
663ae0af3e3SAron Griffis 				       feature_bits[i].feature_name),
664ae0af3e3SAron Griffis 			sep = ", ";
6651da177e4SLinus Torvalds 			mask &= ~feature_bits[i].mask;
666ae0af3e3SAron Griffis 			size = sizeof(features) - (cp - features);
6671da177e4SLinus Torvalds 		}
6681da177e4SLinus Torvalds 	}
669ae0af3e3SAron Griffis 	if (mask && size > 1) {
670ae0af3e3SAron Griffis 		/* print unknown features as a hex value */
671ae0af3e3SAron Griffis 		snprintf(cp, size, "%s0x%lx", sep, mask);
6721da177e4SLinus Torvalds 	}
6731da177e4SLinus Torvalds 
67495235ca2SVenkatesh Pallipadi 	proc_freq = cpufreq_quick_get(cpunum);
67595235ca2SVenkatesh Pallipadi 	if (!proc_freq)
67695235ca2SVenkatesh Pallipadi 		proc_freq = c->proc_freq / 1000;
67795235ca2SVenkatesh Pallipadi 
6781da177e4SLinus Torvalds 	seq_printf(m,
6791da177e4SLinus Torvalds 		   "processor  : %d\n"
6801da177e4SLinus Torvalds 		   "vendor     : %s\n"
6811da177e4SLinus Torvalds 		   "arch       : IA-64\n"
68276d08bb3STony Luck 		   "family     : %u\n"
6831da177e4SLinus Torvalds 		   "model      : %u\n"
68476d08bb3STony Luck 		   "model name : %s\n"
6851da177e4SLinus Torvalds 		   "revision   : %u\n"
6861da177e4SLinus Torvalds 		   "archrev    : %u\n"
687ae0af3e3SAron Griffis 		   "features   : %s\n"
6881da177e4SLinus Torvalds 		   "cpu number : %lu\n"
6891da177e4SLinus Torvalds 		   "cpu regs   : %u\n"
6908a3a78d1SVenki Pallipadi 		   "cpu MHz    : %lu.%03lu\n"
6911da177e4SLinus Torvalds 		   "itc MHz    : %lu.%06lu\n"
692e927ecb0SSuresh Siddha 		   "BogoMIPS   : %lu.%02lu\n",
69376d08bb3STony Luck 		   cpunum, c->vendor, c->family, c->model,
69476d08bb3STony Luck 		   c->model_name, c->revision, c->archrev,
6951da177e4SLinus Torvalds 		   features, c->ppn, c->number,
69695235ca2SVenkatesh Pallipadi 		   proc_freq / 1000, proc_freq % 1000,
6971da177e4SLinus Torvalds 		   c->itc_freq / 1000000, c->itc_freq % 1000000,
6981da177e4SLinus Torvalds 		   lpj*HZ/500000, (lpj*HZ/5000) % 100);
699e927ecb0SSuresh Siddha #ifdef CONFIG_SMP
7005d2068daSRusty Russell 	seq_printf(m, "siblings   : %u\n",
7015d2068daSRusty Russell 		   cpumask_weight(&cpu_core_map[cpunum]));
702113134fcSAlex Chiang 	if (c->socket_id != -1)
703113134fcSAlex Chiang 		seq_printf(m, "physical id: %u\n", c->socket_id);
704e927ecb0SSuresh Siddha 	if (c->threads_per_core > 1 || c->cores_per_socket > 1)
705e927ecb0SSuresh Siddha 		seq_printf(m,
706e927ecb0SSuresh Siddha 			   "core id    : %u\n"
707e927ecb0SSuresh Siddha 			   "thread id  : %u\n",
708113134fcSAlex Chiang 			   c->core_id, c->thread_id);
709e927ecb0SSuresh Siddha #endif
710e927ecb0SSuresh Siddha 	seq_printf(m,"\n");
711e927ecb0SSuresh Siddha 
7121da177e4SLinus Torvalds 	return 0;
7131da177e4SLinus Torvalds }
7141da177e4SLinus Torvalds 
7151da177e4SLinus Torvalds static void *
7161da177e4SLinus Torvalds c_start (struct seq_file *m, loff_t *pos)
7171da177e4SLinus Torvalds {
7181da177e4SLinus Torvalds #ifdef CONFIG_SMP
7195dd3c994SRusty Russell 	while (*pos < nr_cpu_ids && !cpu_online(*pos))
7201da177e4SLinus Torvalds 		++*pos;
7211da177e4SLinus Torvalds #endif
7225dd3c994SRusty Russell 	return *pos < nr_cpu_ids ? cpu_data(*pos) : NULL;
7231da177e4SLinus Torvalds }
7241da177e4SLinus Torvalds 
7251da177e4SLinus Torvalds static void *
7261da177e4SLinus Torvalds c_next (struct seq_file *m, void *v, loff_t *pos)
7271da177e4SLinus Torvalds {
7281da177e4SLinus Torvalds 	++*pos;
7291da177e4SLinus Torvalds 	return c_start(m, pos);
7301da177e4SLinus Torvalds }
7311da177e4SLinus Torvalds 
7321da177e4SLinus Torvalds static void
7331da177e4SLinus Torvalds c_stop (struct seq_file *m, void *v)
7341da177e4SLinus Torvalds {
7351da177e4SLinus Torvalds }
7361da177e4SLinus Torvalds 
737a23fe55eSJan Engelhardt const struct seq_operations cpuinfo_op = {
7381da177e4SLinus Torvalds 	.start =	c_start,
7391da177e4SLinus Torvalds 	.next =		c_next,
7401da177e4SLinus Torvalds 	.stop =		c_stop,
7411da177e4SLinus Torvalds 	.show =		show_cpuinfo
7421da177e4SLinus Torvalds };
7431da177e4SLinus Torvalds 
744c5e83e3fSJack Steiner #define MAX_BRANDS	8
745c5e83e3fSJack Steiner static char brandname[MAX_BRANDS][128];
74676d08bb3STony Luck 
747ccce9bb8SPaul Gortmaker static char *
74876d08bb3STony Luck get_model_name(__u8 family, __u8 model)
74976d08bb3STony Luck {
750c5e83e3fSJack Steiner 	static int overflow;
75176d08bb3STony Luck 	char brand[128];
752c5e83e3fSJack Steiner 	int i;
75376d08bb3STony Luck 
75475f6a1deSTony Luck 	memcpy(brand, "Unknown", 8);
75576d08bb3STony Luck 	if (ia64_pal_get_brand_info(brand)) {
75676d08bb3STony Luck 		if (family == 0x7)
75776d08bb3STony Luck 			memcpy(brand, "Merced", 7);
75876d08bb3STony Luck 		else if (family == 0x1f) switch (model) {
75976d08bb3STony Luck 			case 0: memcpy(brand, "McKinley", 9); break;
76076d08bb3STony Luck 			case 1: memcpy(brand, "Madison", 8); break;
76176d08bb3STony Luck 			case 2: memcpy(brand, "Madison up to 9M cache", 23); break;
76275f6a1deSTony Luck 		}
76376d08bb3STony Luck 	}
764c5e83e3fSJack Steiner 	for (i = 0; i < MAX_BRANDS; i++)
765c5e83e3fSJack Steiner 		if (strcmp(brandname[i], brand) == 0)
766c5e83e3fSJack Steiner 			return brandname[i];
767c5e83e3fSJack Steiner 	for (i = 0; i < MAX_BRANDS; i++)
768c5e83e3fSJack Steiner 		if (brandname[i][0] == '\0')
769c5e83e3fSJack Steiner 			return strcpy(brandname[i], brand);
770c5e83e3fSJack Steiner 	if (overflow++ == 0)
771c5e83e3fSJack Steiner 		printk(KERN_ERR
772c5e83e3fSJack Steiner 		       "%s: Table overflow. Some processor model information will be missing\n",
773d4ed8084SHarvey Harrison 		       __func__);
774c5e83e3fSJack Steiner 	return "Unknown";
77576d08bb3STony Luck }
77676d08bb3STony Luck 
777ccce9bb8SPaul Gortmaker static void
7781da177e4SLinus Torvalds identify_cpu (struct cpuinfo_ia64 *c)
7791da177e4SLinus Torvalds {
7801da177e4SLinus Torvalds 	union {
7811da177e4SLinus Torvalds 		unsigned long bits[5];
7821da177e4SLinus Torvalds 		struct {
7831da177e4SLinus Torvalds 			/* id 0 & 1: */
7841da177e4SLinus Torvalds 			char vendor[16];
7851da177e4SLinus Torvalds 
7861da177e4SLinus Torvalds 			/* id 2 */
7871da177e4SLinus Torvalds 			u64 ppn;		/* processor serial number */
7881da177e4SLinus Torvalds 
7891da177e4SLinus Torvalds 			/* id 3: */
7901da177e4SLinus Torvalds 			unsigned number		:  8;
7911da177e4SLinus Torvalds 			unsigned revision	:  8;
7921da177e4SLinus Torvalds 			unsigned model		:  8;
7931da177e4SLinus Torvalds 			unsigned family		:  8;
7941da177e4SLinus Torvalds 			unsigned archrev	:  8;
7951da177e4SLinus Torvalds 			unsigned reserved	: 24;
7961da177e4SLinus Torvalds 
7971da177e4SLinus Torvalds 			/* id 4: */
7981da177e4SLinus Torvalds 			u64 features;
7991da177e4SLinus Torvalds 		} field;
8001da177e4SLinus Torvalds 	} cpuid;
8011da177e4SLinus Torvalds 	pal_vm_info_1_u_t vm1;
8021da177e4SLinus Torvalds 	pal_vm_info_2_u_t vm2;
8031da177e4SLinus Torvalds 	pal_status_t status;
8041da177e4SLinus Torvalds 	unsigned long impl_va_msb = 50, phys_addr_size = 44;	/* Itanium defaults */
8051da177e4SLinus Torvalds 	int i;
8061da177e4SLinus Torvalds 	for (i = 0; i < 5; ++i)
8071da177e4SLinus Torvalds 		cpuid.bits[i] = ia64_get_cpuid(i);
8081da177e4SLinus Torvalds 
8091da177e4SLinus Torvalds 	memcpy(c->vendor, cpuid.field.vendor, 16);
8101da177e4SLinus Torvalds #ifdef CONFIG_SMP
8111da177e4SLinus Torvalds 	c->cpu = smp_processor_id();
812e927ecb0SSuresh Siddha 
813e927ecb0SSuresh Siddha 	/* the default values below will be overwritten by identify_siblings()
81472fdbdceSSimon Arlott 	 * for Multi-Threading/Multi-Core capable CPUs
815e927ecb0SSuresh Siddha 	 */
816e927ecb0SSuresh Siddha 	c->threads_per_core = c->cores_per_socket = c->num_log = 1;
817e927ecb0SSuresh Siddha 	c->socket_id = -1;
818e927ecb0SSuresh Siddha 
819e927ecb0SSuresh Siddha 	identify_siblings(c);
820113134fcSAlex Chiang 
821113134fcSAlex Chiang 	if (c->threads_per_core > smp_num_siblings)
822113134fcSAlex Chiang 		smp_num_siblings = c->threads_per_core;
8231da177e4SLinus Torvalds #endif
8241da177e4SLinus Torvalds 	c->ppn = cpuid.field.ppn;
8251da177e4SLinus Torvalds 	c->number = cpuid.field.number;
8261da177e4SLinus Torvalds 	c->revision = cpuid.field.revision;
8271da177e4SLinus Torvalds 	c->model = cpuid.field.model;
8281da177e4SLinus Torvalds 	c->family = cpuid.field.family;
8291da177e4SLinus Torvalds 	c->archrev = cpuid.field.archrev;
8301da177e4SLinus Torvalds 	c->features = cpuid.field.features;
83176d08bb3STony Luck 	c->model_name = get_model_name(c->family, c->model);
8321da177e4SLinus Torvalds 
8331da177e4SLinus Torvalds 	status = ia64_pal_vm_summary(&vm1, &vm2);
8341da177e4SLinus Torvalds 	if (status == PAL_STATUS_SUCCESS) {
8351da177e4SLinus Torvalds 		impl_va_msb = vm2.pal_vm_info_2_s.impl_va_msb;
8361da177e4SLinus Torvalds 		phys_addr_size = vm1.pal_vm_info_1_s.phys_add_size;
8371da177e4SLinus Torvalds 	}
8381da177e4SLinus Torvalds 	c->unimpl_va_mask = ~((7L<<61) | ((1L << (impl_va_msb + 1)) - 1));
8391da177e4SLinus Torvalds 	c->unimpl_pa_mask = ~((1L<<63) | ((1L << phys_addr_size) - 1));
8401da177e4SLinus Torvalds }
8411da177e4SLinus Torvalds 
842872fb6ddSFenghua Yu /*
84362fdd767SFenghua Yu  * Do the following calculations:
84408357f82SZoltan Menyhart  *
84562fdd767SFenghua Yu  * 1. the max. cache line size.
84662fdd767SFenghua Yu  * 2. the minimum of the i-cache stride sizes for "flush_icache_range()".
84762fdd767SFenghua Yu  * 3. the minimum of the cache stride sizes for "clflush_cache_range()".
84808357f82SZoltan Menyhart  */
849ccce9bb8SPaul Gortmaker static void
85062fdd767SFenghua Yu get_cache_info(void)
8511da177e4SLinus Torvalds {
8521da177e4SLinus Torvalds 	unsigned long line_size, max = 1;
853e088a4adSMatthew Wilcox 	unsigned long l, levels, unique_caches;
8541da177e4SLinus Torvalds 	pal_cache_config_info_t cci;
855e088a4adSMatthew Wilcox 	long status;
8561da177e4SLinus Torvalds 
8571da177e4SLinus Torvalds         status = ia64_pal_cache_summary(&levels, &unique_caches);
8581da177e4SLinus Torvalds         if (status != 0) {
8591da177e4SLinus Torvalds                 printk(KERN_ERR "%s: ia64_pal_cache_summary() failed (status=%ld)\n",
860d4ed8084SHarvey Harrison                        __func__, status);
8611da177e4SLinus Torvalds                 max = SMP_CACHE_BYTES;
86208357f82SZoltan Menyhart 		/* Safest setup for "flush_icache_range()" */
86308357f82SZoltan Menyhart 		ia64_i_cache_stride_shift = I_CACHE_STRIDE_SHIFT;
86462fdd767SFenghua Yu 		/* Safest setup for "clflush_cache_range()" */
86562fdd767SFenghua Yu 		ia64_cache_stride_shift = CACHE_STRIDE_SHIFT;
8661da177e4SLinus Torvalds 		goto out;
8671da177e4SLinus Torvalds         }
8681da177e4SLinus Torvalds 
8691da177e4SLinus Torvalds 	for (l = 0; l < levels; ++l) {
87062fdd767SFenghua Yu 		/* cache_type (data_or_unified)=2 */
87162fdd767SFenghua Yu 		status = ia64_pal_cache_config_info(l, 2, &cci);
8721da177e4SLinus Torvalds 		if (status != 0) {
873e088a4adSMatthew Wilcox 			printk(KERN_ERR "%s: ia64_pal_cache_config_info"
874e088a4adSMatthew Wilcox 				"(l=%lu, 2) failed (status=%ld)\n",
875d4ed8084SHarvey Harrison 				__func__, l, status);
8761da177e4SLinus Torvalds 			max = SMP_CACHE_BYTES;
87708357f82SZoltan Menyhart 			/* The safest setup for "flush_icache_range()" */
87808357f82SZoltan Menyhart 			cci.pcci_stride = I_CACHE_STRIDE_SHIFT;
87962fdd767SFenghua Yu 			/* The safest setup for "clflush_cache_range()" */
88062fdd767SFenghua Yu 			ia64_cache_stride_shift = CACHE_STRIDE_SHIFT;
88108357f82SZoltan Menyhart 			cci.pcci_unified = 1;
88262fdd767SFenghua Yu 		} else {
88362fdd767SFenghua Yu 			if (cci.pcci_stride < ia64_cache_stride_shift)
88462fdd767SFenghua Yu 				ia64_cache_stride_shift = cci.pcci_stride;
88562fdd767SFenghua Yu 
8861da177e4SLinus Torvalds 			line_size = 1 << cci.pcci_line_size;
8871da177e4SLinus Torvalds 			if (line_size > max)
8881da177e4SLinus Torvalds 				max = line_size;
88962fdd767SFenghua Yu 		}
89062fdd767SFenghua Yu 
89108357f82SZoltan Menyhart 		if (!cci.pcci_unified) {
89262fdd767SFenghua Yu 			/* cache_type (instruction)=1*/
89362fdd767SFenghua Yu 			status = ia64_pal_cache_config_info(l, 1, &cci);
89408357f82SZoltan Menyhart 			if (status != 0) {
895e088a4adSMatthew Wilcox 				printk(KERN_ERR "%s: ia64_pal_cache_config_info"
896e088a4adSMatthew Wilcox 					"(l=%lu, 1) failed (status=%ld)\n",
897d4ed8084SHarvey Harrison 					__func__, l, status);
898e088a4adSMatthew Wilcox 				/* The safest setup for flush_icache_range() */
89908357f82SZoltan Menyhart 				cci.pcci_stride = I_CACHE_STRIDE_SHIFT;
90008357f82SZoltan Menyhart 			}
90108357f82SZoltan Menyhart 		}
90208357f82SZoltan Menyhart 		if (cci.pcci_stride < ia64_i_cache_stride_shift)
90308357f82SZoltan Menyhart 			ia64_i_cache_stride_shift = cci.pcci_stride;
9041da177e4SLinus Torvalds 	}
9051da177e4SLinus Torvalds   out:
9061da177e4SLinus Torvalds 	if (max > ia64_max_cacheline_size)
9071da177e4SLinus Torvalds 		ia64_max_cacheline_size = max;
9081da177e4SLinus Torvalds }
9091da177e4SLinus Torvalds 
9101da177e4SLinus Torvalds /*
9111da177e4SLinus Torvalds  * cpu_init() initializes state that is per-CPU.  This function acts
9121da177e4SLinus Torvalds  * as a 'CPU state barrier', nothing should get across.
9131da177e4SLinus Torvalds  */
914ccce9bb8SPaul Gortmaker void
9151da177e4SLinus Torvalds cpu_init (void)
9161da177e4SLinus Torvalds {
917ccce9bb8SPaul Gortmaker 	extern void ia64_mmu_init(void *);
918a0776ec8SChen, Kenneth W 	static unsigned long max_num_phys_stacked = IA64_NUM_PHYS_STACK_REG;
9191da177e4SLinus Torvalds 	unsigned long num_phys_stacked;
9201da177e4SLinus Torvalds 	pal_vm_info_2_u_t vmi;
9211da177e4SLinus Torvalds 	unsigned int max_ctx;
9221da177e4SLinus Torvalds 	struct cpuinfo_ia64 *cpu_info;
9231da177e4SLinus Torvalds 	void *cpu_data;
9241da177e4SLinus Torvalds 
9251da177e4SLinus Torvalds 	cpu_data = per_cpu_init();
9264d1efed5STony Luck #ifdef CONFIG_SMP
927d5a7430dSMike Travis 	/*
928d5a7430dSMike Travis 	 * insert boot cpu into sibling and core maps
929d5a7430dSMike Travis 	 * (must be done after per_cpu area is setup)
930d5a7430dSMike Travis 	 */
931d5a7430dSMike Travis 	if (smp_processor_id() == 0) {
9325d2068daSRusty Russell 		cpumask_set_cpu(0, &per_cpu(cpu_sibling_map, 0));
9335d2068daSRusty Russell 		cpumask_set_cpu(0, &cpu_core_map[0]);
93410617bbeSTony Luck 	} else {
9351da177e4SLinus Torvalds 		/*
93610617bbeSTony Luck 		 * Set ar.k3 so that assembly code in MCA handler can compute
9371da177e4SLinus Torvalds 		 * physical addresses of per cpu variables with a simple:
9381da177e4SLinus Torvalds 		 *   phys = ar.k3 + &per_cpu_var
93910617bbeSTony Luck 		 * and the alt-dtlb-miss handler can set per-cpu mapping into
94010617bbeSTony Luck 		 * the TLB when needed. head.S already did this for cpu0.
9411da177e4SLinus Torvalds 		 */
9421da177e4SLinus Torvalds 		ia64_set_kr(IA64_KR_PER_CPU_DATA,
9431da177e4SLinus Torvalds 			    ia64_tpa(cpu_data) - (long) __per_cpu_start);
94410617bbeSTony Luck 	}
94510617bbeSTony Luck #endif
9461da177e4SLinus Torvalds 
94762fdd767SFenghua Yu 	get_cache_info();
9481da177e4SLinus Torvalds 
9491da177e4SLinus Torvalds 	/*
9501da177e4SLinus Torvalds 	 * We can't pass "local_cpu_data" to identify_cpu() because we haven't called
9511da177e4SLinus Torvalds 	 * ia64_mmu_init() yet.  And we can't call ia64_mmu_init() first because it
9521da177e4SLinus Torvalds 	 * depends on the data returned by identify_cpu().  We break the dependency by
9531da177e4SLinus Torvalds 	 * accessing cpu_data() through the canonical per-CPU address.
9541da177e4SLinus Torvalds 	 */
955877105ccSTejun Heo 	cpu_info = cpu_data + ((char *) &__ia64_per_cpu_var(ia64_cpu_info) - __per_cpu_start);
9561da177e4SLinus Torvalds 	identify_cpu(cpu_info);
9571da177e4SLinus Torvalds 
9581da177e4SLinus Torvalds #ifdef CONFIG_MCKINLEY
9591da177e4SLinus Torvalds 	{
9601da177e4SLinus Torvalds #		define FEATURE_SET 16
9611da177e4SLinus Torvalds 		struct ia64_pal_retval iprv;
9621da177e4SLinus Torvalds 
9631da177e4SLinus Torvalds 		if (cpu_info->family == 0x1f) {
9641da177e4SLinus Torvalds 			PAL_CALL_PHYS(iprv, PAL_PROC_GET_FEATURES, 0, FEATURE_SET, 0);
9651da177e4SLinus Torvalds 			if ((iprv.status == 0) && (iprv.v0 & 0x80) && (iprv.v2 & 0x80))
9661da177e4SLinus Torvalds 				PAL_CALL_PHYS(iprv, PAL_PROC_SET_FEATURES,
9671da177e4SLinus Torvalds 				              (iprv.v1 | 0x80), FEATURE_SET, 0);
9681da177e4SLinus Torvalds 		}
9691da177e4SLinus Torvalds 	}
9701da177e4SLinus Torvalds #endif
9711da177e4SLinus Torvalds 
9721da177e4SLinus Torvalds 	/* Clear the stack memory reserved for pt_regs: */
9736450578fSAl Viro 	memset(task_pt_regs(current), 0, sizeof(struct pt_regs));
9741da177e4SLinus Torvalds 
9751da177e4SLinus Torvalds 	ia64_set_kr(IA64_KR_FPU_OWNER, 0);
9761da177e4SLinus Torvalds 
9771da177e4SLinus Torvalds 	/*
9781da177e4SLinus Torvalds 	 * Initialize the page-table base register to a global
9791da177e4SLinus Torvalds 	 * directory with all zeroes.  This ensures that we can handle
9801da177e4SLinus Torvalds 	 * TLB misses to user address-space even before we have created the
9811da177e4SLinus Torvalds 	 * first user address-space.  This may happen, e.g., due to
9821da177e4SLinus Torvalds 	 * aggressive use of lfetch.fault.
9831da177e4SLinus Torvalds 	 */
9841da177e4SLinus Torvalds 	ia64_set_kr(IA64_KR_PT_BASE, __pa(ia64_imva(empty_zero_page)));
9851da177e4SLinus Torvalds 
9861da177e4SLinus Torvalds 	/*
98786ebacd3STony Luck 	 * Initialize default control register to defer speculative faults except
98886ebacd3STony Luck 	 * for those arising from TLB misses, which are not deferred.  The
9891da177e4SLinus Torvalds 	 * kernel MUST NOT depend on a particular setting of these bits (in other words,
9901da177e4SLinus Torvalds 	 * the kernel must have recovery code for all speculative accesses).  Turn on
9911da177e4SLinus Torvalds 	 * dcr.lc as per recommendation by the architecture team.  Most IA-32 apps
9921da177e4SLinus Torvalds 	 * shouldn't be affected by this (moral: keep your ia32 locks aligned and you'll
9931da177e4SLinus Torvalds 	 * be fine).
9941da177e4SLinus Torvalds 	 */
9951da177e4SLinus Torvalds 	ia64_setreg(_IA64_REG_CR_DCR,  (  IA64_DCR_DP | IA64_DCR_DK | IA64_DCR_DX | IA64_DCR_DR
9961da177e4SLinus Torvalds 					| IA64_DCR_DA | IA64_DCR_DD | IA64_DCR_LC));
9971da177e4SLinus Torvalds 	atomic_inc(&init_mm.mm_count);
9981da177e4SLinus Torvalds 	current->active_mm = &init_mm;
99980a03e29SStoyan Gaydarov 	BUG_ON(current->mm);
10001da177e4SLinus Torvalds 
10011da177e4SLinus Torvalds 	ia64_mmu_init(ia64_imva(cpu_data));
10021da177e4SLinus Torvalds 	ia64_mca_cpu_init(ia64_imva(cpu_data));
10031da177e4SLinus Torvalds 
100472fdbdceSSimon Arlott 	/* Clear ITC to eliminate sched_clock() overflows in human time.  */
10051da177e4SLinus Torvalds 	ia64_set_itc(0);
10061da177e4SLinus Torvalds 
10071da177e4SLinus Torvalds 	/* disable all local interrupt sources: */
10081da177e4SLinus Torvalds 	ia64_set_itv(1 << 16);
10091da177e4SLinus Torvalds 	ia64_set_lrr0(1 << 16);
10101da177e4SLinus Torvalds 	ia64_set_lrr1(1 << 16);
10111da177e4SLinus Torvalds 	ia64_setreg(_IA64_REG_CR_PMV, 1 << 16);
10121da177e4SLinus Torvalds 	ia64_setreg(_IA64_REG_CR_CMCV, 1 << 16);
10131da177e4SLinus Torvalds 
10141da177e4SLinus Torvalds 	/* clear TPR & XTP to enable all interrupt classes: */
10151da177e4SLinus Torvalds 	ia64_setreg(_IA64_REG_CR_TPR, 0);
1016f740e6c9SKenji Kaneshige 
1017f740e6c9SKenji Kaneshige 	/* Clear any pending interrupts left by SAL/EFI */
1018f740e6c9SKenji Kaneshige 	while (ia64_get_ivr() != IA64_SPURIOUS_INT_VECTOR)
1019f740e6c9SKenji Kaneshige 		ia64_eoi();
1020f740e6c9SKenji Kaneshige 
10211da177e4SLinus Torvalds #ifdef CONFIG_SMP
10221da177e4SLinus Torvalds 	normal_xtp();
10231da177e4SLinus Torvalds #endif
10241da177e4SLinus Torvalds 
10251da177e4SLinus Torvalds 	/* set ia64_ctx.max_rid to the maximum RID that is supported by all CPUs: */
10262046b94eSFenghua Yu 	if (ia64_pal_vm_summary(NULL, &vmi) == 0) {
10271da177e4SLinus Torvalds 		max_ctx = (1U << (vmi.pal_vm_info_2_s.rid_size - 3)) - 1;
1028a6c75b86SFenghua Yu 		setup_ptcg_sem(vmi.pal_vm_info_2_s.max_purges, NPTCG_FROM_PAL);
10292046b94eSFenghua Yu 	} else {
10301da177e4SLinus Torvalds 		printk(KERN_WARNING "cpu_init: PAL VM summary failed, assuming 18 RID bits\n");
10311da177e4SLinus Torvalds 		max_ctx = (1U << 15) - 1;	/* use architected minimum */
10321da177e4SLinus Torvalds 	}
10331da177e4SLinus Torvalds 	while (max_ctx < ia64_ctx.max_ctx) {
10341da177e4SLinus Torvalds 		unsigned int old = ia64_ctx.max_ctx;
10351da177e4SLinus Torvalds 		if (cmpxchg(&ia64_ctx.max_ctx, old, max_ctx) == old)
10361da177e4SLinus Torvalds 			break;
10371da177e4SLinus Torvalds 	}
10381da177e4SLinus Torvalds 
10391da177e4SLinus Torvalds 	if (ia64_pal_rse_info(&num_phys_stacked, NULL) != 0) {
10401da177e4SLinus Torvalds 		printk(KERN_WARNING "cpu_init: PAL RSE info failed; assuming 96 physical "
10411da177e4SLinus Torvalds 		       "stacked regs\n");
10421da177e4SLinus Torvalds 		num_phys_stacked = 96;
10431da177e4SLinus Torvalds 	}
10441da177e4SLinus Torvalds 	/* size of physical stacked register partition plus 8 bytes: */
1045a0776ec8SChen, Kenneth W 	if (num_phys_stacked > max_num_phys_stacked) {
1046a0776ec8SChen, Kenneth W 		ia64_patch_phys_stack_reg(num_phys_stacked*8 + 8);
1047a0776ec8SChen, Kenneth W 		max_num_phys_stacked = num_phys_stacked;
1048a0776ec8SChen, Kenneth W 	}
10491da177e4SLinus Torvalds 	platform_cpu_init();
10501da177e4SLinus Torvalds }
10511da177e4SLinus Torvalds 
1052244fd545SChen, Kenneth W void __init
10531da177e4SLinus Torvalds check_bugs (void)
10541da177e4SLinus Torvalds {
10551da177e4SLinus Torvalds 	ia64_patch_mckinley_e9((unsigned long) __start___mckinley_e9_bundles,
10561da177e4SLinus Torvalds 			       (unsigned long) __end___mckinley_e9_bundles);
10571da177e4SLinus Torvalds }
10583ed3bce8SMatt Domsch 
10593ed3bce8SMatt Domsch static int __init run_dmi_scan(void)
10603ed3bce8SMatt Domsch {
10613ed3bce8SMatt Domsch 	dmi_scan_machine();
1062dd6dad42SChen, Gong 	dmi_memdev_walk();
106398e5e1bfSTejun Heo 	dmi_set_dump_stack_arch_desc();
10643ed3bce8SMatt Domsch 	return 0;
10653ed3bce8SMatt Domsch }
10663ed3bce8SMatt Domsch core_initcall(run_dmi_scan);
1067