xref: /openbmc/linux/arch/ia64/kernel/setup.c (revision d9a9855d)
/*
 * Architecture-specific setup.
 *
 * Copyright (C) 1998-2001, 2003-2004 Hewlett-Packard Co
 *	David Mosberger-Tang <davidm@hpl.hp.com>
 *	Stephane Eranian <eranian@hpl.hp.com>
 * Copyright (C) 2000, 2004 Intel Corp
 *	Rohit Seth <rohit.seth@intel.com>
 *	Suresh Siddha <suresh.b.siddha@intel.com>
 *	Gordon Jin <gordon.jin@intel.com>
 * Copyright (C) 1999 VA Linux Systems
 * Copyright (C) 1999 Walt Drummond <drummond@valinux.com>
 *
 * 12/26/04 S.Siddha, G.Jin, R.Seth
 *			Add multi-threading and multi-core detection
 * 11/12/01 D.Mosberger Convert get_cpuinfo() to seq_file based show_cpuinfo().
 * 04/04/00 D.Mosberger renamed cpu_initialized to cpu_online_map
 * 03/31/00 R.Seth	cpu_initialized and current->processor fixes
 * 02/04/00 D.Mosberger	some more get_cpuinfo fixes...
 * 02/01/00 R.Seth	fixed get_cpuinfo for SMP
 * 01/07/99 S.Eranian	added the support for command line argument
 * 06/24/99 W.Drummond	added boot_cpu_data.
 * 05/28/05 Z. Menyhart	Dynamic stride size for "flush_icache_range()"
 */
#include <linux/module.h>
#include <linux/init.h>

#include <linux/acpi.h>
#include <linux/bootmem.h>
#include <linux/console.h>
#include <linux/delay.h>
#include <linux/kernel.h>
#include <linux/reboot.h>
#include <linux/sched.h>
#include <linux/seq_file.h>
#include <linux/string.h>
#include <linux/threads.h>
#include <linux/screen_info.h>
#include <linux/dmi.h>
#include <linux/serial.h>
#include <linux/serial_core.h>
#include <linux/efi.h>
#include <linux/initrd.h>
#include <linux/pm.h>
#include <linux/cpufreq.h>
#include <linux/kexec.h>
#include <linux/crash_dump.h>

#include <asm/ia32.h>
#include <asm/machvec.h>
#include <asm/mca.h>
#include <asm/meminit.h>
#include <asm/page.h>
#include <asm/paravirt.h>
#include <asm/patch.h>
#include <asm/pgtable.h>
#include <asm/processor.h>
#include <asm/sal.h>
#include <asm/sections.h>
#include <asm/setup.h>
#include <asm/smp.h>
#include <asm/system.h>
#include <asm/tlbflush.h>
#include <asm/unistd.h>
#include <asm/hpsim.h>

#if defined(CONFIG_SMP) && (IA64_CPU_SIZE > PAGE_SIZE)
# error "struct cpuinfo_ia64 too big!"
#endif

#ifdef CONFIG_SMP
unsigned long __per_cpu_offset[NR_CPUS];
EXPORT_SYMBOL(__per_cpu_offset);
#endif

DEFINE_PER_CPU(struct cpuinfo_ia64, cpu_info);
DEFINE_PER_CPU(unsigned long, local_per_cpu_offset);
unsigned long ia64_cycles_per_usec;
struct ia64_boot_param *ia64_boot_param;
struct screen_info screen_info;
unsigned long vga_console_iobase;
unsigned long vga_console_membase;

static struct resource data_resource = {
	.name	= "Kernel data",
	.flags	= IORESOURCE_BUSY | IORESOURCE_MEM
};

static struct resource code_resource = {
	.name	= "Kernel code",
	.flags	= IORESOURCE_BUSY | IORESOURCE_MEM
};

static struct resource bss_resource = {
	.name	= "Kernel bss",
	.flags	= IORESOURCE_BUSY | IORESOURCE_MEM
};

unsigned long ia64_max_cacheline_size;

int dma_get_cache_alignment(void)
{
	return ia64_max_cacheline_size;
}
EXPORT_SYMBOL(dma_get_cache_alignment);

unsigned long ia64_iobase;	/* virtual address for I/O accesses */
EXPORT_SYMBOL(ia64_iobase);
struct io_space io_space[MAX_IO_SPACES];
EXPORT_SYMBOL(io_space);
unsigned int num_io_spaces;

/*
 * "flush_icache_range()" needs to know what processor-dependent stride size to
 * use when it makes the i-cache(s) coherent with the d-caches.
 */
#define	I_CACHE_STRIDE_SHIFT	5	/* Safest way to go: 32 bytes by 32 bytes */
unsigned long ia64_i_cache_stride_shift = ~0;

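/*
 * Illustrative sketch only; the actual implementation is hand-written
 * assembly (arch/ia64/lib/flush.S).  flush_icache_range() walks the
 * range in strides of (1 << ia64_i_cache_stride_shift) bytes, issuing
 * one "fc" (flush cache) per stride, roughly:
 *
 *	stride = 1UL << ia64_i_cache_stride_shift;
 *	for (addr = start & ~(stride - 1); addr < end; addr += stride)
 *		asm volatile ("fc %0" :: "r"(addr) : "memory");
 *	// ...followed by sync.i and srlz.i to serialize the flush
 */
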
/*
 * The merge_mask variable needs to be set to (max(iommu_page_size(iommu)) - 1).
 * This mask specifies the address bits that must be zero for two buffers to be
 * mergeable by the I/O MMU, i.e., the end address of the first buffer and the
 * start address of the second buffer must be aligned to (merge_mask+1).  By
 * default, we assume there is no I/O MMU that can merge physically
 * discontiguous buffers, so we set merge_mask to ~0UL, which corresponds to an
 * IOMMU page size of 2^64.
 */
unsigned long ia64_max_iommu_merge_mask = ~0UL;
EXPORT_SYMBOL(ia64_max_iommu_merge_mask);
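
/*
 * Worked example (hypothetical platform): an I/O MMU with 4 KiB pages
 * would set ia64_max_iommu_merge_mask = 0xfff.  Two buffers may then be
 * merged only when the seam between them is 4 KiB aligned, i.e.
 *
 *	((end_of_first | start_of_second) & 0xfff) == 0
 *
 * The default ~0UL makes this test fail for every finite address, so
 * nothing is ever merged.
 */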

/*
 * A special marker is used for the end of memory; it occupies the extra (+1)
 * slot.
 */
struct rsvd_region rsvd_region[IA64_MAX_RSVD_REGIONS + 1] __initdata;
int num_rsvd_regions __initdata;


/*
 * Filter incoming memory segments based on the primitive map created from the
 * boot parameters.  Segments contained in the map are removed from the memory
 * ranges.  A caller-specified function is called with the memory ranges that
 * remain after filtering.  This routine does not assume the incoming segments
 * are sorted.
 */
int __init
filter_rsvd_memory (unsigned long start, unsigned long end, void *arg)
{
	unsigned long range_start, range_end, prev_start;
	void (*func)(unsigned long, unsigned long, int);
	int i;

#if IGNORE_PFN0
	if (start == PAGE_OFFSET) {
		printk(KERN_WARNING "warning: skipping physical page 0\n");
		start += PAGE_SIZE;
		if (start >= end)
			return 0;
	}
#endif
	/*
	 * lowest possible address (the walker uses virtual addresses)
	 */
	prev_start = PAGE_OFFSET;
	func = arg;

	for (i = 0; i < num_rsvd_regions; ++i) {
		range_start = max(start, prev_start);
		range_end   = min(end, rsvd_region[i].start);

		if (range_start < range_end)
			call_pernode_memory(__pa(range_start), range_end - range_start, func);

		/* nothing more available in this segment */
		if (range_end == end)
			return 0;

		prev_start = rsvd_region[i].end;
	}
	/* end of memory marker allows full processing inside loop body */
	return 0;
}
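
/*
 * Sketch of the typical call chain (the callback name here is made up;
 * the real callers live in the mm init code): the EFI memory-map walker
 * feeds each available segment through this filter, and only the
 * unreserved pieces reach the caller's function via call_pernode_memory():
 *
 *	efi_memmap_walk(filter_rsvd_memory, register_free_range);
 *
 * where register_free_range(start, len, nid) receives physical ranges
 * with all reserved regions punched out.
 */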

/*
 * Similar to "filter_rsvd_memory()", but the reserved memory ranges
 * are not filtered out.
 */
int __init
filter_memory(unsigned long start, unsigned long end, void *arg)
{
	void (*func)(unsigned long, unsigned long, int);

#if IGNORE_PFN0
	if (start == PAGE_OFFSET) {
		printk(KERN_WARNING "warning: skipping physical page 0\n");
		start += PAGE_SIZE;
		if (start >= end)
			return 0;
	}
#endif
	func = arg;
	if (start < end)
		call_pernode_memory(__pa(start), end - start, func);
	return 0;
}

static void __init
sort_regions (struct rsvd_region *rsvd_region, int max)
{
	int j;

	/* simple bubble sort */
	while (max--) {
		for (j = 0; j < max; ++j) {
			if (rsvd_region[j].start > rsvd_region[j+1].start) {
				struct rsvd_region tmp;
				tmp = rsvd_region[j];
				rsvd_region[j] = rsvd_region[j + 1];
				rsvd_region[j + 1] = tmp;
			}
		}
	}
}

/*
 * Request address space for all standard resources
 */
static int __init register_memory(void)
{
	code_resource.start = ia64_tpa(_text);
	code_resource.end   = ia64_tpa(_etext) - 1;
	data_resource.start = ia64_tpa(_etext);
	data_resource.end   = ia64_tpa(_edata) - 1;
	bss_resource.start  = ia64_tpa(__bss_start);
	bss_resource.end    = ia64_tpa(_end) - 1;
	efi_initialize_iomem_resources(&code_resource, &data_resource,
			&bss_resource);

	return 0;
}

__initcall(register_memory);


#ifdef CONFIG_KEXEC

/*
 * This function checks if the reserved crashkernel region is usable on the
 * specific IA64 machine flavour.  Machines without an I/O TLB use swiotlb and
 * require some memory below 4 GB (i.e. in the 32-bit area); see the
 * implementation in lib/swiotlb.c.  The hpzx1 architecture has an I/O TLB but
 * cannot use it in the kdump case; see the comment in sba_init() in
 * sba_iommu.c.
 *
 * So the only machvecs that really support loading the kdump kernel
 * above 4 GB are "sn2" and "uv".
 */
static int __init check_crashkernel_memory(unsigned long pbase, size_t size)
{
	if (ia64_platform_is("sn2") || ia64_platform_is("uv"))
		return 1;
	else
		return pbase < (1UL << 32);
}

static void __init setup_crashkernel(unsigned long total, int *n)
{
	unsigned long long base = 0, size = 0;
	int ret;

	ret = parse_crashkernel(boot_command_line, total,
			&size, &base);
	if (ret == 0 && size > 0) {
		if (!base) {
			sort_regions(rsvd_region, *n);
			base = kdump_find_rsvd_region(size,
					rsvd_region, *n);
		}

		if (!check_crashkernel_memory(base, size)) {
			pr_warning("crashkernel: There would be kdump memory "
				"at %lu GB but this is unusable because it "
				"must be below 4 GB. Change the memory "
				"configuration of the machine.\n",
				(unsigned long)(base >> 30));
			return;
		}

		if (base != ~0UL) {
			printk(KERN_INFO "Reserving %luMB of memory at %luMB "
					"for crashkernel (System RAM: %luMB)\n",
					(unsigned long)(size >> 20),
					(unsigned long)(base >> 20),
					(unsigned long)(total >> 20));
			rsvd_region[*n].start =
				(unsigned long)__va(base);
			rsvd_region[*n].end =
				(unsigned long)__va(base + size);
			(*n)++;
			crashk_res.start = base;
			crashk_res.end = base + size - 1;
		}
	}
	efi_memmap_res.start = ia64_boot_param->efi_memmap;
	efi_memmap_res.end = efi_memmap_res.start +
		ia64_boot_param->efi_memmap_size;
	boot_param_res.start = __pa(ia64_boot_param);
	boot_param_res.end = boot_param_res.start +
		sizeof(*ia64_boot_param);
}
#else
static inline void __init setup_crashkernel(unsigned long total, int *n)
{}
#endif
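
/*
 * Example of the (standard) crashkernel= syntax consumed above: booting
 * with "crashkernel=256M@64M" reserves 256 MB at physical 64 MB, while
 * "crashkernel=256M" leaves the base at 0 so setup_crashkernel() picks
 * a hole itself via kdump_find_rsvd_region() after sorting the table.
 */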

/**
 * reserve_memory - setup reserved memory areas
 *
 * Setup the reserved memory areas set aside for the boot parameters,
 * initrd, etc.  There are currently %IA64_MAX_RSVD_REGIONS defined,
 * see arch/ia64/include/asm/meminit.h if you need to define more.
 */
void __init
reserve_memory (void)
{
	int n = 0;
	unsigned long total_memory;

	/*
	 * none of the entries in this table overlap
	 */
	rsvd_region[n].start = (unsigned long) ia64_boot_param;
	rsvd_region[n].end   = rsvd_region[n].start + sizeof(*ia64_boot_param);
	n++;

	rsvd_region[n].start = (unsigned long) __va(ia64_boot_param->efi_memmap);
	rsvd_region[n].end   = rsvd_region[n].start + ia64_boot_param->efi_memmap_size;
	n++;

	rsvd_region[n].start = (unsigned long) __va(ia64_boot_param->command_line);
	rsvd_region[n].end   = (rsvd_region[n].start
				+ strlen(__va(ia64_boot_param->command_line)) + 1);
	n++;

	rsvd_region[n].start = (unsigned long) ia64_imva((void *)KERNEL_START);
	rsvd_region[n].end   = (unsigned long) ia64_imva(_end);
	n++;

	n += paravirt_reserve_memory(&rsvd_region[n]);

#ifdef CONFIG_BLK_DEV_INITRD
	if (ia64_boot_param->initrd_start) {
		rsvd_region[n].start = (unsigned long)__va(ia64_boot_param->initrd_start);
		rsvd_region[n].end   = rsvd_region[n].start + ia64_boot_param->initrd_size;
		n++;
	}
#endif

#ifdef CONFIG_CRASH_DUMP
	if (reserve_elfcorehdr(&rsvd_region[n].start,
			       &rsvd_region[n].end) == 0)
		n++;
#endif

	total_memory = efi_memmap_init(&rsvd_region[n].start, &rsvd_region[n].end);
	n++;

	setup_crashkernel(total_memory, &n);

	/* end of memory marker */
	rsvd_region[n].start = ~0UL;
	rsvd_region[n].end   = ~0UL;
	n++;

	num_rsvd_regions = n;
	BUG_ON(IA64_MAX_RSVD_REGIONS + 1 < n);

	sort_regions(rsvd_region, num_rsvd_regions);
}


/**
 * find_initrd - get initrd parameters from the boot parameter structure
 *
 * Grab the initrd start and end from the boot parameter struct given us by
 * the boot loader.
 */
void __init
find_initrd (void)
{
#ifdef CONFIG_BLK_DEV_INITRD
	if (ia64_boot_param->initrd_start) {
		initrd_start = (unsigned long)__va(ia64_boot_param->initrd_start);
		initrd_end   = initrd_start + ia64_boot_param->initrd_size;

		printk(KERN_INFO "Initial ramdisk at: 0x%lx (%lu bytes)\n",
		       initrd_start, ia64_boot_param->initrd_size);
	}
#endif
}

static void __init
io_port_init (void)
{
	unsigned long phys_iobase;

	/*
	 * Set `iobase' based on the EFI memory map or, failing that, the
	 * value firmware left in ar.k0.
	 *
	 * Note that in ia32 mode, IN/OUT instructions use ar.k0 to compute
	 * the port's virtual address, so ia32_load_state() loads it with a
	 * user virtual address.  But in ia64 mode, glibc uses the
	 * *physical* address in ar.k0 to mmap the appropriate area from
	 * /dev/mem, and the inX()/outX() interfaces use MMIO.  In both
	 * cases, user-mode can only use the legacy 0-64K I/O port space.
	 *
	 * ar.k0 is not involved in kernel I/O port accesses, which can use
	 * any of the I/O port spaces and are done via MMIO using the
	 * virtual mmio_base from the appropriate io_space[].
	 */
	phys_iobase = efi_get_iobase();
	if (!phys_iobase) {
		phys_iobase = ia64_get_kr(IA64_KR_IO_BASE);
		printk(KERN_INFO "No I/O port range found in EFI memory map, "
			"falling back to AR.KR0 (0x%lx)\n", phys_iobase);
	}
	ia64_iobase = (unsigned long) ioremap(phys_iobase, 0);
	ia64_set_kr(IA64_KR_IO_BASE, __pa(ia64_iobase));

	/* setup legacy IO port space */
	io_space[0].mmio_base = ia64_iobase;
	io_space[0].sparse = 1;
	num_io_spaces = 1;
}
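
/*
 * For reference, a sketch of how a kernel inb(port) reaches memory once
 * this is set up (see the sparse-space encoding in asm/io.h for the real
 * thing): with io_space[0].sparse set, each 4-byte port granule lands on
 * its own 4 KiB page, roughly:
 *
 *	addr = io_space[0].mmio_base | ((port >> 2) << 12) | (port & 0xfff);
 *	val  = *(volatile u8 *)addr;
 */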

/**
 * early_console_setup - setup debugging console
 *
 * Consoles started here require little enough setup that we can start using
 * them very early in the boot process, either right after the machine
 * vector initialization, or even before if the drivers can detect their hw.
 *
 * Returns non-zero if a console couldn't be set up.
 */
static inline int __init
early_console_setup (char *cmdline)
{
	int earlycons = 0;

#ifdef CONFIG_SERIAL_SGI_L1_CONSOLE
	{
		extern int sn_serial_console_early_setup(void);
		if (!sn_serial_console_early_setup())
			earlycons++;
	}
#endif
#ifdef CONFIG_EFI_PCDP
	if (!efi_setup_pcdp_console(cmdline))
		earlycons++;
#endif
	if (!simcons_register())
		earlycons++;

	return (earlycons) ? 0 : -1;
}

static inline void
mark_bsp_online (void)
{
#ifdef CONFIG_SMP
	/* If we register an early console, allow CPU 0 to printk */
	cpu_set(smp_processor_id(), cpu_online_map);
#endif
}

static __initdata int nomca;
static __init int setup_nomca(char *s)
{
	nomca = 1;
	return 0;
}
early_param("nomca", setup_nomca);

/*
 * Note: elfcorehdr_addr is not just limited to vmcore.  It is also used by
 * is_kdump_kernel() to determine if we are booting after a panic.  Hence put
 * it under CONFIG_CRASH_DUMP and not CONFIG_PROC_VMCORE.
 */
#ifdef CONFIG_CRASH_DUMP
/* elfcorehdr= specifies the location of the elf core header
 * stored by the crashed kernel.
 */
static int __init parse_elfcorehdr(char *arg)
{
	if (!arg)
		return -EINVAL;

	elfcorehdr_addr = memparse(arg, &arg);
	return 0;
}
early_param("elfcorehdr", parse_elfcorehdr);
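
/*
 * Example: kexec-tools appends something like "elfcorehdr=<addr>" to the
 * kdump kernel's command line.  memparse() accepts K/M/G suffixes, so
 * "elfcorehdr=768M" parses to 0x30000000.
 */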

int __init reserve_elfcorehdr(unsigned long *start, unsigned long *end)
{
	unsigned long length;

	/* We get the address using the kernel command line,
	 * but the size is extracted from the EFI tables.
	 * Both address and size are required for the reservation
	 * to work properly.
	 */

	if (!is_vmcore_usable())
		return -EINVAL;

	if ((length = vmcore_find_descriptor_size(elfcorehdr_addr)) == 0) {
		vmcore_unusable();
		return -EINVAL;
	}

	*start = (unsigned long)__va(elfcorehdr_addr);
	*end = *start + length;
	return 0;
}

#endif /* CONFIG_CRASH_DUMP */

void __init
setup_arch (char **cmdline_p)
{
	unw_init();

	paravirt_arch_setup_early();

	ia64_patch_vtop((u64) __start___vtop_patchlist, (u64) __end___vtop_patchlist);

	*cmdline_p = __va(ia64_boot_param->command_line);
	strlcpy(boot_command_line, *cmdline_p, COMMAND_LINE_SIZE);

	efi_init();
	io_port_init();

#ifdef CONFIG_IA64_GENERIC
	/* The machvec needs to be parsed from the command line
	 * before parse_early_param() is called, to ensure that
	 * ia64_mv is initialised before any command line settings
	 * may cause console setup to occur.
	 */
	machvec_init_from_cmdline(*cmdline_p);
#endif

	parse_early_param();

	if (early_console_setup(*cmdline_p) == 0)
		mark_bsp_online();

#ifdef CONFIG_ACPI
	/* Initialize the ACPI boot-time table parser */
	acpi_table_init();
# ifdef CONFIG_ACPI_NUMA
	acpi_numa_init();
	per_cpu_scan_finalize((cpus_weight(early_cpu_possible_map) == 0 ?
		32 : cpus_weight(early_cpu_possible_map)),
		additional_cpus > 0 ? additional_cpus : 0);
# endif
#else
# ifdef CONFIG_SMP
	smp_build_cpu_map();	/* happens, e.g., with the Ski simulator */
# endif
#endif /* CONFIG_ACPI */

	find_memory();

	/* process SAL system table: */
	ia64_sal_init(__va(efi.sal_systab));

#ifdef CONFIG_ITANIUM
	ia64_patch_rse((u64) __start___rse_patchlist, (u64) __end___rse_patchlist);
#else
	{
		u64 num_phys_stacked;

		if (ia64_pal_rse_info(&num_phys_stacked, 0) == 0 && num_phys_stacked > 96)
			ia64_patch_rse((u64) __start___rse_patchlist, (u64) __end___rse_patchlist);
	}
#endif

#ifdef CONFIG_SMP
	cpu_physical_id(0) = hard_smp_processor_id();
#endif

	cpu_init();	/* initialize the bootstrap CPU */
	mmu_context_init();	/* initialize context_id bitmap */

#ifdef CONFIG_ACPI
	acpi_boot_init();
#endif

	paravirt_banner();
	paravirt_arch_setup_console(cmdline_p);

#ifdef CONFIG_VT
	if (!conswitchp) {
# if defined(CONFIG_DUMMY_CONSOLE)
		conswitchp = &dummy_con;
# endif
# if defined(CONFIG_VGA_CONSOLE)
		/*
		 * Non-legacy systems may route the legacy VGA MMIO range to
		 * system memory.  vga_con probes the MMIO hole, so memory
		 * looks like a VGA device to it.  The EFI memory map can tell
		 * us if it's memory so we can avoid this problem.
		 */
		if (efi_mem_type(0xA0000) != EFI_CONVENTIONAL_MEMORY)
			conswitchp = &vga_con;
# endif
	}
#endif

	/* enable IA-64 Machine Check Abort Handling unless disabled */
	if (paravirt_arch_setup_nomca())
		nomca = 1;
	if (!nomca)
		ia64_mca_init();

	platform_setup(cmdline_p);
#ifndef CONFIG_IA64_HP_SIM
	check_sal_cache_flush();
#endif
	paging_init();
}

/*
 * Display cpu info for all CPUs.
 */
static int
show_cpuinfo (struct seq_file *m, void *v)
{
#ifdef CONFIG_SMP
#	define lpj	c->loops_per_jiffy
#	define cpunum	c->cpu
#else
#	define lpj	loops_per_jiffy
#	define cpunum	0
#endif
	static struct {
		unsigned long mask;
		const char *feature_name;
	} feature_bits[] = {
		{ 1UL << 0, "branchlong" },
		{ 1UL << 1, "spontaneous deferral" },
		{ 1UL << 2, "16-byte atomic ops" }
	};
	char features[128], *cp, *sep;
	struct cpuinfo_ia64 *c = v;
	unsigned long mask;
	unsigned long proc_freq;
	int i, size;

	mask = c->features;

	/* build the feature string: */
	memcpy(features, "standard", 9);
	cp = features;
	size = sizeof(features);
	sep = "";
	for (i = 0; i < ARRAY_SIZE(feature_bits) && size > 1; ++i) {
		if (mask & feature_bits[i].mask) {
			cp += snprintf(cp, size, "%s%s", sep,
				       feature_bits[i].feature_name);
			sep = ", ";
			mask &= ~feature_bits[i].mask;
			size = sizeof(features) - (cp - features);
		}
	}
	if (mask && size > 1) {
		/* print unknown features as a hex value */
		snprintf(cp, size, "%s0x%lx", sep, mask);
	}

	proc_freq = cpufreq_quick_get(cpunum);
	if (!proc_freq)
		proc_freq = c->proc_freq / 1000;

	seq_printf(m,
		   "processor  : %d\n"
		   "vendor     : %s\n"
		   "arch       : IA-64\n"
		   "family     : %u\n"
		   "model      : %u\n"
		   "model name : %s\n"
		   "revision   : %u\n"
		   "archrev    : %u\n"
		   "features   : %s\n"
		   "cpu number : %lu\n"
		   "cpu regs   : %u\n"
		   "cpu MHz    : %lu.%03lu\n"
		   "itc MHz    : %lu.%06lu\n"
		   "BogoMIPS   : %lu.%02lu\n",
		   cpunum, c->vendor, c->family, c->model,
		   c->model_name, c->revision, c->archrev,
		   features, c->ppn, c->number,
		   proc_freq / 1000, proc_freq % 1000,
		   c->itc_freq / 1000000, c->itc_freq % 1000000,
		   lpj*HZ/500000, (lpj*HZ/5000) % 100);
#ifdef CONFIG_SMP
	seq_printf(m, "siblings   : %u\n", cpus_weight(cpu_core_map[cpunum]));
	if (c->socket_id != -1)
		seq_printf(m, "physical id: %u\n", c->socket_id);
	if (c->threads_per_core > 1 || c->cores_per_socket > 1)
		seq_printf(m,
			   "core id    : %u\n"
			   "thread id  : %u\n",
			   c->core_id, c->thread_id);
#endif
	seq_printf(m, "\n");

	return 0;
}
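
/*
 * Illustrative /proc/cpuinfo output produced by the format above on an
 * SMP kernel (all values made up):
 *
 *	processor  : 0
 *	vendor     : GenuineIntel
 *	arch       : IA-64
 *	family     : 31
 *	model      : 1
 *	model name : Madison
 *	revision   : 5
 *	archrev    : 0
 *	features   : branchlong
 *	cpu number : 0
 *	cpu regs   : 4
 *	cpu MHz    : 1500.000
 *	itc MHz    : 1500.000000
 *	BogoMIPS   : 2245.32
 *	siblings   : 1
 */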

static void *
c_start (struct seq_file *m, loff_t *pos)
{
#ifdef CONFIG_SMP
	while (*pos < NR_CPUS && !cpu_isset(*pos, cpu_online_map))
		++*pos;
#endif
	return *pos < NR_CPUS ? cpu_data(*pos) : NULL;
}

static void *
c_next (struct seq_file *m, void *v, loff_t *pos)
{
	++*pos;
	return c_start(m, pos);
}

static void
c_stop (struct seq_file *m, void *v)
{
}

const struct seq_operations cpuinfo_op = {
	.start =	c_start,
	.next =		c_next,
	.stop =		c_stop,
	.show =		show_cpuinfo
};

#define MAX_BRANDS	8
static char brandname[MAX_BRANDS][128];

static char * __cpuinit
get_model_name(__u8 family, __u8 model)
{
	static int overflow;
	char brand[128];
	int i;

	memcpy(brand, "Unknown", 8);
	if (ia64_pal_get_brand_info(brand)) {
		if (family == 0x7)
			memcpy(brand, "Merced", 7);
		else if (family == 0x1f) switch (model) {
			case 0: memcpy(brand, "McKinley", 9); break;
			case 1: memcpy(brand, "Madison", 8); break;
			case 2: memcpy(brand, "Madison up to 9M cache", 23); break;
		}
	}
	for (i = 0; i < MAX_BRANDS; i++)
		if (strcmp(brandname[i], brand) == 0)
			return brandname[i];
	for (i = 0; i < MAX_BRANDS; i++)
		if (brandname[i][0] == '\0')
			return strcpy(brandname[i], brand);
	if (overflow++ == 0)
		printk(KERN_ERR
		       "%s: Table overflow. Some processor model information will be missing\n",
		       __func__);
	return "Unknown";
}

static void __cpuinit
identify_cpu (struct cpuinfo_ia64 *c)
{
	union {
		unsigned long bits[5];
		struct {
			/* id 0 & 1: */
			char vendor[16];

			/* id 2 */
			u64 ppn;		/* processor serial number */

			/* id 3: */
			unsigned number		:  8;
			unsigned revision	:  8;
			unsigned model		:  8;
			unsigned family		:  8;
			unsigned archrev	:  8;
			unsigned reserved	: 24;

			/* id 4: */
			u64 features;
		} field;
	} cpuid;
	pal_vm_info_1_u_t vm1;
	pal_vm_info_2_u_t vm2;
	pal_status_t status;
	unsigned long impl_va_msb = 50, phys_addr_size = 44;	/* Itanium defaults */
	int i;

	for (i = 0; i < 5; ++i)
		cpuid.bits[i] = ia64_get_cpuid(i);

	memcpy(c->vendor, cpuid.field.vendor, 16);
#ifdef CONFIG_SMP
	c->cpu = smp_processor_id();

	/* The default values below will be overwritten by identify_siblings()
	 * for Multi-Threading/Multi-Core capable CPUs.
	 */
	c->threads_per_core = c->cores_per_socket = c->num_log = 1;
	c->socket_id = -1;

	identify_siblings(c);

	if (c->threads_per_core > smp_num_siblings)
		smp_num_siblings = c->threads_per_core;
#endif
	c->ppn = cpuid.field.ppn;
	c->number = cpuid.field.number;
	c->revision = cpuid.field.revision;
	c->model = cpuid.field.model;
	c->family = cpuid.field.family;
	c->archrev = cpuid.field.archrev;
	c->features = cpuid.field.features;
	c->model_name = get_model_name(c->family, c->model);

	status = ia64_pal_vm_summary(&vm1, &vm2);
	if (status == PAL_STATUS_SUCCESS) {
		impl_va_msb = vm2.pal_vm_info_2_s.impl_va_msb;
		phys_addr_size = vm1.pal_vm_info_1_s.phys_add_size;
	}
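	/*
	 * Worked example: with the Itanium defaults impl_va_msb = 50 and
	 * phys_addr_size = 44, the masks below come out as
	 *
	 *	unimpl_va_mask = ~((7UL << 61) | ((1UL << 51) - 1)) = bits 51..60
	 *	unimpl_pa_mask = ~((1UL << 63) | ((1UL << 44) - 1)) = bits 44..62
	 *
	 * so ANDing an address with a mask yields non-zero iff the address
	 * uses unimplemented bits (the region bits 61..63 of virtual
	 * addresses and bit 63 of physical addresses are legal and
	 * therefore excluded).
	 */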
	c->unimpl_va_mask = ~((7L<<61) | ((1L << (impl_va_msb + 1)) - 1));
	c->unimpl_pa_mask = ~((1L<<63) | ((1L << phys_addr_size) - 1));
}

void __init
setup_per_cpu_areas (void)
{
	/* start_kernel() requires this... */
#ifdef CONFIG_ACPI_HOTPLUG_CPU
	prefill_possible_map();
#endif
}

/*
 * Calculate the max. cache line size.
 *
 * In addition, the minimum of the i-cache stride sizes is calculated for
 * "flush_icache_range()".
 */
static void __cpuinit
get_max_cacheline_size (void)
{
	unsigned long line_size, max = 1;
	u64 l, levels, unique_caches;
	pal_cache_config_info_t cci;
	s64 status;

	status = ia64_pal_cache_summary(&levels, &unique_caches);
	if (status != 0) {
		printk(KERN_ERR "%s: ia64_pal_cache_summary() failed (status=%ld)\n",
		       __func__, status);
		max = SMP_CACHE_BYTES;
		/* Safest setup for "flush_icache_range()" */
		ia64_i_cache_stride_shift = I_CACHE_STRIDE_SHIFT;
		goto out;
	}

	for (l = 0; l < levels; ++l) {
		status = ia64_pal_cache_config_info(l, /* cache_type (data_or_unified) = */ 2,
						    &cci);
		if (status != 0) {
			printk(KERN_ERR
			       "%s: ia64_pal_cache_config_info(l=%lu, 2) failed (status=%ld)\n",
			       __func__, l, status);
			max = SMP_CACHE_BYTES;
			/* The safest setup for "flush_icache_range()" */
			cci.pcci_stride = I_CACHE_STRIDE_SHIFT;
			cci.pcci_unified = 1;
		}
		line_size = 1 << cci.pcci_line_size;
		if (line_size > max)
			max = line_size;
		if (!cci.pcci_unified) {
			status = ia64_pal_cache_config_info(l,
						    /* cache_type (instruction) = */ 1,
						    &cci);
			if (status != 0) {
				printk(KERN_ERR
				"%s: ia64_pal_cache_config_info(l=%lu, 1) failed (status=%ld)\n",
					__func__, l, status);
				/* The safest setup for "flush_icache_range()" */
				cci.pcci_stride = I_CACHE_STRIDE_SHIFT;
			}
		}
		if (cci.pcci_stride < ia64_i_cache_stride_shift)
			ia64_i_cache_stride_shift = cci.pcci_stride;
	}
out:
	if (max > ia64_max_cacheline_size)
		ia64_max_cacheline_size = max;
}

/*
 * cpu_init() initializes state that is per-CPU.  This function acts
 * as a 'CPU state barrier', nothing should get across.
 */
void __cpuinit
cpu_init (void)
{
	extern void __cpuinit ia64_mmu_init (void *);
	static unsigned long max_num_phys_stacked = IA64_NUM_PHYS_STACK_REG;
	unsigned long num_phys_stacked;
	pal_vm_info_2_u_t vmi;
	unsigned int max_ctx;
	struct cpuinfo_ia64 *cpu_info;
	void *cpu_data;

	cpu_data = per_cpu_init();
#ifdef CONFIG_SMP
	/*
	 * insert the boot cpu into the sibling and core maps
	 * (must be done after the per_cpu area is set up)
	 */
	if (smp_processor_id() == 0) {
		cpu_set(0, per_cpu(cpu_sibling_map, 0));
		cpu_set(0, cpu_core_map[0]);
	} else {
		/*
		 * Set ar.k3 so that assembly code in MCA handler can compute
		 * physical addresses of per cpu variables with a simple:
		 *   phys = ar.k3 + &per_cpu_var
		 * and the alt-dtlb-miss handler can set per-cpu mapping into
		 * the TLB when needed.  head.S already did this for cpu0.
		 */
		ia64_set_kr(IA64_KR_PER_CPU_DATA,
			    ia64_tpa(cpu_data) - (long) __per_cpu_start);
	}
#endif

	get_max_cacheline_size();

	/*
	 * We can't pass "local_cpu_data" to identify_cpu() because we haven't called
	 * ia64_mmu_init() yet.  And we can't call ia64_mmu_init() first because it
	 * depends on the data returned by identify_cpu().  We break the dependency by
	 * accessing cpu_data() through the canonical per-CPU address.
	 */
	cpu_info = cpu_data + ((char *) &__ia64_per_cpu_var(cpu_info) - __per_cpu_start);
	identify_cpu(cpu_info);

#ifdef CONFIG_MCKINLEY
	{
#		define FEATURE_SET 16
		struct ia64_pal_retval iprv;

		if (cpu_info->family == 0x1f) {
			PAL_CALL_PHYS(iprv, PAL_PROC_GET_FEATURES, 0, FEATURE_SET, 0);
			if ((iprv.status == 0) && (iprv.v0 & 0x80) && (iprv.v2 & 0x80))
				PAL_CALL_PHYS(iprv, PAL_PROC_SET_FEATURES,
				              (iprv.v1 | 0x80), FEATURE_SET, 0);
		}
	}
#endif

	/* Clear the stack memory reserved for pt_regs: */
	memset(task_pt_regs(current), 0, sizeof(struct pt_regs));

	ia64_set_kr(IA64_KR_FPU_OWNER, 0);

	/*
	 * Initialize the page-table base register to a global
	 * directory with all zeroes.  This ensures that we can handle
	 * TLB-misses to user address-space even before we have created
	 * the first user address-space.  This may happen, e.g., due to
	 * aggressive use of lfetch.fault.
	 */
	ia64_set_kr(IA64_KR_PT_BASE, __pa(ia64_imva(empty_zero_page)));

	/*
	 * Initialize the default control register to defer speculative faults except
	 * for those arising from TLB misses, which are not deferred.  The
	 * kernel MUST NOT depend on a particular setting of these bits (in other words,
	 * the kernel must have recovery code for all speculative accesses).  Turn on
	 * dcr.lc as per recommendation by the architecture team.  Most IA-32 apps
	 * shouldn't be affected by this (moral: keep your ia32 locks aligned and you'll
	 * be fine).
	 */
	ia64_setreg(_IA64_REG_CR_DCR,  (  IA64_DCR_DP | IA64_DCR_DK | IA64_DCR_DX | IA64_DCR_DR
					| IA64_DCR_DA | IA64_DCR_DD | IA64_DCR_LC));
	atomic_inc(&init_mm.mm_count);
	current->active_mm = &init_mm;
	if (current->mm)
		BUG();

	ia64_mmu_init(ia64_imva(cpu_data));
	ia64_mca_cpu_init(ia64_imva(cpu_data));

#ifdef CONFIG_IA32_SUPPORT
	ia32_cpu_init();
#endif

	/* Clear the ITC to eliminate sched_clock() overflows in human time.  */
	ia64_set_itc(0);

	/* disable all local interrupt sources: */
	ia64_set_itv(1 << 16);
	ia64_set_lrr0(1 << 16);
	ia64_set_lrr1(1 << 16);
	ia64_setreg(_IA64_REG_CR_PMV, 1 << 16);
	ia64_setreg(_IA64_REG_CR_CMCV, 1 << 16);

	/* clear TPR & XTP to enable all interrupt classes: */
	ia64_setreg(_IA64_REG_CR_TPR, 0);

	/* Clear any pending interrupts left by SAL/EFI */
	while (ia64_get_ivr() != IA64_SPURIOUS_INT_VECTOR)
		ia64_eoi();

#ifdef CONFIG_SMP
	normal_xtp();
#endif

	/* set ia64_ctx.max_ctx to the maximum context ID supported by all CPUs: */
	if (ia64_pal_vm_summary(NULL, &vmi) == 0) {
		max_ctx = (1U << (vmi.pal_vm_info_2_s.rid_size - 3)) - 1;
		setup_ptcg_sem(vmi.pal_vm_info_2_s.max_purges, NPTCG_FROM_PAL);
	} else {
		printk(KERN_WARNING "cpu_init: PAL VM summary failed, assuming 18 RID bits\n");
		max_ctx = (1U << 15) - 1;	/* use architected minimum */
	}
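	/*
	 * Example: rid_size = 24 implemented RID bits yields
	 * max_ctx = (1 << 21) - 1, because three bits of each region ID
	 * are consumed by the region number.  The cmpxchg loop below only
	 * ever lowers ia64_ctx.max_ctx, so racing CPUs converge on the
	 * smallest value any of them supports.
	 */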
	while (max_ctx < ia64_ctx.max_ctx) {
		unsigned int old = ia64_ctx.max_ctx;
		if (cmpxchg(&ia64_ctx.max_ctx, old, max_ctx) == old)
			break;
	}

	if (ia64_pal_rse_info(&num_phys_stacked, NULL) != 0) {
		printk(KERN_WARNING "cpu_init: PAL RSE info failed; assuming 96 physical "
		       "stacked regs\n");
		num_phys_stacked = 96;
	}
	/* size of physical stacked register partition plus 8 bytes: */
	if (num_phys_stacked > max_num_phys_stacked) {
		ia64_patch_phys_stack_reg(num_phys_stacked*8 + 8);
		max_num_phys_stacked = num_phys_stacked;
	}
	platform_cpu_init();
	pm_idle = default_idle;
}

void __init
check_bugs (void)
{
	ia64_patch_mckinley_e9((unsigned long) __start___mckinley_e9_bundles,
			       (unsigned long) __end___mckinley_e9_bundles);
}

static int __init run_dmi_scan(void)
{
	dmi_scan_machine();
	return 0;
}
core_initcall(run_dmi_scan);