xref: /openbmc/linux/arch/ia64/kernel/setup.c (revision fa809d70)
// SPDX-License-Identifier: GPL-2.0
/*
 * Architecture-specific setup.
 *
 * Copyright (C) 1998-2001, 2003-2004 Hewlett-Packard Co
 *	David Mosberger-Tang <davidm@hpl.hp.com>
 *	Stephane Eranian <eranian@hpl.hp.com>
 * Copyright (C) 2000, 2004 Intel Corp
 * 	Rohit Seth <rohit.seth@intel.com>
 * 	Suresh Siddha <suresh.b.siddha@intel.com>
 * 	Gordon Jin <gordon.jin@intel.com>
 * Copyright (C) 1999 VA Linux Systems
 * Copyright (C) 1999 Walt Drummond <drummond@valinux.com>
 *
 * 12/26/04 S.Siddha, G.Jin, R.Seth
 *			Add multi-threading and multi-core detection
 * 11/12/01 D.Mosberger Convert get_cpuinfo() to seq_file based show_cpuinfo().
 * 04/04/00 D.Mosberger renamed cpu_initialized to cpu_online_map
 * 03/31/00 R.Seth	cpu_initialized and current->processor fixes
 * 02/04/00 D.Mosberger	some more get_cpuinfo fixes...
 * 02/01/00 R.Seth	fixed get_cpuinfo for SMP
 * 01/07/99 S.Eranian	added the support for command line argument
 * 06/24/99 W.Drummond	added boot_cpu_data.
 * 05/28/05 Z. Menyhart	Dynamic stride size for "flush_icache_range()"
 */
#include <linux/module.h>
#include <linux/init.h>

#include <linux/acpi.h>
#include <linux/console.h>
#include <linux/delay.h>
#include <linux/cpu.h>
#include <linux/kdev_t.h>
#include <linux/kernel.h>
#include <linux/memblock.h>
#include <linux/reboot.h>
#include <linux/sched/mm.h>
#include <linux/sched/clock.h>
#include <linux/sched/task_stack.h>
#include <linux/seq_file.h>
#include <linux/string.h>
#include <linux/threads.h>
#include <linux/screen_info.h>
#include <linux/dmi.h>
#include <linux/root_dev.h>
#include <linux/serial.h>
#include <linux/serial_core.h>
#include <linux/efi.h>
#include <linux/initrd.h>
#include <linux/pm.h>
#include <linux/cpufreq.h>
#include <linux/kexec.h>
#include <linux/crash_dump.h>

#include <asm/machvec.h>
#include <asm/mca.h>
#include <asm/meminit.h>
#include <asm/page.h>
#include <asm/patch.h>
#include <asm/pgtable.h>
#include <asm/processor.h>
#include <asm/sal.h>
#include <asm/sections.h>
#include <asm/setup.h>
#include <asm/smp.h>
#include <asm/tlbflush.h>
#include <asm/unistd.h>

#if defined(CONFIG_SMP) && (IA64_CPU_SIZE > PAGE_SIZE)
# error "struct cpuinfo_ia64 too big!"
#endif

#ifdef CONFIG_SMP
unsigned long __per_cpu_offset[NR_CPUS];
EXPORT_SYMBOL(__per_cpu_offset);
#endif

DEFINE_PER_CPU(struct cpuinfo_ia64, ia64_cpu_info);
EXPORT_SYMBOL(ia64_cpu_info);
DEFINE_PER_CPU(unsigned long, local_per_cpu_offset);
#ifdef CONFIG_SMP
EXPORT_SYMBOL(local_per_cpu_offset);
#endif
unsigned long ia64_cycles_per_usec;
struct ia64_boot_param *ia64_boot_param;
struct screen_info screen_info;
unsigned long vga_console_iobase;
unsigned long vga_console_membase;

static struct resource data_resource = {
	.name	= "Kernel data",
	.flags	= IORESOURCE_BUSY | IORESOURCE_SYSTEM_RAM
};

static struct resource code_resource = {
	.name	= "Kernel code",
	.flags	= IORESOURCE_BUSY | IORESOURCE_SYSTEM_RAM
};

static struct resource bss_resource = {
	.name	= "Kernel bss",
	.flags	= IORESOURCE_BUSY | IORESOURCE_SYSTEM_RAM
};

unsigned long ia64_max_cacheline_size;

unsigned long ia64_iobase;	/* virtual address for I/O accesses */
EXPORT_SYMBOL(ia64_iobase);
struct io_space io_space[MAX_IO_SPACES];
EXPORT_SYMBOL(io_space);
unsigned int num_io_spaces;

/*
 * "flush_icache_range()" needs to know what processor-dependent stride size to
 * use when it makes i-cache(s) coherent with d-caches.
 */
#define	I_CACHE_STRIDE_SHIFT	5	/* Safest way to go: 32 bytes by 32 bytes */
unsigned long ia64_i_cache_stride_shift = ~0;
/*
 * "clflush_cache_range()" needs to know what processor-dependent stride size to
 * use when it flushes cache lines including both d-cache and i-cache.
 */
/* Safest way to go: 32 bytes by 32 bytes */
#define	CACHE_STRIDE_SHIFT	5
unsigned long ia64_cache_stride_shift = ~0;
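
/*
 * Illustrative sketch only (the real flush_icache_range() is assembly in
 * arch/ia64/lib/flush.S): a stride shift of s lets the coherence loop step
 * 1 << s bytes at a time.  In rough C pseudocode:
 *
 *	void flush_icache_range_sketch(unsigned long start, unsigned long end)
 *	{
 *		unsigned long stride = 1UL << ia64_i_cache_stride_shift;
 *		unsigned long addr;
 *
 *		for (addr = start & ~(stride - 1); addr < end; addr += stride)
 *			asm volatile ("fc.i %0" :: "r" (addr) : "memory");
 *		ia64_sync_i();
 *		ia64_srlz_i();
 *	}
 *
 * The fallback shift of 5 steps every 32 bytes, which is safe for any cache
 * line size of 32 bytes or more.
 */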

/*
 * We use a special marker for the end of memory and it uses the extra (+1) slot
 */
struct rsvd_region rsvd_region[IA64_MAX_RSVD_REGIONS + 1] __initdata;
int num_rsvd_regions __initdata;


/*
 * Filter incoming memory segments based on the primitive map created from the boot
 * parameters. Segments contained in the map are removed from the memory ranges. A
 * caller-specified function is called with the memory ranges that remain after filtering.
 * This routine does not assume the incoming segments are sorted.
 */
int __init
filter_rsvd_memory (u64 start, u64 end, void *arg)
{
	u64 range_start, range_end, prev_start;
	void (*func)(unsigned long, unsigned long, int);
	int i;

#if IGNORE_PFN0
	if (start == PAGE_OFFSET) {
		printk(KERN_WARNING "warning: skipping physical page 0\n");
		start += PAGE_SIZE;
		if (start >= end)
			return 0;
	}
#endif
	/*
	 * lowest possible address (the walker uses virtual addresses)
	 */
	prev_start = PAGE_OFFSET;
	func = arg;

	for (i = 0; i < num_rsvd_regions; ++i) {
		range_start = max(start, prev_start);
		range_end   = min(end, rsvd_region[i].start);

		if (range_start < range_end)
			call_pernode_memory(__pa(range_start), range_end - range_start, func);

		/* nothing more available in this segment */
		if (range_end == end)
			return 0;

		prev_start = rsvd_region[i].end;
	}
	/* end of memory marker allows full processing inside loop body */
	return 0;
}
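
/*
 * Worked example with made-up addresses: for a segment [0x1000, 0x9000)
 * and sorted reserved regions [0x2000, 0x3000) and [0x5000, 0x6000),
 * func is called for [0x1000, 0x2000), [0x3000, 0x5000) and
 * [0x6000, 0x9000).  The ~0UL end-of-memory marker in rsvd_region[]
 * guarantees the tail range is emitted inside the loop, so no extra
 * pass is needed after it.
 */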

/*
 * Similar to "filter_rsvd_memory()", but the reserved memory ranges
 * are not filtered out.
 */
int __init
filter_memory(u64 start, u64 end, void *arg)
{
	void (*func)(unsigned long, unsigned long, int);

#if IGNORE_PFN0
	if (start == PAGE_OFFSET) {
		printk(KERN_WARNING "warning: skipping physical page 0\n");
		start += PAGE_SIZE;
		if (start >= end)
			return 0;
	}
#endif
	func = arg;
	if (start < end)
		call_pernode_memory(__pa(start), end - start, func);
	return 0;
}

static void __init
sort_regions (struct rsvd_region *rsvd_region, int max)
{
	int j;

	/* simple bubble sorting */
	while (max--) {
		for (j = 0; j < max; ++j) {
			if (rsvd_region[j].start > rsvd_region[j+1].start) {
				struct rsvd_region tmp;
				tmp = rsvd_region[j];
				rsvd_region[j] = rsvd_region[j + 1];
				rsvd_region[j + 1] = tmp;
			}
		}
	}
}

/* merge overlaps */
static int __init
merge_regions (struct rsvd_region *rsvd_region, int max)
{
	int i;

	for (i = 1; i < max; ++i) {
		if (rsvd_region[i].start >= rsvd_region[i-1].end)
			continue;
		if (rsvd_region[i].end > rsvd_region[i-1].end)
			rsvd_region[i-1].end = rsvd_region[i].end;
		--max;
		memmove(&rsvd_region[i], &rsvd_region[i+1],
			(max - i) * sizeof(struct rsvd_region));
		--i;	/* re-check the entry just moved into slot i */
	}
	return max;
}
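
/*
 * Example with made-up values: sorting { [0x40,0x50), [0x10,0x30),
 * [0x20,0x60) } by start address and then merging folds the chain of
 * overlaps into the single region [0x10,0x60), and merge_regions()
 * returns the new count, 1.
 */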

/*
 * Request address space for all standard resources
 */
static int __init register_memory(void)
{
	code_resource.start = ia64_tpa(_text);
	code_resource.end   = ia64_tpa(_etext) - 1;
	data_resource.start = ia64_tpa(_etext);
	data_resource.end   = ia64_tpa(_edata) - 1;
	bss_resource.start  = ia64_tpa(__bss_start);
	bss_resource.end    = ia64_tpa(_end) - 1;
	efi_initialize_iomem_resources(&code_resource, &data_resource,
			&bss_resource);

	return 0;
}

__initcall(register_memory);
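
/*
 * After this initcall the kernel sections appear nested under their
 * covering RAM ranges in /proc/iomem, along the lines of (addresses
 * made up for illustration):
 *
 *	04000000-0fffffff : System RAM
 *	  04400000-049fffff : Kernel code
 *	  04a00000-04cfffff : Kernel data
 *	  04d00000-04efffff : Kernel bss
 */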


#ifdef CONFIG_KEXEC

/*
 * This function checks if the reserved crashkernel is allowed on the specific
 * IA64 machine flavour. Machines without an IO TLB use swiotlb and require
 * some memory below 4 GB (i.e. in 32 bit area), see the implementation of
 * lib/swiotlb.c. The hpzx1 architecture has an IO TLB but cannot use that
 * in kdump case. See the comment in sba_init() in sba_iommu.c.
 *
 * So, the only machvec that really supports loading the kdump kernel
 * over 4 GB is "uv".
 */
static int __init check_crashkernel_memory(unsigned long pbase, size_t size)
{
	if (ia64_platform_is("uv"))
		return 1;
	else
		return pbase < (1UL << 32);
}

static void __init setup_crashkernel(unsigned long total, int *n)
{
	unsigned long long base = 0, size = 0;
	int ret;

	ret = parse_crashkernel(boot_command_line, total,
			&size, &base);
	if (ret == 0 && size > 0) {
		if (!base) {
			sort_regions(rsvd_region, *n);
			*n = merge_regions(rsvd_region, *n);
			base = kdump_find_rsvd_region(size,
					rsvd_region, *n);
		}

		if (!check_crashkernel_memory(base, size)) {
			pr_warning("crashkernel: There would be kdump memory "
				"at %ld GB but this is unusable because it "
				"must\nbe below 4 GB. Change the memory "
				"configuration of the machine.\n",
				(unsigned long)(base >> 30));
			return;
		}

		if (base != ~0UL) {
			printk(KERN_INFO "Reserving %ldMB of memory at %ldMB "
					"for crashkernel (System RAM: %ldMB)\n",
					(unsigned long)(size >> 20),
					(unsigned long)(base >> 20),
					(unsigned long)(total >> 20));
			rsvd_region[*n].start =
				(unsigned long)__va(base);
			rsvd_region[*n].end =
				(unsigned long)__va(base + size);
			(*n)++;
			crashk_res.start = base;
			crashk_res.end = base + size - 1;
		}
	}
	efi_memmap_res.start = ia64_boot_param->efi_memmap;
	efi_memmap_res.end = efi_memmap_res.start +
		ia64_boot_param->efi_memmap_size;
	boot_param_res.start = __pa(ia64_boot_param);
	boot_param_res.end = boot_param_res.start +
		sizeof(*ia64_boot_param);
}
#else
static inline void __init setup_crashkernel(unsigned long total, int *n)
{}
#endif
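
/*
 * Typical usage, assuming a machine with usable memory below 4 GB:
 * booting with "crashkernel=256M" makes the code above search for a
 * base via kdump_find_rsvd_region(), while an explicit base such as
 * "crashkernel=256M@1024M" skips the search, so that only the 4 GB
 * check still applies.
 */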

/**
 * reserve_memory - set up reserved memory areas
 *
 * Set up the reserved memory areas set aside for the boot parameters,
 * initrd, etc.  There are currently %IA64_MAX_RSVD_REGIONS defined,
 * see arch/ia64/include/asm/meminit.h if you need to define more.
 */
void __init
reserve_memory (void)
{
	int n = 0;
	unsigned long total_memory;

	/*
	 * none of the entries in this table overlap
	 */
	rsvd_region[n].start = (unsigned long) ia64_boot_param;
	rsvd_region[n].end   = rsvd_region[n].start + sizeof(*ia64_boot_param);
	n++;

	rsvd_region[n].start = (unsigned long) __va(ia64_boot_param->efi_memmap);
	rsvd_region[n].end   = rsvd_region[n].start + ia64_boot_param->efi_memmap_size;
	n++;

	rsvd_region[n].start = (unsigned long) __va(ia64_boot_param->command_line);
	rsvd_region[n].end   = (rsvd_region[n].start
				+ strlen(__va(ia64_boot_param->command_line)) + 1);
	n++;

	rsvd_region[n].start = (unsigned long) ia64_imva((void *)KERNEL_START);
	rsvd_region[n].end   = (unsigned long) ia64_imva(_end);
	n++;

#ifdef CONFIG_BLK_DEV_INITRD
	if (ia64_boot_param->initrd_start) {
		rsvd_region[n].start = (unsigned long)__va(ia64_boot_param->initrd_start);
		rsvd_region[n].end   = rsvd_region[n].start + ia64_boot_param->initrd_size;
		n++;
	}
#endif

#ifdef CONFIG_CRASH_DUMP
	if (reserve_elfcorehdr(&rsvd_region[n].start,
			       &rsvd_region[n].end) == 0)
		n++;
#endif

	total_memory = efi_memmap_init(&rsvd_region[n].start, &rsvd_region[n].end);
	n++;

	setup_crashkernel(total_memory, &n);

	/* end of memory marker */
	rsvd_region[n].start = ~0UL;
	rsvd_region[n].end   = ~0UL;
	n++;

	num_rsvd_regions = n;
	BUG_ON(IA64_MAX_RSVD_REGIONS + 1 < n);

	sort_regions(rsvd_region, num_rsvd_regions);
	num_rsvd_regions = merge_regions(rsvd_region, num_rsvd_regions);

	/* reserve all regions except the end of memory marker with memblock */
	for (n = 0; n < num_rsvd_regions - 1; n++) {
		struct rsvd_region *region = &rsvd_region[n];
		phys_addr_t addr = __pa(region->start);
		phys_addr_t size = region->end - region->start;

		memblock_reserve(addr, size);
	}
}

/**
 * find_initrd - get initrd parameters from the boot parameter structure
 *
 * Grab the initrd start and end from the boot parameter struct given us by
 * the boot loader.
 */
void __init
find_initrd (void)
{
#ifdef CONFIG_BLK_DEV_INITRD
	if (ia64_boot_param->initrd_start) {
		initrd_start = (unsigned long)__va(ia64_boot_param->initrd_start);
		initrd_end   = initrd_start + ia64_boot_param->initrd_size;

		printk(KERN_INFO "Initial ramdisk at: 0x%lx (%llu bytes)\n",
		       initrd_start, ia64_boot_param->initrd_size);
	}
#endif
}

static void __init
io_port_init (void)
{
	unsigned long phys_iobase;

	/*
	 * Set `iobase' based on the EFI memory map or, failing that, the
	 * value firmware left in ar.k0.
	 *
	 * Note that in ia32 mode, IN/OUT instructions use ar.k0 to compute
	 * the port's virtual address, so ia32_load_state() loads it with a
	 * user virtual address.  But in ia64 mode, glibc uses the
	 * *physical* address in ar.k0 to mmap the appropriate area from
	 * /dev/mem, and the inX()/outX() interfaces use MMIO.  In both
	 * cases, user-mode can only use the legacy 0-64K I/O port space.
	 *
	 * ar.k0 is not involved in kernel I/O port accesses, which can use
	 * any of the I/O port spaces and are done via MMIO using the
	 * virtual mmio_base from the appropriate io_space[].
	 */
	phys_iobase = efi_get_iobase();
	if (!phys_iobase) {
		phys_iobase = ia64_get_kr(IA64_KR_IO_BASE);
		printk(KERN_INFO "No I/O port range found in EFI memory map, "
			"falling back to AR.KR0 (0x%lx)\n", phys_iobase);
	}
	ia64_iobase = (unsigned long) ioremap(phys_iobase, 0);
	ia64_set_kr(IA64_KR_IO_BASE, __pa(ia64_iobase));

	/* setup legacy IO port space */
	io_space[0].mmio_base = ia64_iobase;
	io_space[0].sparse = 1;
	num_io_spaces = 1;
}
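
/*
 * Sketch of how a port number is later turned into an MMIO address
 * using the table initialized above (modelled on __ia64_mk_io_addr()
 * in asm/io.h; the details here are illustrative):
 *
 *	static void *mk_io_addr_sketch(unsigned long port)
 *	{
 *		struct io_space *space = &io_space[IO_SPACE_NR(port)];
 *		unsigned long off = IO_SPACE_PORT(port);
 *
 *		if (space->sparse)	// the legacy space set up above
 *			off = ((off >> 2) << 12) | (off & 0xfff);
 *		return (void *) (space->mmio_base | off);
 *	}
 */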

/**
 * early_console_setup - setup debugging console
 *
 * Consoles started here require little enough setup that we can start using
 * them very early in the boot process, either right after the machine
 * vector initialization, or even before if the drivers can detect their hw.
 *
 * Returns non-zero if a console couldn't be set up.
 */
static inline int __init
early_console_setup (char *cmdline)
{
#ifdef CONFIG_EFI_PCDP
	if (!efi_setup_pcdp_console(cmdline))
		return 0;
#endif
	return -1;
}

static inline void
mark_bsp_online (void)
{
#ifdef CONFIG_SMP
	/* If we register an early console, allow CPU 0 to printk */
	set_cpu_online(smp_processor_id(), true);
#endif
}

static __initdata int nomca;
static __init int setup_nomca(char *s)
{
	nomca = 1;
	return 0;
}
early_param("nomca", setup_nomca);
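
/*
 * Usage: booting with "nomca" on the kernel command line (e.g.
 * "root=/dev/sda2 nomca") sets the flag above, and setup_arch()
 * then skips ia64_mca_init().
 */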

#ifdef CONFIG_CRASH_DUMP
int __init reserve_elfcorehdr(u64 *start, u64 *end)
{
	u64 length;

	/* We get the address using the kernel command line,
	 * but the size is extracted from the EFI tables.
	 * Both address and size are required for reservation
	 * to work properly.
	 */

	if (!is_vmcore_usable())
		return -EINVAL;

	if ((length = vmcore_find_descriptor_size(elfcorehdr_addr)) == 0) {
		vmcore_unusable();
		return -EINVAL;
	}

	*start = (unsigned long)__va(elfcorehdr_addr);
	*end = *start + length;
	return 0;
}

#endif /* CONFIG_CRASH_DUMP */

void __init
setup_arch (char **cmdline_p)
{
	unw_init();

	ia64_patch_vtop((u64) __start___vtop_patchlist, (u64) __end___vtop_patchlist);

	*cmdline_p = __va(ia64_boot_param->command_line);
	strlcpy(boot_command_line, *cmdline_p, COMMAND_LINE_SIZE);

	efi_init();
	io_port_init();

#ifdef CONFIG_IA64_GENERIC
	/* machvec needs to be parsed from the command line
	 * before parse_early_param() is called, to ensure
	 * that ia64_mv is initialised before any command-line
	 * settings can trigger console setup.
	 */
	machvec_init_from_cmdline(*cmdline_p);
#endif

	parse_early_param();

	if (early_console_setup(*cmdline_p) == 0)
		mark_bsp_online();

	/* Initialize the ACPI boot-time table parser */
	acpi_table_init();
	early_acpi_boot_init();
#ifdef CONFIG_ACPI_NUMA
	acpi_numa_init();
	acpi_numa_fixup();
#ifdef CONFIG_ACPI_HOTPLUG_CPU
	prefill_possible_map();
#endif
	per_cpu_scan_finalize((cpumask_weight(&early_cpu_possible_map) == 0 ?
		32 : cpumask_weight(&early_cpu_possible_map)),
		additional_cpus > 0 ? additional_cpus : 0);
#endif /* CONFIG_ACPI_NUMA */

#ifdef CONFIG_SMP
	smp_build_cpu_map();
#endif
	find_memory();

	/* process SAL system table: */
	ia64_sal_init(__va(efi.sal_systab));

#ifdef CONFIG_ITANIUM
	ia64_patch_rse((u64) __start___rse_patchlist, (u64) __end___rse_patchlist);
#else
	{
		unsigned long num_phys_stacked;

		if (ia64_pal_rse_info(&num_phys_stacked, 0) == 0 && num_phys_stacked > 96)
			ia64_patch_rse((u64) __start___rse_patchlist, (u64) __end___rse_patchlist);
	}
#endif

#ifdef CONFIG_SMP
	cpu_physical_id(0) = hard_smp_processor_id();
#endif

	cpu_init();	/* initialize the bootstrap CPU */
	mmu_context_init();	/* initialize context_id bitmap */

#ifdef CONFIG_VT
	if (!conswitchp) {
# if defined(CONFIG_DUMMY_CONSOLE)
		conswitchp = &dummy_con;
# endif
# if defined(CONFIG_VGA_CONSOLE)
		/*
		 * Non-legacy systems may route legacy VGA MMIO range to system
		 * memory.  vga_con probes the MMIO hole, so memory looks like
		 * a VGA device to it.  The EFI memory map can tell us if it's
		 * memory so we can avoid this problem.
		 */
		if (efi_mem_type(0xA0000) != EFI_CONVENTIONAL_MEMORY)
			conswitchp = &vga_con;
# endif
	}
#endif

	/* enable IA-64 Machine Check Abort Handling unless disabled */
	if (!nomca)
		ia64_mca_init();

	/*
	 * Default to /dev/sda2.  This assumes that the EFI partition
	 * is physical disk 1 partition 1 and the Linux root disk is
	 * physical disk 1 partition 2.
	 */
	ROOT_DEV = Root_SDA2;		/* default to second partition on first drive */

	platform_setup(cmdline_p);
	paging_init();

	clear_sched_clock_stable();
}

/*
 * Display cpu info for all CPUs.
 */
static int
show_cpuinfo (struct seq_file *m, void *v)
{
#ifdef CONFIG_SMP
#	define lpj	c->loops_per_jiffy
#	define cpunum	c->cpu
#else
#	define lpj	loops_per_jiffy
#	define cpunum	0
#endif
	static struct {
		unsigned long mask;
		const char *feature_name;
	} feature_bits[] = {
		{ 1UL << 0, "branchlong" },
		{ 1UL << 1, "spontaneous deferral" },
		{ 1UL << 2, "16-byte atomic ops" }
	};
	char features[128], *cp, *sep;
	struct cpuinfo_ia64 *c = v;
	unsigned long mask;
	unsigned long proc_freq;
	int i, size;

	mask = c->features;

	/* build the feature string: */
	memcpy(features, "standard", 9);
	cp = features;
	size = sizeof(features);
	sep = "";
	for (i = 0; i < ARRAY_SIZE(feature_bits) && size > 1; ++i) {
		if (mask & feature_bits[i].mask) {
			cp += snprintf(cp, size, "%s%s", sep,
				       feature_bits[i].feature_name);
			sep = ", ";
			mask &= ~feature_bits[i].mask;
			size = sizeof(features) - (cp - features);
		}
	}
	if (mask && size > 1) {
		/* print unknown features as a hex value */
		snprintf(cp, size, "%s0x%lx", sep, mask);
	}

	proc_freq = cpufreq_quick_get(cpunum);
	if (!proc_freq)
		proc_freq = c->proc_freq / 1000;

	seq_printf(m,
		   "processor  : %d\n"
		   "vendor     : %s\n"
		   "arch       : IA-64\n"
		   "family     : %u\n"
		   "model      : %u\n"
		   "model name : %s\n"
		   "revision   : %u\n"
		   "archrev    : %u\n"
		   "features   : %s\n"
		   "cpu number : %lu\n"
		   "cpu regs   : %u\n"
		   "cpu MHz    : %lu.%03lu\n"
		   "itc MHz    : %lu.%06lu\n"
		   "BogoMIPS   : %lu.%02lu\n",
		   cpunum, c->vendor, c->family, c->model,
		   c->model_name, c->revision, c->archrev,
		   features, c->ppn, c->number,
		   proc_freq / 1000, proc_freq % 1000,
		   c->itc_freq / 1000000, c->itc_freq % 1000000,
		   lpj*HZ/500000, (lpj*HZ/5000) % 100);
#ifdef CONFIG_SMP
	seq_printf(m, "siblings   : %u\n",
		   cpumask_weight(&cpu_core_map[cpunum]));
	if (c->socket_id != -1)
		seq_printf(m, "physical id: %u\n", c->socket_id);
	if (c->threads_per_core > 1 || c->cores_per_socket > 1)
		seq_printf(m,
			   "core id    : %u\n"
			   "thread id  : %u\n",
			   c->core_id, c->thread_id);
#endif
	seq_printf(m, "\n");

	return 0;
}

static void *
c_start (struct seq_file *m, loff_t *pos)
{
#ifdef CONFIG_SMP
	while (*pos < nr_cpu_ids && !cpu_online(*pos))
		++*pos;
#endif
	return *pos < nr_cpu_ids ? cpu_data(*pos) : NULL;
}

static void *
c_next (struct seq_file *m, void *v, loff_t *pos)
{
	++*pos;
	return c_start(m, pos);
}

static void
c_stop (struct seq_file *m, void *v)
{
}

const struct seq_operations cpuinfo_op = {
	.start =	c_start,
	.next =		c_next,
	.stop =		c_stop,
	.show =		show_cpuinfo
};
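
/*
 * Reading /proc/cpuinfo walks cpuinfo_op over the online CPUs; one
 * entry looks roughly like this (values invented for illustration):
 *
 *	processor  : 0
 *	vendor     : GenuineIntel
 *	arch       : IA-64
 *	family     : 31
 *	model      : 1
 *	model name : Madison
 *	revision   : 5
 *	archrev    : 0
 *	features   : branchlong, 16-byte atomic ops
 *	cpu number : 0
 *	cpu regs   : 4
 *	cpu MHz    : 1500.000
 *	itc MHz    : 1500.000000
 *	BogoMIPS   : 2244.60
 */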

#define MAX_BRANDS	8
static char brandname[MAX_BRANDS][128];

static char *
get_model_name(__u8 family, __u8 model)
{
	static int overflow;
	char brand[128];
	int i;

	memcpy(brand, "Unknown", 8);
	if (ia64_pal_get_brand_info(brand)) {
		if (family == 0x7)
			memcpy(brand, "Merced", 7);
		else if (family == 0x1f) switch (model) {
			case 0: memcpy(brand, "McKinley", 9); break;
			case 1: memcpy(brand, "Madison", 8); break;
			case 2: memcpy(brand, "Madison up to 9M cache", 23); break;
		}
	}
	for (i = 0; i < MAX_BRANDS; i++)
		if (strcmp(brandname[i], brand) == 0)
			return brandname[i];
	for (i = 0; i < MAX_BRANDS; i++)
		if (brandname[i][0] == '\0')
			return strcpy(brandname[i], brand);
	if (overflow++ == 0)
		printk(KERN_ERR
		       "%s: Table overflow. Some processor model information will be missing\n",
		       __func__);
	return "Unknown";
}

static void
identify_cpu (struct cpuinfo_ia64 *c)
{
	union {
		unsigned long bits[5];
		struct {
			/* id 0 & 1: */
			char vendor[16];

			/* id 2 */
			u64 ppn;		/* processor serial number */

			/* id 3: */
			unsigned number		:  8;
			unsigned revision	:  8;
			unsigned model		:  8;
			unsigned family		:  8;
			unsigned archrev	:  8;
			unsigned reserved	: 24;

			/* id 4: */
			u64 features;
		} field;
	} cpuid;
	pal_vm_info_1_u_t vm1;
	pal_vm_info_2_u_t vm2;
	pal_status_t status;
	unsigned long impl_va_msb = 50, phys_addr_size = 44;	/* Itanium defaults */
	int i;

	for (i = 0; i < 5; ++i)
		cpuid.bits[i] = ia64_get_cpuid(i);

	memcpy(c->vendor, cpuid.field.vendor, 16);
#ifdef CONFIG_SMP
	c->cpu = smp_processor_id();

	/* The default values below will be overwritten by identify_siblings()
	 * for Multi-Threading/Multi-Core capable CPUs
	 */
	c->threads_per_core = c->cores_per_socket = c->num_log = 1;
	c->socket_id = -1;

	identify_siblings(c);

	if (c->threads_per_core > smp_num_siblings)
		smp_num_siblings = c->threads_per_core;
#endif
	c->ppn = cpuid.field.ppn;
	c->number = cpuid.field.number;
	c->revision = cpuid.field.revision;
	c->model = cpuid.field.model;
	c->family = cpuid.field.family;
	c->archrev = cpuid.field.archrev;
	c->features = cpuid.field.features;
	c->model_name = get_model_name(c->family, c->model);

	status = ia64_pal_vm_summary(&vm1, &vm2);
	if (status == PAL_STATUS_SUCCESS) {
		impl_va_msb = vm2.pal_vm_info_2_s.impl_va_msb;
		phys_addr_size = vm1.pal_vm_info_1_s.phys_add_size;
	}
	c->unimpl_va_mask = ~((7L<<61) | ((1L << (impl_va_msb + 1)) - 1));
	c->unimpl_pa_mask = ~((1L<<63) | ((1L << phys_addr_size) - 1));
}
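
/*
 * Worked example for the mask arithmetic above: with the Itanium
 * defaults impl_va_msb = 50 and phys_addr_size = 44,
 *
 *	unimpl_va_mask = ~((7L << 61) | ((1L << 51) - 1))
 *
 * clears the region-number bits 63-61 and the implemented virtual
 * bits 50-0, leaving exactly the unimplemented hole, bits 60-51, set;
 * unimpl_pa_mask likewise ends up with bits 62-44 set.
 */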

/*
 * Do the following calculations:
 *
 * 1. the max. cache line size.
 * 2. the minimum of the i-cache stride sizes for "flush_icache_range()".
 * 3. the minimum of the cache stride sizes for "clflush_cache_range()".
 */
static void
get_cache_info(void)
{
	unsigned long line_size, max = 1;
	unsigned long l, levels, unique_caches;
	pal_cache_config_info_t cci;
	long status;

	status = ia64_pal_cache_summary(&levels, &unique_caches);
	if (status != 0) {
		printk(KERN_ERR "%s: ia64_pal_cache_summary() failed (status=%ld)\n",
		       __func__, status);
		max = SMP_CACHE_BYTES;
		/* Safest setup for "flush_icache_range()" */
		ia64_i_cache_stride_shift = I_CACHE_STRIDE_SHIFT;
		/* Safest setup for "clflush_cache_range()" */
		ia64_cache_stride_shift = CACHE_STRIDE_SHIFT;
		goto out;
	}

	for (l = 0; l < levels; ++l) {
		/* cache_type (data_or_unified) = 2 */
		status = ia64_pal_cache_config_info(l, 2, &cci);
		if (status != 0) {
			printk(KERN_ERR "%s: ia64_pal_cache_config_info"
				"(l=%lu, 2) failed (status=%ld)\n",
				__func__, l, status);
			max = SMP_CACHE_BYTES;
			/* The safest setup for "flush_icache_range()" */
			cci.pcci_stride = I_CACHE_STRIDE_SHIFT;
			/* The safest setup for "clflush_cache_range()" */
			ia64_cache_stride_shift = CACHE_STRIDE_SHIFT;
			cci.pcci_unified = 1;
		} else {
			if (cci.pcci_stride < ia64_cache_stride_shift)
				ia64_cache_stride_shift = cci.pcci_stride;

			line_size = 1 << cci.pcci_line_size;
			if (line_size > max)
				max = line_size;
		}

		if (!cci.pcci_unified) {
			/* cache_type (instruction) = 1 */
			status = ia64_pal_cache_config_info(l, 1, &cci);
			if (status != 0) {
				printk(KERN_ERR "%s: ia64_pal_cache_config_info"
					"(l=%lu, 1) failed (status=%ld)\n",
					__func__, l, status);
				/* The safest setup for "flush_icache_range()" */
				cci.pcci_stride = I_CACHE_STRIDE_SHIFT;
			}
		}
		if (cci.pcci_stride < ia64_i_cache_stride_shift)
			ia64_i_cache_stride_shift = cci.pcci_stride;
	}
out:
	if (max > ia64_max_cacheline_size)
		ia64_max_cacheline_size = max;
}

/*
 * cpu_init() initializes state that is per-CPU.  This function acts
 * as a 'CPU state barrier', nothing should get across.
 */
void
cpu_init (void)
{
	extern void ia64_mmu_init(void *);
	static unsigned long max_num_phys_stacked = IA64_NUM_PHYS_STACK_REG;
	unsigned long num_phys_stacked;
	pal_vm_info_2_u_t vmi;
	unsigned int max_ctx;
	struct cpuinfo_ia64 *cpu_info;
	void *cpu_data;

	cpu_data = per_cpu_init();
#ifdef CONFIG_SMP
	/*
	 * insert boot cpu into sibling and core maps
	 * (must be done after per_cpu area is set up)
	 */
	if (smp_processor_id() == 0) {
		cpumask_set_cpu(0, &per_cpu(cpu_sibling_map, 0));
		cpumask_set_cpu(0, &cpu_core_map[0]);
	} else {
		/*
		 * Set ar.k3 so that assembly code in MCA handler can compute
		 * physical addresses of per cpu variables with a simple:
		 *   phys = ar.k3 + &per_cpu_var
		 * and the alt-dtlb-miss handler can set per-cpu mapping into
		 * the TLB when needed. head.S already did this for cpu0.
		 */
		ia64_set_kr(IA64_KR_PER_CPU_DATA,
			    ia64_tpa(cpu_data) - (long) __per_cpu_start);
	}
#endif

	get_cache_info();

	/*
	 * We can't pass "local_cpu_data" to identify_cpu() because we haven't called
	 * ia64_mmu_init() yet.  And we can't call ia64_mmu_init() first because it
	 * depends on the data returned by identify_cpu().  We break the dependency by
	 * accessing cpu_data() through the canonical per-CPU address.
	 */
	cpu_info = cpu_data + ((char *) &__ia64_per_cpu_var(ia64_cpu_info) - __per_cpu_start);
	identify_cpu(cpu_info);

#ifdef CONFIG_MCKINLEY
	{
#		define FEATURE_SET 16
		struct ia64_pal_retval iprv;

		if (cpu_info->family == 0x1f) {
			PAL_CALL_PHYS(iprv, PAL_PROC_GET_FEATURES, 0, FEATURE_SET, 0);
			if ((iprv.status == 0) && (iprv.v0 & 0x80) && (iprv.v2 & 0x80))
				PAL_CALL_PHYS(iprv, PAL_PROC_SET_FEATURES,
				              (iprv.v1 | 0x80), FEATURE_SET, 0);
		}
	}
#endif

	/* Clear the stack memory reserved for pt_regs: */
	memset(task_pt_regs(current), 0, sizeof(struct pt_regs));

	ia64_set_kr(IA64_KR_FPU_OWNER, 0);

	/*
	 * Initialize the page-table base register to a global
	 * directory with all zeroes.  This ensures that we can handle
	 * TLB-misses to user address-space even before we have created
	 * the first user address-space.  This may happen, e.g., due to
	 * aggressive use of lfetch.fault.
	 */
	ia64_set_kr(IA64_KR_PT_BASE, __pa(ia64_imva(empty_zero_page)));

	/*
	 * Initialize default control register to defer speculative faults except
	 * for those arising from TLB misses, which are not deferred.  The
	 * kernel MUST NOT depend on a particular setting of these bits (in other words,
	 * the kernel must have recovery code for all speculative accesses).  Turn on
	 * dcr.lc as per recommendation by the architecture team.  Most IA-32 apps
	 * shouldn't be affected by this (moral: keep your ia32 locks aligned and you'll
	 * be fine).
	 */
	ia64_setreg(_IA64_REG_CR_DCR,  (  IA64_DCR_DP | IA64_DCR_DK | IA64_DCR_DX | IA64_DCR_DR
					| IA64_DCR_DA | IA64_DCR_DD | IA64_DCR_LC));
	mmgrab(&init_mm);
	current->active_mm = &init_mm;
	BUG_ON(current->mm);

	ia64_mmu_init(ia64_imva(cpu_data));
	ia64_mca_cpu_init(ia64_imva(cpu_data));

	/* Clear ITC to eliminate sched_clock() overflows in human time.  */
	ia64_set_itc(0);

	/* disable all local interrupt sources: */
	ia64_set_itv(1 << 16);
	ia64_set_lrr0(1 << 16);
	ia64_set_lrr1(1 << 16);
	ia64_setreg(_IA64_REG_CR_PMV, 1 << 16);
	ia64_setreg(_IA64_REG_CR_CMCV, 1 << 16);

	/* clear TPR & XTP to enable all interrupt classes: */
	ia64_setreg(_IA64_REG_CR_TPR, 0);

	/* Clear any pending interrupts left by SAL/EFI */
	while (ia64_get_ivr() != IA64_SPURIOUS_INT_VECTOR)
		ia64_eoi();

#ifdef CONFIG_SMP
	normal_xtp();
#endif

	/* set ia64_ctx.max_ctx to the maximum context ID supported by all CPUs: */
	if (ia64_pal_vm_summary(NULL, &vmi) == 0) {
		max_ctx = (1U << (vmi.pal_vm_info_2_s.rid_size - 3)) - 1;
		setup_ptcg_sem(vmi.pal_vm_info_2_s.max_purges, NPTCG_FROM_PAL);
	} else {
		printk(KERN_WARNING "cpu_init: PAL VM summary failed, assuming 18 RID bits\n");
		max_ctx = (1U << 15) - 1;	/* use architected minimum */
	}
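	/*
	 * Worked example: reload_context() forms a region ID as
	 * (context << 3) | region_number, hence the "- 3" above.  With
	 * the architected minimum of 18 RID bits, max_ctx becomes
	 * (1 << 15) - 1 = 32767, matching the fallback; a CPU reporting
	 * rid_size = 24 would allow (1 << 21) - 1.
	 */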
	while (max_ctx < ia64_ctx.max_ctx) {
		unsigned int old = ia64_ctx.max_ctx;
		if (cmpxchg(&ia64_ctx.max_ctx, old, max_ctx) == old)
			break;
	}

	if (ia64_pal_rse_info(&num_phys_stacked, NULL) != 0) {
		printk(KERN_WARNING "cpu_init: PAL RSE info failed; assuming 96 physical "
		       "stacked regs\n");
		num_phys_stacked = 96;
	}
	/* size of physical stacked register partition plus 8 bytes: */
	if (num_phys_stacked > max_num_phys_stacked) {
		ia64_patch_phys_stack_reg(num_phys_stacked*8 + 8);
		max_num_phys_stacked = num_phys_stacked;
	}
}

void __init
check_bugs (void)
{
	ia64_patch_mckinley_e9((unsigned long) __start___mckinley_e9_bundles,
			       (unsigned long) __end___mckinley_e9_bundles);
}

static int __init run_dmi_scan(void)
{
	dmi_setup();
	return 0;
}
core_initcall(run_dmi_scan);
1055