xref: /openbmc/linux/arch/ia64/kernel/setup.c (revision bcf5111a)
/*
 * Architecture-specific setup.
 *
 * Copyright (C) 1998-2001, 2003-2004 Hewlett-Packard Co
 *	David Mosberger-Tang <davidm@hpl.hp.com>
 *	Stephane Eranian <eranian@hpl.hp.com>
 * Copyright (C) 2000, 2004 Intel Corp
 *	Rohit Seth <rohit.seth@intel.com>
 *	Suresh Siddha <suresh.b.siddha@intel.com>
 *	Gordon Jin <gordon.jin@intel.com>
 * Copyright (C) 1999 VA Linux Systems
 * Copyright (C) 1999 Walt Drummond <drummond@valinux.com>
 *
 * 12/26/04 S.Siddha, G.Jin, R.Seth
 *			Add multi-threading and multi-core detection
 * 11/12/01 D.Mosberger Convert get_cpuinfo() to seq_file based show_cpuinfo().
 * 04/04/00 D.Mosberger renamed cpu_initialized to cpu_online_map
 * 03/31/00 R.Seth	cpu_initialized and current->processor fixes
 * 02/04/00 D.Mosberger	some more get_cpuinfo fixes...
 * 02/01/00 R.Seth	fixed get_cpuinfo for SMP
 * 01/07/99 S.Eranian	added support for command line arguments
 * 06/24/99 W.Drummond	added boot_cpu_data.
 * 05/28/05 Z. Menyhart	Dynamic stride size for "flush_icache_range()"
 */
#include <linux/module.h>
#include <linux/init.h>

#include <linux/acpi.h>
#include <linux/bootmem.h>
#include <linux/console.h>
#include <linux/delay.h>
#include <linux/kernel.h>
#include <linux/reboot.h>
#include <linux/sched.h>
#include <linux/seq_file.h>
#include <linux/string.h>
#include <linux/threads.h>
#include <linux/screen_info.h>
#include <linux/dmi.h>
#include <linux/serial.h>
#include <linux/serial_core.h>
#include <linux/efi.h>
#include <linux/initrd.h>
#include <linux/pm.h>
#include <linux/cpufreq.h>

#include <asm/ia32.h>
#include <asm/machvec.h>
#include <asm/mca.h>
#include <asm/meminit.h>
#include <asm/page.h>
#include <asm/patch.h>
#include <asm/pgtable.h>
#include <asm/processor.h>
#include <asm/sal.h>
#include <asm/sections.h>
#include <asm/setup.h>
#include <asm/smp.h>
#include <asm/system.h>
#include <asm/unistd.h>

#if defined(CONFIG_SMP) && (IA64_CPU_SIZE > PAGE_SIZE)
# error "struct cpuinfo_ia64 too big!"
#endif

#ifdef CONFIG_SMP
unsigned long __per_cpu_offset[NR_CPUS];
EXPORT_SYMBOL(__per_cpu_offset);
#endif

extern void ia64_setup_printk_clock(void);

DEFINE_PER_CPU(struct cpuinfo_ia64, cpu_info);
DEFINE_PER_CPU(unsigned long, local_per_cpu_offset);
DEFINE_PER_CPU(unsigned long, ia64_phys_stacked_size_p8);
unsigned long ia64_cycles_per_usec;
struct ia64_boot_param *ia64_boot_param;
struct screen_info screen_info;
unsigned long vga_console_iobase;
unsigned long vga_console_membase;

static struct resource data_resource = {
	.name	= "Kernel data",
	.flags	= IORESOURCE_BUSY | IORESOURCE_MEM
};

static struct resource code_resource = {
	.name	= "Kernel code",
	.flags	= IORESOURCE_BUSY | IORESOURCE_MEM
};
extern void efi_initialize_iomem_resources(struct resource *,
		struct resource *);
extern char _text[], _end[], _etext[];

unsigned long ia64_max_cacheline_size;

int dma_get_cache_alignment(void)
{
	return ia64_max_cacheline_size;
}
EXPORT_SYMBOL(dma_get_cache_alignment);

unsigned long ia64_iobase;	/* virtual address for I/O accesses */
EXPORT_SYMBOL(ia64_iobase);
struct io_space io_space[MAX_IO_SPACES];
EXPORT_SYMBOL(io_space);
unsigned int num_io_spaces;

/*
 * "flush_icache_range()" needs to know the processor-dependent stride size to
 * use when it makes i-cache(s) coherent with d-caches.
 */
#define	I_CACHE_STRIDE_SHIFT	5	/* Safest way to go: 32 bytes by 32 bytes */
unsigned long ia64_i_cache_stride_shift = ~0;

/*
 * The merge_mask variable needs to be set to (max(iommu_page_size(iommu)) - 1).
 * This mask specifies the address bits that must be zero in order for two
 * buffers to be mergeable by the I/O MMU (i.e., the end address of the first
 * buffer and the start address of the second buffer must be aligned to
 * (merge_mask+1) in order to be mergeable).  By default, we assume there is no
 * I/O MMU which can merge physically discontiguous buffers, so we set the
 * merge_mask to ~0UL, which corresponds to an IOMMU page size of 2^64.
 */
unsigned long ia64_max_iommu_merge_mask = ~0UL;
EXPORT_SYMBOL(ia64_max_iommu_merge_mask);

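/*
 * Worked example (illustrative only, not used by this kernel): if a
 * platform's IOMMU used 4 KiB pages, its setup code would set
 * ia64_max_iommu_merge_mask to 0xfffUL (4 KiB - 1).  A scatterlist entry
 * ending at physical 0x2000 and one starting at physical 0x5000 could then
 * be merged, since both boundaries are 4 KiB-aligned; with the ~0UL default
 * no pair of physically discontiguous buffers is ever merged.
 */
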
/*
 * A special marker for the end of memory uses the extra (+1) slot.
 */
struct rsvd_region rsvd_region[IA64_MAX_RSVD_REGIONS + 1] __initdata;
int num_rsvd_regions __initdata;


/*
 * Filter incoming memory segments based on the primitive map created from the
 * boot parameters.  Segments contained in the map are removed from the memory
 * ranges.  A caller-specified function is called with the memory ranges that
 * remain after filtering.  This routine does not assume the incoming segments
 * are sorted.
 */
int __init
filter_rsvd_memory (unsigned long start, unsigned long end, void *arg)
{
	unsigned long range_start, range_end, prev_start;
	void (*func)(unsigned long, unsigned long, int);
	int i;

#if IGNORE_PFN0
	if (start == PAGE_OFFSET) {
		printk(KERN_WARNING "warning: skipping physical page 0\n");
		start += PAGE_SIZE;
		if (start >= end) return 0;
	}
#endif
	/*
	 * lowest possible address (walker uses virtual)
	 */
	prev_start = PAGE_OFFSET;
	func = arg;

	for (i = 0; i < num_rsvd_regions; ++i) {
		range_start = max(start, prev_start);
		range_end   = min(end, rsvd_region[i].start);

		if (range_start < range_end)
			call_pernode_memory(__pa(range_start), range_end - range_start, func);

		/* nothing more available in this segment */
		if (range_end == end) return 0;

		prev_start = rsvd_region[i].end;
	}
	/* end of memory marker allows full processing inside loop body */
	return 0;
}

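/*
 * Sketch of the filtering with made-up values: given one reserved region
 * [0x100000, 0x200000) (PAGE_OFFSET-relative) plus the ~0UL end-of-memory
 * marker, an incoming segment [0x0, 0x400000) results in two calls:
 *
 *	call_pernode_memory(__pa(0x0),      0x100000, func);
 *	call_pernode_memory(__pa(0x200000), 0x200000, func);
 *
 * The sentinel entry makes the trailing range be handled by an ordinary
 * loop iteration, so no special tail code is needed.
 */
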
static void __init
sort_regions (struct rsvd_region *rsvd_region, int max)
{
	int j;

	/* simple bubble sorting */
	while (max--) {
		for (j = 0; j < max; ++j) {
			if (rsvd_region[j].start > rsvd_region[j+1].start) {
				struct rsvd_region tmp;
				tmp = rsvd_region[j];
				rsvd_region[j] = rsvd_region[j + 1];
				rsvd_region[j + 1] = tmp;
			}
		}
	}
}

/*
 * Request address space for all standard resources
 */
static int __init register_memory(void)
{
	code_resource.start = ia64_tpa(_text);
	code_resource.end   = ia64_tpa(_etext) - 1;
	data_resource.start = ia64_tpa(_etext);
	data_resource.end   = ia64_tpa(_end) - 1;
	efi_initialize_iomem_resources(&code_resource, &data_resource);

	return 0;
}

__initcall(register_memory);

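/*
 * Resulting layout (a sketch with made-up addresses): once EFI has
 * registered its parent "System RAM" resources, these two entries show up
 * nested in /proc/iomem roughly as:
 *
 *	04000000-0fffffff : System RAM
 *	  04400000-045fffff : Kernel code
 *	  04600000-04ffffff : Kernel data
 */
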
/**
 * reserve_memory - setup reserved memory areas
 *
 * Set up the reserved memory areas set aside for the boot parameters,
 * initrd, etc.  There are currently %IA64_MAX_RSVD_REGIONS defined,
 * see include/asm-ia64/meminit.h if you need to define more.
 */
void __init
reserve_memory (void)
{
	int n = 0;

	/*
	 * none of the entries in this table overlap
	 */
	rsvd_region[n].start = (unsigned long) ia64_boot_param;
	rsvd_region[n].end   = rsvd_region[n].start + sizeof(*ia64_boot_param);
	n++;

	rsvd_region[n].start = (unsigned long) __va(ia64_boot_param->efi_memmap);
	rsvd_region[n].end   = rsvd_region[n].start + ia64_boot_param->efi_memmap_size;
	n++;

	rsvd_region[n].start = (unsigned long) __va(ia64_boot_param->command_line);
	rsvd_region[n].end   = (rsvd_region[n].start
				+ strlen(__va(ia64_boot_param->command_line)) + 1);
	n++;

	rsvd_region[n].start = (unsigned long) ia64_imva((void *)KERNEL_START);
	rsvd_region[n].end   = (unsigned long) ia64_imva(_end);
	n++;

#ifdef CONFIG_BLK_DEV_INITRD
	if (ia64_boot_param->initrd_start) {
		rsvd_region[n].start = (unsigned long)__va(ia64_boot_param->initrd_start);
		rsvd_region[n].end   = rsvd_region[n].start + ia64_boot_param->initrd_size;
		n++;
	}
#endif

	efi_memmap_init(&rsvd_region[n].start, &rsvd_region[n].end);
	n++;

	/* end of memory marker */
	rsvd_region[n].start = ~0UL;
	rsvd_region[n].end   = ~0UL;
	n++;

	num_rsvd_regions = n;
	BUG_ON(IA64_MAX_RSVD_REGIONS + 1 < n);

	sort_regions(rsvd_region, num_rsvd_regions);
}

/**
 * find_initrd - get initrd parameters from the boot parameter structure
 *
 * Grab the initrd start and end from the boot parameter struct given us by
 * the boot loader.
 */
void __init
find_initrd (void)
{
#ifdef CONFIG_BLK_DEV_INITRD
	if (ia64_boot_param->initrd_start) {
		initrd_start = (unsigned long)__va(ia64_boot_param->initrd_start);
		initrd_end   = initrd_start + ia64_boot_param->initrd_size;

		printk(KERN_INFO "Initial ramdisk at: 0x%lx (%lu bytes)\n",
		       initrd_start, ia64_boot_param->initrd_size);
	}
#endif
}

static void __init
io_port_init (void)
{
	unsigned long phys_iobase;

	/*
	 * Set `iobase' based on the EFI memory map or, failing that, the
	 * value firmware left in ar.k0.
	 *
	 * Note that in ia32 mode, IN/OUT instructions use ar.k0 to compute
	 * the port's virtual address, so ia32_load_state() loads it with a
	 * user virtual address.  But in ia64 mode, glibc uses the
	 * *physical* address in ar.k0 to mmap the appropriate area from
	 * /dev/mem, and the inX()/outX() interfaces use MMIO.  In both
	 * cases, user-mode can only use the legacy 0-64K I/O port space.
	 *
	 * ar.k0 is not involved in kernel I/O port accesses, which can use
	 * any of the I/O port spaces and are done via MMIO using the
	 * virtual mmio_base from the appropriate io_space[].
	 */
	phys_iobase = efi_get_iobase();
	if (!phys_iobase) {
		phys_iobase = ia64_get_kr(IA64_KR_IO_BASE);
		printk(KERN_INFO "No I/O port range found in EFI memory map, "
			"falling back to AR.KR0 (0x%lx)\n", phys_iobase);
	}
	ia64_iobase = (unsigned long) ioremap(phys_iobase, 0);
	ia64_set_kr(IA64_KR_IO_BASE, __pa(ia64_iobase));

	/* setup legacy IO port space */
	io_space[0].mmio_base = ia64_iobase;
	io_space[0].sparse = 1;
	num_io_spaces = 1;
}

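/*
 * For reference (a sketch; the authoritative macros live in
 * include/asm-ia64/io.h): since io_space[0] is sparse, the kernel's
 * inX()/outX() accessors turn a legacy port number into an MMIO address
 * roughly as
 *
 *	offset = ((port >> 2) << 12) | (port & 0xfff);
 *	addr   = io_space[0].mmio_base | offset;
 *
 * so every 4-byte chunk of port space gets its own 4 KiB page inside the
 * window mapped above.
 */
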
/**
 * early_console_setup - setup debugging console
 *
 * Consoles started here require little enough setup that we can start using
 * them very early in the boot process, either right after the machine
 * vector initialization, or even before if the drivers can detect their hw.
 *
 * Returns non-zero if a console couldn't be set up.
 */
static inline int __init
early_console_setup (char *cmdline)
{
	int earlycons = 0;

#ifdef CONFIG_SERIAL_SGI_L1_CONSOLE
	{
		extern int sn_serial_console_early_setup(void);
		if (!sn_serial_console_early_setup())
			earlycons++;
	}
#endif
#ifdef CONFIG_EFI_PCDP
	if (!efi_setup_pcdp_console(cmdline))
		earlycons++;
#endif
#ifdef CONFIG_SERIAL_8250_CONSOLE
	if (!early_serial_console_init(cmdline))
		earlycons++;
#endif

	return (earlycons) ? 0 : -1;
}

static inline void
mark_bsp_online (void)
{
#ifdef CONFIG_SMP
	/* If we register an early console, allow CPU 0 to printk */
	cpu_set(smp_processor_id(), cpu_online_map);
#endif
}

#ifdef CONFIG_SMP
static void __init
check_for_logical_procs (void)
{
	pal_logical_to_physical_t info;
	s64 status;

	status = ia64_pal_logical_to_phys(0, &info);
	if (status == -1) {
		printk(KERN_INFO "No logical to physical processor mapping "
		       "available\n");
		return;
	}
	if (status) {
		printk(KERN_ERR "ia64_pal_logical_to_phys failed with %ld\n",
		       status);
		return;
	}
	/*
	 * Total number of siblings that the BSP has.  Not all of them
	 * may have booted successfully; the number that actually booted
	 * is in info.overview_num_log.
	 */
	smp_num_siblings = info.overview_tpc;
	smp_num_cpucores = info.overview_cpp;
}
#endif

static __initdata int nomca;
static __init int setup_nomca(char *s)
{
	nomca = 1;
	return 0;
}
early_param("nomca", setup_nomca);

void __init
setup_arch (char **cmdline_p)
{
	unw_init();

	ia64_patch_vtop((u64) __start___vtop_patchlist, (u64) __end___vtop_patchlist);

	*cmdline_p = __va(ia64_boot_param->command_line);
	strlcpy(saved_command_line, *cmdline_p, COMMAND_LINE_SIZE);

	efi_init();
	io_port_init();

	parse_early_param();

#ifdef CONFIG_IA64_GENERIC
	machvec_init(NULL);
#endif

	if (early_console_setup(*cmdline_p) == 0)
		mark_bsp_online();

#ifdef CONFIG_ACPI
	/* Initialize the ACPI boot-time table parser */
	acpi_table_init();
# ifdef CONFIG_ACPI_NUMA
	acpi_numa_init();
# endif
#else
# ifdef CONFIG_SMP
	smp_build_cpu_map();	/* happens, e.g., with the Ski simulator */
# endif
#endif /* CONFIG_ACPI */

	find_memory();

	/* process SAL system table: */
	ia64_sal_init(__va(efi.sal_systab));

	ia64_setup_printk_clock();

#ifdef CONFIG_SMP
	cpu_physical_id(0) = hard_smp_processor_id();

	cpu_set(0, cpu_sibling_map[0]);
	cpu_set(0, cpu_core_map[0]);

	check_for_logical_procs();
	if (smp_num_cpucores > 1)
		printk(KERN_INFO
		       "cpu package is Multi-Core capable: number of cores=%d\n",
		       smp_num_cpucores);
	if (smp_num_siblings > 1)
		printk(KERN_INFO
		       "cpu package is Multi-Threading capable: number of siblings=%d\n",
		       smp_num_siblings);
#endif

	cpu_init();	/* initialize the bootstrap CPU */
	mmu_context_init();	/* initialize context_id bitmap */

#ifdef CONFIG_ACPI
	acpi_boot_init();
#endif

#ifdef CONFIG_VT
	if (!conswitchp) {
# if defined(CONFIG_DUMMY_CONSOLE)
		conswitchp = &dummy_con;
# endif
# if defined(CONFIG_VGA_CONSOLE)
		/*
		 * Non-legacy systems may route legacy VGA MMIO range to system
		 * memory.  vga_con probes the MMIO hole, so memory looks like
		 * a VGA device to it.  The EFI memory map can tell us if it's
		 * memory so we can avoid this problem.
		 */
		if (efi_mem_type(0xA0000) != EFI_CONVENTIONAL_MEMORY)
			conswitchp = &vga_con;
# endif
	}
#endif

	/* enable IA-64 Machine Check Abort Handling unless disabled */
	if (!nomca)
		ia64_mca_init();

	platform_setup(cmdline_p);
	paging_init();
}

/*
 * Display cpu info for all CPUs.
 */
static int
show_cpuinfo (struct seq_file *m, void *v)
{
#ifdef CONFIG_SMP
#	define lpj	c->loops_per_jiffy
#	define cpunum	c->cpu
#else
#	define lpj	loops_per_jiffy
#	define cpunum	0
#endif
	static struct {
		unsigned long mask;
		const char *feature_name;
	} feature_bits[] = {
		{ 1UL << 0, "branchlong" },
		{ 1UL << 1, "spontaneous deferral" },
		{ 1UL << 2, "16-byte atomic ops" }
	};
	char features[128], *cp, sep;
	struct cpuinfo_ia64 *c = v;
	unsigned long mask;
	unsigned long proc_freq;
	int i;

	mask = c->features;

	/* build the feature string: */
	memcpy(features, " standard", 10);
	cp = features;
	sep = 0;
	for (i = 0; i < (int) ARRAY_SIZE(feature_bits); ++i) {
		if (mask & feature_bits[i].mask) {
			if (sep)
				*cp++ = sep;
			sep = ',';
			*cp++ = ' ';
			strcpy(cp, feature_bits[i].feature_name);
			cp += strlen(feature_bits[i].feature_name);
			mask &= ~feature_bits[i].mask;
		}
	}
	if (mask) {
		/* print unknown features as a hex value: */
		if (sep)
			*cp++ = sep;
		sprintf(cp, " 0x%lx", mask);
	}

	proc_freq = cpufreq_quick_get(cpunum);
	if (!proc_freq)
		proc_freq = c->proc_freq / 1000;

	seq_printf(m,
		   "processor  : %d\n"
		   "vendor     : %s\n"
		   "arch       : IA-64\n"
		   "family     : %u\n"
		   "model      : %u\n"
		   "model name : %s\n"
		   "revision   : %u\n"
		   "archrev    : %u\n"
		   "features   :%s\n"	/* don't change this---it _is_ right! */
		   "cpu number : %lu\n"
		   "cpu regs   : %u\n"
		   "cpu MHz    : %lu.%03lu\n"	/* proc_freq is in kHz, so the fraction has 3 digits */
		   "itc MHz    : %lu.%06lu\n"
		   "BogoMIPS   : %lu.%02lu\n",
		   cpunum, c->vendor, c->family, c->model,
		   c->model_name, c->revision, c->archrev,
		   features, c->ppn, c->number,
		   proc_freq / 1000, proc_freq % 1000,
		   c->itc_freq / 1000000, c->itc_freq % 1000000,
		   lpj*HZ/500000, (lpj*HZ/5000) % 100);
#ifdef CONFIG_SMP
	seq_printf(m, "siblings   : %u\n", cpus_weight(cpu_core_map[cpunum]));
	if (c->threads_per_core > 1 || c->cores_per_socket > 1)
		seq_printf(m,
			   "physical id: %u\n"
			   "core id    : %u\n"
			   "thread id  : %u\n",
			   c->socket_id, c->core_id, c->thread_id);
#endif
	seq_printf(m, "\n");

	return 0;
}

static void *
c_start (struct seq_file *m, loff_t *pos)
{
#ifdef CONFIG_SMP
	while (*pos < NR_CPUS && !cpu_isset(*pos, cpu_online_map))
		++*pos;
#endif
	return *pos < NR_CPUS ? cpu_data(*pos) : NULL;
}

static void *
c_next (struct seq_file *m, void *v, loff_t *pos)
{
	++*pos;
	return c_start(m, pos);
}

static void
c_stop (struct seq_file *m, void *v)
{
}

struct seq_operations cpuinfo_op = {
	.start =	c_start,
	.next =		c_next,
	.stop =		c_stop,
	.show =		show_cpuinfo
};

static char brandname[128];

static char * __cpuinit
get_model_name(__u8 family, __u8 model)
{
	char brand[128];

	if (ia64_pal_get_brand_info(brand)) {
		if (family == 0x7)
			memcpy(brand, "Merced", 7);
		else if (family == 0x1f) switch (model) {
			case 0: memcpy(brand, "McKinley", 9); break;
			case 1: memcpy(brand, "Madison", 8); break;
			case 2: memcpy(brand, "Madison up to 9M cache", 23); break;
		} else
			memcpy(brand, "Unknown", 8);
	}
	if (brandname[0] == '\0')
		return strcpy(brandname, brand);
	else if (strcmp(brandname, brand) == 0)
		return brandname;
	else
		return kstrdup(brand, GFP_KERNEL);
}

static void __cpuinit
identify_cpu (struct cpuinfo_ia64 *c)
{
	union {
		unsigned long bits[5];
		struct {
			/* id 0 & 1: */
			char vendor[16];

			/* id 2 */
			u64 ppn;		/* processor serial number */

			/* id 3: */
			unsigned number		:  8;
			unsigned revision	:  8;
			unsigned model		:  8;
			unsigned family		:  8;
			unsigned archrev	:  8;
			unsigned reserved	: 24;

			/* id 4: */
			u64 features;
		} field;
	} cpuid;
	pal_vm_info_1_u_t vm1;
	pal_vm_info_2_u_t vm2;
	pal_status_t status;
	unsigned long impl_va_msb = 50, phys_addr_size = 44;	/* Itanium defaults */
	int i;

	for (i = 0; i < 5; ++i)
		cpuid.bits[i] = ia64_get_cpuid(i);

	memcpy(c->vendor, cpuid.field.vendor, 16);
#ifdef CONFIG_SMP
	c->cpu = smp_processor_id();

	/*
	 * The default values below will be overwritten by identify_siblings()
	 * for Multi-Threading/Multi-Core capable CPUs.
	 */
	c->threads_per_core = c->cores_per_socket = c->num_log = 1;
	c->socket_id = -1;

	identify_siblings(c);
#endif
	c->ppn = cpuid.field.ppn;
	c->number = cpuid.field.number;
	c->revision = cpuid.field.revision;
	c->model = cpuid.field.model;
	c->family = cpuid.field.family;
	c->archrev = cpuid.field.archrev;
	c->features = cpuid.field.features;
	c->model_name = get_model_name(c->family, c->model);

	status = ia64_pal_vm_summary(&vm1, &vm2);
	if (status == PAL_STATUS_SUCCESS) {
		impl_va_msb = vm2.pal_vm_info_2_s.impl_va_msb;
		phys_addr_size = vm1.pal_vm_info_1_s.phys_add_size;
	}
	c->unimpl_va_mask = ~((7L<<61) | ((1L << (impl_va_msb + 1)) - 1));
	c->unimpl_pa_mask = ~((1L<<63) | ((1L << phys_addr_size) - 1));
}

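/*
 * Worked example with the Itanium default (illustrative): for
 * impl_va_msb = 50,
 *
 *	unimpl_va_mask = ~((7L << 61) | ((1L << 51) - 1))
 *	               = 0x1ff8000000000000
 *
 * i.e. virtual-address bits 51..60 are unimplemented: bits 61..63 select
 * the region, bits 0..50 are implemented, and the bits in between must be
 * a sign-extension of bit impl_va_msb for an address to be valid.
 */
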
void
setup_per_cpu_areas (void)
{
	/* start_kernel() requires this... */
#ifdef CONFIG_ACPI_HOTPLUG_CPU
	prefill_possible_map();
#endif
}

/*
 * Calculate the max. cache line size.
 *
 * In addition, the minimum of the i-cache stride sizes is calculated for
 * "flush_icache_range()".
 */
static void __cpuinit
get_max_cacheline_size (void)
{
	unsigned long line_size, max = 1;
	unsigned int cache_size = 0;
	u64 l, levels, unique_caches;
	pal_cache_config_info_t cci;
	s64 status;

	status = ia64_pal_cache_summary(&levels, &unique_caches);
	if (status != 0) {
		printk(KERN_ERR "%s: ia64_pal_cache_summary() failed (status=%ld)\n",
		       __FUNCTION__, status);
		max = SMP_CACHE_BYTES;
		/* Safest setup for "flush_icache_range()" */
		ia64_i_cache_stride_shift = I_CACHE_STRIDE_SHIFT;
		goto out;
	}

	for (l = 0; l < levels; ++l) {
		status = ia64_pal_cache_config_info(l, /* cache_type (data_or_unified)= */ 2,
						    &cci);
		if (status != 0) {
			printk(KERN_ERR
			       "%s: ia64_pal_cache_config_info(l=%lu, 2) failed (status=%ld)\n",
			       __FUNCTION__, l, status);
			max = SMP_CACHE_BYTES;
			/* The safest setup for "flush_icache_range()" */
			cci.pcci_stride = I_CACHE_STRIDE_SHIFT;
			cci.pcci_unified = 1;
		}
		line_size = 1 << cci.pcci_line_size;
		if (line_size > max)
			max = line_size;
		if (cache_size < cci.pcci_cache_size)
			cache_size = cci.pcci_cache_size;
		if (!cci.pcci_unified) {
			status = ia64_pal_cache_config_info(l,
						    /* cache_type (instruction)= */ 1,
						    &cci);
			if (status != 0) {
				printk(KERN_ERR
				       "%s: ia64_pal_cache_config_info(l=%lu, 1) failed (status=%ld)\n",
				       __FUNCTION__, l, status);
				/* The safest setup for "flush_icache_range()" */
				cci.pcci_stride = I_CACHE_STRIDE_SHIFT;
			}
		}
		if (cci.pcci_stride < ia64_i_cache_stride_shift)
			ia64_i_cache_stride_shift = cci.pcci_stride;
	}
out:
#ifdef CONFIG_SMP
	max_cache_size = max(max_cache_size, cache_size);
#endif
	if (max > ia64_max_cacheline_size)
		ia64_max_cacheline_size = max;
}

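/*
 * How the stride is consumed (a C sketch only; the real loop is the
 * hand-written assembly in arch/ia64/lib/flush.S):
 *
 *	unsigned long stride = 1UL << ia64_i_cache_stride_shift;
 *	unsigned long addr;
 *
 *	for (addr = start & ~(stride - 1); addr < end; addr += stride)
 *		asm volatile ("fc %0" :: "r" (addr) : "memory");
 *	... followed by sync.i and srlz.i to serialize the flushes.
 */
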
/*
 * cpu_init() initializes state that is per-CPU.  This function acts
 * as a 'CPU state barrier', nothing should get across.
 */
void __cpuinit
cpu_init (void)
{
	extern void __cpuinit ia64_mmu_init (void *);
	unsigned long num_phys_stacked;
	pal_vm_info_2_u_t vmi;
	unsigned int max_ctx;
	struct cpuinfo_ia64 *cpu_info;
	void *cpu_data;

	cpu_data = per_cpu_init();

	/*
	 * We set ar.k3 so that assembly code in MCA handler can compute
	 * physical addresses of per cpu variables with a simple:
	 *   phys = ar.k3 + &per_cpu_var
	 */
	ia64_set_kr(IA64_KR_PER_CPU_DATA,
		    ia64_tpa(cpu_data) - (long) __per_cpu_start);

	get_max_cacheline_size();

	/*
	 * We can't pass "local_cpu_data" to identify_cpu() because we haven't called
	 * ia64_mmu_init() yet.  And we can't call ia64_mmu_init() first because it
	 * depends on the data returned by identify_cpu().  We break the dependency by
	 * accessing cpu_data() through the canonical per-CPU address.
	 */
	cpu_info = cpu_data + ((char *) &__ia64_per_cpu_var(cpu_info) - __per_cpu_start);
	identify_cpu(cpu_info);

#ifdef CONFIG_MCKINLEY
	{
#		define FEATURE_SET 16
		struct ia64_pal_retval iprv;

		if (cpu_info->family == 0x1f) {
			PAL_CALL_PHYS(iprv, PAL_PROC_GET_FEATURES, 0, FEATURE_SET, 0);
			if ((iprv.status == 0) && (iprv.v0 & 0x80) && (iprv.v2 & 0x80))
				PAL_CALL_PHYS(iprv, PAL_PROC_SET_FEATURES,
				              (iprv.v1 | 0x80), FEATURE_SET, 0);
		}
	}
#endif

	/* Clear the stack memory reserved for pt_regs: */
	memset(task_pt_regs(current), 0, sizeof(struct pt_regs));

	ia64_set_kr(IA64_KR_FPU_OWNER, 0);

	/*
	 * Initialize the page-table base register to a global
	 * directory with all zeroes.  This ensures that we can handle
	 * TLB misses to user address space even before we have created
	 * the first user address space.  This may happen, e.g., due to
	 * aggressive use of lfetch.fault.
	 */
	ia64_set_kr(IA64_KR_PT_BASE, __pa(ia64_imva(empty_zero_page)));

	/*
	 * Initialize default control register to defer speculative faults except
	 * for those arising from TLB misses, which are not deferred.  The
	 * kernel MUST NOT depend on a particular setting of these bits (in other words,
	 * the kernel must have recovery code for all speculative accesses).  Turn on
	 * dcr.lc as per recommendation by the architecture team.  Most IA-32 apps
	 * shouldn't be affected by this (moral: keep your ia32 locks aligned and you'll
	 * be fine).
	 */
	ia64_setreg(_IA64_REG_CR_DCR,  (  IA64_DCR_DP | IA64_DCR_DK | IA64_DCR_DX | IA64_DCR_DR
					| IA64_DCR_DA | IA64_DCR_DD | IA64_DCR_LC));
	atomic_inc(&init_mm.mm_count);
	current->active_mm = &init_mm;
	if (current->mm)
		BUG();

	ia64_mmu_init(ia64_imva(cpu_data));
	ia64_mca_cpu_init(ia64_imva(cpu_data));

#ifdef CONFIG_IA32_SUPPORT
	ia32_cpu_init();
#endif

	/* Clear ITC to eliminate sched_clock() overflows in human time.  */
	ia64_set_itc(0);

	/* disable all local interrupt sources: */
	ia64_set_itv(1 << 16);
	ia64_set_lrr0(1 << 16);
	ia64_set_lrr1(1 << 16);
	ia64_setreg(_IA64_REG_CR_PMV, 1 << 16);
	ia64_setreg(_IA64_REG_CR_CMCV, 1 << 16);

	/* clear TPR & XTP to enable all interrupt classes: */
	ia64_setreg(_IA64_REG_CR_TPR, 0);
#ifdef CONFIG_SMP
	normal_xtp();
#endif

	/* set ia64_ctx.max_rid to the maximum RID that is supported by all CPUs: */
	if (ia64_pal_vm_summary(NULL, &vmi) == 0)
		max_ctx = (1U << (vmi.pal_vm_info_2_s.rid_size - 3)) - 1;
	else {
		printk(KERN_WARNING "cpu_init: PAL VM summary failed, assuming 18 RID bits\n");
		max_ctx = (1U << 15) - 1;	/* use architected minimum */
	}
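	/*
	 * Worked example (illustrative): region IDs are formed as
	 * (context << 3) | region_number, so with rid_size = 24 three bits
	 * go to the region number and max_ctx = (1U << 21) - 1.  The
	 * 18-bit fallback above yields (1U << 15) - 1 the same way.
	 */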
	while (max_ctx < ia64_ctx.max_ctx) {
		unsigned int old = ia64_ctx.max_ctx;
		if (cmpxchg(&ia64_ctx.max_ctx, old, max_ctx) == old)
			break;
	}

	if (ia64_pal_rse_info(&num_phys_stacked, NULL) != 0) {
		printk(KERN_WARNING "cpu_init: PAL RSE info failed; assuming 96 physical "
		       "stacked regs\n");
		num_phys_stacked = 96;
	}
	/* size of physical stacked register partition plus 8 bytes: */
	__get_cpu_var(ia64_phys_stacked_size_p8) = num_phys_stacked*8 + 8;
	platform_cpu_init();
	pm_idle = default_idle;
}

/*
 * On SMP systems, when the scheduler does migration-cost autodetection,
 * it needs a way to flush as much of the CPU's caches as possible.
 */
void sched_cacheflush(void)
{
	ia64_sal_cache_flush(3);
}

void __init
check_bugs (void)
{
	ia64_patch_mckinley_e9((unsigned long) __start___mckinley_e9_bundles,
			       (unsigned long) __end___mckinley_e9_bundles);
}

static int __init run_dmi_scan(void)
{
	dmi_scan_machine();
	return 0;
}
core_initcall(run_dmi_scan);
918